from itertools import chain
from django.conf import settings
from django.contrib.gis.db import models as gis_models
from django.db import models, router, transaction
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from ..fields import CleaningJsonField
from ..validators import DictListValidator, TextField, TimestampField
from .constants import GK25FIN_SRID
from .enforcement_domain import EnforcementDomain
from .mixins import TimestampedModelMixin
from .parking import Parking
class PermitArea(TimestampedModelMixin):
name = models.CharField(max_length=40, verbose_name=_('name'))
domain = models.ForeignKey(
EnforcementDomain, on_delete=models.PROTECT,
related_name='permit_areas')
identifier = models.CharField(max_length=10, verbose_name=_('identifier'))
geom = gis_models.MultiPolygonField(
srid=GK25FIN_SRID, verbose_name=_('geometry'))
permitted_user = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.PROTECT, verbose_name=_("permitted_user"))
class Meta:
unique_together = [('domain', 'identifier')]
ordering = ('identifier',)
def __str__(self):
return '{}/{}: {}'.format(self.domain.code, self.identifier, self.name)
class PermitSeriesQuerySet(models.QuerySet):
def active(self):
return self.filter(active=True)
def latest_active(self):
return self.active().order_by('-modified_at').first()
def prunable(self, time_limit=None):
limit = time_limit or (
timezone.now() - settings.PARKKIHUBI_PERMITS_PRUNABLE_AFTER)
return self.filter(created_at__lt=limit, active=False)
class PermitSeries(TimestampedModelMixin, models.Model):
active = models.BooleanField(default=False)
owner = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.PROTECT, verbose_name=_("owner"))
objects = PermitSeriesQuerySet.as_manager()
class Meta:
ordering = ('created_at', 'id')
verbose_name = _("permit series")
verbose_name_plural = _("permit series")
@classmethod
def delete_prunable_series(cls, time_limit=None):
prunable = cls.objects.prunable(time_limit)
Permit.objects.filter(series__in=prunable).delete()
prunable.delete()
def __str__(self):
return str(self.id)
class PermitQuerySet(models.QuerySet):
def active(self):
return self.filter(series__active=True)
def by_time(self, timestamp):
lookup_items = PermitLookupItem.objects.by_time(timestamp)
return self.filter(lookup_items__in=lookup_items).distinct()
def by_subject(self, registration_number):
lookup_items = PermitLookupItem.objects.by_subject(registration_number)
return self.filter(lookup_items__in=lookup_items).distinct()
def by_area(self, area):
lookup_items = PermitLookupItem.objects.by_area(area)
return self.filter(lookup_items__in=lookup_items).distinct()
def bulk_create(self, permits, *args, **kwargs):
for permit in permits:
assert isinstance(permit, Permit)
permit.full_clean()
with transaction.atomic(using=self.db, savepoint=False):
created_permits = super().bulk_create(permits, *args, **kwargs)
PermitLookupItem.objects.using(self.db).bulk_create(
chain(*(x._make_lookup_items() for x in created_permits)))
return created_permits
class Permit(TimestampedModelMixin, models.Model):
domain = models.ForeignKey(
EnforcementDomain, on_delete=models.PROTECT,
related_name='permits')
series = models.ForeignKey(PermitSeries, on_delete=models.PROTECT)
external_id = models.CharField(max_length=50, null=True, blank=True)
subjects = CleaningJsonField(blank=True, validators=[DictListValidator({
'start_time': TimestampField(),
'end_time': TimestampField(),
'registration_number': TextField(max_length=20),
})])
areas = CleaningJsonField(blank=True, validators=[DictListValidator({
'start_time': TimestampField(),
'end_time': TimestampField(),
'area': TextField(max_length=10),
})])
objects = PermitQuerySet.as_manager()
class Meta:
unique_together = [('series', 'external_id')]
indexes = [
models.Index(fields=['series', 'id']),
]
ordering = ('series', 'id')
def __str__(self):
return 'Permit {id} ({series}{active}/{external_id} {dom})'.format(
id=self.id,
dom=self.domain.code,
series=self.series,
active='*' if self.series.active else '',
external_id=self.external_id)
def save(self, using=None, *args, **kwargs):
self.full_clean()
using = using or router.db_for_write(type(self), instance=self)
with transaction.atomic(using=using, savepoint=False):
super(Permit, self).save(using=using, *args, **kwargs)
self.lookup_items.all().using(using).delete()
new_lookup_items = self._make_lookup_items()
PermitLookupItem.objects.using(using).bulk_create(new_lookup_items)
def _make_lookup_items(self):
for area in self.areas:
for subject in self.subjects:
max_start_time = max(subject['start_time'], area['start_time'])
min_end_time = min(subject['end_time'], area['end_time'])
if max_start_time >= min_end_time:
continue
yield PermitLookupItem(
permit=self,
registration_number=Parking.normalize_reg_num(
subject['registration_number']),
area=PermitArea.objects.get(identifier=area['area'], domain=self.domain),
start_time=max_start_time,
end_time=min_end_time
)
class PermitLookupItemQuerySet(models.QuerySet):
def active(self):
return self.filter(permit__series__active=True)
def by_time(self, timestamp):
return self.filter(start_time__lte=timestamp, end_time__gte=timestamp)
def by_subject(self, registration_number):
normalized_reg_num = Parking.normalize_reg_num(registration_number)
return self.filter(registration_number=normalized_reg_num)
def by_area(self, area):
return self.filter(area=area)
class PermitLookupItem(models.Model):
permit = models.ForeignKey(
Permit, related_name="lookup_items", on_delete=models.CASCADE)
registration_number = models.CharField(max_length=20)
area = models.ForeignKey(PermitArea, on_delete=models.PROTECT, default=None, null=True, blank=True)
start_time = models.DateTimeField()
end_time = models.DateTimeField()
objects = PermitLookupItemQuerySet.as_manager()
class Meta:
indexes = [
models.Index(fields=[
'registration_number', 'start_time', 'end_time',
'area', 'permit']),
]
ordering = ('registration_number', 'start_time', 'end_time')
def __str__(self):
return (
'{start_time:%Y-%m-%d %H:%M} -- {end_time:%Y-%m-%d %H:%M} / '
'{registration_number} / {area}'
).format(
start_time=self.start_time, end_time=self.end_time,
registration_number=self.registration_number,
area=self.area.identifier)
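# A standalone sketch (not part of the original module above) of the window
# intersection performed by Permit._make_lookup_items(): a lookup item is
# produced only where a subject's validity window overlaps an area's validity
# window, and the item covers exactly that overlap.
from datetime import datetime

def _window_overlap(subject, area):
    """Return the (start, end) overlap of two windows, or None if disjoint."""
    start = max(subject['start_time'], area['start_time'])
    end = min(subject['end_time'], area['end_time'])
    return (start, end) if start < end else None

if __name__ == '__main__':
    subject = {'start_time': datetime(2021, 1, 1), 'end_time': datetime(2021, 1, 10)}
    area = {'start_time': datetime(2021, 1, 5), 'end_time': datetime(2021, 1, 20)}
    # Prints the overlapping window: 2021-01-05 .. 2021-01-10.
    print(_window_overlap(subject, area))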
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Tests for the union find data structure.
"""
try:
from ..unionfind import UnionFind
except ValueError:
pass
def test_unionfind_basics():
"""
Test the basic properties of unionfind.
"""
u = UnionFind([1, 2, 3])
assert u.in_same_set(1, 2) is False
assert u.in_same_set(2, 3) is False
u.union(1, 3)
assert u.in_same_set(1, 2) is False
assert u.in_same_set(3, 1)
assert u.get_root(1) == u.get_root(3)
def test_unionfind_adding_elements():
"""
Test adding operations, mostly syntactic sugar.
"""
u = UnionFind([1, 2])
u.add(['a', 'b'])
assert 1 in u
assert 'a' in u
def test_unionfind_example():
"""
    Test on a slightly more involved example.
"""
u = UnionFind([1, 2, 3, 4, 5])
u.union(1, 3)
u.union(2, 4)
assert u.in_same_set(1, 3)
assert u.in_same_set(4, 2)
assert not u.in_same_set(2, 5)
assert not u.in_same_set(2, 1)
assert not u.in_same_set(1, 4)
u.union(5, 1)
assert u.in_same_set(3, 5)
def test_unionfind_several():
"""
Test that we can take union of more than two elements.
"""
u = UnionFind([1, 2, 3, 4, 5, 6, 7, 8])
u.union([1, 2, 3])
u.union([4, 5, 6])
u.union([7, 8])
assert u.in_same_set(1, 3)
assert u.in_same_set(6, 4)
assert u.in_same_set(7, 8)
assert not u.in_same_set(2, 5)
assert not u.in_same_set(4, 8)
def test_unionfind_compression():
"""
Test path compression and the union by rank.
"""
# Test the ranking
elements = list(range(100))
u = UnionFind(elements)
for i in range(len(elements) - 1):
u.union(elements[i], elements[i + 1])
assert max(u._rank.values()) == 1
# Test path compression
parent_nodes = list(u._parent.values())
assert all(parent == parent_nodes[0] for parent in parent_nodes)
if __name__ == "__main__":
import pytest
# --durations=10 <- May be used to show potentially slow tests
    pytest.main(args=['.', '--doctest-modules', '-v'])
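# A minimal sketch (an assumption, not the project's actual ..unionfind module)
# of the interface exercised by the tests above: union by rank plus path
# compression, with the conveniences used in the tests (add(), "in", and
# union() over an iterable of several elements).
class UnionFindSketch:
    def __init__(self, elements=()):
        self._parent = {}
        self._rank = {}
        self.add(elements)
    def add(self, elements):
        for element in elements:
            if element not in self._parent:
                self._parent[element] = element
                self._rank[element] = 0
    def __contains__(self, element):
        return element in self._parent
    def get_root(self, element):
        # Find the root, then point every node on the path straight at it
        # (path compression).
        root = element
        while self._parent[root] != root:
            root = self._parent[root]
        while self._parent[element] != root:
            self._parent[element], element = root, self._parent[element]
        return root
    def in_same_set(self, first, second):
        return self.get_root(first) == self.get_root(second)
    def union(self, first, second=None):
        items = list(first) if second is None else [first, second]
        for other in items[1:]:
            root_a, root_b = self.get_root(items[0]), self.get_root(other)
            if root_a == root_b:
                continue
            # Union by rank: attach the shallower tree under the deeper one.
            if self._rank[root_a] < self._rank[root_b]:
                root_a, root_b = root_b, root_a
            self._parent[root_b] = root_a
            if self._rank[root_a] == self._rank[root_b]:
                self._rank[root_a] += 1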
"pytest.main"
]
| [((2112, 2162), 'pytest.main', 'pytest.main', ([], {'args': "['.', '--doctest-modules', '-v']"}), "(args=['.', '--doctest-modules', '-v'])\n", (2123, 2162), False, 'import pytest\n')] |
# Generated by Django 3.2.8 on 2021-11-25 17:50
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('products', '0009_auto_20211125_1846'),
]
operations = [
migrations.RemoveField(
model_name='product',
name='updated_at',
),
]
from builtins import str
from builtins import range
from builtins import object
import logging
import inspect
import os
class CustomAttr(object):
"""This type handles non-flat data-types like
int, str, bool.
"""
def __init__(self, key, value):
self._value = value
self._key = key
def validate(self):
pass
def post_validation(self):
pass
class CustomAttrTlsContainer(CustomAttr):
def __init__(self, key, value):
super(CustomAttrTlsContainer, self).__init__(key, value)
def validate(self):
return True
def post_validation(self):
return self._value
def validate_custom_attributes(custom_attributes_dict, section,
custom_attributes):
section_dict = {}
if custom_attributes and section in custom_attributes_dict:
for key, value in list(custom_attributes.items()):
if key in custom_attributes_dict[section]:
#Sanitize the value
try:
type_attr = custom_attributes_dict[section][key]['type']
limits = custom_attributes_dict[section][key]['limits']
if type_attr == 'int':
value = int(value)
if value in range(limits[0], limits[1]):
section_dict.update({key:value})
else:
logging.info("Skipping key: %s, value: %s due to" \
"validation failure" % (key, value))
elif type_attr == 'str':
if len(value) in range(limits[0], limits[1]):
section_dict.update({key:value})
else:
logging.info("Skipping key: %s, value: %s due to" \
"validation failure" % (key, value))
elif type_attr == 'bool':
if value in limits:
if value == 'True':
value = ''
elif value == 'False':
value = 'no '
section_dict.update({key:value})
else:
logging.info("Skipping key: %s, value: %s due to" \
"validation failure" % (key, value))
elif inspect.isclass(eval(type_attr)):
new_custom_attr = eval(type_attr)(key, value)
if new_custom_attr.validate():
value = new_custom_attr.post_validation()
section_dict.update({key:value})
else:
logging.info("Skipping key: %s, value: %s due to" \
"validation failure" % (key, value))
except Exception as e:
logging.error(str(e))
continue
return section_dict
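# A hedged usage sketch; the schema below is an assumption inferred from the
# lookups above (each section maps attribute keys to a 'type' name and
# 'limits'), not the project's real configuration.
if __name__ == '__main__':
    schema = {
        'frontend': {
            'timeout': {'type': 'int', 'limits': [1, 100]},
            'nbproc': {'type': 'str', 'limits': [1, 10]},
            'http_server_close': {'type': 'bool', 'limits': ['True', 'False']},
        },
    }
    attrs = {'timeout': '30', 'nbproc': '4', 'http_server_close': 'True', 'bogus': '1'}
    # Prints {'timeout': 30, 'nbproc': '4', 'http_server_close': ''}; the
    # unknown key 'bogus' is silently dropped.
    print(validate_custom_attributes(schema, 'frontend', attrs))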
import gdb
class TypeCache(object):
def __init__(self):
self.cache = {}
self.intptr_type = False
def clear(self):
self.cache = {}
self.intptr_type = False
def get_type(self, typename):
if typename in self.cache:
return self.cache[typename]
try:
gdb_type = gdb.lookup_type(typename)
self.cache[typename] = gdb_type
return gdb_type
except:
pass
try:
proto = gdb.parse_and_eval("(%s*)0" % typename)
gdb_type = proto.type.target()
self.cache[typename] = gdb_type
return gdb_type
except:
pass
return None
def get_intptr_type(self):
        if self.intptr_type is not False:
return self.intptr_type
ptr_type = self.get_type("void*")
        if ptr_type is None:
self.intptr_type = None
return None
ulong_type = self.get_type("unsigned long")
        if ulong_type is None:
self.intptr_type = None
return None
if ulong_type.sizeof >= ptr_type.sizeof:
self.intptr_type = ulong_type
return ulong_type
ullong_type = self.get_type("unsigned long long")
self.intptr_type = ullong_type
return ullong_type
cache = TypeCache()
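# Illustrative usage, as comments only since this module is meant to run inside
# a GDB Python session: cache.get_type('unsigned long') returns the gdb.Type on
# success and None when the name cannot be resolved (the second lookup via
# "(typename*)0" catches types that only resolve behind a pointer), while
# cache.get_intptr_type() picks an unsigned integer type wide enough to hold a
# pointer, preferring unsigned long and falling back to unsigned long long.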
import os
import sys
import operator
import pprint

import rake

directory = sys.argv[1]
outfile = open("key_phrases.csv", "w")
files = {}
for filename in os.listdir(directory):
    # Read non-trivial lines and join multi-word lines into one blob of text.
    text = []
    with open(os.path.join(directory, filename)) as f:
        text = [l.strip() for l in f if len(l.strip()) > 2]
    data = ''
    for t in text:
        if len(t.split()) > 1:
            data = data + '. ' + t.strip()
    # Keep only letters and spaces before running keyword extraction.
    whitelist = set('abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ')
    answer = ''.join(filter(whitelist.__contains__, data))
    answer = ' '.join(answer.split())
    rake_object = rake.Rake("/home/ashutosh/Sudeshna/RAKE-tutorial/data/stoplists/SmartStoplist.txt", 3, 3, 1)
    pp = pprint.PrettyPrinter()
    keywords = rake_object.run(answer)
    for entry in keywords:
        outfile.write("%s, %s\n" % (entry[0], str(entry[1])))
outfile.close()
"""Unit tests for helper utilities in :mod:`dftinputgen.utils`."""
import os
import pytest
from ase import io as ase_io
from dftinputgen.utils import get_elem_symbol
from dftinputgen.utils import read_crystal_structure
from dftinputgen.utils import get_kpoint_grid_from_spacing
from dftinputgen.utils import DftInputGeneratorUtilsError
test_base_dir = os.path.dirname(__file__)
feo_conv_file = os.path.join(test_base_dir, "qe", "files", "feo_conv.vasp")
feo_conv = ase_io.read(feo_conv_file)
def test_get_elem_symbol():
assert get_elem_symbol("Fe-34") == "Fe"
assert get_elem_symbol("3RGe-34") == "Ge"
with pytest.raises(DftInputGeneratorUtilsError):
get_elem_symbol("G23")
def test_read_crystal_structure():
# str with path to crystal structure file is OK
cs = read_crystal_structure(feo_conv_file)
assert cs == feo_conv
# any other type of input should throw an error
with pytest.raises(TypeError):
read_crystal_structure(feo_conv)
def test_kpoint_grid_from_spacing():
assert get_kpoint_grid_from_spacing(feo_conv, 0.2) == pytest.approx(
[7, 7, 7]
)
from django.contrib.auth.models import User
from django.db.models import (Model, TextField, DateTimeField, ForeignKey,
CASCADE)
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
from django.db import models
import json
class MessageModel(Model):
"""
    This class represents a chat message. It has an owner (user), a timestamp
    and the message body.
"""
user = ForeignKey(User, on_delete=CASCADE, verbose_name='user',
related_name='from_user', db_index=True)
recipient = ForeignKey(User, on_delete=CASCADE, verbose_name='recipient',
related_name='to_user', db_index=True)
timestamp = DateTimeField('timestamp', auto_now_add=True, editable=False,
db_index=True)
body = TextField('body')
def __str__(self):
return str(self.id)
def characters(self):
"""
Toy function to count body characters.
:return: body's char number
"""
return len(self.body)
def notify_ws_clients(self):
"""
Inform client there is a new message.
"""
notification = {
'type': 'chat_message',
'message': '{}'.format(self.id)
}
channel_layer = get_channel_layer()
print("user.id {}".format(self.user.id))
print("user.id {}".format(self.recipient.id))
async_to_sync(channel_layer.group_send)("{}".format(self.user.id), notification)
async_to_sync(channel_layer.group_send)("{}".format(self.recipient.id), notification)
def save(self, *args, **kwargs):
"""
Trims white spaces, saves the message and notifies the recipient via WS
if the message is new.
"""
new = self.id
self.body = self.body.strip() # Trimming whitespaces from the body
super(MessageModel, self).save(*args, **kwargs)
if new is None:
self.notify_ws_clients()
# Meta
class Meta:
app_label = 'core'
verbose_name = 'message'
verbose_name_plural = 'messages'
ordering = ('-timestamp',)
class Group(models.Model):
    name = models.CharField(max_length=20)
    members = models.TextField()
    messages = models.TextField()
def set_members(self,user_id_list):
self.members = json.dumps(user_id_list)
def get_members(self):
return json.loads(self.members)
    def add(self, user_id):
        current_list = self.get_members()
        if user_id in current_list:
            print("user is already in the group")
        else:
            # list.append() mutates in place and returns None, so store the
            # mutated list itself rather than the return value.
            current_list.append(user_id)
            self.set_members(current_list)
    def remove(self, user_id):
        current_list = self.get_members()
        if user_id in current_list:
            current_list.remove(user_id)
            self.set_members(current_list)
        else:
            print("User is not a member of this group")
def has(self,user_id):
current_list = self.get_members()
return(user_id in current_list)
# Set of functions for dealing with group messages
def set_messages(self,message_id_list):
self.messages = json.dumps(message_id_list)
def get_messages(self):
return json.loads(self.messages)
    def add_message(self, message_id):
        current_list = self.get_messages()
        current_list.append(message_id)
        self.set_messages(current_list)
    def delete_message(self, message_id):
        current_list = self.get_messages()
        if message_id in current_list:
            current_list.remove(message_id)
            self.set_messages(current_list)
def save(self, *args, **kwargs):
if self.pk is None or self.members is None or self.members == '':
self.set_members([])
if self.pk is None or self.messages is None or self.messages == '':
self.set_messages([])
super(Group, self).save(*args, **kwargs)
def __str__(self):
return self.name+" ID: "+str(self.id)
# Meta
class Meta:
app_label = 'core'
verbose_name = 'Group'
verbose_name_plural = 'Groups'
ordering = ('name',)
class GroupMessage(Model):
"""
    This class represents a chat message. It has an owner (user), a timestamp
    and the message body.
"""
sender = ForeignKey(User, on_delete=CASCADE, verbose_name='sender',
related_name='from_sender', db_index=True)
group = ForeignKey(Group, on_delete=CASCADE, verbose_name='group',
related_name='to_group', db_index=True)
time = DateTimeField('time', auto_now_add=True, editable=False,
db_index=True)
body = TextField('body')
def __str__(self):
return str(self.id)
def characters(self):
"""
Toy function to count body characters.
:return: body's char number
"""
return len(self.body)
def notify_ws_clients(self):
"""
Inform client there is a new message.
"""
notification = {
'type': 'group_message',
'group': '{}'.format(self.id)
}
channel_layer = get_channel_layer()
group_id = "group"+str(self.group.id)
print("group.id {}".format(group_id))
async_to_sync(channel_layer.group_send)(group_id, notification)
def save(self, *args, **kwargs):
"""
Trims white spaces, saves the message and notifies the recipient via WS
if the message is new.
"""
new = self.id
self.body = self.body.strip() # Trimming whitespaces from the body
super(GroupMessage, self).save(*args, **kwargs)
if new is None:
self.notify_ws_clients()
# Meta
class Meta:
app_label = 'core'
verbose_name = 'group message'
        verbose_name_plural = 'group messages'
ordering = ('-time',)
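# A hedged sketch (an assumption, not part of this app's code) of the consumer
# side that would receive the events sent by notify_ws_clients() above via
# channel_layer.group_send(). Channels dispatches an event whose 'type' is
# 'chat_message' to a handler method named chat_message(); the group name
# mirrors the per-user id used above, and self.scope['user'] assumes the
# standard AuthMiddlewareStack is installed.
from channels.generic.websocket import AsyncJsonWebsocketConsumer
class ChatConsumer(AsyncJsonWebsocketConsumer):
    async def connect(self):
        # Join the per-user group so group_send("<user id>", ...) reaches this socket.
        await self.channel_layer.group_add(str(self.scope['user'].id), self.channel_name)
        await self.accept()
    async def chat_message(self, event):
        # Forward the {'type': 'chat_message', 'message': <id>} payload to the client.
        await self.send_json(event)
    async def disconnect(self, code):
        await self.channel_layer.group_discard(str(self.scope['user'].id), self.channel_name)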
# -*- coding: utf-8 -*-
"""CNN.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Tq6HUya2PrC0SmyOIFo2c_eVtguRED2q
"""
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torchvision.datasets as datasets
import torchvision.transforms as transforms
class CNN(nn.Module):
def __init__(self,in_channels = 1,num_classes = 10):
super(CNN,self).__init__()
self.conv1 = nn.Conv2d(in_channels= in_channels,out_channels = 8,kernel_size =(3,3),stride = (1,1),padding = (1,1))
self.pool1 = nn.MaxPool2d(kernel_size=(2,2),stride=(2,2))
self.conv2 = nn.Conv2d(in_channels= 8,out_channels = 16,kernel_size =(3,3),stride = (1,1),padding = (1,1))
self.pool2 = nn.MaxPool2d(kernel_size=(2,2),stride=(2,2))
self.fc1 = nn.Linear(16*7*7,num_classes)
def forward(self,x):
x = F.relu(self.conv1(x))
x = self.pool1(x)
x = F.relu(self.conv2(x))
x = self.pool2(x)
x = x.reshape(x.shape[0],-1)
x = self.fc1(x)
return x
model = CNN(1,10)
x = torch.randn((64,1,28,28))
print(model(x).shape)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device
in_channels = 1
num_classes = 10
learning_rate = 0.001
batch_size = 64
num_epochs = 4
train_dataset = datasets.MNIST(root = "dataset/",train = True,transform = transforms.ToTensor(),download = True)
train_loader = DataLoader(dataset=train_dataset,batch_size=64,shuffle=True)
test_dataset = datasets.MNIST(root="dataset/", train=False, transform=transforms.ToTensor(), download=True)
test_loader = DataLoader(dataset = test_dataset,batch_size = batch_size,shuffle = True)
model = CNN(1,10).to(device = device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(),lr = learning_rate)
for epoch in range(num_epochs):
for batch_idx,(data,targets) in enumerate(train_loader):
#get data to cuda if possible
        data = data.to(device=device)
        targets = targets.to(device=device)
scores = model(data)
loss = criterion(scores,targets)
#backward
optimizer.zero_grad()
loss.backward()
#gradient_descent or adam-step
optimizer.step()
# Check the accuracy for the training step
def check_accuracy(loader,model):
if loader.dataset.train:
print("Checking accuracy on training data")
else:
print("Checking accuracy on test data")
num_correct = 0
num_samples = 0
model.eval()
with torch.no_grad():
for x,y in loader:
            x = x.to(device=device)
            y = y.to(device=device)
scores = model(x)
_,predictions = scores.max(1)
num_correct += (predictions == y).sum()
num_samples += predictions.size(0)
print(f' Got {num_correct}/{num_samples} with accuracy ={float(num_correct)/float(num_samples)*100:.2f} ')
model.train()
check_accuracy(train_loader,model)
check_accuracy(test_loader,model)
"""Treadmill hierarchical scheduler.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import collections
import datetime
import heapq
import itertools
import logging
import operator
import sys
import time
import enum
import numpy as np
import six
_LOGGER = logging.getLogger(__name__)
MAX_PRIORITY = 100
DEFAULT_RANK = 100
_UNPLACED_RANK = sys.maxsize
DIMENSION_COUNT = None
_MAX_UTILIZATION = float('inf')
_GLOBAL_ORDER_BASE = time.mktime((2014, 1, 1, 0, 0, 0, 0, 0, 0))
# 21 day
DEFAULT_SERVER_UPTIME = 21 * 24 * 60 * 60
# 1 day
MIN_SERVER_UPTIME = 1 * 24 * 60 * 60
# 7 days
DEFAULT_MAX_APP_LEASE = 7 * 24 * 60 * 60
# Default partition threshold
DEFAULT_THRESHOLD = 0.9
# pylint: disable=C0302,too-many-lines
def _bit_count(value):
"""Returns number of bits set.
"""
count = 0
while value:
value &= value - 1
count += 1
return count
def zero_capacity():
"""Returns zero capacity vector.
"""
assert DIMENSION_COUNT is not None, 'Dimension count not set.'
return np.zeros(DIMENSION_COUNT)
def eps_capacity():
"""Returns eps capacity vector.
"""
assert DIMENSION_COUNT is not None, 'Dimension count not set.'
return np.array(
[np.finfo(float).eps for _x in range(0, DIMENSION_COUNT)]
)
def _global_order():
"""Use timestamp in nanoseconds, from Jan 1st 2014, to break tie in
scheduling conflicts for apps of the same priority, in a FIFO fashion.
"""
# Take the current EPOCH in nanosec
global_order = int(time.time() * 1000000) - _GLOBAL_ORDER_BASE
return global_order
def utilization(demand, allocated, available):
"""Calculates utilization score.
"""
return np.max(np.subtract(demand, allocated) / available)
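# Worked example (illustrative only): with demand=[1, 2], allocated=[1, 1] and
# available=[4, 4], utilization() returns max((1 - 1) / 4, (2 - 1) / 4) == 0.25,
# i.e. the score is driven by the most constrained dimension of the demand that
# is not yet covered by the allocation.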
def _all(oper, left, right):
"""Short circuit all for ndarray.
"""
return all(
oper(ai, bi)
for ai, bi in six.moves.zip(left, right)
)
def _any(oper, left, right):
"""Short circuit any for ndarray.
"""
return any(
oper(ai, bi)
for ai, bi in six.moves.zip(left, right)
)
def _any_eq(left, right):
"""Short circuit any eq for ndarray.
"""
return _any(operator.eq, left, right)
def _any_isclose(left, right):
"""Short circuit any isclose for ndarray.
"""
return _any(np.isclose, left, right)
def _any_lt(left, right):
"""Short circuit any lt for ndarray.
"""
return _any(operator.lt, left, right)
def _any_le(left, right):
"""Short circuit any le for ndarray.
"""
return _any(operator.le, left, right)
def _any_gt(left, right):
"""Short circuit any gt for ndarray.
"""
return _any(operator.gt, left, right)
def _any_ge(left, right):
"""Short circuit any ge for ndarray.
"""
return _any(operator.ge, left, right)
def _all_eq(left, right):
"""Short circuit all eq for ndarray.
"""
return _all(operator.eq, left, right)
def _all_isclose(left, right):
"""Short circuit all isclose for ndarray.
"""
return _all(np.isclose, left, right)
def _all_lt(left, right):
"""Short circuit all lt for ndarray.
"""
return _all(operator.lt, left, right)
def _all_le(left, right):
"""Short circuit all le for ndarray.
"""
return _all(operator.le, left, right)
def _all_gt(left, right):
"""Short circuit all gt for ndarray.
"""
return _all(operator.gt, left, right)
def _all_ge(left, right):
"""Short circuit all ge for ndarray.
"""
return _all(operator.ge, left, right)
class IdentityGroup:
"""Identity group.
"""
__slots__ = (
'available',
'count',
)
def __init__(self, count=0):
self.count = count
self.available = set(range(0, count))
def acquire(self):
"""Return next available identity or None.
"""
if self.available:
return self.available.pop()
else:
return None
def release(self, ident):
"""Mark identity as available.
"""
if ident < self.count:
self.available.add(ident)
def adjust(self, count):
"""Adjust identities with new count.
If count is larger, add additional identities to the set.
If count is lower, remove identities that are no longer valid.
All apps that have invalid identities will be adjusted in the
schedule cycle.
"""
if count >= self.count:
self.available ^= set(six.moves.xrange(self.count, count))
else:
self.available -= set(six.moves.xrange(count, self.count))
self.count = count
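# Example of the adjust() semantics above (illustrative only): a group created
# with count=3 can hand out identities {0, 1, 2}; adjust(5) makes {3, 4}
# available as well, while adjust(2) shrinks the pool so that identity 2 can no
# longer be re-acquired once released, and apps still holding it are fixed up
# on the next schedule cycle.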
class State(enum.Enum):
"""Enumeration of node/server states.
"""
# Ready to accept new applications.
# TODO: Fix attribute name
up = 'up' # pylint: disable=invalid-name
# Applications need to be migrated.
down = 'down'
# Existing applications can stay, but will not accept new.
frozen = 'frozen'
class Affinity:
"""Model affinity and affinity limits.
"""
__slots__ = (
'name',
'limits',
'constraints',
)
def __init__(self, name, limits=None):
self.name = name
self.limits = collections.defaultdict(lambda: float('inf'))
if limits:
self.limits.update(limits)
# freeze affinity shape constraints.
self.constraints = tuple([self.name] + sorted(self.limits.values()))
class Application:
"""Application object.
"""
__slots__ = (
'global_order',
'name',
'demand',
'affinity',
'priority',
'allocation',
'data_retention_timeout',
'server',
'lease',
'identity',
'identity_group',
'identity_group_ref',
'schedule_once',
'evicted',
'placement_expiry',
'renew',
'unschedule',
'final_rank',
'final_util',
'constraints',
)
def __init__(self, name, priority, demand, affinity,
affinity_limits=None,
data_retention_timeout=0,
lease=0,
identity_group=None,
identity=None,
schedule_once=False):
self.global_order = _global_order()
self.allocation = None
self.server = None
self.name = name
self.affinity = Affinity(affinity, affinity_limits)
self.priority = priority
self.demand = np.array(demand, dtype=float)
self.data_retention_timeout = data_retention_timeout
self.lease = lease
self.identity_group = identity_group
self.identity = identity
self.identity_group_ref = None
self.schedule_once = schedule_once
self.evicted = False
self.unschedule = False
self.placement_expiry = None
self.renew = False
def shape(self):
"""Return tuple of application (constraints, demand).
Application shape is tuple of constraints that affect application
placement. Currently this includes affinity constraints and app lease
time.
"""
constraints = (self.affinity.constraints + (self.lease,))
if self.allocation:
constraints += self.allocation.constraints
return constraints, self.demand
def acquire_identity(self):
"""Try to acquire identity if belong to the group.
Returns True if successfull or if identity group is none.
"""
if not self.identity_group_ref:
return True
if self.identity is None:
self.identity = self.identity_group_ref.acquire()
_LOGGER.info('Acquired identity: %s: %s - %s',
self.name, self.identity_group, self.identity)
return self.identity is not None
def release_identity(self):
"""Release app identity.
"""
if self.identity_group_ref and self.identity is not None:
self.identity_group_ref.release(self.identity)
self.identity = None
def force_set_identity(self, identity):
"""Force identity of the app.
"""
if identity is not None:
assert self.identity_group_ref
self.identity = identity
self.identity_group_ref.available.discard(identity)
def has_identity(self):
"""Checks if app has identity if identity group is specified.
"""
return self.identity_group_ref is None or self.identity is not None
@property
def traits(self):
"""The app traits are derived from allocation.
"""
if self.allocation is None:
return 0
else:
return self.allocation.traits
@six.add_metaclass(abc.ABCMeta)
class Strategy:
"""Base class for all placement strategies.
"""
@abc.abstractmethod
def suggested_node(self):
"""Suggested node that should be tried first.
"""
pass
@abc.abstractmethod
def next_node(self):
"""Next node to try, if previous suggestion was rejected.
"""
pass
class SpreadStrategy(Strategy):
"""Spread strategy will suggest new node for each subsequent placement.
"""
__slots__ = (
'current_idx',
'node',
)
def __init__(self, node):
self.current_idx = 0
self.node = node
def suggested_node(self):
"""Suggest next node from the cycle.
"""
for _ in six.moves.xrange(0, len(self.node.children)):
if self.current_idx == len(self.node.children):
self.current_idx = 0
current = self.node.children[self.current_idx]
self.current_idx += 1
if current:
return current
# Not a single non-none node.
return None
def next_node(self):
"""Suggest next node from the cycle.
"""
return self.suggested_node()
class PackStrategy(Strategy):
"""Pack strategy will suggest same node until it is full.
"""
__slots__ = (
'current_idx',
'node',
)
def __init__(self, node):
self.current_idx = 0
self.node = node
def suggested_node(self):
"""Suggest same node as previous placement.
"""
for _ in six.moves.xrange(0, len(self.node.children)):
if self.current_idx == len(self.node.children):
self.current_idx = 0
node = self.node.children[self.current_idx]
if node:
return node
return None
def next_node(self):
"""Suggest next node from the cycle.
"""
self.current_idx += 1
return self.suggested_node()
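# The two strategies above differ only in how they advance through the bucket's
# children: SpreadStrategy moves to the next child after every suggestion, so
# consecutive placements of the same affinity rotate across nodes, while
# PackStrategy keeps suggesting the same child until a placement is rejected
# and next_node() is called, filling one node before moving on.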
class TraitSet:
"""Hierarchical set of traits.
"""
__slots__ = (
'self_traits',
'children_traits',
'traits',
)
def __init__(self, traits=0):
if not traits:
traits = 0
# Private traits.
assert isinstance(traits, six.integer_types)
self.self_traits = traits
# Union of all children traits.
self.children_traits = dict()
self._recalculate()
def _recalculate(self):
"""Calculate combined set of all traits.
"""
self.traits = self.self_traits
for trait in six.itervalues(self.children_traits):
self.traits |= trait
def has(self, traits):
"""Check if all traits are present.
"""
return (self.traits & traits) == traits
def add(self, child, traits):
"""Add a child with given traits.
"""
# Update children traits.
self.children_traits[child] = traits
self._recalculate()
def remove(self, child):
"""Remove child traits from the list.
"""
if child in self.children_traits:
del self.children_traits[child]
self._recalculate()
def is_same(self, other):
"""Compares own traits, ignore child.
"""
return self.self_traits == other.self_traits
class AffinityCounter:
"""Manages affinity count.
"""
__slots__ = (
'affinity_counter',
)
def __init__(self):
self.affinity_counter = collections.Counter()
class Node:
"""Abstract placement node.
"""
__slots__ = (
'name',
'level',
'free_capacity',
'parent',
'children',
'children_by_name',
'traits',
'labels',
'affinity_counters',
'valid_until',
'_state',
'_state_since',
)
def __init__(self, name, traits, level, valid_until=0):
self.name = name
self.level = level
self.free_capacity = zero_capacity()
self.parent = None
self.children = list()
self.children_by_name = dict()
self.traits = TraitSet(traits)
self.labels = set()
self.affinity_counters = collections.Counter()
self.valid_until = valid_until
self._state = State.up
self._state_since = time.time()
def empty(self):
"""Return true if there are no children.
"""
return not bool(self.children_by_name)
def children_iter(self):
"""Iterate over active children.
"""
for child in self.children:
if child:
yield child
def get_state(self):
"""Returns tuple of (state, since).
"""
        return self._state, self._state_since
def set_state(self, state, since):
"""Sets the state and time since.
"""
if self._state is not state:
self._state_since = since
self._state = state
_LOGGER.debug('state: %s - (%s, %s)',
self.name, self._state, self._state_since)
@property
def state(self):
"""Return current state.
"""
return self._state
@state.setter
def state(self, new_state):
"""Set node state and records time.
"""
self.set_state(new_state, time.time())
def add_child_traits(self, node):
"""Recursively add child traits up.
"""
self.traits.add(node.name, node.traits.traits)
if self.parent:
self.parent.remove_child_traits(self.name)
self.parent.add_child_traits(self)
def adjust_valid_until(self, child_valid_until):
"""Recursively adjust valid until time.
"""
if child_valid_until:
self.valid_until = max(self.valid_until, child_valid_until)
else:
if self.empty():
self.valid_until = 0
else:
self.valid_until = max([node.valid_until
for node in self.children_iter()])
if self.parent:
self.parent.adjust_valid_until(child_valid_until)
def remove_child_traits(self, node_name):
"""Recursively remove child traits up.
"""
self.traits.remove(node_name)
if self.parent:
self.parent.remove_child_traits(self.name)
self.parent.add_child_traits(self)
def reset_children(self):
"""Reset children to empty list.
"""
for child in self.children_iter():
child.parent = None
self.children = list()
self.children_by_name = dict()
def add_node(self, node):
"""Add child node, set the traits and propagate traits up.
"""
assert node.parent is None
assert node.name not in self.children_by_name
node.parent = self
self.children.append(node)
self.children_by_name[node.name] = node
self.add_child_traits(node)
self.increment_affinity(node.affinity_counters)
self.add_labels(node.labels)
self.adjust_valid_until(node.valid_until)
def add_labels(self, labels):
"""Recursively add labels to self and parents.
"""
self.labels.update(labels)
if self.parent:
self.parent.add_labels(self.labels)
def remove_node(self, node):
"""Remove child node and adjust the traits.
"""
assert node.name in self.children_by_name
del self.children_by_name[node.name]
for idx in six.moves.xrange(0, len(self.children)):
if self.children[idx] == node:
self.children[idx] = None
self.remove_child_traits(node.name)
self.decrement_affinity(node.affinity_counters)
self.adjust_valid_until(None)
node.parent = None
return node
def remove_node_by_name(self, nodename):
"""Removes node by name.
"""
assert nodename in self.children_by_name
return self.remove_node(self.children_by_name[nodename])
def check_app_constraints(self, app):
"""Find app placement on the node.
"""
if app.allocation is not None:
if app.allocation.label not in self.labels:
_LOGGER.info('Missing label: %s on %s', app.allocation.label,
self.name)
return False
if app.traits != 0 and not self.traits.has(app.traits):
_LOGGER.info('Missing traits: %s on %s', app.traits, self.name)
return False
if not self.check_app_affinity_limit(app):
return False
if _any_gt(app.demand, self.free_capacity):
_LOGGER.info('Not enough free capacity: %s', self.free_capacity)
return False
return True
def check_app_affinity_limit(self, app):
"""Check app affinity limits
"""
count = self.affinity_counters[app.affinity.name]
limit = app.affinity.limits[self.level]
return count < limit
def put(self, _app):
"""Abstract method, should never be called.
"""
raise Exception('Not implemented.')
def size(self, label):
"""Returns total capacity of the children.
"""
if self.empty() or label not in self.labels:
return eps_capacity()
return np.sum([
n.size(label) for n in self.children_iter()], 0)
def members(self):
"""Return set of all leaf node names.
"""
names = dict()
for node in self.children_iter():
names.update(node.members())
return names
def increment_affinity(self, counters):
"""Increment affinity counters recursively.
"""
self.affinity_counters.update(counters)
if self.parent:
self.parent.increment_affinity(counters)
def decrement_affinity(self, counters):
"""Decrement affinity counters recursively.
"""
self.affinity_counters.subtract(counters)
if self.parent:
self.parent.decrement_affinity(counters)
class Bucket(Node):
"""Collection of nodes/buckets.
"""
__slots__ = (
'affinity_strategies',
'traits',
)
_default_strategy_t = SpreadStrategy
def __init__(self, name, traits=0, level=None):
super(Bucket, self).__init__(name, traits, level)
self.affinity_strategies = dict()
self.traits = TraitSet(traits)
def set_affinity_strategy(self, affinity, strategy_t):
"""Initilaizes placement strategy for given affinity.
"""
self.affinity_strategies[affinity] = strategy_t(self)
def get_affinity_strategy(self, affinity):
"""Returns placement strategy for the affinity, defaults to spread.
"""
if affinity not in self.affinity_strategies:
self.set_affinity_strategy(affinity, Bucket._default_strategy_t)
return self.affinity_strategies[affinity]
def adjust_capacity_up(self, new_capacity):
"""Node can only increase capacity.
"""
self.free_capacity = np.maximum(self.free_capacity, new_capacity)
if self.parent:
self.parent.adjust_capacity_up(self.free_capacity)
def adjust_capacity_down(self, prev_capacity=None):
"""Called when capacity is decreased.
"""
if self.empty():
self.free_capacity = zero_capacity()
if self.parent:
self.parent.adjust_capacity_down()
else:
if prev_capacity is not None and _all_lt(prev_capacity,
self.free_capacity):
return
free_capacity = zero_capacity()
for child_node in self.children_iter():
if child_node.state is not State.up:
continue
free_capacity = np.maximum(free_capacity,
child_node.free_capacity)
# If resulting free_capacity is less the previous, we need to
# adjust the parent, otherwise, nothing needs to be done.
prev_capacity = self.free_capacity.copy()
if _any_lt(free_capacity, self.free_capacity):
self.free_capacity = free_capacity
if self.parent:
self.parent.adjust_capacity_down(prev_capacity)
def add_node(self, node):
"""Adds node to the bucket.
"""
super(Bucket, self).add_node(node)
self.adjust_capacity_up(node.free_capacity)
def remove_node(self, node):
"""Removes node from the bucket.
"""
super(Bucket, self).remove_node(node)
# if _any_isclose(self.free_capacity, node.free_capacity):
self.adjust_capacity_down(node.free_capacity)
return node
def put(self, app):
"""Try to put app on one of the nodes that belong to the bucket.
"""
# Check if it is feasible to put app on some node low in the
# hierarchy
_LOGGER.debug('bucket.put: %s => %s', app.name, self.name)
if not self.check_app_constraints(app):
return False
strategy = self.get_affinity_strategy(app.affinity.name)
node = strategy.suggested_node()
if node is None:
_LOGGER.debug('All nodes in the bucket deleted.')
return False
nodename0 = node.name
first = True
while True:
# End of iteration.
if not first and node.name == nodename0:
_LOGGER.debug('Finished iterating on: %s.', self.name)
break
first = False
_LOGGER.debug('Trying node: %s:', node.name)
if node.state is not State.up:
_LOGGER.debug('Node not up: %s, %s', node.name, node.state)
else:
if node.put(app):
return True
node = strategy.next_node()
return False
class Server(Node):
"""Server object, final app placement.
"""
__slots__ = (
'init_capacity',
'apps',
'up_since',
'presence_id',
)
def __init__(self, name, capacity, up_since=0, valid_until=0,
traits=0, label=None, presence_id=None):
super(Server, self).__init__(name, traits=traits, level='server',
valid_until=valid_until)
self.labels = set([label])
self.init_capacity = np.array(capacity, dtype=float)
self.free_capacity = self.init_capacity.copy()
self.apps = dict()
self.up_since = up_since
self.presence_id = presence_id
def __str__(self):
return 'server: %s %s' % (self.name, self.init_capacity)
def is_same(self, other):
"""Compares capacity and traits against another server.
        valid_until is ignored, as a server that comes up after a reboot will
        have a different valid_until value.
"""
return (self.labels == other.labels and
_all_eq(self.init_capacity, other.init_capacity) and
self.traits.is_same(other.traits))
def put(self, app):
"""Tries to put the app on the server.
"""
assert app.name not in self.apps
_LOGGER.debug('server.put: %s => %s', app.name, self.name)
if not self.check_app_lifetime(app):
return False
if not self.check_app_constraints(app):
return False
prev_capacity = self.free_capacity.copy()
self.free_capacity -= app.demand
self.apps[app.name] = app
self.increment_affinity([app.affinity.name])
app.server = self.name
if self.parent:
self.parent.adjust_capacity_down(prev_capacity)
if app.placement_expiry is None:
app.placement_expiry = time.time() + app.lease
return True
def restore(self, app, placement_expiry=None):
"""Put app back on the server, ignore app lifetime.
"""
_LOGGER.debug('server.restore: %s => %s (%s)',
app.name, self.name, placement_expiry)
lease = app.lease
        # If not explicitly provided, fall back to the app's recorded expiry.
if placement_expiry is None:
placement_expiry = app.placement_expiry
app.lease = 0
rc = self.put(app)
app.lease = lease
app.placement_expiry = placement_expiry
return rc
def renew(self, app):
"""Try to extend the placement for app lease.
"""
can_renew = self.check_app_lifetime(app)
if can_renew:
app.placement_expiry = time.time() + app.lease
return can_renew
def check_app_lifetime(self, app):
"""Check if the app lease fits until server is rebooted.
"""
# app with 0 lease can be placed anywhere (ignore potentially
# expired servers)
if not app.lease:
return True
return time.time() + app.lease < self.valid_until
def remove(self, app_name):
"""Removes app from the server.
"""
assert app_name in self.apps
app = self.apps[app_name]
del self.apps[app_name]
app.server = None
app.evicted = True
app.unschedule = False
app.placement_expiry = None
self.free_capacity += app.demand
self.decrement_affinity([app.affinity.name])
if self.parent:
self.parent.adjust_capacity_up(self.free_capacity)
def remove_all(self):
"""Remove all apps.
"""
# iterate over copy of the keys, as we are removing them in the loop.
for appname in list(self.apps):
self.remove(appname)
def size(self, label):
"""Return server capacity.
"""
if label not in self.labels:
return eps_capacity()
return self.init_capacity
def members(self):
"""Return set of all leaf node names.
"""
return {self.name: self}
def set_state(self, state, since):
"""Change host state.
"""
if self.state is state:
return
super(Server, self).set_state(state, since)
if state == State.up:
if self.parent:
self.parent.adjust_capacity_up(self.free_capacity)
elif state in (State.down, State.frozen):
if self.parent:
self.parent.adjust_capacity_down(self.free_capacity)
else:
            raise Exception('Invalid state: %s' % state)
class Allocation:
"""Allocation manages queue of apps sharing same reserved capacity.
In reality allocation is tied to grn via application proid.
Applications within the allocation are organized by application priority.
Allocations are ranked, and the rank is used to globally order applications
from different allocations into global queue.
Default allocation has rank 100. Defining allocation with lower rank will
result in all it's applications to be evaluated first regardless of
utilization. This is used to model "system" applications that should be
always present regardless of utilization.
Allocation queue can be capped with max_utilization parameter. If set, it
will specify the max_utilization which will be considered for scheduling.
"""
__slots__ = (
'reserved',
'rank',
'rank_adjustment',
'traits',
'label',
'max_utilization',
'apps',
'sub_allocations',
'path',
'constraints',
)
def __init__(self, reserved=None, rank=None, traits=None,
max_utilization=None, partition=None):
self.set_reserved(reserved)
self.rank = None
self.rank_adjustment = 0
self.traits = 0
self.label = partition
self.max_utilization = _MAX_UTILIZATION
self.reserved = zero_capacity()
self.set_max_utilization(max_utilization)
self.set_traits(traits)
self.update(reserved, rank, 0)
self.apps = dict()
self.sub_allocations = dict()
self.path = []
        # Freeze shape constraints.
self.constraints = (self.label, self.traits,)
@property
def name(self):
"""Returns full allocation name.
"""
return '/'.join(self.path)
def set_reserved(self, reserved):
"""Update reserved capacity.
"""
if reserved is None:
self.reserved = zero_capacity()
elif isinstance(reserved, int):
assert reserved == 0
self.reserved = zero_capacity()
elif isinstance(reserved, float):
assert reserved == 0.0
self.reserved = zero_capacity()
elif isinstance(reserved, list):
assert len(reserved) == DIMENSION_COUNT
self.reserved = np.array(reserved, dtype=float)
elif isinstance(reserved, np.ndarray):
self.reserved = reserved
else:
            assert False, 'Unsupported type: %r' % type(reserved)
def update(self, reserved, rank, rank_adjustment, max_utilization=None):
"""Updates allocation.
"""
if rank is not None:
self.rank = rank
else:
self.rank = DEFAULT_RANK
if rank_adjustment is not None:
self.rank_adjustment = rank_adjustment
self.set_reserved(reserved)
self.set_max_utilization(max_utilization)
def set_max_utilization(self, max_utilization):
"""Sets max_utilization, accounting for default None value.
"""
if max_utilization is not None:
self.max_utilization = max_utilization
else:
self.max_utilization = _MAX_UTILIZATION
def set_traits(self, traits):
"""Set traits, account for default None value.
"""
if not traits:
self.traits = 0
else:
self.traits = traits
def add(self, app):
"""Add application to the allocation queue.
Once added, the scheduler will make an attempt to place the app on one
of the cell nodes.
"""
# Check that there are no duplicate app names.
if app.name in self.apps:
_LOGGER.warning(
                'Duplicate app on allocation queue: %s', app.name
)
return
app.allocation = self
self.apps[app.name] = app
def remove(self, name):
"""Remove application from the allocation queue.
"""
if name in self.apps:
self.apps[name].allocation = None
del self.apps[name]
def priv_utilization_queue(self):
"""Returns tuples for sorted by global utilization.
Apps in the queue are ordered by priority, insertion order.
Adding or removing maintains invariant that apps utilization
monotonically increases as well.
Returns local prioritization queue in a tuple where first element is
utilization ratio, so that this queue is suitable for merging into
global priority queue.
"""
def _app_key(app):
"""Compares apps by priority, state, global index
"""
return (-app.priority, 0 if app.server else 1,
app.global_order, app.name)
prio_queue = sorted(six.viewvalues(self.apps), key=_app_key)
acc_demand = zero_capacity()
available = self.reserved + np.finfo(float).eps
util_before = utilization(acc_demand, self.reserved, available)
for app in prio_queue:
acc_demand = acc_demand + app.demand
util_after = utilization(acc_demand, self.reserved, available)
# Priority 0 apps are treated specially - utilization is set to
# max float.
#
# This ensures that they are at the end of the all queues.
if app.priority == 0:
util_before = _MAX_UTILIZATION
util_after = _MAX_UTILIZATION
# All things equal, already scheduled applications have priority
# over pending.
pending = 0 if app.server else 1
if util_after <= self.max_utilization - 1:
rank = self.rank
if util_before < 0:
rank -= self.rank_adjustment
else:
rank = _UNPLACED_RANK
entry = (rank, util_before, util_after, pending, app.global_order,
app)
util_before = util_after
yield entry
def utilization_queue(self, free_capacity, visitor=None):
"""Returns utilization queue including the sub-allocs.
All app queues from self and sub-allocs are merged in standard order,
and then utilization is recalculated based on total reserved capacity
of this alloc and sub-allocs combined.
        The function maintains the invariant that any app (in self or inside a
        sub-alloc) with utilization < 1 will remain with utilization < 1.
"""
total_reserved = self.total_reserved()
queues = [
alloc.utilization_queue(free_capacity, visitor)
for alloc in six.itervalues(self.sub_allocations)
]
queues.append(self.priv_utilization_queue())
acc_demand = zero_capacity()
available = total_reserved + free_capacity + np.finfo(float).eps
util_before = utilization(acc_demand, total_reserved, available)
for item in heapq.merge(*queues):
rank, _u_before, _u_after, pending, order, app = item
acc_demand = acc_demand + app.demand
util_after = utilization(acc_demand, total_reserved, available)
if app.priority == 0:
util_before = _MAX_UTILIZATION
util_after = _MAX_UTILIZATION
# - lower rank allocations take precedence.
# - for same rank, utilization takes precedence
            # - False < True, so for apps with the same utilization we prefer
            #   those that are already running (False == not pending)
# - Global order
entry = (rank, util_before, util_after, pending, order, app)
if visitor:
visitor(self, entry, acc_demand)
util_before = util_after
yield entry
def total_reserved(self):
"""Total reserved capacity including sub-allocs.
"""
return six.moves.reduce(
lambda acc, alloc: acc + alloc.total_reserved(),
six.itervalues(self.sub_allocations),
self.reserved
)
def add_sub_alloc(self, name, alloc):
"""Add child allocation.
"""
self.sub_allocations[name] = alloc
assert not alloc.path
alloc.path = self.path + [name]
alloc.label = self.label
def remove_sub_alloc(self, name):
"""Remove chlid allocation.
"""
if name in self.sub_allocations:
del self.sub_allocations[name]
def get_sub_alloc(self, name):
"""Return sub allocation, create empty if it does not exist.
"""
if name not in self.sub_allocations:
self.add_sub_alloc(name, Allocation())
return self.sub_allocations[name]
def all_apps(self):
"""Return all apps in allocation and sub-allocations."""
all_apps = list(six.itervalues(self.apps))
for alloc in six.itervalues(self.sub_allocations):
all_apps.extend(alloc.all_apps())
return all_apps
class Partition:
"""Cell partition.
"""
__slots__ = (
'allocation',
'max_server_uptime',
'max_lease',
'threshold',
'label',
'_reboot_buckets',
'_reboot_dates',
'_reboot_last',
)
def __init__(self, max_server_uptime=None, max_lease=None, threshold=None,
label=None, reboot_schedule=None, now=None):
self.label = label
self.allocation = Allocation(partition=label)
        # Defaults for any unset parameters.
if not max_server_uptime:
max_server_uptime = DEFAULT_SERVER_UPTIME
if not max_lease:
max_lease = DEFAULT_MAX_APP_LEASE
if not threshold:
threshold = DEFAULT_THRESHOLD
self.max_server_uptime = max_server_uptime
self.max_lease = max_lease
self.threshold = threshold
if not reboot_schedule:
# reboot every day
reboot_schedule = {day: (23, 59, 59) for day in range(7)}
if not now:
now = time.time()
self._reboot_dates = reboot_dates(
reboot_schedule,
start_date=datetime.date.fromtimestamp(now)
)
self._reboot_buckets = []
self._reboot_last = now
self.tick(now)
def _find_bucket(self, timestamp):
"""Try to find bucket with given timestamp.
"""
for bucket in self._reboot_buckets:
if bucket.timestamp == timestamp:
return bucket
return None
def add(self, server, timestamp=None):
"""Add server.
"""
bucket = None
if timestamp:
bucket = self._find_bucket(timestamp)
        # servers that have been up longer than the max lifetime should be
        # rebooted at the next opportunity
if (self._reboot_buckets[0].timestamp >
server.up_since + DEFAULT_SERVER_UPTIME):
bucket = self._reboot_buckets[0]
if not bucket:
bucket = min(reversed(self._reboot_buckets),
key=lambda b: b.cost(server))
bucket.add(server)
def remove(self, server):
"""Remove server.
"""
for bucket in self._reboot_buckets:
bucket.remove(server)
def tick(self, now):
"""Do per-tick-bookkeeping.
"""
while self._reboot_last <= now + DEFAULT_SERVER_UPTIME:
bucket = RebootBucket(next(self._reboot_dates))
self._reboot_buckets.append(bucket)
self._reboot_last = bucket.timestamp
while self._reboot_buckets[0].timestamp < now:
self._reboot_buckets.pop(0)
class PartitionDict(dict):
"""Dict that creates partitions on demand.
We use this instead of collections.defaultdict so that we can provide
the new partition with its label, to be propagated to its allocations.
"""
def __missing__(self, label):
"""Create a new partition, passing the label to its constructor.
"""
self[label] = Partition(label=label)
return self[label]
# pylint: disable=invalid-name
def reboot_dates(schedule, start_date=None):
"""Generate list of valid reboot dates.
"""
date = datetime.date.today()
if start_date:
date = start_date
while True:
weekday = date.weekday()
if weekday in schedule:
h, m, s = schedule[weekday]
yield time.mktime((date.year, date.month, date.day,
h, m, s, 0, 0, 0))
date += datetime.timedelta(days=1)
class RebootBucket:
"""Bucket of servers to be rebooted at the same time.
"""
__slots__ = (
'timestamp',
'servers',
)
def __init__(self, timestamp):
self.timestamp = timestamp
self.servers = []
def add(self, server):
"""Add server to this bucket.
"""
self.servers.append(server)
server.valid_until = self.timestamp
_LOGGER.info('Setting valid until on server: %s %s',
server.name, server.valid_until)
def remove(self, server):
"""Remove server from this bucket.
"""
try:
self.servers.remove(server)
except ValueError:
pass
def cost(self, server):
"""The cost of adding server to this bucket.
"""
if self.timestamp > server.up_since + DEFAULT_SERVER_UPTIME:
return float('inf')
if self.timestamp < server.up_since + MIN_SERVER_UPTIME:
return float('inf')
return len(self.servers)
class PlacementFeasibilityTracker:
"""Tracks similar apps placement failures."""
def __init__(self):
self.recorder = dict()
def feasible(self, app):
"""Checks if it is feasible to satisfy demand."""
constraints, demand = app.shape()
if constraints in self.recorder:
            # If demand is >= the recorded failure, placement is not feasible.
if _all_ge(demand, self.recorder[constraints]):
return False
return True
def adjust(self, app):
"""Adjust info about failed placement."""
constraints, demand = app.shape()
if constraints not in self.recorder:
self.recorder[constraints] = demand
else:
if _all_le(demand, self.recorder[constraints]):
self.recorder[constraints] = demand
class Cell(Bucket):
"""Top level node.
"""
__slots__ = (
'partitions',
'next_event_at',
'apps',
'identity_groups',
)
def __init__(self, name):
super(Cell, self).__init__(name, traits=0, level='cell')
self.partitions = PartitionDict()
self.apps = dict()
self.identity_groups = collections.defaultdict(IdentityGroup)
self.next_event_at = np.inf
def add_app(self, allocation, app):
"""Adds application to the scheduled list.
"""
assert allocation is not None
if app.allocation:
app.allocation.remove(app.name)
allocation.add(app)
self.apps[app.name] = app
if app.identity_group:
app.identity_group_ref = self.identity_groups[app.identity_group]
def remove_app(self, appname):
"""Remove app from scheduled list.
"""
if appname not in self.apps:
return
app = self.apps[appname]
servers = self.members()
if app.server in servers:
servers[app.server].remove(app.name)
if app.allocation:
app.allocation.remove(app.name)
app.release_identity()
del self.apps[appname]
def configure_identity_group(self, name, count):
"""Add identity group to the cell.
"""
if name not in self.identity_groups:
self.identity_groups[name] = IdentityGroup(count)
else:
self.identity_groups[name].adjust(count)
def remove_identity_group(self, name):
"""Remove identity group.
"""
ident_group = self.identity_groups.get(name)
if ident_group:
in_use = False
for app in six.itervalues(self.apps):
if app.identity_group_ref == ident_group:
ident_group.adjust(0)
in_use = True
break
if not in_use:
del self.identity_groups[name]
def _fix_invalid_placements(self, queue, servers):
"""If app is placed on non-existent server, set server to None.
"""
for app in queue:
if app.server and app.server not in servers:
app.server = None
app.evicted = True
app.release_identity()
def _record_rank_and_util(self, queue):
"""Set final rank and utilization for all apps in the queue.
"""
for item in queue:
rank = item[0]
util = item[1]
app = item[-1]
app.final_rank = rank
app.final_util = util
def _fix_invalid_identities(self, queue, servers):
"""Check that app identity is valid for given identity group.
"""
for app in queue:
if app.identity is not None and app.identity_group_ref is not None:
# Can happen if identity group was adjusted to lower count.
if app.identity >= app.identity_group_ref.count:
# Can't release identity as it is invalid.
_LOGGER.info('Identity exceeds limit: %s - %s, limit %s',
app.name, app.identity,
app.identity_group_ref.count)
app.identity = None
# Invalidate any existing placement.
if app.server:
servers[app.server].remove(app.name)
def _handle_inactive_servers(self, servers):
"""Migrate apps from inactive servers.
"""
self.next_event_at = np.inf
for server in six.itervalues(servers):
state, since = server.get_state()
to_be_moved = []
if state == State.down:
_LOGGER.debug('Server state is down: %s', server.name)
for name, app in six.iteritems(server.apps):
if app.data_retention_timeout is None:
expires_at = 0
else:
expires_at = since + app.data_retention_timeout
if expires_at <= time.time():
_LOGGER.debug('Expired placement: %s', name)
app.release_identity()
to_be_moved.append(name)
else:
_LOGGER.debug('Keep placement: %s until %s',
name, expires_at)
self.next_event_at = min(expires_at,
self.next_event_at)
elif state == State.frozen:
_LOGGER.debug('Server state is frozen: %s', server.name)
to_be_moved = [
name for name, app in six.iteritems(server.apps)
if app.unschedule
]
for name in to_be_moved:
server.remove(name)
def _find_placements(self, queue, servers):
"""Run the queue and find placements.
"""
# TODO: refactor to get rid of warnings.
#
# pylint: disable=too-many-branches,too-many-statements
#
# At this point, if app.server is defined, it points to attached
# server.
evicted = dict()
reversed_queue = queue[::-1]
placement_tracker = PlacementFeasibilityTracker()
for app in queue:
_LOGGER.debug('scheduling %s', app.name)
if app.final_rank == _UNPLACED_RANK:
if app.server:
assert app.server in servers
assert app.has_identity()
servers[app.server].remove(app.name)
app.release_identity()
continue
restore = {}
if app.renew:
assert app.server
assert app.has_identity()
assert app.server in servers
server = servers[app.server]
if not server.renew(app):
# Save information that will be used to restore placement
# in case renewal fails.
_LOGGER.debug('Cannot renew app %s on server %s',
app.name, app.server)
restore['server'] = server
restore['placement_expiry'] = app.placement_expiry
server.remove(app.name)
# At this point app was either renewed on the same server, or
# temporarily removed from server if renew failed.
#
# If placement will be found, renew should remain False. If
# placement will not be found, renew will be set to True when
# placement is restored to the server it was running.
app.renew = False
if app.server:
assert app.server in servers
assert app.has_identity()
continue
assert app.server is None
if not app.acquire_identity():
_LOGGER.info('Unable to acquire identity: %s, %s', app.name,
app.identity_group)
continue
# If app was evicted before, try to restore to the same node.
if app in evicted:
assert app.has_identity()
evicted_from, app_expiry = evicted[app]
del evicted[app]
if evicted_from.restore(app, app_expiry):
app.evicted = False
continue
assert app.server is None
if app.schedule_once and app.evicted:
continue
# Check if placement is feasible.
if not placement_tracker.feasible(app):
_LOGGER.info(
'Placement not feasible: %s %r', app.name, app.shape()
)
continue
if not self.put(app):
                # There is not enough capacity; starting from the end of the
                # queue, evict apps to free capacity.
for evicted_app in reversed_queue:
# We reached the app we can't place
if evicted_app == app:
break
# The app is not yet placed, skip
if not evicted_app.server:
continue
assert evicted_app.server in servers
evicted_app_server = servers[evicted_app.server]
# Do not consider servers that are not up.
if evicted_app_server.state is not State.up:
continue
evicted[evicted_app] = (evicted_app_server,
evicted_app.placement_expiry)
evicted_app_server.remove(evicted_app.name)
# TODO: we need to check affinity limit constraints on
# each level, all the way to the top.
if evicted_app_server.put(app):
break
# Placement failed.
if not app.server:
# If renewal attempt failed, restore previous placement and
# expiry date.
if restore:
restore['server'].restore(app, restore['placement_expiry'])
app.renew = True
else:
app.release_identity()
placement_tracker.adjust(app)
def schedule_alloc(self, allocation, servers):
"""Run the scheduler for given allocation.
"""
begin = time.time()
size = self.size(allocation.label)
util_queue = list(allocation.utilization_queue(size))
self._record_rank_and_util(util_queue)
queue = [item[-1] for item in util_queue]
self._find_placements(queue, servers)
_LOGGER.info('Scheduled %s (%d) apps in %r',
allocation.label,
len(queue),
time.time() - begin)
def schedule(self):
"""Run the scheduler.
"""
begin = time.time()
all_apps = []
for label, partition in six.iteritems(self.partitions):
allocation = partition.allocation
all_apps.extend(allocation.all_apps())
before = [(app.name, app.server, app.placement_expiry)
for app in all_apps]
servers = self.members()
self._fix_invalid_placements(six.viewvalues(self.apps), servers)
self._handle_inactive_servers(servers)
self._fix_invalid_identities(six.viewvalues(self.apps), servers)
for label, partition in six.iteritems(self.partitions):
allocation = partition.allocation
allocation.label = label
self.schedule_alloc(allocation, servers)
after = [(app.server, app.placement_expiry)
for app in all_apps]
placement = [
tuple(itertools.chain(b, a))
for b, a in six.moves.zip(before, after)
]
for appname, s_before, exp_before, s_after, exp_after in placement:
if s_before != s_after:
_LOGGER.info('New placement: %s - %s => %s',
appname, s_before, s_after)
else:
if exp_before != exp_after:
_LOGGER.info('Renewed: %s [%s] - %s => %s',
appname, s_before, exp_before, exp_after)
_LOGGER.info('Total scheduler time for %s apps: %r (sec)',
len(all_apps),
time.time() - begin)
return placement
def resolve_reboot_conflicts(self):
"""Adjust server exipiration time to avoid conflicts.
"""
pass
def dumps(cell):
"""Serializes cell to string.
"""
del cell
return ''
def loads(data):
"""Loads scheduler from string.
"""
del data
assert False, 'not implemented.'
| [
"logging.getLogger",
"itertools.chain",
"six.itervalues",
"heapq.merge",
"numpy.array",
"datetime.date.fromtimestamp",
"six.moves.xrange",
"datetime.timedelta",
"numpy.subtract",
"numpy.maximum",
"six.moves.zip",
"six.viewvalues",
"numpy.finfo",
"datetime.date.today",
"time.time",
"time.mktime",
"six.add_metaclass",
"collections.Counter",
"numpy.zeros",
"collections.defaultdict",
"six.iteritems"
]
| [((377, 404), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (394, 404), False, 'import logging\n'), ((551, 594), 'time.mktime', 'time.mktime', (['(2014, 1, 1, 0, 0, 0, 0, 0, 0)'], {}), '((2014, 1, 1, 0, 0, 0, 0, 0, 0))\n', (562, 594), False, 'import time\n'), ((8874, 8904), 'six.add_metaclass', 'six.add_metaclass', (['abc.ABCMeta'], {}), '(abc.ABCMeta)\n', (8891, 8904), False, 'import six\n'), ((1147, 1172), 'numpy.zeros', 'np.zeros', (['DIMENSION_COUNT'], {}), '(DIMENSION_COUNT)\n', (1155, 1172), True, 'import numpy as np\n'), ((39765, 39786), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (39784, 39786), False, 'import datetime\n'), ((6599, 6628), 'numpy.array', 'np.array', (['demand'], {'dtype': 'float'}), '(demand, dtype=float)\n', (6607, 6628), True, 'import numpy as np\n'), ((11480, 11516), 'six.itervalues', 'six.itervalues', (['self.children_traits'], {}), '(self.children_traits)\n', (11494, 11516), False, 'import six\n'), ((12396, 12417), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (12415, 12417), False, 'import collections\n'), ((13106, 13127), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (13125, 13127), False, 'import collections\n'), ((13226, 13237), 'time.time', 'time.time', ([], {}), '()\n', (13235, 13237), False, 'import time\n'), ((20062, 20106), 'numpy.maximum', 'np.maximum', (['self.free_capacity', 'new_capacity'], {}), '(self.free_capacity, new_capacity)\n', (20072, 20106), True, 'import numpy as np\n'), ((23473, 23504), 'numpy.array', 'np.array', (['capacity'], {'dtype': 'float'}), '(capacity, dtype=float)\n', (23481, 23504), True, 'import numpy as np\n'), ((34511, 34531), 'heapq.merge', 'heapq.merge', (['*queues'], {}), '(*queues)\n', (34522, 34531), False, 'import heapq\n'), ((36447, 36483), 'six.itervalues', 'six.itervalues', (['self.sub_allocations'], {}), '(self.sub_allocations)\n', (36461, 36483), False, 'import six\n'), ((40086, 40112), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (40104, 40112), False, 'import datetime\n'), ((42352, 42390), 'collections.defaultdict', 'collections.defaultdict', (['IdentityGroup'], {}), '(IdentityGroup)\n', (42375, 42390), False, 'import collections\n'), ((45645, 45668), 'six.itervalues', 'six.itervalues', (['servers'], {}), '(servers)\n', (45659, 45668), False, 'import six\n'), ((51700, 51711), 'time.time', 'time.time', ([], {}), '()\n', (51709, 51711), False, 'import time\n'), ((52213, 52224), 'time.time', 'time.time', ([], {}), '()\n', (52222, 52224), False, 'import time\n'), ((52280, 52310), 'six.iteritems', 'six.iteritems', (['self.partitions'], {}), '(self.partitions)\n', (52293, 52310), False, 'import six\n'), ((52772, 52802), 'six.iteritems', 'six.iteritems', (['self.partitions'], {}), '(self.partitions)\n', (52785, 52802), False, 'import six\n'), ((1820, 1850), 'numpy.subtract', 'np.subtract', (['demand', 'allocated'], {}), '(demand, allocated)\n', (1831, 1850), True, 'import numpy as np\n'), ((14223, 14234), 'time.time', 'time.time', ([], {}), '()\n', (14232, 14234), False, 'import time\n'), ((32345, 32370), 'six.viewvalues', 'six.viewvalues', (['self.apps'], {}), '(self.apps)\n', (32359, 32370), False, 'import six\n'), ((35551, 35587), 'six.itervalues', 'six.itervalues', (['self.sub_allocations'], {}), '(self.sub_allocations)\n', (35565, 35587), False, 'import six\n'), ((36399, 36424), 'six.itervalues', 'six.itervalues', (['self.apps'], {}), '(self.apps)\n', (36413, 36424), False, 'import six\n'), 
((37584, 37595), 'time.time', 'time.time', ([], {}), '()\n', (37593, 37595), False, 'import time\n'), ((43744, 43769), 'six.itervalues', 'six.itervalues', (['self.apps'], {}), '(self.apps)\n', (43758, 43769), False, 'import six\n'), ((52583, 52608), 'six.viewvalues', 'six.viewvalues', (['self.apps'], {}), '(self.apps)\n', (52597, 52608), False, 'import six\n'), ((52703, 52728), 'six.viewvalues', 'six.viewvalues', (['self.apps'], {}), '(self.apps)\n', (52717, 52728), False, 'import six\n'), ((1336, 1351), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (1344, 1351), True, 'import numpy as np\n'), ((1640, 1651), 'time.time', 'time.time', ([], {}), '()\n', (1649, 1651), False, 'import time\n'), ((2000, 2026), 'six.moves.zip', 'six.moves.zip', (['left', 'right'], {}), '(left, right)\n', (2013, 2026), False, 'import six\n'), ((2169, 2195), 'six.moves.zip', 'six.moves.zip', (['left', 'right'], {}), '(left, right)\n', (2182, 2195), False, 'import six\n'), ((4600, 4635), 'six.moves.xrange', 'six.moves.xrange', (['self.count', 'count'], {}), '(self.count, count)\n', (4616, 4635), False, 'import six\n'), ((4685, 4720), 'six.moves.xrange', 'six.moves.xrange', (['count', 'self.count'], {}), '(count, self.count)\n', (4701, 4720), False, 'import six\n'), ((20853, 20904), 'numpy.maximum', 'np.maximum', (['free_capacity', 'child_node.free_capacity'], {}), '(free_capacity, child_node.free_capacity)\n', (20863, 20904), True, 'import numpy as np\n'), ((24844, 24855), 'time.time', 'time.time', ([], {}), '()\n', (24853, 24855), False, 'import time\n'), ((25613, 25624), 'time.time', 'time.time', ([], {}), '()\n', (25622, 25624), False, 'import time\n'), ((25943, 25954), 'time.time', 'time.time', ([], {}), '()\n', (25952, 25954), False, 'import time\n'), ((32460, 32475), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (32468, 32475), True, 'import numpy as np\n'), ((34205, 34241), 'six.itervalues', 'six.itervalues', (['self.sub_allocations'], {}), '(self.sub_allocations)\n', (34219, 34241), False, 'import six\n'), ((34397, 34412), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (34405, 34412), True, 'import numpy as np\n'), ((37692, 37724), 'datetime.date.fromtimestamp', 'datetime.date.fromtimestamp', (['now'], {}), '(now)\n', (37719, 37724), False, 'import datetime\n'), ((39973, 40037), 'time.mktime', 'time.mktime', (['(date.year, date.month, date.day, h, m, s, 0, 0, 0)'], {}), '((date.year, date.month, date.day, h, m, s, 0, 0, 0))\n', (39984, 40037), False, 'import time\n'), ((45886, 45912), 'six.iteritems', 'six.iteritems', (['server.apps'], {}), '(server.apps)\n', (45899, 45912), False, 'import six\n'), ((52109, 52120), 'time.time', 'time.time', ([], {}), '()\n', (52118, 52120), False, 'import time\n'), ((53072, 53093), 'itertools.chain', 'itertools.chain', (['b', 'a'], {}), '(b, a)\n', (53087, 53093), False, 'import itertools\n'), ((53119, 53147), 'six.moves.zip', 'six.moves.zip', (['before', 'after'], {}), '(before, after)\n', (53132, 53147), False, 'import six\n'), ((53715, 53726), 'time.time', 'time.time', ([], {}), '()\n', (53724, 53726), False, 'import time\n'), ((29858, 29889), 'numpy.array', 'np.array', (['reserved'], {'dtype': 'float'}), '(reserved, dtype=float)\n', (29866, 29889), True, 'import numpy as np\n'), ((46148, 46159), 'time.time', 'time.time', ([], {}), '()\n', (46157, 46159), False, 'import time\n'), ((46794, 46820), 'six.iteritems', 'six.iteritems', (['server.apps'], {}), '(server.apps)\n', (46807, 46820), False, 'import six\n')] |
import os
import sys
import re
import json
import logging
import torch
from transformers import (
HfArgumentParser,
set_seed,
AutoTokenizer,
AutoConfig,
EvalPrediction,
)
from src.model.ca_mtl import CaMtl, CaMtlArguments
from src.utils.misc import MultiTaskDataArguments, Split
from src.mtl_trainer import MultiTaskTrainer, MultiTaskTrainingArguments
from src.data.mtl_dataset import MultiTaskDataset
from src.data.task_dataset import TaskDataset
logger = logging.getLogger(__name__)
def setup_logging(training_args):
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
training_args.local_rank,
training_args.device,
training_args.n_gpu,
bool(training_args.local_rank != -1),
training_args.fp16,
)
def parse_cmd_args():
parser = HfArgumentParser(
(
CaMtlArguments,
MultiTaskDataArguments,
MultiTaskTrainingArguments,
)
)
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
model_args, data_args, training_args = parser.parse_json_file(
json_file=os.path.abspath(sys.argv[1])
)
else:
(
model_args,
data_args,
training_args,
) = parser.parse_args_into_dataclasses()
logger.info("Training/evaluation parameters %s", training_args)
return model_args, data_args, training_args
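# Invocation sketch (file and flag names are illustrative assumptions): the
# parser accepts either a single JSON config file or individual CLI flags, e.g.
#   python run.py configs/camtl.json
#   python run.py --model_name_or_path bert-base-cased --output_dir out ...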
def create_eval_datasets(mode, data_args, tokenizer):
eval_datasets = {}
for task_id, task_name in enumerate(data_args.tasks):
eval_datasets[task_name] = TaskDataset(
task_name, task_id, data_args, tokenizer, mode=mode
)
if task_name == "mnli":
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_datasets["mnli-mm"] = TaskDataset(
"mnli-mm", task_id, data_args, tokenizer, mode=mode
)
return eval_datasets
def main():
model_args, data_args, training_args = parse_cmd_args()
setup_logging(training_args)
set_seed(training_args.seed)
config = AutoConfig.from_pretrained(
CaMtl.get_base_model(model_args.model_name_or_path),
)
model = CaMtl.from_pretrained(
CaMtl.get_base_model(model_args.model_name_or_path),
model_args,
data_args,
config=config)
model.freeze_encoder_layers(model_args)
logger.info(model)
tokenizer = AutoTokenizer.from_pretrained(
CaMtl.get_base_model(model_args.model_name_or_path),
)
logger.info("Training tasks: %s", ", ".join([t for t in data_args.tasks]))
trainer = MultiTaskTrainer(
tokenizer,
data_args,
model=model,
args=training_args,
train_dataset=MultiTaskDataset(data_args, tokenizer, limit_length=50)
if training_args.do_train
else None,
eval_datasets=create_eval_datasets(Split.dev, data_args, tokenizer)
if training_args.do_eval or training_args.evaluate_during_training
else None,
test_datasets=create_eval_datasets(Split.test, data_args, tokenizer)
if training_args.do_predict
else None,
)
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path
if os.path.isdir(model_args.model_name_or_path)
else None
)
if training_args.do_eval:
trainer.evaluate()
if training_args.do_predict:
trainer.predict()
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| [
"logging.getLogger",
"logging.basicConfig",
"src.model.ca_mtl.CaMtl.get_base_model",
"transformers.HfArgumentParser",
"src.data.mtl_dataset.MultiTaskDataset",
"os.path.isdir",
"os.path.abspath",
"transformers.set_seed",
"src.data.task_dataset.TaskDataset"
]
| [((480, 507), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (497, 507), False, 'import logging\n'), ((548, 752), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': '(logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN)'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO if training_args.local_rank in\n [-1, 0] else logging.WARN)\n", (567, 752), False, 'import logging\n'), ((1099, 1189), 'transformers.HfArgumentParser', 'HfArgumentParser', (['(CaMtlArguments, MultiTaskDataArguments, MultiTaskTrainingArguments)'], {}), '((CaMtlArguments, MultiTaskDataArguments,\n MultiTaskTrainingArguments))\n', (1115, 1189), False, 'from transformers import HfArgumentParser, set_seed, AutoTokenizer, AutoConfig, EvalPrediction\n'), ((2341, 2369), 'transformers.set_seed', 'set_seed', (['training_args.seed'], {}), '(training_args.seed)\n', (2349, 2369), False, 'from transformers import HfArgumentParser, set_seed, AutoTokenizer, AutoConfig, EvalPrediction\n'), ((1874, 1938), 'src.data.task_dataset.TaskDataset', 'TaskDataset', (['task_name', 'task_id', 'data_args', 'tokenizer'], {'mode': 'mode'}), '(task_name, task_id, data_args, tokenizer, mode=mode)\n', (1885, 1938), False, 'from src.data.task_dataset import TaskDataset\n'), ((2420, 2471), 'src.model.ca_mtl.CaMtl.get_base_model', 'CaMtl.get_base_model', (['model_args.model_name_or_path'], {}), '(model_args.model_name_or_path)\n', (2440, 2471), False, 'from src.model.ca_mtl import CaMtl, CaMtlArguments\n'), ((2523, 2574), 'src.model.ca_mtl.CaMtl.get_base_model', 'CaMtl.get_base_model', (['model_args.model_name_or_path'], {}), '(model_args.model_name_or_path)\n', (2543, 2574), False, 'from src.model.ca_mtl import CaMtl, CaMtlArguments\n'), ((2762, 2813), 'src.model.ca_mtl.CaMtl.get_base_model', 'CaMtl.get_base_model', (['model_args.model_name_or_path'], {}), '(model_args.model_name_or_path)\n', (2782, 2813), False, 'from src.model.ca_mtl import CaMtl, CaMtlArguments\n'), ((2107, 2171), 'src.data.task_dataset.TaskDataset', 'TaskDataset', (['"""mnli-mm"""', 'task_id', 'data_args', 'tokenizer'], {'mode': 'mode'}), "('mnli-mm', task_id, data_args, tokenizer, mode=mode)\n", (2118, 2171), False, 'from src.data.task_dataset import TaskDataset\n'), ((1402, 1430), 'os.path.abspath', 'os.path.abspath', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (1417, 1430), False, 'import os\n'), ((3043, 3098), 'src.data.mtl_dataset.MultiTaskDataset', 'MultiTaskDataset', (['data_args', 'tokenizer'], {'limit_length': '(50)'}), '(data_args, tokenizer, limit_length=50)\n', (3059, 3098), False, 'from src.data.mtl_dataset import MultiTaskDataset\n'), ((3583, 3627), 'os.path.isdir', 'os.path.isdir', (['model_args.model_name_or_path'], {}), '(model_args.model_name_or_path)\n', (3596, 3627), False, 'import os\n')] |
#!/usr/bin/env python3
"""Read data in CSV format from websocket
"""
import sys
import asyncio
import websockets
# read url from command line
if len(sys.argv) >= 2:
uri = sys.argv[1]
else:
# host url and port
uri = "ws://localhost:8314"
print("*==* ", sys.argv[0], " Lese Daten von url ", uri)
async def read_ws():
"""asynchronous read from websocket
"""
async with websockets.connect(uri, ping_interval=None) as websocket:
# test connection
await websocket.send("req_connect")
answ = await websocket.recv()
if answ == "ack_connect":
print("** connected to websocket ", uri)
# get data
await websocket.send("getData")
while True:
inp = await websocket.recv()
if inp == '\n': # empty record, end
print("empty input - closing")
sys.exit(0)
else:
print('read: %s ' % inp, end='')
# run web client
asyncio.get_event_loop().run_until_complete(read_ws())
| [
"asyncio.get_event_loop",
"websockets.connect",
"sys.exit"
]
| [((394, 437), 'websockets.connect', 'websockets.connect', (['uri'], {'ping_interval': 'None'}), '(uri, ping_interval=None)\n', (412, 437), False, 'import websockets\n'), ((978, 1002), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1000, 1002), False, 'import asyncio\n'), ((880, 891), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (888, 891), False, 'import sys\n')] |
#! /usr/bin/env python
import tensorflow as tf
import numpy as np
import os
import time
import datetime
from tensorflow.contrib import learn
from input_helpers import InputHelper
# Parameters
# ==================================================
# Eval Parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_string("checkpoint_dir", "", "Checkpoint directory from training run")
tf.flags.DEFINE_string("eval_filepath", "match_valid.tsv", "Evaluate on this data (Default: None)")
tf.flags.DEFINE_string("vocab_filepath", "runs/1479874609/checkpoints/vocab", "Load training time vocabulary (Default: None)")
tf.flags.DEFINE_string("model", "runs/1479874609/checkpoints/model-32000", "Load trained model checkpoint (Default: None)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
print("")
if FLAGS.eval_filepath==None or FLAGS.vocab_filepath==None or FLAGS.model==None :
print("Eval or Vocab filepaths are empty.")
exit()
# load data and map id-transform based on training time vocabulary
inpH = InputHelper()
x1_test,x2_test,y_test = inpH.getTestDataSet(FLAGS.eval_filepath, FLAGS.vocab_filepath, 30)
print("\nEvaluating...\n")
# Evaluation
# ==================================================
checkpoint_file = FLAGS.model
print(checkpoint_file)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
sess.run(tf.initialize_all_variables())
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
input_x1 = graph.get_operation_by_name("input_x1").outputs[0]
input_x2 = graph.get_operation_by_name("input_x2").outputs[0]
input_y = graph.get_operation_by_name("input_y").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
# Tensors we want to evaluate
predictions = graph.get_operation_by_name("output/distance").outputs[0]
accuracy = graph.get_operation_by_name("accuracy/accuracy").outputs[0]
sim = graph.get_operation_by_name("accuracy/temp_sim").outputs[0]
#emb = graph.get_operation_by_name("embedding/W").outputs[0]
#embedded_chars = tf.nn.embedding_lookup(emb,input_x)
# Generate batches for one epoch
batches = inpH.batch_iter(list(zip(x1_test,x2_test,y_test)), 2*FLAGS.batch_size, 1, shuffle=False)
# Collect the predictions here
all_predictions = []
all_d=[]
for db in batches:
x1_dev_b,x2_dev_b,y_dev_b = zip(*db)
            # use a separate name for the fetched values so the `sim` tensor is not overwritten
            batch_predictions, batch_acc, batch_sim = sess.run([predictions, accuracy, sim], {input_x1: x1_dev_b, input_x2: x2_dev_b, input_y: y_dev_b, dropout_keep_prob: 1.0})
all_predictions = np.concatenate([all_predictions, batch_predictions])
print(batch_predictions)
            all_d = np.concatenate([all_d, batch_sim])
print("DEV acc {}".format(batch_acc))
for ex in all_predictions:
print(ex)
correct_predictions = float(np.mean(all_d == y_test))
print("Accuracy: {:g}".format(correct_predictions))
| [
"tensorflow.flags.DEFINE_string",
"tensorflow.Graph",
"tensorflow.ConfigProto",
"tensorflow.initialize_all_variables",
"numpy.mean",
"tensorflow.flags.DEFINE_boolean",
"tensorflow.Session",
"numpy.concatenate",
"tensorflow.flags.DEFINE_integer",
"input_helpers.InputHelper"
]
| [((265, 334), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""batch_size"""', '(64)', '"""Batch Size (default: 64)"""'], {}), "('batch_size', 64, 'Batch Size (default: 64)')\n", (288, 334), True, 'import tensorflow as tf\n'), ((335, 425), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""checkpoint_dir"""', '""""""', '"""Checkpoint directory from training run"""'], {}), "('checkpoint_dir', '',\n 'Checkpoint directory from training run')\n", (357, 425), True, 'import tensorflow as tf\n'), ((422, 525), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""eval_filepath"""', '"""match_valid.tsv"""', '"""Evaluate on this data (Default: None)"""'], {}), "('eval_filepath', 'match_valid.tsv',\n 'Evaluate on this data (Default: None)')\n", (444, 525), True, 'import tensorflow as tf\n'), ((522, 656), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""vocab_filepath"""', '"""runs/1479874609/checkpoints/vocab"""', '"""Load training time vocabulary (Default: None)"""'], {}), "('vocab_filepath',\n 'runs/1479874609/checkpoints/vocab',\n 'Load training time vocabulary (Default: None)')\n", (544, 656), True, 'import tensorflow as tf\n'), ((649, 776), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""model"""', '"""runs/1479874609/checkpoints/model-32000"""', '"""Load trained model checkpoint (Default: None)"""'], {}), "('model', 'runs/1479874609/checkpoints/model-32000',\n 'Load trained model checkpoint (Default: None)')\n", (671, 776), True, 'import tensorflow as tf\n'), ((792, 887), 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""allow_soft_placement"""', '(True)', '"""Allow device soft device placement"""'], {}), "('allow_soft_placement', True,\n 'Allow device soft device placement')\n", (815, 887), True, 'import tensorflow as tf\n'), ((884, 977), 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""log_device_placement"""', '(False)', '"""Log placement of ops on devices"""'], {}), "('log_device_placement', False,\n 'Log placement of ops on devices')\n", (907, 977), True, 'import tensorflow as tf\n'), ((1367, 1380), 'input_helpers.InputHelper', 'InputHelper', ([], {}), '()\n', (1378, 1380), False, 'from input_helpers import InputHelper\n'), ((1629, 1639), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1637, 1639), True, 'import tensorflow as tf\n'), ((1684, 1800), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': 'FLAGS.allow_soft_placement', 'log_device_placement': 'FLAGS.log_device_placement'}), '(allow_soft_placement=FLAGS.allow_soft_placement,\n log_device_placement=FLAGS.log_device_placement)\n', (1698, 1800), True, 'import tensorflow as tf\n'), ((1821, 1852), 'tensorflow.Session', 'tf.Session', ([], {'config': 'session_conf'}), '(config=session_conf)\n', (1831, 1852), True, 'import tensorflow as tf\n'), ((2034, 2063), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (2061, 2063), True, 'import tensorflow as tf\n'), ((3374, 3426), 'numpy.concatenate', 'np.concatenate', (['[all_predictions, batch_predictions]'], {}), '([all_predictions, batch_predictions])\n', (3388, 3426), True, 'import numpy as np\n'), ((3484, 3512), 'numpy.concatenate', 'np.concatenate', (['[all_d, sim]'], {}), '([all_d, sim])\n', (3498, 3512), True, 'import numpy as np\n'), ((3657, 3681), 'numpy.mean', 'np.mean', (['(all_d == y_test)'], {}), '(all_d == y_test)\n', (3664, 3681), True, 'import numpy as np\n')] |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetWorkspaceResult',
'AwaitableGetWorkspaceResult',
'get_workspace',
'get_workspace_output',
]
@pulumi.output_type
class GetWorkspaceResult:
def __init__(__self__, bundle_id=None, directory_id=None, id=None, root_volume_encryption_enabled=None, tags=None, user_volume_encryption_enabled=None, volume_encryption_key=None, workspace_properties=None):
if bundle_id and not isinstance(bundle_id, str):
raise TypeError("Expected argument 'bundle_id' to be a str")
pulumi.set(__self__, "bundle_id", bundle_id)
if directory_id and not isinstance(directory_id, str):
raise TypeError("Expected argument 'directory_id' to be a str")
pulumi.set(__self__, "directory_id", directory_id)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if root_volume_encryption_enabled and not isinstance(root_volume_encryption_enabled, bool):
raise TypeError("Expected argument 'root_volume_encryption_enabled' to be a bool")
pulumi.set(__self__, "root_volume_encryption_enabled", root_volume_encryption_enabled)
if tags and not isinstance(tags, list):
raise TypeError("Expected argument 'tags' to be a list")
pulumi.set(__self__, "tags", tags)
if user_volume_encryption_enabled and not isinstance(user_volume_encryption_enabled, bool):
raise TypeError("Expected argument 'user_volume_encryption_enabled' to be a bool")
pulumi.set(__self__, "user_volume_encryption_enabled", user_volume_encryption_enabled)
if volume_encryption_key and not isinstance(volume_encryption_key, str):
raise TypeError("Expected argument 'volume_encryption_key' to be a str")
pulumi.set(__self__, "volume_encryption_key", volume_encryption_key)
if workspace_properties and not isinstance(workspace_properties, dict):
raise TypeError("Expected argument 'workspace_properties' to be a dict")
pulumi.set(__self__, "workspace_properties", workspace_properties)
@property
@pulumi.getter(name="bundleId")
def bundle_id(self) -> Optional[str]:
return pulumi.get(self, "bundle_id")
@property
@pulumi.getter(name="directoryId")
def directory_id(self) -> Optional[str]:
return pulumi.get(self, "directory_id")
@property
@pulumi.getter
def id(self) -> Optional[str]:
return pulumi.get(self, "id")
@property
@pulumi.getter(name="rootVolumeEncryptionEnabled")
def root_volume_encryption_enabled(self) -> Optional[bool]:
return pulumi.get(self, "root_volume_encryption_enabled")
@property
@pulumi.getter
def tags(self) -> Optional[Sequence['outputs.WorkspaceTag']]:
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="userVolumeEncryptionEnabled")
def user_volume_encryption_enabled(self) -> Optional[bool]:
return pulumi.get(self, "user_volume_encryption_enabled")
@property
@pulumi.getter(name="volumeEncryptionKey")
def volume_encryption_key(self) -> Optional[str]:
return pulumi.get(self, "volume_encryption_key")
@property
@pulumi.getter(name="workspaceProperties")
def workspace_properties(self) -> Optional['outputs.WorkspaceProperties']:
return pulumi.get(self, "workspace_properties")
class AwaitableGetWorkspaceResult(GetWorkspaceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWorkspaceResult(
bundle_id=self.bundle_id,
directory_id=self.directory_id,
id=self.id,
root_volume_encryption_enabled=self.root_volume_encryption_enabled,
tags=self.tags,
user_volume_encryption_enabled=self.user_volume_encryption_enabled,
volume_encryption_key=self.volume_encryption_key,
workspace_properties=self.workspace_properties)
def get_workspace(id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWorkspaceResult:
"""
Resource Type definition for AWS::WorkSpaces::Workspace
"""
__args__ = dict()
__args__['id'] = id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:workspaces:getWorkspace', __args__, opts=opts, typ=GetWorkspaceResult).value
return AwaitableGetWorkspaceResult(
bundle_id=__ret__.bundle_id,
directory_id=__ret__.directory_id,
id=__ret__.id,
root_volume_encryption_enabled=__ret__.root_volume_encryption_enabled,
tags=__ret__.tags,
user_volume_encryption_enabled=__ret__.user_volume_encryption_enabled,
volume_encryption_key=__ret__.volume_encryption_key,
workspace_properties=__ret__.workspace_properties)
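# Usage sketch (hypothetical workspace id; assumes this provider package is
# imported as `aws_native` in a Pulumi program):
#   ws = aws_native.workspaces.get_workspace(id="ws-0123456789abcdef0")
#   pulumi.export("bundleId", ws.bundle_id)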
@_utilities.lift_output_func(get_workspace)
def get_workspace_output(id: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWorkspaceResult]:
"""
Resource Type definition for AWS::WorkSpaces::Workspace
"""
...
| [
"pulumi.get",
"pulumi.getter",
"pulumi.set",
"pulumi.InvokeOptions",
"pulumi.runtime.invoke"
]
| [((2494, 2524), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""bundleId"""'}), "(name='bundleId')\n", (2507, 2524), False, 'import pulumi\n'), ((2632, 2665), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""directoryId"""'}), "(name='directoryId')\n", (2645, 2665), False, 'import pulumi\n'), ((2886, 2935), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""rootVolumeEncryptionEnabled"""'}), "(name='rootVolumeEncryptionEnabled')\n", (2899, 2935), False, 'import pulumi\n'), ((3226, 3275), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""userVolumeEncryptionEnabled"""'}), "(name='userVolumeEncryptionEnabled')\n", (3239, 3275), False, 'import pulumi\n'), ((3426, 3467), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""volumeEncryptionKey"""'}), "(name='volumeEncryptionKey')\n", (3439, 3467), False, 'import pulumi\n'), ((3599, 3640), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""workspaceProperties"""'}), "(name='workspaceProperties')\n", (3612, 3640), False, 'import pulumi\n'), ((860, 904), 'pulumi.set', 'pulumi.set', (['__self__', '"""bundle_id"""', 'bundle_id'], {}), "(__self__, 'bundle_id', bundle_id)\n", (870, 904), False, 'import pulumi\n'), ((1052, 1102), 'pulumi.set', 'pulumi.set', (['__self__', '"""directory_id"""', 'directory_id'], {}), "(__self__, 'directory_id', directory_id)\n", (1062, 1102), False, 'import pulumi\n'), ((1220, 1250), 'pulumi.set', 'pulumi.set', (['__self__', '"""id"""', 'id'], {}), "(__self__, 'id', id)\n", (1230, 1250), False, 'import pulumi\n'), ((1454, 1544), 'pulumi.set', 'pulumi.set', (['__self__', '"""root_volume_encryption_enabled"""', 'root_volume_encryption_enabled'], {}), "(__self__, 'root_volume_encryption_enabled',\n root_volume_encryption_enabled)\n", (1464, 1544), False, 'import pulumi\n'), ((1666, 1700), 'pulumi.set', 'pulumi.set', (['__self__', '"""tags"""', 'tags'], {}), "(__self__, 'tags', tags)\n", (1676, 1700), False, 'import pulumi\n'), ((1904, 1994), 'pulumi.set', 'pulumi.set', (['__self__', '"""user_volume_encryption_enabled"""', 'user_volume_encryption_enabled'], {}), "(__self__, 'user_volume_encryption_enabled',\n user_volume_encryption_enabled)\n", (1914, 1994), False, 'import pulumi\n'), ((2165, 2233), 'pulumi.set', 'pulumi.set', (['__self__', '"""volume_encryption_key"""', 'volume_encryption_key'], {}), "(__self__, 'volume_encryption_key', volume_encryption_key)\n", (2175, 2233), False, 'import pulumi\n'), ((2407, 2473), 'pulumi.set', 'pulumi.set', (['__self__', '"""workspace_properties"""', 'workspace_properties'], {}), "(__self__, 'workspace_properties', workspace_properties)\n", (2417, 2473), False, 'import pulumi\n'), ((2582, 2611), 'pulumi.get', 'pulumi.get', (['self', '"""bundle_id"""'], {}), "(self, 'bundle_id')\n", (2592, 2611), False, 'import pulumi\n'), ((2726, 2758), 'pulumi.get', 'pulumi.get', (['self', '"""directory_id"""'], {}), "(self, 'directory_id')\n", (2736, 2758), False, 'import pulumi\n'), ((2843, 2865), 'pulumi.get', 'pulumi.get', (['self', '"""id"""'], {}), "(self, 'id')\n", (2853, 2865), False, 'import pulumi\n'), ((3015, 3065), 'pulumi.get', 'pulumi.get', (['self', '"""root_volume_encryption_enabled"""'], {}), "(self, 'root_volume_encryption_enabled')\n", (3025, 3065), False, 'import pulumi\n'), ((3181, 3205), 'pulumi.get', 'pulumi.get', (['self', '"""tags"""'], {}), "(self, 'tags')\n", (3191, 3205), False, 'import pulumi\n'), ((3355, 3405), 'pulumi.get', 'pulumi.get', (['self', '"""user_volume_encryption_enabled"""'], {}), "(self, 'user_volume_encryption_enabled')\n", (3365, 3405), False, 
'import pulumi\n'), ((3537, 3578), 'pulumi.get', 'pulumi.get', (['self', '"""volume_encryption_key"""'], {}), "(self, 'volume_encryption_key')\n", (3547, 3578), False, 'import pulumi\n'), ((3735, 3775), 'pulumi.get', 'pulumi.get', (['self', '"""workspace_properties"""'], {}), "(self, 'workspace_properties')\n", (3745, 3775), False, 'import pulumi\n'), ((4691, 4713), 'pulumi.InvokeOptions', 'pulumi.InvokeOptions', ([], {}), '()\n', (4711, 4713), False, 'import pulumi\n'), ((4805, 4914), 'pulumi.runtime.invoke', 'pulumi.runtime.invoke', (['"""aws-native:workspaces:getWorkspace"""', '__args__'], {'opts': 'opts', 'typ': 'GetWorkspaceResult'}), "('aws-native:workspaces:getWorkspace', __args__, opts=\n opts, typ=GetWorkspaceResult)\n", (4826, 4914), False, 'import pulumi\n')] |
#!/usr/bin/env python
###############################################################################
# #
# manifestManager.py #
# #
# Work with online data manifests (creating / syncing / validating) #
# #
# Copyright (C) <NAME> #
# #
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
__author__ = "<NAME>"
__copyright__ = "Copyright 2014"
__credits__ = ["<NAME>"]
__license__ = "GPLv3"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = "0.35"
###############################################################################
###############################################################################
###############################################################################
###############################################################################
__MANIFEST__ = ".dmanifest"
###############################################################################
###############################################################################
###############################################################################
###############################################################################
# system includes
import os
import hashlib
import urllib.request, urllib.error, urllib.parse
import urllib.request, urllib.parse, urllib.error
import shutil
import errno
# local includes
from fileEntity import FileEntity as FE
###############################################################################
###############################################################################
###############################################################################
###############################################################################
class ManifestManager(object):
"""Use this interface for storing and managing file and paths"""
def __init__(self, manType=None, timeout=30):
self.timeout = timeout
self.myExtensions = [".py",".sh"]
self.files = []
if manType is not None:
self.type = manType
else:
self.type = "generic"
def createManifest(self, path, manifestName=None):
"""inventory all files in path and create a manifest file"""
if manifestName is None:
manifestName = __MANIFEST__
# make the root file entity
root_path = os.path.abspath(path)
root_fe = FE('root', ".", None, "-", 0)
self.files.append(root_fe)
# now make all the ones below
parents = [root_fe]
dirs, files = self.listdir(path)[:2]
self.walk(parents, root_path, '', dirs, files, skipFile=manifestName)
with open(os.path.join(path, manifestName), 'w') as man_fh:
# print the header
man_fh.write("#\t::: %s ::: \tPizza3 manifest version %s\n\n" % (self.type, __version__))
for f in self.files:
if f.parent is not None:
man_fh.write("%s\n" % f)
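    # For reference, the resulting manifest is a small tab-separated listing; the
    # concrete values below are illustrative only (hypothetical paths and sizes):
    #
    #   #<TAB>::: generic ::: <TAB>Pizza3 manifest version 0.35
    #   scripts<TAB>-<TAB>0
    #   scripts/setup.sh<TAB><sha256 hex digest><TAB>1234
    #
    # i.e. one "path<TAB>hash<TAB>size" line per entry, with hash "-" and size 0 for
    # directories, which is the format diffManifests() parses back in below.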
def diffManifests(self,
localManifestLocation,
sourceManifestLocation,
localManifestName=None,
sourceManifestName=None,
printDiffs=False):
"""check for any differences between two manifests
if remote is true then sourceManifestLocation is a URL
returns a list of files that need to be updated
"""
if localManifestName is None:
localManifestName = __MANIFEST__
if sourceManifestName is None:
sourceManifestName = __MANIFEST__
# get the "type" of the local manifest
l_type = "generic"
with open(os.path.join(localManifestLocation, localManifestName)) as l_man:
for line in l_man:
if line[0] == "#":
l_type = self.getManType(line)
break
# load the source manifest
s_type = "generic"
source_man = {}
source = ""
# first we assume it is remote
try:
            # urlopen yields bytes lines under Python 3; decode them so the
            # string comparisons below also work for remote manifests
            s_man = [line.decode("utf-8") for line in urllib.request.urlopen(
                sourceManifestLocation + "/" + sourceManifestName, None, self.timeout)]
source = sourceManifestLocation + "/"
except ValueError:
# then it is probably a file
s_man = open(os.path.join(sourceManifestLocation, sourceManifestName))
source = os.path.join(sourceManifestLocation) + os.path.sep
except urllib.error.URLError:
# problems connecting to server, perhaps user is behind a proxy or firewall
print("Error: failed to connect to server.")
return (None, None, None, None, None)
first_line = True
for line in s_man:
if first_line:
first_line = False
if line[0] == "#":
# get the type of the manifest
s_type = self.getManType(line)
if s_type != l_type:
print("Error: type of source manifest (%s) does not match type of local manifest (%s)" % (s_type, l_type))
return (None, None, None, None, None)
else:
# no type specified
print("Error: type of source manifest is not specified. Is this a valid manifest file?")
return (None, None, None, None, None)
self.type = l_type
if line[0] != "#":
fields = line.rstrip().split("\t")
# set the dict up as {path => [hash, size, seenLocal]
source_man[fields[0]] = [fields[1], fields[2], False]
# keep lists of modifications
deleted = []
addedDirs = []
addedFiles = []
modified = []
with open(os.path.join(localManifestLocation, localManifestName)) as l_man:
for line in l_man:
if line[0] != "#":
fields = line.rstrip().split("\t")
try:
if source_man[fields[0]][0] != fields[1]:
# hashes don't match
modified.append(fields[0])
# seen this file
source_man[fields[0]][2] = True
except KeyError:
# this file has been deleted from the source manifest
deleted.append(fields[0])
# check for new files
for f in list(source_man.keys()):
if source_man[f][2] == False:
if source_man[f][0] == '-':
addedDirs.append(f)
else:
addedFiles.append(f)
if printDiffs:
new_size = 0
modified_size = 0
for f in addedFiles:
new_size += int(source_man[f][1])
for f in modified:
modified_size += int(source_man[f][1])
if len(addedFiles) > 0:
print("#------------------------------------------------------")
print("# Source contains %d new file(s) (%s)" % (len(addedFiles), self.formatData(new_size)))
for f in addedFiles:
print("\t".join([self.formatData(int(source_man[f][1])), f]))
if len(addedDirs) > 0:
print("#------------------------------------------------------")
print("# Source contains %d new folders(s)" % (len(addedDirs)))
for f in addedDirs:
print(f)
if len(modified) > 0:
print("#------------------------------------------------------")
print("# Source contains %d modified file(s) (%s)" % (len(modified), self.formatData(modified_size)))
for f in modified:
print(f)
if len(deleted) > 0:
print("#------------------------------------------------------")
print("# %d files have been deleted in the source:" % len(deleted))
for f in deleted:
print(f)
else:
return (source,
[(a, source_man[a]) for a in addedFiles],
[(a, source_man[a]) for a in addedDirs],
deleted,
[(m, source_man[m]) for m in modified])
def updateManifest(self,
localManifestLocation,
sourceManifestLocation,
localManifestName=None,
sourceManifestName=None,
prompt=True):
"""Update local files based on remote changes"""
# get the diffs
source, added_files, added_dirs, deleted, modified = self.diffManifests(localManifestLocation,
sourceManifestLocation,
localManifestName,
sourceManifestName)
# bail if the diff failed
if source is None:
return False
# no changes by default
do_down = False
if prompt:
total_size = 0
for f in added_files:
total_size += int(f[1][1])
for f in modified:
total_size += int(f[1][1])
if total_size != 0:
print("****************************************************************")
print("%d new file(s) to be downloaded from source" % len(added_files))
print("%d existing file(s) to be updated" % len(modified))
print("%s will need to be downloaded" % self.formatData(total_size))
do_down = self.promptUserDownload()
if not do_down:
print("Download aborted")
update_manifest = False
if do_down:
update_manifest = True
for add in added_dirs:
# make the dirs first
full_path = os.path.abspath(os.path.join(localManifestLocation, add[0]))
self.makeSurePathExists(full_path)
for add in added_files:
full_path = os.path.abspath(os.path.join(localManifestLocation, add[0]))
urllib.request.urlretrieve(source+add[0], full_path)
for modify in modified:
full_path = os.path.abspath(os.path.join(localManifestLocation, modify[0]))
urllib.request.urlretrieve(source+modify[0], full_path)
if update_manifest:
print("(re) creating manifest file (please be patient)")
self.createManifest(localManifestLocation, manifestName=localManifestName)
return True
def getManType(self, line):
"""Work out the manifest type from the first line of the file"""
return line.rstrip().split("##")[1]
def formatData(self, amount):
"""Pretty print file sizes"""
if amount < 1024*1024:
return "%d B" % amount
elif amount < 1024*1024*1024:
return "%0.2f MB" % (float(amount)/(1024.*1024.))
elif amount < 1024*1024*1024*1024:
return "%0.2f GB" % (float(amount)/(1024.*1024.*1024.))
elif amount < 1024*1024*1024*1024*1024:
return "%0.2f TB" % (float(amount)/(1024.*1024.*1024.*1024.))
#-----------------------------------------------------------------------------
# FS utilities
def makeSurePathExists(self, path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def promptUserDownload(self):
"""Check that the user is OK with making changes"""
input_not_ok = True
minimal=False
valid_responses = {'Y':True,'N':False}
vrs = ",".join([x.lower() for x in list(valid_responses.keys())])
while(input_not_ok):
if(minimal):
option = input("Download? ("+vrs+") : ").upper()
else:
option = input("Confirm you want to download this data\n" \
"Changes *WILL* be permanent\n" \
"Continue? ("+vrs+") : ").upper()
if(option in valid_responses):
print("****************************************************************")
return valid_responses[option]
else:
print("ERROR: unrecognised choice '"+option+"'")
minimal = True
def walk(self, parents, full_path, rel_path, dirs, files, skipFile=__MANIFEST__):
"""recursive walk through directory tree"""
# first do files here
for f in files:
if (f != skipFile) and os.path.splitext(f)[1] in self.myExtensions:
path = os.path.join(full_path, f)
self.files.append(FE(f,
rel_path,
parents[-1],
self.hashfile(path),
os.path.getsize(path)
)
)
for d in dirs:
# the walk will go into these dirs first
tmp_fe = FE(d, rel_path, parents[-1], "-", 0)
self.files.append(tmp_fe)
parents.append(tmp_fe)
new_full_path = os.path.join(full_path, d)
new_rel_path = os.path.join(rel_path, d)
new_dirs, new_files = self.listdir(new_full_path)[:2]
self.walk(parents, new_full_path, new_rel_path, new_dirs, new_files)
parents.pop()
def listdir(self, path):
"""List dirs, files etc in path (one dir deep)"""
dirs, files, links = [], [], []
for name in os.listdir(path):
path_name = os.path.join(path, name)
if os.path.isdir(path_name):
dirs.append(name)
elif os.path.isfile(path_name):
files.append(name)
elif os.path.islink(path_name):
links.append(name)
return dirs, files, links
def hashfile(self, fileName, blocksize=65536):
"""Hash a file and return the digest"""
hasher = hashlib.sha256()
with open(fileName,"rb") as fh:
buf = fh.read(blocksize)
while len(buf) > 0:
hasher.update(buf.strip())
buf = fh.read(blocksize)
        return hasher.hexdigest()
###############################################################################
###############################################################################
###############################################################################
###############################################################################
# %% DEBUG
# ===================================================
# main()
# ===================================================
# for debugging purposes (code called as a script)
# the code is called from here
# ===================================================
if __name__ == '__main__':
man = ManifestManager()
man.createManifest("/home/olivi/billy/python",manifestName="Pizza3.manifest") | [
"hashlib.sha256",
"os.listdir",
"os.path.getsize",
"os.makedirs",
"os.path.join",
"os.path.splitext",
"os.path.isfile",
"os.path.isdir",
"os.path.abspath",
"os.path.islink",
"fileEntity.FileEntity"
]
| [((3939, 3960), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (3954, 3960), False, 'import os\n'), ((3979, 4008), 'fileEntity.FileEntity', 'FE', (['"""root"""', '"""."""', 'None', '"""-"""', '(0)'], {}), "('root', '.', None, '-', 0)\n", (3981, 4008), True, 'from fileEntity import FileEntity as FE\n'), ((15546, 15562), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (15556, 15562), False, 'import os\n'), ((15997, 16013), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (16011, 16013), False, 'import hashlib\n'), ((13222, 13239), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (13233, 13239), False, 'import os\n'), ((15007, 15043), 'fileEntity.FileEntity', 'FE', (['d', 'rel_path', 'parents[-1]', '"""-"""', '(0)'], {}), "(d, rel_path, parents[-1], '-', 0)\n", (15009, 15043), True, 'from fileEntity import FileEntity as FE\n'), ((15145, 15171), 'os.path.join', 'os.path.join', (['full_path', 'd'], {}), '(full_path, d)\n', (15157, 15171), False, 'import os\n'), ((15199, 15224), 'os.path.join', 'os.path.join', (['rel_path', 'd'], {}), '(rel_path, d)\n', (15211, 15224), False, 'import os\n'), ((15588, 15612), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (15600, 15612), False, 'import os\n'), ((15628, 15652), 'os.path.isdir', 'os.path.isdir', (['path_name'], {}), '(path_name)\n', (15641, 15652), False, 'import os\n'), ((4252, 4284), 'os.path.join', 'os.path.join', (['path', 'manifestName'], {}), '(path, manifestName)\n', (4264, 4284), False, 'import os\n'), ((5260, 5314), 'os.path.join', 'os.path.join', (['localManifestLocation', 'localManifestName'], {}), '(localManifestLocation, localManifestName)\n', (5272, 5314), False, 'import os\n'), ((7366, 7420), 'os.path.join', 'os.path.join', (['localManifestLocation', 'localManifestName'], {}), '(localManifestLocation, localManifestName)\n', (7378, 7420), False, 'import os\n'), ((14554, 14580), 'os.path.join', 'os.path.join', (['full_path', 'f'], {}), '(full_path, f)\n', (14566, 14580), False, 'import os\n'), ((15705, 15730), 'os.path.isfile', 'os.path.isfile', (['path_name'], {}), '(path_name)\n', (15719, 15730), False, 'import os\n'), ((5881, 5937), 'os.path.join', 'os.path.join', (['sourceManifestLocation', 'sourceManifestName'], {}), '(sourceManifestLocation, sourceManifestName)\n', (5893, 5937), False, 'import os\n'), ((5960, 5996), 'os.path.join', 'os.path.join', (['sourceManifestLocation'], {}), '(sourceManifestLocation)\n', (5972, 5996), False, 'import os\n'), ((11731, 11774), 'os.path.join', 'os.path.join', (['localManifestLocation', 'add[0]'], {}), '(localManifestLocation, add[0])\n', (11743, 11774), False, 'import os\n'), ((11907, 11950), 'os.path.join', 'os.path.join', (['localManifestLocation', 'add[0]'], {}), '(localManifestLocation, add[0])\n', (11919, 11950), False, 'import os\n'), ((12101, 12147), 'os.path.join', 'os.path.join', (['localManifestLocation', 'modify[0]'], {}), '(localManifestLocation, modify[0])\n', (12113, 12147), False, 'import os\n'), ((15784, 15809), 'os.path.islink', 'os.path.islink', (['path_name'], {}), '(path_name)\n', (15798, 15809), False, 'import os\n'), ((14486, 14505), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (14502, 14505), False, 'import os\n'), ((14813, 14834), 'os.path.getsize', 'os.path.getsize', (['path'], {}), '(path)\n', (14828, 14834), False, 'import os\n')] |
import pytest
from flask_resty import Api
from flask_resty.testing import assert_response
# -----------------------------------------------------------------------------
@pytest.fixture(autouse=True)
def routes(app):
api = Api(app, "/api")
api.add_ping("/ping")
# -----------------------------------------------------------------------------
def test_ping(base_client):
response = base_client.get("/ping")
assert_response(response, 200)
assert response.get_data(as_text=True) == ""
| [
"pytest.fixture",
"flask_resty.testing.assert_response",
"flask_resty.Api"
]
| [((175, 203), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (189, 203), False, 'import pytest\n'), ((231, 247), 'flask_resty.Api', 'Api', (['app', '"""/api"""'], {}), "(app, '/api')\n", (234, 247), False, 'from flask_resty import Api\n'), ((430, 460), 'flask_resty.testing.assert_response', 'assert_response', (['response', '(200)'], {}), '(response, 200)\n', (445, 460), False, 'from flask_resty.testing import assert_response\n')] |
import os
from subprocess import call
files = ['000002b66c9c498e.jpg', '000002b97e5471a0.jpg', '000002c707c9895e.jpg', '0000048549557964.jpg', '000004f4400f6ec5.jpg', '0000071d71a0a6f6.jpg', '000013ba71c12506.jpg', '000018acd19b4ad3.jpg', '00001bc2c4027449.jpg', '00001bcc92282a38.jpg', '0000201cd362f303.jpg', '000020780ccee28d.jpg', '000023aa04ab09ed.jpg', '0000253ea4ecbf19.jpg', '000025ea48cab6fc.jpg', '0000271195f2c007.jpg', '0000286a5c6a3eb5.jpg', '00002b368e91b947.jpg', '00002f4ff380c64c.jpg', '0000313e5dccf13b.jpg', '000032046c3f8371.jpg', '00003223e04e2e66.jpg', '0000333f08ced1cd.jpg']
for file in files:
    if not os.path.exists('train/' + file):  # file names already include the .jpg suffix
spath = "gs://open-images-dataset/train/%s " % file
call(["gsutil", "cp", spath, 'train/'])
print(file, 'done', 'count:')
else:
print(file, 'already downloaded')
| [
"os.path.exists",
"subprocess.call"
]
| [((631, 671), 'os.path.exists', 'os.path.exists', (["('train/' + file + '.jpg')"], {}), "('train/' + file + '.jpg')\n", (645, 671), False, 'import os\n'), ((741, 780), 'subprocess.call', 'call', (["['gsutil', 'cp', spath, 'train/']"], {}), "(['gsutil', 'cp', spath, 'train/'])\n", (745, 780), False, 'from subprocess import call\n')] |
""" SNMP_FRAMEWORK_MIB
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class SnmpSecurityLevel(Enum):
"""
SnmpSecurityLevel (Enum Class)
.. data:: noAuthNoPriv = 1
.. data:: authNoPriv = 2
.. data:: authPriv = 3
"""
noAuthNoPriv = Enum.YLeaf(1, "noAuthNoPriv")
authNoPriv = Enum.YLeaf(2, "authNoPriv")
authPriv = Enum.YLeaf(3, "authPriv")
class SNMPFRAMEWORKMIB(Entity):
"""
.. attribute:: snmpengine
**type**\: :py:class:`Snmpengine <ydk.models.cisco_ios_xr.SNMP_FRAMEWORK_MIB.SNMPFRAMEWORKMIB.Snmpengine>`
"""
_prefix = 'SNMP_FRAMEWORK_MIB'
_revision = '2002-10-14'
def __init__(self):
super(SNMPFRAMEWORKMIB, self).__init__()
self._top_entity = None
self.yang_name = "SNMP-FRAMEWORK-MIB"
self.yang_parent_name = "SNMP-FRAMEWORK-MIB"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([("snmpEngine", ("snmpengine", SNMPFRAMEWORKMIB.Snmpengine))])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict()
self.snmpengine = SNMPFRAMEWORKMIB.Snmpengine()
self.snmpengine.parent = self
self._children_name_map["snmpengine"] = "snmpEngine"
self._children_yang_names.add("snmpEngine")
self._segment_path = lambda: "SNMP-FRAMEWORK-MIB:SNMP-FRAMEWORK-MIB"
class Snmpengine(Entity):
"""
.. attribute:: snmpengineid
**type**\: str
**pattern:** (([0\-9a\-fA\-F]){2}(\:([0\-9a\-fA\-F]){2})\*)?
.. attribute:: snmpengineboots
**type**\: int
**range:** 1..2147483647
.. attribute:: snmpenginetime
**type**\: int
**range:** 0..2147483647
.. attribute:: snmpenginemaxmessagesize
**type**\: int
**range:** 484..2147483647
"""
_prefix = 'SNMP_FRAMEWORK_MIB'
_revision = '2002-10-14'
def __init__(self):
super(SNMPFRAMEWORKMIB.Snmpengine, self).__init__()
self.yang_name = "snmpEngine"
self.yang_parent_name = "SNMP-FRAMEWORK-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('snmpengineid', YLeaf(YType.str, 'snmpEngineID')),
('snmpengineboots', YLeaf(YType.int32, 'snmpEngineBoots')),
('snmpenginetime', YLeaf(YType.int32, 'snmpEngineTime')),
('snmpenginemaxmessagesize', YLeaf(YType.int32, 'snmpEngineMaxMessageSize')),
])
self.snmpengineid = None
self.snmpengineboots = None
self.snmpenginetime = None
self.snmpenginemaxmessagesize = None
self._segment_path = lambda: "snmpEngine"
self._absolute_path = lambda: "SNMP-FRAMEWORK-MIB:SNMP-FRAMEWORK-MIB/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(SNMPFRAMEWORKMIB.Snmpengine, ['snmpengineid', 'snmpengineboots', 'snmpenginetime', 'snmpenginemaxmessagesize'], name, value)
def clone_ptr(self):
self._top_entity = SNMPFRAMEWORKMIB()
return self._top_entity
| [
"collections.OrderedDict",
"ydk.types.YLeaf",
"ydk.types.Enum.YLeaf"
]
| [((541, 570), 'ydk.types.Enum.YLeaf', 'Enum.YLeaf', (['(1)', '"""noAuthNoPriv"""'], {}), "(1, 'noAuthNoPriv')\n", (551, 570), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((589, 616), 'ydk.types.Enum.YLeaf', 'Enum.YLeaf', (['(2)', '"""authNoPriv"""'], {}), "(2, 'authNoPriv')\n", (599, 616), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((633, 658), 'ydk.types.Enum.YLeaf', 'Enum.YLeaf', (['(3)', '"""authPriv"""'], {}), "(3, 'authPriv')\n", (643, 658), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((1308, 1382), 'collections.OrderedDict', 'OrderedDict', (["[('snmpEngine', ('snmpengine', SNMPFRAMEWORKMIB.Snmpengine))]"], {}), "([('snmpEngine', ('snmpengine', SNMPFRAMEWORKMIB.Snmpengine))])\n", (1319, 1382), False, 'from collections import OrderedDict\n'), ((1418, 1433), 'collections.OrderedDict', 'OrderedDict', (['[]'], {}), '([])\n', (1429, 1433), False, 'from collections import OrderedDict\n'), ((1456, 1469), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1467, 1469), False, 'from collections import OrderedDict\n'), ((2853, 2868), 'collections.OrderedDict', 'OrderedDict', (['[]'], {}), '([])\n', (2864, 2868), False, 'from collections import OrderedDict\n'), ((2908, 2923), 'collections.OrderedDict', 'OrderedDict', (['[]'], {}), '([])\n', (2919, 2923), False, 'from collections import OrderedDict\n'), ((2997, 3029), 'ydk.types.YLeaf', 'YLeaf', (['YType.str', '"""snmpEngineID"""'], {}), "(YType.str, 'snmpEngineID')\n", (3002, 3029), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((3068, 3105), 'ydk.types.YLeaf', 'YLeaf', (['YType.int32', '"""snmpEngineBoots"""'], {}), "(YType.int32, 'snmpEngineBoots')\n", (3073, 3105), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((3143, 3179), 'ydk.types.YLeaf', 'YLeaf', (['YType.int32', '"""snmpEngineTime"""'], {}), "(YType.int32, 'snmpEngineTime')\n", (3148, 3179), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((3227, 3273), 'ydk.types.YLeaf', 'YLeaf', (['YType.int32', '"""snmpEngineMaxMessageSize"""'], {}), "(YType.int32, 'snmpEngineMaxMessageSize')\n", (3232, 3273), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n')] |
from aiogram.utils.markdown import hide_link
from aiogram.types import CallbackQuery
from loader import dp
from utils import (
get_object,
get_attributes_of_object
)
from keyboards import (
anime_choose_safe_category,
anime_sfw_categories,
anime_nsfw_categories,
animals_categories,
menu_with_categories,
control_buttons
)
@dp.callback_query_handler(text="menu")
async def call_menu_with_categories(call: CallbackQuery):
"""
Function for sending a menu,
with a selection of safe content
"""
await call.answer()
# Editing the message
await call.message.edit_text(
text=(
"<b>🔗 Select a category to get a picture.</b>"
),
reply_markup=menu_with_categories()
)
@dp.callback_query_handler(text="anime")
async def call_anime_categories(call: CallbackQuery):
"""
Redirect to select anime actions
"""
await call.answer()
# Editing the message
await call.message.edit_text(
text=(
"<b>⚜️ Choose what content you want to see.</b>"
),
reply_markup=anime_choose_safe_category()
)
@dp.callback_query_handler(text=["sfw", "nsfw"])
async def call_nsfw_categories(call: CallbackQuery):
"""
Redirect to anime content
"""
data = call.data.upper()
message = call.message
# Send answer
await call.answer()
if data == "SFW":
kb = anime_sfw_categories()
else:
kb = anime_nsfw_categories()
# Editing the message
await message.edit_text(
text=(
f"<b>🍿 You are in the {data} category.</b>"
),
reply_markup=kb
)
@dp.callback_query_handler(text="animals")
async def call_animals_categories(call: CallbackQuery):
"""
Redirect to animals content
"""
await call.answer()
# Editing the message
await call.message.edit_text(
text=(
"<b>🦄 You are in the category with animals.</b>"
),
reply_markup=animals_categories()
)
@dp.callback_query_handler()
async def call_get_photography(call: CallbackQuery):
"""
Function for sending photos
"""
message = call.message
data = call.data
# Get json document
api = get_attributes_of_object()
if data == "generate_new":
data = message.text.split("#")[1]
obj = api[data]["object"]
atr = api[data]["attribute"]
mark = api[data]["entity"]
if mark == "anime":
mark = api[data]["safe"]
if mark == "memes":
mark = "menu"
# We get a link to the preview photo
link = await get_object(obj, atr)
await call.answer()
# Editing the message
await message.edit_text(
text=(
f"{hide_link(link)} #{data}"
),
reply_markup=control_buttons(mark)
) | [
"keyboards.anime_sfw_categories",
"loader.dp.callback_query_handler",
"keyboards.control_buttons",
"utils.get_object",
"keyboards.anime_nsfw_categories",
"aiogram.utils.markdown.hide_link",
"keyboards.animals_categories",
"utils.get_attributes_of_object",
"keyboards.anime_choose_safe_category",
"keyboards.menu_with_categories"
]
| [((377, 415), 'loader.dp.callback_query_handler', 'dp.callback_query_handler', ([], {'text': '"""menu"""'}), "(text='menu')\n", (402, 415), False, 'from loader import dp\n'), ((798, 837), 'loader.dp.callback_query_handler', 'dp.callback_query_handler', ([], {'text': '"""anime"""'}), "(text='anime')\n", (823, 837), False, 'from loader import dp\n'), ((1190, 1237), 'loader.dp.callback_query_handler', 'dp.callback_query_handler', ([], {'text': "['sfw', 'nsfw']"}), "(text=['sfw', 'nsfw'])\n", (1215, 1237), False, 'from loader import dp\n'), ((1738, 1779), 'loader.dp.callback_query_handler', 'dp.callback_query_handler', ([], {'text': '"""animals"""'}), "(text='animals')\n", (1763, 1779), False, 'from loader import dp\n'), ((2119, 2146), 'loader.dp.callback_query_handler', 'dp.callback_query_handler', ([], {}), '()\n', (2144, 2146), False, 'from loader import dp\n'), ((2340, 2366), 'utils.get_attributes_of_object', 'get_attributes_of_object', ([], {}), '()\n', (2364, 2366), False, 'from utils import get_object, get_attributes_of_object\n'), ((1484, 1506), 'keyboards.anime_sfw_categories', 'anime_sfw_categories', ([], {}), '()\n', (1504, 1506), False, 'from keyboards import anime_choose_safe_category, anime_sfw_categories, anime_nsfw_categories, animals_categories, menu_with_categories, control_buttons\n'), ((1532, 1555), 'keyboards.anime_nsfw_categories', 'anime_nsfw_categories', ([], {}), '()\n', (1553, 1555), False, 'from keyboards import anime_choose_safe_category, anime_sfw_categories, anime_nsfw_categories, animals_categories, menu_with_categories, control_buttons\n'), ((2716, 2736), 'utils.get_object', 'get_object', (['obj', 'atr'], {}), '(obj, atr)\n', (2726, 2736), False, 'from utils import get_object, get_attributes_of_object\n'), ((762, 784), 'keyboards.menu_with_categories', 'menu_with_categories', ([], {}), '()\n', (782, 784), False, 'from keyboards import anime_choose_safe_category, anime_sfw_categories, anime_nsfw_categories, animals_categories, menu_with_categories, control_buttons\n'), ((1148, 1176), 'keyboards.anime_choose_safe_category', 'anime_choose_safe_category', ([], {}), '()\n', (1174, 1176), False, 'from keyboards import anime_choose_safe_category, anime_sfw_categories, anime_nsfw_categories, animals_categories, menu_with_categories, control_buttons\n'), ((2085, 2105), 'keyboards.animals_categories', 'animals_categories', ([], {}), '()\n', (2103, 2105), False, 'from keyboards import anime_choose_safe_category, anime_sfw_categories, anime_nsfw_categories, animals_categories, menu_with_categories, control_buttons\n'), ((2913, 2934), 'keyboards.control_buttons', 'control_buttons', (['mark'], {}), '(mark)\n', (2928, 2934), False, 'from keyboards import anime_choose_safe_category, anime_sfw_categories, anime_nsfw_categories, animals_categories, menu_with_categories, control_buttons\n'), ((2853, 2868), 'aiogram.utils.markdown.hide_link', 'hide_link', (['link'], {}), '(link)\n', (2862, 2868), False, 'from aiogram.utils.markdown import hide_link\n')] |
#!c:\users\hooma\documents\github\spinesegmentation\segmentation_test\scripts\python.exe
"""
Execute a graph cut on a voxel image based on some foreground and background markers.
Copyright (C) 2013 <NAME>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# build-in modules
from argparse import RawTextHelpFormatter
import argparse
import logging
import os
# third-party modules
import scipy
# path changes
# own modules
from medpy.core import ArgumentError, Logger
from medpy.io import load, save, header
from medpy import graphcut
from medpy.graphcut.wrapper import split_marker
# information
__author__ = "<NAME>"
__version__ = "r0.3.1, 2012-03-23"
__email__ = "<EMAIL>"
__status__ = "Release"
__description__ = """
Perform a binary graph cut using Boykov's max-flow/min-cut algorithm.
This implementation only computes a boundary term and does not use
any regional term. The desired boundary term can be selected via the
--boundary argument. Depending on the selected term, an additional
image has to be supplied as badditional.
In the case of the difference of means, it is the original image.
Furthermore the algorithm requires a binary image with foreground
markers and a binary image with background markers.
Additionally a filename for the created binary mask marking foreground
and background has to be supplied.
Note that the input images must be of the same dimensionality,
otherwise an exception is thrown.
Note that the orientation of the input images has to be taken into account.
Note that the quality of the resulting segmentations depends also on
the quality of the supplied markers.
Copyright (C) 2013 <NAME>
This program comes with ABSOLUTELY NO WARRANTY; This is free software,
and you are welcome to redistribute it under certain conditions; see
the LICENSE file or <http://www.gnu.org/licenses/> for details.
"""
# code
def main():
# parse cmd arguments
parser = getParser()
parser.parse_args()
args = getArguments(parser)
# prepare logger
logger = Logger.getInstance()
if args.debug: logger.setLevel(logging.DEBUG)
elif args.verbose: logger.setLevel(logging.INFO)
# check if output image exists
if not args.force:
if os.path.exists(args.output):
logger.warning('The output image {} already exists. Exiting.'.format(args.output))
exit(-1)
# select boundary term
    # available choices: diff_linear, diff_exp, diff_div, diff_pow, max_linear, max_exp, max_div, max_pow
if 'diff_linear' == args.boundary:
boundary_term = graphcut.energy_voxel.boundary_difference_linear
logger.info('Selected boundary term: linear difference of intensities')
elif 'diff_exp' == args.boundary:
boundary_term = graphcut.energy_voxel.boundary_difference_exponential
logger.info('Selected boundary term: exponential difference of intensities')
elif 'diff_div' == args.boundary:
boundary_term = graphcut.energy_voxel.boundary_difference_division
logger.info('Selected boundary term: divided difference of intensities')
elif 'diff_pow' == args.boundary:
boundary_term = graphcut.energy_voxel.boundary_difference_power
logger.info('Selected boundary term: power based / raised difference of intensities')
elif 'max_linear' == args.boundary:
boundary_term = graphcut.energy_voxel.boundary_maximum_linear
logger.info('Selected boundary term: linear maximum of intensities')
elif 'max_exp' == args.boundary:
boundary_term = graphcut.energy_voxel.boundary_maximum_exponential
logger.info('Selected boundary term: exponential maximum of intensities')
elif 'max_div' == args.boundary:
boundary_term = graphcut.energy_voxel.boundary_maximum_division
logger.info('Selected boundary term: divided maximum of intensities')
elif 'max_pow' == args.boundary:
boundary_term = graphcut.energy_voxel.boundary_maximum_power
logger.info('Selected boundary term: power based / raised maximum of intensities')
# load input images
badditional_image_data, reference_header = load(args.badditional)
markers_image_data, _ = load(args.markers)
# split marker image into fg and bg images
fgmarkers_image_data, bgmarkers_image_data = split_marker(markers_image_data)
# check if all images dimensions are the same
if not (badditional_image_data.shape == fgmarkers_image_data.shape == bgmarkers_image_data.shape):
logger.critical('Not all of the supplied images are of the same shape.')
raise ArgumentError('Not all of the supplied images are of the same shape.')
# extract spacing if required
if args.spacing:
spacing = header.get_pixel_spacing(reference_header)
logger.info('Taking spacing of {} into account.'.format(spacing))
else:
spacing = False
# generate graph
logger.info('Preparing BK_MFMC C++ graph...')
gcgraph = graphcut.graph_from_voxels(fgmarkers_image_data,
bgmarkers_image_data,
boundary_term = boundary_term,
boundary_term_args = (badditional_image_data, args.sigma, spacing))
# execute min-cut
logger.info('Executing min-cut...')
maxflow = gcgraph.maxflow()
logger.debug('Maxflow is {}'.format(maxflow))
# reshape results to form a valid mask
logger.info('Applying results...')
result_image_data = scipy.zeros(bgmarkers_image_data.size, dtype=scipy.bool_)
for idx in range(len(result_image_data)):
result_image_data[idx] = 0 if gcgraph.termtype.SINK == gcgraph.what_segment(idx) else 1
result_image_data = result_image_data.reshape(bgmarkers_image_data.shape)
# save resulting mask
save(result_image_data.astype(scipy.bool_), args.output, reference_header, args.force)
logger.info('Successfully terminated.')
def getArguments(parser):
"Provides additional validation of the arguments collected by argparse."
return parser.parse_args()
def getParser():
"Creates and returns the argparse parser object."
parser = argparse.ArgumentParser(description=__description__, formatter_class=RawTextHelpFormatter)
parser.add_argument('sigma', type=float, help='The sigma required for the boundary terms.')
parser.add_argument('badditional', help='The additional image required by the boundary term. See there for details.')
parser.add_argument('markers', help='Image containing the foreground (=1) and background (=2) markers.')
parser.add_argument('output', help='The output image containing the segmentation.')
parser.add_argument('--boundary', default='diff_exp', help='The boundary term to use. Note that the ones prefixed with diff_ require the original image, while the ones prefixed with max_ require the gradient image.', choices=['diff_linear', 'diff_exp', 'diff_div', 'diff_pow', 'max_linear', 'max_exp', 'max_div', 'max_pow'])
parser.add_argument('-s', dest='spacing', action='store_true', help='Set this flag to take the pixel spacing of the image into account. The spacing data will be extracted from the baddtional image.')
parser.add_argument('-f', dest='force', action='store_true', help='Set this flag to silently override files that exist.')
parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.')
parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.')
return parser
if __name__ == "__main__":
main() | [
"os.path.exists",
"medpy.io.load",
"argparse.ArgumentParser",
"scipy.zeros",
"medpy.core.Logger.getInstance",
"medpy.graphcut.wrapper.split_marker",
"medpy.io.header.get_pixel_spacing",
"medpy.graphcut.graph_from_voxels",
"medpy.core.ArgumentError"
]
| [((3039, 3059), 'medpy.core.Logger.getInstance', 'Logger.getInstance', ([], {}), '()\n', (3057, 3059), False, 'from medpy.core import ArgumentError, Logger\n'), ((5157, 5179), 'medpy.io.load', 'load', (['args.badditional'], {}), '(args.badditional)\n', (5161, 5179), False, 'from medpy.io import load, save, header\n'), ((5208, 5226), 'medpy.io.load', 'load', (['args.markers'], {}), '(args.markers)\n', (5212, 5226), False, 'from medpy.io import load, save, header\n'), ((5328, 5360), 'medpy.graphcut.wrapper.split_marker', 'split_marker', (['markers_image_data'], {}), '(markers_image_data)\n', (5340, 5360), False, 'from medpy.graphcut.wrapper import split_marker\n'), ((5999, 6172), 'medpy.graphcut.graph_from_voxels', 'graphcut.graph_from_voxels', (['fgmarkers_image_data', 'bgmarkers_image_data'], {'boundary_term': 'boundary_term', 'boundary_term_args': '(badditional_image_data, args.sigma, spacing)'}), '(fgmarkers_image_data, bgmarkers_image_data,\n boundary_term=boundary_term, boundary_term_args=(badditional_image_data,\n args.sigma, spacing))\n', (6025, 6172), False, 'from medpy import graphcut\n'), ((6552, 6609), 'scipy.zeros', 'scipy.zeros', (['bgmarkers_image_data.size'], {'dtype': 'scipy.bool_'}), '(bgmarkers_image_data.size, dtype=scipy.bool_)\n', (6563, 6609), False, 'import scipy\n'), ((7225, 7320), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__description__', 'formatter_class': 'RawTextHelpFormatter'}), '(description=__description__, formatter_class=\n RawTextHelpFormatter)\n', (7248, 7320), False, 'import argparse\n'), ((3241, 3268), 'os.path.exists', 'os.path.exists', (['args.output'], {}), '(args.output)\n', (3255, 3268), False, 'import os\n'), ((5617, 5687), 'medpy.core.ArgumentError', 'ArgumentError', (['"""Not all of the supplied images are of the same shape."""'], {}), "('Not all of the supplied images are of the same shape.')\n", (5630, 5687), False, 'from medpy.core import ArgumentError, Logger\n'), ((5762, 5804), 'medpy.io.header.get_pixel_spacing', 'header.get_pixel_spacing', (['reference_header'], {}), '(reference_header)\n', (5786, 5804), False, 'from medpy.io import load, save, header\n')] |
author = '<NAME>'
email = '<EMAIL>'
project = 'Astronaut Training Program'
description = 'Astronaut Training Program'
extensions = [
'sphinx.ext.todo',
'sphinx.ext.imgmath',
]
todo_emit_warnings = False
todo_include_todos = True
exclude_patterns = []
# -----------------------------------------------------------------------------
# Standard book config
# -----------------------------------------------------------------------------
import os
import re
import subprocess
import sys
from datetime import datetime
needs_sphinx = '2.2'
mathjax_path = 'https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-MML-AM_CHTML'
mathjax_config = {
'extensions': ['tex2jax.js'],
'jax': ['input/TeX', 'output/HTML-CSS'],
}
html_theme = 'sphinx_rtd_theme'
exclude_patterns = exclude_patterns + [
'.*',
'venv*',
'virtualenv*',
'_extensions',
'_img',
'_slides',
'_static',
'_themes',
'_tmp',
'*/_template.rst',
'*/contrib/*',
'*/solution/*',
'*/solutions/*',
'**.ipynb_checkpoints',
'README.rst',
'TODO.rst',
]
numfig_format = {
'section': 'Sect. %s.',
'figure': 'Fig. %s.',
'table': 'Tab. %s.',
'code-block': 'Code Listing %s.',
}
language = 'en'
source_directory = '.'
master_doc = 'index'
highlight_language = 'python3'
pygments_style = 'borland'
numfig = True
templates_path = ['_templates']
source_suffix = ['.rst']
imgmath_image_format = 'svg'
today_fmt = '%Y-%m-%d'
project_slug = re.sub(r'[\W]+', '', project)
sha1 = subprocess.Popen('git log -1 --format="%h"', stdout=subprocess.PIPE, shell=True).stdout.read().decode().replace('\n', '')
now = datetime.now()
year = now.year
today = now.strftime('%Y-%m-%d')
version = f'#{sha1}, {today}'
release = f'#{sha1}, {today}'
copyright = f'{year}, {author} <{email}>'
extensions_dir = os.path.join(os.path.dirname(__file__), '', '_extensions')
sys.path.append(extensions_dir)
htmlhelp_basename = project
html_theme_path = ['_themes']
html_static_path = ['_static']
html_favicon = '_static/favicon.png'
html_sidebars = {'sidebar': ['localtoc.html', 'sourcelink.html', 'searchbox.html']}
html_show_sphinx = False
html_context = {
'css_files': [
'_static/theme-overrides.css',
],
}
latex_documents = [(master_doc, f'{project_slug}.tex', project, author, 'manual')]
latex_elements = {
'papersize': 'a4paper',
'pointsize': '10pt',
'figure_align': 'htbp',
# Fix for: LaTeX Backend Fails with Citations In Figure Captions
'preamble': r"""
\usepackage{etoolbox}
\AtBeginEnvironment{figure}{\renewcommand{\phantomsection}{}}
"""
}
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
epub_exclude_files = ['search.html']
man_pages = [
(master_doc, project_slug, project, [author], 1)
]
texinfo_documents = [
(master_doc, project_slug, project, author, project, '', 'Miscellaneous'),
]
| [
"subprocess.Popen",
"os.path.dirname",
"datetime.datetime.now",
"re.sub",
"sys.path.append"
]
| [((1499, 1528), 're.sub', 're.sub', (['"""[\\\\W]+"""', '""""""', 'project'], {}), "('[\\\\W]+', '', project)\n", (1505, 1528), False, 'import re\n'), ((1664, 1678), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1676, 1678), False, 'from datetime import datetime\n'), ((1908, 1939), 'sys.path.append', 'sys.path.append', (['extensions_dir'], {}), '(extensions_dir)\n', (1923, 1939), False, 'import sys\n'), ((1862, 1887), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1877, 1887), False, 'import os\n'), ((1536, 1621), 'subprocess.Popen', 'subprocess.Popen', (['"""git log -1 --format="%h\\""""'], {'stdout': 'subprocess.PIPE', 'shell': '(True)'}), '(\'git log -1 --format="%h"\', stdout=subprocess.PIPE, shell=True\n )\n', (1552, 1621), False, 'import subprocess\n')] |
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from keras import regularizers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model
import numpy as np
from keras.preprocessing.image import img_to_array, load_img
from keras.preprocessing import image
import os
import matplotlib.pyplot as plt
# defining classes
def soil(result):
soil_type=""
if result[0]==2:
soil_type="Red soil"
elif result[0]==1:
soil_type="Black soil"
else:
soil_type="Alluvial soil"
return soil_type
# Adding dataset paths
PATH = 'new_datasets'
train_dir = os.path.join(PATH, 'train')
validation_dir = os.path.join(PATH, 'validation')
test_dir = os.path.join(PATH, 'test')
train_red_dir = os.path.join(train_dir, 'Red_soil')
validation_red_dir = os.path.join(validation_dir, 'Red_soil')
train_black_dir = os.path.join(train_dir, 'Black_soil')
validation_black_dir = os.path.join(validation_dir, 'Black_soil')
train_all_dir = os.path.join(train_dir, 'Alluvial_soil')
validation_all_dir = os.path.join(validation_dir, 'Alluvial_soil')
num_soil_tr = len(os.listdir(train_red_dir)) + len(os.listdir(train_black_dir)) +len(os.listdir(train_all_dir))
num_soil_val = len(os.listdir(validation_red_dir)) + len(os.listdir(validation_black_dir)) + len((os.listdir(validation_all_dir)))
print("Total training images = ",num_soil_tr)
print("Total validation images = ",num_soil_val)
# hyperparameters
batch_size = 100
epochs = 15
IMG_HEIGHT = 128
IMG_WIDTH = 128
classes_num=3
# data generators
train_image_generator = ImageDataGenerator(rescale=1./255)
validation_image_generator = ImageDataGenerator(rescale=1./255)
train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,
directory=train_dir,
shuffle=True,
target_size=(IMG_HEIGHT, IMG_WIDTH),
class_mode='categorical')
val_data_gen = validation_image_generator.flow_from_directory(batch_size=batch_size,
directory=validation_dir,
target_size=(IMG_HEIGHT, IMG_WIDTH),
shuffle=True,
class_mode='categorical')
# defining the model
model = Sequential([
Conv2D(16, 5, activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)),
MaxPooling2D(pool_size=(3, 3)),
Dropout(0.2),
Conv2D(32, 5, activation='relu'),
MaxPooling2D(pool_size=(3, 3)),
Dropout(0.2),
Conv2D(64, 5, activation='relu'),
MaxPooling2D(pool_size=(3, 3)),
Dropout(0.3),
Flatten(),
Dense(32, activation='relu'),
Dense(classes_num, activation='softmax')
])
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
metrics=['accuracy'])
model.summary()
history = model.fit_generator(
train_data_gen,
steps_per_epoch= num_soil_tr// batch_size,
epochs=epochs,
validation_data=val_data_gen,
validation_steps=num_soil_val // batch_size
)
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
# training and validation graphs
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
model.save('new_soil_classify.h5')
# for testing trained model with images differnent class
image_path="red.jpg"
img = image.load_img(image_path, target_size=(IMG_HEIGHT, IMG_WIDTH))
plt.imshow(img)
img = np.expand_dims(img, axis=0)
result=model.predict_classes(img)
plt.title(result[0])
plt.show()
image_path1="black.jpg"
img1 = image.load_img(image_path1, target_size=(IMG_HEIGHT, IMG_WIDTH))
plt.imshow(img1)
img1 = np.expand_dims(img1, axis=0)
result=model.predict_classes(img1)
plt.title(result[0])
plt.show()
image_path="all.jpg"
img = image.load_img(image_path, target_size=(IMG_HEIGHT, IMG_WIDTH))
plt.imshow(img)
img = np.expand_dims(img, axis=0)
result=model.predict_classes(img)
plt.title(result[0])
plt.show()
| [
"matplotlib.pyplot.imshow",
"os.listdir",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Flatten",
"matplotlib.pyplot.plot",
"os.path.join",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"keras.preprocessing.image.load_img",
"tensorflow.keras.layers.Dropout",
"matplotlib.pyplot.figure",
"tensorflow.keras.layers.Dense",
"numpy.expand_dims",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
]
| [((834, 861), 'os.path.join', 'os.path.join', (['PATH', '"""train"""'], {}), "(PATH, 'train')\n", (846, 861), False, 'import os\n'), ((879, 911), 'os.path.join', 'os.path.join', (['PATH', '"""validation"""'], {}), "(PATH, 'validation')\n", (891, 911), False, 'import os\n'), ((923, 949), 'os.path.join', 'os.path.join', (['PATH', '"""test"""'], {}), "(PATH, 'test')\n", (935, 949), False, 'import os\n'), ((967, 1002), 'os.path.join', 'os.path.join', (['train_dir', '"""Red_soil"""'], {}), "(train_dir, 'Red_soil')\n", (979, 1002), False, 'import os\n'), ((1024, 1064), 'os.path.join', 'os.path.join', (['validation_dir', '"""Red_soil"""'], {}), "(validation_dir, 'Red_soil')\n", (1036, 1064), False, 'import os\n'), ((1084, 1121), 'os.path.join', 'os.path.join', (['train_dir', '"""Black_soil"""'], {}), "(train_dir, 'Black_soil')\n", (1096, 1121), False, 'import os\n'), ((1145, 1187), 'os.path.join', 'os.path.join', (['validation_dir', '"""Black_soil"""'], {}), "(validation_dir, 'Black_soil')\n", (1157, 1187), False, 'import os\n'), ((1205, 1245), 'os.path.join', 'os.path.join', (['train_dir', '"""Alluvial_soil"""'], {}), "(train_dir, 'Alluvial_soil')\n", (1217, 1245), False, 'import os\n'), ((1267, 1312), 'os.path.join', 'os.path.join', (['validation_dir', '"""Alluvial_soil"""'], {}), "(validation_dir, 'Alluvial_soil')\n", (1279, 1312), False, 'import os\n'), ((1793, 1830), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (1811, 1830), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((1858, 1895), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (1876, 1895), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((3744, 3770), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (3754, 3770), True, 'import matplotlib.pyplot as plt\n'), ((3771, 3791), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (3782, 3791), True, 'import matplotlib.pyplot as plt\n'), ((3792, 3846), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'acc'], {'label': '"""Training Accuracy"""'}), "(epochs_range, acc, label='Training Accuracy')\n", (3800, 3846), True, 'import matplotlib.pyplot as plt\n'), ((3847, 3907), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'val_acc'], {'label': '"""Validation Accuracy"""'}), "(epochs_range, val_acc, label='Validation Accuracy')\n", (3855, 3907), True, 'import matplotlib.pyplot as plt\n'), ((3908, 3937), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (3918, 3937), True, 'import matplotlib.pyplot as plt\n'), ((3938, 3983), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation Accuracy"""'], {}), "('Training and Validation Accuracy')\n", (3947, 3983), True, 'import matplotlib.pyplot as plt\n'), ((3985, 4005), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (3996, 4005), True, 'import matplotlib.pyplot as plt\n'), ((4006, 4057), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'loss'], {'label': '"""Training Loss"""'}), "(epochs_range, loss, label='Training Loss')\n", (4014, 4057), True, 'import matplotlib.pyplot as plt\n'), ((4058, 4115), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'val_loss'], {'label': '"""Validation Loss"""'}), 
"(epochs_range, val_loss, label='Validation Loss')\n", (4066, 4115), True, 'import matplotlib.pyplot as plt\n'), ((4116, 4145), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (4126, 4145), True, 'import matplotlib.pyplot as plt\n'), ((4146, 4187), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation Loss"""'], {}), "('Training and Validation Loss')\n", (4155, 4187), True, 'import matplotlib.pyplot as plt\n'), ((4188, 4198), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4196, 4198), True, 'import matplotlib.pyplot as plt\n'), ((4323, 4386), 'keras.preprocessing.image.load_img', 'image.load_img', (['image_path'], {'target_size': '(IMG_HEIGHT, IMG_WIDTH)'}), '(image_path, target_size=(IMG_HEIGHT, IMG_WIDTH))\n', (4337, 4386), False, 'from keras.preprocessing import image\n'), ((4387, 4402), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (4397, 4402), True, 'import matplotlib.pyplot as plt\n'), ((4409, 4436), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (4423, 4436), True, 'import numpy as np\n'), ((4471, 4491), 'matplotlib.pyplot.title', 'plt.title', (['result[0]'], {}), '(result[0])\n', (4480, 4491), True, 'import matplotlib.pyplot as plt\n'), ((4492, 4502), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4500, 4502), True, 'import matplotlib.pyplot as plt\n'), ((4536, 4600), 'keras.preprocessing.image.load_img', 'image.load_img', (['image_path1'], {'target_size': '(IMG_HEIGHT, IMG_WIDTH)'}), '(image_path1, target_size=(IMG_HEIGHT, IMG_WIDTH))\n', (4550, 4600), False, 'from keras.preprocessing import image\n'), ((4601, 4617), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img1'], {}), '(img1)\n', (4611, 4617), True, 'import matplotlib.pyplot as plt\n'), ((4625, 4653), 'numpy.expand_dims', 'np.expand_dims', (['img1'], {'axis': '(0)'}), '(img1, axis=0)\n', (4639, 4653), True, 'import numpy as np\n'), ((4689, 4709), 'matplotlib.pyplot.title', 'plt.title', (['result[0]'], {}), '(result[0])\n', (4698, 4709), True, 'import matplotlib.pyplot as plt\n'), ((4710, 4720), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4718, 4720), True, 'import matplotlib.pyplot as plt\n'), ((4749, 4812), 'keras.preprocessing.image.load_img', 'image.load_img', (['image_path'], {'target_size': '(IMG_HEIGHT, IMG_WIDTH)'}), '(image_path, target_size=(IMG_HEIGHT, IMG_WIDTH))\n', (4763, 4812), False, 'from keras.preprocessing import image\n'), ((4813, 4828), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (4823, 4828), True, 'import matplotlib.pyplot as plt\n'), ((4835, 4862), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (4849, 4862), True, 'import numpy as np\n'), ((4897, 4917), 'matplotlib.pyplot.title', 'plt.title', (['result[0]'], {}), '(result[0])\n', (4906, 4917), True, 'import matplotlib.pyplot as plt\n'), ((4918, 4928), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4926, 4928), True, 'import matplotlib.pyplot as plt\n'), ((1399, 1424), 'os.listdir', 'os.listdir', (['train_all_dir'], {}), '(train_all_dir)\n', (1409, 1424), False, 'import os\n'), ((1524, 1554), 'os.listdir', 'os.listdir', (['validation_all_dir'], {}), '(validation_all_dir)\n', (1534, 1554), False, 'import os\n'), ((2783, 2855), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(16)', '(5)'], {'activation': '"""relu"""', 'input_shape': '(IMG_HEIGHT, IMG_WIDTH, 3)'}), "(16, 5, activation='relu', input_shape=(IMG_HEIGHT, 
IMG_WIDTH, 3))\n", (2789, 2855), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((2861, 2891), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(3, 3)'}), '(pool_size=(3, 3))\n', (2873, 2891), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((2897, 2909), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2904, 2909), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((2915, 2947), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(5)'], {'activation': '"""relu"""'}), "(32, 5, activation='relu')\n", (2921, 2947), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((2953, 2983), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(3, 3)'}), '(pool_size=(3, 3))\n', (2965, 2983), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((2989, 3001), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2996, 3001), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((3007, 3039), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(5)'], {'activation': '"""relu"""'}), "(64, 5, activation='relu')\n", (3013, 3039), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((3045, 3075), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(3, 3)'}), '(pool_size=(3, 3))\n', (3057, 3075), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((3081, 3093), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (3088, 3093), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((3099, 3108), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3106, 3108), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((3114, 3142), 'tensorflow.keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (3119, 3142), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((3148, 3188), 'tensorflow.keras.layers.Dense', 'Dense', (['classes_num'], {'activation': '"""softmax"""'}), "(classes_num, activation='softmax')\n", (3153, 3188), False, 'from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n'), ((1332, 1357), 'os.listdir', 'os.listdir', (['train_red_dir'], {}), '(train_red_dir)\n', (1342, 1357), False, 'import os\n'), ((1365, 1392), 'os.listdir', 'os.listdir', (['train_black_dir'], {}), '(train_black_dir)\n', (1375, 1392), False, 'import os\n'), ((1445, 1475), 'os.listdir', 'os.listdir', (['validation_red_dir'], {}), '(validation_red_dir)\n', (1455, 1475), False, 'import os\n'), ((1483, 1515), 'os.listdir', 'os.listdir', (['validation_black_dir'], {}), '(validation_black_dir)\n', (1493, 1515), False, 'import os\n')] |
#client.py
#!/usr/bin/python # This is client.py file
import socket # Import socket module
s = socket.socket() # Create a socket object
host = socket.gethostname() # Get local machine name
port = 12352 # Reserve a port for your service.
s.connect((host, port))
while True:
message = input('Digite mensagem: ')
s.send(bytes(message, encoding='utf8'))
if message == 'SAIR':
        break
print('Mensagem enviada.')
print('Esperando resposta.')
answer = s.recv(1024).decode('utf8')
print('Resposta recebida: ' + answer)
print('Desconectando.')
s.close()
| [
"socket.gethostname",
"socket.socket"
]
| [((162, 177), 'socket.socket', 'socket.socket', ([], {}), '()\n', (175, 177), False, 'import socket\n'), ((238, 258), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (256, 258), False, 'import socket\n')] |
"""Consts for Kaiterra integration."""
from datetime import timedelta
from homeassistant.const import (
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER,
CONCENTRATION_PARTS_PER_BILLION,
CONCENTRATION_PARTS_PER_MILLION,
PERCENTAGE,
Platform,
)
DOMAIN = "kaiterra"
DISPATCHER_KAITERRA = "kaiterra_update"
AQI_SCALE = {
"cn": [0, 50, 100, 150, 200, 300, 400, 500],
"in": [0, 50, 100, 200, 300, 400, 500],
"us": [0, 50, 100, 150, 200, 300, 500],
}
AQI_LEVEL = {
"cn": [
"Good",
"Satisfactory",
"Moderate",
"Unhealthy for sensitive groups",
"Unhealthy",
"Very unhealthy",
"Hazardous",
],
"in": [
"Good",
"Satisfactory",
"Moderately polluted",
"Poor",
"Very poor",
"Severe",
],
"us": [
"Good",
"Moderate",
"Unhealthy for sensitive groups",
"Unhealthy",
"Very unhealthy",
"Hazardous",
],
}
ATTR_VOC = "volatile_organic_compounds"
ATTR_AQI_LEVEL = "air_quality_index_level"
ATTR_AQI_POLLUTANT = "air_quality_index_pollutant"
AVAILABLE_AQI_STANDARDS = ["us", "cn", "in"]
AVAILABLE_UNITS = [
"x",
PERCENTAGE,
"C",
"F",
CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER,
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
CONCENTRATION_PARTS_PER_MILLION,
CONCENTRATION_PARTS_PER_BILLION,
]
AVAILABLE_DEVICE_TYPES = ["laseregg", "sensedge"]
CONF_AQI_STANDARD = "aqi_standard"
CONF_PREFERRED_UNITS = "preferred_units"
DEFAULT_AQI_STANDARD = "us"
DEFAULT_PREFERRED_UNIT: list[str] = []
DEFAULT_SCAN_INTERVAL = timedelta(seconds=30)
PLATFORMS = [Platform.SENSOR, Platform.AIR_QUALITY]
| [
"datetime.timedelta"
]
| [((1666, 1687), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(30)'}), '(seconds=30)\n', (1675, 1687), False, 'from datetime import timedelta\n')] |
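As a hedged illustration of how AQI_SCALE and AQI_LEVEL fit together (the helper below is an assumption, not part of the integration), a numeric index can be mapped to its label by walking the scale's upper bounds:

# Hypothetical helper, not from the source: maps a numeric AQI to its label.
def aqi_to_level(aqi: int, standard: str = DEFAULT_AQI_STANDARD) -> str:
    scale = AQI_SCALE[standard]      # e.g. [0, 50, 100, 150, 200, 300, 500] for "us"
    levels = AQI_LEVEL[standard]     # one label per interval between scale points
    for upper_bound, level in zip(scale[1:], levels):
        if aqi <= upper_bound:
            return level
    return levels[-1]                # anything above the last bound is the worst bucket

# aqi_to_level(42, "us")   -> "Good"
# aqi_to_level(180, "us")  -> "Unhealthy"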
from datetime import datetime
from werkzeug.security import generate_password_hash
from flask import Blueprint, jsonify, request
from sqlalchemy.orm import joinedload
from flask_login import login_required
from app.models import db, User, Type
from app.forms import UpdateUserForm
from .auth_routes import authenticate, validation_errors_to_error_messages
user_routes = Blueprint('users', __name__)
@user_routes.route("/types")
def types():
types = db.session.query(Type).all()
return jsonify([type.name_to_id() for type in types])
@user_routes.route('/')
@login_required
def users():
users = db.session.query(User).all()
return jsonify([user.to_dict_full() for user in users])
@user_routes.route('/<int:id>')
@login_required
def user(id):
user = User.query.get(id)
return user.to_dict()
@user_routes.route('/<int:id>', methods=["DELETE"])
@login_required
def user_delete(id):
user = User.query.get(id)
db.session.delete(user)
db.session.commit()
return { id: "Successfully deleted" }
@user_routes.route('/<int:id>', methods=["POST"])
@login_required
def user_update(id):
user = User.query.options(joinedload("type")).get(id)
form = UpdateUserForm()
form['csrf_token'].data = request.cookies['csrf_token']
if form.validate_on_submit():
print("_______ FORM DATA",form.data)
        user.username = form.data['username']
        user.email = form.data['email']
        user.hashed_password = generate_password_hash(form.password.data)
        user.first_name = form.data['first_name']
        user.last_name = form.data['last_name']
        user.type_id = form.data['type_id']
        user.updated_at = datetime.now()
db.session.commit()
return user.to_dict_full()
return {'errors': validation_errors_to_error_messages(form.errors)}
@user_routes.route("/<int:id>/clients")
@login_required
def admin_fetch_clients(id):
authenticated = authenticate()
clientUsers = db.session.query(User).filter_by(type_id=2).all()
if authenticated["type_id"] != 1:
return jsonify({
"errors": [
"Unauthorized"
]
})
return jsonify([user.to_dict_full() for user in clientUsers])
| [
"app.models.db.session.delete",
"app.models.db.session.query",
"app.forms.UpdateUserForm",
"datetime.datetime.now",
"werkzeug.security.generate_password_hash",
"sqlalchemy.orm.joinedload",
"app.models.db.session.commit",
"app.models.User.query.get",
"flask.Blueprint",
"flask.jsonify"
]
| [((371, 399), 'flask.Blueprint', 'Blueprint', (['"""users"""', '__name__'], {}), "('users', __name__)\n", (380, 399), False, 'from flask import Blueprint, jsonify, request\n'), ((773, 791), 'app.models.User.query.get', 'User.query.get', (['id'], {}), '(id)\n', (787, 791), False, 'from app.models import db, User, Type\n'), ((920, 938), 'app.models.User.query.get', 'User.query.get', (['id'], {}), '(id)\n', (934, 938), False, 'from app.models import db, User, Type\n'), ((943, 966), 'app.models.db.session.delete', 'db.session.delete', (['user'], {}), '(user)\n', (960, 966), False, 'from app.models import db, User, Type\n'), ((971, 990), 'app.models.db.session.commit', 'db.session.commit', ([], {}), '()\n', (988, 990), False, 'from app.models import db, User, Type\n'), ((1191, 1207), 'app.forms.UpdateUserForm', 'UpdateUserForm', ([], {}), '()\n', (1205, 1207), False, 'from app.forms import UpdateUserForm\n'), ((1667, 1681), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1679, 1681), False, 'from datetime import datetime\n'), ((1690, 1709), 'app.models.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1707, 1709), False, 'from app.models import db, User, Type\n'), ((2065, 2102), 'flask.jsonify', 'jsonify', (["{'errors': ['Unauthorized']}"], {}), "({'errors': ['Unauthorized']})\n", (2072, 2102), False, 'from flask import Blueprint, jsonify, request\n'), ((456, 478), 'app.models.db.session.query', 'db.session.query', (['Type'], {}), '(Type)\n', (472, 478), False, 'from app.models import db, User, Type\n'), ((609, 631), 'app.models.db.session.query', 'db.session.query', (['User'], {}), '(User)\n', (625, 631), False, 'from app.models import db, User, Type\n'), ((1460, 1502), 'werkzeug.security.generate_password_hash', 'generate_password_hash', (['form.password.data'], {}), '(form.password.data)\n', (1482, 1502), False, 'from werkzeug.security import generate_password_hash\n'), ((1152, 1170), 'sqlalchemy.orm.joinedload', 'joinedload', (['"""type"""'], {}), "('type')\n", (1162, 1170), False, 'from sqlalchemy.orm import joinedload\n'), ((1957, 1979), 'app.models.db.session.query', 'db.session.query', (['User'], {}), '(User)\n', (1973, 1979), False, 'from app.models import db, User, Type\n')] |
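A hedged sketch of how this blueprint is typically wired into the application; the factory layout, import path and URL prefix are assumptions, not taken from the source:

# Hypothetical wiring, not from the source.
from flask import Flask
from app.api.user_routes import user_routes   # assumed import path for the module above

app = Flask(__name__)
app.register_blueprint(user_routes, url_prefix='/api/users')
# GET /api/users/types now serves the `types` view, GET /api/users/ the user list, etc.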
"""Machine Learning"""
import importlib
import numpy as np
import pandas as pd
import json
from jsonschema import validate
from sklearn.pipeline import make_pipeline
from timeflux.core.node import Node
from timeflux.core.exceptions import ValidationError, WorkerInterrupt
from timeflux.helpers.background import Task
from timeflux.helpers.port import make_event, match_events, get_meta
from timeflux.helpers.clock import now, min_time, max_time
# Statuses
IDLE = 0
ACCUMULATING = 1
FITTING = 2
READY = 3
class Pipeline(Node):
"""Fit, transform and predict.
Training on continuous data is always unsupervised.
Training on epoched data can either be supervised or unsupervised.
    If fit is `False`, input events are ignored, and initial training is not performed.
    Automatically set to False if mode is either 'fit_predict' or 'fit_transform'.
Automatically set to True if mode is either 'predict', 'predict_proba' or 'predict_log_proba'.
Attributes:
i (Port): Continuous data input, expects DataFrame.
i_* (Port): Epoched data input, expects DataFrame.
i_training (Port): Continuous training data input, expects DataFrame.
i_training_* (Port): Epoched training data input, expects DataFrame.
i_events (Port): Event input, expects DataFrame.
o (Port): Continuous data output, provides DataFrame.
o_* (Port): Epoched data output, provides DataFrame.
o_events (Port): Event output, provides DataFrame.
Args:
steps (dict): Pipeline steps and settings
fit (bool):
mode ('predict'|'predict_proba'|'predict_log_proba'|'transform'|'fit_predict'|'fit_transform'):
meta_label (str|tuple|None):
event_start_accumulation (str):
event_stop_accumulation (str):
event_start_training (str):
event_reset (str):
buffer_size (str):
passthrough (bool):
resample (bool):
resample_direction ('right'|'left'|'both'):
resample_rate (None|float):
model: Load a pickle model - NOT IMPLEMENTED
cv: Cross-validation - NOT IMPLEMENTED
"""
def __init__(
self,
steps,
fit=True,
mode="predict",
meta_label=("epoch", "context", "target"),
event_start_accumulation="accumulation_starts",
event_stop_accumulation="accumulation_stops",
event_start_training="training_starts",
event_reset=None,
buffer_size="5s",
passthrough=False,
resample=False,
resample_direction="right",
resample_rate=None,
model=None,
cv=None,
use_task = True,
):
# TODO: validation
# TODO: model loading from file
# TODO: cross-validation
# TODO: provide more context for errors
self.fit = fit
self.mode = mode
self.meta_label = meta_label
self.event_start_accumulation = event_start_accumulation
self.event_stop_accumulation = event_stop_accumulation
self.event_start_training = event_start_training
self.event_reset = event_reset
self.passthrough = passthrough
self.resample = resample
self.resample_direction = resample_direction
self.resample_rate = resample_rate
self.use_task = use_task
self._buffer_size = pd.Timedelta(buffer_size)
self._make_pipeline(steps)
self._reset()
def update(self):
# Let's get ready
self._clear()
# Reset
if self.event_reset:
matches = match_events(self.i_events, self.event_reset)
if matches is not None:
self.logger.debug("Reset")
if self._task is not None:
if self._status == FITTING:
self._task.stop()
self._reset()
# Are we dealing with continuous data or epochs?
if self._dimensions is None:
port_name = "i_training" if self.fit else "i"
if getattr(self, port_name).ready():
self._dimensions = 2
elif len(list(self.iterate(port_name + "_*"))) > 0:
self._dimensions = 3
# Set the accumulation boundaries
if self._accumulation_start is None:
matches = match_events(self.i_events, self.event_start_accumulation)
if matches is not None:
self._accumulation_start = matches.index.values[0]
self._status = ACCUMULATING
self.logger.debug("Start accumulation")
if self._accumulation_stop is None:
matches = match_events(self.i_events, self.event_stop_accumulation)
if matches is not None:
self._accumulation_stop = matches.index.values[0]
self.logger.debug("Stop accumulation")
# Always buffer a few seconds, in case the start event is coming late
if self._status == IDLE:
start = (now() - self._buffer_size).to_datetime64()
stop = max_time()
self._accumulate(start, stop)
# Accumulate between boundaries
if self._status == ACCUMULATING:
start = self._accumulation_start
stop = self._accumulation_stop if self._accumulation_stop else max_time()
self._accumulate(start, stop)
# Should we start fitting the model?
if self._status < FITTING:
if match_events(self.i_events, self.event_start_training) is not None:
self._status = FITTING
self.logger.debug("Start training")
if self.use_task:
self._task = Task(
self._pipeline, "fit", self._X_train, self._y_train
).start()
else:
try:
self._pipeline = self._pipeline.fit(self._X_train, self._y_train)
self._fitted_success = True
except Exception as error:
self._fitted_success = False
# Is the model ready?
if self._status == FITTING:
ready_to_proceed = False
if self.use_task:
status = self._task.status()
if status:
ready_to_proceed = True
else:
ready_to_proceed = True
if ready_to_proceed:
if self.use_task:
success = status["success"]
else:
success = self._fitted_success
if success:
if self.use_task:
self._pipeline = status["instance"]
self.logger.debug(f"Model fitted in {status['time']} seconds")
else:
self.logger.debug(f"Model fitted")
self._status = READY
# TODO: this can potentially be overwritten in _send()
self.o_events.data = make_event("ready")
else:
if self.use_task:
self.logger.error(
f"An error occured while fitting: {status['exception'].args[0]}"
)
self.logger.debug(
"\nTraceback (most recent call last):\n"
+ "".join(status["traceback"])
)
else:
self.logger.error(
f"An error occured while fitting"
)
raise WorkerInterrupt()
# Run the pipeline
if self._status == READY:
self._receive()
if self._X is not None:
args = [self._X]
if self.mode.startswith("fit"):
args.append(self._y)
# TODO: optionally loop through epochs instead of sending them all at once
self._out = getattr(self._pipeline, self.mode)(*args)
# Set output streams
self._send()
def terminate(self):
# Kill the fit subprocess
if self._task is not None:
self._task.stop()
def _reset(self):
self._X_train = None
self._y_train = None
self._X_train_indices = np.array([], dtype=np.datetime64)
self._accumulation_start = None
self._accumulation_stop = None
self._dimensions = None
self._shape = ()
self._task = None
if self.mode.startswith("fit"):
self.fit = False
elif self.mode.startswith("predict"):
self.fit = True
if self.fit:
self._status = IDLE
else:
self._status = READY
def _clear(self):
self._X = None
self._y = None
self._X_indices = []
self._X_columns = []
self._X_meta = None
self._out = None
def _make_pipeline(self, steps):
schema = {
"type": "array",
"minItems": 1,
"items": {
"type": "object",
"properties": {
"module": {"type": "string"},
"class": {"type": "string"},
"args": {"type": "object"},
},
"required": ["module", "class"],
},
}
try:
validate(instance=steps, schema=schema)
except Exception as error:
raise ValidationError("steps", error.message)
pipeline = []
for step in steps:
try:
args = step["args"] if "args" in step else {}
m = importlib.import_module(step["module"])
c = getattr(m, step["class"])
i = c(**args)
pipeline.append(i)
except ImportError as error:
raise ValidationError("steps", f"could not import '{step['module']}'")
except AttributeError as error:
raise ValidationError(
"steps", f"could not find class '{step['class']}'"
)
except TypeError as error:
raise ValidationError(
"steps",
f"could not instantiate class '{step['class']}' with the given params",
)
# TODO: memory and verbose args
self._pipeline = make_pipeline(*pipeline, memory=None, verbose=False)
def _accumulate(self, start, stop):
# Do nothing if no fitting required
if not self.fit:
return
# Set defaults
indices = np.array([], dtype=np.datetime64)
# Accumulate continuous data
if self._dimensions == 2:
if self.i_training.ready():
data = self.i_training.data
mask = (data.index >= start) & (data.index < stop)
data = data[mask]
if not data.empty:
if self._X_train is None:
self._X_train = data.values
self._shape = self._X_train.shape[1]
indices = data.index.values
else:
if data.shape[1] == self._shape:
self._X_train = np.vstack((self._X_train, data.values))
indices = data.index.values
else:
self.logger.warning("Invalid shape")
# Accumulate epoched data
if self._dimensions == 3:
for _, _, port in self.iterate("i_training_*"):
if port.ready():
index = port.data.index.values[0]
if index >= start and index < stop:
data = port.data.values
label = get_meta(port, self.meta_label)
if self._shape and (data.shape != self._shape):
self.logger.warning("Invalid shape")
continue
if self.meta_label is not None and label is None:
self.logger.warning("Invalid label")
continue
if self._X_train is None:
self._X_train = np.array([data])
self._shape = self._X_train.shape[1:]
else:
self._X_train = np.vstack((self._X_train, [data]))
indices = np.append(indices, index)
if label is not None:
if self._y_train is None:
self._y_train = np.array([label])
else:
self._y_train = np.append(self._y_train, [label])
# Store indices
if indices.size != 0:
self._X_train_indices = np.append(self._X_train_indices, indices)
# Trim
if self._X_train is not None:
mask = (self._X_train_indices >= start) & (self._X_train_indices < stop)
self._X_train = self._X_train[mask]
self._X_train_indices = self._X_train_indices[mask]
if self._y_train is not None:
self._y_train = self._y_train[mask]
def _receive(self):
# Continuous data
if self._dimensions == 2:
if self.i.ready():
if not self._X_columns:
self._X_columns = list(self.i.data.columns)
if self._shape and (self.i.data.shape[1] != self._shape):
self.logger.warning("Invalid shape")
else:
self._X = self.i.data.values
self._X_indices = self.i.data.index.values
self._X_meta = self.i.meta
# Epochs
if self._dimensions == 3:
for name, _, port in self.iterate("i_*"):
if port.ready() and "training" not in name and "events" not in name:
data = port.data.values
meta = port.meta
indices = port.data.index.values
label = get_meta(port, self.meta_label)
if not self._X_columns:
self._X_columns = list(port.data.columns)
if self._shape and (data.shape != self._shape):
self.logger.warning("Invalid shape")
continue
if not self.fit and self.meta_label is not None and label is None:
self.logger.warning("Invalid label")
continue
if self._X is None:
self._X = []
if self._y is None and label is not None:
self._y = []
if self._X_meta is None:
self._X_meta = []
self._X.append(data)
self._X_indices.append(indices)
self._X_meta.append(meta)
if label is not None:
self._y.append(label)
def _send(self):
# Passthrough
if self._status < READY and self.passthrough:
inputs = []
for _, suffix, port in self.iterate("i*"):
if not suffix.startswith("_training") and not suffix.startswith(
"_events"
):
inputs.append((suffix, port))
for suffix, src_port in inputs:
dst_port = getattr(self, "o" + suffix)
dst_port.data = src_port.data
dst_port.meta = src_port.meta
# Model
if self._out is not None:
if "predict" in self.mode:
# Send events
if len(self._X_indices) == len(self._out):
# TODO: skip JSON serialization?
data = [
[self.mode, json.dumps({"result": self._np_to_native(result)})]
for result in self._out
]
times = (
self._X_indices
if self._dimensions == 2
else np.asarray(self._X_indices)[:, 0]
) # Keep the first timestamp of each epoch
names = ["label", "data"]
meta = (
self._X_meta
if self._dimensions == 2
else {"epochs": self._X_meta}
) # port.meta should always be an object
self.o_events.set(data, times, names, meta)
else:
self.logger.warning(
"Number of predictions inconsistent with input length"
)
else:
# Send data
if self._dimensions == 2:
try:
self.o.data = self._reindex(
self._out, self._X_indices, self._X_columns
)
self.o.meta = self._X_meta
except Exception as e:
self.logger.warning(getattr(e, "message", repr(e)))
if self._dimensions == 3:
if len(self._X_indices) == len(self._out):
for i, (data, times) in enumerate(
zip(self._out, self._X_indices)
):
try:
getattr(self, "o_" + str(i)).data = self._reindex(
data, times, self._X_columns
)
getattr(self, "o_" + str(i)).meta = self._X_meta[i]
except Exception as e:
self.logger.warning(getattr(e, "message", repr(e)))
else:
self.logger.warning(
"Number of transforms inconsistent with number of epochs"
)
def _np_to_native(self, data):
"""Convert numpy scalars and objects to native types."""
return getattr(data, "tolist", lambda: data)()
def _reindex(self, data, times, columns):
if len(data) != len(times):
if self.resample:
# Resample at a specific frequency
kwargs = {"periods": len(data)}
if self.resample_rate is None:
kwargs["freq"] = pd.infer_freq(times)
kwargs["freq"] = pd.tseries.frequencies.to_offset(kwargs["freq"])
else:
kwargs["freq"] = pd.DateOffset(seconds=1 / self.resample_rate)
if self.resample_direction == "right":
kwargs["start"] = times[0]
elif self.resample_direction == "left":
kwargs["end"] = times[-1]
else:
def middle(a):
return int(np.ceil(len(a) / 2)) - 1
kwargs["start"] = times[middle(times)] - (
middle(data) * kwargs["freq"]
)
times = pd.date_range(**kwargs)
else:
# Linearly arange between first and last
times = pd.date_range(start=times[0], end=times[-1], periods=len(data))
return pd.DataFrame(data, times, columns)
| [
"timeflux.core.exceptions.WorkerInterrupt",
"timeflux.helpers.clock.max_time",
"pandas.infer_freq",
"numpy.array",
"timeflux.helpers.clock.now",
"timeflux.core.exceptions.ValidationError",
"pandas.date_range",
"timeflux.helpers.background.Task",
"timeflux.helpers.port.make_event",
"numpy.asarray",
"numpy.vstack",
"pandas.DataFrame",
"importlib.import_module",
"sklearn.pipeline.make_pipeline",
"jsonschema.validate",
"pandas.DateOffset",
"timeflux.helpers.port.match_events",
"timeflux.helpers.port.get_meta",
"pandas.Timedelta",
"numpy.append",
"pandas.tseries.frequencies.to_offset"
]
| [((3357, 3382), 'pandas.Timedelta', 'pd.Timedelta', (['buffer_size'], {}), '(buffer_size)\n', (3369, 3382), True, 'import pandas as pd\n'), ((8422, 8455), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.datetime64'}), '([], dtype=np.datetime64)\n', (8430, 8455), True, 'import numpy as np\n'), ((10524, 10576), 'sklearn.pipeline.make_pipeline', 'make_pipeline', (['*pipeline'], {'memory': 'None', 'verbose': '(False)'}), '(*pipeline, memory=None, verbose=False)\n', (10537, 10576), False, 'from sklearn.pipeline import make_pipeline\n'), ((10749, 10782), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.datetime64'}), '([], dtype=np.datetime64)\n', (10757, 10782), True, 'import numpy as np\n'), ((19682, 19716), 'pandas.DataFrame', 'pd.DataFrame', (['data', 'times', 'columns'], {}), '(data, times, columns)\n', (19694, 19716), True, 'import pandas as pd\n'), ((3580, 3625), 'timeflux.helpers.port.match_events', 'match_events', (['self.i_events', 'self.event_reset'], {}), '(self.i_events, self.event_reset)\n', (3592, 3625), False, 'from timeflux.helpers.port import make_event, match_events, get_meta\n'), ((4318, 4376), 'timeflux.helpers.port.match_events', 'match_events', (['self.i_events', 'self.event_start_accumulation'], {}), '(self.i_events, self.event_start_accumulation)\n', (4330, 4376), False, 'from timeflux.helpers.port import make_event, match_events, get_meta\n'), ((4646, 4703), 'timeflux.helpers.port.match_events', 'match_events', (['self.i_events', 'self.event_stop_accumulation'], {}), '(self.i_events, self.event_stop_accumulation)\n', (4658, 4703), False, 'from timeflux.helpers.port import make_event, match_events, get_meta\n'), ((5056, 5066), 'timeflux.helpers.clock.max_time', 'max_time', ([], {}), '()\n', (5064, 5066), False, 'from timeflux.helpers.clock import now, min_time, max_time\n'), ((9510, 9549), 'jsonschema.validate', 'validate', ([], {'instance': 'steps', 'schema': 'schema'}), '(instance=steps, schema=schema)\n', (9518, 9549), False, 'from jsonschema import validate\n'), ((13057, 13098), 'numpy.append', 'np.append', (['self._X_train_indices', 'indices'], {}), '(self._X_train_indices, indices)\n', (13066, 13098), True, 'import numpy as np\n'), ((5311, 5321), 'timeflux.helpers.clock.max_time', 'max_time', ([], {}), '()\n', (5319, 5321), False, 'from timeflux.helpers.clock import now, min_time, max_time\n'), ((5460, 5514), 'timeflux.helpers.port.match_events', 'match_events', (['self.i_events', 'self.event_start_training'], {}), '(self.i_events, self.event_start_training)\n', (5472, 5514), False, 'from timeflux.helpers.port import make_event, match_events, get_meta\n'), ((9603, 9642), 'timeflux.core.exceptions.ValidationError', 'ValidationError', (['"""steps"""', 'error.message'], {}), "('steps', error.message)\n", (9618, 9642), False, 'from timeflux.core.exceptions import ValidationError, WorkerInterrupt\n'), ((9791, 9830), 'importlib.import_module', 'importlib.import_module', (["step['module']"], {}), "(step['module'])\n", (9814, 9830), False, 'import importlib\n'), ((19478, 19501), 'pandas.date_range', 'pd.date_range', ([], {}), '(**kwargs)\n', (19491, 19501), True, 'import pandas as pd\n'), ((7065, 7084), 'timeflux.helpers.port.make_event', 'make_event', (['"""ready"""'], {}), "('ready')\n", (7075, 7084), False, 'from timeflux.helpers.port import make_event, match_events, get_meta\n'), ((7704, 7721), 'timeflux.core.exceptions.WorkerInterrupt', 'WorkerInterrupt', ([], {}), '()\n', (7719, 7721), False, 'from timeflux.core.exceptions import ValidationError, 
WorkerInterrupt\n'), ((10005, 10069), 'timeflux.core.exceptions.ValidationError', 'ValidationError', (['"""steps"""', 'f"""could not import \'{step[\'module\']}\'"""'], {}), '(\'steps\', f"could not import \'{step[\'module\']}\'")\n', (10020, 10069), False, 'from timeflux.core.exceptions import ValidationError, WorkerInterrupt\n'), ((10136, 10203), 'timeflux.core.exceptions.ValidationError', 'ValidationError', (['"""steps"""', 'f"""could not find class \'{step[\'class\']}\'"""'], {}), '(\'steps\', f"could not find class \'{step[\'class\']}\'")\n', (10151, 10203), False, 'from timeflux.core.exceptions import ValidationError, WorkerInterrupt\n'), ((10303, 10403), 'timeflux.core.exceptions.ValidationError', 'ValidationError', (['"""steps"""', 'f"""could not instantiate class \'{step[\'class\']}\' with the given params"""'], {}), '(\'steps\',\n f"could not instantiate class \'{step[\'class\']}\' with the given params")\n', (10318, 10403), False, 'from timeflux.core.exceptions import ValidationError, WorkerInterrupt\n'), ((14330, 14361), 'timeflux.helpers.port.get_meta', 'get_meta', (['port', 'self.meta_label'], {}), '(port, self.meta_label)\n', (14338, 14361), False, 'from timeflux.helpers.port import make_event, match_events, get_meta\n'), ((18780, 18800), 'pandas.infer_freq', 'pd.infer_freq', (['times'], {}), '(times)\n', (18793, 18800), True, 'import pandas as pd\n'), ((18838, 18886), 'pandas.tseries.frequencies.to_offset', 'pd.tseries.frequencies.to_offset', (["kwargs['freq']"], {}), "(kwargs['freq'])\n", (18870, 18886), True, 'import pandas as pd\n'), ((18946, 18991), 'pandas.DateOffset', 'pd.DateOffset', ([], {'seconds': '(1 / self.resample_rate)'}), '(seconds=1 / self.resample_rate)\n', (18959, 18991), True, 'import pandas as pd\n'), ((4994, 4999), 'timeflux.helpers.clock.now', 'now', ([], {}), '()\n', (4997, 4999), False, 'from timeflux.helpers.clock import now, min_time, max_time\n'), ((11956, 11987), 'timeflux.helpers.port.get_meta', 'get_meta', (['port', 'self.meta_label'], {}), '(port, self.meta_label)\n', (11964, 11987), False, 'from timeflux.helpers.port import make_event, match_events, get_meta\n'), ((12658, 12683), 'numpy.append', 'np.append', (['indices', 'index'], {}), '(indices, index)\n', (12667, 12683), True, 'import numpy as np\n'), ((5686, 5743), 'timeflux.helpers.background.Task', 'Task', (['self._pipeline', '"""fit"""', 'self._X_train', 'self._y_train'], {}), "(self._pipeline, 'fit', self._X_train, self._y_train)\n", (5690, 5743), False, 'from timeflux.helpers.background import Task\n'), ((11413, 11452), 'numpy.vstack', 'np.vstack', (['(self._X_train, data.values)'], {}), '((self._X_train, data.values))\n', (11422, 11452), True, 'import numpy as np\n'), ((12432, 12448), 'numpy.array', 'np.array', (['[data]'], {}), '([data])\n', (12440, 12448), True, 'import numpy as np\n'), ((12589, 12623), 'numpy.vstack', 'np.vstack', (['(self._X_train, [data])'], {}), '((self._X_train, [data]))\n', (12598, 12623), True, 'import numpy as np\n'), ((16421, 16448), 'numpy.asarray', 'np.asarray', (['self._X_indices'], {}), '(self._X_indices)\n', (16431, 16448), True, 'import numpy as np\n'), ((12832, 12849), 'numpy.array', 'np.array', (['[label]'], {}), '([label])\n', (12840, 12849), True, 'import numpy as np\n'), ((12932, 12965), 'numpy.append', 'np.append', (['self._y_train', '[label]'], {}), '(self._y_train, [label])\n', (12941, 12965), True, 'import numpy as np\n')] |
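As a usage illustration for the node above, the `steps` argument is a list of dicts naming a module, a class, and optional constructor args, matching the JSON schema validated in `_make_pipeline`. The concrete scikit-learn classes below are an assumption, not an example shipped with the node:

# Hypothetical `steps` value; each entry must provide "module" and "class".
steps = [
    {
        "module": "sklearn.preprocessing",
        "class": "StandardScaler",
    },
    {
        "module": "sklearn.linear_model",
        "class": "LogisticRegression",
        "args": {"C": 1.0},          # forwarded as keyword arguments to the class
    },
]
# _make_pipeline() resolves each entry with importlib.import_module(step["module"]),
# instantiates getattr(module, step["class"])(**args), and chains the instances
# with sklearn.pipeline.make_pipeline().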
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-22 15:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('content', '0002_auto_20170122_1509'),
('cms', '0005_auto_20170122_1534'),
]
operations = [
migrations.AddField(
model_name='paginahome',
name='posts1_imagen',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts1_imagen', to='content.ImageContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts1_texto',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts1_texto', to='content.TextContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts1_titulo',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts1_titulo', to='content.TitleContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts2_imagen',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts2_imagen', to='content.ImageContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts2_texto',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts2_texto', to='content.TextContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts2_titulo',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts2_titulo', to='content.TitleContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts3_imagen',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts3_imagen', to='content.ImageContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts3_texto',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts3_texto', to='content.TextContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts3_titulo',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts3_titulo', to='content.TitleContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts4_imagen',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts4_imagen', to='content.ImageContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts4_texto',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts4_texto', to='content.TextContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts4_titulo',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts4_titulo', to='content.TitleContent'),
),
]
| [
"django.db.models.OneToOneField"
]
| [((487, 637), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""posts1_imagen"""', 'to': '"""content.ImageContent"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='posts1_imagen', to='content.ImageContent')\n", (507, 637), False, 'from django.db import migrations, models\n'), ((762, 910), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""posts1_texto"""', 'to': '"""content.TextContent"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='posts1_texto', to='content.TextContent')\n", (782, 910), False, 'from django.db import migrations, models\n'), ((1036, 1186), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""posts1_titulo"""', 'to': '"""content.TitleContent"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='posts1_titulo', to='content.TitleContent')\n", (1056, 1186), False, 'from django.db import migrations, models\n'), ((1312, 1462), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""posts2_imagen"""', 'to': '"""content.ImageContent"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='posts2_imagen', to='content.ImageContent')\n", (1332, 1462), False, 'from django.db import migrations, models\n'), ((1587, 1735), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""posts2_texto"""', 'to': '"""content.TextContent"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='posts2_texto', to='content.TextContent')\n", (1607, 1735), False, 'from django.db import migrations, models\n'), ((1861, 2011), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""posts2_titulo"""', 'to': '"""content.TitleContent"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='posts2_titulo', to='content.TitleContent')\n", (1881, 2011), False, 'from django.db import migrations, models\n'), ((2137, 2287), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""posts3_imagen"""', 'to': '"""content.ImageContent"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='posts3_imagen', to='content.ImageContent')\n", (2157, 2287), False, 'from django.db import migrations, models\n'), ((2412, 2560), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""posts3_texto"""', 'to': '"""content.TextContent"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='posts3_texto', to='content.TextContent')\n", (2432, 2560), False, 'from django.db import migrations, models\n'), ((2686, 
2836), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""posts3_titulo"""', 'to': '"""content.TitleContent"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='posts3_titulo', to='content.TitleContent')\n", (2706, 2836), False, 'from django.db import migrations, models\n'), ((2962, 3112), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""posts4_imagen"""', 'to': '"""content.ImageContent"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='posts4_imagen', to='content.ImageContent')\n", (2982, 3112), False, 'from django.db import migrations, models\n'), ((3237, 3385), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""posts4_texto"""', 'to': '"""content.TextContent"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='posts4_texto', to='content.TextContent')\n", (3257, 3385), False, 'from django.db import migrations, models\n'), ((3511, 3661), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""posts4_titulo"""', 'to': '"""content.TitleContent"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='posts4_titulo', to='content.TitleContent')\n", (3531, 3661), False, 'from django.db import migrations, models\n')] |
from pug.nlp.db import representation
from django.db import models
class RepresentationMixin(models.Model):
"""Produce a meaningful string representation of a model with `str(model.objects.all[0])`."""
__unicode__ = representation
class Meta:
abstract = True
class DateMixin(models.Model):
"""Add updated and created fields that auto-populate to create a ORM-level transaction log for auditing (though not a full log, just 2 events)."""
updated = models.DateTimeField(auto_now=True)
created = models.DateTimeField(auto_now_add=True)
class Meta:
abstract = True
| [
"django.db.models.DateTimeField"
]
| [((479, 514), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (499, 514), False, 'from django.db import models\n'), ((529, 568), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (549, 568), False, 'from django.db import models\n')] |
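A hedged sketch of how the two mixins are meant to be combined with a concrete model; the `Article` model below is hypothetical, not part of the package:

# Hypothetical usage of the mixins above, not from the source.
class Article(RepresentationMixin, DateMixin):
    """Gets the string representation helper plus the created/updated audit fields."""
    title = models.CharField(max_length=200)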
from django.shortcuts import render
from django.views.generic.edit import FormView
from django.views.generic.edit import View
from . import forms
# Again, thanks to django for providing a ready-made authentication form.
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import logout
from django.http import HttpResponseRedirect
from django.contrib.auth import login
class index(FormView):
form_class = AuthenticationForm
    # Same as for registration, only here we use the authentication template.
    template_name = "MainApp/homepage.html"
    # On success, redirect to the home page.
    success_url = "/"
def get(self, request):
form1 = AuthenticationForm(request.POST)
return render(request, 'MainApp/homepage.html',
{'form': form1, 'user': request.user})
def form_valid(self, form):
        # Get the user object from the data entered in the form.
        self.user = form.get_user()
        # Authenticate the user.
        login(self.request, self.user)
return super(index, self).form_valid(form)
class contact(FormView):
form_class = AuthenticationForm
    # Same as for registration, only here we use the authentication template.
    template_name = "MainApp/contact.html"
    # On success, redirect to the home page.
    success_url = "../contact/"
def get(self, request):
form1 = AuthenticationForm(request.POST)
return render(request, 'MainApp/contact.html',
{'values': ['Звоните по телефону', '<EMAIL>', '8(977)335-77-77'], 'form': form1, 'user': request.user})
def form_valid(self, form):
        # Get the user object from the data entered in the form.
        self.user = form.get_user()
        # Authenticate the user.
        login(self.request, self.user)
return super(contact, self).form_valid(form)
class registration(FormView):
form_class = forms.UserCreationForm
    # URL the user is redirected to after a successful registration.
    # In this case it points to the login page for registered users.
    success_url = "/login/"
    # Template used to render this view.
    template_name = "MainApp/registration_form.html"
def form_valid(self, form):
        # Create the user if the form data was entered correctly.
        form.save()
        # Call the base class method
        return super(registration, self).form_valid(form)
class LogoutView(View):
def get(self, request):
        # Log out the user who requested this view.
        logout(request)
        # Then redirect the user to the home page.
        #return HttpResponseRedirect("/seeuagain")
return render(request, 'MainApp/quitpage.html')
class LoginFormView(FormView):
form_class = AuthenticationForm
    # Same as for registration, only here we use the authentication template.
    template_name = "MainApp/login_form.html"
    # On success, redirect to the home page.
    success_url = "/news"
def form_valid(self, form):
        # Get the user object from the data entered in the form.
        self.user = form.get_user()
        # Authenticate the user.
        login(self.request, self.user)
return super(LoginFormView, self).form_valid(form)
| [
"django.shortcuts.render",
"django.contrib.auth.forms.AuthenticationForm",
"django.contrib.auth.login",
"django.contrib.auth.logout"
]
| [((698, 730), 'django.contrib.auth.forms.AuthenticationForm', 'AuthenticationForm', (['request.POST'], {}), '(request.POST)\n', (716, 730), False, 'from django.contrib.auth.forms import AuthenticationForm\n'), ((749, 828), 'django.shortcuts.render', 'render', (['request', '"""MainApp/homepage.html"""', "{'form': form1, 'user': request.user}"], {}), "(request, 'MainApp/homepage.html', {'form': form1, 'user': request.user})\n", (755, 828), False, 'from django.shortcuts import render\n'), ((1061, 1091), 'django.contrib.auth.login', 'login', (['self.request', 'self.user'], {}), '(self.request, self.user)\n', (1066, 1091), False, 'from django.contrib.auth import login\n'), ((1460, 1492), 'django.contrib.auth.forms.AuthenticationForm', 'AuthenticationForm', (['request.POST'], {}), '(request.POST)\n', (1478, 1492), False, 'from django.contrib.auth.forms import AuthenticationForm\n'), ((1511, 1658), 'django.shortcuts.render', 'render', (['request', '"""MainApp/contact.html"""', "{'values': ['Звоните по телефону', '<EMAIL>', '8(977)335-77-77'], 'form':\n form1, 'user': request.user}"], {}), "(request, 'MainApp/contact.html', {'values': ['Звоните по телефону',\n '<EMAIL>', '8(977)335-77-77'], 'form': form1, 'user': request.user})\n", (1517, 1658), False, 'from django.shortcuts import render\n'), ((1887, 1917), 'django.contrib.auth.login', 'login', (['self.request', 'self.user'], {}), '(self.request, self.user)\n', (1892, 1917), False, 'from django.contrib.auth import login\n'), ((2780, 2795), 'django.contrib.auth.logout', 'logout', (['request'], {}), '(request)\n', (2786, 2795), False, 'from django.contrib.auth import logout\n'), ((2940, 2980), 'django.shortcuts.render', 'render', (['request', '"""MainApp/quitpage.html"""'], {}), "(request, 'MainApp/quitpage.html')\n", (2946, 2980), False, 'from django.shortcuts import render\n'), ((3461, 3491), 'django.contrib.auth.login', 'login', (['self.request', 'self.user'], {}), '(self.request, self.user)\n', (3466, 3491), False, 'from django.contrib.auth import login\n')] |
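A hedged URL configuration sketch for the class-based views above; the `MainApp` module path is taken from the template names, the route paths are assumptions:

# Hypothetical urls.py, not from the source.
from django.urls import path
from MainApp import views   # assumed module path

urlpatterns = [
    path('', views.index.as_view()),
    path('contact/', views.contact.as_view()),
    path('registration/', views.registration.as_view()),
    path('login/', views.LoginFormView.as_view()),
    path('logout/', views.LogoutView.as_view()),
]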
"""
Django settings for imagetagger project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
from django.contrib.messages import constants as messages
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'imagetagger.annotations',
'imagetagger.base',
'imagetagger.images',
'imagetagger.users',
'imagetagger.tools',
'imagetagger.administration',
'django.contrib.admin',
'imagetagger.tagger_messages',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'widget_tweaks',
'friendlytagloader',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.locale.LocaleMiddleware',
]
ROOT_URLCONF = 'imagetagger.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'imagetagger.base.context_processors.base_data',
],
},
},
]
WSGI_APPLICATION = 'imagetagger.wsgi.application'
FILE_UPLOAD_HANDLERS = (
"django.core.files.uploadhandler.MemoryFileUploadHandler",
"django.core.files.uploadhandler.TemporaryFileUploadHandler",
)
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTH_USER_MODEL = 'users.User'
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Berlin' # Timezone of your server
USE_I18N = True
USE_L10N = True
USE_TZ = True
PROBLEMS_URL = 'https://github.com/bit-bots/imagetagger/issues'
PROBLEMS_TEXT = ''
LOGIN_URL = '/user/login/'
LOGIN_REDIRECT_URL = '/images/'
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
MESSAGE_TAGS = {
messages.INFO: 'info',
messages.ERROR: 'danger',
messages.WARNING: 'warning',
messages.SUCCESS: 'success',
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
EXPORT_SEPARATOR = '|'
DATA_PATH = os.path.join(BASE_DIR, 'data')
IMAGE_PATH = os.path.join(BASE_DIR, 'images') # the absolute path to the folder with the imagesets
# filename extension of accepted imagefiles
IMAGE_EXTENSION = {
'png',
'jpeg',
}
# Sets the default expire time for new messages in days
DEFAULT_EXPIRE_TIME = 7
# Sets the default number of messages per page
MESSAGES_PER_PAGE = 10
| [
"os.path.abspath",
"os.path.join"
]
| [((3683, 3713), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""data"""'], {}), "(BASE_DIR, 'data')\n", (3695, 3713), False, 'import os\n'), ((3728, 3760), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""images"""'], {}), "(BASE_DIR, 'images')\n", (3740, 3760), False, 'import os\n'), ((499, 524), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (514, 524), False, 'import os\n')] |
#encoding:utf-8
from utils import weighted_random_subreddit
t_channel = '@news756'
subreddit = weighted_random_subreddit({
'politics': 0.5,
'news': 0.5
})
def send_post(submission, r2t):
return r2t.send_simple(submission,
text='{title}\n\n{self_text}\n\n/r/{subreddit_name}\n{short_link}',
gif='{title}\n\n/r/{subreddit_name}\n{short_link}',
img='{title}\n\n/r/{subreddit_name}\n{short_link}',
album='{title}\n{link}\n\n/r/{subreddit_name}\n{short_link}',
other='{title}\n{link}\n\n/r/{subreddit_name}\n{short_link}'
)
| [
"utils.weighted_random_subreddit"
]
| [((98, 155), 'utils.weighted_random_subreddit', 'weighted_random_subreddit', (["{'politics': 0.5, 'news': 0.5}"], {}), "({'politics': 0.5, 'news': 0.5})\n", (123, 155), False, 'from utils import weighted_random_subreddit\n')] |
'''
This module contains the classes which represent XCB data types.
'''
from xcbgen.expr import Field, Expression
import __main__
class Type(object):
'''
Abstract base class for all XCB data types.
Contains default fields, and some abstract methods.
'''
def __init__(self, name):
'''
Default structure initializer. Sets up default fields.
Public fields:
name is a tuple of strings specifying the full type name.
size is the size of the datatype in bytes, or None if variable-sized.
nmemb is 1 for non-list types, None for variable-sized lists, otherwise number of elts.
booleans for identifying subclasses, because I can't figure out isinstance().
'''
self.name = name
self.size = None
self.nmemb = None
self.resolved = False
# Screw isinstance().
self.is_simple = False
self.is_list = False
self.is_expr = False
self.is_container = False
self.is_reply = False
self.is_union = False
self.is_pad = False
self.is_switch = False
self.is_bitcase = False
def resolve(self, module):
'''
Abstract method for resolving a type.
This should make sure any referenced types are already declared.
'''
raise Exception('abstract resolve method not overridden!')
def out(self, name):
'''
Abstract method for outputting code.
These are declared in the language-specific modules, and
there must be a dictionary containing them declared when this module is imported!
'''
raise Exception('abstract out method not overridden!')
def fixed_size(self):
'''
Abstract method for determining if the data type is fixed-size.
'''
raise Exception('abstract fixed_size method not overridden!')
def make_member_of(self, module, complex_type, field_type, field_name, visible, wire, auto):
'''
Default method for making a data type a member of a structure.
Extend this if the data type needs to add an additional length field or something.
module is the global module object.
complex_type is the structure object.
see Field for the meaning of the other parameters.
'''
new_field = Field(self, field_type, field_name, visible, wire, auto)
# We dump the _placeholder_byte if any fields are added.
for (idx, field) in enumerate(complex_type.fields):
if field == _placeholder_byte:
complex_type.fields[idx] = new_field
return
complex_type.fields.append(new_field)
class SimpleType(Type):
'''
Derived class which represents a cardinal type like CARD32 or char.
Any type which is typedef'ed to cardinal will be one of these.
Public fields added:
none
'''
def __init__(self, name, size):
Type.__init__(self, name)
self.is_simple = True
self.size = size
self.nmemb = 1
def resolve(self, module):
self.resolved = True
def fixed_size(self):
return True
out = __main__.output['simple']
# Cardinal datatype globals. See module __init__ method.
tcard8 = SimpleType(('uint8_t',), 1)
tcard16 = SimpleType(('uint16_t',), 2)
tcard32 = SimpleType(('uint32_t',), 4)
tint8 = SimpleType(('int8_t',), 1)
tint16 = SimpleType(('int16_t',), 2)
tint32 = SimpleType(('int32_t',), 4)
tchar = SimpleType(('char',), 1)
tfloat = SimpleType(('float',), 4)
tdouble = SimpleType(('double',), 8)
class Enum(SimpleType):
'''
Derived class which represents an enum. Fixed-size.
Public fields added:
values contains a list of (name, value) tuples. value is empty, or a number.
bits contains a list of (name, bitnum) tuples. items only appear if specified as a bit. bitnum is a number.
'''
def __init__(self, name, elt):
SimpleType.__init__(self, name, 4)
self.values = []
self.bits = []
for item in list(elt):
# First check if we're using a default value
if len(list(item)) == 0:
self.values.append((item.get('name'), ''))
continue
# An explicit value or bit was specified.
value = list(item)[0]
if value.tag == 'value':
self.values.append((item.get('name'), value.text))
elif value.tag == 'bit':
self.values.append((item.get('name'), '%u' % (1 << int(value.text, 0))))
self.bits.append((item.get('name'), value.text))
def resolve(self, module):
self.resolved = True
def fixed_size(self):
return True
out = __main__.output['enum']
class ListType(Type):
'''
Derived class which represents a list of some other datatype. Fixed- or variable-sized.
Public fields added:
member is the datatype of the list elements.
parent is the structure type containing the list.
expr is an Expression object containing the length information, for variable-sized lists.
'''
def __init__(self, elt, member, *parent):
Type.__init__(self, member.name)
self.is_list = True
self.member = member
self.parents = list(parent)
if elt.tag == 'list':
elts = list(elt)
self.expr = Expression(elts[0] if len(elts) else elt, self)
elif elt.tag == 'valueparam':
self.expr = Expression(elt, self)
self.size = member.size if member.fixed_size() else None
self.nmemb = self.expr.nmemb if self.expr.fixed_size() else None
def make_member_of(self, module, complex_type, field_type, field_name, visible, wire, auto):
if not self.fixed_size():
# We need a length field.
            # Ask our Expression object for its name, type, and whether it's on the wire.
lenfid = self.expr.lenfield_type
lenfield_name = self.expr.lenfield_name
lenwire = self.expr.lenwire
needlen = True
# See if the length field is already in the structure.
for parent in self.parents:
for field in parent.fields:
if field.field_name == lenfield_name:
needlen = False
# It isn't, so we need to add it to the structure ourself.
if needlen:
type = module.get_type(lenfid)
lenfield_type = module.get_type_name(lenfid)
type.make_member_of(module, complex_type, lenfield_type, lenfield_name, True, lenwire, False)
# Add ourself to the structure by calling our original method.
Type.make_member_of(self, module, complex_type, field_type, field_name, visible, wire, auto)
def resolve(self, module):
if self.resolved:
return
self.member.resolve(module)
self.expr.resolve(module, self.parents)
# Find my length field again. We need the actual Field object in the expr.
# This is needed because we might have added it ourself above.
if not self.fixed_size():
for parent in self.parents:
for field in parent.fields:
if field.field_name == self.expr.lenfield_name and field.wire:
self.expr.lenfield = field
break
self.resolved = True
def fixed_size(self):
return self.member.fixed_size() and self.expr.fixed_size()
class ExprType(Type):
'''
Derived class which represents an exprfield. Fixed size.
Public fields added:
expr is an Expression object containing the value of the field.
'''
def __init__(self, elt, member, *parents):
Type.__init__(self, member.name)
self.is_expr = True
self.member = member
self.parents = parents
self.expr = Expression(list(elt)[0], self)
self.size = member.size
self.nmemb = 1
def resolve(self, module):
if self.resolved:
return
self.member.resolve(module)
self.resolved = True
def fixed_size(self):
return True
class PadType(Type):
'''
Derived class which represents a padding field.
'''
def __init__(self, elt):
Type.__init__(self, tcard8.name)
self.is_pad = True
self.size = 1
self.nmemb = 1 if (elt == None) else int(elt.get('bytes'), 0)
def resolve(self, module):
self.resolved = True
def fixed_size(self):
return True
class ComplexType(Type):
'''
Derived class which represents a structure. Base type for all structure types.
Public fields added:
fields is an array of Field objects describing the structure fields.
'''
def __init__(self, name, elt):
Type.__init__(self, name)
self.is_container = True
self.elt = elt
self.fields = []
self.nmemb = 1
self.size = 0
self.lenfield_parent = [self]
def resolve(self, module):
if self.resolved:
return
pads = 0
# Resolve all of our field datatypes.
for child in list(self.elt):
if child.tag == 'pad':
field_name = 'pad' + str(pads)
fkey = 'CARD8'
type = PadType(child)
pads = pads + 1
visible = False
elif child.tag == 'field':
field_name = child.get('name')
fkey = child.get('type')
type = module.get_type(fkey)
visible = True
elif child.tag == 'exprfield':
field_name = child.get('name')
fkey = child.get('type')
type = ExprType(child, module.get_type(fkey), *self.lenfield_parent)
visible = False
elif child.tag == 'list':
field_name = child.get('name')
fkey = child.get('type')
type = ListType(child, module.get_type(fkey), *self.lenfield_parent)
visible = True
elif child.tag == 'valueparam':
field_name = child.get('value-list-name')
fkey = 'CARD32'
type = ListType(child, module.get_type(fkey), *self.lenfield_parent)
visible = True
elif child.tag == 'switch':
field_name = child.get('name')
# construct the switch type name from the parent type and the field name
field_type = self.name + (field_name,)
type = SwitchType(field_type, child, *self.lenfield_parent)
visible = True
type.make_member_of(module, self, field_type, field_name, visible, True, False)
type.resolve(module)
continue
else:
# Hit this on Reply
continue
# Get the full type name for the field
field_type = module.get_type_name(fkey)
# Add the field to ourself
type.make_member_of(module, self, field_type, field_name, visible, True, False)
# Recursively resolve the type (could be another structure, list)
type.resolve(module)
self.calc_size() # Figure out how big we are
self.resolved = True
def calc_size(self):
self.size = 0
for m in self.fields:
if not m.wire:
continue
if m.type.fixed_size():
self.size = self.size + (m.type.size * m.type.nmemb)
else:
self.size = None
break
def fixed_size(self):
for m in self.fields:
if not m.type.fixed_size():
return False
return True
class SwitchType(ComplexType):
'''
Derived class which represents a List of Items.
Public fields added:
bitcases is an array of Bitcase objects describing the list items
'''
def __init__(self, name, elt, *parents):
ComplexType.__init__(self, name, elt)
self.parents = parents
# FIXME: switch cannot store lenfields, so it should just delegate the parents
self.lenfield_parent = list(parents) + [self]
# self.fields contains all possible fields collected from the Bitcase objects,
# whereas self.items contains the Bitcase objects themselves
self.bitcases = []
self.is_switch = True
elts = list(elt)
self.expr = Expression(elts[0] if len(elts) else elt, self)
def resolve(self, module):
if self.resolved:
return
# pads = 0
parents = list(self.parents) + [self]
# Resolve all of our field datatypes.
for index, child in enumerate(list(self.elt)):
if child.tag == 'bitcase':
field_name = child.get('name')
if field_name is None:
field_type = self.name + ('bitcase%d' % index,)
else:
field_type = self.name + (field_name,)
                # use self.parent to indicate ancestor,
# as switch does not contain named fields itself
type = BitcaseType(index, field_type, child, *parents)
# construct the switch type name from the parent type and the field name
if field_name is None:
type.has_name = False
# Get the full type name for the field
field_type = type.name
visible = True
# add the field to ourself
type.make_member_of(module, self, field_type, field_name, visible, True, False)
# recursively resolve the type (could be another structure, list)
type.resolve(module)
inserted = False
for new_field in type.fields:
# We dump the _placeholder_byte if any fields are added.
for (idx, field) in enumerate(self.fields):
if field == _placeholder_byte:
self.fields[idx] = new_field
inserted = True
break
if False == inserted:
self.fields.append(new_field)
self.calc_size() # Figure out how big we are
self.resolved = True
def make_member_of(self, module, complex_type, field_type, field_name, visible, wire, auto):
if not self.fixed_size():
# We need a length field.
# Ask our Expression object for its name, type, and whether it's on the wire.
lenfid = self.expr.lenfield_type
lenfield_name = self.expr.lenfield_name
lenwire = self.expr.lenwire
needlen = True
# See if the length field is already in the structure.
for parent in self.parents:
for field in parent.fields:
if field.field_name == lenfield_name:
needlen = False
# It isn't, so we need to add it to the structure ourself.
if needlen:
type = module.get_type(lenfid)
lenfield_type = module.get_type_name(lenfid)
type.make_member_of(module, complex_type, lenfield_type, lenfield_name, True, lenwire, False)
# Add ourself to the structure by calling our original method.
Type.make_member_of(self, module, complex_type, field_type, field_name, visible, wire, auto)
# size for switch can only be calculated at runtime
def calc_size(self):
pass
# note: switch is _always_ of variable size, but we indicate here whether
# it contains elements that are variable-sized themselves
def fixed_size(self):
return False
# for m in self.fields:
# if not m.type.fixed_size():
# return False
# return True
class Struct(ComplexType):
'''
Derived class representing a struct data type.
'''
out = __main__.output['struct']
class Union(ComplexType):
'''
Derived class representing a union data type.
'''
def __init__(self, name, elt):
ComplexType.__init__(self, name, elt)
self.is_union = True
out = __main__.output['union']
class BitcaseType(ComplexType):
'''
Derived class representing a struct data type.
'''
def __init__(self, index, name, elt, *parent):
elts = list(elt)
self.expr = Expression(elts[0] if len(elts) else elt, self)
ComplexType.__init__(self, name, elts[1:])
self.has_name = True
self.index = 1
self.lenfield_parent = list(parent) + [self]
self.parents = list(parent)
self.is_bitcase = True
def make_member_of(self, module, switch_type, field_type, field_name, visible, wire, auto):
'''
register BitcaseType with the corresponding SwitchType
module is the global module object.
switch_type is the switch object this bitcase is registered with.
see Field for the meaning of the other parameters.
'''
new_field = Field(self, field_type, field_name, visible, wire, auto)
# We dump the _placeholder_byte if any bitcases are added.
for (idx, field) in enumerate(switch_type.bitcases):
if field == _placeholder_byte:
switch_type.bitcases[idx] = new_field
return
switch_type.bitcases.append(new_field)
def resolve(self, module):
if self.resolved:
return
self.expr.resolve(module, self.parents+[self])
# Resolve the bitcase expression
ComplexType.resolve(self, module)
class Reply(ComplexType):
'''
Derived class representing a reply. Only found as a field of Request.
'''
def __init__(self, name, elt):
ComplexType.__init__(self, name, elt)
self.is_reply = True
def resolve(self, module):
if self.resolved:
return
# Add the automatic protocol fields
self.fields.append(Field(tcard8, tcard8.name, 'response_type', False, True, True))
self.fields.append(_placeholder_byte)
self.fields.append(Field(tcard16, tcard16.name, 'sequence', False, True, True))
self.fields.append(Field(tcard32, tcard32.name, 'length', False, True, True))
ComplexType.resolve(self, module)
class Request(ComplexType):
'''
Derived class representing a request.
Public fields added:
reply contains the reply datatype or None for void requests.
opcode contains the request number.
'''
def __init__(self, name, elt):
ComplexType.__init__(self, name, elt)
self.reply = None
self.opcode = elt.get('opcode')
for child in list(elt):
if child.tag == 'reply':
self.reply = Reply(name, child)
def resolve(self, module):
if self.resolved:
return
# Add the automatic protocol fields
if module.namespace.is_ext:
self.fields.append(Field(tcard8, tcard8.name, 'major_opcode', False, True, True))
self.fields.append(Field(tcard8, tcard8.name, 'minor_opcode', False, True, True))
self.fields.append(Field(tcard16, tcard16.name, 'length', False, True, True))
ComplexType.resolve(self, module)
else:
self.fields.append(Field(tcard8, tcard8.name, 'major_opcode', False, True, True))
self.fields.append(_placeholder_byte)
self.fields.append(Field(tcard16, tcard16.name, 'length', False, True, True))
ComplexType.resolve(self, module)
if self.reply:
self.reply.resolve(module)
out = __main__.output['request']
class Event(ComplexType):
'''
Derived class representing an event data type.
Public fields added:
opcodes is a dictionary of name -> opcode number, for eventcopies.
'''
def __init__(self, name, elt):
ComplexType.__init__(self, name, elt)
self.opcodes = {}
tmp = elt.get('no-sequence-number')
self.has_seq = (tmp == None or tmp.lower() == 'false' or tmp == '0')
def add_opcode(self, opcode, name, main):
self.opcodes[name] = opcode
if main:
self.name = name
def resolve(self, module):
if self.resolved:
return
# Add the automatic protocol fields
self.fields.append(Field(tcard8, tcard8.name, 'response_type', False, True, True))
if self.has_seq:
self.fields.append(_placeholder_byte)
self.fields.append(Field(tcard16, tcard16.name, 'sequence', False, True, True))
ComplexType.resolve(self, module)
out = __main__.output['event']
class Error(ComplexType):
'''
Derived class representing an error data type.
Public fields added:
opcodes is a dictionary of name -> opcode number, for errorcopies.
'''
def __init__(self, name, elt):
ComplexType.__init__(self, name, elt)
self.opcodes = {}
def add_opcode(self, opcode, name, main):
self.opcodes[name] = opcode
if main:
self.name = name
def resolve(self, module):
if self.resolved:
return
# Add the automatic protocol fields
self.fields.append(Field(tcard8, tcard8.name, 'response_type', False, True, True))
self.fields.append(Field(tcard8, tcard8.name, 'error_code', False, True, True))
self.fields.append(Field(tcard16, tcard16.name, 'sequence', False, True, True))
ComplexType.resolve(self, module)
out = __main__.output['error']
_placeholder_byte = Field(PadType(None), tcard8.name, 'pad0', False, True, False)
| [
"xcbgen.expr.Expression",
"xcbgen.expr.Field"
]
| [((2353, 2409), 'xcbgen.expr.Field', 'Field', (['self', 'field_type', 'field_name', 'visible', 'wire', 'auto'], {}), '(self, field_type, field_name, visible, wire, auto)\n', (2358, 2409), False, 'from xcbgen.expr import Field, Expression\n'), ((17293, 17349), 'xcbgen.expr.Field', 'Field', (['self', 'field_type', 'field_name', 'visible', 'wire', 'auto'], {}), '(self, field_type, field_name, visible, wire, auto)\n', (17298, 17349), False, 'from xcbgen.expr import Field, Expression\n'), ((18249, 18311), 'xcbgen.expr.Field', 'Field', (['tcard8', 'tcard8.name', '"""response_type"""', '(False)', '(True)', '(True)'], {}), "(tcard8, tcard8.name, 'response_type', False, True, True)\n", (18254, 18311), False, 'from xcbgen.expr import Field, Expression\n'), ((18386, 18445), 'xcbgen.expr.Field', 'Field', (['tcard16', 'tcard16.name', '"""sequence"""', '(False)', '(True)', '(True)'], {}), "(tcard16, tcard16.name, 'sequence', False, True, True)\n", (18391, 18445), False, 'from xcbgen.expr import Field, Expression\n'), ((18474, 18531), 'xcbgen.expr.Field', 'Field', (['tcard32', 'tcard32.name', '"""length"""', '(False)', '(True)', '(True)'], {}), "(tcard32, tcard32.name, 'length', False, True, True)\n", (18479, 18531), False, 'from xcbgen.expr import Field, Expression\n'), ((20654, 20716), 'xcbgen.expr.Field', 'Field', (['tcard8', 'tcard8.name', '"""response_type"""', '(False)', '(True)', '(True)'], {}), "(tcard8, tcard8.name, 'response_type', False, True, True)\n", (20659, 20716), False, 'from xcbgen.expr import Field, Expression\n'), ((21540, 21602), 'xcbgen.expr.Field', 'Field', (['tcard8', 'tcard8.name', '"""response_type"""', '(False)', '(True)', '(True)'], {}), "(tcard8, tcard8.name, 'response_type', False, True, True)\n", (21545, 21602), False, 'from xcbgen.expr import Field, Expression\n'), ((21631, 21690), 'xcbgen.expr.Field', 'Field', (['tcard8', 'tcard8.name', '"""error_code"""', '(False)', '(True)', '(True)'], {}), "(tcard8, tcard8.name, 'error_code', False, True, True)\n", (21636, 21690), False, 'from xcbgen.expr import Field, Expression\n'), ((21719, 21778), 'xcbgen.expr.Field', 'Field', (['tcard16', 'tcard16.name', '"""sequence"""', '(False)', '(True)', '(True)'], {}), "(tcard16, tcard16.name, 'sequence', False, True, True)\n", (21724, 21778), False, 'from xcbgen.expr import Field, Expression\n'), ((5514, 5535), 'xcbgen.expr.Expression', 'Expression', (['elt', 'self'], {}), '(elt, self)\n', (5524, 5535), False, 'from xcbgen.expr import Field, Expression\n'), ((19255, 19316), 'xcbgen.expr.Field', 'Field', (['tcard8', 'tcard8.name', '"""major_opcode"""', '(False)', '(True)', '(True)'], {}), "(tcard8, tcard8.name, 'major_opcode', False, True, True)\n", (19260, 19316), False, 'from xcbgen.expr import Field, Expression\n'), ((19349, 19410), 'xcbgen.expr.Field', 'Field', (['tcard8', 'tcard8.name', '"""minor_opcode"""', '(False)', '(True)', '(True)'], {}), "(tcard8, tcard8.name, 'minor_opcode', False, True, True)\n", (19354, 19410), False, 'from xcbgen.expr import Field, Expression\n'), ((19443, 19500), 'xcbgen.expr.Field', 'Field', (['tcard16', 'tcard16.name', '"""length"""', '(False)', '(True)', '(True)'], {}), "(tcard16, tcard16.name, 'length', False, True, True)\n", (19448, 19500), False, 'from xcbgen.expr import Field, Expression\n'), ((19593, 19654), 'xcbgen.expr.Field', 'Field', (['tcard8', 'tcard8.name', '"""major_opcode"""', '(False)', '(True)', '(True)'], {}), "(tcard8, tcard8.name, 'major_opcode', False, True, True)\n", (19598, 19654), False, 'from xcbgen.expr import Field, 
Expression\n'), ((19737, 19794), 'xcbgen.expr.Field', 'Field', (['tcard16', 'tcard16.name', '"""length"""', '(False)', '(True)', '(True)'], {}), "(tcard16, tcard16.name, 'length', False, True, True)\n", (19742, 19794), False, 'from xcbgen.expr import Field, Expression\n'), ((20824, 20883), 'xcbgen.expr.Field', 'Field', (['tcard16', 'tcard16.name', '"""sequence"""', '(False)', '(True)', '(True)'], {}), "(tcard16, tcard16.name, 'sequence', False, True, True)\n", (20829, 20883), False, 'from xcbgen.expr import Field, Expression\n')] |
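# The extract_api column above pairs each call with a (start, end) character span
# into the code column. A minimal, self-contained sketch of that relationship;
# the snippet and offsets below are illustrative only, not taken from the row.
def span_matches(code, start, end, expected):
    # True when the given slice of the code string equals the extracted call text.
    return code[start:end] == expected

snippet = "w = Series(np.nan, index=s.index)"
print(span_matches(snippet, 4, 33, "Series(np.nan, index=s.index)"))  # True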
# Copyright 2020 by <NAME>, Solis-Lemus Lab, WID.
# All rights reserved.
# This file is part of the BioKlustering Website.
import pandas as pd
from Bio import SeqIO
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.cluster import MeanShift
from sklearn import preprocessing
import numpy as np
import os
from .helpers import plotly_dash_show_plot
def parseFasta(data):
d = {fasta.id : str(fasta.seq) for fasta in SeqIO.parse(data, "fasta")}
pd.DataFrame([d])
s = pd.Series(d, name='Sequence')
s.index.name = 'ID'
s.reset_index()
return pd.DataFrame(s)
def kmerXTable(s, a, b):
tfid_vector = TfidfVectorizer(analyzer='char', ngram_range=(a,b))
s_hat = tfid_vector.fit_transform(s.Sequence)
kmerNames = tfid_vector.get_feature_names()
kmers = s_hat.toarray()
return pd.DataFrame(kmers,columns=kmerNames, index = s.index)
# credit to chunrong
def read_fasta_sequences(sequence_paths):
all_sequences = pd.DataFrame()
for path in sequence_paths:
path = os.path.join("media", path)
sequence = parseFasta(path)
all_sequences = pd.concat([all_sequences, sequence])
return all_sequences
def kmeans(userId, fasta, klength_min, klength_max, rNum, cNum, method):
inputData = read_fasta_sequences(fasta)
inputData["Sequence"] = inputData["Sequence"].apply(lambda x: x.replace("-", ""))
kmerXTableInput = kmerXTable(inputData, klength_min, klength_max)
km = KMeans(random_state = rNum, n_clusters = cNum)
km.fit(kmerXTableInput)
y_hat = km.predict(kmerXTableInput)
plotly_kmertable = kmerXTableInput
if method == "PCA":
plotly_kmertable = preprocessing.normalize(kmerXTableInput)
plot_div = plotly_dash_show_plot(userId, plotly_kmertable, y_hat, "Unsupervised Kmeans", method)
inputData.insert(0, "Labels", y_hat)
return [[inputData], [plot_div]]
def kmeans_semiSupervised(userId, fasta, klength_min, klength_max, rNum, y_hat, method):
inputData = read_fasta_sequences(fasta)
inputData["Sequence"] = inputData["Sequence"].apply(lambda x: x.replace("-", ""))
kmerXTableInput = kmerXTable(inputData, klength_min, klength_max)
PCAembedding = PCA(n_components=10)
NkmerXTableInput = preprocessing.normalize(kmerXTableInput)
PCAembedding_low = PCAembedding.fit_transform(NkmerXTableInput)
ms = MeanShift()
ms.fit(PCAembedding_low)
cluster_centers = ms.cluster_centers_
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
kmms = KMeans(init = cluster_centers, n_clusters = len(cluster_centers))
kmms_labels = kmms.fit_predict(PCAembedding_low)
# convert all clusters into two clusters
kmerXTableInput["pLabels"] = kmms_labels
kmerXTableInput["aLabels"] = y_hat.tolist()
newLabels_clusters_1 = kmerXTableInput[kmerXTableInput["aLabels"] == 1]["pLabels"].tolist()
newLabels_clusters_0 = kmerXTableInput[kmerXTableInput["aLabels"] == 0]["pLabels"].tolist()
newLabels = []
for label in kmms_labels:
if newLabels_clusters_1.count(label) > newLabels_clusters_0.count(label):
newLabels.append(1)
else:
newLabels.append(0)
kmerTable = kmerXTableInput.drop(columns=["pLabels", "aLabels"])
plotly_kmertable = kmerTable
plotly_labels = np.array(newLabels)
if method == "PCA":
plotly_kmertable = preprocessing.normalize(kmerTable)
plotly_div = plotly_dash_show_plot(userId, plotly_kmertable, plotly_labels, "Semi-supervised Kmeans", method)
inputData.insert(0, "Labels", newLabels)
return [[inputData], [plotly_div]]
| [
"pandas.Series",
"sklearn.cluster.KMeans",
"sklearn.decomposition.PCA",
"os.path.join",
"warnings.catch_warnings",
"numpy.array",
"sklearn.feature_extraction.text.TfidfVectorizer",
"warnings.simplefilter",
"Bio.SeqIO.parse",
"pandas.DataFrame",
"sklearn.cluster.MeanShift",
"sklearn.preprocessing.normalize",
"pandas.concat"
]
| [((546, 563), 'pandas.DataFrame', 'pd.DataFrame', (['[d]'], {}), '([d])\n', (558, 563), True, 'import pandas as pd\n'), ((573, 602), 'pandas.Series', 'pd.Series', (['d'], {'name': '"""Sequence"""'}), "(d, name='Sequence')\n", (582, 602), True, 'import pandas as pd\n'), ((658, 673), 'pandas.DataFrame', 'pd.DataFrame', (['s'], {}), '(s)\n', (670, 673), True, 'import pandas as pd\n'), ((718, 770), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'analyzer': '"""char"""', 'ngram_range': '(a, b)'}), "(analyzer='char', ngram_range=(a, b))\n", (733, 770), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((907, 960), 'pandas.DataFrame', 'pd.DataFrame', (['kmers'], {'columns': 'kmerNames', 'index': 's.index'}), '(kmers, columns=kmerNames, index=s.index)\n', (919, 960), True, 'import pandas as pd\n'), ((1046, 1060), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1058, 1060), True, 'import pandas as pd\n'), ((1546, 1588), 'sklearn.cluster.KMeans', 'KMeans', ([], {'random_state': 'rNum', 'n_clusters': 'cNum'}), '(random_state=rNum, n_clusters=cNum)\n', (1552, 1588), False, 'from sklearn.cluster import KMeans\n'), ((2300, 2320), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(10)'}), '(n_components=10)\n', (2303, 2320), False, 'from sklearn.decomposition import PCA\n'), ((2344, 2384), 'sklearn.preprocessing.normalize', 'preprocessing.normalize', (['kmerXTableInput'], {}), '(kmerXTableInput)\n', (2367, 2384), False, 'from sklearn import preprocessing\n'), ((2467, 2478), 'sklearn.cluster.MeanShift', 'MeanShift', ([], {}), '()\n', (2476, 2478), False, 'from sklearn.cluster import MeanShift\n'), ((3450, 3469), 'numpy.array', 'np.array', (['newLabels'], {}), '(newLabels)\n', (3458, 3469), True, 'import numpy as np\n'), ((1108, 1135), 'os.path.join', 'os.path.join', (['"""media"""', 'path'], {}), "('media', path)\n", (1120, 1135), False, 'import os\n'), ((1196, 1232), 'pandas.concat', 'pd.concat', (['[all_sequences, sequence]'], {}), '([all_sequences, sequence])\n', (1205, 1232), True, 'import pandas as pd\n'), ((1753, 1793), 'sklearn.preprocessing.normalize', 'preprocessing.normalize', (['kmerXTableInput'], {}), '(kmerXTableInput)\n', (1776, 1793), False, 'from sklearn import preprocessing\n'), ((2580, 2605), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2603, 2605), False, 'import warnings\n'), ((2615, 2646), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (2636, 2646), False, 'import warnings\n'), ((3521, 3555), 'sklearn.preprocessing.normalize', 'preprocessing.normalize', (['kmerTable'], {}), '(kmerTable)\n', (3544, 3555), False, 'from sklearn import preprocessing\n'), ((514, 540), 'Bio.SeqIO.parse', 'SeqIO.parse', (['data', '"""fasta"""'], {}), "(data, 'fasta')\n", (525, 540), False, 'from Bio import SeqIO\n')] |
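# A standalone sketch of the char n-gram TF-IDF + KMeans step that kmerXTable and
# kmeans() above perform; the two toy sequences are invented, and the scikit-learn
# and pandas calls are the standard ones the row already uses.
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer

toy = pd.Series({"seq1": "ACGTACGTAC", "seq2": "TTGGCCAATT"}, name="Sequence")
vec = TfidfVectorizer(analyzer="char", ngram_range=(2, 3))
X = vec.fit_transform(toy)
labels = KMeans(n_clusters=2, random_state=0, n_init=10).fit_predict(X)
print(labels)  # one cluster label per sequence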
# -*- coding: utf-8 -*-
from src import icons, __version__
from src.actions import HOST_URL
from src.actions.configure import ConfigureWorkflowAction
from src.actions.help import HelpWorkflowAction
from src.actions.index import IndexWorkflowAction
from src.actions.projects import ProjectWorkflowAction
from src.actions.pull_requests import PullRequestWorkflowAction
from src.actions.repositories import RepositoryWorkflowAction
from src.util import workflow, call_alfred
WORKFLOW_ACTIONS = {
':config': ConfigureWorkflowAction,
':projects': ProjectWorkflowAction,
':repos': RepositoryWorkflowAction,
':pullrequests': PullRequestWorkflowAction,
':help': HelpWorkflowAction
}
def route(args): # e.g., args = ":config sethost http://localhost,--exec"
command_string = args[0] # :config sethost http://localhost
command = command_string.split(' ')
if not workflow().settings.get(HOST_URL, None) and 'sethost' not in command:
call_alfred('stash:config sethost ')
return
handler = IndexWorkflowAction
action = next(iter(command), None)
if action:
handler = WORKFLOW_ACTIONS.get(action, IndexWorkflowAction)
if '--exec' in args:
handler().execute(command, cmd_pressed='--cmd' in args, shift_pressed='--shift' in args)
else: # show menu
handler().menu(command)
_notify_if_upgrade_available()
workflow().send_feedback()
def _notify_if_upgrade_available():
if workflow().update_available:
new_version = workflow().cached_data('__workflow_update_status', max_age=0)['version']
workflow().add_item('An update is available!',
'Update the workflow from version {} to {}'.format(__version__, new_version),
arg=':config update',
valid=True,
icon=icons.UPDATE)
| [
"src.util.workflow",
"src.util.call_alfred"
]
| [((992, 1028), 'src.util.call_alfred', 'call_alfred', (['"""stash:config sethost """'], {}), "('stash:config sethost ')\n", (1003, 1028), False, 'from src.util import workflow, call_alfred\n'), ((1498, 1508), 'src.util.workflow', 'workflow', ([], {}), '()\n', (1506, 1508), False, 'from src.util import workflow, call_alfred\n'), ((1426, 1436), 'src.util.workflow', 'workflow', ([], {}), '()\n', (1434, 1436), False, 'from src.util import workflow, call_alfred\n'), ((1630, 1640), 'src.util.workflow', 'workflow', ([], {}), '()\n', (1638, 1640), False, 'from src.util import workflow, call_alfred\n'), ((1549, 1559), 'src.util.workflow', 'workflow', ([], {}), '()\n', (1557, 1559), False, 'from src.util import workflow, call_alfred\n'), ((914, 924), 'src.util.workflow', 'workflow', ([], {}), '()\n', (922, 924), False, 'from src.util import workflow, call_alfred\n')] |
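# Illustrative trace of how route() above takes apart a query; the raw Alfred
# argument is assumed to arrive comma-separated, as the inline comment suggests.
args = ":config sethost http://localhost,--exec".split(",")
command_string = args[0]             # ':config sethost http://localhost'
command = command_string.split(" ")  # [':config', 'sethost', 'http://localhost']
action = next(iter(command), None)   # ':config' -> ConfigureWorkflowAction
print(command, action, "--exec" in args)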
from itertools import count
from _pandigital_tools import is_pandigital
def pand_products():
"""
Returns the sum of all numbers n which have a factorization a * b = n such
that a, b, n are (cumulatively) 1 through 9 pandigital.
"""
total = set()
for a in range(2, 100):
for b in count(a):
if len(str(a) + str(b) + str(a * b)) > 9:
break
elif is_pandigital(a, b, a * b):
total.add(a * b)
return sum(total)
def solve(vol=0):
return pand_products()
| [
"itertools.count",
"_pandigital_tools.is_pandigital"
]
| [((314, 322), 'itertools.count', 'count', (['a'], {}), '(a)\n', (319, 322), False, 'from itertools import count\n'), ((417, 443), '_pandigital_tools.is_pandigital', 'is_pandigital', (['a', 'b', '(a * b)'], {}), '(a, b, a * b)\n', (430, 443), False, 'from _pandigital_tools import is_pandigital\n')] |
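# The helper _pandigital_tools.is_pandigital is not shown above; a plausible
# standalone equivalent of the cumulative 1-9 pandigital check it performs:
def is_pandigital_1_9(*numbers):
    digits = "".join(str(n) for n in numbers)
    return len(digits) == 9 and set(digits) == set("123456789")

print(is_pandigital_1_9(39, 186, 7254))  # True, since 39 * 186 == 7254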
from django.db import models
from openstates.data.models import Bill
class LegacyBillMapping(models.Model):
legacy_id = models.CharField(max_length=20, primary_key=True)
bill = models.ForeignKey(
Bill, related_name="legacy_mapping", on_delete=models.CASCADE
)
| [
"django.db.models.CharField",
"django.db.models.ForeignKey"
]
| [((126, 175), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'primary_key': '(True)'}), '(max_length=20, primary_key=True)\n', (142, 175), False, 'from django.db import models\n'), ((187, 272), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Bill'], {'related_name': '"""legacy_mapping"""', 'on_delete': 'models.CASCADE'}), "(Bill, related_name='legacy_mapping', on_delete=models.CASCADE\n )\n", (204, 272), False, 'from django.db import models\n')] |
from __future__ import print_function
from troposphere import (
Template, Parameter, Ref, Condition, Equals, And, Or, Not, If
)
from troposphere import ec2
parameters = {
"One": Parameter(
"One",
Type="String",
),
"Two": Parameter(
"Two",
Type="String",
),
"Three": Parameter(
"Three",
Type="String",
),
"Four": Parameter(
"Four",
Type="String",
),
"SshKeyName": Parameter(
"SshKeyName",
Type="String",
)
}
conditions = {
"OneEqualsFoo": Equals(
Ref("One"),
"Foo"
),
"NotOneEqualsFoo": Not(
Condition("OneEqualsFoo")
),
"BarEqualsTwo": Equals(
"Bar",
Ref("Two")
),
"ThreeEqualsFour": Equals(
Ref("Three"),
Ref("Four")
),
"OneEqualsFooOrBarEqualsTwo": Or(
Condition("OneEqualsFoo"),
Condition("BarEqualsTwo")
),
"OneEqualsFooAndNotBarEqualsTwo": And(
Condition("OneEqualsFoo"),
Not(Condition("BarEqualsTwo"))
),
"OneEqualsFooAndBarEqualsTwoAndThreeEqualsPft": And(
Condition("OneEqualsFoo"),
Condition("BarEqualsTwo"),
Equals(Ref("Three"), "Pft")
),
"OneIsQuzAndThreeEqualsFour": And(
Equals(Ref("One"), "Quz"),
Condition("ThreeEqualsFour")
),
"LaunchInstance": And(
Condition("OneEqualsFoo"),
Condition("NotOneEqualsFoo"),
Condition("BarEqualsTwo"),
Condition("OneEqualsFooAndNotBarEqualsTwo"),
Condition("OneIsQuzAndThreeEqualsFour")
),
"LaunchWithGusto": And(
Condition("LaunchInstance"),
Equals(Ref("One"), "Gusto")
)
}
resources = {
"Ec2Instance": ec2.Instance(
"Ec2Instance",
Condition="LaunchInstance",
ImageId=If("ConditionNameEqualsFoo", "ami-12345678", "ami-87654321"),
InstanceType="t1.micro",
KeyName=Ref("SshKeyName"),
SecurityGroups=["default"],
)
}
t = Template()
for p in parameters.values():
t.add_parameter(p)
for k in conditions:
t.add_condition(k, conditions[k])
for r in resources.values():
t.add_resource(r)
print(t.to_json())
| [
"troposphere.Parameter",
"troposphere.Ref",
"troposphere.If",
"troposphere.Condition",
"troposphere.Template"
]
| [((2017, 2027), 'troposphere.Template', 'Template', ([], {}), '()\n', (2025, 2027), False, 'from troposphere import Template, Parameter, Ref, Condition, Equals, And, Or, Not, If\n'), ((189, 220), 'troposphere.Parameter', 'Parameter', (['"""One"""'], {'Type': '"""String"""'}), "('One', Type='String')\n", (198, 220), False, 'from troposphere import Template, Parameter, Ref, Condition, Equals, And, Or, Not, If\n'), ((256, 287), 'troposphere.Parameter', 'Parameter', (['"""Two"""'], {'Type': '"""String"""'}), "('Two', Type='String')\n", (265, 287), False, 'from troposphere import Template, Parameter, Ref, Condition, Equals, And, Or, Not, If\n'), ((325, 358), 'troposphere.Parameter', 'Parameter', (['"""Three"""'], {'Type': '"""String"""'}), "('Three', Type='String')\n", (334, 358), False, 'from troposphere import Template, Parameter, Ref, Condition, Equals, And, Or, Not, If\n'), ((395, 427), 'troposphere.Parameter', 'Parameter', (['"""Four"""'], {'Type': '"""String"""'}), "('Four', Type='String')\n", (404, 427), False, 'from troposphere import Template, Parameter, Ref, Condition, Equals, And, Or, Not, If\n'), ((470, 508), 'troposphere.Parameter', 'Parameter', (['"""SshKeyName"""'], {'Type': '"""String"""'}), "('SshKeyName', Type='String')\n", (479, 508), False, 'from troposphere import Template, Parameter, Ref, Condition, Equals, And, Or, Not, If\n'), ((586, 596), 'troposphere.Ref', 'Ref', (['"""One"""'], {}), "('One')\n", (589, 596), False, 'from troposphere import Template, Parameter, Ref, Condition, Equals, And, Or, Not, If\n'), ((655, 680), 'troposphere.Condition', 'Condition', (['"""OneEqualsFoo"""'], {}), "('OneEqualsFoo')\n", (664, 680), False, 'from troposphere import Template, Parameter, Ref, Condition, Equals, And, Or, Not, If\n'), ((739, 749), 'troposphere.Ref', 'Ref', (['"""Two"""'], {}), "('Two')\n", (742, 749), False, 'from troposphere import Template, Parameter, Ref, Condition, Equals, And, Or, Not, If\n'), ((796, 808), 'troposphere.Ref', 'Ref', (['"""Three"""'], {}), "('Three')\n", (799, 808), False, 'from troposphere import Template, Parameter, Ref, Condition, Equals, And, Or, Not, If\n'), ((818, 829), 'troposphere.Ref', 'Ref', (['"""Four"""'], {}), "('Four')\n", (821, 829), False, 'from troposphere import Template, Parameter, Ref, Condition, Equals, And, Or, Not, If\n'), ((883, 908), 'troposphere.Condition', 'Condition', (['"""OneEqualsFoo"""'], {}), "('OneEqualsFoo')\n", (892, 908), False, 'from troposphere import Template, Parameter, Ref, Condition, Equals, And, Or, Not, If\n'), ((918, 943), 'troposphere.Condition', 'Condition', (['"""BarEqualsTwo"""'], {}), "('BarEqualsTwo')\n", (927, 943), False, 'from troposphere import Template, Parameter, Ref, Condition, Equals, And, Or, Not, If\n'), ((1002, 1027), 'troposphere.Condition', 'Condition', (['"""OneEqualsFoo"""'], {}), "('OneEqualsFoo')\n", (1011, 1027), False, 'from troposphere import Template, Parameter, Ref, Condition, Equals, And, Or, Not, If\n'), ((1140, 1165), 'troposphere.Condition', 'Condition', (['"""OneEqualsFoo"""'], {}), "('OneEqualsFoo')\n", (1149, 1165), False, 'from troposphere import Template, Parameter, Ref, Condition, Equals, And, Or, Not, If\n'), ((1175, 1200), 'troposphere.Condition', 'Condition', (['"""BarEqualsTwo"""'], {}), "('BarEqualsTwo')\n", (1184, 1200), False, 'from troposphere import Template, Parameter, Ref, Condition, Equals, And, Or, Not, If\n'), ((1327, 1355), 'troposphere.Condition', 'Condition', (['"""ThreeEqualsFour"""'], {}), "('ThreeEqualsFour')\n", (1336, 1355), False, 'from 
troposphere import Template, Parameter, Ref, Condition, Equals, And, Or, Not, If\n'), ((1398, 1423), 'troposphere.Condition', 'Condition', (['"""OneEqualsFoo"""'], {}), "('OneEqualsFoo')\n", (1407, 1423), False, 'from troposphere import Template, Parameter, Ref, Condition, Equals, And, Or, Not, If\n'), ((1433, 1461), 'troposphere.Condition', 'Condition', (['"""NotOneEqualsFoo"""'], {}), "('NotOneEqualsFoo')\n", (1442, 1461), False, 'from troposphere import Template, Parameter, Ref, Condition, Equals, And, Or, Not, If\n'), ((1471, 1496), 'troposphere.Condition', 'Condition', (['"""BarEqualsTwo"""'], {}), "('BarEqualsTwo')\n", (1480, 1496), False, 'from troposphere import Template, Parameter, Ref, Condition, Equals, And, Or, Not, If\n'), ((1506, 1549), 'troposphere.Condition', 'Condition', (['"""OneEqualsFooAndNotBarEqualsTwo"""'], {}), "('OneEqualsFooAndNotBarEqualsTwo')\n", (1515, 1549), False, 'from troposphere import Template, Parameter, Ref, Condition, Equals, And, Or, Not, If\n'), ((1559, 1598), 'troposphere.Condition', 'Condition', (['"""OneIsQuzAndThreeEqualsFour"""'], {}), "('OneIsQuzAndThreeEqualsFour')\n", (1568, 1598), False, 'from troposphere import Template, Parameter, Ref, Condition, Equals, And, Or, Not, If\n'), ((1642, 1669), 'troposphere.Condition', 'Condition', (['"""LaunchInstance"""'], {}), "('LaunchInstance')\n", (1651, 1669), False, 'from troposphere import Template, Parameter, Ref, Condition, Equals, And, Or, Not, If\n'), ((1041, 1066), 'troposphere.Condition', 'Condition', (['"""BarEqualsTwo"""'], {}), "('BarEqualsTwo')\n", (1050, 1066), False, 'from troposphere import Template, Parameter, Ref, Condition, Equals, And, Or, Not, If\n'), ((1217, 1229), 'troposphere.Ref', 'Ref', (['"""Three"""'], {}), "('Three')\n", (1220, 1229), False, 'from troposphere import Template, Parameter, Ref, Condition, Equals, And, Or, Not, If\n'), ((1299, 1309), 'troposphere.Ref', 'Ref', (['"""One"""'], {}), "('One')\n", (1302, 1309), False, 'from troposphere import Template, Parameter, Ref, Condition, Equals, And, Or, Not, If\n'), ((1686, 1696), 'troposphere.Ref', 'Ref', (['"""One"""'], {}), "('One')\n", (1689, 1696), False, 'from troposphere import Template, Parameter, Ref, Condition, Equals, And, Or, Not, If\n'), ((1838, 1898), 'troposphere.If', 'If', (['"""ConditionNameEqualsFoo"""', '"""ami-12345678"""', '"""ami-87654321"""'], {}), "('ConditionNameEqualsFoo', 'ami-12345678', 'ami-87654321')\n", (1840, 1898), False, 'from troposphere import Template, Parameter, Ref, Condition, Equals, And, Or, Not, If\n'), ((1949, 1966), 'troposphere.Ref', 'Ref', (['"""SshKeyName"""'], {}), "('SshKeyName')\n", (1952, 1966), False, 'from troposphere import Template, Parameter, Ref, Condition, Equals, And, Or, Not, If\n')] |
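# A minimal standalone variant of the template pattern above (one parameter, one
# condition, no resources); it only uses the troposphere calls the row already uses.
from troposphere import Template, Parameter, Ref, Equals

t = Template()
t.add_parameter(Parameter("Env", Type="String"))
t.add_condition("IsProd", Equals(Ref("Env"), "prod"))
print(t.to_json())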
# (c) Copyright IBM Corporation 2020.
# LICENSE: Apache License 2.0 (Apache-2.0)
# http://www.apache.org/licenses/LICENSE-2.0
import logging
from lrtc_lib.data_access import single_dataset_loader
from lrtc_lib.data_access.processors.dataset_part import DatasetPart
from lrtc_lib.oracle_data_access import gold_labels_loader
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s')
def load(dataset: str, force_new: bool = False):
for part in DatasetPart:
dataset_name = dataset + '_' + part.name.lower()
# load dataset (generate Documents and TextElements)
if force_new:
single_dataset_loader.clear_all_saved_files(dataset_name)
single_dataset_loader.load_dataset(dataset_name, force_new)
# load gold labels
if force_new:
gold_labels_loader.clear_gold_labels_file(dataset_name)
gold_labels_loader.load_gold_labels(dataset_name, force_new)
logging.info('-' * 60)
if __name__ == '__main__':
dataset_name = 'polarity'
load(dataset=dataset_name) | [
"logging.basicConfig",
"lrtc_lib.data_access.single_dataset_loader.load_dataset",
"lrtc_lib.data_access.single_dataset_loader.clear_all_saved_files",
"lrtc_lib.oracle_data_access.gold_labels_loader.clear_gold_labels_file",
"lrtc_lib.oracle_data_access.gold_labels_loader.load_gold_labels",
"logging.info"
]
| [((328, 448), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s')\n", (347, 448), False, 'import logging\n'), ((742, 801), 'lrtc_lib.data_access.single_dataset_loader.load_dataset', 'single_dataset_loader.load_dataset', (['dataset_name', 'force_new'], {}), '(dataset_name, force_new)\n', (776, 801), False, 'from lrtc_lib.data_access import single_dataset_loader\n'), ((927, 987), 'lrtc_lib.oracle_data_access.gold_labels_loader.load_gold_labels', 'gold_labels_loader.load_gold_labels', (['dataset_name', 'force_new'], {}), '(dataset_name, force_new)\n', (962, 987), False, 'from lrtc_lib.oracle_data_access import gold_labels_loader\n'), ((996, 1018), 'logging.info', 'logging.info', (["('-' * 60)"], {}), "('-' * 60)\n", (1008, 1018), False, 'import logging\n'), ((676, 733), 'lrtc_lib.data_access.single_dataset_loader.clear_all_saved_files', 'single_dataset_loader.clear_all_saved_files', (['dataset_name'], {}), '(dataset_name)\n', (719, 733), False, 'from lrtc_lib.data_access import single_dataset_loader\n'), ((863, 918), 'lrtc_lib.oracle_data_access.gold_labels_loader.clear_gold_labels_file', 'gold_labels_loader.clear_gold_labels_file', (['dataset_name'], {}), '(dataset_name)\n', (904, 918), False, 'from lrtc_lib.oracle_data_access import gold_labels_loader\n')] |
from .component import Component, using_scope
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
class EmbeddingLayer(Component):
def __init__(self, input_size, output_size, name='embedding'):
Component.__init__(self, name=name)
self.input_size = input_size
self.output_size = output_size
with self.use_scope():
self.embedding_matrix = tf.get_variable(
'embedding_matrix', shape=[self.input_size, self.output_size])
self._built = True
@using_scope
def embed(self, x):
return tf.nn.embedding_lookup(self.embedding_matrix, x)
def __call__(self, inputs):
return self.embed(inputs)
| [
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.compat.v1.nn.embedding_lookup",
"tensorflow.compat.v1.get_variable"
]
| [((80, 104), 'tensorflow.compat.v1.disable_v2_behavior', 'tf.disable_v2_behavior', ([], {}), '()\n', (102, 104), True, 'import tensorflow.compat.v1 as tf\n'), ((577, 625), 'tensorflow.compat.v1.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['self.embedding_matrix', 'x'], {}), '(self.embedding_matrix, x)\n', (599, 625), True, 'import tensorflow.compat.v1 as tf\n'), ((397, 475), 'tensorflow.compat.v1.get_variable', 'tf.get_variable', (['"""embedding_matrix"""'], {'shape': '[self.input_size, self.output_size]'}), "('embedding_matrix', shape=[self.input_size, self.output_size])\n", (412, 475), True, 'import tensorflow.compat.v1 as tf\n')] |
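# Standalone illustration of the tf.nn.embedding_lookup call that EmbeddingLayer
# wraps; the matrix shape and id values are made up.
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

matrix = tf.constant(np.arange(12, dtype=np.float32).reshape(4, 3))  # 4 ids, 3-dim vectors
ids = tf.constant([[0, 2], [3, 1]])
looked_up = tf.nn.embedding_lookup(matrix, ids)

with tf.Session() as sess:
    print(sess.run(looked_up).shape)  # (2, 2, 3)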
'''
Copyright (C) 2021 CG Cookie
http://cgcookie.com
<EMAIL>
Created by <NAME>, <NAME>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import re
import json
import time
import inspect
from functools import wraps
import bpy
debug_run_test_calls = False
def debug_test_call(*args, **kwargs):
def wrapper(fn):
if debug_run_test_calls:
ret = str(fn(*args, **kwargs))
print('TEST: %s()' % fn.__name__)
if args:
print(' arg:', args)
if kwargs:
print(' kwa:', kwargs)
print(' ret:', ret)
return fn
return wrapper
def stats_wrapper(fn):
return fn
if not hasattr(stats_report, 'stats'):
stats_report.stats = dict()
frame = inspect.currentframe().f_back
f_locals = frame.f_locals
filename = os.path.basename(frame.f_code.co_filename)
clsname = f_locals['__qualname__'] if '__qualname__' in f_locals else ''
linenum = frame.f_lineno
fnname = fn.__name__
key = '%s%s (%s:%d)' % (
clsname + ('.' if clsname else ''),
fnname, filename, linenum
)
stats = stats_report.stats
stats[key] = {
'filename': filename,
'clsname': clsname,
'linenum': linenum,
'fileline': '%s:%d' % (filename, linenum),
'fnname': fnname,
'count': 0,
'total time': 0,
'average time': 0,
}
def wrapped(*args, **kwargs):
time_beg = time.time()
ret = fn(*args, **kwargs)
time_end = time.time()
time_delta = time_end - time_beg
d = stats[key]
d['count'] += 1
d['total time'] += time_delta
d['average time'] = d['total time'] / d['count']
return ret
return wrapped
def stats_report():
return
stats = stats_report.stats if hasattr(stats_report, 'stats') else dict()
l = max(len(k) for k in stats)
def fmt(s):
return s + ' ' * (l - len(s))
print()
print('Call Statistics Report')
cols = [
('class', 'clsname', '%s'),
('func', 'fnname', '%s'),
('location', 'fileline', '%s'),
# ('line','linenum','% 10d'),
('count', 'count', '% 8d'),
('total (sec)', 'total time', '% 10.4f'),
('avg (sec)', 'average time', '% 10.6f'),
]
data = [stats[k] for k in sorted(stats)]
data = [[h] + [f % row[c] for row in data] for (h, c, f) in cols]
colwidths = [max(len(d) for d in col) for col in data]
totwidth = sum(colwidths) + len(colwidths) - 1
def rpad(s, l):
return '%s%s' % (s, ' ' * (l - len(s)))
def printrow(i_row):
row = [col[i_row] for col in data]
print(' '.join(rpad(d, w) for (d, w) in zip(row, colwidths)))
printrow(0)
print('-' * totwidth)
for i in range(1, len(data[0])):
printrow(i)
def add_cache(attr, default):
def wrapper(fn):
setattr(fn, attr, default)
return fn
return wrapper
class LimitRecursion:
def __init__(self, count, def_ret):
self.count = count
self.def_ret = def_ret
self.calls = 0
def __call__(self, fn):
def wrapped(*args, **kwargs):
ret = self.def_ret
if self.calls < self.count:
try:
self.calls += 1
ret = fn(*args, **kwargs)
finally:
self.calls -= 1
return ret
return wrapped
@add_cache('data', {'nested':0, 'last':None})
def timed_call(label):
def wrapper(fn):
def wrapped(*args, **kwargs):
data = timed_call.data
if data['last']: print(data['last'])
data['last'] = f'''{" " * data['nested']}Timing {label}'''
data['nested'] += 1
time_beg = time.time()
ret = fn(*args, **kwargs)
time_end = time.time()
time_delta = time_end - time_beg
if data['last']:
print(f'''{data['last']}: {time_delta:0.4f}s''')
data['last'] = None
else:
print(f'''{" " * data['nested']}{time_delta:0.4f}s''')
data['nested'] -= 1
return ret
return wrapped
return wrapper
# corrected bug in previous version of blender_version fn wrapper
# https://github.com/CGCookie/retopoflow/commit/135746c7b4ee0052ad0c1842084b9ab983726b33#diff-d4260a97dcac93f76328dfaeb5c87688
def blender_version_wrapper(op, ver):
self = blender_version_wrapper
if not hasattr(self, 'fns'):
major, minor, rev = bpy.app.version
self.blenderver = '%d.%02d' % (major, minor)
self.fns = fns = {}
self.ops = {
'<': lambda v: self.blenderver < v,
'>': lambda v: self.blenderver > v,
'<=': lambda v: self.blenderver <= v,
'==': lambda v: self.blenderver == v,
'>=': lambda v: self.blenderver >= v,
'!=': lambda v: self.blenderver != v,
}
update_fn = self.ops[op](ver)
def wrapit(fn):
nonlocal self, update_fn
fn_name = fn.__name__
fns = self.fns
error_msg = "Could not find appropriate function named %s for version Blender %s" % (fn_name, self.blenderver)
if update_fn: fns[fn_name] = fn
def callit(*args, **kwargs):
nonlocal fns, fn_name, error_msg
fn = fns.get(fn_name, None)
assert fn, error_msg
ret = fn(*args, **kwargs)
return ret
return callit
return wrapit
def only_in_blender_version(*args, ignore_others=False, ignore_return=None):
self = only_in_blender_version
if not hasattr(self, 'fns'):
major, minor, rev = bpy.app.version
self.blenderver = '%d.%02d' % (major, minor)
self.fns = {}
self.ignores = {}
self.ops = {
'<': lambda v: self.blenderver < v,
'>': lambda v: self.blenderver > v,
'<=': lambda v: self.blenderver <= v,
'==': lambda v: self.blenderver == v,
'>=': lambda v: self.blenderver >= v,
'!=': lambda v: self.blenderver != v,
}
self.re_blender_version = re.compile(r'^(?P<comparison><|<=|==|!=|>=|>) *(?P<version>\d\.\d\d)$')
matches = [self.re_blender_version.match(arg) for arg in args]
assert all(match is not None for match in matches), f'At least one arg did not match version comparison: {args}'
results = [self.ops[match.group('comparison')](match.group('version')) for match in matches]
version_matches = all(results)
def wrapit(fn):
fn_name = fn.__name__
if version_matches:
assert fn_name not in self.fns, f'Multiple functions {fn_name} match the Blender version {self.blenderver}'
self.fns[fn_name] = fn
if ignore_others and fn_name not in self.ignores:
self.ignores[fn_name] = ignore_return
@wraps(fn)
def callit(*args, **kwargs):
fn = self.fns.get(fn_name, None)
if fn_name not in self.ignores:
assert fn, f'Could not find appropriate function named {fn_name} for version Blender version {self.blenderver}'
elif fn is None:
return self.ignores[fn_name]
return fn(*args, **kwargs)
return callit
return wrapit
def warn_once(warning):
def wrapper(fn):
nonlocal warning
@wraps(fn)
def wrapped(*args, **kwargs):
nonlocal warning
if warning:
print(warning)
warning = None
return fn(*args, **kwargs)
return wrapped
return wrapper
class PersistentOptions:
class WrappedDict:
def __init__(self, cls, filename, version, defaults, update_external):
self._dirty = False
self._last_save = time.time()
self._write_delay = 2.0
self._defaults = defaults
self._update_external = update_external
self._defaults['persistent options version'] = version
self._dict = {}
if filename:
src = inspect.getsourcefile(cls)
path = os.path.split(os.path.abspath(src))[0]
self._fndb = os.path.join(path, filename)
else:
self._fndb = None
self.read()
if self._dict.get('persistent options version', None) != version:
self.reset()
self.update_external()
def update_external(self):
upd = self._update_external
if upd:
upd()
def dirty(self):
self._dirty = True
self.update_external()
def clean(self, force=False):
if not force:
if not self._dirty:
return
if time.time() < self._last_save + self._write_delay:
return
if self._fndb:
json.dump(self._dict, open(self._fndb, 'wt'), indent=2, sort_keys=True)
self._dirty = False
self._last_save = time.time()
def read(self):
self._dict = {}
if self._fndb and os.path.exists(self._fndb):
try:
self._dict = json.load(open(self._fndb, 'rt'))
except Exception as e:
print('Exception caught while trying to read options from "%s"' % self._fndb)
print(str(e))
for k in set(self._dict.keys()) - set(self._defaults.keys()):
print('Deleting extraneous key "%s" from options' % k)
del self._dict[k]
self.update_external()
self._dirty = False
def keys(self):
return self._defaults.keys()
def reset(self):
keys = list(self._dict.keys())
for k in keys:
del self._dict[k]
self._dict['persistent options version'] = self['persistent options version']
self.dirty()
self.clean()
def __getitem__(self, key):
return self._dict[key] if key in self._dict else self._defaults[key]
def __setitem__(self, key, val):
assert key in self._defaults, 'Attempting to write "%s":"%s" to options, but key does not exist in defaults' % (str(key), str(val))
if self[key] == val: return
self._dict[key] = val
self.dirty()
self.clean()
def gettersetter(self, key, fn_get_wrap=None, fn_set_wrap=None):
if not fn_get_wrap: fn_get_wrap = lambda v: v
if not fn_set_wrap: fn_set_wrap = lambda v: v
oself = self
class GetSet:
def get(self):
return fn_get_wrap(oself[key])
def set(self, v):
v = fn_set_wrap(v)
if oself[key] != v:
oself[key] = v
return GetSet()
def __init__(self, filename=None, version=None):
self._filename = filename
self._version = version
self._db = None
def __call__(self, cls):
upd = getattr(cls, 'update', None)
if upd:
u = upd
def wrap():
def upd_wrap(*args, **kwargs):
u(None)
return upd_wrap
upd = wrap()
self._db = PersistentOptions.WrappedDict(cls, self._filename, self._version, cls.defaults, upd)
db = self._db
class WrappedClass:
def __init__(self, *args, **kwargs):
self._db = db
self._def = cls.defaults
def __getitem__(self, key):
return self._db[key]
def __setitem__(self, key, val):
self._db[key] = val
def keys(self):
return self._db.keys()
def reset(self):
self._db.reset()
def clean(self):
self._db.clean()
def gettersetter(self, key, fn_get_wrap=None, fn_set_wrap=None):
return self._db.gettersetter(key, fn_get_wrap=fn_get_wrap, fn_set_wrap=fn_set_wrap)
return WrappedClass
| [
"os.path.exists",
"inspect.getsourcefile",
"re.compile",
"inspect.currentframe",
"os.path.join",
"functools.wraps",
"os.path.basename",
"os.path.abspath",
"time.time"
]
| [((1455, 1497), 'os.path.basename', 'os.path.basename', (['frame.f_code.co_filename'], {}), '(frame.f_code.co_filename)\n', (1471, 1497), False, 'import os\n'), ((1379, 1401), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (1399, 1401), False, 'import inspect\n'), ((2087, 2098), 'time.time', 'time.time', ([], {}), '()\n', (2096, 2098), False, 'import time\n'), ((2152, 2163), 'time.time', 'time.time', ([], {}), '()\n', (2161, 2163), False, 'import time\n'), ((6851, 6925), 're.compile', 're.compile', (['"""^(?P<comparison><|<=|==|!=|>=|>) *(?P<version>\\\\d\\\\.\\\\d\\\\d)$"""'], {}), "('^(?P<comparison><|<=|==|!=|>=|>) *(?P<version>\\\\d\\\\.\\\\d\\\\d)$')\n", (6861, 6925), False, 'import re\n'), ((7594, 7603), 'functools.wraps', 'wraps', (['fn'], {}), '(fn)\n', (7599, 7603), False, 'from functools import wraps\n'), ((8092, 8101), 'functools.wraps', 'wraps', (['fn'], {}), '(fn)\n', (8097, 8101), False, 'from functools import wraps\n'), ((4432, 4443), 'time.time', 'time.time', ([], {}), '()\n', (4441, 4443), False, 'import time\n'), ((4505, 4516), 'time.time', 'time.time', ([], {}), '()\n', (4514, 4516), False, 'import time\n'), ((8527, 8538), 'time.time', 'time.time', ([], {}), '()\n', (8536, 8538), False, 'import time\n'), ((9781, 9792), 'time.time', 'time.time', ([], {}), '()\n', (9790, 9792), False, 'import time\n'), ((8807, 8833), 'inspect.getsourcefile', 'inspect.getsourcefile', (['cls'], {}), '(cls)\n', (8828, 8833), False, 'import inspect\n'), ((8925, 8953), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (8937, 8953), False, 'import os\n'), ((9875, 9901), 'os.path.exists', 'os.path.exists', (['self._fndb'], {}), '(self._fndb)\n', (9889, 9901), False, 'import os\n'), ((9526, 9537), 'time.time', 'time.time', ([], {}), '()\n', (9535, 9537), False, 'import time\n'), ((8871, 8891), 'os.path.abspath', 'os.path.abspath', (['src'], {}), '(src)\n', (8886, 8891), False, 'import os\n')] |
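# A small usage sketch for the LimitRecursion decorator defined above (assumes the
# class is in scope); nesting deeper than `count` calls falls back to def_ret.
@LimitRecursion(count=5, def_ret=0)
def depth(n):
    return 1 + depth(n - 1) if n else 0

print(depth(3))   # 3  (fits within the recursion budget)
print(depth(50))  # 5  (calls deeper than 5 levels return def_ret instead)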
import numpy as np
import pytest
from pandas import (
DataFrame,
Series,
concat,
)
import pandas._testing as tm
@pytest.mark.parametrize("func", ["cov", "corr"])
def test_ewm_pairwise_cov_corr(func, frame):
result = getattr(frame.ewm(span=10, min_periods=5), func)()
result = result.loc[(slice(None), 1), 5]
result.index = result.index.droplevel(1)
expected = getattr(frame[1].ewm(span=10, min_periods=5), func)(frame[5])
tm.assert_series_equal(result, expected, check_names=False)
@pytest.mark.parametrize("name", ["cov", "corr"])
def test_ewm_corr_cov(name):
A = Series(np.random.randn(50), index=np.arange(50))
B = A[2:] + np.random.randn(48)
A[:10] = np.NaN
B[-10:] = np.NaN
result = getattr(A.ewm(com=20, min_periods=5), name)(B)
assert np.isnan(result.values[:14]).all()
assert not np.isnan(result.values[14:]).any()
@pytest.mark.parametrize("min_periods", [0, 1, 2])
@pytest.mark.parametrize("name", ["cov", "corr"])
def test_ewm_corr_cov_min_periods(name, min_periods):
# GH 7898
A = Series(np.random.randn(50), index=np.arange(50))
B = A[2:] + np.random.randn(48)
A[:10] = np.NaN
B[-10:] = np.NaN
result = getattr(A.ewm(com=20, min_periods=min_periods), name)(B)
# binary functions (ewmcov, ewmcorr) with bias=False require at
# least two values
assert np.isnan(result.values[:11]).all()
assert not np.isnan(result.values[11:]).any()
# check series of length 0
empty = Series([], dtype=np.float64)
result = getattr(empty.ewm(com=50, min_periods=min_periods), name)(empty)
tm.assert_series_equal(result, empty)
# check series of length 1
result = getattr(Series([1.0]).ewm(com=50, min_periods=min_periods), name)(
Series([1.0])
)
tm.assert_series_equal(result, Series([np.NaN]))
@pytest.mark.parametrize("name", ["cov", "corr"])
def test_different_input_array_raise_exception(name):
A = Series(np.random.randn(50), index=np.arange(50))
A[:10] = np.NaN
msg = "other must be a DataFrame or Series"
# exception raised is Exception
with pytest.raises(ValueError, match=msg):
getattr(A.ewm(com=20, min_periods=5), name)(np.random.randn(50))
def create_mock_weights(obj, com, adjust, ignore_na):
if isinstance(obj, DataFrame):
if not len(obj.columns):
return DataFrame(index=obj.index, columns=obj.columns)
w = concat(
[
create_mock_series_weights(
obj.iloc[:, i], com=com, adjust=adjust, ignore_na=ignore_na
)
for i, _ in enumerate(obj.columns)
],
axis=1,
)
w.index = obj.index
w.columns = obj.columns
return w
else:
return create_mock_series_weights(obj, com, adjust, ignore_na)
def create_mock_series_weights(s, com, adjust, ignore_na):
w = Series(np.nan, index=s.index)
alpha = 1.0 / (1.0 + com)
if adjust:
count = 0
for i in range(len(s)):
if s.iat[i] == s.iat[i]:
w.iat[i] = pow(1.0 / (1.0 - alpha), count)
count += 1
elif not ignore_na:
count += 1
else:
sum_wts = 0.0
prev_i = -1
count = 0
for i in range(len(s)):
if s.iat[i] == s.iat[i]:
if prev_i == -1:
w.iat[i] = 1.0
else:
w.iat[i] = alpha * sum_wts / pow(1.0 - alpha, count - prev_i)
sum_wts += w.iat[i]
prev_i = count
count += 1
elif not ignore_na:
count += 1
return w
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_ewm_consistency_mean(consistency_data, adjust, ignore_na, min_periods):
x, is_constant, no_nans = consistency_data
com = 3.0
result = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).mean()
weights = create_mock_weights(x, com=com, adjust=adjust, ignore_na=ignore_na)
expected = (
x.multiply(weights).cumsum().divide(weights.cumsum()).fillna(method="ffill")
)
expected[
x.expanding().count() < (max(min_periods, 1) if min_periods else 1)
] = np.nan
tm.assert_equal(result, expected.astype("float64"))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_ewm_consistency_consistent(consistency_data, adjust, ignore_na, min_periods):
x, is_constant, no_nans = consistency_data
com = 3.0
if is_constant:
count_x = x.expanding().count()
mean_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).mean()
# check that correlation of a series with itself is either 1 or NaN
corr_x_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).corr(x)
exp = x.max() if isinstance(x, Series) else x.max().max()
# check mean of constant series
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = exp
tm.assert_equal(mean_x, expected)
# check correlation of constant series with itself is NaN
expected[:] = np.nan
tm.assert_equal(corr_x_x, expected)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_ewm_consistency_var_debiasing_factors(
consistency_data, adjust, ignore_na, min_periods
):
x, is_constant, no_nans = consistency_data
com = 3.0
# check variance debiasing factors
var_unbiased_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=False)
var_biased_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=True)
weights = create_mock_weights(x, com=com, adjust=adjust, ignore_na=ignore_na)
cum_sum = weights.cumsum().fillna(method="ffill")
cum_sum_sq = (weights * weights).cumsum().fillna(method="ffill")
numerator = cum_sum * cum_sum
denominator = numerator - cum_sum_sq
denominator[denominator <= 0.0] = np.nan
var_debiasing_factors_x = numerator / denominator
tm.assert_equal(var_unbiased_x, var_biased_x * var_debiasing_factors_x)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_moments_consistency_var(
consistency_data, adjust, ignore_na, min_periods, bias
):
x, is_constant, no_nans = consistency_data
com = 3.0
mean_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).mean()
var_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=bias)
assert not (var_x < 0).any().any()
if bias:
# check that biased var(x) == mean(x^2) - mean(x)^2
mean_x2 = (
(x * x)
.ewm(com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na)
.mean()
)
tm.assert_equal(var_x, mean_x2 - (mean_x * mean_x))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_moments_consistency_var_constant(
consistency_data, adjust, ignore_na, min_periods, bias
):
x, is_constant, no_nans = consistency_data
com = 3.0
if is_constant:
count_x = x.expanding(min_periods=min_periods).count()
var_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=bias)
# check that variance of constant series is identically 0
assert not (var_x > 0).any().any()
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = 0.0
if not bias:
expected[count_x < 2] = np.nan
tm.assert_equal(var_x, expected)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_ewm_consistency_std(consistency_data, adjust, ignore_na, min_periods, bias):
x, is_constant, no_nans = consistency_data
com = 3.0
var_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=bias)
std_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).std(bias=bias)
assert not (var_x < 0).any().any()
assert not (std_x < 0).any().any()
# check that var(x) == std(x)^2
tm.assert_equal(var_x, std_x * std_x)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_ewm_consistency_cov(consistency_data, adjust, ignore_na, min_periods, bias):
x, is_constant, no_nans = consistency_data
com = 3.0
var_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=bias)
assert not (var_x < 0).any().any()
cov_x_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).cov(x, bias=bias)
assert not (cov_x_x < 0).any().any()
# check that var(x) == cov(x, x)
tm.assert_equal(var_x, cov_x_x)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_ewm_consistency_series_cov_corr(
consistency_data, adjust, ignore_na, min_periods, bias
):
x, is_constant, no_nans = consistency_data
com = 3.0
if isinstance(x, Series):
var_x_plus_y = (
(x + x)
.ewm(com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na)
.var(bias=bias)
)
var_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=bias)
var_y = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=bias)
cov_x_y = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).cov(x, bias=bias)
# check that cov(x, y) == (var(x+y) - var(x) -
# var(y)) / 2
tm.assert_equal(cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y))
# check that corr(x, y) == cov(x, y) / (std(x) *
# std(y))
corr_x_y = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).corr(x, bias=bias)
std_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).std(bias=bias)
std_y = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).std(bias=bias)
tm.assert_equal(corr_x_y, cov_x_y / (std_x * std_y))
if bias:
# check that biased cov(x, y) == mean(x*y) -
# mean(x)*mean(y)
mean_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).mean()
mean_y = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).mean()
mean_x_times_y = (
(x * x)
.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
)
.mean()
)
tm.assert_equal(cov_x_y, mean_x_times_y - (mean_x * mean_y))
| [
"pandas.Series",
"pandas._testing.assert_series_equal",
"pytest.mark.parametrize",
"pandas._testing.assert_equal",
"pytest.raises",
"numpy.isnan",
"pandas.DataFrame",
"numpy.random.randn",
"numpy.arange"
]
| [((128, 176), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', "['cov', 'corr']"], {}), "('func', ['cov', 'corr'])\n", (151, 176), False, 'import pytest\n'), ((520, 568), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""name"""', "['cov', 'corr']"], {}), "('name', ['cov', 'corr'])\n", (543, 568), False, 'import pytest\n'), ((893, 942), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""min_periods"""', '[0, 1, 2]'], {}), "('min_periods', [0, 1, 2])\n", (916, 942), False, 'import pytest\n'), ((944, 992), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""name"""', "['cov', 'corr']"], {}), "('name', ['cov', 'corr'])\n", (967, 992), False, 'import pytest\n'), ((1843, 1891), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""name"""', "['cov', 'corr']"], {}), "('name', ['cov', 'corr'])\n", (1866, 1891), False, 'import pytest\n'), ((3705, 3760), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""min_periods"""', '[0, 1, 2, 3, 4]'], {}), "('min_periods', [0, 1, 2, 3, 4])\n", (3728, 3760), False, 'import pytest\n'), ((4368, 4423), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""min_periods"""', '[0, 1, 2, 3, 4]'], {}), "('min_periods', [0, 1, 2, 3, 4])\n", (4391, 4423), False, 'import pytest\n'), ((5333, 5388), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""min_periods"""', '[0, 1, 2, 3, 4]'], {}), "('min_periods', [0, 1, 2, 3, 4])\n", (5356, 5388), False, 'import pytest\n'), ((6305, 6360), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""min_periods"""', '[0, 1, 2, 3, 4]'], {}), "('min_periods', [0, 1, 2, 3, 4])\n", (6328, 6360), False, 'import pytest\n'), ((6362, 6408), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""bias"""', '[True, False]'], {}), "('bias', [True, False])\n", (6385, 6408), False, 'import pytest\n'), ((7127, 7182), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""min_periods"""', '[0, 1, 2, 3, 4]'], {}), "('min_periods', [0, 1, 2, 3, 4])\n", (7150, 7182), False, 'import pytest\n'), ((7184, 7230), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""bias"""', '[True, False]'], {}), "('bias', [True, False])\n", (7207, 7230), False, 'import pytest\n'), ((7912, 7967), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""min_periods"""', '[0, 1, 2, 3, 4]'], {}), "('min_periods', [0, 1, 2, 3, 4])\n", (7935, 7967), False, 'import pytest\n'), ((7969, 8015), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""bias"""', '[True, False]'], {}), "('bias', [True, False])\n", (7992, 8015), False, 'import pytest\n'), ((8557, 8612), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""min_periods"""', '[0, 1, 2, 3, 4]'], {}), "('min_periods', [0, 1, 2, 3, 4])\n", (8580, 8612), False, 'import pytest\n'), ((8614, 8660), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""bias"""', '[True, False]'], {}), "('bias', [True, False])\n", (8637, 8660), False, 'import pytest\n'), ((9205, 9260), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""min_periods"""', '[0, 1, 2, 3, 4]'], {}), "('min_periods', [0, 1, 2, 3, 4])\n", (9228, 9260), False, 'import pytest\n'), ((9262, 9308), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""bias"""', '[True, False]'], {}), "('bias', [True, False])\n", (9285, 9308), False, 'import pytest\n'), ((457, 516), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {'check_names': '(False)'}), '(result, expected, check_names=False)\n', (479, 516), True, 
'import pandas._testing as tm\n'), ((1498, 1526), 'pandas.Series', 'Series', (['[]'], {'dtype': 'np.float64'}), '([], dtype=np.float64)\n', (1504, 1526), False, 'from pandas import DataFrame, Series, concat\n'), ((1609, 1646), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'empty'], {}), '(result, empty)\n', (1631, 1646), True, 'import pandas._testing as tm\n'), ((2918, 2947), 'pandas.Series', 'Series', (['np.nan'], {'index': 's.index'}), '(np.nan, index=s.index)\n', (2924, 2947), False, 'from pandas import DataFrame, Series, concat\n'), ((6230, 6301), 'pandas._testing.assert_equal', 'tm.assert_equal', (['var_unbiased_x', '(var_biased_x * var_debiasing_factors_x)'], {}), '(var_unbiased_x, var_biased_x * var_debiasing_factors_x)\n', (6245, 6301), True, 'import pandas._testing as tm\n'), ((8516, 8553), 'pandas._testing.assert_equal', 'tm.assert_equal', (['var_x', '(std_x * std_x)'], {}), '(var_x, std_x * std_x)\n', (8531, 8553), True, 'import pandas._testing as tm\n'), ((9170, 9201), 'pandas._testing.assert_equal', 'tm.assert_equal', (['var_x', 'cov_x_x'], {}), '(var_x, cov_x_x)\n', (9185, 9201), True, 'import pandas._testing as tm\n'), ((613, 632), 'numpy.random.randn', 'np.random.randn', (['(50)'], {}), '(50)\n', (628, 632), True, 'import numpy as np\n'), ((671, 690), 'numpy.random.randn', 'np.random.randn', (['(48)'], {}), '(48)\n', (686, 690), True, 'import numpy as np\n'), ((1076, 1095), 'numpy.random.randn', 'np.random.randn', (['(50)'], {}), '(50)\n', (1091, 1095), True, 'import numpy as np\n'), ((1134, 1153), 'numpy.random.randn', 'np.random.randn', (['(48)'], {}), '(48)\n', (1149, 1153), True, 'import numpy as np\n'), ((1767, 1780), 'pandas.Series', 'Series', (['[1.0]'], {}), '([1.0])\n', (1773, 1780), False, 'from pandas import DataFrame, Series, concat\n'), ((1822, 1838), 'pandas.Series', 'Series', (['[np.NaN]'], {}), '([np.NaN])\n', (1828, 1838), False, 'from pandas import DataFrame, Series, concat\n'), ((1961, 1980), 'numpy.random.randn', 'np.random.randn', (['(50)'], {}), '(50)\n', (1976, 1980), True, 'import numpy as np\n'), ((2117, 2153), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (2130, 2153), False, 'import pytest\n'), ((5156, 5189), 'pandas._testing.assert_equal', 'tm.assert_equal', (['mean_x', 'expected'], {}), '(mean_x, expected)\n', (5171, 5189), True, 'import pandas._testing as tm\n'), ((5294, 5329), 'pandas._testing.assert_equal', 'tm.assert_equal', (['corr_x_x', 'expected'], {}), '(corr_x_x, expected)\n', (5309, 5329), True, 'import pandas._testing as tm\n'), ((7072, 7121), 'pandas._testing.assert_equal', 'tm.assert_equal', (['var_x', '(mean_x2 - mean_x * mean_x)'], {}), '(var_x, mean_x2 - mean_x * mean_x)\n', (7087, 7121), True, 'import pandas._testing as tm\n'), ((7876, 7908), 'pandas._testing.assert_equal', 'tm.assert_equal', (['var_x', 'expected'], {}), '(var_x, expected)\n', (7891, 7908), True, 'import pandas._testing as tm\n'), ((10152, 10214), 'pandas._testing.assert_equal', 'tm.assert_equal', (['cov_x_y', '(0.5 * (var_x_plus_y - var_x - var_y))'], {}), '(cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y))\n', (10167, 10214), True, 'import pandas._testing as tm\n'), ((10693, 10745), 'pandas._testing.assert_equal', 'tm.assert_equal', (['corr_x_y', '(cov_x_y / (std_x * std_y))'], {}), '(corr_x_y, cov_x_y / (std_x * std_y))\n', (10708, 10745), True, 'import pandas._testing as tm\n'), ((640, 653), 'numpy.arange', 'np.arange', (['(50)'], {}), '(50)\n', (649, 653), True, 'import numpy 
as np\n'), ((805, 833), 'numpy.isnan', 'np.isnan', (['result.values[:14]'], {}), '(result.values[:14])\n', (813, 833), True, 'import numpy as np\n'), ((1103, 1116), 'numpy.arange', 'np.arange', (['(50)'], {}), '(50)\n', (1112, 1116), True, 'import numpy as np\n'), ((1369, 1397), 'numpy.isnan', 'np.isnan', (['result.values[:11]'], {}), '(result.values[:11])\n', (1377, 1397), True, 'import numpy as np\n'), ((1988, 2001), 'numpy.arange', 'np.arange', (['(50)'], {}), '(50)\n', (1997, 2001), True, 'import numpy as np\n'), ((2207, 2226), 'numpy.random.randn', 'np.random.randn', (['(50)'], {}), '(50)\n', (2222, 2226), True, 'import numpy as np\n'), ((2371, 2418), 'pandas.DataFrame', 'DataFrame', ([], {'index': 'obj.index', 'columns': 'obj.columns'}), '(index=obj.index, columns=obj.columns)\n', (2380, 2418), False, 'from pandas import DataFrame, Series, concat\n'), ((11353, 11411), 'pandas._testing.assert_equal', 'tm.assert_equal', (['cov_x_y', '(mean_x_times_y - mean_x * mean_y)'], {}), '(cov_x_y, mean_x_times_y - mean_x * mean_y)\n', (11368, 11411), True, 'import pandas._testing as tm\n'), ((855, 883), 'numpy.isnan', 'np.isnan', (['result.values[14:]'], {}), '(result.values[14:])\n', (863, 883), True, 'import numpy as np\n'), ((1419, 1447), 'numpy.isnan', 'np.isnan', (['result.values[11:]'], {}), '(result.values[11:])\n', (1427, 1447), True, 'import numpy as np\n'), ((1700, 1713), 'pandas.Series', 'Series', (['[1.0]'], {}), '([1.0])\n', (1706, 1713), False, 'from pandas import DataFrame, Series, concat\n')] |
# TODO finish implementing query
import math
from pyspark import SparkContext
# from genex.cluster import sim_between_seq
from brainex.op.query_op import sim_between_seq
from brainex.parse import strip_function, remove_trailing_zeros
from .classes import Sequence
from brainex.database import genexengine
def query(q: Sequence, gc: genexengine, loi: list, sc: SparkContext,
k:int=1, ex_sameID: bool=False, overlap: float= 1.0, mode:str='genex'):
"""
:param q: query sequence
:param gc: Gcluster in which to query
:param loi: list of two integer values, specifying the query range, if set to None, is going to query all length
:param sc: spark context on which to run the query operation
:param k: integer, specifying to return top k matches
:param ex_sameID: boolean, whether to include sequences from the time series with the same id as the query sequence
:param overlap: float, how much overlapping between queries lookups
:param mode: query mode, supported modes are 'genex' and 'bf' (bf = brute force)
"""
    if mode == 'genex':
        # TODO: thread the query arguments (q, gc, loi, sc, k, ...) through once gquery is wired up
        gquery()
    elif mode == 'bf':
        # TODO: bfquery is still a stub
        bfquery()
    else:
        raise Exception('Unsupported query mode: ' + mode)
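# A minimal usage sketch for `query`, assuming a local SparkContext and an
# already-built genexengine; `my_engine` and `my_sequence` are hypothetical
# placeholders rather than names defined in this project.
#
#     sc = SparkContext(appName='genex-query')
#     matches = query(q=my_sequence, gc=my_engine, loi=[50, 100], sc=sc,
#                     k=5, ex_sameID=False, overlap=0.75, mode='genex')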
def get_query_from_dict():
pass
def get_query_sequence_from_file(file: str):
    """
    Read query sequences from a CSV file: the first row is expected to name the
    feature (id) columns, and the remaining columns of every row are treated as
    the series data.
    """
    resList = []
with open(file, 'r') as f:
for i, line in enumerate(f):
if not i:
features = list(map(lambda x: strip_function(x),
line.strip()[:-1].split(',')))
if line != "" and line != "\n":
data = remove_trailing_zeros(line.split(",")[:-1])
series_data = data[len(features):]
resList.append(series_data)
if len(resList[0]) == 0:
return resList[1:]
else:
return resList
def gquery(query_list: list, gc_data: dict, loi: list, input_list: list,
k:int=1, ex_sameID: bool=False, overlap: float= 1.0, ):
"""
Because Gcluster object doesn't have map property, we have to use dict as input
:param file:
:param gc_data:
:param loi:
:param input_list:
:param k:
:param ex_sameID:
:param overlap:
:return:
"""
# get query from id, start, end point
# get query from csv file
#
# query_list = []
# query_set = get_query_from_csv_with_id(file)
# print(query_set)
# for cur_query in query_set:
# query_list.append(get_query_from_sequence(cur_query[0], int(cur_query[1]), int(cur_query[2]), input_list))
# print(query_list)
return custom_query(query_list, loi, gc_data, k, input_list)
def bfquery():
    # TODO: implement the brute-force query path
    print()
#
# def custom_query_operation(q: Sequence, gc: Gcluster, loi: list, sc: SparkContext,
# k:int=1, ex_sameID: bool=False, overlap: float= 1.0):
#
# query_result = filter_rdd_back.repartition(16).map(
# lambda clusters: custom_query(q, loi, gc, k,
# global_time_series_dict.value, ))
# # changed here
# # plot_query_result(query_sequence, query_result, global_time_series_dict.value)
# return query_result
def get_query_from_sequence(id: tuple, start: int, end: int, input_list: list):
"""
:param id:
:param start:
:param end:
:param input_list:
:return: a list
"""
try:
input_dict = dict(input_list) # validate by converting input_list into a dict
except (TypeError, ValueError):
        raise Exception('sequence: fetch_data: input_list is not a list of key-value pairs.')
return input_dict[id][start: end]
def custom_query(query_sequences: list, loi: list, Gcluster_data:dict, k : int, input_list:list):
# """
#
# :param query_sequences: list of list: the list of sequences to be queried
# :param cluster: dict[key = representative, value = list of timeSeriesObj] -> representative is timeSeriesObj
# the sequences in the cluster are all of the SAME length
# :param k: int
# :return list of time series objects: best k matches. Again note they are all of the SAME length
# """
"""
:param query_sequences:
:param query_range:
:param Gcluster_data:
:param k:
:param input_list:
:return:
"""
# get query from csv file which contains lists of list of query actual clusters
# get query from csv file which contains lists of tuple of id, start, endpoint
query_result = dict()
if not isinstance(query_sequences, list) or len(query_sequences) == 0:
raise ValueError("query sequence must be a list and not empty")
cur_query_number = 0
if isinstance(query_sequences[0], list):
print("length of query is [" + str(len(query_sequences)) + "]" + "[" + str(len(query_sequences[0])) + "]")
print("query is a list of list")
for cur_query in query_sequences:
if isinstance(cur_query, list):
query_result[cur_query_number] = get_most_k_sim(cur_query, loi, Gcluster_data, k, input_list)
cur_query_number += 1
return query_result
else:
return get_most_k_sim(query_sequences, loi, Gcluster_data, k, input_list)
def get_most_k_sim(query_sequence: list, loi: list, Gcluster_data : dict, k, input_list:list):
"""
:param query_sequence:
:param query_range:
:param Gcluster_data:
:param k:
:param input_list:
:return:
"""
    min_rprs = None  # the representative with the smallest distance to the query
min_dist = math.inf
target_cluster = []
print("length of gcluster clusters is " + str(len(Gcluster_data[1])))
for cur_rprs_seq in Gcluster_data[1].keys():
# TODO do we want to get raw clusters here, or set the raw in timeSeriesObj before calling query (no parsing)
if (cur_rprs_seq.end - cur_rprs_seq.start + 1) in range(loi[0], loi[1] + 1):
# modify here, not use get clusters from objects, use values
cur_dist = sim_between_seq(query_sequence, cur_rprs_seq.fetch_data(input_list))
if cur_dist < min_dist:
min_rprs = cur_rprs_seq
min_dist = cur_dist
else:
break
if min_rprs:
print('min representative is ' + min_rprs.__str__())
print('min dist' + str(min_dist))
# print("Querying Cluster of length: " + str(len(get_data_for_timeSeriesObj(min_rprs, time_series_dict))))
target_cluster = Gcluster_data[1].get(min_rprs)
print('len of cluster is ' + str(len(target_cluster)))
# print("sorting")
#
target_cluster.sort(key=lambda cluster_sequence: sim_between_seq(query_sequence,
cluster_sequence.data))
k = int(k)
return target_cluster[0:k] # return the k most similar sequences
else:
return None
| [
"brainex.op.query_op.sim_between_seq",
"brainex.parse.strip_function"
]
| [((6638, 6692), 'brainex.op.query_op.sim_between_seq', 'sim_between_seq', (['query_sequence', 'cluster_sequence.data'], {}), '(query_sequence, cluster_sequence.data)\n', (6653, 6692), False, 'from brainex.op.query_op import sim_between_seq\n'), ((1456, 1473), 'brainex.parse.strip_function', 'strip_function', (['x'], {}), '(x)\n', (1470, 1473), False, 'from brainex.parse import strip_function, remove_trailing_zeros\n')] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Callable, Dict
import torch
import torch.nn as nn
from nni.retiarii import model_wrapper
from nni.retiarii.nn.pytorch import NasBench201Cell
__all__ = ['NasBench201']
OPS_WITH_STRIDE = {
'none': lambda C_in, C_out, stride: Zero(C_in, C_out, stride),
'avg_pool_3x3': lambda C_in, C_out, stride: Pooling(C_in, C_out, stride, 'avg'),
'max_pool_3x3': lambda C_in, C_out, stride: Pooling(C_in, C_out, stride, 'max'),
'conv_3x3': lambda C_in, C_out, stride: ReLUConvBN(C_in, C_out, (3, 3), (stride, stride), (1, 1), (1, 1)),
'conv_1x1': lambda C_in, C_out, stride: ReLUConvBN(C_in, C_out, (1, 1), (stride, stride), (0, 0), (1, 1)),
'skip_connect': lambda C_in, C_out, stride: nn.Identity() if stride == 1 and C_in == C_out
else FactorizedReduce(C_in, C_out, stride),
}
PRIMITIVES = ['none', 'skip_connect', 'conv_1x1', 'conv_3x3', 'avg_pool_3x3']
class ReLUConvBN(nn.Module):
def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation):
super(ReLUConvBN, self).__init__()
self.op = nn.Sequential(
nn.ReLU(inplace=False),
nn.Conv2d(C_in, C_out, kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=False),
nn.BatchNorm2d(C_out)
)
def forward(self, x):
return self.op(x)
class SepConv(nn.Module):
def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation):
super(SepConv, self).__init__()
self.op = nn.Sequential(
nn.ReLU(inplace=False),
nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=C_in, bias=False),
nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
nn.BatchNorm2d(C_out),
)
def forward(self, x):
return self.op(x)
class Pooling(nn.Module):
def __init__(self, C_in, C_out, stride, mode):
super(Pooling, self).__init__()
if C_in == C_out:
self.preprocess = None
else:
self.preprocess = ReLUConvBN(C_in, C_out, 1, 1, 0, 1)
if mode == 'avg':
self.op = nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False)
elif mode == 'max':
self.op = nn.MaxPool2d(3, stride=stride, padding=1)
else:
raise ValueError('Invalid mode={:} in Pooling'.format(mode))
def forward(self, x):
if self.preprocess:
x = self.preprocess(x)
return self.op(x)
class Zero(nn.Module):
def __init__(self, C_in, C_out, stride):
super(Zero, self).__init__()
self.C_in = C_in
self.C_out = C_out
self.stride = stride
self.is_zero = True
def forward(self, x):
if self.C_in == self.C_out:
if self.stride == 1:
return x.mul(0.)
else:
return x[:, :, ::self.stride, ::self.stride].mul(0.)
else:
shape = list(x.shape)
shape[1] = self.C_out
zeros = x.new_zeros(shape, dtype=x.dtype, device=x.device)
return zeros
class FactorizedReduce(nn.Module):
def __init__(self, C_in, C_out, stride):
super(FactorizedReduce, self).__init__()
self.stride = stride
self.C_in = C_in
self.C_out = C_out
self.relu = nn.ReLU(inplace=False)
if stride == 2:
C_outs = [C_out // 2, C_out - C_out // 2]
self.convs = nn.ModuleList()
for i in range(2):
self.convs.append(nn.Conv2d(C_in, C_outs[i], 1, stride=stride, padding=0, bias=False))
self.pad = nn.ConstantPad2d((0, 1, 0, 1), 0)
else:
raise ValueError('Invalid stride : {:}'.format(stride))
self.bn = nn.BatchNorm2d(C_out)
def forward(self, x):
x = self.relu(x)
y = self.pad(x)
out = torch.cat([self.convs[0](x), self.convs[1](y[:, :, 1:, 1:])], dim=1)
out = self.bn(out)
return out
class ResNetBasicblock(nn.Module):
def __init__(self, inplanes, planes, stride):
super(ResNetBasicblock, self).__init__()
assert stride == 1 or stride == 2, 'invalid stride {:}'.format(stride)
self.conv_a = ReLUConvBN(inplanes, planes, 3, stride, 1, 1)
self.conv_b = ReLUConvBN(planes, planes, 3, 1, 1, 1)
if stride == 2:
self.downsample = nn.Sequential(
nn.AvgPool2d(kernel_size=2, stride=2, padding=0),
nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, padding=0, bias=False))
elif inplanes != planes:
self.downsample = ReLUConvBN(inplanes, planes, 1, 1, 0, 1)
else:
self.downsample = None
self.in_dim = inplanes
self.out_dim = planes
self.stride = stride
self.num_conv = 2
def forward(self, inputs):
basicblock = self.conv_a(inputs)
basicblock = self.conv_b(basicblock)
if self.downsample is not None:
inputs = self.downsample(inputs) # residual
return inputs + basicblock
@model_wrapper
class NasBench201(nn.Module):
"""The full search space proposed by `NAS-Bench-201 <https://arxiv.org/abs/2001.00326>`__.
    It's a stack of :class:`NasBench201Cell`. A minimal construction sketch
    follows the class definition.
    """
def __init__(self,
stem_out_channels: int = 16,
num_modules_per_stack: int = 5,
num_labels: int = 10):
super().__init__()
self.channels = C = stem_out_channels
self.num_modules = N = num_modules_per_stack
self.num_labels = num_labels
self.stem = nn.Sequential(
nn.Conv2d(3, C, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(C)
)
layer_channels = [C] * N + [C * 2] + [C * 2] * N + [C * 4] + [C * 4] * N
layer_reductions = [False] * N + [True] + [False] * N + [True] + [False] * N
C_prev = C
self.cells = nn.ModuleList()
for C_curr, reduction in zip(layer_channels, layer_reductions):
if reduction:
cell = ResNetBasicblock(C_prev, C_curr, 2)
else:
ops: Dict[str, Callable[[int, int], nn.Module]] = {
prim: lambda C_in, C_out: OPS_WITH_STRIDE[prim](C_in, C_out, 1) for prim in PRIMITIVES
}
cell = NasBench201Cell(ops, C_prev, C_curr, label='cell')
self.cells.append(cell)
C_prev = C_curr
self.lastact = nn.Sequential(
nn.BatchNorm2d(C_prev),
nn.ReLU(inplace=True)
)
self.global_pooling = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Linear(C_prev, self.num_labels)
def forward(self, inputs):
feature = self.stem(inputs)
for cell in self.cells:
feature = cell(feature)
out = self.lastact(feature)
out = self.global_pooling(out)
out = out.view(out.size(0), -1)
logits = self.classifier(out)
return logits
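# A minimal construction sketch, assuming the defaults declared above; it only
# builds and prints the model space, since searching over the cell operations is
# left to NNI's Retiarii machinery.
if __name__ == '__main__':
    model_space = NasBench201(stem_out_channels=16, num_modules_per_stack=5, num_labels=10)
    print(model_space)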
| [
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.nn.ConstantPad2d",
"torch.nn.Identity",
"nni.retiarii.nn.pytorch.NasBench201Cell"
]
| [((3476, 3498), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(False)'}), '(inplace=False)\n', (3483, 3498), True, 'import torch.nn as nn\n'), ((3909, 3930), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['C_out'], {}), '(C_out)\n', (3923, 3930), True, 'import torch.nn as nn\n'), ((6097, 6112), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (6110, 6112), True, 'import torch.nn as nn\n'), ((6768, 6791), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (6788, 6791), True, 'import torch.nn as nn\n'), ((6818, 6852), 'torch.nn.Linear', 'nn.Linear', (['C_prev', 'self.num_labels'], {}), '(C_prev, self.num_labels)\n', (6827, 6852), True, 'import torch.nn as nn\n'), ((793, 806), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (804, 806), True, 'import torch.nn as nn\n'), ((1165, 1187), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(False)'}), '(inplace=False)\n', (1172, 1187), True, 'import torch.nn as nn\n'), ((1201, 1303), 'torch.nn.Conv2d', 'nn.Conv2d', (['C_in', 'C_out', 'kernel_size'], {'stride': 'stride', 'padding': 'padding', 'dilation': 'dilation', 'bias': '(False)'}), '(C_in, C_out, kernel_size, stride=stride, padding=padding,\n dilation=dilation, bias=False)\n', (1210, 1303), True, 'import torch.nn as nn\n'), ((1335, 1356), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['C_out'], {}), '(C_out)\n', (1349, 1356), True, 'import torch.nn as nn\n'), ((1610, 1632), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(False)'}), '(inplace=False)\n', (1617, 1632), True, 'import torch.nn as nn\n'), ((1646, 1773), 'torch.nn.Conv2d', 'nn.Conv2d', (['C_in', 'C_in'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'dilation': 'dilation', 'groups': 'C_in', 'bias': '(False)'}), '(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=\n padding, dilation=dilation, groups=C_in, bias=False)\n', (1655, 1773), True, 'import torch.nn as nn\n'), ((1804, 1864), 'torch.nn.Conv2d', 'nn.Conv2d', (['C_in', 'C_out'], {'kernel_size': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(C_in, C_out, kernel_size=1, padding=0, bias=False)\n', (1813, 1864), True, 'import torch.nn as nn\n'), ((1878, 1899), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['C_out'], {}), '(C_out)\n', (1892, 1899), True, 'import torch.nn as nn\n'), ((2272, 2338), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', (['(3)'], {'stride': 'stride', 'padding': '(1)', 'count_include_pad': '(False)'}), '(3, stride=stride, padding=1, count_include_pad=False)\n', (2284, 2338), True, 'import torch.nn as nn\n'), ((3602, 3617), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (3615, 3617), True, 'import torch.nn as nn\n'), ((3775, 3808), 'torch.nn.ConstantPad2d', 'nn.ConstantPad2d', (['(0, 1, 0, 1)', '(0)'], {}), '((0, 1, 0, 1), 0)\n', (3791, 3808), True, 'import torch.nn as nn\n'), ((5794, 5847), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', 'C'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(False)'}), '(3, C, kernel_size=3, padding=1, bias=False)\n', (5803, 5847), True, 'import torch.nn as nn\n'), ((5861, 5878), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['C'], {}), '(C)\n', (5875, 5878), True, 'import torch.nn as nn\n'), ((6670, 6692), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['C_prev'], {}), '(C_prev)\n', (6684, 6692), True, 'import torch.nn as nn\n'), ((6706, 6727), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (6713, 6727), True, 'import torch.nn as nn\n'), ((2389, 2430), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(3)'], {'stride': 'stride', 'padding': '(1)'}), 
'(3, stride=stride, padding=1)\n', (2401, 2430), True, 'import torch.nn as nn\n'), ((4565, 4613), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)', 'padding': '(0)'}), '(kernel_size=2, stride=2, padding=0)\n', (4577, 4613), True, 'import torch.nn as nn\n'), ((4631, 4706), 'torch.nn.Conv2d', 'nn.Conv2d', (['inplanes', 'planes'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(inplanes, planes, kernel_size=1, stride=1, padding=0, bias=False)\n', (4640, 4706), True, 'import torch.nn as nn\n'), ((6504, 6554), 'nni.retiarii.nn.pytorch.NasBench201Cell', 'NasBench201Cell', (['ops', 'C_prev', 'C_curr'], {'label': '"""cell"""'}), "(ops, C_prev, C_curr, label='cell')\n", (6519, 6554), False, 'from nni.retiarii.nn.pytorch import NasBench201Cell\n'), ((3683, 3750), 'torch.nn.Conv2d', 'nn.Conv2d', (['C_in', 'C_outs[i]', '(1)'], {'stride': 'stride', 'padding': '(0)', 'bias': '(False)'}), '(C_in, C_outs[i], 1, stride=stride, padding=0, bias=False)\n', (3692, 3750), True, 'import torch.nn as nn\n')] |
#!/usr/bin/env python3
import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='dialog_py',
version='1.0a1',
description='Python API for cdialog/linux dialog',
long_description=long_description,
url='https://github.com/pasha13666/dialog_py',
author='Pasha__kun',
author_email='<EMAIL>',
packages=['dialog_py'],
install_requires=[],
include_package_data=True,
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython'
]
)
| [
"os.path.dirname",
"setuptools.setup"
]
| [((187, 1082), 'setuptools.setup', 'setup', ([], {'name': '"""dialog_py"""', 'version': '"""1.0a1"""', 'description': '"""Python API for cdialog/linux dialog"""', 'long_description': 'long_description', 'url': '"""https://github.com/pasha13666/dialog_py"""', 'author': '"""Pasha__kun"""', 'author_email': '"""<EMAIL>"""', 'packages': "['dialog_py']", 'install_requires': '[]', 'include_package_data': '(True)', 'license': '"""MIT"""', 'classifiers': "['Development Status :: 3 - Alpha', 'Environment :: Console',\n 'License :: OSI Approved :: MIT License', 'Operating System :: POSIX',\n 'Operating System :: POSIX :: Linux', 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: Implementation :: CPython']"}), "(name='dialog_py', version='1.0a1', description=\n 'Python API for cdialog/linux dialog', long_description=\n long_description, url='https://github.com/pasha13666/dialog_py', author\n ='Pasha__kun', author_email='<EMAIL>', packages=['dialog_py'],\n install_requires=[], include_package_data=True, license='MIT',\n classifiers=['Development Status :: 3 - Alpha',\n 'Environment :: Console', 'License :: OSI Approved :: MIT License',\n 'Operating System :: POSIX', 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python', 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: Implementation :: CPython'])\n", (192, 1082), False, 'from setuptools import setup\n'), ((88, 113), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (103, 113), False, 'import os\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
import nose
from nose.tools import *
from whoswho import who, config
from nameparser.config.titles import TITLES as NAMEPARSER_TITLES
class TestMatch(unittest.TestCase):
def setUp(self):
self.name = '<NAME>'
def test_string(self):
# Only relevant for python 2.X
assert_true(who.match(self.name, str('<NAME>')))
def test_unicode(self):
name = self.name
assert_true(who.match(name, '<NAME>'))
assert_true(who.match(name, '<NAME>'))
assert_false(who.match(name, '<NAME>'))
def test_name_and_initials(self):
assert_true(who.match(self.name, '<NAME>'))
assert_true(who.match(self.name, '<NAME>'))
assert_true(who.match(self.name, '<NAME>'))
def test_different_number_initials(self):
assert_true(who.match(self.name, '<NAME>'))
assert_true(who.match(self.name, '<NAME>'))
assert_false(who.match(self.name, '<NAME>'))
assert_false(who.match(self.name, '<NAME>'))
assert_true(who.match('<NAME>', '<NAME>'))
def test_different_initials(self):
assert_false(who.match(self.name, '<NAME>'))
assert_false(who.match(self.name, '<NAME>'))
assert_false(who.match(self.name, '<NAME>'))
assert_false(who.match(self.name, '<NAME>'))
def test_short_names(self):
assert_true(who.match(self.name, '<NAME>'))
# TODO: Should these be true?
assert_false(who.match(self.name, '<NAME>'))
assert_false(who.match(self.name, '<NAME>'))
def test_suffixes(self):
name = '<NAME>'
assert_true(who.match(name, '<NAME>'))
assert_true(who.match(name, '<NAME>r'))
assert_true(who.match(name, '<NAME>, PhD'))
assert_false(who.match(name, '<NAME>, Sr'))
assert_false(who.match(name, '<NAME>, Sr, PhD'))
assert_true(who.match(name, '<NAME>, Jr, PhD'))
def test_equivalent_suffixes(self):
name = '<NAME>r'
assert_true(who.match(name, '<NAME> Jnr'))
assert_false(who.match(name, '<NAME> Snr'))
def test_titles(self):
name = 'Mr. <NAME>'
assert_true(who.match(name, '<NAME>'))
assert_true(who.match(name, 'Sir <NAME>'))
assert_true(who.match(name, 'Dr. <NAME>'))
assert_false(who.match(name, 'Mrs. <NAME>'))
def test_nickname(self):
name = 'Robert "<NAME>'
assert_true(who.match(name, '<NAME>'))
assert_true(who.match('<NAME>', name))
assert_false(who.match(name, '<NAME>'))
assert_false(who.match(name, '<NAME>'))
assert_false(who.match(name, '<NAME>',
options={'check_nickname': False}))
class TestRatio(unittest.TestCase):
def setUp(self):
self.name = '<NAME>'
def test_string(self):
# Only relevant for python 2.X
assert_equal(who.ratio(self.name, str('<NAME>')), 100)
def test_unicode(self):
name = self.name
assert_equal(who.ratio(name, 'attach<NAME>'), 100)
assert_equal(who.ratio(name, '<NAME>'), 100)
assert_true(who.ratio(name, '<NAME>') < 100)
def test_name_and_initials(self):
assert_equal(who.ratio(self.name, '<NAME>'), 100)
assert_equal(who.ratio(self.name, '<NAME>'), 100)
assert_equal(who.ratio(self.name, '<NAME>'), 100)
def test_different_number_initials(self):
assert_equal(who.ratio(self.name, '<NAME>'), 100)
assert_equal(who.ratio(self.name, '<NAME>'), 100)
assert_true(who.ratio(self.name, '<NAME>') < 100)
assert_true(who.ratio(self.name, '<NAME>') < 100)
assert_equal(who.ratio('<NAME>', '<NAME>'), 100)
def test_different_initials(self):
assert_true(who.ratio(self.name, '<NAME>') < 100)
assert_true(who.ratio(self.name, '<NAME>') < 100)
assert_true(who.ratio(self.name, '<NAME>') < 100)
assert_true(who.ratio(self.name, '<NAME>') < 100)
assert_true(who.ratio(self.name, '<NAME>') <
who.ratio(self.name, '<NAME>'))
assert_true(who.ratio(self.name, '<NAME>') <
who.ratio(self.name, '<NAME>'))
assert_true(who.ratio(self.name, '<NAME>') <
who.ratio(self.name, '<NAME>'))
def test_short_names(self):
assert_true(who.ratio(self.name, '<NAME>'))
assert_true(who.ratio(self.name, '<NAME>') < 100)
assert_true(who.ratio(self.name, '<NAME>') < 100)
assert_true(who.ratio(self.name, '<NAME>') <
who.ratio(self.name, '<NAME>'))
def test_suffixes(self):
name = '<NAME>'
assert_equal(who.ratio(name, '<NAME>'), 100)
assert_equal(who.ratio(name, '<NAME>r'), 100)
assert_equal(who.ratio(name, '<NAME>, PhD'), 100)
assert_false(who.ratio(name, '<NAME>, Sr'))
assert_false(who.ratio(name, '<NAME>, Sr, PhD'))
assert_equal(who.ratio(name, '<NAME>, Jr, PhD'), 100)
# Suffix doesn't change a match
assert_equal(who.ratio(name, '<NAME>, Jr'),
who.ratio(name, '<NAME>'))
def test_equivalent_suffixes(self):
name = '<NAME> Jr'
assert_equal(who.ratio(name, '<NAME> Jnr'), 100)
assert_false(who.ratio(name, '<NAME> Snr'))
def test_titles(self):
name = 'Mr. <NAME>'
assert_equal(who.ratio(name, '<NAME>'), 100)
assert_equal(who.ratio(name, 'Sir <NAME>'), 100)
assert_equal(who.ratio(name, 'Dr. <NAME>'), 100)
assert_false(who.ratio(name, 'Mrs. <NAME>'))
# Title doesn't change a match
assert_equal(who.ratio(name, 'Dr. <NAME>'),
who.ratio(name, '<NAME>'))
def test_nickname(self):
name = 'Robert "Evan" Liebowitz'
assert_equal(who.ratio(name, '<NAME>'), 100)
assert_equal(who.ratio('<NAME>', name), 100)
assert_true(who.ratio(name, '<NAME>') < 100)
assert_true(who.ratio(name, '<NAME>') < 100)
assert_true(who.ratio(name, '<NAME>',
options={'check_nickname': False}) < 100)
assert_true(who.ratio(name, '<NAME>') <
who.ratio(name, '<NAME>'))
assert_equal(who.ratio(name, '<NAME>ebowitz'),
who.ratio(name, '<NAME>', 'strict'))
# TODO: Should we ensure that the metadata is up to date?
@nottest
class TestConfig(unittest.TestCase):
def test_titles_all_defined(self):
"""
Check if list of titles is up to date with nameparser
"""
all_titles = (
config.MALE_TITLES |
config.FEMALE_TITLES |
config.GENDERLESS_TITLES
)
assert_equal(all_titles, NAMEPARSER_TITLES)
def test_suffixes_all_defined(self):
"""
Check if list of suffixes is up to date with nameparser
"""
from nameparser.config.suffixes import SUFFIX_ACRONYMS, SUFFIX_NOT_ACRONYMS
all_suffixes = (
config.UNIQUE_SUFFIXES |
config.MISC_SUFFIXES
)
nameparser_suffixes = (
SUFFIX_ACRONYMS |
SUFFIX_NOT_ACRONYMS
)
assert_equal(all_suffixes, nameparser_suffixes)
if __name__ == '__main__':
nose.main()
| [
"whoswho.who.match",
"nose.main",
"whoswho.who.ratio"
]
| [((7315, 7326), 'nose.main', 'nose.main', ([], {}), '()\n', (7324, 7326), False, 'import nose\n'), ((505, 530), 'whoswho.who.match', 'who.match', (['name', '"""<NAME>"""'], {}), "(name, '<NAME>')\n", (514, 530), False, 'from whoswho import who, config\n'), ((552, 577), 'whoswho.who.match', 'who.match', (['name', '"""<NAME>"""'], {}), "(name, '<NAME>')\n", (561, 577), False, 'from whoswho import who, config\n'), ((600, 625), 'whoswho.who.match', 'who.match', (['name', '"""<NAME>"""'], {}), "(name, '<NAME>')\n", (609, 625), False, 'from whoswho import who, config\n'), ((686, 716), 'whoswho.who.match', 'who.match', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (695, 716), False, 'from whoswho import who, config\n'), ((738, 768), 'whoswho.who.match', 'who.match', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (747, 768), False, 'from whoswho import who, config\n'), ((790, 820), 'whoswho.who.match', 'who.match', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (799, 820), False, 'from whoswho import who, config\n'), ((889, 919), 'whoswho.who.match', 'who.match', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (898, 919), False, 'from whoswho import who, config\n'), ((941, 971), 'whoswho.who.match', 'who.match', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (950, 971), False, 'from whoswho import who, config\n'), ((994, 1024), 'whoswho.who.match', 'who.match', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (1003, 1024), False, 'from whoswho import who, config\n'), ((1047, 1077), 'whoswho.who.match', 'who.match', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (1056, 1077), False, 'from whoswho import who, config\n'), ((1099, 1128), 'whoswho.who.match', 'who.match', (['"""<NAME>"""', '"""<NAME>"""'], {}), "('<NAME>', '<NAME>')\n", (1108, 1128), False, 'from whoswho import who, config\n'), ((1191, 1221), 'whoswho.who.match', 'who.match', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (1200, 1221), False, 'from whoswho import who, config\n'), ((1244, 1274), 'whoswho.who.match', 'who.match', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (1253, 1274), False, 'from whoswho import who, config\n'), ((1297, 1327), 'whoswho.who.match', 'who.match', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (1306, 1327), False, 'from whoswho import who, config\n'), ((1350, 1380), 'whoswho.who.match', 'who.match', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (1359, 1380), False, 'from whoswho import who, config\n'), ((1435, 1465), 'whoswho.who.match', 'who.match', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (1444, 1465), False, 'from whoswho import who, config\n'), ((1526, 1556), 'whoswho.who.match', 'who.match', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (1535, 1556), False, 'from whoswho import who, config\n'), ((1579, 1609), 'whoswho.who.match', 'who.match', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (1588, 1609), False, 'from whoswho import who, config\n'), ((1685, 1710), 'whoswho.who.match', 'who.match', (['name', '"""<NAME>"""'], {}), "(name, '<NAME>')\n", (1694, 1710), False, 'from whoswho import who, config\n'), ((1732, 1758), 'whoswho.who.match', 'who.match', (['name', '"""<NAME>r"""'], {}), "(name, '<NAME>r')\n", (1741, 1758), False, 'from whoswho import who, config\n'), ((1780, 1810), 'whoswho.who.match', 'who.match', (['name', '"""<NAME>, PhD"""'], {}), 
"(name, '<NAME>, PhD')\n", (1789, 1810), False, 'from whoswho import who, config\n'), ((1833, 1862), 'whoswho.who.match', 'who.match', (['name', '"""<NAME>, Sr"""'], {}), "(name, '<NAME>, Sr')\n", (1842, 1862), False, 'from whoswho import who, config\n'), ((1885, 1919), 'whoswho.who.match', 'who.match', (['name', '"""<NAME>, Sr, PhD"""'], {}), "(name, '<NAME>, Sr, PhD')\n", (1894, 1919), False, 'from whoswho import who, config\n'), ((1941, 1975), 'whoswho.who.match', 'who.match', (['name', '"""<NAME>, Jr, PhD"""'], {}), "(name, '<NAME>, Jr, PhD')\n", (1950, 1975), False, 'from whoswho import who, config\n'), ((2063, 2092), 'whoswho.who.match', 'who.match', (['name', '"""<NAME> Jnr"""'], {}), "(name, '<NAME> Jnr')\n", (2072, 2092), False, 'from whoswho import who, config\n'), ((2115, 2144), 'whoswho.who.match', 'who.match', (['name', '"""<NAME> Snr"""'], {}), "(name, '<NAME> Snr')\n", (2124, 2144), False, 'from whoswho import who, config\n'), ((2222, 2247), 'whoswho.who.match', 'who.match', (['name', '"""<NAME>"""'], {}), "(name, '<NAME>')\n", (2231, 2247), False, 'from whoswho import who, config\n'), ((2269, 2298), 'whoswho.who.match', 'who.match', (['name', '"""Sir <NAME>"""'], {}), "(name, 'Sir <NAME>')\n", (2278, 2298), False, 'from whoswho import who, config\n'), ((2320, 2349), 'whoswho.who.match', 'who.match', (['name', '"""Dr. <NAME>"""'], {}), "(name, 'Dr. <NAME>')\n", (2329, 2349), False, 'from whoswho import who, config\n'), ((2372, 2402), 'whoswho.who.match', 'who.match', (['name', '"""Mrs. <NAME>"""'], {}), "(name, 'Mrs. <NAME>')\n", (2381, 2402), False, 'from whoswho import who, config\n'), ((2486, 2511), 'whoswho.who.match', 'who.match', (['name', '"""<NAME>"""'], {}), "(name, '<NAME>')\n", (2495, 2511), False, 'from whoswho import who, config\n'), ((2533, 2558), 'whoswho.who.match', 'who.match', (['"""<NAME>"""', 'name'], {}), "('<NAME>', name)\n", (2542, 2558), False, 'from whoswho import who, config\n'), ((2581, 2606), 'whoswho.who.match', 'who.match', (['name', '"""<NAME>"""'], {}), "(name, '<NAME>')\n", (2590, 2606), False, 'from whoswho import who, config\n'), ((2629, 2654), 'whoswho.who.match', 'who.match', (['name', '"""<NAME>"""'], {}), "(name, '<NAME>')\n", (2638, 2654), False, 'from whoswho import who, config\n'), ((2677, 2737), 'whoswho.who.match', 'who.match', (['name', '"""<NAME>"""'], {'options': "{'check_nickname': False}"}), "(name, '<NAME>', options={'check_nickname': False})\n", (2686, 2737), False, 'from whoswho import who, config\n'), ((3064, 3095), 'whoswho.who.ratio', 'who.ratio', (['name', '"""attach<NAME>"""'], {}), "(name, 'attach<NAME>')\n", (3073, 3095), False, 'from whoswho import who, config\n'), ((3123, 3148), 'whoswho.who.ratio', 'who.ratio', (['name', '"""<NAME>"""'], {}), "(name, '<NAME>')\n", (3132, 3148), False, 'from whoswho import who, config\n'), ((3268, 3298), 'whoswho.who.ratio', 'who.ratio', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (3277, 3298), False, 'from whoswho import who, config\n'), ((3326, 3356), 'whoswho.who.ratio', 'who.ratio', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (3335, 3356), False, 'from whoswho import who, config\n'), ((3384, 3414), 'whoswho.who.ratio', 'who.ratio', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (3393, 3414), False, 'from whoswho import who, config\n'), ((3489, 3519), 'whoswho.who.ratio', 'who.ratio', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (3498, 3519), False, 'from whoswho import who, config\n'), ((3547, 3577), 
'whoswho.who.ratio', 'who.ratio', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (3556, 3577), False, 'from whoswho import who, config\n'), ((3721, 3750), 'whoswho.who.ratio', 'who.ratio', (['"""<NAME>"""', '"""<NAME>"""'], {}), "('<NAME>', '<NAME>')\n", (3730, 3750), False, 'from whoswho import who, config\n'), ((4397, 4427), 'whoswho.who.ratio', 'who.ratio', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (4406, 4427), False, 'from whoswho import who, config\n'), ((4725, 4750), 'whoswho.who.ratio', 'who.ratio', (['name', '"""<NAME>"""'], {}), "(name, '<NAME>')\n", (4734, 4750), False, 'from whoswho import who, config\n'), ((4778, 4804), 'whoswho.who.ratio', 'who.ratio', (['name', '"""<NAME>r"""'], {}), "(name, '<NAME>r')\n", (4787, 4804), False, 'from whoswho import who, config\n'), ((4832, 4862), 'whoswho.who.ratio', 'who.ratio', (['name', '"""<NAME>, PhD"""'], {}), "(name, '<NAME>, PhD')\n", (4841, 4862), False, 'from whoswho import who, config\n'), ((4890, 4919), 'whoswho.who.ratio', 'who.ratio', (['name', '"""<NAME>, Sr"""'], {}), "(name, '<NAME>, Sr')\n", (4899, 4919), False, 'from whoswho import who, config\n'), ((4942, 4976), 'whoswho.who.ratio', 'who.ratio', (['name', '"""<NAME>, Sr, PhD"""'], {}), "(name, '<NAME>, Sr, PhD')\n", (4951, 4976), False, 'from whoswho import who, config\n'), ((4999, 5033), 'whoswho.who.ratio', 'who.ratio', (['name', '"""<NAME>, Jr, PhD"""'], {}), "(name, '<NAME>, Jr, PhD')\n", (5008, 5033), False, 'from whoswho import who, config\n'), ((5101, 5130), 'whoswho.who.ratio', 'who.ratio', (['name', '"""<NAME>, Jr"""'], {}), "(name, '<NAME>, Jr')\n", (5110, 5130), False, 'from whoswho import who, config\n'), ((5153, 5178), 'whoswho.who.ratio', 'who.ratio', (['name', '"""<NAME>"""'], {}), "(name, '<NAME>')\n", (5162, 5178), False, 'from whoswho import who, config\n'), ((5269, 5298), 'whoswho.who.ratio', 'who.ratio', (['name', '"""<NAME> Jnr"""'], {}), "(name, '<NAME> Jnr')\n", (5278, 5298), False, 'from whoswho import who, config\n'), ((5326, 5355), 'whoswho.who.ratio', 'who.ratio', (['name', '"""<NAME> Snr"""'], {}), "(name, '<NAME> Snr')\n", (5335, 5355), False, 'from whoswho import who, config\n'), ((5434, 5459), 'whoswho.who.ratio', 'who.ratio', (['name', '"""<NAME>"""'], {}), "(name, '<NAME>')\n", (5443, 5459), False, 'from whoswho import who, config\n'), ((5487, 5516), 'whoswho.who.ratio', 'who.ratio', (['name', '"""Sir <NAME>"""'], {}), "(name, 'Sir <NAME>')\n", (5496, 5516), False, 'from whoswho import who, config\n'), ((5544, 5573), 'whoswho.who.ratio', 'who.ratio', (['name', '"""Dr. <NAME>"""'], {}), "(name, 'Dr. <NAME>')\n", (5553, 5573), False, 'from whoswho import who, config\n'), ((5601, 5631), 'whoswho.who.ratio', 'who.ratio', (['name', '"""Mrs. <NAME>"""'], {}), "(name, 'Mrs. <NAME>')\n", (5610, 5631), False, 'from whoswho import who, config\n'), ((5693, 5722), 'whoswho.who.ratio', 'who.ratio', (['name', '"""Dr. <NAME>"""'], {}), "(name, 'Dr. 
<NAME>')\n", (5702, 5722), False, 'from whoswho import who, config\n'), ((5745, 5770), 'whoswho.who.ratio', 'who.ratio', (['name', '"""<NAME>"""'], {}), "(name, '<NAME>')\n", (5754, 5770), False, 'from whoswho import who, config\n'), ((5864, 5889), 'whoswho.who.ratio', 'who.ratio', (['name', '"""<NAME>"""'], {}), "(name, '<NAME>')\n", (5873, 5889), False, 'from whoswho import who, config\n'), ((5917, 5942), 'whoswho.who.ratio', 'who.ratio', (['"""<NAME>"""', 'name'], {}), "('<NAME>', name)\n", (5926, 5942), False, 'from whoswho import who, config\n'), ((6289, 6321), 'whoswho.who.ratio', 'who.ratio', (['name', '"""<NAME>ebowitz"""'], {}), "(name, '<NAME>ebowitz')\n", (6298, 6321), False, 'from whoswho import who, config\n'), ((6344, 6379), 'whoswho.who.ratio', 'who.ratio', (['name', '"""<NAME>"""', '"""strict"""'], {}), "(name, '<NAME>', 'strict')\n", (6353, 6379), False, 'from whoswho import who, config\n'), ((3175, 3200), 'whoswho.who.ratio', 'who.ratio', (['name', '"""<NAME>"""'], {}), "(name, '<NAME>')\n", (3184, 3200), False, 'from whoswho import who, config\n'), ((3604, 3634), 'whoswho.who.ratio', 'who.ratio', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (3613, 3634), False, 'from whoswho import who, config\n'), ((3662, 3692), 'whoswho.who.ratio', 'who.ratio', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (3671, 3692), False, 'from whoswho import who, config\n'), ((3817, 3847), 'whoswho.who.ratio', 'who.ratio', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (3826, 3847), False, 'from whoswho import who, config\n'), ((3875, 3905), 'whoswho.who.ratio', 'who.ratio', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (3884, 3905), False, 'from whoswho import who, config\n'), ((3933, 3963), 'whoswho.who.ratio', 'who.ratio', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (3942, 3963), False, 'from whoswho import who, config\n'), ((3991, 4021), 'whoswho.who.ratio', 'who.ratio', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (4000, 4021), False, 'from whoswho import who, config\n'), ((4049, 4079), 'whoswho.who.ratio', 'who.ratio', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (4058, 4079), False, 'from whoswho import who, config\n'), ((4102, 4132), 'whoswho.who.ratio', 'who.ratio', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (4111, 4132), False, 'from whoswho import who, config\n'), ((4154, 4184), 'whoswho.who.ratio', 'who.ratio', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (4163, 4184), False, 'from whoswho import who, config\n'), ((4207, 4237), 'whoswho.who.ratio', 'who.ratio', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (4216, 4237), False, 'from whoswho import who, config\n'), ((4259, 4289), 'whoswho.who.ratio', 'who.ratio', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (4268, 4289), False, 'from whoswho import who, config\n'), ((4312, 4342), 'whoswho.who.ratio', 'who.ratio', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (4321, 4342), False, 'from whoswho import who, config\n'), ((4449, 4479), 'whoswho.who.ratio', 'who.ratio', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (4458, 4479), False, 'from whoswho import who, config\n'), ((4507, 4537), 'whoswho.who.ratio', 'who.ratio', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (4516, 4537), False, 'from whoswho import who, config\n'), ((4565, 4595), 'whoswho.who.ratio', 'who.ratio', 
(['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (4574, 4595), False, 'from whoswho import who, config\n'), ((4618, 4648), 'whoswho.who.ratio', 'who.ratio', (['self.name', '"""<NAME>"""'], {}), "(self.name, '<NAME>')\n", (4627, 4648), False, 'from whoswho import who, config\n'), ((5969, 5994), 'whoswho.who.ratio', 'who.ratio', (['name', '"""<NAME>"""'], {}), "(name, '<NAME>')\n", (5978, 5994), False, 'from whoswho import who, config\n'), ((6022, 6047), 'whoswho.who.ratio', 'who.ratio', (['name', '"""<NAME>"""'], {}), "(name, '<NAME>')\n", (6031, 6047), False, 'from whoswho import who, config\n'), ((6075, 6135), 'whoswho.who.ratio', 'who.ratio', (['name', '"""<NAME>"""'], {'options': "{'check_nickname': False}"}), "(name, '<NAME>', options={'check_nickname': False})\n", (6084, 6135), False, 'from whoswho import who, config\n'), ((6193, 6218), 'whoswho.who.ratio', 'who.ratio', (['name', '"""<NAME>"""'], {}), "(name, '<NAME>')\n", (6202, 6218), False, 'from whoswho import who, config\n'), ((6241, 6266), 'whoswho.who.ratio', 'who.ratio', (['name', '"""<NAME>"""'], {}), "(name, '<NAME>')\n", (6250, 6266), False, 'from whoswho import who, config\n')] |
import json
from flask import request
from flask_restful import Resource, abort, reqparse
from models.User import User
"""
POST Creates a new resource.
GET Retrieves a resource.
PUT Updates an existing resource.
DELETE Deletes a resource.
"""
class UserEndpoint(Resource):
def post(self):
j = request.get_json()
# need to ensure the required fields are in the json
if "name" not in j:
abort(422, message="name is not in json body")
else:
name = j["name"]
if "username" not in j:
abort(422, message="username not in json body")
else:
username = j["username"]
if "email" not in j:
abort(422, message="email not in json body")
else:
email = j["email"]
if "password" not in j:
abort(422, message="password not in json body")
else:
password = j["password"]
user_obj = User(
name=name,
username=username,
email=email,
password=password,
)
if "phone_number" in j:
user_obj.phone_number = j["phone_number"]
if "experience" in j:
user_obj.experience = j["experience"]
if "pictureURL" in j:
user_obj.pictureURL = j["pictureURL"]
d = user_obj.save()
return json.loads(d.to_json())
def put(self):
# TODO
pass
def delete(self):
# TODO
pass
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('username', required=True, type=str, help='The username of the User')
args = parser.parse_args()
try:
user = json.loads(User.objects.get(username=args['username']).to_json())
except Exception as e:
print(e)
abort(404, message="User doesnt exist: {} doesn't exist".format(args['username']))
return user
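# A minimal wiring sketch, assuming flask_restful's Api is set up elsewhere in
# the project; the '/users' route is a hypothetical choice, not taken from this
# file.
#
#     from flask import Flask
#     from flask_restful import Api
#
#     app = Flask(__name__)
#     api = Api(app)
#     api.add_resource(UserEndpoint, '/users')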
| [
"models.User.User.objects.get",
"flask_restful.reqparse.RequestParser",
"models.User.User",
"flask.request.get_json",
"flask_restful.abort"
]
| [((354, 372), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (370, 372), False, 'from flask import request\n'), ((1006, 1072), 'models.User.User', 'User', ([], {'name': 'name', 'username': 'username', 'email': 'email', 'password': 'password'}), '(name=name, username=username, email=email, password=password)\n', (1010, 1072), False, 'from models.User import User\n'), ((1586, 1610), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (1608, 1610), False, 'from flask_restful import Resource, abort, reqparse\n'), ((476, 522), 'flask_restful.abort', 'abort', (['(422)'], {'message': '"""name is not in json body"""'}), "(422, message='name is not in json body')\n", (481, 522), False, 'from flask_restful import Resource, abort, reqparse\n'), ((611, 658), 'flask_restful.abort', 'abort', (['(422)'], {'message': '"""username not in json body"""'}), "(422, message='username not in json body')\n", (616, 658), False, 'from flask_restful import Resource, abort, reqparse\n'), ((752, 796), 'flask_restful.abort', 'abort', (['(422)'], {'message': '"""email not in json body"""'}), "(422, message='email not in json body')\n", (757, 796), False, 'from flask_restful import Resource, abort, reqparse\n'), ((887, 934), 'flask_restful.abort', 'abort', (['(422)'], {'message': '"""password not in json body"""'}), "(422, message='password not in json body')\n", (892, 934), False, 'from flask_restful import Resource, abort, reqparse\n'), ((1789, 1832), 'models.User.User.objects.get', 'User.objects.get', ([], {'username': "args['username']"}), "(username=args['username'])\n", (1805, 1832), False, 'from models.User import User\n')] |
import datetime
from enum import Enum
from typing import Any, Callable, Dict, List, Optional, Type, TypeVar, Union, cast
import attr
import ciso8601
import structlog
from attr import converters
from . import enums
from .utils import as_json_dict, to_snake_case
logger = structlog.get_logger()
class Omitted(Enum):
"""Singleton written in a way mypy can parse.
See https://www.python.org/dev/peps/pep-0484/#support-for-singleton-types-in-unions
for more details.
"""
token = 0
OMITTED = Omitted.token
"""A singleton to differentiate between omitted vs explicit :obj:`None`."""
# helper type for entity_converter
U = TypeVar("U", bound="BaseAAEntity")
def entity_converter(
entity_cls, # type: Union[List[Type[U]], Type[U]]
):
# type: (...) -> Callable[[Union[Omitted, U, Dict]], Union[U, Omitted]]
"""
Convert a dictionary response into instances of the entity class.
Usage:
# disambiguates between type_a and type_b based on ``__typename``
converter = entity_converter([TypeA, TypeB])
my_instance = converter({'__typename': 'TypeB'})
XXX: mypy isn't expressive enough to annotate that the return type will be
one of the _specific_ arg types and not the most generic bound base. We'll
unfortunately have to ``# type: ignore`` on lines that call this.
Args:
entity_cls: the class (or classes) the value should be converted into.
If multiple classes are provided as options, ``__typename`` must be
            included in the response to support disambiguation.
Returns:
A callable that will convert a dictionary to the right entity type. If
more than one entity type is possible, that dictionary must have a
``__typename`` field present, which must match the ``TYPENAME`` on a
        provided entity. If none of the provided types match or if the fields
don't align with the provided entity, a ``TypeError`` is raised.
"""
entity_classes = [] # type: List[Type[U]]
if isinstance(entity_cls, (list, tuple)):
entity_classes = entity_cls
else:
entity_classes = [entity_cls]
def _entity_converter(val):
# type: (Union[Dict[str, Any], U, Omitted]) -> Union[U, Omitted]
        # check if it's explicitly been omitted (don't try to convert those)
if val is OMITTED:
return val
# check if it's already an entity
if any([isinstance(val, e_cls) for e_cls in entity_classes]):
return cast(U, val)
        # definitely a dict now, since we checked what it was earlier (the cast
        # is only present for type checking)
val = cast(Dict[str, Any], val)
# if there's more than one possibility for entity classes, pick the
# right one based on ``__typename``
if len(entity_classes) == 1:
# only one option, we don't need an explicit type
selected_cls = entity_classes[0] # type: Type[U]
else:
# a few different return types are expected
typename = val.pop("__typename", None)
if typename is None:
type_options = ", ".join([e.TYPENAME for e in entity_classes])
raise TypeError(
'No "__typename" present to disambiguate between possible '
"types: [{}]".format(type_options)
)
matching_typename = next(
(e for e in entity_classes if e.TYPENAME == typename), None
) # type: Optional[Type[U]]
if matching_typename is None:
raise TypeError('No entity found for type "{}"'.format(typename))
selected_cls = matching_typename
return selected_cls.from_api_response(val)
return _entity_converter
@attr.attrs(frozen=True)
class BaseAAEntity(object):
TYPENAME = "" # type: str
"""The name of the graphql type in the schema.
Used for disambiguation when there's more than one possible type being
returned.
"""
as_dict = as_json_dict
@classmethod
def from_api_response(cls, data):
# type: (Type[U], Dict[str, Any]) -> U
# If __typename is present, this asserts that it matches this class's
# expected typename
typename = data.pop("__typename", None)
if typename and typename != cls.TYPENAME:
raise TypeError(
(
"Given type \"{}\" doesn't match this entity's type: "
'"{}". Is {} the right entity for '
"this data?"
).format(typename, cls.TYPENAME, cls.__name__)
)
# convert top-level kwargs from camelCase to snake_case
kwargs = {to_snake_case(k): v for k, v in data.items()}
# mypy doesn't like that we're providing kwargs to a type whose init
# doesn't accept any kwargs (even though subclasses do have attributes)
return cls(**kwargs) # type: ignore
@attr.attrs(frozen=True)
class AccountError(BaseAAEntity):
TYPENAME = "AccountError"
code = attr.attrib(converter=enums.AccountErrorCode) # type: enums.AccountErrorCode
message = attr.attrib() # type: Optional[str]
retry_in = attr.attrib() # type: Optional[int]
@attr.attrs(frozen=True)
class Account(BaseAAEntity):
TYPENAME = "Account"
provider = attr.attrib(converter=enums.ProviderType) # type: enums.ProviderType
username = attr.attrib() # type: str
access_token = attr.attrib() # type: Optional[str]
access_token_expires_at = attr.attrib(
converter=converters.optional(ciso8601.parse_datetime),
) # type: Optional[datetime.datetime]
@attr.attrs(frozen=True)
class DeleteOperation(BaseAAEntity):
"""Base class for delete operation payloads.
These payloads don't actually have any field information in them. While
there's technically a "_" field in the schema, it's only a placeholder to
work around the language not supporting empty responses. It has no meaning
and will never have a meaningful value.
This class has no specific equivalent type, it's just a convenience type
for these entities.
"""
pass
@attr.attrs(frozen=True)
class DeleteOtherAccountKeysPayload(DeleteOperation):
TYPENAME = "DeleteOtherAccountKeysPayload"
@attr.attrs(frozen=True)
class DeleteAccountKeyPayload(DeleteOperation):
TYPENAME = "DeleteAccountKeyPayload"
@attr.attrs(frozen=True)
class DeleteAccountPayload(DeleteOperation):
TYPENAME = "DeleteAccountPayload"
@attr.attrs(frozen=True)
class AuthorizeAccountPayload(BaseAAEntity):
TYPENAME = "AuthorizeAccountPayload"
account = attr.attrib(
converter=entity_converter(Account), # type: ignore[misc]
) # type: Account
account_key = attr.attrib() # type: str
number_of_account_keys = attr.attrib() # type: int
@attr.attrs(frozen=True)
class VerifyAccountPayload(BaseAAEntity):
TYPENAME = "VerifyAccountPayload"
account = attr.attrib(
converter=entity_converter(Account), # type: ignore[misc]
) # type: Account
@attr.attrs(frozen=True)
class Query(BaseAAEntity):
account = attr.attrib(
default=OMITTED,
converter=entity_converter([Account, AccountError]), # type: ignore[misc]
) # type: Union[Omitted, Account, AccountError]
@attr.attrs(frozen=True)
class Mutation(BaseAAEntity):
    # mypy and the attrs plugin don't like the `Omitted` default + converter
    # stuff
authorize_account = attr.attrib( # type: ignore
default=OMITTED,
        # ignore unsupported converter warning
converter=cast( # type: ignore[misc]
Union[Omitted, AuthorizeAccountPayload, AccountError],
entity_converter([AuthorizeAccountPayload, AccountError]),
),
) # type: Union[Omitted, AuthorizeAccountPayload, AccountError]
verify_account = attr.attrib( # type: ignore
default=OMITTED,
converter=cast( # type: ignore[misc]
Union[Omitted, VerifyAccountPayload, AccountError],
entity_converter([VerifyAccountPayload, AccountError]),
),
) # type: Union[Omitted, VerifyAccountPayload, AccountError]
delete_account = attr.attrib( # type: ignore
default=OMITTED,
converter=cast( # type: ignore[misc]
Union[Omitted, DeleteAccountPayload, AccountError],
entity_converter([DeleteAccountPayload, AccountError]),
),
) # type: Union[Omitted, DeleteAccountPayload, AccountError]
delete_account_key = attr.attrib( # type: ignore
default=OMITTED,
converter=cast( # type: ignore[misc]
Union[Omitted, DeleteAccountKeyPayload, AccountError],
entity_converter([DeleteAccountKeyPayload, AccountError]),
),
) # type: Union[Omitted, DeleteAccountKeyPayload, AccountError]
delete_other_account_keys = attr.attrib( # type: ignore
default=OMITTED,
        # ignore unsupported converter warning
converter=cast( # type: ignore[misc]
Union[Omitted, DeleteOtherAccountKeysPayload, AccountError],
entity_converter([DeleteOtherAccountKeysPayload, AccountError]),
),
) # type: Union[Omitted, DeleteOtherAccountKeysPayload, AccountError]
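# Note (added comment): the cast(...) wrappers in Mutation above exist only to
# quiet mypy about converters whose return type is a Union; typing.cast() is a
# no-op at runtime, so the converter built by entity_converter([...]) still does
# all of the actual conversion work.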
| [
"structlog.get_logger",
"attr.attrs",
"attr.converters.optional",
"attr.attrib",
"typing.cast",
"typing.TypeVar"
]
| [((273, 295), 'structlog.get_logger', 'structlog.get_logger', ([], {}), '()\n', (293, 295), False, 'import structlog\n'), ((645, 679), 'typing.TypeVar', 'TypeVar', (['"""U"""'], {'bound': '"""BaseAAEntity"""'}), "('U', bound='BaseAAEntity')\n", (652, 679), False, 'from typing import Any, Callable, Dict, List, Optional, Type, TypeVar, Union, cast\n'), ((3791, 3814), 'attr.attrs', 'attr.attrs', ([], {'frozen': '(True)'}), '(frozen=True)\n', (3801, 3814), False, 'import attr\n'), ((4981, 5004), 'attr.attrs', 'attr.attrs', ([], {'frozen': '(True)'}), '(frozen=True)\n', (4991, 5004), False, 'import attr\n'), ((5265, 5288), 'attr.attrs', 'attr.attrs', ([], {'frozen': '(True)'}), '(frozen=True)\n', (5275, 5288), False, 'import attr\n'), ((5680, 5703), 'attr.attrs', 'attr.attrs', ([], {'frozen': '(True)'}), '(frozen=True)\n', (5690, 5703), False, 'import attr\n'), ((6191, 6214), 'attr.attrs', 'attr.attrs', ([], {'frozen': '(True)'}), '(frozen=True)\n', (6201, 6214), False, 'import attr\n'), ((6319, 6342), 'attr.attrs', 'attr.attrs', ([], {'frozen': '(True)'}), '(frozen=True)\n', (6329, 6342), False, 'import attr\n'), ((6435, 6458), 'attr.attrs', 'attr.attrs', ([], {'frozen': '(True)'}), '(frozen=True)\n', (6445, 6458), False, 'import attr\n'), ((6545, 6568), 'attr.attrs', 'attr.attrs', ([], {'frozen': '(True)'}), '(frozen=True)\n', (6555, 6568), False, 'import attr\n'), ((6877, 6900), 'attr.attrs', 'attr.attrs', ([], {'frozen': '(True)'}), '(frozen=True)\n', (6887, 6900), False, 'import attr\n'), ((7102, 7125), 'attr.attrs', 'attr.attrs', ([], {'frozen': '(True)'}), '(frozen=True)\n', (7112, 7125), False, 'import attr\n'), ((7344, 7367), 'attr.attrs', 'attr.attrs', ([], {'frozen': '(True)'}), '(frozen=True)\n', (7354, 7367), False, 'import attr\n'), ((5081, 5126), 'attr.attrib', 'attr.attrib', ([], {'converter': 'enums.AccountErrorCode'}), '(converter=enums.AccountErrorCode)\n', (5092, 5126), False, 'import attr\n'), ((5173, 5186), 'attr.attrib', 'attr.attrib', ([], {}), '()\n', (5184, 5186), False, 'import attr\n'), ((5225, 5238), 'attr.attrib', 'attr.attrib', ([], {}), '()\n', (5236, 5238), False, 'import attr\n'), ((5359, 5400), 'attr.attrib', 'attr.attrib', ([], {'converter': 'enums.ProviderType'}), '(converter=enums.ProviderType)\n', (5370, 5400), False, 'import attr\n'), ((5444, 5457), 'attr.attrib', 'attr.attrib', ([], {}), '()\n', (5455, 5457), False, 'import attr\n'), ((5490, 5503), 'attr.attrib', 'attr.attrib', ([], {}), '()\n', (5501, 5503), False, 'import attr\n'), ((6791, 6804), 'attr.attrib', 'attr.attrib', ([], {}), '()\n', (6802, 6804), False, 'import attr\n'), ((6847, 6860), 'attr.attrib', 'attr.attrib', ([], {}), '()\n', (6858, 6860), False, 'import attr\n'), ((2653, 2678), 'typing.cast', 'cast', (['Dict[str, Any]', 'val'], {}), '(Dict[str, Any], val)\n', (2657, 2678), False, 'from typing import Any, Callable, Dict, List, Optional, Type, TypeVar, Union, cast\n'), ((2518, 2530), 'typing.cast', 'cast', (['U', 'val'], {}), '(U, val)\n', (2522, 2530), False, 'from typing import Any, Callable, Dict, List, Optional, Type, TypeVar, Union, cast\n'), ((5588, 5632), 'attr.converters.optional', 'converters.optional', (['ciso8601.parse_datetime'], {}), '(ciso8601.parse_datetime)\n', (5607, 5632), False, 'from attr import converters\n')] |
from datetime import datetime
from jsonschema_serialize_fork import NO_DEFAULT
from pyramid.security import effective_principals
from pyramid.threadlocal import get_current_request
from string import (
digits,
ascii_uppercase,
)
import random
import uuid
from snovault.schema_utils import server_default
ACCESSION_FACTORY = __name__ + ':accession_factory'
def includeme(config):
from pyramid.path import DottedNameResolver
accession_factory = config.registry.settings.get('accession_factory')
if accession_factory:
factory = DottedNameResolver().resolve(accession_factory)
else:
factory = enc_accession
config.registry[ACCESSION_FACTORY] = factory
@server_default
def userid(instance, subschema):
request = get_current_request()
principals = effective_principals(request)
for principal in principals:
if principal.startswith('userid.'):
return principal[7:]
return NO_DEFAULT
@server_default
def now(instance, subschema):
# from jsonschema_serialize_fork date-time format requires a timezone
return datetime.utcnow().isoformat() + '+00:00'
@server_default
def uuid4(instance, subschema):
return str(uuid.uuid4())
@server_default
def accession(instance, subschema):
if 'external_accession' in instance:
return NO_DEFAULT
request = get_current_request()
factory = request.registry[ACCESSION_FACTORY]
    # With 17,576,000 possible combinations (3 digits followed by 3 uppercase letters)
ATTEMPTS = 10
for attempt in range(ATTEMPTS):
new_accession = factory(subschema['accessionType'])
if new_accession in request.root:
continue
return new_accession
raise AssertionError("Free accession not found in %d attempts" % ATTEMPTS)
ENC_ACCESSION_FORMAT = (digits, digits, digits, ascii_uppercase, ascii_uppercase, ascii_uppercase)
def enc_accession(accession_type):
random_part = ''.join(random.choice(s) for s in ENC_ACCESSION_FORMAT)
return 'D' + accession_type + random_part
TEST_ACCESSION_FORMAT = (digits, ) * 6
def test_accession(accession_type):
""" Test accessions are generated on test.encodedcc.org
"""
random_part = ''.join(random.choice(s) for s in TEST_ACCESSION_FORMAT)
return 'D' + accession_type + random_part
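# Illustrative sketch (added example, not part of the original module):
# generate a few sample accessions locally. The 'SR' accession type code is a
# made-up value used only for demonstration.
def _demo_accessions(count=3):
    return [enc_accession('SR') for _ in range(count)]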
| [
"pyramid.threadlocal.get_current_request",
"random.choice",
"datetime.datetime.utcnow",
"pyramid.security.effective_principals",
"uuid.uuid4",
"pyramid.path.DottedNameResolver"
]
| [((766, 787), 'pyramid.threadlocal.get_current_request', 'get_current_request', ([], {}), '()\n', (785, 787), False, 'from pyramid.threadlocal import get_current_request\n'), ((805, 834), 'pyramid.security.effective_principals', 'effective_principals', (['request'], {}), '(request)\n', (825, 834), False, 'from pyramid.security import effective_principals\n'), ((1355, 1376), 'pyramid.threadlocal.get_current_request', 'get_current_request', ([], {}), '()\n', (1374, 1376), False, 'from pyramid.threadlocal import get_current_request\n'), ((1206, 1218), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1216, 1218), False, 'import uuid\n'), ((1906, 1922), 'random.choice', 'random.choice', (['s'], {}), '(s)\n', (1919, 1922), False, 'import random\n'), ((2173, 2189), 'random.choice', 'random.choice', (['s'], {}), '(s)\n', (2186, 2189), False, 'import random\n'), ((562, 582), 'pyramid.path.DottedNameResolver', 'DottedNameResolver', ([], {}), '()\n', (580, 582), False, 'from pyramid.path import DottedNameResolver\n'), ((1100, 1117), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1115, 1117), False, 'from datetime import datetime\n')] |
import unittest
import os
import json
import requests
import requests_mock
from ioapi import api_url, IOService, AuthorizationError, UnexpectedResponseCodeError
class APIAccountStateTestCase(unittest.TestCase):
def setUp(self):
self.service = IOService()
@requests_mock.mock()
def test_account_state_without_auth(self, mock):
data = self._read_mock_response('account_state_without_auth')
self._setup_response(mock, data, 401)
with self.assertRaises(AuthorizationError):
self.service.get_account_state()
@requests_mock.mock()
def test_account_state_auth_not_ok(self, mock):
data = self._read_mock_response('account_state_not_ok')
for code in range(201, 600):
# skip 401 status code (unauthorized)
if code == 401:
continue
self._setup_response(mock, data, code)
with self.assertRaises(UnexpectedResponseCodeError) as cm:
self.service.get_account_state()
self.assertEqual(cm.exception.status_code, code)
@requests_mock.mock()
def test_account_state(self, mock):
data = self._read_mock_response('account_state')
self.service = IOService()
self._setup_response(mock, data)
self.assertEqual(self.service.get_account_state(), data)
self.fail("auth missing")
def _read_mock_response(self, name):
path = os.path.join(os.path.dirname(__file__), name + '.json')
with open(path, 'r') as file:
data = json.loads(file.read())
return data
def _setup_response(self, mock, response, code=None):
if code is None:
code = requests.codes.ok
mock.get(
self.service.api + api_url.URL_ACCOUNT_STATE,
json=response,
status_code=code)
| [
"os.path.dirname",
"requests_mock.mock",
"ioapi.IOService"
]
| [((276, 296), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (294, 296), False, 'import requests_mock\n'), ((570, 590), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (588, 590), False, 'import requests_mock\n'), ((1088, 1108), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (1106, 1108), False, 'import requests_mock\n'), ((258, 269), 'ioapi.IOService', 'IOService', ([], {}), '()\n', (267, 269), False, 'from ioapi import api_url, IOService, AuthorizationError, UnexpectedResponseCodeError\n'), ((1229, 1240), 'ioapi.IOService', 'IOService', ([], {}), '()\n', (1238, 1240), False, 'from ioapi import api_url, IOService, AuthorizationError, UnexpectedResponseCodeError\n'), ((1452, 1477), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1467, 1477), False, 'import os\n')] |
'''
fake posts to bootstrap a development database. Put any interesting cases
useful for development in here.
'''
from datetime import datetime
POST_DATA_1 = [
{
"created" : datetime(2015, 10, 1),
"published": datetime(2015, 10, 1),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "First Post",
"slug": "",
"text": "a bunch of words #foo #bar",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 10, 2),
"published": datetime(2015, 10, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": False,
"status": "published",
"title": "Second Post",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 10, 2),
"published": datetime(2015, 10, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": False,
"status": "draft",
"title": "Third Post",
"slug": "",
"text": "This is a #draft #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 10, 2),
"published": datetime(2015, 10, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "draft",
"title": "Fourth Post",
"slug": "",
"text": "This is a #draft #post",
"tags": [],
"type": "Post"
},
]
POST_DATA_2 = [
{
"created" : datetime(2015, 3, 2),
"published": datetime(2015, 3, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 1",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 4, 2),
"published": datetime(2015, 4, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 2",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 5, 2),
"published": datetime(2015, 5, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 3",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 5, 2),
"published": datetime(2015, 5, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 4",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 6, 2),
"published": datetime(2015, 6, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 5",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 6, 2),
"published": datetime(2015, 6, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 6",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 6, 2),
"published": datetime(2015, 6, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 7",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 7, 2),
"published": datetime(2015, 7, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 8",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 8, 2),
"published": datetime(2015, 8, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 9",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 9, 2),
"published": datetime(2015, 9, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 10",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 10, 2),
"published": datetime(2015, 10, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 11",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
] | [
"datetime.datetime"
]
| [((197, 218), 'datetime.datetime', 'datetime', (['(2015)', '(10)', '(1)'], {}), '(2015, 10, 1)\n', (205, 218), False, 'from datetime import datetime\n'), ((241, 262), 'datetime.datetime', 'datetime', (['(2015)', '(10)', '(1)'], {}), '(2015, 10, 1)\n', (249, 262), False, 'from datetime import datetime\n'), ((285, 306), 'datetime.datetime', 'datetime', (['(2015)', '(10)', '(1)'], {}), '(2015, 10, 1)\n', (293, 306), False, 'from datetime import datetime\n'), ((625, 646), 'datetime.datetime', 'datetime', (['(2015)', '(10)', '(2)'], {}), '(2015, 10, 2)\n', (633, 646), False, 'from datetime import datetime\n'), ((669, 690), 'datetime.datetime', 'datetime', (['(2015)', '(10)', '(2)'], {}), '(2015, 10, 2)\n', (677, 690), False, 'from datetime import datetime\n'), ((713, 734), 'datetime.datetime', 'datetime', (['(2015)', '(10)', '(1)'], {}), '(2015, 10, 1)\n', (721, 734), False, 'from datetime import datetime\n'), ((1052, 1073), 'datetime.datetime', 'datetime', (['(2015)', '(10)', '(2)'], {}), '(2015, 10, 2)\n', (1060, 1073), False, 'from datetime import datetime\n'), ((1096, 1117), 'datetime.datetime', 'datetime', (['(2015)', '(10)', '(2)'], {}), '(2015, 10, 2)\n', (1104, 1117), False, 'from datetime import datetime\n'), ((1140, 1161), 'datetime.datetime', 'datetime', (['(2015)', '(10)', '(1)'], {}), '(2015, 10, 1)\n', (1148, 1161), False, 'from datetime import datetime\n'), ((1473, 1494), 'datetime.datetime', 'datetime', (['(2015)', '(10)', '(2)'], {}), '(2015, 10, 2)\n', (1481, 1494), False, 'from datetime import datetime\n'), ((1517, 1538), 'datetime.datetime', 'datetime', (['(2015)', '(10)', '(2)'], {}), '(2015, 10, 2)\n', (1525, 1538), False, 'from datetime import datetime\n'), ((1561, 1582), 'datetime.datetime', 'datetime', (['(2015)', '(10)', '(1)'], {}), '(2015, 10, 1)\n', (1569, 1582), False, 'from datetime import datetime\n'), ((1916, 1936), 'datetime.datetime', 'datetime', (['(2015)', '(3)', '(2)'], {}), '(2015, 3, 2)\n', (1924, 1936), False, 'from datetime import datetime\n'), ((1959, 1979), 'datetime.datetime', 'datetime', (['(2015)', '(3)', '(2)'], {}), '(2015, 3, 2)\n', (1967, 1979), False, 'from datetime import datetime\n'), ((2002, 2023), 'datetime.datetime', 'datetime', (['(2015)', '(10)', '(1)'], {}), '(2015, 10, 1)\n', (2010, 2023), False, 'from datetime import datetime\n'), ((2338, 2358), 'datetime.datetime', 'datetime', (['(2015)', '(4)', '(2)'], {}), '(2015, 4, 2)\n', (2346, 2358), False, 'from datetime import datetime\n'), ((2381, 2401), 'datetime.datetime', 'datetime', (['(2015)', '(4)', '(2)'], {}), '(2015, 4, 2)\n', (2389, 2401), False, 'from datetime import datetime\n'), ((2424, 2445), 'datetime.datetime', 'datetime', (['(2015)', '(10)', '(1)'], {}), '(2015, 10, 1)\n', (2432, 2445), False, 'from datetime import datetime\n'), ((2758, 2778), 'datetime.datetime', 'datetime', (['(2015)', '(5)', '(2)'], {}), '(2015, 5, 2)\n', (2766, 2778), False, 'from datetime import datetime\n'), ((2801, 2821), 'datetime.datetime', 'datetime', (['(2015)', '(5)', '(2)'], {}), '(2015, 5, 2)\n', (2809, 2821), False, 'from datetime import datetime\n'), ((2844, 2865), 'datetime.datetime', 'datetime', (['(2015)', '(10)', '(1)'], {}), '(2015, 10, 1)\n', (2852, 2865), False, 'from datetime import datetime\n'), ((3178, 3198), 'datetime.datetime', 'datetime', (['(2015)', '(5)', '(2)'], {}), '(2015, 5, 2)\n', (3186, 3198), False, 'from datetime import datetime\n'), ((3221, 3241), 'datetime.datetime', 'datetime', (['(2015)', '(5)', '(2)'], {}), '(2015, 5, 2)\n', (3229, 3241), False, 'from datetime 
import datetime\n'), ((3264, 3285), 'datetime.datetime', 'datetime', (['(2015)', '(10)', '(1)'], {}), '(2015, 10, 1)\n', (3272, 3285), False, 'from datetime import datetime\n'), ((3598, 3618), 'datetime.datetime', 'datetime', (['(2015)', '(6)', '(2)'], {}), '(2015, 6, 2)\n', (3606, 3618), False, 'from datetime import datetime\n'), ((3641, 3661), 'datetime.datetime', 'datetime', (['(2015)', '(6)', '(2)'], {}), '(2015, 6, 2)\n', (3649, 3661), False, 'from datetime import datetime\n'), ((3684, 3705), 'datetime.datetime', 'datetime', (['(2015)', '(10)', '(1)'], {}), '(2015, 10, 1)\n', (3692, 3705), False, 'from datetime import datetime\n'), ((4018, 4038), 'datetime.datetime', 'datetime', (['(2015)', '(6)', '(2)'], {}), '(2015, 6, 2)\n', (4026, 4038), False, 'from datetime import datetime\n'), ((4061, 4081), 'datetime.datetime', 'datetime', (['(2015)', '(6)', '(2)'], {}), '(2015, 6, 2)\n', (4069, 4081), False, 'from datetime import datetime\n'), ((4104, 4125), 'datetime.datetime', 'datetime', (['(2015)', '(10)', '(1)'], {}), '(2015, 10, 1)\n', (4112, 4125), False, 'from datetime import datetime\n'), ((4438, 4458), 'datetime.datetime', 'datetime', (['(2015)', '(6)', '(2)'], {}), '(2015, 6, 2)\n', (4446, 4458), False, 'from datetime import datetime\n'), ((4481, 4501), 'datetime.datetime', 'datetime', (['(2015)', '(6)', '(2)'], {}), '(2015, 6, 2)\n', (4489, 4501), False, 'from datetime import datetime\n'), ((4524, 4545), 'datetime.datetime', 'datetime', (['(2015)', '(10)', '(1)'], {}), '(2015, 10, 1)\n', (4532, 4545), False, 'from datetime import datetime\n'), ((4858, 4878), 'datetime.datetime', 'datetime', (['(2015)', '(7)', '(2)'], {}), '(2015, 7, 2)\n', (4866, 4878), False, 'from datetime import datetime\n'), ((4901, 4921), 'datetime.datetime', 'datetime', (['(2015)', '(7)', '(2)'], {}), '(2015, 7, 2)\n', (4909, 4921), False, 'from datetime import datetime\n'), ((4944, 4965), 'datetime.datetime', 'datetime', (['(2015)', '(10)', '(1)'], {}), '(2015, 10, 1)\n', (4952, 4965), False, 'from datetime import datetime\n'), ((5278, 5298), 'datetime.datetime', 'datetime', (['(2015)', '(8)', '(2)'], {}), '(2015, 8, 2)\n', (5286, 5298), False, 'from datetime import datetime\n'), ((5321, 5341), 'datetime.datetime', 'datetime', (['(2015)', '(8)', '(2)'], {}), '(2015, 8, 2)\n', (5329, 5341), False, 'from datetime import datetime\n'), ((5364, 5385), 'datetime.datetime', 'datetime', (['(2015)', '(10)', '(1)'], {}), '(2015, 10, 1)\n', (5372, 5385), False, 'from datetime import datetime\n'), ((5698, 5718), 'datetime.datetime', 'datetime', (['(2015)', '(9)', '(2)'], {}), '(2015, 9, 2)\n', (5706, 5718), False, 'from datetime import datetime\n'), ((5741, 5761), 'datetime.datetime', 'datetime', (['(2015)', '(9)', '(2)'], {}), '(2015, 9, 2)\n', (5749, 5761), False, 'from datetime import datetime\n'), ((5784, 5805), 'datetime.datetime', 'datetime', (['(2015)', '(10)', '(1)'], {}), '(2015, 10, 1)\n', (5792, 5805), False, 'from datetime import datetime\n'), ((6119, 6140), 'datetime.datetime', 'datetime', (['(2015)', '(10)', '(2)'], {}), '(2015, 10, 2)\n', (6127, 6140), False, 'from datetime import datetime\n'), ((6163, 6184), 'datetime.datetime', 'datetime', (['(2015)', '(10)', '(2)'], {}), '(2015, 10, 2)\n', (6171, 6184), False, 'from datetime import datetime\n'), ((6207, 6228), 'datetime.datetime', 'datetime', (['(2015)', '(10)', '(1)'], {}), '(2015, 10, 1)\n', (6215, 6228), False, 'from datetime import datetime\n')] |
from __future__ import absolute_import
from django.shortcuts import render
import simplejson
import datetime
from django.http import HttpResponse
class GenericItemBase(object):
ITEM_ATTRS = []
def __init__(self, identifier):
self.identifier = identifier
def jsonify(self, value):
"""
Method to convert non JSON serializable objects into
an equivalent JSON serializable form.
"""
return value
def json(self):
raise NotImplementedError
def render_json(self):
raise NotImplementedError
def render_html(self):
raise NotImplementedError
class GenericItem(GenericItemBase):
TEMPLATE = 'customer_support/item.html'
def __init__(self, *args, **kwargs):
super(GenericItem, self).__init__(*args, **kwargs)
self._item = {}
def get_item(self, identifier):
raise NotImplementedError
def set_item(self, data):
self._item = {}
for key, value in data.items():
if key in self.ITEM_ATTRS:
self._item[key] = value
def json(self):
item = {}
for attr_name in self.ITEM_ATTRS:
attr = self.jsonify(self._item[attr_name])
            if isinstance(attr, datetime.datetime):  # datetime alone is the module, not the class
attr = attr.strftime('%Y-%m-%d %H:%M')
item[attr_name] = attr
return simplejson.dumps(item)
def render_json(self):
return HttpResponse(
self.json(), mimetype='application/json')
def render_html(self):
return render(self.TEMPLATE, {'item': self._item})
class GenericItems(GenericItemBase):
TEMPLATE = 'customer_support/items.html'
def __init__(self, *args, **kwargs):
        super(GenericItems, self).__init__(*args, **kwargs)
self._items = []
def get_items(self, for_entity):
raise NotImplementedError
def set_items(self, items):
self._items = items
def json(self):
items = []
for item in self._items:
item_dict = {}
for attr_name in self.ITEM_ATTRS:
attr = self.jsonify(item[attr_name])
                if isinstance(attr, datetime.datetime):  # datetime alone is the module, not the class
attr = attr.strftime('%Y-%m-%d %H:%M')
item_dict[attr_name] = attr
            items.append(item_dict)  # append the serializable dict, not the raw item
return simplejson.dumps(items)
def render_json(self):
return HttpResponse(
self.json(), mimetype='application/json')
def render_html(self):
return render(self.TEMPLATE, {'items': self._items})
class GenericActions(object):
def __init__(self, item_id):
self.item_id = item_id
self.actions = []
def get_actions_for_item(self):
raise NotImplementedError
def json(self):
return simplejson.dumps(self.actions)
def render_json(self):
return HttpResponse(self.json(), mimetype='application/json')
def render_html(self):
pass
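# Illustrative sketch (added example, not part of the original module): a minimal
# concrete subclass showing how GenericItem is meant to be specialized. The
# attribute names and the fabricated record below are assumptions for demonstration.
class ExampleOrderItem(GenericItem):
    ITEM_ATTRS = ['id', 'status', 'created']
    def get_item(self, identifier):
        # A real subclass would look the record up (e.g. via a Django model);
        # here we simply fabricate one so json()/render_json() can be exercised.
        self.set_item({'id': identifier,
                       'status': 'open',
                       'created': datetime.datetime.now()})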
| [
"simplejson.dumps",
"django.shortcuts.render"
]
| [((1371, 1393), 'simplejson.dumps', 'simplejson.dumps', (['item'], {}), '(item)\n', (1387, 1393), False, 'import simplejson\n'), ((1548, 1591), 'django.shortcuts.render', 'render', (['self.TEMPLATE', "{'item': self._item}"], {}), "(self.TEMPLATE, {'item': self._item})\n", (1554, 1591), False, 'from django.shortcuts import render\n'), ((2330, 2353), 'simplejson.dumps', 'simplejson.dumps', (['items'], {}), '(items)\n', (2346, 2353), False, 'import simplejson\n'), ((2508, 2553), 'django.shortcuts.render', 'render', (['self.TEMPLATE', "{'items': self._items}"], {}), "(self.TEMPLATE, {'items': self._items})\n", (2514, 2553), False, 'from django.shortcuts import render\n'), ((2784, 2814), 'simplejson.dumps', 'simplejson.dumps', (['self.actions'], {}), '(self.actions)\n', (2800, 2814), False, 'import simplejson\n')] |
import math
import warnings
import numpy as np
import pandas as pd
from scipy.optimize import minimize
import scipy.stats
from scipy.stats import norm # edit
from scipy.special import log_ndtr
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, mean_absolute_error
def split_left_right_censored(x, y, cens):
counts = cens.value_counts()
if -1 not in counts and 1 not in counts:
warnings.warn("No censored observations; use regression methods for uncensored data")
xs = []
ys = []
for value in [-1, 0, 1]:
if value in counts:
split = cens == value
y_split = np.squeeze(y[split].values)
x_split = x[split].values
else:
y_split, x_split = None, None
xs.append(x_split)
ys.append(y_split)
return xs, ys
def tobit_neg_log_likelihood(xs, ys, params):
x_left, x_mid, x_right = xs
y_left, y_mid, y_right = ys
b = params[:-1]
# s = math.exp(params[-1])
s = params[-1]
to_cat = []
cens = False
if y_left is not None:
cens = True
left = (y_left - np.dot(x_left, b))
to_cat.append(left)
if y_right is not None:
cens = True
right = (np.dot(x_right, b) - y_right)
to_cat.append(right)
if cens:
concat_stats = np.concatenate(to_cat, axis=0) / s
log_cum_norm = scipy.stats.norm.logcdf(concat_stats) # log_ndtr(concat_stats)
cens_sum = log_cum_norm.sum()
else:
cens_sum = 0
if y_mid is not None:
mid_stats = (y_mid - np.dot(x_mid, b)) / s
mid = scipy.stats.norm.logpdf(mid_stats) - math.log(max(np.finfo('float').resolution, s))
mid_sum = mid.sum()
else:
mid_sum = 0
loglik = cens_sum + mid_sum
return - loglik
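# Note (added comment): the objective above is the standard Tobit log-likelihood,
# summing log Phi((y - Xb)/s) over left-censored rows, log Phi((Xb - y)/s) over
# right-censored rows, and log phi((y - Xb)/s) - log s over uncensored rows;
# tobit_neg_log_likelihood returns its negative and tobit_neg_log_likelihood_der
# (below) the matching gradient, with the sigma component converted from
# d/d(log s) to d/ds by the chain rule.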
def tobit_neg_log_likelihood_der(xs, ys, params):
x_left, x_mid, x_right = xs
y_left, y_mid, y_right = ys
b = params[:-1]
# s = math.exp(params[-1]) # in censReg, not using chain rule as below; they optimize in terms of log(s)
s = params[-1]
beta_jac = np.zeros(len(b))
sigma_jac = 0
if y_left is not None:
left_stats = (y_left - np.dot(x_left, b)) / s
l_pdf = scipy.stats.norm.logpdf(left_stats)
l_cdf = log_ndtr(left_stats)
left_frac = np.exp(l_pdf - l_cdf)
beta_left = np.dot(left_frac, x_left / s)
beta_jac -= beta_left
left_sigma = np.dot(left_frac, left_stats)
sigma_jac -= left_sigma
if y_right is not None:
right_stats = (np.dot(x_right, b) - y_right) / s
r_pdf = scipy.stats.norm.logpdf(right_stats)
r_cdf = log_ndtr(right_stats)
right_frac = np.exp(r_pdf - r_cdf)
beta_right = np.dot(right_frac, x_right / s)
beta_jac += beta_right
right_sigma = np.dot(right_frac, right_stats)
sigma_jac -= right_sigma
if y_mid is not None:
mid_stats = (y_mid - np.dot(x_mid, b)) / s
beta_mid = np.dot(mid_stats, x_mid / s)
beta_jac += beta_mid
mid_sigma = (np.square(mid_stats) - 1).sum()
sigma_jac += mid_sigma
combo_jac = np.append(beta_jac, sigma_jac / s) # by chain rule, since the expression above is dloglik/dlogsigma
return -combo_jac
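# Minimal usage sketch (added example, not part of the original module): fit the
# TobitModel defined below on synthetic left-censored data. All names and values
# here are assumptions for illustration; call _tobit_demo() explicitly to run it.
def _tobit_demo(n=200, seed=0):
    rng = np.random.RandomState(seed)
    x = pd.DataFrame({'x1': rng.normal(size=n)})
    latent = 1.0 + 2.0 * x['x1'] + rng.normal(scale=0.5, size=n)
    cens = pd.Series(np.where(latent < 0.0, -1, 0))   # -1 marks left-censored rows
    y = pd.Series(np.where(latent < 0.0, 0.0, latent))  # censored values clipped at 0
    return TobitModel().fit(x, y, cens)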
class TobitModel:
def __init__(self, fit_intercept=True):
self.fit_intercept = fit_intercept
self.ols_coef_ = None
self.ols_intercept = None
self.coef_ = None
self.intercept_ = None
self.sigma_ = None
def fit(self, x, y, cens, verbose=False):
"""
Fit a maximum-likelihood Tobit regression
:param x: Pandas DataFrame (n_samples, n_features): Data
:param y: Pandas Series (n_samples,): Target
:param cens: Pandas Series (n_samples,): -1 indicates left-censored samples, 0 for uncensored, 1 for right-censored
:param verbose: boolean, show info from minimization
:return:
"""
x_copy = x.copy()
if self.fit_intercept:
x_copy.insert(0, 'intercept', 1.0)
else:
            # pandas DataFrames have no .scale(); center the columns to mirror
            # scale(with_mean=True, with_std=False).
            x_copy = x_copy - x_copy.mean()
init_reg = LinearRegression(fit_intercept=False).fit(x_copy, y)
b0 = init_reg.coef_
y_pred = init_reg.predict(x_copy)
resid = y - y_pred
resid_var = np.var(resid)
s0 = np.sqrt(resid_var)
params0 = np.append(b0, s0)
xs, ys = split_left_right_censored(x_copy, y, cens)
result = minimize(lambda params: tobit_neg_log_likelihood(xs, ys, params), params0, method='BFGS',
jac=lambda params: tobit_neg_log_likelihood_der(xs, ys, params), options={'disp': verbose})
if verbose:
print(result)
self.ols_coef_ = b0[1:]
self.ols_intercept = b0[0]
if self.fit_intercept:
self.intercept_ = result.x[1]
self.coef_ = result.x[1:-1]
else:
self.coef_ = result.x[:-1]
self.intercept_ = 0
self.sigma_ = result.x[-1]
return self
def predict(self, x):
return self.intercept_ + np.dot(x, self.coef_)
def score(self, x, y, scoring_function=mean_absolute_error):
        y_pred = self.predict(x)  # include the fitted intercept in the prediction
return scoring_function(y, y_pred)
# EDIT - insert marginal effects function
def margins(self, x, k = 0):
"""
Marginal effects on dependent variable of a regressor, identified by coef
:param x: array with all regressors (independent variables) to make a prediction
:param k: coefficient corresponding to the regressor with respect to which we want to take the marginal effects
:return: an array with the marginal effects estimated at each observation's level
The marginal effect of regressor k on individual i's y is defined as the product of coef[k] and the normal cdf
evaluated at x_i * coeff[k] / sigma
"""
return self.coef_[k] * norm.cdf(self.predict(x) / self.sigma_) | [
"scipy.special.log_ndtr",
"numpy.sqrt",
"numpy.squeeze",
"numpy.append",
"numpy.exp",
"numpy.dot",
"numpy.square",
"numpy.concatenate",
"numpy.finfo",
"warnings.warn",
"sklearn.linear_model.LinearRegression",
"numpy.var"
]
| [((3180, 3214), 'numpy.append', 'np.append', (['beta_jac', '(sigma_jac / s)'], {}), '(beta_jac, sigma_jac / s)\n', (3189, 3214), True, 'import numpy as np\n'), ((443, 533), 'warnings.warn', 'warnings.warn', (['"""No censored observations; use regression methods for uncensored data"""'], {}), "(\n 'No censored observations; use regression methods for uncensored data')\n", (456, 533), False, 'import warnings\n'), ((2304, 2324), 'scipy.special.log_ndtr', 'log_ndtr', (['left_stats'], {}), '(left_stats)\n', (2312, 2324), False, 'from scipy.special import log_ndtr\n'), ((2345, 2366), 'numpy.exp', 'np.exp', (['(l_pdf - l_cdf)'], {}), '(l_pdf - l_cdf)\n', (2351, 2366), True, 'import numpy as np\n'), ((2387, 2416), 'numpy.dot', 'np.dot', (['left_frac', '(x_left / s)'], {}), '(left_frac, x_left / s)\n', (2393, 2416), True, 'import numpy as np\n'), ((2469, 2498), 'numpy.dot', 'np.dot', (['left_frac', 'left_stats'], {}), '(left_frac, left_stats)\n', (2475, 2498), True, 'import numpy as np\n'), ((2686, 2707), 'scipy.special.log_ndtr', 'log_ndtr', (['right_stats'], {}), '(right_stats)\n', (2694, 2707), False, 'from scipy.special import log_ndtr\n'), ((2729, 2750), 'numpy.exp', 'np.exp', (['(r_pdf - r_cdf)'], {}), '(r_pdf - r_cdf)\n', (2735, 2750), True, 'import numpy as np\n'), ((2772, 2803), 'numpy.dot', 'np.dot', (['right_frac', '(x_right / s)'], {}), '(right_frac, x_right / s)\n', (2778, 2803), True, 'import numpy as np\n'), ((2858, 2889), 'numpy.dot', 'np.dot', (['right_frac', 'right_stats'], {}), '(right_frac, right_stats)\n', (2864, 2889), True, 'import numpy as np\n'), ((3020, 3048), 'numpy.dot', 'np.dot', (['mid_stats', '(x_mid / s)'], {}), '(mid_stats, x_mid / s)\n', (3026, 3048), True, 'import numpy as np\n'), ((4376, 4389), 'numpy.var', 'np.var', (['resid'], {}), '(resid)\n', (4382, 4389), True, 'import numpy as np\n'), ((4403, 4421), 'numpy.sqrt', 'np.sqrt', (['resid_var'], {}), '(resid_var)\n', (4410, 4421), True, 'import numpy as np\n'), ((4440, 4457), 'numpy.append', 'np.append', (['b0', 's0'], {}), '(b0, s0)\n', (4449, 4457), True, 'import numpy as np\n'), ((5275, 5296), 'numpy.dot', 'np.dot', (['x', 'self.coef_'], {}), '(x, self.coef_)\n', (5281, 5296), True, 'import numpy as np\n'), ((667, 694), 'numpy.squeeze', 'np.squeeze', (['y[split].values'], {}), '(y[split].values)\n', (677, 694), True, 'import numpy as np\n'), ((1152, 1169), 'numpy.dot', 'np.dot', (['x_left', 'b'], {}), '(x_left, b)\n', (1158, 1169), True, 'import numpy as np\n'), ((1264, 1282), 'numpy.dot', 'np.dot', (['x_right', 'b'], {}), '(x_right, b)\n', (1270, 1282), True, 'import numpy as np\n'), ((1359, 1389), 'numpy.concatenate', 'np.concatenate', (['to_cat'], {'axis': '(0)'}), '(to_cat, axis=0)\n', (1373, 1389), True, 'import numpy as np\n'), ((5170, 5191), 'numpy.dot', 'np.dot', (['x', 'self.coef_'], {}), '(x, self.coef_)\n', (5176, 5191), True, 'import numpy as np\n'), ((1606, 1622), 'numpy.dot', 'np.dot', (['x_mid', 'b'], {}), '(x_mid, b)\n', (1612, 1622), True, 'import numpy as np\n'), ((2213, 2230), 'numpy.dot', 'np.dot', (['x_left', 'b'], {}), '(x_left, b)\n', (2219, 2230), True, 'import numpy as np\n'), ((2583, 2601), 'numpy.dot', 'np.dot', (['x_right', 'b'], {}), '(x_right, b)\n', (2589, 2601), True, 'import numpy as np\n'), ((2979, 2995), 'numpy.dot', 'np.dot', (['x_mid', 'b'], {}), '(x_mid, b)\n', (2985, 2995), True, 'import numpy as np\n'), ((4206, 4243), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (4222, 4243), False, 'from 
sklearn.linear_model import LinearRegression\n'), ((3100, 3120), 'numpy.square', 'np.square', (['mid_stats'], {}), '(mid_stats)\n', (3109, 3120), True, 'import numpy as np\n'), ((1692, 1709), 'numpy.finfo', 'np.finfo', (['"""float"""'], {}), "('float')\n", (1700, 1709), True, 'import numpy as np\n')] |
import os
import sys
from conda_build import api
from conda_build import render
import pytest
def test_output_with_noarch_says_noarch(testing_metadata):
testing_metadata.meta['build']['noarch'] = 'python'
output = api.get_output_file_path(testing_metadata)
assert os.path.sep + "noarch" + os.path.sep in output[0]
def test_output_with_noarch_python_says_noarch(testing_metadata):
testing_metadata.meta['build']['noarch_python'] = True
output = api.get_output_file_path(testing_metadata)
assert os.path.sep + "noarch" + os.path.sep in output[0]
def test_reduce_duplicate_specs(testing_metadata):
reqs = {'build': ['exact', 'exact 1.2.3 1', 'exact >1.0,<2'],
'host': ['exact', 'exact 1.2.3 1']
}
testing_metadata.meta['requirements'] = reqs
render._simplify_to_exact_constraints(testing_metadata)
assert (testing_metadata.meta['requirements']['build'] ==
testing_metadata.meta['requirements']['host'])
simplified_deps = testing_metadata.meta['requirements']
assert len(simplified_deps['build']) == 1
assert 'exact 1.2.3 1' in simplified_deps['build']
def test_pin_run_as_build_preserve_string(testing_metadata):
m = testing_metadata
m.config.variant['pin_run_as_build']['pkg'] = {
'max_pin': 'x.x'
}
dep = render.get_pin_from_build(
m,
'pkg * somestring*',
{'pkg': '1.2.3 somestring_h1234'}
)
assert dep == 'pkg >=1.2.3,<1.3.0a0 somestring*'
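# Note (added comment): with max_pin='x.x' the expected pin bumps the second
# version component for the upper bound ('>=1.2.3,<1.3.0a0') and carries the
# recipe's build-string pattern ('somestring*') through unchanged, which is
# exactly what the assertion above checks.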
| [
"conda_build.render._simplify_to_exact_constraints",
"conda_build.render.get_pin_from_build",
"conda_build.api.get_output_file_path"
]
| [((225, 267), 'conda_build.api.get_output_file_path', 'api.get_output_file_path', (['testing_metadata'], {}), '(testing_metadata)\n', (249, 267), False, 'from conda_build import api\n'), ((469, 511), 'conda_build.api.get_output_file_path', 'api.get_output_file_path', (['testing_metadata'], {}), '(testing_metadata)\n', (493, 511), False, 'from conda_build import api\n'), ((798, 853), 'conda_build.render._simplify_to_exact_constraints', 'render._simplify_to_exact_constraints', (['testing_metadata'], {}), '(testing_metadata)\n', (835, 853), False, 'from conda_build import render\n'), ((1317, 1405), 'conda_build.render.get_pin_from_build', 'render.get_pin_from_build', (['m', '"""pkg * somestring*"""', "{'pkg': '1.2.3 somestring_h1234'}"], {}), "(m, 'pkg * somestring*', {'pkg':\n '1.2.3 somestring_h1234'})\n", (1342, 1405), False, 'from conda_build import render\n')] |
#Writing MOOG parameter file for the parameter, abundance, and error calculations.
#The parameter file only needs to be written once, at the beginning of the routine, because the output
#files are overwritten with each iteration of the routine; only minimal output data are needed.
#
#The user can choose to have the parameter file written to screen by choosing verbose=True.
#The user can choose to have more detailed MOOG output by choosing the appropriate values for the
#MOOG input parameters.
import numpy as np
def param_file(linelist,atmosphere=0,molecules=1,lines=0,flux=0,damp=0,plot=0,units=0,verbose=False):
if verbose:
print('abfind')
print('terminal \'x11\'')
print('standard_out \'moog_out.1\'')
print('summary_out \'moog_out.2\'')
print('model_in \'star.mod\'')
print('lines_in \'' + linelist + '\'')
print('atmosphere ' + str(atmosphere))
print('molecules ' + str(molecules))
print('lines ' + str(lines))
print('flux/int ' + str(flux))
print('damping ' + str(damp))
print('plot ' + str(plot))
print('units ' + str(units))
with open('batch.par', 'wt') as file:
file.write('abfind' + '\n')
file.write('terminal \'x11\'' + '\n')
file.write('standard_out \'moog_out.1\'' + '\n')
file.write('summary_out \'moog_out.2\'' + '\n')
file.write('model_in \'star.mod\'' + '\n')
file.write('lines_in \'' + linelist + '\'' + '\n')
file.write('atmosphere ' + str(atmosphere) + '\n')
file.write('molecules ' + str(molecules) + '\n')
file.write('lines ' + str(lines) + '\n')
file.write('flux/int ' + str(flux) + '\n')
file.write('damping ' + str(damp) + '\n')
file.write('plot ' + str(plot) + '\n')
file.write('units ' + str(units) + '\n')
#Function for creating the solar and stellar linelists
def linelist_create(star_in, sun_in, direc_path):
with open(direc_path + '/linelist_star.txt', 'w') as out_star:
with open(direc_path + '/linelist_sun.txt', 'w') as out_sun:
with open(star_in) as file_star:
with open(sun_in) as file_sun:
line_star = file_star.readline()
out_star.write(line_star) #accounts for comment line in linelist files
line_sun = file_sun.readline()
out_sun.write(line_sun) #accounts for comment line in linelist files
line = file_star.readlines()
line_s = file_sun.readlines()
for line_star in line:
line_star_split = line_star.split()
#if len(line_star_split) < 2: continue
for line_sun in line_s:
line_sun_split = line_sun.split()
#if len(line_sun_split) < 2: continue
if line_star_split[0] == line_sun_split[0] and line_star_split[1] == line_sun_split[1]:
out_star.write(line_star)
out_sun.write(line_sun)
continue
#Reads Moog output files, parsing elements and colums
def read_file(filename):
count = 0
    elements = ['Fe I ', 'Fe II ', 'C I ', 'N I ', 'O I ', 'S I ', 'K I ', 'Na I ', 'Mg I ', 'Al I ', 'Si I ', 'Ca I ', 'Sc II ', 'Ti I ', 'Ti II ', 'V ', 'Cr I ',
'Mn I ', 'Co I ', 'Ni I ', 'Cu I ', 'Zn I ', 'Ba II ']
dtype = [('wavelength', 'f8'),
('ID', 'f8'),
('EP', 'f8'),
('logGF', 'f8'),
('EWin', 'f8'),
('logRWin', 'f8'),
('abund', 'f8'),
('delavg', 'f8')]
abundances = []
el_found = []
with open(filename) as file:
while True:
count += 1
# Get next line from file
line = file.readline()
# if line is empty end of file is reached
if not line: break
for j, el in enumerate(elements):
species = 'Abundance Results for Species ' + el
if species in line:
new_arr = []
el_found.append(el)
line = file.readline().split()
line = file.readline().split()
while len(line) == 8:
new_arr.append(line)
line = file.readline().rstrip().split()
new_arr = np.array(new_arr)
new_arr = np.core.records.fromarrays(new_arr.T,dtype=dtype)
abundances.append(new_arr)
return el_found, abundances
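#Illustrative usage sketch (added example, not part of the original routine).
#The file names below are assumptions, and MOOG itself must be run separately to
#produce 'moog_out.2' before read_file() can parse it.
def _moog_demo(direc_path='.'):
    param_file('linelist_star.txt', atmosphere=1, molecules=2, lines=1)
    linelist_create('star_lines.txt', 'sun_lines.txt', direc_path)
    return read_file('moog_out.2')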
| [
"numpy.array",
"numpy.core.records.fromarrays"
]
| [((4833, 4850), 'numpy.array', 'np.array', (['new_arr'], {}), '(new_arr)\n', (4841, 4850), True, 'import numpy as np\n'), ((4881, 4931), 'numpy.core.records.fromarrays', 'np.core.records.fromarrays', (['new_arr.T'], {'dtype': 'dtype'}), '(new_arr.T, dtype=dtype)\n', (4907, 4931), True, 'import numpy as np\n')] |
#coding=utf-8
from __future__ import division
"""
# OVERLAY UFOS
For anyone looking in here, sorry the code is so messy. This is a standalone version of a script with a lot of dependencies.
"""
import os
from AppKit import * #@PydevCodeAnalysisIgnore
from vanilla import * #@PydevCodeAnalysisIgnore
from mojo.drawingTools import *
from mojo.events import addObserver, removeObserver
from mojo.extensions import getExtensionDefault, setExtensionDefault, getExtensionDefaultColor, setExtensionDefaultColor
from mojo.UI import UpdateCurrentGlyphView
from fontTools.pens.transformPen import TransformPen
from defconAppKit.windows.baseWindow import BaseWindowController
import unicodedata
#from lib.tools.defaults import getDefaultColor
from lib.tools.drawing import strokePixelPath
from lib.UI.spaceCenter.glyphSequenceEditText import splitText
from builtins import chr
selectedSymbol = u'•'
def SmallTextListCell(editable=False):
cell = NSTextFieldCell.alloc().init()
size = NSSmallControlSize #NSMiniControlSize
cell.setControlSize_(size)
font = NSFont.systemFontOfSize_(NSFont.systemFontSizeForControlSize_(size))
cell.setFont_(font)
cell.setEditable_(editable)
return cell
class TX:
"""
An agnostic way to get a naked font.
"""
@classmethod
def naked(cls, f):
try:
return f.naked()
except:
return f
class Tool():
"""
The tool object manages the font list. This is a simplification.
"""
fonts = AllFonts()
def addObserver(self, target, method, action):
addObserver(target, method, action)
def removeObserver(self, target, method, action):
removeObserver(target, method, action)
def getCurrentFont(self):
return CurrentFont()
def getFonts(self):
u"""Answers the list of selected fonts, ordered by their path.
"""
return self.fonts
def appendToFonts(self, path):
f = OpenFont(path, showUI=False)
self.fonts.append(f)
def removeFromFonts(self, path):
for i, f in enumerate(self.fonts):
if f.path == path:
del self.fonts[i]
def getFontPaths(self):
return [f.path or str(f.info.familyName)+" "+str(f.info.styleName) for f in self.getFonts()]
def getFontLabel(self, path):
if path is None:
return None
if not path:
return 'Untitled'
name = path.split('/')[-1]
status = selectedSymbol
return status, path, name
def getFontLabels(self):
labels = {}
for path in self.getFontPaths():
if path:
label = self.getFontLabel(path)
name = label[-1]
else:
name = 'Untitled'
if not name in labels:
labels[name] = []
labels[name].append(label)
sortedLabels = []
for _, labelSet in sorted(labels.items()):
if len(labelSet) == 1: # There is only a single font with this name
sortedLabels.append(labelSet[0])
else: # Otherwise we'll have to construct new names to show the difference
for status, path, name in sorted(labelSet):
sortedLabels.append((status, path, '%s "%s"' % (name, '/'.join(path.split('/')[:-1]))))
return sortedLabels
class C:
"""
Some constants.
"""
C2 = 100
BUTTON_WIDTH = 80
STYLE_CHECKBOXSIZE = 'small'
STYLE_LABELSIZE = 'small'
STYLE_RADIOSIZE = 'small'
L = 22
LL = 25
class OverlayUFOs(BaseWindowController):
DEFAULTKEY = "com.fontbureau.overlayUFO"
DEFAULTKEY_FILLCOLOR = "%s.fillColor" %DEFAULTKEY
DEFAULTKEY_STROKECOLOR = "%s.strokeColor" %DEFAULTKEY
DEFAULTKEY_STROKE = "%s.stroke" %DEFAULTKEY
DEFAULTKEY_FILL = "%s.fill" %DEFAULTKEY
FALLBACK_FILLCOLOR = NSColor.colorWithCalibratedRed_green_blue_alpha_(.5, 0, .5, .1)
FALLBACK_STROKECOLOR = NSColor.colorWithCalibratedRed_green_blue_alpha_(.5, 0, .5, .5)
VERSION = 1.0
NAME = u'Overlay UFOs'
MANUAL = u"""In the current glyph window, this will present the view the same glyph from a separate
UFO or set of UFOs.<br/>
    This does NOT import the UFO into a background layer. Instead, it renders an outline directly from the UFO into the glyph window view.
<ul>
<li>There is no need to import duplicate data into a background layer.</li>
<li>The source outline is always live; when changes are made to the source, they will automatically
    appear in the current font's glyph view without re-importing.</li>
<li>The source font does not need to be opened with a UI.</li>
</ul>
<h3>DIALOG</h3>
<ul>
<li>A floating dialog is present to let you open and select source fonts, fill, stroke, color.</li>
<li>Source Fonts: The default source font list is self.getOpenFonts(). The refresh button will
return this list to self.getOpenFonts().</li>
<li>Adding Fonts: You can manually add fonts by selecting a UFO file.
The UFO file will open without an interface.</li>
<li>Removing Fonts: There are buttons for removing selected fonts and for clearing the source font list.</li>
</ul>
<h3>BUGS/IMPROVEMENTS</h3>
<ul>
<li>Known Issue: The source font is drawn on top of the current font, instead of behind it.
So, it is good to select a color with a low opacity.</li>
    <li>Known Bug: If the glyph windows for both the source and current fonts are open, it is possible
to select and inadvertently edit the source outline in the current window. I don't know how to solve this.</li>
<li>Improvement?: Add options to scale the source font.</li>
<li>Improvement?: Set different colors, fill settings for each font?</li>
</ul>
"""
# Fixed width of the window.
VIEWMINSIZE = 400
VIEWSIZE = VIEWMINSIZE
VIEWMAXSIZE = VIEWMINSIZE
WINDOW_POSSIZE = (130, 20, VIEWSIZE, 260)
WINDOW_MINSIZE = (VIEWMINSIZE, 260)
WINDOW_MAXSIZE = (VIEWMAXSIZE, 260)
def getPathListDescriptor(self):
return [
dict(title='Status', key='status', cell=SmallTextListCell(editable=False), width=12, editable=False),
dict(title='Name', key='name', width=300, cell=SmallTextListCell(editable=False), editable=False),
dict(title='Path', key='path', width=0, editable=False),
]
################
# OBSERVERS AND UPDATERS
################
def fontSelectionChanged(self):
self.setSourceFonts()
def activateModule(self):
self.tool.addObserver(self, 'drawInactive', 'drawInactive')
self.tool.addObserver(self, 'drawBackground', 'drawBackground')
self.tool.addObserver(self, 'fontDidOpen', 'fontDidOpen')
self.tool.addObserver(self, 'fontWillClose', 'fontWillClose')
def deactivateModule(self):
removeObserver(self, 'drawBackground')
removeObserver(self, 'drawInactive')
removeObserver(self, 'fontDidOpen')
removeObserver(self, 'fontWillClose')
################
# CONTEXTS
################
def fontDidOpen(self, info):
font = info.get('font')
if font:
self.tool.fonts.append(font)
self.refreshCallback()
def fontWillClose(self, info):
font = info.get('font')
path = font.path
if path:
self.tool.removeFromFonts(path)
self.refreshCallback()
def __init__(self):
self.tool = Tool()
self.w = FloatingWindow((400, 200), "Overlay UFOs", minSize=(400, 200))
self.populateView()
self.getView().open()
def getView(self):
return self.w
def refreshCallback(self, sender=None):
"""
Update the font list.
"""
self.getView().fontList.set(self.getFontItems())
def resetCallback(self, sender=None):
"""
Resets the view to the currently opened fonts.
"""
self.tool.fonts = AllFonts()
self.getView().fontList.set(self.getFontItems())
def addCallback(self, sender=None):
"""
Open a font without UI and add it to the font list.
"""
f = OpenFont(None, showUI=False)
if f is None:
return
self.tool.appendToFonts(f.path)
self.refreshCallback()
def populateView(self):
"""
The UI
"""
self.fillColor = getExtensionDefaultColor(self.DEFAULTKEY_FILLCOLOR, self.FALLBACK_FILLCOLOR)
self.strokeColor = getExtensionDefaultColor(self.DEFAULTKEY_STROKECOLOR, self.FALLBACK_STROKECOLOR)
self.contextBefore = self.contextAfter = ''
# Populating the view can only happen after the view is attached to the window,
# or else the relative widths go wrong.
view = self.getView()
view.add = Button((-40, 3, 30, 22), '+', callback=self.addCallback)
view.reset = Button((-40, 30, 30, 22), chr(8634), callback=self.resetCallback)
# Flag to see if the selection list click is in progress. We are resetting the selection
# ourselves, using the list "buttons", but changing that selection will cause another
# list update, that should be ignored.
self._selectionChanging = False
# Indicate that we are a drawing module
self._canDraw = True
self.sources = []
x = y = 4
view.fontList = List((C.C2, y, 250, -65), self.getFontItems(),
selectionCallback=self.fontListCallback,
drawFocusRing=False,
enableDelete=False,
allowsMultipleSelection=False,
allowsEmptySelection=True,
drawHorizontalLines=True,
showColumnTitles=False,
columnDescriptions=self.getPathListDescriptor(),
rowHeight=16,
)
view.viewEnabled = CheckBox((x, y, C.BUTTON_WIDTH, 22), "Show",
callback=self.viewCallback, sizeStyle=C.STYLE_CHECKBOXSIZE,
value=True)
y += C.L
view.fill = CheckBox((x, y, 60, 22), "Fill", sizeStyle=C.STYLE_CHECKBOXSIZE,
#value=getExtensionDefault("%s.%s" %(self.DEFAULTKEY, "fill"), True),
value = True,
callback=self.fillCallback)
y += C.L
color = getExtensionDefaultColor(self.DEFAULTKEY_FILLCOLOR, self.FALLBACK_FILLCOLOR)
view.color = ColorWell((x, y, 60, 22),
color=color,
callback=self.colorCallback)
y += C.L + 5
view.stroke = CheckBox((x, y, 60, 22), "Stroke", sizeStyle=C.STYLE_CHECKBOXSIZE,
#value=getExtensionDefault("%s.%s" %(self.DEFAULTKEY, "stroke"), False),
value = False,
callback=self.strokeCallback)
y += C.LL
view.alignText = TextBox((x, y, 90, 50), 'Alignment', sizeStyle=C.STYLE_LABELSIZE)
y += C.L
view.align = RadioGroup((x, y, 90, 50), ['Left', 'Center', 'Right'], isVertical=True,
sizeStyle=C.STYLE_RADIOSIZE, callback=self.alignCallback)
view.align.set(0)
#view.contextLabel = TextBox((C.C2, -58, 90, 50), 'Contexts', sizeStyle=C.STYLE_LABELSIZE)
view.viewCurrent = CheckBox((C.C2, -60, 150, 22), "Always View Current", sizeStyle=C.STYLE_CHECKBOXSIZE,
value = False,
callback=self.contextEditCallback)
#view.contextUandlc = CheckBox((C.C2+170, -60, 85, 22), "Match Case", sizeStyle=C.STYLE_CHECKBOXSIZE,
# value = False,
# callback=self.contextEditCallback)
view.contextBefore = EditText((C.C2, -30, 85, 20), callback=self.contextEditCallback, continuous=True, sizeStyle="small", placeholder='Left Context')
view.contextCurrent = EditText((C.C2+95, -30, 60, 20), callback=self.contextCurrentEditCallback, continuous=True, sizeStyle="small")
view.contextAfter = EditText((C.C2+165, -30, 85, 20), callback=self.contextEditCallback, continuous=True, sizeStyle="small", placeholder='Right Context')
self.activateModule()
self.setUpBaseWindowBehavior()
def fontListCallback(self, sender):
u"""If there is a selection, toggle the status of these fonts."""
# Avoid recursive loop because of changing font selection
if not self._selectionChanging:
for selectedIndex in sender.getSelection():
item = sender.get()[selectedIndex]
if item['status']:
item['status'] = ''
else:
item['status'] = selectedSymbol
# If shift is held when pressing an entry in the font list,
# the non-selected fonts will swap with the current's state
if NSEvent.modifierFlags() & NSShiftKeyMask:
items = [sender.get()[i] for i in range(len(sender.get())) if i != selectedIndex]
for subItems in items:
if item['status'] == '':
subItems['status'] = selectedSymbol
else:
subItems['status'] = ''
self._selectionChanging = True
# Avoid recursive loop because of changing font selection
sender.setSelection([])
self._selectionChanging = False
self.updateView()
def canDraw(self):
return True
"""
There is an experimental feature that will change the case of the context characters based on the case of the current glyph. But I'm disabling that for now.
"""
#def isUpper(self, g):
# char = CharacterTX.glyph2Char(g)
# if len(char) > 1:
# char = char[0]
# if unicodedata.category(char) == 'Lu':
# return True
# return False
#def isLower(self, g):
# char = CharacterTX.glyph2Char(g)
# if len(char) > 1:
# char = char[0]
# if unicodedata.category(char) == 'Ll':
# return True
# return False
def getHiddenFont(self, path):
from builtins import str
for f in self.tool.getFonts():
if f.path == path:
return f
elif path == str(f.info.familyName)+" "+str(f.info.styleName):
return f
def drawBackground(self, info):
u"""Draw the background of defined glyphs and fonbts.
Scale is available as mouse.scale."""
view = self.getView()
if not view.viewEnabled.get():
return
fill = getExtensionDefault(self.DEFAULTKEY_FILL, True)
stroke = getExtensionDefault(self.DEFAULTKEY_STROKE, True)
fillcolor = getExtensionDefaultColor(self.DEFAULTKEY_FILLCOLOR, self.FALLBACK_FILLCOLOR)
glyph = info.get('glyph')
if glyph is not None:
current = glyph.getParent()
else:
current = self.tool.getCurrentFont()
if glyph is None or current is None:
return
align = self.getAlignment()
# Get the fonts from the list and see if they are selected.
sourceItems = self.getSourceFonts()
showFonts = []
for item in sourceItems:
if not item['status']:
continue
path = item['path']
font = self.getHiddenFont(path)
showFonts.append(font)
if view.viewCurrent.get() and current not in showFonts:
showFonts.append(current)
for font in showFonts:
self.fillColor.setFill()
self.strokeColor.setStroke()
contextBefore, contextCurrent, contextAfter = self.getContexts()
if font is not None:
contextBefore = splitText(contextBefore, TX.naked(font).unicodeData, TX.naked(font).groups)
contextBefore = [font[gname] for gname in contextBefore if gname in font.keys()]
contextAfter = splitText(contextAfter, TX.naked(font).unicodeData, TX.naked(font).groups)
contextAfter = [font[gname] for gname in contextAfter if gname in font.keys()]
contextCurrent = splitText(contextCurrent, TX.naked(font).unicodeData, TX.naked(font).groups)
if len(contextCurrent) > 0:
contextCurrent = [font[gname] for gname in [contextCurrent[0]] if gname in font.keys()]
if len(contextCurrent) > 0:
sourceGlyph = contextCurrent[0]
else:
sourceGlyph = None
elif glyph.name in font.keys():
sourceGlyph = font[glyph.name]
else:
sourceGlyph = None
"""
#There is an experimental feature that will change the case of the context characters based on the case of the current glyph. But I'm disabling that for now.
if view.contextUandlc.get():
caseTransform = None
if self.isUpper(glyph):
caseTransform = FontTX.unicodes.getUpperFromLower
elif self.isLower(glyph):
caseTransform = FontTX.unicodes.getLowerFromUpper
if caseTransform:
for i, g in enumerate(contextBefore):
newG = caseTransform(g)
if newG is not None:
contextBefore[i] = newG
newG = caseTransform(sourceGlyph)
if newG is not None:
sourceGlyph = newG
if caseTransform:
for i, g in enumerate(contextAfter):
newG = caseTransform(g)
if newG is not None:
contextAfter[i] = newG
"""
scale(current.info.unitsPerEm/float(font.info.unitsPerEm))
widthOffset = 0
if sourceGlyph is not None:
if align == 'center':
destCenter = float(glyph.width/2) / current.info.unitsPerEm
sourceCenter = float(sourceGlyph.width/2) / font.info.unitsPerEm
widthOffset = (destCenter-sourceCenter) * font.info.unitsPerEm
elif align == 'right':
widthOffset = ( ( glyph.width / glyph.getParent().info.unitsPerEm ) - (sourceGlyph.width / sourceGlyph.getParent().info.unitsPerEm ) ) * font.info.unitsPerEm
translate(widthOffset, 0)
previousGlyph = sourceGlyph
contextBefore.reverse()
totalWidth = 0
for i, cbGlyph in enumerate(contextBefore):
kernValue = 0
if previousGlyph is not None and previousGlyph.getParent() == cbGlyph.getParent():
# Uncomment to activate kerning. Requires FontTX.
#kernValue += FontTX.kerning.getValue((previousGlyph.name, cbGlyph.name), font.kerning, font.groups)
kernValue += 0
translate(-cbGlyph.width-kernValue, 0)
totalWidth += cbGlyph.width + kernValue
drawGlyphPath = TX.naked(cbGlyph).getRepresentation("defconAppKit.NSBezierPath")
if view.fill.get():
drawGlyphPath.fill()
if view.stroke.get():
strokePixelPath(drawGlyphPath)
previousGlyph = cbGlyph
translate(totalWidth, 0)
totalWidth = 0
contextCurrentAndAfter = [sourceGlyph]+contextAfter
for i, cbGlyph in enumerate(contextCurrentAndAfter):
if cbGlyph is None:
cbGlyph = sourceGlyph
nextGlyph = None
if i + 1 < len(contextCurrentAndAfter):
nextGlyph = contextCurrentAndAfter[i+1]
if (i == 0 and cbGlyph == glyph) or sourceGlyph is None:
pass
else:
drawGlyphPath = TX.naked(cbGlyph).getRepresentation("defconAppKit.NSBezierPath")
if view.fill.get():
drawGlyphPath.fill()
if view.stroke.get():
strokePixelPath(drawGlyphPath)
kernValue = 0
if cbGlyph is not None and nextGlyph is not None and nextGlyph.getParent() == cbGlyph.getParent():
#kernValue = FontTX.kerning.getValue((cbGlyph.name, nextGlyph.name), font.kerning, font.groups)
# Uncomment to activate kerning. Requires FontTX.
kernValue = 0
width = 0
if cbGlyph is not None:
width = cbGlyph.width
translate(width+kernValue, 0)
totalWidth += width + kernValue
previousGlyph = cbGlyph
translate(-totalWidth, 0)
translate(-widthOffset, 0)
scale(font.info.unitsPerEm/float(current.info.unitsPerEm))
#restore()
drawInactive = drawBackground
def viewCallback(self, sender):
self.updateView()
def getSourceFonts(self):
"""
Get the fonts in the list.
"""
view = self.getView()
return view.fontList.get()
def setSourceFonts(self):
u"""
Set the font list from the current set of open fonts.
"""
view = self.getView()
labels = []
currentSelection = []
for d in self.getSourceFonts():
if d['status']:
currentSelection.append(d['path'])
for status, path, name in self.tool.getFontLabels():
if path in currentSelection:
status = selectedSymbol
else:
status = ''
labels.append(dict(status=status, path=path, name=name))
view.fontList.set(labels)
def colorCallback(self, sender):
"""
Change the color.
"""
selectedColor = sender.get()
r = selectedColor.redComponent()
g = selectedColor.greenComponent()
b = selectedColor.blueComponent()
a = 1
strokeColor = NSColor.colorWithCalibratedRed_green_blue_alpha_(r, g, b, a)
setExtensionDefaultColor(self.DEFAULTKEY_FILLCOLOR, selectedColor)
setExtensionDefaultColor(self.DEFAULTKEY_STROKECOLOR, strokeColor)
self.fillColor = selectedColor
self.strokeColor = strokeColor
self.updateView()
def fillCallback(self, sender):
"""
Change the fill status.
"""
setExtensionDefault(self.DEFAULTKEY_FILL, sender.get())
self.updateView()
def strokeCallback(self, sender):
"""
Change the stroke status.
"""
setExtensionDefault(self.DEFAULTKEY_STROKE, sender.get())
self.updateView()
def alignCallback(self, sender):
"""
Change the alignment status.
"""
self.updateView()
def getAlignment(self):
"""
Get the alignment as a string.
"""
view = self.getView()
index = view.align.get()
if index == 0:
return 'left'
elif index == 1:
return 'center'
elif index == 2:
return 'right'
def updateView(self, sender=None):
UpdateCurrentGlyphView()
def windowCloseCallback(self, sender):
self.deactivateModule()
self.updateView()
BaseWindowController.windowCloseCallback(self, sender)
def getFontItems(self, update=False):
"""
Get all fonts in a way that can be set into a vanilla list.
"""
        paths = set() # Set of all unique paths in the merged lists
itemsByName = {}
if update: # If update flag is set, then keep the existing selected fonts.
for item in self.getSourceFonts():
if item['status']:
itemsByName[item['name']] = item
currentStatuses = {}
if hasattr(self.getView(), 'fontList'):
for d in self.getSourceFonts():
currentStatuses[d['path']] = d['status']
for status, path, uniqueName in self.tool.getFontLabels():
if path in currentStatuses:
status = currentStatuses[path]
else:
status = selectedSymbol
            if uniqueName not in itemsByName:  # If it is not already there, add this to the list
itemsByName[uniqueName] = dict(status=status, path=path, name=uniqueName)
fontList = []
for key, item in sorted(itemsByName.items()):
fontList.append(item)
return fontList
################
# CONTEXTS
################
def getContexts(self):
if not hasattr(self, 'contextBefore'):
self.contextBefore = ''
if not hasattr(self, 'contextAfter'):
self.contextAfter = ''
if not hasattr(self, 'contextCurrent'):
self.contextCurrent = None
return self.contextBefore, self.contextCurrent, self.contextAfter
def setContexts(self, contextBefore, contextCurrent, contextAfter):
self.contextBefore = contextBefore
self.contextCurrent = contextCurrent
self.contextAfter = contextAfter
def contextEditCallback(self, sender):
before = self.getView().contextBefore.get()
current = self.getView().contextCurrent.get() or None
after = self.getView().contextAfter.get()
self.setContexts(before, current, after)
self.updateView()
def contextCurrentEditCallback(self, sender):
#if sender.get():
#sender.set(sender.get()[0])
self.contextEditCallback(sender)
if __name__ == "__main__":
OverlayUFOs() | [
"builtins.chr",
"mojo.extensions.setExtensionDefaultColor",
"defconAppKit.windows.baseWindow.BaseWindowController.windowCloseCallback",
"mojo.extensions.getExtensionDefaultColor",
"builtins.str",
"mojo.extensions.getExtensionDefault",
"lib.tools.drawing.strokePixelPath",
"mojo.events.removeObserver",
"mojo.events.addObserver",
"mojo.UI.UpdateCurrentGlyphView"
]
 | [((1594, 1629), 'mojo.events.addObserver', 'addObserver', (['target', 'method', 'action'], {}), '(target, method, action)\n', (1605, 1629), False, 'from mojo.events import addObserver, removeObserver\n'), ((1693, 1731), 'mojo.events.removeObserver', 'removeObserver', (['target', 'method', 'action'], {}), '(target, method, action)\n', (1707, 1731), False, 'from mojo.events import addObserver, removeObserver\n'), ((6961, 6999), 'mojo.events.removeObserver', 'removeObserver', (['self', '"""drawBackground"""'], {}), "(self, 'drawBackground')\n", (6975, 6999), False, 'from mojo.events import addObserver, removeObserver\n'), ((7008, 7044), 'mojo.events.removeObserver', 'removeObserver', (['self', '"""drawInactive"""'], {}), "(self, 'drawInactive')\n", (7022, 7044), False, 'from mojo.events import addObserver, removeObserver\n'), ((7053, 7088), 'mojo.events.removeObserver', 'removeObserver', (['self', '"""fontDidOpen"""'], {}), "(self, 'fontDidOpen')\n", (7067, 7088), False, 'from mojo.events import addObserver, removeObserver\n'), ((7097, 7134), 'mojo.events.removeObserver', 'removeObserver', (['self', '"""fontWillClose"""'], {}), "(self, 'fontWillClose')\n", (7111, 7134), False, 'from mojo.events import addObserver, removeObserver\n'), ((8552, 8628), 'mojo.extensions.getExtensionDefaultColor', 'getExtensionDefaultColor', (['self.DEFAULTKEY_FILLCOLOR', 'self.FALLBACK_FILLCOLOR'], {}), '(self.DEFAULTKEY_FILLCOLOR, self.FALLBACK_FILLCOLOR)\n', (8576, 8628), False, 'from mojo.extensions import getExtensionDefault, setExtensionDefault, getExtensionDefaultColor, setExtensionDefaultColor\n'), ((8656, 8741), 'mojo.extensions.getExtensionDefaultColor', 'getExtensionDefaultColor', (['self.DEFAULTKEY_STROKECOLOR', 'self.FALLBACK_STROKECOLOR'], {}), '(self.DEFAULTKEY_STROKECOLOR, self.FALLBACK_STROKECOLOR\n    )\n', (8680, 8741), False, 'from mojo.extensions import getExtensionDefault, setExtensionDefault, getExtensionDefaultColor, setExtensionDefaultColor\n'), ((10443, 10519), 'mojo.extensions.getExtensionDefaultColor', 'getExtensionDefaultColor', (['self.DEFAULTKEY_FILLCOLOR', 'self.FALLBACK_FILLCOLOR'], {}), '(self.DEFAULTKEY_FILLCOLOR, self.FALLBACK_FILLCOLOR)\n', (10467, 10519), False, 'from mojo.extensions import getExtensionDefault, setExtensionDefault, getExtensionDefaultColor, setExtensionDefaultColor\n'), ((14710, 14757), 'mojo.extensions.getExtensionDefault', 'getExtensionDefault', (['self.DEFAULTKEY_FILL', '(True)'], {}), '(self.DEFAULTKEY_FILL, True)\n', (14729, 14757), False, 'from mojo.extensions import getExtensionDefault, setExtensionDefault, getExtensionDefaultColor, setExtensionDefaultColor\n'), ((14775, 14824), 'mojo.extensions.getExtensionDefault', 'getExtensionDefault', (['self.DEFAULTKEY_STROKE', '(True)'], {}), '(self.DEFAULTKEY_STROKE, True)\n', (14794, 14824), False, 'from mojo.extensions import getExtensionDefault, setExtensionDefault, getExtensionDefaultColor, setExtensionDefaultColor\n'), ((14845, 14921), 'mojo.extensions.getExtensionDefaultColor', 'getExtensionDefaultColor', (['self.DEFAULTKEY_FILLCOLOR', 'self.FALLBACK_FILLCOLOR'], {}), '(self.DEFAULTKEY_FILLCOLOR, self.FALLBACK_FILLCOLOR)\n', (14869, 14921), False, 'from mojo.extensions import getExtensionDefault, setExtensionDefault, getExtensionDefaultColor, setExtensionDefaultColor\n'), ((23033, 23099), 'mojo.extensions.setExtensionDefaultColor', 'setExtensionDefaultColor', (['self.DEFAULTKEY_FILLCOLOR', 'selectedColor'], {}), '(self.DEFAULTKEY_FILLCOLOR, selectedColor)\n', (23057, 23099), False, 'from mojo.extensions import getExtensionDefault, setExtensionDefault, getExtensionDefaultColor, setExtensionDefaultColor\n'), ((23108, 23174), 'mojo.extensions.setExtensionDefaultColor', 'setExtensionDefaultColor', (['self.DEFAULTKEY_STROKECOLOR', 'strokeColor'], {}), '(self.DEFAULTKEY_STROKECOLOR, strokeColor)\n', (23132, 23174), False, 'from mojo.extensions import getExtensionDefault, setExtensionDefault, getExtensionDefaultColor, setExtensionDefaultColor\n'), ((24133, 24157), 'mojo.UI.UpdateCurrentGlyphView', 'UpdateCurrentGlyphView', ([], {}), '()\n', (24155, 24157), False, 'from mojo.UI import UpdateCurrentGlyphView\n'), ((24268, 24322), 'defconAppKit.windows.baseWindow.BaseWindowController.windowCloseCallback', 'BaseWindowController.windowCloseCallback', (['self', 'sender'], {}), '(self, sender)\n', (24308, 24322), False, 'from defconAppKit.windows.baseWindow import BaseWindowController\n'), ((9087, 9096), 'builtins.chr', 'chr', (['(8634)'], {}), '(8634)\n', (9090, 9096), False, 'from builtins import chr\n'), ((2280, 2301), 'builtins.str', 'str', (['f.info.styleName'], {}), '(f.info.styleName)\n', (2283, 2301), False, 'from builtins import str\n'), ((2253, 2275), 'builtins.str', 'str', (['f.info.familyName'], {}), '(f.info.familyName)\n', (2256, 2275), False, 'from builtins import str\n'), ((14405, 14426), 'builtins.str', 'str', (['f.info.styleName'], {}), '(f.info.styleName)\n', (14408, 14426), False, 'from builtins import str\n'), ((19922, 19952), 'lib.tools.drawing.strokePixelPath', 'strokePixelPath', (['drawGlyphPath'], {}), '(drawGlyphPath)\n', (19937, 19952), False, 'from lib.tools.drawing import strokePixelPath\n'), ((14378, 14400), 'builtins.str', 'str', (['f.info.familyName'], {}), '(f.info.familyName)\n', (14381, 14400), False, 'from builtins import str\n'), ((20875, 20905), 'lib.tools.drawing.strokePixelPath', 'strokePixelPath', (['drawGlyphPath'], {}), '(drawGlyphPath)\n', (20890, 20905), False, 'from lib.tools.drawing import strokePixelPath\n')] |
import requests
import json
import datetime
import sys
from dateutil.parser import parse as to_datetime
try:
import pandas as pd
except ImportError:
pass
from pyteamup.utils.utilities import *
from pyteamup.utils.constants import *
from pyteamup.Event import Event
class Calendar:
def __init__(self, cal_id, api_key):
self.__calendar_id = cal_id
self.__api_key = api_key
self.__cal_base = f'/{cal_id}'
self.__token_str = f'?_teamup_token={self.api_key}'
self.__subcalendars = None
self.__valid_api = None
self.__configuration = None
self._base_url = BASE_URL + self.__cal_base
self._event_collection_url = self._base_url + EVENTS_BASE + self.__token_str
self._subcalendars_url = self._base_url + SUBCALENDARS_BASE + self.__token_str
self._check_access_url = BASE_URL + CHECK_ACCESS_BASE + self.__token_str
self.events_json = None
if not self.valid_api:
raise Exception(f'Invalid Api Key: {self.api_key}')
def __str__(self):
return self.calendar_id
@property
def api_key(self):
return self.__api_key
@property
def calendar_id(self):
return self.__calendar_id
@property
def valid_api(self):
"""Makes a request to the calendar to see if the api is valid"""
if not self.__valid_api:
req = requests.get(self._check_access_url)
try:
check_status_code(req.status_code)
self.__valid_api = True
except:
self.__valid_api = False
return self.__valid_api
else:
return None
@property
def configuration(self):
if self.__configuration is None:
print('Fetching configuration')
req = requests.get(self._base_url + CONFIGURATION_BASE + self.__token_str)
check_status_code(req.status_code)
self.__configuration = json.loads(req.text)['configuration']
return self.__configuration
@property
def subcalendars(self):
if not self.__subcalendars:
print('Fetching Subcalendars')
req = requests.get(self._subcalendars_url)
check_status_code(req.status_code)
self.__subcalendars = json.loads(req.text)['subcalendars']
return self.__subcalendars
def clear_calendar_cache(self):
self.__subcalendars = None
self.__configuration = None
def get_event_collection(self, start_dt=None, end_dt=None, subcal_id=None, returnas='events', markdown=False):
"""
Method allows bulk fetching of events that fall between the provided time frame. If None is provided then
the current date -30 and +180 days is used.
:param start_dt: if set as None then set as today minus 30 days
:param end_dt: if left as None then set as today plus 180 days
:param subcal_id: optional str or list-like if a different calendar should be queried
:return: json of events
"""
if returnas not in ('events', 'dataframe', 'dict'):
            raise TypeError('Returnas not recognized. Recognized values: events, dataframe, dict')
if start_dt is None:
start_dt = datetime.date.today() - datetime.timedelta(30)
if end_dt is None:
end_dt = datetime.date.today() + datetime.timedelta(180)
subcal_par = ''
if subcal_id:
if isinstance(subcal_id, (list, tuple)):
for id in subcal_id:
subcal_par += f'&subcalendarId[]={id}'
else:
subcal_par = f'&subcalendarId[]={subcal_id}'
if markdown == True:
para_markdown = '&format[]=markdown'
else:
para_markdown = ''
parameters = f'&startDate={start_dt.strftime("%Y-%m-%d")}&endDate={end_dt.strftime("%Y-%m-%d")}' + subcal_par + para_markdown
req = requests.get(self._event_collection_url + parameters)
check_status_code(req.status_code)
self.events_json = json.loads(req.text)['events']
if returnas == 'events':
return [Event(self, **event_dict) for event_dict in self.events_json]
elif returnas == 'dataframe' and 'pandas' in sys.modules:
return pd.DataFrame.from_records(self.events_json)
else:
return self.events_json
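    # Illustrative usage sketch (not part of the original pyteamup source); the
    # calendar key and API key below are placeholders, and pandas is only needed
    # when requesting the 'dataframe' return type:
    #   cal = Calendar('example-calendar-key', 'example-api-key')
    #   events = cal.get_event_collection(returnas='dict')
    #   frame = cal.get_event_collection(returnas='dataframe')  # requires pandas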
def _create_event_from_json(self, payload):
""" Lazy Creation of Event by passing a formatted payload"""
resp = requests.post(self._event_collection_url, data=payload, headers=POST_HEADERS)
try:
check_status_code(resp.status_code)
except:
print(payload)
print(resp.text)
raise
return resp.text
def get_event(self, event_id, returnas='event'):
if returnas not in ('event', 'series', 'dict'):
raise TypeError('Returnas not recognized. Recognized values: event, series, dict')
url = self._base_url + EVENTS_BASE + f'/{event_id}' + self.__token_str
resp = requests.get(url)
check_status_code(resp.status_code)
event_dict = json.loads(resp.text)['event']
if returnas == 'event':
return Event(self, **event_dict)
elif returnas == 'series' and 'pandas' in sys.modules:
return pd.Series(event_dict)
else:
return event_dict
def get_subcalendar(self):
raise NotImplementedError
def search_events(self):
raise NotImplementedError
def get_changed_events(self, modified_since, returnas='event'):
"""
Get changed events since given unix time
:param modified_since: <int> Unix timestamp, must be less than 30 days old
        :param returnas: <str> `events` `dataframe` `dict` are valid options
:return: Tuple of event list and returned timestamp
"""
        if returnas not in ('events', 'dataframe', 'dict'):
            raise TypeError('Returnas not recognized. Recognized values: events, dataframe, dict')
url = self._base_url + EVENTS_BASE + self.__token_str + '&modifiedSince=' + str(modified_since)
resp = requests.get(url)
check_status_code(resp.status_code)
events_json = json.loads(resp.text)['events']
timestamp = json.loads(resp.text)['timestamp']
if returnas == 'events':
return [Event(self, **event_dict) for event_dict in events_json], timestamp
elif returnas == 'dataframe' and 'pandas' in sys.modules:
return pd.DataFrame.from_records(events_json), timestamp
else:
return events_json, timestamp
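    # Hedged example (placeholder timestamp value): fetch everything modified in
    # the last day plus the server timestamp to use as the next sync cursor:
    #   import time
    #   changed, cursor = cal.get_changed_events(int(time.time()) - 86400,
    #                                             returnas='events')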
def new_event(self, title, start_dt, end_dt, subcalendar_ids, all_day=False,
notes=None, location=None, who=None, remote_id=None, returnas='event'):
"""
Create a new event within a provided subcalendar. Can return as Event object, Series object, or Dictionary.
Undo_id not included with return unless returnas='event' in which case it is included with the returned Event Object
        :param subcalendar_ids: <str, int, or list-like> Required - the ID(s) of the subcalendar(s) within the calendar the event should be created in.
        :param title: <str> Title of the event
:param start_dt: <datetime> Start Datetime
:param end_dt: <datetime> End Datetime
:param all_day: <Bool> Allday or Not
:param notes: <str> HTML or Markdown formatted string detailing the Description
:param location: <str> Location of the event
:param who: <str>
:param remote_id: <str> Remote ID of the event, used to link the TeamUp event record to its source information
:param returnas: <str> `event` `series` `dict` are valid options
:return:
"""
if returnas not in ('event','dict','series'):
            raise ValueError(f'Unrecognized returnas parameter: {returnas}')
if not isinstance(start_dt, datetime.datetime) or not isinstance(end_dt, datetime.datetime):
try:
start_dt = to_datetime(start_dt)
end_dt = to_datetime(end_dt)
except:
raise ValueError('Parse failed, please pass all dates as a datetime object')
if isinstance(subcalendar_ids, (str, int)):
subcalendar_ids = [subcalendar_ids]
if not isinstance(subcalendar_ids, (tuple, list)):
raise ValueError(f'Unrecognized Type: Subcalendar_ids type: {type(subcalendar_ids)}')
dict = {'remote_id': remote_id,
'title': title,
'subcalendar_ids': subcalendar_ids,
'start_dt': format_date(start_dt),
'end_dt': format_date(end_dt),
'all_day': all_day,
'notes': notes,
'location': location,
'who': who
}
resp_text = self._create_event_from_json(json.dumps(dict))
resp_dict = json.loads(resp_text)
event_dict = resp_dict['event']
undo_id = resp_dict['undo_id']
if returnas == 'event':
return Event(self, undo_id = undo_id, **event_dict)
elif returnas == 'series' and 'pandas' in sys.modules:
return pd.Series(event_dict)
else:
return event_dict
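    # Hedged sketch of creating an event (placeholder subcalendar id; assumes a
    # valid Calendar instance named `cal`):
    #   import datetime
    #   start = datetime.datetime(2021, 1, 1, 9, 0)
    #   end = start + datetime.timedelta(hours=1)
    #   created = cal.new_event('Standup', start, end,
    #                           subcalendar_ids=1234567, returnas='dict')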
| [
"pandas.DataFrame.from_records",
"pandas.Series",
"json.loads",
"requests.post",
"dateutil.parser.parse",
"json.dumps",
"pyteamup.Event.Event",
"requests.get",
"datetime.timedelta",
"datetime.date.today"
]
 | [((3970, 4023), 'requests.get', 'requests.get', (['(self._event_collection_url + parameters)'], {}), '(self._event_collection_url + parameters)\n', (3982, 4023), False, 'import requests\n'), ((4553, 4630), 'requests.post', 'requests.post', (['self._event_collection_url'], {'data': 'payload', 'headers': 'POST_HEADERS'}), '(self._event_collection_url, data=payload, headers=POST_HEADERS)\n', (4566, 4630), False, 'import requests\n'), ((5107, 5124), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (5119, 5124), False, 'import requests\n'), ((6204, 6221), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (6216, 6221), False, 'import requests\n'), ((9021, 9042), 'json.loads', 'json.loads', (['resp_text'], {}), '(resp_text)\n', (9031, 9042), False, 'import json\n'), ((1391, 1427), 'requests.get', 'requests.get', (['self._check_access_url'], {}), '(self._check_access_url)\n', (1403, 1427), False, 'import requests\n'), ((1819, 1887), 'requests.get', 'requests.get', (['(self._base_url + CONFIGURATION_BASE + self.__token_str)'], {}), '(self._base_url + CONFIGURATION_BASE + self.__token_str)\n', (1831, 1887), False, 'import requests\n'), ((2184, 2220), 'requests.get', 'requests.get', (['self._subcalendars_url'], {}), '(self._subcalendars_url)\n', (2196, 2220), False, 'import requests\n'), ((4094, 4114), 'json.loads', 'json.loads', (['req.text'], {}), '(req.text)\n', (4104, 4114), False, 'import json\n'), ((5190, 5211), 'json.loads', 'json.loads', (['resp.text'], {}), '(resp.text)\n', (5200, 5211), False, 'import json\n'), ((5272, 5297), 'pyteamup.Event.Event', 'Event', (['self'], {}), '(self, **event_dict)\n', (5277, 5297), False, 'from pyteamup.Event import Event\n'), ((6288, 6309), 'json.loads', 'json.loads', (['resp.text'], {}), '(resp.text)\n', (6298, 6309), False, 'import json\n'), ((6340, 6361), 'json.loads', 'json.loads', (['resp.text'], {}), '(resp.text)\n', (6350, 6361), False, 'import json\n'), ((8983, 8999), 'json.dumps', 'json.dumps', (['dict'], {}), '(dict)\n', (8993, 8999), False, 'import json\n'), ((9174, 9216), 'pyteamup.Event.Event', 'Event', (['self'], {'undo_id': 'undo_id'}), '(self, undo_id=undo_id, **event_dict)\n', (9179, 9216), False, 'from pyteamup.Event import Event\n'), ((1970, 1990), 'json.loads', 'json.loads', (['req.text'], {}), '(req.text)\n', (1980, 1990), False, 'import json\n'), ((2302, 2322), 'json.loads', 'json.loads', (['req.text'], {}), '(req.text)\n', (2312, 2322), False, 'import json\n'), ((3267, 3288), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (3286, 3288), False, 'import datetime\n'), ((3291, 3313), 'datetime.timedelta', 'datetime.timedelta', (['(30)'], {}), '(30)\n', (3309, 3313), False, 'import datetime\n'), ((3362, 3383), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (3381, 3383), False, 'import datetime\n'), ((3386, 3409), 'datetime.timedelta', 'datetime.timedelta', (['(180)'], {}), '(180)\n', (3404, 3409), False, 'import datetime\n'), ((4179, 4204), 'pyteamup.Event.Event', 'Event', (['self'], {}), '(self, **event_dict)\n', (4184, 4204), False, 'from pyteamup.Event import Event\n'), ((4326, 4369), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['self.events_json'], {}), '(self.events_json)\n', (4351, 4369), True, 'import pandas as pd\n'), ((5380, 5401), 'pandas.Series', 'pd.Series', (['event_dict'], {}), '(event_dict)\n', (5389, 5401), True, 'import pandas as pd\n'), ((8122, 8143), 'dateutil.parser.parse', 'to_datetime', (['start_dt'], {}), '(start_dt)\n', (8133, 8143), True, 'from dateutil.parser import parse as to_datetime\n'), ((8169, 8188), 'dateutil.parser.parse', 'to_datetime', (['end_dt'], {}), '(end_dt)\n', (8180, 8188), True, 'from dateutil.parser import parse as to_datetime\n'), ((9301, 9322), 'pandas.Series', 'pd.Series', (['event_dict'], {}), '(event_dict)\n', (9310, 9322), True, 'import pandas as pd\n'), ((6429, 6454), 'pyteamup.Event.Event', 'Event', (['self'], {}), '(self, **event_dict)\n', (6434, 6454), False, 'from pyteamup.Event import Event\n'), ((6582, 6620), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['events_json'], {}), '(events_json)\n', (6607, 6620), True, 'import pandas as pd\n')] |
from django.conf.urls import patterns, include, url
urlpatterns = patterns('reports.views',
url(r'^index/*$', 'index'),
url(r'^dashboard/*$', 'dashboard'),
url(r'^$', 'index'),
url(r'^detail/(?P<serial>[^/]+)$', 'detail'),
url(r'^detailpkg/(?P<serial>[^/]+)/(?P<manifest_name>[^/]+)$', 'detail_pkg'),
url(r'^detailmachine/(?P<serial>[^/]+)$', 'machine_detail'),
url(r'^appleupdate/(?P<serial>[^/]+)$', 'appleupdate'),
url(r'^raw/(?P<serial>[^/]+)$', 'raw'),
url(r'^submit/(?P<submission_type>[^/]+)$', 'submit'),
url(r'^warranty/(?P<serial>[^/]+)$', 'warranty'),
# for compatibilty with MunkiReport scripts
url(r'^ip$', 'lookup_ip'),
url(r'^(?P<submission_type>[^/]+)$', 'submit'),
) | [
"django.conf.urls.url"
]
| [((97, 122), 'django.conf.urls.url', 'url', (['"""^index/*$"""', '"""index"""'], {}), "('^index/*$', 'index')\n", (100, 122), False, 'from django.conf.urls import patterns, include, url\n'), ((129, 162), 'django.conf.urls.url', 'url', (['"""^dashboard/*$"""', '"""dashboard"""'], {}), "('^dashboard/*$', 'dashboard')\n", (132, 162), False, 'from django.conf.urls import patterns, include, url\n'), ((169, 187), 'django.conf.urls.url', 'url', (['"""^$"""', '"""index"""'], {}), "('^$', 'index')\n", (172, 187), False, 'from django.conf.urls import patterns, include, url\n'), ((194, 237), 'django.conf.urls.url', 'url', (['"""^detail/(?P<serial>[^/]+)$"""', '"""detail"""'], {}), "('^detail/(?P<serial>[^/]+)$', 'detail')\n", (197, 237), False, 'from django.conf.urls import patterns, include, url\n'), ((244, 319), 'django.conf.urls.url', 'url', (['"""^detailpkg/(?P<serial>[^/]+)/(?P<manifest_name>[^/]+)$"""', '"""detail_pkg"""'], {}), "('^detailpkg/(?P<serial>[^/]+)/(?P<manifest_name>[^/]+)$', 'detail_pkg')\n", (247, 319), False, 'from django.conf.urls import patterns, include, url\n'), ((326, 384), 'django.conf.urls.url', 'url', (['"""^detailmachine/(?P<serial>[^/]+)$"""', '"""machine_detail"""'], {}), "('^detailmachine/(?P<serial>[^/]+)$', 'machine_detail')\n", (329, 384), False, 'from django.conf.urls import patterns, include, url\n'), ((391, 444), 'django.conf.urls.url', 'url', (['"""^appleupdate/(?P<serial>[^/]+)$"""', '"""appleupdate"""'], {}), "('^appleupdate/(?P<serial>[^/]+)$', 'appleupdate')\n", (394, 444), False, 'from django.conf.urls import patterns, include, url\n'), ((451, 488), 'django.conf.urls.url', 'url', (['"""^raw/(?P<serial>[^/]+)$"""', '"""raw"""'], {}), "('^raw/(?P<serial>[^/]+)$', 'raw')\n", (454, 488), False, 'from django.conf.urls import patterns, include, url\n'), ((495, 547), 'django.conf.urls.url', 'url', (['"""^submit/(?P<submission_type>[^/]+)$"""', '"""submit"""'], {}), "('^submit/(?P<submission_type>[^/]+)$', 'submit')\n", (498, 547), False, 'from django.conf.urls import patterns, include, url\n'), ((554, 601), 'django.conf.urls.url', 'url', (['"""^warranty/(?P<serial>[^/]+)$"""', '"""warranty"""'], {}), "('^warranty/(?P<serial>[^/]+)$', 'warranty')\n", (557, 601), False, 'from django.conf.urls import patterns, include, url\n'), ((660, 684), 'django.conf.urls.url', 'url', (['"""^ip$"""', '"""lookup_ip"""'], {}), "('^ip$', 'lookup_ip')\n", (663, 684), False, 'from django.conf.urls import patterns, include, url\n'), ((691, 736), 'django.conf.urls.url', 'url', (['"""^(?P<submission_type>[^/]+)$"""', '"""submit"""'], {}), "('^(?P<submission_type>[^/]+)$', 'submit')\n", (694, 736), False, 'from django.conf.urls import patterns, include, url\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import multiprocessing
import pytest
import socket
import signal
import os
import logging
try:
from urllib2 import URLError, urlopen
except ImportError:
from urllib.error import URLError
from urllib.request import urlopen
from flask import _request_ctx_stack
@pytest.yield_fixture
def client(app):
"""A Flask test client. An instance of :class:`flask.testing.TestClient`
by default.
"""
with app.test_client() as client:
yield client
@pytest.fixture
def client_class(request, client):
"""Uses to set a ``client`` class attribute to current Flask test client::
@pytest.mark.usefixtures('client_class')
class TestView:
def login(self, email, password):
credentials = {'email': email, 'password': password}
return self.client.post(url_for('login'), data=credentials)
def test_login(self):
assert self.login('<EMAIL>', 'pass').status_code == 200
"""
if request.cls is not None:
request.cls.client = client
class LiveServer(object):
"""The helper class uses to manage live server. Handles creation and
stopping application in a separate process.
:param app: The application to run.
:param host: The host where to listen (default localhost).
:param port: The port to run application.
"""
def __init__(self, app, host, port, clean_stop=False):
self.app = app
self.port = port
self.host = host
self.clean_stop = clean_stop
self._process = None
def start(self):
"""Start application in a separate process."""
def worker(app, host, port):
app.run(host=host, port=port, use_reloader=False, threaded=True)
self._process = multiprocessing.Process(
target=worker,
args=(self.app, self.host, self.port)
)
self._process.start()
# We must wait for the server to start listening with a maximum
# timeout of 5 seconds.
timeout = 5
while timeout > 0:
time.sleep(1)
try:
urlopen(self.url())
timeout = 0
except URLError:
timeout -= 1
def url(self, url=''):
"""Returns the complete url based on server options."""
return 'http://%s:%d%s' % (self.host, self.port, url)
def stop(self):
"""Stop application process."""
if self._process:
if self.clean_stop and self._stop_cleanly():
return
if self._process.is_alive():
# If it's still alive, kill it
self._process.terminate()
def _stop_cleanly(self, timeout=5):
"""Attempts to stop the server cleanly by sending a SIGINT signal and waiting for
``timeout`` seconds.
:return: True if the server was cleanly stopped, False otherwise.
"""
try:
os.kill(self._process.pid, signal.SIGINT)
self._process.join(timeout)
return True
except Exception as ex:
logging.error('Failed to join the live server process: %r', ex)
return False
def __repr__(self):
return '<LiveServer listening at %s>' % self.url()
def _rewrite_server_name(server_name, new_port):
"""Rewrite server port in ``server_name`` with ``new_port`` value."""
sep = ':'
if sep in server_name:
server_name, port = server_name.split(sep, 1)
    return sep.join((server_name, new_port))
@pytest.fixture(scope='function')
def live_server(request, app, monkeypatch, pytestconfig):
"""Run application in a separate process.
When the ``live_server`` fixture is applied, the ``url_for`` function
works as expected::
def test_server_is_up_and_running(live_server):
index_url = url_for('index', _external=True)
assert index_url == 'http://localhost:5000/'
res = urllib2.urlopen(index_url)
assert res.code == 200
"""
port = pytestconfig.getvalue('live_server_port')
if port == 0:
# Bind to an open port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 0))
port = s.getsockname()[1]
s.close()
host = pytestconfig.getvalue('live_server_host')
# Explicitly set application ``SERVER_NAME`` for test suite
# and restore original value on test teardown.
server_name = app.config['SERVER_NAME'] or 'localhost'
monkeypatch.setitem(app.config, 'SERVER_NAME',
_rewrite_server_name(server_name, str(port)))
clean_stop = request.config.getvalue('live_server_clean_stop')
server = LiveServer(app, host, port, clean_stop)
if request.config.getvalue('start_live_server'):
server.start()
request.addfinalizer(server.stop)
return server
@pytest.fixture
def config(app):
"""An application config."""
return app.config
@pytest.fixture
def request_ctx(app):
"""The request context which contains all request relevant information,
e.g. `session`, `g`, `flashes`, etc.
"""
return _request_ctx_stack.top
@pytest.fixture(params=['application/json', 'text/html'])
def mimetype(request):
return request.param
def _make_accept_header(mimetype):
return [('Accept', mimetype)]
@pytest.fixture
def accept_mimetype(mimetype):
return _make_accept_header(mimetype)
@pytest.fixture
def accept_json(request):
return _make_accept_header('application/json')
@pytest.fixture
def accept_jsonp():
return _make_accept_header('application/json-p')
@pytest.fixture(params=['*', '*/*'])
def accept_any(request):
return _make_accept_header(request.param)
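# Illustrative test sketch (not part of the plugin itself) showing how the
# accept_* fixtures are typically consumed; `url_for('ping')` is a placeholder
# endpoint name:
#   def test_json_endpoint(client, accept_json):
#       res = client.get(url_for('ping'), headers=accept_json)
#       assert res.mimetype == 'application/json'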
| [
"os.kill",
"socket.socket",
"multiprocessing.Process",
"time.sleep",
"pytest.fixture",
"logging.error"
]
| [((3608, 3640), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (3622, 3640), False, 'import pytest\n'), ((5242, 5298), 'pytest.fixture', 'pytest.fixture', ([], {'params': "['application/json', 'text/html']"}), "(params=['application/json', 'text/html'])\n", (5256, 5298), False, 'import pytest\n'), ((5697, 5732), 'pytest.fixture', 'pytest.fixture', ([], {'params': "['*', '*/*']"}), "(params=['*', '*/*'])\n", (5711, 5732), False, 'import pytest\n'), ((1835, 1912), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'worker', 'args': '(self.app, self.host, self.port)'}), '(target=worker, args=(self.app, self.host, self.port))\n', (1858, 1912), False, 'import multiprocessing\n'), ((4220, 4269), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (4233, 4269), False, 'import socket\n'), ((2141, 2154), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2151, 2154), False, 'import time\n'), ((3017, 3058), 'os.kill', 'os.kill', (['self._process.pid', 'signal.SIGINT'], {}), '(self._process.pid, signal.SIGINT)\n', (3024, 3058), False, 'import os\n'), ((3167, 3230), 'logging.error', 'logging.error', (['"""Failed to join the live server process: %r"""', 'ex'], {}), "('Failed to join the live server process: %r', ex)\n", (3180, 3230), False, 'import logging\n')] |
from .exceptions import (
ServerResponseError,
InternalServerError,
NonXMLResponseError,
EndpointUnavailableError,
)
from functools import wraps
from xml.etree.ElementTree import ParseError
from ..query import QuerySet
import logging
try:
from distutils2.version import NormalizedVersion as Version
except ImportError:
from distutils.version import LooseVersion as Version
logger = logging.getLogger("tableau.endpoint")
Success_codes = [200, 201, 202, 204]
class Endpoint(object):
def __init__(self, parent_srv):
self.parent_srv = parent_srv
@staticmethod
def _make_common_headers(auth_token, content_type):
headers = {}
if auth_token is not None:
headers["x-tableau-auth"] = auth_token
if content_type is not None:
headers["content-type"] = content_type
return headers
@staticmethod
def _safe_to_log(server_response):
"""Checks if the server_response content is not xml (eg binary image or zip)
and replaces it with a constant
"""
ALLOWED_CONTENT_TYPES = ("application/xml", "application/xml;charset=utf-8")
if server_response.headers.get("Content-Type", None) not in ALLOWED_CONTENT_TYPES:
return "[Truncated File Contents]"
else:
return server_response.content
def _make_request(
self,
method,
url,
content=None,
auth_token=None,
content_type=None,
parameters=None,
):
parameters = parameters or {}
parameters.update(self.parent_srv.http_options)
parameters["headers"] = Endpoint._make_common_headers(auth_token, content_type)
if content is not None:
parameters["data"] = content
logger.debug(u"request {}, url: {}".format(method.__name__, url))
if content:
logger.debug(u"request content: {}".format(content[:1000]))
server_response = method(url, **parameters)
self.parent_srv._namespace.detect(server_response.content)
self._check_status(server_response)
# This check is to determine if the response is a text response (xml or otherwise)
# so that we do not attempt to log bytes and other binary data.
if len(server_response.content) > 0 and server_response.encoding:
logger.debug(
u"Server response from {0}:\n\t{1}".format(
url, server_response.content.decode(server_response.encoding)
)
)
return server_response
def _check_status(self, server_response):
if server_response.status_code >= 500:
raise InternalServerError(server_response)
elif server_response.status_code not in Success_codes:
try:
raise ServerResponseError.from_response(server_response.content, self.parent_srv.namespace)
except ParseError:
# This will happen if we get a non-success HTTP code that
# doesn't return an xml error object (like metadata endpoints)
# we convert this to a better exception and pass through the raw
# response body
raise NonXMLResponseError(server_response.content)
except Exception:
# anything else re-raise here
raise
def get_unauthenticated_request(self, url):
return self._make_request(self.parent_srv.session.get, url)
def get_request(self, url, request_object=None, parameters=None):
if request_object is not None:
try:
# Query param delimiters don't need to be encoded for versions before 3.7 (2020.1)
self.parent_srv.assert_at_least_version("3.7")
parameters = parameters or {}
parameters["params"] = request_object.get_query_params()
except EndpointUnavailableError:
url = request_object.apply_query_params(url)
return self._make_request(
self.parent_srv.session.get,
url,
auth_token=self.parent_srv.auth_token,
parameters=parameters,
)
def delete_request(self, url):
# We don't return anything for a delete
self._make_request(self.parent_srv.session.delete, url, auth_token=self.parent_srv.auth_token)
def put_request(self, url, xml_request=None, content_type="text/xml"):
return self._make_request(
self.parent_srv.session.put,
url,
content=xml_request,
auth_token=self.parent_srv.auth_token,
content_type=content_type,
)
def post_request(self, url, xml_request, content_type="text/xml"):
return self._make_request(
self.parent_srv.session.post,
url,
content=xml_request,
auth_token=self.parent_srv.auth_token,
content_type=content_type,
)
def api(version):
"""Annotate the minimum supported version for an endpoint.
Checks the version on the server object and compares normalized versions.
    It will raise an exception if the server version is earlier than the version specified.
Args:
`version` minimum version that supports the endpoint. String.
Raises:
EndpointUnavailableError
Returns:
None
Example:
>>> @api(version="2.3")
>>> def get(self, req_options=None):
>>> ...
"""
def _decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
self.parent_srv.assert_at_least_version(version)
return func(self, *args, **kwargs)
return wrapper
return _decorator
def parameter_added_in(**params):
"""Annotate minimum versions for new parameters or request options on an endpoint.
The api decorator documents when an endpoint was added, this decorator annotates
keyword arguments on endpoints that may control functionality added after an endpoint was introduced.
The REST API will ignore invalid parameters in most cases, so this raises a warning instead of throwing
an exception.
Args:
Key/value pairs of the form `parameter`=`version`. Kwargs.
Raises:
UserWarning
Returns:
None
Example:
>>> @api(version="2.0")
>>> @parameter_added_in(no_extract='2.5')
>>> def download(self, workbook_id, filepath=None, extract_only=False):
>>> ...
"""
def _decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
import warnings
server_ver = Version(self.parent_srv.version or "0.0")
params_to_check = set(params) & set(kwargs)
for p in params_to_check:
min_ver = Version(str(params[p]))
if server_ver < min_ver:
error = "{!r} not available in {}, it will be ignored. Added in {}".format(p, server_ver, min_ver)
warnings.warn(error)
return func(self, *args, **kwargs)
return wrapper
return _decorator
class QuerysetEndpoint(Endpoint):
@api(version="2.0")
def all(self, *args, **kwargs):
queryset = QuerySet(self)
return queryset
@api(version="2.0")
def filter(self, *args, **kwargs):
queryset = QuerySet(self).filter(**kwargs)
return queryset
@api(version="2.0")
def order_by(self, *args, **kwargs):
queryset = QuerySet(self).order_by(*args)
return queryset
@api(version="2.0")
def paginate(self, **kwargs):
queryset = QuerySet(self).paginate(**kwargs)
return queryset
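# Hedged usage note (not part of this module): concrete endpoints that inherit
# from QuerysetEndpoint expose the helpers above, so calls can be chained and
# evaluated lazily, e.g. `server.workbooks.filter(name='Sales').order_by('name')`;
# the actual filter and sort field names depend on the endpoint being queried.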
| [
"logging.getLogger",
"distutils.version.LooseVersion",
"warnings.warn",
"functools.wraps"
]
| [((408, 445), 'logging.getLogger', 'logging.getLogger', (['"""tableau.endpoint"""'], {}), "('tableau.endpoint')\n", (425, 445), False, 'import logging\n'), ((5527, 5538), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (5532, 5538), False, 'from functools import wraps\n'), ((6540, 6551), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (6545, 6551), False, 'from functools import wraps\n'), ((6650, 6691), 'distutils.version.LooseVersion', 'Version', (["(self.parent_srv.version or '0.0')"], {}), "(self.parent_srv.version or '0.0')\n", (6657, 6691), True, 'from distutils.version import LooseVersion as Version\n'), ((7016, 7036), 'warnings.warn', 'warnings.warn', (['error'], {}), '(error)\n', (7029, 7036), False, 'import warnings\n')] |
import unittest
from freeplane_importer.importer import Importer
from mock import Mock
from mock import MagicMock
from mock import call
from freeplane_importer.model_not_found_exception import ModelNotFoundException
class TestImporter(unittest.TestCase):
def setUp(self):
self.mock_collection = Mock()
self.mock_model = MagicMock()
self.mock_collection.models.byName.return_value = self.mock_model
self.mock_note = MagicMock()
self.mock_note.model.return_value = self.mock_model
self.mock_collection.newNote.return_value = self.mock_note
self.mock_collection.models.fieldNames.return_value = []
self.importer = Importer(self.mock_collection)
self.mock_collection.db.scalar.return_value = None
self.note = {
'id': 100,
'deck': 'History',
'model': 'Basic',
'fields': {}
}
def test_it_should_initialise_the_correct_model(self):
self.importer.import_note(self.note)
self.mock_collection.models.setCurrent.assert_called_with(
self.mock_model)
def test_it_should_select_the_correct_deck(self):
self.mock_collection.decks.id.return_value = 100
self.importer = Importer(self.mock_collection)
self.importer.import_note(self.note)
self.mock_model.__setitem__.assert_called_with('did', 100)
self.mock_collection.decks.id.assert_called_with('History')
def test_it_should_find_the_correct_model(self):
self.importer.import_note(self.note)
self.mock_collection.models.byName.assert_called_with('Basic')
def test_it_should_return_true_if_note_was_added_successfully(self):
self.assertTrue(self.importer.import_note(self.note))
def test_it_should_raise_a_no_model_exception_if_the_model_does_not_exist(self):
self.mock_collection.models.byName.return_value = None
self.assertRaises(ModelNotFoundException,
self.importer.import_note, self.note)
def test_it_should_create_a_new_note(self):
self.importer.import_note(self.note)
self.mock_collection.newNote.assert_called_with()
def test_it_should_get_the_field_names_from_the_model(self):
self.importer.import_note(self.note)
self.mock_collection.models.fieldNames.assert_called_with(
self.mock_model)
def test_it_should_save_the_node_id_if_the_first_field_is_named_id_in_lowercase(self):
self.mock_collection.models.fieldNames.return_value = ['id']
self.importer.import_note(self.note)
self.mock_note.__setitem__.assert_called_with('id', 100)
def test_it_should_save_the_node_id_if_the_first_field_is_named_id_in_uppercase(self):
self.mock_collection.models.fieldNames.return_value = ['ID']
self.importer.import_note(self.note)
self.mock_note.__setitem__.assert_called_with('ID', 100)
def test_it_should_populate_the_note_with_the_field_values(self):
self.note['fields'] = {
'Front': 'Front value',
'Back': 'Back value'
}
self.mock_collection.models.fieldNames.return_value = ['Front', 'Back']
self.importer.import_note(self.note)
self.mock_note.__setitem__.assert_has_calls(
[call('Front', 'Front value'), call('Back', 'Back value')])
def test_it_should_ignore_fields_that_do_not_exist_in_the_model(self):
self.note['fields'] = {
'Front': 'Front value',
'Back': 'Back value'
}
self.mock_collection.models.fieldNames.return_value = ['Front']
self.importer.import_note(self.note)
self.assertFalse('Back' in self.mock_note)
def test_it_should_save_the_note_changes(self):
self.importer.import_note(self.note)
self.mock_note.flush.assert_called_with()
def test_it_should_attempt_to_find_an_existing_note_with_the_given_node_id(self):
self.mock_collection.getNote.return_value = self.mock_note
self.mock_collection.db.scalar.return_value = 123
self.importer.import_note(self.note)
self.mock_collection.getNote.assert_called_with(123)
def test_it_should_add_the_note_to_the_collection_if_it_is_new(self):
del self.mock_note.mod
self.importer.import_note(self.note)
self.mock_collection.addNote.assert_called_with(self.mock_note)
def test_it_should_not_add_the_note_to_the_collection_if_it_is_not_new(self):
self.importer.import_note(self.note)
self.assertEqual(0, self.mock_collection.addNote.call_count)
| [
"mock.Mock",
"mock.call",
"mock.MagicMock",
"freeplane_importer.importer.Importer"
]
| [((311, 317), 'mock.Mock', 'Mock', ([], {}), '()\n', (315, 317), False, 'from mock import Mock\n'), ((345, 356), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (354, 356), False, 'from mock import MagicMock\n'), ((457, 468), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (466, 468), False, 'from mock import MagicMock\n'), ((687, 717), 'freeplane_importer.importer.Importer', 'Importer', (['self.mock_collection'], {}), '(self.mock_collection)\n', (695, 717), False, 'from freeplane_importer.importer import Importer\n'), ((1257, 1287), 'freeplane_importer.importer.Importer', 'Importer', (['self.mock_collection'], {}), '(self.mock_collection)\n', (1265, 1287), False, 'from freeplane_importer.importer import Importer\n'), ((3315, 3343), 'mock.call', 'call', (['"""Front"""', '"""Front value"""'], {}), "('Front', 'Front value')\n", (3319, 3343), False, 'from mock import call\n'), ((3345, 3371), 'mock.call', 'call', (['"""Back"""', '"""Back value"""'], {}), "('Back', 'Back value')\n", (3349, 3371), False, 'from mock import call\n')] |
import argparse
import numpy as np
from scipy.stats import linregress
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument("--plot", action="store_const", default=False, const=True)
args = parser.parse_args()
data = np.loadtxt("../data/data.csv", skiprows=1, usecols=list(range(1,8)), delimiter=",")[33:,:]
xdays = data[:,0] - np.mean(data[:,0])
deaths = data[:,-1]
print(xdays, deaths)
logdeaths = np.log(deaths)
slope, offset, rval, pval, stderr = linregress(xdays, logdeaths)
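# Recompute the slope's standard error by hand: sqrt(residual sum of squares
# over (n - 2)) divided by sqrt(sum((x - mean(x))**2)), the usual OLS estimate
# for a simple linear regression.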
stderr = np.sqrt(np.sum((logdeaths-(slope*xdays+offset))**2) / (len(logdeaths)-2.)) / np.sqrt(np.sum((xdays - np.mean(xdays))**2))
if args.plot:
plt.plot(xdays, np.exp(offset + slope*xdays), 'C0-')
plt.plot(xdays, np.exp(offset + (slope+stderr)*xdays), 'C0--')
plt.plot(xdays, np.exp(offset + (slope-stderr)*xdays), 'C0--')
plt.plot(xdays, deaths, 'C0o')
plt.gca().set_yscale("log")
plt.show()
print("Slope: %.3e" % slope)
print("Doubling every: %.2f" % (np.log(2)/slope))
print("R-squared: %.3f" % (rval*rval))
print("Stderr: %.3e" % stderr)
| [
"scipy.stats.linregress",
"numpy.mean",
"argparse.ArgumentParser",
"matplotlib.pyplot.gca",
"numpy.log",
"matplotlib.pyplot.plot",
"numpy.exp",
"numpy.sum",
"matplotlib.pyplot.show"
]
| [((112, 137), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (135, 137), False, 'import argparse\n'), ((435, 449), 'numpy.log', 'np.log', (['deaths'], {}), '(deaths)\n', (441, 449), True, 'import numpy as np\n'), ((487, 515), 'scipy.stats.linregress', 'linregress', (['xdays', 'logdeaths'], {}), '(xdays, logdeaths)\n', (497, 515), False, 'from scipy.stats import linregress\n'), ((363, 382), 'numpy.mean', 'np.mean', (['data[:, 0]'], {}), '(data[:, 0])\n', (370, 382), True, 'import numpy as np\n'), ((861, 891), 'matplotlib.pyplot.plot', 'plt.plot', (['xdays', 'deaths', '"""C0o"""'], {}), "(xdays, deaths, 'C0o')\n", (869, 891), True, 'import matplotlib.pyplot as plt\n'), ((928, 938), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (936, 938), True, 'import matplotlib.pyplot as plt\n'), ((686, 716), 'numpy.exp', 'np.exp', (['(offset + slope * xdays)'], {}), '(offset + slope * xdays)\n', (692, 716), True, 'import numpy as np\n'), ((743, 784), 'numpy.exp', 'np.exp', (['(offset + (slope + stderr) * xdays)'], {}), '(offset + (slope + stderr) * xdays)\n', (749, 784), True, 'import numpy as np\n'), ((810, 851), 'numpy.exp', 'np.exp', (['(offset + (slope - stderr) * xdays)'], {}), '(offset + (slope - stderr) * xdays)\n', (816, 851), True, 'import numpy as np\n'), ((533, 588), 'numpy.sum', 'np.sum', (['((logdeaths - (slope * logdeaths + offset)) ** 2)'], {}), '((logdeaths - (slope * logdeaths + offset)) ** 2)\n', (539, 588), True, 'import numpy as np\n'), ((896, 905), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (903, 905), True, 'import matplotlib.pyplot as plt\n'), ((1001, 1010), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1007, 1010), True, 'import numpy as np\n'), ((630, 644), 'numpy.mean', 'np.mean', (['xdays'], {}), '(xdays)\n', (637, 644), True, 'import numpy as np\n')] |
import sys
import os
import re
import json
import pickle
import tempfile
import itertools
import contextlib
import xml.etree.cElementTree as cET
from copy import deepcopy
from xml.etree import ElementTree as ET
from pprint import pformat
from .constants import (
MARKER_COLOR,
MARKER_DURATION,
MARKER_NAME,
COLOR_MAP,
MARKER_PUBLISH_DEFAULT
)
import openpype.api as openpype
log = openpype.Logger.get_logger(__name__)
FRAME_PATTERN = re.compile(r"[\._](\d+)[\.]")
class CTX:
# singleton used for passing data between api modules
app_framework = None
flame_apps = []
selection = None
@contextlib.contextmanager
def io_preferences_file(klass, filepath, write=False):
try:
flag = "w" if write else "r"
yield open(filepath, flag)
except IOError as _error:
klass.log.info("Unable to work with preferences `{}`: {}".format(
filepath, _error))
class FlameAppFramework(object):
# flameAppFramework class takes care of preferences
class prefs_dict(dict):
def __init__(self, master, name, **kwargs):
self.name = name
self.master = master
if not self.master.get(self.name):
self.master[self.name] = {}
self.master[self.name].__init__()
def __getitem__(self, k):
return self.master[self.name].__getitem__(k)
def __setitem__(self, k, v):
return self.master[self.name].__setitem__(k, v)
def __delitem__(self, k):
return self.master[self.name].__delitem__(k)
def get(self, k, default=None):
return self.master[self.name].get(k, default)
def setdefault(self, k, default=None):
return self.master[self.name].setdefault(k, default)
def pop(self, *args, **kwargs):
return self.master[self.name].pop(*args, **kwargs)
def update(self, mapping=(), **kwargs):
self.master[self.name].update(mapping, **kwargs)
def __contains__(self, k):
return self.master[self.name].__contains__(k)
def copy(self): # don"t delegate w/ super - dict.copy() -> dict :(
return type(self)(self)
def keys(self):
return self.master[self.name].keys()
@classmethod
def fromkeys(cls, keys, v=None):
return cls.master[cls.name].fromkeys(keys, v)
def __repr__(self):
return "{0}({1})".format(
type(self).__name__, self.master[self.name].__repr__())
def master_keys(self):
return self.master.keys()
def __init__(self):
self.name = self.__class__.__name__
self.bundle_name = "OpenPypeFlame"
# self.prefs scope is limited to flame project and user
self.prefs = {}
self.prefs_user = {}
self.prefs_global = {}
self.log = log
try:
import flame
self.flame = flame
self.flame_project_name = self.flame.project.current_project.name
self.flame_user_name = flame.users.current_user.name
except Exception:
self.flame = None
self.flame_project_name = None
self.flame_user_name = None
import socket
self.hostname = socket.gethostname()
if sys.platform == "darwin":
self.prefs_folder = os.path.join(
os.path.expanduser("~"),
"Library",
"Caches",
"OpenPype",
self.bundle_name
)
elif sys.platform.startswith("linux"):
self.prefs_folder = os.path.join(
os.path.expanduser("~"),
".OpenPype",
self.bundle_name)
self.prefs_folder = os.path.join(
self.prefs_folder,
self.hostname,
)
self.log.info("[{}] waking up".format(self.__class__.__name__))
try:
self.load_prefs()
except RuntimeError:
self.save_prefs()
# menu auto-refresh defaults
if not self.prefs_global.get("menu_auto_refresh"):
self.prefs_global["menu_auto_refresh"] = {
"media_panel": True,
"batch": True,
"main_menu": True,
"timeline_menu": True
}
self.apps = []
def get_pref_file_paths(self):
prefix = self.prefs_folder + os.path.sep + self.bundle_name
prefs_file_path = "_".join([
prefix, self.flame_user_name,
self.flame_project_name]) + ".prefs"
prefs_user_file_path = "_".join([
prefix, self.flame_user_name]) + ".prefs"
prefs_global_file_path = prefix + ".prefs"
return (prefs_file_path, prefs_user_file_path, prefs_global_file_path)
def load_prefs(self):
(proj_pref_path, user_pref_path,
glob_pref_path) = self.get_pref_file_paths()
with io_preferences_file(self, proj_pref_path) as prefs_file:
self.prefs = pickle.load(prefs_file)
self.log.info(
"Project - preferences contents:\n{}".format(
pformat(self.prefs)
))
with io_preferences_file(self, user_pref_path) as prefs_file:
self.prefs_user = pickle.load(prefs_file)
self.log.info(
"User - preferences contents:\n{}".format(
pformat(self.prefs_user)
))
with io_preferences_file(self, glob_pref_path) as prefs_file:
self.prefs_global = pickle.load(prefs_file)
self.log.info(
"Global - preferences contents:\n{}".format(
pformat(self.prefs_global)
))
return True
def save_prefs(self):
# make sure the preference folder is available
if not os.path.isdir(self.prefs_folder):
try:
os.makedirs(self.prefs_folder)
except Exception:
self.log.info("Unable to create folder {}".format(
self.prefs_folder))
return False
# get all pref file paths
(proj_pref_path, user_pref_path,
glob_pref_path) = self.get_pref_file_paths()
with io_preferences_file(self, proj_pref_path, True) as prefs_file:
pickle.dump(self.prefs, prefs_file)
self.log.info(
"Project - preferences contents:\n{}".format(
pformat(self.prefs)
))
with io_preferences_file(self, user_pref_path, True) as prefs_file:
pickle.dump(self.prefs_user, prefs_file)
self.log.info(
"User - preferences contents:\n{}".format(
pformat(self.prefs_user)
))
with io_preferences_file(self, glob_pref_path, True) as prefs_file:
pickle.dump(self.prefs_global, prefs_file)
self.log.info(
"Global - preferences contents:\n{}".format(
pformat(self.prefs_global)
))
return True
def get_current_project():
import flame
return flame.project.current_project
def get_current_sequence(selection):
import flame
def segment_to_sequence(_segment):
track = _segment.parent
version = track.parent
return version.parent
process_timeline = None
if len(selection) == 1:
if isinstance(selection[0], flame.PySequence):
process_timeline = selection[0]
if isinstance(selection[0], flame.PySegment):
process_timeline = segment_to_sequence(selection[0])
else:
for segment in selection:
if isinstance(segment, flame.PySegment):
process_timeline = segment_to_sequence(segment)
break
return process_timeline
def rescan_hooks():
import flame
try:
flame.execute_shortcut('Rescan Python Hooks')
except Exception:
pass
def get_metadata(project_name, _log=None):
# TODO: can be replaced by MediaInfoFile class method
from adsk.libwiretapPythonClientAPI import (
WireTapClient,
WireTapServerHandle,
WireTapNodeHandle,
WireTapStr
)
class GetProjectColorPolicy(object):
def __init__(self, host_name=None, _log=None):
# Create a connection to the Backburner manager using the Wiretap
# python API.
#
self.log = _log or log
self.host_name = host_name or "localhost"
self._wiretap_client = WireTapClient()
if not self._wiretap_client.init():
raise Exception("Could not initialize Wiretap Client")
self._server = WireTapServerHandle(
"{}:IFFFS".format(self.host_name))
def process(self, project_name):
policy_node_handle = WireTapNodeHandle(
self._server,
"/projects/{}/syncolor/policy".format(project_name)
)
self.log.info(policy_node_handle)
policy = WireTapStr()
if not policy_node_handle.getNodeTypeStr(policy):
self.log.warning(
"Could not retrieve policy of '%s': %s" % (
policy_node_handle.getNodeId().id(),
policy_node_handle.lastError()
)
)
return policy.c_str()
policy_wiretap = GetProjectColorPolicy(_log=_log)
return policy_wiretap.process(project_name)
def get_segment_data_marker(segment, with_marker=None):
"""
Get openpype track item tag created by creator or loader plugin.
Attributes:
segment (flame.PySegment): flame api object
with_marker (bool)[optional]: if true it will return also marker object
Returns:
dict: openpype tag data
Returns(with_marker=True):
flame.PyMarker, dict
"""
for marker in segment.markers:
comment = marker.comment.get_value()
color = marker.colour.get_value()
name = marker.name.get_value()
if (name == MARKER_NAME) and (
color == COLOR_MAP[MARKER_COLOR]):
if not with_marker:
return json.loads(comment)
else:
return marker, json.loads(comment)
def set_segment_data_marker(segment, data=None):
"""
Set openpype track item tag to input segment.
Attributes:
segment (flame.PySegment): flame api object
Returns:
dict: json loaded data
"""
data = data or dict()
marker_data = get_segment_data_marker(segment, True)
if marker_data:
# get available openpype tag if any
marker, tag_data = marker_data
# update tag data with new data
tag_data.update(data)
# update marker with tag data
marker.comment = json.dumps(tag_data)
else:
# update tag data with new data
marker = create_segment_data_marker(segment)
# add tag data to marker's comment
marker.comment = json.dumps(data)
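# Hedged round-trip sketch (not part of the original module), assuming `segment`
# is a flame.PySegment taken from the current selection:
#   set_segment_data_marker(segment, {"publish": True})
#   data = get_segment_data_marker(segment)  # -> {"publish": True, ...}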
def set_publish_attribute(segment, value):
""" Set Publish attribute in input Tag object
Attribute:
segment (flame.PySegment)): flame api object
value (bool): True or False
"""
tag_data = get_segment_data_marker(segment)
tag_data["publish"] = value
# set data to the publish attribute
set_segment_data_marker(segment, tag_data)
def get_publish_attribute(segment):
""" Get Publish attribute from input Tag object
Attribute:
segment (flame.PySegment)): flame api object
Returns:
bool: True or False
"""
tag_data = get_segment_data_marker(segment)
if not tag_data:
set_publish_attribute(segment, MARKER_PUBLISH_DEFAULT)
return MARKER_PUBLISH_DEFAULT
return tag_data["publish"]
def create_segment_data_marker(segment):
""" Create openpype marker on a segment.
Attributes:
segment (flame.PySegment): flame api object
Returns:
flame.PyMarker: flame api object
"""
# get duration of segment
duration = segment.record_duration.relative_frame
# calculate start frame of the new marker
start_frame = int(segment.record_in.relative_frame) + int(duration / 2)
# create marker
marker = segment.create_marker(start_frame)
# set marker name
marker.name = MARKER_NAME
# set duration
marker.duration = MARKER_DURATION
# set colour
marker.colour = COLOR_MAP[MARKER_COLOR] # Red
return marker
def get_sequence_segments(sequence, selected=False):
segments = []
# loop versions in sequence
for ver in sequence.versions:
# loop track in versions
for track in ver.tracks:
# ignore all empty tracks and hidden too
if len(track.segments) == 0 and track.hidden:
continue
# loop all segment in remaining tracks
for segment in track.segments:
if segment.name.get_value() == "":
continue
if segment.hidden.get_value() is True:
continue
if (
selected is True
and segment.selected.get_value() is not True
):
continue
# add it to original selection
segments.append(segment)
return segments
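# Illustrative sketch (placeholder selection) combining the helpers above to
# collect only the selected segments of the sequence the user is working in:
#   sequence = get_current_sequence(CTX.selection or [])
#   selected = get_sequence_segments(sequence, selected=True)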
@contextlib.contextmanager
def maintained_segment_selection(sequence):
"""Maintain selection during context
Attributes:
sequence (flame.PySequence): python api object
Yield:
list of flame.PySegment
Example:
>>> with maintained_segment_selection(sequence) as selected_segments:
... for segment in selected_segments:
... segment.selected = False
>>> print(segment.selected)
True
"""
selected_segments = get_sequence_segments(sequence, True)
try:
# do the operation on selected segments
yield selected_segments
finally:
# reset all selected clips
reset_segment_selection(sequence)
# select only original selection of segments
for segment in selected_segments:
segment.selected = True
def reset_segment_selection(sequence):
"""Deselect all selected nodes
"""
for ver in sequence.versions:
for track in ver.tracks:
if len(track.segments) == 0 and track.hidden:
continue
for segment in track.segments:
segment.selected = False
def _get_shot_tokens_values(clip, tokens):
old_value = None
output = {}
if not clip.shot_name:
return output
old_value = clip.shot_name.get_value()
for token in tokens:
clip.shot_name.set_value(token)
_key = str(re.sub("[<>]", "", token)).replace(" ", "_")
try:
output[_key] = int(clip.shot_name.get_value())
except ValueError:
output[_key] = clip.shot_name.get_value()
clip.shot_name.set_value(old_value)
return output
def get_segment_attributes(segment):
if segment.name.get_value() == "":
return None
# Add timeline segment to tree
clip_data = {
"shot_name": segment.shot_name.get_value(),
"segment_name": segment.name.get_value(),
"segment_comment": segment.comment.get_value(),
"tape_name": segment.tape_name,
"source_name": segment.source_name,
"fpath": segment.file_path,
"PySegment": segment
}
# head and tail with forward compatibility
if segment.head:
# `infinite` can be also returned
if isinstance(segment.head, str):
clip_data["segment_head"] = 0
else:
clip_data["segment_head"] = int(segment.head)
if segment.tail:
# `infinite` can be also returned
if isinstance(segment.tail, str):
clip_data["segment_tail"] = 0
else:
clip_data["segment_tail"] = int(segment.tail)
# add all available shot tokens
shot_tokens = _get_shot_tokens_values(segment, [
"<colour space>", "<width>", "<height>", "<depth>", "<segment>",
"<track>", "<track name>"
])
clip_data.update(shot_tokens)
# populate shot source metadata
segment_attrs = [
"record_duration", "record_in", "record_out",
"source_duration", "source_in", "source_out"
]
segment_attrs_data = {}
for attr_name in segment_attrs:
if not hasattr(segment, attr_name):
continue
attr = getattr(segment, attr_name)
segment_attrs_data[attr] = str(attr).replace("+", ":")
if attr_name in ["record_in", "record_out"]:
clip_data[attr_name] = attr.relative_frame
else:
clip_data[attr_name] = attr.frame
clip_data["segment_timecodes"] = segment_attrs_data
return clip_data
def get_clips_in_reels(project):
output_clips = []
project_desktop = project.current_workspace.desktop
for reel_group in project_desktop.reel_groups:
for reel in reel_group.reels:
for clip in reel.clips:
clip_data = {
"PyClip": clip,
"fps": float(str(clip.frame_rate)[:-4])
}
attrs = [
"name", "width", "height",
"ratio", "sample_rate", "bit_depth"
]
for attr in attrs:
val = getattr(clip, attr)
clip_data[attr] = val
version = clip.versions[-1]
track = version.tracks[-1]
for segment in track.segments:
segment_data = get_segment_attributes(segment)
clip_data.update(segment_data)
output_clips.append(clip_data)
return output_clips
def get_reformated_filename(filename, padded=True):
"""
Return fixed python expression path
Args:
filename (str): file name
Returns:
        str: string with the reformatted path
Example:
get_reformated_filename("plate.1001.exr") > plate.%04d.exr
"""
found = FRAME_PATTERN.search(filename)
if not found:
log.info("File name is not sequence: {}".format(filename))
return filename
padding = get_padding_from_filename(filename)
replacement = "%0{}d".format(padding) if padded else "%d"
start_idx, end_idx = found.span(1)
return replacement.join(
[filename[:start_idx], filename[end_idx:]]
)
def get_padding_from_filename(filename):
"""
Return padding number from Flame path style
Args:
filename (str): file name
Returns:
int: padding number
Example:
get_padding_from_filename("plate.0001.exr") > 4
"""
found = get_frame_from_filename(filename)
return len(found) if found else None
def get_frame_from_filename(filename):
"""
Return sequence number from Flame path style
Args:
filename (str): file name
Returns:
int: sequence frame number
Example:
        get_frame_from_filename("plate.0001.exr") > 0001
"""
found = re.findall(FRAME_PATTERN, filename)
return found.pop() if found else None
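# Illustrative round-trip of the three filename helpers above (added sketch; the
# file name is made up):
#
#   get_frame_from_filename("plate.1001.exr")                 # -> "1001"
#   get_padding_from_filename("plate.1001.exr")               # -> 4
#   get_reformated_filename("plate.1001.exr")                 # -> "plate.%04d.exr"
#   get_reformated_filename("plate.1001.exr", padded=False)   # -> "plate.%d.exr"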
@contextlib.contextmanager
def maintained_object_duplication(item):
"""Maintain input item duplication
Attributes:
item (any flame.PyObject): python api object
Yield:
duplicate input PyObject type
"""
import flame
# Duplicate the clip to avoid modifying the original clip
duplicate = flame.duplicate(item)
try:
# do the operation on selected segments
yield duplicate
finally:
# delete the item at the end
flame.delete(duplicate)
@contextlib.contextmanager
def maintained_temp_file_path(suffix=None):
_suffix = suffix or ""
try:
# Store dumped json to temporary file
temporary_file = tempfile.mktemp(
suffix=_suffix, prefix="flame_maintained_")
yield temporary_file.replace("\\", "/")
except IOError as _error:
raise IOError(
"Not able to create temp json file: {}".format(_error))
finally:
# Remove the temporary json
os.remove(temporary_file)
def get_clip_segment(flame_clip):
name = flame_clip.name.get_value()
version = flame_clip.versions[0]
track = version.tracks[0]
segments = track.segments
if len(segments) < 1:
raise ValueError("Clip `{}` has no segments!".format(name))
if len(segments) > 1:
raise ValueError("Clip `{}` has too many segments!".format(name))
return segments[0]
def get_batch_group_from_desktop(name):
project = get_current_project()
project_desktop = project.current_workspace.desktop
for bgroup in project_desktop.batch_groups:
if bgroup.name.get_value() in name:
return bgroup
class MediaInfoFile(object):
"""Class to get media info file clip data
Raises:
        IOError: MEDIA_SCRIPT_PATH path doesn't exist
TypeError: Not able to generate clip xml data file
ET.ParseError: Missing clip in xml clip data
IOError: Not able to save xml clip data to file
Attributes:
str: `MEDIA_SCRIPT_PATH` path to flame binary
logging.Logger: `log` logger
TODO: add method for getting metadata to dict
"""
MEDIA_SCRIPT_PATH = "/opt/Autodesk/mio/current/dl_get_media_info"
log = log
_clip_data = None
_start_frame = None
_fps = None
_drop_mode = None
def __init__(self, path, **kwargs):
# replace log if any
if kwargs.get("logger"):
self.log = kwargs["logger"]
        # test if the `dl_get_media_info` path exists
self._validate_media_script_path()
        # derive other feed variables
self.feed_basename = os.path.basename(path)
self.feed_dir = os.path.dirname(path)
self.feed_ext = os.path.splitext(self.feed_basename)[1][1:].lower()
with maintained_temp_file_path(".clip") as tmp_path:
self.log.info("Temp File: {}".format(tmp_path))
self._generate_media_info_file(tmp_path)
            # get clip data and collapse it to a single clip if multiple
            # clips are present
xml_data = self._make_single_clip_media_info(tmp_path)
self.log.debug("xml_data: {}".format(xml_data))
self.log.debug("type: {}".format(type(xml_data)))
# get all time related data and assign them
self._get_time_info_from_origin(xml_data)
self.log.debug("start_frame: {}".format(self.start_frame))
self.log.debug("fps: {}".format(self.fps))
self.log.debug("drop frame: {}".format(self.drop_mode))
self.clip_data = xml_data
@property
def clip_data(self):
"""Clip's xml clip data
Returns:
xml.etree.ElementTree: xml data
"""
return self._clip_data
@clip_data.setter
def clip_data(self, data):
self._clip_data = data
@property
def start_frame(self):
""" Clip's starting frame found in timecode
Returns:
int: number of frames
"""
return self._start_frame
@start_frame.setter
def start_frame(self, number):
self._start_frame = int(number)
@property
def fps(self):
""" Clip's frame rate
Returns:
float: frame rate
"""
return self._fps
@fps.setter
def fps(self, fl_number):
self._fps = float(fl_number)
@property
def drop_mode(self):
""" Clip's drop frame mode
Returns:
str: drop frame flag
"""
return self._drop_mode
@drop_mode.setter
def drop_mode(self, text):
self._drop_mode = str(text)
def _validate_media_script_path(self):
if not os.path.isfile(self.MEDIA_SCRIPT_PATH):
raise IOError("Media Scirpt does not exist: `{}`".format(
self.MEDIA_SCRIPT_PATH))
def _generate_media_info_file(self, fpath):
        # Create cmd arguments for getting the xml clip info file
cmd_args = [
self.MEDIA_SCRIPT_PATH,
"-e", self.feed_ext,
"-o", fpath,
self.feed_dir
]
try:
# execute creation of clip xml template data
openpype.run_subprocess(cmd_args)
except TypeError as error:
raise TypeError(
"Error creating `{}` due: {}".format(fpath, error))
def _make_single_clip_media_info(self, fpath):
with open(fpath) as f:
lines = f.readlines()
_added_root = itertools.chain(
"<root>", deepcopy(lines)[1:], "</root>")
new_root = ET.fromstringlist(_added_root)
        # find the clip that matches the input name
xml_clips = new_root.findall("clip")
matching_clip = None
for xml_clip in xml_clips:
if xml_clip.find("name").text in self.feed_basename:
matching_clip = xml_clip
if matching_clip is None:
            # raise an error about the missing clip
raise ET.ParseError(
"Missing clip in `{}`. Available clips {}".format(
self.feed_basename, [
xml_clip.find("name").text
for xml_clip in xml_clips
]
))
return matching_clip
def _get_time_info_from_origin(self, xml_data):
try:
for out_track in xml_data.iter('track'):
for out_feed in out_track.iter('feed'):
# start frame
out_feed_nb_ticks_obj = out_feed.find(
'startTimecode/nbTicks')
self.start_frame = out_feed_nb_ticks_obj.text
# fps
out_feed_fps_obj = out_feed.find(
'startTimecode/rate')
self.fps = out_feed_fps_obj.text
# drop frame mode
out_feed_drop_mode_obj = out_feed.find(
'startTimecode/dropMode')
self.drop_mode = out_feed_drop_mode_obj.text
break
else:
continue
except Exception as msg:
self.log.warning(msg)
@staticmethod
def write_clip_data_to_file(fpath, xml_element_data):
""" Write xml element of clip data to file
Args:
fpath (string): file path
xml_element_data (xml.etree.ElementTree.Element): xml data
Raises:
IOError: If data could not be written to file
"""
try:
# save it as new file
tree = cET.ElementTree(xml_element_data)
tree.write(
fpath, xml_declaration=True,
method='xml', encoding='UTF-8'
)
except IOError as error:
raise IOError(
"Not able to write data to file: {}".format(error))
| [
"xml.etree.ElementTree.fromstringlist",
"re.compile",
"xml.etree.cElementTree.ElementTree",
"sys.platform.startswith",
"copy.deepcopy",
"flame.execute_shortcut",
"os.remove",
"openpype.api.run_subprocess",
"flame.duplicate",
"adsk.libwiretapPythonClientAPI.WireTapStr",
"json.dumps",
"os.path.isdir",
"flame.delete",
"socket.gethostname",
"os.path.expanduser",
"json.loads",
"pickle.load",
"os.path.splitext",
"pprint.pformat",
"adsk.libwiretapPythonClientAPI.WireTapClient",
"os.path.isfile",
"os.path.dirname",
"re.sub",
"re.findall",
"openpype.api.Logger.get_logger",
"pickle.dump",
"os.makedirs",
"os.path.join",
"tempfile.mktemp",
"os.path.basename"
]
| [((403, 439), 'openpype.api.Logger.get_logger', 'openpype.Logger.get_logger', (['__name__'], {}), '(__name__)\n', (429, 439), True, 'import openpype.api as openpype\n'), ((457, 488), 're.compile', 're.compile', (['"""[\\\\._](\\\\d+)[\\\\.]"""'], {}), "('[\\\\._](\\\\d+)[\\\\.]')\n", (467, 488), False, 'import re\n'), ((19406, 19441), 're.findall', 're.findall', (['FRAME_PATTERN', 'filename'], {}), '(FRAME_PATTERN, filename)\n', (19416, 19441), False, 'import re\n'), ((19817, 19838), 'flame.duplicate', 'flame.duplicate', (['item'], {}), '(item)\n', (19832, 19838), False, 'import flame\n'), ((3298, 3318), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (3316, 3318), False, 'import socket\n'), ((3798, 3844), 'os.path.join', 'os.path.join', (['self.prefs_folder', 'self.hostname'], {}), '(self.prefs_folder, self.hostname)\n', (3810, 3844), False, 'import os\n'), ((7986, 8031), 'flame.execute_shortcut', 'flame.execute_shortcut', (['"""Rescan Python Hooks"""'], {}), "('Rescan Python Hooks')\n", (8008, 8031), False, 'import flame\n'), ((10984, 11004), 'json.dumps', 'json.dumps', (['tag_data'], {}), '(tag_data)\n', (10994, 11004), False, 'import json\n'), ((11176, 11192), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (11186, 11192), False, 'import json\n'), ((19979, 20002), 'flame.delete', 'flame.delete', (['duplicate'], {}), '(duplicate)\n', (19991, 20002), False, 'import flame\n'), ((20184, 20243), 'tempfile.mktemp', 'tempfile.mktemp', ([], {'suffix': '_suffix', 'prefix': '"""flame_maintained_"""'}), "(suffix=_suffix, prefix='flame_maintained_')\n", (20199, 20243), False, 'import tempfile\n'), ((20485, 20510), 'os.remove', 'os.remove', (['temporary_file'], {}), '(temporary_file)\n', (20494, 20510), False, 'import os\n'), ((22114, 22136), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (22130, 22136), False, 'import os\n'), ((22161, 22182), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (22176, 22182), False, 'import os\n'), ((3585, 3617), 'sys.platform.startswith', 'sys.platform.startswith', (['"""linux"""'], {}), "('linux')\n", (3608, 3617), False, 'import sys\n'), ((5066, 5089), 'pickle.load', 'pickle.load', (['prefs_file'], {}), '(prefs_file)\n', (5077, 5089), False, 'import pickle\n'), ((5339, 5362), 'pickle.load', 'pickle.load', (['prefs_file'], {}), '(prefs_file)\n', (5350, 5362), False, 'import pickle\n'), ((5616, 5639), 'pickle.load', 'pickle.load', (['prefs_file'], {}), '(prefs_file)\n', (5627, 5639), False, 'import pickle\n'), ((5912, 5944), 'os.path.isdir', 'os.path.isdir', (['self.prefs_folder'], {}), '(self.prefs_folder)\n', (5925, 5944), False, 'import os\n'), ((6395, 6430), 'pickle.dump', 'pickle.dump', (['self.prefs', 'prefs_file'], {}), '(self.prefs, prefs_file)\n', (6406, 6430), False, 'import pickle\n'), ((6668, 6708), 'pickle.dump', 'pickle.dump', (['self.prefs_user', 'prefs_file'], {}), '(self.prefs_user, prefs_file)\n', (6679, 6708), False, 'import pickle\n'), ((6948, 6990), 'pickle.dump', 'pickle.dump', (['self.prefs_global', 'prefs_file'], {}), '(self.prefs_global, prefs_file)\n', (6959, 6990), False, 'import pickle\n'), ((8662, 8677), 'adsk.libwiretapPythonClientAPI.WireTapClient', 'WireTapClient', ([], {}), '()\n', (8675, 8677), False, 'from adsk.libwiretapPythonClientAPI import WireTapClient, WireTapServerHandle, WireTapNodeHandle, WireTapStr\n'), ((9170, 9182), 'adsk.libwiretapPythonClientAPI.WireTapStr', 'WireTapStr', ([], {}), '()\n', (9180, 9182), False, 'from adsk.libwiretapPythonClientAPI import 
WireTapClient, WireTapServerHandle, WireTapNodeHandle, WireTapStr\n'), ((24166, 24204), 'os.path.isfile', 'os.path.isfile', (['self.MEDIA_SCRIPT_PATH'], {}), '(self.MEDIA_SCRIPT_PATH)\n', (24180, 24204), False, 'import os\n'), ((24661, 24694), 'openpype.api.run_subprocess', 'openpype.run_subprocess', (['cmd_args'], {}), '(cmd_args)\n', (24684, 24694), True, 'import openpype.api as openpype\n'), ((25068, 25098), 'xml.etree.ElementTree.fromstringlist', 'ET.fromstringlist', (['_added_root'], {}), '(_added_root)\n', (25085, 25098), True, 'from xml.etree import ElementTree as ET\n'), ((27100, 27133), 'xml.etree.cElementTree.ElementTree', 'cET.ElementTree', (['xml_element_data'], {}), '(xml_element_data)\n', (27115, 27133), True, 'import xml.etree.cElementTree as cET\n'), ((3419, 3442), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (3437, 3442), False, 'import os\n'), ((5979, 6009), 'os.makedirs', 'os.makedirs', (['self.prefs_folder'], {}), '(self.prefs_folder)\n', (5990, 6009), False, 'import os\n'), ((10343, 10362), 'json.loads', 'json.loads', (['comment'], {}), '(comment)\n', (10353, 10362), False, 'import json\n'), ((3681, 3704), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (3699, 3704), False, 'import os\n'), ((5199, 5218), 'pprint.pformat', 'pformat', (['self.prefs'], {}), '(self.prefs)\n', (5206, 5218), False, 'from pprint import pformat\n'), ((5469, 5493), 'pprint.pformat', 'pformat', (['self.prefs_user'], {}), '(self.prefs_user)\n', (5476, 5493), False, 'from pprint import pformat\n'), ((5748, 5774), 'pprint.pformat', 'pformat', (['self.prefs_global'], {}), '(self.prefs_global)\n', (5755, 5774), False, 'from pprint import pformat\n'), ((6540, 6559), 'pprint.pformat', 'pformat', (['self.prefs'], {}), '(self.prefs)\n', (6547, 6559), False, 'from pprint import pformat\n'), ((6815, 6839), 'pprint.pformat', 'pformat', (['self.prefs_user'], {}), '(self.prefs_user)\n', (6822, 6839), False, 'from pprint import pformat\n'), ((7099, 7125), 'pprint.pformat', 'pformat', (['self.prefs_global'], {}), '(self.prefs_global)\n', (7106, 7125), False, 'from pprint import pformat\n'), ((10412, 10431), 'json.loads', 'json.loads', (['comment'], {}), '(comment)\n', (10422, 10431), False, 'import json\n'), ((14974, 14999), 're.sub', 're.sub', (['"""[<>]"""', '""""""', 'token'], {}), "('[<>]', '', token)\n", (14980, 14999), False, 'import re\n'), ((25013, 25028), 'copy.deepcopy', 'deepcopy', (['lines'], {}), '(lines)\n', (25021, 25028), False, 'from copy import deepcopy\n'), ((22207, 22243), 'os.path.splitext', 'os.path.splitext', (['self.feed_basename'], {}), '(self.feed_basename)\n', (22223, 22243), False, 'import os\n')] |
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
if len(sys.argv) != 3:
print('usage: python plot_performances.py <group_csv> <indiv_csv>')
exit()
group_file = sys.argv[1]
indiv_file = sys.argv[2]
# Load the data
df_group = pd.read_csv(group_file)
df_indiv = pd.read_csv(indiv_file)
df = pd.concat([df_group, df_indiv], sort=True)
# Prepare the data for plotting
plot_df = df.groupby(['model', 'id'], as_index=False)['hit'].agg('mean')
mfa_df = plot_df.loc[plot_df['model'] == 'MFA']
mfa_median = mfa_df['hit'].median()
plot_df = plot_df.loc[plot_df['model'] != 'MFA']
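# After the aggregation above, plot_df holds one row per (model, id) pair with the
# mean 'hit' value, i.e. the per-participant coverage accuracy of each model, and
# mfa_median is the median of those per-participant accuracies for the MFA baseline.
# Illustrative shape only (values and the second model name are made up):
#
#        model      id   hit
#    mReasoner  subj01  0.72
#          PHM  subj01  0.64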
# Plot the data
sns.set(style='whitegrid', palette='colorblind')
plt.figure(figsize=(7, 3))
order = plot_df.groupby('model', as_index=False)['hit'].agg('median').sort_values('hit')['model']
colors = [('C0' if 'mReasoner' in x else 'C2') for x in order]
sns.boxplot(x='model', y='hit', data=plot_df, order=order, palette=colors)
plt.axhline(y=mfa_median, ls='--', color='C7', zorder=10)
plt.text(0.002, mfa_median + 0.015, 'MFA', color='C7', fontsize=10, transform=plt.gca().transAxes)
plt.xlabel('')
plt.yticks(np.arange(0, 1.1, 0.1))
plt.ylabel('Coverage Accuracy')
plt.tight_layout()
plt.savefig('visualizations/performances.pdf')
plt.show()
| [
"seaborn.set",
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"seaborn.boxplot",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"pandas.concat",
"numpy.arange",
"matplotlib.pyplot.show"
]
| [((292, 315), 'pandas.read_csv', 'pd.read_csv', (['group_file'], {}), '(group_file)\n', (303, 315), True, 'import pandas as pd\n'), ((327, 350), 'pandas.read_csv', 'pd.read_csv', (['indiv_file'], {}), '(indiv_file)\n', (338, 350), True, 'import pandas as pd\n'), ((356, 398), 'pandas.concat', 'pd.concat', (['[df_group, df_indiv]'], {'sort': '(True)'}), '([df_group, df_indiv], sort=True)\n', (365, 398), True, 'import pandas as pd\n'), ((655, 703), 'seaborn.set', 'sns.set', ([], {'style': '"""whitegrid"""', 'palette': '"""colorblind"""'}), "(style='whitegrid', palette='colorblind')\n", (662, 703), True, 'import seaborn as sns\n'), ((704, 730), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 3)'}), '(figsize=(7, 3))\n', (714, 730), True, 'import matplotlib.pyplot as plt\n'), ((893, 967), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""model"""', 'y': '"""hit"""', 'data': 'plot_df', 'order': 'order', 'palette': 'colors'}), "(x='model', y='hit', data=plot_df, order=order, palette=colors)\n", (904, 967), True, 'import seaborn as sns\n'), ((969, 1026), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': 'mfa_median', 'ls': '"""--"""', 'color': '"""C7"""', 'zorder': '(10)'}), "(y=mfa_median, ls='--', color='C7', zorder=10)\n", (980, 1026), True, 'import matplotlib.pyplot as plt\n'), ((1127, 1141), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['""""""'], {}), "('')\n", (1137, 1141), True, 'import matplotlib.pyplot as plt\n'), ((1177, 1208), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Coverage Accuracy"""'], {}), "('Coverage Accuracy')\n", (1187, 1208), True, 'import matplotlib.pyplot as plt\n'), ((1210, 1228), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1226, 1228), True, 'import matplotlib.pyplot as plt\n'), ((1229, 1275), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""visualizations/performances.pdf"""'], {}), "('visualizations/performances.pdf')\n", (1240, 1275), True, 'import matplotlib.pyplot as plt\n'), ((1276, 1286), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1284, 1286), True, 'import matplotlib.pyplot as plt\n'), ((1153, 1175), 'numpy.arange', 'np.arange', (['(0)', '(1.1)', '(0.1)'], {}), '(0, 1.1, 0.1)\n', (1162, 1175), True, 'import numpy as np\n'), ((1105, 1114), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1112, 1114), True, 'import matplotlib.pyplot as plt\n')] |
from collections import OrderedDict
import pytest
import vcr
try: # Python 2.7
# requests's ``json()`` function returns strings as unicode (as per the
# JSON spec). In 2.7, those are of type unicode rather than str. basestring
# was created to help with that.
# https://docs.python.org/2/library/functions.html#basestring
basestring = basestring
except NameError:
basestring = str
@pytest.mark.incremental
class TestShow(object):
"""These don't require authentication to Game Pass."""
@vcr.use_cassette('public_API/europe_show.yaml')
@staticmethod
def test_desc(gp):
shows = gp.shows
for s in shows:
show = shows[s]
isinstance(show.desc, basestring)
# content is not required
@vcr.use_cassette('public_API/europe_show.yaml')
@staticmethod
def test_logo(gp):
shows = gp.shows
for s in shows:
show = shows[s]
isinstance(show.logo, basestring)
assert show.logo
@vcr.use_cassette('public_API/europe_show.yaml')
@staticmethod
def test_name(gp):
shows = gp.shows
for s in shows:
show = shows[s]
isinstance(show.name, basestring)
assert show.name
@vcr.use_cassette('public_API/europe_show_seasons.yaml')
@staticmethod
def test_seasons(gp):
shows = gp.shows
for s in shows:
show = shows[s]
assert type(show.seasons) is OrderedDict
assert show.seasons
prev = 9999
for s in show.seasons:
season = show.seasons[s]
# TODO: assert it has content
# TODO: assert is type season
# make sure the years look sane-ish
assert int(s) > 2000 and int(s) < 2050
# make sure it's sorted high to low
assert int(prev) > int(s)
prev = s
| [
"vcr.use_cassette"
]
| [((524, 571), 'vcr.use_cassette', 'vcr.use_cassette', (['"""public_API/europe_show.yaml"""'], {}), "('public_API/europe_show.yaml')\n", (540, 571), False, 'import vcr\n'), ((783, 830), 'vcr.use_cassette', 'vcr.use_cassette', (['"""public_API/europe_show.yaml"""'], {}), "('public_API/europe_show.yaml')\n", (799, 830), False, 'import vcr\n'), ((1033, 1080), 'vcr.use_cassette', 'vcr.use_cassette', (['"""public_API/europe_show.yaml"""'], {}), "('public_API/europe_show.yaml')\n", (1049, 1080), False, 'import vcr\n'), ((1283, 1338), 'vcr.use_cassette', 'vcr.use_cassette', (['"""public_API/europe_show_seasons.yaml"""'], {}), "('public_API/europe_show_seasons.yaml')\n", (1299, 1338), False, 'import vcr\n')] |
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import flopy
def run():
workspace = os.path.join("lake")
# make sure workspace directory exists
if not os.path.exists(workspace):
os.makedirs(workspace)
fext = "png"
narg = len(sys.argv)
iarg = 0
if narg > 1:
while iarg < narg - 1:
iarg += 1
basearg = sys.argv[iarg].lower()
if basearg == "--pdf":
fext = "pdf"
# save the starting path
cwdpth = os.getcwd()
# change to the working directory
os.chdir(workspace)
# We are creating a square model with a specified head equal to `h1` along all boundaries.
# The head at the cell in the center in the top layer is fixed to `h2`. First, set the name
# of the model and the parameters of the model: the number of layers `Nlay`, the number of rows
# and columns `N`, lengths of the sides of the model `L`, aquifer thickness `H`, hydraulic
# conductivity `Kh`
name = "lake_example"
h1 = 100
h2 = 90
Nlay = 10
N = 101
L = 400.0
H = 50.0
Kh = 1.0
# Create a MODFLOW model and store it (in this case in the variable `ml`, but you can call it
# whatever you want). The modelname will be the name given to all MODFLOW files (input and output).
# The exe_name should be the full path to your MODFLOW executable. The version is either 'mf2k'
    # for MODFLOW2000 or 'mf2005' for MODFLOW2005.
ml = flopy.modflow.Modflow(
modelname=name, exe_name="mf2005", version="mf2005"
)
# Define the discretization of the model. All layers are given equal thickness. The `bot` array
    # is built from `H` and `Nlay` to indicate the top and bottom of each layer, and `delrow` and
# `delcol` are computed from model size `L` and number of cells `N`. Once these are all computed,
# the Discretization file is built.
bot = np.linspace(-H / Nlay, -H, Nlay)
delrow = delcol = L / (N - 1)
dis = flopy.modflow.ModflowDis(
ml,
nlay=Nlay,
nrow=N,
ncol=N,
delr=delrow,
delc=delcol,
top=0.0,
botm=bot,
laycbd=0,
)
# Next we specify the boundary conditions and starting heads with the Basic package. The `ibound`
# array will be `1` in all cells in all layers, except for along the boundary and in the cell at
# the center in the top layer where it is set to `-1` to indicate fixed heads. The starting heads
# are used to define the heads in the fixed head cells (this is a steady simulation, so none of
# the other starting values matter). So we set the starting heads to `h1` everywhere, except for
# the head at the center of the model in the top layer.
Nhalf = int((N - 1) / 2)
ibound = np.ones((Nlay, N, N), dtype=int)
ibound[:, 0, :] = -1
ibound[:, -1, :] = -1
ibound[:, :, 0] = -1
ibound[:, :, -1] = -1
ibound[0, Nhalf, Nhalf] = -1
start = h1 * np.ones((N, N))
start[Nhalf, Nhalf] = h2
# create external ibound array and starting head files
files = []
hfile = f"{name}_strt.ref"
np.savetxt(hfile, start)
hfiles = []
for kdx in range(Nlay):
file = f"{name}_ib{kdx + 1:02d}.ref"
files.append(file)
hfiles.append(hfile)
np.savetxt(file, ibound[kdx, :, :], fmt="%5d")
bas = flopy.modflow.ModflowBas(ml, ibound=files, strt=hfiles)
# The aquifer properties (really only the hydraulic conductivity) are defined with the
# LPF package.
lpf = flopy.modflow.ModflowLpf(ml, hk=Kh)
# Finally, we need to specify the solver we want to use (PCG with default values), and the
# output control (using the default values). Then we are ready to write all MODFLOW input
# files and run MODFLOW.
pcg = flopy.modflow.ModflowPcg(ml)
oc = flopy.modflow.ModflowOc(ml)
ml.write_input()
ml.run_model()
# change back to the starting directory
os.chdir(cwdpth)
# Once the model has terminated normally, we can read the heads file. First, a link to the heads
# file is created with `HeadFile`. The link can then be accessed with the `get_data` function, by
# specifying, in this case, the step number and period number for which we want to retrieve data.
# A three-dimensional array is returned of size `nlay, nrow, ncol`. Matplotlib contouring functions
# are used to make contours of the layers or a cross-section.
hds = flopy.utils.HeadFile(os.path.join(workspace, f"{name}.hds"))
h = hds.get_data(kstpkper=(0, 0))
x = y = np.linspace(0, L, N)
c = plt.contour(x, y, h[0], np.arange(90, 100.1, 0.2))
plt.clabel(c, fmt="%2.1f")
plt.axis("scaled")
outfig = os.path.join(workspace, f"lake1.{fext}")
fig = plt.gcf()
fig.savefig(outfig, dpi=300)
print("created...", outfig)
x = y = np.linspace(0, L, N)
c = plt.contour(x, y, h[-1], np.arange(90, 100.1, 0.2))
plt.clabel(c, fmt="%1.1f")
plt.axis("scaled")
outfig = os.path.join(workspace, f"lake2.{fext}")
fig = plt.gcf()
fig.savefig(outfig, dpi=300)
print("created...", outfig)
z = np.linspace(-H / Nlay / 2, -H + H / Nlay / 2, Nlay)
c = plt.contour(x, z, h[:, 50, :], np.arange(90, 100.1, 0.2))
plt.axis("scaled")
outfig = os.path.join(workspace, f"lake3.{fext}")
fig = plt.gcf()
fig.savefig(outfig, dpi=300)
print("created...", outfig)
return 0
if __name__ == "__main__":
success = run()
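# Usage note (added): the script writes PNG figures by default; passing `--pdf` on the
# command line switches the saved figures to PDF, e.g.
#
#   python lake.py --pdf
#
# (the script file name above is assumed; the `mf2005` executable must be on the PATH)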
| [
"os.path.exists",
"numpy.ones",
"flopy.modflow.ModflowPcg",
"os.makedirs",
"matplotlib.pyplot.clabel",
"matplotlib.pyplot.gcf",
"os.path.join",
"flopy.modflow.ModflowDis",
"os.getcwd",
"os.chdir",
"numpy.linspace",
"flopy.modflow.ModflowBas",
"numpy.savetxt",
"flopy.modflow.ModflowLpf",
"flopy.modflow.ModflowOc",
"matplotlib.pyplot.axis",
"numpy.arange",
"flopy.modflow.Modflow"
]
| [((115, 135), 'os.path.join', 'os.path.join', (['"""lake"""'], {}), "('lake')\n", (127, 135), False, 'import os\n'), ((526, 537), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (535, 537), False, 'import os\n'), ((581, 600), 'os.chdir', 'os.chdir', (['workspace'], {}), '(workspace)\n', (589, 600), False, 'import os\n'), ((1491, 1565), 'flopy.modflow.Modflow', 'flopy.modflow.Modflow', ([], {'modelname': 'name', 'exe_name': '"""mf2005"""', 'version': '"""mf2005"""'}), "(modelname=name, exe_name='mf2005', version='mf2005')\n", (1512, 1565), False, 'import flopy\n'), ((1930, 1962), 'numpy.linspace', 'np.linspace', (['(-H / Nlay)', '(-H)', 'Nlay'], {}), '(-H / Nlay, -H, Nlay)\n', (1941, 1962), True, 'import numpy as np\n'), ((2007, 2122), 'flopy.modflow.ModflowDis', 'flopy.modflow.ModflowDis', (['ml'], {'nlay': 'Nlay', 'nrow': 'N', 'ncol': 'N', 'delr': 'delrow', 'delc': 'delcol', 'top': '(0.0)', 'botm': 'bot', 'laycbd': '(0)'}), '(ml, nlay=Nlay, nrow=N, ncol=N, delr=delrow, delc=\n delcol, top=0.0, botm=bot, laycbd=0)\n', (2031, 2122), False, 'import flopy\n'), ((2806, 2838), 'numpy.ones', 'np.ones', (['(Nlay, N, N)'], {'dtype': 'int'}), '((Nlay, N, N), dtype=int)\n', (2813, 2838), True, 'import numpy as np\n'), ((3147, 3171), 'numpy.savetxt', 'np.savetxt', (['hfile', 'start'], {}), '(hfile, start)\n', (3157, 3171), True, 'import numpy as np\n'), ((3383, 3438), 'flopy.modflow.ModflowBas', 'flopy.modflow.ModflowBas', (['ml'], {'ibound': 'files', 'strt': 'hfiles'}), '(ml, ibound=files, strt=hfiles)\n', (3407, 3438), False, 'import flopy\n'), ((3560, 3595), 'flopy.modflow.ModflowLpf', 'flopy.modflow.ModflowLpf', (['ml'], {'hk': 'Kh'}), '(ml, hk=Kh)\n', (3584, 3595), False, 'import flopy\n'), ((3825, 3853), 'flopy.modflow.ModflowPcg', 'flopy.modflow.ModflowPcg', (['ml'], {}), '(ml)\n', (3849, 3853), False, 'import flopy\n'), ((3863, 3890), 'flopy.modflow.ModflowOc', 'flopy.modflow.ModflowOc', (['ml'], {}), '(ml)\n', (3886, 3890), False, 'import flopy\n'), ((3980, 3996), 'os.chdir', 'os.chdir', (['cwdpth'], {}), '(cwdpth)\n', (3988, 3996), False, 'import os\n'), ((4594, 4614), 'numpy.linspace', 'np.linspace', (['(0)', 'L', 'N'], {}), '(0, L, N)\n', (4605, 4614), True, 'import numpy as np\n'), ((4678, 4704), 'matplotlib.pyplot.clabel', 'plt.clabel', (['c'], {'fmt': '"""%2.1f"""'}), "(c, fmt='%2.1f')\n", (4688, 4704), True, 'import matplotlib.pyplot as plt\n'), ((4709, 4727), 'matplotlib.pyplot.axis', 'plt.axis', (['"""scaled"""'], {}), "('scaled')\n", (4717, 4727), True, 'import matplotlib.pyplot as plt\n'), ((4742, 4782), 'os.path.join', 'os.path.join', (['workspace', 'f"""lake1.{fext}"""'], {}), "(workspace, f'lake1.{fext}')\n", (4754, 4782), False, 'import os\n'), ((4793, 4802), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4800, 4802), True, 'import matplotlib.pyplot as plt\n'), ((4881, 4901), 'numpy.linspace', 'np.linspace', (['(0)', 'L', 'N'], {}), '(0, L, N)\n', (4892, 4901), True, 'import numpy as np\n'), ((4966, 4992), 'matplotlib.pyplot.clabel', 'plt.clabel', (['c'], {'fmt': '"""%1.1f"""'}), "(c, fmt='%1.1f')\n", (4976, 4992), True, 'import matplotlib.pyplot as plt\n'), ((4997, 5015), 'matplotlib.pyplot.axis', 'plt.axis', (['"""scaled"""'], {}), "('scaled')\n", (5005, 5015), True, 'import matplotlib.pyplot as plt\n'), ((5030, 5070), 'os.path.join', 'os.path.join', (['workspace', 'f"""lake2.{fext}"""'], {}), "(workspace, f'lake2.{fext}')\n", (5042, 5070), False, 'import os\n'), ((5081, 5090), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5088, 5090), True, 'import 
matplotlib.pyplot as plt\n'), ((5165, 5216), 'numpy.linspace', 'np.linspace', (['(-H / Nlay / 2)', '(-H + H / Nlay / 2)', 'Nlay'], {}), '(-H / Nlay / 2, -H + H / Nlay / 2, Nlay)\n', (5176, 5216), True, 'import numpy as np\n'), ((5287, 5305), 'matplotlib.pyplot.axis', 'plt.axis', (['"""scaled"""'], {}), "('scaled')\n", (5295, 5305), True, 'import matplotlib.pyplot as plt\n'), ((5320, 5360), 'os.path.join', 'os.path.join', (['workspace', 'f"""lake3.{fext}"""'], {}), "(workspace, f'lake3.{fext}')\n", (5332, 5360), False, 'import os\n'), ((5371, 5380), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5378, 5380), True, 'import matplotlib.pyplot as plt\n'), ((190, 215), 'os.path.exists', 'os.path.exists', (['workspace'], {}), '(workspace)\n', (204, 215), False, 'import os\n'), ((225, 247), 'os.makedirs', 'os.makedirs', (['workspace'], {}), '(workspace)\n', (236, 247), False, 'import os\n'), ((2992, 3007), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (2999, 3007), True, 'import numpy as np\n'), ((3325, 3371), 'numpy.savetxt', 'np.savetxt', (['file', 'ibound[kdx, :, :]'], {'fmt': '"""%5d"""'}), "(file, ibound[kdx, :, :], fmt='%5d')\n", (3335, 3371), True, 'import numpy as np\n'), ((4504, 4542), 'os.path.join', 'os.path.join', (['workspace', 'f"""{name}.hds"""'], {}), "(workspace, f'{name}.hds')\n", (4516, 4542), False, 'import os\n'), ((4647, 4672), 'numpy.arange', 'np.arange', (['(90)', '(100.1)', '(0.2)'], {}), '(90, 100.1, 0.2)\n', (4656, 4672), True, 'import numpy as np\n'), ((4935, 4960), 'numpy.arange', 'np.arange', (['(90)', '(100.1)', '(0.2)'], {}), '(90, 100.1, 0.2)\n', (4944, 4960), True, 'import numpy as np\n'), ((5256, 5281), 'numpy.arange', 'np.arange', (['(90)', '(100.1)', '(0.2)'], {}), '(90, 100.1, 0.2)\n', (5265, 5281), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
'''
Documentation on clustering in Python:
http://scikit-learn.org/stable/modules/clustering.html
http://www.learndatasci.com/k-means-clustering-algorithms-python-intro/
http://hdbscan.readthedocs.io/en/latest/comparing_clustering_algorithms.html
https://joernhees.de/blog/2015/08/26/scipy-hierarchical-clustering-and-dendrogram-tutorial/
'''
import time
import csv
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn import metrics
from sklearn import cluster
from math import floor
import seaborn as sns
# Nice plot defaults
sns.set()
def norm_to_zero_one(df):
return (df - df.min()) * 1.0 / (df.max() - df.min())
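# Minimal illustration of the helper above (added, made-up numbers): it rescales each
# DataFrame column to the [0, 1] range, e.g. a column [2, 4, 6] becomes [0.0, 0.5, 1.0].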
censo = pd.read_csv('../mujeres_fecundidad_INE_2018.csv')
'''
for col in censo:
missing_count = sum(pd.isnull(censo[col]))
if missing_count > 0:
print(col,missing_count)
#'''
# Unknown values could be replaced with a fixed number
#censo = censo.replace(np.NaN,0)
# Replace missing values with the mean
for col in censo:
censo[col].fillna(censo[col].mean(), inplace=True)
# select cases
subset = censo.loc[(censo['TRAREPRO']==1) & (censo['NEMBTRAREPRO']<=6)]
# Select variables
usadas = ['NHIJOS', 'TIPOTRAREPRO', 'NMESESTRAREPRO', 'NEMBTRAREPRO']
X = subset[usadas]
X_normal = X.apply(norm_to_zero_one)
print('Population size after filtering: ',len(X_normal.index))
for col in X:
missing_count = sum(pd.isnull(censo[col]))
if missing_count > 0:
print(col,missing_count, ' AFTER')
algoritmos = (('KMeans', cluster.KMeans(init='k-means++', n_clusters=5, n_init=5)),
('MeanShift', cluster.MeanShift(cluster_all=False, min_bin_freq=3)),
('Ward', cluster.AgglomerativeClustering(n_clusters=4, linkage='ward')),
('DBScan', cluster.DBSCAN(eps=0.35, min_samples=5)),
('Birch', cluster.Birch(threshold=0.1,n_clusters=5)))
cluster_predict = {}
calinski = {}
silh = {}
times = {}
n_clusters = {}
clusters_fig, clusters_axis = plt.subplots(3, 2, figsize=(10,10))
clusters_colors = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue', '#ffb347']
ijs = [(0,0), (0,1), (1,0), (1,1), (2,0), (2,1)]
for i_alg, par in enumerate(algoritmos):
name, alg = par
    print('----- Running ' + name,)
t = time.time()
cluster_predict[name] = alg.fit_predict(X_normal)
tiempo = time.time() - t
times[name] = tiempo
metric_CH = metrics.calinski_harabasz_score(X_normal, cluster_predict[name])
calinski[name] = metric_CH
metric_SC = metrics.silhouette_score(X_normal, cluster_predict[name], metric='euclidean', sample_size=floor(len(X)), random_state=123456)
silh[name] = metric_SC
    # Assign the cluster labels to a DataFrame
clusters = pd.DataFrame(cluster_predict[name],index=X.index,columns=['cluster'])
if (name == 'KMeans'):
clusters_kmeans = clusters
alg_kmeans = alg
elif (name == 'Ward'):
clusters_ward = clusters
print("Tamaño de cada cluster:")
size = clusters['cluster'].value_counts()
cluster_fractions = []
for num,i in size.iteritems():
print('%s: %5d (%5.2f%%)' % (num,i,100*i/len(clusters)))
cluster_fractions.append( 100*i/len(clusters) )
n_clusters[name] = len(size)
# Bar charts
if ( len(cluster_fractions) > 7 ):
cluster_fractions = cluster_fractions[0:6]
i, j = ijs[i_alg]
y_pos = np.arange(len(cluster_fractions))
labels = [ "Cluster " + str(i) for i in range(len(cluster_fractions)) ]
clusters_axis[i, j].bar(y_pos, cluster_fractions, tick_label=labels, color=clusters_colors)
clusters_axis[i, j].set_ylim(0, 100)
clusters_axis[i, j].set_title(name)
if (j == 0):
clusters_axis[i, j].set_ylabel("Cluster size (%)")
clusters_axis[2,1].remove()
#clusters_fig.savefig("clusters.png")
plt.show()
from prettytable import PrettyTable
header = ['Algoritmo', 'CH', 'Silh', 'Tiempo', 'Número de clusters']
tabla = PrettyTable(header)
for name, alg in algoritmos:
tabla.add_row([name,
"{0:.2f}".format(calinski[name]),
"{0:.2f}".format(silh[name]),
"{0:.2f}".format(times[name]),
n_clusters[name]])
print(tabla)
# Write the data to a general.csv
'''
with open('general.csv', mode='w+', newline='') as file:
writer = csv.DictWriter(file, fieldnames=header)
writer.writeheader()
for name, _ in algoritmos:
writer.writerow({'Algoritmo': name,
'CH': "{0:.2f}".format(calinski[name]),
'Silh': "{0:.2f}".format(silh[name]),
'Tiempo': "{0:.2f}".format(times[name]),
'Número de clusters': n_clusters[name]})
#'''
# ----------------------- DISTRIBUTION FUNCTIONS ---------
print("---------- Preparing distribution functions...")
n_clusters_ward = n_clusters['Ward']
n_var = len(usadas)
X_ward = pd.concat([X, clusters_ward], axis=1)
fig, axes = plt.subplots(n_clusters_ward, n_var, sharey=True, figsize=(15,15))
fig.subplots_adjust(wspace=0, hspace=0)
colors = sns.color_palette(palette=None, n_colors=n_clusters_ward, desat=None)
rango = []
for j in range(n_var):
rango.append([X_ward[usadas[j]].min(), X_ward[usadas[j]].max()])
for i in range(n_clusters_ward):
dat_filt = X_ward.loc[X_ward['cluster']==i]
for j in range(n_var):
#ax = sns.kdeplot(dat_filt[usadas[j]], label="", shade=True, color=colors[i], ax=axes[i,j])
ax = sns.boxplot(dat_filt[usadas[j]], color=colors[i], flierprops={'marker':'o','markersize':4}, ax=axes[i,j])
if (i==n_clusters_ward-1):
axes[i,j].set_xlabel(usadas[j])
else:
axes[i,j].set_xlabel("")
if (j==0):
axes[i,j].set_ylabel("Cluster "+str(i))
else:
axes[i,j].set_ylabel("")
axes[i,j].set_yticks([])
axes[i,j].grid(axis='x', linestyle='-', linewidth='0.2', color='gray')
axes[i,j].grid(axis='y', b=False)
ax.set_xlim(rango[j][0]-0.05*(rango[j][1]-rango[j][0]),rango[j][1]+0.05*(rango[j][1]-rango[j][0]))
plt.show()
#fig.savefig("boxes.png")
# ---------------- SCATTER MATRIX -----------------------
'''
plt.clf()
print("---------- Preparando el scatter matrix...")
# Se añade la asignación de clusters como columna a X
variables = list(X_ward)
variables.remove('cluster')
sns_plot = sns.pairplot(X_ward, vars=variables, hue="cluster", palette='Paired', plot_kws={"s": 25}, diag_kind="hist")
sns_plot.fig.subplots_adjust(wspace=.03, hspace=.03);
# sns_plot.savefig("scatter_matrix.png")
plt.show()
#'''
# ----------------------- DENDROGRAMS -----------------------
# In clustering we have to normalize for the distance metrics
# X_normal = preprocessing.normalize(X, norm='l2')
X_normal = (X - X.min() ) / (X.max() - X.min())
# We use this hierarchical clustering and keep 20 clusters, i.e. twenty branches of the dendrogram
ward = cluster.AgglomerativeClustering(n_clusters=20, linkage='ward')
name, algorithm = ('Ward', ward)
cluster_predict = {}
k = {}
t = time.time()
cluster_predict[name] = algorithm.fit_predict(X_normal)
tiempo = time.time() - t
k[name] = len(set(cluster_predict[name]))
# Convert the cluster assignment to a DataFrame
clusters = pd.DataFrame(cluster_predict['Ward'],index=X.index,columns=['cluster'])
# And add it as a column to X
X_cluster = pd.concat([X, clusters], axis=1)
# Filter out the elements (outliers) that fall into very small clusters of the hierarchical clustering
min_size = 3
X_filtrado = X
'''
X_cluster[X_cluster.groupby('cluster').cluster.transform(len) > min_size]
k_filtrado = len(set(X_filtrado['cluster']))
print("De los {:.0f} clusters hay {:.0f} con más de {:.0f} elementos. Del total de {:.0f} elementos, se seleccionan {:.0f}".format(k['Ward'],k_filtrado,min_size,len(X),len(X_filtrado)))
X_filtrado = X_filtrado.drop('cluster', 1)
X_filtrado = X
#'''
# Normalize the filtered set
X_filtrado_normal = preprocessing.normalize(X_filtrado, norm='l2')
# Obtain the dendrogram using scipy, which actually re-runs the hierarchical clustering
from scipy.cluster import hierarchy
linkage_array = hierarchy.ward(X_filtrado_normal)
plt.clf()
dendro = hierarchy.dendrogram(linkage_array,orientation='left', p=10, truncate_mode='lastp') # drawn horizontally to compare it with the one generated by seaborn
# "p=10,truncate_mode='lastp'" can be used to cut the dendrogram down to 10 leaves
# Dendrogram using seaborn (which in turn uses scipy) to include a heatmap
X_filtrado_normal_DF = pd.DataFrame(X_filtrado_normal, index=X_filtrado.index, columns=usadas)
# Add a label column to indicate the cluster each object belongs to
labels = X_ward['cluster']
lut = dict(zip(set(labels), sns.color_palette(palette="Blues_d", n_colors=n_clusters_ward)))
row_colors = pd.DataFrame(labels)['cluster'].map(lut)
clustergrid = sns.clustermap(X_filtrado_normal_DF, method='ward', row_colors=row_colors, col_cluster=False, figsize=(20,10), cmap="YlGnBu", yticklabels=False)
# To add the reordered labels. Right now the colors do not show up in the
# column where they should. Presumably this is because the ids do not match.
#'''
ordering = clustergrid.dendrogram_row.reordered_ind
labels_list = [x for _, x in sorted(zip(ordering,labels), key=lambda pair: pair[0])]
labels = pd.Series(labels_list, index=X_filtrado_normal_DF.index, name='cluster')
lut = dict(zip(set(labels), sns.color_palette(palette="Blues_d", n_colors=n_clusters_ward)))
row_colors = pd.DataFrame(labels)['cluster'].map(lut)
clustergrid = sns.clustermap(X_filtrado_normal_DF, method='ward', row_colors=row_colors, col_cluster=False, figsize=(20,10), cmap="YlGnBu", yticklabels=False)
#'''
#plt.savefig("dendograma.png")
# ----------------------- HEATMAPS -----------------------
#'''
plt.figure(1)
centers = pd.DataFrame(alg_kmeans.cluster_centers_, columns=list(X))
centers_desnormal = centers.copy()
centers_desnormal = centers.drop([4])
# Compute the centroids
X = pd.concat([X, clusters_ward], axis=1)
for variable in list(centers):
for k_cluster in range(n_clusters_ward):
centroide = X.loc[(clusters_ward['cluster']==k_cluster)][variable].mean()
centers_desnormal.loc[k_cluster, variable] = centroide
# Normalize
centers_normal2 = centers_desnormal.copy()
centers_normal2 = (centers_normal2 - centers_normal2.min() ) / (centers_normal2.max() - centers_normal2.min())
import matplotlib.pyplot as plt
heatmap_fig, ax = plt.subplots(figsize=(10,10))
heatmap = sns.heatmap(centers_normal2, cmap="YlGnBu", annot=centers_desnormal, fmt='.3f')
# Prevent the top and bottom blocks from being cut in half
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
#heatmap_fig.savefig("heatmap.png")
#'''
| [
"pandas.read_csv",
"sklearn.cluster.MeanShift",
"sklearn.cluster.DBSCAN",
"seaborn.set",
"sklearn.cluster.AgglomerativeClustering",
"seaborn.color_palette",
"scipy.cluster.hierarchy.ward",
"pandas.DataFrame",
"prettytable.PrettyTable",
"seaborn.clustermap",
"seaborn.heatmap",
"time.time",
"sklearn.cluster.Birch",
"matplotlib.pyplot.show",
"pandas.Series",
"sklearn.metrics.calinski_harabasz_score",
"pandas.isnull",
"sklearn.cluster.KMeans",
"scipy.cluster.hierarchy.dendrogram",
"matplotlib.pyplot.clf",
"seaborn.boxplot",
"matplotlib.pyplot.figure",
"sklearn.preprocessing.normalize",
"pandas.concat",
"matplotlib.pyplot.subplots"
]
| [((751, 760), 'seaborn.set', 'sns.set', ([], {}), '()\n', (758, 760), True, 'import seaborn as sns\n'), ((859, 908), 'pandas.read_csv', 'pd.read_csv', (['"""../mujeres_fecundidad_INE_2018.csv"""'], {}), "('../mujeres_fecundidad_INE_2018.csv')\n", (870, 908), True, 'import pandas as pd\n'), ((2256, 2292), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(2)'], {'figsize': '(10, 10)'}), '(3, 2, figsize=(10, 10))\n', (2268, 2292), True, 'import matplotlib.pyplot as plt\n'), ((4185, 4195), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4193, 4195), True, 'import matplotlib.pyplot as plt\n'), ((4318, 4337), 'prettytable.PrettyTable', 'PrettyTable', (['header'], {}), '(header)\n', (4329, 4337), False, 'from prettytable import PrettyTable\n'), ((5353, 5390), 'pandas.concat', 'pd.concat', (['[X, clusters_ward]'], {'axis': '(1)'}), '([X, clusters_ward], axis=1)\n', (5362, 5390), True, 'import pandas as pd\n'), ((5406, 5473), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n_clusters_ward', 'n_var'], {'sharey': '(True)', 'figsize': '(15, 15)'}), '(n_clusters_ward, n_var, sharey=True, figsize=(15, 15))\n', (5418, 5473), True, 'import matplotlib.pyplot as plt\n'), ((5526, 5595), 'seaborn.color_palette', 'sns.color_palette', ([], {'palette': 'None', 'n_colors': 'n_clusters_ward', 'desat': 'None'}), '(palette=None, n_colors=n_clusters_ward, desat=None)\n', (5543, 5595), True, 'import seaborn as sns\n'), ((6597, 6607), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6605, 6607), True, 'import matplotlib.pyplot as plt\n'), ((7473, 7535), 'sklearn.cluster.AgglomerativeClustering', 'cluster.AgglomerativeClustering', ([], {'n_clusters': '(20)', 'linkage': '"""ward"""'}), "(n_clusters=20, linkage='ward')\n", (7504, 7535), False, 'from sklearn import cluster\n'), ((7609, 7620), 'time.time', 'time.time', ([], {}), '()\n', (7618, 7620), False, 'import time\n'), ((7816, 7889), 'pandas.DataFrame', 'pd.DataFrame', (["cluster_predict['Ward']"], {'index': 'X.index', 'columns': "['cluster']"}), "(cluster_predict['Ward'], index=X.index, columns=['cluster'])\n", (7828, 7889), True, 'import pandas as pd\n'), ((7932, 7964), 'pandas.concat', 'pd.concat', (['[X, clusters]'], {'axis': '(1)'}), '([X, clusters], axis=1)\n', (7941, 7964), True, 'import pandas as pd\n'), ((8528, 8574), 'sklearn.preprocessing.normalize', 'preprocessing.normalize', (['X_filtrado'], {'norm': '"""l2"""'}), "(X_filtrado, norm='l2')\n", (8551, 8574), False, 'from sklearn import preprocessing\n'), ((8728, 8761), 'scipy.cluster.hierarchy.ward', 'hierarchy.ward', (['X_filtrado_normal'], {}), '(X_filtrado_normal)\n', (8742, 8761), False, 'from scipy.cluster import hierarchy\n'), ((8763, 8772), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8770, 8772), True, 'import matplotlib.pyplot as plt\n'), ((8783, 8872), 'scipy.cluster.hierarchy.dendrogram', 'hierarchy.dendrogram', (['linkage_array'], {'orientation': '"""left"""', 'p': '(10)', 'truncate_mode': '"""lastp"""'}), "(linkage_array, orientation='left', p=10, truncate_mode\n ='lastp')\n", (8803, 8872), False, 'from scipy.cluster import hierarchy\n'), ((9121, 9192), 'pandas.DataFrame', 'pd.DataFrame', (['X_filtrado_normal'], {'index': 'X_filtrado.index', 'columns': 'usadas'}), '(X_filtrado_normal, index=X_filtrado.index, columns=usadas)\n', (9133, 9192), True, 'import pandas as pd\n'), ((9473, 9622), 'seaborn.clustermap', 'sns.clustermap', (['X_filtrado_normal_DF'], {'method': '"""ward"""', 'row_colors': 'row_colors', 'col_cluster': '(False)', 'figsize': '(20, 10)', 
'cmap': '"""YlGnBu"""', 'yticklabels': '(False)'}), "(X_filtrado_normal_DF, method='ward', row_colors=row_colors,\n col_cluster=False, figsize=(20, 10), cmap='YlGnBu', yticklabels=False)\n", (9487, 9622), True, 'import seaborn as sns\n'), ((9933, 10005), 'pandas.Series', 'pd.Series', (['labels_list'], {'index': 'X_filtrado_normal_DF.index', 'name': '"""cluster"""'}), "(labels_list, index=X_filtrado_normal_DF.index, name='cluster')\n", (9942, 10005), True, 'import pandas as pd\n'), ((10171, 10320), 'seaborn.clustermap', 'sns.clustermap', (['X_filtrado_normal_DF'], {'method': '"""ward"""', 'row_colors': 'row_colors', 'col_cluster': '(False)', 'figsize': '(20, 10)', 'cmap': '"""YlGnBu"""', 'yticklabels': '(False)'}), "(X_filtrado_normal_DF, method='ward', row_colors=row_colors,\n col_cluster=False, figsize=(20, 10), cmap='YlGnBu', yticklabels=False)\n", (10185, 10320), True, 'import seaborn as sns\n'), ((10427, 10440), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (10437, 10440), True, 'import matplotlib.pyplot as plt\n'), ((10622, 10659), 'pandas.concat', 'pd.concat', (['[X, clusters_ward]'], {'axis': '(1)'}), '([X, clusters_ward], axis=1)\n', (10631, 10659), True, 'import pandas as pd\n'), ((11122, 11152), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (11134, 11152), True, 'import matplotlib.pyplot as plt\n'), ((11163, 11242), 'seaborn.heatmap', 'sns.heatmap', (['centers_normal2'], {'cmap': '"""YlGnBu"""', 'annot': 'centers_desnormal', 'fmt': '""".3f"""'}), "(centers_normal2, cmap='YlGnBu', annot=centers_desnormal, fmt='.3f')\n", (11174, 11242), True, 'import seaborn as sns\n'), ((2542, 2553), 'time.time', 'time.time', ([], {}), '()\n', (2551, 2553), False, 'import time\n'), ((2689, 2753), 'sklearn.metrics.calinski_harabasz_score', 'metrics.calinski_harabasz_score', (['X_normal', 'cluster_predict[name]'], {}), '(X_normal, cluster_predict[name])\n', (2720, 2753), False, 'from sklearn import metrics\n'), ((3025, 3096), 'pandas.DataFrame', 'pd.DataFrame', (['cluster_predict[name]'], {'index': 'X.index', 'columns': "['cluster']"}), "(cluster_predict[name], index=X.index, columns=['cluster'])\n", (3037, 3096), True, 'import pandas as pd\n'), ((7689, 7700), 'time.time', 'time.time', ([], {}), '()\n', (7698, 7700), False, 'import time\n'), ((1653, 1674), 'pandas.isnull', 'pd.isnull', (['censo[col]'], {}), '(censo[col])\n', (1662, 1674), True, 'import pandas as pd\n'), ((1776, 1832), 'sklearn.cluster.KMeans', 'cluster.KMeans', ([], {'init': '"""k-means++"""', 'n_clusters': '(5)', 'n_init': '(5)'}), "(init='k-means++', n_clusters=5, n_init=5)\n", (1790, 1832), False, 'from sklearn import cluster\n'), ((1864, 1916), 'sklearn.cluster.MeanShift', 'cluster.MeanShift', ([], {'cluster_all': '(False)', 'min_bin_freq': '(3)'}), '(cluster_all=False, min_bin_freq=3)\n', (1881, 1916), False, 'from sklearn import cluster\n'), ((1943, 2004), 'sklearn.cluster.AgglomerativeClustering', 'cluster.AgglomerativeClustering', ([], {'n_clusters': '(4)', 'linkage': '"""ward"""'}), "(n_clusters=4, linkage='ward')\n", (1974, 2004), False, 'from sklearn import cluster\n'), ((2033, 2072), 'sklearn.cluster.DBSCAN', 'cluster.DBSCAN', ([], {'eps': '(0.35)', 'min_samples': '(5)'}), '(eps=0.35, min_samples=5)\n', (2047, 2072), False, 'from sklearn import cluster\n'), ((2100, 2142), 'sklearn.cluster.Birch', 'cluster.Birch', ([], {'threshold': '(0.1)', 'n_clusters': '(5)'}), '(threshold=0.1, n_clusters=5)\n', (2113, 2142), False, 'from sklearn import 
cluster\n'), ((2624, 2635), 'time.time', 'time.time', ([], {}), '()\n', (2633, 2635), False, 'import time\n'), ((5931, 6044), 'seaborn.boxplot', 'sns.boxplot', (['dat_filt[usadas[j]]'], {'color': 'colors[i]', 'flierprops': "{'marker': 'o', 'markersize': 4}", 'ax': 'axes[i, j]'}), "(dat_filt[usadas[j]], color=colors[i], flierprops={'marker': 'o',\n 'markersize': 4}, ax=axes[i, j])\n", (5942, 6044), True, 'import seaborn as sns\n'), ((9338, 9400), 'seaborn.color_palette', 'sns.color_palette', ([], {'palette': '"""Blues_d"""', 'n_colors': 'n_clusters_ward'}), "(palette='Blues_d', n_colors=n_clusters_ward)\n", (9355, 9400), True, 'import seaborn as sns\n'), ((10036, 10098), 'seaborn.color_palette', 'sns.color_palette', ([], {'palette': '"""Blues_d"""', 'n_colors': 'n_clusters_ward'}), "(palette='Blues_d', n_colors=n_clusters_ward)\n", (10053, 10098), True, 'import seaborn as sns\n'), ((9417, 9437), 'pandas.DataFrame', 'pd.DataFrame', (['labels'], {}), '(labels)\n', (9429, 9437), True, 'import pandas as pd\n'), ((10115, 10135), 'pandas.DataFrame', 'pd.DataFrame', (['labels'], {}), '(labels)\n', (10127, 10135), True, 'import pandas as pd\n')] |
"""
preprocessing of a (single-lead) ECG signal:
band pass --> remove baseline --> find rpeaks --> denoise (mainly deal with motion artefact)
TODO:
1. motion artefact detection,
and slice the signal into continuous (no motion artefact within) segments
2. to add
References:
-----------
[1] https://github.com/PIA-Group/BioSPPy
[2] to add
"""
import os, time
import multiprocessing as mp
from copy import deepcopy
from numbers import Real
from typing import Union, Optional, Any, List, Dict
import numpy as np
from easydict import EasyDict as ED
from scipy.ndimage.filters import median_filter
from scipy.signal.signaltools import resample
from scipy.io import savemat
# from scipy.signal import medfilt
# https://github.com/scipy/scipy/issues/9680
try:
from biosppy.signals.tools import filter_signal
except:
from references.biosppy.biosppy.signals.tools import filter_signal
from cfg import PreprocCfg
from .ecg_rpeaks import (
xqrs_detect, gqrs_detect, pantompkins,
hamilton_detect, ssf_detect, christov_detect, engzee_detect, gamboa_detect,
)
from .ecg_rpeaks_dl import seq_lab_net_detect
__all__ = [
"preprocess_signal",
"parallel_preprocess_signal",
"denoise_signal",
]
QRS_DETECTORS = {
"xqrs": xqrs_detect,
"gqrs": gqrs_detect,
"pantompkins": pantompkins,
"hamilton": hamilton_detect,
"ssf": ssf_detect,
"christov": christov_detect,
"engzee": engzee_detect,
"gamboa": gamboa_detect,
"seq_lab": seq_lab_net_detect,
}
DL_QRS_DETECTORS = [
"seq_lab",
]
def preprocess_signal(raw_sig:np.ndarray, fs:Real, config:Optional[ED]=None) -> Dict[str, np.ndarray]:
""" finished, checked,
Parameters:
-----------
raw_sig: ndarray,
the raw ecg signal
fs: real number,
sampling frequency of `raw_sig`
config: dict, optional,
extra process configuration,
`PreprocCfg` will be updated by this `config`
Returns:
--------
retval: dict,
with items
- 'filtered_ecg': the array of the processed ecg signal
- 'rpeaks': the array of indices of rpeaks; empty if 'rpeaks' in `config` is not set
NOTE:
-----
output (`retval`) are resampled to have sampling frequency
equal to `config.fs` (if `config` has item `fs`) or `PreprocCfg.fs`
"""
filtered_ecg = raw_sig.copy()
cfg = deepcopy(PreprocCfg)
cfg.update(config or {})
if fs != cfg.fs:
filtered_ecg = resample(filtered_ecg, int(round(len(filtered_ecg)*cfg.fs/fs)))
# remove baseline
if 'baseline' in cfg.preproc:
window1 = 2 * (cfg.baseline_window1 // 2) + 1 # window size must be odd
window2 = 2 * (cfg.baseline_window2 // 2) + 1
baseline = median_filter(filtered_ecg, size=window1, mode='nearest')
baseline = median_filter(baseline, size=window2, mode='nearest')
filtered_ecg = filtered_ecg - baseline
# filter signal
if 'bandpass' in cfg.preproc:
filtered_ecg = filter_signal(
signal=filtered_ecg,
ftype='FIR',
band='bandpass',
order=int(0.3 * fs),
sampling_rate=fs,
frequency=cfg.filter_band,
)['signal']
if cfg.rpeaks and cfg.rpeaks.lower() not in DL_QRS_DETECTORS:
# dl detectors not for parallel computing using `mp`
detector = QRS_DETECTORS[cfg.rpeaks.lower()]
rpeaks = detector(sig=filtered_ecg, fs=fs).astype(int)
else:
rpeaks = np.array([], dtype=int)
retval = ED({
"filtered_ecg": filtered_ecg,
"rpeaks": rpeaks,
})
return retval
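# Hedged usage sketch (added; the sampling rate and the config override below are
# assumptions, not values taken from `PreprocCfg`):
#
#   from easydict import EasyDict as ED
#   out = preprocess_signal(raw_sig, fs=500, config=ED({"rpeaks": "xqrs"}))
#   out["filtered_ecg"], out["rpeaks"]   # resampled to config.fs (or PreprocCfg.fs)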
def parallel_preprocess_signal(raw_sig:np.ndarray, fs:Real, config:Optional[ED]=None, save_dir:Optional[str]=None, save_fmt:str='npy', verbose:int=0) -> Dict[str, np.ndarray]:
""" finished, checked,
Parameters:
-----------
raw_sig: ndarray,
the raw ecg signal
fs: real number,
sampling frequency of `raw_sig`
config: dict, optional,
extra process configuration,
        `PreprocCfg` will be updated by this `config`
save_dir: str, optional,
directory for saving the outcome ('filtered_ecg' and 'rpeaks')
save_fmt: str, default 'npy',
format of the save files, 'npy' or 'mat'
Returns:
--------
retval: dict,
with items
- 'filtered_ecg': the array of the processed ecg signal
- 'rpeaks': the array of indices of rpeaks; empty if 'rpeaks' in `config` is not set
NOTE:
-----
output (`retval`) are resampled to have sampling frequency
equal to `config.fs` (if `config` has item `fs`) or `PreprocCfg.fs`
"""
start_time = time.time()
cfg = deepcopy(PreprocCfg)
cfg.update(config or {})
epoch_len = int(cfg.parallel_epoch_len * fs)
epoch_overlap_half = int(cfg.parallel_epoch_overlap * fs) // 2
epoch_overlap = 2 * epoch_overlap_half
epoch_forward = epoch_len - epoch_overlap
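    # Worked example of the epoch arithmetic above (the numbers are illustrative,
    # not the actual PreprocCfg defaults): with fs = 500 Hz, parallel_epoch_len = 20 s
    # and parallel_epoch_overlap = 2 s, epoch_len = 10000 samples,
    # epoch_overlap_half = 500, epoch_overlap = 1000 and epoch_forward = 9000,
    # i.e. consecutive epochs share 1000 samples so that filtering artifacts at the
    # epoch borders can be trimmed away when the pieces are stitched back together.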
if len(raw_sig) <= 3 * epoch_len: # too short, no need for parallel computing
retval = preprocess_signal(raw_sig, fs, cfg)
if cfg.rpeaks and cfg.rpeaks.lower() in DL_QRS_DETECTORS:
rpeaks = QRS_DETECTORS[cfg.rpeaks.lower()](sig=raw_sig, fs=fs, verbose=verbose).astype(int)
retval.rpeaks = rpeaks
return retval
l_epoch = [
raw_sig[idx*epoch_forward: idx*epoch_forward + epoch_len] \
for idx in range((len(raw_sig)-epoch_overlap)//epoch_forward)
]
if cfg.parallel_keep_tail:
tail_start_idx = epoch_forward * len(l_epoch) + epoch_overlap
if len(raw_sig) - tail_start_idx < 30 * fs: # less than 30s, make configurable?
# append to the last epoch
l_epoch[-1] = np.append(l_epoch[-1], raw_sig[tail_start_idx:])
else: # long enough
tail_epoch = raw_sig[tail_start_idx-epoch_overlap:]
l_epoch.append(tail_epoch)
cpu_num = max(1, mp.cpu_count()-3)
with mp.Pool(processes=cpu_num) as pool:
result = pool.starmap(
func=preprocess_signal,
iterable=[(e, fs, cfg) for e in l_epoch],
)
if cfg.parallel_keep_tail:
tail_result = result[-1]
result = result[:-1]
filtered_ecg = result[0]['filtered_ecg'][:epoch_len-epoch_overlap_half]
rpeaks = result[0]['rpeaks'][np.where(result[0]['rpeaks']<epoch_len-epoch_overlap_half)[0]]
for idx, e in enumerate(result[1:]):
filtered_ecg = np.append(
filtered_ecg, e['filtered_ecg'][epoch_overlap_half: -epoch_overlap_half]
)
epoch_rpeaks = e['rpeaks'][np.where( (e['rpeaks'] >= epoch_overlap_half) & (e['rpeaks'] < epoch_len-epoch_overlap_half) )[0]]
rpeaks = np.append(rpeaks, (idx+1)*epoch_forward + epoch_rpeaks)
if cfg.parallel_keep_tail:
filtered_ecg = np.append(filtered_ecg, tail_result['filtered_ecg'][epoch_overlap_half:])
tail_rpeaks = tail_result['rpeaks'][np.where(tail_result['rpeaks'] >= epoch_overlap_half)[0]]
rpeaks = np.append(rpeaks, len(result)*epoch_forward + tail_rpeaks)
if verbose >= 1:
if cfg.rpeaks.lower() in DL_QRS_DETECTORS:
print(f"signal processing took {round(time.time()-start_time, 3)} seconds")
else:
print(f"signal processing and R peaks detection took {round(time.time()-start_time, 3)} seconds")
start_time = time.time()
if cfg.rpeaks and cfg.rpeaks.lower() in DL_QRS_DETECTORS:
rpeaks = QRS_DETECTORS[cfg.rpeaks.lower()](sig=raw_sig, fs=fs, verbose=verbose).astype(int)
if verbose >= 1:
print(f"R peaks detection using {cfg.rpeaks} took {round(time.time()-start_time, 3)} seconds")
if save_dir:
# NOTE: this part is not tested
os.makedirs(save_dir, exist_ok=True)
if save_fmt.lower() == 'npy':
np.save(os.path.join(save_dir, "filtered_ecg.npy"), filtered_ecg)
np.save(os.path.join(save_dir, "rpeaks.npy"), rpeaks)
elif save_fmt.lower() == 'mat':
# save into 2 files, keep in accordance
savemat(os.path.join(save_dir, "filtered_ecg.mat"), {"filtered_ecg": filtered_ecg}, format='5')
savemat(os.path.join(save_dir, "rpeaks.mat"), {"rpeaks": rpeaks}, format='5')
retval = ED({
"filtered_ecg": filtered_ecg,
"rpeaks": rpeaks,
})
return retval
"""
to check correctness of the function `parallel_preprocess_signal`,
say for record A01, one can call
>>> raw_sig = loadmat("./data/A01.mat")['ecg'].flatten()
>>> processed = parallel_preprocess_signal(raw_sig, 400)
>>> print(len(processed['filtered_ecg']) - len(raw_sig))
>>> start_t = int(3600*24.7811)
>>> len_t = 10
>>> fig, ax = plt.subplots(figsize=(20,6))
>>> ax.plot(processed['filtered_ecg'][start_t*400:(start_t+len_t)*400])
>>> for r in [p for p in processed['rpeaks'] if start_t*400 <= p < (start_t+len_t)*400]:
>>> ax.axvline(r-start_t*400,c='red',linestyle='dashed')
>>> plt.show()
or one can use the 'dataset.py'
"""
| [
"scipy.ndimage.filters.median_filter",
"os.makedirs",
"numpy.where",
"os.path.join",
"multiprocessing.cpu_count",
"numpy.append",
"easydict.EasyDict",
"numpy.array",
"multiprocessing.Pool",
"copy.deepcopy",
"time.time"
]
| [((2371, 2391), 'copy.deepcopy', 'deepcopy', (['PreprocCfg'], {}), '(PreprocCfg)\n', (2379, 2391), False, 'from copy import deepcopy\n'), ((3534, 3586), 'easydict.EasyDict', 'ED', (["{'filtered_ecg': filtered_ecg, 'rpeaks': rpeaks}"], {}), "({'filtered_ecg': filtered_ecg, 'rpeaks': rpeaks})\n", (3536, 3586), True, 'from easydict import EasyDict as ED\n'), ((4684, 4695), 'time.time', 'time.time', ([], {}), '()\n', (4693, 4695), False, 'import os, time\n'), ((4706, 4726), 'copy.deepcopy', 'deepcopy', (['PreprocCfg'], {}), '(PreprocCfg)\n', (4714, 4726), False, 'from copy import deepcopy\n'), ((8305, 8357), 'easydict.EasyDict', 'ED', (["{'filtered_ecg': filtered_ecg, 'rpeaks': rpeaks}"], {}), "({'filtered_ecg': filtered_ecg, 'rpeaks': rpeaks})\n", (8307, 8357), True, 'from easydict import EasyDict as ED\n'), ((2741, 2798), 'scipy.ndimage.filters.median_filter', 'median_filter', (['filtered_ecg'], {'size': 'window1', 'mode': '"""nearest"""'}), "(filtered_ecg, size=window1, mode='nearest')\n", (2754, 2798), False, 'from scipy.ndimage.filters import median_filter\n'), ((2818, 2871), 'scipy.ndimage.filters.median_filter', 'median_filter', (['baseline'], {'size': 'window2', 'mode': '"""nearest"""'}), "(baseline, size=window2, mode='nearest')\n", (2831, 2871), False, 'from scipy.ndimage.filters import median_filter\n'), ((3496, 3519), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (3504, 3519), True, 'import numpy as np\n'), ((5981, 6007), 'multiprocessing.Pool', 'mp.Pool', ([], {'processes': 'cpu_num'}), '(processes=cpu_num)\n', (5988, 6007), True, 'import multiprocessing as mp\n'), ((6483, 6570), 'numpy.append', 'np.append', (['filtered_ecg', "e['filtered_ecg'][epoch_overlap_half:-epoch_overlap_half]"], {}), "(filtered_ecg, e['filtered_ecg'][epoch_overlap_half:-\n epoch_overlap_half])\n", (6492, 6570), True, 'import numpy as np\n'), ((6740, 6799), 'numpy.append', 'np.append', (['rpeaks', '((idx + 1) * epoch_forward + epoch_rpeaks)'], {}), '(rpeaks, (idx + 1) * epoch_forward + epoch_rpeaks)\n', (6749, 6799), True, 'import numpy as np\n'), ((6851, 6924), 'numpy.append', 'np.append', (['filtered_ecg', "tail_result['filtered_ecg'][epoch_overlap_half:]"], {}), "(filtered_ecg, tail_result['filtered_ecg'][epoch_overlap_half:])\n", (6860, 6924), True, 'import numpy as np\n'), ((7409, 7420), 'time.time', 'time.time', ([], {}), '()\n', (7418, 7420), False, 'import os, time\n'), ((7782, 7818), 'os.makedirs', 'os.makedirs', (['save_dir'], {'exist_ok': '(True)'}), '(save_dir, exist_ok=True)\n', (7793, 7818), False, 'import os, time\n'), ((5751, 5799), 'numpy.append', 'np.append', (['l_epoch[-1]', 'raw_sig[tail_start_idx:]'], {}), '(l_epoch[-1], raw_sig[tail_start_idx:])\n', (5760, 5799), True, 'import numpy as np\n'), ((5954, 5968), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (5966, 5968), True, 'import multiprocessing as mp\n'), ((6356, 6418), 'numpy.where', 'np.where', (["(result[0]['rpeaks'] < epoch_len - epoch_overlap_half)"], {}), "(result[0]['rpeaks'] < epoch_len - epoch_overlap_half)\n", (6364, 6418), True, 'import numpy as np\n'), ((6624, 6722), 'numpy.where', 'np.where', (["((e['rpeaks'] >= epoch_overlap_half) & (e['rpeaks'] < epoch_len -\n epoch_overlap_half))"], {}), "((e['rpeaks'] >= epoch_overlap_half) & (e['rpeaks'] < epoch_len -\n epoch_overlap_half))\n", (6632, 6722), True, 'import numpy as np\n'), ((6969, 7022), 'numpy.where', 'np.where', (["(tail_result['rpeaks'] >= epoch_overlap_half)"], {}), "(tail_result['rpeaks'] >= 
epoch_overlap_half)\n", (6977, 7022), True, 'import numpy as np\n'), ((7877, 7919), 'os.path.join', 'os.path.join', (['save_dir', '"""filtered_ecg.npy"""'], {}), "(save_dir, 'filtered_ecg.npy')\n", (7889, 7919), False, 'import os, time\n'), ((7955, 7991), 'os.path.join', 'os.path.join', (['save_dir', '"""rpeaks.npy"""'], {}), "(save_dir, 'rpeaks.npy')\n", (7967, 7991), False, 'import os, time\n'), ((8113, 8155), 'os.path.join', 'os.path.join', (['save_dir', '"""filtered_ecg.mat"""'], {}), "(save_dir, 'filtered_ecg.mat')\n", (8125, 8155), False, 'import os, time\n'), ((8221, 8257), 'os.path.join', 'os.path.join', (['save_dir', '"""rpeaks.mat"""'], {}), "(save_dir, 'rpeaks.mat')\n", (8233, 8257), False, 'import os, time\n'), ((7226, 7237), 'time.time', 'time.time', ([], {}), '()\n', (7235, 7237), False, 'import os, time\n'), ((7350, 7361), 'time.time', 'time.time', ([], {}), '()\n', (7359, 7361), False, 'import os, time\n'), ((7678, 7689), 'time.time', 'time.time', ([], {}), '()\n', (7687, 7689), False, 'import os, time\n')] |
"""Test Manage All-Link Record."""
import unittest
from binascii import unhexlify
from pyinsteon.address import Address
from pyinsteon.constants import AckNak, ManageAllLinkRecordAction, MessageId
from pyinsteon.protocol.messages.all_link_record_flags import \
AllLinkRecordFlags
from tests import set_log_levels
from tests.utils import hex_to_inbound_message
# pylint: disable=no-member
class TestManageAllLinkRecord(unittest.TestCase):
"""Test Manage All-Link Record."""
def setUp(self):
"""Set up test."""
self.hex = "026F400405060708090a0b"
self.hex_ack = "026F400405060708090a0b06"
self.message_id = MessageId(0x6F)
self.action = ManageAllLinkRecordAction(0x40)
self.flags = AllLinkRecordFlags(0x04)
self.group = int(0x05)
self.address = Address("060708")
self.data1 = int(0x09)
self.data2 = int(0x0A)
self.data3 = int(0x0B)
self.ack = AckNak(0x06)
self.msg, self.msg_bytes = hex_to_inbound_message(self.hex_ack)
set_log_levels(
logger="info",
logger_pyinsteon="info",
logger_messages="info",
logger_topics=False,
)
def test_id(self):
"""Test ID."""
assert self.msg.message_id == self.message_id
def test_ack_nak(self):
"""Test ACK/NAK."""
assert self.msg.ack == self.ack
def test_bytes(self):
"""Test bytes."""
assert bytes(self.msg) == unhexlify(self.hex_ack)
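# For reference, the ACK hex string used in setUp decomposes field by field
# (layout inferred from the assignments above, shown purely as an illustration):
#   02    | 6F | 40     | 04    | 05    | 06 07 08 | 09    | 0a    | 0b    | 06
#   start | id | action | flags | group | address  | data1 | data2 | data3 | ack
# i.e. message id 0x6F (Manage All-Link Record), action 0x40, record flags 0x04,
# group 0x05, address 06.07.08, data1-3 0x09/0x0A/0x0B and a trailing ACK (0x06).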
| [
"pyinsteon.constants.AckNak",
"tests.utils.hex_to_inbound_message",
"tests.set_log_levels",
"pyinsteon.address.Address",
"pyinsteon.constants.MessageId",
"pyinsteon.protocol.messages.all_link_record_flags.AllLinkRecordFlags",
"binascii.unhexlify",
"pyinsteon.constants.ManageAllLinkRecordAction"
]
| [((653, 667), 'pyinsteon.constants.MessageId', 'MessageId', (['(111)'], {}), '(111)\n', (662, 667), False, 'from pyinsteon.constants import AckNak, ManageAllLinkRecordAction, MessageId\n'), ((691, 720), 'pyinsteon.constants.ManageAllLinkRecordAction', 'ManageAllLinkRecordAction', (['(64)'], {}), '(64)\n', (716, 720), False, 'from pyinsteon.constants import AckNak, ManageAllLinkRecordAction, MessageId\n'), ((744, 765), 'pyinsteon.protocol.messages.all_link_record_flags.AllLinkRecordFlags', 'AllLinkRecordFlags', (['(4)'], {}), '(4)\n', (762, 765), False, 'from pyinsteon.protocol.messages.all_link_record_flags import AllLinkRecordFlags\n'), ((823, 840), 'pyinsteon.address.Address', 'Address', (['"""060708"""'], {}), "('060708')\n", (830, 840), False, 'from pyinsteon.address import Address\n'), ((953, 962), 'pyinsteon.constants.AckNak', 'AckNak', (['(6)'], {}), '(6)\n', (959, 962), False, 'from pyinsteon.constants import AckNak, ManageAllLinkRecordAction, MessageId\n'), ((1002, 1038), 'tests.utils.hex_to_inbound_message', 'hex_to_inbound_message', (['self.hex_ack'], {}), '(self.hex_ack)\n', (1024, 1038), False, 'from tests.utils import hex_to_inbound_message\n'), ((1047, 1151), 'tests.set_log_levels', 'set_log_levels', ([], {'logger': '"""info"""', 'logger_pyinsteon': '"""info"""', 'logger_messages': '"""info"""', 'logger_topics': '(False)'}), "(logger='info', logger_pyinsteon='info', logger_messages=\n 'info', logger_topics=False)\n", (1061, 1151), False, 'from tests import set_log_levels\n'), ((1491, 1514), 'binascii.unhexlify', 'unhexlify', (['self.hex_ack'], {}), '(self.hex_ack)\n', (1500, 1514), False, 'from binascii import unhexlify\n')] |
from django.shortcuts import render, HttpResponse, HttpResponseRedirect
from django.template import loader
from django.conf import settings
from django.contrib.auth.models import User
from rameniaapp.models import ReviewReport, ProfileReport, NoodleReport, Report, Review, Profile, Noodle
from django.views.generic import ListView, FormView, CreateView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.decorators import login_required
from rameniaapp.decorators import user_is_moderator
from rameniaapp.actionhookutils import dispatch_hook
from rameniaapp.utils import UserIsModeratorMixin
from django.forms.widgets import Select
from django.contrib import messages
class ReportForm(LoginRequiredMixin, CreateView):
'''Class based view for creating reports'''
template_name = "report_form.html"
model = Report
success_url = "/app"
fields = ["reason"]
url_path = "/app"
login_url="/app/login"
def get_form(self, form_class=None):
form = super(ReportForm, self).get_form(form_class)
form.fields['reason'].widget.attrs.update({'class':'form-control'})
return form
def form_valid(self, form):
'''Ensures hidden form values are filled'''
form.instance.reporter = self.request.user
form.instance.status = 'OP'
return super().form_valid(form)
def get_context_data(self, **kwargs):
'''Adds url_path value and relevant object id to template'''
context = super().get_context_data(**kwargs)
context["id"] = self.kwargs["id"]
context["url_path"] = self.url_path
return context
class NoodleReportForm(ReportForm):
'''Class based view for reporting noodles'''
model = NoodleReport
#This is used to allow the form to create the correct object
url_path = "noodle_report"
def form_valid(self, form):
'''Ensures hidden form values are filled'''
form.instance.noodle = Noodle.objects.get(pk=self.kwargs["id"])
form.instance.type = 'ND'
return super().form_valid(form)
def get_context_data(self, **kwargs):
'''Passes item name to template'''
context = super().get_context_data(**kwargs)
context["name"] = Noodle.objects.get(pk=self.kwargs["id"]).name
return context
class ReviewReportForm(ReportForm):
'''Class based view for reporting reviews'''
model = ReviewReport
url_path = "review_report"
def form_valid(self, form):
'''Ensures hidden form values are filled'''
form.instance.review = Review.objects.get(pk=self.kwargs["id"])
form.instance.type = 'RV'
return super().form_valid(form)
def get_context_data(self, **kwargs):
'''Passes item name to template'''
context = super().get_context_data(**kwargs)
context["name"] = Review.objects.get(pk=self.kwargs["id"]).title
return context
class ProfileReportForm(ReportForm):
'''Class based view for reporting profile'''
model = ProfileReport
url_path = "profile_report"
def form_valid(self, form):
'''Ensures hidden form values are filled'''
form.instance.profile = Profile.objects.get(pk=self.kwargs["id"])
form.instance.type = 'PF'
return super().form_valid(form)
def get_context_data(self, **kwargs):
'''Passes item name to template'''
context = super().get_context_data(**kwargs)
context["name"] = Profile.objects.get(pk=self.kwargs["id"]).name
return context
class ReportList(LoginRequiredMixin, UserIsModeratorMixin, ListView):
'''Class based view for viewing reports'''
    # These values are overridden in the subclasses so we can list
    # multiple types of reports without rewriting code
model = Report
item_type = ""
context_object_name = "reports"
template_name = "report_view.html"
login_url="/app/login"
def get_queryset(self):
'''Get all reports for specific objects'''
if "item_id" in self.kwargs:
item_tuple = self.get_item(self.kwargs["item_id"])
self.kwargs[item_tuple[0]] = item_tuple[1]
# This prevents the next line from breaking
del self.kwargs["item_id"]
# Using get_item, this lets us filter for any kind of object without
# writing extra code
return self.model.objects.filter(**self.kwargs)
def get_item(self, id):
'''Returns a tuple containing the key name and item'''
return (None, None)
def get_context_data(self, **kwargs):
'''Knowing the item type lets us not break things'''
context = super().get_context_data(**kwargs)
context['item_type'] = self.item_type
return context
class NoodleReportList(ReportList):
'''List of noodle reports'''
model = NoodleReport
item_type = "Noodles"
def get_item(self, id):
'''Returns a tuple containing the key name and item'''
noodle = Noodle.objects.get(id=id)
return ("noodle", noodle)
class ReviewReportList(ReportList):
'''List of review reports'''
model = ReviewReport
item_type = "Reviews"
def get_item(self, id):
'''Returns a tuple containing the key name and item'''
review = Review.objects.get(id=id)
return ("review", review)
class ProfileReportList(ReportList):
'''List of profile reports'''
model = ProfileReport
item_type = "Profiles"
def get_item(self, id):
'''Returns a tuple containing the key name and item'''
profile = Profile.objects.get(id=id)
return ("profile", profile)
@login_required(login_url="/app/login")
@user_is_moderator
def ban_user(request, report_type, user_id):
'''Ban a user by their id; expects report_type arg for redirect reasons'''
if request.method == "POST":
        User.objects.get(pk=user_id).delete()
path = None
if report_type == "ND":
path = "reports/noodle"
elif report_type == "RV":
path = "reports/review"
elif report_type == "PF":
path = "reports/profile"
messages.add_message(request, messages.WARNING, "User banned")
return HttpResponseRedirect("/app/mod/{}".format(path))
else:
return HttpResponseRedirect("/app/mod")
@login_required(login_url="/app/login")
@user_is_moderator
def delete_content(request, report_id):
'''This method deletes offending items that have been reported, or just their content'''
if request.method == "POST":
report = Report.objects.get(pk=report_id)
reporter = report.reporter
creator = None
path = get_return_path(report)
# Deleting object is dependent on type
if report.type == "RV":
report = ReviewReport.objects.get(pk=report_id)
creator = report.review.reviewer
report.review.delete()
elif report.type == "ND":
report = NoodleReport.objects.get(pk=report_id)
creator = report.noodle.editor
report.noodle.delete()
elif report.type == "PF":
# Deleting a profile will break fundamental assumptions, so we instead
# remove all content from it.
report = ProfileReport.objects.get(pk=report_id)
report.profile.name = "AnonymousUser"
report.profile.profile_pic = Profile._meta.get_field('profile_pic').default
report.profile.metadata["Description"] = ""
report.profile.save()
creator = report.profile.user
report.delete()
# If we delete the content, it was reasonable to report it
dispatch_hook(reporter, "good-report")
if creator:
# If the noodle's creator hasn't been banned, penalize them
dispatch_hook(creator, "bad-content")
messages.add_message(request, messages.WARNING, "Content deleted")
return HttpResponseRedirect("/app/mod/reports/{}".format(path))
else:
return HttpResponseRedirect("/app/mod")
@login_required(login_url="/app/login")
@user_is_moderator
def update_report_status(request, report_id, status):
'''Change report status to "open", "resolved", or "spam"'''
if request.method == "POST":
# Validate status is the correct value
if status in dict(Report.STATUS_CHOICES):
report = Report.objects.get(pk=report_id)
report.status = status
report.save()
creator = None
path = get_return_path(report)
# Get the creator of the relevant object/report
if report.type == "RV":
report = ReviewReport.objects.get(pk=report_id)
creator = report.review.reviewer
elif report.type == "ND":
report = NoodleReport.objects.get(pk=report_id)
creator = report.noodle.editor
elif report.type == "PF":
report = ProfileReport.objects.get(pk=report_id)
creator = report.profile.user
# Reward people for good reports
if status == "ED":
if report.reporter:
dispatch_hook(report.reporter, "good-report")
if creator:
dispatch_hook(creator, "bad-content")
messages.add_message(request, messages.SUCCESS, "Report marked as resolved")
# Penalize people for bad reports
if status == "SP":
if report.reporter:
dispatch_hook(report.reporter, "bad-report")
messages.add_message(request, messages.WARNING, "Report marked as spam")
return HttpResponseRedirect("/app/mod/reports/{}".format(path))
else:
return HttpResponseRedirect("/app/mod")
@login_required(login_url="/app/login")
@user_is_moderator
def ignore_report(request, report_id):
'''Ignore (delete) a report'''
if request.method == "POST":
report = Report.objects.get(pk=report_id)
path = get_return_path(report)
if report.reporter:
# We assume a bad report is worth deleting if its creator
# wasn't banned
dispatch_hook(report.reporter, "bad-report")
report.delete()
messages.add_message(request, messages.WARNING, "Report ignored")
return HttpResponseRedirect("/app/mod/reports/{}".format(path))
else:
return HttpResponseRedirect("/app/mod")
def get_return_path(report):
'''Util method to return a correct redirect path'''
if report.type == "RV":
return "review"
elif report.type == "ND":
return "noodle"
elif report.type == "PF":
return "profile" | [
"django.contrib.auth.models.User.objects.get",
"rameniaapp.models.ProfileReport.objects.get",
"rameniaapp.actionhookutils.dispatch_hook",
"rameniaapp.models.NoodleReport.objects.get",
"rameniaapp.models.Report.objects.get",
"rameniaapp.models.Profile.objects.get",
"rameniaapp.models.Profile._meta.get_field",
"rameniaapp.models.ReviewReport.objects.get",
"django.shortcuts.HttpResponseRedirect",
"django.contrib.auth.decorators.login_required",
"django.contrib.messages.add_message",
"rameniaapp.models.Noodle.objects.get",
"rameniaapp.models.Review.objects.get"
]
| [((5632, 5670), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""/app/login"""'}), "(login_url='/app/login')\n", (5646, 5670), False, 'from django.contrib.auth.decorators import login_required\n'), ((6324, 6362), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""/app/login"""'}), "(login_url='/app/login')\n", (6338, 6362), False, 'from django.contrib.auth.decorators import login_required\n'), ((8067, 8105), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""/app/login"""'}), "(login_url='/app/login')\n", (8081, 8105), False, 'from django.contrib.auth.decorators import login_required\n'), ((9821, 9859), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""/app/login"""'}), "(login_url='/app/login')\n", (9835, 9859), False, 'from django.contrib.auth.decorators import login_required\n'), ((1962, 2002), 'rameniaapp.models.Noodle.objects.get', 'Noodle.objects.get', ([], {'pk': "self.kwargs['id']"}), "(pk=self.kwargs['id'])\n", (1980, 2002), False, 'from rameniaapp.models import ReviewReport, ProfileReport, NoodleReport, Report, Review, Profile, Noodle\n'), ((2569, 2609), 'rameniaapp.models.Review.objects.get', 'Review.objects.get', ([], {'pk': "self.kwargs['id']"}), "(pk=self.kwargs['id'])\n", (2587, 2609), False, 'from rameniaapp.models import ReviewReport, ProfileReport, NoodleReport, Report, Review, Profile, Noodle\n'), ((3181, 3222), 'rameniaapp.models.Profile.objects.get', 'Profile.objects.get', ([], {'pk': "self.kwargs['id']"}), "(pk=self.kwargs['id'])\n", (3200, 3222), False, 'from rameniaapp.models import ReviewReport, ProfileReport, NoodleReport, Report, Review, Profile, Noodle\n'), ((4982, 5007), 'rameniaapp.models.Noodle.objects.get', 'Noodle.objects.get', ([], {'id': 'id'}), '(id=id)\n', (5000, 5007), False, 'from rameniaapp.models import ReviewReport, ProfileReport, NoodleReport, Report, Review, Profile, Noodle\n'), ((5272, 5297), 'rameniaapp.models.Review.objects.get', 'Review.objects.get', ([], {'id': 'id'}), '(id=id)\n', (5290, 5297), False, 'from rameniaapp.models import ReviewReport, ProfileReport, NoodleReport, Report, Review, Profile, Noodle\n'), ((5567, 5593), 'rameniaapp.models.Profile.objects.get', 'Profile.objects.get', ([], {'id': 'id'}), '(id=id)\n', (5586, 5593), False, 'from rameniaapp.models import ReviewReport, ProfileReport, NoodleReport, Report, Review, Profile, Noodle\n'), ((6137, 6199), 'django.contrib.messages.add_message', 'messages.add_message', (['request', 'messages.WARNING', '"""User banned"""'], {}), "(request, messages.WARNING, 'User banned')\n", (6157, 6199), False, 'from django.contrib import messages\n'), ((6289, 6321), 'django.shortcuts.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/app/mod"""'], {}), "('/app/mod')\n", (6309, 6321), False, 'from django.shortcuts import render, HttpResponse, HttpResponseRedirect\n'), ((6565, 6597), 'rameniaapp.models.Report.objects.get', 'Report.objects.get', ([], {'pk': 'report_id'}), '(pk=report_id)\n', (6583, 6597), False, 'from rameniaapp.models import ReviewReport, ProfileReport, NoodleReport, Report, Review, Profile, Noodle\n'), ((7679, 7717), 'rameniaapp.actionhookutils.dispatch_hook', 'dispatch_hook', (['reporter', '"""good-report"""'], {}), "(reporter, 'good-report')\n", (7692, 7717), False, 'from rameniaapp.actionhookutils import dispatch_hook\n'), ((7868, 7934), 'django.contrib.messages.add_message', 'messages.add_message', (['request', 
'messages.WARNING', '"""Content deleted"""'], {}), "(request, messages.WARNING, 'Content deleted')\n", (7888, 7934), False, 'from django.contrib import messages\n'), ((8032, 8064), 'django.shortcuts.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/app/mod"""'], {}), "('/app/mod')\n", (8052, 8064), False, 'from django.shortcuts import render, HttpResponse, HttpResponseRedirect\n'), ((9786, 9818), 'django.shortcuts.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/app/mod"""'], {}), "('/app/mod')\n", (9806, 9818), False, 'from django.shortcuts import render, HttpResponse, HttpResponseRedirect\n'), ((10003, 10035), 'rameniaapp.models.Report.objects.get', 'Report.objects.get', ([], {'pk': 'report_id'}), '(pk=report_id)\n', (10021, 10035), False, 'from rameniaapp.models import ReviewReport, ProfileReport, NoodleReport, Report, Review, Profile, Noodle\n'), ((10290, 10355), 'django.contrib.messages.add_message', 'messages.add_message', (['request', 'messages.WARNING', '"""Report ignored"""'], {}), "(request, messages.WARNING, 'Report ignored')\n", (10310, 10355), False, 'from django.contrib import messages\n'), ((10453, 10485), 'django.shortcuts.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/app/mod"""'], {}), "('/app/mod')\n", (10473, 10485), False, 'from django.shortcuts import render, HttpResponse, HttpResponseRedirect\n'), ((2242, 2282), 'rameniaapp.models.Noodle.objects.get', 'Noodle.objects.get', ([], {'pk': "self.kwargs['id']"}), "(pk=self.kwargs['id'])\n", (2260, 2282), False, 'from rameniaapp.models import ReviewReport, ProfileReport, NoodleReport, Report, Review, Profile, Noodle\n'), ((2849, 2889), 'rameniaapp.models.Review.objects.get', 'Review.objects.get', ([], {'pk': "self.kwargs['id']"}), "(pk=self.kwargs['id'])\n", (2867, 2889), False, 'from rameniaapp.models import ReviewReport, ProfileReport, NoodleReport, Report, Review, Profile, Noodle\n'), ((3462, 3503), 'rameniaapp.models.Profile.objects.get', 'Profile.objects.get', ([], {'pk': "self.kwargs['id']"}), "(pk=self.kwargs['id'])\n", (3481, 3503), False, 'from rameniaapp.models import ReviewReport, ProfileReport, NoodleReport, Report, Review, Profile, Noodle\n'), ((6795, 6833), 'rameniaapp.models.ReviewReport.objects.get', 'ReviewReport.objects.get', ([], {'pk': 'report_id'}), '(pk=report_id)\n', (6819, 6833), False, 'from rameniaapp.models import ReviewReport, ProfileReport, NoodleReport, Report, Review, Profile, Noodle\n'), ((7822, 7859), 'rameniaapp.actionhookutils.dispatch_hook', 'dispatch_hook', (['creator', '"""bad-content"""'], {}), "(creator, 'bad-content')\n", (7835, 7859), False, 'from rameniaapp.actionhookutils import dispatch_hook\n'), ((8394, 8426), 'rameniaapp.models.Report.objects.get', 'Report.objects.get', ([], {'pk': 'report_id'}), '(pk=report_id)\n', (8412, 8426), False, 'from rameniaapp.models import ReviewReport, ProfileReport, NoodleReport, Report, Review, Profile, Noodle\n'), ((10213, 10257), 'rameniaapp.actionhookutils.dispatch_hook', 'dispatch_hook', (['report.reporter', '"""bad-report"""'], {}), "(report.reporter, 'bad-report')\n", (10226, 10257), False, 'from rameniaapp.actionhookutils import dispatch_hook\n'), ((5862, 5890), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'pk': 'user_id'}), '(pk=user_id)\n', (5878, 5890), False, 'from django.contrib.auth.models import User\n'), ((6969, 7007), 'rameniaapp.models.NoodleReport.objects.get', 'NoodleReport.objects.get', ([], {'pk': 'report_id'}), '(pk=report_id)\n', (6993, 7007), False, 'from rameniaapp.models import 
ReviewReport, ProfileReport, NoodleReport, Report, Review, Profile, Noodle\n'), ((8679, 8717), 'rameniaapp.models.ReviewReport.objects.get', 'ReviewReport.objects.get', ([], {'pk': 'report_id'}), '(pk=report_id)\n', (8703, 8717), False, 'from rameniaapp.models import ReviewReport, ProfileReport, NoodleReport, Report, Review, Profile, Noodle\n'), ((9345, 9421), 'django.contrib.messages.add_message', 'messages.add_message', (['request', 'messages.SUCCESS', '"""Report marked as resolved"""'], {}), "(request, messages.SUCCESS, 'Report marked as resolved')\n", (9365, 9421), False, 'from django.contrib import messages\n'), ((9616, 9688), 'django.contrib.messages.add_message', 'messages.add_message', (['request', 'messages.WARNING', '"""Report marked as spam"""'], {}), "(request, messages.WARNING, 'Report marked as spam')\n", (9636, 9688), False, 'from django.contrib import messages\n'), ((7266, 7305), 'rameniaapp.models.ProfileReport.objects.get', 'ProfileReport.objects.get', ([], {'pk': 'report_id'}), '(pk=report_id)\n', (7291, 7305), False, 'from rameniaapp.models import ReviewReport, ProfileReport, NoodleReport, Report, Review, Profile, Noodle\n'), ((8830, 8868), 'rameniaapp.models.NoodleReport.objects.get', 'NoodleReport.objects.get', ([], {'pk': 'report_id'}), '(pk=report_id)\n', (8854, 8868), False, 'from rameniaapp.models import ReviewReport, ProfileReport, NoodleReport, Report, Review, Profile, Noodle\n'), ((9197, 9242), 'rameniaapp.actionhookutils.dispatch_hook', 'dispatch_hook', (['report.reporter', '"""good-report"""'], {}), "(report.reporter, 'good-report')\n", (9210, 9242), False, 'from rameniaapp.actionhookutils import dispatch_hook\n'), ((9291, 9328), 'rameniaapp.actionhookutils.dispatch_hook', 'dispatch_hook', (['creator', '"""bad-content"""'], {}), "(creator, 'bad-content')\n", (9304, 9328), False, 'from rameniaapp.actionhookutils import dispatch_hook\n'), ((9555, 9599), 'rameniaapp.actionhookutils.dispatch_hook', 'dispatch_hook', (['report.reporter', '"""bad-report"""'], {}), "(report.reporter, 'bad-report')\n", (9568, 9599), False, 'from rameniaapp.actionhookutils import dispatch_hook\n'), ((7397, 7435), 'rameniaapp.models.Profile._meta.get_field', 'Profile._meta.get_field', (['"""profile_pic"""'], {}), "('profile_pic')\n", (7420, 7435), False, 'from rameniaapp.models import ReviewReport, ProfileReport, NoodleReport, Report, Review, Profile, Noodle\n'), ((8979, 9018), 'rameniaapp.models.ProfileReport.objects.get', 'ProfileReport.objects.get', ([], {'pk': 'report_id'}), '(pk=report_id)\n', (9004, 9018), False, 'from rameniaapp.models import ReviewReport, ProfileReport, NoodleReport, Report, Review, Profile, Noodle\n')] |
import numpy as np
class ProjectionMatrix():
"""This matrix provides projection distortion.
Projection distortion is when things that are far away
appear smaller and things that are close appear bigger.
This works flawlessly so far. Takes in screen-size and
provides near- and far clipping. fov is field-of-view
and smaller values will make view zoom in. A value of 1
will provide a panorama image."""
def __init__(self, screen_size, zNear, zFar, fov):
        if fov >= 1:  # Clamp just below 1.0: at fov == 1.0 tanHalfFOV diverges to infinity, and >1.0 gives strange results.
            fov = 0.99999
        tanHalfFOV = np.tan(fov * np.pi / 2.0)
        zRange = zNear - zFar
self.projectionMatrix = np.array([
[ # Row 0:
screen_size[1] / (tanHalfFOV * screen_size[0]),
0,
0,
0
],
[ # Row 1:
0,
1.0 / tanHalfFOV,
0,
0
],
[ # Row 2:
0,
0,
(-zNear - zFar)/zRange,
2.0 * zFar * zNear / zRange
],
[ # Row 3:
0,
0,
1,
0
],
], dtype=np.float32)
def get(self):
return self.projectionMatrix
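    # Illustrative use (the window size and clip planes are placeholder values):
    # with this class's 0..1 fov convention the full field angle is fov * pi radians,
    # so fov = 0.5 gives roughly a 90-degree field of view.
    # >>> proj = ProjectionMatrix((800, 600), zNear=0.1, zFar=100.0, fov=0.5)
    # >>> proj.get().shape   # (4, 4) float32 array, ready to upload as a shader uniform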
class ViewMatrix():
"""This matrix transform a model as if it's percieved by a
camera with a target 'self.t' in global world coordinates
and a position 'self.p' in global world coordinates. Global
coordinates are x=right, y=forth and z=up."""
def __init__(self, position):
self.p = vec3(position.x, position.y, position.z)
# target coordinates:
self.t = vec3(0, 0, 0)
# tolerance value:
self.tolerance = 0.5
"""The tolerance value is for testing when view lies within bounds.
In case of 'self.orbitTarget()', it's for testing when view gets too
close to target z-axis. In case of 'self.approachTarget()', it's for
testing when view gets too close to target coordinates."""
# Sensitivity value:
self.alpha = 0.01
"""The sensitivity value is for tuning how sensitive 'self.orbitTarget()'
and 'self.approachTarget()' are to user input."""
# Initialize the rotationMatrix as the identity matrix:
self.rotationMatrix = np.matrix([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]
], dtype=np.float32)
def translate(self, dp):
self.p = self.p.add(dp)
def setPos(self, p):
self.p = vec3(p.x, p.y, p.z)
def lookAt(self, target=None, up=None):
"""This function focuses the view on a target.
        Tested and seems to work as it should... ........finally........"""
if target != None:
self.t = vec3(target.x, target.y, target.z)
f = self.t.sub(self.p).norm()
if up != None:
u = vec3(up.x, up.y, up.z).norm()
else:
u = vec3(0, 0, 1)
s = f.cross(u).norm() # f x u
u = s.cross(f) # s x f, automatically normalized
self.rotationMatrix = np.matrix([
[ s.x, s.y, s.z, 0],
[ u.x, u.y, u.z, 0],
[ f.x, f.y, f.z, 0],
[ 0, 0, 0, 1]], dtype=np.float32)
def approachTarget(self, amount):
"""This function approaches the view towards the target
when amount is positive and moves away from the target when
amount is negative. It will stay outside the self.tolerance
distance. When completely close to the target, view cannot
look up or down too much."""
if amount == 0:
# If amount is zero, do nothing.
return
if self.t.sub(self.p).mag()*(1 - amount) > 2.0*self.tolerance:
# If 'self.approachTarget()' will not take the view within twice the
# tolerance distance, approach the target by given amount:
self.p = self.p.add(self.t.sub(self.p).scale(amount))
def orbitTarget(self, axis):
if axis == (0, 0):
return # Do nothing
# Get target2camera-vector:
p = self.p.sub(self.t)
# Assign passed values to variables we can change if we have to:
axis_x = -axis[0]
if axis[1] > 0.30/self.alpha:
"""If axis[1] is bigger than 0.40 / self.alpha, we get strange results
becouse view can 'tunnel' over the boundary set when getting view is
getting close to target z-axis. Changing tolerance doen't change it a
whole lot so I'm setting a boundary value for axis[1] to +-0.30 / self.alpha which is
really really large as it is."""
axis_y = 0.3 / self.alpha
elif axis[1] < -0.30/self.alpha:
axis_y = -0.3 / self.alpha
else:
axis_y = axis[1]
if axis_y > 0 and p.z > 0:
"""Tests if user is trying to orbit the view up
and if the view is above the 'equator'. The second
test is to make sure the view doesn't get stuck
if it gets inside the tolerance bounds and can get back
out as long as it's trying to move away."""
if vec2(p.x, p.y).mag() < self.tolerance:
axis_y = 0
elif axis_y < 0 and p.z < 0:
"""Tests if user is trying to orbit the view down
and if the view is below the 'equator'. Same test
but for different case as the one above."""
if vec2(p.x, p.y).mag() < self.tolerance:
axis_y = 0
if axis_y == 0: #If the other axis is zero:
# Amount of rotation for target-cam x-axis: (longitude, west2east)
v = vec3(0, 0, 1) # v is up vector
rate = axis_x
elif axis_x == 0: #If the other axis is zero:
# Amount of rotation for target-cam y-axis: (latitude, south2north)
v = p.cross(vec3(0, 0, 1)).norm() # v is side vector
rate = axis_y
else: #If neither is zero
# u is up vector:
u = vec3(0, 0, axis_x)
# s is side vector:
s = p.cross(vec3(0, 0, 1)).norm().scale(axis_y)
# v is combined vector:
v = u.add(s).norm()
rate = abs(axis_x) + abs(axis_y)
sin = np.sin(self.alpha * rate)
cos = np.cos(self.alpha * rate)
rotateMatrix = np.matrix([
[ # Row 0:
( v.x*v.x*(1 - cos) + cos ),
( v.y*v.x*(1 - cos) - v.z*sin ),
( v.z*v.x*(1 - cos) + v.y*sin ),
0
],
[ # Row 1:
( v.x*v.y*(1 - cos) + v.z*sin ),
( v.y*v.y*(1 - cos) + cos ),
( v.z*v.y*(1 - cos) - v.x*sin ),
0
],
[ # Row 2:
( v.x*v.z*(1 - cos) - v.y*sin ),
( v.y*v.z*(1 - cos) + v.x*sin ),
( v.z*v.z*(1 - cos) + cos ),
0
],
[ # Row 3:
0,
0,
0,
1
],
], dtype=np.float32)
p = rotateMatrix.dot( np.array([p.x, p.y, p.z, 1.0]) ).getA()[0][0:3]
self.p = vec3(p[0], p[1], p[2]).add(self.t)
self.lookAt(self.t)
def get(self):
translationMatrix = np.matrix([
[1,0,0,-self.p.x],
[0,1,0,-self.p.y],
[0,0,1,-self.p.z],
[0,0,0,1]
], dtype=np.float32)
return (self.rotationMatrix*translationMatrix).getA()
class ModelMatrix():
"""This matrix transform a model into world coordinates.
Heavily tested and should work properly. Could probably
be optimized further or even translated into cython for
performance."""
def __init__(self, position):
self.p = vec3(position.x, position.y, position.z)
self.s = vec3(1, 1, 1)
self.rotationMatrix = np.matrix([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]
], dtype=np.float32)
def translate(self, dp):
self.p = self.p.add(dp)
def rotate(self, turns, unit):
"""Heavily tested and should work! Requires 'GL_TRUE'
to be passed to the uniform on shader program to work."""
u = unit.norm()
sin = np.sin(turns * np.pi * 2)
cos = np.cos(turns * np.pi * 2)
self.rotationMatrix = self.rotationMatrix.dot(
np.matrix([
[ # Row 0:
( u.x*u.x*(1 - cos) + cos ),
( u.y*u.x*(1 - cos) - u.z*sin ),
( u.z*u.x*(1 - cos) + u.y*sin ),
0
],
[ # Row 1:
( u.x*u.y*(1 - cos) + u.z*sin ),
( u.y*u.y*(1 - cos) + cos ),
( u.z*u.y*(1 - cos) - u.x*sin ),
0
],
[ # Row 2:
( u.x*u.z*(1 - cos) - u.y*sin ),
( u.y*u.z*(1 - cos) + u.x*sin ),
( u.z*u.z*(1 - cos) + cos ),
0
],
[ # Row 3:
0,
0,
0,
1
],
], dtype=np.float32))
def scale(self, s):
self.s = vec3(s.x, s.y, s.z)
def lookAt(self, target, up=None):
"""Heavily tested and should work! Requires 'GL_TRUE'
to be passed to the uniform on shader program to work."""
# Get normalized vector pointing from model to target
f = target.sub(self.p).norm()
if up != None:
u = vec3(up.x, up.y, up.z).norm()
else:
u = vec3(0, 0, 1)
s = f.cross(u).norm() # f x u
# s must be normalized! Consider when f and u are not perpendicular!
u = s.cross(f) # s x f, automatically normalized
self.rotationMatrix = np.matrix([
[ s.x, f.x, u.x, 0],
[ s.y, f.y, u.y, 0],
[ s.z, f.z, u.z, 0],
[ 0, 0, 0, 1]], dtype=np.float32)
def get(self):
"""Heavily tested and should work! Requires 'GL_TRUE'
to be passed to the uniform on shader program to work."""
translationMatrix = np.matrix([
[1,0,0,self.p.x],
[0,1,0,self.p.y],
[0,0,1,self.p.z],
[0,0,0,1]
], dtype=np.float32)
scaleMatrix = np.matrix([
[self.s.x,0,0,0],
[0,self.s.y,0,0],
[0,0,self.s.z,0],
[0,0,0,1]
], dtype=np.float32)
return (translationMatrix*self.rotationMatrix*scaleMatrix).getA()
class quaternion():
def __init__(self, x, y, z, w):
self.x = float(x)
self.y = float(y)
self.z = float(z)
self.w = float(w)
def mag(self): # Get length of quaternion
        return np.sqrt(self.x*self.x + self.y*self.y + self.z*self.z + self.w*self.w)
def norm(self): # Normalize quaternion
return quaternion(
x= self.x / self.mag(),
y= self.y / self.mag(),
z= self.z / self.mag(),
w= self.w / self.mag())
def conjugate(self):
return quaternion(
x=-self.x,
y=-self.y,
z=-self.z,
w= self.w)
def xQ(self, q): # Multiply with quaternion
return quaternion(
x= self.x * q.w + self.w * q.x + self.y * q.z - self.z * q.y,
y= self.y * q.w + self.w * q.y + self.z * q.x - self.x * q.z,
z= self.z * q.w + self.w * q.z + self.x * q.y - self.y * q.x,
w= self.w * q.w - self.x * q.x - self.y * q.y - self.z * q.z)
def xV(self, v): # Multiply with vector
return quaternion(
x= self.w*v.x + self.y*v.z - self.z*v.y,
y= self.w*v.y + self.z*v.x - self.x*v.z,
z= self.w*v.z + self.x*v.y - self.y*v.x,
w=-self.x*v.x - self.y*v.y - self.z*v.z)
class vec2():
def __init__(self, x, y):
self.x = float(x)
self.y = float(y)
def mag(self):
return np.sqrt(self.x*self.x + self.y*self.y)
def norm(self):
return vec2(
x= self.x / self.mag(),
y= self.y / self.mag())
class vec3():
def __init__(self, x, y, z):
self.x = float(x)
self.y = float(y)
self.z = float(z)
def cross(self, vector):
return vec3(
x= self.y*vector.z - self.z*vector.y,
y= self.z*vector.x - self.x*vector.z,
z= self.x*vector.y - self.y*vector.x)
def dot(self, vector):
return float( self.x*vector.x + self.y*vector.y + self.z*vector.z )
def mag(self):
return np.sqrt(self.x*self.x + self.y*self.y + self.z*self.z)
def norm(self):
return vec3(
x= self.x / self.mag(),
y= self.y / self.mag(),
z= self.z / self.mag())
def add(self, vector):
return vec3(
x= self.x + vector.x,
y= self.y + vector.y,
z= self.z + vector.z)
def sub(self, vector):
return vec3(
x= self.x - vector.x,
y= self.y - vector.y,
z= self.z - vector.z)
def scale(self, scalar):
return vec3(
self.x*scalar,
self.y*scalar,
self.z*scalar)
    def rotate(self, angle, axis):
        # Rodrigues' rotation formula: v' = v*cos(a) + (k x v)*sin(a) + k*(k.v)*(1 - cos(a)),
        # where k is the normalized axis; `angle` is assumed to be in radians.
        k = axis.norm()
        cos = np.cos(angle)
        sin = np.sin(angle)
        return self.scale(cos).add(k.cross(self).scale(sin)).add(
            k.scale(k.dot(self) * (1 - cos)))
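# Illustrative composition of the three matrix classes into a model-view-projection
# transform (a sketch; the positions, screen size and variable names are made up):
# >>> cube = ModelMatrix(vec3(0, 5, 0))
# >>> camera = ViewMatrix(vec3(0, -5, 2))
# >>> camera.lookAt(vec3(0, 5, 0))
# >>> proj = ProjectionMatrix((1280, 720), 0.1, 100.0, 0.5)
# >>> mvp = proj.get() @ camera.get() @ cube.get()   # applied right to left to column vectors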
| [
"numpy.sqrt",
"numpy.tan",
"numpy.array",
"numpy.cos",
"numpy.sin",
"numpy.matrix"
]
| [((648, 673), 'numpy.tan', 'np.tan', (['(fov * np.pi / 2.0)'], {}), '(fov * np.pi / 2.0)\n', (654, 673), True, 'import numpy as np\n'), ((746, 948), 'numpy.array', 'np.array', (['[[screen_size[1] / (tanHalfFOV * screen_size[0]), 0, 0, 0], [0, 1.0 /\n tanHalfFOV, 0, 0], [0, 0, (-zNear - zFar) / zRange, 2.0 * zFar * zNear /\n zRange], [0, 0, 1, 0]]'], {'dtype': 'np.float32'}), '([[screen_size[1] / (tanHalfFOV * screen_size[0]), 0, 0, 0], [0, \n 1.0 / tanHalfFOV, 0, 0], [0, 0, (-zNear - zFar) / zRange, 2.0 * zFar *\n zNear / zRange], [0, 0, 1, 0]], dtype=np.float32)\n', (754, 948), True, 'import numpy as np\n'), ((2582, 2672), 'numpy.matrix', 'np.matrix', (['[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]'], {'dtype': 'np.float32'}), '([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], dtype=\n np.float32)\n', (2591, 2672), True, 'import numpy as np\n'), ((3428, 3536), 'numpy.matrix', 'np.matrix', (['[[s.x, s.y, s.z, 0], [u.x, u.y, u.z, 0], [f.x, f.y, f.z, 0], [0, 0, 0, 1]]'], {'dtype': 'np.float32'}), '([[s.x, s.y, s.z, 0], [u.x, u.y, u.z, 0], [f.x, f.y, f.z, 0], [0, \n 0, 0, 1]], dtype=np.float32)\n', (3437, 3536), True, 'import numpy as np\n'), ((6746, 6771), 'numpy.sin', 'np.sin', (['(self.alpha * rate)'], {}), '(self.alpha * rate)\n', (6752, 6771), True, 'import numpy as np\n'), ((6786, 6811), 'numpy.cos', 'np.cos', (['(self.alpha * rate)'], {}), '(self.alpha * rate)\n', (6792, 6811), True, 'import numpy as np\n'), ((6844, 7216), 'numpy.matrix', 'np.matrix', (['[[v.x * v.x * (1 - cos) + cos, v.y * v.x * (1 - cos) - v.z * sin, v.z * v.x *\n (1 - cos) + v.y * sin, 0], [v.x * v.y * (1 - cos) + v.z * sin, v.y * v.\n y * (1 - cos) + cos, v.z * v.y * (1 - cos) - v.x * sin, 0], [v.x * v.z *\n (1 - cos) - v.y * sin, v.y * v.z * (1 - cos) + v.x * sin, v.z * v.z * (\n 1 - cos) + cos, 0], [0, 0, 0, 1]]'], {'dtype': 'np.float32'}), '([[v.x * v.x * (1 - cos) + cos, v.y * v.x * (1 - cos) - v.z * sin,\n v.z * v.x * (1 - cos) + v.y * sin, 0], [v.x * v.y * (1 - cos) + v.z *\n sin, v.y * v.y * (1 - cos) + cos, v.z * v.y * (1 - cos) - v.x * sin, 0],\n [v.x * v.z * (1 - cos) - v.y * sin, v.y * v.z * (1 - cos) + v.x * sin, \n v.z * v.z * (1 - cos) + cos, 0], [0, 0, 0, 1]], dtype=np.float32)\n', (6853, 7216), True, 'import numpy as np\n'), ((7930, 8043), 'numpy.matrix', 'np.matrix', (['[[1, 0, 0, -self.p.x], [0, 1, 0, -self.p.y], [0, 0, 1, -self.p.z], [0, 0, 0, 1]\n ]'], {'dtype': 'np.float32'}), '([[1, 0, 0, -self.p.x], [0, 1, 0, -self.p.y], [0, 0, 1, -self.p.z],\n [0, 0, 0, 1]], dtype=np.float32)\n', (7939, 8043), True, 'import numpy as np\n'), ((8554, 8644), 'numpy.matrix', 'np.matrix', (['[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]'], {'dtype': 'np.float32'}), '([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], dtype=\n np.float32)\n', (8563, 8644), True, 'import numpy as np\n'), ((8983, 9008), 'numpy.sin', 'np.sin', (['(turns * np.pi * 2)'], {}), '(turns * np.pi * 2)\n', (8989, 9008), True, 'import numpy as np\n'), ((9023, 9048), 'numpy.cos', 'np.cos', (['(turns * np.pi * 2)'], {}), '(turns * np.pi * 2)\n', (9029, 9048), True, 'import numpy as np\n'), ((10669, 10777), 'numpy.matrix', 'np.matrix', (['[[s.x, f.x, u.x, 0], [s.y, f.y, u.y, 0], [s.z, f.z, u.z, 0], [0, 0, 0, 1]]'], {'dtype': 'np.float32'}), '([[s.x, f.x, u.x, 0], [s.y, f.y, u.y, 0], [s.z, f.z, u.z, 0], [0, \n 0, 0, 1]], dtype=np.float32)\n', (10678, 10777), True, 'import numpy as np\n'), ((11012, 11123), 'numpy.matrix', 'np.matrix', (['[[1, 0, 0, self.p.x], [0, 1, 0, self.p.y], [0, 0, 1, self.p.z], [0, 0, 
0, 1]]'], {'dtype': 'np.float32'}), '([[1, 0, 0, self.p.x], [0, 1, 0, self.p.y], [0, 0, 1, self.p.z], [\n 0, 0, 0, 1]], dtype=np.float32)\n', (11021, 11123), True, 'import numpy as np\n'), ((11200, 11311), 'numpy.matrix', 'np.matrix', (['[[self.s.x, 0, 0, 0], [0, self.s.y, 0, 0], [0, 0, self.s.z, 0], [0, 0, 0, 1]]'], {'dtype': 'np.float32'}), '([[self.s.x, 0, 0, 0], [0, self.s.y, 0, 0], [0, 0, self.s.z, 0], [\n 0, 0, 0, 1]], dtype=np.float32)\n', (11209, 11311), True, 'import numpy as np\n'), ((11675, 11753), 'numpy.sqrt', 'np.sqrt', (['(self.x * self.x + self.y * self.y + self.y * self.y + self.y * self.y)'], {}), '(self.x * self.x + self.y * self.y + self.y * self.y + self.y * self.y)\n', (11682, 11753), True, 'import numpy as np\n'), ((12938, 12980), 'numpy.sqrt', 'np.sqrt', (['(self.x * self.x + self.y * self.y)'], {}), '(self.x * self.x + self.y * self.y)\n', (12945, 12980), True, 'import numpy as np\n'), ((13590, 13650), 'numpy.sqrt', 'np.sqrt', (['(self.x * self.x + self.y * self.y + self.z * self.z)'], {}), '(self.x * self.x + self.y * self.y + self.z * self.z)\n', (13597, 13650), True, 'import numpy as np\n'), ((9125, 9497), 'numpy.matrix', 'np.matrix', (['[[u.x * u.x * (1 - cos) + cos, u.y * u.x * (1 - cos) - u.z * sin, u.z * u.x *\n (1 - cos) + u.y * sin, 0], [u.x * u.y * (1 - cos) + u.z * sin, u.y * u.\n y * (1 - cos) + cos, u.z * u.y * (1 - cos) - u.x * sin, 0], [u.x * u.z *\n (1 - cos) - u.y * sin, u.y * u.z * (1 - cos) + u.x * sin, u.z * u.z * (\n 1 - cos) + cos, 0], [0, 0, 0, 1]]'], {'dtype': 'np.float32'}), '([[u.x * u.x * (1 - cos) + cos, u.y * u.x * (1 - cos) - u.z * sin,\n u.z * u.x * (1 - cos) + u.y * sin, 0], [u.x * u.y * (1 - cos) + u.z *\n sin, u.y * u.y * (1 - cos) + cos, u.z * u.y * (1 - cos) - u.x * sin, 0],\n [u.x * u.z * (1 - cos) - u.y * sin, u.y * u.z * (1 - cos) + u.x * sin, \n u.z * u.z * (1 - cos) + cos, 0], [0, 0, 0, 1]], dtype=np.float32)\n', (9134, 9497), True, 'import numpy as np\n'), ((7750, 7780), 'numpy.array', 'np.array', (['[p.x, p.y, p.z, 1.0]'], {}), '([p.x, p.y, p.z, 1.0])\n', (7758, 7780), True, 'import numpy as np\n')] |
from xml.dom import minidom
import pywikibot
from api.decorator import time_this
SiteMock = pywikibot.Site
class PageMock(pywikibot.Page):
def __init__(self, *args, **kwargs):
super(PageMock, self).__init__(*args, **kwargs)
self.filename = "test_data/test_pages_%s.xml" % self.site.lang
        self.parsed = minidom.parse(self.filename)  # parse by filename so minidom manages the file handle
self.pages = self.parsed.getElementsByTagName('page')
def put(self, newtext, summary=None, watch=None, minor=True, botflag=None,
force=False, asynchronous=False, callback=None, **kwargs):
print(('Saving page [[%s]] through put' % self.title()))
def save(self, summary=None, watch=None, minor=True, botflag=None,
force=False, asynchronous=False, callback=None,
apply_cosmetic_changes=None, quiet=False, **kwargs):
print(('Saving page [[%s]] through save' % self.title()))
def _save(self, summary=None, watch=None, minor=True, botflag=None,
cc=None, quiet=False, **kwargs):
print(('Saving page [[%s]] through save' % self.title()))
@time_this('Page.get() method mock')
def get(self, force=False, get_redirect=False, sysop=False):
for page in self.pages:
xml_title = page.getElementsByTagName(
'title')[0].childNodes[0].nodeValue
if xml_title == self.title():
return page.getElementsByTagName(
'text')[0].childNodes[0].nodeValue
print(('No page %s found in "%s"' % (self.title(), self.filename)))
return ''
p = PageMock(SiteMock('en', 'wiktionary'), 'gaon')
e = p.get()
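# The mock reads pages from test_data/test_pages_<lang>.xml; based on the DOM lookups
# in get(), the fixture is expected to look roughly like the sketch below (the root
# element name is a guess, only <page>/<title>/<text> are actually required):
#
#   <pages>
#     <page>
#       <title>gaon</title>
#       <text>...wikitext...</text>
#     </page>
#   </pages>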
| [
"api.decorator.time_this"
]
| [((1109, 1144), 'api.decorator.time_this', 'time_this', (['"""Page.get() method mock"""'], {}), "('Page.get() method mock')\n", (1118, 1144), False, 'from api.decorator import time_this\n')] |
import os
from functools import partial
from io import BytesIO
import numpy as np
import PIL.Image
import scipy.misc
import tensorflow as tf
graph = tf.Graph()
sess = tf.InteractiveSession(graph=graph)
model_fn = "./models/tensorflow_inception_graph.pb"
with tf.gfile.FastGFile(model_fn, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
t_input = tf.placeholder(tf.float32, name="input")
imagenet_mean = 117.0
t_preprocessed = tf.expand_dims(t_input-imagenet_mean, 0)
tf.import_graph_def(graph_def, {"input": t_preprocessed})
def load_inception():
graph = tf.Graph()
sess = tf.InteractiveSession(graph=graph)
model_fn = "./models/tensorflow_inception_graph.pb"
with tf.gfile.FastGFile(model_fn, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
    # Define t_input as the placeholder for the input image
t_input = tf.placeholder(np.float32, name='input')
imagenet_mean = 117.0
    # The input image must be preprocessed before it is fed to the network:
    # expand_dims adds a dimension, turning [height, width, channel] into [1, height, width, channel],
    # and t_input - imagenet_mean subtracts the ImageNet mean.
t_preprocessed = tf.expand_dims(t_input - imagenet_mean, 0)
tf.import_graph_def(graph_def, {'input': t_preprocessed})
    # Find all convolutional layers
layers = [op.name for op in graph.get_operations() if op.type ==
"Conv2D" and "import/" in op.name]
    # Print the number of convolutional layers
print('Number of layers', len(layers))
    # In particular, print the shape of mixed4d_3x3_bottleneck_pre_relu
name = 'mixed4d_3x3_bottleneck_pre_relu'
print('shape of %s: %s' %(name, str(graph.get_tensor_by_name('import/' + name + ':0').get_shape())))
def savearray(img_array, img_name):
scipy.misc.toimage(img_array).save(img_name)
print('img saved: %s' % img_name)
def visstd(a, s=0.1):
return (a-a.mean())/max(a.std(), 1e-4)*s+0.5
def resize_ratio(img, ratio):
min = img.min()
max = img.max()
img = (img - min) / (max - min) * 255
img = np.float32(scipy.misc.imresize(img, ratio))
img = img / 255 * (max - min) + min
return img
def resize(img, hw):
min = img.min()
max = img.max()
img = (img - min) / (max - min) * 255
img = np.float32(scipy.misc.imresize(img, hw))
img = img / 255 * (max - min) + min
return img
def calc_grad_tiled(img, t_grad, tile_size=512):
sz = tile_size
h, w = img.shape[:2]
sx, sy = np.random.randint(sz, size=2)
    img_shift = np.roll(np.roll(img, sx, 1), sy, 0)  # shift the whole image along the rows first, then along the columns
grad = np.zeros_like(img)
for y in range(0, max(h - sz // 2, sz), sz):
for x in range(0, max(w - sz // 2, sz), sz):
sub = img_shift[y:y + sz, x:x + sz]
g = sess.run(t_grad, {t_input: sub})
grad[y:y + sz, x:x + sz] = g
return np.roll(np.roll(grad, -sx, 1), -sy, 0)
k = np.float32([1, 4, 6, 4, 1])
k = np.outer(k, k)
k5x5 = k[:, :, None, None] / k.sum() * np.eye(3, dtype=np.float32)
# Merge a Laplacian pyramid back into a single image
def lap_merge(levels):
img = levels[0]
for hi in levels[1:]:
with tf.name_scope('merge'):
img = tf.nn.conv2d_transpose(img, k5x5 * 4, tf.shape(hi), [1, 2, 2, 1]) + hi
return img
# Normalize img.
def normalize_std(img, eps=1e-10):
with tf.name_scope('normalize'):
std = tf.sqrt(tf.reduce_mean(tf.square(img)))
return img / tf.maximum(std, eps)
# Laplacian pyramid normalization
def lap_normalize(img, scale_n=4):
img = tf.expand_dims(img, 0)
tlevels = lap_split_n(img, scale_n)
    # Apply normalize_std to every level
tlevels = list(map(normalize_std, tlevels))
out = lap_merge(tlevels)
return out[0, :, :, :]
# This function splits an image into low- and high-frequency components
def lap_split(img):
with tf.name_scope('split'):
        # One convolution acts as a smoothing, so lo is the low-frequency component
lo = tf.nn.conv2d(img, k5x5, [1, 2, 2, 1], 'SAME')
        # Upscale the low-frequency part back to the original size to get lo2; img - lo2 is the high-frequency part hi
lo2 = tf.nn.conv2d_transpose(lo, k5x5 * 4, tf.shape(img), [1, 2, 2, 1])
hi = img - lo2
return lo, hi
# This function splits the image img into an n-level Laplacian pyramid
def lap_split_n(img, n):
levels = []
for i in range(n):
        # Call lap_split to separate the image into low- and high-frequency parts;
        # the high-frequency part is stored in levels,
        # and the low-frequency part keeps being decomposed.
img, hi = lap_split(img)
levels.append(hi)
levels.append(img)
return levels[::-1]
def tffunc(*argtypes):
placeholders = list(map(tf.placeholder, argtypes))
def wrap(f):
out = f(*placeholders)
def wrapper(*args, **kw):
return out.eval(dict(zip(placeholders, args)), session=kw.get('session'))
return wrapper
return wrap
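# tffunc turns a graph-building function (tensors in, tensor out) into a plain
# numpy-in/numpy-out helper by pre-building placeholders of the given dtypes and
# evaluating the output against the default session, e.g. (sketch):
# >>> norm = tffunc(np.float32)(partial(lap_normalize, scale_n=4))
# >>> out_img = norm(img)   # img being an HxWx3 float32 array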
def render_deepdream(img0, iter_n=10, step=1.5, octave_n=4, octave_scale=1.4):
name = 'mixed4d_3x3_bottleneck_pre_relu'
channel = 139
t_obj = graph.get_tensor_by_name("import/%s:0" % name)
t_score = tf.reduce_mean(t_obj)
t_grad = tf.gradients(t_score, t_input)[0]
    lap_n = 4
    # Turn lap_normalize into a regular numpy-in/numpy-out function
lap_norm_func = tffunc(np.float32)(partial(lap_normalize, scale_n=lap_n))
img = img0
    # Decompose the image into a pyramid here as well;
    # this time extracting the high/low frequencies is simpler: plain resizing is enough.
octaves = []
for i in range(octave_n-1):
hw = img.shape[:2]
lo = resize(img, np.int32(np.float32(hw) / octave_scale))
hi = img - resize(lo, hw)
img = lo
octaves.append(hi)
    # Generate the low-frequency image first, then successively upscale it and add the high frequencies back
for octave in range(octave_n):
if octave > 0:
hi = octaves[-octave]
img = resize(img, hi.shape[:2]) + hi
for i in range(iter_n):
g = calc_grad_tiled(img, t_grad)
img += g * (step / (np.abs(g).mean() + 1e-7))
            # The only difference is that we would use lap_norm_func to normalize g:
# g = lap_norm_func(g)
# img += g * step
print('.', end=' ')
img = img.clip(0, 255)
savearray(img, './predict_img/deepdream.jpg')
if __name__ == '__main__':
img0 = PIL.Image.open('./images/test.jpg')
img0 = np.float32(img0)
render_deepdream(img0)
| [
"tensorflow.shape",
"tensorflow.gfile.FastGFile",
"tensorflow.gradients",
"tensorflow.reduce_mean",
"tensorflow.Graph",
"tensorflow.placeholder",
"tensorflow.GraphDef",
"tensorflow.maximum",
"tensorflow.square",
"tensorflow.nn.conv2d",
"numpy.abs",
"numpy.eye",
"tensorflow.InteractiveSession",
"numpy.outer",
"tensorflow.import_graph_def",
"tensorflow.expand_dims",
"numpy.roll",
"numpy.random.randint",
"tensorflow.name_scope",
"functools.partial",
"numpy.zeros_like",
"numpy.float32"
]
| [((151, 161), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (159, 161), True, 'import tensorflow as tf\n'), ((169, 203), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {'graph': 'graph'}), '(graph=graph)\n', (190, 203), True, 'import tensorflow as tf\n'), ((383, 423), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""input"""'}), "(tf.float32, name='input')\n", (397, 423), True, 'import tensorflow as tf\n'), ((463, 505), 'tensorflow.expand_dims', 'tf.expand_dims', (['(t_input - imagenet_mean)', '(0)'], {}), '(t_input - imagenet_mean, 0)\n', (477, 505), True, 'import tensorflow as tf\n'), ((504, 561), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def', "{'input': t_preprocessed}"], {}), "(graph_def, {'input': t_preprocessed})\n", (523, 561), True, 'import tensorflow as tf\n'), ((2762, 2789), 'numpy.float32', 'np.float32', (['[1, 4, 6, 4, 1]'], {}), '([1, 4, 6, 4, 1])\n', (2772, 2789), True, 'import numpy as np\n'), ((2794, 2808), 'numpy.outer', 'np.outer', (['k', 'k'], {}), '(k, k)\n', (2802, 2808), True, 'import numpy as np\n'), ((261, 295), 'tensorflow.gfile.FastGFile', 'tf.gfile.FastGFile', (['model_fn', '"""rb"""'], {}), "(model_fn, 'rb')\n", (279, 295), True, 'import tensorflow as tf\n'), ((318, 331), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (329, 331), True, 'import tensorflow as tf\n'), ((598, 608), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (606, 608), True, 'import tensorflow as tf\n'), ((620, 654), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {'graph': 'graph'}), '(graph=graph)\n', (641, 654), True, 'import tensorflow as tf\n'), ((877, 917), 'tensorflow.placeholder', 'tf.placeholder', (['np.float32'], {'name': '"""input"""'}), "(np.float32, name='input')\n", (891, 917), True, 'import tensorflow as tf\n'), ((1103, 1145), 'tensorflow.expand_dims', 'tf.expand_dims', (['(t_input - imagenet_mean)', '(0)'], {}), '(t_input - imagenet_mean, 0)\n', (1117, 1145), True, 'import tensorflow as tf\n'), ((1150, 1207), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def', "{'input': t_preprocessed}"], {}), "(graph_def, {'input': t_preprocessed})\n", (1169, 1207), True, 'import tensorflow as tf\n'), ((2332, 2361), 'numpy.random.randint', 'np.random.randint', (['sz'], {'size': '(2)'}), '(sz, size=2)\n', (2349, 2361), True, 'import numpy as np\n'), ((2448, 2466), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (2461, 2466), True, 'import numpy as np\n'), ((2848, 2875), 'numpy.eye', 'np.eye', (['(3)'], {'dtype': 'np.float32'}), '(3, dtype=np.float32)\n', (2854, 2875), True, 'import numpy as np\n'), ((3345, 3367), 'tensorflow.expand_dims', 'tf.expand_dims', (['img', '(0)'], {}), '(img, 0)\n', (3359, 3367), True, 'import tensorflow as tf\n'), ((4664, 4685), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['t_obj'], {}), '(t_obj)\n', (4678, 4685), True, 'import tensorflow as tf\n'), ((5751, 5767), 'numpy.float32', 'np.float32', (['img0'], {}), '(img0)\n', (5761, 5767), True, 'import numpy as np\n'), ((720, 754), 'tensorflow.gfile.FastGFile', 'tf.gfile.FastGFile', (['model_fn', '"""rb"""'], {}), "(model_fn, 'rb')\n", (738, 754), True, 'import tensorflow as tf\n'), ((781, 794), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (792, 794), True, 'import tensorflow as tf\n'), ((2386, 2405), 'numpy.roll', 'np.roll', (['img', 'sx', '(1)'], {}), '(img, sx, 1)\n', (2393, 2405), True, 'import numpy as np\n'), ((2726, 2747), 'numpy.roll', 'np.roll', (['grad', '(-sx)', '(1)'], 
{}), '(grad, -sx, 1)\n', (2733, 2747), True, 'import numpy as np\n'), ((3162, 3188), 'tensorflow.name_scope', 'tf.name_scope', (['"""normalize"""'], {}), "('normalize')\n", (3175, 3188), True, 'import tensorflow as tf\n'), ((3589, 3611), 'tensorflow.name_scope', 'tf.name_scope', (['"""split"""'], {}), "('split')\n", (3602, 3611), True, 'import tensorflow as tf\n'), ((3662, 3707), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['img', 'k5x5', '[1, 2, 2, 1]', '"""SAME"""'], {}), "(img, k5x5, [1, 2, 2, 1], 'SAME')\n", (3674, 3707), True, 'import tensorflow as tf\n'), ((4699, 4729), 'tensorflow.gradients', 'tf.gradients', (['t_score', 't_input'], {}), '(t_score, t_input)\n', (4711, 4729), True, 'import tensorflow as tf\n'), ((4812, 4849), 'functools.partial', 'partial', (['lap_normalize'], {'scale_n': 'lap_n'}), '(lap_normalize, scale_n=lap_n)\n', (4819, 4849), False, 'from functools import partial\n'), ((2977, 2999), 'tensorflow.name_scope', 'tf.name_scope', (['"""merge"""'], {}), "('merge')\n", (2990, 2999), True, 'import tensorflow as tf\n'), ((3265, 3285), 'tensorflow.maximum', 'tf.maximum', (['std', 'eps'], {}), '(std, eps)\n', (3275, 3285), True, 'import tensorflow as tf\n'), ((3815, 3828), 'tensorflow.shape', 'tf.shape', (['img'], {}), '(img)\n', (3823, 3828), True, 'import tensorflow as tf\n'), ((3227, 3241), 'tensorflow.square', 'tf.square', (['img'], {}), '(img)\n', (3236, 3241), True, 'import tensorflow as tf\n'), ((3057, 3069), 'tensorflow.shape', 'tf.shape', (['hi'], {}), '(hi)\n', (3065, 3069), True, 'import tensorflow as tf\n'), ((5027, 5041), 'numpy.float32', 'np.float32', (['hw'], {}), '(hw)\n', (5037, 5041), True, 'import numpy as np\n'), ((5414, 5423), 'numpy.abs', 'np.abs', (['g'], {}), '(g)\n', (5420, 5423), True, 'import numpy as np\n')] |
import calendar
import datetime
import logging
import os
import webapp2
import dbmodel
TESTING = os.environ.get('SERVER_SOFTWARE', '').startswith('Development')
class ResetHandler(webapp2.RequestHandler):
def get(self):
timestamp = calendar.timegm(datetime.datetime.utcnow().timetuple())
self.response.write('<html><body><form method="POST"><input type="text" value="' + str(
timestamp) + '" name="day"><input type="submit"></form></body></html>')
def post(self):
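        # Clear the 'counted' flag (set it back to None) on that day's
        # Information ReportItems and delete the day's AggregateItem rows,
        # presumably so the aggregation task can recompute them.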
timestamp = int(self.request.get('day', None))
entry_day = datetime.datetime.utcfromtimestamp(timestamp).date()
logging.info('Processing day %s', entry_day)
starttimestamp = calendar.timegm((entry_day.year, entry_day.month, entry_day.day, 0, 0, 0))
endtimestamp = starttimestamp + 24 * 60 * 60
logging.info('starttimestamp, endtimestamp: (%s, %s)', starttimestamp, endtimestamp)
count = 0
for item in dbmodel.ReportItem.all().filter('counted', 0).filter('eventtype =', 'Information').filter(
'timestamp <', endtimestamp).filter('timestamp >=', starttimestamp).order('timestamp'):
item.counted = None
item.put()
count += 1
for item in dbmodel.ReportItem.all().filter('counted', 1).filter('eventtype =', 'Information').filter(
'timestamp <', endtimestamp).filter('timestamp >=', starttimestamp).order('timestamp'):
item.counted = None
item.put()
count += 1
logging.info('Reset for %s items', count)
for item in dbmodel.AggregateItem.all().filter('timestamp =', starttimestamp).filter('rangetype =', 'day'):
item.delete()
app = webapp2.WSGIApplication([
('/tasks/admin/reset', ResetHandler)
], debug=TESTING)
| [
"datetime.datetime.utcfromtimestamp",
"datetime.datetime.utcnow",
"dbmodel.ReportItem.all",
"os.environ.get",
"calendar.timegm",
"webapp2.WSGIApplication",
"logging.info",
"dbmodel.AggregateItem.all"
]
| [((1744, 1822), 'webapp2.WSGIApplication', 'webapp2.WSGIApplication', (["[('/tasks/admin/reset', ResetHandler)]"], {'debug': 'TESTING'}), "([('/tasks/admin/reset', ResetHandler)], debug=TESTING)\n", (1767, 1822), False, 'import webapp2\n'), ((100, 137), 'os.environ.get', 'os.environ.get', (['"""SERVER_SOFTWARE"""', '""""""'], {}), "('SERVER_SOFTWARE', '')\n", (114, 137), False, 'import os\n'), ((644, 688), 'logging.info', 'logging.info', (['"""Processing day %s"""', 'entry_day'], {}), "('Processing day %s', entry_day)\n", (656, 688), False, 'import logging\n'), ((715, 789), 'calendar.timegm', 'calendar.timegm', (['(entry_day.year, entry_day.month, entry_day.day, 0, 0, 0)'], {}), '((entry_day.year, entry_day.month, entry_day.day, 0, 0, 0))\n', (730, 789), False, 'import calendar\n'), ((852, 940), 'logging.info', 'logging.info', (['"""starttimestamp, endtimestamp: (%s, %s)"""', 'starttimestamp', 'endtimestamp'], {}), "('starttimestamp, endtimestamp: (%s, %s)', starttimestamp,\n endtimestamp)\n", (864, 940), False, 'import logging\n'), ((1552, 1593), 'logging.info', 'logging.info', (['"""Reset for %s items"""', 'count'], {}), "('Reset for %s items', count)\n", (1564, 1593), False, 'import logging\n'), ((583, 628), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['timestamp'], {}), '(timestamp)\n', (617, 628), False, 'import datetime\n'), ((265, 291), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (289, 291), False, 'import datetime\n'), ((1614, 1641), 'dbmodel.AggregateItem.all', 'dbmodel.AggregateItem.all', ([], {}), '()\n', (1639, 1641), False, 'import dbmodel\n'), ((976, 1000), 'dbmodel.ReportItem.all', 'dbmodel.ReportItem.all', ([], {}), '()\n', (998, 1000), False, 'import dbmodel\n'), ((1270, 1294), 'dbmodel.ReportItem.all', 'dbmodel.ReportItem.all', ([], {}), '()\n', (1292, 1294), False, 'import dbmodel\n')] |
from dataclasses import dataclass
from itertools import cycle
from typing import Dict, Union
import numpy as np
from ...layers.utils.color_transformations import (
transform_color,
transform_color_cycle,
)
@dataclass(eq=False)
class ColorCycle:
"""A dataclass to hold a color cycle for the fallback_colors
in the CategoricalColormap
Attributes
----------
values : np.ndarray
The (Nx4) color array of all colors contained in the color cycle.
cycle : cycle
The cycle object that gives fallback colors.
"""
values: np.ndarray
cycle: cycle
@classmethod
def __get_validators__(cls):
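        # pydantic hook: yields the validator(s) to run when a field of this
        # type is assigned (assumes the surrounding model is a pydantic v1
        # style model, which looks up __get_validators__).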
yield cls.validate_type
@classmethod
def validate_type(cls, val):
# turn a generic dict into object
if isinstance(val, dict):
return _coerce_colorcycle_from_dict(val)
elif isinstance(val, ColorCycle):
return val
else:
return _coerce_colorcycle_from_colors(val)
def _json_encode(self):
return {'values': self.values.tolist()}
def __eq__(self, other):
if isinstance(other, ColorCycle):
eq = np.array_equal(self.values, other.values)
else:
eq = False
return eq
def _coerce_colorcycle_from_dict(
val: Dict[str, Union[str, list, np.ndarray, cycle]]
) -> ColorCycle:
# validate values
color_values = val.get('values')
if color_values is None:
raise ValueError('ColorCycle requires a values argument')
transformed_color_values = transform_color(color_values)
# validate cycle
color_cycle = val.get('cycle')
if color_cycle is None:
transformed_color_cycle = transform_color_cycle(
color_cycle=color_values,
elem_name='color_cycle',
default="white",
)[0]
else:
transformed_color_cycle = color_cycle
return ColorCycle(
values=transformed_color_values, cycle=transformed_color_cycle
)
def _coerce_colorcycle_from_colors(
val: Union[str, list, np.ndarray]
) -> ColorCycle:
if isinstance(val, str):
val = [val]
(
transformed_color_cycle,
transformed_color_values,
) = transform_color_cycle(
color_cycle=val,
elem_name='color_cycle',
default="white",
)
return ColorCycle(
values=transformed_color_values, cycle=transformed_color_cycle
)
def compare_colormap_dicts(cmap_1, cmap_2):
if len(cmap_1) != len(cmap_2):
return False
for k, v in cmap_1.items():
if k not in cmap_2:
return False
if not np.allclose(v, cmap_2[k]):
return False
return True
| [
"numpy.array_equal",
"numpy.allclose",
"dataclasses.dataclass"
]
| [((219, 238), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)'}), '(eq=False)\n', (228, 238), False, 'from dataclasses import dataclass\n'), ((1165, 1206), 'numpy.array_equal', 'np.array_equal', (['self.values', 'other.values'], {}), '(self.values, other.values)\n', (1179, 1206), True, 'import numpy as np\n'), ((2640, 2665), 'numpy.allclose', 'np.allclose', (['v', 'cmap_2[k]'], {}), '(v, cmap_2[k])\n', (2651, 2665), True, 'import numpy as np\n')] |
"""Abstract class for all toggle buttons"""
# Standard library imports
import logging
from collections import OrderedDict
# Third party imports
import ipywidgets
# Local imports
from .abc_toggle_buttons import BaseToggleButtons
from .layouts import DICT_LAYOUT_HBOX_ANY
LOGGER = logging.getLogger(__name__)
class BaseToggleButtonsWithHide(BaseToggleButtons):
"""Abstract class for all toggle buttons
Values are stored in self.widget_parent when displayed is self.widget
Which is updated in the moment when display() is launched
"""
def __init__(
self,
widget_parent,
options_visible=None,
options_hidden=None,
**kwargs
):
"""Initialize object"""
super().__init__(widget_parent, **kwargs)
# hidden attributes to setters
self._options_visible = []
self._options_hidden = []
self._bool_is_hidden_options_created = False
# Create scaffolds inside self.widgets
self._create_scaffold_for_widget()
self._dict_visible_button_by_option = OrderedDict()
self._dict_hidden_button_by_option = OrderedDict()
# Set options
self.options_visible = options_visible
self.options_hidden = options_hidden
self._update_buttons_for_new_options()
@property
def options_visible(self):
"""Getter for visible options used in widget"""
return self._options_visible
@options_visible.setter
def options_visible(self, new_value):
"""Setter for visible options in widget
Args:
new_value (list or tuple): New options to set for widgets
"""
if new_value is None:
new_value = []
if set(new_value) == set(self.options_visible):
return None
self._options_visible = new_value
self._create_buttons_for_visible_options()
# Update hidden options to delete which exists in new visible
# This will also update the whole widget
self.options_hidden = self._options_hidden
self.options = self._options_visible + self._options_hidden
self._update_widget_view()
@property
def options_hidden(self):
"""Getter for hidden options used in widget"""
return self._options_hidden
@options_hidden.setter
def options_hidden(self, new_value):
"""Setter for hidden options in widget
Args:
new_value (list or tuple): New options to set for widgets
"""
if new_value is None:
new_value = []
if set(new_value) == set(self.options_hidden):
return None
# Filter out from hidden options all options which exists in main
options_hidden_cleared = []
for str_option in new_value:
if str_option not in self.options_visible:
options_hidden_cleared.append(str_option)
self._options_hidden = options_hidden_cleared
self.options = self._options_visible + self._options_hidden
# self._create_buttons_for_hidden_options()
self._update_widget_view()
def turn_off_all_buttons(self):
"""Mark all buttons as not clicked"""
for str_option in self._dict_visible_button_by_option:
but = self._dict_visible_button_by_option[str_option]
but.button_style = ""
for str_option in self._dict_hidden_button_by_option:
but = self._dict_hidden_button_by_option[str_option]
but.button_style = ""
# Change style of selected hidden button
# self._widget_but_hidden_option_selected.description = "..."
# self._widget_but_hidden_option_selected.button_style = ""
def _update_buttons_for_new_options(self):
"""Update buttons if options were changed"""
self._create_buttons_for_visible_options()
self._bool_is_hidden_options_created = False
# self._create_buttons_for_hidden_options()
def _create_scaffold_for_widget(self):
"""Create scaffold of ipywidget Boxes for self"""
# Main buttons box
self._widget_hbox_main = ipywidgets.HBox()
self._widget_hbox_main.layout = ipywidgets.Layout(**DICT_LAYOUT_HBOX_ANY)
# self._widget_hbox_main.layout.flex_flow = "row wrap"
# Middle buttons box
self._widget_hbox_middle_buttons = ipywidgets.HBox()
self._widget_hbox_middle_buttons.layout = ipywidgets.Layout(**DICT_LAYOUT_HBOX_ANY)
self._create_middle_buttons()
# Hidden buttons box
self._widget_hbox_hidden = ipywidgets.HBox()
self._widget_hbox_hidden.layout = ipywidgets.Layout(**DICT_LAYOUT_HBOX_ANY)
# self._widget_hbox_hidden.layout.flex_flow = "row wrap"
def _create_buttons_for_visible_options(self):
"""Create buttons for all visible options"""
self._dict_visible_button_by_option = OrderedDict()
int_button_width = self._get_button_width(self.options_visible)
list_buttons = []
for str_option in list(self.options_visible):
but_wid = ipywidgets.Button(
description=str_option,
layout={"width": "%dpx" % int_button_width}
)
but_wid.on_click(self._on_click_button_to_choose_option)
self._dict_visible_button_by_option[str_option] = but_wid
list_buttons.append(but_wid)
self._widget_hbox_main.children = list_buttons
def _create_middle_buttons(self):
"""Create buttons which are in charge what to do with hidden buttons"""
self._wid_but_hide_show = ipywidgets.ToggleButton(
value=False,
description="Show Hidden options",
button_style="info",
)
self._wid_but_hide_show.layout.width = "40%"
self._wid_but_hide_show.observe(
lambda _: self._update_widget_view(), "value")
self._widget_but_hidden_option_selected = ipywidgets.Button(
description="...", disabled=True)
self._widget_but_hidden_option_selected.layout.width = "40%"
self._widget_hbox_middle_buttons.children = [
self._widget_but_hidden_option_selected, self._wid_but_hide_show]
def _create_buttons_for_hidden_options(self):
"""Create buttons for all hidden options"""
self._dict_hidden_button_by_option = OrderedDict()
int_button_width = self._get_button_width(self.options_hidden)
list_buttons = []
for str_option in list(self.options_hidden):
but_wid = ipywidgets.Button(
description=str_option,
layout={"width": "%dpx" % int_button_width}
)
if str_option in self.value:
but_wid.button_style = "success"
but_wid.on_click(self._on_click_button_to_choose_option)
self._dict_hidden_button_by_option[str_option] = but_wid
list_buttons.append(but_wid)
self._widget_hbox_hidden.children = list_buttons
| [
"logging.getLogger",
"ipywidgets.HBox",
"collections.OrderedDict",
"ipywidgets.ToggleButton",
"ipywidgets.Button",
"ipywidgets.Layout"
]
| [((282, 309), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (299, 309), False, 'import logging\n'), ((1092, 1105), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1103, 1105), False, 'from collections import OrderedDict\n'), ((1151, 1164), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1162, 1164), False, 'from collections import OrderedDict\n'), ((4151, 4168), 'ipywidgets.HBox', 'ipywidgets.HBox', ([], {}), '()\n', (4166, 4168), False, 'import ipywidgets\n'), ((4209, 4250), 'ipywidgets.Layout', 'ipywidgets.Layout', ([], {}), '(**DICT_LAYOUT_HBOX_ANY)\n', (4226, 4250), False, 'import ipywidgets\n'), ((4386, 4403), 'ipywidgets.HBox', 'ipywidgets.HBox', ([], {}), '()\n', (4401, 4403), False, 'import ipywidgets\n'), ((4454, 4495), 'ipywidgets.Layout', 'ipywidgets.Layout', ([], {}), '(**DICT_LAYOUT_HBOX_ANY)\n', (4471, 4495), False, 'import ipywidgets\n'), ((4598, 4615), 'ipywidgets.HBox', 'ipywidgets.HBox', ([], {}), '()\n', (4613, 4615), False, 'import ipywidgets\n'), ((4658, 4699), 'ipywidgets.Layout', 'ipywidgets.Layout', ([], {}), '(**DICT_LAYOUT_HBOX_ANY)\n', (4675, 4699), False, 'import ipywidgets\n'), ((4916, 4929), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4927, 4929), False, 'from collections import OrderedDict\n'), ((5625, 5721), 'ipywidgets.ToggleButton', 'ipywidgets.ToggleButton', ([], {'value': '(False)', 'description': '"""Show Hidden options"""', 'button_style': '"""info"""'}), "(value=False, description='Show Hidden options',\n button_style='info')\n", (5648, 5721), False, 'import ipywidgets\n'), ((5968, 6019), 'ipywidgets.Button', 'ipywidgets.Button', ([], {'description': '"""..."""', 'disabled': '(True)'}), "(description='...', disabled=True)\n", (5985, 6019), False, 'import ipywidgets\n'), ((6382, 6395), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6393, 6395), False, 'from collections import OrderedDict\n'), ((5104, 5194), 'ipywidgets.Button', 'ipywidgets.Button', ([], {'description': 'str_option', 'layout': "{'width': '%dpx' % int_button_width}"}), "(description=str_option, layout={'width': '%dpx' %\n int_button_width})\n", (5121, 5194), False, 'import ipywidgets\n'), ((6568, 6658), 'ipywidgets.Button', 'ipywidgets.Button', ([], {'description': 'str_option', 'layout': "{'width': '%dpx' % int_button_width}"}), "(description=str_option, layout={'width': '%dpx' %\n int_button_width})\n", (6585, 6658), False, 'import ipywidgets\n')] |
__author__ = '<NAME> and <NAME>'
import Player
import Message
# input
#0 for rock
#1 for paper
#2 for scissors
# past move is array of numbers
# our move followed by their move
# Our strategy is to look at all past moves.
# Over a large number of games, you would expect each move to be seen a
# roughly equal number of times, so we take the least-seen move, expect it
# to show up soon, and play the move that beats it.
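# For example: if the opponent's past moves are [0, 0, 1] (rock, rock, paper),
# scissors (2) has been seen the least, so we expect scissors soon and answer
# with (2 + 1) % 3 == 0, i.e. rock, which beats scissors.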
class DWPMPlayer(Player.Player):
def __init__(self):
Player.Player.__init__(self)
self.past_moves = []
self.set_name("Dan and Pats Player")
def play(self):
return RpsPlayingStrategy.play(self.past_moves)
def add_past_move(self, move):
"""
        adds the opponent's move to the list of past moves
"""
self.past_moves.append(move)
def get_name(self):
return self.name
def notify(self, message):
# We use notifications to store opponent's moves in past rounds
# Process match-start and round-end messages
# At the start of the match, clear opponent moves history since a new match has started
# At the end of a round, append move to opponent's move history. Move history is used
# to compute the next move played.
if message.is_match_start_message():
players = message.get_players()
if players[0] == self or players[1] == self:
self.reset()
elif message.is_round_end_message():
players = message.get_players()
# Check if this message is for me and only then proceed
if (players[0] == self) or (players[1] == self):
# In this case, (by convention) the info is a tuple of the moves made and result
# e.g. ((1, 0), (1,0)) which
# means player 1 played paper (1), the player 2 played rock(0) and the result was that
# player 1 won (got 1 point) and player 2 lost (got 0 point)
moves, result = message.get_info()
# RPS is a two person game; figure out which of the players is me
# and which one is the opponent
if players[0] == self:
opponent = 1
else:
opponent = 0
# Update opponent's past moves history
self.add_past_move(moves[opponent])
def reset(self):
self.past_moves = []
def set_name(self, name):
self.name = name
class RpsPlayingStrategy(object):
@staticmethod
def play(past_moves):
"""
our player assumes that given a high number of games, all 3 different moves of opponent will be used
an equal number of times. Given a list of past_moves, we can counter an opponent's assumed move
"""
rock = 0
paper = 0
scissors = 0
for this_move in list(past_moves):
if this_move == 0:
rock += 1
elif this_move == 1:
paper += 1
elif this_move == 2:
scissors += 1
#determine which move has been used least
if (rock < paper) and (rock < scissors):
move = 0
elif paper < scissors:
move = 1
else:
move = 2
move = (move + 1) % 3
return move
# Test driver
# Run by typing "python3 RpsPlayerExample.py"
if __name__ == "__main__":
    player = DWPMPlayer()
    opponent = DWPMPlayer()
players = [opponent, player]
fakemoves = (1, 2)
fakeresult = (0, 1)
player.notify(Message.Message.get_match_start_message(players))
player.notify(Message.Message.get_round_start_message(players))
move = player.play()
print ("Move played: ", move)
player.notify(Message.Message.get_round_end_message(players, fakemoves, fakeresult))
| [
"Player.Player.__init__",
"Message.Message.get_match_start_message",
"Message.Message.get_round_start_message",
"Message.Message.get_round_end_message"
]
| [((503, 531), 'Player.Player.__init__', 'Player.Player.__init__', (['self'], {}), '(self)\n', (525, 531), False, 'import Player\n'), ((3621, 3669), 'Message.Message.get_match_start_message', 'Message.Message.get_match_start_message', (['players'], {}), '(players)\n', (3660, 3669), False, 'import Message\n'), ((3689, 3737), 'Message.Message.get_round_start_message', 'Message.Message.get_round_start_message', (['players'], {}), '(players)\n', (3728, 3737), False, 'import Message\n'), ((3816, 3885), 'Message.Message.get_round_end_message', 'Message.Message.get_round_end_message', (['players', 'fakemoves', 'fakeresult'], {}), '(players, fakemoves, fakeresult)\n', (3853, 3885), False, 'import Message\n')] |
from tictactoe import TicTacToe
import random
import csv
import os
gameNr = 1
gameLimit = 10000
lst_moves_1 = []
lst_moves_2 = []
while gameNr <= gameLimit:
print("+++++++++++")
print("Game#", gameNr)
game = TicTacToe()
tmp_moves_1 = []
tmp_moves_2 = []
while game.get_winner() == 0 and game.possible_moves() > 0:
pos = game.get_positions().copy()
while game.possible_moves() > 0:
move = random.randint(0,9)
if game.play(int(move)):
if game.get_player() == 1:
tmp_moves_2.append([gameNr] + [game.get_turn() - 1] + pos + [move])
else:
tmp_moves_1.append([gameNr] + [game.get_turn() - 1] + pos + [move])
break
print("Winner of game ", gameNr, "is", game.get_winner())
if game.get_winner() == 1:
lst_moves_1.append(tmp_moves_1)
#lst_moves_1.append(tmp_moves_1[len(tmp_moves_1) - 1])
else:
#lst_moves_2.append(tmp_moves_2[len(tmp_moves_2) - 1])
lst_moves_2.append(tmp_moves_2)
#print("List X: ", lst_moves_1)
#print("List O: ", lst_moves_2)
game.print_board()
gameNr = gameNr + 1
with open('moves_1.csv', 'w', newline='') as f:
writer = csv.writer(f)
for row in lst_moves_1:
writer.writerows(row)
with open('moves_2.csv', 'w', newline='') as f:
writer = csv.writer(f)
for row in lst_moves_2:
writer.writerows(row) | [
"csv.writer",
"random.randint",
"tictactoe.TicTacToe"
]
| [((223, 234), 'tictactoe.TicTacToe', 'TicTacToe', ([], {}), '()\n', (232, 234), False, 'from tictactoe import TicTacToe\n'), ((1258, 1271), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (1268, 1271), False, 'import csv\n'), ((1392, 1405), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (1402, 1405), False, 'import csv\n'), ((444, 464), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (458, 464), False, 'import random\n')] |
import bitstring
import pytest
from stixcore.data.test import test_data
from stixcore.idb.manager import IDBManager
from stixcore.tmtc.packets import (
SOURCE_PACKET_HEADER_STRUCTURE,
TC_DATA_HEADER_STRUCTURE,
TM_DATA_HEADER_STRUCTURE,
SourcePacketHeader,
TCPacket,
TMDataHeader,
TMPacket,
)
from stixcore.tmtc.tm.tm_1 import TM_1_1
@pytest.fixture
def idb():
return IDBManager(test_data.idb.DIR).get_idb("2.26.34")
@pytest.mark.parametrize('class_header', [(SourcePacketHeader, SOURCE_PACKET_HEADER_STRUCTURE),
(TMDataHeader, TM_DATA_HEADER_STRUCTURE)])
def test_tmtc_headers(class_header):
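    # Pack every header field at its maximum value (2**bit_width - 1) into a
    # bitstring and check that the parsed header reports those same values
    # (spare fields are excluded from the comparison).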
cls, header = class_header
test_fmt = ', '.join(header.values())
test_values = {n: 2**int(v.split(':')[-1])-1 for n, v in header.items()}
test_binary = bitstring.pack(test_fmt, *test_values.values())
sph = cls(test_binary)
assert all([getattr(sph, key) == test_values[key]
for key in header.keys() if not key.startswith('spare')])
def test_tm_packet(idb):
    combined_structures = {**SOURCE_PACKET_HEADER_STRUCTURE, **TM_DATA_HEADER_STRUCTURE}
    test_fmt = ', '.join(combined_structures.values())
    test_values = {n: 2 ** int(v.split(':')[-1]) - 1 for n, v in
                   combined_structures.items()}
test_binary = bitstring.pack(test_fmt, *test_values.values())
tmtc_packet = TMPacket(test_binary, idb=idb)
assert all([getattr(tmtc_packet.source_packet_header, key) == test_values[key]
for key in SOURCE_PACKET_HEADER_STRUCTURE.keys() if not key.startswith('spare')])
assert all([getattr(tmtc_packet.data_header, key) == test_values[key]
for key in TM_DATA_HEADER_STRUCTURE.keys() if not key.startswith('spare')])
def test_tc_packet():
    combined_structures = {**SOURCE_PACKET_HEADER_STRUCTURE, **TC_DATA_HEADER_STRUCTURE}
    test_fmt = ', '.join(combined_structures.values())
    test_values = {n: 2 ** int(v.split(':')[-1]) - 1 for n, v in
                   combined_structures.items()}
test_values['process_id'] = 90
test_values['packet_category'] = 12
test_binary = bitstring.pack(test_fmt, *test_values.values())
tmtc_packet = TCPacket(test_binary)
assert all([getattr(tmtc_packet.source_packet_header, key) == test_values[key]
for key in SOURCE_PACKET_HEADER_STRUCTURE.keys() if not key.startswith('spare')])
assert all([getattr(tmtc_packet.data_header, key) == test_values[key]
for key in TC_DATA_HEADER_STRUCTURE.keys() if not key.startswith('spare')])
def test_tm_1_1(idb):
packet = TM_1_1('0x0da1c066000d100101782628a9c4e71e1dacc0a0', idb=idb)
assert packet.source_packet_header.process_id == 90
assert packet.source_packet_header.packet_category == 1
assert packet.data_header.service_type == 1
assert packet.data_header.service_subtype == 1
| [
"stixcore.tmtc.packets.TM_DATA_HEADER_STRUCTURE.keys",
"stixcore.tmtc.tm.tm_1.TM_1_1",
"pytest.mark.parametrize",
"stixcore.tmtc.packets.SOURCE_PACKET_HEADER_STRUCTURE.keys",
"stixcore.tmtc.packets.TC_DATA_HEADER_STRUCTURE.keys",
"stixcore.tmtc.packets.TCPacket",
"stixcore.tmtc.packets.TMPacket",
"stixcore.idb.manager.IDBManager"
]
| [((455, 596), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""class_header"""', '[(SourcePacketHeader, SOURCE_PACKET_HEADER_STRUCTURE), (TMDataHeader,\n TM_DATA_HEADER_STRUCTURE)]'], {}), "('class_header', [(SourcePacketHeader,\n SOURCE_PACKET_HEADER_STRUCTURE), (TMDataHeader, TM_DATA_HEADER_STRUCTURE)])\n", (478, 596), False, 'import pytest\n'), ((1408, 1438), 'stixcore.tmtc.packets.TMPacket', 'TMPacket', (['test_binary'], {'idb': 'idb'}), '(test_binary, idb=idb)\n', (1416, 1438), False, 'from stixcore.tmtc.packets import SOURCE_PACKET_HEADER_STRUCTURE, TC_DATA_HEADER_STRUCTURE, TM_DATA_HEADER_STRUCTURE, SourcePacketHeader, TCPacket, TMDataHeader, TMPacket\n'), ((2223, 2244), 'stixcore.tmtc.packets.TCPacket', 'TCPacket', (['test_binary'], {}), '(test_binary)\n', (2231, 2244), False, 'from stixcore.tmtc.packets import SOURCE_PACKET_HEADER_STRUCTURE, TC_DATA_HEADER_STRUCTURE, TM_DATA_HEADER_STRUCTURE, SourcePacketHeader, TCPacket, TMDataHeader, TMPacket\n'), ((2629, 2690), 'stixcore.tmtc.tm.tm_1.TM_1_1', 'TM_1_1', (['"""0x0da1c066000d100101782628a9c4e71e1dacc0a0"""'], {'idb': 'idb'}), "('0x0da1c066000d100101782628a9c4e71e1dacc0a0', idb=idb)\n", (2635, 2690), False, 'from stixcore.tmtc.tm.tm_1 import TM_1_1\n'), ((403, 432), 'stixcore.idb.manager.IDBManager', 'IDBManager', (['test_data.idb.DIR'], {}), '(test_data.idb.DIR)\n', (413, 432), False, 'from stixcore.idb.manager import IDBManager\n'), ((1549, 1586), 'stixcore.tmtc.packets.SOURCE_PACKET_HEADER_STRUCTURE.keys', 'SOURCE_PACKET_HEADER_STRUCTURE.keys', ([], {}), '()\n', (1584, 1586), False, 'from stixcore.tmtc.packets import SOURCE_PACKET_HEADER_STRUCTURE, TC_DATA_HEADER_STRUCTURE, TM_DATA_HEADER_STRUCTURE, SourcePacketHeader, TCPacket, TMDataHeader, TMPacket\n'), ((1721, 1752), 'stixcore.tmtc.packets.TM_DATA_HEADER_STRUCTURE.keys', 'TM_DATA_HEADER_STRUCTURE.keys', ([], {}), '()\n', (1750, 1752), False, 'from stixcore.tmtc.packets import SOURCE_PACKET_HEADER_STRUCTURE, TC_DATA_HEADER_STRUCTURE, TM_DATA_HEADER_STRUCTURE, SourcePacketHeader, TCPacket, TMDataHeader, TMPacket\n'), ((2355, 2392), 'stixcore.tmtc.packets.SOURCE_PACKET_HEADER_STRUCTURE.keys', 'SOURCE_PACKET_HEADER_STRUCTURE.keys', ([], {}), '()\n', (2390, 2392), False, 'from stixcore.tmtc.packets import SOURCE_PACKET_HEADER_STRUCTURE, TC_DATA_HEADER_STRUCTURE, TM_DATA_HEADER_STRUCTURE, SourcePacketHeader, TCPacket, TMDataHeader, TMPacket\n'), ((2527, 2558), 'stixcore.tmtc.packets.TC_DATA_HEADER_STRUCTURE.keys', 'TC_DATA_HEADER_STRUCTURE.keys', ([], {}), '()\n', (2556, 2558), False, 'from stixcore.tmtc.packets import SOURCE_PACKET_HEADER_STRUCTURE, TC_DATA_HEADER_STRUCTURE, TM_DATA_HEADER_STRUCTURE, SourcePacketHeader, TCPacket, TMDataHeader, TMPacket\n')] |
# run local models given a path, default to './mxnet_models/'
import os
import argparse
import time
import mxnet as mx
import numpy as np
file_path = os.path.realpath(__file__)
dir_name = os.path.dirname(file_path)
os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
def cuda_profiler_start():
    # Start the CUDA profiler so only the timed iterations are captured.
    import numba.cuda as cuda
    cuda.profile_start()
def cuda_profiler_stop():
    # Stop the CUDA profiler after the timed iterations.
    import numba.cuda as cuda
    cuda.profile_stop()
def xprint(s):
pass
parser = argparse.ArgumentParser(
description='Predict ImageNet classes from a given image')
parser.add_argument('--model_name', type=str, required=False, default='resnet50_v1',
help='name of the model to use')
parser.add_argument('--batch_size', type=int, required=False, default=1,
help='batch size to use')
parser.add_argument('--input_dim', type=int, required=False, default=224,
help='input dimension')
parser.add_argument('--input_channels', type=int, required=False, default=3,
help='input channels')
parser.add_argument('--num_iterations', type=int, required=False, default=30,
help='number of iterations to run')
parser.add_argument('--num_warmup', type=int, required=False, default=5,
help='number of warmup iterations to run')
parser.add_argument('--model_idx', type=int, required=False, default=2,
help='model idx')
parser.add_argument('--profile', type=bool, required=False, default=False,
help='enable profiling')
opt = parser.parse_args()
model_name = opt.model_name
batch_size = opt.batch_size
input_dim = opt.input_dim
input_channels = opt.input_channels
num_iterations = opt.num_iterations
num_warmup = opt.num_warmup
model_idx = opt.model_idx
profile = opt.profile
ctx = mx.gpu() if len(mx.test_utils.list_gpus()) else mx.cpu()
sym, arg_params, aux_params = mx.model.load_checkpoint(
dir_name + '/mxnet_models/'+model_name, 0)
data_names = [
graph_input
for graph_input in sym.list_inputs()
if graph_input not in arg_params and graph_input not in aux_params
]
net = mx.mod.Module(
symbol=sym,
data_names=[data_names[0]],
context=ctx,
label_names=None,
)
input_shape = (batch_size, input_channels, input_dim, input_dim)
img = mx.random.uniform(
shape=input_shape, ctx=ctx)
net.bind(for_training=False, data_shapes=[
(data_names[0], input_shape)], label_shapes=net._label_shapes)
net.set_params(arg_params, aux_params, allow_missing=True)
def forward_once():
mx.nd.waitall()
start = time.time()
prob = net.predict(img)
mx.nd.waitall()
end = time.time() # stop timer
return end - start
for i in range(num_warmup):
forward_once()
res = []
if profile:
cuda_profiler_start()
for i in range(num_iterations):
t = forward_once()
res.append(t)
if profile:
cuda_profiler_stop()
res = np.multiply(res, 1000)
print("{},{},{},{},{},{}".format(model_idx+1, model_name, batch_size, np.min(res),
np.average(res), np.max(res)))
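# The line above prints the 1-based model index, model name, batch size, and
# the min / average / max latency in milliseconds over the timed iterations.
# Example invocation (the script filename here is a placeholder):
#   python benchmark.py --model_name resnet50_v1 --batch_size 8 --num_iterations 50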
| [
"numpy.multiply",
"argparse.ArgumentParser",
"mxnet.nd.waitall",
"mxnet.random.uniform",
"mxnet.cpu",
"numpy.average",
"numba.cuda.profile_stop",
"numpy.min",
"numpy.max",
"os.path.realpath",
"os.path.dirname",
"numba.cuda.profile_start",
"mxnet.gpu",
"mxnet.mod.Module",
"mxnet.model.load_checkpoint",
"time.time",
"mxnet.test_utils.list_gpus"
]
| [((152, 178), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (168, 178), False, 'import os\n'), ((190, 216), 'os.path.dirname', 'os.path.dirname', (['file_path'], {}), '(file_path)\n', (205, 216), False, 'import os\n'), ((474, 561), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Predict ImageNet classes from a given image"""'}), "(description=\n 'Predict ImageNet classes from a given image')\n", (497, 561), False, 'import argparse\n'), ((1909, 1978), 'mxnet.model.load_checkpoint', 'mx.model.load_checkpoint', (["(dir_name + '/mxnet_models/' + model_name)", '(0)'], {}), "(dir_name + '/mxnet_models/' + model_name, 0)\n", (1933, 1978), True, 'import mxnet as mx\n'), ((2135, 2223), 'mxnet.mod.Module', 'mx.mod.Module', ([], {'symbol': 'sym', 'data_names': '[data_names[0]]', 'context': 'ctx', 'label_names': 'None'}), '(symbol=sym, data_names=[data_names[0]], context=ctx,\n label_names=None)\n', (2148, 2223), True, 'import mxnet as mx\n'), ((2312, 2357), 'mxnet.random.uniform', 'mx.random.uniform', ([], {'shape': 'input_shape', 'ctx': 'ctx'}), '(shape=input_shape, ctx=ctx)\n', (2329, 2357), True, 'import mxnet as mx\n'), ((2926, 2948), 'numpy.multiply', 'np.multiply', (['res', '(1000)'], {}), '(res, 1000)\n', (2937, 2948), True, 'import numpy as np\n'), ((332, 352), 'numba.cuda.profile_start', 'cuda.profile_start', ([], {}), '()\n', (350, 352), True, 'import numba.cuda as cuda\n'), ((417, 436), 'numba.cuda.profile_stop', 'cuda.profile_stop', ([], {}), '()\n', (434, 436), True, 'import numba.cuda as cuda\n'), ((1821, 1829), 'mxnet.gpu', 'mx.gpu', ([], {}), '()\n', (1827, 1829), True, 'import mxnet as mx\n'), ((1869, 1877), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (1875, 1877), True, 'import mxnet as mx\n'), ((2565, 2580), 'mxnet.nd.waitall', 'mx.nd.waitall', ([], {}), '()\n', (2578, 2580), True, 'import mxnet as mx\n'), ((2593, 2604), 'time.time', 'time.time', ([], {}), '()\n', (2602, 2604), False, 'import time\n'), ((2637, 2652), 'mxnet.nd.waitall', 'mx.nd.waitall', ([], {}), '()\n', (2650, 2652), True, 'import mxnet as mx\n'), ((2663, 2674), 'time.time', 'time.time', ([], {}), '()\n', (2672, 2674), False, 'import time\n'), ((1837, 1862), 'mxnet.test_utils.list_gpus', 'mx.test_utils.list_gpus', ([], {}), '()\n', (1860, 1862), True, 'import mxnet as mx\n'), ((3020, 3031), 'numpy.min', 'np.min', (['res'], {}), '(res)\n', (3026, 3031), True, 'import numpy as np\n'), ((3066, 3081), 'numpy.average', 'np.average', (['res'], {}), '(res)\n', (3076, 3081), True, 'import numpy as np\n'), ((3083, 3094), 'numpy.max', 'np.max', (['res'], {}), '(res)\n', (3089, 3094), True, 'import numpy as np\n')] |
# Generated by Django 3.1.1 on 2020-12-16 03:07
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Scheduler',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('minutes', models.IntegerField(default=15)),
],
),
]
| [
"django.db.models.AutoField",
"django.db.models.IntegerField"
]
| [((305, 398), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (321, 398), False, 'from django.db import migrations, models\n'), ((425, 456), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(15)'}), '(default=15)\n', (444, 456), False, 'from django.db import migrations, models\n')] |
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split
wine = load_wine()
columns_names = wine.feature_names
y = wine.target
X = wine.data
print('Pre scaling X')
print(X)
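# StandardScaler standardizes each feature to zero mean and unit variance
# (z-score scaling); here it is fit on the full wine feature matrix before
# the train/test split.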
scaler = StandardScaler()
scaler.fit(X)
scaled_features = scaler.transform(X)
print('Post scaling X')
print(scaled_features)
X_train, X_test, y_train, y_test = train_test_split(scaled_features, y, test_size=0.375)
| [
"sklearn.preprocessing.StandardScaler",
"sklearn.model_selection.train_test_split",
"sklearn.datasets.load_wine"
]
| [((149, 160), 'sklearn.datasets.load_wine', 'load_wine', ([], {}), '()\n', (158, 160), False, 'from sklearn.datasets import load_wine\n'), ((269, 285), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (283, 285), False, 'from sklearn.preprocessing import StandardScaler\n'), ((422, 475), 'sklearn.model_selection.train_test_split', 'train_test_split', (['scaled_features', 'y'], {'test_size': '(0.375)'}), '(scaled_features, y, test_size=0.375)\n', (438, 475), False, 'from sklearn.model_selection import train_test_split\n')] |
##############################################################
#*** MagicDAQ USB DAQ and M&A Board General Demo Script ***
##############################################################
#*** Websites ***
# MagicDAQ Website:
# https://www.magicdaq.com/
# API Docs Website:
# https://magicdaq.github.io/magicdaq_docs/
#*** Install MagicDAQ ***
# Download the MagicDAQ python package from pypi
# Run this command in a command prompt:
# python -m pip install magicdaq
# Further docs: https://magicdaq.github.io/magicdaq_docs/#/Install_MagicDAQ
# MagicDAQ is only compatible with Python 3 on Windows. It does not work on Linux at the moment. It does not work with Python 2.
#*** Using Auto Code Complete With PyCharm ***
# Using a code editor like Pycharm and want to get auto complete working for the MagicDAQ package?
# Docs: https://magicdaq.github.io/magicdaq_docs/#/PyCharmCodeCompletion
##############################################################
#*** Imports ***
##############################################################
import sys
import time
# Import MagicDAQ
print('*** MagicDAQ Install Check ***')
print('')
try:
# Import MagicDAQDevice object
from magicdaq.api_class import MagicDAQDevice
# Create daq_one object
daq_one = MagicDAQDevice()
print('GOOD: MagicDAQ API is installed properly.')
# Get MagicDAQ Driver Version
driver_version = daq_one.get_driver_version()
if driver_version == 1.0:
print('GOOD: MagicDAQ Driver is installed properly.')
print('You are ready to use MagicDAQ!')
else:
print('ERROR: MagicDAQ Driver version not expected value: '+str(driver_version))
print('Try installing MagicDAQ using pip again.')
print('https://magicdaq.github.io/magicdaq_docs/#/Install_MagicDAQ')
print('Feel free to email MagicDAQ Support at: <EMAIL>')
except Exception as exception_text:
print('Original exception: ')
print(exception_text)
print('')
print('ERROR: Unable to import MagicDAQ API.')
print('Mostly likely, MagicDAQ has not been properly downloaded and installed using pip.')
print('Please consult MagicDAQ API Docs: https://magicdaq.github.io/magicdaq_docs/#/Install_MagicDAQ')
print('Feel free to email MagicDAQ Support at: <EMAIL>')
sys.exit(0)
##############################################################
#*** MagicDAQ USB DAQ MDAQ300 Features Demo ***
##############################################################
# This portion of the script shows off some of the USB DAQ's features
# Hardware docs: https://www.magicdaq.com/product/magic-daq/
print('')
print('*** MagicDAQ USB DAQ Demo ***')
print('Ensure the USB DAQ is plugged into the computer using the USB cable.')
print('The DAQ does not need to be connected to the M&A board.')
print('')
user_input = input('Press any key to continue.')
#*** Open DAQ Device ***
# Remember, the daq_one object has already been created in the above 'Imports' section
# We must open the daq device before performing any hardware feature manipulation
# https://magicdaq.github.io/magicdaq_docs/#/MagicDAQ_Basics
daq_one.open_daq_device()
###############################################################
#*** Analog Output Demo: Constant, Sine, and PWM on AO1 Pin ***
###############################################################
print('')
print('--- Analog Output Demo: Constant, Sine, and PWM Output ---')
# Set constant 3 volt output voltage on AO1 pin
daq_one.set_analog_output(1,3)
print('Using an oscilloscope, place the scope probe on pin AO1 and connect the scope probe GND to one of the AGND pins on the USB DAQ')
print('You should now observe a constant 3V')
print('')
user_input = input('Press any key to continue.')
# Configure and start 300Hz sine wave with 2V amplitude on AO1 pin
daq_one.configure_analog_output_sine_wave(1,300,amplitude=2)
daq_one.start_analog_output_wave(1)
print('You should now observe a 300Hz sine wave with 2V amplitude.')
print('')
user_input = input('Press any key to continue.')
# Stop previous wave
daq_one.stop_analog_output_wave(1)
# Configure and start PWM wave, 200 Hz, 50% duty cycle, 3.3V amplitude
daq_one.configure_analog_output_pwm_wave(1,200,50,amplitude=3.3)
daq_one.start_analog_output_wave(1)
print('You should now observe a 200Hz PWM wave, 50% duty cycle, with 3.3V amplitude.')
print('')
user_input = input('Press any key to continue.')
# Stop the wave
daq_one.stop_analog_output_wave(1)
print('The wave should now stop. You could set it to GND using set_analog_output() if you wanted.')
print('')
user_input = input('Press any key to continue.')
###############################################################
#*** Pulse Counter Pin Demo: PWM waves ***
###############################################################
print('')
print('--- Pulse Counter Pin Demo: PWM Waves ---')
# Configure a 50 KHz frequency, 75% duty cycle, continuous PWM Wave on the counter pin (CTR0)
# Note that unlike the analog output pins, the CTR0 pin always outputs at an amplitude of 3.3v when producing PWM waves
daq_one.configure_counter_pwm(50000,75)
# Start counter wave
daq_one.start_counter_pwm()
print('Place your scope probe on pin CTR0')
print('You should see a 50kHz, 75% duty cycle PWM wave.')
print('')
user_input = input('Press any key to continue.')
# Now stopping the counter PWM wave
daq_one.stop_counter_pwm()
print('The PWM wave will now stop.')
print('')
user_input = input('Press any key to continue.')
###############################################################
#*** Pulse Counter Pin Demo: Pulse Counting ***
###############################################################
print('')
print('--- Pulse Counter Pin Demo: Pulse Counting ---')
print('Use a piece of wire to bridge CTR0 to DGND several times')
print('CTR0 has an internal pull up resistor. You are simulating a pulse pulling the voltage to GND.')
print('You will have 8 sec to simulate some pulses.')
print('')
user_input = input('Press any key when you are ready to start.')
# Start the Pulse Counter
# Pulses will be counted on the falling edge
daq_one.enable_pulse_counter()
# Sleep for 8 sec
time.sleep(8)
# Read number of pulses
print('Number of pulses counted: '+str(daq_one.read_pulse_counter()))
print('You are using a piece of wire, so it is likely bouncing on and off the screw terminal, counting many pulses')
print('')
user_input = input('Stop simulating pulses. Press any key to continue.')
print('')
print('Now clearing the pulse counter')
daq_one.clear_pulse_counter()
print('Pulse count after clearing: '+str(daq_one.read_pulse_counter()))
###############################################################
#*** Digital Pin Demo ***
###############################################################
print('')
print('--- Digital Pin Demo ---')
# Set P0.0 pin LOW
daq_one.set_digital_output(0,0)
print('Place scope probe on pin P0.0, pin should be LOW')
print('')
user_input = input('Press any key to continue.')
# Set P0.0 pin HIGH
daq_one.set_digital_output(0,1)
print('Place scope probe on pin P0.0, pin should be HIGH')
print('')
user_input = input('Press any key to continue.')
###############################################################
#*** Analog Input Pin Demo ***
###############################################################
print('')
print('--- Analog Input Pin Demo ---')
# Single ended voltage measurement
print('Apply voltage to AI0 pin. If you dont have a power supply handy, you can run a wire from the +5V pin to the AI0 pin.')
print('')
user_input = input('Press any key to continue.')
print('Voltage measured at AI0: '+str(daq_one.read_analog_input(0)))
print('If you are using the +5V pin, remember that this voltage is derived from the USB power supply, so it will be whatever your USB bus is producing, probably something slightly less than 5V.')
# If you want to perform a differential input measurement
# daq_one.read_diff_analog_input()
# https://magicdaq.github.io/magicdaq_docs/#/read_diff_analog_input
###############################################################
#*** M&A Board Demo ***
###############################################################
# M&A Board hardware spec:
# https://www.magicdaq.com/product/ma-board-full-kit/
print('')
print('*** M&A Board Demo ***')
print('Ensure the USB DAQ is connected to the M&A board using the ribbon cable.')
print('Ribbon cable pin out on page 6 of: ')
print('https://www.magicdaq.com/mdaq350datasheet/')
print('Use the provided power cable to apply power to the M&A board.')
print('')
user_input = input('Press any key to continue.')
###############################################################
#*** Relay Demo ***
###############################################################
print('')
print('--- Relay Demo ---')
print('Setting all relays to closed.')
daq_one.set_digital_output(7, 1)
daq_one.set_digital_output(6, 1)
daq_one.set_digital_output(5, 1)
daq_one.set_digital_output(4, 1)
time.sleep(1)
relay_count = 1
digital_pin_count = 7
while relay_count <= 4:
print('Relay #: ' + str(relay_count) + ' Digital Pin #: ' + str(digital_pin_count))
# Set relay to open
print('Setting relay to OPEN.')
daq_one.set_digital_output(digital_pin_count, 0)
time.sleep(1)
# Increment counters
relay_count += 1
digital_pin_count -= 1
print('')
print('')
user_input = input('Press any key to continue.')
###############################################################
#*** Vout Demo ***
###############################################################
print('')
print('--- Vout Demo ---')
print('Vout provides a variable voltage power output capable of up to 2A')
print('By characterizing your M&A board, or building a feedback loop; voltage accuracy of Vout can be made quite good.')
print('See notes on page 4 of the M&A data sheet.')
print('https://www.magicdaq.com/mdaq350datasheet/')
# See the M&A board data sheet for the equation that describes the Vout to Vout_set (0 and 2.77 here) relationship
print('')
print('Vout_set Set to 0V.')
print('Measure Vout with a multimeter. It should be about 10V')
daq_one.set_analog_output(0, 0)
print('')
user_input = input('Press any key to continue.')
print('Vout_set Set to 2.77V')
print('Measure Vout with a multimeter. It should be about 5V')
daq_one.set_analog_output(0, 2.77)
print('')
user_input = input('Press any key to continue.')
###############################################################
#*** Low Current Measurement Demo: A1 ***
###############################################################
print('')
print('--- A1 Low Current Measurement Demo ---')
print('Use the 3.3V board voltage and a 20K resistor to put 165uA through A1.')
print('')
user_input = input('Press any key to continue.')
# See the M&A board data sheet for the equation that describes the Vout to current relationship
pin_4_voltage = daq_one.read_analog_input(4)
print('Read voltage: ' + str(pin_4_voltage))
calculated_current_amps = pin_4_voltage / (332 * 97.863)
ua_current = round((calculated_current_amps / .000001), 3)
print('Calculated uA current: ' + str(ua_current))
###############################################################
#*** Current Measurement Demo: A2 ***
###############################################################
print('')
print('--- A2 Current Measurement Demo (+/- 5A max) ---')
print('Use an external 5V power supply and 5 ohm power resistor to put 1 Amp through A2.')
print('')
user_input = input('Press any key to continue.')
# See the M&A board data sheet for the equation that describes the Vout to current relationship
pin_5_voltage = daq_one.read_analog_input(5)
print('Read voltage: ' + str(pin_5_voltage))
calculated_current_amps = pin_5_voltage / (.01 * 200)
# ma_current = round((calculated_current_amps / .001), 3)
print('Calculated A current: ' + str(calculated_current_amps))
###############################################################
#*** Current Measurement Demo: A3 ***
###############################################################
print('')
print('--- A3 Current Measurement Demo (+/- 1.5A max) ---')
print('Use an external 5V power supply and 5 ohm power resistor to put 1 Amp through A3.')
print('')
user_input = input('Press any key to continue.')
# See the M&A board data sheet for the equation that describes the Vout to current relationship
pin_6_voltage = daq_one.read_analog_input(6)
print('Read voltage: ' + str(pin_6_voltage))
calculated_current_amps = pin_6_voltage / (.033 * 200)
ma_current = round((calculated_current_amps / .001), 3)
print('Calculated mA current: ' + str(ma_current))
###############################################################
#*** Demo Complete. ***
###############################################################
# Close connection to daq
daq_one.close_daq_device()
| [
"magicdaq.api_class.MagicDAQDevice",
"time.sleep",
"sys.exit"
]
| [((6139, 6152), 'time.sleep', 'time.sleep', (['(8)'], {}), '(8)\n', (6149, 6152), False, 'import time\n'), ((8952, 8965), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (8962, 8965), False, 'import time\n'), ((1263, 1279), 'magicdaq.api_class.MagicDAQDevice', 'MagicDAQDevice', ([], {}), '()\n', (1277, 1279), False, 'from magicdaq.api_class import MagicDAQDevice\n'), ((9236, 9249), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (9246, 9249), False, 'import time\n'), ((2289, 2300), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2297, 2300), False, 'import sys\n')] |
# Copyright 2022 IBM Inc. All rights reserved
# SPDX-License-Identifier: Apache2.0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is part of the code to reproduce the results in the paper:
# <NAME> and <NAME>, "Circuit optimization of Hamiltonian
# simulation by simultaneous diagonalization of Pauli clusters," Quantum 4,
# p. 322, 2020. https://doi.org/10.22331/q-2020-09-12-322
import os
import cl
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib.ticker import FuncFormatter
from itertools import permutations
def plotZ(Z, exportFilename=None) :
(m,n) = Z.shape
cmap = colors.LinearSegmentedColormap.from_list("white_and_gray", [(1, 1, 1), (0.6, 0.6, 0.6)], N=2)
fig, ax = plt.subplots()
im = ax.imshow(Z.T,cmap=cmap)
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.set_yticks([])
ax.set_xticks([])
for i in range(1,m) :
plt.plot([-0.5+i,-0.5+i],[-0.5,-0.5+n],color='k',linewidth=0.7)
    for i in range(1,n) :
plt.plot([-0.5,-0.5+m],[-0.5+i,-0.5+i],color='k',linewidth=0.7)
for i in range(n) :
v = Z[:,i]
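        # Annotate each column with the number of value changes between
        # adjacent entries plus the two boundary values; this is used as the
        # per-column CNOT count shown beside the plot (cf. cl.countCNot).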
c = np.sum(v[:-1] != v[1:]) + v[0] + v[-1]
ax.text(m-0.25,i, str(c), fontsize=12, ha='left', va='center')
if (exportFilename) :
plt.gcf().tight_layout()
plt.savefig(exportFilename + "-uncropped.pdf", transparent=True)
plt.close()
os.system("pdfcrop %s-uncropped.pdf %s.pdf" % (exportFilename, exportFilename))
else :
plt.show()
# Make sure the figure directory exists
cl.ensureDirExists('fig')
# Create the test problem
M = cl.create_basic_problem(7,0)
C = cl.generate_full_rank_weights(20,7,seed=1)
M = np.dot(C,M) % 2
# Apply diagonalization and get the final Z matrix
T = cl.Tableau(M)
R = cl.RecordOperations(T.n)
T.addRecorder(R)
cl.zeroX_algorithm1_cz(T)
T = cl.Tableau(M)
R.apply(T)
Z = T.getZ()
# Plot the results
plotZ(Z,'fig/Figure_9a')
print("Original: %d" % cl.countCNot(Z))
idx = cl.orderZ(Z)
plotZ(Z[idx,:],'fig/Figure_9b')
print("Sorted : %d" % cl.countCNot(Z[idx,:]))
# Generate histogram of actual permutations
if (True) :
base = list(range(7))
count = []
for idx2 in permutations(base) :
idx1 = cl.orderZ(Z[:,idx2])
count.append(cl.countCNot(Z[idx1,:][:,idx2]))
def format_percentage(y, position):
return str(100 * y)
# Count is always even
plt.hist(count,bins=list(range(min(count)-1,max(count)+2,2)),rwidth=0.9,density=True)
plt.gca().set_xticklabels([str(x) for x in range(min(count),max(count)+1,2)],fontsize=16)
plt.gca().set_xticks(list(range(min(count),max(count)+1,2)))
plt.gca().yaxis.set_major_formatter(FuncFormatter(format_percentage))
plt.xlabel('Number of CNOT gates',fontsize=16)
plt.ylabel("Percentage",fontsize=16)
for tick in plt.gca().yaxis.get_major_ticks():
tick.label.set_fontsize(16)
plt.gcf().tight_layout()
ratio = 0.5
xleft, xright = plt.gca().get_xlim()
ybottom, ytop = plt.gca().get_ylim()
plt.gca().set_aspect(abs((xright-xleft)/(ybottom-ytop))*ratio)
plt.savefig("fig/Figure_9c-uncropped.pdf", transparent=True)
plt.close()
os.system("pdfcrop fig/Figure_9c-uncropped.pdf fig/Figure_9c.pdf")
| [
"matplotlib.pyplot.ylabel",
"cl.orderZ",
"matplotlib.ticker.FuncFormatter",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.dot",
"itertools.permutations",
"cl.create_basic_problem",
"cl.Tableau",
"matplotlib.pyplot.savefig",
"cl.zeroX_algorithm1_cz",
"cl.ensureDirExists",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.gcf",
"cl.generate_full_rank_weights",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"matplotlib.pyplot.show",
"numpy.sum",
"cl.RecordOperations",
"cl.countCNot",
"os.system",
"matplotlib.pyplot.subplots"
]
| [((2046, 2071), 'cl.ensureDirExists', 'cl.ensureDirExists', (['"""fig"""'], {}), "('fig')\n", (2064, 2071), False, 'import cl\n'), ((2103, 2132), 'cl.create_basic_problem', 'cl.create_basic_problem', (['(7)', '(0)'], {}), '(7, 0)\n', (2126, 2132), False, 'import cl\n'), ((2136, 2180), 'cl.generate_full_rank_weights', 'cl.generate_full_rank_weights', (['(20)', '(7)'], {'seed': '(1)'}), '(20, 7, seed=1)\n', (2165, 2180), False, 'import cl\n'), ((2255, 2268), 'cl.Tableau', 'cl.Tableau', (['M'], {}), '(M)\n', (2265, 2268), False, 'import cl\n'), ((2273, 2297), 'cl.RecordOperations', 'cl.RecordOperations', (['T.n'], {}), '(T.n)\n', (2292, 2297), False, 'import cl\n'), ((2315, 2340), 'cl.zeroX_algorithm1_cz', 'cl.zeroX_algorithm1_cz', (['T'], {}), '(T)\n', (2337, 2340), False, 'import cl\n'), ((2345, 2358), 'cl.Tableau', 'cl.Tableau', (['M'], {}), '(M)\n', (2355, 2358), False, 'import cl\n'), ((2475, 2487), 'cl.orderZ', 'cl.orderZ', (['Z'], {}), '(Z)\n', (2484, 2487), False, 'import cl\n'), ((1144, 1241), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'colors.LinearSegmentedColormap.from_list', (['"""white_and_gray"""', '[(1, 1, 1), (0.6, 0.6, 0.6)]'], {'N': '(2)'}), "('white_and_gray', [(1, 1, 1), (0.6,\n 0.6, 0.6)], N=2)\n", (1184, 1241), True, 'import matplotlib.colors as colors\n'), ((1251, 1265), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1263, 1265), True, 'import matplotlib.pyplot as plt\n'), ((2183, 2195), 'numpy.dot', 'np.dot', (['C', 'M'], {}), '(C, M)\n', (2189, 2195), True, 'import numpy as np\n'), ((2679, 2697), 'itertools.permutations', 'permutations', (['base'], {}), '(base)\n', (2691, 2697), False, 'from itertools import permutations\n'), ((3204, 3251), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of CNOT gates"""'], {'fontsize': '(16)'}), "('Number of CNOT gates', fontsize=16)\n", (3214, 3251), True, 'import matplotlib.pyplot as plt\n'), ((3254, 3291), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Percentage"""'], {'fontsize': '(16)'}), "('Percentage', fontsize=16)\n", (3264, 3291), True, 'import matplotlib.pyplot as plt\n'), ((3571, 3631), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""fig/Figure_9c-uncropped.pdf"""'], {'transparent': '(True)'}), "('fig/Figure_9c-uncropped.pdf', transparent=True)\n", (3582, 3631), True, 'import matplotlib.pyplot as plt\n'), ((3635, 3646), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3644, 3646), True, 'import matplotlib.pyplot as plt\n'), ((3650, 3716), 'os.system', 'os.system', (['"""pdfcrop fig/Figure_9c-uncropped.pdf fig/Figure_9c.pdf"""'], {}), "('pdfcrop fig/Figure_9c-uncropped.pdf fig/Figure_9c.pdf')\n", (3659, 3716), False, 'import os\n'), ((1425, 1499), 'matplotlib.pyplot.plot', 'plt.plot', (['[-0.5 + i, -0.5 + i]', '[-0.5, -0.5 + n]'], {'color': '"""k"""', 'linewidth': '(0.7)'}), "([-0.5 + i, -0.5 + i], [-0.5, -0.5 + n], color='k', linewidth=0.7)\n", (1433, 1499), True, 'import matplotlib.pyplot as plt\n'), ((1522, 1596), 'matplotlib.pyplot.plot', 'plt.plot', (['[-0.5, -0.5 + m]', '[-0.5 + i, -0.5 + i]'], {'color': '"""k"""', 'linewidth': '(0.7)'}), "([-0.5, -0.5 + m], [-0.5 + i, -0.5 + i], color='k', linewidth=0.7)\n", (1530, 1596), True, 'import matplotlib.pyplot as plt\n'), ((1808, 1872), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(exportFilename + '-uncropped.pdf')"], {'transparent': '(True)'}), "(exportFilename + '-uncropped.pdf', transparent=True)\n", (1819, 1872), True, 'import matplotlib.pyplot as plt\n'), ((1879, 1890), 'matplotlib.pyplot.close', 'plt.close', 
([], {}), '()\n', (1888, 1890), True, 'import matplotlib.pyplot as plt\n'), ((1897, 1976), 'os.system', 'os.system', (["('pdfcrop %s-uncropped.pdf %s.pdf' % (exportFilename, exportFilename))"], {}), "('pdfcrop %s-uncropped.pdf %s.pdf' % (exportFilename, exportFilename))\n", (1906, 1976), False, 'import os\n'), ((1993, 2003), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2001, 2003), True, 'import matplotlib.pyplot as plt\n'), ((2451, 2466), 'cl.countCNot', 'cl.countCNot', (['Z'], {}), '(Z)\n', (2463, 2466), False, 'import cl\n'), ((2543, 2566), 'cl.countCNot', 'cl.countCNot', (['Z[idx, :]'], {}), '(Z[idx, :])\n', (2555, 2566), False, 'import cl\n'), ((2714, 2735), 'cl.orderZ', 'cl.orderZ', (['Z[:, idx2]'], {}), '(Z[:, idx2])\n', (2723, 2735), False, 'import cl\n'), ((3167, 3199), 'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['format_percentage'], {}), '(format_percentage)\n', (3180, 3199), False, 'from matplotlib.ticker import FuncFormatter\n'), ((2755, 2788), 'cl.countCNot', 'cl.countCNot', (['Z[idx1, :][:, idx2]'], {}), '(Z[idx1, :][:, idx2])\n', (2767, 2788), False, 'import cl\n'), ((2974, 2983), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2981, 2983), True, 'import matplotlib.pyplot as plt\n'), ((3067, 3076), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3074, 3076), True, 'import matplotlib.pyplot as plt\n'), ((3380, 3389), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3387, 3389), True, 'import matplotlib.pyplot as plt\n'), ((3440, 3449), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3447, 3449), True, 'import matplotlib.pyplot as plt\n'), ((3480, 3489), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3487, 3489), True, 'import matplotlib.pyplot as plt\n'), ((3504, 3513), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3511, 3513), True, 'import matplotlib.pyplot as plt\n'), ((1637, 1660), 'numpy.sum', 'np.sum', (['(v[:-1] != v[1:])'], {}), '(v[:-1] != v[1:])\n', (1643, 1660), True, 'import numpy as np\n'), ((1777, 1786), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1784, 1786), True, 'import matplotlib.pyplot as plt\n'), ((3131, 3140), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3138, 3140), True, 'import matplotlib.pyplot as plt\n'), ((3307, 3316), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3314, 3316), True, 'import matplotlib.pyplot as plt\n')] |
"""
#;+
#; NAME:
#; general
#; Version 1.0
#;
#; PURPOSE:
#; Module for monkeying with files and filenames
#;   17-Sep-2014 by JXP
#;-
#;------------------------------------------------------------------------------
"""
# Import libraries
import numpy as np
from astropy.io import fits
from astropy.io import ascii
import os, pdb
#### ###############################
# Deal with .gz extensions, usually on FITS files
# See if filenm exists, if so pass it back
#
def chk_for_gz(filenm,chk=None):
import os, pdb
# File exist?
if os.path.lexists(filenm):
chk=1
return filenm, chk
# .gz already
if filenm.find('.gz') > 0:
chk=0
return filenm, chk
# Add .gz
if os.path.lexists(filenm+'.gz'):
chk=1
return filenm+'.gz', chk
else:
chk=0
return filenm, chk
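
# Example usage (illustrative):
#   fname, chk = chk_for_gz('spec1d.fits')
#   if chk:  # the file (or its .gz counterpart) exists
#       hdulist = fits.open(fname)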
| [
"os.path.lexists"
]
| [((554, 577), 'os.path.lexists', 'os.path.lexists', (['filenm'], {}), '(filenm)\n', (569, 577), False, 'import os, pdb\n'), ((734, 765), 'os.path.lexists', 'os.path.lexists', (["(filenm + '.gz')"], {}), "(filenm + '.gz')\n", (749, 765), False, 'import os, pdb\n')] |
# Copyright 2022 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test suite for the action module implementation
"""
import os
from unittest import mock
from jinja2 import Environment
from pytest import mark
from metadata.great_expectations.action import OpenMetadataValidationAction
from metadata.great_expectations.utils.ometa_config_handler import render_template
@mark.parametrize(
"input,expected",
[
(None, "list_entities"),
("service_name", "get_by_name"),
],
)
def test_get_table_entity(input, expected, mocked_ometa, mocked_ge_data_context):
"""Test get table entity"""
ometa_validation = OpenMetadataValidationAction(
data_context=mocked_ge_data_context,
config_file_path="my/config/path",
ometa_service_name=input,
)
res = ometa_validation._get_table_entity("database", "schema", "table")
assert res._type == expected
def test_create_jinja_environment(fixture_jinja_environment):
"""Test create jinja environment"""
assert isinstance(fixture_jinja_environment, Environment)
@mock.patch.dict(os.environ, {"API_VERSION": "v1"})
def test_render_template(fixture_jinja_environment):
"""Test create jinja environment"""
tmplt = render_template(fixture_jinja_environment)
assert tmplt == "hostPort: http://localhost:8585\napiVersion: v1"
| [
"metadata.great_expectations.utils.ometa_config_handler.render_template",
"pytest.mark.parametrize",
"unittest.mock.patch.dict",
"metadata.great_expectations.action.OpenMetadataValidationAction"
]
| [((883, 982), 'pytest.mark.parametrize', 'mark.parametrize', (['"""input,expected"""', "[(None, 'list_entities'), ('service_name', 'get_by_name')]"], {}), "('input,expected', [(None, 'list_entities'), (\n 'service_name', 'get_by_name')])\n", (899, 982), False, 'from pytest import mark\n'), ((1586, 1636), 'unittest.mock.patch.dict', 'mock.patch.dict', (['os.environ', "{'API_VERSION': 'v1'}"], {}), "(os.environ, {'API_VERSION': 'v1'})\n", (1601, 1636), False, 'from unittest import mock\n'), ((1149, 1279), 'metadata.great_expectations.action.OpenMetadataValidationAction', 'OpenMetadataValidationAction', ([], {'data_context': 'mocked_ge_data_context', 'config_file_path': '"""my/config/path"""', 'ometa_service_name': 'input'}), "(data_context=mocked_ge_data_context,\n config_file_path='my/config/path', ometa_service_name=input)\n", (1177, 1279), False, 'from metadata.great_expectations.action import OpenMetadataValidationAction\n'), ((1742, 1784), 'metadata.great_expectations.utils.ometa_config_handler.render_template', 'render_template', (['fixture_jinja_environment'], {}), '(fixture_jinja_environment)\n', (1757, 1784), False, 'from metadata.great_expectations.utils.ometa_config_handler import render_template\n')] |
# -*- coding: utf-8 -*-
from storyruntime.Containers import Containers
from storyruntime.constants.ServiceConstants import ServiceConstants
import storyscript
def test_containers_format_command(story):
"""
Ensures a simple resolve can be performed
"""
story_text = 'alpine echo msg:"foo"\n'
story.context = {}
story.app.services = {
'alpine': {
ServiceConstants.config: {
'actions': {
'echo': {
'arguments': {'msg': {'type': 'string'}}
}
}
}
}
}
story.tree = storyscript.Api.loads(story_text).result()['tree']
assert Containers.format_command(
story, story.line('1'), 'alpine', 'echo'
) == ['echo', '{"msg":"foo"}']
def test_containers_format_command_no_arguments(story):
story_text = 'alpine echo\n'
story.context = {}
story.app.services = {
'alpine': {
ServiceConstants.config: {
'actions': {
'echo': {}
}
}
}
}
story.tree = storyscript.Api.loads(story_text).result()['tree']
assert Containers.format_command(
story, story.line('1'), 'alpine', 'echo'
) == ['echo']
| [
"storyscript.Api.loads"
]
| [((631, 664), 'storyscript.Api.loads', 'storyscript.Api.loads', (['story_text'], {}), '(story_text)\n', (652, 664), False, 'import storyscript\n'), ((1129, 1162), 'storyscript.Api.loads', 'storyscript.Api.loads', (['story_text'], {}), '(story_text)\n', (1150, 1162), False, 'import storyscript\n')] |
from django.contrib import admin
from django.shortcuts import redirect
from django.utils.safestring import mark_safe
from django.contrib.admin.widgets import AdminFileWidget
class AdminImageWidget(AdminFileWidget):
def render(self, name, value, attrs=None, renderer=None):
output = []
if value and getattr(value, "url", None):
output.append(u'<a href="%s" target="_blank">%s</a>' % (value.url, thumbnail(value)))
output.append(super(AdminFileWidget, self).render(name, value, attrs, renderer))
return mark_safe(u''.join(output))
class ImageWidgetAdmin(admin.ModelAdmin):
image_fields = []
def formfield_for_dbfield(self, db_field, **kwargs):
if db_field.name in self.image_fields:
kwargs.pop("request", None)
kwargs['widget'] = AdminImageWidget
return db_field.formfield(**kwargs)
return super(ImageWidgetAdmin, self).formfield_for_dbfield(db_field, **kwargs)
def redirect_one_object(model, obj):
response = redirect(f'/admin/{model._meta.app_label}/{model._meta.model_name}/add/')
if obj:
response = redirect(f'/admin/{model._meta.app_label}/{model._meta.model_name}/{obj.pk}/change/')
return response
def thumbnail(obj, size='col-md-2'):
return mark_safe('<img src="{}" class="img-thumbnail {} p-0">'.format(obj.url, size))
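
# Example usage (illustrative; assumes a model with an ImageField named "photo"):
#   class PhotoAdmin(ImageWidgetAdmin):
#       image_fields = ['photo']
#   admin.site.register(Photo, PhotoAdmin)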
| [
"django.shortcuts.redirect"
]
| [((1027, 1100), 'django.shortcuts.redirect', 'redirect', (['f"""/admin/{model._meta.app_label}/{model._meta.model_name}/add/"""'], {}), "(f'/admin/{model._meta.app_label}/{model._meta.model_name}/add/')\n", (1035, 1100), False, 'from django.shortcuts import redirect\n'), ((1132, 1227), 'django.shortcuts.redirect', 'redirect', (['f"""/admin/{model._meta.app_label}/{model._meta.model_name}/{obj.pk}/change/"""'], {}), "(\n f'/admin/{model._meta.app_label}/{model._meta.model_name}/{obj.pk}/change/'\n )\n", (1140, 1227), False, 'from django.shortcuts import redirect\n')] |
import unittest
class Solution:
def setZeroes(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: void Do not return anything, modify matrix in-place instead.
"""
rows = [0] * len(matrix)
cols = [0] * len(matrix[0])
for i, row in enumerate(matrix):
for j, num in enumerate(row):
if not num:
rows[i] = 1
cols[j] = 1
for row, num in enumerate(rows):
if num:
for j in range(len(matrix[0])):
matrix[row][j] = 0
for col, num in enumerate(cols):
if num:
for i in range(len(matrix)):
matrix[i][col] = 0
class Test(unittest.TestCase):
def test(self):
self._test(
[
[1, 2, 0],
[1, 2, 3],
[0, 2, 3],
],
[
[0, 0, 0],
[0, 2, 0],
[0, 0, 0],
]
)
def _test(self, matrix, expected):
Solution().setZeroes(matrix)
self.assertEqual(expected, matrix)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main"
]
| [((1201, 1216), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1214, 1216), False, 'import unittest\n')] |
import argparse
import collections
import shutil
import sys
import time
from datetime import timedelta
from pathlib import Path
import torch
from torch.nn.parallel import DataParallel, DistributedDataParallel
try:
# PyTorch >= 1.6 supports mixed precision training
from torch.cuda.amp import autocast
amp_support = True
except:
amp_support = False
from openunreid.apis import GANBaseRunner, set_random_seed, infer_gan
from openunreid.core.solvers import build_lr_scheduler, build_optimizer
from openunreid.data import (
build_test_dataloader,
build_train_dataloader,
build_val_dataloader,
)
from openunreid.models import build_gan_model
from openunreid.models.losses import build_loss
from openunreid.models.utils.extract import extract_features
from openunreid.utils.config import (
cfg,
cfg_from_list,
cfg_from_yaml_file,
log_config_to_file,
)
from openunreid.utils.dist_utils import init_dist, synchronize
from openunreid.utils.file_utils import mkdir_if_missing
from openunreid.utils.logger import Logger
class SPGANRunner(GANBaseRunner):
def train_step(self, iter, batch):
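        # One optimization step: run both generators forward, update the
        # generators every other iteration, update the SiaNet metric network
        # once self._epoch > 0, and finally update the discriminators D_A/D_B.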
data_src, data_tgt = batch[0], batch[1]
self.real_A = data_src['img'].cuda()
self.real_B = data_tgt['img'].cuda()
# Forward
self.fake_B = self.model['G_A'](self.real_A) # G_A(A)
self.fake_A = self.model['G_B'](self.real_B) # G_B(B)
self.rec_A = self.model['G_B'](self.fake_B) # G_B(G_A(A))
self.rec_B = self.model['G_A'](self.fake_A) # G_A(G_B(B))
# G_A and G_B
if iter % 2 == 0:
self.set_requires_grad([self.model['D_A'], self.model['D_B'], self.model['Metric']], False) # save memory
if self.scaler is None:
self.optimizer['G'].zero_grad()
else:
with autocast(enabled=False):
self.optimizer['G'].zero_grad()
if self._epoch > 1:
self.backward_G(retain_graph=True)
self.backward_GM()
else:
self.backward_G()
if self.scaler is None:
self.optimizer['G'].step()
else:
with autocast(enabled=False):
self.scaler.step(self.optimizer['G'])
# SiaNet for SPGAN
if self._epoch > 0:
self.set_requires_grad([self.model['Metric']], True)
if self.scaler is None:
self.optimizer['Metric'].zero_grad()
else:
with autocast(enabled=False):
self.optimizer['Metric'].zero_grad()
self.backward_M()
if self.scaler is None:
self.optimizer['Metric'].step()
else:
with autocast(enabled=False):
self.scaler.step(self.optimizer['Metric'])
# D_A and D_B
self.set_requires_grad([self.model['D_A'], self.model['D_B']], True)
# self.optimizer['D'].zero_grad()
# self.backward_D()
# self.optimizer['D'].step()
if self.scaler is None:
self.optimizer['D'].zero_grad()
else:
with autocast(enabled=False):
self.optimizer['D'].zero_grad()
self.backward_D()
if self.scaler is None:
self.optimizer['D'].step()
else:
with autocast(enabled=False):
self.scaler.step(self.optimizer['D'])
# save translated images
if self._rank == 0:
self.save_imgs(['real_A', 'real_B', 'fake_A', 'fake_B', 'rec_A', 'rec_B'])
return 0
def backward_GM(self):
real_A_metric = self.model['Metric'](self.real_A)
real_B_metric = self.model['Metric'](self.real_B)
fake_A_metric = self.model['Metric'](self.fake_A)
fake_B_metric = self.model['Metric'](self.fake_B)
# positive pairs
loss_pos = self.criterions['sia_G'](real_A_metric, fake_B_metric, 1) + \
self.criterions['sia_G'](real_B_metric, fake_A_metric, 1)
# negative pairs
loss_neg = self.criterions['sia_G'](fake_B_metric, real_B_metric, 0) + \
self.criterions['sia_G'](fake_A_metric, real_A_metric, 0)
loss_M = (loss_pos + 0.5 * loss_neg) / 4.0
loss = loss_M * self.cfg.TRAIN.LOSS.losses['sia_G']
if self.scaler is None:
loss.backward()
else:
with autocast(enabled=False):
self.scaler.scale(loss).backward()
meters = {'sia_G': loss_M.item()}
self.train_progress.update(meters)
def backward_M(self):
real_A_metric = self.model['Metric'](self.real_A)
real_B_metric = self.model['Metric'](self.real_B)
fake_A_metric = self.model['Metric'](self.fake_A.detach())
fake_B_metric = self.model['Metric'](self.fake_B.detach())
# positive pairs
loss_pos = self.criterions['sia_M'](real_A_metric, fake_B_metric, 1) + \
self.criterions['sia_M'](real_B_metric, fake_A_metric, 1)
# negative pairs
loss_neg = self.criterions['sia_M'](real_A_metric, real_B_metric, 0)
loss_M = (loss_pos + 2 * loss_neg) / 3.0
loss = loss_M * self.cfg.TRAIN.LOSS.losses['sia_M']
if self.scaler is None:
loss.backward()
else:
with autocast(enabled=False):
self.scaler.scale(loss).backward()
meters = {'sia_M': loss_M.item()}
self.train_progress.update(meters)
def parge_config():
parser = argparse.ArgumentParser(description="SPGAN training")
parser.add_argument("config", help="train config file path")
parser.add_argument(
"--work-dir", help="the dir to save logs and models", default=""
)
parser.add_argument("--resume-from", help="the checkpoint file to resume from")
parser.add_argument(
"--launcher",
type=str,
choices=["none", "pytorch", "slurm"],
default="none",
help="job launcher",
)
parser.add_argument("--tcp-port", type=str, default="5017")
parser.add_argument(
"--set",
dest="set_cfgs",
default=None,
nargs=argparse.REMAINDER,
help="set extra config keys if needed",
)
args = parser.parse_args()
cfg_from_yaml_file(args.config, cfg)
assert len(list(cfg.TRAIN.datasets.keys()))==2, \
"the number of datasets for domain-translation training should be two"
cfg.launcher = args.launcher
cfg.tcp_port = args.tcp_port
if not args.work_dir:
args.work_dir = Path(args.config).stem
cfg.work_dir = cfg.LOGS_ROOT / args.work_dir
mkdir_if_missing(cfg.work_dir)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs, cfg)
shutil.copy(args.config, cfg.work_dir / "config.yaml")
return args, cfg
def main():
start_time = time.monotonic()
# init distributed training
args, cfg = parge_config()
dist = init_dist(cfg)
set_random_seed(cfg.TRAIN.seed, cfg.TRAIN.deterministic)
synchronize()
# init logging file
logger = Logger(cfg.work_dir / 'log.txt', debug=False)
sys.stdout = logger
print("==========\nArgs:{}\n==========".format(args))
log_config_to_file(cfg)
# build train loader
train_loader, _ = build_train_dataloader(cfg, joint=False)
# build model
model = build_gan_model(cfg)
for key in model.keys():
model[key].cuda()
if dist:
ddp_cfg = {
"device_ids": [cfg.gpu],
"output_device": cfg.gpu,
"find_unused_parameters": True,
}
for key in model.keys():
model[key] = torch.nn.parallel.DistributedDataParallel(model[key], **ddp_cfg)
elif cfg.total_gpus > 1:
for key in model.keys():
model[key] = torch.nn.DataParallel(model[key])
# build optimizer
optimizer = {}
optimizer['G'] = build_optimizer([model['G_A'], model['G_B']], **cfg.TRAIN.OPTIM)
optimizer['D'] = build_optimizer([model['D_A'], model['D_B']], **cfg.TRAIN.OPTIM)
optimizer['Metric'] = build_optimizer([model['Metric']], **cfg.TRAIN.OPTIM)
# build lr_scheduler
if cfg.TRAIN.SCHEDULER.lr_scheduler is not None:
lr_scheduler = [build_lr_scheduler(optimizer[key], **cfg.TRAIN.SCHEDULER) \
for key in optimizer.keys()]
else:
lr_scheduler = None
# build loss functions
criterions = build_loss(cfg.TRAIN.LOSS, cuda=True)
# build runner
runner = SPGANRunner(
cfg,
model,
optimizer,
criterions,
train_loader,
lr_scheduler=lr_scheduler,
meter_formats={"Time": ":.3f"}
)
# resume
if args.resume_from:
runner.resume(args.resume_from)
# start training
runner.run()
# load the latest model
# runner.resume(cfg.work_dir)
# final inference
test_loader, _ = build_val_dataloader(
cfg,
for_clustering=True,
all_datasets=True
)
# source to target
infer_gan(
cfg,
model['G_A'],
test_loader[0],
dataset_name=list(cfg.TRAIN.datasets.keys())[0]
)
# target to source
infer_gan(
cfg,
model['G_B'],
test_loader[1],
dataset_name=list(cfg.TRAIN.datasets.keys())[1]
)
# print time
end_time = time.monotonic()
print("Total running time: ", timedelta(seconds=end_time - start_time))
if __name__ == '__main__':
main()
| [
"openunreid.utils.dist_utils.init_dist",
"openunreid.models.losses.build_loss",
"openunreid.utils.logger.Logger",
"datetime.timedelta",
"argparse.ArgumentParser",
"pathlib.Path",
"openunreid.data.build_val_dataloader",
"openunreid.utils.config.cfg.TRAIN.datasets.keys",
"torch.cuda.amp.autocast",
"openunreid.core.solvers.build_lr_scheduler",
"torch.nn.parallel.DistributedDataParallel",
"openunreid.models.build_gan_model",
"openunreid.utils.config.cfg_from_yaml_file",
"time.monotonic",
"openunreid.apis.set_random_seed",
"openunreid.utils.config.cfg_from_list",
"openunreid.utils.config.log_config_to_file",
"shutil.copy",
"openunreid.utils.dist_utils.synchronize",
"openunreid.data.build_train_dataloader",
"openunreid.core.solvers.build_optimizer",
"openunreid.utils.file_utils.mkdir_if_missing",
"torch.nn.DataParallel"
]
| [((5588, 5641), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""SPGAN training"""'}), "(description='SPGAN training')\n", (5611, 5641), False, 'import argparse\n'), ((6342, 6378), 'openunreid.utils.config.cfg_from_yaml_file', 'cfg_from_yaml_file', (['args.config', 'cfg'], {}), '(args.config, cfg)\n', (6360, 6378), False, 'from openunreid.utils.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file\n'), ((6708, 6738), 'openunreid.utils.file_utils.mkdir_if_missing', 'mkdir_if_missing', (['cfg.work_dir'], {}), '(cfg.work_dir)\n', (6724, 6738), False, 'from openunreid.utils.file_utils import mkdir_if_missing\n'), ((6820, 6874), 'shutil.copy', 'shutil.copy', (['args.config', "(cfg.work_dir / 'config.yaml')"], {}), "(args.config, cfg.work_dir / 'config.yaml')\n", (6831, 6874), False, 'import shutil\n'), ((6928, 6944), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (6942, 6944), False, 'import time\n'), ((7020, 7034), 'openunreid.utils.dist_utils.init_dist', 'init_dist', (['cfg'], {}), '(cfg)\n', (7029, 7034), False, 'from openunreid.utils.dist_utils import init_dist, synchronize\n'), ((7039, 7095), 'openunreid.apis.set_random_seed', 'set_random_seed', (['cfg.TRAIN.seed', 'cfg.TRAIN.deterministic'], {}), '(cfg.TRAIN.seed, cfg.TRAIN.deterministic)\n', (7054, 7095), False, 'from openunreid.apis import GANBaseRunner, set_random_seed, infer_gan\n'), ((7100, 7113), 'openunreid.utils.dist_utils.synchronize', 'synchronize', ([], {}), '()\n', (7111, 7113), False, 'from openunreid.utils.dist_utils import init_dist, synchronize\n'), ((7152, 7197), 'openunreid.utils.logger.Logger', 'Logger', (["(cfg.work_dir / 'log.txt')"], {'debug': '(False)'}), "(cfg.work_dir / 'log.txt', debug=False)\n", (7158, 7197), False, 'from openunreid.utils.logger import Logger\n'), ((7284, 7307), 'openunreid.utils.config.log_config_to_file', 'log_config_to_file', (['cfg'], {}), '(cfg)\n', (7302, 7307), False, 'from openunreid.utils.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file\n'), ((7356, 7396), 'openunreid.data.build_train_dataloader', 'build_train_dataloader', (['cfg'], {'joint': '(False)'}), '(cfg, joint=False)\n', (7378, 7396), False, 'from openunreid.data import build_test_dataloader, build_train_dataloader, build_val_dataloader\n'), ((7428, 7448), 'openunreid.models.build_gan_model', 'build_gan_model', (['cfg'], {}), '(cfg)\n', (7443, 7448), False, 'from openunreid.models import build_gan_model\n'), ((7974, 8038), 'openunreid.core.solvers.build_optimizer', 'build_optimizer', (["[model['G_A'], model['G_B']]"], {}), "([model['G_A'], model['G_B']], **cfg.TRAIN.OPTIM)\n", (7989, 8038), False, 'from openunreid.core.solvers import build_lr_scheduler, build_optimizer\n'), ((8060, 8124), 'openunreid.core.solvers.build_optimizer', 'build_optimizer', (["[model['D_A'], model['D_B']]"], {}), "([model['D_A'], model['D_B']], **cfg.TRAIN.OPTIM)\n", (8075, 8124), False, 'from openunreid.core.solvers import build_lr_scheduler, build_optimizer\n'), ((8151, 8204), 'openunreid.core.solvers.build_optimizer', 'build_optimizer', (["[model['Metric']]"], {}), "([model['Metric']], **cfg.TRAIN.OPTIM)\n", (8166, 8204), False, 'from openunreid.core.solvers import build_lr_scheduler, build_optimizer\n'), ((8504, 8541), 'openunreid.models.losses.build_loss', 'build_loss', (['cfg.TRAIN.LOSS'], {'cuda': '(True)'}), '(cfg.TRAIN.LOSS, cuda=True)\n', (8514, 8541), False, 'from openunreid.models.losses import build_loss\n'), ((8982, 9047), 
'openunreid.data.build_val_dataloader', 'build_val_dataloader', (['cfg'], {'for_clustering': '(True)', 'all_datasets': '(True)'}), '(cfg, for_clustering=True, all_datasets=True)\n', (9002, 9047), False, 'from openunreid.data import build_test_dataloader, build_train_dataloader, build_val_dataloader\n'), ((9493, 9509), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (9507, 9509), False, 'import time\n'), ((6781, 6814), 'openunreid.utils.config.cfg_from_list', 'cfg_from_list', (['args.set_cfgs', 'cfg'], {}), '(args.set_cfgs, cfg)\n', (6794, 6814), False, 'from openunreid.utils.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file\n'), ((9544, 9584), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(end_time - start_time)'}), '(seconds=end_time - start_time)\n', (9553, 9584), False, 'from datetime import timedelta\n'), ((6632, 6649), 'pathlib.Path', 'Path', (['args.config'], {}), '(args.config)\n', (6636, 6649), False, 'from pathlib import Path\n'), ((7725, 7789), 'torch.nn.parallel.DistributedDataParallel', 'torch.nn.parallel.DistributedDataParallel', (['model[key]'], {}), '(model[key], **ddp_cfg)\n', (7766, 7789), False, 'import torch\n'), ((8308, 8365), 'openunreid.core.solvers.build_lr_scheduler', 'build_lr_scheduler', (['optimizer[key]'], {}), '(optimizer[key], **cfg.TRAIN.SCHEDULER)\n', (8326, 8365), False, 'from openunreid.core.solvers import build_lr_scheduler, build_optimizer\n'), ((3183, 3206), 'torch.cuda.amp.autocast', 'autocast', ([], {'enabled': '(False)'}), '(enabled=False)\n', (3191, 3206), False, 'from torch.cuda.amp import autocast\n'), ((3384, 3407), 'torch.cuda.amp.autocast', 'autocast', ([], {'enabled': '(False)'}), '(enabled=False)\n', (3392, 3407), False, 'from torch.cuda.amp import autocast\n'), ((4463, 4486), 'torch.cuda.amp.autocast', 'autocast', ([], {'enabled': '(False)'}), '(enabled=False)\n', (4471, 4486), False, 'from torch.cuda.amp import autocast\n'), ((5391, 5414), 'torch.cuda.amp.autocast', 'autocast', ([], {'enabled': '(False)'}), '(enabled=False)\n', (5399, 5414), False, 'from torch.cuda.amp import autocast\n'), ((6399, 6424), 'openunreid.utils.config.cfg.TRAIN.datasets.keys', 'cfg.TRAIN.datasets.keys', ([], {}), '()\n', (6422, 6424), False, 'from openunreid.utils.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file\n'), ((7877, 7910), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model[key]'], {}), '(model[key])\n', (7898, 7910), False, 'import torch\n'), ((1849, 1872), 'torch.cuda.amp.autocast', 'autocast', ([], {'enabled': '(False)'}), '(enabled=False)\n', (1857, 1872), False, 'from torch.cuda.amp import autocast\n'), ((2214, 2237), 'torch.cuda.amp.autocast', 'autocast', ([], {'enabled': '(False)'}), '(enabled=False)\n', (2222, 2237), False, 'from torch.cuda.amp import autocast\n'), ((2546, 2569), 'torch.cuda.amp.autocast', 'autocast', ([], {'enabled': '(False)'}), '(enabled=False)\n', (2554, 2569), False, 'from torch.cuda.amp import autocast\n'), ((2781, 2804), 'torch.cuda.amp.autocast', 'autocast', ([], {'enabled': '(False)'}), '(enabled=False)\n', (2789, 2804), False, 'from torch.cuda.amp import autocast\n'), ((9265, 9290), 'openunreid.utils.config.cfg.TRAIN.datasets.keys', 'cfg.TRAIN.datasets.keys', ([], {}), '()\n', (9288, 9290), False, 'from openunreid.utils.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file\n'), ((9424, 9449), 'openunreid.utils.config.cfg.TRAIN.datasets.keys', 'cfg.TRAIN.datasets.keys', ([], {}), '()\n', (9447, 9449), False, 'from 
openunreid.utils.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file\n')] |
import logging
import requests
import multiprocessing
import pathlib
from typing import List
from typing import Optional
from typing import Tuple
from typing import Dict
from joblib import delayed
from joblib import Parallel
from datetime import date
from datetime import timedelta
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def next_date(start_date=date(2018, 3, 1)):
days_to_download = abs(start_date - date.today()).days - 5
for date_offset in range(days_to_download):
yield start_date
start_date = start_date + timedelta(days=1)
def download_all(inputs: List[Tuple[pathlib.Path, str]], cookies: Optional[Dict]):
session = requests.session()
inputs[0][0].parent.mkdir(parents=True, exist_ok=True)
def download_single_link(file_path: pathlib.Path, url):
thread_nr = multiprocessing.current_process().name
thread_nr = thread_nr[thread_nr.rfind('-') + 1:]
file_name = file_path.stem
if file_path.is_file():
logger.info('{} {} already exists'.format(thread_nr, file_name))
return
try:
response = session.get(url=url, cookies=cookies)
        except (TimeoutError, requests.exceptions.Timeout):
logger.critical('{} Timeout Error'.format(thread_nr))
return
content = response.content.decode('utf-8')
if response.status_code != 200:
logger.critical('{} {}'.format(thread_nr, url, response.status_code))
logger.critical('{}'.format(thread_nr, content))
return
else:
logger.info('{} {} {} OK'.format(thread_nr, file_name, response.status_code))
with open(str(file_path), mode='w', encoding='utf-8') as output_file:
output_file.write(content)
num_cores = multiprocessing.cpu_count()
Parallel(n_jobs=num_cores)(delayed(download_single_link)(*j) for j in inputs)
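
# Example usage (illustrative; the path and URL are placeholders):
#   jobs = [(pathlib.Path('out/2018-03-01.csv'),
#            'https://example.com/export?date=2018-03-01')]
#   download_all(jobs, cookies=None)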
| [
"logging.getLogger",
"logging.NullHandler",
"requests.session",
"datetime.date.today",
"multiprocessing.cpu_count",
"joblib.Parallel",
"datetime.date",
"joblib.delayed",
"datetime.timedelta",
"multiprocessing.current_process"
]
| [((292, 319), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (309, 319), False, 'import logging\n'), ((338, 359), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (357, 359), False, 'import logging\n'), ((388, 404), 'datetime.date', 'date', (['(2018)', '(3)', '(1)'], {}), '(2018, 3, 1)\n', (392, 404), False, 'from datetime import date\n'), ((694, 712), 'requests.session', 'requests.session', ([], {}), '()\n', (710, 712), False, 'import requests\n'), ((1797, 1824), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (1822, 1824), False, 'import multiprocessing\n'), ((1829, 1855), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'num_cores'}), '(n_jobs=num_cores)\n', (1837, 1855), False, 'from joblib import Parallel\n'), ((577, 594), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (586, 594), False, 'from datetime import timedelta\n'), ((854, 887), 'multiprocessing.current_process', 'multiprocessing.current_process', ([], {}), '()\n', (885, 887), False, 'import multiprocessing\n'), ((1856, 1885), 'joblib.delayed', 'delayed', (['download_single_link'], {}), '(download_single_link)\n', (1863, 1885), False, 'from joblib import delayed\n'), ((447, 459), 'datetime.date.today', 'date.today', ([], {}), '()\n', (457, 459), False, 'from datetime import date\n')] |
import logging
import os
import shutil
import time
import torch
model_state = 'model_state.pt'
trainer_state = 'trainer_state.pt'
class Checkpoint():
def __init__(self, step, epoch, model, optim, path=None, opt=None):
self.step = step
self.epoch = epoch
self.model = model
self.optim = optim
self._path = path
self.opt = opt
self.logger = logging.getLogger(__name__)
@property
def path(self):
if self._path is None:
raise LookupError("The checkpoint has not been saved.")
return self._path
@classmethod
def load(cls, model, optim=None, opt=None):
logger = logging.getLogger(__name__)
all_times = sorted(os.listdir(opt.ckpt_fd), reverse=True)
fchckpt = os.path.join(opt.ckpt_fd, all_times[0])
logger.info("load checkpoint from %s" % fchckpt)
resume_model = torch.load(os.path.join(fchckpt, model_state),
map_location=opt.device)
resume_checkpoint = torch.load(os.path.join(fchckpt, trainer_state),
map_location=opt.device)
model.load_state_dict(resume_model)
if optim is not None:
optim.load_state_dict(resume_checkpoint['optimizer'])
return Checkpoint(step=resume_checkpoint['step'],
epoch=resume_checkpoint['epoch'],
model=model,
optim=optim,
path=opt.ckpt_fd)
def save(self):
date_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime())
path = os.path.join(self.opt.ckpt_fd, date_time)
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
torch.save(
{'epoch': self.epoch,
'step': self.step,
'optimizer': self.optim.state_dict()},
os.path.join(path, trainer_state))
torch.save(
self.model.state_dict(), os.path.join(path, model_state))
log_msg = "Validation loss being smaller than previous "
log_msg += "minimum, checkpoint is saved at %s" % path
self.logger.info(log_msg)
return path
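
# Example usage (illustrative):
#   ckpt = Checkpoint(step, epoch, model, optim, opt=opt)
#   ckpt.save()  # writes model/trainer state to a timestamped dir under opt.ckpt_fd
#   ckpt = Checkpoint.load(model, optim=optim, opt=opt)  # resume from newest checkpoint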
| [
"logging.getLogger",
"os.path.exists",
"os.listdir",
"os.makedirs",
"os.path.join",
"shutil.rmtree",
"time.localtime"
]
| [((406, 433), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (423, 433), False, 'import logging\n'), ((677, 704), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (694, 704), False, 'import logging\n'), ((790, 829), 'os.path.join', 'os.path.join', (['opt.ckpt_fd', 'all_times[0]'], {}), '(opt.ckpt_fd, all_times[0])\n', (802, 829), False, 'import os\n'), ((1649, 1690), 'os.path.join', 'os.path.join', (['self.opt.ckpt_fd', 'date_time'], {}), '(self.opt.ckpt_fd, date_time)\n', (1661, 1690), False, 'import os\n'), ((1702, 1722), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1716, 1722), False, 'import os\n'), ((1764, 1781), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (1775, 1781), False, 'import os\n'), ((733, 756), 'os.listdir', 'os.listdir', (['opt.ckpt_fd'], {}), '(opt.ckpt_fd)\n', (743, 756), False, 'import os\n'), ((922, 956), 'os.path.join', 'os.path.join', (['fchckpt', 'model_state'], {}), '(fchckpt, model_state)\n', (934, 956), False, 'import os\n'), ((1056, 1092), 'os.path.join', 'os.path.join', (['fchckpt', 'trainer_state'], {}), '(fchckpt, trainer_state)\n', (1068, 1092), False, 'import os\n'), ((1616, 1632), 'time.localtime', 'time.localtime', ([], {}), '()\n', (1630, 1632), False, 'import time\n'), ((1736, 1755), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (1749, 1755), False, 'import shutil\n'), ((1933, 1966), 'os.path.join', 'os.path.join', (['path', 'trainer_state'], {}), '(path, trainer_state)\n', (1945, 1966), False, 'import os\n'), ((2025, 2056), 'os.path.join', 'os.path.join', (['path', 'model_state'], {}), '(path, model_state)\n', (2037, 2056), False, 'import os\n')] |
import numpy as np
import torch
import os
import cv2
import importlib
from dataset import *
from PIL import Image
from argparse import ArgumentParser
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, CenterCrop, Normalize, Resize
from torchvision.transforms import ToTensor, ToPILImage
from dataset import cityscapes
from lednet import Net
from transform import Relabel, ToLabel, Colorize
import visdom
NUM_CHANNELS = 3
NUM_CLASSES = 20
#* ******************* Test a single image ****************************
image_transform = ToPILImage()
input_transform_cityscapes = Compose([
Resize((512, 1024), Image.BILINEAR),
ToTensor(),
# Normalize([.485, .456, .406], [.229, .224, .225]),
])
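# The eval transform resizes inputs to the 512x1024 network resolution and
# converts them to [0, 1] float tensors; mean/std normalization stays commented out.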
def main(args):
modelpath = args.loadDir + args.loadModel
weightspath = args.loadDir + args.loadWeights
print("Loading model: " + modelpath)
print("Loading weights: " + weightspath)
model = Net(NUM_CLASSES)
model = torch.nn.DataParallel(model)
if (not args.cpu):
model = model.cuda()
# model.load_state_dict(torch.load(args.state))
# model.load_state_dict(torch.load(weightspath)) #not working if missing key
def load_my_state_dict(model, state_dict): # custom function to load model when not all dict elements
own_state = model.state_dict()
for name, param in state_dict.items():
if name not in own_state:
continue
own_state[name].copy_(param)
return model
model = load_my_state_dict(model, torch.load(weightspath))
print("Model and weights LOADED successfully")
model.eval()
if (not os.path.exists(args.datadir)):
print("Error: datadir could not be loaded")
# loader = DataLoader(
# cityscapes('/home/liqi/PycharmProjects/LEDNet/4.png', input_transform_cityscapes, target_transform_cityscapes, subset=args.subset),
# num_workers=args.num_workers, batch_size=1 ,shuffle=False)
input_transform_cityscapes = Compose([
Resize((512, 1024), Image.BILINEAR),
ToTensor(),
# Normalize([.485, .456, .406], [.229, .224, .225]),
])
name ="4.png"
with open(image_path_city('/home/gongyiqun/images', name), 'rb') as f:
images = load_image(f).convert('RGB')
images = input_transform_cityscapes(images)
# For visualizer:
# must launch in other window "python3.6 -m visdom.server -port 8097"
# and access localhost:8097 to see it
if (args.visualize):
vis = visdom.Visdom()
if (not args.cpu):
images = images.cuda()
# labels = labels.cuda()
    a = torch.unsqueeze(images, 0)
inputs = Variable(a)
# targets = Variable(labels)
with torch.no_grad():
outputs = model(inputs)
label = outputs[0].max(0)[1].byte().cpu().data
# label_cityscapes = cityscapes_trainIds2labelIds(label.unsqueeze(0))
label_color = Colorize()(label.unsqueeze(0))
filenameSave = "./save_color/"+"Others/"+name
os.makedirs(os.path.dirname(filenameSave), exist_ok=True)
# image_transform(label.byte()).save(filenameSave)
label_save = ToPILImage()(label_color)
label_save = label_save.resize((1241, 376), Image.BILINEAR)
# label_save = cv2.resize(label_save, (376, 1224),interpolation=cv2.INTER_AREA)
label_save.save(filenameSave)
if (args.visualize):
vis.image(label_color.numpy())
# print(step, filenameSave)
# for step, (images, labels, filename, filenameGt) in enumerate(loader):
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--state')
parser.add_argument('--loadDir', default="../save/logs(KITTI)/")
parser.add_argument('--loadWeights', default="model_best.pth")
parser.add_argument('--loadModel', default="lednet.py")
parser.add_argument('--subset', default="val") # can be val, test, train, demoSequence
parser.add_argument('--datadir', default="")
parser.add_argument('--num-workers', type=int, default=4)
parser.add_argument('--batch-size', type=int, default=1)
parser.add_argument('--cpu', action='store_true')
parser.add_argument('--visualize', action='store_true')
main(parser.parse_args())
| [
"os.path.exists",
"transform.Colorize",
"lednet.Net",
"torchvision.transforms.ToPILImage",
"argparse.ArgumentParser",
"torch.unsqueeze",
"torch.load",
"torch.nn.DataParallel",
"os.path.dirname",
"torchvision.transforms.Resize",
"torch.no_grad",
"torchvision.transforms.ToTensor",
"torch.autograd.Variable",
"visdom.Visdom"
]
| [((590, 602), 'torchvision.transforms.ToPILImage', 'ToPILImage', ([], {}), '()\n', (600, 602), False, 'from torchvision.transforms import ToTensor, ToPILImage\n'), ((974, 990), 'lednet.Net', 'Net', (['NUM_CLASSES'], {}), '(NUM_CLASSES)\n', (977, 990), False, 'from lednet import Net\n'), ((1004, 1032), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (1025, 1032), False, 'import torch\n'), ((2661, 2687), 'torch.unsqueeze', 'torch.unsqueeze', (['images', '(0)'], {}), '(images, 0)\n', (2676, 2687), False, 'import torch\n'), ((2700, 2711), 'torch.autograd.Variable', 'Variable', (['a'], {}), '(a)\n', (2708, 2711), False, 'from torch.autograd import Variable\n'), ((3590, 3606), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (3604, 3606), False, 'from argparse import ArgumentParser\n'), ((646, 681), 'torchvision.transforms.Resize', 'Resize', (['(512, 1024)', 'Image.BILINEAR'], {}), '((512, 1024), Image.BILINEAR)\n', (652, 681), False, 'from torchvision.transforms import Compose, CenterCrop, Normalize, Resize\n'), ((687, 697), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (695, 697), False, 'from torchvision.transforms import ToTensor, ToPILImage\n'), ((1577, 1600), 'torch.load', 'torch.load', (['weightspath'], {}), '(weightspath)\n', (1587, 1600), False, 'import torch\n'), ((1684, 1712), 'os.path.exists', 'os.path.exists', (['args.datadir'], {}), '(args.datadir)\n', (1698, 1712), False, 'import os\n'), ((2551, 2566), 'visdom.Visdom', 'visdom.Visdom', ([], {}), '()\n', (2564, 2566), False, 'import visdom\n'), ((2754, 2769), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2767, 2769), False, 'import torch\n'), ((2947, 2957), 'transform.Colorize', 'Colorize', ([], {}), '()\n', (2955, 2957), False, 'from transform import Relabel, ToLabel, Colorize\n'), ((3046, 3075), 'os.path.dirname', 'os.path.dirname', (['filenameSave'], {}), '(filenameSave)\n', (3061, 3075), False, 'import os\n'), ((3165, 3177), 'torchvision.transforms.ToPILImage', 'ToPILImage', ([], {}), '()\n', (3175, 3177), False, 'from torchvision.transforms import ToTensor, ToPILImage\n'), ((2057, 2092), 'torchvision.transforms.Resize', 'Resize', (['(512, 1024)', 'Image.BILINEAR'], {}), '((512, 1024), Image.BILINEAR)\n', (2063, 2092), False, 'from torchvision.transforms import Compose, CenterCrop, Normalize, Resize\n'), ((2102, 2112), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (2110, 2112), False, 'from torchvision.transforms import ToTensor, ToPILImage\n')] |
import pytest
from pytest import raises
from pydantic_jsonapi.resource_linkage import ResourceLinkage
from pydantic import BaseModel, ValidationError
class ThingWithLinkageData(BaseModel):
data: ResourceLinkage
class TestResourceLinks:
@pytest.mark.parametrize(
'linkage, message',
[
(
None,
'null is valid for empty to-one relationships',
),
(
[],
'empty list valid for empty to-many relationships.',
),
(
{'id': 'abc123', 'type': 'item', 'meta': None},
'single resource identifier valid for non-empty to-one relationships.',
),
(
[
{'id': 'abc123', 'type': 'item', 'meta': None},
{'id': 'def456', 'type': 'item', 'meta': None},
],
'array of resource identifiers valid for non-empty to-many relationships.',
),
],
)
def test_valid_possibilities(self, linkage, message):
structure_to_validate = {
'data': linkage
}
validated = ThingWithLinkageData(**structure_to_validate)
assert validated.dict() == structure_to_validate, message
def test_invalid_resource_identifier(self):
structure_to_validate = {
'data': {}
}
with raises(ValidationError) as e:
ThingWithLinkageData(**structure_to_validate)
assert e.value.errors() == [
{'loc': ('data', 'id'), 'msg': 'field required', 'type': 'value_error.missing'},
{'loc': ('data', 'type'), 'msg': 'field required', 'type': 'value_error.missing'},
{'loc': ('data',), 'msg': 'value is not a valid list', 'type': 'type_error.list'},
]
def test_invalid_resource_identifier_array(self):
structure_to_validate = {
'data': [
{}
],
}
with raises(ValidationError) as e:
ThingWithLinkageData(**structure_to_validate)
assert e.value.errors() == [
{'loc': ('data',), 'msg': 'value is not a valid dict', 'type': 'type_error.dict'},
{'loc': ('data', 0, 'id'), 'msg': 'field required', 'type': 'value_error.missing'},
{'loc': ('data', 0, 'type'), 'msg': 'field required', 'type': 'value_error.missing'},
]
| [
"pytest.mark.parametrize",
"pytest.raises"
]
| [((251, 742), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""linkage, message"""', "[(None, 'null is valid for empty to-one relationships'), ([],\n 'empty list valid for empty to-many relationships.'), ({'id': 'abc123',\n 'type': 'item', 'meta': None},\n 'single resource identifier valid for non-empty to-one relationships.'),\n ([{'id': 'abc123', 'type': 'item', 'meta': None}, {'id': 'def456',\n 'type': 'item', 'meta': None}],\n 'array of resource identifiers valid for non-empty to-many relationships.')\n ]"], {}), "('linkage, message', [(None,\n 'null is valid for empty to-one relationships'), ([],\n 'empty list valid for empty to-many relationships.'), ({'id': 'abc123',\n 'type': 'item', 'meta': None},\n 'single resource identifier valid for non-empty to-one relationships.'),\n ([{'id': 'abc123', 'type': 'item', 'meta': None}, {'id': 'def456',\n 'type': 'item', 'meta': None}],\n 'array of resource identifiers valid for non-empty to-many relationships.'\n )])\n", (274, 742), False, 'import pytest\n'), ((1430, 1453), 'pytest.raises', 'raises', (['ValidationError'], {}), '(ValidationError)\n', (1436, 1453), False, 'from pytest import raises\n'), ((2016, 2039), 'pytest.raises', 'raises', (['ValidationError'], {}), '(ValidationError)\n', (2022, 2039), False, 'from pytest import raises\n')] |
import numpy as np
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.utils import np_utils
import keras.callbacks
import keras.backend.tensorflow_backend as KTF
import tensorflow as tf
batch_size = 128
nb_classes = 10
nb_epoch = 20
nb_data = 28*28
log_filepath = '/tmp/keras_log'
# load data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# reshape
X_train = X_train.reshape(X_train.shape[0], X_train.shape[1]*X_train.shape[2])
X_test = X_test.reshape(X_test.shape[0], X_test.shape[1]*X_test.shape[2])
# rescale
X_train = X_train.astype(np.float32)
X_train /= 255
X_test = X_test.astype(np.float32)
X_test /= 255
# convert class vectors to binary class matrices (one hot vectors)
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
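# e.g. the label 3 becomes the one-hot vector [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]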
old_session = KTF.get_session()
with tf.Graph().as_default():
session = tf.Session('')
KTF.set_session(session)
KTF.set_learning_phase(1)
# build model
model = Sequential()
model.add(Dense(512, input_shape=(nb_data,), init='normal',name='dense1'))
model.add(Activation('relu', name='relu1'))
model.add(Dropout(0.2, name='dropout1'))
model.add(Dense(512, init='normal', name='dense2'))
model.add(Activation('relu', name='relu2'))
model.add(Dropout(0.2, name='dropout2'))
model.add(Dense(10, init='normal', name='dense3'))
model.add(Activation('softmax', name='softmax1'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.001), metrics=['accuracy'])
tb_cb = keras.callbacks.TensorBoard(log_dir=log_filepath, histogram_freq=1)
cbks = [tb_cb]
history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch = nb_epoch, verbose=1, callbacks=cbks)
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
    print('Test accuracy:', score[1])
KTF.set_session(old_session)
| [
"keras.backend.tensorflow_backend.get_session",
"tensorflow.Graph",
"keras.backend.tensorflow_backend.set_session",
"keras.layers.core.Activation",
"keras.datasets.mnist.load_data",
"tensorflow.Session",
"keras.models.Sequential",
"keras.layers.core.Dense",
"keras.utils.np_utils.to_categorical",
"keras.optimizers.SGD",
"keras.layers.core.Dropout",
"keras.backend.tensorflow_backend.set_learning_phase"
]
| [((458, 475), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (473, 475), False, 'from keras.datasets import mnist\n'), ((831, 875), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_train', 'nb_classes'], {}), '(y_train, nb_classes)\n', (854, 875), False, 'from keras.utils import np_utils\n'), ((885, 928), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_test', 'nb_classes'], {}), '(y_test, nb_classes)\n', (908, 928), False, 'from keras.utils import np_utils\n'), ((944, 961), 'keras.backend.tensorflow_backend.get_session', 'KTF.get_session', ([], {}), '()\n', (959, 961), True, 'import keras.backend.tensorflow_backend as KTF\n'), ((2017, 2045), 'keras.backend.tensorflow_backend.set_session', 'KTF.set_session', (['old_session'], {}), '(old_session)\n', (2032, 2045), True, 'import keras.backend.tensorflow_backend as KTF\n'), ((1007, 1021), 'tensorflow.Session', 'tf.Session', (['""""""'], {}), "('')\n", (1017, 1021), True, 'import tensorflow as tf\n'), ((1026, 1050), 'keras.backend.tensorflow_backend.set_session', 'KTF.set_session', (['session'], {}), '(session)\n', (1041, 1050), True, 'import keras.backend.tensorflow_backend as KTF\n'), ((1055, 1080), 'keras.backend.tensorflow_backend.set_learning_phase', 'KTF.set_learning_phase', (['(1)'], {}), '(1)\n', (1077, 1080), True, 'import keras.backend.tensorflow_backend as KTF\n'), ((1111, 1123), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1121, 1123), False, 'from keras.models import Sequential\n'), ((1138, 1202), 'keras.layers.core.Dense', 'Dense', (['(512)'], {'input_shape': '(nb_data,)', 'init': '"""normal"""', 'name': '"""dense1"""'}), "(512, input_shape=(nb_data,), init='normal', name='dense1')\n", (1143, 1202), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((1217, 1249), 'keras.layers.core.Activation', 'Activation', (['"""relu"""'], {'name': '"""relu1"""'}), "('relu', name='relu1')\n", (1227, 1249), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((1265, 1294), 'keras.layers.core.Dropout', 'Dropout', (['(0.2)'], {'name': '"""dropout1"""'}), "(0.2, name='dropout1')\n", (1272, 1294), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((1310, 1350), 'keras.layers.core.Dense', 'Dense', (['(512)'], {'init': '"""normal"""', 'name': '"""dense2"""'}), "(512, init='normal', name='dense2')\n", (1315, 1350), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((1366, 1398), 'keras.layers.core.Activation', 'Activation', (['"""relu"""'], {'name': '"""relu2"""'}), "('relu', name='relu2')\n", (1376, 1398), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((1414, 1443), 'keras.layers.core.Dropout', 'Dropout', (['(0.2)'], {'name': '"""dropout2"""'}), "(0.2, name='dropout2')\n", (1421, 1443), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((1459, 1498), 'keras.layers.core.Dense', 'Dense', (['(10)'], {'init': '"""normal"""', 'name': '"""dense3"""'}), "(10, init='normal', name='dense3')\n", (1464, 1498), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((1514, 1552), 'keras.layers.core.Activation', 'Activation', (['"""softmax"""'], {'name': '"""softmax1"""'}), "('softmax', name='softmax1')\n", (1524, 1552), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((968, 978), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (976, 978), True, 'import tensorflow as tf\n'), ((1636, 1649), 'keras.optimizers.SGD', 'SGD', 
([], {'lr': '(0.001)'}), '(lr=0.001)\n', (1639, 1649), False, 'from keras.optimizers import SGD\n')] |
"""
usage requires these additional modules
pip install azure-batch azure-storage-blob jsonschema pyyaml && pip install git+https://github.com/microsoft/SparseSC.git@ad4bf27edb28f517508f6934f21eb65d17fb6543 && scgrad start
usage:
from SparseSC import fit, aggregate_batch_results
from SparseSC.utils.azure_batch_client import BatchConfig, run
_TIMESTAMP = datetime.utcnow().strftime("%Y%m%d%H%M%S")
BATCH_DIR= "path/to/my/batch_config/"
fit(x=x,..., batchDir=BATCH_DIR)
my_config = BatchConfig(
BATCH_ACCOUNT_NAME="MySecret",
BATCH_ACCOUNT_KEY="MySecret",
BATCH_ACCOUNT_URL="MySecret",
STORAGE_ACCOUNT_NAME="MySecret",
STORAGE_ACCOUNT_KEY="MySecret",
POOL_ID="my-compute-pool",
POOL_NODE_COUNT=0,
POOL_LOW_PRIORITY_NODE_COUNT=20,
POOL_VM_SIZE="STANDARD_A1_v2",
DELETE_POOL_WHEN_DONE=False,
JOB_ID="my-job" + _TIMESTAMP,
DELETE_JOB_WHEN_DONE=False,
CONTAINER_NAME="my-blob-container",
BATCH_DIRECTORY=BATCH_DIR,
)
run(my_config)
fitted_model = aggregate_batch_results("path/to/my/batch_config")
"""
# pylint: disable=differing-type-doc, differing-param-doc, missing-param-doc, missing-raises-doc, missing-return-doc
from __future__ import print_function
import datetime
import io
import os
import sys
import time
import pathlib
import importlib
from collections import defaultdict
import azure.storage.blob as azureblob
from azure.storage.blob.models import ContainerPermissions
import azure.batch.batch_service_client as batch
import azure.batch.batch_auth as batch_auth
import azure.batch.models as models
from SparseSC.cli.stt import get_config
from ..print_progress import print_progress
from .BatchConfig import BatchConfig, validate_config
from yaml import load
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
from .constants import (
_STANDARD_OUT_FILE_NAME,
_CONTAINER_OUTPUT_FILE,
_CONTAINER_INPUT_FILE,
_BATCH_CV_FILE_NAME,
)
FOLD_FILE_PATTERN = "fold_{}.yaml"
# pylint: disable=bad-continuation, invalid-name, protected-access, line-too-long, fixme
sys.path.append(".")
sys.path.append("..")
# Update the Batch and Storage account credential strings in config.py with values
# unique to your accounts. These are used when constructing connection strings
# for the Batch and Storage client objects.
def build_output_sas_url(config, _blob_client):
"""
build a sas token for the output container
"""
sas_token = _blob_client.generate_container_shared_access_signature(
config.CONTAINER_NAME,
ContainerPermissions.READ
+ ContainerPermissions.WRITE
+ ContainerPermissions.DELETE
+ ContainerPermissions.LIST,
datetime.datetime.utcnow() + datetime.timedelta(hours=config.STORAGE_ACCESS_DURATION_HRS),
start=datetime.datetime.utcnow(),
)
_sas_url = "https://{}.blob.core.windows.net/{}?{}".format(
config.STORAGE_ACCOUNT_NAME, config.CONTAINER_NAME, sas_token
)
return _sas_url
def print_batch_exception(batch_exception):
"""
Prints the contents of the specified Batch exception.
:param batch_exception:
"""
print("-------------------------------------------")
print("Exception encountered:")
if (
batch_exception.error
and batch_exception.error.message
and batch_exception.error.message.value
):
print(batch_exception.error.message.value)
if batch_exception.error.values:
print()
for mesg in batch_exception.error.values:
print("{}:\t{}".format(mesg.key, mesg.value))
print("-------------------------------------------")
def build_output_file(container_sas_url, fold_number):
"""
Uploads a local file to an Azure Blob storage container.
:rtype: `azure.batch.models.ResourceFile`
:return: A ResourceFile initialized with a SAS URL appropriate for Batch
tasks.
"""
# where to store the outputs
container_dest = models.OutputFileBlobContainerDestination(
container_url=container_sas_url, path=FOLD_FILE_PATTERN.format(fold_number)
)
dest = models.OutputFileDestination(container=container_dest)
# under what conditions should you attempt to extract the outputs?
upload_options = models.OutputFileUploadOptions(
upload_condition=models.OutputFileUploadCondition.task_success
)
# https://docs.microsoft.com/en-us/azure/batch/batch-task-output-files#specify-output-files-for-task-output
return models.OutputFile(
file_pattern=_CONTAINER_OUTPUT_FILE,
destination=dest,
upload_options=upload_options,
)
def upload_file_to_container(block_blob_client, container_name, file_path, duration_hours=24):
"""
Uploads a local file to an Azure Blob storage container.
:param block_blob_client: A blob service client.
:type block_blob_client: `azure.storage.blob.BlockBlobService`
:param str container_name: The name of the Azure Blob storage container.
:param str file_path: The local path to the file.
:rtype: `azure.batch.models.ResourceFile`
:return: A ResourceFile initialized with a SAS URL appropriate for Batch
tasks.
"""
blob_name = os.path.basename(file_path)
print("Uploading file {} to container [{}]...".format(file_path, container_name))
block_blob_client.create_blob_from_path(container_name, blob_name, file_path)
sas_token = block_blob_client.generate_blob_shared_access_signature(
container_name,
blob_name,
permission=azureblob.BlobPermissions.READ,
expiry=datetime.datetime.utcnow() + datetime.timedelta(hours=duration_hours),
)
sas_url = block_blob_client.make_blob_url(
container_name, blob_name, sas_token=sas_token
)
return models.ResourceFile(http_url=sas_url, file_path=_CONTAINER_INPUT_FILE)
def create_pool(config, batch_service_client):
"""
Creates a pool of compute nodes with the specified OS settings.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str pool_id: An ID for the new pool.
:param str publisher: Marketplace image publisher
:param str offer: Marketplace image offer
:param str sku: Marketplace image sku
"""
# Create a new pool of Linux compute nodes using an Azure Virtual Machines
# Marketplace image. For more information about creating pools of Linux
# nodes, see:
# https://azure.microsoft.com/documentation/articles/batch-linux-nodes/
image_ref_to_use = models.ImageReference(
publisher="microsoft-azure-batch",
offer="ubuntu-server-container",
sku="16-04-lts",
version="latest",
)
if config.REGISTRY_USERNAME:
registry = batch.models.ContainerRegistry(
user_name=config.REGISTRY_USERNAME,
password=config.REGISTRY_PASSWORD,
registry_server=config.REGISTRY_SERVER,
)
container_conf = batch.models.ContainerConfiguration(
container_image_names=[config.DOCKER_CONTAINER],
container_registries=[registry],
)
else:
container_conf = batch.models.ContainerConfiguration(
container_image_names=[config.DOCKER_CONTAINER]
)
new_pool = batch.models.PoolAddParameter(
id=config.POOL_ID,
virtual_machine_configuration=batch.models.VirtualMachineConfiguration(
image_reference=image_ref_to_use,
container_configuration=container_conf,
node_agent_sku_id="batch.node.ubuntu 16.04",
),
vm_size=config.POOL_VM_SIZE,
target_dedicated_nodes=config.POOL_NODE_COUNT,
target_low_priority_nodes=config.POOL_LOW_PRIORITY_NODE_COUNT,
)
batch_service_client.pool.add(new_pool)
def create_job(batch_service_client, job_id, pool_id):
"""
Creates a job with the specified ID, associated with the specified pool.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str job_id: The ID for the job.
:param str pool_id: The ID for the pool.
"""
print("Creating job [{}]...".format(job_id))
job_description = batch.models.JobAddParameter(
id=job_id, pool_info=batch.models.PoolInformation(pool_id=pool_id)
)
batch_service_client.job.add(job_description)
def add_tasks(
config,
_blob_client,
batch_service_client,
container_sas_url,
job_id,
_input_file,
count,
):
"""
Adds a task for each input file in the collection to the specified job.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str job_id: The ID of the job to which to add the tasks.
:param list input_files: The input files
:param output_container_sas_token: A SAS token granting write access to
the specified Azure Blob storage container.
"""
print("Adding {} tasks to job [{}]...".format(count, job_id))
tasks = list()
for fold_number in range(count):
output_file = build_output_file(container_sas_url, fold_number)
# command_line = '/bin/bash -c \'echo "Hello World" && echo "hello: world" > output.yaml\''
command_line = "/bin/bash -c 'stt {} {} {}'".format(
_CONTAINER_INPUT_FILE, _CONTAINER_OUTPUT_FILE, fold_number
)
task_container_settings = models.TaskContainerSettings(
image_name=config.DOCKER_CONTAINER
)
tasks.append(
batch.models.TaskAddParameter(
id="Task_{}".format(fold_number),
command_line=command_line,
resource_files=[_input_file],
output_files=[output_file],
container_settings=task_container_settings,
)
)
batch_service_client.task.add_collection(job_id, tasks)
def wait_for_tasks_to_complete(batch_service_client, job_id, timeout):
"""
Returns when all tasks in the specified job reach the Completed state.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str job_id: The id of the job whose tasks should be to monitored.
:param timedelta timeout: The duration to wait for task completion. If all
tasks in the specified job do not reach Completed state within this time
period, an exception will be raised.
"""
_start_time = datetime.datetime.now()
timeout_expiration = _start_time + timeout
# print( "Monitoring all tasks for 'Completed' state, timeout in {}...".format(timeout), end="",)
while datetime.datetime.now() < timeout_expiration:
sys.stdout.flush()
tasks = [t for t in batch_service_client.task.list(job_id)]
incomplete_tasks = [
task for task in tasks if task.state != models.TaskState.completed
]
hours, remainder = divmod((datetime.datetime.now() - _start_time).seconds, 3600)
minutes, seconds = divmod(remainder, 60)
print_progress(
len(tasks) - len(incomplete_tasks),
len(tasks),
prefix="Time elapsed {:02}:{:02}:{:02}".format(
int(hours), int(minutes), int(seconds)
),
decimals=1,
bar_length=min(len(tasks), 50),
)
error_codes = [t.execution_info.exit_code for t in tasks if t.execution_info and t.execution_info.exit_code ]
if error_codes:
            codes = defaultdict(int)
for cd in error_codes:
codes[cd] +=1
# import pdb; pdb.set_trace()
raise RuntimeError( "\nSome tasks have exited with a non-zero exit code including: " + ", ".join([ "{}({})".format(k,v) for k, v in codes.items() ] ))
if not incomplete_tasks:
print()
return True
time.sleep(1)
print()
raise RuntimeError(
"ERROR: Tasks did not reach 'Completed' state within "
"timeout period of " + str(timeout)
)
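# Example call (a sketch; assumes `batch_client` and a job named "my-job" already
# exist): poll the job for up to two hours before raising.
#   wait_for_tasks_to_complete(batch_client, "my-job", datetime.timedelta(hours=2))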
def print_task_output(batch_service_client, job_id, encoding=None):
"""Prints the stdout.txt file for each task in the job.
:param batch_client: The batch client to use.
:type batch_client: `batchserviceclient.BatchServiceClient`
:param str job_id: The id of the job with task output files to print.
"""
print("Printing task output...")
tasks = batch_service_client.task.list(job_id)
for task in tasks:
node_id = batch_service_client.task.get(job_id, task.id).node_info.node_id
print("Task: {}".format(task.id))
print("Node: {}".format(node_id))
stream = batch_service_client.file.get_from_task(
job_id, task.id, _STANDARD_OUT_FILE_NAME
)
file_text = _read_stream_as_string(stream, encoding)
print("Standard output:")
print(file_text)
def _read_stream_as_string(stream, encoding):
"""Read stream as string
:param stream: input stream generator
:param str encoding: The encoding of the file. The default is utf-8.
:return: The file content.
:rtype: str
"""
    output = io.BytesIO()
    try:
        for data in stream:
            output.write(data)
        if encoding is None:
            encoding = "utf-8"
        return output.getvalue().decode(encoding)
    except Exception as err:
        # Surface read/decode failures as a RuntimeError with a clear message.
        raise RuntimeError("could not write data to stream or decode bytes") from err
    finally:
        output.close()
def _download_files(config, _blob_client, out_path, count):
pathlib.Path(config.BATCH_DIRECTORY).mkdir(parents=True, exist_ok=True)
blob_names = [b.name for b in _blob_client.list_blobs(config.CONTAINER_NAME)]
for i in range(count):
blob_name = FOLD_FILE_PATTERN.format(i)
        if blob_name not in blob_names:
raise RuntimeError("incomplete blob set: missing blob {}".format(blob_name))
out_path = os.path.join(config.BATCH_DIRECTORY, blob_name)
_blob_client.get_blob_to_path(config.CONTAINER_NAME, blob_name, out_path)
def _download_results(config, _blob_client, out_path, count, ptrn=FOLD_FILE_PATTERN):
    pathlib.Path(config.BATCH_DIRECTORY).mkdir(parents=True, exist_ok=True)
    blob_names = [b.name for b in _blob_client.list_blobs(config.CONTAINER_NAME)]
    results = []
    for i in range(count):
        blob_name = ptrn.format(i)
        if blob_name not in blob_names:
            raise RuntimeError("incomplete blob set: missing blob {}".format(blob_name))
        out_path = os.path.join(config.BATCH_DIRECTORY, blob_name)
        # Download the fold result to disk, then parse the YAML payload.
        _blob_client.get_blob_to_path(config.CONTAINER_NAME, blob_name, out_path)
        with open(out_path, "r") as blob:
            results.append(load(blob, Loader=Loader))
    return results
def run(config: BatchConfig, wait=True) -> None:
r"""
:param config: A :class:`BatchConfig` instance with the Azure Batch run parameters
:type config: :class:BatchConfig
:param boolean wait: If true, wait for the batch to complete and then
download the results to file
:raises BatchErrorException: If raised by the Azure Batch Python SDK
"""
# pylint: disable=too-many-locals
# replace any missing values in the configuration with environment variables
config = validate_config(config)
start_time = datetime.datetime.now().replace(microsecond=0)
print(
'Synthetic Controls Run "{}" start time: {}'.format(config.JOB_ID, start_time)
)
print()
_LOCAL_INPUT_FILE = os.path.join(config.BATCH_DIRECTORY, _BATCH_CV_FILE_NAME)
v_pen, w_pen, model_data = get_config(_LOCAL_INPUT_FILE)
n_folds = len(model_data["folds"]) * len(v_pen) * len(w_pen)
# Create the blob client, for use in obtaining references to
# blob storage containers and uploading files to containers.
blob_client = azureblob.BlockBlobService(
account_name=config.STORAGE_ACCOUNT_NAME, account_key=config.STORAGE_ACCOUNT_KEY
)
# Use the blob client to create the containers in Azure Storage if they
# don't yet exist.
blob_client.create_container(config.CONTAINER_NAME, fail_on_exist=False)
CONTAINER_SAS_URL = build_output_sas_url(config, blob_client)
# The collection of data files that are to be processed by the tasks.
input_file_path = os.path.join(sys.path[0], _LOCAL_INPUT_FILE)
# Upload the data files.
input_file = upload_file_to_container(
blob_client, config.CONTAINER_NAME, input_file_path, config.STORAGE_ACCESS_DURATION_HRS
)
# Create a Batch service client. We'll now be interacting with the Batch
# service in addition to Storage
credentials = batch_auth.SharedKeyCredentials(
config.BATCH_ACCOUNT_NAME, config.BATCH_ACCOUNT_KEY
)
batch_client = batch.BatchServiceClient(
credentials, batch_url=config.BATCH_ACCOUNT_URL
)
try:
# Create the pool that will contain the compute nodes that will execute the
# tasks.
try:
create_pool(config, batch_client)
print("Created pool: ", config.POOL_ID)
except models.BatchErrorException:
print("Using pool: ", config.POOL_ID)
# Create the job that will run the tasks.
create_job(batch_client, config.JOB_ID, config.POOL_ID)
# Add the tasks to the job.
add_tasks(
config,
blob_client,
batch_client,
CONTAINER_SAS_URL,
config.JOB_ID,
input_file,
n_folds,
)
if not wait:
return
# Pause execution until tasks reach Completed state.
wait_for_tasks_to_complete(
batch_client, config.JOB_ID, datetime.timedelta(hours=config.STORAGE_ACCESS_DURATION_HRS)
)
_download_files(config, blob_client, config.BATCH_DIRECTORY, n_folds)
except models.BatchErrorException as err:
print_batch_exception(err)
raise err
# Clean up storage resources
# TODO: re-enable this and delete the output container too
# -- print("Deleting container [{}]...".format(input_container_name))
# -- blob_client.delete_container(input_container_name)
# Print out some timing info
end_time = datetime.datetime.now().replace(microsecond=0)
print()
print("Sample end: {}".format(end_time))
print("Elapsed time: {}".format(end_time - start_time))
print()
# Clean up Batch resources (if the user so chooses).
if config.DELETE_POOL_WHEN_DONE:
batch_client.pool.delete(config.POOL_ID)
if config.DELETE_JOB_WHEN_DONE:
batch_client.job.delete(config.JOB_ID)
def load_results(config: BatchConfig) -> None:
r"""
:param config: A :class:`BatchConfig` instance with the Azure Batch run parameters
:type config: :class:BatchConfig
:raises BatchErrorException: If raised by the Azure Batch Python SDK
"""
# pylint: disable=too-many-locals
# replace any missing values in the configuration with environment variables
config = validate_config(config)
start_time = datetime.datetime.now().replace(microsecond=0)
print('Load result for job "{}" start time: {}'.format(config.JOB_ID, start_time))
print()
_LOCAL_INPUT_FILE = os.path.join(config.BATCH_DIRECTORY, _BATCH_CV_FILE_NAME)
v_pen, w_pen, model_data = get_config(_LOCAL_INPUT_FILE)
n_folds = len(model_data["folds"]) * len(v_pen) * len(w_pen)
# Create the blob client, for use in obtaining references to
# blob storage containers and uploading files to containers.
blob_client = azureblob.BlockBlobService(
account_name=config.STORAGE_ACCOUNT_NAME, account_key=config.STORAGE_ACCOUNT_KEY
)
# Create a Batch service client. We'll now be interacting with the Batch
# service in addition to Storage
credentials = batch_auth.SharedKeyCredentials(
config.BATCH_ACCOUNT_NAME, config.BATCH_ACCOUNT_KEY
)
batch_client = batch.BatchServiceClient(
credentials, batch_url=config.BATCH_ACCOUNT_URL
)
try:
# Pause execution until tasks reach Completed state.
wait_for_tasks_to_complete(
batch_client, config.JOB_ID, datetime.timedelta(hours=config.STORAGE_ACCESS_DURATION_HRS)
)
_download_files(config, blob_client, config.BATCH_DIRECTORY, n_folds)
except models.BatchErrorException as err:
print_batch_exception(err)
raise err
# Clean up storage resources
# TODO: re-enable this and delete the output container too
# -- print("Deleting container [{}]...".format(input_container_name))
# -- blob_client.delete_container(input_container_name)
# Print out some timing info
end_time = datetime.datetime.now().replace(microsecond=0)
print()
print("Sample end: {}".format(end_time))
print("Elapsed time: {}".format(end_time - start_time))
print()
# Clean up Batch resources (if the user so chooses).
if config.DELETE_POOL_WHEN_DONE:
batch_client.pool.delete(config.POOL_ID)
if config.DELETE_JOB_WHEN_DONE:
batch_client.job.delete(config.JOB_ID)
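# Workflow sketch (not part of the original module): `run` and `load_results` can be
# combined into a fire-and-forget submission followed by a later collection step.
# `my_config` is assumed to be a fully populated BatchConfig instance.
#   run(my_config, wait=False)   # create the pool/job, upload inputs, add tasks, return
#   ...                          # do other work while the Batch job executes
#   load_results(my_config)      # later: block until completion and download the folds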
if __name__ == "__main__":
# TODO: this is not an ideal API
config_module = importlib.__import__("config")
run(config_module.config)
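    # A sketch of the more conventional spelling (importlib.import_module is the
    # documented wrapper around __import__):
    #   config_module = importlib.import_module("config")
    #   run(config_module.config)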
| [
"importlib.__import__",
"azure.batch.models.ImageReference",
"io.BytesIO",
"azure.batch.models.OutputFileDestination",
"time.sleep",
"yaml.load",
"datetime.timedelta",
"sys.path.append",
"azure.batch.models.OutputFileUploadOptions",
"pathlib.Path",
"azure.batch.batch_service_client.models.ContainerRegistry",
"sys.stdout.flush",
"azure.batch.models.OutputFile",
"SparseSC.cli.stt.get_config",
"azure.batch.batch_service_client.models.ContainerConfiguration",
"azure.batch.batch_service_client.models.PoolInformation",
"azure.storage.blob.BlockBlobService",
"azure.batch.batch_service_client.models.VirtualMachineConfiguration",
"azure.batch.models.ResourceFile",
"datetime.datetime.utcnow",
"os.path.join",
"datetime.datetime.now",
"azure.batch.batch_auth.SharedKeyCredentials",
"collections.defaultdict",
"os.path.basename",
"azure.batch.batch_service_client.BatchServiceClient",
"azure.batch.models.TaskContainerSettings"
]
| [((2092, 2112), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (2107, 2112), False, 'import sys\n'), ((2113, 2134), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (2128, 2134), False, 'import sys\n'), ((4142, 4196), 'azure.batch.models.OutputFileDestination', 'models.OutputFileDestination', ([], {'container': 'container_dest'}), '(container=container_dest)\n', (4170, 4196), True, 'import azure.batch.models as models\n'), ((4290, 4389), 'azure.batch.models.OutputFileUploadOptions', 'models.OutputFileUploadOptions', ([], {'upload_condition': 'models.OutputFileUploadCondition.task_success'}), '(upload_condition=models.\n OutputFileUploadCondition.task_success)\n', (4320, 4389), True, 'import azure.batch.models as models\n'), ((4523, 4630), 'azure.batch.models.OutputFile', 'models.OutputFile', ([], {'file_pattern': '_CONTAINER_OUTPUT_FILE', 'destination': 'dest', 'upload_options': 'upload_options'}), '(file_pattern=_CONTAINER_OUTPUT_FILE, destination=dest,\n upload_options=upload_options)\n', (4540, 4630), True, 'import azure.batch.models as models\n'), ((5234, 5261), 'os.path.basename', 'os.path.basename', (['file_path'], {}), '(file_path)\n', (5250, 5261), False, 'import os\n'), ((5813, 5883), 'azure.batch.models.ResourceFile', 'models.ResourceFile', ([], {'http_url': 'sas_url', 'file_path': '_CONTAINER_INPUT_FILE'}), '(http_url=sas_url, file_path=_CONTAINER_INPUT_FILE)\n', (5832, 5883), True, 'import azure.batch.models as models\n'), ((6602, 6731), 'azure.batch.models.ImageReference', 'models.ImageReference', ([], {'publisher': '"""microsoft-azure-batch"""', 'offer': '"""ubuntu-server-container"""', 'sku': '"""16-04-lts"""', 'version': '"""latest"""'}), "(publisher='microsoft-azure-batch', offer=\n 'ubuntu-server-container', sku='16-04-lts', version='latest')\n", (6623, 6731), True, 'import azure.batch.models as models\n'), ((10582, 10605), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10603, 10605), False, 'import datetime\n'), ((13290, 13302), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (13300, 13302), False, 'import io\n'), ((15624, 15681), 'os.path.join', 'os.path.join', (['config.BATCH_DIRECTORY', '_BATCH_CV_FILE_NAME'], {}), '(config.BATCH_DIRECTORY, _BATCH_CV_FILE_NAME)\n', (15636, 15681), False, 'import os\n'), ((15714, 15743), 'SparseSC.cli.stt.get_config', 'get_config', (['_LOCAL_INPUT_FILE'], {}), '(_LOCAL_INPUT_FILE)\n', (15724, 15743), False, 'from SparseSC.cli.stt import get_config\n'), ((15959, 16071), 'azure.storage.blob.BlockBlobService', 'azureblob.BlockBlobService', ([], {'account_name': 'config.STORAGE_ACCOUNT_NAME', 'account_key': 'config.STORAGE_ACCOUNT_KEY'}), '(account_name=config.STORAGE_ACCOUNT_NAME,\n account_key=config.STORAGE_ACCOUNT_KEY)\n', (15985, 16071), True, 'import azure.storage.blob as azureblob\n'), ((16422, 16466), 'os.path.join', 'os.path.join', (['sys.path[0]', '_LOCAL_INPUT_FILE'], {}), '(sys.path[0], _LOCAL_INPUT_FILE)\n', (16434, 16466), False, 'import os\n'), ((16775, 16864), 'azure.batch.batch_auth.SharedKeyCredentials', 'batch_auth.SharedKeyCredentials', (['config.BATCH_ACCOUNT_NAME', 'config.BATCH_ACCOUNT_KEY'], {}), '(config.BATCH_ACCOUNT_NAME, config.\n BATCH_ACCOUNT_KEY)\n', (16806, 16864), True, 'import azure.batch.batch_auth as batch_auth\n'), ((16894, 16967), 'azure.batch.batch_service_client.BatchServiceClient', 'batch.BatchServiceClient', (['credentials'], {'batch_url': 'config.BATCH_ACCOUNT_URL'}), '(credentials, batch_url=config.BATCH_ACCOUNT_URL)\n', (16918, 
16967), True, 'import azure.batch.batch_service_client as batch\n'), ((19382, 19439), 'os.path.join', 'os.path.join', (['config.BATCH_DIRECTORY', '_BATCH_CV_FILE_NAME'], {}), '(config.BATCH_DIRECTORY, _BATCH_CV_FILE_NAME)\n', (19394, 19439), False, 'import os\n'), ((19472, 19501), 'SparseSC.cli.stt.get_config', 'get_config', (['_LOCAL_INPUT_FILE'], {}), '(_LOCAL_INPUT_FILE)\n', (19482, 19501), False, 'from SparseSC.cli.stt import get_config\n'), ((19717, 19829), 'azure.storage.blob.BlockBlobService', 'azureblob.BlockBlobService', ([], {'account_name': 'config.STORAGE_ACCOUNT_NAME', 'account_key': 'config.STORAGE_ACCOUNT_KEY'}), '(account_name=config.STORAGE_ACCOUNT_NAME,\n account_key=config.STORAGE_ACCOUNT_KEY)\n', (19743, 19829), True, 'import azure.storage.blob as azureblob\n'), ((19973, 20062), 'azure.batch.batch_auth.SharedKeyCredentials', 'batch_auth.SharedKeyCredentials', (['config.BATCH_ACCOUNT_NAME', 'config.BATCH_ACCOUNT_KEY'], {}), '(config.BATCH_ACCOUNT_NAME, config.\n BATCH_ACCOUNT_KEY)\n', (20004, 20062), True, 'import azure.batch.batch_auth as batch_auth\n'), ((20092, 20165), 'azure.batch.batch_service_client.BatchServiceClient', 'batch.BatchServiceClient', (['credentials'], {'batch_url': 'config.BATCH_ACCOUNT_URL'}), '(credentials, batch_url=config.BATCH_ACCOUNT_URL)\n', (20116, 20165), True, 'import azure.batch.batch_service_client as batch\n'), ((21356, 21386), 'importlib.__import__', 'importlib.__import__', (['"""config"""'], {}), "('config')\n", (21376, 21386), False, 'import importlib\n'), ((6819, 6965), 'azure.batch.batch_service_client.models.ContainerRegistry', 'batch.models.ContainerRegistry', ([], {'user_name': 'config.REGISTRY_USERNAME', 'password': 'config.REGISTRY_PASSWORD', 'registry_server': 'config.REGISTRY_SERVER'}), '(user_name=config.REGISTRY_USERNAME, password\n =config.REGISTRY_PASSWORD, registry_server=config.REGISTRY_SERVER)\n', (6849, 6965), True, 'import azure.batch.batch_service_client as batch\n'), ((7033, 7155), 'azure.batch.batch_service_client.models.ContainerConfiguration', 'batch.models.ContainerConfiguration', ([], {'container_image_names': '[config.DOCKER_CONTAINER]', 'container_registries': '[registry]'}), '(container_image_names=[config.\n DOCKER_CONTAINER], container_registries=[registry])\n', (7068, 7155), True, 'import azure.batch.batch_service_client as batch\n'), ((7221, 7310), 'azure.batch.batch_service_client.models.ContainerConfiguration', 'batch.models.ContainerConfiguration', ([], {'container_image_names': '[config.DOCKER_CONTAINER]'}), '(container_image_names=[config.\n DOCKER_CONTAINER])\n', (7256, 7310), True, 'import azure.batch.batch_service_client as batch\n'), ((9521, 9585), 'azure.batch.models.TaskContainerSettings', 'models.TaskContainerSettings', ([], {'image_name': 'config.DOCKER_CONTAINER'}), '(image_name=config.DOCKER_CONTAINER)\n', (9549, 9585), True, 'import azure.batch.models as models\n'), ((10767, 10790), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10788, 10790), False, 'import datetime\n'), ((10821, 10839), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (10837, 10839), False, 'import sys\n'), ((12013, 12026), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (12023, 12026), False, 'import time\n'), ((14035, 14082), 'os.path.join', 'os.path.join', (['config.BATCH_DIRECTORY', 'blob_name'], {}), '(config.BATCH_DIRECTORY, blob_name)\n', (14047, 14082), False, 'import os\n'), ((14640, 14687), 'os.path.join', 'os.path.join', (['config.BATCH_DIRECTORY', 'blob_name'], {}), 
'(config.BATCH_DIRECTORY, blob_name)\n', (14652, 14687), False, 'import os\n'), ((2712, 2738), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (2736, 2738), False, 'import datetime\n'), ((2741, 2801), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': 'config.STORAGE_ACCESS_DURATION_HRS'}), '(hours=config.STORAGE_ACCESS_DURATION_HRS)\n', (2759, 2801), False, 'import datetime\n'), ((2817, 2843), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (2841, 2843), False, 'import datetime\n'), ((7440, 7608), 'azure.batch.batch_service_client.models.VirtualMachineConfiguration', 'batch.models.VirtualMachineConfiguration', ([], {'image_reference': 'image_ref_to_use', 'container_configuration': 'container_conf', 'node_agent_sku_id': '"""batch.node.ubuntu 16.04"""'}), "(image_reference=image_ref_to_use,\n container_configuration=container_conf, node_agent_sku_id=\n 'batch.node.ubuntu 16.04')\n", (7480, 7608), True, 'import azure.batch.batch_service_client as batch\n'), ((8353, 8398), 'azure.batch.batch_service_client.models.PoolInformation', 'batch.models.PoolInformation', ([], {'pool_id': 'pool_id'}), '(pool_id=pool_id)\n', (8381, 8398), True, 'import azure.batch.batch_service_client as batch\n'), ((11633, 11656), 'collections.defaultdict', 'defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (11644, 11656), False, 'from collections import defaultdict\n'), ((13657, 13693), 'pathlib.Path', 'pathlib.Path', (['config.BATCH_DIRECTORY'], {}), '(config.BATCH_DIRECTORY)\n', (13669, 13693), False, 'import pathlib\n'), ((14258, 14294), 'pathlib.Path', 'pathlib.Path', (['config.BATCH_DIRECTORY'], {}), '(config.BATCH_DIRECTORY)\n', (14270, 14294), False, 'import pathlib\n'), ((14833, 14858), 'yaml.load', 'load', (['blob'], {'Loader': 'Loader'}), '(blob, Loader=Loader)\n', (14837, 14858), False, 'from yaml import load\n'), ((15435, 15458), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (15456, 15458), False, 'import datetime\n'), ((17832, 17892), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': 'config.STORAGE_ACCESS_DURATION_HRS'}), '(hours=config.STORAGE_ACCESS_DURATION_HRS)\n', (17850, 17892), False, 'import datetime\n'), ((18370, 18393), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (18391, 18393), False, 'import datetime\n'), ((19211, 19234), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (19232, 19234), False, 'import datetime\n'), ((20329, 20389), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': 'config.STORAGE_ACCESS_DURATION_HRS'}), '(hours=config.STORAGE_ACCESS_DURATION_HRS)\n', (20347, 20389), False, 'import datetime\n'), ((20867, 20890), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (20888, 20890), False, 'import datetime\n'), ((5615, 5641), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (5639, 5641), False, 'import datetime\n'), ((5644, 5684), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': 'duration_hours'}), '(hours=duration_hours)\n', (5662, 5684), False, 'import datetime\n'), ((11063, 11086), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11084, 11086), False, 'import datetime\n')] |
from flask import Flask, current_app
from flask import render_template
from flask import jsonify
from jieba.analyse import extract_tags
import string
from DB import chinaSQL
from DB import worldSQL
app = Flask(__name__, template_folder='../../web', static_folder='../../static')
@app.route('/', methods=["get", "post"])
def hello_world():
return render_template("china.html")
@app.route('/china', methods=["get", "post"])
def china():
return render_template("china.html")
@app.route('/world', methods=["get", "post"])
def world():
return render_template("world.html")
@app.route('/favicon.ico')
def favicon():
return current_app.send_static_file('image/favicon-32x32-sun.ico')
@app.route("/time")
def time():
data = chinaSQL.time()
return str(data[0])
@app.route("/chinaEightNumber")
def chinaEightNumber():
data = chinaSQL.chinaEightNumber()
return jsonify({"confirmTotal": data[0],
"healTotal": data[1],
"deadTotal": data[2],
"nowConfirmTotal": data[3],
"suspectTotal": data[4],
"nowSevereTotal": data[5],
"importedCaseTotal": data[6],
"noInfectTotal": data[7],
"confirmAdd": data[8],
"healAdd": data[9],
"deadAdd": data[10],
"nowConfirmAdd": data[11],
"suspectAdd": data[12],
"nowSevereAdd": data[13],
"importedCaseAdd": data[14],
"noInfectAdd": data[15]
})
@app.route('/chinaMap', methods=['GET'])
def chinaMap():
data = chinaSQL.chinaMap()
confirmToday, nowConfirmTotal, confirmTotal, healTotal, deadTotal = [], [], [], [], []
for a, b, c, d, e, f in data:
confirmToday.append({"name": a, "value": b})
nowConfirmTotal.append({"name": a, "value": c})
confirmTotal.append({"name": a, "value": d})
healTotal.append({"name": a, "value": e})
deadTotal.append({"name": a, "value": f})
return jsonify({"confirmToday": confirmToday, "nowConfirmTotal": nowConfirmTotal,
"confirmTotal": confirmTotal, "healTotal": healTotal, "deadTotal": deadTotal})
@app.route('/chinaProvinceMap', methods=['GET'])
def chinaProvinceMap():
data = chinaSQL.chinaProvinceMap()
confirmToday, nowConfirmTotal, confirmTotal, healTotal, deadTotal = [], [], [], [], []
for a, b, c, d, e, f in data:
confirmToday.append({"name": a + "市", "value": b})
nowConfirmTotal.append({"name": a + "市", "value": c})
confirmTotal.append({"name": a + "市", "value": d})
healTotal.append({"name": a + "市", "value": e})
deadTotal.append({"name": a + "市", "value": f})
return jsonify({"confirmToday": confirmToday, "nowConfirmTotal": nowConfirmTotal,
"confirmTotal": confirmTotal, "healTotal": healTotal, "deadTotal": deadTotal})
@app.route("/nationalTotal")
def nationalTotal():
data = chinaSQL.nationalTotal()
day, \
confirmChinaDayList, \
healChinaDayList, \
deadChinaDayList, \
importedCaseChinaDayList = [], [], [], [], []
for a, b, c, d, e in data:
day.append(a.strftime("%m-%d"))
confirmChinaDayList.append(b)
healChinaDayList.append(c)
deadChinaDayList.append(d)
importedCaseChinaDayList.append(e)
return jsonify({"day": day,
"confirmChinaDayList": confirmChinaDayList,
"healChinaDayList": healChinaDayList,
"deadChinaDayList": deadChinaDayList,
"importedCaseChinaDayList": importedCaseChinaDayList
})
@app.route("/dailyAdditionsNationwide")
def dailyAdditionsNationwide():
data = chinaSQL.dailyAdditionsNationwide()
day, \
confirmChinaDayAddList, \
healChinaDayAddList, \
deadChinaDayAddList, \
importedCaseChinaDayAddList = [], [], [], [], []
for a, b, c, d, e in data[7:]:
day.append(a.strftime("%m-%d"))
confirmChinaDayAddList.append(b)
healChinaDayAddList.append(c)
deadChinaDayAddList.append(d)
importedCaseChinaDayAddList.append(e)
return jsonify({"day": day,
"confirmChinaDayAddList": confirmChinaDayAddList,
"healChinaDayAddList": healChinaDayAddList,
"deadChinaDayAddList": deadChinaDayAddList,
"importedCaseChinaDayAddList": importedCaseChinaDayAddList
})
@app.route("/dailyCasesNationwide")
def dailyCasesNationwide():
data = chinaSQL.dailyCasesNationwide()
day, \
suspectChinaDayList, \
noInfectChinaDayList, \
nowConfirmChinaDayList, \
nowSevereChinaDayList = [], [], [], [], []
for a, b, c, d, e in data[7:]:
day.append(a.strftime("%m-%d"))
suspectChinaDayList.append(b)
noInfectChinaDayList.append(c)
nowConfirmChinaDayList.append(d)
nowSevereChinaDayList.append(e)
return jsonify({"day": day,
"suspectChinaDayList": suspectChinaDayList,
"noInfectChinaDayList": noInfectChinaDayList,
"nowConfirmChinaDayList": nowConfirmChinaDayList,
"nowSevereChinaDayList": nowSevereChinaDayList
})
@app.route("/nationalCumulativeCureMortalityRate")
def nationalCumulativeCureMortalityRate():
data = chinaSQL.nationalCumulativeCureMortalityRate()
day, \
healRateChinaDayList, \
deadRateChinaDayList = [], [], []
for a, b, c in data[7:]:
day.append(a.strftime("%m-%d"))
healRateChinaDayList.append(b)
deadRateChinaDayList.append(c)
return jsonify({"day": day,
"healRateChinaDayList": healRateChinaDayList,
"deadRateChinaDayList": deadRateChinaDayList
})
@app.route("/detailedDataByProvince")
def detailedDataByProvince():
data = chinaSQL.detailedDataByProvince()
provinceName, \
confirmTotal, \
healTotal, \
deadTotal, \
healRateTotal, \
deadRateTotal = [], [], [], [], [], []
for a, b, c, d, e, f in data:
provinceName.append(a)
confirmTotal.append(b)
healTotal.append(c)
deadTotal.append(d)
healRateTotal.append(e)
deadRateTotal.append(f)
return jsonify({"provinceName": provinceName,
"confirmTotal": confirmTotal,
"healTotal": healTotal,
"deadTotal": deadTotal,
"healRateTotal": healRateTotal,
"deadRateTotal": deadRateTotal
})
@app.route("/cumulativeNumberOfConfirmedCasesInAllProvinces")
def cumulativeNumberOfConfirmedCasesInAllProvinces():
data = chinaSQL.cumulativeNumberOfConfirmedCasesInAllProvinces()
provincedetails = []
for provinceName, confirmTotal in data:
provincedetails.append({"name": provinceName, "value": confirmTotal})
return jsonify({"data": provincedetails})
@app.route("/currentConfirmedDataInAllProvinces")
def currentConfirmedDataInAllProvinces():
data = chinaSQL.currentConfirmedDataInAllProvinces()
provinceName, \
nowConfirmTotal, \
confirmToday, \
suspectTotal = [], [], [], []
for a, b, c, d in data:
provinceName.append(a)
nowConfirmTotal.append(b)
confirmToday.append(c)
suspectTotal.append(d)
return jsonify({"provinceName": provinceName,
"nowConfirmTotal": nowConfirmTotal,
"confirmToday": confirmToday,
"suspectTotal": suspectTotal
})
@app.route("/existingDiagnosticClassificationInChina")
def existingDiagnosticClassificationInChina():
data = chinaSQL.existingDiagnosticClassificationInChina()
nowconfirmstatis = []
nowconfirmstatis.append({"name": '港澳台现存确诊', "value": data[0][0]})
nowconfirmstatis.append({"name": '境外输入现存确诊', "value": data[0][1]})
nowconfirmstatis.append({"name": '31省本土现有确诊', "value": data[0][2]})
return jsonify({"data": nowconfirmstatis})
@app.route("/totalNumberOfOverseasImportsFromTop10Provinces")
def totalNumberOfOverseasImportsFromTop10Provinces():
data = chinaSQL.totalNumberOfOverseasImportsFromTop10Provinces()
importstatis = []
for province, importedCase in data:
importstatis.append({"name": province, "value": importedCase})
return jsonify({"data": importstatis})
@app.route("/eachProvinceComparesYesterdayData")
def eachProvinceComparesYesterdayData():
data = chinaSQL.eachProvinceComparesYesterdayData()
province, \
nowConfirm, \
confirmAdd, \
heal, \
dead, \
zero = [], [], [], [], [], []
for a, b, c, d, e, f in data:
province.append(a)
nowConfirm.append(b)
confirmAdd.append(c)
heal.append(d)
dead.append(e)
zero.append(f)
return jsonify({"province": province,
"nowConfirm": nowConfirm,
"confirmAdd": confirmAdd,
"heal": heal,
"dead": dead,
"zero": zero
})
@app.route("/hubeiNonHubeiNationalCumulativeData")
def hubeiNonHubeiNationalCumulativeData():
data = chinaSQL.hubeiNonHubeiNationalCumulativeData()
day, \
hubeiNowConfirm, \
hubeiHeal, \
hubeiDead, \
notHubeiNowConfirm, \
notHubeiHeal, \
notHubeiDead, \
countryNowConfirm, \
countryHeal, \
countryDead = [], [], [], [], [], [], [], [], [], []
for a, b, c, d, e, f, g, h, i, j in data:
day.append(a.strftime("%m-%d"))
hubeiNowConfirm.append(b)
hubeiHeal.append(c)
hubeiDead.append(d)
notHubeiNowConfirm.append(e)
notHubeiHeal.append(f)
notHubeiDead.append(g)
countryNowConfirm.append(h)
countryHeal.append(i)
countryDead.append(j)
return jsonify({"day": day,
"hubeiNowConfirm": hubeiNowConfirm,
"hubeiHeal": hubeiHeal,
"hubeiDead": hubeiDead,
"notHubeiNowConfirm": notHubeiNowConfirm,
"notHubeiHeal": notHubeiHeal,
"notHubeiDead": notHubeiDead,
"countryNowConfirm": countryNowConfirm,
"countryHeal": countryHeal,
"countryDead": countryDead
})
@app.route("/hubeiNonHubeiNationalCureMortalityRate")
def hubeiNonHubeiNationalCureMortalityRate():
data = chinaSQL.hubeiNonHubeiNationalCureMortalityRate()
day, \
hubeiHealRate, \
hubeiDeadRate, \
notHubeiHealRate, \
notHubeiDeadRate, \
countryHealRate, \
countryDeadRate = [], [], [], [], [], [], []
for a, b, c, d, e, f, g in data:
day.append(a.strftime("%m-%d"))
hubeiHealRate.append(b)
hubeiDeadRate.append(c)
notHubeiHealRate.append(d)
notHubeiDeadRate.append(e)
countryHealRate.append(f)
countryDeadRate.append(g)
return jsonify({"day": day,
"hubeiHealRate": hubeiHealRate,
"hubeiDeadRate": hubeiDeadRate,
"notHubeiHealRate": notHubeiHealRate,
"notHubeiDeadRate": notHubeiDeadRate,
"countryHealRate": countryHealRate,
"countryDeadRate": countryDeadRate
})
@app.route("/hubeiNonHubeiNationalDailyNew")
def hubeiNonHubeiNationalDailyNew():
data = chinaSQL.hubeiNonHubeiNationalDailyNew()
day, \
hubei, \
notHubei, \
country = [], [], [], []
for a, b, c, d in data[7:]:
day.append(a.strftime("%m-%d"))
hubei.append(b)
notHubei.append(c)
country.append(d)
return jsonify({"day": day,
"hubei": hubei,
"notHubei": notHubei,
"country": country
})
@app.route("/wuhanNotWuhanNotHubeiNewlyConfirmed")
def wuhanNotWuhanNotHubeiNewlyConfirmed():
data = chinaSQL.wuhanNotWuhanNotHubeiNewlyConfirmed()
day, \
wuhan, \
notWuhan, \
notHubei = [], [], [], []
for a, b, c, d in data:
day.append(a.strftime("%m-%d"))
wuhan.append(b)
notWuhan.append(c)
notHubei.append(d)
return jsonify({"day": day,
"wuhan": wuhan,
"notWuhan": notWuhan,
"notHubei": notHubei
})
@app.route("/totalConfirmedTop20UrbanAreas")
def totalConfirmedTop20UrbanAreas():
data = chinaSQL.totalConfirmedTop20UrbanAreas()
cityName, \
deadRateTotal, \
healRateTotal = [], [], []
for a, b, c in data:
cityName.append(a)
deadRateTotal.append(b)
healRateTotal.append(c)
return jsonify({"cityName": cityName,
"deadRateTotal": deadRateTotal,
"healRateTotal": healRateTotal
})
@app.route("/existingConfirmedTop20UrbanAreas")
def existingConfirmedTop20UrbanAreas():
data = chinaSQL.existingConfirmedTop20UrbanAreas()
cityName, \
nowConfirmTotal, \
confirmToday, \
suspectTotal = [], [], [], []
for a, b, c, d in data:
cityName.append(a)
nowConfirmTotal.append(b)
confirmToday.append(c)
suspectTotal.append(d)
return jsonify({"cityName": cityName,
"nowConfirmTotal": nowConfirmTotal,
"confirmToday": confirmToday,
"suspectTotal": suspectTotal
})
@app.route("/urbanDataOfHubeiProvince")
def urbanDataOfHubeiProvince():
data = chinaSQL.urbanDataOfHubeiProvince()
cityName, \
confirmTotal, \
healTotal, \
deadTotal = [], [], [], []
for a, b, c, d in data:
cityName.append(a)
confirmTotal.append(b)
healTotal.append(c)
deadTotal.append(d)
return jsonify({"cityName": cityName,
"confirmTotal": confirmTotal,
"healTotal": healTotal,
"deadTotal": deadTotal
})
@app.route("/accumulativeDataExceptHubeiProvince")
def accumulativeDataExceptHubeiProvince():
data = chinaSQL.accumulativeDataExceptHubeiProvince()
cityName, \
confirmTotal, \
healTotal, \
deadTotal = [], [], [], []
for a, b, c, d in data:
cityName.append(a)
confirmTotal.append(b)
healTotal.append(c)
deadTotal.append(d)
return jsonify({"cityName": cityName,
"confirmTotal": confirmTotal,
"healTotal": healTotal,
"deadTotal": deadTotal
})
@app.route("/provincesWithFatalCasesNationwide")
def provincesWithFatalCasesNationwide():
data = chinaSQL.provincesWithFatalCasesNationwide()
provincedetails = []
provincedetails.append({"name": "无死亡病例省份数量", "value": data[0][0]})
provincedetails.append({"name": "有死亡病例省份数量", "value": data[0][1]})
return jsonify({"data": provincedetails})
@app.route("/numberOfDeathsInCities")
def numberOfDeathsInCities():
data = chinaSQL.numberOfDeathsInCities()
dataCityCount = []
dataCityCount.append({"name": "无死亡病例城市数量", "value": data[0][0]})
dataCityCount.append({"name": "有死亡病例城市数量", "value": data[0][1]})
return jsonify({"data": dataCityCount})
@app.route("/outbreakOut")
def outbreakOut():
data = chinaSQL.outbreakOut()
    d = []
    for i in data:
        # Each row holds a single text field that ends with a numeric count,
        # e.g. "<keyword text><count>"; split it into the text and the count.
        k = i[0].rstrip(string.digits)
        v = i[0][len(k):]
        # Ask jieba for the salient keywords in the text portion.
        ks = extract_tags(k)
        for j in ks:
            if not j.isdigit():
                d.append({"name": j, "value": v})
return jsonify({"kws": d})
@app.route("/worldFourNumber")
def worldFourNumber():
data = worldSQL.worldFourNumber()
return jsonify({"nowConfirm": data[0],
"confirm": data[1],
"heal": data[2],
"dead": data[3],
"nowConfirmAdd": data[4],
"confirmAdd": data[5],
"healAdd": data[6],
"deadAdd": data[7]
})
@app.route('/worldMapNoChina', methods=['GET'])
def worldMapNoChina():
data = worldSQL.worldMapNoChina()
nowConfirm, confirm, heal, dead = [], [], [], []
for a, b, c, d, e in data:
nowConfirm.append({"name": a, "value": b})
confirm.append({"name": a, "value": c})
heal.append({"name": a, "value": d})
dead.append({"name": a, "value": e})
data1 = worldSQL.worldMapChina()
nowConfirm.append({"name": "中国", "value": data1[0][0]})
confirm.append({"name": "中国", "value": data1[0][1]})
heal.append({"name": "中国", "value": data1[0][2]})
dead.append({"name": "中国", "value": data1[0][3]})
return jsonify({"nowConfirm": nowConfirm,
"confirm": confirm,
"heal": heal,
"dead": dead
})
@app.route("/globalCumulativeTrend")
def globalCumulativeTrend():
data = worldSQL.globalCumulativeTrend()
day, \
confirm, \
heal, \
dead, \
newAddConfirm = [], [], [], [], []
for a, b, c, d, e in data:
day.append(a.strftime("%m-%d"))
confirm.append(b)
heal.append(c)
dead.append(d)
newAddConfirm.append(e)
return jsonify({"day": day,
"confirm": confirm,
"heal": heal,
"dead": dead,
"newAddConfirm": newAddConfirm
})
@app.route("/globalCumulativeCureMortality")
def globalCumulativeCureMortality():
data = worldSQL.globalCumulativeCureMortality()
day, \
healRate, \
deadRate = [], [], []
for a, b, c in data:
day.append(a.strftime("%m-%d"))
healRate.append(b)
deadRate.append(c)
return jsonify({"day": day,
"healRate": healRate,
"deadRate": deadRate
})
@app.route("/foreignCumulativeDiagnosisTop10Countries")
def foreignCumulativeDiagnosisTop10Countries():
data = worldSQL.foreignCumulativeDiagnosisTop10Countries()
name, \
nowConfirm, \
confirm, \
heal, \
dead = [], [], [], [], []
for a, b, c, d, e in data:
name.append(a)
nowConfirm.append(b)
confirm.append(c)
heal.append(d)
dead.append(e)
return jsonify({"name": name,
"nowConfirm": nowConfirm,
"confirm": confirm,
"heal": heal,
"dead": dead
})
@app.route("/theTop10CountriesGrewFastestInSevenDays")
def theTop10CountriesGrewFastestInSevenDays():
data = worldSQL.theTop10CountriesGrewFastestInSevenDays()
nation, \
day7, \
day, \
rate = [], [], [], []
for a, b, c, d in data:
nation.append(a)
day7.append(b)
day.append(c)
rate.append(d)
return jsonify({"nation": nation,
"day7": day7,
"day0": day,
"rate": rate
})
@app.route("/overseasCountriesWithMoreThan10000ConfirmedCases")
def overseasCountriesWithMoreThan10000ConfirmedCases():
data = worldSQL.overseasCountriesWithMoreThan10000ConfirmedCases()
foreignlist = []
for name, confirm in data:
foreignlist.append({"name": name, "value": confirm})
return jsonify({"data": foreignlist})
@app.route("/overseasCountriesWithMoreThan10000HaveBeenConfirmedCases")
def overseasCountriesWithMoreThan10000HaveBeenConfirmedCases():
data = worldSQL.overseasCountriesWithMoreThan10000HaveBeenConfirmedCases()
foreignlist = []
for name, nowConfirm in data:
foreignlist.append({"name": name, "value": nowConfirm})
return jsonify({"data": foreignlist})
@app.route("/newCasesInTheTop10CountriesWithin24Hours")
def newCasesInTheTop10CountriesWithin24Hours():
data = worldSQL.newCasesInTheTop10CountriesWithin24Hours()
nationAddConfirm = []
for nation, addConfirm in data:
nationAddConfirm.append({"name": nation, "value": addConfirm})
return jsonify({"data": nationAddConfirm})
@app.route("/theNumberOfForeignCountriesWithConfirmedCases")
def theNumberOfForeignCountriesWithConfirmedCases():
data = worldSQL.theNumberOfForeignCountriesWithConfirmedCases()
foreignlist = []
for continent, count in data:
foreignlist.append({"name": continent, "value": count})
return jsonify({"data": foreignlist})
if __name__ == '__main__':
app.run()
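    # Development-server options (a sketch; adjust host/port to taste):
    #   app.run(host="0.0.0.0", port=5000, debug=True)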
| [
"flask.render_template",
"DB.chinaSQL.urbanDataOfHubeiProvince",
"DB.chinaSQL.existingConfirmedTop20UrbanAreas",
"DB.chinaSQL.currentConfirmedDataInAllProvinces",
"DB.worldSQL.overseasCountriesWithMoreThan10000ConfirmedCases",
"flask.Flask",
"DB.chinaSQL.hubeiNonHubeiNationalDailyNew",
"DB.chinaSQL.dailyCasesNationwide",
"DB.chinaSQL.chinaMap",
"DB.worldSQL.overseasCountriesWithMoreThan10000HaveBeenConfirmedCases",
"jieba.analyse.extract_tags",
"DB.chinaSQL.existingDiagnosticClassificationInChina",
"DB.chinaSQL.cumulativeNumberOfConfirmedCasesInAllProvinces",
"flask.jsonify",
"DB.chinaSQL.accumulativeDataExceptHubeiProvince",
"DB.chinaSQL.nationalCumulativeCureMortalityRate",
"DB.chinaSQL.totalNumberOfOverseasImportsFromTop10Provinces",
"DB.worldSQL.foreignCumulativeDiagnosisTop10Countries",
"DB.chinaSQL.numberOfDeathsInCities",
"DB.worldSQL.worldFourNumber",
"DB.worldSQL.globalCumulativeTrend",
"DB.chinaSQL.totalConfirmedTop20UrbanAreas",
"DB.chinaSQL.eachProvinceComparesYesterdayData",
"DB.chinaSQL.outbreakOut",
"DB.chinaSQL.chinaEightNumber",
"DB.chinaSQL.chinaProvinceMap",
"DB.chinaSQL.hubeiNonHubeiNationalCureMortalityRate",
"DB.worldSQL.newCasesInTheTop10CountriesWithin24Hours",
"DB.worldSQL.worldMapChina",
"DB.chinaSQL.detailedDataByProvince",
"flask.current_app.send_static_file",
"DB.worldSQL.theTop10CountriesGrewFastestInSevenDays",
"DB.chinaSQL.nationalTotal",
"DB.worldSQL.worldMapNoChina",
"DB.worldSQL.theNumberOfForeignCountriesWithConfirmedCases",
"DB.chinaSQL.dailyAdditionsNationwide",
"DB.chinaSQL.time",
"DB.worldSQL.globalCumulativeCureMortality",
"DB.chinaSQL.wuhanNotWuhanNotHubeiNewlyConfirmed",
"DB.chinaSQL.hubeiNonHubeiNationalCumulativeData",
"DB.chinaSQL.provincesWithFatalCasesNationwide"
]
| [((205, 279), 'flask.Flask', 'Flask', (['__name__'], {'template_folder': '"""../../web"""', 'static_folder': '"""../../static"""'}), "(__name__, template_folder='../../web', static_folder='../../static')\n", (210, 279), False, 'from flask import Flask, current_app\n'), ((353, 382), 'flask.render_template', 'render_template', (['"""china.html"""'], {}), "('china.html')\n", (368, 382), False, 'from flask import render_template\n'), ((455, 484), 'flask.render_template', 'render_template', (['"""china.html"""'], {}), "('china.html')\n", (470, 484), False, 'from flask import render_template\n'), ((557, 586), 'flask.render_template', 'render_template', (['"""world.html"""'], {}), "('world.html')\n", (572, 586), False, 'from flask import render_template\n'), ((642, 701), 'flask.current_app.send_static_file', 'current_app.send_static_file', (['"""image/favicon-32x32-sun.ico"""'], {}), "('image/favicon-32x32-sun.ico')\n", (670, 701), False, 'from flask import Flask, current_app\n'), ((747, 762), 'DB.chinaSQL.time', 'chinaSQL.time', ([], {}), '()\n', (760, 762), False, 'from DB import chinaSQL\n'), ((856, 883), 'DB.chinaSQL.chinaEightNumber', 'chinaSQL.chinaEightNumber', ([], {}), '()\n', (881, 883), False, 'from DB import chinaSQL\n'), ((895, 1329), 'flask.jsonify', 'jsonify', (["{'confirmTotal': data[0], 'healTotal': data[1], 'deadTotal': data[2],\n 'nowConfirmTotal': data[3], 'suspectTotal': data[4], 'nowSevereTotal':\n data[5], 'importedCaseTotal': data[6], 'noInfectTotal': data[7],\n 'confirmAdd': data[8], 'healAdd': data[9], 'deadAdd': data[10],\n 'nowConfirmAdd': data[11], 'suspectAdd': data[12], 'nowSevereAdd': data\n [13], 'importedCaseAdd': data[14], 'noInfectAdd': data[15]}"], {}), "({'confirmTotal': data[0], 'healTotal': data[1], 'deadTotal': data[2\n ], 'nowConfirmTotal': data[3], 'suspectTotal': data[4],\n 'nowSevereTotal': data[5], 'importedCaseTotal': data[6],\n 'noInfectTotal': data[7], 'confirmAdd': data[8], 'healAdd': data[9],\n 'deadAdd': data[10], 'nowConfirmAdd': data[11], 'suspectAdd': data[12],\n 'nowSevereAdd': data[13], 'importedCaseAdd': data[14], 'noInfectAdd':\n data[15]})\n", (902, 1329), False, 'from flask import jsonify\n'), ((1696, 1715), 'DB.chinaSQL.chinaMap', 'chinaSQL.chinaMap', ([], {}), '()\n', (1713, 1715), False, 'from DB import chinaSQL\n'), ((2114, 2275), 'flask.jsonify', 'jsonify', (["{'confirmToday': confirmToday, 'nowConfirmTotal': nowConfirmTotal,\n 'confirmTotal': confirmTotal, 'healTotal': healTotal, 'deadTotal':\n deadTotal}"], {}), "({'confirmToday': confirmToday, 'nowConfirmTotal': nowConfirmTotal,\n 'confirmTotal': confirmTotal, 'healTotal': healTotal, 'deadTotal':\n deadTotal})\n", (2121, 2275), False, 'from flask import jsonify\n'), ((2374, 2401), 'DB.chinaSQL.chinaProvinceMap', 'chinaSQL.chinaProvinceMap', ([], {}), '()\n', (2399, 2401), False, 'from DB import chinaSQL\n'), ((2830, 2991), 'flask.jsonify', 'jsonify', (["{'confirmToday': confirmToday, 'nowConfirmTotal': nowConfirmTotal,\n 'confirmTotal': confirmTotal, 'healTotal': healTotal, 'deadTotal':\n deadTotal}"], {}), "({'confirmToday': confirmToday, 'nowConfirmTotal': nowConfirmTotal,\n 'confirmTotal': confirmTotal, 'healTotal': healTotal, 'deadTotal':\n deadTotal})\n", (2837, 2991), False, 'from flask import jsonify\n'), ((3067, 3091), 'DB.chinaSQL.nationalTotal', 'chinaSQL.nationalTotal', ([], {}), '()\n', (3089, 3091), False, 'from DB import chinaSQL\n'), ((3461, 3664), 'flask.jsonify', 'jsonify', (["{'day': day, 'confirmChinaDayList': confirmChinaDayList, 'healChinaDayList':\n 
healChinaDayList, 'deadChinaDayList': deadChinaDayList,\n 'importedCaseChinaDayList': importedCaseChinaDayList}"], {}), "({'day': day, 'confirmChinaDayList': confirmChinaDayList,\n 'healChinaDayList': healChinaDayList, 'deadChinaDayList':\n deadChinaDayList, 'importedCaseChinaDayList': importedCaseChinaDayList})\n", (3468, 3664), False, 'from flask import jsonify\n'), ((3843, 3878), 'DB.chinaSQL.dailyAdditionsNationwide', 'chinaSQL.dailyAdditionsNationwide', ([], {}), '()\n', (3876, 3878), False, 'from DB import chinaSQL\n'), ((4276, 4507), 'flask.jsonify', 'jsonify', (["{'day': day, 'confirmChinaDayAddList': confirmChinaDayAddList,\n 'healChinaDayAddList': healChinaDayAddList, 'deadChinaDayAddList':\n deadChinaDayAddList, 'importedCaseChinaDayAddList':\n importedCaseChinaDayAddList}"], {}), "({'day': day, 'confirmChinaDayAddList': confirmChinaDayAddList,\n 'healChinaDayAddList': healChinaDayAddList, 'deadChinaDayAddList':\n deadChinaDayAddList, 'importedCaseChinaDayAddList':\n importedCaseChinaDayAddList})\n", (4283, 4507), False, 'from flask import jsonify\n'), ((4674, 4705), 'DB.chinaSQL.dailyCasesNationwide', 'chinaSQL.dailyCasesNationwide', ([], {}), '()\n', (4703, 4705), False, 'from DB import chinaSQL\n'), ((5093, 5310), 'flask.jsonify', 'jsonify', (["{'day': day, 'suspectChinaDayList': suspectChinaDayList,\n 'noInfectChinaDayList': noInfectChinaDayList, 'nowConfirmChinaDayList':\n nowConfirmChinaDayList, 'nowSevereChinaDayList': nowSevereChinaDayList}"], {}), "({'day': day, 'suspectChinaDayList': suspectChinaDayList,\n 'noInfectChinaDayList': noInfectChinaDayList, 'nowConfirmChinaDayList':\n nowConfirmChinaDayList, 'nowSevereChinaDayList': nowSevereChinaDayList})\n", (5100, 5310), False, 'from flask import jsonify\n'), ((5511, 5557), 'DB.chinaSQL.nationalCumulativeCureMortalityRate', 'chinaSQL.nationalCumulativeCureMortalityRate', ([], {}), '()\n', (5555, 5557), False, 'from DB import chinaSQL\n'), ((5793, 5910), 'flask.jsonify', 'jsonify', (["{'day': day, 'healRateChinaDayList': healRateChinaDayList,\n 'deadRateChinaDayList': deadRateChinaDayList}"], {}), "({'day': day, 'healRateChinaDayList': healRateChinaDayList,\n 'deadRateChinaDayList': deadRateChinaDayList})\n", (5800, 5910), False, 'from flask import jsonify\n'), ((6049, 6082), 'DB.chinaSQL.detailedDataByProvince', 'chinaSQL.detailedDataByProvince', ([], {}), '()\n', (6080, 6082), False, 'from DB import chinaSQL\n'), ((6448, 6637), 'flask.jsonify', 'jsonify', (["{'provinceName': provinceName, 'confirmTotal': confirmTotal, 'healTotal':\n healTotal, 'deadTotal': deadTotal, 'healRateTotal': healRateTotal,\n 'deadRateTotal': deadRateTotal}"], {}), "({'provinceName': provinceName, 'confirmTotal': confirmTotal,\n 'healTotal': healTotal, 'deadTotal': deadTotal, 'healRateTotal':\n healRateTotal, 'deadRateTotal': deadRateTotal})\n", (6455, 6637), False, 'from flask import jsonify\n'), ((6880, 6937), 'DB.chinaSQL.cumulativeNumberOfConfirmedCasesInAllProvinces', 'chinaSQL.cumulativeNumberOfConfirmedCasesInAllProvinces', ([], {}), '()\n', (6935, 6937), False, 'from DB import chinaSQL\n'), ((7096, 7130), 'flask.jsonify', 'jsonify', (["{'data': provincedetails}"], {}), "({'data': provincedetails})\n", (7103, 7130), False, 'from flask import jsonify\n'), ((7236, 7281), 'DB.chinaSQL.currentConfirmedDataInAllProvinces', 'chinaSQL.currentConfirmedDataInAllProvinces', ([], {}), '()\n', (7279, 7281), False, 'from DB import chinaSQL\n'), ((7545, 7684), 'flask.jsonify', 'jsonify', (["{'provinceName': provinceName, 'nowConfirmTotal': 
nowConfirmTotal,\n 'confirmToday': confirmToday, 'suspectTotal': suspectTotal}"], {}), "({'provinceName': provinceName, 'nowConfirmTotal': nowConfirmTotal,\n 'confirmToday': confirmToday, 'suspectTotal': suspectTotal})\n", (7552, 7684), False, 'from flask import jsonify\n'), ((7877, 7927), 'DB.chinaSQL.existingDiagnosticClassificationInChina', 'chinaSQL.existingDiagnosticClassificationInChina', ([], {}), '()\n', (7925, 7927), False, 'from DB import chinaSQL\n'), ((8178, 8213), 'flask.jsonify', 'jsonify', (["{'data': nowconfirmstatis}"], {}), "({'data': nowconfirmstatis})\n", (8185, 8213), False, 'from flask import jsonify\n'), ((8343, 8400), 'DB.chinaSQL.totalNumberOfOverseasImportsFromTop10Provinces', 'chinaSQL.totalNumberOfOverseasImportsFromTop10Provinces', ([], {}), '()\n', (8398, 8400), False, 'from DB import chinaSQL\n'), ((8546, 8577), 'flask.jsonify', 'jsonify', (["{'data': importstatis}"], {}), "({'data': importstatis})\n", (8553, 8577), False, 'from flask import jsonify\n'), ((8681, 8725), 'DB.chinaSQL.eachProvinceComparesYesterdayData', 'chinaSQL.eachProvinceComparesYesterdayData', ([], {}), '()\n', (8723, 8725), False, 'from DB import chinaSQL\n'), ((9035, 9164), 'flask.jsonify', 'jsonify', (["{'province': province, 'nowConfirm': nowConfirm, 'confirmAdd': confirmAdd,\n 'heal': heal, 'dead': dead, 'zero': zero}"], {}), "({'province': province, 'nowConfirm': nowConfirm, 'confirmAdd':\n confirmAdd, 'heal': heal, 'dead': dead, 'zero': zero})\n", (9042, 9164), False, 'from flask import jsonify\n'), ((9389, 9435), 'DB.chinaSQL.hubeiNonHubeiNationalCumulativeData', 'chinaSQL.hubeiNonHubeiNationalCumulativeData', ([], {}), '()\n', (9433, 9435), False, 'from DB import chinaSQL\n'), ((10053, 10372), 'flask.jsonify', 'jsonify', (["{'day': day, 'hubeiNowConfirm': hubeiNowConfirm, 'hubeiHeal': hubeiHeal,\n 'hubeiDead': hubeiDead, 'notHubeiNowConfirm': notHubeiNowConfirm,\n 'notHubeiHeal': notHubeiHeal, 'notHubeiDead': notHubeiDead,\n 'countryNowConfirm': countryNowConfirm, 'countryHeal': countryHeal,\n 'countryDead': countryDead}"], {}), "({'day': day, 'hubeiNowConfirm': hubeiNowConfirm, 'hubeiHeal':\n hubeiHeal, 'hubeiDead': hubeiDead, 'notHubeiNowConfirm':\n notHubeiNowConfirm, 'notHubeiHeal': notHubeiHeal, 'notHubeiDead':\n notHubeiDead, 'countryNowConfirm': countryNowConfirm, 'countryHeal':\n countryHeal, 'countryDead': countryDead})\n", (10060, 10372), False, 'from flask import jsonify\n'), ((10671, 10720), 'DB.chinaSQL.hubeiNonHubeiNationalCureMortalityRate', 'chinaSQL.hubeiNonHubeiNationalCureMortalityRate', ([], {}), '()\n', (10718, 10720), False, 'from DB import chinaSQL\n'), ((11184, 11429), 'flask.jsonify', 'jsonify', (["{'day': day, 'hubeiHealRate': hubeiHealRate, 'hubeiDeadRate': hubeiDeadRate,\n 'notHubeiHealRate': notHubeiHealRate, 'notHubeiDeadRate':\n notHubeiDeadRate, 'countryHealRate': countryHealRate, 'countryDeadRate':\n countryDeadRate}"], {}), "({'day': day, 'hubeiHealRate': hubeiHealRate, 'hubeiDeadRate':\n hubeiDeadRate, 'notHubeiHealRate': notHubeiHealRate, 'notHubeiDeadRate':\n notHubeiDeadRate, 'countryHealRate': countryHealRate, 'countryDeadRate':\n countryDeadRate})\n", (11191, 11429), False, 'from flask import jsonify\n'), ((11654, 11694), 'DB.chinaSQL.hubeiNonHubeiNationalDailyNew', 'chinaSQL.hubeiNonHubeiNationalDailyNew', ([], {}), '()\n', (11692, 11694), False, 'from DB import chinaSQL\n'), ((11924, 12003), 'flask.jsonify', 'jsonify', (["{'day': day, 'hubei': hubei, 'notHubei': notHubei, 'country': country}"], {}), "({'day': day, 'hubei': hubei, 
'notHubei': notHubei, 'country': country})\n", (11931, 12003), False, 'from flask import jsonify\n'), ((12192, 12238), 'DB.chinaSQL.wuhanNotWuhanNotHubeiNewlyConfirmed', 'chinaSQL.wuhanNotWuhanNotHubeiNewlyConfirmed', ([], {}), '()\n', (12236, 12238), False, 'from DB import chinaSQL\n'), ((12466, 12551), 'flask.jsonify', 'jsonify', (["{'day': day, 'wuhan': wuhan, 'notWuhan': notWuhan, 'notHubei': notHubei}"], {}), "({'day': day, 'wuhan': wuhan, 'notWuhan': notWuhan, 'notHubei':\n notHubei})\n", (12473, 12551), False, 'from flask import jsonify\n'), ((12724, 12764), 'DB.chinaSQL.totalConfirmedTop20UrbanAreas', 'chinaSQL.totalConfirmedTop20UrbanAreas', ([], {}), '()\n', (12762, 12764), False, 'from DB import chinaSQL\n'), ((12960, 13059), 'flask.jsonify', 'jsonify', (["{'cityName': cityName, 'deadRateTotal': deadRateTotal, 'healRateTotal':\n healRateTotal}"], {}), "({'cityName': cityName, 'deadRateTotal': deadRateTotal,\n 'healRateTotal': healRateTotal})\n", (12967, 13059), False, 'from flask import jsonify\n'), ((13218, 13261), 'DB.chinaSQL.existingConfirmedTop20UrbanAreas', 'chinaSQL.existingConfirmedTop20UrbanAreas', ([], {}), '()\n', (13259, 13261), False, 'from DB import chinaSQL\n'), ((13517, 13648), 'flask.jsonify', 'jsonify', (["{'cityName': cityName, 'nowConfirmTotal': nowConfirmTotal, 'confirmToday':\n confirmToday, 'suspectTotal': suspectTotal}"], {}), "({'cityName': cityName, 'nowConfirmTotal': nowConfirmTotal,\n 'confirmToday': confirmToday, 'suspectTotal': suspectTotal})\n", (13524, 13648), False, 'from flask import jsonify\n'), ((13811, 13846), 'DB.chinaSQL.urbanDataOfHubeiProvince', 'chinaSQL.urbanDataOfHubeiProvince', ([], {}), '()\n', (13844, 13846), False, 'from DB import chinaSQL\n'), ((14084, 14197), 'flask.jsonify', 'jsonify', (["{'cityName': cityName, 'confirmTotal': confirmTotal, 'healTotal': healTotal,\n 'deadTotal': deadTotal}"], {}), "({'cityName': cityName, 'confirmTotal': confirmTotal, 'healTotal':\n healTotal, 'deadTotal': deadTotal})\n", (14091, 14197), False, 'from flask import jsonify\n'), ((14382, 14428), 'DB.chinaSQL.accumulativeDataExceptHubeiProvince', 'chinaSQL.accumulativeDataExceptHubeiProvince', ([], {}), '()\n', (14426, 14428), False, 'from DB import chinaSQL\n'), ((14666, 14779), 'flask.jsonify', 'jsonify', (["{'cityName': cityName, 'confirmTotal': confirmTotal, 'healTotal': healTotal,\n 'deadTotal': deadTotal}"], {}), "({'cityName': cityName, 'confirmTotal': confirmTotal, 'healTotal':\n healTotal, 'deadTotal': deadTotal})\n", (14673, 14779), False, 'from flask import jsonify\n'), ((14960, 15004), 'DB.chinaSQL.provincesWithFatalCasesNationwide', 'chinaSQL.provincesWithFatalCasesNationwide', ([], {}), '()\n', (15002, 15004), False, 'from DB import chinaSQL\n'), ((15183, 15217), 'flask.jsonify', 'jsonify', (["{'data': provincedetails}"], {}), "({'data': provincedetails})\n", (15190, 15217), False, 'from flask import jsonify\n'), ((15299, 15332), 'DB.chinaSQL.numberOfDeathsInCities', 'chinaSQL.numberOfDeathsInCities', ([], {}), '()\n', (15330, 15332), False, 'from DB import chinaSQL\n'), ((15505, 15537), 'flask.jsonify', 'jsonify', (["{'data': dataCityCount}"], {}), "({'data': dataCityCount})\n", (15512, 15537), False, 'from flask import jsonify\n'), ((15597, 15619), 'DB.chinaSQL.outbreakOut', 'chinaSQL.outbreakOut', ([], {}), '()\n', (15617, 15619), False, 'from DB import chinaSQL\n'), ((15858, 15877), 'flask.jsonify', 'jsonify', (["{'kws': d}"], {}), "({'kws': d})\n", (15865, 15877), False, 'from flask import jsonify\n'), ((15945, 15971), 
'DB.worldSQL.worldFourNumber', 'worldSQL.worldFourNumber', ([], {}), '()\n', (15969, 15971), False, 'from DB import worldSQL\n'), ((15983, 16166), 'flask.jsonify', 'jsonify', (["{'nowConfirm': data[0], 'confirm': data[1], 'heal': data[2], 'dead': data[3\n ], 'nowConfirmAdd': data[4], 'confirmAdd': data[5], 'healAdd': data[6],\n 'deadAdd': data[7]}"], {}), "({'nowConfirm': data[0], 'confirm': data[1], 'heal': data[2], 'dead':\n data[3], 'nowConfirmAdd': data[4], 'confirmAdd': data[5], 'healAdd':\n data[6], 'deadAdd': data[7]})\n", (15990, 16166), False, 'from flask import jsonify\n'), ((16404, 16430), 'DB.worldSQL.worldMapNoChina', 'worldSQL.worldMapNoChina', ([], {}), '()\n', (16428, 16430), False, 'from DB import worldSQL\n'), ((16716, 16740), 'DB.worldSQL.worldMapChina', 'worldSQL.worldMapChina', ([], {}), '()\n', (16738, 16740), False, 'from DB import worldSQL\n'), ((16977, 17064), 'flask.jsonify', 'jsonify', (["{'nowConfirm': nowConfirm, 'confirm': confirm, 'heal': heal, 'dead': dead}"], {}), "({'nowConfirm': nowConfirm, 'confirm': confirm, 'heal': heal, 'dead':\n dead})\n", (16984, 17064), False, 'from flask import jsonify\n'), ((17221, 17253), 'DB.worldSQL.globalCumulativeTrend', 'worldSQL.globalCumulativeTrend', ([], {}), '()\n', (17251, 17253), False, 'from DB import worldSQL\n'), ((17529, 17634), 'flask.jsonify', 'jsonify', (["{'day': day, 'confirm': confirm, 'heal': heal, 'dead': dead,\n 'newAddConfirm': newAddConfirm}"], {}), "({'day': day, 'confirm': confirm, 'heal': heal, 'dead': dead,\n 'newAddConfirm': newAddConfirm})\n", (17536, 17634), False, 'from flask import jsonify\n'), ((17827, 17867), 'DB.worldSQL.globalCumulativeCureMortality', 'worldSQL.globalCumulativeCureMortality', ([], {}), '()\n', (17865, 17867), False, 'from DB import worldSQL\n'), ((18051, 18116), 'flask.jsonify', 'jsonify', (["{'day': day, 'healRate': healRate, 'deadRate': deadRate}"], {}), "({'day': day, 'healRate': healRate, 'deadRate': deadRate})\n", (18058, 18116), False, 'from flask import jsonify\n'), ((18295, 18346), 'DB.worldSQL.foreignCumulativeDiagnosisTop10Countries', 'worldSQL.foreignCumulativeDiagnosisTop10Countries', ([], {}), '()\n', (18344, 18346), False, 'from DB import worldSQL\n'), ((18600, 18701), 'flask.jsonify', 'jsonify', (["{'name': name, 'nowConfirm': nowConfirm, 'confirm': confirm, 'heal': heal,\n 'dead': dead}"], {}), "({'name': name, 'nowConfirm': nowConfirm, 'confirm': confirm, 'heal':\n heal, 'dead': dead})\n", (18607, 18701), False, 'from flask import jsonify\n'), ((18914, 18964), 'DB.worldSQL.theTop10CountriesGrewFastestInSevenDays', 'worldSQL.theTop10CountriesGrewFastestInSevenDays', ([], {}), '()\n', (18962, 18964), False, 'from DB import worldSQL\n'), ((19160, 19228), 'flask.jsonify', 'jsonify', (["{'nation': nation, 'day7': day7, 'day0': day, 'rate': rate}"], {}), "({'nation': nation, 'day7': day7, 'day0': day, 'rate': rate})\n", (19167, 19228), False, 'from flask import jsonify\n'), ((19443, 19502), 'DB.worldSQL.overseasCountriesWithMoreThan10000ConfirmedCases', 'worldSQL.overseasCountriesWithMoreThan10000ConfirmedCases', ([], {}), '()\n', (19500, 19502), False, 'from DB import worldSQL\n'), ((19627, 19657), 'flask.jsonify', 'jsonify', (["{'data': foreignlist}"], {}), "({'data': foreignlist})\n", (19634, 19657), False, 'from flask import jsonify\n'), ((19807, 19874), 'DB.worldSQL.overseasCountriesWithMoreThan10000HaveBeenConfirmedCases', 'worldSQL.overseasCountriesWithMoreThan10000HaveBeenConfirmedCases', ([], {}), '()\n', (19872, 19874), False, 'from DB import 
worldSQL\n'), ((20005, 20035), 'flask.jsonify', 'jsonify', (["{'data': foreignlist}"], {}), "({'data': foreignlist})\n", (20012, 20035), False, 'from flask import jsonify\n'), ((20153, 20204), 'DB.worldSQL.newCasesInTheTop10CountriesWithin24Hours', 'worldSQL.newCasesInTheTop10CountriesWithin24Hours', ([], {}), '()\n', (20202, 20204), False, 'from DB import worldSQL\n'), ((20349, 20384), 'flask.jsonify', 'jsonify', (["{'data': nationAddConfirm}"], {}), "({'data': nationAddConfirm})\n", (20356, 20384), False, 'from flask import jsonify\n'), ((20512, 20568), 'DB.worldSQL.theNumberOfForeignCountriesWithConfirmedCases', 'worldSQL.theNumberOfForeignCountriesWithConfirmedCases', ([], {}), '()\n', (20566, 20568), False, 'from DB import worldSQL\n'), ((20699, 20729), 'flask.jsonify', 'jsonify', (["{'data': foreignlist}"], {}), "({'data': foreignlist})\n", (20706, 20729), False, 'from flask import jsonify\n'), ((15728, 15743), 'jieba.analyse.extract_tags', 'extract_tags', (['k'], {}), '(k)\n', (15740, 15743), False, 'from jieba.analyse import extract_tags\n')] |
# Generated by Django 2.0.2 on 2018-02-17 10:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('T2API', '0007_apiuser_deviceuser'),
]
operations = [
migrations.AddField(
model_name='product',
name='weight',
field=models.IntegerField(default=None, null=True),
),
]
| [
"django.db.models.IntegerField"
]
| [((333, 377), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)'}), '(default=None, null=True)\n', (352, 377), False, 'from django.db import migrations, models\n')] |
#!/usr/bin/env python3
# Simple and dumb script to send a message to the #podman IRC channel on frenode
# Based on example from: https://pythonspot.com/building-an-irc-bot/
import os
import time
import random
import errno
import socket
import sys
class IRC:
response_timeout = 10 # seconds
irc = socket.socket()
def __init__(self, server, nickname, channel):
self.server = server
self.nickname = nickname
self.channel = channel
self.irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def _send(self, cmdstr):
self.irc.send(bytes(cmdstr + '\r\n', 'utf-8'))
def message(self, msg):
data = 'PRIVMSG {0} :{1}\r\n'.format(self.channel, msg)
print(data)
self._send(data)
@staticmethod
def fix_newlines(bufr):
return bufr.replace('\\r\\n', '\n')
def _required_response(self, needle, haystack):
start = time.time()
end = start + self.response_timeout
while time.time() < end:
if haystack.find(needle) != -1:
return (False, haystack)
time.sleep(0.1)
try:
haystack += str(self.irc.recv(4096, socket.MSG_DONTWAIT))
except socket.error as serr:
if serr.errno == errno.EWOULDBLOCK:
continue
raise # can't handle this
return (True, haystack) # Error
def connect(self, username, password):
# This is ugly as sin, but seems to be a working send/expect sequence
print("connecting to: {0}".format(self.server))
self.irc.connect((self.server, 6667)) #connects to the server
self._send("USER {0} {0} {0} :I am {0}".format(self.nickname))
self._send("NICK {0}".format(self.nickname))
err, haystack = self._required_response('End of /MOTD command.'
''.format(self.nickname), "")
if err:
print(self.fix_newlines(haystack))
print("Error connecting to {0}".format(self.server))
return True
print("Logging in as {0}".format(username))
self._send("PRIVMSG NickServ :IDENTIFY {0} {1}".format(username, password))
err, _ = self._required_response("You are now identified for", "")
if err:
print("Error logging in to {0} as {1}".format(self.server, username))
return True
print("Joining {0}".format(self.channel))
self._send("JOIN {0}".format(self.channel))
err, haystack = self._required_response("{0} {1} :End of /NAMES list."
"".format(self.nickname, self.channel),
haystack)
print(self.fix_newlines(haystack))
if err:
print("Error joining {0}".format(self.channel))
return True
return False
def quit(self):
print("Quitting")
self._send("QUIT :my work is done here")
self.irc.close()
if len(sys.argv) < 3:
print("Error: Must pass desired nick and message as parameters")
else:
irc = IRC("irc.freenode.net", sys.argv[1], "#podman")
err = irc.connect(*os.environ.get('IRCID', 'Big Bug').split(" ", 2))
if not err:
irc.message(" ".join(sys.argv[2:]))
time.sleep(5.0) # avoid join/quit spam
irc.quit()
| [
"time.sleep",
"time.time",
"os.environ.get",
"socket.socket"
]
| [((309, 324), 'socket.socket', 'socket.socket', ([], {}), '()\n', (322, 324), False, 'import socket\n'), ((489, 538), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (502, 538), False, 'import socket\n'), ((922, 933), 'time.time', 'time.time', ([], {}), '()\n', (931, 933), False, 'import time\n'), ((3343, 3358), 'time.sleep', 'time.sleep', (['(5.0)'], {}), '(5.0)\n', (3353, 3358), False, 'import time\n'), ((992, 1003), 'time.time', 'time.time', ([], {}), '()\n', (1001, 1003), False, 'import time\n'), ((1108, 1123), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1118, 1123), False, 'import time\n'), ((3225, 3259), 'os.environ.get', 'os.environ.get', (['"""IRCID"""', '"""Big Bug"""'], {}), "('IRCID', 'Big Bug')\n", (3239, 3259), False, 'import os\n')] |
import unittest
from flydenity import Parser
class TestParseIcao24Bit(unittest.TestCase):
def setUp(self):
self.parser = Parser()
def test_parse_simple(self):
match = self.parser.parse("3D2591", icao24bit=True)
self.assertEqual(match, {"nation": "Germany", "description": "general", "iso2": "DE", "iso3": "DEU"})
def test_parse_strict(self):
sloppy_reg_sloppy_parser = self.parser.parse("3DX", icao24bit=True, strict=False)
sloppy_reg_strict_parser = self.parser.parse("3DX", icao24bit=True, strict=True)
strict_reg_sloppy_parser = self.parser.parse("3D2591", icao24bit=True, strict=False)
strict_reg_strict_parser = self.parser.parse("3D2591", icao24bit=True, strict=True)
self.assertTrue(sloppy_reg_sloppy_parser == strict_reg_sloppy_parser == strict_reg_strict_parser != sloppy_reg_strict_parser)
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"flydenity.Parser"
]
| [((916, 931), 'unittest.main', 'unittest.main', ([], {}), '()\n', (929, 931), False, 'import unittest\n'), ((136, 144), 'flydenity.Parser', 'Parser', ([], {}), '()\n', (142, 144), False, 'from flydenity import Parser\n')] |
import os
def create_project(path):
dirs = ['configs', 'module', 'data']
dirs = [os.path.join(path, d) for d in dirs]
for d in dirs:
os.makedirs(d)
train_script = r"""
import ever as er
def train(trainer_name):
trainer = er.trainer.get_trainer(trainer_name)()
trainer.run()
"""
with open(os.path.join(path, 'train.py'), 'w') as f:
f.write(train_script)
print('created project in {}'.format(path))
| [
"os.path.join",
"os.makedirs"
]
| [((91, 112), 'os.path.join', 'os.path.join', (['path', 'd'], {}), '(path, d)\n', (103, 112), False, 'import os\n'), ((155, 169), 'os.makedirs', 'os.makedirs', (['d'], {}), '(d)\n', (166, 169), False, 'import os\n'), ((342, 372), 'os.path.join', 'os.path.join', (['path', '"""train.py"""'], {}), "(path, 'train.py')\n", (354, 372), False, 'import os\n')] |