# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-04-18 05:56
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('cmdb', '0001_initial'),
('appconf', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='UserInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('username', models.CharField(db_index=True, max_length=40, unique=True)),
('email', models.EmailField(max_length=255)),
('is_active', models.BooleanField(default=False)),
('is_superuser', models.BooleanField(default=False)),
('nickname', models.CharField(blank=True, max_length=64, null=True)),
('ldap_name', models.CharField(blank=True, max_length=64)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='PermissionList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('url', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='RoleList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('delivery', models.ManyToManyField(blank=True, to='appconf.Project')),
('permission', models.ManyToManyField(blank=True, to='accounts.PermissionList')),
('webssh', models.ManyToManyField(blank=True, to='cmdb.HostGroup')),
],
),
migrations.AddField(
model_name='userinfo',
name='role',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='accounts.RoleList'),
),
]
| [
"django.db.models.EmailField",
"django.db.models.ForeignKey",
"django.db.models.ManyToManyField",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
]
import http
import logging
from typing import List, Tuple, MutableMapping
from datetime import datetime
import re
from requests.packages.urllib3 import Retry
import autoscaler.utils as utils
from autoscaler.autoscaling_groups import AutoScalingGroup
from autoscaler.azure_api import AzureApi, AzureScaleSet, AzureScaleSetInstance
from autoscaler.utils import TransformingFuture, AllCompletedFuture, CompletedFuture
logger = logging.getLogger(__name__)
_RETRY_TIME_LIMIT = 30
class AzureBoundedRetry(Retry):
"""
XXX: Azure sometimes sends us a Retry-After: 1200, even when we still have quota, causing our client to appear to hang.
Ignore them and just retry after 30secs
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
@staticmethod
def from_retry(retry):
new_retry = AzureBoundedRetry()
new_retry.total = retry.total
new_retry.connect = retry.connect
new_retry.read = retry.read
new_retry.backoff_factor = retry.backoff_factor
new_retry.BACKOFF_MAX = retry.BACKOFF_MAX
new_retry.status_forcelist = retry.status_forcelist
new_retry.method_whitelist = retry.method_whitelist
return new_retry
def get_retry_after(self, response):
retry_after = super().get_retry_after(response)
if response.status != http.HTTPStatus.TOO_MANY_REQUESTS or retry_after <= _RETRY_TIME_LIMIT:
return retry_after
headers = {}
for header in ['Retry-After',
'x-ms-ratelimit-remaining-subscription-reads',
'x-ms-ratelimit-remaining-subscription-writes',
'x-ms-ratelimit-remaining-tenant-reads',
'x-ms-ratelimit-remaining-tenant-writes',
'x-ms-ratelimit-remaining-subscription-resource-requests',
'x-ms-ratelimit-remaining-subscription-resource-entities-read',
'x-ms-ratelimit-remaining-tenant-resource-requests',
'x-ms-ratelimit-remaining-tenant-resource-entities-read']:
value = response.getheader(header)
if value is not None:
headers[header] = value
logger.warn("Azure request throttled: {}".format(headers))
return _RETRY_TIME_LIMIT
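# Illustrative usage (an assumption, not part of the original module): wrap an existing
# urllib3 Retry so that Azure's oversized Retry-After values are capped at 30 seconds.
#
#   import requests
#   from requests.adapters import HTTPAdapter
#
#   base_retry = Retry(total=5, backoff_factor=0.5, status_forcelist=[429, 500, 503])
#   adapter = HTTPAdapter(max_retries=AzureBoundedRetry.from_retry(base_retry))
#   session = requests.Session()
#   session.mount('https://', adapter)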
class AzureGroups(object):
def __init__(self, resource_groups, slow_scale_classes, client: AzureApi):
self.resource_groups = resource_groups
self.slow_scale_classes = slow_scale_classes
self.client = client
def get_all_groups(self, kube_nodes):
groups = []
if self.client:
for resource_group in self.resource_groups:
scale_sets_by_type = {}
for scale_set in self.client.list_scale_sets(resource_group.name):
scale_sets_by_type.setdefault((scale_set.location, scale_set.instance_type), []).append(scale_set)
for key, scale_sets in scale_sets_by_type.items():
location, instance_type = key
slow_scale = _get_azure_class(instance_type) in self.slow_scale_classes
groups.append(AzureVirtualScaleSet(location, resource_group.name, self.client, instance_type, slow_scale, scale_sets, kube_nodes))
return groups
_CLASS_PAT = re.compile(r'\w+_(?P<class>[A-Z]+).+')
def _get_azure_class(type_):
m = _CLASS_PAT.match(type_)
return m.group('class')
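# Worked example (illustrative): for an Azure VM size such as "Standard_DS2_v2" the
# pattern captures the run of capitals after the underscore, so
# _get_azure_class("Standard_DS2_v2") returns "DS".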
_SCALE_SET_SIZE_LIMIT = 100
# Appears as an unbounded scale set. Currently, Azure Scale Sets have a limit of 100 hosts.
class AzureVirtualScaleSet(AutoScalingGroup):
provider = 'azure'
def __init__(self, region, resource_group, client: AzureApi, instance_type, slow_scale: bool, scale_sets: List[AzureScaleSet], kube_nodes):
self.client = client
self.instance_type = instance_type
self.tags = {}
self.name = 'virtual_scale_set_' + instance_type + '_' + region + '_' + resource_group
self.scale_sets = dict((scale_set.name, scale_set) for scale_set in scale_sets)
self.desired_capacity = sum(scale_set.capacity for scale_set in scale_sets)
self.region = region
self.resource_group = resource_group
self.selectors = dict(self.tags)
# HACK: for matching node selectors
self.selectors['azure/type'] = self.instance_type
self.selectors['azure/class'] = _get_azure_class(self.instance_type)
self.slow_scale = slow_scale
self.min_size = 0
self.max_size = 10000
self.is_spot = False
self.vm_id_to_instance: MutableMapping[str, Tuple[str, AzureScaleSetInstance]] = {}
self.instances = {}
self.timeout_until = None
self.timeout_reason = None
self._global_priority = None
self.no_schedule_taints = {}
for scale_set in scale_sets:
if scale_set.timeout_until is not None:
if self.timeout_until is None or self.timeout_until < scale_set.timeout_until:
self.timeout_until = scale_set.timeout_until
self.timeout_reason = scale_set.name + ": " + scale_set.timeout_reason
if scale_set.priority is not None:
if self._global_priority is None:
self._global_priority = scale_set.priority
else:
self._global_priority = min(scale_set.priority, self._global_priority)
if not self.no_schedule_taints:
self.no_schedule_taints = scale_set.no_schedule_taints
if scale_set.capacity == 0:
continue
for instance in self.client.list_scale_set_instances(scale_set):
self.vm_id_to_instance[instance.vm_id] = (scale_set.name, instance)
self.instances[instance.vm_id] = AzureInstance(instance.vm_id, self.instance_type, instance.launch_time, self.tags)
self.nodes = [node for node in kube_nodes if node.instance_id in self.vm_id_to_instance]
self.unschedulable_nodes = [n for n in self.nodes if n.unschedulable]
self._id = (self.region, self.name)
def is_timed_out(self):
if self.timeout_until and datetime.now(self.timeout_until.tzinfo) < self.timeout_until:
logger.warn("{} is timed out until {} because {}".format(self._id, self.timeout_until, self.timeout_reason))
return True
return False
@property
def global_priority(self):
if self._global_priority is None:
return super().global_priority
return self._global_priority
def get_azure_instances(self):
return self.instances.values()
@property
def instance_ids(self):
return self.vm_id_to_instance.keys()
def set_desired_capacity(self, new_desired_capacity):
"""
sets the desired capacity of the underlying ASG directly.
note that this is for internal control.
for scaling purposes, please use scale() instead.
"""
scale_out = new_desired_capacity - self.desired_capacity
assert scale_out >= 0
if scale_out == 0:
return CompletedFuture(False)
futures = []
for scale_set in sorted(self.scale_sets.values(), key=lambda x: (x.priority, x.name)):
if scale_set.capacity < _SCALE_SET_SIZE_LIMIT:
if self.slow_scale:
new_group_capacity = scale_set.capacity + 1
else:
new_group_capacity = min(_SCALE_SET_SIZE_LIMIT, scale_set.capacity + scale_out)
scale_out -= (new_group_capacity - scale_set.capacity)
if scale_set.provisioning_state == 'Updating':
logger.warn("Update of {} already in progress".format(scale_set.name))
continue
if scale_set.provisioning_state == 'Failed':
logger.error("{} failed provisioning. Skipping it for scaling.".format(scale_set.name))
continue
# Update our cached version
self.scale_sets[scale_set.name].capacity = new_group_capacity
futures.append(self.client.update_scale_set(scale_set, new_group_capacity))
logger.info("Scaling Azure Scale Set {} to {}".format(scale_set.name, new_group_capacity))
if scale_out == 0:
break
if scale_out > 0:
logger.error("Not enough scale sets to reach desired capacity {} for {}".format(new_desired_capacity, self))
self.desired_capacity = new_desired_capacity - scale_out
logger.info("ASG: {} new_desired_capacity: {}".format(self, new_desired_capacity))
return TransformingFuture(True, AllCompletedFuture(futures))
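    # Illustrative walk-through (not from the original source): with two scale sets at
    # capacities 80 and 10 and a required scale_out of 60, the first set (ordered by
    # priority, then name) grows 80 -> 100 (the 100-host cap), leaving 40, which the
    # second set absorbs (10 -> 50). With slow_scale=True each set only ever grows by
    # one instance per call.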
def terminate_instances(self, vm_ids):
vm_ids = list(vm_ids)
instances = {}
for vm_id in vm_ids:
scale_set_name, instance = self.vm_id_to_instance[vm_id]
# Update our cached copy of the Scale Set
self.scale_sets[scale_set_name].capacity -= 1
instances.setdefault(scale_set_name, []).append(instance)
logger.info('Terminated instances %s', vm_ids)
futures = []
for scale_set_name, scale_set_instances in instances.items():
futures.append(self.client.terminate_scale_set_instances(self.scale_sets[scale_set_name], scale_set_instances))
return AllCompletedFuture(futures)
def scale_nodes_in(self, nodes):
"""
scale down asg by terminating the given node.
returns a future indicating when the request completes.
"""
for node in nodes:
self.nodes.remove(node)
return self.terminate_instances(node.instance_id for node in nodes)
def __str__(self):
return 'AzureVirtualScaleSet({name}, {selectors_hash})'.format(name=self.name, selectors_hash=utils.selectors_to_hash(self.selectors))
def __repr__(self):
return str(self)
class AzureInstance(object):
provider = 'azure'
def __init__(self, instance_id, instance_type, launch_time, tags):
self.id = instance_id
self.instance_type = instance_type
self.launch_time = launch_time
self.tags = tags
def __str__(self):
return 'AzureInstance({}, {})'.format(self.id, self.instance_type)
def __repr__(self):
        return str(self)
| [
"logging.getLogger",
"autoscaler.utils.AllCompletedFuture",
"re.compile",
"datetime.datetime.now",
"autoscaler.utils.selectors_to_hash",
"autoscaler.utils.CompletedFuture"
]
import unittest
import datetime
from parameterized import parameterized
from activity_merger import Interval
from aw_core.models import Event
from typing import List, Tuple
def _build_datetime(seed: int) -> datetime.datetime:
return datetime.datetime(2000, 1, seed, seed, 0, 0).astimezone(datetime.timezone.utc)
def _build_timedelta(seed: int) -> datetime.timedelta:
return _build_datetime(seed + 1) - _build_datetime(1)
def build_intervals_linked_list(data: List[Tuple[int, bool, int]]) -> Interval:
"""
Builds intervals linked list from the list of tuples. Doesn't check parameters.
:param data: List of tuples (day of start, flag to return `Interval` from the function, duration).
:return: Chosen interval.
"""
result = None
previous = None
for (seed, is_target, duration) in data:
if not previous:
previous = Interval(_build_datetime(seed), _build_datetime(seed + duration))
else:
tmp = Interval(_build_datetime(seed), _build_datetime(seed + duration), previous)
previous.next = tmp
previous = tmp
if is_target:
assert result is None, f"Wrong parameters - '{seed}' interval is marked as result but is not first."
result = previous
return result
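# Example call (illustrative, mirroring the docstring): the tuple flagged True is the
# interval returned.
#
#   build_intervals_linked_list([
#       (1, False, 1),   # interval starting on day 1, lasting 1 day
#       (2, True, 2),    # this interval is returned
#   ])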
class TestInterval(unittest.TestCase):
@parameterized.expand([
(
"Simple the only interval",
build_intervals_linked_list([
(1, True, 1)
]),
1
),
(
"The same interval",
build_intervals_linked_list([
(1, False, 1),
(5, True, 1),
(6, False, 1)
]),
5
),
(
"Exact Interval right before",
build_intervals_linked_list([
(5, False, 1),
(6, True, 1),
(7, False, 1)
]),
5
),
(
"Exact Interval right after",
build_intervals_linked_list([
(3, False, 1),
(4, True, 1),
(5, False, 1)
]),
5
),
(
"Exact Interval far after",
build_intervals_linked_list([
(3, True, 1),
(4, False, 1),
(5, False, 1),
(6, False, 1),
]),
5
),
(
"Exact Interval far before",
build_intervals_linked_list([
(4, False, 1),
(5, False, 1),
(6, False, 1),
(7, True, 1),
]),
5
),
])
def test_find_closest_by_start(self, test_name, interval, expected_start_seed):
target = _build_datetime(5)
actual: Interval = interval.find_closest(target, datetime.timedelta(0), False)
expected = _build_datetime(expected_start_seed)
self.assertEqual(actual.start_time, expected, f"'{test_name}' case failed.")
@parameterized.expand([
(
"Simple the only interval",
build_intervals_linked_list([
(1, True, 1)
]),
1
),
(
"The same interval",
build_intervals_linked_list([
(1, False, 1),
(4, True, 1),
(6, False, 1),
]),
4
),
(
"Exact Interval right before",
build_intervals_linked_list([
(4, False, 1),
(6, True, 1),
(7, False, 1),
]),
4
),
(
"Exact Interval right after",
build_intervals_linked_list([
(1, False, 1),
(2, True, 1),
(4, False, 1),
]),
4
),
(
"Exact Interval far after",
build_intervals_linked_list([
(2, True, 1),
(3, False, 1),
(4, False, 1),
(5, False, 1),
]),
4
),
(
"Exact Interval far before",
build_intervals_linked_list([
(3, False, 1),
(4, False, 1),
(6, False, 1),
(7, True, 1),
]),
4
),
])
def test_find_closest_by_end(self, test_name, interval: Interval, expected_start_seed):
target = _build_datetime(5)
actual: Interval = interval.find_closest(target, datetime.timedelta(0), True)
expected = _build_datetime(expected_start_seed)
self.assertEqual(actual.start_time, expected, f"'{test_name}' case failed.")
@parameterized.expand([
(
"Event at middle",
build_intervals_linked_list([
(3, True, 5),
]),
Event(1, _build_datetime(5), _build_timedelta(1)),
build_intervals_linked_list([
(3, True, 2),
(5, False, 1),
(6, False, 2),
]),
),
(
"Event start equal interval start",
build_intervals_linked_list([
(5, True, 5),
]),
Event(1, _build_datetime(5), _build_timedelta(1)),
build_intervals_linked_list([
(5, True, 1),
(6, False, 4),
]),
),
(
"Event end equal interval end",
build_intervals_linked_list([
(4, True, 2),
]),
Event(1, _build_datetime(5), _build_timedelta(1)),
build_intervals_linked_list([
(4, True, 1),
(5, False, 1),
]),
),
])
def test_separate_new_at_middle(self, test_name: str, interval: Interval, event: Event,
expected_interval_offset_2_num_4: Interval):
actual: Interval = interval.separate_new_at_middle(event, datetime.timedelta(0))
self.assertListEqual(actual.get_range(-2, 4), expected_interval_offset_2_num_4.get_range(-2, 4),
f"'{test_name}' case failed.")
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"datetime.datetime",
"datetime.timedelta"
]
"""
NOTE:
There are a few minor complications to fluid human control which make this
code a little more involved than trivial.
1. Key press-release cycles can be, and often are, faster than one tick of
the game/simulation, but the player still wants that cycle to count, i.e.
to lay a bomb!
2. When holding down a key, the player expects that action to be repeated,
at least after a slight delay.
3. But when holding a key down (say, move left) and simultaneously doing a
quick press-release cycle (put a bomb), we want the held-down key to keep
being executed, but the cycle should have happened in-between.
The way we solve this problem is by separating key-state and actions-to-do.
We hold the actions that need be executed in a queue (`self._action_q`) and
a state for all considered keys.
1. When a key is pressed down, we note the time and mark it as down.
2. If it is released quickly thereafter, before a game tick could happen,
we add its action into the queue. This often happens when putting bombs.
3. If it's still pressed down as we enter a game tick, we do some math to see
if it's time for a "repeat" event and, if so, push an action to the queue.
4. Just work off one item from the queue each tick.
This way, the input is "natural" and things like dropping a bomb while doing
a diagonal walk from one end to the other "just work".
"""
from time import time
from . import BaseAgent
from .. import characters
REPEAT_DELAY = 0.2 # seconds
REPEAT_INTERVAL = 0.1
class Keystate:
def __init__(self):
self.keydown_time = time()
self.last_repeat_time = None
self.fired = False
def should_fire(self):
if self.last_repeat_time is None:
# The first repetition:
if time() - self.keydown_time > REPEAT_DELAY:
return True
else:
# A repetition after the first:
if time() - self.last_repeat_time > REPEAT_INTERVAL:
return True
# No repetition yet
return False
def mark_fired(self):
self.last_repeat_time = time()
self.fired = True
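# Illustrative timeline (based on the defaults above): a key held down at t=0 fires its
# first repeat once time() - keydown_time exceeds REPEAT_DELAY (0.2s), and then again
# every REPEAT_INTERVAL (0.1s). A press/release completed before the next game tick
# instead queues exactly one action in on_key_release, because `fired` is still False.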
class PlayerAgent(BaseAgent):
"""The Player Agent that lets the user control a character."""
def __init__(self, character=characters.Bomber, agent_control='arrows'):
super(PlayerAgent, self).__init__(character)
##
# @NOTE: DO NOT move this import outside the constructor. It will
# not work in headless environments like a Docker container
# and prevents Pommerman from running.
#
from pyglet.window import key
CONTROLS = {
'arrows': {
key.UP: 1,
key.DOWN: 2,
key.LEFT: 3,
key.RIGHT: 4,
key.SPACE: 5,
key.M: 6 # In Pommerman, this will freeze the game.
},
'wasd': {
key.W: 1,
key.S: 2,
key.A: 3,
key.D: 4,
key.E: 5,
key.Q: 6 # In Pommerman, this will freeze the game.
}
}
assert agent_control in CONTROLS, "Unknown control: {}".format(
agent_control)
self._key2act = CONTROLS[agent_control]
self._action_q = []
self._keystate = {}
def act(self, obs, action_space):
# Go through the keys and fire for those that needs repetition (because they're held down)
for k, state in self._keystate.items():
if state.should_fire():
self._action_q.append(k)
state.mark_fired()
act = 0
if self._action_q: # Work off the keys that are queued.
act = self._key2act[self._action_q.pop(0)]
return act
@staticmethod
def has_user_input():
return True
def on_key_press(self, k, mod):
# Ignore if we're not handling the key. Avoids "shadowing" ticks in
# multiplayer mode.
if k in self._key2act:
self._keystate[k] = Keystate()
def on_key_release(self, k, mod):
# We only need to act on keys for which we did something in the
# `key_press` event, and ignore any other key releases.
if k in self._keystate:
# Only mark this as a "press" upon release if it was a quick one,
# i.e. not held down and executed already
if not self._keystate[k].fired:
self._action_q.append(k)
del self._keystate[k]
| [
"time.time"
]
import datetime as dt
from os.path import dirname, join
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from bokeh.io import curdoc
from bokeh.layouts import column, gridplot, row
from bokeh.models import ColumnDataSource, DataRange1d, Select, HoverTool, Panel, Tabs, LinearColorMapper, Range1d
from bokeh.models import NumeralTickFormatter, Title, Label, Paragraph, Div, CustomJSHover, BoxAnnotation
from bokeh.models import ColorBar
from bokeh.palettes import brewer, Spectral6
from bokeh.plotting import figure
from bokeh.embed import server_document
from bokeh.transform import factor_cmap
#################################################################################
# This just loads in the data...
# A lot of this was built off of the bokeh "crossfilter" demo:
# https://github.com/bokeh/bokeh/blob/branch-2.3/examples/app/crossfilter/main.py
start_date = dt.datetime(2017,7,1)
end_date = dt.datetime(2022,1,1)
background = "#ffffff"
file = "./data"+ "/data.parquet"
df = pq.read_table(file).to_pandas()
df.sort_index(inplace=True)
options = df.index.unique(0).to_list()
#print(options)
product = "HS CODE 72, IRON AND STEEL"
level = "US Dollars"
#################################################################################
#These are functions used in the plot...
def growth_trade(foo):
    # Take the product's dataframe and compute the year-over-year growth rate (%) of exports.
return 100*((foo["china_exports"]/foo["china_exports"].shift(12)) - 1)
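# Worked example (illustrative): if exports were 100 in Jan 2019 and 125 in Jan 2020,
# the Jan 2020 value is 100*((125/100) - 1) = 25, i.e. 25% year-over-year growth; the
# first twelve months are NaN because shift(12) has no year-earlier value to compare.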
def cum_trade(foo):
outdf = pd.DataFrame([])
outdf["cuml_trade_2017"] = foo["china_exports"].loc["2017"].cumsum()
outdf.index = pd.date_range(start="2020-01-01", end="2020-12-01", freq = "MS")
outdf["cuml_trade_2020"] = foo["china_exports"].loc["2020"].cumsum()
return outdf
#################################################################################
# Then this makes the simple plots:
def make_plot():
height = int(1.15*533)
width = int(1.15*750)
foo = df.loc[product_select.value]
#foo = df.query("@a < a")
    # product_select and level_select (defined below) hold the current dropdown
    # selections; their .value attributes give the option the user has chosen.
x = foo.index
if level_select.value == 'US Dollars':
y = foo['china_exports']
if level_select.value == 'Year over Year % Change':
y = growth_trade(foo)
if level_select.value == "Cumulative Purchases 2020 vs 2017":
cuml = cum_trade(foo)
x = cuml.index
y2017 = cuml["cuml_trade_2017"]
y2020 = cuml["cuml_trade_2020"]
title = "US Exports to China of " + product_select.value.title().upper()
if level_select.value != "Cumulative Purchases 2020 vs 2017":
# This is standard bokeh stuff so far
plot = figure(x_axis_type="datetime", plot_height = height, plot_width=width, toolbar_location = 'below',
tools = "box_zoom, reset, pan, xwheel_zoom", title = title,
x_range = (start_date,end_date) )
plot.line(x = x,
y = y, line_width=3.5, line_alpha=0.75, line_color = "slategray")
if level_select.value == "Cumulative Purchases 2020 vs 2017":
plot = figure(x_axis_type="datetime", plot_height = height, plot_width=width, toolbar_location = 'below',
tools = "box_zoom, reset, pan", title = title,
x_range = (dt.datetime(2020,1,1),dt.datetime(2021,2,1)) )
plot.line(x = x,
y = y2017, line_width=3.5, line_alpha=0.5, line_color = "red", line_dash = "dashed"
, legend_label= "2017")
plot.line(x = x,
y = y2020, line_width=3.5, line_alpha=0.75, line_color = "darkblue"
, legend_label= "2020")
plot.legend.title = 'Cumulative Purchases'
plot.legend.location = "top_left"
plot.legend.title_text_font_style = "bold"
# fixed attributes
plot.xaxis.axis_label = None
plot.yaxis.axis_label = ""
plot.axis.axis_label_text_font_style = "bold"
plot.grid.grid_line_alpha = 0.3
TIMETOOLTIPS = """
<div style="background-color:#F5F5F5; opacity: 0.95; border: 15px 15px 15px 15px;">
<div style = "text-align:left;">"""
if level_select.value == 'Year over Year % Change':
TIMETOOLTIPS = TIMETOOLTIPS + """
<span style="font-size: 13px; font-weight: bold"> $data_x{%b %Y}: $data_y{0}%</span>
</div>
</div>
"""
plot.add_tools(HoverTool(tooltips = TIMETOOLTIPS, line_policy='nearest', formatters={'$data_x': 'datetime'}))
if level_select.value == 'US Dollars':
TIMETOOLTIPS = TIMETOOLTIPS + """
<span style="font-size: 13px; font-weight: bold"> $data_x{%b %Y}: $data_y{$0.0a}</span>
</div>
</div>
"""
plot.add_tools(HoverTool(tooltips = TIMETOOLTIPS, line_policy='nearest', formatters={'$data_x': 'datetime'}))
if level_select.value == "Cumulative Purchases 2020 vs 2017":
#################################################################################
singlesource2020 = ColumnDataSource({
'xs': x.values,
'ys': y2020.values,
"dates": np.array(x),
})
c2020 = plot.circle(x="xs", y="ys", size=35,
source = singlesource2020, color = "crimson",alpha=0.0)
singlesource2017 = ColumnDataSource({
'xs': x.values,
'ys': y2017.values,
"dates": np.array(pd.date_range(start="2017-01-01", end="2017-12-01", freq = "MS")),
})
c2017 = plot.circle(x="xs", y="ys", size=35,
source = singlesource2017, color = "darkblue",alpha=0.0)
TIMETOOLTIPS = TIMETOOLTIPS + """
<span style="font-size: 13px; font-weight: bold"> @dates{%b %Y}: $data_y{$0.0a}</span>
</div>
</div>
"""
plot.add_tools(HoverTool(tooltips = TIMETOOLTIPS, line_policy='nearest', formatters={'@dates': 'datetime'}, renderers = [c2017,c2020]))
if level_select.value == 'Year over Year % Change':
if y.max() > 1500:
plot.y_range.end = 1500
plot.title.text_font_size = '13pt'
plot.background_fill_color = background
plot.background_fill_alpha = 0.75
plot.border_fill_color = background
tradewar_box = BoxAnnotation(left=dt.datetime(2018,7,1), right=dt.datetime(2019,10,11), fill_color='red', fill_alpha=0.1)
plot.add_layout(tradewar_box)
tradewar_box = BoxAnnotation(left=dt.datetime(2020,1,1), right=dt.datetime(2021,12,31), fill_color='blue', fill_alpha=0.1)
plot.add_layout(tradewar_box)
#p.yaxis.axis_label =
plot.yaxis.axis_label_text_font_style = 'bold'
plot.yaxis.axis_label_text_font_size = "13px"
plot.sizing_mode= "scale_both"
if level_select.value != 'Year over Year % Change':
plot.yaxis.formatter = NumeralTickFormatter(format="($0. a)")
plot.yaxis.axis_label = "US Dollars"
if level_select.value == 'Year over Year % Change':
plot.yaxis.axis_label = level_select.value
plot.max_height = height
plot.max_width = width
plot.min_height = int(0.25*height)
plot.min_width = int(0.25*width)
return plot
def update_plot(attrname, old, new):
layout.children[0] = make_plot()
    # Rebuild the plot and swap it in as the first child of the layout; the layout is a
    # row whose first entry is the plot and whose second entry is the controls column.
level_select = Select(value=level, title='Transformations', options=['US Dollars', 'Year over Year % Change', "Cumulative Purchases 2020 vs 2017"])
level_select.on_change('value', update_plot)
#print(sorted(options))
product_select = Select(value=product, title='Product', options=sorted(options), width=400)
# This is the key thing that creates teh selection object
product_select.on_change('value', update_plot)
# Change the value upone selection via the update plot
div0 = Div(text = """Categories are at both the HS2 and HS4 level. Only Phase One covered products as defined in Annex 6-1 of The Agreement within that HS Code are shown. Red marks the period of Section 301 tariffs and retaliation. Blue is period of agreement.\n
\n
\n
""", width=400, background = background, style={"justify-content": "space-between", "display": "flex"} )
div1 = Div(text = """Transformations: US Dollars, year over year growth rate and cumulative purchases in 2017 vs 2020.\n The later transformation cumulates Chinese purchases over each month in 2017 and 2020 and compares each. Because 2017 is the benchmark year for The Agreement, this measure provides a sense, for each product category, China's progress towards meeting their purchase commitments.\n
""", width=400, background = background, style={"justify-content": "space-between", "display": "flex"} )
controls = column(product_select, div0, level_select, div1)
height = int(1.95*533)
width = int(1.95*675)
layout = row(make_plot(), controls, sizing_mode = "scale_height", max_height = height, max_width = width,
min_height = int(0.25*height), min_width = int(0.25*width))
curdoc().add_root(layout)
curdoc().title = "us-china-products"
| [
"datetime.datetime",
"bokeh.layouts.column",
"bokeh.models.Div",
"pyarrow.parquet.read_table",
"bokeh.plotting.figure",
"bokeh.io.curdoc",
"bokeh.models.Select",
"numpy.array",
"bokeh.models.NumeralTickFormatter",
"pandas.DataFrame",
"pandas.date_range",
"bokeh.models.HoverTool"
]
import pandas_datareader.data as pdr
import yfinance as fix
import numpy as np
fix.pdr_override()
def back_test(strategy, seq_len, ticker, start_date, end_date, dim):
"""
A simple back test for a given date period
    :param strategy: the chosen strategy. Note: the model must already be built and fitted on the training data.
:param seq_len: length of the days used for prediction
:param ticker: company ticker
:param start_date: starting date
:type start_date: "YYYY-mm-dd"
:param end_date: ending date
:type end_date: "YYYY-mm-dd"
:param dim: dimension required for strategy: 3dim for LSTM and 2dim for MLP
:type dim: tuple
:return: Percentage errors array that gives the errors for every test in the given date range
"""
data = pdr.get_data_yahoo(ticker, start_date, end_date)
stock_data = data["Adj Close"]
errors = []
for i in range((len(stock_data) // 10) * 10 - seq_len - 1):
        # "Adj Close" is a 1-D Series, so index it positionally on a single axis.
        x = np.array(stock_data.iloc[i: i + seq_len]).reshape(dim) / 200
        y = np.array(stock_data.iloc[i + seq_len + 1]) / 200
predict = strategy.predict(x)
while predict == 0:
predict = strategy.predict(x)
error = (predict - y) / 100
errors.append(error)
total_error = np.array(errors)
print(f"Average error = {total_error.mean()}")
# If you want to see the full error list then print the following statement
    # print(errors)
    # Return the per-test errors, as promised in the docstring.
    return errors
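# Hypothetical usage (not in the original file): `model` must already be fitted and
# expose .predict(); `dim` reshapes the seq_len-day window to what the model expects,
# e.g. (1, 10) for an MLP or (1, 10, 1) for an LSTM.
#
#   back_test(model, seq_len=10, ticker="AAPL",
#             start_date="2018-01-01", end_date="2019-01-01", dim=(1, 10))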
| [
"yfinance.pdr_override",
"numpy.array",
"pandas_datareader.data.get_data_yahoo"
]
# -*- coding: utf-8
"""Module for custom component groups.
It is possible to create subsystems of component groups in tespy. The subsystem
class is the base class for custom subsystems.
This file is part of project TESPy (github.com/oemof/tespy). It's copyrighted
by the contributors recorded in the version control history of the file,
available from its original location tespy/components/subsystems.py
SPDX-License-Identifier: MIT
"""
import logging
# %%
class subsystem:
r"""
Class subsystem is the base class of all TESPy subsystems.
Parameters
----------
label : str
The label of the subsystem.
Example
-------
Basic example for a setting up a tespy.components.subsystems.subsystem
object. This example does not run a tespy calculation!
>>> from tespy.components import subsystem
>>> mysub = subsystem('mySubsystem')
>>> type(mysub)
<class 'tespy.components.subsystems.subsystem'>
>>> mysub.get_attr('label')
'mySubsystem'
"""
def __init__(self, label):
if not isinstance(label, str):
msg = 'Subsystem label must be of type str!'
logging.error(msg)
raise ValueError(msg)
elif len([x for x in [';', ', ', '.'] if x in label]) > 0:
msg = 'Can\'t use ' + str([';', ', ', '.']) + ' in label.'
logging.error(msg)
raise ValueError(msg)
else:
self.label = label
self.comps = {}
self.conns = {}
self.create_comps()
self.create_conns()
def get_attr(self, key):
r"""
Get the value of a subsystem's attribute.
Parameters
----------
key : str
The attribute you want to retrieve.
Returns
-------
out :
Value of specified attribute.
"""
if key in self.__dict__:
return self.__dict__[key]
else:
msg = 'Subsystem ' + self.label + ' has no attribute ' + key + '.'
logging.error(msg)
raise KeyError(msg)
def create_comps(self):
"""Create the subsystem's components."""
return
def create_conns(self):
"""Create the subsystem's connections."""
return
| [
"logging.error"
]
from setuptools import setup, find_packages
setup(
name="raytracing-one-weekend",
version="0.0.0",
author="<NAME>",
author_email="<EMAIL>",
description="A raytracer achievable in a weekend.",
url="https://github.com/ninezerozeronine/raytracing-one-weekend",
install_requires=[
"Pillow",
"numpy",
],
packages=find_packages('src'),
package_dir={'': 'src'},
)
| [
"setuptools.find_packages"
]
"""
Notification email machinery, for tasks to send credentials and instructions to users.
Email templates placed inside the `templates` directory of this module should:
- extend from `layout`
- provide `subject` and `body` blocks
"""
from enum import Enum
import os.path
from jinja2 import Environment, FileSystemLoader
from sqlalchemy.orm import Session as SQLASession
from srcf.database import Member, Society
from srcf.mail import send_mail
from ..plumbing import Owner, owner_desc, owner_name, owner_website
ENV = Environment(loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), "templates")),
trim_blocks=True, lstrip_blocks=True)
ENV.filters.update({"is_member": lambda mem: isinstance(mem, Member),
"is_society": lambda soc: isinstance(soc, Society),
"owner_name": owner_name,
"owner_desc": owner_desc,
"owner_website": owner_website})
CURRENT_WRAPPER = None
class Layout(Enum):
"""
Base layout template to be inherited by an email-specific template.
"""
SUBJECT = "/common/subject.j2"
"""
Subject line of the email.
"""
BODY = "/common/body.j2"
"""
Main content of the email.
"""
class EmailWrapper:
"""
Context manager for email sending, used to augment emails with additional metadata.
"""
def __init__(self, subject: str = None, body: str = None, context: dict = None):
self._layouts = {Layout.SUBJECT: subject,
Layout.BODY: body}
self._context = context
def render(self, template: str, layout: Layout, target: Owner, context: dict = None):
"""
Render an email template with Jinja using the provided context.
"""
context = dict(context or (), layout=layout.value, target=target)
out = ENV.get_template(template).render(context)
custom = self._layouts.get(layout)
if custom:
if self._context:
context.update(self._context)
out = custom.format(out, **context)
if layout == Layout.SUBJECT:
out = " ".join(out.split())
return out
def __enter__(self):
global CURRENT_WRAPPER
if CURRENT_WRAPPER:
raise RuntimeError("Another context is already active")
CURRENT_WRAPPER = self
def __exit__(self, exception_type, exception_value, traceback):
global CURRENT_WRAPPER
CURRENT_WRAPPER = None
DEFAULT_WRAPPER = EmailWrapper(subject="[SRCF] {}")
def send(target: Owner, template: str, context: dict = None, session: SQLASession = None):
"""
Render and send an email to the target member or society.
"""
wrapper = CURRENT_WRAPPER or DEFAULT_WRAPPER
subject = wrapper.render(template, Layout.SUBJECT, target, context)
body = wrapper.render(template, Layout.BODY, target, context)
recipient = (owner_desc(target, True), target.email)
send_mail(recipient, subject, body, copy_sysadmins=False, session=session)
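# Hypothetical usage (template name and context are assumptions, not from this module):
#
#   with EmailWrapper(subject="[SRCF] {}"):
#       send(member, "password_reset.j2", {"token": token}, session=sess)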
| [
"srcf.mail.send_mail"
]
import unittest
from players import Player, Quarterback
from possible_values import *
from game import Game
from random import randint, uniform, sample
from season import *
# TODO - some things you can add...
class FootballGameTest(unittest.TestCase):
'''test the class'''
def test_field_goal_made(self):
teams = sample(team_names, k=2)
game = Game(teams=teams)
team_prev_points = game.score[teams[0]]
game.field_goal(teams[0])
team_post_points = game.score[teams[0]]
self.assertEqual(team_post_points, team_prev_points + 3)
def test_get_winner(self):
teams = sample(team_names, k=2)
game = Game(teams=teams)
game.field_goal(teams[0])
t1_points = game.score[teams[0]]
t2_points = game.score[teams[1]]
if t1_points >= t2_points:
win, lose = teams
else:
lose, win = teams
self.assertEqual((win,lose), game.get_winning_team())
class FootballPlayerTest(unittest.TestCase):
'''Check the default values for Player and Quarterback
yards=120, touchdowns=5, safety=1,
interceptions=0
'''
def test_default_player_yards(self):
player = Player(name='Dude')
self.assertEqual(player.yards, 120)
def test_player_yards_set_to(self):
player = Player(name='OtherDude', yards=150)
self.assertEqual(player.yards, 150)
def test_default_qb_interceptions(self):
qb = Quarterback(name='FancyDude')
self.assertEqual(qb.interceptions, 4)
def test_default_qb_completed_passes(self):
qb = Quarterback()
self.assertEqual(qb.completed_passes, 20)
def test_passing_score(self):
qb = Quarterback()
self.assertEqual((20 - (2 * 4)), qb.passing_score())
if __name__ == '__main__':
unittest.main()
| [
"random.sample",
"players.Player",
"players.Quarterback",
"game.Game",
"unittest.main"
]
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
from django.contrib.auth.views import (LoginView, PasswordResetConfirmView,
PasswordResetView)
from django.http import HttpResponse, HttpResponseNotAllowed
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic import CreateView, DeleteView, UpdateView
from users.forms import (SignInForm, SignUpForm, UserPasswordResetForm,
UserProfileForm, UserSetPasswordForm)
from users.mixins import LockDuringEditMixin
from users.models import Lock, UserSession
class SignUp(CreateView):
model = User
form_class = SignUpForm
template_name = "registration/signup.html"
success_url = reverse_lazy("dashboard:dashboard")
class SignIn(LoginView):
form_class = SignInForm
class Profile(LoginRequiredMixin, LockDuringEditMixin, UpdateView):
model = User
form_class = UserProfileForm
template_name = "registration/profile.html"
success_url = reverse_lazy("users:profile")
def get_object(self):
return self.request.user
def form_valid(self, form):
response = super().form_valid(form)
update_session_auth_hash(self.request, self.object) # this will delete the current user session
# and create anew
UserSession.objects.create(user=self.object, session_id=self.request.session.session_key)
return response
class UserPasswordResetView(PasswordResetView):
form_class = UserPasswordResetForm
class UserPasswordResetConfirmView(PasswordResetConfirmView):
form_class = UserSetPasswordForm
def unlock(request, pk):
if request.method == "POST":
lock = Lock.objects.filter(pk=pk).delete()
return HttpResponse('')
return HttpResponseNotAllowed(["POST"])
| [
"django.contrib.auth.update_session_auth_hash",
"django.http.HttpResponseNotAllowed",
"django.http.HttpResponse",
"users.models.Lock.objects.filter",
"users.models.UserSession.objects.create",
"django.urls.reverse_lazy"
]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('transcount.csv')
df = df.groupby('year').aggregate(np.mean)
gpu = pd.read_csv('gpu_transcount.csv')
gpu = gpu.groupby('year').aggregate(np.mean)
df = pd.merge(df, gpu, how='outer', left_index=True, right_index=True)
df = df.replace(np.nan, 0)
df.plot()
df.plot(logy=True)
df[df['gpu_trans_count'] > 0].plot(kind='scatter', x='trans_count', y='gpu_trans_count', loglog=True)
plt.show()
| [
"pandas.merge",
"pandas.read_csv",
"matplotlib.pyplot.show"
]
import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
n = int(readline())
h = int(readline())
w = int(readline())
print((n - h + 1) * (n - w + 1))
| [
"sys.setrecursionlimit"
]
| [((116, 146), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10 ** 7)'], {}), '(10 ** 7)\n', (137, 146), False, 'import sys\n')] |
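The snippet above reads three integers (grid size and rectangle dimensions) and counts the placements of an h-by-w rectangle in an n-by-n grid. A quick sanity check of the formula with hypothetical inputs (equivalent to feeding "5\n2\n3\n" on stdin):

# A 5x5 grid admits (5-2+1)*(5-3+1) = 12 placements of a 2x3 rectangle.
n, h, w = 5, 2, 3
assert (n - h + 1) * (n - w + 1) == 12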
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import logging
import os.path
import subprocess
from collections import OrderedDict
from itertools import izip
import numpy as np
import pandas as pd
from django.conf import settings
from django.core.cache import cache
from django.db import connection
from sqlalchemy import create_engine
from dataops.formula_evaluation import evaluate_node_sql
from ontask import fix_pctg_in_name
SITE_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
table_prefix = '__ONTASK_WORKFLOW_TABLE_'
df_table_prefix = table_prefix + '{0}'
upload_table_prefix = table_prefix + 'UPLOAD_{0}'
# Query to count the number of rows in a table
query_count_rows = 'SELECT count(*) from "{0}"'
logger = logging.getLogger(__name__)
# Translation between pandas data type names, and those handled in OnTask
pandas_datatype_names = {
'object': 'string',
'int64': 'integer',
'float64': 'double',
'bool': 'boolean',
'datetime64[ns]': 'datetime'
}
# Translation between SQL data type names, and those handled in OnTask
sql_datatype_names = {
'text': 'string',
'bigint': 'integer',
'double precision': 'double',
'boolean': 'boolean',
'timestamp without time zone': 'datetime'
}
# DB Engine to use with Pandas (required by to_sql, from_sql
engine = None
def create_db_connection(dialect, driver, username, password, host, dbname):
"""
Function that creates the engine object to connect to the database. The
object is required by the pandas functions to_sql and from_sql
:param dialect: Dialect for the engine (oracle, mysql, postgresql, etc)
:param driver: DBAPI driver (psycopg2, ...)
:param username: Username to connect with the database
:param password: Password to connect with the database
:param host: Host to connect with the database
:param dbname: database name
:return: the engine
"""
# DB engine
database_url = \
'{dialect}{driver}://{user}:{password}@{host}/{database_name}'.format(
dialect=dialect,
driver=driver,
user=username,
password=password,
host=host,
database_name=dbname,
)
return create_engine(database_url, echo=False, paramstyle='format')
def create_db_engine(dialect, driver, username, password, host, dbname):
"""
Function that creates the engine object to connect to the database. The
object is required by the pandas functions to_sql and from_sql
:param dialect: Dialect for the engine (oracle, mysql, postgresql, etc)
:param driver: DBAPI driver (psycopg2, ...)
:param username: Username to connect with the database
:param password: <PASSWORD>
:param host: Host to connect with the database
:param dbname: database name
:return: the engine
"""
# DB engine
database_url = \
'{dialect}{driver}://{user}:{password}@{host}/{database_name}'.format(
dialect=dialect,
driver=driver,
user=username,
password=password,
host=host,
database_name=dbname,
)
engine = create_db_connection(dialect, driver, username, password, host,
dbname)
if settings.DEBUG:
print('Creating engine with ', database_url)
return engine
def destroy_db_engine(db_engine):
"""
    Method that disposes of the given engine (to guarantee there are no
    connections left open).
:param db_engine: Engine to destroy
:return: Nothing
"""
db_engine.dispose()
def pg_restore_table(filename):
"""
Function that given a file produced with a pg_dump, it uploads its
content to the existing database
:param filename: File in pg_dump format to restore
:return:
"""
process = subprocess.Popen(['psql',
'-d',
settings.DATABASES['default']['NAME'],
'-q',
'-f',
filename])
process.wait()
def delete_all_tables():
"""
Delete all tables related to existing workflows
:return:
"""
cursor = connection.cursor()
table_list = connection.introspection.get_table_list(cursor)
for tinfo in table_list:
if not tinfo.name.startswith(table_prefix):
continue
cursor.execute('DROP TABLE "{0}";'.format(tinfo.name))
# To make sure the table is dropped.
connection.commit()
return
def is_table_in_db(table_name):
cursor = connection.cursor()
return next(
(True for x in connection.introspection.get_table_list(cursor)
if x.name == table_name),
False
)
def is_wf_table_in_db(workflow):
return is_table_in_db(create_table_name(workflow.id))
def create_table_name(pk):
"""
:param pk: Primary Key of a workflow
:return: The unique table name to use to store a workflow data frame
"""
return df_table_prefix.format(pk)
def create_upload_table_name(pk):
"""
:param pk: Primary key of a workflow
:return: The unique table to use to upload a new data frame
"""
return upload_table_prefix.format(pk)
def load_from_db(pk, columns=None, filter_exp=None):
"""
Load the data frame stored for the workflow with the pk
:param pk: Primary key of the workflow
    :param columns: Optional list of columns to load (all if None is given)
:param filter_exp: JSON expression to filter a subset of rows
:return: data frame
"""
return load_table(create_table_name(pk),
columns=columns,
filter_exp=filter_exp)
def load_table(table_name, columns=None, filter_exp=None):
"""
Load a data frame from the SQL DB.
FUTURE WORK:
Consider to store the dataframes in Redis to reduce load/store time.
The trick is to use a compressed format:
SET: redisConn.set("key", df.to_msgpack(compress='zlib'))
GET: pd.read_msgpack(redisConn.get("key"))
Need to agree on a sensible item name that does not collide with anything
else and a policy to detect a cached dataframe and remove it when the data
changes (difficult to detect? Perhaps df_new.equals(df_current))
If feasible, a write-through system could be easily implemented.
    :param table_name: Table name to read from the db into a data frame
    :param columns: Optional list of columns to load (all if None is given)
    :param filter_exp: Optional JSON expression to filter a subset of rows
:return: data_frame or None if it does not exist.
"""
if table_name not in connection.introspection.table_names():
return None
if settings.DEBUG:
print('Loading table ', table_name)
if columns or filter_exp:
# A list of columns or a filter exp is given
query, params = get_filter_query(table_name, columns, filter_exp)
result = pd.read_sql_query(query, engine, params=params)
else:
# No view given, so simply get the whole table
result = pd.read_sql(table_name, engine)
# After reading from the DB, turn all None into NaN
result.fillna(value=np.nan, inplace=True)
return result
def load_query(query):
"""
Load a data frame from the SQL DB running the given query.
:param query: Query to run in the DB
:return: data_frame or None if it does not exist.
"""
if settings.DEBUG:
print('Loading query ', query)
result = pd.read_sql_query(query, engine)
# After reading from the DB, turn all None into NaN
result.fillna(value=np.nan, inplace=True)
return result
def load_df_from_csvfile(file, skiprows=0, skipfooter=0):
"""
Given a file object, try to read the content as a CSV file and transform
into a data frame. The skiprows and skipfooter are number of lines to skip
from the top and bottom of the file (see read_csv in pandas).
It also tries to convert as many columns as possible to date/time format
(testing the conversion on every string column).
    :param file: File object with the CSV content
:param skiprows: Number of lines to skip at the top of the document
:param skipfooter: Number of lines to skip at the bottom of the document
:return: Resulting data frame, or an Exception.
"""
data_frame = pd.read_csv(
file,
index_col=False,
infer_datetime_format=True,
quotechar='"',
skiprows=skiprows,
skipfooter=skipfooter
)
# Strip white space from all string columns and try to convert to
# datetime just in case
for x in list(data_frame.columns):
if data_frame[x].dtype.name == 'object':
# Column is a string! Remove the leading and trailing white
# space
data_frame[x] = data_frame[x].str.strip().fillna(data_frame[x])
# Try the datetime conversion
try:
series = pd.to_datetime(data_frame[x],
infer_datetime_format=True)
# Datetime conversion worked! Update the data_frame
data_frame[x] = series
except (ValueError, TypeError):
pass
return data_frame
def load_df_from_sqlconnection(conn_item, pwd=None):
"""
    Load a DF from a SQL connection opened with the parameters given in conn_item.
    :param conn_item: SQLConnection object with the connection parameters.
    :param pwd: Password to use for the connection
:return: Data frame or raise an exception.
"""
# Get the connection
db_connection = create_db_connection(conn_item.conn_type,
conn_item.conn_driver,
conn_item.db_user,
pwd,
conn_item.db_host,
conn_item.db_name)
# Try to fetch the data
result = pd.read_sql(conn_item.db_table, db_connection)
# After reading from the DB, turn all None into NaN
result.fillna(value=np.nan, inplace=True)
return result
def store_table(data_frame, table_name):
"""
Store a data frame in the DB
:param data_frame: The data frame to store
:param table_name: The name of the table in the DB
:return: Nothing. Side effect in the DB
"""
with cache.lock(table_name):
        # We overwrite the content and do not create an index
data_frame.to_sql(table_name,
engine,
if_exists='replace',
index=False)
return
def delete_table(pk):
"""Delete the table representing the workflow with the given PK. Due to
the dual use of the database, the command has to be executed directly on
the DB.
"""
try:
cursor = connection.cursor()
cursor.execute('DROP TABLE "{0}";'.format(create_table_name(pk)))
connection.commit()
except Exception:
logger.error(
'Error while dropping table {0}'.format(create_table_name(pk))
)
def delete_upload_table(pk):
"""Delete the table used to merge data into the workflow with the given
PK. Due to the dual use of the database, the command has to be executed
directly on the DB.
"""
cursor = connection.cursor()
cursor.execute('DROP TABLE "{0}"'.format(create_upload_table_name(pk)))
connection.commit()
def get_table_column_types(table_name):
"""
:param table_name: Table name
:return: List of pairs (column name, SQL type)
"""
cursor = connection.cursor()
cursor.execute("""select column_name, data_type from
INFORMATION_SCHEMA.COLUMNS where table_name = '{0}'""".format(table_name))
return cursor.fetchall()
def df_column_types_rename(table_name):
"""
    :param table_name: Name of the table storing the data frame
:return: List of data type strings translated to the proper values
"""
column_types = get_table_column_types(table_name)
# result = [table_name[x].dtype.name for x in list(table_name.columns)]
# for tname, ntname in pandas_datatype_names.items():
# result[:] = [x if x != tname else ntname for x in result]
    return [sql_datatype_names[x] for __, x in column_types]
def df_drop_column(pk, column_name):
"""
Drop a column from the DB table storing a data frame
:param pk: Workflow primary key to obtain table name
:param column_name: Column name
:return: Drops the column from the corresponding DB table
"""
query = 'ALTER TABLE "{0}" DROP COLUMN "{1}"'.format(
create_table_name(pk),
column_name
)
cursor = connection.cursor()
cursor.execute(query)
def get_subframe(pk, cond_filter, column_names=None):
"""
Execute a select query to extract a subset of the dataframe and turn the
resulting query set into a data frame.
:param pk: Workflow primary key
:param cond_filter: Condition object to filter the data (or None)
    :param column_names: Optional list of column names to select
    :return: Data frame with the selected rows and columns
"""
# Get the cursor
cursor = get_table_cursor(pk, cond_filter, column_names)
# Create the DataFrame and set the column names
result = pd.DataFrame.from_records(cursor.fetchall(), coerce_float=True)
result.columns = [c.name for c in cursor.description]
return result
def get_table_cursor(pk, cond_filter, column_names=None):
"""
Execute a select query in the database with an optional filter obtained
from the jquery QueryBuilder.
:param pk: Primary key of the workflow storing the data
:param cond_filter: Condition object to filter the data (or None)
:param column_names: optional list of columns to select
:return: ([list of column names], QuerySet with the data rows)
"""
# Create the query
if column_names:
safe_column_names = [fix_pctg_in_name(x) for x in column_names]
query = 'SELECT "{0}" from "{1}"'.format(
'", "'.join(safe_column_names),
create_table_name(pk)
)
else:
query = 'SELECT * from "{0}"'.format(create_table_name(pk))
# See if the action has a filter or not
fields = []
if cond_filter is not None:
cond_filter, fields = evaluate_node_sql(cond_filter.formula)
if cond_filter:
# The condition may be empty, in which case, nothing is needed.
query += ' WHERE ' + cond_filter
# Execute the query
cursor = connection.cursor()
cursor.execute(query, fields)
return cursor
def get_table_data(pk, cond_filter, column_names=None):
# Get first the cursor
cursor = get_table_cursor(pk, cond_filter, column_names)
# Return the data
return cursor.fetchall()
def execute_select_on_table(pk, fields, values, column_names=None):
"""
Execute a select query in the database with an optional filter obtained
from the jquery QueryBuilder.
:param pk: Primary key of the workflow storing the data
:param fields: List of fields to add to the WHERE clause
:param values: parameters to match the previous fields
:param column_names: optional list of columns to select
:return: QuerySet with the data rows
"""
# Create the query
if column_names:
safe_column_names = ['"' + fix_pctg_in_name(x) + '"'
for x in column_names]
query = 'SELECT {0}'.format(','.join(safe_column_names))
else:
query = 'SELECT *'
# Add the table
query += ' FROM "{0}"'.format(create_table_name(pk))
# See if the action has a filter or not
cursor = connection.cursor()
if fields:
query += ' WHERE ' + \
' AND '.join(['"{0}" = %s'.format(fix_pctg_in_name(x))
for x in fields])
cursor.execute(query, values)
else:
# Execute the query
cursor.execute(query)
# Get the data
return cursor.fetchall()
def get_table_queryset(tablename):
query = 'SELECT * from "{0}";'.format(tablename)
try:
cursor = connection.cursor()
cursor.execute(query)
except Exception:
return None
return cursor.fetchall()
def query_to_dicts(query_string, *query_args):
"""
Run a simple query and produce a generator that returns the results as
a bunch of dictionaries with keys for the column values selected.
"""
cursor = connection.cursor()
cursor.execute(query_string, query_args)
col_names = [desc[0] for desc in cursor.description]
while True:
row = cursor.fetchone()
if row is None:
break
row_dict = OrderedDict(izip(col_names, row))
yield row_dict
return
def update_row(pk, set_fields, set_values, where_fields, where_values):
"""
Given a primary key, pairs (set_field, set_value), and pairs (where_field,
where_value), it updates the row in the table selected with the
list of (where field = where value) with the values in the assignments in
the list of (set_fields, set_values)
:param pk: Primary key to detect workflow
:param set_fields: List of field names to be updated
:param set_values: List of values to update the fields of the previous list
:param where_fields: List of fields used to filter the row in the table
:param where_values: List of values of the previous fields to filter the row
:return: The table in the workflow pointed by PK is modified.
"""
# First part of the query with the table name
query = 'UPDATE "{0}"'.format(create_table_name(pk))
# Add the SET field = value clauses
query += ' SET ' + ', '.join(['"{0}" = %s'.format(fix_pctg_in_name(x))
for x in set_fields])
# And finally add the WHERE clause
query += ' WHERE ' + ' AND '.join(['"{0}" = %s'.format(fix_pctg_in_name(x))
for x in where_fields])
# Concatenate the values as parameters to the query
parameters = set_values + where_values
# Execute the query
cursor = connection.cursor()
cursor.execute(query, parameters)
connection.commit()
def increase_row_integer(pk, set_field, where_field, where_value):
"""
Given a primary key, a field set_field, and a pair (where_field,
where_value), it increases the field in the appropriate row
:param pk: Primary key to detect workflow
:param set_field: name of the field to be increased
:param where_field: Field used to filter the row in the table
:param where_value: Value of the previous field to filter the row
:return: The table in the workflow pointed by PK is modified.
"""
# First part of the query with the table name
query = 'UPDATE "{0}" SET "{1}" = "{1}" + 1 WHERE "{2}" = %s'.format(
create_table_name(pk),
set_field,
where_field
)
# Execute the query
cursor = connection.cursor()
cursor.execute(query, [where_value])
connection.commit()
def get_table_row_by_key(workflow, cond_filter, kv_pair, column_names=None):
"""
Select the set of elements after filtering and with the key=value pair
:param workflow: workflow object to get to the table
:param cond_filter: Condition object to filter the data (or None)
    :param kv_pair: A key=value pair to identify the row. Key is supposed to
    be unique.
:param column_names: Optional list of column names to select
:return: A dictionary with the (column_name, value) data or None if the
row has not been found
"""
# Create the query
if column_names:
safe_column_names = [fix_pctg_in_name(x) for x in column_names]
query = 'SELECT "{0}"'.format('", "'.join(safe_column_names))
else:
query = 'SELECT *'
# Add the table
query += ' FROM "{0}"'.format(create_table_name(workflow.id))
# Create the second part of the query setting key=value
query += ' WHERE ("{0}" = %s)'.format(fix_pctg_in_name(kv_pair[0]))
fields = [kv_pair[1]]
# See if the action has a filter or not
if cond_filter is not None:
cond_filter, filter_fields = \
evaluate_node_sql(cond_filter.formula)
query += ' AND (' + cond_filter + ')'
fields = fields + filter_fields
# Execute the query
cursor = connection.cursor()
cursor.execute(query, fields)
# Get the data
qs = cursor.fetchall()
# If there is anything different than one element, return None
if len(qs) != 1:
return None
# Get the only element
qs = qs[0]
# ZIP the values to create a dictionary
return OrderedDict(zip(workflow.get_column_names(), qs))
def get_column_stats_from_df(df_column):
"""
Given a data frame with a single column, return a set of statistics
depending on its type.
:param df_column: data frame with a single column
:return: A dictionary with keys depending on the type of column
    {'min': minimum value (integer, double and datetime),
     'q1': Q1 value (0.25) (integer, double),
     'mean': mean value (integer, double),
     'median': median value (integer, double),
     'q3': Q3 value (0.75) (integer, double),
     'max': maximum value (integer, double and datetime),
     'std': standard deviation (integer, double),
     'counts': value counts (integer, double, string, datetime, Boolean),
     'mode': mode (integer, double, string, datetime, Boolean)},
     or None if the column has all its values set to NaN
"""
if len(df_column.loc[df_column.notnull()]) == 0:
# The column has no data
return None
# Dictionary to return
result = {
'min': 0,
'q1': 0,
'mean': 0,
'median': 0,
'q3': 0,
'max': 0,
'std': 0,
'mode': None,
'counts': {},
}
data_type = pandas_datatype_names[df_column.dtype.name]
if data_type == 'integer' or data_type == 'double':
quantiles = df_column.quantile([0, .25, .5, .75, 1])
result['min'] = '{0:g}'.format(quantiles[0])
result['q1'] = '{0:g}'.format(quantiles[.25])
result['mean'] = '{0:g}'.format(df_column.mean())
result['median'] = '{0:g}'.format(quantiles[.5])
result['q3'] = '{0:g}'.format(quantiles[.75])
result['max'] = '{0:g}'.format(quantiles[1])
result['std'] = '{0:g}'.format(df_column.std())
result['counts'] = df_column.value_counts().to_dict()
    mode = df_column.mode()
    # mode() can return an empty series (e.g. all values NaN); use a placeholder then
    result['mode'] = mode[0] if len(mode) > 0 else '--'
return result
def get_filter_query(table_name, column_names, filter_exp):
"""
Given a set of columns and a filter expression, return a pair of SQL query
and params to be executed
:param table_name: Table to query
:param column_names: list of columns to consider or None to consider all
:param filter_exp: Text filter expression
:return: (sql query, sql params)
"""
# Create the query
if column_names:
safe_column_names = [fix_pctg_in_name(x) for x in column_names]
query = 'SELECT "{0}"'.format('", "'.join(safe_column_names))
else:
query = 'SELECT *'
# Add the table
query += ' FROM "{0}"'.format(table_name)
# Calculate the first suffix to add to the query
filter_txt = ''
filter_fields = []
if filter_exp:
filter_txt, filter_fields = evaluate_node_sql(filter_exp)
# Build the query so far appending the filter and/or the cv_tuples
if filter_txt:
query += ' WHERE '
fields = []
# If there has been a suffix from the filter, add it.
if filter_txt:
query += filter_txt
if filter_fields:
fields.extend(filter_fields)
return (query, fields)
def search_table_rows(workflow_id,
cv_tuples=None,
any_join=True,
order_col_name=None,
order_asc=True,
column_names=None,
pre_filter=None):
"""
    Select rows where, for every (column, value) pair, the column contains the
    value (as in LIKE %value%). The pairs are combined with OR if any_join is
    True and with AND otherwise, and the result is ordered by the given column
    and direction (if given).
:param workflow_id: workflow object to get to the table
:param cv_tuples: A column, value, type tuple to search the value in the
column
:param any_join: Boolean encoding if values should be combined with OR (or
AND)
:param order_col_name: Order results by this column
:param order_asc: Order results in ascending values (or descending)
:param column_names: Optional list of column names to select
:param pre_filter: Optional filter condition to pre filter the query set.
the query is built with these terms as requirement AND the cv_tuples.
:return: The resulting query set
"""
# Create the query
if column_names:
safe_column_names = [fix_pctg_in_name(x) for x in column_names]
query = 'SELECT "{0}"'.format('", "'.join(safe_column_names))
else:
query = 'SELECT *'
# Add the table
query += ' FROM "{0}"'.format(create_table_name(workflow_id))
# Calculate the first suffix to add to the query
filter_txt = ''
filter_fields = []
if pre_filter:
filter_txt, filter_fields = evaluate_node_sql(pre_filter)
if cv_tuples:
likes = []
tuple_fields = []
for name, value, data_type in cv_tuples:
# Make sure we escape the name and search as text
name = fix_pctg_in_name(name)
mod_name = '(CAST("{0}" AS TEXT) LIKE %s)'.format(name)
# Create the second part of the query setting column LIKE '%value%'
likes.append(mod_name)
tuple_fields.append('%' + value + '%')
# Combine the search subqueries
if any_join:
tuple_txt = '(' + ' OR '.join(likes) + ')'
else:
tuple_txt = '(' + ' AND '.join(likes) + ')'
# Build the query so far appending the filter and/or the cv_tuples
if filter_txt or cv_tuples:
query += ' WHERE '
fields = []
# If there has been a suffix from the filter, add it.
if filter_txt:
query += filter_txt
fields.extend(filter_fields)
# If there is a pre-filter, the suffix needs to be "AND" with the ones
# just calculated
if filter_txt and cv_tuples:
query += ' AND '
if cv_tuples:
query += tuple_txt
fields.extend(tuple_fields)
# Add the order if needed
if order_col_name:
query += ' ORDER BY "{0}"'.format(fix_pctg_in_name(order_col_name))
if not order_asc:
query += ' DESC'
# Execute the query
cursor = connection.cursor()
cursor.execute(query, fields)
# Get the data
return cursor.fetchall()
def delete_table_row_by_key(workflow_id, kv_pair):
"""
Delete the row in the table attached to a workflow with the given key,
value pairs
:param workflow_id: workflow object to get to the table
    :param kv_pair: A key=value pair to identify the row. Key is supposed to
    be unique.
:return: Drops that row from the table in the DB
"""
# Create the query
query = 'DELETE FROM "{0}"'.format(create_table_name(workflow_id))
# Create the second part of the query setting key=value
query += ' WHERE ("{0}" = %s)'.format(fix_pctg_in_name(kv_pair[0]))
fields = [kv_pair[1]]
# Execute the query
cursor = connection.cursor()
cursor.execute(query, fields)
def num_rows(pk, cond_filter=None):
"""
Obtain the number of rows of the table storing workflow with given pk
:param pk: Primary key of the table storing the data frame
:param cond_filter: Condition element to filter the query
:return:
"""
return num_rows_by_name(create_table_name(pk), cond_filter)
def num_rows_by_name(table_name, cond_filter=None):
"""
Given a table name, get its number of rows
:param table_name: Table name
:param cond_filter: Condition element used to filter the query
:return: integer
"""
# Initial query with the table name
query = query_count_rows.format(table_name)
fields = []
if cond_filter is not None:
cond_filter, fields = evaluate_node_sql(cond_filter)
query += ' WHERE ' + cond_filter
cursor = connection.cursor()
cursor.execute(query, fields)
return cursor.fetchone()[0]
def check_wf_df(workflow):
"""
Check the consistency between the information stored in the workflow
and the structure of the underlying dataframe
:param workflow: Workflow object
:return: Boolean stating the result of the check. True: Correct.
"""
# Get the df
df = load_from_db(workflow.id)
# Set values in case there is no df
if df is not None:
dfnrows = df.shape[0]
dfncols = df.shape[1]
df_col_names = list(df.columns)
else:
dfnrows = 0
dfncols = 0
df_col_names = []
# Check 1: Number of rows and columns
if workflow.nrows != dfnrows:
return False
if workflow.ncols != dfncols:
return False
# Identical sets of columns
wf_cols = workflow.columns.all()
if [x.name for x in wf_cols] != df_col_names:
return False
# Identical data types
for n1, n2 in zip(wf_cols, df_col_names):
df_dt = pandas_datatype_names[df[n2].dtype.name]
if n1.data_type == 'boolean' and df_dt == 'string':
# This is the case of a column with Boolean and Nulls
continue
if n1.data_type != df_dt:
return False
return True
| [
"logging.getLogger",
"django.db.connection.commit",
"pandas.read_sql_query",
"ontask.fix_pctg_in_name",
"django.db.connection.introspection.table_names",
"pandas.read_csv",
"subprocess.Popen",
"sqlalchemy.create_engine",
"django.db.connection.cursor",
"itertools.izip",
"django.db.connection.introspection.get_table_list",
"pandas.read_sql",
"django.core.cache.cache.lock",
"dataops.formula_evaluation.evaluate_node_sql",
"pandas.to_datetime"
]
| [((777, 804), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (794, 804), False, 'import logging\n'), ((2261, 2321), 'sqlalchemy.create_engine', 'create_engine', (['database_url'], {'echo': '(False)', 'paramstyle': '"""format"""'}), "(database_url, echo=False, paramstyle='format')\n", (2274, 2321), False, 'from sqlalchemy import create_engine\n'), ((3869, 3966), 'subprocess.Popen', 'subprocess.Popen', (["['psql', '-d', settings.DATABASES['default']['NAME'], '-q', '-f', filename]"], {}), "(['psql', '-d', settings.DATABASES['default']['NAME'], '-q',\n '-f', filename])\n", (3885, 3966), False, 'import subprocess\n'), ((4264, 4283), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (4281, 4283), False, 'from django.db import connection\n'), ((4301, 4348), 'django.db.connection.introspection.get_table_list', 'connection.introspection.get_table_list', (['cursor'], {}), '(cursor)\n', (4340, 4348), False, 'from django.db import connection\n'), ((4560, 4579), 'django.db.connection.commit', 'connection.commit', ([], {}), '()\n', (4577, 4579), False, 'from django.db import connection\n'), ((4638, 4657), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (4655, 4657), False, 'from django.db import connection\n'), ((7501, 7533), 'pandas.read_sql_query', 'pd.read_sql_query', (['query', 'engine'], {}), '(query, engine)\n', (7518, 7533), True, 'import pandas as pd\n'), ((8360, 8484), 'pandas.read_csv', 'pd.read_csv', (['file'], {'index_col': '(False)', 'infer_datetime_format': '(True)', 'quotechar': '"""\\""""', 'skiprows': 'skiprows', 'skipfooter': 'skipfooter'}), '(file, index_col=False, infer_datetime_format=True, quotechar=\n \'"\', skiprows=skiprows, skipfooter=skipfooter)\n', (8371, 8484), True, 'import pandas as pd\n'), ((9961, 10007), 'pandas.read_sql', 'pd.read_sql', (['conn_item.db_table', 'db_connection'], {}), '(conn_item.db_table, db_connection)\n', (9972, 10007), True, 'import pandas as pd\n'), ((11334, 11353), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (11351, 11353), False, 'from django.db import connection\n'), ((11434, 11453), 'django.db.connection.commit', 'connection.commit', ([], {}), '()\n', (11451, 11453), False, 'from django.db import connection\n'), ((11610, 11629), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (11627, 11629), False, 'from django.db import connection\n'), ((12767, 12786), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (12784, 12786), False, 'from django.db import connection\n'), ((14613, 14632), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (14630, 14632), False, 'from django.db import connection\n'), ((15758, 15777), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (15775, 15777), False, 'from django.db import connection\n'), ((16561, 16580), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (16578, 16580), False, 'from django.db import connection\n'), ((18222, 18241), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (18239, 18241), False, 'from django.db import connection\n'), ((18284, 18303), 'django.db.connection.commit', 'connection.commit', ([], {}), '()\n', (18301, 18303), False, 'from django.db import connection\n'), ((19066, 19085), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (19083, 19085), False, 'from django.db import connection\n'), ((19131, 19150), 'django.db.connection.commit', 
'connection.commit', ([], {}), '()\n', (19148, 19150), False, 'from django.db import connection\n'), ((20477, 20496), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (20494, 20496), False, 'from django.db import connection\n'), ((26992, 27011), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (27009, 27011), False, 'from django.db import connection\n'), ((27759, 27778), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (27776, 27778), False, 'from django.db import connection\n'), ((28636, 28655), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (28653, 28655), False, 'from django.db import connection\n'), ((6638, 6676), 'django.db.connection.introspection.table_names', 'connection.introspection.table_names', ([], {}), '()\n', (6674, 6676), False, 'from django.db import connection\n'), ((6941, 6988), 'pandas.read_sql_query', 'pd.read_sql_query', (['query', 'engine'], {'params': 'params'}), '(query, engine, params=params)\n', (6958, 6988), True, 'import pandas as pd\n'), ((7071, 7102), 'pandas.read_sql', 'pd.read_sql', (['table_name', 'engine'], {}), '(table_name, engine)\n', (7082, 7102), True, 'import pandas as pd\n'), ((10377, 10399), 'django.core.cache.cache.lock', 'cache.lock', (['table_name'], {}), '(table_name)\n', (10387, 10399), False, 'from django.core.cache import cache\n'), ((10855, 10874), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (10872, 10874), False, 'from django.db import connection\n'), ((10957, 10976), 'django.db.connection.commit', 'connection.commit', ([], {}), '()\n', (10974, 10976), False, 'from django.db import connection\n'), ((14391, 14429), 'dataops.formula_evaluation.evaluate_node_sql', 'evaluate_node_sql', (['cond_filter.formula'], {}), '(cond_filter.formula)\n', (14408, 14429), False, 'from dataops.formula_evaluation import evaluate_node_sql\n'), ((16216, 16235), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (16233, 16235), False, 'from django.db import connection\n'), ((20130, 20158), 'ontask.fix_pctg_in_name', 'fix_pctg_in_name', (['kv_pair[0]'], {}), '(kv_pair[0])\n', (20146, 20158), False, 'from ontask import fix_pctg_in_name\n'), ((20314, 20352), 'dataops.formula_evaluation.evaluate_node_sql', 'evaluate_node_sql', (['cond_filter.formula'], {}), '(cond_filter.formula)\n', (20331, 20352), False, 'from dataops.formula_evaluation import evaluate_node_sql\n'), ((23600, 23629), 'dataops.formula_evaluation.evaluate_node_sql', 'evaluate_node_sql', (['filter_exp'], {}), '(filter_exp)\n', (23617, 23629), False, 'from dataops.formula_evaluation import evaluate_node_sql\n'), ((25580, 25609), 'dataops.formula_evaluation.evaluate_node_sql', 'evaluate_node_sql', (['pre_filter'], {}), '(pre_filter)\n', (25597, 25609), False, 'from dataops.formula_evaluation import evaluate_node_sql\n'), ((27665, 27693), 'ontask.fix_pctg_in_name', 'fix_pctg_in_name', (['kv_pair[0]'], {}), '(kv_pair[0])\n', (27681, 27693), False, 'from ontask import fix_pctg_in_name\n'), ((28550, 28580), 'dataops.formula_evaluation.evaluate_node_sql', 'evaluate_node_sql', (['cond_filter'], {}), '(cond_filter)\n', (28567, 28580), False, 'from dataops.formula_evaluation import evaluate_node_sql\n'), ((14009, 14028), 'ontask.fix_pctg_in_name', 'fix_pctg_in_name', (['x'], {}), '(x)\n', (14025, 14028), False, 'from ontask import fix_pctg_in_name\n'), ((16804, 16824), 'itertools.izip', 'izip', (['col_names', 'row'], {}), '(col_names, row)\n', (16808, 16824), False, 'from itertools 
import izip\n'), ((19790, 19809), 'ontask.fix_pctg_in_name', 'fix_pctg_in_name', (['x'], {}), '(x)\n', (19806, 19809), False, 'from ontask import fix_pctg_in_name\n'), ((23231, 23250), 'ontask.fix_pctg_in_name', 'fix_pctg_in_name', (['x'], {}), '(x)\n', (23247, 23250), False, 'from ontask import fix_pctg_in_name\n'), ((25191, 25210), 'ontask.fix_pctg_in_name', 'fix_pctg_in_name', (['x'], {}), '(x)\n', (25207, 25210), False, 'from ontask import fix_pctg_in_name\n'), ((25804, 25826), 'ontask.fix_pctg_in_name', 'fix_pctg_in_name', (['name'], {}), '(name)\n', (25820, 25826), False, 'from ontask import fix_pctg_in_name\n'), ((26873, 26905), 'ontask.fix_pctg_in_name', 'fix_pctg_in_name', (['order_col_name'], {}), '(order_col_name)\n', (26889, 26905), False, 'from ontask import fix_pctg_in_name\n'), ((4698, 4745), 'django.db.connection.introspection.get_table_list', 'connection.introspection.get_table_list', (['cursor'], {}), '(cursor)\n', (4737, 4745), False, 'from django.db import connection\n'), ((8974, 9031), 'pandas.to_datetime', 'pd.to_datetime', (['data_frame[x]'], {'infer_datetime_format': '(True)'}), '(data_frame[x], infer_datetime_format=True)\n', (8988, 9031), True, 'import pandas as pd\n'), ((15442, 15461), 'ontask.fix_pctg_in_name', 'fix_pctg_in_name', (['x'], {}), '(x)\n', (15458, 15461), False, 'from ontask import fix_pctg_in_name\n'), ((17825, 17844), 'ontask.fix_pctg_in_name', 'fix_pctg_in_name', (['x'], {}), '(x)\n', (17841, 17844), False, 'from ontask import fix_pctg_in_name\n'), ((18000, 18019), 'ontask.fix_pctg_in_name', 'fix_pctg_in_name', (['x'], {}), '(x)\n', (18016, 18019), False, 'from ontask import fix_pctg_in_name\n'), ((15875, 15894), 'ontask.fix_pctg_in_name', 'fix_pctg_in_name', (['x'], {}), '(x)\n', (15891, 15894), False, 'from ontask import fix_pctg_in_name\n')] |
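A hedged usage sketch of the data-access module above: it assumes a configured Django/SQLAlchemy setup (engine already created), an existing workflow with primary key 1, and that the file above lives at dataops/pandas_db.py; none of these assumptions comes from the record itself.

# Illustrative only: module path, workflow pk and CSV contents are assumptions.
import io

from dataops import pandas_db  # assumed location of the module above

csv_payload = io.StringIO(u'sid,grade\n1,7.5\n2,9.0\n')
df = pandas_db.load_df_from_csvfile(csv_payload)

# Store the frame under the workflow's table name, then read a subset back.
pandas_db.store_table(df, pandas_db.create_table_name(1))
subset = pandas_db.load_from_db(1, columns=['sid'])
print(pandas_db.num_rows(1), subset.shape)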
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import ast
import rhinoscriptsyntax as rs
__all__ = [
'mesh_select_vertex',
'mesh_select_vertices',
'mesh_select_face',
'mesh_select_faces',
'mesh_select_edge',
'mesh_select_edges',
'network_select_node',
'network_select_nodes',
'network_select_edge',
'network_select_edges',
]
def mesh_select_vertex(mesh, message="Select a vertex."):
"""Select a single vertex of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
int or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.point | rs.filter.textdot)
if guid:
prefix = mesh.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'vertex' in name:
if not prefix or prefix in name:
key = name[-1]
return ast.literal_eval(key)
return None
def mesh_select_vertices(mesh, message="Select vertices."):
"""Select multiple vertices of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
list of int
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.point | rs.filter.textdot)
if guids:
prefix = mesh.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'vertex' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
key = ast.literal_eval(key)
keys.append(key)
return keys
def mesh_select_face(mesh, message="Select a face."):
"""Select a single face of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
int or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.mesh | rs.filter.textdot)
if guid:
prefix = mesh.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'face' in name:
if not prefix or prefix in name:
key = name[-1]
key = ast.literal_eval(key)
return key
return None
def mesh_select_faces(mesh, message="Select faces."):
"""Select multiple faces of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
list of int
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.mesh | rs.filter.textdot)
if guids:
prefix = mesh.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'face' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
key = ast.literal_eval(key)
keys.append(key)
return keys
def mesh_select_edge(mesh, message="Select an edge."):
"""Select a single edge of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
tuple of int, or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.curve | rs.filter.textdot)
if guid:
prefix = mesh.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'edge' in name:
if not prefix or prefix in name:
key = name[-1]
u, v = key.split('-')
u = ast.literal_eval(u)
v = ast.literal_eval(v)
return u, v
return None
def mesh_select_edges(mesh, message="Select edges."):
"""Select multiple edges of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
list of tuple of int
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.curve | rs.filter.textdot)
if guids:
prefix = mesh.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'edge' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
u, v = key.split('-')
u = ast.literal_eval(u)
v = ast.literal_eval(v)
keys.append((u, v))
return keys
def network_select_node(network, message="Select a node."):
"""Select a single node of a network.
Parameters
----------
network: :class:`compas.datastructures.Network`
message: str, optional
Returns
-------
hashable or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.point | rs.filter.textdot)
if guid:
prefix = network.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'node' in name:
if not prefix or prefix in name:
key = name[-1]
return ast.literal_eval(key)
return None
def network_select_nodes(network, message="Select nodes."):
"""Select multiple nodes of a network.
Parameters
----------
network: :class:`compas.datastructures.Network`
message: str, optional
Returns
-------
list of hashable
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.point | rs.filter.textdot)
if guids:
prefix = network.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'node' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
key = ast.literal_eval(key)
keys.append(key)
return keys
def network_select_edge(network, message="Select an edge."):
"""Select a single edge of a network.
Parameters
----------
network: :class:`compas.datastructures.Network`
message: str, optional
Returns
-------
tuple of hashable, or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.curve | rs.filter.textdot)
if guid:
prefix = network.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'edge' in name:
if not prefix or prefix in name:
key = name[-1]
u, v = key.split('-')
u = ast.literal_eval(u)
v = ast.literal_eval(v)
return u, v
return None
def network_select_edges(network, message="Select edges."):
"""Select multiple edges of a network.
Parameters
----------
network: :class:`compas.datastructures.Network`
message: str, optional
Returns
-------
list of tuple of hashable
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.curve | rs.filter.textdot)
if guids:
prefix = network.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'edge' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
u, v = key.split('-')
u = ast.literal_eval(u)
v = ast.literal_eval(v)
keys.append((u, v))
return keys
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
pass
| [
"rhinoscriptsyntax.GetObject",
"ast.literal_eval",
"rhinoscriptsyntax.GetObjects",
"rhinoscriptsyntax.ObjectName"
]
| [((695, 781), 'rhinoscriptsyntax.GetObject', 'rs.GetObject', (['message'], {'preselect': '(True)', 'filter': '(rs.filter.point | rs.filter.textdot)'}), '(message, preselect=True, filter=rs.filter.point | rs.filter.\n textdot)\n', (707, 781), True, 'import rhinoscriptsyntax as rs\n'), ((1327, 1414), 'rhinoscriptsyntax.GetObjects', 'rs.GetObjects', (['message'], {'preselect': '(True)', 'filter': '(rs.filter.point | rs.filter.textdot)'}), '(message, preselect=True, filter=rs.filter.point | rs.filter.\n textdot)\n', (1340, 1414), True, 'import rhinoscriptsyntax as rs\n'), ((2090, 2175), 'rhinoscriptsyntax.GetObject', 'rs.GetObject', (['message'], {'preselect': '(True)', 'filter': '(rs.filter.mesh | rs.filter.textdot)'}), '(message, preselect=True, filter=rs.filter.mesh | rs.filter.textdot\n )\n', (2102, 2175), True, 'import rhinoscriptsyntax as rs\n'), ((2736, 2822), 'rhinoscriptsyntax.GetObjects', 'rs.GetObjects', (['message'], {'preselect': '(True)', 'filter': '(rs.filter.mesh | rs.filter.textdot)'}), '(message, preselect=True, filter=rs.filter.mesh | rs.filter.\n textdot)\n', (2749, 2822), True, 'import rhinoscriptsyntax as rs\n'), ((3507, 3593), 'rhinoscriptsyntax.GetObject', 'rs.GetObject', (['message'], {'preselect': '(True)', 'filter': '(rs.filter.curve | rs.filter.textdot)'}), '(message, preselect=True, filter=rs.filter.curve | rs.filter.\n textdot)\n', (3519, 3593), True, 'import rhinoscriptsyntax as rs\n'), ((4238, 4325), 'rhinoscriptsyntax.GetObjects', 'rs.GetObjects', (['message'], {'preselect': '(True)', 'filter': '(rs.filter.curve | rs.filter.textdot)'}), '(message, preselect=True, filter=rs.filter.curve | rs.filter.\n textdot)\n', (4251, 4325), True, 'import rhinoscriptsyntax as rs\n'), ((5112, 5198), 'rhinoscriptsyntax.GetObject', 'rs.GetObject', (['message'], {'preselect': '(True)', 'filter': '(rs.filter.point | rs.filter.textdot)'}), '(message, preselect=True, filter=rs.filter.point | rs.filter.\n textdot)\n', (5124, 5198), True, 'import rhinoscriptsyntax as rs\n'), ((5756, 5843), 'rhinoscriptsyntax.GetObjects', 'rs.GetObjects', (['message'], {'preselect': '(True)', 'filter': '(rs.filter.point | rs.filter.textdot)'}), '(message, preselect=True, filter=rs.filter.point | rs.filter.\n textdot)\n', (5769, 5843), True, 'import rhinoscriptsyntax as rs\n'), ((6551, 6637), 'rhinoscriptsyntax.GetObject', 'rs.GetObject', (['message'], {'preselect': '(True)', 'filter': '(rs.filter.curve | rs.filter.textdot)'}), '(message, preselect=True, filter=rs.filter.curve | rs.filter.\n textdot)\n', (6563, 6637), True, 'import rhinoscriptsyntax as rs\n'), ((7305, 7392), 'rhinoscriptsyntax.GetObjects', 'rs.GetObjects', (['message'], {'preselect': '(True)', 'filter': '(rs.filter.curve | rs.filter.textdot)'}), '(message, preselect=True, filter=rs.filter.curve | rs.filter.\n textdot)\n', (7318, 7392), True, 'import rhinoscriptsyntax as rs\n'), ((846, 865), 'rhinoscriptsyntax.ObjectName', 'rs.ObjectName', (['guid'], {}), '(guid)\n', (859, 865), True, 'import rhinoscriptsyntax as rs\n'), ((1005, 1026), 'ast.literal_eval', 'ast.literal_eval', (['key'], {}), '(key)\n', (1021, 1026), False, 'import ast\n'), ((2240, 2259), 'rhinoscriptsyntax.ObjectName', 'rs.ObjectName', (['guid'], {}), '(guid)\n', (2253, 2259), True, 'import rhinoscriptsyntax as rs\n'), ((2396, 2417), 'ast.literal_eval', 'ast.literal_eval', (['key'], {}), '(key)\n', (2412, 2417), False, 'import ast\n'), ((3658, 3677), 'rhinoscriptsyntax.ObjectName', 'rs.ObjectName', (['guid'], {}), '(guid)\n', (3671, 3677), True, 'import rhinoscriptsyntax 
as rs\n'), ((3850, 3869), 'ast.literal_eval', 'ast.literal_eval', (['u'], {}), '(u)\n', (3866, 3869), False, 'import ast\n'), ((3890, 3909), 'ast.literal_eval', 'ast.literal_eval', (['v'], {}), '(v)\n', (3906, 3909), False, 'import ast\n'), ((5266, 5285), 'rhinoscriptsyntax.ObjectName', 'rs.ObjectName', (['guid'], {}), '(guid)\n', (5279, 5285), True, 'import rhinoscriptsyntax as rs\n'), ((5423, 5444), 'ast.literal_eval', 'ast.literal_eval', (['key'], {}), '(key)\n', (5439, 5444), False, 'import ast\n'), ((6705, 6724), 'rhinoscriptsyntax.ObjectName', 'rs.ObjectName', (['guid'], {}), '(guid)\n', (6718, 6724), True, 'import rhinoscriptsyntax as rs\n'), ((6897, 6916), 'ast.literal_eval', 'ast.literal_eval', (['u'], {}), '(u)\n', (6913, 6916), False, 'import ast\n'), ((6937, 6956), 'ast.literal_eval', 'ast.literal_eval', (['v'], {}), '(v)\n', (6953, 6956), False, 'import ast\n'), ((1532, 1551), 'rhinoscriptsyntax.ObjectName', 'rs.ObjectName', (['guid'], {}), '(guid)\n', (1545, 1551), True, 'import rhinoscriptsyntax as rs\n'), ((2940, 2959), 'rhinoscriptsyntax.ObjectName', 'rs.ObjectName', (['guid'], {}), '(guid)\n', (2953, 2959), True, 'import rhinoscriptsyntax as rs\n'), ((4443, 4462), 'rhinoscriptsyntax.ObjectName', 'rs.ObjectName', (['guid'], {}), '(guid)\n', (4456, 4462), True, 'import rhinoscriptsyntax as rs\n'), ((5964, 5983), 'rhinoscriptsyntax.ObjectName', 'rs.ObjectName', (['guid'], {}), '(guid)\n', (5977, 5983), True, 'import rhinoscriptsyntax as rs\n'), ((7513, 7532), 'rhinoscriptsyntax.ObjectName', 'rs.ObjectName', (['guid'], {}), '(guid)\n', (7526, 7532), True, 'import rhinoscriptsyntax as rs\n'), ((1752, 1773), 'ast.literal_eval', 'ast.literal_eval', (['key'], {}), '(key)\n', (1768, 1773), False, 'import ast\n'), ((3158, 3179), 'ast.literal_eval', 'ast.literal_eval', (['key'], {}), '(key)\n', (3174, 3179), False, 'import ast\n'), ((4705, 4724), 'ast.literal_eval', 'ast.literal_eval', (['u'], {}), '(u)\n', (4721, 4724), False, 'import ast\n'), ((4753, 4772), 'ast.literal_eval', 'ast.literal_eval', (['v'], {}), '(v)\n', (4769, 4772), False, 'import ast\n'), ((6182, 6203), 'ast.literal_eval', 'ast.literal_eval', (['key'], {}), '(key)\n', (6198, 6203), False, 'import ast\n'), ((7775, 7794), 'ast.literal_eval', 'ast.literal_eval', (['u'], {}), '(u)\n', (7791, 7794), False, 'import ast\n'), ((7823, 7842), 'ast.literal_eval', 'ast.literal_eval', (['v'], {}), '(v)\n', (7839, 7842), False, 'import ast\n')] |
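A hypothetical usage sketch for the selection helpers above: they only work inside Rhino (IronPython) and rely on drawn objects carrying names such as "<meshname>.vertex.<key>", which is the convention the code parses with rs.ObjectName. The module name used for the import is an assumption.

from compas.datastructures import Mesh

import selection  # assumed module name for the code above

mesh = Mesh.from_obj('faces.obj')  # placeholder; normally the mesh already drawn in the scene
keys = selection.mesh_select_vertices(mesh, message='Pick some vertices.')
print(keys)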
from telegram import Update
from telegram.ext import Updater, CallbackContext, ConversationHandler, CommandHandler, MessageHandler, Filters
from db import DBConnector
import re
str_matcher = r"\"(?P<name>.+)\"\s*(?P<fat>\d+)\s*/\s*(?P<protein>\d+)\s*/\s*(?P<carbohydrates>\d+)\s*(?P<kcal>\d+)"
ADD_1 = 0
def add_0(update: Update, _: CallbackContext):
update.message.reply_text('Enter new product in format\n'
'"name" fat/protein/carbohydrates kcal')
return ADD_1
def add_1(update: Update, context: CallbackContext):
db_connect: DBConnector = context.bot_data['db_connect']
result = re.match(str_matcher, update.message.text)
if result:
db_connect.products.insert(result.groupdict())
update.message.reply_text('Product was added')
else:
update.message.reply_text('Message have wrong format')
return ConversationHandler.END
def add_handler(updater: Updater):
"""/product_add - Add product to list known products"""
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('product_add', add_0)],
states={
ADD_1: [MessageHandler(Filters.text & ~Filters.command, add_1)]
},
fallbacks=[]
))
| [
"telegram.ext.MessageHandler",
"re.match",
"telegram.ext.CommandHandler"
]
| [((636, 678), 're.match', 're.match', (['str_matcher', 'update.message.text'], {}), '(str_matcher, update.message.text)\n', (644, 678), False, 'import re\n'), ((1087, 1123), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""product_add"""', 'add_0'], {}), "('product_add', add_0)\n", (1101, 1123), False, 'from telegram.ext import Updater, CallbackContext, ConversationHandler, CommandHandler, MessageHandler, Filters\n'), ((1163, 1217), 'telegram.ext.MessageHandler', 'MessageHandler', (['(Filters.text & ~Filters.command)', 'add_1'], {}), '(Filters.text & ~Filters.command, add_1)\n', (1177, 1217), False, 'from telegram.ext import Updater, CallbackContext, ConversationHandler, CommandHandler, MessageHandler, Filters\n')] |
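A hedged sketch of wiring the conversation handler above into a running bot, using the python-telegram-bot v13-style API that the imports suggest; the token, the module name and the DBConnector construction are placeholders.

from telegram.ext import Updater

from db import DBConnector
import product_add  # assumed name of the module above

updater = Updater('TOKEN', use_context=True)
updater.dispatcher.bot_data['db_connect'] = DBConnector()  # constructor arguments unknown; placeholder
product_add.add_handler(updater)
updater.start_polling()
updater.idle()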
import os
from flask import Blueprint, render_template
def create_bp():
bp_red = Blueprint('red', __name__, url_prefix='/red')
@bp_red.route('/index/')
@bp_red.route('/')
def index():
return render_template('red/index.html')
return bp_red | [
"flask.render_template",
"flask.Blueprint"
]
| [((86, 131), 'flask.Blueprint', 'Blueprint', (['"""red"""', '__name__'], {'url_prefix': '"""/red"""'}), "('red', __name__, url_prefix='/red')\n", (95, 131), False, 'from flask import Blueprint, render_template\n'), ((217, 250), 'flask.render_template', 'render_template', (['"""red/index.html"""'], {}), "('red/index.html')\n", (232, 250), False, 'from flask import Blueprint, render_template\n')] |
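A minimal app showing how the blueprint factory above might be used; the module name "red" for the defining file is an assumption.

from flask import Flask

from red import create_bp  # assumed import path

app = Flask(__name__)
app.register_blueprint(create_bp())

if __name__ == '__main__':
    app.run(debug=True)  # /red/ and /red/index/ both render templates/red/index.html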
# SQL output is imported as a pandas dataframe variable called "df"
# Source: https://stackoverflow.com/questions/19441730/trimmed-mean-with-percentage-limit-in-python
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import tmean, scoreatpercentile
import numpy as np
def trimmean(arr, percent):
lower_limit = scoreatpercentile(arr, percent)
upper_limit = scoreatpercentile(arr, 100-percent)
return tmean(arr, limits=(lower_limit, upper_limit), inclusive=(False, False))
my_result = trimmean(df["amt_paid"].values,10) | [
"scipy.stats.tmean",
"scipy.stats.scoreatpercentile"
]
| [((337, 368), 'scipy.stats.scoreatpercentile', 'scoreatpercentile', (['arr', 'percent'], {}), '(arr, percent)\n', (354, 368), False, 'from scipy.stats import tmean, scoreatpercentile\n'), ((387, 424), 'scipy.stats.scoreatpercentile', 'scoreatpercentile', (['arr', '(100 - percent)'], {}), '(arr, 100 - percent)\n', (404, 424), False, 'from scipy.stats import tmean, scoreatpercentile\n'), ((434, 505), 'scipy.stats.tmean', 'tmean', (['arr'], {'limits': '(lower_limit, upper_limit)', 'inclusive': '(False, False)'}), '(arr, limits=(lower_limit, upper_limit), inclusive=(False, False))\n', (439, 505), False, 'from scipy.stats import tmean, scoreatpercentile\n')] |
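A quick check of the percentile-limited trimmed mean above on synthetic data, assuming the trimmean function is in scope; scipy's own scipy.stats.trim_mean, which cuts a proportion from each tail, is shown only for comparison.

import numpy as np
from scipy.stats import trim_mean

arr = np.concatenate([np.arange(1, 100), [10000]])  # one large outlier
print(trimmean(arr, 10))     # percentile-limited trimmed mean; the outlier is excluded
print(trim_mean(arr, 0.10))  # proportion-based trimming, for reference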
import os
import cv2
import random
import numpy as np
from tensorflow.keras.utils import to_categorical
from scripts.consts import class_dict
def get_data(path, split=0.2):
X, y = [], []
for directory in os.listdir(path):
dirpath = os.path.join(path, directory)
print(directory, len(os.listdir(dirpath)))
for file in os.listdir(dirpath):
filepath = os.path.join(dirpath, file)
img = cv2.imread(filepath, cv2.IMREAD_UNCHANGED)
if img.shape != (360, 363, 3):
                img = cv2.resize(img, (363, 360), interpolation=cv2.INTER_CUBIC)  # dsize is (width, height)
X.append(img)
y.append(class_dict[directory])
data = list(zip(X, y))
random.shuffle(data)
X, y = zip(*data)
num_train = int((1.0 - split) * len(y))
X_train, X_valid = np.array(X[:num_train]).astype(
'float32'), np.array(X[num_train:]).astype('float32')
y_train, y_valid = np.array(
y[:num_train]).reshape(-1, 1), np.array(y[num_train:]).reshape((-1, 1))
X_train = X_train / 255.0
X_valid = X_valid / 255.0
y_train, y_valid = to_categorical(y_train), to_categorical(y_valid)
print(X_train.shape, y_train.shape)
print(X_valid.shape, y_valid.shape)
return X_train, y_train, X_valid, y_valid
| [
"tensorflow.keras.utils.to_categorical",
"os.listdir",
"random.shuffle",
"os.path.join",
"numpy.array",
"cv2.resize",
"cv2.imread"
]
| [((216, 232), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (226, 232), False, 'import os\n'), ((716, 736), 'random.shuffle', 'random.shuffle', (['data'], {}), '(data)\n', (730, 736), False, 'import random\n'), ((253, 282), 'os.path.join', 'os.path.join', (['path', 'directory'], {}), '(path, directory)\n', (265, 282), False, 'import os\n'), ((355, 374), 'os.listdir', 'os.listdir', (['dirpath'], {}), '(dirpath)\n', (365, 374), False, 'import os\n'), ((1121, 1144), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['y_train'], {}), '(y_train)\n', (1135, 1144), False, 'from tensorflow.keras.utils import to_categorical\n'), ((1146, 1169), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['y_valid'], {}), '(y_valid)\n', (1160, 1169), False, 'from tensorflow.keras.utils import to_categorical\n'), ((400, 427), 'os.path.join', 'os.path.join', (['dirpath', 'file'], {}), '(dirpath, file)\n', (412, 427), False, 'import os\n'), ((446, 488), 'cv2.imread', 'cv2.imread', (['filepath', 'cv2.IMREAD_UNCHANGED'], {}), '(filepath, cv2.IMREAD_UNCHANGED)\n', (456, 488), False, 'import cv2\n'), ((312, 331), 'os.listdir', 'os.listdir', (['dirpath'], {}), '(dirpath)\n', (322, 331), False, 'import os\n'), ((567, 611), 'cv2.resize', 'cv2.resize', (['img', '(360, 363)', 'cv2.INTER_CUBIC'], {}), '(img, (360, 363), cv2.INTER_CUBIC)\n', (577, 611), False, 'import cv2\n'), ((829, 852), 'numpy.array', 'np.array', (['X[:num_train]'], {}), '(X[:num_train])\n', (837, 852), True, 'import numpy as np\n'), ((881, 904), 'numpy.array', 'np.array', (['X[num_train:]'], {}), '(X[num_train:])\n', (889, 904), True, 'import numpy as np\n'), ((946, 969), 'numpy.array', 'np.array', (['y[:num_train]'], {}), '(y[:num_train])\n', (954, 969), True, 'import numpy as np\n'), ((995, 1018), 'numpy.array', 'np.array', (['y[num_train:]'], {}), '(y[num_train:])\n', (1003, 1018), True, 'import numpy as np\n')] |
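A hedged sketch of how the loader above might be consumed; the data directory, the module path and the tiny Keras model are illustrative assumptions, not taken from the source.

from scripts.data import get_data  # assumed module name for the loader above
import tensorflow as tf

X_train, y_train, X_valid, y_valid = get_data('data/images', split=0.2)

# A deliberately tiny model, just to show the expected tensor shapes.
model = tf.keras.Sequential([
    tf.keras.layers.GlobalAveragePooling2D(input_shape=X_train.shape[1:]),
    tf.keras.layers.Dense(y_train.shape[1], activation='softmax'),
])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, validation_data=(X_valid, y_valid), epochs=1)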
import setuptools
long_description = """
# Coldtype
### Programmatic display typography
More info available at: [coldtype.goodhertz.com](https://coldtype.goodhertz.com)
"""
setuptools.setup(
name="coldtype",
version="0.6.6",
author="<NAME> / Goodhertz",
author_email="<EMAIL>",
description="Functions for manual vectorized typesetting",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/goodhertz/coldtype",
#package_dir={"": "coldtype"},
packages=[
"coldtype",
"coldtype.sh",
"coldtype.fx",
"coldtype.img",
"coldtype.time",
"coldtype.midi",
"coldtype.pens",
"coldtype.text",
"coldtype.grid",
"coldtype.color",
"coldtype.capture",
"coldtype.blender",
"coldtype.geometry",
"coldtype.time.nle",
"coldtype.renderer",
"coldtype.webserver",
"coldtype.renderable",
"coldtype.fontgoggles",
"coldtype.interpolation",
"coldtype.renderer.winman",
"coldtype.fontgoggles.font",
"coldtype.fontgoggles.misc",
"coldtype.fontgoggles.compile",
],
include_package_data=True,
package_data={
"": [
"webserver/webviewer.html",
"demo/RecMono-CasualItalic.ttf",
"demo/ColdtypeObviously-VF.ttf",
"demo/MutatorSans.ttf",
"demo/demo.py",
"demo/midi.py",
"demo/blank.py",
"demo/boiler.py",
"renderer/picklejar.py",
"renderer/.coldtype.py"
],
},
entry_points={
'console_scripts': [
'coldtype = coldtype.renderer:main'
],
},
extras_require={
"skia": [
"skia-python>=86.0",
],
"viewer": [
"glfw",
"PyOpenGL",
"PyOpenGL-accelerate",
"skia-python>=86.0",
"skia-pathops", # can this be taken from skia-python?
"SimpleWebSocketServer",
"watchdog<2.0.0", # https://github.com/gorakhargosh/watchdog/issues/702
"noise",
"ufo2ft",
"numpy",
],
"webviewer": [
"SimpleWebSocketServer",
"watchdog<2.0.0", # https://github.com/gorakhargosh/watchdog/issues/702
],
"experimental": [
"pynput",
"rtmidi",
"noise",
],
"c": [
"srt",
"noise",
],
"unicode": [
"unicodedata2"
],
"blender": [
"skia-pathops"
],
"notebook": [
"skia-pathops",
"skia-python",
]
},
install_requires=[
"lxml",
"fonttools[ufo]",
"fontPens",
"fontParts",
"more-itertools",
"easing-functions",
"timecode",
"mido",
"defcon",
"freetype-py",
"uharfbuzz>=0.14.0",
"python-bidi"
],
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
)
| [
"setuptools.setup"
]
| [((177, 2154), 'setuptools.setup', 'setuptools.setup', ([], {'name': '"""coldtype"""', 'version': '"""0.6.6"""', 'author': '"""<NAME> / Goodhertz"""', 'author_email': '"""<EMAIL>"""', 'description': '"""Functions for manual vectorized typesetting"""', 'long_description': 'long_description', 'long_description_content_type': '"""text/markdown"""', 'url': '"""https://github.com/goodhertz/coldtype"""', 'packages': "['coldtype', 'coldtype.sh', 'coldtype.fx', 'coldtype.img', 'coldtype.time',\n 'coldtype.midi', 'coldtype.pens', 'coldtype.text', 'coldtype.grid',\n 'coldtype.color', 'coldtype.capture', 'coldtype.blender',\n 'coldtype.geometry', 'coldtype.time.nle', 'coldtype.renderer',\n 'coldtype.webserver', 'coldtype.renderable', 'coldtype.fontgoggles',\n 'coldtype.interpolation', 'coldtype.renderer.winman',\n 'coldtype.fontgoggles.font', 'coldtype.fontgoggles.misc',\n 'coldtype.fontgoggles.compile']", 'include_package_data': '(True)', 'package_data': "{'': ['webserver/webviewer.html', 'demo/RecMono-CasualItalic.ttf',\n 'demo/ColdtypeObviously-VF.ttf', 'demo/MutatorSans.ttf', 'demo/demo.py',\n 'demo/midi.py', 'demo/blank.py', 'demo/boiler.py',\n 'renderer/picklejar.py', 'renderer/.coldtype.py']}", 'entry_points': "{'console_scripts': ['coldtype = coldtype.renderer:main']}", 'extras_require': "{'skia': ['skia-python>=86.0'], 'viewer': ['glfw', 'PyOpenGL',\n 'PyOpenGL-accelerate', 'skia-python>=86.0', 'skia-pathops',\n 'SimpleWebSocketServer', 'watchdog<2.0.0', 'noise', 'ufo2ft', 'numpy'],\n 'webviewer': ['SimpleWebSocketServer', 'watchdog<2.0.0'],\n 'experimental': ['pynput', 'rtmidi', 'noise'], 'c': ['srt', 'noise'],\n 'unicode': ['unicodedata2'], 'blender': ['skia-pathops'], 'notebook': [\n 'skia-pathops', 'skia-python']}", 'install_requires': "['lxml', 'fonttools[ufo]', 'fontPens', 'fontParts', 'more-itertools',\n 'easing-functions', 'timecode', 'mido', 'defcon', 'freetype-py',\n 'uharfbuzz>=0.14.0', 'python-bidi']", 'classifiers': "['Programming Language :: Python :: 3', 'Operating System :: OS Independent']"}), "(name='coldtype', version='0.6.6', author=\n '<NAME> / Goodhertz', author_email='<EMAIL>', description=\n 'Functions for manual vectorized typesetting', long_description=\n long_description, long_description_content_type='text/markdown', url=\n 'https://github.com/goodhertz/coldtype', packages=['coldtype',\n 'coldtype.sh', 'coldtype.fx', 'coldtype.img', 'coldtype.time',\n 'coldtype.midi', 'coldtype.pens', 'coldtype.text', 'coldtype.grid',\n 'coldtype.color', 'coldtype.capture', 'coldtype.blender',\n 'coldtype.geometry', 'coldtype.time.nle', 'coldtype.renderer',\n 'coldtype.webserver', 'coldtype.renderable', 'coldtype.fontgoggles',\n 'coldtype.interpolation', 'coldtype.renderer.winman',\n 'coldtype.fontgoggles.font', 'coldtype.fontgoggles.misc',\n 'coldtype.fontgoggles.compile'], include_package_data=True,\n package_data={'': ['webserver/webviewer.html',\n 'demo/RecMono-CasualItalic.ttf', 'demo/ColdtypeObviously-VF.ttf',\n 'demo/MutatorSans.ttf', 'demo/demo.py', 'demo/midi.py', 'demo/blank.py',\n 'demo/boiler.py', 'renderer/picklejar.py', 'renderer/.coldtype.py']},\n entry_points={'console_scripts': ['coldtype = coldtype.renderer:main']},\n extras_require={'skia': ['skia-python>=86.0'], 'viewer': ['glfw',\n 'PyOpenGL', 'PyOpenGL-accelerate', 'skia-python>=86.0', 'skia-pathops',\n 'SimpleWebSocketServer', 'watchdog<2.0.0', 'noise', 'ufo2ft', 'numpy'],\n 'webviewer': ['SimpleWebSocketServer', 'watchdog<2.0.0'],\n 'experimental': ['pynput', 'rtmidi', 'noise'], 'c': ['srt', 'noise'],\n 
'unicode': ['unicodedata2'], 'blender': ['skia-pathops'], 'notebook': [\n 'skia-pathops', 'skia-python']}, install_requires=['lxml',\n 'fonttools[ufo]', 'fontPens', 'fontParts', 'more-itertools',\n 'easing-functions', 'timecode', 'mido', 'defcon', 'freetype-py',\n 'uharfbuzz>=0.14.0', 'python-bidi'], classifiers=[\n 'Programming Language :: Python :: 3',\n 'Operating System :: OS Independent'])\n", (193, 2154), False, 'import setuptools\n')] |
# -*- coding: utf-8 -*-
# GFOLD_static_p3p4
min_=min
from cvxpy import *
import cvxpy_codegen as cpg
from time import time
import numpy as np
import sys
import GFOLD_params
''' As defined in the paper...
PROBLEM 3: Minimum Landing Error (tf roughly solved)
MINIMIZE : norm of landing error vector
SUBJ TO :
0) initial conditions satisfied (position, velocity)
1) final conditions satisfied (altitude, velocity)
2) dynamics always satisfied
3) x stays in cone at all times
4) relaxed convexified mass and thrust constraints
5) thrust pointing constraint
6) sub-surface flight constraint
PROBLEM 4: Minimum Fuel Use
MAXIMIZE : landing mass, opt variables are dynamical and
SUBJ TO :
0) same constraints as p1, plus:
1) landing point must be equal or better than that found by p1
'''
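# Note added for clarity (not part of the original source): a sketch of the
# convexified thrust-slack bounds used in the constraint loop below. With
# z = ln(mass), slack s ~ ||Tc||/mass and z0 = ln(m_wet - alpha*r2*n*dt),
# the code applies the usual Taylor surrogates of r_i * exp(-z):
#     mu_1*(1 - (z - z0) + (z - z0)**2/2)  <=  s  <=  mu_2*(1 - (z - z0))
# where mu_1 = r1*exp(-z0) and mu_2 = r2*exp(-z0); the precomputed parameters
# z0_term_inv and z0_term_log supply exp(-z0) and z0 respectively.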
def solve(params, params_super = None, codegen = False, verbose=False):
#super params
if (params_super == None):
params_super = GFOLD_params.SuperParams() # default
N = params_super.N
    # optimization variables
x =Variable(6,N,name='var_x') # state vector (3position,3velocity)
u =Variable(3,N,name='var_u') # u = Tc/mass because Tc[:,n]/m[n] is not allowed by DCP
z= Variable(1,N,name='var_z') # z = ln(mass)
s= Variable(1,N,name='var_s') # thrust slack parameter
# Parameters
x0 = Parameter(6, 1, name="x0")
xf = Parameter(6, 1, name="xf")
z0_term_inv = Parameter(1, N, name="z0_term_inv", sign='positive')
z0_term_log = Parameter(1, N, name="z0_term_log")
g = Parameter(3, 1, name="g_vec")
p_cs_cos = Parameter(1, N, name='p_cs_cos')
sparse_params = Parameter(7, 1, name="sparse_params", sign='positive')
m_wet_log = Parameter(2, 1, name='m_wet_log')
if (not codegen):
x0.value = params.x0.reshape(6, 1)
xf.value = params.xf.reshape(6, 1)
z0_term_inv.value = params.z0_term_inv.reshape(1, N)
z0_term_log.value = params.z0_term_log.reshape(1, N)
g.value = params.g.reshape(3, 1)
p_cs_cos.value = params.p_cs_cos.reshape(1, N)
m_wet_log.value = [params.m_wet_log, 0]
sparse_params.value = np.array([
params.alpha_dt,
params.G_max,
params.V_max,
params.y_gs_cot,
params.r1,
params.r2,
params.tf
]).reshape(7, 1)
alpha_dt, G_max, V_max, y_gs_cot, r1, r2, tf_ = sparse_params
dt = tf_ * (1/N) # Integration dt
# constraints
con = []
con += [x[0:3,0] == x0[0:3]] # initial pos
con += [x[3:6,0] == x0[3:6]] # initial vel
con += [x[0:3,N-1] == xf[0:3]] # final pos
con += [x[3:6,N-1]== xf[3:6]] # final vel
con += [s[0,N-1] == 0] # thrust at the end must be zero
con += [u[:,0] == s[0,0]*np.array([1,0,0])] # thrust direction starts straight
con += [u[:,N-1] == s[0,N-1]*np.array([1,0,0])] # and ends straight
con += [z[0,0] == m_wet_log[0,0]] # convexified (7)
for n in range(0,N-1):
#dynamics
con += [x[3:6,n+1] == x[3:6,n] + (dt*0.5)*((u[:,n]+g[:,0]) + (u[:,n+1]+g[:,0]))]
con += [x[0:3,n+1] == x[0:3,n] + (dt*0.5)*(x[3:6,n+1]+x[3:6,n])]
# glideslope cone
con += [ norm( (x[0:3,n])[1:3] ) - y_gs_cot*(x[0,n]) <= 0 ]
con += [ norm(x[3:6,n]) <= V_max ] # velocity
#con += [norm(u[:,n+1]-u[:,n]) <= dt*T_max/m_dry * 3]
con += [z[0,n+1] == z[0,n] - (alpha_dt*0.5)*(s[0,n] + s[0,n+1])] # mass decreases
con += [norm(u[:,n]) <= s[0,n]] # limit thrust magnitude & also therefore, mass
# Thrust pointing constraint
con += [ u[0,n] >= p_cs_cos[0,n]*s[0,n] ]
if n > 0:
#z0_term = m_wet - alpha * r2 * (n) * dt # see ref [2], eq 34,35,36
#z0 = log(z0_term)
z0 = z0_term_log[0,n]
mu_1 = r1*(z0_term_inv[0,n])
mu_2 = r2*(z0_term_inv[0,n])
                # fix for a spot where the original project differed from the paper
                # illustration: https://www.desmos.com/calculator/wtcfgnepe1
con += [s[0,n] >= mu_1 * (1 - (z[0,n] - z0) + (z[0,n] - z0)**2 *0.5)] # lower thrust bound
con += [s[0,n] <= mu_2 * (1 - (z[0,n] - z0))] # upper thrust bound
#Objective
objective = Minimize(-z[0,N-1])
problem=Problem(objective, con)
if codegen:
cpg.codegen(problem, codegen_path)
else:
obj_opt = problem.solve(solver=ECOS, verbose=verbose)
return (
obj_opt,
np.array(x.value), # r,v
np.array(u.value), # u (acceleration)
np.exp(np.array(z.value)) # mass
) if type(x.value) != type(None) else (None, None, None, None)
if __name__ == '__main__':
if (len(sys.argv) > 2 and sys.argv[1] == 'codegen'):
codegen_path = sys.argv[2]
solve(None, None, True)
else:
print("invalid input")
print(sys.argv)
| [
"numpy.array",
"GFOLD_params.SuperParams",
"cvxpy_codegen.codegen"
]
| [((1050, 1076), 'GFOLD_params.SuperParams', 'GFOLD_params.SuperParams', ([], {}), '()\n', (1074, 1076), False, 'import GFOLD_params\n'), ((4489, 4523), 'cvxpy_codegen.codegen', 'cpg.codegen', (['problem', 'codegen_path'], {}), '(problem, codegen_path)\n', (4500, 4523), True, 'import cvxpy_codegen as cpg\n'), ((2230, 2339), 'numpy.array', 'np.array', (['[params.alpha_dt, params.G_max, params.V_max, params.y_gs_cot, params.r1,\n params.r2, params.tf]'], {}), '([params.alpha_dt, params.G_max, params.V_max, params.y_gs_cot,\n params.r1, params.r2, params.tf])\n', (2238, 2339), True, 'import numpy as np\n'), ((2890, 2909), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (2898, 2909), True, 'import numpy as np\n'), ((2977, 2996), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (2985, 2996), True, 'import numpy as np\n'), ((4646, 4663), 'numpy.array', 'np.array', (['x.value'], {}), '(x.value)\n', (4654, 4663), True, 'import numpy as np\n'), ((4683, 4700), 'numpy.array', 'np.array', (['u.value'], {}), '(u.value)\n', (4691, 4700), True, 'import numpy as np\n'), ((4740, 4757), 'numpy.array', 'np.array', (['z.value'], {}), '(z.value)\n', (4748, 4757), True, 'import numpy as np\n')] |
import io
import hashlib
import logging
import os
import struct
import random
from HintList import getHint, getHintGroup, Hint
from Utils import local_path
#builds out general hints based on location and whether an item is required or not
def buildGossipHints(world, rom):
stoneAddresses = [0x938e4c, 0x938EA8, 0x938F04, 0x938F60, 0x938FBC, 0x939018, 0x939074, 0x9390D0, 0x93912C, 0x939188,
0x9391E4, 0x939240, 0x93929C, 0x9392F8, 0x939354, 0x9393B0, 0x93940C, 0x939468, 0x9394C4, 0x939520,
0x93957C, 0x9395D8, 0x939634, 0x939690, 0x9396EC, 0x939748, 0x9397A4, 0x939800, 0x93985C, 0x9398B8,
0x939914, 0x939970] #address for gossip stone text boxes, byte limit is 92
alwaysLocations = getHintGroup('alwaysLocation')#These location will always have a hint somewhere in the world.
sometimesSpace = (int((len(stoneAddresses) - len(alwaysLocations)*2)/2))
sometimesLocations = getHintGroup('location')#A random selection of these locations will be in the hint pool.
random.shuffle(sometimesLocations)
sometimesLocations = sometimesLocations[0:sometimesSpace]
hintList = alwaysLocations
hintList.extend(alwaysLocations)
hintList.extend(sometimesLocations)
locationData = []
for hint in hintList:
for locationWorld in world.get_locations():
if hint.name == locationWorld.name:
locationData.extend([locationWorld])
#hopefully fixes weird VC error where the last character from a previous text box would sometimes spill over into the next box.
for address in range(stoneAddresses[0], 0x9399D8):
rom.write_byte(address, 0x08)
#shuffles the stone addresses for randomization, always locations will be placed first and twice
random.shuffle(stoneAddresses)
#loops through shuffled locations and addresses and builds hint.
while locationData:
currentLoc = locationData.pop(0)
Block_code = getBytes((getHint(currentLoc.name).text))
if currentLoc.item.type == 'Map' or currentLoc.item.type == 'Compass' or currentLoc.item.type == 'BossKey' or currentLoc.item.type == 'SmallKey':
Block_code.extend(getBytes((getHint(currentLoc.item.type).text)))
else:
Block_code.extend(getBytes((getHint(currentLoc.item.name).text)))
endText(Block_code)
if len(Block_code) > 92:
print('Too many characters in hint')
Block_code = getBytes("I am Error.")
Block_code.extend(getBytes(currentLoc.name))
Block_code.extend(getBytes('&'))
Block_code.extend(getBytes(currentLoc.item.name))
rom.write_bytes(stoneAddresses.pop(0), Block_code)
junkHints = getHintGroup('junkHint')
random.shuffle(junkHints)
while stoneAddresses:
junkHint = junkHints.pop()
Block_code = getBytes(junkHint.text)
endText(Block_code)
rom.write_bytes(stoneAddresses.pop(0), Block_code)
return rom
# builds boss reward text that is displayed at the Temple of Time altar for child and adult, pulled based on the item in a fixed order.
def buildBossRewardHints(world, rom):
bossRewardsSpiritualStones = ['Kokiri Emerald', 'Goron Ruby', 'Zora Sapphire']
bossRewardsMedallions = ['Forest Medallion', 'Fire Medallion', 'Water Medallion', 'Shadow Medallion', 'Spirit Medallion', 'Light Medallion']
# text that appears at altar as a child.
Block_code = []
Block_code = getBytes(getHint('Spiritual Stone Text Start').text)
for reward in bossRewardsSpiritualStones:
buildBossString(Block_code, reward, world)
Block_code = setRewardColor(Block_code)
Block_code.extend(getBytes(getHint('Spiritual Stone Text End').text))
Block_code.extend([0x0B])
endText(Block_code)
rom.write_bytes(0x95ED95, Block_code)
# text that appears at altar as an adult.
Block_code = []
for reward in bossRewardsMedallions:
buildBossString(Block_code, reward, world)
Block_code = setRewardColor(Block_code)
Block_code.extend(getBytes(getHint('Medallion Text End').text))
Block_code.extend([0x0B])
endText(Block_code)
rom.write_bytes(0x95DB94, Block_code)
return rom
# pulls text string from hintlist for reward after sending the location to hintlist.
def buildBossString(Block_code, reward, world):
for location in world.get_locations():
if location.item.name == reward:
Block_code.extend([0x08])
Block_code.extend(getBytes(getHint(location.name).text))
return Block_code
# alternates through color set commands in child and adult boss reward hint strings setting the colors at the start of the string to correspond with the reward found at the location.
# skips over color commands at the end of stings to set color back to white.
def setRewardColor(Block_code):
rewardColors = [0x42, 0x41, 0x43, 0x45, 0x46, 0x44]
colorWhite = True
for i, byte in enumerate(Block_code):
if byte == 0x05 and colorWhite:
Block_code[i + 1] = rewardColors.pop(0)
colorWhite = False
elif byte == 0x05 and not colorWhite:
colorWhite = True
return Block_code
#sets the end of text byte in the text box.
def endText(byteArray):
return byteArray.extend([0x02])
# reads array of characters and converts them to an array of bytes.
def getBytes(string):
byteCode = []
for char in string:
if char == '^':
byteCode.extend([0x04])#box break
elif char == '&':
byteCode.extend([0x01])#new line
elif char == '@':
byteCode.extend([0x0F])#print player name
elif char == '#':
byteCode.extend([0x05, 0x40]) #sets color to white
else:
char = char.encode('utf-8')
char = char.hex()
byte = int('0x' + char, 16)
byteCode.extend([byte])
return byteCode
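if __name__ == '__main__':
    # Small usage sketch (added; not part of the original module): '&' maps to
    # the 0x01 line-break byte and endText() appends the 0x02 terminator.
    demo = getBytes('It is dangerous to go alone!&Take this.')
    endText(demo)
    print(len(demo), hex(demo[-1]))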
| [
"HintList.getHint",
"random.shuffle",
"HintList.getHintGroup"
]
| [((780, 810), 'HintList.getHintGroup', 'getHintGroup', (['"""alwaysLocation"""'], {}), "('alwaysLocation')\n", (792, 810), False, 'from HintList import getHint, getHintGroup, Hint\n'), ((984, 1008), 'HintList.getHintGroup', 'getHintGroup', (['"""location"""'], {}), "('location')\n", (996, 1008), False, 'from HintList import getHint, getHintGroup, Hint\n'), ((1078, 1112), 'random.shuffle', 'random.shuffle', (['sometimesLocations'], {}), '(sometimesLocations)\n', (1092, 1112), False, 'import random\n'), ((1844, 1874), 'random.shuffle', 'random.shuffle', (['stoneAddresses'], {}), '(stoneAddresses)\n', (1858, 1874), False, 'import random\n'), ((2824, 2848), 'HintList.getHintGroup', 'getHintGroup', (['"""junkHint"""'], {}), "('junkHint')\n", (2836, 2848), False, 'from HintList import getHint, getHintGroup, Hint\n'), ((2854, 2879), 'random.shuffle', 'random.shuffle', (['junkHints'], {}), '(junkHints)\n', (2868, 2879), False, 'import random\n'), ((3606, 3643), 'HintList.getHint', 'getHint', (['"""Spiritual Stone Text Start"""'], {}), "('Spiritual Stone Text Start')\n", (3613, 3643), False, 'from HintList import getHint, getHintGroup, Hint\n'), ((2046, 2070), 'HintList.getHint', 'getHint', (['currentLoc.name'], {}), '(currentLoc.name)\n', (2053, 2070), False, 'from HintList import getHint, getHintGroup, Hint\n'), ((3828, 3863), 'HintList.getHint', 'getHint', (['"""Spiritual Stone Text End"""'], {}), "('Spiritual Stone Text End')\n", (3835, 3863), False, 'from HintList import getHint, getHintGroup, Hint\n'), ((4217, 4246), 'HintList.getHint', 'getHint', (['"""Medallion Text End"""'], {}), "('Medallion Text End')\n", (4224, 4246), False, 'from HintList import getHint, getHintGroup, Hint\n'), ((2274, 2303), 'HintList.getHint', 'getHint', (['currentLoc.item.type'], {}), '(currentLoc.item.type)\n', (2281, 2303), False, 'from HintList import getHint, getHintGroup, Hint\n'), ((2368, 2397), 'HintList.getHint', 'getHint', (['currentLoc.item.name'], {}), '(currentLoc.item.name)\n', (2375, 2397), False, 'from HintList import getHint, getHintGroup, Hint\n'), ((4677, 4699), 'HintList.getHint', 'getHint', (['location.name'], {}), '(location.name)\n', (4684, 4699), False, 'from HintList import getHint, getHintGroup, Hint\n')] |
# BSD 3-Clause License; see https://github.com/jpivarski/awkward-1.0/blob/master/LICENSE
from __future__ import absolute_import
import sys
import pytest
import numpy
import awkward1
class Dummy(awkward1.Record):
@property
def broken(self):
raise AttributeError("I'm broken!")
def test():
behavior = {}
behavior["Dummy"] = Dummy
array = awkward1.Array([{"x": 1}, {"x": 2}, {"x": 3}], behavior=behavior)
array.layout.setparameter("__record__", "Dummy")
with pytest.raises(AttributeError) as err:
array[1].broken
assert str(err.value) == "I'm broken!" # not "no field named 'broken'"
| [
"pytest.raises",
"awkward1.Array"
]
| [((371, 436), 'awkward1.Array', 'awkward1.Array', (["[{'x': 1}, {'x': 2}, {'x': 3}]"], {'behavior': 'behavior'}), "([{'x': 1}, {'x': 2}, {'x': 3}], behavior=behavior)\n", (385, 436), False, 'import awkward1\n'), ((500, 529), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (513, 529), False, 'import pytest\n')] |
from django.apps import AppConfig
from ievv_opensource import ievv_batchframework
from ievv_opensource.ievv_batchframework import batchregistry
class HelloWorldAction(ievv_batchframework.Action):
def execute(self):
self.logger.info('Hello world! %r', self.kwargs)
class HelloWorldAsyncAction(ievv_batchframework.Action):
def execute(self):
self.logger.info('\n\n\n\n\n\n\n\nHello world, async! %r\n\n\n\n\n', self.kwargs)
class BatchFrameworkDemoAppConfig(AppConfig):
name = 'ievv_opensource.demo.batchframeworkdemo'
verbose_name = "IEVV Batchframework demo"
def ready(self):
batchregistry.Registry.get_instance().add_actiongroup(
batchregistry.ActionGroup(
name='batchframeworkdemo_helloworld',
mode=batchregistry.ActionGroup.MODE_SYNCHRONOUS,
actions=[
HelloWorldAction
]))
batchregistry.Registry.get_instance().add_actiongroup(
batchregistry.ActionGroup(
name='batchframeworkdemo_helloworld_async',
mode=batchregistry.ActionGroup.MODE_ASYNCHRONOUS,
actions=[
HelloWorldAsyncAction
]
)
)
| [
"ievv_opensource.ievv_batchframework.batchregistry.ActionGroup",
"ievv_opensource.ievv_batchframework.batchregistry.Registry.get_instance"
]
| [((695, 840), 'ievv_opensource.ievv_batchframework.batchregistry.ActionGroup', 'batchregistry.ActionGroup', ([], {'name': '"""batchframeworkdemo_helloworld"""', 'mode': 'batchregistry.ActionGroup.MODE_SYNCHRONOUS', 'actions': '[HelloWorldAction]'}), "(name='batchframeworkdemo_helloworld', mode=\n batchregistry.ActionGroup.MODE_SYNCHRONOUS, actions=[HelloWorldAction])\n", (720, 840), False, 'from ievv_opensource.ievv_batchframework import batchregistry\n'), ((999, 1161), 'ievv_opensource.ievv_batchframework.batchregistry.ActionGroup', 'batchregistry.ActionGroup', ([], {'name': '"""batchframeworkdemo_helloworld_async"""', 'mode': 'batchregistry.ActionGroup.MODE_ASYNCHRONOUS', 'actions': '[HelloWorldAsyncAction]'}), "(name='batchframeworkdemo_helloworld_async', mode=\n batchregistry.ActionGroup.MODE_ASYNCHRONOUS, actions=[\n HelloWorldAsyncAction])\n", (1024, 1161), False, 'from ievv_opensource.ievv_batchframework import batchregistry\n'), ((628, 665), 'ievv_opensource.ievv_batchframework.batchregistry.Registry.get_instance', 'batchregistry.Registry.get_instance', ([], {}), '()\n', (663, 665), False, 'from ievv_opensource.ievv_batchframework import batchregistry\n'), ((932, 969), 'ievv_opensource.ievv_batchframework.batchregistry.Registry.get_instance', 'batchregistry.Registry.get_instance', ([], {}), '()\n', (967, 969), False, 'from ievv_opensource.ievv_batchframework import batchregistry\n')] |
# Author: <NAME>
# <EMAIL>
#
# Command line to run program:
# python3 pyfrechet_visualize.py
import sys, os, unittest
sys.path.insert(0, "../")
from pyfrechet.distance import StrongDistance
from pyfrechet.visualize import FreeSpaceDiagram, Trajectories
TEST_DATA = "sp500"
if TEST_DATA == "sp500":
REACHABLE_EPSILON = 5
UNREACHABLE_EPSILON = 1
REVERSE_CURVE = False
elif TEST_DATA == "trajectory":
REACHABLE_EPSILON = 70
UNREACHABLE_EPSILON = 60
REVERSE_CURVE = True
CURVE_1 = f"{TEST_DATA}_data/sample_1.txt"
CURVE_2 = f"{TEST_DATA}_data/sample_2.txt"
class pyfrechet_optimise(unittest.TestCase):
global REACHABLE_EPSILON
global UNREACHABLE_EPSILON
global REVERSE_CURVE
global CURVE_1
global CURVE_2
def test_fail_BinarySearch_instance_argument(self):
class BadClass(): pass
with self.assertRaises(TypeError):
bc = BadClass()
FreeSpaceDiagram(bc)
def test_FreeSpaceDiagram_plot(self):
sd = StrongDistance.setCurves(CURVE_1, CURVE_2, REVERSE_CURVE)
sd.setFreeSpace(REACHABLE_EPSILON)
fsd = FreeSpaceDiagram(sd)
fsd.plot()
def test_FreeSpaceDiagram__addEpsilonSlider(self):
sd = StrongDistance.setCurves(CURVE_1, CURVE_2, REVERSE_CURVE)
fsd = FreeSpaceDiagram(sd)
fsd.addEpsilonSlider(UNREACHABLE_EPSILON, REACHABLE_EPSILON, 1)
fsd.plot()
def test_FreeSpaceDiagram__weighted_cells(self):
sd = StrongDistance.setCurves(CURVE_1, CURVE_2, REVERSE_CURVE)
fsd = FreeSpaceDiagram(sd)
sd.setFreeSpace(REACHABLE_EPSILON)
fsd.plot(True, False)
def test_FreeSpaceDiagram__gridlines(self):
sd = StrongDistance.setCurves(CURVE_1, CURVE_2, REVERSE_CURVE)
fsd = FreeSpaceDiagram(sd)
sd.setFreeSpace(REACHABLE_EPSILON)
fsd.plot(True, True)
def test_Trajectories(self):
sd = StrongDistance.setCurves(CURVE_1, CURVE_2, REVERSE_CURVE)
t = Trajectories(sd)
t.plot()
if __name__ == '__main__':
unittest.main()
| [
"sys.path.insert",
"pyfrechet.visualize.Trajectories",
"pyfrechet.visualize.FreeSpaceDiagram",
"unittest.main",
"pyfrechet.distance.StrongDistance.setCurves"
]
| [((119, 144), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../"""'], {}), "(0, '../')\n", (134, 144), False, 'import sys, os, unittest\n'), ((2058, 2073), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2071, 2073), False, 'import sys, os, unittest\n'), ((1001, 1058), 'pyfrechet.distance.StrongDistance.setCurves', 'StrongDistance.setCurves', (['CURVE_1', 'CURVE_2', 'REVERSE_CURVE'], {}), '(CURVE_1, CURVE_2, REVERSE_CURVE)\n', (1025, 1058), False, 'from pyfrechet.distance import StrongDistance\n'), ((1116, 1136), 'pyfrechet.visualize.FreeSpaceDiagram', 'FreeSpaceDiagram', (['sd'], {}), '(sd)\n', (1132, 1136), False, 'from pyfrechet.visualize import FreeSpaceDiagram, Trajectories\n'), ((1225, 1282), 'pyfrechet.distance.StrongDistance.setCurves', 'StrongDistance.setCurves', (['CURVE_1', 'CURVE_2', 'REVERSE_CURVE'], {}), '(CURVE_1, CURVE_2, REVERSE_CURVE)\n', (1249, 1282), False, 'from pyfrechet.distance import StrongDistance\n'), ((1297, 1317), 'pyfrechet.visualize.FreeSpaceDiagram', 'FreeSpaceDiagram', (['sd'], {}), '(sd)\n', (1313, 1317), False, 'from pyfrechet.visualize import FreeSpaceDiagram, Trajectories\n'), ((1476, 1533), 'pyfrechet.distance.StrongDistance.setCurves', 'StrongDistance.setCurves', (['CURVE_1', 'CURVE_2', 'REVERSE_CURVE'], {}), '(CURVE_1, CURVE_2, REVERSE_CURVE)\n', (1500, 1533), False, 'from pyfrechet.distance import StrongDistance\n'), ((1548, 1568), 'pyfrechet.visualize.FreeSpaceDiagram', 'FreeSpaceDiagram', (['sd'], {}), '(sd)\n', (1564, 1568), False, 'from pyfrechet.visualize import FreeSpaceDiagram, Trajectories\n'), ((1704, 1761), 'pyfrechet.distance.StrongDistance.setCurves', 'StrongDistance.setCurves', (['CURVE_1', 'CURVE_2', 'REVERSE_CURVE'], {}), '(CURVE_1, CURVE_2, REVERSE_CURVE)\n', (1728, 1761), False, 'from pyfrechet.distance import StrongDistance\n'), ((1776, 1796), 'pyfrechet.visualize.FreeSpaceDiagram', 'FreeSpaceDiagram', (['sd'], {}), '(sd)\n', (1792, 1796), False, 'from pyfrechet.visualize import FreeSpaceDiagram, Trajectories\n'), ((1916, 1973), 'pyfrechet.distance.StrongDistance.setCurves', 'StrongDistance.setCurves', (['CURVE_1', 'CURVE_2', 'REVERSE_CURVE'], {}), '(CURVE_1, CURVE_2, REVERSE_CURVE)\n', (1940, 1973), False, 'from pyfrechet.distance import StrongDistance\n'), ((1986, 2002), 'pyfrechet.visualize.Trajectories', 'Trajectories', (['sd'], {}), '(sd)\n', (1998, 2002), False, 'from pyfrechet.visualize import FreeSpaceDiagram, Trajectories\n'), ((924, 944), 'pyfrechet.visualize.FreeSpaceDiagram', 'FreeSpaceDiagram', (['bc'], {}), '(bc)\n', (940, 944), False, 'from pyfrechet.visualize import FreeSpaceDiagram, Trajectories\n')] |
#!/usr/bin/env python3
# vim:fileencoding=UTF-8
# -*- coding: UTF-8 -*-
"""
Created on 15 June 2019.
@author: <NAME> <EMAIL>
"""
import sys
import struct
import numpy as np
from progress.bar import Bar
import logging
logging.basicConfig(format = u'%(filename)s:%(lineno)d: %(levelname)-8s [%(asctime)s] %(message)s', level = logging.DEBUG, stream=sys.stdout)
# class ser(np.array):
class ser(object):
"""
A set of methods for working with a set of images in the SER format.
"""
def __init__(self, fname):
"""
Download information from file.
"""
# super.__init__()
        # SER ColorID constants (compared against the colorid header field)
self.MONO = 0
self.BAYER_RGGB = 8
self.BAYER_GRBG = 9
self.BAYER_GBRG = 10
self.BAYER_BGGR = 11
self.BAYER_CYYM = 16
self.BAYER_YCMY = 17
self.BAYER_YMCY = 18
self.BAYER_MYYC = 19
self.RGB = 100
self.BGR = 101
self.fname = fname
with open(self.fname, 'rb') as fd:
# Download information from the header.
self.header = fd.read(178)
self.parse_header()
# Download images.
self.frames = np.zeros((self.framecount, self.imageheight, self.imagewidth))
bar = Bar('Downloading', max=self.framecount)
for frame in range(self.framecount):
# for frame in range(1):
bar.next()
t_frame = fd.read(self.imageheight * self.imagewidth * self.pixeldepthperplane//8)
for line in range(self.imageheight):
for pixel in range(self.imagewidth):
index = (line * self.imagewidth + pixel) * 2
self.frames[frame][line][pixel] = struct.unpack('<H', t_frame[index:index+2])[0]
bar.finish()
# Download the trailer
self.trailer = fd.read(self.framecount * 8)
self.parse_trailer()
def parse_header(self):
"""
        Parse the header.
"""
self.fileid = self.header[0:14]
self.luid = struct.unpack('<i', self.header[14:18])[0]
self.colorid = struct.unpack('<i', self.header[18:22])[0]
self.littleendian_FALSE = 0
self.littleendian_TRUE = 1
self.littleendian = struct.unpack('<i', self.header[22:26])[0]
self.imagewidth = struct.unpack('<i', self.header[26:30])[0]
self.imageheight = struct.unpack('<i', self.header[30:34])[0]
self.pixeldepthperplane = struct.unpack('<i', self.header[34:38])[0]
self.framecount = struct.unpack('<i', self.header[38:42])[0]
self.observer = self.header[42:82]
self.telescope = self.header[82:122]
self.datetime = struct.unpack('<q', self.header[122:130])[0]
self.datetime_utc = struct.unpack('<q', self.header[130:138])[0]
# logging.info('{0}x{1}'.format(self.imagewidth, self.imageheight))
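    # Header byte layout as decoded above (derived from the struct offsets in
    # parse_header; the 178-byte read leaves the remaining header bytes unparsed):
    #   0-13  FileID        14-17  LuID          18-21   ColorID      22-25   LittleEndian
    #   26-29 ImageWidth    30-33  ImageHeight   34-37   PixelDepth   38-41   FrameCount
    #   42-81 Observer      82-121 Telescope     122-129 DateTime     130-137 DateTime_UTC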
def parse_trailer(self):
"""
Parse the trailer
"""
for i in range(0, self.framecount*8, 8):
tuli = (struct.unpack('<Q', self.trailer[i:i+8])[0])
def main(argv):
logging.info('%s started.\n' % argv[0])
fn = './images/ASICAP_2019-05-10_01_43_36_523.SER'
frames = ser(fn)
# logging.debug(type(frames))
# logging.debug(type(object))
# # darks_fn = './images/ASICAP_2019-05-10_02_12_00_621.SER'
# # offsets_fn = './images/ASICAP_2019-05-10_02_30_47_294.SER'
#
# # frames = ser.ser()
# # frames.read(darks_fn)
# # frames.read(lights_fn)
# # ser_fr = serialise_frames(frames)
# # logging.debug('std1={}'.format(ser_fr.std()))
# # hist_fr = get_hist(ser_fr)
# # plt.plot(hist_fr)
# # plt.grid()
# # plt.show()
#
# fnames = [
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_34_52_584.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_36_05_343.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_37_34_373.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_37_47_276.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_37_58_784.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_06_703.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_17_476.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_27_330.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_36_623.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_48_239.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_40_20_816.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_40_32_118.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_40_47_796.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_40_59_999.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_41_10_321.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_41_41_276.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_42_07_956.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_42_19_287.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_42_31_180.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_42_43_981.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_43_07_152.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_43_36_180.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_44_01_167.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_44_33_214.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_44_58_952.SER',
# ]
#
# print('{};{};{};{};{}'.format('File', 'Temperature', 'Exposure', 'Gain', 'std'))
# for fn in fnames:
# print('{}'.format(fn), flush=True, file=sys.stderr)
# frames = ser.ser()
# frames.read(fn)
# ser_fr = serialise_frames(frames)
#
# config = configparser.ConfigParser()
# config.read(fn + '.txt')
#
# print('{};{};{};{};{}'.format(fn, config['ZWO ASI120MC']['temperature'], config['ZWO ASI120MC']['exposure'], config['ZWO ASI120MC']['gain'], ser_fr.std()))
logging.info('%s finished.\n' % argv[0])
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| [
"logging.basicConfig",
"numpy.zeros",
"struct.unpack",
"progress.bar.Bar",
"logging.info"
]
| [((223, 369), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': 'u"""%(filename)s:%(lineno)d: %(levelname)-8s [%(asctime)s] %(message)s"""', 'level': 'logging.DEBUG', 'stream': 'sys.stdout'}), "(format=\n u'%(filename)s:%(lineno)d: %(levelname)-8s [%(asctime)s] %(message)s',\n level=logging.DEBUG, stream=sys.stdout)\n", (242, 369), False, 'import logging\n'), ((3276, 3315), 'logging.info', 'logging.info', (["('%s started.\\n' % argv[0])"], {}), "('%s started.\\n' % argv[0])\n", (3288, 3315), False, 'import logging\n'), ((6271, 6311), 'logging.info', 'logging.info', (["('%s finished.\\n' % argv[0])"], {}), "('%s finished.\\n' % argv[0])\n", (6283, 6311), False, 'import logging\n'), ((1209, 1271), 'numpy.zeros', 'np.zeros', (['(self.framecount, self.imageheight, self.imagewidth)'], {}), '((self.framecount, self.imageheight, self.imagewidth))\n', (1217, 1271), True, 'import numpy as np\n'), ((1290, 1329), 'progress.bar.Bar', 'Bar', (['"""Downloading"""'], {'max': 'self.framecount'}), "('Downloading', max=self.framecount)\n", (1293, 1329), False, 'from progress.bar import Bar\n'), ((2141, 2180), 'struct.unpack', 'struct.unpack', (['"""<i"""', 'self.header[14:18]'], {}), "('<i', self.header[14:18])\n", (2154, 2180), False, 'import struct\n'), ((2218, 2257), 'struct.unpack', 'struct.unpack', (['"""<i"""', 'self.header[18:22]'], {}), "('<i', self.header[18:22])\n", (2231, 2257), False, 'import struct\n'), ((2367, 2406), 'struct.unpack', 'struct.unpack', (['"""<i"""', 'self.header[22:26]'], {}), "('<i', self.header[22:26])\n", (2380, 2406), False, 'import struct\n'), ((2444, 2483), 'struct.unpack', 'struct.unpack', (['"""<i"""', 'self.header[26:30]'], {}), "('<i', self.header[26:30])\n", (2457, 2483), False, 'import struct\n'), ((2521, 2560), 'struct.unpack', 'struct.unpack', (['"""<i"""', 'self.header[30:34]'], {}), "('<i', self.header[30:34])\n", (2534, 2560), False, 'import struct\n'), ((2598, 2637), 'struct.unpack', 'struct.unpack', (['"""<i"""', 'self.header[34:38]'], {}), "('<i', self.header[34:38])\n", (2611, 2637), False, 'import struct\n'), ((2675, 2714), 'struct.unpack', 'struct.unpack', (['"""<i"""', 'self.header[38:42]'], {}), "('<i', self.header[38:42])\n", (2688, 2714), False, 'import struct\n'), ((2859, 2900), 'struct.unpack', 'struct.unpack', (['"""<q"""', 'self.header[122:130]'], {}), "('<q', self.header[122:130])\n", (2872, 2900), False, 'import struct\n'), ((2938, 2979), 'struct.unpack', 'struct.unpack', (['"""<q"""', 'self.header[130:138]'], {}), "('<q', self.header[130:138])\n", (2951, 2979), False, 'import struct\n'), ((3209, 3251), 'struct.unpack', 'struct.unpack', (['"""<Q"""', 'self.trailer[i:i + 8]'], {}), "('<Q', self.trailer[i:i + 8])\n", (3222, 3251), False, 'import struct\n'), ((1779, 1824), 'struct.unpack', 'struct.unpack', (['"""<H"""', 't_frame[index:index + 2]'], {}), "('<H', t_frame[index:index + 2])\n", (1792, 1824), False, 'import struct\n')] |
#!/usr/bin/env python
# encoding: utf-8
"""
process.py
Copyright (c) 2011 <NAME>
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from fuzz import *
import sys, os
import utils
#######################################
# Find Best Matchs In List Of Choices #
#######################################
def extract(query, choices, processor=None, scorer=None, limit=5):
# choices = a list of objects we are attempting to extract values from
# query = an object representing the thing we want to find
# scorer f(OBJ, QUERY) --> INT. We will return the objects with the highest score
# by default, we use score.WRatio() and both OBJ and QUERY should be strings
# processor f(OBJ_A) --> OBJ_B, where the output is an input to scorer
# for example, "processor = lambda x: x[0]" would return the first element in a collection x (of, say, strings)
# this would then be used in the scoring collection
if choices is None or len(choices) == 0:
return []
# default, turn whatever the choice is into a string
if processor is None:
processor = lambda x: utils.asciidammit(x)
# default: wratio
if scorer is None:
scorer = WRatio
sl = list()
for choice in choices:
processed = processor(choice)
score = scorer(query, processed)
tuple = (choice, score)
sl.append(tuple)
sl.sort(key=lambda i: -1*i[1])
return sl[:limit]
##########################
# Find Single Best Match #
##########################
def extractOne(query, choices, processor=None, scorer=None, score_cutoff=0):
# convenience method which returns the single best choice
# optional parameter: score_cutoff.
# If the best choice has a score of less than score_cutoff
# we will return none (intuition: not a good enough match)
best_list = extract(query, choices, processor, scorer, limit=1)
if len(best_list) > 0:
best = best_list[0]
if best[1] > score_cutoff:
return best
else:
return None
else:
return None
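# Minimal usage sketch (added; not part of the original module). Assumes the
# `fuzz` module imported above provides WRatio, which the defaults rely on.
if __name__ == "__main__":
    choices = ["Atlanta Falcons", "New York Jets", "New York Giants", "Dallas Cowboys"]
    # Ranked (choice, score) tuples, best match first.
    print(extract("new york jets", choices, limit=2))
    # Single best (choice, score) tuple, or None if the score is <= score_cutoff.
    print(extractOne("cowboys", choices, score_cutoff=50))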
| [
"utils.asciidammit"
]
| [((2115, 2135), 'utils.asciidammit', 'utils.asciidammit', (['x'], {}), '(x)\n', (2132, 2135), False, 'import utils\n')] |
"""Monthly HDD/CDD Totals."""
import datetime
from pandas.io.sql import read_sql
from pyiem.plot.use_agg import plt
from pyiem.util import get_dbconn, get_autoplot_context
from pyiem.exceptions import NoDataFound
PDICT = {'cdd': 'Cooling Degree Days',
'hdd': 'Heating Degree Days'}
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc['data'] = True
desc['report'] = True
desc['description'] = """This chart presents monthly cooling degree days
or heating degree days for a 20 year period of your choice. The 20 year
limit is for plot usability only, the data download has all available
years contained."""
y20 = datetime.date.today().year - 19
desc['arguments'] = [
dict(type='station', name='station', default='IATDSM',
label='Select Station', network='IACLIMATE'),
dict(type='select', options=PDICT, default='cdd', name='var',
label='Select Variable'),
dict(type='year', name='syear', default=y20,
label='For plotting, year to start 20 years of plot'),
]
return desc
def plotter(fdict):
""" Go """
import seaborn as sns
ctx = get_autoplot_context(fdict, get_description())
pgconn = get_dbconn('coop')
station = ctx['station']
varname = ctx['var']
table = "alldata_%s" % (station[:2], )
df = read_sql("""
SELECT year, month, sum(precip) as sum_precip,
avg(high) as avg_high,
avg(low) as avg_low,
sum(cdd(high,low,60)) as cdd60,
sum(cdd(high,low,65)) as cdd65,
sum(hdd(high,low,60)) as hdd60,
sum(hdd(high,low,65)) as hdd65,
sum(case when precip >= 0.01 then 1 else 0 end) as rain_days,
sum(case when snow >= 0.1 then 1 else 0 end) as snow_days
from """+table+""" WHERE station = %s GROUP by year, month
""", pgconn, params=(station,), index_col=None)
if df.empty:
raise NoDataFound("No Data Found.")
df['monthdate'] = df[['year', 'month']].apply(
lambda x: datetime.date(x[0], x[1], 1), axis=1)
df.set_index('monthdate', inplace=True)
res = """\
# IEM Climodat https://mesonet.agron.iastate.edu/climodat/
# Report Generated: %s
# Climate Record: %s -> %s
# Site Information: [%s] %s
# Contact Information: <NAME> <EMAIL> 515.294.5978
""" % (datetime.date.today().strftime("%d %b %Y"),
ctx['_nt'].sts[station]['archive_begin'].date(),
datetime.date.today(), station, ctx['_nt'].sts[station]['name'])
res += """# THESE ARE THE MONTHLY %s (base=65) FOR STATION %s
YEAR JAN FEB MAR APR MAY JUN JUL AUG SEP \
OCT NOV DEC
""" % (PDICT[varname].upper(), station)
second = """# THESE ARE THE MONTHLY %s (base=60) FOR STATION %s
YEAR JAN FEB MAR APR MAY JUN JUL AUG SEP \
OCT NOV DEC
""" % (
PDICT[varname].upper(), station)
minyear = df['year'].min()
maxyear = df['year'].max()
for yr in range(minyear, maxyear + 1):
res += ("%4i" % (yr,))
second += "%4i" % (yr,)
for mo in range(1, 13):
ts = datetime.date(yr, mo, 1)
if ts not in df.index:
res += ("%7s" % ("M",))
second += "%7s" % ("M",)
continue
row = df.loc[ts]
res += ("%7.0f" % (row[varname+"65"],))
second += "%7.0f" % (row[varname+"60"],)
res += ("\n")
second += "\n"
res += ("MEAN")
second += "MEAN"
for mo in range(1, 13):
df2 = df[df['month'] == mo]
res += ("%7.0f" % (df2[varname+"65"].mean(), ))
second += "%7.0f" % (df2[varname+"60"].mean(), )
res += ("\n")
second += "\n"
res += second
y1 = int(fdict.get('syear', 1990))
fig, ax = plt.subplots(1, 1, figsize=(8., 6.))
fig.text(0.5, 0.95, "[%s] %s (%s-%s)" % (
station, ctx['_nt'].sts[station]['name'], y1, y1 + 20), ha='center',
fontsize=16)
ax.set_title(r"%s base=60$^\circ$F" % (PDICT[varname], ))
filtered = df[(df['year'] >= y1) & (df['year'] <= (y1 + 20))]
df2 = filtered[
['month', 'year', varname + '60']
].pivot('year', 'month', varname + '60')
sns.heatmap(df2, annot=True, fmt=".0f", linewidths=.5, ax=ax)
return fig, df, res
if __name__ == '__main__':
plotter(dict(syear=1990))
| [
"pandas.io.sql.read_sql",
"seaborn.heatmap",
"pyiem.util.get_dbconn",
"datetime.date",
"datetime.date.today",
"pyiem.exceptions.NoDataFound",
"pyiem.plot.use_agg.plt.subplots"
]
| [((1275, 1293), 'pyiem.util.get_dbconn', 'get_dbconn', (['"""coop"""'], {}), "('coop')\n", (1285, 1293), False, 'from pyiem.util import get_dbconn, get_autoplot_context\n'), ((1403, 1960), 'pandas.io.sql.read_sql', 'read_sql', (['(\n """\n SELECT year, month, sum(precip) as sum_precip,\n avg(high) as avg_high,\n avg(low) as avg_low,\n sum(cdd(high,low,60)) as cdd60,\n sum(cdd(high,low,65)) as cdd65,\n sum(hdd(high,low,60)) as hdd60,\n sum(hdd(high,low,65)) as hdd65,\n sum(case when precip >= 0.01 then 1 else 0 end) as rain_days,\n sum(case when snow >= 0.1 then 1 else 0 end) as snow_days\n from """\n + table + \' WHERE station = %s GROUP by year, month\\n \')', 'pgconn'], {'params': '(station,)', 'index_col': 'None'}), '(\n """\n SELECT year, month, sum(precip) as sum_precip,\n avg(high) as avg_high,\n avg(low) as avg_low,\n sum(cdd(high,low,60)) as cdd60,\n sum(cdd(high,low,65)) as cdd65,\n sum(hdd(high,low,60)) as hdd60,\n sum(hdd(high,low,65)) as hdd65,\n sum(case when precip >= 0.01 then 1 else 0 end) as rain_days,\n sum(case when snow >= 0.1 then 1 else 0 end) as snow_days\n from """\n + table + \' WHERE station = %s GROUP by year, month\\n \', pgconn,\n params=(station,), index_col=None)\n', (1411, 1960), False, 'from pandas.io.sql import read_sql\n'), ((3840, 3878), 'pyiem.plot.use_agg.plt.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(8.0, 6.0)'}), '(1, 1, figsize=(8.0, 6.0))\n', (3852, 3878), False, 'from pyiem.plot.use_agg import plt\n'), ((4269, 4331), 'seaborn.heatmap', 'sns.heatmap', (['df2'], {'annot': '(True)', 'fmt': '""".0f"""', 'linewidths': '(0.5)', 'ax': 'ax'}), "(df2, annot=True, fmt='.0f', linewidths=0.5, ax=ax)\n", (4280, 4331), True, 'import seaborn as sns\n'), ((1977, 2006), 'pyiem.exceptions.NoDataFound', 'NoDataFound', (['"""No Data Found."""'], {}), "('No Data Found.')\n", (1988, 2006), False, 'from pyiem.exceptions import NoDataFound\n'), ((710, 731), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (729, 731), False, 'import datetime\n'), ((2076, 2104), 'datetime.date', 'datetime.date', (['x[0]', 'x[1]', '(1)'], {}), '(x[0], x[1], 1)\n', (2089, 2104), False, 'import datetime\n'), ((2476, 2497), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (2495, 2497), False, 'import datetime\n'), ((3166, 3190), 'datetime.date', 'datetime.date', (['yr', 'mo', '(1)'], {}), '(yr, mo, 1)\n', (3179, 3190), False, 'import datetime\n'), ((2369, 2390), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (2388, 2390), False, 'import datetime\n')] |
import os
import pickle
import time
import timeit
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import torch
import tempfile
import horovod.torch as hvd
from horovod.ray import RayExecutor
from ray_shuffling_data_loader.torch_dataset import (TorchShufflingDataset)
from ray_shuffling_data_loader.data_generation import (generate_data,
DATA_SPEC)
import argparse
DEFAULT_DATA_DIR = "s3://shuffling-data-loader-benchmarks/data/"
numpy_to_torch_dtype = {
np.bool: torch.bool,
np.uint8: torch.uint8,
np.int8: torch.int8,
np.int16: torch.int16,
np.int32: torch.int32,
np.int64: torch.int64,
np.float16: torch.float16,
np.float32: torch.float32,
np.float64: torch.float64,
np.complex64: torch.complex64,
np.complex128: torch.complex128
}
# Training settings
parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
parser.add_argument(
"--batch-size",
type=int,
default=250000,
metavar="N",
help="input batch size for training (default: 64)")
parser.add_argument(
"--test-batch-size",
type=int,
default=250000,
metavar="N",
help="input batch size for testing (default: 1000)")
parser.add_argument(
"--epochs",
type=int,
default=10,
metavar="N",
help="number of epochs to train (default: 10)")
parser.add_argument(
"--lr",
type=float,
default=0.01,
metavar="LR",
help="learning rate (default: 0.01)")
parser.add_argument(
"--momentum",
type=float,
default=0.5,
metavar="M",
help="SGD momentum (default: 0.5)")
parser.add_argument(
"--no-cuda",
action="store_true",
default=False,
help="disables CUDA training")
parser.add_argument(
"--seed",
type=int,
default=42,
metavar="S",
help="random seed (default: 42)")
parser.add_argument(
"--log-interval",
type=int,
default=10,
metavar="N",
help=("how many batches to wait before logging training "
"status"))
parser.add_argument(
"--fp16-allreduce",
action="store_true",
default=False,
help="use fp16 compression during allreduce")
parser.add_argument(
"--use-adasum",
action="store_true",
default=False,
help="use adasum algorithm to do reduction")
parser.add_argument(
"--gradient-predivide-factor",
type=float,
default=1.0,
help=("apply gradient predivide factor in optimizer "
"(default: 1.0)"))
parser.add_argument("--num-workers", type=int, default=None)
parser.add_argument("--num-hosts", type=int, default=None)
parser.add_argument("--num-workers-per-host", type=int, default=None)
parser.add_argument("--cpus-per-worker", type=int, default=1)
parser.add_argument("--mock-train-step-time", type=float, default=1.0)
# Synthetic training data generation settings.
parser.add_argument("--cache-files", action="store_true", default=False)
parser.add_argument("--num-rows", type=int, default=2 * (10**7))
parser.add_argument("--num-files", type=int, default=25)
parser.add_argument("--max-row-group-skew", type=float, default=0.0)
parser.add_argument("--num-row-groups-per-file", type=int, default=5)
parser.add_argument("--data-dir", type=str, default=DEFAULT_DATA_DIR)
# Shuffling data loader settings.
parser.add_argument("--num-reducers", type=int, default=32)
parser.add_argument("--max-concurrent-epochs", type=int, default=2)
parser.add_argument("--address", default="auto")
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x)
def train_main(args, filenames):
# Horovod: initialize library.
hvd.init()
torch.manual_seed(args.seed)
if torch.cuda.is_available() and not args.no_cuda:
# Horovod: pin GPU to local rank.
torch.cuda.set_device(hvd.local_rank())
torch.cuda.manual_seed(args.seed)
# Horovod: limit # of CPU threads to be used per worker.
torch.set_num_threads(1)
rank = hvd.rank()
train_dataset = create_dataset(
filenames,
batch_size=args.batch_size,
rank=rank,
num_epochs=args.epochs,
world_size=hvd.size(),
num_reducers=args.num_reducers,
max_concurrent_epochs=args.max_concurrent_epochs)
model = Net()
    # By default, Adasum doesn't need to scale up the learning rate.
lr_scaler = hvd.size() if not args.use_adasum else 1
if torch.cuda.is_available() and not args.no_cuda:
# Move model to GPU.
model.cuda()
# If using GPU Adasum allreduce, scale learning rate by local_size.
if args.use_adasum and hvd.nccl_built():
lr_scaler = hvd.local_size()
# Horovod: scale learning rate by lr_scaler.
optimizer = optim.SGD(
model.parameters(), lr=args.lr * lr_scaler, momentum=args.momentum)
# Horovod: broadcast parameters & optimizer state.
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
# Horovod: (optional) compression algorithm.
compression = (hvd.Compression.fp16
if args.fp16_allreduce else hvd.Compression.none)
# Horovod: wrap optimizer with DistributedOptimizer.
optimizer = hvd.DistributedOptimizer(
optimizer,
named_parameters=model.named_parameters(),
compression=compression,
op=hvd.Adasum if args.use_adasum else hvd.Average,
gradient_predivide_factor=args.gradient_predivide_factor)
def _train(epoch):
model.train()
# Horovod: set epoch to sampler for shuffling.
train_dataset.set_epoch(epoch)
start_epoch = timeit.default_timer()
last_batch_time = start_epoch
batch_wait_times = []
for batch_idx, (data, target) in enumerate(train_dataset):
batch_wait_times.append(timeit.default_timer() - last_batch_time)
if torch.cuda.is_available() and not args.no_cuda:
if isinstance(data, list):
data = [t.cuda() for t in data]
target = target.cuda()
optimizer.zero_grad()
# output = model(data)
if batch_idx % args.log_interval == 0:
print(
f"Processing batch {batch_idx} in epoch {epoch} on worker "
f"{rank}.")
time.sleep(args.mock_train_step_time)
# TODO(Clark): Add worker synchronization barrier here.
# loss = F.nll_loss(output, target)
# loss.backward()
# optimizer.step()
last_batch_time = timeit.default_timer()
epoch_duration = timeit.default_timer() - start_epoch
avg_batch_wait_time = np.mean(batch_wait_times)
std_batch_wait_time = np.std(batch_wait_times)
max_batch_wait_time = np.max(batch_wait_times)
min_batch_wait_time = np.min(batch_wait_times)
print(f"\nEpoch {epoch}, worker {rank} stats over "
f"{len(batch_wait_times)} steps: {epoch_duration:.3f}")
print(f"Mean batch wait time: {avg_batch_wait_time:.3f}s +- "
f"{std_batch_wait_time}")
print(f"Max batch wait time: {max_batch_wait_time:.3f}s")
print(f"Min batch wait time: {min_batch_wait_time:.3f}s")
return batch_wait_times
print(f"Starting training on worker {rank}.")
batch_wait_times = []
for epoch in range(args.epochs):
batch_wait_times.extend(_train(epoch))
batch_wait_times.pop(0)
print(f"Done training on worker {rank}.")
avg_batch_wait_time = np.mean(batch_wait_times)
std_batch_wait_time = np.std(batch_wait_times)
max_batch_wait_time = np.max(batch_wait_times)
min_batch_wait_time = np.min(batch_wait_times)
print(f"\nWorker {rank} training stats over {args.epochs} epochs:")
print(f"Mean batch wait time: {avg_batch_wait_time:.3f}s +- "
f"{std_batch_wait_time}")
print(f"Max batch wait time: {max_batch_wait_time:.3f}s")
print(f"Min batch wait time: {min_batch_wait_time:.3f}s")
# TODO(Clark): Add logic to the dataset abstraction so we don't have to do
# this.
if rank == 0:
print("Waiting in rank 0 worker to let other workers consume queue...")
time.sleep(10)
print("Done waiting in rank 0 worker.")
def create_dataset(filenames, *, batch_size, rank, num_epochs, world_size,
num_reducers, max_concurrent_epochs):
print(f"Creating Torch shuffling dataset for worker {rank} with "
f"{batch_size} batch size, {num_epochs} epochs, {num_reducers} "
f"reducers, and {world_size} trainers.")
feature_columns = list(DATA_SPEC.keys())
feature_types = [
numpy_to_torch_dtype[dtype] for _, _, dtype in DATA_SPEC.values()
]
label_column = feature_columns.pop()
label_type = feature_types.pop()
return TorchShufflingDataset(
filenames,
num_epochs,
world_size,
batch_size,
rank,
num_reducers=num_reducers,
max_concurrent_epochs=max_concurrent_epochs,
feature_columns=feature_columns,
feature_types=feature_types,
label_column=label_column,
label_type=label_type)
if __name__ == "__main__":
args = parser.parse_args()
from ray_shuffling_data_loader.stats import human_readable_size
import ray
print("Connecting to Ray cluster...")
ray.init(address=args.address)
num_rows = args.num_rows
num_files = args.num_files
num_row_groups_per_file = args.num_row_groups_per_file
max_row_group_skew = args.max_row_group_skew
data_dir = args.data_dir
cache_path = os.path.join(tempfile.gettempdir(), "data_cache")
filenames = None
if args.cache_files and os.path.exists(cache_path):
try:
with open(cache_path, "rb") as f:
filenames, num_bytes = pickle.load(f)
except Exception as exc:
print(f"Cache load failed - {exc}")
if not filenames:
print(f"Generating {num_rows} rows over {num_files} files, with "
f"{num_row_groups_per_file} row groups per file and at most "
f"{100 * max_row_group_skew:.1f}% row group skew.")
filenames, num_bytes = generate_data(num_rows, num_files,
num_row_groups_per_file,
max_row_group_skew, data_dir)
if args.cache_files:
with open(os.path.join(tempfile.gettempdir(), "data_cache"),
"wb") as f:
pickle.dump((filenames, num_bytes), f)
print(f"Generated {len(filenames)} files containing {num_rows} rows "
f"with {num_row_groups_per_file} row groups per file, totalling "
f"{human_readable_size(num_bytes)}.")
print("Create Ray executor")
worker_kwargs = {}
num_workers = args.num_workers
num_hosts = args.num_hosts
num_workers_per_host = args.num_workers_per_host
if num_workers is not None:
if num_hosts is not None:
raise ValueError(
"Only one of --num-workers and --num-hosts should be used.")
worker_kwargs["num_workers"] = num_workers
elif num_hosts is not None:
worker_kwargs["num_hosts"] = num_hosts
if num_workers_per_host is None:
raise ValueError("When giving --num-hosts, --num-workers-per-host "
"must also be given.")
worker_kwargs["num_workers_per_host"] = num_workers_per_host
cpus_per_worker = args.cpus_per_worker
settings = RayExecutor.create_settings(timeout_s=30)
executor = RayExecutor(
settings,
use_gpu=True,
gpus_per_worker=1,
cpus_per_worker=cpus_per_worker,
**worker_kwargs)
executor.start()
executor.run(train_main, args=[args, filenames])
executor.shutdown()
print("Done consuming batches.")
| [
"horovod.torch.broadcast_optimizer_state",
"horovod.torch.local_rank",
"ray_shuffling_data_loader.data_generation.DATA_SPEC.values",
"time.sleep",
"horovod.torch.local_size",
"torch.cuda.is_available",
"horovod.torch.size",
"ray.init",
"numpy.mean",
"os.path.exists",
"horovod.torch.rank",
"argparse.ArgumentParser",
"horovod.ray.RayExecutor",
"torch.set_num_threads",
"numpy.max",
"horovod.torch.nccl_built",
"ray_shuffling_data_loader.data_generation.DATA_SPEC.keys",
"numpy.min",
"torch.nn.Dropout2d",
"pickle.load",
"torch.nn.functional.dropout",
"torch.nn.functional.log_softmax",
"numpy.std",
"horovod.ray.RayExecutor.create_settings",
"torch.manual_seed",
"pickle.dump",
"timeit.default_timer",
"horovod.torch.init",
"torch.nn.Conv2d",
"ray_shuffling_data_loader.stats.human_readable_size",
"ray_shuffling_data_loader.data_generation.generate_data",
"ray_shuffling_data_loader.torch_dataset.TorchShufflingDataset",
"tempfile.gettempdir",
"torch.nn.Linear",
"torch.cuda.manual_seed"
]
| [((922, 982), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch MNIST Example"""'}), "(description='PyTorch MNIST Example')\n", (945, 982), False, 'import argparse\n'), ((4215, 4225), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (4223, 4225), True, 'import horovod.torch as hvd\n'), ((4230, 4258), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (4247, 4258), False, 'import torch\n'), ((4513, 4537), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (4534, 4537), False, 'import torch\n'), ((4549, 4559), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (4557, 4559), True, 'import horovod.torch as hvd\n'), ((5517, 5570), 'horovod.torch.broadcast_optimizer_state', 'hvd.broadcast_optimizer_state', (['optimizer'], {'root_rank': '(0)'}), '(optimizer, root_rank=0)\n', (5546, 5570), True, 'import horovod.torch as hvd\n'), ((8136, 8161), 'numpy.mean', 'np.mean', (['batch_wait_times'], {}), '(batch_wait_times)\n', (8143, 8161), True, 'import numpy as np\n'), ((8188, 8212), 'numpy.std', 'np.std', (['batch_wait_times'], {}), '(batch_wait_times)\n', (8194, 8212), True, 'import numpy as np\n'), ((8239, 8263), 'numpy.max', 'np.max', (['batch_wait_times'], {}), '(batch_wait_times)\n', (8245, 8263), True, 'import numpy as np\n'), ((8290, 8314), 'numpy.min', 'np.min', (['batch_wait_times'], {}), '(batch_wait_times)\n', (8296, 8314), True, 'import numpy as np\n'), ((9439, 9709), 'ray_shuffling_data_loader.torch_dataset.TorchShufflingDataset', 'TorchShufflingDataset', (['filenames', 'num_epochs', 'world_size', 'batch_size', 'rank'], {'num_reducers': 'num_reducers', 'max_concurrent_epochs': 'max_concurrent_epochs', 'feature_columns': 'feature_columns', 'feature_types': 'feature_types', 'label_column': 'label_column', 'label_type': 'label_type'}), '(filenames, num_epochs, world_size, batch_size, rank,\n num_reducers=num_reducers, max_concurrent_epochs=max_concurrent_epochs,\n feature_columns=feature_columns, feature_types=feature_types,\n label_column=label_column, label_type=label_type)\n', (9460, 9709), False, 'from ray_shuffling_data_loader.torch_dataset import TorchShufflingDataset\n'), ((9976, 10006), 'ray.init', 'ray.init', ([], {'address': 'args.address'}), '(address=args.address)\n', (9984, 10006), False, 'import ray\n'), ((12163, 12204), 'horovod.ray.RayExecutor.create_settings', 'RayExecutor.create_settings', ([], {'timeout_s': '(30)'}), '(timeout_s=30)\n', (12190, 12204), False, 'from horovod.ray import RayExecutor\n'), ((12220, 12329), 'horovod.ray.RayExecutor', 'RayExecutor', (['settings'], {'use_gpu': '(True)', 'gpus_per_worker': '(1)', 'cpus_per_worker': 'cpus_per_worker'}), '(settings, use_gpu=True, gpus_per_worker=1, cpus_per_worker=\n cpus_per_worker, **worker_kwargs)\n', (12231, 12329), False, 'from horovod.ray import RayExecutor\n'), ((3628, 3659), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(10)'], {'kernel_size': '(5)'}), '(1, 10, kernel_size=5)\n', (3637, 3659), True, 'import torch.nn as nn\n'), ((3681, 3713), 'torch.nn.Conv2d', 'nn.Conv2d', (['(10)', '(20)'], {'kernel_size': '(5)'}), '(10, 20, kernel_size=5)\n', (3690, 3713), True, 'import torch.nn as nn\n'), ((3740, 3754), 'torch.nn.Dropout2d', 'nn.Dropout2d', ([], {}), '()\n', (3752, 3754), True, 'import torch.nn as nn\n'), ((3774, 3792), 'torch.nn.Linear', 'nn.Linear', (['(320)', '(50)'], {}), '(320, 50)\n', (3783, 3792), True, 'import torch.nn as nn\n'), ((3812, 3829), 'torch.nn.Linear', 'nn.Linear', (['(50)', '(10)'], {}), '(50, 
10)\n', (3821, 3829), True, 'import torch.nn as nn\n'), ((4048, 4084), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'training': 'self.training'}), '(x, training=self.training)\n', (4057, 4084), True, 'import torch.nn.functional as F\n'), ((4124, 4140), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {}), '(x)\n', (4137, 4140), True, 'import torch.nn.functional as F\n'), ((4267, 4292), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4290, 4292), False, 'import torch\n'), ((4413, 4446), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (4435, 4446), False, 'import torch\n'), ((4929, 4939), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (4937, 4939), True, 'import horovod.torch as hvd\n'), ((4978, 5003), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5001, 5003), False, 'import torch\n'), ((6220, 6242), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (6240, 6242), False, 'import timeit\n'), ((7280, 7305), 'numpy.mean', 'np.mean', (['batch_wait_times'], {}), '(batch_wait_times)\n', (7287, 7305), True, 'import numpy as np\n'), ((7336, 7360), 'numpy.std', 'np.std', (['batch_wait_times'], {}), '(batch_wait_times)\n', (7342, 7360), True, 'import numpy as np\n'), ((7391, 7415), 'numpy.max', 'np.max', (['batch_wait_times'], {}), '(batch_wait_times)\n', (7397, 7415), True, 'import numpy as np\n'), ((7446, 7470), 'numpy.min', 'np.min', (['batch_wait_times'], {}), '(batch_wait_times)\n', (7452, 7470), True, 'import numpy as np\n'), ((8810, 8824), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (8820, 8824), False, 'import time\n'), ((9230, 9246), 'ray_shuffling_data_loader.data_generation.DATA_SPEC.keys', 'DATA_SPEC.keys', ([], {}), '()\n', (9244, 9246), False, 'from ray_shuffling_data_loader.data_generation import generate_data, DATA_SPEC\n'), ((10236, 10257), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (10255, 10257), False, 'import tempfile\n'), ((10322, 10348), 'os.path.exists', 'os.path.exists', (['cache_path'], {}), '(cache_path)\n', (10336, 10348), False, 'import os\n'), ((10815, 10908), 'ray_shuffling_data_loader.data_generation.generate_data', 'generate_data', (['num_rows', 'num_files', 'num_row_groups_per_file', 'max_row_group_skew', 'data_dir'], {}), '(num_rows, num_files, num_row_groups_per_file,\n max_row_group_skew, data_dir)\n', (10828, 10908), False, 'from ray_shuffling_data_loader.data_generation import generate_data, DATA_SPEC\n'), ((4387, 4403), 'horovod.torch.local_rank', 'hvd.local_rank', ([], {}), '()\n', (4401, 4403), True, 'import horovod.torch as hvd\n'), ((4721, 4731), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (4729, 4731), True, 'import horovod.torch as hvd\n'), ((5183, 5199), 'horovod.torch.nccl_built', 'hvd.nccl_built', ([], {}), '()\n', (5197, 5199), True, 'import horovod.torch as hvd\n'), ((5225, 5241), 'horovod.torch.local_size', 'hvd.local_size', ([], {}), '()\n', (5239, 5241), True, 'import horovod.torch as hvd\n'), ((6920, 6957), 'time.sleep', 'time.sleep', (['args.mock_train_step_time'], {}), '(args.mock_train_step_time)\n', (6930, 6957), False, 'import time\n'), ((7165, 7187), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (7185, 7187), False, 'import timeit\n'), ((7213, 7235), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (7233, 7235), False, 'import timeit\n'), ((9325, 9343), 'ray_shuffling_data_loader.data_generation.DATA_SPEC.values', 'DATA_SPEC.values', 
([], {}), '()\n', (9341, 9343), False, 'from ray_shuffling_data_loader.data_generation import generate_data, DATA_SPEC\n'), ((6471, 6496), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6494, 6496), False, 'import torch\n'), ((10448, 10462), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (10459, 10462), False, 'import pickle\n'), ((11147, 11185), 'pickle.dump', 'pickle.dump', (['(filenames, num_bytes)', 'f'], {}), '((filenames, num_bytes), f)\n', (11158, 11185), False, 'import pickle\n'), ((11349, 11379), 'ray_shuffling_data_loader.stats.human_readable_size', 'human_readable_size', (['num_bytes'], {}), '(num_bytes)\n', (11368, 11379), False, 'from ray_shuffling_data_loader.stats import human_readable_size\n'), ((6414, 6436), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (6434, 6436), False, 'import timeit\n'), ((11059, 11080), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (11078, 11080), False, 'import tempfile\n')] |
import pytest
import numpy as np
import eqtk
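# Regression-style tests: each case below rebuilds inputs from situations that
# previously exposed problems in eqtk.solve (the exact provenance is an assumption
# inferred from the test names).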
def test_promiscuous_binding_failure():
A = np.array(
[
[
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
1.0,
0.0,
0.0,
1.0,
1.0,
1.0,
1.0,
0.0,
0.0,
0.0,
0.0,
],
[
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
1.0,
0.0,
0.0,
0.0,
0.0,
1.0,
1.0,
1.0,
1.0,
],
[
0.0,
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
],
[
0.0,
0.0,
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
],
[
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
1.0,
0.0,
0.0,
1.0,
1.0,
0.0,
0.0,
],
[
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
0.0,
1.0,
1.0,
0.0,
0.0,
1.0,
1.0,
],
]
)
G = np.array(
[
-0.51720535,
-0.69471304,
-1.78260496,
-1.32337777,
-0.63267947,
-0.57923893,
-0.78718634,
-0.27521037,
-0.13733511,
-0.69433251,
1.6858364,
-0.43683479,
0.39312096,
-0.0625205,
0.23139303,
0.07680628,
-0.52774543,
1.74592678,
]
)
x0 = np.array(
[
[
2.48257788e01,
1.72132293e-01,
1.14833731e-02,
5.00547317e-02,
1.38949549e-01,
1.93069773e01,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
]
]
)
def test_spontaneous_production_failure():
N = np.array(
[[1, 0, 1, 0, -1, 0], [1, 0, 0, 1, 0, -1], [1, 1, 1, 0, 0, 0]], dtype=float
)
A = np.array(
[[0, 0, 0, 1, 0, 1], [1, 0, -1, 0, 0, 1], [0, -1, 1, 0, 1, 0]], dtype=float
)
G = np.array([0, 1, 2, 3, 4, 5])
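    # Equilibrium constants follow from the stoichiometry and free energies: K = exp(-N @ G).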
K = np.exp(-np.dot(N, G))
for x0_val in [
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 0, 0],
[1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0],
]:
x0 = np.array(x0_val, dtype=float)
x_NK = eqtk.solve(c0=x0, N=N, K=K)
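        # The A/G formulation must reject conservation matrices with negative entries.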
with pytest.raises(ValueError) as excinfo:
x_AG = eqtk.solve(c0=x0, A=A, G=G)
excinfo.match("`A` must have all nonnegative entries.")
assert eqtk.eqcheck(x_NK, x0, N=N, K=K)
def test_scale_factor_failure():
A = np.array([[1.0, 0.0, 2.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0, 2.0]])
G = np.array([0.0, 0.0, 0.77428976, -5.64873697, -0.95863043])
x0 = np.array(
[
[
5.50293892e-05,
6.49273515e-08,
2.75796219e-05,
1.29854703e-07,
3.24636758e-08,
]
]
)
x = eqtk.solve(c0=x0, A=A, G=G)
assert eqtk.eqcheck(x, x0, A=A, G=G)
def test_trivial_elemental_failure():
A = np.array([[1.0, 0.0], [0.0, 1.0]])
G = np.array([0.0, 0.0])
x0 = np.array([[3.48219906e-06, 1.32719868e-10]])
assert np.allclose(eqtk.solve(c0=x0, A=A, G=G), x0)
A = np.array([[1.0, 0.0], [0.0, 1.0]])
G = np.array([0.0, 0.0])
x0 = np.array([[2.24222410e-08, 1.63359284e-04]])
assert np.allclose(eqtk.solve(c0=x0, A=A, G=G), x0)
A = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
G = np.array([0.0, 0.0, 0.0])
x0 = np.array([[2.63761955e-04, 4.93360042e-07, 4.88340687e-07]])
assert np.allclose(eqtk.solve(c0=x0, A=A, G=G), x0)
def test_past_failure_1():
A = np.array([[1.0, 0.0, 2.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0, 2.0]])
G = np.array([0.0, 0.0, -16.76857677, -2.38430181, 1.22028775])
x0 = np.array(
[
[
1.65989040e-10,
1.07630096e-04,
1.65989040e-10,
1.65989040e-10,
5.38150479e-05,
]
]
)
x = eqtk.solve(x0, A=A, G=G)
assert eqtk.eqcheck(x, x0, A=A, G=G)
def test_past_failure_2():
N = np.array([[-2.0, 1.0, 0.0, 0.0], [-3.0, 0.0, 1.0, 0.0], [-4.0, 0.0, 0.0, 1.0]])
minus_log_K = np.array([-43.66660344, -68.14676841, -92.28023823])
x0 = np.array([[1.87852623e-06, 3.75705246e-06, 1.25235082e-06, 4.69631557e-07]])
K = np.exp(-minus_log_K)
x = eqtk.solve(x0, N, K)
assert eqtk.eqcheck(x, x0, N, K)
def test_small_conc_failure():
A = np.array(
[
[1.0, 0.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 2.0],
[1.0, 0.0, 0.0, 1.0, 2.0],
]
)
G = np.array(
[
-1.1323012373599138e02,
-2.7028447814426110e-01,
-2.3382656193096754e01,
-1.0088531260804201e02,
-5.7676558386243052e01,
]
)
x0 = np.array(
[
[
1.8134373707286439e-08,
3.5913242229740680e-14,
3.5913242229740680e-14,
3.5913242229740680e-14,
1.7956621114870340e-14,
]
]
)
x = eqtk.solve(c0=x0, A=A, G=G)
assert eqtk.eqcheck(x, x0, A=A, G=G)
| [
"eqtk.solve",
"eqtk.eqcheck",
"numpy.exp",
"numpy.array",
"numpy.dot",
"pytest.raises"
]
| [((96, 687), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0,\n 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0,\n 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0,\n 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, \n 1.0], [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0,\n 0.0, 1.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0,\n 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0,\n 1.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,\n 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 1.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0], [0.0,\n 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, \n 1.0, 0.0, 1.0], [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0,\n 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0,\n 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0]])\n', (104, 687), True, 'import numpy as np\n'), ((2582, 2833), 'numpy.array', 'np.array', (['[-0.51720535, -0.69471304, -1.78260496, -1.32337777, -0.63267947, -\n 0.57923893, -0.78718634, -0.27521037, -0.13733511, -0.69433251, \n 1.6858364, -0.43683479, 0.39312096, -0.0625205, 0.23139303, 0.07680628,\n -0.52774543, 1.74592678]'], {}), '([-0.51720535, -0.69471304, -1.78260496, -1.32337777, -0.63267947, \n -0.57923893, -0.78718634, -0.27521037, -0.13733511, -0.69433251, \n 1.6858364, -0.43683479, 0.39312096, -0.0625205, 0.23139303, 0.07680628,\n -0.52774543, 1.74592678])\n', (2590, 2833), True, 'import numpy as np\n'), ((3070, 3224), 'numpy.array', 'np.array', (['[[24.8257788, 0.172132293, 0.0114833731, 0.0500547317, 0.138949549, \n 19.3069773, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]'], {}), '([[24.8257788, 0.172132293, 0.0114833731, 0.0500547317, 0.138949549,\n 19.3069773, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])\n', (3078, 3224), True, 'import numpy as np\n'), ((3749, 3838), 'numpy.array', 'np.array', (['[[1, 0, 1, 0, -1, 0], [1, 0, 0, 1, 0, -1], [1, 1, 1, 0, 0, 0]]'], {'dtype': 'float'}), '([[1, 0, 1, 0, -1, 0], [1, 0, 0, 1, 0, -1], [1, 1, 1, 0, 0, 0]],\n dtype=float)\n', (3757, 3838), True, 'import numpy as np\n'), ((3858, 3947), 'numpy.array', 'np.array', (['[[0, 0, 0, 1, 0, 1], [1, 0, -1, 0, 0, 1], [0, -1, 1, 0, 1, 0]]'], {'dtype': 'float'}), '([[0, 0, 0, 1, 0, 1], [1, 0, -1, 0, 0, 1], [0, -1, 1, 0, 1, 0]],\n dtype=float)\n', (3866, 3947), True, 'import numpy as np\n'), ((3967, 3995), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5]'], {}), '([0, 1, 2, 3, 4, 5])\n', (3975, 3995), True, 'import numpy as np\n'), ((4507, 4571), 'numpy.array', 'np.array', (['[[1.0, 0.0, 2.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0, 2.0]]'], {}), '([[1.0, 0.0, 2.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0, 2.0]])\n', (4515, 4571), True, 'import numpy as np\n'), ((4580, 4638), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.77428976, -5.64873697, -0.95863043]'], {}), '([0.0, 0.0, 0.77428976, -5.64873697, -0.95863043])\n', (4588, 4638), True, 'import numpy as np\n'), ((4648, 4745), 'numpy.array', 'np.array', (['[[5.50293892e-05, 6.49273515e-08, 2.75796219e-05, 1.29854703e-07, \n 3.24636758e-08]]'], {}), '([[5.50293892e-05, 6.49273515e-08, 2.75796219e-05, 1.29854703e-07, \n 3.24636758e-08]])\n', (4656, 4745), True, 'import 
numpy as np\n'), ((4880, 4907), 'eqtk.solve', 'eqtk.solve', ([], {'c0': 'x0', 'A': 'A', 'G': 'G'}), '(c0=x0, A=A, G=G)\n', (4890, 4907), False, 'import eqtk\n'), ((4919, 4948), 'eqtk.eqcheck', 'eqtk.eqcheck', (['x', 'x0'], {'A': 'A', 'G': 'G'}), '(x, x0, A=A, G=G)\n', (4931, 4948), False, 'import eqtk\n'), ((4997, 5031), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.0, 1.0]]'], {}), '([[1.0, 0.0], [0.0, 1.0]])\n', (5005, 5031), True, 'import numpy as np\n'), ((5040, 5060), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (5048, 5060), True, 'import numpy as np\n'), ((5070, 5114), 'numpy.array', 'np.array', (['[[3.48219906e-06, 1.32719868e-10]]'], {}), '([[3.48219906e-06, 1.32719868e-10]])\n', (5078, 5114), True, 'import numpy as np\n'), ((5180, 5214), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.0, 1.0]]'], {}), '([[1.0, 0.0], [0.0, 1.0]])\n', (5188, 5214), True, 'import numpy as np\n'), ((5223, 5243), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (5231, 5243), True, 'import numpy as np\n'), ((5253, 5296), 'numpy.array', 'np.array', (['[[2.2422241e-08, 0.000163359284]]'], {}), '([[2.2422241e-08, 0.000163359284]])\n', (5261, 5296), True, 'import numpy as np\n'), ((5363, 5424), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])\n', (5371, 5424), True, 'import numpy as np\n'), ((5433, 5458), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (5441, 5458), True, 'import numpy as np\n'), ((5468, 5528), 'numpy.array', 'np.array', (['[[0.000263761955, 4.93360042e-07, 4.88340687e-07]]'], {}), '([[0.000263761955, 4.93360042e-07, 4.88340687e-07]])\n', (5476, 5528), True, 'import numpy as np\n'), ((5622, 5686), 'numpy.array', 'np.array', (['[[1.0, 0.0, 2.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0, 2.0]]'], {}), '([[1.0, 0.0, 2.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0, 2.0]])\n', (5630, 5686), True, 'import numpy as np\n'), ((5695, 5754), 'numpy.array', 'np.array', (['[0.0, 0.0, -16.76857677, -2.38430181, 1.22028775]'], {}), '([0.0, 0.0, -16.76857677, -2.38430181, 1.22028775])\n', (5703, 5754), True, 'import numpy as np\n'), ((5764, 5858), 'numpy.array', 'np.array', (['[[1.6598904e-10, 0.000107630096, 1.6598904e-10, 1.6598904e-10, 5.38150479e-05]]'], {}), '([[1.6598904e-10, 0.000107630096, 1.6598904e-10, 1.6598904e-10, \n 5.38150479e-05]])\n', (5772, 5858), True, 'import numpy as np\n'), ((5996, 6020), 'eqtk.solve', 'eqtk.solve', (['x0'], {'A': 'A', 'G': 'G'}), '(x0, A=A, G=G)\n', (6006, 6020), False, 'import eqtk\n'), ((6032, 6061), 'eqtk.eqcheck', 'eqtk.eqcheck', (['x', 'x0'], {'A': 'A', 'G': 'G'}), '(x, x0, A=A, G=G)\n', (6044, 6061), False, 'import eqtk\n'), ((6099, 6178), 'numpy.array', 'np.array', (['[[-2.0, 1.0, 0.0, 0.0], [-3.0, 0.0, 1.0, 0.0], [-4.0, 0.0, 0.0, 1.0]]'], {}), '([[-2.0, 1.0, 0.0, 0.0], [-3.0, 0.0, 1.0, 0.0], [-4.0, 0.0, 0.0, 1.0]])\n', (6107, 6178), True, 'import numpy as np\n'), ((6197, 6249), 'numpy.array', 'np.array', (['[-43.66660344, -68.14676841, -92.28023823]'], {}), '([-43.66660344, -68.14676841, -92.28023823])\n', (6205, 6249), True, 'import numpy as np\n'), ((6259, 6335), 'numpy.array', 'np.array', (['[[1.87852623e-06, 3.75705246e-06, 1.25235082e-06, 4.69631557e-07]]'], {}), '([[1.87852623e-06, 3.75705246e-06, 1.25235082e-06, 4.69631557e-07]])\n', (6267, 6335), True, 'import numpy as np\n'), ((6344, 6364), 'numpy.exp', 'np.exp', (['(-minus_log_K)'], {}), '(-minus_log_K)\n', (6350, 6364), True, 'import numpy as np\n'), ((6373, 
6393), 'eqtk.solve', 'eqtk.solve', (['x0', 'N', 'K'], {}), '(x0, N, K)\n', (6383, 6393), False, 'import eqtk\n'), ((6405, 6430), 'eqtk.eqcheck', 'eqtk.eqcheck', (['x', 'x0', 'N', 'K'], {}), '(x, x0, N, K)\n', (6417, 6430), False, 'import eqtk\n'), ((6472, 6568), 'numpy.array', 'np.array', (['[[1.0, 0.0, 1.0, 1.0, 0.0], [1.0, 1.0, 0.0, 0.0, 2.0], [1.0, 0.0, 0.0, 1.0,\n 2.0]]'], {}), '([[1.0, 0.0, 1.0, 1.0, 0.0], [1.0, 1.0, 0.0, 0.0, 2.0], [1.0, 0.0, \n 0.0, 1.0, 2.0]])\n', (6480, 6568), True, 'import numpy as np\n'), ((6633, 6752), 'numpy.array', 'np.array', (['[-113.23012373599138, -0.2702844781442611, -23.382656193096754, -\n 100.88531260804201, -57.67655838624305]'], {}), '([-113.23012373599138, -0.2702844781442611, -23.382656193096754, -\n 100.88531260804201, -57.67655838624305])\n', (6641, 6752), True, 'import numpy as np\n'), ((6859, 6991), 'numpy.array', 'np.array', (['[[1.813437370728644e-08, 3.591324222974068e-14, 3.591324222974068e-14, \n 3.591324222974068e-14, 1.795662111487034e-14]]'], {}), '([[1.813437370728644e-08, 3.591324222974068e-14, \n 3.591324222974068e-14, 3.591324222974068e-14, 1.795662111487034e-14]])\n', (6867, 6991), True, 'import numpy as np\n'), ((7131, 7158), 'eqtk.solve', 'eqtk.solve', ([], {'c0': 'x0', 'A': 'A', 'G': 'G'}), '(c0=x0, A=A, G=G)\n', (7141, 7158), False, 'import eqtk\n'), ((7170, 7199), 'eqtk.eqcheck', 'eqtk.eqcheck', (['x', 'x0'], {'A': 'A', 'G': 'G'}), '(x, x0, A=A, G=G)\n', (7182, 7199), False, 'import eqtk\n'), ((4179, 4208), 'numpy.array', 'np.array', (['x0_val'], {'dtype': 'float'}), '(x0_val, dtype=float)\n', (4187, 4208), True, 'import numpy as np\n'), ((4224, 4251), 'eqtk.solve', 'eqtk.solve', ([], {'c0': 'x0', 'N': 'N', 'K': 'K'}), '(c0=x0, N=N, K=K)\n', (4234, 4251), False, 'import eqtk\n'), ((4431, 4463), 'eqtk.eqcheck', 'eqtk.eqcheck', (['x_NK', 'x0'], {'N': 'N', 'K': 'K'}), '(x_NK, x0, N=N, K=K)\n', (4443, 4463), False, 'import eqtk\n'), ((5138, 5165), 'eqtk.solve', 'eqtk.solve', ([], {'c0': 'x0', 'A': 'A', 'G': 'G'}), '(c0=x0, A=A, G=G)\n', (5148, 5165), False, 'import eqtk\n'), ((5321, 5348), 'eqtk.solve', 'eqtk.solve', ([], {'c0': 'x0', 'A': 'A', 'G': 'G'}), '(c0=x0, A=A, G=G)\n', (5331, 5348), False, 'import eqtk\n'), ((5552, 5579), 'eqtk.solve', 'eqtk.solve', ([], {'c0': 'x0', 'A': 'A', 'G': 'G'}), '(c0=x0, A=A, G=G)\n', (5562, 5579), False, 'import eqtk\n'), ((4012, 4024), 'numpy.dot', 'np.dot', (['N', 'G'], {}), '(N, G)\n', (4018, 4024), True, 'import numpy as np\n'), ((4266, 4291), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4279, 4291), False, 'import pytest\n'), ((4323, 4350), 'eqtk.solve', 'eqtk.solve', ([], {'c0': 'x0', 'A': 'A', 'G': 'G'}), '(c0=x0, A=A, G=G)\n', (4333, 4350), False, 'import eqtk\n')] |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['OutboundRule']
class OutboundRule(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allocated_outbound_ports: Optional[pulumi.Input[int]] = None,
backend_address_pool_id: Optional[pulumi.Input[str]] = None,
enable_tcp_reset: Optional[pulumi.Input[bool]] = None,
frontend_ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OutboundRuleFrontendIpConfigurationArgs']]]]] = None,
idle_timeout_in_minutes: Optional[pulumi.Input[int]] = None,
loadbalancer_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Manages a Load Balancer Outbound Rule.
> **NOTE** When using this resource, the Load Balancer needs to have a FrontEnd IP Configuration and a Backend Address Pool Attached.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West US")
example_public_ip = azure.network.PublicIp("examplePublicIp",
location="West US",
resource_group_name=example_resource_group.name,
allocation_method="Static")
example_load_balancer = azure.lb.LoadBalancer("exampleLoadBalancer",
location="West US",
resource_group_name=example_resource_group.name,
frontend_ip_configurations=[azure.lb.LoadBalancerFrontendIpConfigurationArgs(
name="PublicIPAddress",
public_ip_address_id=example_public_ip.id,
)])
example_backend_address_pool = azure.lb.BackendAddressPool("exampleBackendAddressPool",
resource_group_name=example_resource_group.name,
loadbalancer_id=example_load_balancer.id)
example_outbound_rule = azure.lb.OutboundRule("exampleOutboundRule",
resource_group_name=example_resource_group.name,
loadbalancer_id=example_load_balancer.id,
protocol="Tcp",
backend_address_pool_id=example_backend_address_pool.id,
frontend_ip_configurations=[azure.lb.OutboundRuleFrontendIpConfigurationArgs(
name="PublicIPAddress",
)])
```
## Import
Load Balancer Outbound Rules can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:lb/outboundRule:OutboundRule example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Network/loadBalancers/lb1/outboundRules/rule1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] allocated_outbound_ports: The number of outbound ports to be used for NAT.
:param pulumi.Input[str] backend_address_pool_id: The ID of the Backend Address Pool. Outbound traffic is randomly load balanced across IPs in the backend IPs.
:param pulumi.Input[bool] enable_tcp_reset: Receive bidirectional TCP Reset on TCP flow idle timeout or unexpected connection termination. This element is only used when the protocol is set to TCP.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OutboundRuleFrontendIpConfigurationArgs']]]] frontend_ip_configurations: One or more `frontend_ip_configuration` blocks as defined below.
:param pulumi.Input[int] idle_timeout_in_minutes: The timeout for the TCP idle connection
:param pulumi.Input[str] loadbalancer_id: The ID of the Load Balancer in which to create the Outbound Rule. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Outbound Rule. Changing this forces a new resource to be created.
:param pulumi.Input[str] protocol: The transport protocol for the external endpoint. Possible values are `Udp`, `Tcp` or `All`.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the resource. Changing this forces a new resource to be created.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['allocated_outbound_ports'] = allocated_outbound_ports
if backend_address_pool_id is None:
raise TypeError("Missing required property 'backend_address_pool_id'")
__props__['backend_address_pool_id'] = backend_address_pool_id
__props__['enable_tcp_reset'] = enable_tcp_reset
__props__['frontend_ip_configurations'] = frontend_ip_configurations
__props__['idle_timeout_in_minutes'] = idle_timeout_in_minutes
if loadbalancer_id is None:
raise TypeError("Missing required property 'loadbalancer_id'")
__props__['loadbalancer_id'] = loadbalancer_id
__props__['name'] = name
if protocol is None:
raise TypeError("Missing required property 'protocol'")
__props__['protocol'] = protocol
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
super(OutboundRule, __self__).__init__(
'azure:lb/outboundRule:OutboundRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
allocated_outbound_ports: Optional[pulumi.Input[int]] = None,
backend_address_pool_id: Optional[pulumi.Input[str]] = None,
enable_tcp_reset: Optional[pulumi.Input[bool]] = None,
frontend_ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OutboundRuleFrontendIpConfigurationArgs']]]]] = None,
idle_timeout_in_minutes: Optional[pulumi.Input[int]] = None,
loadbalancer_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None) -> 'OutboundRule':
"""
Get an existing OutboundRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] allocated_outbound_ports: The number of outbound ports to be used for NAT.
:param pulumi.Input[str] backend_address_pool_id: The ID of the Backend Address Pool. Outbound traffic is randomly load balanced across IPs in the backend IPs.
:param pulumi.Input[bool] enable_tcp_reset: Receive bidirectional TCP Reset on TCP flow idle timeout or unexpected connection termination. This element is only used when the protocol is set to TCP.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OutboundRuleFrontendIpConfigurationArgs']]]] frontend_ip_configurations: One or more `frontend_ip_configuration` blocks as defined below.
:param pulumi.Input[int] idle_timeout_in_minutes: The timeout for the TCP idle connection
:param pulumi.Input[str] loadbalancer_id: The ID of the Load Balancer in which to create the Outbound Rule. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Outbound Rule. Changing this forces a new resource to be created.
:param pulumi.Input[str] protocol: The transport protocol for the external endpoint. Possible values are `Udp`, `Tcp` or `All`.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the resource. Changing this forces a new resource to be created.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["allocated_outbound_ports"] = allocated_outbound_ports
__props__["backend_address_pool_id"] = backend_address_pool_id
__props__["enable_tcp_reset"] = enable_tcp_reset
__props__["frontend_ip_configurations"] = frontend_ip_configurations
__props__["idle_timeout_in_minutes"] = idle_timeout_in_minutes
__props__["loadbalancer_id"] = loadbalancer_id
__props__["name"] = name
__props__["protocol"] = protocol
__props__["resource_group_name"] = resource_group_name
return OutboundRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="allocatedOutboundPorts")
def allocated_outbound_ports(self) -> pulumi.Output[Optional[int]]:
"""
The number of outbound ports to be used for NAT.
"""
return pulumi.get(self, "allocated_outbound_ports")
@property
@pulumi.getter(name="backendAddressPoolId")
def backend_address_pool_id(self) -> pulumi.Output[str]:
"""
The ID of the Backend Address Pool. Outbound traffic is randomly load balanced across IPs in the backend IPs.
"""
return pulumi.get(self, "backend_address_pool_id")
@property
@pulumi.getter(name="enableTcpReset")
def enable_tcp_reset(self) -> pulumi.Output[Optional[bool]]:
"""
Receive bidirectional TCP Reset on TCP flow idle timeout or unexpected connection termination. This element is only used when the protocol is set to TCP.
"""
return pulumi.get(self, "enable_tcp_reset")
@property
@pulumi.getter(name="frontendIpConfigurations")
def frontend_ip_configurations(self) -> pulumi.Output[Optional[Sequence['outputs.OutboundRuleFrontendIpConfiguration']]]:
"""
One or more `frontend_ip_configuration` blocks as defined below.
"""
return pulumi.get(self, "frontend_ip_configurations")
@property
@pulumi.getter(name="idleTimeoutInMinutes")
def idle_timeout_in_minutes(self) -> pulumi.Output[Optional[int]]:
"""
The timeout for the TCP idle connection
"""
return pulumi.get(self, "idle_timeout_in_minutes")
@property
@pulumi.getter(name="loadbalancerId")
def loadbalancer_id(self) -> pulumi.Output[str]:
"""
The ID of the Load Balancer in which to create the Outbound Rule. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "loadbalancer_id")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the name of the Outbound Rule. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def protocol(self) -> pulumi.Output[str]:
"""
The transport protocol for the external endpoint. Possible values are `Udp`, `Tcp` or `All`.
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the resource group in which to create the resource. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| [
"pulumi.getter",
"warnings.warn",
"pulumi.ResourceOptions",
"pulumi.get"
]
| [((10406, 10450), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""allocatedOutboundPorts"""'}), "(name='allocatedOutboundPorts')\n", (10419, 10450), False, 'import pulumi\n'), ((10684, 10726), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""backendAddressPoolId"""'}), "(name='backendAddressPoolId')\n", (10697, 10726), False, 'import pulumi\n'), ((11009, 11045), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""enableTcpReset"""'}), "(name='enableTcpReset')\n", (11022, 11045), False, 'import pulumi\n'), ((11369, 11415), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""frontendIpConfigurations"""'}), "(name='frontendIpConfigurations')\n", (11382, 11415), False, 'import pulumi\n'), ((11721, 11763), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""idleTimeoutInMinutes"""'}), "(name='idleTimeoutInMinutes')\n", (11734, 11763), False, 'import pulumi\n'), ((11986, 12022), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""loadbalancerId"""'}), "(name='loadbalancerId')\n", (11999, 12022), False, 'import pulumi\n'), ((12785, 12824), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""resourceGroupName"""'}), "(name='resourceGroupName')\n", (12798, 12824), False, 'import pulumi\n'), ((10619, 10663), 'pulumi.get', 'pulumi.get', (['self', '"""allocated_outbound_ports"""'], {}), "(self, 'allocated_outbound_ports')\n", (10629, 10663), False, 'import pulumi\n'), ((10945, 10988), 'pulumi.get', 'pulumi.get', (['self', '"""backend_address_pool_id"""'], {}), "(self, 'backend_address_pool_id')\n", (10955, 10988), False, 'import pulumi\n'), ((11312, 11348), 'pulumi.get', 'pulumi.get', (['self', '"""enable_tcp_reset"""'], {}), "(self, 'enable_tcp_reset')\n", (11322, 11348), False, 'import pulumi\n'), ((11654, 11700), 'pulumi.get', 'pulumi.get', (['self', '"""frontend_ip_configurations"""'], {}), "(self, 'frontend_ip_configurations')\n", (11664, 11700), False, 'import pulumi\n'), ((11922, 11965), 'pulumi.get', 'pulumi.get', (['self', '"""idle_timeout_in_minutes"""'], {}), "(self, 'idle_timeout_in_minutes')\n", (11932, 11965), False, 'import pulumi\n'), ((12240, 12275), 'pulumi.get', 'pulumi.get', (['self', '"""loadbalancer_id"""'], {}), "(self, 'loadbalancer_id')\n", (12250, 12275), False, 'import pulumi\n'), ((12491, 12515), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (12501, 12515), False, 'import pulumi\n'), ((12736, 12764), 'pulumi.get', 'pulumi.get', (['self', '"""protocol"""'], {}), "(self, 'protocol')\n", (12746, 12764), False, 'import pulumi\n'), ((13044, 13083), 'pulumi.get', 'pulumi.get', (['self', '"""resource_group_name"""'], {}), "(self, 'resource_group_name')\n", (13054, 13083), False, 'import pulumi\n'), ((4969, 5044), 'warnings.warn', 'warnings.warn', (['"""explicit use of __name__ is deprecated"""', 'DeprecationWarning'], {}), "('explicit use of __name__ is deprecated', DeprecationWarning)\n", (4982, 5044), False, 'import warnings\n'), ((5127, 5226), 'warnings.warn', 'warnings.warn', (['"""explicit use of __opts__ is deprecated, use \'opts\' instead"""', 'DeprecationWarning'], {}), '("explicit use of __opts__ is deprecated, use \'opts\' instead",\n DeprecationWarning)\n', (5140, 5226), False, 'import warnings\n'), ((5295, 5319), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {}), '()\n', (5317, 5319), False, 'import pulumi\n'), ((9710, 9739), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'id': 'id'}), '(id=id)\n', (9732, 9739), False, 'import pulumi\n')] |
import numpy as np
import torch
import torch.nn as nn
from mmcv.runner import obj_from_dict
from mmcv.utils.config import Config
from mmedit.models import build_model
from mmedit.models.losses import L1Loss
from mmedit.models.registry import COMPONENTS
@COMPONENTS.register_module()
class BP(nn.Module):
"""A simple BP network for testing LIIF.
Args:
in_dim (int): Input dimension.
out_dim (int): Output dimension.
"""
def __init__(self, in_dim, out_dim):
super().__init__()
self.layer = nn.Linear(in_dim, out_dim)
def forward(self, x):
shape = x.shape[:-1]
x = self.layer(x.view(-1, x.shape[-1]))
return x.view(*shape, -1)
def test_liif():
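    """Build the LIIF restorer and exercise train_step/forward_test and val_step on CPU (and GPU when available)."""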
model_cfg = dict(
type='LIIF',
generator=dict(
type='EDSR',
in_channels=3,
out_channels=3,
mid_channels=8,
num_blocks=1),
imnet=dict(type='BP', in_dim=8, out_dim=3),
local_ensemble=True,
feat_unfold=True,
cell_decode=True,
rgb_mean=(0.4488, 0.4371, 0.4040),
rgb_std=(1., 1., 1.),
eval_bsize=30000,
pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'))
scale_max = 4
train_cfg = None
test_cfg = Config(dict(metrics=['PSNR', 'SSIM'], crop_border=scale_max))
# build restorer
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
# test attributes
assert restorer.__class__.__name__ == 'LIIF'
assert isinstance(restorer.imnet, BP)
assert isinstance(restorer.pixel_loss, L1Loss)
# prepare data
inputs = torch.rand(1, 3, 22, 11)
targets = torch.rand(1, 128 * 64, 3)
coord = torch.rand(1, 128 * 64, 2)
cell = torch.rand(1, 128 * 64, 2)
data_batch = {'lq': inputs, 'gt': targets, 'coord': coord, 'cell': cell}
# prepare optimizer
optim_cfg = dict(type='Adam', lr=1e-4, betas=(0.9, 0.999))
optimizer = obj_from_dict(optim_cfg, torch.optim,
dict(params=restorer.parameters()))
# test train_step and forward_test (cpu)
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['log_vars']['loss_pix'], float)
assert outputs['num_samples'] == 1
assert outputs['results']['lq'].shape == data_batch['lq'].shape
assert outputs['results']['gt'].shape == data_batch['gt'].shape
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 128 * 64, 3)
# test train_step and forward_test (gpu)
if torch.cuda.is_available():
restorer = restorer.cuda()
data_batch = {
'lq': inputs.cuda(),
'gt': targets.cuda(),
'coord': coord.cuda(),
'cell': cell.cuda()
}
# train_step
optimizer = obj_from_dict(optim_cfg, torch.optim,
dict(params=restorer.parameters()))
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['log_vars']['loss_pix'], float)
assert outputs['num_samples'] == 1
assert outputs['results']['lq'].shape == data_batch['lq'].shape
assert outputs['results']['gt'].shape == data_batch['gt'].shape
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 128 * 64, 3)
# val_step
result = restorer.val_step(data_batch, meta=[{'gt_path': ''}])
assert isinstance(result, dict)
assert isinstance(result['eval_result'], dict)
assert result['eval_result'].keys() == set({'PSNR', 'SSIM'})
assert isinstance(result['eval_result']['PSNR'], np.float64)
assert isinstance(result['eval_result']['SSIM'], np.float64)
| [
"mmedit.models.registry.COMPONENTS.register_module",
"mmedit.models.build_model",
"torch.is_tensor",
"torch.cuda.is_available",
"torch.nn.Linear",
"torch.rand"
]
| [((257, 285), 'mmedit.models.registry.COMPONENTS.register_module', 'COMPONENTS.register_module', ([], {}), '()\n', (283, 285), False, 'from mmedit.models.registry import COMPONENTS\n'), ((1389, 1451), 'mmedit.models.build_model', 'build_model', (['model_cfg'], {'train_cfg': 'train_cfg', 'test_cfg': 'test_cfg'}), '(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)\n', (1400, 1451), False, 'from mmedit.models import build_model\n'), ((1650, 1674), 'torch.rand', 'torch.rand', (['(1)', '(3)', '(22)', '(11)'], {}), '(1, 3, 22, 11)\n', (1660, 1674), False, 'import torch\n'), ((1689, 1715), 'torch.rand', 'torch.rand', (['(1)', '(128 * 64)', '(3)'], {}), '(1, 128 * 64, 3)\n', (1699, 1715), False, 'import torch\n'), ((1728, 1754), 'torch.rand', 'torch.rand', (['(1)', '(128 * 64)', '(2)'], {}), '(1, 128 * 64, 2)\n', (1738, 1754), False, 'import torch\n'), ((1766, 1792), 'torch.rand', 'torch.rand', (['(1)', '(128 * 64)', '(2)'], {}), '(1, 128 * 64, 2)\n', (1776, 1792), False, 'import torch\n'), ((2515, 2560), 'torch.is_tensor', 'torch.is_tensor', (["outputs['results']['output']"], {}), "(outputs['results']['output'])\n", (2530, 2560), False, 'import torch\n'), ((2681, 2706), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2704, 2706), False, 'import torch\n'), ((541, 567), 'torch.nn.Linear', 'nn.Linear', (['in_dim', 'out_dim'], {}), '(in_dim, out_dim)\n', (550, 567), True, 'import torch.nn as nn\n'), ((3483, 3528), 'torch.is_tensor', 'torch.is_tensor', (["outputs['results']['output']"], {}), "(outputs['results']['output'])\n", (3498, 3528), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""Implementation of MiniGoogLeNet architecture.
This implementation is based on the original implementation of GoogLeNet.
The authors of the net applied batch normalization before the activation layer;
that ordering should be switched.
"""
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import AveragePooling2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Activation
from keras.layers.core import Dropout
from keras.layers.core import Dense
from keras.layers import Flatten
from keras.layers import Input
from keras.models import Model
from keras.layers import concatenate
from keras import backend as K
class MiniGoogLeNet:
"""Implementation of MiniGoogLeNet architecture
"""
@staticmethod
def conv_module(x, filter_num, filter_x_size, filter_y_size, stride, chanel_dim, padding="same"):
"""Define conv layer
Arguments:
x {Tensor} -- input layer to the function
filter_num {int} -- number of filters our CONV layer is going to learn
filter_x_size {int} -- x-size of each of the filter_num filters that will be learned
filter_y_size {int} -- y-size of each of the filter_num filters that will be learned
stride {int} -- stride of the CONV layer
chanel_dim {int} -- channel dimension, derived from “channels last” or “channels first”
Keyword Arguments:
padding {str} -- type of padding to be applied to the CONV layer (default: {"same"})
Returns:
Tensor -- convolutional module
"""
# define a CONV => BN => RELU pattern
x = Conv2D(filter_num, (filter_x_size, filter_y_size), strides=stride, padding=padding)(x)
x = BatchNormalization(axis=chanel_dim)(x)
x = Activation("relu")(x)
# return the block
return x
@staticmethod
def inception_module(x, numK1x1, numK3x3, chanel_dim): # pylint: disable=invalid-name
"""Define inception module
Arguments:
x {Tensor} -- input layer
numK1x1 {int} -- number of 1x1 filters
numK3x3 {int} -- number of 3x3 filters
chanel_dim {int} -- channel dimension, derived from “channels last” or “channels first”
Returns:
Tensor -- inception module
"""
# define two CONV modules, then concatenate across the channel dimension
conv_1x1 = MiniGoogLeNet.conv_module(x, numK1x1, 1, 1, (1, 1), chanel_dim)
conv_3x3 = MiniGoogLeNet.conv_module(x, numK3x3, 3, 3, (1, 1), chanel_dim)
x = concatenate([conv_1x1, conv_3x3], axis=chanel_dim)
# return the block
return x
@staticmethod
def downsample_module(x, filter_num, chanel_dim):
"""Define downsample module
Arguments:
x {Tensor} -- input layer
filter_num {int} -- number of filters our CONV layer is going to learn
chanel_dim {int} -- channel dimension, derived from “channels last” or “channels first”
Returns:
Tensor -- downsample module
"""
# define the CONV module and POOL, then concatenate across the channel dimensions
conv_3x3 = MiniGoogLeNet.conv_module(x, filter_num, 3, 3, (2, 2), chanel_dim, padding="valid")
pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = concatenate([conv_3x3, pool], axis=chanel_dim)
# return the block
return x
@staticmethod
def build(width, height, depth, classes):
"""Build MiniGoogLeNet architecture
Arguments:
            width {int} -- width of the input images in pixels
            height {int} -- height of the input images in pixels
            depth {int} -- number of channels in the input images
            classes {int} -- number of output classes to predict
Returns:
obj -- MiniGoogLeNet model
"""
# initialize the input shape to be "channels last" and the channels dimension itself
input_shape = (height, width, depth)
chanel_dim = -1
# if we are using "channels first", update the input shape and channels dimension
if K.image_data_format() == "channels_first":
input_shape = (depth, height, width)
chanel_dim = 1
# define the model input and first CONV module
inputs = Input(shape=input_shape)
x = MiniGoogLeNet.conv_module(inputs, 96, 3, 3, (1, 1), chanel_dim)
# two Inception modules followed by a downsample module
x = MiniGoogLeNet.inception_module(x, 32, 32, chanel_dim)
x = MiniGoogLeNet.inception_module(x, 32, 48, chanel_dim)
x = MiniGoogLeNet.downsample_module(x, 80, chanel_dim)
# four Inception modules followed by a downsample module
x = MiniGoogLeNet.inception_module(x, 112, 48, chanel_dim)
x = MiniGoogLeNet.inception_module(x, 96, 64, chanel_dim)
x = MiniGoogLeNet.inception_module(x, 80, 80, chanel_dim)
x = MiniGoogLeNet.inception_module(x, 48, 96, chanel_dim)
x = MiniGoogLeNet.downsample_module(x, 96, chanel_dim)
# two Inception modules followed by global POOL and dropout
x = MiniGoogLeNet.inception_module(x, 176, 160, chanel_dim)
x = MiniGoogLeNet.inception_module(x, 176, 160, chanel_dim)
x = AveragePooling2D((7, 7))(x)
x = Dropout(0.5)(x)
# softmax classifier
x = Flatten()(x)
x = Dense(classes)(x)
x = Activation("softmax")(x)
# create the model
model = Model(inputs, x, name="googlenet")
# return the constructed network architecture
return model
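
# Minimal usage sketch (added for illustration, not part of the original module).
# The 32x32 RGB input and 10 classes are assumptions (CIFAR-10-sized data); a 32x32
# input yields 7x7 feature maps, which the final 7x7 average pool expects.
if __name__ == "__main__":
    model = MiniGoogLeNet.build(width=32, height=32, depth=3, classes=10)
    model.compile(optimizer="sgd", loss="categorical_crossentropy", metrics=["accuracy"])
    model.summary()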
| [
"keras.backend.image_data_format",
"keras.layers.core.Activation",
"keras.layers.Flatten",
"keras.layers.normalization.BatchNormalization",
"keras.layers.convolutional.AveragePooling2D",
"keras.layers.convolutional.Conv2D",
"keras.layers.Input",
"keras.layers.concatenate",
"keras.models.Model",
"keras.layers.convolutional.MaxPooling2D",
"keras.layers.core.Dropout",
"keras.layers.core.Dense"
]
| [((2664, 2714), 'keras.layers.concatenate', 'concatenate', (['[conv_1x1, conv_3x3]'], {'axis': 'chanel_dim'}), '([conv_1x1, conv_3x3], axis=chanel_dim)\n', (2675, 2714), False, 'from keras.layers import concatenate\n'), ((3439, 3485), 'keras.layers.concatenate', 'concatenate', (['[conv_3x3, pool]'], {'axis': 'chanel_dim'}), '([conv_3x3, pool], axis=chanel_dim)\n', (3450, 3485), False, 'from keras.layers import concatenate\n'), ((4351, 4375), 'keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (4356, 4375), False, 'from keras.layers import Input\n'), ((5545, 5579), 'keras.models.Model', 'Model', (['inputs', 'x'], {'name': '"""googlenet"""'}), "(inputs, x, name='googlenet')\n", (5550, 5579), False, 'from keras.models import Model\n'), ((1715, 1803), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['filter_num', '(filter_x_size, filter_y_size)'], {'strides': 'stride', 'padding': 'padding'}), '(filter_num, (filter_x_size, filter_y_size), strides=stride, padding=\n padding)\n', (1721, 1803), False, 'from keras.layers.convolutional import Conv2D\n'), ((1814, 1849), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {'axis': 'chanel_dim'}), '(axis=chanel_dim)\n', (1832, 1849), False, 'from keras.layers.normalization import BatchNormalization\n'), ((1865, 1883), 'keras.layers.core.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1875, 1883), False, 'from keras.layers.core import Activation\n'), ((3387, 3423), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', (['(3, 3)'], {'strides': '(2, 2)'}), '((3, 3), strides=(2, 2))\n', (3399, 3423), False, 'from keras.layers.convolutional import MaxPooling2D\n'), ((4159, 4180), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (4178, 4180), True, 'from keras import backend as K\n'), ((5323, 5347), 'keras.layers.convolutional.AveragePooling2D', 'AveragePooling2D', (['(7, 7)'], {}), '((7, 7))\n', (5339, 5347), False, 'from keras.layers.convolutional import AveragePooling2D\n'), ((5363, 5375), 'keras.layers.core.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (5370, 5375), False, 'from keras.layers.core import Dropout\n'), ((5421, 5430), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (5428, 5430), False, 'from keras.layers import Flatten\n'), ((5446, 5460), 'keras.layers.core.Dense', 'Dense', (['classes'], {}), '(classes)\n', (5451, 5460), False, 'from keras.layers.core import Dense\n'), ((5476, 5497), 'keras.layers.core.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (5486, 5497), False, 'from keras.layers.core import Activation\n')] |
import os
SERVER_NAME = os.getenv('DOMAIN_SUPERSET')
PUBLIC_ROLE_LIKE_GAMMA = True
SESSION_COOKIE_SAMESITE = None # One of [None, 'Lax', 'Strict']
SESSION_COOKIE_HTTPONLY = False
MAPBOX_API_KEY = os.getenv('MAPBOX_API_KEY', '')
POSTGRES_DB = os.getenv('POSTGRES_DB')
POSTGRES_PASSWORD = os.getenv('POSTGRES_PASSWORD')
POSTGRES_USER = os.getenv('POSTGRES_USER')
POSTGRES_PORT = str(os.getenv('POSTGRES_PORT'))
HTTP_HEADERS = {'X-Frame-Options': 'ALLOWALL'}
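# Assemble the SQLAlchemy connection URI for Superset's metadata database from the Postgres settings above.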
sql_alchemy_string = ('postgresql+psycopg2://' + POSTGRES_USER + ':' + POSTGRES_PASSWORD
                      + '@postgres:' + POSTGRES_PORT + '/' + POSTGRES_DB)
CACHE_CONFIG = {
'CACHE_TYPE': 'redis',
'CACHE_DEFAULT_TIMEOUT': 300,
'CACHE_KEY_PREFIX': 'superset_',
'CACHE_REDIS_HOST': 'redis',
'CACHE_REDIS_PORT': 6379,
'CACHE_REDIS_DB': 1,
'CACHE_REDIS_URL': 'redis://redis:6379/1'}
SQLALCHEMY_DATABASE_URI = sql_alchemy_string
SQLALCHEMY_TRACK_MODIFICATIONS = True
SECRET_KEY = 'thisISaSECRET_1234' | [
"os.getenv"
]
| [((25, 53), 'os.getenv', 'os.getenv', (['"""DOMAIN_SUPERSET"""'], {}), "('DOMAIN_SUPERSET')\n", (34, 53), False, 'import os\n'), ((197, 228), 'os.getenv', 'os.getenv', (['"""MAPBOX_API_KEY"""', '""""""'], {}), "('MAPBOX_API_KEY', '')\n", (206, 228), False, 'import os\n'), ((241, 265), 'os.getenv', 'os.getenv', (['"""POSTGRES_DB"""'], {}), "('POSTGRES_DB')\n", (250, 265), False, 'import os\n'), ((284, 314), 'os.getenv', 'os.getenv', (['"""POSTGRES_PASSWORD"""'], {}), "('POSTGRES_PASSWORD')\n", (293, 314), False, 'import os\n'), ((329, 355), 'os.getenv', 'os.getenv', (['"""POSTGRES_USER"""'], {}), "('POSTGRES_USER')\n", (338, 355), False, 'import os\n'), ((374, 400), 'os.getenv', 'os.getenv', (['"""POSTGRES_PORT"""'], {}), "('POSTGRES_PORT')\n", (383, 400), False, 'import os\n')] |
import database as d
import numpy as np
import random
from transitions import Machine
#Conversations are Markov chains. Each transition matrix has one column per CURRENT state j and one row per TARGET state i.
#Each entry [i][j] = the probability of moving to state i given current state j.
#Target state D = end of conversation. We start in state D when initializing a conversation.
#Each internal list is a row of the matrix; every column sums to 1.
#Conversation is a singleton. DO NOT CREATE NEW CONVERSATION OBJECTS.
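#Worked example: if the previous topic choice was "stores" (stateVector = [1,0,0,0,0]),
#np.dot(topicMatrix, stateVector) picks out column 0 of topicMatrix,
#i.e. [0.00, 0.20, 0.15, 0.15, 0.50]: a 20% chance of moving to manufacturers, 15% friends,
#15% myself, 50% end of conversation, and no chance of repeating stores.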
class Conversation(object):
#a. stores, b.manufacturers, c.friends, d. myself, e.end conversation
topicMatrix = [
[0.00,0.20,0.15,0.15,0.25],
[0.20,0.00,0.15,0.15,0.25],
[0.15,0.15,0.00,0.20,0.25],
[0.15,0.15,0.20,0.00,0.25],
[0.50,0.50,0.50,0.50,0.00]
]
#a. different store, b. new topic, c. end convo, d. prices
storeMatrix = [
[0.0,0.0,0.25,0.25],
[0.0,0.0,0.25,0.25],
[0.0,0.0,0.25,0.50],
[1.0,1.0,0.25,0.00]
]
#a. different manufacturer, b. new topic, c. end convo, d. prices
manuMatrix = [
[0.0,0.0,0.25,0.25],
[0.0,0.0,0.25,0.25],
[0.0,0.0,0.25,0.50],
[1.0,1.0,0.25,0.00]
]
#a. different friend, b. new topic, c. end convo, d. family, e. job, /f. skills
friendMatrix = [
[0.0,0.0,0.2,0.1,0.1],
[0.0,0.0,0.2,0.2,0.2],
[0.0,0.0,0.2,0.5,0.5],
[0.5,0.5,0.2,0.0,0.2],
[0.5,0.5,0.2,0.2,0.0]
]
# friendMatrix = [
# [0.00,0.00,0.15,0.1,0.1,0.1],
# [0.00,0.00,0.15,0.2,0.2,0.2],
# [0.00,0.00,0.15,0.5,0.5,0.5],
# [0.34,0.34,0.15,0.0,0.1,0.1],
# [0.33,0.33,0.15,0.1,0.0,0.1],
# [0.33,0.33,0.25,0.1,0.1,0.0]
# ]
#a. introduction, b. new topic, c. end convo, d. myfamily, e. myjob, /f. myskills
myselfMatrix = [
[0.00,1,0.2,0.0,0.0],
[0.25,0,0.2,0.2,0.2],
[0.25,0,0.2,0.5,0.5],
[0.25,0,0.2,0.0,0.3],
[0.25,0,0.2,0.3,0.0]
]
# myselfMatrix = [
# [0.0,1,0.15,0.00,0.00,0.00],
# [0.2,0,0.15,0.20,0.20,0.20],
# [0.2,0,0.15,0.50,0.50,0.50],
# [0.2,0,0.15,0.00,0.15,0.15],
# [0.2,0,0.15,0.15,0.00,0.15],
# [0.2,0,0.15,0.15,0.15,0.00]
# ]
states = ['topic','store','manu','friend', 'myself', 'exit']
transitions = [
{'trigger' : 'toTopic', 'source' : '*', 'dest' : 'topic'},
{'trigger' : 'toStore', 'source' : 'topic', 'dest' : 'store'},
{'trigger' : 'toManu' , 'source' : 'topic', 'dest' : 'manu' },
{'trigger' : 'toFriend', 'source' : 'topic', 'dest' : 'friend' },
{'trigger' : 'toMyself', 'source' : 'topic', 'dest' : 'myself'},
{'trigger' : 'toExit', 'source' : '*', 'dest' : 'exit'}
]
def __init__(self):
self.isPlayer = False
self.firstPerson = None
self.secondPerson = None
self.target = None
self.machine = Machine(model=self, states=Conversation.states, transitions=Conversation.transitions, initial='exit')
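        # menuDict maps each state to its dialogue actions, listed in the same order
        # as the options (rows) of that state's transition matrix.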
self.menuDict = {
'topic' : [self.toStore, self.toManu, self.toFriend, self.toMyself, self.toExit],
'store' : [self.different, self.toTopic, self.toExit, self.prices],
'manu' : [self.different, self.toTopic, self.toExit, self.prices],
'friend' : [self.different, self.toTopic, self.toExit, self.family, self.job],
'myself' : [self.introduction, self.toTopic, self.toExit, self.myfamily, self.myjob]
}
self.machine.on_enter_topic('topicHandler')
self.machine.on_enter_store('storeHandler')
self.machine.on_enter_manu('manuHandler')
self.machine.on_enter_friend('friendHandler')
self.machine.on_enter_myself('myselfHandler')
self.machine.on_enter_exit('exitHandler')
def beginConversation(self, firstPerson, secondPerson, isPlayer=False):
self.isPlayer = isPlayer
self.firstPerson = firstPerson
self.secondPerson = secondPerson
self.introduction()
self.toTopic()
def introduction(self):
p2 = self.firstPerson.peopleManager(self.secondPerson)
p1 = self.secondPerson.peopleManager(self.firstPerson)
p2.name = self.secondPerson.name
p1.name = self.firstPerson.name
p2.updateOpinion(1)
p1.updateOpinion(1)
def different(self):
if self.state == 'friend':
testTarget = self.firstPerson.randomPerson(self.target)
if testTarget is not None:
self.target = testTarget.person
else:
self.target = None
elif self.state == 'manu':
testTarget = self.firstPerson.randomManu(self.target)
if testTarget is not None:
self.target = testTarget.store
else:
self.target = None
elif self.state == 'store':
testTarget = self.firstPerson.randomStore(self.target)
if testTarget is not None:
self.target = testTarget.store
else:
self.target = None
def prices(self):
if self.target is not None:
firstProfile = self.firstPerson.unitManager(self.target, self.secondPerson)
secondProfile = self.secondPerson.unitManager(self.target, self.firstPerson)
firstPrices = firstProfile.getPricesWithDayNum()
secondPrices = secondProfile.getPricesWithDayNum()
firstDayNum = firstPrices[1]
secondDayNum = secondPrices[1]
if firstDayNum > secondDayNum:
prices = firstPrices[0]
secondProfile.updatePrices(prices, firstDayNum)
#thoughts
self.firstPerson.think("I told " + self.secondPerson.name + " about the prices at " + self.target.name + ".")
self.secondPerson.think(self.firstPerson.name + " told me about the prices at " + self.target.name + ".")
elif secondDayNum > firstDayNum:
prices = secondPrices[0]
firstProfile.updatePrices(prices, secondDayNum)
#thoughts
self.firstPerson.think(self.secondPerson.name + " told me about the prices at " + self.target.name + ".")
self.secondPerson.think("I told " + self.firstPerson.name + " about the prices at " + self.target.name + ".")
else:
self.firstPerson.think(self.secondPerson.name + " and I talked about " + self.target.name + "'s prices.")
self.secondPerson.think(self.firstPerson.name + " and I talked about " + self.target.name + "'s prices.")
else:
if self.state == 'store':
self.firstPerson.think(self.secondPerson.name + " listened to me gripe about how I can't find anywhere to shop.")
self.secondPerson.think(self.firstPerson.name + " told me that they can't find anywhere to shop.")
elif self.state == 'manu':
self.firstPerson.think("I mentioned to " + self.secondPerson.name + " that I don't know anything about the local industry.")
self.secondPerson.think(self.firstPerson.name + " told me that they don't know much about the local industry.")
else:
self.firstPerson.think("There is a bug in conversation.prices. (not manu or store)")
self.secondPerson.think("There is a bug in conversation.prices. (not manu or store)")
def family(self):
if self.target is not None:
#info: family, people
#profiles
p1 = self.firstPerson.peopleManager(self.target)
p2 = self.secondPerson.peopleManager(self.target)
#variables
f1 = p1.getFamily()
f2 = p2.getFamily()
ff = []
#update profiles
for a, b in zip(f1, f2):
if a[-1] >= b[-1]:
ff.append(a)
else:
ff.append(b)
p1.updateFamily(*ff)
p2.updateFamily(*ff)
#thoughts
self.firstPerson.think(self.secondPerson.name + " and I gossipped about " + self.target.name + "'s family.")
self.secondPerson.think(self.firstPerson.name + " and I gossipped about " + self.target.name + "'s family.")
else:
self.firstPerson.think("I don't really know anything about my friends' families.")
self.secondPerson.think("I don't really know anything about my friends' families.")
def job(self):
if self.target is not None:
#profiles
firstProfile = self.firstPerson.peopleManager(self.target)
secondProfile = self.secondPerson.peopleManager(self.target)
#variables
firstJob = firstProfile.getJob()
secondJob = secondProfile.getJob()
#update profiles
if firstJob[1] > secondJob[1]:
secondProfile.updateJob(*firstJob)
self.firstPerson.think("I told " + self.secondPerson.name + " what " + self.target.name + " does for a living.")
self.secondPerson.think(self.firstPerson.name + " told me what " + self.target.name + " does for a living.")
elif secondJob[1] > firstJob[1]:
firstProfile.updateJob(*secondJob)
self.firstPerson.think(self.secondPerson.name + " told me what " + self.target.name + " does for a living.")
self.secondPerson.think("I told " + self.firstPerson.name + " about " + self.target.name + " does for a living.")
else:
self.firstPerson.think(self.secondPerson.name + " and I talked about " + self.target.name + "'s job.")
self.secondPerson.think(self.firstPerson.name + " and I talked about " + self.target.name + "'s job.")
else:
self.firstPerson.think("I don't know what any of my friends do for a living!")
self.secondPerson.think("I don't know what any of my friends do for a living!")
# def skills(self):
# #info: skills
# if self.target is not None:
# #profiles
# firstProfile = self.firstPerson.peopleManager(self.target)
# secondProfile = self.secondPerson.peopleManager(self.target)
# #variables
# firstSkills = firstProfile.getSkills()
# secondSkills = secondProfile.getSkills()
# #update profiles
# if firstSkills[1] > secondSkills[1]:
# secondProfile.updateSkills(*firstSkills)
# self.firstPerson.think("I told " + self.secondPerson.name + " about how good " + self.target.name + " is with their hands.")
# self.secondPerson.think(self.firstPerson.name + " told me about how good " + self.target.name + " is with their hands.")
# elif secondSkills[1] > firstSkills[1]:
# firstProfile.updateSkills(*secondSkills)
# self.firstPerson.think(self.secondPerson.name + " told me about how good " + self.target.name + " is with their hands.")
# self.secondPerson.think("I told " + self.firstPerson.name + " about how good " + self.target.name + " is with their hands.")
# else:
# self.firstPerson.think(self.secondPerson.name + " and I talked about how good " + self.target.name + " is with their hands.")
# self.secondPerson.think(self.firstPerson.name + " and I talked about how good " + self.target.name + " is with their hands.")
# else:
# self.firstPerson.think("I should spend more time doing things with my friends.")
# self.secondPerson.think("I should spend more time doing things with my friends.")
def myfamily(self):
#info: family, people
#profiles
firstProfile = self.secondPerson.peopleManager(self.firstPerson)
secondProfile = self.firstPerson.peopleManager(self.secondPerson)
firstOwn = self.firstPerson.peopleManager(self.firstPerson)
secondOwn = self.secondPerson.peopleManager(self.secondPerson)
#update profiles
firstProfile.updateFamily(firstOwn.getFather(), firstOwn.getMother(), firstOwn.getSpouse(), firstOwn.getSiblings(), firstOwn.getChildren())
secondProfile.updateFamily(secondOwn.getFather(), secondOwn.getMother(), secondOwn.getSpouse(), secondOwn.getSiblings(), secondOwn.getChildren())
#thoughts
self.firstPerson.think(self.secondPerson.name + " caught me up on their family life.")
self.secondPerson.think(self.firstPerson.name + " caught me up on their family life.")
def myjob(self):
#info: jobs, jobUnits, *salaries
#profiles
firstProfile = self.secondPerson.peopleManager(self.firstPerson)
secondProfile = self.firstPerson.peopleManager(self.secondPerson)
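        # firstProfile is the second person's record of the first person (and vice versa),
        # so each profile should receive the profiled person's own job and salary below.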
#variables
firstJob = self.firstPerson.getJob()
secondJob = self.secondPerson.getJob()
dayNum = self.firstPerson.model.getDayNum()
try:
firstJobType = firstJob.getJobType()
firstJobUnit = firstJob.getUnit()
firstJobLoc = firstJobUnit.getName()
firstSalary = firstJob.getSalary()
except:
firstJobType = "Jobhunter"
firstJobUnit = None
firstJobLoc = "home"
firstSalary = 0
try:
secondJobType = secondJob.getJobType()
secondJobUnit = secondJob.getUnit()
secondJobLoc = secondJobUnit.getName()
secondSalary = secondJob.getSalary()
except:
secondJobType = "Jobhunter"
secondJobUnit = None
secondJobLoc = "home"
secondSalary = 0
#update profiles
if dayNum > firstProfile.getJob()[1]:
firstProfile.updateJob(firstJob, dayNum)
if dayNum > firstProfile.getSalary()[1]:
firstProfile.updateSalary(firstSalary, dayNum)
if dayNum > secondProfile.getJob()[1]:
secondProfile.updateJob(secondJob, dayNum)
if dayNum > secondProfile.getSalary()[1]:
            secondProfile.updateSalary(secondSalary, dayNum)
if firstJobUnit is not None:
self.secondPerson.unitManager(firstJobUnit, self.firstPerson)
if secondJobUnit is not None:
self.firstPerson.unitManager(secondJobUnit, self.secondPerson)
#thoughts
self.firstPerson.think(self.secondPerson.name + " told me about their job as a " + secondJobType + " at " + secondJobLoc + ".")
self.secondPerson.think(self.firstPerson.name + " told me about their job as a " + firstJobType + " at " + firstJobLoc + ".")
# def myskills(self):
# #info skills
# #profiles
# firstProfile = self.secondPerson.peopleManager(self.firstPerson)
# secondProfile = self.firstPerson.peopleManager(self.secondPerson)
# #variables
# firstSkills = self.firstPerson.getSkills()
# secondSkills = self.secondPerson.getSkills()
# dayNum = self.firstPerson.model.getDayNum()
# #update profiles
# if dayNum > firstProfile.getSkills()[1]:
# firstProfile.updateSkills(firstSkills, dayNum)
# if dayNum > secondProfile.getSkills()[1]:
# secondProfile.updateSkills(secondSkills, dayNum)
# #thoughts
# self.firstPerson.think(self.secondPerson.name + " and I talked shop for a while.")
# self.secondPerson.think(self.firstPerson.name + " and I talked shop for a while.")
#dialogues are chosen here, but the actual method call is in the handler (eg prices)
def talk(self, matrix, stateVector):
if self.isPlayer:
# stateVector = playerChoice
pass
else:
#get dialogue probabilities given last dialogue
probArray = np.dot(matrix, stateVector)
prob = probArray.tolist()
#choose dialogue
choice = random.random()
stateVector = [0 for i in range(len(prob))]
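            # roulette-wheel selection: walk the cumulative distribution until it passes the random draw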
for i in range(len(prob)):
outcome = prob[i]
if outcome >= choice:
stateVector[i] = 1
return stateVector
else:
choice = choice - outcome
def topicHandler(self):
matrix = Conversation.topicMatrix
stateVector = [0,0,0,0,1]
# self.firstPerson.think("topicHandler")
stateVector = self.talk(matrix, stateVector)
for i in range(len(stateVector)):
if stateVector[i] == 1:
self.menuDict[self.state][i]()
break
def storeHandler(self):
matrix = Conversation.storeMatrix
stateVector = [0,1,0,0]
# self.firstPerson.think("storeHandler")
self.different()
while self.state == 'store':
stateVector = self.talk(matrix, stateVector)
for i in range(len(stateVector)):
if stateVector[i] == 1:
self.menuDict[self.state][i]()
break
def manuHandler(self):
matrix = Conversation.manuMatrix
stateVector = [0,1,0,0]
# self.firstPerson.think("manuHandler")
self.different()
while self.state == 'manu':
stateVector = self.talk(matrix, stateVector)
for i in range(len(stateVector)):
if stateVector[i] == 1:
self.menuDict[self.state][i]()
break
def friendHandler(self):
matrix = Conversation.friendMatrix
stateVector = [0,1,0,0,0]
# self.firstPerson.think("friendHandler")
self.different()
while self.state == 'friend':
stateVector = self.talk(matrix, stateVector)
for i in range(len(stateVector)):
if stateVector[i] == 1:
self.menuDict[self.state][i]()
break
def myselfHandler(self):
matrix = Conversation.myselfMatrix
stateVector = [0,1,0,0,0]
# self.firstPerson.think("myselfHandler")
while self.state == 'myself':
stateVector = self.talk(matrix, stateVector)
for i in range(len(stateVector)):
if stateVector[i] == 1:
self.menuDict[self.state][i]()
break
def exitHandler(self):
self.isPlayer = False
Convo = Conversation() | [
"random.random",
"numpy.dot",
"transitions.Machine"
]
| [((2827, 2933), 'transitions.Machine', 'Machine', ([], {'model': 'self', 'states': 'Conversation.states', 'transitions': 'Conversation.transitions', 'initial': '"""exit"""'}), "(model=self, states=Conversation.states, transitions=Conversation.\n transitions, initial='exit')\n", (2834, 2933), False, 'from transitions import Machine\n'), ((15917, 15944), 'numpy.dot', 'np.dot', (['matrix', 'stateVector'], {}), '(matrix, stateVector)\n', (15923, 15944), True, 'import numpy as np\n'), ((16034, 16049), 'random.random', 'random.random', ([], {}), '()\n', (16047, 16049), False, 'import random\n')] |
from __init__ import ExtractUnlabeledData, SampleUnlabeledData, ExtractLabeledData
E = ExtractLabeledData(data_dir='../labeldata/')
E.get_pathways()
E.get_pathway_names()
E.get_classes_dict()
E.create_df_all_labels()
| [
"__init__.ExtractLabeledData"
]
| [((88, 132), '__init__.ExtractLabeledData', 'ExtractLabeledData', ([], {'data_dir': '"""../labeldata/"""'}), "(data_dir='../labeldata/')\n", (106, 132), False, 'from __init__ import ExtractUnlabeledData, SampleUnlabeledData, ExtractLabeledData\n')] |
from django.shortcuts import render, redirect
from .forms import AuthorForm, BlogForm, NewUserForm
from .models import Author, Blog
from django.contrib.auth import login, authenticate, logout
from django.contrib import messages
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.decorators import login_required
# Create your views here.
def get_authors(request):
context = {'authors': Author.objects.all()}
return render(request, "blog/get_authors.html", context)
@login_required
def get_author(request, id):
author = Author.objects.get(pk = id)
blogs = Blog.objects.filter(author = id)
context = {'author': author, 'blogs': blogs}
return render(request, "blog/get_author.html", context)
@login_required
def post_put_author(request, id = 0):
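    # id == 0 means "create a new author"; any other id edits that existing author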
if request.method == "GET":
if id == 0:
form = AuthorForm()
else:
author = Author.objects.get(pk = id)
form = AuthorForm(instance = author)
return render(request, "blog/post_put_authors.html", {"form": form})
else:
if id == 0:
form = AuthorForm(request.POST)
else:
author = Author.objects.get(pk = id)
form = AuthorForm(request.POST, instance = author)
if form.is_valid():
form.save()
return redirect('get_authors')
@login_required
def delete_author(request, id):
author = Author.objects.get(pk = id)
author.delete()
return redirect('get_authors')
def get_blogs(request):
context = {'blogs': Blog.objects.all()}
return render(request, "blog/get_blogs.html", context)
@login_required
def get_blog(request, id):
blog = {'blog': Blog.objects.get(pk = id)}
return render(request, "blog/get_blog.html", blog)
@login_required
def post_put_blog(request, id = 0):
if request.method == "GET":
if id == 0:
form = BlogForm()
else:
blog = Blog.objects.get(pk = id)
form = BlogForm(instance = blog)
return render(request, "blog/post_put_blogs.html", {"form": form})
else:
if id == 0:
form = BlogForm(request.POST)
else:
blog = Blog.objects.get(pk = id)
form = BlogForm(request.POST, instance = blog)
if form.is_valid():
form.save()
return redirect('get_blogs')
@login_required
def delete_blog(request, id):
blog = Blog.objects.get(pk = id)
blog.delete()
return redirect('get_blogs')
def register_request(request):
if request.method == "POST":
form = NewUserForm(request.POST)
if form.is_valid():
user = form.save()
login(request, user)
            messages.success(request, "Registration successful.")
return redirect("get_blogs")
messages.error(request, "Unsuccessful registration. Invalid information.")
form = NewUserForm()
    return render(request=request, template_name="blog/register.html", context={"register_form":form})
def login_request(request):
if request.method == "POST":
form = AuthenticationForm(request, data=request.POST)
if form.is_valid():
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('password')
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
messages.info(request, f"You are now logged in as {username}.")
return redirect("get_blogs")
else:
                messages.error(request, "Invalid username or password.")
        else:
            messages.error(request, "Invalid username or password.")
form = AuthenticationForm()
return render(request=request, template_name="blog/login.html", context={"login_form":form})
def logout_request(request):
logout(request)
messages.info(request, "You have successfully logged out.")
return redirect("get_blogs")
| [
"django.shortcuts.render",
"django.contrib.auth.authenticate",
"django.contrib.messages.error",
"django.contrib.auth.login",
"django.contrib.messages.info",
"django.contrib.auth.forms.AuthenticationForm",
"django.shortcuts.redirect",
"django.contrib.messages.success",
"django.contrib.auth.logout"
]
| [((457, 506), 'django.shortcuts.render', 'render', (['request', '"""blog/get_authors.html"""', 'context'], {}), "(request, 'blog/get_authors.html', context)\n", (463, 506), False, 'from django.shortcuts import render, redirect\n'), ((700, 748), 'django.shortcuts.render', 'render', (['request', '"""blog/get_author.html"""', 'context'], {}), "(request, 'blog/get_author.html', context)\n", (706, 748), False, 'from django.shortcuts import render, redirect\n'), ((1489, 1512), 'django.shortcuts.redirect', 'redirect', (['"""get_authors"""'], {}), "('get_authors')\n", (1497, 1512), False, 'from django.shortcuts import render, redirect\n'), ((1593, 1640), 'django.shortcuts.render', 'render', (['request', '"""blog/get_blogs.html"""', 'context'], {}), "(request, 'blog/get_blogs.html', context)\n", (1599, 1640), False, 'from django.shortcuts import render, redirect\n'), ((1743, 1786), 'django.shortcuts.render', 'render', (['request', '"""blog/get_blog.html"""', 'blog'], {}), "(request, 'blog/get_blog.html', blog)\n", (1749, 1786), False, 'from django.shortcuts import render, redirect\n'), ((2493, 2514), 'django.shortcuts.redirect', 'redirect', (['"""get_blogs"""'], {}), "('get_blogs')\n", (2501, 2514), False, 'from django.shortcuts import render, redirect\n'), ((2877, 2974), 'django.shortcuts.render', 'render', ([], {'request': 'request', 'template_name': '"""blog/register.html"""', 'context': "{'register_form': form}"}), "(request=request, template_name='blog/register.html', context={\n 'register_form': form})\n", (2883, 2974), False, 'from django.shortcuts import render, redirect\n'), ((3558, 3578), 'django.contrib.auth.forms.AuthenticationForm', 'AuthenticationForm', ([], {}), '()\n', (3576, 3578), False, 'from django.contrib.auth.forms import AuthenticationForm\n'), ((3587, 3678), 'django.shortcuts.render', 'render', ([], {'request': 'request', 'template_name': '"""blog/login.html"""', 'context': "{'login_form': form}"}), "(request=request, template_name='blog/login.html', context={\n 'login_form': form})\n", (3593, 3678), False, 'from django.shortcuts import render, redirect\n'), ((3704, 3719), 'django.contrib.auth.logout', 'logout', (['request'], {}), '(request)\n', (3710, 3719), False, 'from django.contrib.auth import login, authenticate, logout\n'), ((3721, 3780), 'django.contrib.messages.info', 'messages.info', (['request', '"""You have successfully logged out."""'], {}), "(request, 'You have successfully logged out.')\n", (3734, 3780), False, 'from django.contrib import messages\n'), ((3790, 3811), 'django.shortcuts.redirect', 'redirect', (['"""get_blogs"""'], {}), "('get_blogs')\n", (3798, 3811), False, 'from django.shortcuts import render, redirect\n'), ((1015, 1076), 'django.shortcuts.render', 'render', (['request', '"""blog/post_put_authors.html"""', "{'form': form}"], {}), "(request, 'blog/post_put_authors.html', {'form': form})\n", (1021, 1076), False, 'from django.shortcuts import render, redirect\n'), ((1344, 1367), 'django.shortcuts.redirect', 'redirect', (['"""get_authors"""'], {}), "('get_authors')\n", (1352, 1367), False, 'from django.shortcuts import render, redirect\n'), ((2041, 2100), 'django.shortcuts.render', 'render', (['request', '"""blog/post_put_blogs.html"""', "{'form': form}"], {}), "(request, 'blog/post_put_blogs.html', {'form': form})\n", (2047, 2100), False, 'from django.shortcuts import render, redirect\n'), ((2358, 2379), 'django.shortcuts.redirect', 'redirect', (['"""get_blogs"""'], {}), "('get_blogs')\n", (2366, 2379), False, 'from django.shortcuts import 
render, redirect\n'), ((2772, 2846), 'django.contrib.messages.error', 'messages.error', (['request', '"""Unsuccessful registration. Invalid information."""'], {}), "(request, 'Unsuccessful registration. Invalid information.')\n", (2786, 2846), False, 'from django.contrib import messages\n'), ((3038, 3084), 'django.contrib.auth.forms.AuthenticationForm', 'AuthenticationForm', (['request'], {'data': 'request.POST'}), '(request, data=request.POST)\n', (3056, 3084), False, 'from django.contrib.auth.forms import AuthenticationForm\n'), ((2659, 2679), 'django.contrib.auth.login', 'login', (['request', 'user'], {}), '(request, user)\n', (2664, 2679), False, 'from django.contrib.auth import login, authenticate, logout\n'), ((2683, 2736), 'django.contrib.messages.success', 'messages.success', (['request', '"""Registration successful."""'], {}), "(request, 'Registration successful.')\n", (2699, 2736), False, 'from django.contrib import messages\n'), ((2748, 2769), 'django.shortcuts.redirect', 'redirect', (['"""get_blogs"""'], {}), "('get_blogs')\n", (2756, 2769), False, 'from django.shortcuts import render, redirect\n'), ((3213, 3263), 'django.contrib.auth.authenticate', 'authenticate', ([], {'username': 'username', 'password': 'password'}), '(username=username, password=password)\n', (3225, 3263), False, 'from django.contrib.auth import login, authenticate, logout\n'), ((3494, 3550), 'django.contrib.messages.error', 'messages.error', (['request', '"""Invalid username or password."""'], {}), "(request, 'Invalid username or password.')\n", (3508, 3550), False, 'from django.contrib import messages\n'), ((3292, 3312), 'django.contrib.auth.login', 'login', (['request', 'user'], {}), '(request, user)\n', (3297, 3312), False, 'from django.contrib.auth import login, authenticate, logout\n'), ((3317, 3380), 'django.contrib.messages.info', 'messages.info', (['request', 'f"""You are now logged in as {username}."""'], {}), "(request, f'You are now logged in as {username}.')\n", (3330, 3380), False, 'from django.contrib import messages\n'), ((3392, 3413), 'django.shortcuts.redirect', 'redirect', (['"""get_blogs"""'], {}), "('get_blogs')\n", (3400, 3413), False, 'from django.shortcuts import render, redirect\n'), ((3427, 3483), 'django.contrib.messages.error', 'messages.error', (['request', '"""Invalid username or password."""'], {}), "(request, 'Invalid username or password.')\n", (3441, 3483), False, 'from django.contrib import messages\n')] |
import tensorflow as tf
import os
import pickle
import numpy as np
from constant_params import input_feature_dim, window_size
def build_dataset(input_tfrecord_files, batch_size):
drop_remainder = False
feature_description = {
'label': tf.io.FixedLenFeature([], tf.int64),
'ref_aa': tf.io.FixedLenFeature([], tf.int64),
'alt_aa': tf.io.FixedLenFeature([], tf.int64),
'feature': tf.io.FixedLenFeature([], tf.string),
'mask': tf.io.FixedLenFeature([], tf.string),
'var_id': tf.io.FixedLenFeature([], tf.string),
}
def _parser(example_proto):
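        # decode one serialized Example into (var_id, ref_aa, alt_aa, feature, label, mask)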
parsed = tf.io.parse_single_example(example_proto, feature_description)
label, ref_aa, alt_aa = parsed['label'], parsed['ref_aa'], parsed[
'alt_aa']
var_id = parsed['var_id']
ref_aa, alt_aa, label = tf.cast(ref_aa, tf.int32), tf.cast(
alt_aa, tf.int32), tf.cast(label, tf.float32)
feature = tf.io.decode_raw(parsed['feature'], tf.float32)
feature = tf.reshape(feature, (window_size, input_feature_dim))
mask = tf.io.decode_raw(parsed['mask'], tf.float32)
mask = tf.reshape(mask, (window_size, ))
h = window_size // 2
        # mask the position of interest
mask = tf.concat(
[mask[:h],
tf.cast([
1,
], dtype=tf.float32), mask[h + 1:]],
axis=-1)
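        # the center element is forced to 1 so the variant position itself is always masked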
'''
pos_encoding = 1.0 + tf.cast(
tf.math.abs(window_size // 2 - tf.range(window_size)),
dtype=tf.float32)
#pos_encoding = tf.math.log() / tf.math.log(2.0)
feature = tf.concat([feature, pos_encoding[:, tf.newaxis]], axis=-1)
'''
return var_id, ref_aa, alt_aa, feature, label, mask
dataset = tf.data.TFRecordDataset(input_tfrecord_files)
options = tf.data.Options()
options.experimental_threading.max_intra_op_parallelism = 1
dataset = dataset.with_options(options)
dataset = dataset.shuffle(2048)
dataset = dataset.map(_parser, num_parallel_calls=8)
dataset = dataset.batch(batch_size)
#dataset = dataset.prefetch(4)
return dataset
def build_all_possible_missenses_dataset(tr_list, feature_dir, batch_size):
amino_acid_order = 'ACDEFGHIKLMNPQRSTVWY*'
def _gen_data():
for transcript_id in tr_list:
feature_path = f'{feature_dir}/{transcript_id}.pickle'
if not os.path.exists(feature_path):
continue
print(feature_path, flush=True)
with open(feature_path, 'rb') as fr:
feature = pickle.load(fr)
L = feature.shape[0]
w = window_size // 2
for aa_pos in range(L):
ref_aa = int(feature[aa_pos, 0])
start = max(aa_pos - w, 0)
end = min(L, aa_pos + 1 + w)
var_start = start - (aa_pos - w)
var_end = var_start + (end - start)
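                # clip the window at the protein ends; var_start/var_end locate it inside the fixed-size arrays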
var_feature = np.zeros([w * 2 + 1, feature.shape[1]])
var_feature[var_start:var_end] = feature[start:end]
mask = np.ones((w * 2 + 1, ), dtype=np.float32)
mask[var_start:var_end] = 0.0
mask[w] = 1.0
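                # mask == 1 marks padding outside the protein as well as the variant position itself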
for alt_aa in range(20):
var_id = f'{transcript_id}_{str(aa_pos+1)}_{amino_acid_order[ref_aa]}_{amino_acid_order[alt_aa]}'.encode(
'utf-8')
yield var_id, np.int32(ref_aa), np.int32(
alt_aa), np.float32(var_feature), np.float32(mask)
dataset = tf.data.Dataset.from_generator(
_gen_data, (tf.string, tf.int32, tf.int32, tf.float32, tf.float32),
(tf.TensorShape(()), tf.TensorShape(()), tf.TensorShape(
()), tf.TensorShape((window_size, input_feature_dim)),
tf.TensorShape((window_size, ))))
options = tf.data.Options()
options.experimental_threading.max_intra_op_parallelism = 1
dataset = dataset.with_options(options)
#dataset = dataset.map(_parser, num_parallel_calls=8)
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(4)
return dataset
def build_test_dataset(input_tfrecord_files, batch_size):
drop_remainder = False
feature_description = {
'ref_aa': tf.io.FixedLenFeature([], tf.int64),
'alt_aa': tf.io.FixedLenFeature([], tf.int64),
'feature': tf.io.FixedLenFeature([], tf.string),
'mask': tf.io.FixedLenFeature([], tf.string),
'var_id': tf.io.FixedLenFeature([], tf.string),
}
def _parser(example_proto):
parsed = tf.io.parse_single_example(example_proto, feature_description)
ref_aa, alt_aa = parsed['ref_aa'], parsed['alt_aa']
var_id = parsed['var_id']
ref_aa, alt_aa = tf.cast(ref_aa, tf.int32), tf.cast(alt_aa, tf.int32)
feature = tf.io.decode_raw(parsed['feature'], tf.float32)
feature = tf.reshape(feature, (window_size, input_feature_dim))
mask = tf.io.decode_raw(parsed['mask'], tf.float32)
mask = tf.reshape(mask, (window_size, ))
h = window_size // 2
        # mask the position of interest
mask = tf.concat(
[mask[:h],
tf.cast([
1,
], dtype=tf.float32), mask[h + 1:]],
axis=-1)
return var_id, ref_aa, alt_aa, feature, mask
dataset = tf.data.TFRecordDataset(input_tfrecord_files)
options = tf.data.Options()
options.experimental_threading.max_intra_op_parallelism = 1
dataset = dataset.with_options(options)
dataset = dataset.map(_parser, num_parallel_calls=8)
dataset = dataset.batch(batch_size)
#dataset = dataset.prefetch(4)
return dataset
| [
"tensorflow.data.TFRecordDataset",
"os.path.exists",
"numpy.ones",
"tensorflow.io.parse_single_example",
"tensorflow.data.Options",
"pickle.load",
"numpy.int32",
"numpy.zeros",
"tensorflow.io.FixedLenFeature",
"tensorflow.io.decode_raw",
"tensorflow.reshape",
"tensorflow.cast",
"numpy.float32",
"tensorflow.TensorShape"
]
| [((1795, 1840), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['input_tfrecord_files'], {}), '(input_tfrecord_files)\n', (1818, 1840), True, 'import tensorflow as tf\n'), ((1856, 1873), 'tensorflow.data.Options', 'tf.data.Options', ([], {}), '()\n', (1871, 1873), True, 'import tensorflow as tf\n'), ((3909, 3926), 'tensorflow.data.Options', 'tf.data.Options', ([], {}), '()\n', (3924, 3926), True, 'import tensorflow as tf\n'), ((5424, 5469), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['input_tfrecord_files'], {}), '(input_tfrecord_files)\n', (5447, 5469), True, 'import tensorflow as tf\n'), ((5485, 5502), 'tensorflow.data.Options', 'tf.data.Options', ([], {}), '()\n', (5500, 5502), True, 'import tensorflow as tf\n'), ((255, 290), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (276, 290), True, 'import tensorflow as tf\n'), ((310, 345), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (331, 345), True, 'import tensorflow as tf\n'), ((365, 400), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (386, 400), True, 'import tensorflow as tf\n'), ((421, 457), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (442, 457), True, 'import tensorflow as tf\n'), ((475, 511), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (496, 511), True, 'import tensorflow as tf\n'), ((531, 567), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (552, 567), True, 'import tensorflow as tf\n'), ((625, 687), 'tensorflow.io.parse_single_example', 'tf.io.parse_single_example', (['example_proto', 'feature_description'], {}), '(example_proto, feature_description)\n', (651, 687), True, 'import tensorflow as tf\n'), ((965, 1012), 'tensorflow.io.decode_raw', 'tf.io.decode_raw', (["parsed['feature']", 'tf.float32'], {}), "(parsed['feature'], tf.float32)\n", (981, 1012), True, 'import tensorflow as tf\n'), ((1031, 1084), 'tensorflow.reshape', 'tf.reshape', (['feature', '(window_size, input_feature_dim)'], {}), '(feature, (window_size, input_feature_dim))\n', (1041, 1084), True, 'import tensorflow as tf\n'), ((1101, 1145), 'tensorflow.io.decode_raw', 'tf.io.decode_raw', (["parsed['mask']", 'tf.float32'], {}), "(parsed['mask'], tf.float32)\n", (1117, 1145), True, 'import tensorflow as tf\n'), ((1161, 1193), 'tensorflow.reshape', 'tf.reshape', (['mask', '(window_size,)'], {}), '(mask, (window_size,))\n', (1171, 1193), True, 'import tensorflow as tf\n'), ((4324, 4359), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (4345, 4359), True, 'import tensorflow as tf\n'), ((4379, 4414), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (4400, 4414), True, 'import tensorflow as tf\n'), ((4435, 4471), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (4456, 4471), True, 'import tensorflow as tf\n'), ((4489, 4525), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (4510, 4525), True, 'import tensorflow as tf\n'), ((4545, 4581), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (4566, 4581), True, 'import 
tensorflow as tf\n'), ((4639, 4701), 'tensorflow.io.parse_single_example', 'tf.io.parse_single_example', (['example_proto', 'feature_description'], {}), '(example_proto, feature_description)\n', (4665, 4701), True, 'import tensorflow as tf\n'), ((4894, 4941), 'tensorflow.io.decode_raw', 'tf.io.decode_raw', (["parsed['feature']", 'tf.float32'], {}), "(parsed['feature'], tf.float32)\n", (4910, 4941), True, 'import tensorflow as tf\n'), ((4960, 5013), 'tensorflow.reshape', 'tf.reshape', (['feature', '(window_size, input_feature_dim)'], {}), '(feature, (window_size, input_feature_dim))\n', (4970, 5013), True, 'import tensorflow as tf\n'), ((5030, 5074), 'tensorflow.io.decode_raw', 'tf.io.decode_raw', (["parsed['mask']", 'tf.float32'], {}), "(parsed['mask'], tf.float32)\n", (5046, 5074), True, 'import tensorflow as tf\n'), ((5090, 5122), 'tensorflow.reshape', 'tf.reshape', (['mask', '(window_size,)'], {}), '(mask, (window_size,))\n', (5100, 5122), True, 'import tensorflow as tf\n'), ((852, 877), 'tensorflow.cast', 'tf.cast', (['ref_aa', 'tf.int32'], {}), '(ref_aa, tf.int32)\n', (859, 877), True, 'import tensorflow as tf\n'), ((879, 904), 'tensorflow.cast', 'tf.cast', (['alt_aa', 'tf.int32'], {}), '(alt_aa, tf.int32)\n', (886, 904), True, 'import tensorflow as tf\n'), ((919, 945), 'tensorflow.cast', 'tf.cast', (['label', 'tf.float32'], {}), '(label, tf.float32)\n', (926, 945), True, 'import tensorflow as tf\n'), ((3728, 3746), 'tensorflow.TensorShape', 'tf.TensorShape', (['()'], {}), '(())\n', (3742, 3746), True, 'import tensorflow as tf\n'), ((3748, 3766), 'tensorflow.TensorShape', 'tf.TensorShape', (['()'], {}), '(())\n', (3762, 3766), True, 'import tensorflow as tf\n'), ((3768, 3786), 'tensorflow.TensorShape', 'tf.TensorShape', (['()'], {}), '(())\n', (3782, 3786), True, 'import tensorflow as tf\n'), ((3801, 3849), 'tensorflow.TensorShape', 'tf.TensorShape', (['(window_size, input_feature_dim)'], {}), '((window_size, input_feature_dim))\n', (3815, 3849), True, 'import tensorflow as tf\n'), ((3860, 3890), 'tensorflow.TensorShape', 'tf.TensorShape', (['(window_size,)'], {}), '((window_size,))\n', (3874, 3890), True, 'import tensorflow as tf\n'), ((4822, 4847), 'tensorflow.cast', 'tf.cast', (['ref_aa', 'tf.int32'], {}), '(ref_aa, tf.int32)\n', (4829, 4847), True, 'import tensorflow as tf\n'), ((4849, 4874), 'tensorflow.cast', 'tf.cast', (['alt_aa', 'tf.int32'], {}), '(alt_aa, tf.int32)\n', (4856, 4874), True, 'import tensorflow as tf\n'), ((1325, 1355), 'tensorflow.cast', 'tf.cast', (['[1]'], {'dtype': 'tf.float32'}), '([1], dtype=tf.float32)\n', (1332, 1355), True, 'import tensorflow as tf\n'), ((2444, 2472), 'os.path.exists', 'os.path.exists', (['feature_path'], {}), '(feature_path)\n', (2458, 2472), False, 'import os\n'), ((2619, 2634), 'pickle.load', 'pickle.load', (['fr'], {}), '(fr)\n', (2630, 2634), False, 'import pickle\n'), ((3009, 3048), 'numpy.zeros', 'np.zeros', (['[w * 2 + 1, feature.shape[1]]'], {}), '([w * 2 + 1, feature.shape[1]])\n', (3017, 3048), True, 'import numpy as np\n'), ((3141, 3180), 'numpy.ones', 'np.ones', (['(w * 2 + 1,)'], {'dtype': 'np.float32'}), '((w * 2 + 1,), dtype=np.float32)\n', (3148, 3180), True, 'import numpy as np\n'), ((5254, 5284), 'tensorflow.cast', 'tf.cast', (['[1]'], {'dtype': 'tf.float32'}), '([1], dtype=tf.float32)\n', (5261, 5284), True, 'import tensorflow as tf\n'), ((3493, 3509), 'numpy.int32', 'np.int32', (['ref_aa'], {}), '(ref_aa)\n', (3501, 3509), True, 'import numpy as np\n'), ((3511, 3527), 'numpy.int32', 'np.int32', (['alt_aa'], {}), 
'(alt_aa)\n', (3519, 3527), True, 'import numpy as np\n'), ((3554, 3577), 'numpy.float32', 'np.float32', (['var_feature'], {}), '(var_feature)\n', (3564, 3577), True, 'import numpy as np\n'), ((3579, 3595), 'numpy.float32', 'np.float32', (['mask'], {}), '(mask)\n', (3589, 3595), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from conans import ConanFile, CMake, tools
import shutil
class ClapackConan(ConanFile):
name = "clapack"
version = "3.2.1"
license = "BSD 3-Clause"
# BSD-3-Clause-Clear
url = "https://github.com/sintef-ocean/conan-clapack"
author = "<NAME>"
homepage = "http://www.netlib.org/clapack/"
description = \
"CLAPACK's goal is to provide LAPACK for someone who does " \
"not have access to a Fortran compiler"
topics = ("clapack", "LAPACK", "Port to C", "Numerical linear algebra")
settings = "os", "compiler", "build_type", "arch"
options = {
"fPIC": [True, False],
}
default_options = {
"fPIC": True,
}
generators = ("cmake_paths", "cmake_find_package")
exports = ["patch/*"]
source_file = "clapack-{}-CMAKE.tgz".format(version)
source_subfolder = source_file[:-4]
build_subfolder = "build_subfolder"
def source(self):
link = "http://www.netlib.org/clapack/" + self.source_file
tools.get(link, sha1="5ea1bcc4314e392bca8b9e5f61d44355cf9f4cc1")
tools.patch(patch_file="patch/MainCMakeLists.patch",
base_path=self.source_subfolder)
tools.patch(patch_file="patch/SRC_CMakeLists.patch",
base_path=self.source_subfolder)
tools.patch(patch_file="patch/F2C_CMakeLists.patch",
base_path=self.source_subfolder)
tools.patch(patch_file="patch/BLAS_CMakeLists.patch",
base_path=self.source_subfolder)
shutil.move(self.source_subfolder + "/COPYING",
self.source_subfolder + "/LICENSE")
def build(self):
cmake = CMake(self)
if self.settings.os != "Windows":
cmake.definitions['CMAKE_POSITION_INDEPENDENT_CODE'] = self.options.fPIC
cmake.configure(source_folder=self.source_subfolder,
build_folder=self.build_subfolder)
cmake.build()
cmake.install()
def package(self):
self.copy("COPYING", dst="licenses", src=self.source_subfolder,
ignore_case=True, keep_path=False)
def package_info(self):
self.cpp_info.name = 'CLAPACK'
if self.settings.compiler == "Visual Studio":
self.cpp_info.libs = ["libf2c", "blas", "lapack"]
if self.settings.build_type == "Debug":
for i in range(len(self.cpp_info.libs)):
self.cpp_info.libs[i] += 'd'
else:
self.cpp_info.libs = ["lapack", "blas", "f2c"]
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
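        # clapack is plain C, so the C++ standard library setting does not apply to this package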
del self.settings.compiler.libcxx
| [
"conans.tools.get",
"conans.tools.patch",
"conans.CMake",
"shutil.move"
]
| [((1052, 1116), 'conans.tools.get', 'tools.get', (['link'], {'sha1': '"""5ea1bcc4314e392bca8b9e5f61d44355cf9f4cc1"""'}), "(link, sha1='5ea1bcc4314e392bca8b9e5f61d44355cf9f4cc1')\n", (1061, 1116), False, 'from conans import ConanFile, CMake, tools\n'), ((1126, 1216), 'conans.tools.patch', 'tools.patch', ([], {'patch_file': '"""patch/MainCMakeLists.patch"""', 'base_path': 'self.source_subfolder'}), "(patch_file='patch/MainCMakeLists.patch', base_path=self.\n source_subfolder)\n", (1137, 1216), False, 'from conans import ConanFile, CMake, tools\n'), ((1240, 1330), 'conans.tools.patch', 'tools.patch', ([], {'patch_file': '"""patch/SRC_CMakeLists.patch"""', 'base_path': 'self.source_subfolder'}), "(patch_file='patch/SRC_CMakeLists.patch', base_path=self.\n source_subfolder)\n", (1251, 1330), False, 'from conans import ConanFile, CMake, tools\n'), ((1354, 1444), 'conans.tools.patch', 'tools.patch', ([], {'patch_file': '"""patch/F2C_CMakeLists.patch"""', 'base_path': 'self.source_subfolder'}), "(patch_file='patch/F2C_CMakeLists.patch', base_path=self.\n source_subfolder)\n", (1365, 1444), False, 'from conans import ConanFile, CMake, tools\n'), ((1468, 1559), 'conans.tools.patch', 'tools.patch', ([], {'patch_file': '"""patch/BLAS_CMakeLists.patch"""', 'base_path': 'self.source_subfolder'}), "(patch_file='patch/BLAS_CMakeLists.patch', base_path=self.\n source_subfolder)\n", (1479, 1559), False, 'from conans import ConanFile, CMake, tools\n'), ((1583, 1670), 'shutil.move', 'shutil.move', (["(self.source_subfolder + '/COPYING')", "(self.source_subfolder + '/LICENSE')"], {}), "(self.source_subfolder + '/COPYING', self.source_subfolder +\n '/LICENSE')\n", (1594, 1670), False, 'import shutil\n'), ((1725, 1736), 'conans.CMake', 'CMake', (['self'], {}), '(self)\n', (1730, 1736), False, 'from conans import ConanFile, CMake, tools\n')] |
"""
Copyright 2021 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from dataclasses import dataclass
import pygmo as pg
from yacos.essential import Sequence
from yacos.essential import IO
from yacos.essential import Engine
class Pygmo:
"""A Pygmo's strategy."""
__version__ = '1.0.0'
__flags = None
# {key: {'goal': float,
# 'seq': list}}
__results = None
# SGA
# {gen = {'fevals': int,
# 'best': float,
# 'improvement': float}}
#
# PSO
# {gen: {'fevals': int,
# 'gbest': float,
# 'meanvel': float,
# 'meanlbest': float,
# 'avgdist': float}
__log = None
class Problem:
"""Pygmo's problem."""
def __init__(self,
first_key,
last_key,
passes_dict,
dimension,
goal,
compiler,
benchmark_directory,
working_set,
times,
tool,
verify_output):
"""Construct a Pygmo problem.
Parameters
----------
first_key : int
The index of the first pass.
last_key : int
The index of the last pass.
passes_dict : dict
The dictionary with the available passes.
dimension : int
The length of a sequence.
goal : str
compiler : str
benchmark_directory : str
working_set : int
times: int
tool: str
Execution tool
verify_output: bool
The goal is valid only if the execution status is OK.
"""
self.first_key = first_key
self.last_key = last_key
self.passes_dict = passes_dict
self.dimension = dimension
self.goal = goal
self.compiler = compiler
self.benchmark_directory = benchmark_directory
self.working_set = working_set
self.times = times
self.tool = tool
self.verify_output = verify_output
def __deepcopy__(self,
*args,
**kwargs):
"""Deeep copy."""
return self
def fitness(self,
sequence):
"""Calculate and return the fitness."""
sequence = Sequence.fix_index(list(sequence))
sequence = Sequence.sanitize(sequence)
sequence = Sequence.index_pass_to_list(sequence,
self.passes_dict)
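            # compile and run the benchmark with this pass sequence; the measured goal value is the fitness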
goal_value = Engine.evaluate(self.goal,
Sequence.name_pass_to_string(
sequence
),
self.compiler,
self.benchmark_directory,
self.working_set,
self.times,
self.tool,
self.verify_output)
return [goal_value]
def get_nix(self):
"""Integer dimension of the problem."""
return self.dimension
def get_bounds(self):
"""Box-bounds."""
return ([self.first_key] * self.dimension,
[self.last_key] * self.dimension)
def get_name(self):
"""Problem name."""
return 'Optimization Selection'
def get_extra_info(self):
"""Info."""
return '\tDimensions: ' + str(self.dimension)
@dataclass
class PygmoFlags:
"""Pygmo flags.
Parameters
----------
first_key : int
The index of the first pass.
last_key : int
The index of the last pass.
passes_dict : dict
The dictionary with the available passes.
dimension : int
The length of a sequence.
population : int
goals : dict
compiler : str
benchmarks_directory : str
working_set : int
The dataset to execute the benchmark.
times: int
Execution times
tool : str
Execution tool
verify_output: bool
The goal is valid only if the execution status is OK.
"""
first_key: int
last_key: int
passes_dict: dict
dimension: int
population: int
goals: dict
compiler: str
benchmarks_directory: str
working_set: int
times: int
tool: str
verify_output: bool
def __init__(self,
dimension,
population,
passes_filename,
goals,
compiler,
benchmarks_directory,
working_set,
times,
tool,
verify_output):
"""Initialize the arguments.
Parameters
----------
dimension : int
The length of a sequence.
population : int
passes_filename : str
The file that describes the passes to use.
goals : dict
compiler : str
benchmarks_directory : str
working_set : int
The dataset to execute the benchmark.
times: int
Execution times
tool: str
Execution tool
verify_output: bool
The goal is valid only if the execution status is OK.
"""
first_key, last_key, passes_dict = IO.load_passes(passes_filename)
# When the goal is obtained during compile time
# and the working set is not defined during compilation,
# we do not need the working set.
self.__flags = self.PygmoFlags(first_key,
last_key,
passes_dict,
dimension,
population,
goals,
compiler,
benchmarks_directory,
working_set,
times,
tool,
verify_output)
@property
def results(self):
"""Getter."""
return self.__results
@property
def log(self):
"""Getter."""
return self.__log
def exec(self, algorithm, benchmark):
"""Execute the algorithm.
        Parameters
        ----------
algorithm : Pygmo algorithm
benchmark : str
"""
# Step 1: Algorithm
algorithm = pg.algorithm(algorithm)
# algorithm.set_verbosity(1)
# Step 2: Instantiate a pygmo problem
index = benchmark.find('.')
        # Benchmark directory
bench_dir = os.path.join(self.__flags.benchmarks_directory,
benchmark[:index],
benchmark[index+1:])
problem = self.Problem(self.__flags.first_key,
self.__flags.last_key,
self.__flags.passes_dict,
self.__flags.dimension,
self.__flags.goals,
self.__flags.compiler,
bench_dir,
self.__flags.working_set,
self.__flags.times,
self.__flags.tool,
self.__flags.verify_output)
problem = pg.problem(problem)
# Step 3: The initial population
population = pg.population(problem,
self.__flags.population)
# Step 4: Evolve the population
population = algorithm.evolve(population)
# Step 5: Get the results
sga_sequence = population.get_x().tolist()
sga_fitness = population.get_f().tolist()
self.__results = {}
for index in range(self.__flags.population):
sequence = Sequence.index_pass_to_list(sga_sequence[index],
self.__flags.passes_dict)
goal_value = sga_fitness[index][0]
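            # drop sequences whose evaluation failed (goal reported as infinity)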
if goal_value == float('inf'):
continue
self.__results[index] = {'seq': sequence,
'goal': goal_value}
# Step 6: Get the log
self.__log = {}
if algorithm.get_name() == 'SGA: Genetic Algorithm':
uda = algorithm.extract(pg.sga)
log = uda.get_log()
for (gen, fevals, best, improvement) in log:
self.__log[gen] = {'fevals': fevals,
'best': best,
'improvement': improvement}
elif algorithm.get_name() == 'PSO: Particle Swarm Optimization':
uda = algorithm.extract(pg.pso)
log = uda.get_log()
for (gen, fevals, gbest, meanvel, meanlbest, avgdist) in log:
self.__log[gen] = {'fevals': fevals,
'gbest': gbest,
'meanvel': meanvel,
'meanlbest': meanlbest,
'avgdist': avgdist}
class SGA(Pygmo):
"""Simple Genetic Algorithm."""
__version__ = '1.0.0'
__flags = None
@dataclass
class Flags:
"""Pygmo flags.
Parameters
----------
generations : int
cr : float
Crossover probability
m : float
Mutation probability
param_m : float
Distribution index (polynomial mutation),
gaussian width (gaussian mutation) or
inactive (uniform mutation)
param_s : float
The number of best individuals to use in “truncated”
selection or the size of the tournament in
tournament selection.
crossover : str
exponential, binomial or single
mutation : str
gaussian, polynomial or uniform
selection : str
tournament or truncated
seed : int
"""
generations: int
cr: float
m: float
param_m: float
param_s: float
crossover: str
mutation: str
selection: str
seed: int
def __init__(self,
generations,
population,
cr,
m,
param_m,
param_s,
crossover,
mutation,
selection,
seed,
dimension,
passes_filename,
goals,
compiler,
benchmarks_directory,
working_set,
times,
tool,
verify_output):
"""Initialize a SGA object.
Parameters
----------
generations : int
population : int
cr : float
Crossover probability
m : float
Mutation probability
param_m : float
Distribution index (polynomial mutation),
gaussian width (gaussian mutation) or
inactive (uniform mutation)
param_s : float
The number of best individuals to use in “truncated”
selection or the size of the tournament in
tournament selection.
crossover : str
exponential, binomial or single
mutation : str
gaussian, polynomial or uniform
selection : str
tournament or truncated
seed : int
dimension : int
The length of a sequence.
passes_filename : str
The file that describes the passes to use.
goals : dict
compiler : str
benchmarks_directory : str
working_set : int
The dataset to execute the benchmark.
times : int
Execution times
tool : str
Execution tool
verify_output: bool
The goal is valid only if the execution status is OK.
"""
self.__flags = self.Flags(generations,
cr,
m,
param_m,
param_s,
crossover,
mutation,
selection,
seed)
super().__init__(dimension,
population,
passes_filename,
goals,
compiler,
benchmarks_directory,
working_set,
times,
tool,
verify_output)
def run(self, benchmark):
"""Execute the algorithm.
        Parameters
        ----------
benchmark: str
"""
if self.__flags.seed is None:
algorithm = pg.sga(gen=self.__flags.generations,
cr=self.__flags.cr,
m=self.__flags.m,
param_m=self.__flags.param_m,
param_s=self.__flags.param_s,
crossover=self.__flags.crossover,
mutation=self.__flags.mutation,
selection=self.__flags.selection)
else:
algorithm = pg.sga(gen=self.__flags.generations,
cr=self.__flags.cr,
m=self.__flags.m,
param_m=self.__flags.param_m,
param_s=self.__flags.param_s,
crossover=self.__flags.crossover,
mutation=self.__flags.mutation,
selection=self.__flags.selection,
seed=self.__flags.seed)
# Execute
super().exec(algorithm, benchmark)
class PSO(Pygmo):
"""Particle Swarm Optimization."""
__version__ = '1.0.0'
__flags = None
@dataclass
class Flags:
"""PSO flags.
Parameters
----------
generations : int
omega : float
Inertia weight (or constriction factor)
eta1 : float
Social component
eta2 : float
Cognitive component
max_vel : float
Maximum allowed particle velocities
(normalized with respect to the bounds width)
variant : int
Algorithmic variant
neighb_type : int
Swarm topology (defining each particle’s neighbours)
neighb_param : int
Topology parameter (defines how many neighbours to consider)
memory : bool
When true the velocities are not reset between successive
calls to the evolve method
seed : int
Seed used by the internal random number generator.
"""
generations: int
omega: float
eta1: float
eta2: float
max_vel: float
variant: int
neighb_type: int
neighb_param: int
memory: bool
seed: int
def __init__(self,
generations,
population,
omega,
eta1,
eta2,
max_vel,
variant,
neighb_type,
neighb_param,
memory,
seed,
dimension,
passes_filename,
goals,
compiler,
benchmarks_directory,
working_set,
times,
tool,
verify_output):
"""Initialize a PSO object.
Parameters
----------
generations : int
population : int
omega : float
Inertia weight (or constriction factor)
eta1 : float
Social component
eta2 : float
Cognitive component
max_vel : float
Maximum allowed particle velocities
(normalized with respect to the bounds width)
variant : int
Algorithmic variant
neighb_type : int
Swarm topology (defining each particle’s neighbours)
neighb_param : int
Topology parameter (defines how many neighbours to consider)
memory : bool
When true the velocities are not reset between successive
calls to the evolve method
seed : int
Seed used by the internal random number generator.
"""
self.__flags = self.Flags(generations,
omega,
eta1,
eta2,
max_vel,
variant,
neighb_type,
neighb_param,
memory,
seed)
super().__init__(dimension,
population,
passes_filename,
goals,
compiler,
benchmarks_directory,
working_set,
times,
tool,
verify_output)
def run(self, benchmark):
"""Execute the algorithm.
        Parameters
        ----------
benchmark : str
"""
if self.__flags.seed:
algorithm = pg.pso(self.__flags.generations,
self.__flags.omega,
self.__flags.eta1,
self.__flags.eta2,
self.__flags.max_vel,
self.__flags.variant,
self.__flags.neighb_type,
self.__flags.neighb_param,
self.__flags.memory,
self.__flags.seed)
else:
algorithm = pg.pso(self.__flags.generations,
self.__flags.omega,
self.__flags.eta1,
self.__flags.eta2,
self.__flags.max_vel,
self.__flags.variant,
self.__flags.neighb_type,
self.__flags.neighb_param,
self.__flags.memory)
# Execute
super().exec(algorithm, benchmark)
| [
"yacos.essential.Sequence.name_pass_to_string",
"pygmo.sga",
"pygmo.problem",
"os.path.join",
"yacos.essential.Sequence.index_pass_to_list",
"yacos.essential.Sequence.sanitize",
"pygmo.population",
"pygmo.algorithm",
"pygmo.pso",
"yacos.essential.IO.load_passes"
]
| [((6387, 6418), 'yacos.essential.IO.load_passes', 'IO.load_passes', (['passes_filename'], {}), '(passes_filename)\n', (6401, 6418), False, 'from yacos.essential import IO\n'), ((7595, 7618), 'pygmo.algorithm', 'pg.algorithm', (['algorithm'], {}), '(algorithm)\n', (7607, 7618), True, 'import pygmo as pg\n'), ((7791, 7884), 'os.path.join', 'os.path.join', (['self.__flags.benchmarks_directory', 'benchmark[:index]', 'benchmark[index + 1:]'], {}), '(self.__flags.benchmarks_directory, benchmark[:index],\n benchmark[index + 1:])\n', (7803, 7884), False, 'import os\n'), ((8549, 8568), 'pygmo.problem', 'pg.problem', (['problem'], {}), '(problem)\n', (8559, 8568), True, 'import pygmo as pg\n'), ((8632, 8679), 'pygmo.population', 'pg.population', (['problem', 'self.__flags.population'], {}), '(problem, self.__flags.population)\n', (8645, 8679), True, 'import pygmo as pg\n'), ((3107, 3134), 'yacos.essential.Sequence.sanitize', 'Sequence.sanitize', (['sequence'], {}), '(sequence)\n', (3124, 3134), False, 'from yacos.essential import Sequence\n'), ((3158, 3213), 'yacos.essential.Sequence.index_pass_to_list', 'Sequence.index_pass_to_list', (['sequence', 'self.passes_dict'], {}), '(sequence, self.passes_dict)\n', (3185, 3213), False, 'from yacos.essential import Sequence\n'), ((9047, 9121), 'yacos.essential.Sequence.index_pass_to_list', 'Sequence.index_pass_to_list', (['sga_sequence[index]', 'self.__flags.passes_dict'], {}), '(sga_sequence[index], self.__flags.passes_dict)\n', (9074, 9121), False, 'from yacos.essential import Sequence\n'), ((14220, 14468), 'pygmo.sga', 'pg.sga', ([], {'gen': 'self.__flags.generations', 'cr': 'self.__flags.cr', 'm': 'self.__flags.m', 'param_m': 'self.__flags.param_m', 'param_s': 'self.__flags.param_s', 'crossover': 'self.__flags.crossover', 'mutation': 'self.__flags.mutation', 'selection': 'self.__flags.selection'}), '(gen=self.__flags.generations, cr=self.__flags.cr, m=self.__flags.m,\n param_m=self.__flags.param_m, param_s=self.__flags.param_s, crossover=\n self.__flags.crossover, mutation=self.__flags.mutation, selection=self.\n __flags.selection)\n', (14226, 14468), True, 'import pygmo as pg\n'), ((14710, 14982), 'pygmo.sga', 'pg.sga', ([], {'gen': 'self.__flags.generations', 'cr': 'self.__flags.cr', 'm': 'self.__flags.m', 'param_m': 'self.__flags.param_m', 'param_s': 'self.__flags.param_s', 'crossover': 'self.__flags.crossover', 'mutation': 'self.__flags.mutation', 'selection': 'self.__flags.selection', 'seed': 'self.__flags.seed'}), '(gen=self.__flags.generations, cr=self.__flags.cr, m=self.__flags.m,\n param_m=self.__flags.param_m, param_s=self.__flags.param_s, crossover=\n self.__flags.crossover, mutation=self.__flags.mutation, selection=self.\n __flags.selection, seed=self.__flags.seed)\n', (14716, 14982), True, 'import pygmo as pg\n'), ((18994, 19234), 'pygmo.pso', 'pg.pso', (['self.__flags.generations', 'self.__flags.omega', 'self.__flags.eta1', 'self.__flags.eta2', 'self.__flags.max_vel', 'self.__flags.variant', 'self.__flags.neighb_type', 'self.__flags.neighb_param', 'self.__flags.memory', 'self.__flags.seed'], {}), '(self.__flags.generations, self.__flags.omega, self.__flags.eta1,\n self.__flags.eta2, self.__flags.max_vel, self.__flags.variant, self.\n __flags.neighb_type, self.__flags.neighb_param, self.__flags.memory,\n self.__flags.seed)\n', (19000, 19234), True, 'import pygmo as pg\n'), ((19539, 19756), 'pygmo.pso', 'pg.pso', (['self.__flags.generations', 'self.__flags.omega', 'self.__flags.eta1', 'self.__flags.eta2', 'self.__flags.max_vel', 
'self.__flags.variant', 'self.__flags.neighb_type', 'self.__flags.neighb_param', 'self.__flags.memory'], {}), '(self.__flags.generations, self.__flags.omega, self.__flags.eta1,\n self.__flags.eta2, self.__flags.max_vel, self.__flags.variant, self.\n __flags.neighb_type, self.__flags.neighb_param, self.__flags.memory)\n', (19545, 19756), True, 'import pygmo as pg\n'), ((3358, 3396), 'yacos.essential.Sequence.name_pass_to_string', 'Sequence.name_pass_to_string', (['sequence'], {}), '(sequence)\n', (3386, 3396), False, 'from yacos.essential import Sequence\n')] |
import tkinter as tk
from scipy.stats import chi2, chisquare
COLOR = '#dddddd'
COLUMNS_COLOR = '#ffffff'
MAX_SIZE = 10
WIDGET_WIDTH = 25
class LinearCongruent:
m = 2**32
a = 1664525
c = 1013904223
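    # classic 32-bit linear congruential generator constants (as popularized by Numerical Recipes)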
_cur = 1
def next(self):
self._cur = (self.a * self._cur + self.c) % self.m
return self._cur
def khi_krit(arr):
min_ = min(arr)
cnt = [0 for _ in range(max(arr) - min_ + 1)]
for elem in arr:
cnt[elem-min_] += 1
n = sum(cnt)
k = len(cnt)
p = 1 / k
chisq = 0
for j in range(k):
chisq += cnt[j]**2 / p
chisq = chisq / n - n
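    # equivalent to sum((obs - exp)^2 / exp) with uniform expected counts exp = n * p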
#print(chisquare(cnt))
return (1 - chi2.cdf(chisq, k)) * 100
def get_10_nums(arr, num):
cnt = 0
res = []
i = 0
while cnt != 10:
if arr[i] > num:
res.append(arr[i])
cnt += 1
i += 1
return res
class file_nums:
def __init__(self):
self.nums = None
with open('nums.txt', 'r') as f:
nums = [list(i.split()) for i in list(f.read().split('\n'))]
self.columns = len(nums)
self.rows = len(nums[0])
self.nums = [[] for _ in range(self.rows)]
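            # store the table transposed so next() can walk the numbers one at a time, wrapping around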
for i in range(self.columns):
for j in range(self.rows):
self.nums[j].append(nums[i][j])
self.cur_x = 0
self.cur_y = 0
def next(self):
self.cur_x += 1
if self.cur_x == self.columns:
self.cur_x = 0
self.cur_y += 1
if self.cur_y == self.rows:
self.cur_y = 0
return self.nums[self.cur_y][self.cur_x]
class Block:
def __init__(self, master):
self.frame = tk.LabelFrame(master, bg=COLOR, text='Ввод данных', width=480, height=110)
self.frame.columnconfigure(0, weight=1)
self.frame.rowconfigure(0, weight=1)
self.frame.grid_propagate(False)
self.label_input = tk.Label(self.frame, text='Ваши числа: ', bg=COLOR)
self.entry_numbers = tk.Entry(self.frame, width=WIDGET_WIDTH+10)
self.calculate_custom_result_btn = tk.Button(self.frame, text="Статистика хи-квадрат ваших чисел: ", width=WIDGET_WIDTH+6,
bg=COLOR,
command=self.user_solve)
self.label_result = tk.Label(self.frame, text='', bg=COLOR)
self.calculate_result_btn = tk.Button(self.frame, text="Вычислить для 1000 чисел", width=WIDGET_WIDTH, bg=COLOR, command=self.solve)
self.listbox_frame = tk.LabelFrame(master, text='Матрица', bg=COLOR, width=530, height=200)
self.listbox_frame.grid_propagate(False)
self.result_frame = tk.LabelFrame(master, bg=COLOR, text='Результат', width=510, height=270)
self.result_frame.grid_propagate(False)
self.table_label = tk.Label(self.result_frame, text='Табличный способ', bg=COLOR, bd=3)
self.algorithm_label = tk.Label(self.result_frame, text='Алгоритмический способ', bg=COLOR, bd=3)
self.one_digit_table = tk.Listbox(self.result_frame, selectmode=tk.SINGLE, width=13, bg=COLUMNS_COLOR, height=1)
self.two_digit_table = tk.Listbox(self.result_frame, selectmode=tk.SINGLE, width=13, bg=COLUMNS_COLOR, height=1)
self.three_digit_table = tk.Listbox(self.result_frame, selectmode=tk.SINGLE, width=13, bg=COLUMNS_COLOR, height=1)
self.one_digit_algorithm = tk.Listbox(self.result_frame, selectmode=tk.SINGLE, width=13, bg=COLUMNS_COLOR, height=1)
self.two_digit_algorithm = tk.Listbox(self.result_frame, selectmode=tk.SINGLE, width=13, bg=COLUMNS_COLOR, height=1)
self.three_digit_algorithm = tk.Listbox(self.result_frame, selectmode=tk.SINGLE, width=13, bg=COLUMNS_COLOR, height=1)
self.one_digit_table.insert(tk.END, '1 разряд')
self.two_digit_table.insert(tk.END, '2 разряда')
self.three_digit_table.insert(tk.END, '3 разряда')
self.one_digit_algorithm.insert(tk.END, '1 разряд')
self.two_digit_algorithm.insert(tk.END, '2 разряда')
self.three_digit_algorithm.insert(tk.END, '3 разряда')
self.label_khi = tk.Label(self.result_frame, text='% статистики хи-квадрат', bg=COLOR, bd=3)
self.one_digit_table_khi = tk.Label(self.result_frame, text='', bg=COLOR, bd=3)
self.two_digit_table_khi = tk.Label(self.result_frame, text='', bg=COLOR, bd=3)
self.three_digit_table_khi = tk.Label(self.result_frame, text='', bg=COLOR, bd=3)
self.one_digit_algorithm_khi = tk.Label(self.result_frame, text='', bg=COLOR, bd=3)
self.two_digit_algorithm_khi = tk.Label(self.result_frame, text='', bg=COLOR, bd=3)
self.three_digit_algorithm_khi = tk.Label(self.result_frame, text='', bg=COLOR, bd=3)
self.table_label.grid(row=0, column=0, columnspan=3)
self.algorithm_label.grid(row=0, column=3, columnspan=3)
self.one_digit_table.grid(row=1, column=0, padx=1)
self.two_digit_table.grid(row=1, column=1, padx=1)
self.three_digit_table.grid(row=1, column=2, padx=1)
self.one_digit_algorithm.grid(row=1, column=3, padx=1)
self.two_digit_algorithm.grid(row=1, column=4, padx=1)
self.three_digit_algorithm.grid(row=1, column=5, padx=1)
self.one_digit_table_khi.grid(row=3, column=0, padx=1)
self.two_digit_table_khi.grid(row=3, column=1, padx=1)
self.three_digit_table_khi.grid(row=3, column=2, padx=1)
self.one_digit_algorithm_khi.grid(row=3, column=3, padx=1)
self.two_digit_algorithm_khi.grid(row=3, column=4, padx=1)
self.three_digit_algorithm_khi.grid(row=3, column=5, padx=1)
self.label_khi.grid(row=2, column=0, columnspan=6)
self.label_input.grid(row=0, column=0)
self.entry_numbers.grid(row=0, column=1, padx=10)
self.calculate_custom_result_btn.grid(row=1, column=0, pady=4)
self.label_result.grid(row=1, column=1)
self.calculate_result_btn.grid(row=2, column=0, columnspan=2, pady=2)
self.data = None
self.size = None
self.table_gen = file_nums()
self.listbox_list = [tk.Listbox(self.listbox_frame, selectmode=tk.SINGLE, width=8, bg=COLOR) for _ in range(MAX_SIZE)]
def defocus(self, event):
event.widget.master.focus_set()
def make_view(self):
self.frame.pack()
#self.listbox_frame.pack()
self.result_frame.pack()
def fill_data(self, size):
for i in range(size):
for j in range(size):
self.listbox_list[i].insert(tk.END, self.data[j, i])
def user_solve(self):
inp = self.entry_numbers.get()
try:
x = list(map(int, inp.split()))
self.label_result['text'] = str(round(khi_krit(x), 4)) + '%'
except:
self.label_result['text'] = 'Ошибка ввода!!!'
def solve(self):
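        # draw 1000 numbers of one, two and three digits from both the congruential generator and the table file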
alg_arrs = [[int(generator.next()) % j for _ in range(1000)] for j in [10, 100, 1000]]
table_arrs = [[int(self.table_gen.next()[:j]) for _ in range(1000)] for j in [1, 2, 3]]
self.one_digit_algorithm.delete(1, tk.END)
self.two_digit_algorithm.delete(1, tk.END)
self.three_digit_algorithm.delete(1, tk.END)
self.one_digit_algorithm['height'] = 11
self.two_digit_algorithm['height'] = 11
self.three_digit_algorithm['height'] = 11
self.one_digit_table.delete(1, tk.END)
self.two_digit_table.delete(1, tk.END)
self.three_digit_table.delete(1, tk.END)
self.one_digit_table['height'] = 11
self.two_digit_table['height'] = 11
self.three_digit_table['height'] = 11
[self.one_digit_algorithm.insert(tk.END, i) for i in get_10_nums(alg_arrs[0], -1)]
[self.two_digit_algorithm.insert(tk.END, i) for i in get_10_nums(alg_arrs[1], 9)]
[self.three_digit_algorithm.insert(tk.END, i) for i in get_10_nums(alg_arrs[2], 99)]
[self.one_digit_table.insert(tk.END, i) for i in get_10_nums(table_arrs[0], -1)]
[self.two_digit_table.insert(tk.END, i) for i in get_10_nums(table_arrs[1], 9)]
[self.three_digit_table.insert(tk.END, i) for i in get_10_nums(table_arrs[2], 99)]
self.one_digit_algorithm_khi['text'] = str(round(khi_krit(alg_arrs[0]), 4)) + '%'
self.two_digit_algorithm_khi['text'] = str(round(khi_krit(alg_arrs[1]), 4)) + '%'
self.three_digit_algorithm_khi['text'] = str(round(khi_krit(alg_arrs[2]), 4)) + '%'
self.one_digit_table_khi['text'] = str(round(khi_krit(table_arrs[0]), 4)) + '%'
self.two_digit_table_khi['text'] = str(round(khi_krit(table_arrs[1]), 4)) + '%'
self.three_digit_table_khi['text'] = str(round(khi_krit(table_arrs[2]), 4)) + '%'
generator = LinearCongruent()
root = tk.Tk()
root['bg'] = COLOR
root.geometry('540x390')
first_block = Block(root)
first_block.make_view()
root.mainloop()
| [
"scipy.stats.chi2.cdf",
"tkinter.LabelFrame",
"tkinter.Entry",
"tkinter.Button",
"tkinter.Tk",
"tkinter.Label",
"tkinter.Listbox"
]
| [((8804, 8811), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (8809, 8811), True, 'import tkinter as tk\n'), ((1698, 1772), 'tkinter.LabelFrame', 'tk.LabelFrame', (['master'], {'bg': 'COLOR', 'text': '"""Ввод данных"""', 'width': '(480)', 'height': '(110)'}), "(master, bg=COLOR, text='Ввод данных', width=480, height=110)\n", (1711, 1772), True, 'import tkinter as tk\n'), ((1935, 1986), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Ваши числа: """', 'bg': 'COLOR'}), "(self.frame, text='Ваши числа: ', bg=COLOR)\n", (1943, 1986), True, 'import tkinter as tk\n'), ((2016, 2061), 'tkinter.Entry', 'tk.Entry', (['self.frame'], {'width': '(WIDGET_WIDTH + 10)'}), '(self.frame, width=WIDGET_WIDTH + 10)\n', (2024, 2061), True, 'import tkinter as tk\n'), ((2103, 2232), 'tkinter.Button', 'tk.Button', (['self.frame'], {'text': '"""Статистика хи-квадрат ваших чисел: """', 'width': '(WIDGET_WIDTH + 6)', 'bg': 'COLOR', 'command': 'self.user_solve'}), "(self.frame, text='Статистика хи-квадрат ваших чисел: ', width=\n WIDGET_WIDTH + 6, bg=COLOR, command=self.user_solve)\n", (2112, 2232), True, 'import tkinter as tk\n'), ((2360, 2399), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '""""""', 'bg': 'COLOR'}), "(self.frame, text='', bg=COLOR)\n", (2368, 2399), True, 'import tkinter as tk\n'), ((2436, 2544), 'tkinter.Button', 'tk.Button', (['self.frame'], {'text': '"""Вычислить для 1000 чисел"""', 'width': 'WIDGET_WIDTH', 'bg': 'COLOR', 'command': 'self.solve'}), "(self.frame, text='Вычислить для 1000 чисел', width=WIDGET_WIDTH,\n bg=COLOR, command=self.solve)\n", (2445, 2544), True, 'import tkinter as tk\n'), ((2571, 2641), 'tkinter.LabelFrame', 'tk.LabelFrame', (['master'], {'text': '"""Матрица"""', 'bg': 'COLOR', 'width': '(530)', 'height': '(200)'}), "(master, text='Матрица', bg=COLOR, width=530, height=200)\n", (2584, 2641), True, 'import tkinter as tk\n'), ((2722, 2794), 'tkinter.LabelFrame', 'tk.LabelFrame', (['master'], {'bg': 'COLOR', 'text': '"""Результат"""', 'width': '(510)', 'height': '(270)'}), "(master, bg=COLOR, text='Результат', width=510, height=270)\n", (2735, 2794), True, 'import tkinter as tk\n'), ((2871, 2939), 'tkinter.Label', 'tk.Label', (['self.result_frame'], {'text': '"""Табличный способ"""', 'bg': 'COLOR', 'bd': '(3)'}), "(self.result_frame, text='Табличный способ', bg=COLOR, bd=3)\n", (2879, 2939), True, 'import tkinter as tk\n'), ((2971, 3045), 'tkinter.Label', 'tk.Label', (['self.result_frame'], {'text': '"""Алгоритмический способ"""', 'bg': 'COLOR', 'bd': '(3)'}), "(self.result_frame, text='Алгоритмический способ', bg=COLOR, bd=3)\n", (2979, 3045), True, 'import tkinter as tk\n'), ((3078, 3172), 'tkinter.Listbox', 'tk.Listbox', (['self.result_frame'], {'selectmode': 'tk.SINGLE', 'width': '(13)', 'bg': 'COLUMNS_COLOR', 'height': '(1)'}), '(self.result_frame, selectmode=tk.SINGLE, width=13, bg=\n COLUMNS_COLOR, height=1)\n', (3088, 3172), True, 'import tkinter as tk\n'), ((3199, 3293), 'tkinter.Listbox', 'tk.Listbox', (['self.result_frame'], {'selectmode': 'tk.SINGLE', 'width': '(13)', 'bg': 'COLUMNS_COLOR', 'height': '(1)'}), '(self.result_frame, selectmode=tk.SINGLE, width=13, bg=\n COLUMNS_COLOR, height=1)\n', (3209, 3293), True, 'import tkinter as tk\n'), ((3322, 3416), 'tkinter.Listbox', 'tk.Listbox', (['self.result_frame'], {'selectmode': 'tk.SINGLE', 'width': '(13)', 'bg': 'COLUMNS_COLOR', 'height': '(1)'}), '(self.result_frame, selectmode=tk.SINGLE, width=13, bg=\n COLUMNS_COLOR, height=1)\n', (3332, 3416), True, 'import tkinter as tk\n'), ((3447, 3541), 
'tkinter.Listbox', 'tk.Listbox', (['self.result_frame'], {'selectmode': 'tk.SINGLE', 'width': '(13)', 'bg': 'COLUMNS_COLOR', 'height': '(1)'}), '(self.result_frame, selectmode=tk.SINGLE, width=13, bg=\n COLUMNS_COLOR, height=1)\n', (3457, 3541), True, 'import tkinter as tk\n'), ((3572, 3666), 'tkinter.Listbox', 'tk.Listbox', (['self.result_frame'], {'selectmode': 'tk.SINGLE', 'width': '(13)', 'bg': 'COLUMNS_COLOR', 'height': '(1)'}), '(self.result_frame, selectmode=tk.SINGLE, width=13, bg=\n COLUMNS_COLOR, height=1)\n', (3582, 3666), True, 'import tkinter as tk\n'), ((3699, 3793), 'tkinter.Listbox', 'tk.Listbox', (['self.result_frame'], {'selectmode': 'tk.SINGLE', 'width': '(13)', 'bg': 'COLUMNS_COLOR', 'height': '(1)'}), '(self.result_frame, selectmode=tk.SINGLE, width=13, bg=\n COLUMNS_COLOR, height=1)\n', (3709, 3793), True, 'import tkinter as tk\n'), ((4171, 4246), 'tkinter.Label', 'tk.Label', (['self.result_frame'], {'text': '"""% статистики хи-квадрат"""', 'bg': 'COLOR', 'bd': '(3)'}), "(self.result_frame, text='% статистики хи-квадрат', bg=COLOR, bd=3)\n", (4179, 4246), True, 'import tkinter as tk\n'), ((4283, 4335), 'tkinter.Label', 'tk.Label', (['self.result_frame'], {'text': '""""""', 'bg': 'COLOR', 'bd': '(3)'}), "(self.result_frame, text='', bg=COLOR, bd=3)\n", (4291, 4335), True, 'import tkinter as tk\n'), ((4371, 4423), 'tkinter.Label', 'tk.Label', (['self.result_frame'], {'text': '""""""', 'bg': 'COLOR', 'bd': '(3)'}), "(self.result_frame, text='', bg=COLOR, bd=3)\n", (4379, 4423), True, 'import tkinter as tk\n'), ((4461, 4513), 'tkinter.Label', 'tk.Label', (['self.result_frame'], {'text': '""""""', 'bg': 'COLOR', 'bd': '(3)'}), "(self.result_frame, text='', bg=COLOR, bd=3)\n", (4469, 4513), True, 'import tkinter as tk\n'), ((4553, 4605), 'tkinter.Label', 'tk.Label', (['self.result_frame'], {'text': '""""""', 'bg': 'COLOR', 'bd': '(3)'}), "(self.result_frame, text='', bg=COLOR, bd=3)\n", (4561, 4605), True, 'import tkinter as tk\n'), ((4645, 4697), 'tkinter.Label', 'tk.Label', (['self.result_frame'], {'text': '""""""', 'bg': 'COLOR', 'bd': '(3)'}), "(self.result_frame, text='', bg=COLOR, bd=3)\n", (4653, 4697), True, 'import tkinter as tk\n'), ((4739, 4791), 'tkinter.Label', 'tk.Label', (['self.result_frame'], {'text': '""""""', 'bg': 'COLOR', 'bd': '(3)'}), "(self.result_frame, text='', bg=COLOR, bd=3)\n", (4747, 4791), True, 'import tkinter as tk\n'), ((656, 674), 'scipy.stats.chi2.cdf', 'chi2.cdf', (['chisq', 'k'], {}), '(chisq, k)\n', (664, 674), False, 'from scipy.stats import chi2, chisquare\n'), ((6163, 6234), 'tkinter.Listbox', 'tk.Listbox', (['self.listbox_frame'], {'selectmode': 'tk.SINGLE', 'width': '(8)', 'bg': 'COLOR'}), '(self.listbox_frame, selectmode=tk.SINGLE, width=8, bg=COLOR)\n', (6173, 6234), True, 'import tkinter as tk\n')] |
"""Tests joulia.unit_conversions.
"""
from django.test import TestCase
from joulia import unit_conversions
class GramsToPoundsTest(TestCase):
def test_grams_to_pounds(self):
self.assertEquals(unit_conversions.grams_to_pounds(1000.0), 2.20462)
class GramsToOuncesTest(TestCase):
def test_grams_to_ounces(self):
self.assertEquals(unit_conversions.grams_to_ounces(1000.0), 35.27392)
| [
"joulia.unit_conversions.grams_to_ounces",
"joulia.unit_conversions.grams_to_pounds"
]
| [((208, 248), 'joulia.unit_conversions.grams_to_pounds', 'unit_conversions.grams_to_pounds', (['(1000.0)'], {}), '(1000.0)\n', (240, 248), False, 'from joulia import unit_conversions\n'), ((358, 398), 'joulia.unit_conversions.grams_to_ounces', 'unit_conversions.grams_to_ounces', (['(1000.0)'], {}), '(1000.0)\n', (390, 398), False, 'from joulia import unit_conversions\n')] |
from pathlib import Path
from hylfm.hylfm_types import (
CriterionChoice,
DatasetChoice,
LRSchedThresMode,
LRSchedulerChoice,
MetricChoice,
OptimizerChoice,
PeriodUnit,
)
from hylfm.model import HyLFM_Net
from hylfm.train import train
if __name__ == "__main__":
train(
dataset=DatasetChoice.beads_highc_b,
batch_multiplier=2,
batch_size=1,
crit_apply_weight_above_threshold=False,
crit_beta=1.0,
crit_decay_weight_by=0.8,
crit_decay_weight_every_unit=PeriodUnit.epoch,
crit_decay_weight_every_value=1,
crit_decay_weight_limit=1.0,
crit_ms_ssim_weight=0.01,
crit_threshold=0.5,
crit_weight=0.001,
criterion=CriterionChoice.WeightedSmoothL1,
data_range=1.0,
eval_batch_size=1,
interpolation_order=2,
lr_sched_factor=0.5,
lr_sched_patience=10,
lr_sched_thres=0.0001,
lr_sched_thres_mode=LRSchedThresMode.abs,
lr_scheduler=LRSchedulerChoice.ReduceLROnPlateau,
max_epochs=10,
model_weights=None, # Path()
opt_lr=3e-4,
opt_momentum=0.0,
opt_weight_decay=0.0,
optimizer=OptimizerChoice.Adam,
patience=5,
score_metric=MetricChoice.MS_SSIM,
seed=None,
validate_every_unit=PeriodUnit.epoch,
validate_every_value=1,
win_sigma=1.5,
win_size=11,
# model
nnum=19,
z_out=51,
kernel2d=3,
c00_2d=976,
c01_2d=976,
c02_2d=0,
c03_2d=0,
c04_2d=0,
up0_2d=488,
c10_2d=488,
c11_2d=0,
c12_2d=0,
c13_2d=0,
c14_2d=0,
up1_2d=244,
c20_2d=244,
c21_2d=0,
c22_2d=0,
c23_2d=0,
c24_2d=0,
up2_2d=0,
c30_2d=0,
c31_2d=0,
c32_2d=0,
c33_2d=0,
c34_2d=0,
last_kernel2d=1,
cin_3d=7,
kernel3d=3,
c00_3d=7,
c01_3d=0,
c02_3d=0,
c03_3d=0,
c04_3d=0,
up0_3d=7,
c10_3d=7,
c11_3d=7,
c12_3d=0,
c13_3d=0,
c14_3d=0,
up1_3d=0,
c20_3d=0,
c21_3d=0,
c22_3d=0,
c23_3d=0,
c24_3d=0,
up2_3d=0,
c30_3d=0,
c31_3d=0,
c32_3d=0,
c33_3d=0,
c34_3d=0,
init_fn=HyLFM_Net.InitName.xavier_uniform_,
final_activation=None,
)
| [
"hylfm.train.train"
]
| [((297, 1857), 'hylfm.train.train', 'train', ([], {'dataset': 'DatasetChoice.beads_highc_b', 'batch_multiplier': '(2)', 'batch_size': '(1)', 'crit_apply_weight_above_threshold': '(False)', 'crit_beta': '(1.0)', 'crit_decay_weight_by': '(0.8)', 'crit_decay_weight_every_unit': 'PeriodUnit.epoch', 'crit_decay_weight_every_value': '(1)', 'crit_decay_weight_limit': '(1.0)', 'crit_ms_ssim_weight': '(0.01)', 'crit_threshold': '(0.5)', 'crit_weight': '(0.001)', 'criterion': 'CriterionChoice.WeightedSmoothL1', 'data_range': '(1.0)', 'eval_batch_size': '(1)', 'interpolation_order': '(2)', 'lr_sched_factor': '(0.5)', 'lr_sched_patience': '(10)', 'lr_sched_thres': '(0.0001)', 'lr_sched_thres_mode': 'LRSchedThresMode.abs', 'lr_scheduler': 'LRSchedulerChoice.ReduceLROnPlateau', 'max_epochs': '(10)', 'model_weights': 'None', 'opt_lr': '(0.0003)', 'opt_momentum': '(0.0)', 'opt_weight_decay': '(0.0)', 'optimizer': 'OptimizerChoice.Adam', 'patience': '(5)', 'score_metric': 'MetricChoice.MS_SSIM', 'seed': 'None', 'validate_every_unit': 'PeriodUnit.epoch', 'validate_every_value': '(1)', 'win_sigma': '(1.5)', 'win_size': '(11)', 'nnum': '(19)', 'z_out': '(51)', 'kernel2d': '(3)', 'c00_2d': '(976)', 'c01_2d': '(976)', 'c02_2d': '(0)', 'c03_2d': '(0)', 'c04_2d': '(0)', 'up0_2d': '(488)', 'c10_2d': '(488)', 'c11_2d': '(0)', 'c12_2d': '(0)', 'c13_2d': '(0)', 'c14_2d': '(0)', 'up1_2d': '(244)', 'c20_2d': '(244)', 'c21_2d': '(0)', 'c22_2d': '(0)', 'c23_2d': '(0)', 'c24_2d': '(0)', 'up2_2d': '(0)', 'c30_2d': '(0)', 'c31_2d': '(0)', 'c32_2d': '(0)', 'c33_2d': '(0)', 'c34_2d': '(0)', 'last_kernel2d': '(1)', 'cin_3d': '(7)', 'kernel3d': '(3)', 'c00_3d': '(7)', 'c01_3d': '(0)', 'c02_3d': '(0)', 'c03_3d': '(0)', 'c04_3d': '(0)', 'up0_3d': '(7)', 'c10_3d': '(7)', 'c11_3d': '(7)', 'c12_3d': '(0)', 'c13_3d': '(0)', 'c14_3d': '(0)', 'up1_3d': '(0)', 'c20_3d': '(0)', 'c21_3d': '(0)', 'c22_3d': '(0)', 'c23_3d': '(0)', 'c24_3d': '(0)', 'up2_3d': '(0)', 'c30_3d': '(0)', 'c31_3d': '(0)', 'c32_3d': '(0)', 'c33_3d': '(0)', 'c34_3d': '(0)', 'init_fn': 'HyLFM_Net.InitName.xavier_uniform_', 'final_activation': 'None'}), '(dataset=DatasetChoice.beads_highc_b, batch_multiplier=2, batch_size=1,\n crit_apply_weight_above_threshold=False, crit_beta=1.0,\n crit_decay_weight_by=0.8, crit_decay_weight_every_unit=PeriodUnit.epoch,\n crit_decay_weight_every_value=1, crit_decay_weight_limit=1.0,\n crit_ms_ssim_weight=0.01, crit_threshold=0.5, crit_weight=0.001,\n criterion=CriterionChoice.WeightedSmoothL1, data_range=1.0,\n eval_batch_size=1, interpolation_order=2, lr_sched_factor=0.5,\n lr_sched_patience=10, lr_sched_thres=0.0001, lr_sched_thres_mode=\n LRSchedThresMode.abs, lr_scheduler=LRSchedulerChoice.ReduceLROnPlateau,\n max_epochs=10, model_weights=None, opt_lr=0.0003, opt_momentum=0.0,\n opt_weight_decay=0.0, optimizer=OptimizerChoice.Adam, patience=5,\n score_metric=MetricChoice.MS_SSIM, seed=None, validate_every_unit=\n PeriodUnit.epoch, validate_every_value=1, win_sigma=1.5, win_size=11,\n nnum=19, z_out=51, kernel2d=3, c00_2d=976, c01_2d=976, c02_2d=0, c03_2d\n =0, c04_2d=0, up0_2d=488, c10_2d=488, c11_2d=0, c12_2d=0, c13_2d=0,\n c14_2d=0, up1_2d=244, c20_2d=244, c21_2d=0, c22_2d=0, c23_2d=0, c24_2d=\n 0, up2_2d=0, c30_2d=0, c31_2d=0, c32_2d=0, c33_2d=0, c34_2d=0,\n last_kernel2d=1, cin_3d=7, kernel3d=3, c00_3d=7, c01_3d=0, c02_3d=0,\n c03_3d=0, c04_3d=0, up0_3d=7, c10_3d=7, c11_3d=7, c12_3d=0, c13_3d=0,\n c14_3d=0, up1_3d=0, c20_3d=0, c21_3d=0, c22_3d=0, c23_3d=0, c24_3d=0,\n up2_3d=0, c30_3d=0, c31_3d=0, c32_3d=0, c33_3d=0, 
c34_3d=0, init_fn=\n HyLFM_Net.InitName.xavier_uniform_, final_activation=None)\n', (302, 1857), False, 'from hylfm.train import train\n')] |
import os
import pickle
from PIL import Image
class PatientToImageFolder:
def __init__(self, sourceFolder):
self.sourceFolder = sourceFolder
        # How many patients with contrast SA for each pathology (used for classification)
        self.contrastSApathologyDict = {}
        # How many patients with contrast LA for each pathology (used for classification)
self.contrastCH2pathologyDict = {}
self.contrastCH3pathologyDict = {}
self.contrastCH4pathologyDict = {}
        # How many patients with an SA image (used for autoencoder training)
self.totalSaImagePatientNum = 0
self.curSaImagePatientNum = 0
        # How many patients with an LA image (used for autoencoder training)
self.totalCH2ImagePatientNum = 0
self.curCH2ImagePatientNum = 0
self.totalCH3ImagePatientNum = 0
self.curCH3ImagePatientNum = 0
self.totalCH4ImagePatientNum = 0
self.curCH4ImagePatientNum = 0
self.curContrastSaImagePatientNum = {}
self.curContrastCH2ImagePatientNum = {}
self.curContrastCH3ImagePatientNum = {}
self.curContrastCH4ImagePatientNum = {}
self.collectInfo()
def collectInfo(self):
for file in os.listdir(self.sourceFolder):
if ".p" in file:
tmpPat = pickle.load(open(os.path.join(self.sourceFolder, file), 'rb'))
patho = tmpPat.pathology.strip()
if "U18" in patho or "sport" in patho or "Normal" in patho:
continue
# elif "sport" in patho:
# patho = "Sport"
# elif "Normal" not in patho and "HCM" not in patho:
# patho = "Other"
if tmpPat.normalSaImages is not None:
self.totalSaImagePatientNum += 1
if (tmpPat.contrastSaImages is not None and tmpPat.contrastLaImages.ch2Images is not None and
tmpPat.contrastLaImages.ch3Images is not None and tmpPat.contrastLaImages.ch4Images is not None):
if patho in self.contrastSApathologyDict:
self.contrastSApathologyDict[patho] += 1
else:
self.contrastSApathologyDict[patho] = 1
if patho in self.contrastCH2pathologyDict:
self.contrastCH2pathologyDict[patho] += 1
else:
self.contrastCH2pathologyDict[patho] = 1
if patho in self.contrastCH3pathologyDict:
self.contrastCH3pathologyDict[patho] += 1
else:
self.contrastCH3pathologyDict[patho] = 1
if patho in self.contrastCH4pathologyDict:
self.contrastCH4pathologyDict[patho] += 1
else:
self.contrastCH4pathologyDict[patho] = 1
if tmpPat.normalLaImages.ch2Images is not None:
self.totalCH2ImagePatientNum += 1
if tmpPat.normalLaImages.ch3Images is not None:
self.totalCH3ImagePatientNum += 1
if tmpPat.normalLaImages.ch4Images is not None:
self.totalCH4ImagePatientNum += 1
for key in self.contrastSApathologyDict:
self.curContrastSaImagePatientNum[key] = 0
for key in self.contrastCH2pathologyDict:
self.curContrastCH2ImagePatientNum[key] = 0
for key in self.contrastCH3pathologyDict:
self.curContrastCH3ImagePatientNum[key] = 0
for key in self.contrastCH4pathologyDict:
self.curContrastCH4ImagePatientNum[key] = 0
def convertImage(self, image_2d):
# if image_2d.min() > 254:
# return None
# Converting image from numpy array to PIL.
pil_img = Image.fromarray(image_2d)
if pil_img.getbbox() is None:
return None
return pil_img
def createAutoEncoderImageFolderStructure(self, folderName):
autoFolder = os.path.join(os.path.dirname(self.sourceFolder), folderName)
autoTrainingFolder = os.path.join(autoFolder, "training")
autoTestFolder = os.path.join(autoFolder, "test")
os.makedirs(autoTrainingFolder)
os.makedirs(autoTestFolder)
return autoFolder, autoTrainingFolder, autoTestFolder
def createClassificationImageFolderStructure(self, folderName):
classFolder = os.path.join(os.path.dirname(self.sourceFolder), folderName)
classTrainingFolder = os.path.join(classFolder, "training")
classValidationFolder = os.path.join(classFolder, "validation")
classTestFolder = os.path.join(classFolder, "test")
classAllFolder = os.path.join(classFolder, 'all')
os.makedirs(classTrainingFolder)
os.makedirs(classValidationFolder)
os.makedirs(classTestFolder)
os.makedirs(classAllFolder)
return classFolder, classTrainingFolder, classValidationFolder, classTestFolder, classAllFolder
def saveImageForClassification(self, image, patientId, patho, testFolder, validationFolder, trainingFolder,
axis, imPatho, curPatientNum, allFolder, pathologyDict):
pil_img = self.convertImage(image[:, :])
if pil_img is not None:
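            # Per-pathology split: roughly 15% of patients go to test (the first 7.5% and the 85-92.5% band),
            # roughly 15% to validation (the 7.5-15% band and the last 7.5%), and the remainder to training.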
if (curPatientNum[patho] <= pathologyDict[patho] * 0.075 or
(pathologyDict[patho] * 0.85 <= curPatientNum[patho] <= pathologyDict[patho] * 0.925)):
imFolder = os.path.join(testFolder, imPatho)
os.makedirs(imFolder, exist_ok=True)
patientFolder = os.path.join(self.patientSeperatedTestFolder, imPatho + '_' + patientId)
os.makedirs(patientFolder, exist_ok=True)
elif ((pathologyDict[patho] * 0.075 <= curPatientNum[patho] <= pathologyDict[patho] * 0.15) or
curPatientNum[patho] >= int(pathologyDict[patho] * 0.925)):
imFolder = os.path.join(validationFolder, imPatho)
os.makedirs(imFolder, exist_ok=True)
patientFolder = os.path.join(self.patientSeperatedValidationFolder, imPatho + '_' + patientId)
os.makedirs(patientFolder, exist_ok=True)
else:
imFolder = os.path.join(trainingFolder, imPatho)
os.makedirs(imFolder, exist_ok=True)
patientFolder = os.path.join(self.patientSeperatedTrainingFolder, imPatho + '_' + patientId)
os.makedirs(patientFolder, exist_ok=True)
axisFolder = os.path.join(patientFolder, axis)
os.makedirs(axisFolder, exist_ok=True)
pil_img.save(os.path.join(imFolder, "{}.png".format(patientId)))
# pil_img.save(os.path.join(allFolder, "{}.png".format(patientId)))
pil_img.save(os.path.join(axisFolder, "{}.png".format(patientId)))
file = open(os.path.join(patientFolder, "pathology.txt"), "w")
file.write("{}\n".format(patho))
file.close()
def saveImageForAutoEncoder(self, images, patientId, testFolder, trainingFolder,
curPatientNum, totalPatientNum, sliceIdx, frameIdx):
if sliceIdx is not None:
pil_img = self.convertImage(images[sliceIdx, frameIdx, :, :])
else:
pil_img = self.convertImage(images[frameIdx, :, :])
if pil_img is not None:
if (curPatientNum <= totalPatientNum * 0.1
or curPatientNum >= int(totalPatientNum * 0.9)):
if sliceIdx is not None:
pil_img.save(os.path.join(testFolder, "{}_{}_{}.png".format(patientId, sliceIdx, frameIdx)))
else:
pil_img.save(os.path.join(testFolder, "{}_{}.png".format(patientId, frameIdx)))
else:
if sliceIdx is not None:
pil_img.save(os.path.join(trainingFolder, "{}_{}_{}.png".format(patientId, sliceIdx, frameIdx)))
else:
pil_img.save(os.path.join(trainingFolder, "{}_{}.png".format(patientId, frameIdx)))
def createImageFolderDatasets(self):
subfol = "only_abnormal"
# autoSaFolder, autoSaTrainingFolder, autoSaTestFolder = self.createAutoEncoderImageFolderStructure(
# "SaAutoEncoder")
(contrastSaFolder, contrastSaTrainingFolder,
contrastSaValidationFolder, contrastSaTestFolder,
contrastSaAllFolder) = self.createClassificationImageFolderStructure(
"{}/SaClassification".format(subfol))
# autoCH2Folder, autoCH2TrainingFolder, autoCH2TestFolder = self.createAutoEncoderImageFolderStructure(
# "CH2AutoEncoder")
(contrastCH2Folder, contrastCH2TrainingFolder,
contrastCH2ValidationFolder, contrastCH2TestFolder,
contrastCH2AllFolder) = self.createClassificationImageFolderStructure(
"{}/CH2Classification".format(subfol))
# autoCH3Folder, autoCH3TrainingFolder, autoCH3TestFolder = self.createAutoEncoderImageFolderStructure(
# "CH3AutoEncoder")
(contrastCH3Folder, contrastCH3TrainingFolder,
contrastCH3ValidationFolder, contrastCH3TestFolder,
contrastCH3AllFolder) = self.createClassificationImageFolderStructure(
"{}/CH3Classification".format(subfol))
# autoCH4Folder, autoCH4TrainingFolder, autoCH4TestFolder = self.createAutoEncoderImageFolderStructure(
# "CH4AutoEncoder")
(contrastCH4Folder, contrastCH4TrainingFolder,
contrastCH4ValidationFolder, contrastCH4TestFolder,
contrastCH4AllFolder) = self.createClassificationImageFolderStructure(
"{}/CH4Classification".format(subfol))
self.patientSeperatedFolder = os.path.join(os.path.dirname(self.sourceFolder), '{}/patients'.format(subfol))
os.makedirs(self.patientSeperatedFolder)
self.patientSeperatedTrainingFolder = os.path.join(self.patientSeperatedFolder, 'training')
self.patientSeperatedValidationFolder = os.path.join(self.patientSeperatedFolder, 'validation')
self.patientSeperatedTestFolder = os.path.join(self.patientSeperatedFolder, 'test')
os.makedirs(self.patientSeperatedTrainingFolder)
os.makedirs(self.patientSeperatedValidationFolder)
os.makedirs(self.patientSeperatedTestFolder)
for file in os.listdir(self.sourceFolder):
if ".p" in file:
tmpPat = pickle.load(open(os.path.join(self.sourceFolder, file), 'rb'))
patho = tmpPat.pathology.strip()
if "U18" in patho or "sport" in patho or "Normal" in patho:
continue
# elif "sport" in patho:
# patho = "Sport"
# elif "Normal" not in patho and "HCM" not in patho:
# patho = "Other"
imPatho = patho
# if "sport" in patho:
# imPatho = "Sport"
# if "Normal" not in patho:
# imPatho = "Hypertrophic"
classificationReady = False
if (tmpPat.contrastSaImages is not None and tmpPat.contrastLaImages.ch2Images is not None and
tmpPat.contrastLaImages.ch3Images is not None and tmpPat.contrastLaImages.ch4Images is not None):
classificationReady = True
# if tmpPat.normalSaImages is not None:
# for i in range(tmpPat.normalSaImages.shape[0]):
# for j in range(tmpPat.normalSaImages.shape[1]):
# self.saveImageForAutoEncoder(tmpPat.normalSaImages, tmpPat.patientID, autoSaTestFolder,
# autoSaTrainingFolder, self.curSaImagePatientNum,
# self.totalSaImagePatientNum, i, j)
# self.curSaImagePatientNum += 1
if classificationReady:
self.saveImageForClassification(tmpPat.contrastSaImages, tmpPat.patientID, patho,
contrastSaTestFolder, contrastSaValidationFolder,
contrastSaTrainingFolder, 'SA', imPatho,
self.curContrastSaImagePatientNum, contrastSaAllFolder,
self.contrastSApathologyDict)
self.curContrastSaImagePatientNum[patho] += 1
# if tmpPat.normalLaImages.ch2Images is not None:
# for i in range(tmpPat.normalLaImages.ch2Images.shape[0]):
# self.saveImageForAutoEncoder(tmpPat.normalLaImages.ch2Images, tmpPat.patientID,
# autoCH2TestFolder,
# autoCH2TrainingFolder, self.curCH2ImagePatientNum,
# self.totalCH2ImagePatientNum, None, i)
# self.curCH2ImagePatientNum += 1
if classificationReady:
self.saveImageForClassification(tmpPat.contrastLaImages.ch2Images, tmpPat.patientID, patho,
contrastCH2TestFolder, contrastCH2ValidationFolder,
contrastCH2TrainingFolder, 'CH2', imPatho,
self.curContrastCH2ImagePatientNum, contrastCH2AllFolder,
self.contrastCH2pathologyDict)
self.curContrastCH2ImagePatientNum[patho] += 1
# if tmpPat.normalLaImages.ch3Images is not None:
# for i in range(tmpPat.normalLaImages.ch3Images.shape[0]):
# self.saveImageForAutoEncoder(tmpPat.normalLaImages.ch3Images, tmpPat.patientID,
# autoCH3TestFolder,
# autoCH3TrainingFolder, self.curCH3ImagePatientNum,
# self.totalCH3ImagePatientNum, None, i)
# self.curCH3ImagePatientNum += 1
if classificationReady:
self.saveImageForClassification(tmpPat.contrastLaImages.ch3Images, tmpPat.patientID, patho,
contrastCH3TestFolder, contrastCH3ValidationFolder,
contrastCH3TrainingFolder, 'CH3', imPatho,
self.curContrastCH3ImagePatientNum, contrastCH3AllFolder,
self.contrastCH3pathologyDict)
self.curContrastCH3ImagePatientNum[patho] += 1
# if tmpPat.normalLaImages.ch4Images is not None:
# for i in range(tmpPat.normalLaImages.ch4Images.shape[0]):
# self.saveImageForAutoEncoder(tmpPat.normalLaImages.ch4Images, tmpPat.patientID,
# autoCH4TestFolder,
# autoCH4TrainingFolder, self.curCH4ImagePatientNum,
# self.totalCH4ImagePatientNum, None, i)
# self.curCH4ImagePatientNum += 1
if classificationReady:
self.saveImageForClassification(tmpPat.contrastLaImages.ch4Images, tmpPat.patientID, patho,
contrastCH4TestFolder, contrastCH4ValidationFolder,
contrastCH4TrainingFolder, 'CH4', imPatho,
self.curContrastCH4ImagePatientNum, contrastCH4AllFolder,
self.contrastCH4pathologyDict)
self.curContrastCH4ImagePatientNum[patho] += 1
self.createLabelFileFromPathoDict(contrastSaFolder, self.contrastSApathologyDict)
self.createLabelFileFromPathoDict(contrastCH2Folder, self.contrastCH2pathologyDict)
self.createLabelFileFromPathoDict(contrastCH3Folder, self.contrastCH3pathologyDict)
self.createLabelFileFromPathoDict(contrastCH4Folder, self.contrastCH4pathologyDict)
def createLabelFileFromPathoDict(self, destination, pathoDict):
file = open(os.path.join(destination, "pathologies.txt"), "w")
for key in pathoDict:
file.write("{}\n".format(key))
file.close()
if __name__ == "__main__":
sourceFolder = 'D:/BME/7felev/Szakdolgozat/whole_dataset/filtered_data'
imageFolderArranger = PatientToImageFolder(sourceFolder)
imageFolderArranger.createImageFolderDatasets()
| [
"PIL.Image.fromarray",
"os.listdir",
"os.makedirs",
"os.path.join",
"os.path.dirname"
]
| [((1237, 1266), 'os.listdir', 'os.listdir', (['self.sourceFolder'], {}), '(self.sourceFolder)\n', (1247, 1266), False, 'import os\n'), ((3888, 3913), 'PIL.Image.fromarray', 'Image.fromarray', (['image_2d'], {}), '(image_2d)\n', (3903, 3913), False, 'from PIL import Image\n'), ((4176, 4212), 'os.path.join', 'os.path.join', (['autoFolder', '"""training"""'], {}), "(autoFolder, 'training')\n", (4188, 4212), False, 'import os\n'), ((4238, 4270), 'os.path.join', 'os.path.join', (['autoFolder', '"""test"""'], {}), "(autoFolder, 'test')\n", (4250, 4270), False, 'import os\n'), ((4280, 4311), 'os.makedirs', 'os.makedirs', (['autoTrainingFolder'], {}), '(autoTrainingFolder)\n', (4291, 4311), False, 'import os\n'), ((4320, 4347), 'os.makedirs', 'os.makedirs', (['autoTestFolder'], {}), '(autoTestFolder)\n', (4331, 4347), False, 'import os\n'), ((4593, 4630), 'os.path.join', 'os.path.join', (['classFolder', '"""training"""'], {}), "(classFolder, 'training')\n", (4605, 4630), False, 'import os\n'), ((4663, 4702), 'os.path.join', 'os.path.join', (['classFolder', '"""validation"""'], {}), "(classFolder, 'validation')\n", (4675, 4702), False, 'import os\n'), ((4729, 4762), 'os.path.join', 'os.path.join', (['classFolder', '"""test"""'], {}), "(classFolder, 'test')\n", (4741, 4762), False, 'import os\n'), ((4788, 4820), 'os.path.join', 'os.path.join', (['classFolder', '"""all"""'], {}), "(classFolder, 'all')\n", (4800, 4820), False, 'import os\n'), ((4830, 4862), 'os.makedirs', 'os.makedirs', (['classTrainingFolder'], {}), '(classTrainingFolder)\n', (4841, 4862), False, 'import os\n'), ((4871, 4905), 'os.makedirs', 'os.makedirs', (['classValidationFolder'], {}), '(classValidationFolder)\n', (4882, 4905), False, 'import os\n'), ((4914, 4942), 'os.makedirs', 'os.makedirs', (['classTestFolder'], {}), '(classTestFolder)\n', (4925, 4942), False, 'import os\n'), ((4951, 4978), 'os.makedirs', 'os.makedirs', (['classAllFolder'], {}), '(classAllFolder)\n', (4962, 4978), False, 'import os\n'), ((9947, 9987), 'os.makedirs', 'os.makedirs', (['self.patientSeperatedFolder'], {}), '(self.patientSeperatedFolder)\n', (9958, 9987), False, 'import os\n'), ((10034, 10087), 'os.path.join', 'os.path.join', (['self.patientSeperatedFolder', '"""training"""'], {}), "(self.patientSeperatedFolder, 'training')\n", (10046, 10087), False, 'import os\n'), ((10136, 10191), 'os.path.join', 'os.path.join', (['self.patientSeperatedFolder', '"""validation"""'], {}), "(self.patientSeperatedFolder, 'validation')\n", (10148, 10191), False, 'import os\n'), ((10234, 10283), 'os.path.join', 'os.path.join', (['self.patientSeperatedFolder', '"""test"""'], {}), "(self.patientSeperatedFolder, 'test')\n", (10246, 10283), False, 'import os\n'), ((10292, 10340), 'os.makedirs', 'os.makedirs', (['self.patientSeperatedTrainingFolder'], {}), '(self.patientSeperatedTrainingFolder)\n', (10303, 10340), False, 'import os\n'), ((10349, 10399), 'os.makedirs', 'os.makedirs', (['self.patientSeperatedValidationFolder'], {}), '(self.patientSeperatedValidationFolder)\n', (10360, 10399), False, 'import os\n'), ((10408, 10452), 'os.makedirs', 'os.makedirs', (['self.patientSeperatedTestFolder'], {}), '(self.patientSeperatedTestFolder)\n', (10419, 10452), False, 'import os\n'), ((10474, 10503), 'os.listdir', 'os.listdir', (['self.sourceFolder'], {}), '(self.sourceFolder)\n', (10484, 10503), False, 'import os\n'), ((4099, 4133), 'os.path.dirname', 'os.path.dirname', (['self.sourceFolder'], {}), '(self.sourceFolder)\n', (4114, 4133), False, 'import os\n'), ((4515, 4549), 
'os.path.dirname', 'os.path.dirname', (['self.sourceFolder'], {}), '(self.sourceFolder)\n', (4530, 4549), False, 'import os\n'), ((6629, 6662), 'os.path.join', 'os.path.join', (['patientFolder', 'axis'], {}), '(patientFolder, axis)\n', (6641, 6662), False, 'import os\n'), ((6675, 6713), 'os.makedirs', 'os.makedirs', (['axisFolder'], {'exist_ok': '(True)'}), '(axisFolder, exist_ok=True)\n', (6686, 6713), False, 'import os\n'), ((9873, 9907), 'os.path.dirname', 'os.path.dirname', (['self.sourceFolder'], {}), '(self.sourceFolder)\n', (9888, 9907), False, 'import os\n'), ((16692, 16736), 'os.path.join', 'os.path.join', (['destination', '"""pathologies.txt"""'], {}), "(destination, 'pathologies.txt')\n", (16704, 16736), False, 'import os\n'), ((5577, 5610), 'os.path.join', 'os.path.join', (['testFolder', 'imPatho'], {}), '(testFolder, imPatho)\n', (5589, 5610), False, 'import os\n'), ((5627, 5663), 'os.makedirs', 'os.makedirs', (['imFolder'], {'exist_ok': '(True)'}), '(imFolder, exist_ok=True)\n', (5638, 5663), False, 'import os\n'), ((5696, 5768), 'os.path.join', 'os.path.join', (['self.patientSeperatedTestFolder', "(imPatho + '_' + patientId)"], {}), "(self.patientSeperatedTestFolder, imPatho + '_' + patientId)\n", (5708, 5768), False, 'import os\n'), ((5785, 5826), 'os.makedirs', 'os.makedirs', (['patientFolder'], {'exist_ok': '(True)'}), '(patientFolder, exist_ok=True)\n', (5796, 5826), False, 'import os\n'), ((6974, 7018), 'os.path.join', 'os.path.join', (['patientFolder', '"""pathology.txt"""'], {}), "(patientFolder, 'pathology.txt')\n", (6986, 7018), False, 'import os\n'), ((6039, 6078), 'os.path.join', 'os.path.join', (['validationFolder', 'imPatho'], {}), '(validationFolder, imPatho)\n', (6051, 6078), False, 'import os\n'), ((6095, 6131), 'os.makedirs', 'os.makedirs', (['imFolder'], {'exist_ok': '(True)'}), '(imFolder, exist_ok=True)\n', (6106, 6131), False, 'import os\n'), ((6164, 6242), 'os.path.join', 'os.path.join', (['self.patientSeperatedValidationFolder', "(imPatho + '_' + patientId)"], {}), "(self.patientSeperatedValidationFolder, imPatho + '_' + patientId)\n", (6176, 6242), False, 'import os\n'), ((6259, 6300), 'os.makedirs', 'os.makedirs', (['patientFolder'], {'exist_ok': '(True)'}), '(patientFolder, exist_ok=True)\n', (6270, 6300), False, 'import os\n'), ((6346, 6383), 'os.path.join', 'os.path.join', (['trainingFolder', 'imPatho'], {}), '(trainingFolder, imPatho)\n', (6358, 6383), False, 'import os\n'), ((6400, 6436), 'os.makedirs', 'os.makedirs', (['imFolder'], {'exist_ok': '(True)'}), '(imFolder, exist_ok=True)\n', (6411, 6436), False, 'import os\n'), ((6469, 6545), 'os.path.join', 'os.path.join', (['self.patientSeperatedTrainingFolder', "(imPatho + '_' + patientId)"], {}), "(self.patientSeperatedTrainingFolder, imPatho + '_' + patientId)\n", (6481, 6545), False, 'import os\n'), ((6562, 6603), 'os.makedirs', 'os.makedirs', (['patientFolder'], {'exist_ok': '(True)'}), '(patientFolder, exist_ok=True)\n', (6573, 6603), False, 'import os\n'), ((1339, 1376), 'os.path.join', 'os.path.join', (['self.sourceFolder', 'file'], {}), '(self.sourceFolder, file)\n', (1351, 1376), False, 'import os\n'), ((10576, 10613), 'os.path.join', 'os.path.join', (['self.sourceFolder', 'file'], {}), '(self.sourceFolder, file)\n', (10588, 10613), False, 'import os\n')] |
import pandas as pd
import numpy as np
import pickle
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from math import sqrt
from sklearn.svm import SVR
from sklearn.svm import LinearSVR
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
def prune(x):
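    """Clamp a predicted relevance score into the valid [1, 3] range."""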
if x < 1:
return 1
elif x > 3:
return 3
else:
return x
def regression(reg_type, standardize_df, debug=False):
# load model
filename = '../../dataset/model_' + reg_type + '.pickle'
lin_model = None
with open(filename, 'rb') as f:
lin_model = pickle.load(f)
score_df_tst = pd.read_pickle('../../dataset/score_df_final_tst.pickle')
# Fill NaN value
# score_df = score_df.fillna(0.0)
# The last column is the target
X = np.array(score_df_tst)
if standardize_df:
print("Standardizing...")
with open("../../dataset/scaler.pickle", 'rb') as handle:
scaler = pickle.load(handle)
X = scaler.transform(X)
    # Debug: print only objects that exist in this prediction-only script
    if debug:
        print("Score DataFrame")
        print(score_df_tst)
        print("")
        print("Test feature values")
        print(X)
        print("")
        print("Shape of X")
        print(X.shape)
        print("")
yts_pred = lin_model.predict(X)
#yts_error = sqrt(mean_squared_error(yts_pred, yts))
print("Prediction by (" + reg_type + ") on Test data have finished")
# create submission file
id_series = pd.read_csv('../../dataset/test.csv')['id']
submission_df = pd.DataFrame(id_series, columns=['id'])
submission_df['relevance'] = yts_pred
submission_df['relevance'] = submission_df['relevance'].map(lambda x: prune(x))
submission_df.to_csv('../../dataset/submission.csv', columns=['id', 'relevance'], index=False)
if __name__ == "__main__":
# Change between:
# svr
# linear
# rfr
regression_type = 'svr'
standardize_df = True
regression(regression_type, standardize_df, debug=False) | [
"pandas.read_pickle",
"pandas.read_csv",
"pickle.load",
"numpy.array",
"pandas.DataFrame"
]
| [((697, 754), 'pandas.read_pickle', 'pd.read_pickle', (['"""../../dataset/score_df_final_tst.pickle"""'], {}), "('../../dataset/score_df_final_tst.pickle')\n", (711, 754), True, 'import pandas as pd\n'), ((848, 870), 'numpy.array', 'np.array', (['score_df_tst'], {}), '(score_df_tst)\n', (856, 870), True, 'import numpy as np\n'), ((1706, 1745), 'pandas.DataFrame', 'pd.DataFrame', (['id_series'], {'columns': "['id']"}), "(id_series, columns=['id'])\n", (1718, 1745), True, 'import pandas as pd\n'), ((665, 679), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (676, 679), False, 'import pickle\n'), ((1645, 1682), 'pandas.read_csv', 'pd.read_csv', (['"""../../dataset/test.csv"""'], {}), "('../../dataset/test.csv')\n", (1656, 1682), True, 'import pandas as pd\n'), ((992, 1011), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (1003, 1011), False, 'import pickle\n')] |
import numpy as np
import serial
import time
import matplotlib.pyplot as plt
def getData():
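    """Read sensor values from the serial port for roughly 10 seconds and return them as a list."""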
ser = serial.Serial('/dev/ttyACM7', 9600)
sensorReadings = []
start = time.time()
current = time.time()
while current - start < 10:
        data = ser.readline()
        # pyserial returns bytes on Python 3; decode before converting to float
        sensorReadings.append(float(data.decode().strip()))
        current = time.time()
    ser.close()
    return sensorReadings
def plotter(sensorReadings):
plt.plot(sensorReadings)
plt.ylabel('EEG Sensor sensorReadings')
plt.show()
if __name__ == '__main__':
sensorReadings = getData()
plotter(sensorReadings)
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"serial.Serial",
"time.time",
"matplotlib.pyplot.show"
]
| [((109, 144), 'serial.Serial', 'serial.Serial', (['"""/dev/ttyACM7"""', '(9600)'], {}), "('/dev/ttyACM7', 9600)\n", (122, 144), False, 'import serial\n'), ((183, 194), 'time.time', 'time.time', ([], {}), '()\n', (192, 194), False, 'import time\n'), ((210, 221), 'time.time', 'time.time', ([], {}), '()\n', (219, 221), False, 'import time\n'), ((424, 448), 'matplotlib.pyplot.plot', 'plt.plot', (['sensorReadings'], {}), '(sensorReadings)\n', (432, 448), True, 'import matplotlib.pyplot as plt\n'), ((454, 493), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""EEG Sensor sensorReadings"""'], {}), "('EEG Sensor sensorReadings')\n", (464, 493), True, 'import matplotlib.pyplot as plt\n'), ((499, 509), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (507, 509), True, 'import matplotlib.pyplot as plt\n'), ((348, 359), 'time.time', 'time.time', ([], {}), '()\n', (357, 359), False, 'import time\n')] |
from CMText.TextClient import TextClient
# Message to be sent
message = 'Example message to be sent'
# Media to be sent
media = {
"mediaName": "conversational-commerce",
"mediaUri": "https://www.cm.com/cdn/cm/cm.png",
"mimeType": "image/png"
}
# AllowedChannels in this case Whatsapp
allowedChannels = ['Whatsapp']
# Recipients
to = ['003156789000', '002134567890']
# Instantiate client with your own api-key
client = TextClient(apikey=UNIQUE_API_KEY)
# Add a Rich message to the queue
client.AddRichMessage(message=message, from_='pythonSDK', to=to, allowedChannels=allowedChannels, media=media)
# Send the messages
response = client.send()
# Print response
print(response.text) | [
"CMText.TextClient.TextClient"
]
| [((468, 501), 'CMText.TextClient.TextClient', 'TextClient', ([], {'apikey': 'UNIQUE_API_KEY'}), '(apikey=UNIQUE_API_KEY)\n', (478, 501), False, 'from CMText.TextClient import TextClient\n')] |
# Copyright 2021 <NAME> <<EMAIL>>.
# SPDX-License-Identifier: MIT
# Telegram API framework core imports
from collections import namedtuple
from functools import partial
from ganjoor.ganjoor import Ganjoor
from telegram.ext import Dispatcher, CallbackContext
from telegram import Update
# Helper methods import
from utils.logger import get_logger
from utils.telegram.keyboards import category_keyboard
# Telegram API framework handlers imports
from telegram.ext import CallbackQueryHandler
# Init logger
logger = get_logger(__name__)
CallbackData = namedtuple('CallbackData', "menu_name doto")
def init(dispatcher: Dispatcher, ganjoor: Ganjoor):
"""Provide handlers initialization."""
dispatcher.add_handler(CallbackQueryHandler(
partial(category_id, ganjoor=ganjoor), pattern=r'^category_*'))
def category_id(update: Update, context: CallbackContext, ganjoor: Ganjoor) -> int:
"""Process a /start command."""
query = update.callback_query
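    # Callback data is expected to look like "category_<category_id>_<message_id>"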
message_id = '_'.join(query.data.split('_')[2:])
cat_id = query.data.split('_')[1]
cat = ganjoor.find_category_by_id(cat_id, with_poems=True)
# query.answer()
query.answer()
context.bot.edit_message_reply_markup(
inline_message_id=message_id, reply_markup=category_keyboard(cat, message_id))
# query.edit_reply_markup()
| [
"collections.namedtuple",
"functools.partial",
"utils.logger.get_logger",
"utils.telegram.keyboards.category_keyboard"
]
| [((514, 534), 'utils.logger.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (524, 534), False, 'from utils.logger import get_logger\n'), ((550, 594), 'collections.namedtuple', 'namedtuple', (['"""CallbackData"""', '"""menu_name doto"""'], {}), "('CallbackData', 'menu_name doto')\n", (560, 594), False, 'from collections import namedtuple\n'), ((749, 786), 'functools.partial', 'partial', (['category_id'], {'ganjoor': 'ganjoor'}), '(category_id, ganjoor=ganjoor)\n', (756, 786), False, 'from functools import partial\n'), ((1260, 1294), 'utils.telegram.keyboards.category_keyboard', 'category_keyboard', (['cat', 'message_id'], {}), '(cat, message_id)\n', (1277, 1294), False, 'from utils.telegram.keyboards import category_keyboard\n')] |
import uos as os
import time
def countdown():
for i in range(5, 0, -1):
print("start stubbing in {}...".format(i))
time.sleep(1)
import createstubs
# import stub_lvgl
try:
# only run import if no stubs yet
os.listdir("stubs")
print("stub folder was found, stubbing is not automatically started")
except OSError:
countdown()
| [
"uos.listdir",
"time.sleep"
]
| [((247, 266), 'uos.listdir', 'os.listdir', (['"""stubs"""'], {}), "('stubs')\n", (257, 266), True, 'import uos as os\n'), ((137, 150), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (147, 150), False, 'import time\n')] |
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from blinkpy.common.host import Host
from blinkpy.common.host_mock import MockHost
from blinkpy.web_tests.breakpad.dump_reader_multipart import DumpReaderMultipart
class TestDumpReaderMultipart(unittest.TestCase):
_MULTIPART_DUMP = [
'--boundary',
'Content-Disposition: form-data; name="prod"',
'',
'content_shell',
'--boundary',
'Content-Disposition: form-data; name="pid"',
'',
'4711',
'--boundary',
'Content-Disposition: form-data; name="upload_file_minidump"; filename="dump"',
'Content-Type: application/octet-stream',
'',
'MDMP',
'--boundary--',
]
def test_check_generate_breakpad_symbols_actually_exists(self):
host = Host()
dump_reader = DumpReaderMultipart(host, build_dir=None)
self.assertTrue(host.filesystem.exists(dump_reader._path_to_generate_breakpad_symbols()))
def test_check_is_functional_breakpad_tools_not_found(self):
host = MockHost()
build_dir = "/mock-checkout/out/Debug"
host.filesystem.maybe_make_directory(build_dir)
dump_reader = DumpReaderMultipart(host, build_dir)
dump_reader._file_extension = lambda: 'dmp'
dump_reader._binaries_to_symbolize = lambda: ['content_shell']
self.assertFalse(dump_reader.check_is_functional())
def test_get_pid_from_dump(self):
host = MockHost()
dump_file = '/crash-dumps/dump.dmp'
expected_pid = '4711'
host.filesystem.write_text_file(dump_file, "\r\n".join(TestDumpReaderMultipart._MULTIPART_DUMP))
build_dir = "/mock-checkout/out/Debug"
host.filesystem.maybe_make_directory(build_dir)
host.filesystem.exists = lambda x: True
# The mock file object returned by open_binary_file_for_reading doesn't
# have readline(), however, the real File object does.
host.filesystem.open_binary_file_for_reading = host.filesystem.open_text_file_for_reading
dump_reader = DumpReaderMultipart(host, build_dir)
dump_reader._file_extension = lambda: 'dmp'
dump_reader._binaries_to_symbolize = lambda: ['content_shell']
self.assertTrue(dump_reader.check_is_functional())
self.assertEqual(expected_pid, dump_reader._get_pid_from_dump(dump_file))
def test_get_stack_from_dump(self):
host = MockHost()
dump_file = '/crash-dumps/dump.dmp'
host.filesystem.write_text_file(dump_file, "\r\n".join(TestDumpReaderMultipart._MULTIPART_DUMP))
build_dir = "/mock-checkout/out/Debug"
host.filesystem.maybe_make_directory(build_dir)
host.filesystem.exists = lambda x: True
# The mock file object returned by open_binary_file_for_reading doesn't
# have readline(), however, the real File object does.
host.filesystem.open_binary_file_for_reading = host.filesystem.open_text_file_for_reading
dump_reader = DumpReaderMultipart(host, build_dir)
dump_reader._file_extension = lambda: 'dmp'
dump_reader._binaries_to_symbolize = lambda: ['content_shell']
self.assertTrue(dump_reader.check_is_functional())
self.assertEqual("MOCK output of child process", dump_reader._get_stack_from_dump(dump_file))
self.assertEqual(2, len(host.executive.calls))
cmd_line = " ".join(host.executive.calls[0])
self.assertIn('generate_breakpad_symbols.py', cmd_line)
cmd_line = " ".join(host.executive.calls[1])
self.assertIn('minidump_stackwalk', cmd_line)
| [
"blinkpy.common.host_mock.MockHost",
"blinkpy.web_tests.breakpad.dump_reader_multipart.DumpReaderMultipart",
"blinkpy.common.host.Host"
]
| [((2307, 2313), 'blinkpy.common.host.Host', 'Host', ([], {}), '()\n', (2311, 2313), False, 'from blinkpy.common.host import Host\n'), ((2336, 2377), 'blinkpy.web_tests.breakpad.dump_reader_multipart.DumpReaderMultipart', 'DumpReaderMultipart', (['host'], {'build_dir': 'None'}), '(host, build_dir=None)\n', (2355, 2377), False, 'from blinkpy.web_tests.breakpad.dump_reader_multipart import DumpReaderMultipart\n'), ((2557, 2567), 'blinkpy.common.host_mock.MockHost', 'MockHost', ([], {}), '()\n', (2565, 2567), False, 'from blinkpy.common.host_mock import MockHost\n'), ((2694, 2730), 'blinkpy.web_tests.breakpad.dump_reader_multipart.DumpReaderMultipart', 'DumpReaderMultipart', (['host', 'build_dir'], {}), '(host, build_dir)\n', (2713, 2730), False, 'from blinkpy.web_tests.breakpad.dump_reader_multipart import DumpReaderMultipart\n'), ((2969, 2979), 'blinkpy.common.host_mock.MockHost', 'MockHost', ([], {}), '()\n', (2977, 2979), False, 'from blinkpy.common.host_mock import MockHost\n'), ((3575, 3611), 'blinkpy.web_tests.breakpad.dump_reader_multipart.DumpReaderMultipart', 'DumpReaderMultipart', (['host', 'build_dir'], {}), '(host, build_dir)\n', (3594, 3611), False, 'from blinkpy.web_tests.breakpad.dump_reader_multipart import DumpReaderMultipart\n'), ((3933, 3943), 'blinkpy.common.host_mock.MockHost', 'MockHost', ([], {}), '()\n', (3941, 3943), False, 'from blinkpy.common.host_mock import MockHost\n'), ((4509, 4545), 'blinkpy.web_tests.breakpad.dump_reader_multipart.DumpReaderMultipart', 'DumpReaderMultipart', (['host', 'build_dir'], {}), '(host, build_dir)\n', (4528, 4545), False, 'from blinkpy.web_tests.breakpad.dump_reader_multipart import DumpReaderMultipart\n')] |
#!/usr/bin/env python
""" ngc - n-grams count
License: 3-clause BSD (see https://opensource.org/licenses/BSD-3-Clause)
Author: <NAME>
"""
import getopt
import logging
import os
import re
import string
import sys
import unicode2ascii
# Version string used by the what(1) and ident(1) commands:
ID = "@(#) $Id: ngc - n-grams count v1.0.2 (September 26, 2021) by <NAME> $"
# Default parameters. Can be superseded by command line options
parameters = {
"Convert": {
"Unicode to ASCII": False,
"Upper to lower case": False,
"Lower to upper case": False,
"Spaces to one space": False,
},
"Discard": {
"Unicode characters": False,
"Upper case letters": False,
"Lower case letters": False,
"Connection symbols": False, # ' -
"Digits": False,
"Punctuation": False, # . , ; : ! ?
"Other printable symbols": False,
"Spaces": False, # space tab return formfeed vtab
"Control characters": False,
},
"Length": 1,
"Fixed block": False, # Sliding-window mode by default
"Word boundary": False,
"Partial": {
"Discard": False,
"Keep": True,
"Justify": False,
},
"Show": {
"Text": False,
"N-grams": True,
"Summary": False,
},
}
occurrences = {}
summary = {
"Upper case letters": 0,
"Lower case letters": 0,
"Connection symbols": 0,
"Digits": 0,
"Punctuation": 0,
"Other printable symbols": 0,
"Spaces": 0,
"Other spaces": 0,
"Control characters": 0,
"Unicode letters": 0,
"Unicode marks": 0,
"Unicode numbers": 0,
"Unicode punctuations": 0,
"Unicode symbols": 0,
"Unicode separators": 0,
"Unicode others": 0,
"All unicode characters": 0,
"All characters": 0,
"All n-grams": 0
}
################################################################################
def initialize_debugging(program_name):
"""Debugging set up"""
console_log_format = program_name + ": %(levelname)s: %(message)s"
logging.basicConfig(format=console_log_format, level=logging.DEBUG)
logging.disable(logging.INFO)
################################################################################
def display_help():
"""Displays usage and help"""
print("usage: ngc [-b|--block] [-c|--convert ARGS] [--debug]", file=sys.stderr)
print(" [-d|--discard ARGS] [--help|-?] [-l|--length ARG]", file=sys.stderr)
print(" [-p|--partial ARG] [-q|--quiet] [-s|--summary] [-t|--text]", file=sys.stderr)
print(" [--version] [-w|--word] [--] [filename ...]", file=sys.stderr)
print(" ----------------- ----------------------------------------------------",
file=sys.stderr
)
print(" -b|--block Use fixed- instead of sliding-windows blocks", file=sys.stderr)
print(" -c|--convert ARGS Convert text input. A combination of:", file=sys.stderr)
print(" ARG = a - Unicode characters to ASCII (remove accents)", file=sys.stderr)
print(" ARG = l - Upper case letters to lower", file=sys.stderr)
print(" ARG = u - Lower case letters to upper", file=sys.stderr)
print(" ARG = s - Spaces-like characters to 1 space", file=sys.stderr)
print(" ARGS l and u can't be used at the same time", file=sys.stderr)
print(" -d|--discard ARGS Discard characters. A combination of:", file=sys.stderr)
print(" ARG = U - Unicode characters", file=sys.stderr)
print(" ARG = u - Upper case letters", file=sys.stderr)
print(" ARG = l - Lower case letters", file=sys.stderr)
print(" ARG = L - All letters", file=sys.stderr)
print(" ARG = c - Connection symbols ('-)", file=sys.stderr)
print(" ARG = d - Digits", file=sys.stderr)
print(" ARG = p - Punctuation (.,;:!?)", file=sys.stderr)
print(" ARG = o - Other printable symbols", file=sys.stderr)
print(" ARG = s - Spaces (space, tab, return, formfeed, vtab)", file=sys.stderr)
print(" ARG = n - Non printable Control characters", file=sys.stderr)
print(" -l|--length ARG Length of the n-gram. Defaults to 1", file=sys.stderr)
print(" -p|--partial ARG What to do with partial blocks? One among:", file=sys.stderr)
print(" ARG = d - Discard", file=sys.stderr)
print(" ARG = k - Keep as-is", file=sys.stderr)
print(" ARG = j - Keep but right-justify with spaces", file=sys.stderr)
print(" -q|--quiet Don't show occurrences and frequency by n-gram", file=sys.stderr)
print(" -s|--summary Show a summary of what was processed", file=sys.stderr)
print(" -t|--text Show modified text input", file=sys.stderr)
print(" -w|--word Respect Word boundaries (delimited by spaces)", file=sys.stderr)
print(" --debug Enable debug mode", file=sys.stderr)
print(" --help|-? Print usage and this help message and exit", file=sys.stderr)
print(" --version Print version and exit", file=sys.stderr)
print(" -- Options processing terminator", file=sys.stderr)
print(file=sys.stderr)
################################################################################
def process_environment_variables():
"""Process environment variables"""
if "NGC_DEBUG" in os.environ.keys():
logging.disable(logging.NOTSET)
################################################################################
def process_command_line():
"""Process command line"""
# pylint: disable=C0103
global parameters
# pylint: enable=C0103
# option letters followed by : expect an argument
# same for option strings followed by =
character_options = "bc:d:l:p:qstw?"
string_options = [
"block",
"convert=",
"debug",
"discard=",
"help",
"length=",
"partial=",
"quiet",
"summary",
"text",
"version",
"word",
]
try:
options, remaining_arguments = getopt.getopt(
sys.argv[1:], character_options, string_options
)
except getopt.GetoptError as error:
logging.critical(error)
display_help()
sys.exit(1)
for option, argument in options:
if option in ("-b", "--block"):
parameters["Fixed block"] = True
elif option in ("-c", "--convert"):
if 'l' in argument and 'u' in argument:
logging.critical("-c|--convert parameter can't contain [lu] at the same time")
sys.exit(1)
if 'a' in argument:
parameters["Convert"]["Unicode to ASCII"] = True
if 'l' in argument:
parameters["Convert"]["Upper to lower case"] = True
if 'u' in argument:
parameters["Convert"]["Lower to upper case"] = True
if 's' in argument:
parameters["Convert"]["Spaces to one space"] = True
elif option in ("-d", "--discard"):
if 'U' in argument:
parameters["Discard"]["Unicode characters"] = True
if 'u' in argument:
parameters["Discard"]["Upper case letters"] = True
if 'l' in argument:
parameters["Discard"]["Lower case letters"] = True
if 'L' in argument:
parameters["Discard"]["Upper case letters"] = True
parameters["Discard"]["Lower case letters"] = True
if 'c' in argument:
parameters["Discard"]["Connection symbols"] = True
if 'd' in argument:
parameters["Discard"]["Digits"] = True
if 'p' in argument:
parameters["Discard"]["Punctuation"] = True
if 'o' in argument:
parameters["Discard"]["Other printable symbols"] = True
if 's' in argument:
parameters["Discard"]["Spaces"] = True
if 'n' in argument:
parameters["Discard"]["Control characters"] = True
elif option in ("-l", "--length"):
if argument.isdigit() and int(argument) >= 0:
parameters["Length"] = int(argument)
else:
logging.critical("-l|--length parameter must be a strictly positive integer")
sys.exit(1)
elif option in ("-p", "--partial"):
if len(argument) > 1 or argument not in ('d', 'k', 'j'):
logging.critical("-p|--partial parameter must be a single character among [dkj]")
sys.exit(1)
if argument == 'd':
parameters["Partial"]["Discard"] = True
parameters["Partial"]["Keep"] = False
elif argument == 'j':
parameters["Partial"]["Justify"] = True
parameters["Partial"]["Keep"] = False
elif option in ("-q", "--quiet"):
parameters["Show"]["N-grams"] = False
elif option in ("-s", "--summary"):
parameters["Show"]["Summary"] = True
elif option in ("-t", "--text"):
parameters["Show"]["Text"] = True
elif option in ("-w", "--word"):
parameters["Word boundary"] = True
elif option == "--debug":
logging.disable(logging.NOTSET)
elif option in ("--help", "-?"):
display_help()
sys.exit(0)
elif option == "--version":
print(ID.replace("@(" + "#)" + " $" + "Id" + ": ", "").replace(" $", ""))
sys.exit(0)
logging.debug("process_command_line(): parameters:")
logging.debug(parameters)
logging.debug("process_command_line(): remaining_arguments:")
logging.debug(remaining_arguments)
return remaining_arguments
################################################################################
def handle_partial_n_gram(text):
"""Analyze n-grams frequency in a string"""
# pylint: disable=C0103
global occurrences, summary
# pylint: enable=C0103
if not parameters["Partial"]["Discard"]:
if parameters["Partial"]["Justify"]:
for _ in range(parameters["Length"] - len(text)):
text += " "
if text in occurrences:
occurrences[text] += 1
else:
occurrences[text] = 1
summary["All n-grams"] += 1
################################################################################
def frequency_analysis(text):
"""Analyze n-grams frequency in a string"""
# pylint: disable=C0103
global occurrences, summary
# pylint: enable=C0103
if parameters["Show"]["Summary"]:
for character in text:
if ord(character) < 128:
if character in string.ascii_uppercase:
summary["Upper case letters"] += 1
elif character in string.ascii_lowercase:
summary["Lower case letters"] += 1
elif character in ("'", "-"):
summary["Connection symbols"] += 1
elif character in string.digits:
summary["Digits"] += 1
elif character in (".", ",", ";", ":", "!", "?"):
summary["Punctuation"] += 1
elif character == " ":
summary["Spaces"] += 1
elif character in string.whitespace:
summary["Other spaces"] += 1
elif (ord(character) < 32 and ord(character) not in (9, 11, 12, 13)) \
or ord(character) == 127:
summary["Control characters"] += 1
else:
summary["Other printable symbols"] += 1
else:
summary["All unicode characters"] += 1
if unicode2ascii.is_unicode_letter(character):
summary["Unicode letters"] += 1
elif unicode2ascii.is_unicode_mark(character):
summary["Unicode marks"] += 1
elif unicode2ascii.is_unicode_number(character):
summary["Unicode numbers"] += 1
elif unicode2ascii.is_unicode_punctuation(character):
summary["Unicode punctuations"] += 1
elif unicode2ascii.is_unicode_symbol(character):
summary["Unicode symbols"] += 1
elif unicode2ascii.is_unicode_separator(character):
summary["Unicode separators"] += 1
else:
summary["Unicode others"] += 1
if len(text) <= parameters["Length"]:
if text:
handle_partial_n_gram(text)
else:
i = 0
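        # Sliding-window mode advances one character at a time; fixed-block mode jumps by the n-gram length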
while i < len(text) + 1 - parameters["Length"]:
sequence = text[i:i + parameters["Length"]]
if sequence in occurrences:
occurrences[sequence] += 1
else:
occurrences[sequence] = 1
summary["All n-grams"] += 1
if parameters["Fixed block"]:
i += parameters["Length"]
else:
i += 1
if i < len(text):
handle_partial_n_gram(text[i:])
################################################################################
def process_line(line):
"""Process a text line"""
# pylint: disable=C0103
global summary
# pylint: enable=C0103
line = line.rstrip(os.linesep)
# Conversions:
if parameters["Convert"]["Unicode to ASCII"]:
line = unicode2ascii.unicode_to_ascii_string(line)
if parameters["Convert"]["Upper to lower case"]:
line = line.lower()
if parameters["Convert"]["Lower to upper case"]:
line = line.upper()
# Discards:
if parameters["Discard"]["Unicode characters"]:
line = "".join([c for c in line if ord(c) < 128])
if parameters["Discard"]["Upper case letters"]:
line = re.sub(r"[A-Z]+", "", line)
if parameters["Discard"]["Lower case letters"]:
line = re.sub(r"[a-z]+", "", line)
if parameters["Discard"]["Connection symbols"]:
line = re.sub(r"[-']+", "", line)
if parameters["Discard"]["Digits"]:
line = re.sub(r"[0-9]+", "", line)
if parameters["Discard"]["Punctuation"]:
line = re.sub(r"[\.,;:!\?]+", "", line)
if parameters["Discard"]["Other printable symbols"]:
line = re.sub(r"[\"#$&@\[\\\]_`{|}~%()\*+/<=>^]+", "", line)
if parameters["Discard"]["Spaces"]:
line = re.sub(r"[" + string.whitespace + r"]+", "", line)
if parameters["Discard"]["Control characters"]:
line = "".join(
[c for c in line if not (ord(c) < 9 or (ord(c) > 13 and ord(c) < 32) or ord(c) == 127)]
)
# Late conversions:
if parameters["Convert"]["Spaces to one space"]:
line = re.sub(r"[" + string.whitespace + r"]+", " ", line)
if parameters["Show"]["Text"]:
print(line)
if parameters["Word boundary"]:
# Splitting words on all kind of whitespaces:
for word in line.split():
if word:
frequency_analysis(word)
summary["All characters"] += len(word)
else:
frequency_analysis(line)
summary["All characters"] += len(line)
################################################################################
def process_file(filename):
"""Process the file designated by filename, line by line"""
with open(filename, "r") as file:
for line in file.readlines():
process_line(line)
################################################################################
def compute_kappa_plaintext():
"""Return kappa_plaintext for the processed input stream"""
# pylint: disable=C0103
global occurrences, summary
# pylint: enable=C0103
# See https://en.wikipedia.org/wiki/Index_of_coincidence
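    # kappa-plaintext = sum_i n_i * (n_i - 1) / (N * (N - 1)),
    # where n_i is the occurrence count of n-gram i and N is the total number of n-grams counted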
    index = 0.0
    for occurrence in occurrences.values():
        index += occurrence * (occurrence - 1)
    if summary["All n-grams"] < 2:
        # Guard against division by zero on empty or single-n-gram input
        return 0.0
    return index / (summary["All n-grams"] * (summary["All n-grams"] - 1))
################################################################################
def compute_coincidence_index(kappa_plaintext):
"""Return coincidence index for a given kappa_plaintext and alphabet"""
# pylint: disable=C0103
global summary
# pylint: enable=C0103
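    # Multiplying kappa by the alphabet size normalizes it: uniformly random text scores about 1.0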
if summary["Unicode separators"]:
# Unknown alphabet size
return 0
alphabet_size = 0
if summary["Upper case letters"]:
alphabet_size += len(string.ascii_uppercase)
if summary["Lower case letters"]:
alphabet_size += len(string.ascii_lowercase)
if summary["Digits"]:
alphabet_size += len(string.digits)
if summary["Connection symbols"]:
alphabet_size += len("'-")
if summary["Punctuation"]:
alphabet_size += len(".,;:?!")
if summary["Other printable symbols"]:
alphabet_size += len("\"#$&@[\\]_`{|}~%()*+/<=>^")
if summary["Spaces"]:
alphabet_size += 1
if summary["Other spaces"]:
alphabet_size += len(string.whitespace) - 1
if summary["Control characters"]:
alphabet_size += 29
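    # Normalised index of coincidence: kappa-plaintext scaled by the deduced
    # alphabet size, so uniformly random text scores close to 1.0, while English
    # plaintext typically scores roughly 1.7 with a 26-letter alphabet.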
return kappa_plaintext * alphabet_size
################################################################################
def main():
"""The program's main entry point"""
program_name = os.path.basename(sys.argv[0])
initialize_debugging(program_name)
process_environment_variables()
arguments = process_command_line()
exit_status = 0
    # Reading from files whose names were given as arguments:
if len(arguments):
for filename in arguments:
if os.path.isfile(filename):
process_file(filename)
else:
logging.error("The argument '%s' is not a filename", filename)
exit_status = 1
# Reading from standard input as there are no arguments:
else:
for line in sys.stdin:
process_line(line)
# Displaying occurrences and frequency by n-gram:
if parameters["Show"]["N-grams"]:
if parameters["Show"]["Text"]:
print("--")
decreasing_occurrences = dict(sorted(occurrences.items(), key=lambda t: t[1], reverse=True))
for key, value in decreasing_occurrences.items():
print("'{}'\t{}\t{:.2f}%".format(key, value, (value/summary["All n-grams"])*100))
# Displaying summary:
if parameters["Show"]["Summary"]:
print("==")
for key, value in summary.items():
print("{:23s}\t{:d}".format(key, value))
print()
kappa_plaintext = compute_kappa_plaintext()
coincidence_index = compute_coincidence_index(kappa_plaintext)
print("{:23s}\t{}".format("Kappa-plaintext", kappa_plaintext))
print("{:23s}\t{}".format("Index of coincidence", coincidence_index))
sys.exit(exit_status)
if __name__ == "__main__":
main()
| [
"logging.basicConfig",
"os.environ.keys",
"getopt.getopt",
"logging.debug",
"unicode2ascii.unicode_to_ascii_string",
"os.path.isfile",
"unicode2ascii.is_unicode_mark",
"unicode2ascii.is_unicode_punctuation",
"unicode2ascii.is_unicode_symbol",
"os.path.basename",
"logging.critical",
"sys.exit",
"re.sub",
"unicode2ascii.is_unicode_number",
"logging.error",
"unicode2ascii.is_unicode_separator",
"logging.disable",
"unicode2ascii.is_unicode_letter"
]
| [((2086, 2153), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': 'console_log_format', 'level': 'logging.DEBUG'}), '(format=console_log_format, level=logging.DEBUG)\n', (2105, 2153), False, 'import logging\n'), ((2158, 2187), 'logging.disable', 'logging.disable', (['logging.INFO'], {}), '(logging.INFO)\n', (2173, 2187), False, 'import logging\n'), ((9819, 9871), 'logging.debug', 'logging.debug', (['"""process_command_line(): parameters:"""'], {}), "('process_command_line(): parameters:')\n", (9832, 9871), False, 'import logging\n'), ((9876, 9901), 'logging.debug', 'logging.debug', (['parameters'], {}), '(parameters)\n', (9889, 9901), False, 'import logging\n'), ((9906, 9967), 'logging.debug', 'logging.debug', (['"""process_command_line(): remaining_arguments:"""'], {}), "('process_command_line(): remaining_arguments:')\n", (9919, 9967), False, 'import logging\n'), ((9972, 10006), 'logging.debug', 'logging.debug', (['remaining_arguments'], {}), '(remaining_arguments)\n', (9985, 10006), False, 'import logging\n'), ((17593, 17622), 'os.path.basename', 'os.path.basename', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (17609, 17622), False, 'import os\n'), ((19106, 19127), 'sys.exit', 'sys.exit', (['exit_status'], {}), '(exit_status)\n', (19114, 19127), False, 'import sys\n'), ((5575, 5592), 'os.environ.keys', 'os.environ.keys', ([], {}), '()\n', (5590, 5592), False, 'import os\n'), ((5602, 5633), 'logging.disable', 'logging.disable', (['logging.NOTSET'], {}), '(logging.NOTSET)\n', (5617, 5633), False, 'import logging\n'), ((6291, 6353), 'getopt.getopt', 'getopt.getopt', (['sys.argv[1:]', 'character_options', 'string_options'], {}), '(sys.argv[1:], character_options, string_options)\n', (6304, 6353), False, 'import getopt\n'), ((13763, 13806), 'unicode2ascii.unicode_to_ascii_string', 'unicode2ascii.unicode_to_ascii_string', (['line'], {}), '(line)\n', (13800, 13806), False, 'import unicode2ascii\n'), ((14163, 14189), 're.sub', 're.sub', (['"""[A-Z]+"""', '""""""', 'line'], {}), "('[A-Z]+', '', line)\n", (14169, 14189), False, 'import re\n'), ((14258, 14284), 're.sub', 're.sub', (['"""[a-z]+"""', '""""""', 'line'], {}), "('[a-z]+', '', line)\n", (14264, 14284), False, 'import re\n'), ((14353, 14378), 're.sub', 're.sub', (['"""[-\']+"""', '""""""', 'line'], {}), '("[-\']+", \'\', line)\n', (14359, 14378), False, 'import re\n'), ((14435, 14461), 're.sub', 're.sub', (['"""[0-9]+"""', '""""""', 'line'], {}), "('[0-9]+', '', line)\n", (14441, 14461), False, 'import re\n'), ((14523, 14556), 're.sub', 're.sub', (['"""[\\\\.,;:!\\\\?]+"""', '""""""', 'line'], {}), "('[\\\\.,;:!\\\\?]+', '', line)\n", (14529, 14556), False, 'import re\n'), ((14628, 14686), 're.sub', 're.sub', (['"""[\\\\"#$&@\\\\[\\\\\\\\\\\\]_`{|}~%()\\\\*+/<=>^]+"""', '""""""', 'line'], {}), '(\'[\\\\"#$&@\\\\[\\\\\\\\\\\\]_`{|}~%()\\\\*+/<=>^]+\', \'\', line)\n', (14634, 14686), False, 'import re\n'), ((14737, 14785), 're.sub', 're.sub', (["('[' + string.whitespace + ']+')", '""""""', 'line'], {}), "('[' + string.whitespace + ']+', '', line)\n", (14743, 14785), False, 'import re\n'), ((15071, 15120), 're.sub', 're.sub', (["('[' + string.whitespace + ']+')", '""" """', 'line'], {}), "('[' + string.whitespace + ']+', ' ', line)\n", (15077, 15120), False, 'import re\n'), ((6424, 6447), 'logging.critical', 'logging.critical', (['error'], {}), '(error)\n', (6440, 6447), False, 'import logging\n'), ((6479, 6490), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6487, 6490), False, 'import sys\n'), ((17894, 17918), 
'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (17908, 17918), False, 'import os\n'), ((12051, 12093), 'unicode2ascii.is_unicode_letter', 'unicode2ascii.is_unicode_letter', (['character'], {}), '(character)\n', (12082, 12093), False, 'import unicode2ascii\n'), ((17993, 18055), 'logging.error', 'logging.error', (['"""The argument \'%s\' is not a filename"""', 'filename'], {}), '("The argument \'%s\' is not a filename", filename)\n', (18006, 18055), False, 'import logging\n'), ((6728, 6806), 'logging.critical', 'logging.critical', (['"""-c|--convert parameter can\'t contain [lu] at the same time"""'], {}), '("-c|--convert parameter can\'t contain [lu] at the same time")\n', (6744, 6806), False, 'import logging\n'), ((6823, 6834), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6831, 6834), False, 'import sys\n'), ((12168, 12208), 'unicode2ascii.is_unicode_mark', 'unicode2ascii.is_unicode_mark', (['character'], {}), '(character)\n', (12197, 12208), False, 'import unicode2ascii\n'), ((12281, 12323), 'unicode2ascii.is_unicode_number', 'unicode2ascii.is_unicode_number', (['character'], {}), '(character)\n', (12312, 12323), False, 'import unicode2ascii\n'), ((8498, 8575), 'logging.critical', 'logging.critical', (['"""-l|--length parameter must be a strictly positive integer"""'], {}), "('-l|--length parameter must be a strictly positive integer')\n", (8514, 8575), False, 'import logging\n'), ((8592, 8603), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (8600, 8603), False, 'import sys\n'), ((12398, 12445), 'unicode2ascii.is_unicode_punctuation', 'unicode2ascii.is_unicode_punctuation', (['character'], {}), '(character)\n', (12434, 12445), False, 'import unicode2ascii\n'), ((8734, 8820), 'logging.critical', 'logging.critical', (['"""-p|--partial parameter must be a single character among [dkj]"""'], {}), "(\n '-p|--partial parameter must be a single character among [dkj]')\n", (8750, 8820), False, 'import logging\n'), ((8832, 8843), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (8840, 8843), False, 'import sys\n'), ((12525, 12567), 'unicode2ascii.is_unicode_symbol', 'unicode2ascii.is_unicode_symbol', (['character'], {}), '(character)\n', (12556, 12567), False, 'import unicode2ascii\n'), ((12642, 12687), 'unicode2ascii.is_unicode_separator', 'unicode2ascii.is_unicode_separator', (['character'], {}), '(character)\n', (12676, 12687), False, 'import unicode2ascii\n'), ((9542, 9573), 'logging.disable', 'logging.disable', (['logging.NOTSET'], {}), '(logging.NOTSET)\n', (9557, 9573), False, 'import logging\n'), ((9655, 9666), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (9663, 9666), False, 'import sys\n'), ((9802, 9813), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (9810, 9813), False, 'import sys\n')] |
"""
"""
from rest_framework import routers
from safemasks.resources.rest.serializers import SupplierViewSet, TrustedSupplierViewSet
# Routers provide an easy way of automatically determining the URL conf.
ROUTER = routers.DefaultRouter()
ROUTER.register(r"suppliers", SupplierViewSet, "suppliers")
ROUTER.register(r"suppliers-trusted", TrustedSupplierViewSet, "suppliers-trusted")
| [
"rest_framework.routers.DefaultRouter"
]
| [((217, 240), 'rest_framework.routers.DefaultRouter', 'routers.DefaultRouter', ([], {}), '()\n', (238, 240), False, 'from rest_framework import routers\n')] |
from django.core.management import BaseCommand
import logging
# These two lines enable debugging at httplib level (requests->urllib3->http.client)
# You will see the REQUEST, including HEADERS and DATA, and RESPONSE with HEADERS but without DATA.
# The only thing missing will be the response.body which is not logged.
try:
import http.client as http_client
except ImportError:
# Python 2
import httplib as http_client
http_client.HTTPConnection.debuglevel = 1
# You must initialize logging, otherwise you won't see debug output.
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
class Command(BaseCommand):
def handle(self, *args, **options):
from exporter.tasks import GenerateModelExportTask
gmet = GenerateModelExportTask()
gmet.run(1) | [
"logging.basicConfig",
"exporter.tasks.GenerateModelExportTask",
"logging.getLogger"
]
| [((545, 566), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (564, 566), False, 'import logging\n'), ((626, 672), 'logging.getLogger', 'logging.getLogger', (['"""requests.packages.urllib3"""'], {}), "('requests.packages.urllib3')\n", (643, 672), False, 'import logging\n'), ((567, 586), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (584, 586), False, 'import logging\n'), ((883, 908), 'exporter.tasks.GenerateModelExportTask', 'GenerateModelExportTask', ([], {}), '()\n', (906, 908), False, 'from exporter.tasks import GenerateModelExportTask\n')] |
from functools import partial
import os
import pytest
import dask
import dask.array as da
from dask.utils_test import inc
from dask.highlevelgraph import HighLevelGraph, BasicLayer, Layer
from dask.blockwise import Blockwise
from dask.array.utils import assert_eq
def test_visualize(tmpdir):
pytest.importorskip("graphviz")
fn = str(tmpdir)
a = da.ones(10, chunks=(5,))
b = a + 1
c = a + 2
d = b + c
d.dask.visualize(fn)
assert os.path.exists(fn)
def test_basic():
a = {"x": 1}
b = {"y": (inc, "x")}
layers = {"a": a, "b": b}
dependencies = {"a": set(), "b": {"a"}}
hg = HighLevelGraph(layers, dependencies)
assert dict(hg) == {"x": 1, "y": (inc, "x")}
assert all(isinstance(layer, Layer) for layer in hg.layers.values())
def test_keys_values_items_methods():
a = da.ones(10, chunks=(5,))
b = a + 1
c = a + 2
d = b + c
hg = d.dask
keys, values, items = hg.keys(), hg.values(), hg.items()
assert all(isinstance(i, list) for i in [keys, values, items])
assert keys == [i for i in hg]
assert values == [hg[i] for i in hg]
assert items == [(k, v) for k, v in zip(keys, values)]
def test_cull():
a = {"x": 1, "y": (inc, "x")}
layers = {
"a": BasicLayer(
a, dependencies={"x": set(), "y": {"x"}}, global_dependencies=set()
)
}
dependencies = {"a": set()}
hg = HighLevelGraph(layers, dependencies)
culled_by_x = hg.cull({"x"})
assert dict(culled_by_x) == {"x": 1}
culled_by_y = hg.cull({"y"})
assert dict(culled_by_y) == a
@pytest.mark.parametrize("inject_dict", [True, False])
def test_map_basic_layers(inject_dict):
"""Check map_basic_layers() by injecting an inc() call"""
y = da.ones(3, chunks=(3,), dtype="int") + 40
def inject_inc(dsk):
assert isinstance(dsk, BasicLayer)
dsk = dict(dsk)
k = next(iter(dsk))
dsk[k] = (inc, dsk[k])
if inject_dict:
return dsk # map_basic_layers() should automatically convert it to a `BasicLayer`
else:
return BasicLayer(dsk)
dsk = y.__dask_graph__()
y.dask = dsk.map_basic_layers(inject_inc)
layers = list(y.dask.layers.values())
assert isinstance(layers[0], BasicLayer)
assert isinstance(layers[1], Blockwise)
assert_eq(y, [42] * 3)
@pytest.mark.parametrize("use_layer_map_task", [True, False])
def test_map_tasks(use_layer_map_task):
"""Check map_tasks() by injecting an +1 to the `40` literal"""
y = da.ones(3, chunks=(3,), dtype="int") + 40
def plus_one(tasks):
ret = []
for t in tasks:
if t == 40:
t += 1
ret.append(t)
return tuple(ret)
dsk = y.__dask_graph__()
if use_layer_map_task:
# In order to test the default map_tasks() implementation on a Blockwise Layer,
# we overwrite Blockwise.map_tasks with Layer.map_tasks
blockwise_layer = list(dsk.layers.values())[1]
blockwise_layer.map_tasks = partial(Layer.map_tasks, blockwise_layer)
y.dask = dsk.map_tasks(plus_one)
assert_eq(y, [42] * 3)
def annot_map_fn(key):
return key[1:]
@pytest.mark.parametrize(
"annotation",
[
{"worker": "alice"},
{"block_id": annot_map_fn},
],
)
def test_single_annotation(annotation):
with dask.annotate(**annotation):
A = da.ones((10, 10), chunks=(5, 5))
alayer = A.__dask_graph__().layers[A.name]
assert alayer.annotations == annotation
assert dask.config.get("annotations", None) is None
def test_multiple_annotations():
with dask.annotate(block_id=annot_map_fn):
with dask.annotate(resource="GPU"):
A = da.ones((10, 10), chunks=(5, 5))
B = A + 1
C = B + 1
assert dask.config.get("annotations", None) is None
alayer = A.__dask_graph__().layers[A.name]
blayer = B.__dask_graph__().layers[B.name]
clayer = C.__dask_graph__().layers[C.name]
assert alayer.annotations == {"resource": "GPU", "block_id": annot_map_fn}
assert blayer.annotations == {"block_id": annot_map_fn}
assert clayer.annotations is None
| [
"os.path.exists",
"dask.annotate",
"dask.highlevelgraph.HighLevelGraph",
"dask.array.utils.assert_eq",
"pytest.mark.parametrize",
"pytest.importorskip",
"functools.partial",
"dask.highlevelgraph.BasicLayer",
"dask.array.ones",
"dask.config.get"
]
| [((1597, 1650), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inject_dict"""', '[True, False]'], {}), "('inject_dict', [True, False])\n", (1620, 1650), False, 'import pytest\n'), ((2361, 2421), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_layer_map_task"""', '[True, False]'], {}), "('use_layer_map_task', [True, False])\n", (2384, 2421), False, 'import pytest\n'), ((3200, 3292), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""annotation"""', "[{'worker': 'alice'}, {'block_id': annot_map_fn}]"], {}), "('annotation', [{'worker': 'alice'}, {'block_id':\n annot_map_fn}])\n", (3223, 3292), False, 'import pytest\n'), ((300, 331), 'pytest.importorskip', 'pytest.importorskip', (['"""graphviz"""'], {}), "('graphviz')\n", (319, 331), False, 'import pytest\n'), ((361, 385), 'dask.array.ones', 'da.ones', (['(10)'], {'chunks': '(5,)'}), '(10, chunks=(5,))\n', (368, 385), True, 'import dask.array as da\n'), ((464, 482), 'os.path.exists', 'os.path.exists', (['fn'], {}), '(fn)\n', (478, 482), False, 'import os\n'), ((629, 665), 'dask.highlevelgraph.HighLevelGraph', 'HighLevelGraph', (['layers', 'dependencies'], {}), '(layers, dependencies)\n', (643, 665), False, 'from dask.highlevelgraph import HighLevelGraph, BasicLayer, Layer\n'), ((837, 861), 'dask.array.ones', 'da.ones', (['(10)'], {'chunks': '(5,)'}), '(10, chunks=(5,))\n', (844, 861), True, 'import dask.array as da\n'), ((1414, 1450), 'dask.highlevelgraph.HighLevelGraph', 'HighLevelGraph', (['layers', 'dependencies'], {}), '(layers, dependencies)\n', (1428, 1450), False, 'from dask.highlevelgraph import HighLevelGraph, BasicLayer, Layer\n'), ((2335, 2357), 'dask.array.utils.assert_eq', 'assert_eq', (['y', '([42] * 3)'], {}), '(y, [42] * 3)\n', (2344, 2357), False, 'from dask.array.utils import assert_eq\n'), ((3130, 3152), 'dask.array.utils.assert_eq', 'assert_eq', (['y', '([42] * 3)'], {}), '(y, [42] * 3)\n', (3139, 3152), False, 'from dask.array.utils import assert_eq\n'), ((1762, 1798), 'dask.array.ones', 'da.ones', (['(3)'], {'chunks': '(3,)', 'dtype': '"""int"""'}), "(3, chunks=(3,), dtype='int')\n", (1769, 1798), True, 'import dask.array as da\n'), ((2537, 2573), 'dask.array.ones', 'da.ones', (['(3)'], {'chunks': '(3,)', 'dtype': '"""int"""'}), "(3, chunks=(3,), dtype='int')\n", (2544, 2573), True, 'import dask.array as da\n'), ((3046, 3087), 'functools.partial', 'partial', (['Layer.map_tasks', 'blockwise_layer'], {}), '(Layer.map_tasks, blockwise_layer)\n', (3053, 3087), False, 'from functools import partial\n'), ((3372, 3399), 'dask.annotate', 'dask.annotate', ([], {}), '(**annotation)\n', (3385, 3399), False, 'import dask\n'), ((3413, 3445), 'dask.array.ones', 'da.ones', (['(10, 10)'], {'chunks': '(5, 5)'}), '((10, 10), chunks=(5, 5))\n', (3420, 3445), True, 'import dask.array as da\n'), ((3549, 3585), 'dask.config.get', 'dask.config.get', (['"""annotations"""', 'None'], {}), "('annotations', None)\n", (3564, 3585), False, 'import dask\n'), ((3638, 3674), 'dask.annotate', 'dask.annotate', ([], {'block_id': 'annot_map_fn'}), '(block_id=annot_map_fn)\n', (3651, 3674), False, 'import dask\n'), ((3815, 3851), 'dask.config.get', 'dask.config.get', (['"""annotations"""', 'None'], {}), "('annotations', None)\n", (3830, 3851), False, 'import dask\n'), ((2108, 2123), 'dask.highlevelgraph.BasicLayer', 'BasicLayer', (['dsk'], {}), '(dsk)\n', (2118, 2123), False, 'from dask.highlevelgraph import HighLevelGraph, BasicLayer, Layer\n'), ((3689, 3718), 'dask.annotate', 'dask.annotate', ([], {'resource': 
'"""GPU"""'}), "(resource='GPU')\n", (3702, 3718), False, 'import dask\n'), ((3736, 3768), 'dask.array.ones', 'da.ones', (['(10, 10)'], {'chunks': '(5, 5)'}), '((10, 10), chunks=(5, 5))\n', (3743, 3768), True, 'import dask.array as da\n')] |
# Requires pip install bitarray
from bitarray import bitarray
import argparse, math
def derive_transfer_function(pTransferFunctionString: str) -> list:
lTransferFunction = list(map(int, pTransferFunctionString.split(',')))
lTransferFunctionValid = True
lLengthTransferFunction = len(lTransferFunction)
for i in range(0, lLengthTransferFunction):
if i not in lTransferFunction:
lTransferFunctionValid = False
break
# end if
# end for
if not lTransferFunctionValid:
        raise Exception('Transfer function must contain every integer from 0 to N-1, where N is the length of the substitution array.')
lExponent = math.log(lLengthTransferFunction, 2)
if lExponent != math.floor(lExponent):
        raise Exception('Transfer function length must be a power of 2.')
return lTransferFunction
def print_transfer_function_table(pTransferFunction: list) -> None:
lLengthTransferFunction = len(pTransferFunction)
lNumberBits = int(math.log(lLengthTransferFunction, 2))
lFormat = '0' + str(lNumberBits) + 'b'
# print column headers
print()
for i in range(0, lNumberBits):
print("x=" + str(i) + "\t", end="")
for i in range(0, lNumberBits):
print("y=" + str(i) + "\t", end="")
print()
# print values for transfer function
for lIndex, lSubstitutionValue in enumerate(pTransferFunction):
lBinaryIndex = bitarray(format(lIndex, lFormat))
lBinarySV = bitarray(format(lSubstitutionValue, lFormat))
for i in range(0, lNumberBits):
print(int(lBinaryIndex[i]), end="")
print("\t", end="")
for i in range(0, lNumberBits):
print(int(lBinarySV[i]), end="")
print("\t", end="")
print()
print()
def print_linear_approximation_table(pTransferFunction: list) -> None:
lLengthTransferFunction = len(pTransferFunction)
lNumberBits = int(math.log(lLengthTransferFunction, 2))
lFormat = '0' + str(lNumberBits) + 'b'
# print column headers
print("\t", end="")
for i in range(0, lLengthTransferFunction):
print("b=" + str(i) + "\t", end="")
print()
for lA in range(0, lLengthTransferFunction):
# print row header
print("a=" + str(lA) + "\t", end="")
for lB in range(0, lLengthTransferFunction):
a = bitarray(format(lA, lFormat))
b = bitarray(format(lB, lFormat))
lCount = 0
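            # Count the inputs x for which the parity of a&x equals the parity
            # of b&y; a count far from half the table size indicates a strong
            # linear bias in the s-box.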
for lX, lY in enumerate(pTransferFunction):
x = bitarray(format(lX, lFormat))
y = bitarray(format(lY, lFormat))
lVectorXorOfAX = 0
for i in range(0, lNumberBits):
lVectorXorOfAX ^= int(a[i]) * int(x[i])
lVectorXorOfBY = 0
for i in range(0, lNumberBits):
lVectorXorOfBY ^= int(b[i]) * int(y[i])
lAXxorBY = lVectorXorOfAX ^ lVectorXorOfBY
if lAXxorBY == 0:
lCount += 1
# end looping through transfer function
print(str(lCount) + "\t", end="")
# end for b
print()
# end for a
if __name__ == '__main__':
lArgParser = argparse.ArgumentParser(description='Transference: A tool to help visualize s-boxes (substitution boxes or transfer functions)')
lArgParser.add_argument('-tft', '--transfer-function-table', help='Print the transfer function table for the s-box', action='store_true')
lArgParser.add_argument('-lat', '--linear-approximation-table', help='Calculate the linear transformation table for the s-box', action='store_true')
lArgParser.add_argument('-all', '--all', help='Calculate the linear transformation table for the s-box', action='store_true')
lArgParser.add_argument('-v', '--verbose', help='Enables verbose output', action='store_true')
    lArgParser.add_argument('INPUT', action='store', type=str, help='The substitution table (s-box) represented as a comma delimited list of integers. The list length must be a power of two; its base-2 logarithm is the number of bits in the substitution. Required. Example: 3,2,0,1 means substitute 3 for 0, 2 for 1, 0 for 2 and 1 for 3.')
lArgs = lArgParser.parse_args()
lTransferFunction = derive_transfer_function(lArgs.INPUT)
if lArgs.all:
lArgs.transfer_function_table = lArgs.linear_approximation_table = True
if lArgs.transfer_function_table:
print_transfer_function_table(lTransferFunction)
if lArgs.linear_approximation_table:
print_linear_approximation_table(lTransferFunction)
| [
"math.floor",
"argparse.ArgumentParser",
"math.log"
]
| [((683, 719), 'math.log', 'math.log', (['lLengthTransferFunction', '(2)'], {}), '(lLengthTransferFunction, 2)\n', (691, 719), False, 'import argparse, math\n'), ((3259, 3397), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Transference: A tool to help visualize s-boxes (substitution boxes or transfer functions)"""'}), "(description=\n 'Transference: A tool to help visualize s-boxes (substitution boxes or transfer functions)'\n )\n", (3282, 3397), False, 'import argparse, math\n'), ((740, 761), 'math.floor', 'math.floor', (['lExponent'], {}), '(lExponent)\n', (750, 761), False, 'import argparse, math\n'), ((1016, 1052), 'math.log', 'math.log', (['lLengthTransferFunction', '(2)'], {}), '(lLengthTransferFunction, 2)\n', (1024, 1052), False, 'import argparse, math\n'), ((1958, 1994), 'math.log', 'math.log', (['lLengthTransferFunction', '(2)'], {}), '(lLengthTransferFunction, 2)\n', (1966, 1994), False, 'import argparse, math\n')] |
import pytest
from aiospamc.client import Client
from aiospamc.exceptions import (
BadResponse,
UsageException,
DataErrorException,
NoInputException,
NoUserException,
NoHostException,
UnavailableException,
InternalSoftwareException,
OSErrorException,
OSFileException,
CantCreateException,
IOErrorException,
TemporaryFailureException,
ProtocolException,
NoPermissionException,
ConfigException,
ServerTimeoutException,
ResponseException,
)
from aiospamc.responses import Response
async def test_request_sent_to_connection(mock_client_dependency, mocker, hostname):
mock_req = mocker.MagicMock()
await mock_client_dependency.request(mock_req, host=hostname)
assert (
bytes(mock_req)
== mock_client_dependency.connection_factory().request.await_args[0][0]
)
async def test_request_response_sent_to_parser(
mock_client_dependency, mocker, hostname
):
mock_req = mocker.MagicMock()
connection = mock_client_dependency.connection_factory()
parser = mock_client_dependency.parser_factory()
mocker.spy(parser, "parse")
await mock_client_dependency.request(mock_req, host=hostname)
response = connection.request.return_value
assert response == parser.parse.call_args[0][0]
async def test_request_returns_response(mock_client_dependency, mocker, hostname):
mock_req = mocker.MagicMock()
connection = mock_client_dependency.connection_factory()
parser = mock_client_dependency.parser_factory()
parse_spy = mocker.spy(parser, "parse")
result = await mock_client_dependency.request(mock_req, host=hostname)
expected = Response(**parse_spy.spy_return)
assert expected == result
async def test_request_raises_usage(mock_client_response, mocker, ex_usage, hostname):
mock_client = mock_client_response(ex_usage)
with pytest.raises(UsageException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_data_err(
mock_client_response, mocker, ex_data_err, hostname
):
mock_client = mock_client_response(ex_data_err)
with pytest.raises(DataErrorException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_no_input(
mock_client_response, mocker, ex_no_input, hostname
):
mock_client = mock_client_response(ex_no_input)
with pytest.raises(NoInputException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_no_user(
mock_client_response, mocker, ex_no_user, hostname
):
mock_client = mock_client_response(ex_no_user)
with pytest.raises(NoUserException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_no_host(
mock_client_response, mocker, ex_no_host, hostname
):
mock_client = mock_client_response(ex_no_host)
with pytest.raises(NoHostException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_unavailable(
mock_client_response, mocker, ex_unavailable, hostname
):
mock_client = mock_client_response(ex_unavailable)
with pytest.raises(UnavailableException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_software(
mock_client_response, mocker, ex_software, hostname
):
mock_client = mock_client_response(ex_software)
with pytest.raises(InternalSoftwareException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_os_error(
mock_client_response, mocker, ex_os_err, hostname
):
mock_client = mock_client_response(ex_os_err)
with pytest.raises(OSErrorException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_os_file(
mock_client_response, mocker, ex_os_file, hostname
):
mock_client = mock_client_response(ex_os_file)
with pytest.raises(OSFileException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_cant_create(
mock_client_response, mocker, ex_cant_create, hostname
):
mock_client = mock_client_response(ex_cant_create)
with pytest.raises(CantCreateException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_io_error(
mock_client_response, mocker, ex_io_err, hostname
):
mock_client = mock_client_response(ex_io_err)
with pytest.raises(IOErrorException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_temporary_failure(
mock_client_response, mocker, ex_temp_fail, hostname
):
mock_client = mock_client_response(ex_temp_fail)
with pytest.raises(TemporaryFailureException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_protocol(
mock_client_response, mocker, ex_protocol, hostname
):
mock_client = mock_client_response(ex_protocol)
with pytest.raises(ProtocolException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_no_permission(
mock_client_response, mocker, ex_no_perm, hostname
):
mock_client = mock_client_response(ex_no_perm)
with pytest.raises(NoPermissionException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_config(mock_client_response, mocker, ex_config, hostname):
mock_client = mock_client_response(ex_config)
with pytest.raises(ConfigException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_timeout(
mock_client_response, mocker, ex_timeout, hostname
):
mock_client = mock_client_response(ex_timeout)
with pytest.raises(ServerTimeoutException):
await mock_client.request(mocker.MagicMock(), host=hostname)
async def test_request_raises_undefined(
mock_client_response, mocker, ex_undefined, hostname
):
mock_client = mock_client_response(ex_undefined)
with pytest.raises(ResponseException):
await mock_client.request(mocker.MagicMock(), host=hostname)
| [
"aiospamc.responses.Response",
"pytest.raises"
]
| [((1673, 1705), 'aiospamc.responses.Response', 'Response', ([], {}), '(**parse_spy.spy_return)\n', (1681, 1705), False, 'from aiospamc.responses import Response\n'), ((1885, 1914), 'pytest.raises', 'pytest.raises', (['UsageException'], {}), '(UsageException)\n', (1898, 1914), False, 'import pytest\n'), ((2148, 2181), 'pytest.raises', 'pytest.raises', (['DataErrorException'], {}), '(DataErrorException)\n', (2161, 2181), False, 'import pytest\n'), ((2415, 2446), 'pytest.raises', 'pytest.raises', (['NoInputException'], {}), '(NoInputException)\n', (2428, 2446), False, 'import pytest\n'), ((2677, 2707), 'pytest.raises', 'pytest.raises', (['NoUserException'], {}), '(NoUserException)\n', (2690, 2707), False, 'import pytest\n'), ((2938, 2968), 'pytest.raises', 'pytest.raises', (['NoHostException'], {}), '(NoHostException)\n', (2951, 2968), False, 'import pytest\n'), ((3211, 3246), 'pytest.raises', 'pytest.raises', (['UnavailableException'], {}), '(UnavailableException)\n', (3224, 3246), False, 'import pytest\n'), ((3480, 3520), 'pytest.raises', 'pytest.raises', (['InternalSoftwareException'], {}), '(InternalSoftwareException)\n', (3493, 3520), False, 'import pytest\n'), ((3750, 3781), 'pytest.raises', 'pytest.raises', (['OSErrorException'], {}), '(OSErrorException)\n', (3763, 3781), False, 'import pytest\n'), ((4012, 4042), 'pytest.raises', 'pytest.raises', (['OSFileException'], {}), '(OSFileException)\n', (4025, 4042), False, 'import pytest\n'), ((4285, 4319), 'pytest.raises', 'pytest.raises', (['CantCreateException'], {}), '(CantCreateException)\n', (4298, 4319), False, 'import pytest\n'), ((4549, 4580), 'pytest.raises', 'pytest.raises', (['IOErrorException'], {}), '(IOErrorException)\n', (4562, 4580), False, 'import pytest\n'), ((4825, 4865), 'pytest.raises', 'pytest.raises', (['TemporaryFailureException'], {}), '(TemporaryFailureException)\n', (4838, 4865), False, 'import pytest\n'), ((5099, 5131), 'pytest.raises', 'pytest.raises', (['ProtocolException'], {}), '(ProtocolException)\n', (5112, 5131), False, 'import pytest\n'), ((5368, 5404), 'pytest.raises', 'pytest.raises', (['NoPermissionException'], {}), '(NoPermissionException)\n', (5381, 5404), False, 'import pytest\n'), ((5626, 5656), 'pytest.raises', 'pytest.raises', (['ConfigException'], {}), '(ConfigException)\n', (5639, 5656), False, 'import pytest\n'), ((5887, 5924), 'pytest.raises', 'pytest.raises', (['ServerTimeoutException'], {}), '(ServerTimeoutException)\n', (5900, 5924), False, 'import pytest\n'), ((6161, 6193), 'pytest.raises', 'pytest.raises', (['ResponseException'], {}), '(ResponseException)\n', (6174, 6193), False, 'import pytest\n')] |
'''
Models for QtWidgets
'''
from collections import deque
from math import ceil
import datetime as dt
import calendar
class EventInCalendar__Model:
class Text:
@staticmethod
def getDefault():
return EventInCalendar__Model.Text()
def __init__(self, event=None, overflow=False):
if event is None:
self.init_date = dt.datetime(1, 1, 1)
self.end_date = dt.datetime(9999, 12, 31)
self.place = Event__Model.Place()
else:
if overflow:
self.init_date = dt.datetime.combine(
event.getInitDate().date(), dt.time(0, 0, 0))
else:
self.init_date = event.getInitDate()
self.end_date = event.getEndDate()
self.place = event.getPlace()
def __str__(self):
init_time, end_time = self.init_date.time(), self.end_date.time()
return ' '.join([str(i) for i in [init_time, end_time, self.place]])
@staticmethod
def colorOf(val):
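        # Map a fulfillment fraction in [0.0, 1.0] to an RGB colour string:
        # low values render reddish, a value of 1.0 (or above) renders green.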
range_list = [
(0.0, 0.2, 'rgb(178, 0, 0)'),
(0.2, 0.5, 'rgb(255, 40, 40)'),
(0.5, 0.7, 'rgb(191, 165, 0)'),
(0.7, 1.0, 'rgb(252, 224, 45)'),
(1.0, 1.1, 'rgb(46, 234, 81)'),
]
for lw, hi, c in range_list:
if lw <= val and hi > val:
return c
def __init__(self, master, overflow):
self._fulfillment = 0.0
self._overflow = overflow
self._master = master
self._event = None
def getFulFillmentStatus(self, numeric=False):
if not numeric:
return EventInCalendar__Model.colorOf(self._fulfillment)
return self._fulfillment
def setEvent(self, event):
self._event = event.getModel()
self._fulfillment = self._event.getFulFillmentStatus()
def __str__(self):
if self._event is None:
return EventInCalendar__Model.Text().__str__()
return EventInCalendar__Model.Text(self._event, self._overflow).__str__()
class Event__Model:
class Place:
def __init__(self, name='NA', people=0):
self.name = name
self.people = people
def __str__(self):
return self.name
def __init__(self, init_date, end_date, place, fulfillment=0.0):
self._init_date = init_date
self._end_date = end_date
self._place = place
self._fulfillment = fulfillment
def getFulFillmentStatus(self):
return self._fulfillment
def getInitDate(self):
return self._init_date
def getEndDate(self):
return self._end_date
def getPlace(self):
return self._place
class Date__Model:
TYPE_WEEKDAY = 0
TYPE_WEEKEND = 1
TYPE_HOLYDAY = 2
TYPE_FREEDAY = 3
TYPE_GRAYDAY = 4
@staticmethod
def colorOf(val):
color_list = [
(Date__Model.TYPE_WEEKDAY, (219, 219, 219)),
(Date__Model.TYPE_WEEKEND, (183, 183, 183)),
(Date__Model.TYPE_HOLYDAY, (183, 183, 183)),
(Date__Model.TYPE_FREEDAY, (0, 216, 255)),
(Date__Model.TYPE_GRAYDAY, (255, 255, 255)),
]
for d, c in color_list:
if d == val:
return c
return color_list[0][1]
def __init__(self, master, date):
self._master = master
self._events = list()
self._date = date
self._date_type = Date__Model.TYPE_WEEKDAY
def setDate(self, date, datetype=TYPE_WEEKDAY):
self._date = date
self._date_type = datetype
def getDate(self):
return self._date
def getDateType(self, numeric=False):
if numeric is False:
return Date__Model.colorOf(self._date_type)
return self._date_type
def addEvent(self, event):
self._events.append(event)
def getEvents(self):
return self._events
class Calendar__Model:
TYPE_MONDAY_LEADING = 0
TYPE_TUESDAY_LEADING = 1
TYPE_WEDNESDAY_LEADING = 2
TYPE_THURSDAY_LEADING = 3
TYPE_FRIDAY_LEADING = 4
TYPE_SATURDAY_LEADING = 5
TYPE_SUNDAY_LEADING = 6
MAX_DIM_X = 7
MAX_DIM_Y = 6
WEEKENDS = [5, 6]
@staticmethod
def dayOf(date, init, datatree):
'''
Returns the day of the week of a given date and the position
of that day in the calendar grid.
The returned text value of the day is recovered from the stringer module.
'''
days = datatree['str']['days']
# Get the day of the week of the selected date
datetuple = tuple([int(s) for s in str(date).split(' ')[0].split('-')])
day = days[list(zip(*days))[0].index(calendar.weekday(*datetuple))][1]
# Horizontal position in the grid is deduced from the selected leading day
days_dq = deque(days)
days_dq.rotate(7 - init)
pos_x = list(zip(*days_dq))[0].index(calendar.weekday(*datetuple))
# Vertical position is deduced from the selected leading day and the
# day of the first date of that month
firstmonthday = (datetuple[0], datetuple[1], 1)
fday = list(zip(*days_dq))[0].index(calendar.weekday(*firstmonthday))
pos_y = ceil((fday + date.day) / 7) - 1
# Return the place in the calendar grid depending on the offset
return day, pos_x, pos_y
def __init__(self, master, ctype=TYPE_SUNDAY_LEADING, holidays=list()):
'''
        Calendar constructor. A calendar is an array of dates that should
        always be full; thus an array of empty dates (6x7) is created
        initially, and this array is called the holders. A second, empty array
        of dates is created whose dates will eventually replace those of the
        respective holder dates.
        Both arrays are validated through a snapshot array; the snapshot refers
        to the dates that fill the calendar grid for the current month, be those
        dates from the actual month or from the adjacent months.
'''
self._master = master
self._type = ctype
self._holidays = holidays
        # Assume the month is the current month
self._month = tuple([dt.date.today().year, dt.date.today().month])
# Generate the snapshot for the current month
self._snapshot = self.generateSnapshot()
# Create empty dates from the snapshot
self._dates = self.generateDefaultDates()
def generateSnapshot(self):
rt = list()
if self._month is None:
return rt
# First day of month
first_day = dt.date(self._month[0], self._month[1], 1)
# Find day of first position in calendar grid
offset = Calendar__Model.dayOf(first_day, self._type, self._master.getDataTree())[1]
first_day -= dt.timedelta(offset)
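        # first_day now points at the first cell of the calendar grid; the loop
        # below fills all 42 (7 x 6) consecutive days, including days that
        # belong to adjacent months.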
# Once first position is encountered, fill the holder array
for i in range(Calendar__Model.MAX_DIM_X * Calendar__Model.MAX_DIM_Y):
rt.append(first_day)
first_day += dt.timedelta(1)
return rt
def generateDefaultDates(self):
rt = list()
for date in self._snapshot:
created_date = self._master.createDate(date)
self.setDateType(created_date)
rt.append(created_date)
return rt
def addDate(self, date):
if self._month is not None:
if date.getModel().getDate() in self._snapshot:
index = self._snapshot.index(date.getModel().getDate())
self.setDateType(date)
self._dates[index] = date
def addEventInCalendar(self, date, eic):
if self._month is not None:
if date in self._snapshot:
index = self._snapshot.index(date)
self._dates[index].addCalendarEvent(eic)
def setDateType(self, date):
current_type = date.getModel().getDateType(numeric=True)
deduced_type = Date__Model.TYPE_WEEKDAY
dt_date = date.getModel().getDate()
dt_tuple = (dt_date.year, dt_date.month, dt_date.day)
if calendar.weekday(*dt_tuple) in Calendar__Model.WEEKENDS:
deduced_type = Date__Model.TYPE_WEEKEND
if dt_date in self._holidays:
deduced_type = Date__Model.TYPE_HOLYDAY
if (dt_date.year, dt_date.month) != self._month:
deduced_type = Date__Model.TYPE_GRAYDAY
if current_type < deduced_type:
current_type = deduced_type
date.changeDateType(current_type)
def _update(self):
self._snapshot = self.generateSnapshot()
self._dates = self.generateDefaultDates()
# Add the required events
events = self._master.getEvents()
events_to_add = list()
for event in events:
if event.getModel().getInitDate().date() in self._snapshot:
events_to_add.append(event)
self._master.createEvents(events_to_add)
def setMonth(self, month):
self._month = month
self._update()
def getMonth(self):
return self._month
def monthSubtract(self):
month = self._month
if month[1] == 1:
if month[0] == 1:
return month
else:
return (month[0] - 1, 12)
else:
return (month[0], month[1] - 1)
def monthAdd(self):
month = self._month
if month[1] == 12:
if month[0] == 9999:
return month
else:
return (month[0] + 1, 1)
else:
return (month[0], month[1] + 1)
def setDataTree(self, datatree):
self._datatree = datatree
self._update()
def getDataTree(self):
return self._datatree
def posInSnapshot(self, date):
i = self._snapshot.index(date)
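        # Convert the flat snapshot index into (row, column) grid coordinates,
        # e.g. index 10 maps to row ceil(11 / 7) - 1 = 1, column 10 % 7 = 3.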
        return ceil((i + 1) / 7) - 1, i % 7
def getHolderDimensions(self):
return Calendar__Model.MAX_DIM_X, Calendar__Model.MAX_DIM_Y
def getDates(self):
return self._dates
def getType(self):
return self._type
| [
"datetime.datetime",
"math.ceil",
"collections.deque",
"datetime.time",
"datetime.date.today",
"datetime.date",
"calendar.weekday",
"datetime.timedelta"
]
| [((4929, 4940), 'collections.deque', 'deque', (['days'], {}), '(days)\n', (4934, 4940), False, 'from collections import deque\n'), ((6677, 6719), 'datetime.date', 'dt.date', (['self._month[0]', 'self._month[1]', '(1)'], {}), '(self._month[0], self._month[1], 1)\n', (6684, 6719), True, 'import datetime as dt\n'), ((6889, 6909), 'datetime.timedelta', 'dt.timedelta', (['offset'], {}), '(offset)\n', (6901, 6909), True, 'import datetime as dt\n'), ((5019, 5047), 'calendar.weekday', 'calendar.weekday', (['*datetuple'], {}), '(*datetuple)\n', (5035, 5047), False, 'import calendar\n'), ((5273, 5305), 'calendar.weekday', 'calendar.weekday', (['*firstmonthday'], {}), '(*firstmonthday)\n', (5289, 5305), False, 'import calendar\n'), ((5324, 5351), 'math.ceil', 'ceil', (['((fday + date.day) / 7)'], {}), '((fday + date.day) / 7)\n', (5328, 5351), False, 'from math import ceil\n'), ((7116, 7131), 'datetime.timedelta', 'dt.timedelta', (['(1)'], {}), '(1)\n', (7128, 7131), True, 'import datetime as dt\n'), ((8172, 8199), 'calendar.weekday', 'calendar.weekday', (['*dt_tuple'], {}), '(*dt_tuple)\n', (8188, 8199), False, 'import calendar\n'), ((388, 408), 'datetime.datetime', 'dt.datetime', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (399, 408), True, 'import datetime as dt\n'), ((441, 466), 'datetime.datetime', 'dt.datetime', (['(9999)', '(12)', '(31)'], {}), '(9999, 12, 31)\n', (452, 466), True, 'import datetime as dt\n'), ((9928, 9945), 'math.ceil', 'ceil', (['((i + 1) / 7)'], {}), '((i + 1) / 7)\n', (9932, 9945), False, 'from math import ceil\n'), ((4793, 4821), 'calendar.weekday', 'calendar.weekday', (['*datetuple'], {}), '(*datetuple)\n', (4809, 4821), False, 'import calendar\n'), ((6271, 6286), 'datetime.date.today', 'dt.date.today', ([], {}), '()\n', (6284, 6286), True, 'import datetime as dt\n'), ((6293, 6308), 'datetime.date.today', 'dt.date.today', ([], {}), '()\n', (6306, 6308), True, 'import datetime as dt\n'), ((674, 690), 'datetime.time', 'dt.time', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (681, 690), True, 'import datetime as dt\n')] |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorflow as tf
from bigdl.orca.tfpark.tf_dataset import TensorMeta
from bigdl.dllib.utils import nest
from bigdl.orca.data import SparkXShards
from bigdl.dllib.utils import log4Error
class Dataset(object):
"""
Represents a distributed set of elements backed by an RDD,
which is created by applying tensorflow dataset transformations
    on each partition.
"""
def __init__(self, xshards, create_dataset_fn):
self.xshards = xshards
self.create_dataset_fn = create_dataset_fn
def as_graph_rdd(self, batch_per_shard, drop_remainder=True):
create_dataset_fn = self.create_dataset_fn
def to_dataset(iter):
data_list = list(iter)
import tensorflow as tf
if not data_list:
return []
datasets = [create_dataset_fn(data) for data in data_list]
from functools import reduce
dataset = reduce(lambda x, y: x.concatenate(y), datasets)
dataset = dataset.batch(batch_per_shard, drop_remainder)
iterator = dataset.make_initializable_iterator()
train_next_ops = nest.flatten(iterator.get_next())
output_types = [t for t in nest.flatten(dataset.output_types)]
output_types_enum = [t.as_datatype_enum for t in output_types]
init_op_name = iterator.initializer.name
table_init_op = tf.tables_initializer().name
output_names = [op.name for op in train_next_ops]
graph = train_next_ops[0].graph
flatten_shapes = nest.flatten(dataset.output_shapes)
flatten_shapes = [shape[1:] for shape in flatten_shapes]
flatten_tensor_structure = [TensorMeta(dtype=output_types[i],
shape=list(flatten_shapes[i]),
name="zoo_input_{}".format(i))
for i in range(len(flatten_shapes))]
structure = dataset.output_types
if isinstance(structure, tf.DType):
structure = (structure,)
tensor_structure = nest.pack_sequence_as(structure,
flatten_tensor_structure)
meta_info = {
"init_op_name": init_op_name,
"table_init_op": table_init_op,
"output_names": output_names,
"output_types": output_types_enum,
"tensor_structure": tensor_structure
}
return [(bytearray(graph.as_graph_def().SerializeToString()), meta_info)]
graph_rdd_and_meta = self.xshards.rdd.mapPartitions(to_dataset)
return graph_rdd_and_meta
def as_tf_dataset_rdd(self):
create_dataset_fn = self.create_dataset_fn
def to_dataset(iter):
data_list = list(iter)
if not data_list:
return []
from tensorflow.python.distribute.coordinator.values import serialize_dataset_to_graph
datasets = [create_dataset_fn(data) for data in data_list]
from functools import reduce
dataset = reduce(lambda x, y: x.concatenate(y), datasets)
ds_def = serialize_dataset_to_graph(dataset).numpy()
elem_spec = dataset.element_spec
return [{"ds_def": ds_def, "elem_spec": elem_spec}]
tf_dataset_rdd = self.xshards.rdd.mapPartitions(to_dataset)
return tf_dataset_rdd
@staticmethod
def from_tensor_slices(xshards):
return TensorSliceDataset(xshards)
@staticmethod
def from_feature_table(tbl):
from bigdl.friesian.feature import FeatureTable
from bigdl.friesian.feature.utils import featuretable_to_xshards
log4Error.invalidInputError(isinstance(tbl, FeatureTable),
"Only Friesian FeatureTable is supported")
xshards = featuretable_to_xshards(tbl)
return TensorSliceDataset(xshards)
def map(self, map_func):
return MapDataset(self, map_func)
class TensorSliceDataset(Dataset):
def __init__(self, xshards):
assert isinstance(xshards, SparkXShards), \
"only datasets backed by a SparkXShards are supported"
self.xshards = xshards
def create_dataset_fn(data):
return tf.data.Dataset.from_tensor_slices(data)
super().__init__(xshards, create_dataset_fn)
class MapDataset(Dataset):
def __init__(self, input_dataset, map_func):
create_pre_dataset_fn = input_dataset.create_dataset_fn
def create_dataset_fn(data):
dataset = create_pre_dataset_fn(data)
return dataset.map(map_func)
super().__init__(xshards=input_dataset.xshards,
create_dataset_fn=create_dataset_fn)
| [
"tensorflow.data.Dataset.from_tensor_slices",
"bigdl.dllib.utils.nest.flatten",
"tensorflow.tables_initializer",
"tensorflow.python.distribute.coordinator.values.serialize_dataset_to_graph",
"bigdl.dllib.utils.nest.pack_sequence_as",
"bigdl.friesian.feature.utils.featuretable_to_xshards"
]
| [((4548, 4576), 'bigdl.friesian.feature.utils.featuretable_to_xshards', 'featuretable_to_xshards', (['tbl'], {}), '(tbl)\n', (4571, 4576), False, 'from bigdl.friesian.feature.utils import featuretable_to_xshards\n'), ((2163, 2198), 'bigdl.dllib.utils.nest.flatten', 'nest.flatten', (['dataset.output_shapes'], {}), '(dataset.output_shapes)\n', (2175, 2198), False, 'from bigdl.dllib.utils import nest\n'), ((2750, 2808), 'bigdl.dllib.utils.nest.pack_sequence_as', 'nest.pack_sequence_as', (['structure', 'flatten_tensor_structure'], {}), '(structure, flatten_tensor_structure)\n', (2771, 2808), False, 'from bigdl.dllib.utils import nest\n'), ((4972, 5012), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['data'], {}), '(data)\n', (5006, 5012), True, 'import tensorflow as tf\n'), ((1997, 2020), 'tensorflow.tables_initializer', 'tf.tables_initializer', ([], {}), '()\n', (2018, 2020), True, 'import tensorflow as tf\n'), ((1804, 1838), 'bigdl.dllib.utils.nest.flatten', 'nest.flatten', (['dataset.output_types'], {}), '(dataset.output_types)\n', (1816, 1838), False, 'from bigdl.dllib.utils import nest\n'), ((3852, 3887), 'tensorflow.python.distribute.coordinator.values.serialize_dataset_to_graph', 'serialize_dataset_to_graph', (['dataset'], {}), '(dataset)\n', (3878, 3887), False, 'from tensorflow.python.distribute.coordinator.values import serialize_dataset_to_graph\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File : Ampel-core/ampel/cli/AbsStockCommand.py
# License : BSD-3-Clause
# Author : vb <<EMAIL>>
# Date : 25.03.2021
# Last Modified Date: 25.03.2021
# Last Modified By : vb <<EMAIL>>
from typing import Dict, Any, Optional, Union, Literal
from ampel.cli.ArgParserBuilder import ArgParserBuilder
from ampel.cli.MaybeIntAction import MaybeIntAction
from ampel.cli.LoadJSONAction import LoadJSONAction
from ampel.cli.AbsCoreCommand import AbsCoreCommand
from ampel.mongo.utils import maybe_match_array
from ampel.model.UnitModel import UnitModel
from ampel.model.time.UnixTimeModel import UnixTimeModel
from ampel.model.time.TimeStringModel import TimeStringModel
from ampel.model.time.TimeLastRunModel import TimeLastRunModel
from ampel.model.time.TimeDeltaModel import TimeDeltaModel
from ampel.model.time.TimeConstraintModel import TimeConstraintModel
class AbsStockCommand(AbsCoreCommand, abstract=True):
"""
Base class for commands selecting/matching stock(s)
"""
@staticmethod
def get_select_args_help() -> Dict[str, str]:
return {
# Required
'config': 'Path to an ampel config file (yaml/json)',
# Optional
'secrets': 'Path to a YAML secrets store in sops format',
'log-profile': 'One of: default, compact, headerless, verbose, debug',
'id-mapper': 'Convert stock ids using the provided id mapper (ex: ZTFIdMapper)',
# Selection
'stock': 'Stock id(s) (OR matched if multi-valued)',
'channel': 'Channel(s)',
'created-after-ts': 'Created after unix timestamp',
'created-after-str': 'Created after date-time iso string',
'created-after-delta': 'Created after time delta',
'created-after-process': 'Created after last run of process with name',
'created-before-ts': 'Created before unix timestamp',
'created-before-str': 'Created before date-time iso string',
'created-before-delta': 'Created before time delta',
'created-before-process': 'Created before last run of process with name',
'updated-after-ts': 'Updated after unix timestamp',
'updated-after-str': 'Updated after date-time iso string',
'updated-after-delta': 'Updated after time delta',
'updated-after-process': 'Updated after last run of process with name',
'updated-before-ts': 'Updated before unix timestamp',
'updated-before-str': 'Updated before date-time iso string',
'updated-before-delta': 'Updated before time delta',
'updated-before-process': 'Updated before last run of process with name',
'custom-match': 'Custom mongodb match as JSON string (ex: {"body.aKey": {"$gt": 1}})',
}
def add_selection_args(self, builder: ArgParserBuilder) -> None:
# Selection args
builder.add_group('match', 'Stock selection arguments')
builder.add_arg('match', "stock", action=MaybeIntAction, nargs="+")
builder.add_x_args('match',
{'name': 'created-before-str'}, {'name': 'created-before-ts', 'type': int},
{'name': 'created-before-delta', 'action': LoadJSONAction},
{'name': 'created-before-process'}
)
builder.add_x_args('match',
{'name': 'created-after-str'}, {'name': 'created-after-ts', 'type': int},
{'name': 'created-after-delta', 'action': LoadJSONAction},
{'name': 'created-after-process'}
)
builder.add_x_args('match',
{'name': 'updated-before-str'}, {'name': 'updated-before-ts', 'type': int},
{'name': 'updated-before-delta', 'action': LoadJSONAction},
{'name': 'updated-before-process'}
)
builder.add_x_args('match',
{'name': 'updated-after-str'}, {'name': 'updated-after-ts', 'type': int},
{'name': 'updated-after-delta', 'action': LoadJSONAction},
{'name': 'updated-after-process'}
)
builder.create_logic_args('match', "channel", "Channel")
builder.create_logic_args('match', "with-tag", "Tag")
builder.create_logic_args('match', "without-tag", "Tag", excl=True)
builder.add_arg('match', "custom-match", metavar="#", action=LoadJSONAction)
def get_tag(self, args: Dict[str, Any]) -> Optional[Dict[Union[Literal['with'], Literal['without']], Dict]]:
tag: Optional[Dict[Union[Literal['with'], Literal['without']], Dict]] = None
if args.get('with_tag'):
tag = {'with': args['with_tag']}
if args.get('without_tag'):
if tag is None:
tag = {}
tag['without'] = args['without_tag']
return tag
def build_select_model(self, args: Dict[str, Any]) -> UnitModel:
conf = {
"created": self.get_time_model("created", args),
"updated": self.get_time_model("updated", args),
'channel': args['channel'],
'custom': args['custom_match']
}
if args.get('tag'):
conf['tag'] = self.get_tag(args)
if (stock := args.get('stock')):
conf['custom'] = {
'_id': stock if isinstance(stock, (int, bytes, str))
else maybe_match_array(stock)
}
return UnitModel(unit="T3StockSelector", config=conf)
def get_time_model(self, prefix: str, args: Dict[str, Any]) -> TimeConstraintModel:
d: Dict[str, Any] = {'after': None, 'before': None}
for when in ('after', 'before'):
if args.get(x := f"{prefix}_{when}_ts"):
d[when] = UnixTimeModel(match_type='unix_time', value=args[x])
elif args.get(x := f"{prefix}_{when}_str"):
d[when] = TimeStringModel(match_type='time_string', dateTimeStr=args[x], dateTimeFormat="%Y%m%dT%H%M%S")
elif args.get(x := f"{prefix}_{when}_delta"):
d[when] = TimeDeltaModel(match_type='time_delta', **args[x])
elif args.get(x := f"{prefix}_{when}_process"):
d[when] = TimeLastRunModel(match_type='time_last_run', process_name=args[x])
return TimeConstraintModel(**d)
| [
"ampel.model.time.UnixTimeModel.UnixTimeModel",
"ampel.model.time.TimeStringModel.TimeStringModel",
"ampel.model.time.TimeLastRunModel.TimeLastRunModel",
"ampel.mongo.utils.maybe_match_array",
"ampel.model.time.TimeConstraintModel.TimeConstraintModel",
"ampel.model.UnitModel.UnitModel",
"ampel.model.time.TimeDeltaModel.TimeDeltaModel"
]
| [((4814, 4860), 'ampel.model.UnitModel.UnitModel', 'UnitModel', ([], {'unit': '"""T3StockSelector"""', 'config': 'conf'}), "(unit='T3StockSelector', config=conf)\n", (4823, 4860), False, 'from ampel.model.UnitModel import UnitModel\n'), ((5562, 5586), 'ampel.model.time.TimeConstraintModel.TimeConstraintModel', 'TimeConstraintModel', ([], {}), '(**d)\n', (5581, 5586), False, 'from ampel.model.time.TimeConstraintModel import TimeConstraintModel\n'), ((5097, 5149), 'ampel.model.time.UnixTimeModel.UnixTimeModel', 'UnixTimeModel', ([], {'match_type': '"""unix_time"""', 'value': 'args[x]'}), "(match_type='unix_time', value=args[x])\n", (5110, 5149), False, 'from ampel.model.time.UnixTimeModel import UnixTimeModel\n'), ((4774, 4798), 'ampel.mongo.utils.maybe_match_array', 'maybe_match_array', (['stock'], {}), '(stock)\n', (4791, 4798), False, 'from ampel.mongo.utils import maybe_match_array\n'), ((5211, 5309), 'ampel.model.time.TimeStringModel.TimeStringModel', 'TimeStringModel', ([], {'match_type': '"""time_string"""', 'dateTimeStr': 'args[x]', 'dateTimeFormat': '"""%Y%m%dT%H%M%S"""'}), "(match_type='time_string', dateTimeStr=args[x],\n dateTimeFormat='%Y%m%dT%H%M%S')\n", (5226, 5309), False, 'from ampel.model.time.TimeStringModel import TimeStringModel\n'), ((5369, 5419), 'ampel.model.time.TimeDeltaModel.TimeDeltaModel', 'TimeDeltaModel', ([], {'match_type': '"""time_delta"""'}), "(match_type='time_delta', **args[x])\n", (5383, 5419), False, 'from ampel.model.time.TimeDeltaModel import TimeDeltaModel\n'), ((5485, 5551), 'ampel.model.time.TimeLastRunModel.TimeLastRunModel', 'TimeLastRunModel', ([], {'match_type': '"""time_last_run"""', 'process_name': 'args[x]'}), "(match_type='time_last_run', process_name=args[x])\n", (5501, 5551), False, 'from ampel.model.time.TimeLastRunModel import TimeLastRunModel\n')] |
# -*- coding: utf-8 -*-
from pytest import raises
from astral import Astral, AstralError, Location
import datetime
import pytz
def datetime_almost_equal(datetime1, datetime2, seconds=60):
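    # Consider the two datetimes equal when they differ by at most `seconds` seconds.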
dd = datetime1 - datetime2
sd = (dd.days * 24 * 60 * 60) + dd.seconds
return abs(sd) <= seconds
def test_Location_Name():
c = Location()
assert c.name == 'Greenwich'
c.name = 'London'
assert c.name == 'London'
c.name = 'Köln'
assert c.name == 'Köln'
def test_Location_Country():
c = Location()
assert c.region == 'England'
c.region = 'Australia'
assert c.region == 'Australia'
def test_Location_Elevation():
dd = Astral()
c = dd['London']
assert c.elevation == 24
def test_Location_TimezoneName():
c = Location()
assert c.timezone == 'Europe/London'
c.name = 'Asia/Riyadh'
assert c.name == 'Asia/Riyadh'
def test_Location_TimezoneNameNoLocation():
c = Location()
c._timezone_group = 'Europe'
c._timezone_location = ''
assert c.timezone == 'Europe'
def test_Location_TimezoneNameBad():
c = Location()
with raises(ValueError):
c.timezone = 'bad/timezone'
def test_Location_TimezoneLookup():
c = Location()
assert c.tz == pytz.timezone('Europe/London')
c.timezone='Europe/Stockholm'
assert c.tz == pytz.timezone('Europe/Stockholm')
def test_Location_TimezoneLookupBad():
c = Location()
c._timezone_group = 'bad'
c._timezone_location = 'timezone'
with raises(AstralError):
c.tz
def test_Location_Sun():
c = Location()
c.sun()
def test_Location_Dawn():
c = Location()
c.dawn()
def test_Location_DawnUTC():
c = Location()
c.dawn(local=False)
def test_Location_Sunrise():
c = Location()
c.sunrise()
def test_Location_SunriseUTC():
c = Location()
c.sunrise(local=False)
def test_Location_SolarNoon():
c = Location()
c.solar_noon()
def test_Location_SolarNoonUTC():
c = Location()
c.solar_noon(local=False)
def test_Location_Dusk():
c = Location()
c.dusk()
def test_Location_DuskUTC():
c = Location()
c.dusk(local=False)
def test_Location_Sunset():
c = Location()
c.sunset()
def test_Location_SunsetUTC():
c = Location()
c.sunset(local=False)
def test_Location_SolarElevation():
dd = Astral()
location = dd['Riyadh']
dt = datetime.datetime(2015, 12, 14, 8, 0, 0)
dt = location.tz.localize(dt)
elevation = location.solar_elevation(dt)
assert abs(elevation - 17) < 0.5
def test_Location_SolarAzimuth():
dd = Astral()
location = dd['Riyadh']
dt = datetime.datetime(2015, 12, 14, 8, 0, 0)
dt = location.tz.localize(dt)
azimuth = location.solar_azimuth(dt)
assert abs(azimuth - 126) < 0.5
def test_Location_TimeAtElevation():
dd = Astral()
location = dd['New Delhi']
test_data = {
datetime.date(2016, 1, 5): datetime.datetime(2016, 1, 5, 10, 0),
}
for day, cdt in test_data.items():
cdt = location.tz.localize(cdt)
dt = location.time_at_elevation(28, date=day)
assert datetime_almost_equal(dt, cdt, seconds=600)
def test_Location_SolarDepression():
c = Location(("Heidelberg", "Germany", 49.412, -8.71, "Europe/Berlin"))
c.solar_depression = 'nautical'
assert c.solar_depression == 12
c.solar_depression = 18
assert c.solar_depression == 18
def test_Location_Moon():
d = datetime.date(2017, 12, 1)
c=Location()
assert c.moon_phase(date=d) == 11
def test_Location_TzError():
with raises(AttributeError):
c = Location()
c.tz = 1
def test_Location_equality():
c1 = Location()
c2 = Location()
t = (c1, c2)
assert c1 == c2
assert len(set(t)) == 1
c1 = Location(["Oslo", "Norway", 59.9, 10.7, "Europe/Oslo", 0])
c2 = Location(["Oslo", "Norway", 59.9, 10.7, "Europe/Oslo", 0])
c3 = Location(["Stockholm", "Sweden", 59.3, 18, "Europe/Stockholm", 0])
t1 = (c1, c2)
t2 = (c1, c3)
assert c1 == c2
assert len(set(t1)) == 1
assert c1 != c3
assert len(set(t2)) == 2
| [
"datetime.datetime",
"pytz.timezone",
"astral.Location",
"astral.Astral",
"pytest.raises",
"datetime.date"
]
| [((335, 345), 'astral.Location', 'Location', ([], {}), '()\n', (343, 345), False, 'from astral import Astral, AstralError, Location\n'), ((518, 528), 'astral.Location', 'Location', ([], {}), '()\n', (526, 528), False, 'from astral import Astral, AstralError, Location\n'), ((666, 674), 'astral.Astral', 'Astral', ([], {}), '()\n', (672, 674), False, 'from astral import Astral, AstralError, Location\n'), ((770, 780), 'astral.Location', 'Location', ([], {}), '()\n', (778, 780), False, 'from astral import Astral, AstralError, Location\n'), ((938, 948), 'astral.Location', 'Location', ([], {}), '()\n', (946, 948), False, 'from astral import Astral, AstralError, Location\n'), ((1093, 1103), 'astral.Location', 'Location', ([], {}), '()\n', (1101, 1103), False, 'from astral import Astral, AstralError, Location\n'), ((1215, 1225), 'astral.Location', 'Location', ([], {}), '()\n', (1223, 1225), False, 'from astral import Astral, AstralError, Location\n'), ((1412, 1422), 'astral.Location', 'Location', ([], {}), '()\n', (1420, 1422), False, 'from astral import Astral, AstralError, Location\n'), ((1569, 1579), 'astral.Location', 'Location', ([], {}), '()\n', (1577, 1579), False, 'from astral import Astral, AstralError, Location\n'), ((1628, 1638), 'astral.Location', 'Location', ([], {}), '()\n', (1636, 1638), False, 'from astral import Astral, AstralError, Location\n'), ((1691, 1701), 'astral.Location', 'Location', ([], {}), '()\n', (1699, 1701), False, 'from astral import Astral, AstralError, Location\n'), ((1765, 1775), 'astral.Location', 'Location', ([], {}), '()\n', (1773, 1775), False, 'from astral import Astral, AstralError, Location\n'), ((1834, 1844), 'astral.Location', 'Location', ([], {}), '()\n', (1842, 1844), False, 'from astral import Astral, AstralError, Location\n'), ((1913, 1923), 'astral.Location', 'Location', ([], {}), '()\n', (1921, 1923), False, 'from astral import Astral, AstralError, Location\n'), ((1987, 1997), 'astral.Location', 'Location', ([], {}), '()\n', (1995, 1997), False, 'from astral import Astral, AstralError, Location\n'), ((2064, 2074), 'astral.Location', 'Location', ([], {}), '()\n', (2072, 2074), False, 'from astral import Astral, AstralError, Location\n'), ((2127, 2137), 'astral.Location', 'Location', ([], {}), '()\n', (2135, 2137), False, 'from astral import Astral, AstralError, Location\n'), ((2200, 2210), 'astral.Location', 'Location', ([], {}), '()\n', (2208, 2210), False, 'from astral import Astral, AstralError, Location\n'), ((2267, 2277), 'astral.Location', 'Location', ([], {}), '()\n', (2275, 2277), False, 'from astral import Astral, AstralError, Location\n'), ((2351, 2359), 'astral.Astral', 'Astral', ([], {}), '()\n', (2357, 2359), False, 'from astral import Astral, AstralError, Location\n'), ((2397, 2437), 'datetime.datetime', 'datetime.datetime', (['(2015)', '(12)', '(14)', '(8)', '(0)', '(0)'], {}), '(2015, 12, 14, 8, 0, 0)\n', (2414, 2437), False, 'import datetime\n'), ((2599, 2607), 'astral.Astral', 'Astral', ([], {}), '()\n', (2605, 2607), False, 'from astral import Astral, AstralError, Location\n'), ((2645, 2685), 'datetime.datetime', 'datetime.datetime', (['(2015)', '(12)', '(14)', '(8)', '(0)', '(0)'], {}), '(2015, 12, 14, 8, 0, 0)\n', (2662, 2685), False, 'import datetime\n'), ((2845, 2853), 'astral.Astral', 'Astral', ([], {}), '()\n', (2851, 2853), False, 'from astral import Astral, AstralError, Location\n'), ((3223, 3290), 'astral.Location', 'Location', (["('Heidelberg', 'Germany', 49.412, -8.71, 'Europe/Berlin')"], {}), "(('Heidelberg', 
'Germany', 49.412, -8.71, 'Europe/Berlin'))\n", (3231, 3290), False, 'from astral import Astral, AstralError, Location\n'), ((3464, 3490), 'datetime.date', 'datetime.date', (['(2017)', '(12)', '(1)'], {}), '(2017, 12, 1)\n', (3477, 3490), False, 'import datetime\n'), ((3497, 3507), 'astral.Location', 'Location', ([], {}), '()\n', (3505, 3507), False, 'from astral import Astral, AstralError, Location\n'), ((3691, 3701), 'astral.Location', 'Location', ([], {}), '()\n', (3699, 3701), False, 'from astral import Astral, AstralError, Location\n'), ((3711, 3721), 'astral.Location', 'Location', ([], {}), '()\n', (3719, 3721), False, 'from astral import Astral, AstralError, Location\n'), ((3797, 3855), 'astral.Location', 'Location', (["['Oslo', 'Norway', 59.9, 10.7, 'Europe/Oslo', 0]"], {}), "(['Oslo', 'Norway', 59.9, 10.7, 'Europe/Oslo', 0])\n", (3805, 3855), False, 'from astral import Astral, AstralError, Location\n'), ((3865, 3923), 'astral.Location', 'Location', (["['Oslo', 'Norway', 59.9, 10.7, 'Europe/Oslo', 0]"], {}), "(['Oslo', 'Norway', 59.9, 10.7, 'Europe/Oslo', 0])\n", (3873, 3923), False, 'from astral import Astral, AstralError, Location\n'), ((3933, 3999), 'astral.Location', 'Location', (["['Stockholm', 'Sweden', 59.3, 18, 'Europe/Stockholm', 0]"], {}), "(['Stockholm', 'Sweden', 59.3, 18, 'Europe/Stockholm', 0])\n", (3941, 3999), False, 'from astral import Astral, AstralError, Location\n'), ((1113, 1131), 'pytest.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (1119, 1131), False, 'from pytest import raises\n'), ((1245, 1275), 'pytz.timezone', 'pytz.timezone', (['"""Europe/London"""'], {}), "('Europe/London')\n", (1258, 1275), False, 'import pytz\n'), ((1329, 1362), 'pytz.timezone', 'pytz.timezone', (['"""Europe/Stockholm"""'], {}), "('Europe/Stockholm')\n", (1342, 1362), False, 'import pytz\n'), ((1500, 1519), 'pytest.raises', 'raises', (['AstralError'], {}), '(AstralError)\n', (1506, 1519), False, 'from pytest import raises\n'), ((2912, 2937), 'datetime.date', 'datetime.date', (['(2016)', '(1)', '(5)'], {}), '(2016, 1, 5)\n', (2925, 2937), False, 'import datetime\n'), ((2939, 2975), 'datetime.datetime', 'datetime.datetime', (['(2016)', '(1)', '(5)', '(10)', '(0)'], {}), '(2016, 1, 5, 10, 0)\n', (2956, 2975), False, 'import datetime\n'), ((3586, 3608), 'pytest.raises', 'raises', (['AttributeError'], {}), '(AttributeError)\n', (3592, 3608), False, 'from pytest import raises\n'), ((3622, 3632), 'astral.Location', 'Location', ([], {}), '()\n', (3630, 3632), False, 'from astral import Astral, AstralError, Location\n')] |
import gym
from gym import spaces, error, utils
from gym.utils import seeding
import numpy as np
import configparser
from os import path
import matplotlib.pyplot as plt
from matplotlib.pyplot import gca
font = {'family': 'sans-serif',
'weight': 'bold',
'size': 14}
class MappingEnv(gym.Env):
def __init__(self):
# config_file = path.join(path.dirname(__file__), "params_flock.cfg")
# config = configparser.ConfigParser()
# config.read(config_file)
# config = config['flock']
self.nearest_agents = 7
self.nearest_targets = 7
self.mean_pooling = True # normalize the adjacency matrix by the number of neighbors or not
self.centralized = True
# number states per agent
self.nx_system = 4
# number of actions per agent
self.nu = 2
# default problem parameters
self.n_agents = 100 # int(config['network_size'])
# self.comm_radius = 0.9 # float(config['comm_radius'])
self.dt = 0.1 # #float(config['system_dt'])
self.v_max = 5.0 # float(config['max_vel_init'])
self.v_bias = self.v_max
# intitialize state matrices
self.x = None
self.u = None
self.mean_vel = None
self.init_vel = None
self.greedy_action = None
self.diff = None
self.r2 = None
self.adj_mat = None
self.adj_mat_mean = None
self.diff_targets = None
self.r2_targets = None
self.target_observed = None
self.state_network = None
self.state_values = None
self.reward = None
self.max_accel = 1
# self.action_space = spaces.Box(low=-self.max_accel, high=self.max_accel, shape=(2 * self.n_agents,),
# dtype=np.float32)
#
# self.observation_space = spaces.Box(low=-np.Inf, high=np.Inf, shape=(self.n_agents, ),
# dtype=np.float32)
# target initialization
self.px_max = 100
self.py_max = 100
x = np.linspace(-1.0 * self.px_max, self.px_max, self.n_agents)
y = np.linspace(-1.0 * self.py_max, self.py_max, self.n_agents)
tx, ty = np.meshgrid(x, y)
tx = tx.reshape((-1, 1))
ty = ty.reshape((-1, 1))
self.obs_rad = 2.0
self.obs_rad2 = self.obs_rad * self.obs_rad
self.target_x = np.stack((tx, ty), axis=1).reshape((-1, 2))
self.target_unobserved = np.ones((self.n_agents * self.n_agents, 2), dtype=np.bool)
# rendering initialization
self.fig = None
self.ax = None
self.line1 = None
self.line2 = None
self.action_scalar = 10.0
self.seed()
def reset(self):
x = np.zeros((self.n_agents, self.nx_system))
self.target_unobserved = np.ones((self.n_agents * self.n_agents, 2), dtype=np.bool)
x[:, 0] = np.random.uniform(low=-self.px_max, high=self.px_max, size=(self.n_agents,))
x[:, 1] = np.random.uniform(low=-self.py_max, high=self.py_max, size=(self.n_agents,))
#bias = np.random.uniform(low=-self.v_bias, high=self.v_bias, size=(2,))
x[:, 2] = np.random.uniform(low=-self.v_max, high=self.v_max, size=(self.n_agents,)) #+ bias[0]
x[:, 3] = np.random.uniform(low=-self.v_max, high=self.v_max, size=(self.n_agents,)) #+ bias[1]
# keep good initialization
self.mean_vel = np.mean(x[:, 2:4], axis=0)
self.init_vel = x[:, 2:4]
self.x = x
# self.a_net = self.get_connectivity(self.x)
self.compute_helpers()
return self.state_values, self.state_network
def params_from_cfg(self, args):
# TODO
pass
# # self.comm_radius = args.getfloat('comm_radius')
# # self.comm_radius2 = self.comm_radius * self.comm_radius
# # self.vr = 1 / self.comm_radius2 + np.log(self.comm_radius2)
# #
# # self.n_agents = args.getint('n_agents')
# # self.r_max = self.r_max * np.sqrt(self.n_agents)
#
# # self.action_space = spaces.Box(low=-self.max_accel, high=self.max_accel, shape=(2 * self.n_agents,),
# # dtype=np.float32)
# #
# # self.observation_space = spaces.Box(low=-np.Inf, high=np.Inf, shape=(self.n_agents, self.n_features),
# # dtype=np.float32)
#
# self.v_max = args.getfloat('v_max')
# self.v_bias = self.v_max
# self.dt = args.getfloat('dt')
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, u):
# u = np.reshape(u, (-1, 2))
assert u.shape == (self.n_agents, self.nu)
u = np.clip(u, a_min=-self.max_accel, a_max=self.max_accel)
self.u = u * self.action_scalar
old_x = np.copy(self.x)
# x position
self.x[:, 0] = self.x[:, 0] + self.x[:, 2] * self.dt + self.u[:, 0] * self.dt * self.dt * 0.5
# y position
self.x[:, 1] = self.x[:, 1] + self.x[:, 3] * self.dt + self.u[:, 1] * self.dt * self.dt * 0.5
# x velocity
self.x[:, 2] = self.x[:, 2] + self.u[:, 0] * self.dt
# y velocity
self.x[:, 3] = self.x[:, 3] + self.u[:, 1] * self.dt
# clip velocities
self.x[:, 2:4] = np.clip(self.x[:, 2:4], -1.0*self.v_max, self.v_max)
dist_traveled = np.sum(np.linalg.norm(self.x[:, 0:2] - old_x[:, 0:2], axis=1))
self.compute_helpers()
done = (0 == np.sum(self.target_unobserved))
return (self.state_values, self.state_network), 10.0 * self.reward - dist_traveled, done, {}
def compute_helpers(self):
# TODO - check this, and initialize stuff in the init(), and try to make more efficient
# Neighbors computations
self.diff = self.x.reshape((self.n_agents, 1, self.nx_system)) - self.x.reshape(
(1, self.n_agents, self.nx_system))
self.r2 = np.multiply(self.diff[:, :, 0], self.diff[:, :, 0]) + np.multiply(self.diff[:, :, 1],
self.diff[:, :, 1])
np.fill_diagonal(self.r2, np.Inf)
nearest = np.argsort(self.r2, axis=1)
obs_neigh = np.zeros((self.n_agents, self.nearest_agents * 4))
self.adj_mat = np.zeros((self.n_agents, self.n_agents))
for i in range(self.nearest_agents):
ind2, ind3 = np.meshgrid(nearest[:, i], range(4), indexing='ij')
ind1, _ = np.meshgrid(range(self.n_agents), range(4), indexing='ij')
obs_neigh[:, i * self.nx_system:(i + 1) * self.nx_system] = np.reshape(
self.diff[ind1.flatten(), ind2.flatten(), ind3.flatten()], (-1, 4))
self.adj_mat[:, nearest[:, i]] = 1.0
# Normalize the adjacency matrix by the number of neighbors - results in mean pooling, instead of sum pooling
n_neighbors = np.reshape(np.sum(self.adj_mat, axis=1), (self.n_agents, 1)) # correct - checked this
n_neighbors[n_neighbors == 0] = 1
self.adj_mat_mean = self.adj_mat / n_neighbors
# Targets computations
self.diff_targets = self.x[:, 0:2].reshape((self.n_agents, 1, 2)) - self.target_x[
self.target_unobserved].reshape(
(1, -1, 2))
self.r2_targets = np.multiply(self.diff_targets[:, :, 0], self.diff_targets[:, :, 0]) + np.multiply(
self.diff_targets[:, :, 1],
self.diff_targets[:, :, 1])
nearest_targets = np.argsort(self.r2_targets, axis=1)
obs_target = np.zeros((self.n_agents, self.nearest_targets * 2))
for i in range(min(self.nearest_targets, np.shape(nearest_targets)[1])):
ind2, ind3 = np.meshgrid(nearest_targets[:, i], range(2), indexing='ij')
ind1, _ = np.meshgrid(range(self.n_agents), range(2), indexing='ij')
obs_target[:, i * 2:(i + 1) * 2] = np.reshape(
self.diff_targets[ind1.flatten(), ind2.flatten(), ind3.flatten()], (-1, 2))
self.target_observed = np.any(self.r2_targets < self.obs_rad2, axis=0).reshape((-1, 1))
self.target_unobserved[self.target_unobserved] = np.tile(np.logical_not(self.target_observed), (1, 2)).flatten()
self.reward = np.sum(self.target_observed.astype(np.int))
self.state_values = np.hstack((obs_neigh, obs_target))
self.greedy_action = -1.0 * obs_target[:, 0:2]
if self.mean_pooling:
self.state_network = self.adj_mat_mean
else:
self.state_network = self.adj_mat
def controller(self):
"""
The controller for flocking from Turner 2003.
Returns: the optimal action
"""
# TODO
# return np.zeros((self.n_agents, 2))
return self.greedy_action / 10.0
def render(self, mode='human'):
"""
Render the environment with agents as points in 2D space
"""
if self.fig is None:
plt.ion()
fig = plt.figure()
self.ax = fig.add_subplot(111)
line1, = self.ax.plot(self.x[:, 0], self.x[:, 1], 'bo')
locs = self.target_x[self.target_unobserved].reshape((-1, 2))
line2, = self.ax.plot(locs[:, 0], locs[:, 1], 'rx')
plt.ylim(-1.0 * self.py_max, 1.0 * self.py_max)
plt.xlim(-1.0 * self.px_max, 1.0 * self.px_max)
a = gca()
a.set_xticklabels(a.get_xticks(), font)
a.set_yticklabels(a.get_yticks(), font)
plt.title('GNN Controller')
self.fig = fig
self.line1 = line1
self.line2 = line2
# TODO render unobserved targets
else:
self.line1.set_xdata(self.x[:, 0])
self.line1.set_ydata(self.x[:, 1])
locs = self.target_x[self.target_unobserved].reshape((-1,2))
self.line2.set_xdata(locs[:, 0])
self.line2.set_ydata(locs[:, 1])
self.fig.canvas.draw()
self.fig.canvas.flush_events()
def close(self):
pass
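# Illustrative rollout sketch (not part of the original file); it only uses the
# public methods defined above and assumes the environment is instantiated directly:
#
#   env = MappingEnv()
#   obs, adj = env.reset()
#   done = False
#   while not done:
#       u = env.controller()                        # greedy action toward nearest targets
#       (obs, adj), reward, done, _ = env.step(u)
#       env.render()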
| [
"numpy.clip",
"numpy.hstack",
"numpy.logical_not",
"numpy.argsort",
"numpy.linalg.norm",
"gym.utils.seeding.np_random",
"numpy.mean",
"numpy.multiply",
"numpy.stack",
"numpy.linspace",
"numpy.meshgrid",
"matplotlib.pyplot.ylim",
"numpy.ones",
"matplotlib.pyplot.gca",
"numpy.fill_diagonal",
"numpy.any",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"numpy.shape",
"numpy.copy",
"numpy.sum",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.random.uniform"
]
| [((2106, 2165), 'numpy.linspace', 'np.linspace', (['(-1.0 * self.px_max)', 'self.px_max', 'self.n_agents'], {}), '(-1.0 * self.px_max, self.px_max, self.n_agents)\n', (2117, 2165), True, 'import numpy as np\n'), ((2178, 2237), 'numpy.linspace', 'np.linspace', (['(-1.0 * self.py_max)', 'self.py_max', 'self.n_agents'], {}), '(-1.0 * self.py_max, self.py_max, self.n_agents)\n', (2189, 2237), True, 'import numpy as np\n'), ((2256, 2273), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (2267, 2273), True, 'import numpy as np\n'), ((2522, 2580), 'numpy.ones', 'np.ones', (['(self.n_agents * self.n_agents, 2)'], {'dtype': 'np.bool'}), '((self.n_agents * self.n_agents, 2), dtype=np.bool)\n', (2529, 2580), True, 'import numpy as np\n'), ((2805, 2846), 'numpy.zeros', 'np.zeros', (['(self.n_agents, self.nx_system)'], {}), '((self.n_agents, self.nx_system))\n', (2813, 2846), True, 'import numpy as np\n'), ((2880, 2938), 'numpy.ones', 'np.ones', (['(self.n_agents * self.n_agents, 2)'], {'dtype': 'np.bool'}), '((self.n_agents * self.n_agents, 2), dtype=np.bool)\n', (2887, 2938), True, 'import numpy as np\n'), ((2958, 3034), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-self.px_max)', 'high': 'self.px_max', 'size': '(self.n_agents,)'}), '(low=-self.px_max, high=self.px_max, size=(self.n_agents,))\n', (2975, 3034), True, 'import numpy as np\n'), ((3053, 3129), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-self.py_max)', 'high': 'self.py_max', 'size': '(self.n_agents,)'}), '(low=-self.py_max, high=self.py_max, size=(self.n_agents,))\n', (3070, 3129), True, 'import numpy as np\n'), ((3230, 3304), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-self.v_max)', 'high': 'self.v_max', 'size': '(self.n_agents,)'}), '(low=-self.v_max, high=self.v_max, size=(self.n_agents,))\n', (3247, 3304), True, 'import numpy as np\n'), ((3334, 3408), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-self.v_max)', 'high': 'self.v_max', 'size': '(self.n_agents,)'}), '(low=-self.v_max, high=self.v_max, size=(self.n_agents,))\n', (3351, 3408), True, 'import numpy as np\n'), ((3480, 3506), 'numpy.mean', 'np.mean', (['x[:, 2:4]'], {'axis': '(0)'}), '(x[:, 2:4], axis=0)\n', (3487, 3506), True, 'import numpy as np\n'), ((4658, 4681), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (4675, 4681), False, 'from gym.utils import seeding\n'), ((4829, 4884), 'numpy.clip', 'np.clip', (['u'], {'a_min': '(-self.max_accel)', 'a_max': 'self.max_accel'}), '(u, a_min=-self.max_accel, a_max=self.max_accel)\n', (4836, 4884), True, 'import numpy as np\n'), ((4942, 4957), 'numpy.copy', 'np.copy', (['self.x'], {}), '(self.x)\n', (4949, 4957), True, 'import numpy as np\n'), ((5421, 5475), 'numpy.clip', 'np.clip', (['self.x[:, 2:4]', '(-1.0 * self.v_max)', 'self.v_max'], {}), '(self.x[:, 2:4], -1.0 * self.v_max, self.v_max)\n', (5428, 5475), True, 'import numpy as np\n'), ((6265, 6298), 'numpy.fill_diagonal', 'np.fill_diagonal', (['self.r2', 'np.Inf'], {}), '(self.r2, np.Inf)\n', (6281, 6298), True, 'import numpy as np\n'), ((6318, 6345), 'numpy.argsort', 'np.argsort', (['self.r2'], {'axis': '(1)'}), '(self.r2, axis=1)\n', (6328, 6345), True, 'import numpy as np\n'), ((6366, 6416), 'numpy.zeros', 'np.zeros', (['(self.n_agents, self.nearest_agents * 4)'], {}), '((self.n_agents, self.nearest_agents * 4))\n', (6374, 6416), True, 'import numpy as np\n'), ((6440, 6480), 'numpy.zeros', 'np.zeros', (['(self.n_agents, self.n_agents)'], {}), '((self.n_agents, 
self.n_agents))\n', (6448, 6480), True, 'import numpy as np\n'), ((7634, 7669), 'numpy.argsort', 'np.argsort', (['self.r2_targets'], {'axis': '(1)'}), '(self.r2_targets, axis=1)\n', (7644, 7669), True, 'import numpy as np\n'), ((7691, 7742), 'numpy.zeros', 'np.zeros', (['(self.n_agents, self.nearest_targets * 2)'], {}), '((self.n_agents, self.nearest_targets * 2))\n', (7699, 7742), True, 'import numpy as np\n'), ((8456, 8490), 'numpy.hstack', 'np.hstack', (['(obs_neigh, obs_target)'], {}), '((obs_neigh, obs_target))\n', (8465, 8490), True, 'import numpy as np\n'), ((5506, 5560), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.x[:, 0:2] - old_x[:, 0:2])'], {'axis': '(1)'}), '(self.x[:, 0:2] - old_x[:, 0:2], axis=1)\n', (5520, 5560), True, 'import numpy as np\n'), ((5615, 5645), 'numpy.sum', 'np.sum', (['self.target_unobserved'], {}), '(self.target_unobserved)\n', (5621, 5645), True, 'import numpy as np\n'), ((6067, 6118), 'numpy.multiply', 'np.multiply', (['self.diff[:, :, 0]', 'self.diff[:, :, 0]'], {}), '(self.diff[:, :, 0], self.diff[:, :, 0])\n', (6078, 6118), True, 'import numpy as np\n'), ((6121, 6172), 'numpy.multiply', 'np.multiply', (['self.diff[:, :, 1]', 'self.diff[:, :, 1]'], {}), '(self.diff[:, :, 1], self.diff[:, :, 1])\n', (6132, 6172), True, 'import numpy as np\n'), ((7053, 7081), 'numpy.sum', 'np.sum', (['self.adj_mat'], {'axis': '(1)'}), '(self.adj_mat, axis=1)\n', (7059, 7081), True, 'import numpy as np\n'), ((7444, 7511), 'numpy.multiply', 'np.multiply', (['self.diff_targets[:, :, 0]', 'self.diff_targets[:, :, 0]'], {}), '(self.diff_targets[:, :, 0], self.diff_targets[:, :, 0])\n', (7455, 7511), True, 'import numpy as np\n'), ((7514, 7581), 'numpy.multiply', 'np.multiply', (['self.diff_targets[:, :, 1]', 'self.diff_targets[:, :, 1]'], {}), '(self.diff_targets[:, :, 1], self.diff_targets[:, :, 1])\n', (7525, 7581), True, 'import numpy as np\n'), ((9100, 9109), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (9107, 9109), True, 'import matplotlib.pyplot as plt\n'), ((9128, 9140), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9138, 9140), True, 'import matplotlib.pyplot as plt\n'), ((9402, 9449), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.0 * self.py_max)', '(1.0 * self.py_max)'], {}), '(-1.0 * self.py_max, 1.0 * self.py_max)\n', (9410, 9449), True, 'import matplotlib.pyplot as plt\n'), ((9462, 9509), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1.0 * self.px_max)', '(1.0 * self.px_max)'], {}), '(-1.0 * self.px_max, 1.0 * self.px_max)\n', (9470, 9509), True, 'import matplotlib.pyplot as plt\n'), ((9526, 9531), 'matplotlib.pyplot.gca', 'gca', ([], {}), '()\n', (9529, 9531), False, 'from matplotlib.pyplot import gca\n'), ((9648, 9675), 'matplotlib.pyplot.title', 'plt.title', (['"""GNN Controller"""'], {}), "('GNN Controller')\n", (9657, 9675), True, 'import matplotlib.pyplot as plt\n'), ((2444, 2470), 'numpy.stack', 'np.stack', (['(tx, ty)'], {'axis': '(1)'}), '((tx, ty), axis=1)\n', (2452, 2470), True, 'import numpy as np\n'), ((8175, 8222), 'numpy.any', 'np.any', (['(self.r2_targets < self.obs_rad2)'], {'axis': '(0)'}), '(self.r2_targets < self.obs_rad2, axis=0)\n', (8181, 8222), True, 'import numpy as np\n'), ((7793, 7818), 'numpy.shape', 'np.shape', (['nearest_targets'], {}), '(nearest_targets)\n', (7801, 7818), True, 'import numpy as np\n'), ((8305, 8341), 'numpy.logical_not', 'np.logical_not', (['self.target_observed'], {}), '(self.target_observed)\n', (8319, 8341), True, 'import numpy as np\n')] |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.Lu."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class LuOpTest(test.TestCase):
@property
def float_types(self):
return set((np.float64, np.float32, np.complex64, np.complex128))
def _verifyLuBase(self, x, lower, upper, perm, verification,
output_idx_type):
lower_np, upper_np, perm_np, verification_np = self.evaluate(
[lower, upper, perm, verification])
self.assertAllClose(x, verification_np)
self.assertShapeEqual(x, lower)
self.assertShapeEqual(x, upper)
self.assertAllEqual(x.shape[:-1], perm.shape.as_list())
# Check dtypes are as expected.
self.assertEqual(x.dtype, lower_np.dtype)
self.assertEqual(x.dtype, upper_np.dtype)
self.assertEqual(output_idx_type.as_numpy_dtype, perm_np.dtype)
# Check that the permutation is valid.
if perm_np.shape[-1] > 0:
perm_reshaped = np.reshape(perm_np, (-1, perm_np.shape[-1]))
for perm_vector in perm_reshaped:
self.assertAllClose(np.arange(len(perm_vector)), np.sort(perm_vector))
def _verifyLu(self, x, output_idx_type=dtypes.int64):
# Verify that Px = LU.
lu, perm = linalg_ops.lu(x, output_idx_type=output_idx_type)
# Prepare the lower factor of shape num_rows x num_rows
lu_shape = np.array(lu.shape.as_list())
batch_shape = lu_shape[:-2]
num_rows = lu_shape[-2]
num_cols = lu_shape[-1]
lower = array_ops.matrix_band_part(lu, -1, 0)
if num_rows > num_cols:
eye = linalg_ops.eye(
num_rows, batch_shape=batch_shape, dtype=lower.dtype)
lower = array_ops.concat([lower, eye[..., num_cols:]], axis=-1)
elif num_rows < num_cols:
lower = lower[..., :num_rows]
# Fill the diagonal with ones.
ones_diag = array_ops.ones(
np.append(batch_shape, num_rows), dtype=lower.dtype)
lower = array_ops.matrix_set_diag(lower, ones_diag)
# Prepare the upper factor.
upper = array_ops.matrix_band_part(lu, 0, -1)
verification = math_ops.matmul(lower, upper)
# Permute the rows of product of the Cholesky factors.
if num_rows > 0:
# Reshape the product of the triangular factors and permutation indices
# to a single batch dimension. This makes it easy to apply
# invert_permutation and gather_nd ops.
perm_reshaped = array_ops.reshape(perm, [-1, num_rows])
verification_reshaped = array_ops.reshape(verification,
[-1, num_rows, num_cols])
# Invert the permutation in each batch.
inv_perm_reshaped = map_fn.map_fn(array_ops.invert_permutation,
perm_reshaped)
batch_size = perm_reshaped.shape.as_list()[0]
# Prepare the batch indices with the same shape as the permutation.
# The corresponding batch index is paired with each of the `num_rows`
# permutation indices.
batch_indices = math_ops.cast(
array_ops.broadcast_to(
math_ops.range(batch_size)[:, None], perm_reshaped.shape),
dtype=output_idx_type)
permuted_verification_reshaped = array_ops.gather_nd(
verification_reshaped,
array_ops.stack([batch_indices, inv_perm_reshaped], axis=-1))
# Reshape the verification matrix back to the original shape.
verification = array_ops.reshape(permuted_verification_reshaped,
lu_shape)
self._verifyLuBase(x, lower, upper, perm, verification,
output_idx_type)
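  # Illustrative summary (not part of the original test) of the identity checked by
  # _verifyLu for each batch element, using the names from the code above:
  #   lu, perm = linalg_ops.lu(x)
  #   L = matrix_band_part(lu, -1, 0) with its diagonal replaced by ones
  #   U = matrix_band_part(lu, 0, -1)
  #   x[perm, :] == L @ U   (up to numerical tolerance)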
def testBasic(self):
data = np.array([[4., -1., 2.], [-1., 6., 0], [10., 0., 5.]])
for dtype in (np.float32, np.float64):
for output_idx_type in (dtypes.int32, dtypes.int64):
self._verifyLu(data.astype(dtype), output_idx_type=output_idx_type)
for dtype in (np.complex64, np.complex128):
for output_idx_type in (dtypes.int32, dtypes.int64):
complex_data = np.tril(1j * data, -1).astype(dtype)
complex_data += np.triu(-1j * data, 1).astype(dtype)
complex_data += data
self._verifyLu(complex_data, output_idx_type=output_idx_type)
def testPivoting(self):
# This matrix triggers partial pivoting because the first diagonal entry
# is small.
data = np.array([[1e-9, 1., 0.], [1., 0., 0], [0., 1., 5]])
self._verifyLu(data.astype(np.float32))
for dtype in (np.float32, np.float64):
self._verifyLu(data.astype(dtype))
_, p = linalg_ops.lu(data)
p_val = self.evaluate([p])
# Make sure p_val is not the identity permutation.
self.assertNotAllClose(np.arange(3), p_val)
for dtype in (np.complex64, np.complex128):
complex_data = np.tril(1j * data, -1).astype(dtype)
complex_data += np.triu(-1j * data, 1).astype(dtype)
complex_data += data
self._verifyLu(complex_data)
_, p = linalg_ops.lu(data)
p_val = self.evaluate([p])
# Make sure p_val is not the identity permutation.
self.assertNotAllClose(np.arange(3), p_val)
def testInvalidMatrix(self):
# LU factorization gives an error when the input is singular.
# Note: A singular matrix may return without error but it won't be a valid
# factorization.
for dtype in self.float_types:
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(
linalg_ops.lu(
np.array([[1., 2., 3.], [2., 4., 6.], [2., 3., 4.]],
dtype=dtype)))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(
linalg_ops.lu(
np.array([[[1., 2., 3.], [2., 4., 6.], [1., 2., 3.]],
[[1., 2., 3.], [3., 4., 5.], [5., 6., 7.]]],
dtype=dtype)))
def testBatch(self):
simple_array = np.array([[[1., -1.], [2., 5.]]]) # shape (1, 2, 2)
self._verifyLu(simple_array)
self._verifyLu(np.vstack((simple_array, simple_array)))
odd_sized_array = np.array([[[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]]])
self._verifyLu(np.vstack((odd_sized_array, odd_sized_array)))
batch_size = 200
# Generate random matrices.
np.random.seed(42)
matrices = np.random.rand(batch_size, 5, 5)
self._verifyLu(matrices)
# Generate random complex valued matrices.
np.random.seed(52)
matrices = np.random.rand(batch_size, 5,
5) + 1j * np.random.rand(batch_size, 5, 5)
self._verifyLu(matrices)
def testLargeMatrix(self):
# Generate random matrices.
n = 500
np.random.seed(64)
data = np.random.rand(n, n)
self._verifyLu(data)
# Generate random complex valued matrices.
np.random.seed(129)
data = np.random.rand(n, n) + 1j * np.random.rand(n, n)
self._verifyLu(data)
@test_util.run_v1_only("b/120545219")
def testEmpty(self):
self._verifyLu(np.empty([0, 2, 2]))
self._verifyLu(np.empty([2, 0, 0]))
@test_util.run_deprecated_v1
def testConcurrentExecutesWithoutError(self):
matrix1 = random_ops.random_normal([5, 5], seed=42)
matrix2 = random_ops.random_normal([5, 5], seed=42)
lu1, p1 = linalg_ops.lu(matrix1)
lu2, p2 = linalg_ops.lu(matrix2)
lu1_val, p1_val, lu2_val, p2_val = self.evaluate([lu1, p1, lu2, p2])
self.assertAllEqual(lu1_val, lu2_val)
self.assertAllEqual(p1_val, p2_val)
class LuBenchmark(test.Benchmark):
shapes = [
(4, 4),
(10, 10),
(16, 16),
(101, 101),
(256, 256),
(1000, 1000),
(1024, 1024),
(2048, 2048),
(4096, 4096),
(513, 2, 2),
(513, 8, 8),
(513, 256, 256),
(4, 513, 2, 2),
]
def _GenerateMatrix(self, shape):
batch_shape = shape[:-2]
shape = shape[-2:]
assert shape[0] == shape[1]
n = shape[0]
matrix = np.ones(shape).astype(np.float32) / (2.0 * n) + np.diag(
np.ones(n).astype(np.float32))
return np.tile(matrix, batch_shape + (1, 1))
def benchmarkLuOp(self):
for shape in self.shapes:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix = variables.Variable(self._GenerateMatrix(shape))
lu, p = linalg_ops.lu(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(lu, p),
min_iters=25,
name="lu_cpu_{shape}".format(shape=shape))
if test.is_gpu_available(True):
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/device:GPU:0"):
matrix = variables.Variable(self._GenerateMatrix(shape))
lu, p = linalg_ops.lu(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(lu, p),
min_iters=25,
name="lu_gpu_{shape}".format(shape=shape))
if __name__ == "__main__":
test.main()
| [
"tensorflow.python.ops.map_fn.map_fn",
"numpy.random.rand",
"tensorflow.python.ops.variables.global_variables_initializer",
"numpy.array",
"tensorflow.python.ops.array_ops.matrix_set_diag",
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.array_ops.matrix_band_part",
"numpy.arange",
"numpy.reshape",
"numpy.sort",
"tensorflow.python.platform.benchmark.benchmark_config",
"numpy.empty",
"numpy.random.seed",
"numpy.vstack",
"tensorflow.python.framework.ops.device",
"numpy.triu",
"tensorflow.python.ops.linalg_ops.eye",
"tensorflow.python.ops.random_ops.random_normal",
"numpy.tile",
"numpy.ones",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.framework.test_util.run_v1_only",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.platform.test.is_gpu_available",
"tensorflow.python.ops.linalg_ops.lu",
"numpy.append",
"numpy.tril",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.array_ops.concat"
]
| [((8201, 8237), 'tensorflow.python.framework.test_util.run_v1_only', 'test_util.run_v1_only', (['"""b/120545219"""'], {}), "('b/120545219')\n", (8222, 8237), False, 'from tensorflow.python.framework import test_util\n'), ((10466, 10477), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (10475, 10477), False, 'from tensorflow.python.platform import test\n'), ((2584, 2633), 'tensorflow.python.ops.linalg_ops.lu', 'linalg_ops.lu', (['x'], {'output_idx_type': 'output_idx_type'}), '(x, output_idx_type=output_idx_type)\n', (2597, 2633), False, 'from tensorflow.python.ops import linalg_ops\n'), ((2840, 2877), 'tensorflow.python.ops.array_ops.matrix_band_part', 'array_ops.matrix_band_part', (['lu', '(-1)', '(0)'], {}), '(lu, -1, 0)\n', (2866, 2877), False, 'from tensorflow.python.ops import array_ops\n'), ((3276, 3319), 'tensorflow.python.ops.array_ops.matrix_set_diag', 'array_ops.matrix_set_diag', (['lower', 'ones_diag'], {}), '(lower, ones_diag)\n', (3301, 3319), False, 'from tensorflow.python.ops import array_ops\n'), ((3365, 3402), 'tensorflow.python.ops.array_ops.matrix_band_part', 'array_ops.matrix_band_part', (['lu', '(0)', '(-1)'], {}), '(lu, 0, -1)\n', (3391, 3402), False, 'from tensorflow.python.ops import array_ops\n'), ((3423, 3452), 'tensorflow.python.ops.math_ops.matmul', 'math_ops.matmul', (['lower', 'upper'], {}), '(lower, upper)\n', (3438, 3452), False, 'from tensorflow.python.ops import math_ops\n'), ((4990, 5052), 'numpy.array', 'np.array', (['[[4.0, -1.0, 2.0], [-1.0, 6.0, 0], [10.0, 0.0, 5.0]]'], {}), '([[4.0, -1.0, 2.0], [-1.0, 6.0, 0], [10.0, 0.0, 5.0]])\n', (4998, 5052), True, 'import numpy as np\n'), ((5683, 5742), 'numpy.array', 'np.array', (['[[1e-09, 1.0, 0.0], [1.0, 0.0, 0], [0.0, 1.0, 5]]'], {}), '([[1e-09, 1.0, 0.0], [1.0, 0.0, 0], [0.0, 1.0, 5]])\n', (5691, 5742), True, 'import numpy as np\n'), ((7223, 7260), 'numpy.array', 'np.array', (['[[[1.0, -1.0], [2.0, 5.0]]]'], {}), '([[[1.0, -1.0], [2.0, 5.0]]])\n', (7231, 7260), True, 'import numpy as np\n'), ((7391, 7454), 'numpy.array', 'np.array', (['[[[4.0, -1.0, 2.0], [-1.0, 6.0, 0], [2.0, 0.0, 5.0]]]'], {}), '([[[4.0, -1.0, 2.0], [-1.0, 6.0, 0], [2.0, 0.0, 5.0]]])\n', (7399, 7454), True, 'import numpy as np\n'), ((7572, 7590), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (7586, 7590), True, 'import numpy as np\n'), ((7606, 7638), 'numpy.random.rand', 'np.random.rand', (['batch_size', '(5)', '(5)'], {}), '(batch_size, 5, 5)\n', (7620, 7638), True, 'import numpy as np\n'), ((7720, 7738), 'numpy.random.seed', 'np.random.seed', (['(52)'], {}), '(52)\n', (7734, 7738), True, 'import numpy as np\n'), ((7964, 7982), 'numpy.random.seed', 'np.random.seed', (['(64)'], {}), '(64)\n', (7978, 7982), True, 'import numpy as np\n'), ((7994, 8014), 'numpy.random.rand', 'np.random.rand', (['n', 'n'], {}), '(n, n)\n', (8008, 8014), True, 'import numpy as np\n'), ((8092, 8111), 'numpy.random.seed', 'np.random.seed', (['(129)'], {}), '(129)\n', (8106, 8111), True, 'import numpy as np\n'), ((8435, 8476), 'tensorflow.python.ops.random_ops.random_normal', 'random_ops.random_normal', (['[5, 5]'], {'seed': '(42)'}), '([5, 5], seed=42)\n', (8459, 8476), False, 'from tensorflow.python.ops import random_ops\n'), ((8491, 8532), 'tensorflow.python.ops.random_ops.random_normal', 'random_ops.random_normal', (['[5, 5]'], {'seed': '(42)'}), '([5, 5], seed=42)\n', (8515, 8532), False, 'from tensorflow.python.ops import random_ops\n'), ((8547, 8569), 'tensorflow.python.ops.linalg_ops.lu', 'linalg_ops.lu', 
(['matrix1'], {}), '(matrix1)\n', (8560, 8569), False, 'from tensorflow.python.ops import linalg_ops\n'), ((8584, 8606), 'tensorflow.python.ops.linalg_ops.lu', 'linalg_ops.lu', (['matrix2'], {}), '(matrix2)\n', (8597, 8606), False, 'from tensorflow.python.ops import linalg_ops\n'), ((9319, 9356), 'numpy.tile', 'np.tile', (['matrix', '(batch_shape + (1, 1))'], {}), '(matrix, batch_shape + (1, 1))\n', (9326, 9356), True, 'import numpy as np\n'), ((2321, 2365), 'numpy.reshape', 'np.reshape', (['perm_np', '(-1, perm_np.shape[-1])'], {}), '(perm_np, (-1, perm_np.shape[-1]))\n', (2331, 2365), True, 'import numpy as np\n'), ((2919, 2987), 'tensorflow.python.ops.linalg_ops.eye', 'linalg_ops.eye', (['num_rows'], {'batch_shape': 'batch_shape', 'dtype': 'lower.dtype'}), '(num_rows, batch_shape=batch_shape, dtype=lower.dtype)\n', (2933, 2987), False, 'from tensorflow.python.ops import linalg_ops\n'), ((3013, 3068), 'tensorflow.python.ops.array_ops.concat', 'array_ops.concat', (['[lower, eye[..., num_cols:]]'], {'axis': '(-1)'}), '([lower, eye[..., num_cols:]], axis=-1)\n', (3029, 3068), False, 'from tensorflow.python.ops import array_ops\n'), ((3211, 3243), 'numpy.append', 'np.append', (['batch_shape', 'num_rows'], {}), '(batch_shape, num_rows)\n', (3220, 3243), True, 'import numpy as np\n'), ((3745, 3784), 'tensorflow.python.ops.array_ops.reshape', 'array_ops.reshape', (['perm', '[-1, num_rows]'], {}), '(perm, [-1, num_rows])\n', (3762, 3784), False, 'from tensorflow.python.ops import array_ops\n'), ((3815, 3872), 'tensorflow.python.ops.array_ops.reshape', 'array_ops.reshape', (['verification', '[-1, num_rows, num_cols]'], {}), '(verification, [-1, num_rows, num_cols])\n', (3832, 3872), False, 'from tensorflow.python.ops import array_ops\n'), ((3993, 4051), 'tensorflow.python.ops.map_fn.map_fn', 'map_fn.map_fn', (['array_ops.invert_permutation', 'perm_reshaped'], {}), '(array_ops.invert_permutation, perm_reshaped)\n', (4006, 4051), False, 'from tensorflow.python.ops import map_fn\n'), ((4755, 4814), 'tensorflow.python.ops.array_ops.reshape', 'array_ops.reshape', (['permuted_verification_reshaped', 'lu_shape'], {}), '(permuted_verification_reshaped, lu_shape)\n', (4772, 4814), False, 'from tensorflow.python.ops import array_ops\n'), ((5878, 5897), 'tensorflow.python.ops.linalg_ops.lu', 'linalg_ops.lu', (['data'], {}), '(data)\n', (5891, 5897), False, 'from tensorflow.python.ops import linalg_ops\n'), ((6279, 6298), 'tensorflow.python.ops.linalg_ops.lu', 'linalg_ops.lu', (['data'], {}), '(data)\n', (6292, 6298), False, 'from tensorflow.python.ops import linalg_ops\n'), ((7328, 7367), 'numpy.vstack', 'np.vstack', (['(simple_array, simple_array)'], {}), '((simple_array, simple_array))\n', (7337, 7367), True, 'import numpy as np\n'), ((7466, 7511), 'numpy.vstack', 'np.vstack', (['(odd_sized_array, odd_sized_array)'], {}), '((odd_sized_array, odd_sized_array))\n', (7475, 7511), True, 'import numpy as np\n'), ((7754, 7786), 'numpy.random.rand', 'np.random.rand', (['batch_size', '(5)', '(5)'], {}), '(batch_size, 5, 5)\n', (7768, 7786), True, 'import numpy as np\n'), ((8123, 8143), 'numpy.random.rand', 'np.random.rand', (['n', 'n'], {}), '(n, n)\n', (8137, 8143), True, 'import numpy as np\n'), ((8280, 8299), 'numpy.empty', 'np.empty', (['[0, 2, 2]'], {}), '([0, 2, 2])\n', (8288, 8299), True, 'import numpy as np\n'), ((8320, 8339), 'numpy.empty', 'np.empty', (['[2, 0, 0]'], {}), '([2, 0, 0])\n', (8328, 8339), True, 'import numpy as np\n'), ((9901, 9928), 'tensorflow.python.platform.test.is_gpu_available', 
'test.is_gpu_available', (['(True)'], {}), '(True)\n', (9922, 9928), False, 'from tensorflow.python.platform import test\n'), ((4603, 4663), 'tensorflow.python.ops.array_ops.stack', 'array_ops.stack', (['[batch_indices, inv_perm_reshaped]'], {'axis': '(-1)'}), '([batch_indices, inv_perm_reshaped], axis=-1)\n', (4618, 4663), False, 'from tensorflow.python.ops import array_ops\n'), ((6017, 6029), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (6026, 6029), True, 'import numpy as np\n'), ((6418, 6430), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (6427, 6430), True, 'import numpy as np\n'), ((7824, 7856), 'numpy.random.rand', 'np.random.rand', (['batch_size', '(5)', '(5)'], {}), '(batch_size, 5, 5)\n', (7838, 7856), True, 'import numpy as np\n'), ((8151, 8171), 'numpy.random.rand', 'np.random.rand', (['n', 'n'], {}), '(n, n)\n', (8165, 8171), True, 'import numpy as np\n'), ((9538, 9558), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (9548, 9558), False, 'from tensorflow.python.framework import ops\n'), ((9641, 9662), 'tensorflow.python.ops.linalg_ops.lu', 'linalg_ops.lu', (['matrix'], {}), '(matrix)\n', (9654, 9662), False, 'from tensorflow.python.ops import linalg_ops\n'), ((2463, 2483), 'numpy.sort', 'np.sort', (['perm_vector'], {}), '(perm_vector)\n', (2470, 2483), True, 'import numpy as np\n'), ((6108, 6132), 'numpy.tril', 'np.tril', (['(1.0j * data)', '(-1)'], {}), '(1.0j * data, -1)\n', (6115, 6132), True, 'import numpy as np\n'), ((6167, 6191), 'numpy.triu', 'np.triu', (['(-1.0j * data)', '(1)'], {}), '(-1.0j * data, 1)\n', (6174, 6191), True, 'import numpy as np\n'), ((9779, 9808), 'tensorflow.python.ops.control_flow_ops.group', 'control_flow_ops.group', (['lu', 'p'], {}), '(lu, p)\n', (9801, 9808), False, 'from tensorflow.python.ops import control_flow_ops\n'), ((10059, 10086), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""/device:GPU:0"""'], {}), "('/device:GPU:0')\n", (10069, 10086), False, 'from tensorflow.python.framework import ops\n'), ((10173, 10194), 'tensorflow.python.ops.linalg_ops.lu', 'linalg_ops.lu', (['matrix'], {}), '(matrix)\n', (10186, 10194), False, 'from tensorflow.python.ops import linalg_ops\n'), ((4408, 4434), 'tensorflow.python.ops.math_ops.range', 'math_ops.range', (['batch_size'], {}), '(batch_size)\n', (4422, 4434), False, 'from tensorflow.python.ops import math_ops\n'), ((5355, 5379), 'numpy.tril', 'np.tril', (['(1.0j * data)', '(-1)'], {}), '(1.0j * data, -1)\n', (5362, 5379), True, 'import numpy as np\n'), ((5416, 5440), 'numpy.triu', 'np.triu', (['(-1.0j * data)', '(1)'], {}), '(-1.0j * data, 1)\n', (5423, 5440), True, 'import numpy as np\n'), ((6797, 6871), 'numpy.array', 'np.array', (['[[1.0, 2.0, 3.0], [2.0, 4.0, 6.0], [2.0, 3.0, 4.0]]'], {'dtype': 'dtype'}), '([[1.0, 2.0, 3.0], [2.0, 4.0, 6.0], [2.0, 3.0, 4.0]], dtype=dtype)\n', (6805, 6871), True, 'import numpy as np\n'), ((7015, 7149), 'numpy.array', 'np.array', (['[[[1.0, 2.0, 3.0], [2.0, 4.0, 6.0], [1.0, 2.0, 3.0]], [[1.0, 2.0, 3.0], [\n 3.0, 4.0, 5.0], [5.0, 6.0, 7.0]]]'], {'dtype': 'dtype'}), '([[[1.0, 2.0, 3.0], [2.0, 4.0, 6.0], [1.0, 2.0, 3.0]], [[1.0, 2.0, \n 3.0], [3.0, 4.0, 5.0], [5.0, 6.0, 7.0]]], dtype=dtype)\n', (7023, 7149), True, 'import numpy as np\n'), ((9212, 9226), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (9219, 9226), True, 'import numpy as np\n'), ((9277, 9287), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (9284, 9287), True, 'import numpy as np\n'), ((9426, 9437), 
'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), '()\n', (9435, 9437), False, 'from tensorflow.python.framework import ops\n'), ((9487, 9515), 'tensorflow.python.platform.benchmark.benchmark_config', 'benchmark.benchmark_config', ([], {}), '()\n', (9513, 9515), False, 'from tensorflow.python.platform import benchmark\n'), ((9671, 9711), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (9709, 9711), False, 'from tensorflow.python.ops import variables\n'), ((10319, 10348), 'tensorflow.python.ops.control_flow_ops.group', 'control_flow_ops.group', (['lu', 'p'], {}), '(lu, p)\n', (10341, 10348), False, 'from tensorflow.python.ops import control_flow_ops\n'), ((9943, 9954), 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), '()\n', (9952, 9954), False, 'from tensorflow.python.framework import ops\n'), ((10006, 10034), 'tensorflow.python.platform.benchmark.benchmark_config', 'benchmark.benchmark_config', ([], {}), '()\n', (10032, 10034), False, 'from tensorflow.python.platform import benchmark\n'), ((10205, 10245), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (10243, 10245), False, 'from tensorflow.python.ops import variables\n')] |
import multiprocessing as mp
import subprocess
import shutil
import os
from ..helper import make_path_safe, thirdparty_binary, filter_scp
from ..exceptions import CorpusError
def mfcc_func(directory, job_name, mfcc_config_path): # pragma: no cover
log_directory = os.path.join(directory, 'log')
raw_mfcc_path = os.path.join(directory, 'raw_mfcc.{}.ark'.format(job_name))
raw_scp_path = os.path.join(directory, 'feats.{}.scp'.format(job_name))
log_path = os.path.join(log_directory, 'make_mfcc.{}.log'.format(job_name))
segment_path = os.path.join(directory, 'segments.{}'.format(job_name))
scp_path = os.path.join(directory, 'wav.{}.scp'.format(job_name))
with open(log_path, 'w') as f:
if os.path.exists(segment_path):
seg_proc = subprocess.Popen([thirdparty_binary('extract-segments'),
'scp,p:' + scp_path, segment_path, 'ark:-'],
stdout=subprocess.PIPE, stderr=f)
comp_proc = subprocess.Popen([thirdparty_binary('compute-mfcc-feats'), '--verbose=2',
'--config=' + mfcc_config_path,
'ark:-', 'ark:-'],
stdout=subprocess.PIPE, stderr=f, stdin=seg_proc.stdout)
else:
comp_proc = subprocess.Popen([thirdparty_binary('compute-mfcc-feats'), '--verbose=2',
'--config=' + mfcc_config_path,
'scp,p:' + scp_path, 'ark:-'],
stdout=subprocess.PIPE, stderr=f)
copy_proc = subprocess.Popen([thirdparty_binary('copy-feats'),
'--compress=true', 'ark:-',
'ark,scp:{},{}'.format(raw_mfcc_path, raw_scp_path)],
stdin=comp_proc.stdout, stderr=f)
copy_proc.wait()
def init(env):
os.environ = env
def mfcc(mfcc_directory, num_jobs, feature_config, frequency_configs):
"""
Multiprocessing function that converts wav files into MFCCs
See http://kaldi-asr.org/doc/feat.html and
http://kaldi-asr.org/doc/compute-mfcc-feats_8cc.html for more details on how
MFCCs are computed.
Also see https://github.com/kaldi-asr/kaldi/blob/master/egs/wsj/s5/steps/make_mfcc.sh
for the bash script this function was based on.
Parameters
----------
    mfcc_directory : str
        Directory to save MFCC feature matrices
    num_jobs : int
        The number of processes to use in calculation
    feature_config : object
        Feature configuration used to write the per-job MFCC config files
    frequency_configs : iterable
        (job_name, parameters) pairs consumed when writing each job's config file
    Raises
    ------
    CorpusError
        If the number of files per speaker exceeds the number of files that
        are allowed to be open on the computer (for Unix-based systems)
"""
child_env = os.environ.copy()
os.makedirs(os.path.join(mfcc_directory, 'log'), exist_ok=True)
paths = []
for j, p in frequency_configs:
paths.append(feature_config.write(mfcc_directory, j, p))
jobs = [(mfcc_directory, x, paths[x])
for x in range(num_jobs)]
with mp.Pool(processes=num_jobs, initializer=init, initargs=(child_env,)) as pool:
r = False
try:
results = [pool.apply_async(mfcc_func, args=i) for i in jobs]
output = [p.get() for p in results]
except OSError as e:
print(dir(e))
if e.errno == 24:
r = True
else:
raise
if r:
raise (CorpusError(
'There were too many files per speaker to process based on your OS settings. Please try to split your data into more speakers.'))
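# Illustrative driver sketch (not part of the original module). It assumes the
# per-job wav/segments/cmvn scp files already exist in `split_directory`, that
# `feature_config` exposes write(), raw_feature_id and feature_id as used above, and
# that `frequency_configs` is the iterable of (job_name, params) pairs mfcc() expects:
#
#   mfcc(split_directory, num_jobs, feature_config, frequency_configs)
#   apply_cmvn(split_directory, num_jobs, feature_config)
#   add_deltas(split_directory, num_jobs, feature_config)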
def apply_cmvn_func(directory, job_name, config):
normed_scp_path = os.path.join(directory, config.raw_feature_id + '.{}.scp'.format(job_name))
normed_ark_path = os.path.join(directory, config.raw_feature_id + '.{}.ark'.format(job_name))
with open(os.path.join(directory, 'log', 'norm.{}.log'.format(job_name)), 'w') as logf:
utt2spkpath = os.path.join(directory, 'utt2spk.{}'.format(job_name))
cmvnpath = os.path.join(directory, 'cmvn.{}.scp'.format(job_name))
featspath = os.path.join(directory, 'feats.{}.scp'.format(job_name))
if not os.path.exists(normed_scp_path):
cmvn_proc = subprocess.Popen([thirdparty_binary('apply-cmvn'),
'--utt2spk=ark:' + utt2spkpath,
'scp:' + cmvnpath,
'scp:' + featspath,
'ark,scp:{},{}'.format(normed_ark_path, normed_scp_path)],
stderr=logf
)
cmvn_proc.communicate()
def apply_cmvn(directory, num_jobs, config):
child_env = os.environ.copy()
jobs = [(directory, x, config)
for x in range(num_jobs)]
with mp.Pool(processes=num_jobs, initializer=init, initargs=(child_env,)) as pool:
results = [pool.apply_async(apply_cmvn_func, args=i) for i in jobs]
output = [p.get() for p in results]
def add_deltas_func(directory, job_name, config):
normed_scp_path = os.path.join(directory, config.raw_feature_id + '.{}.scp'.format(job_name))
ark_path = os.path.join(directory, config.feature_id + '.{}.ark'.format(job_name))
scp_path = os.path.join(directory, config.feature_id + '.{}.scp'.format(job_name))
with open(os.path.join(directory, 'log', 'add_deltas.{}.log'.format(job_name)), 'w') as logf:
if config.fmllr_path is not None and os.path.exists(config.fmllr_path):
deltas_proc = subprocess.Popen([thirdparty_binary('add-deltas'),
'scp:' + normed_scp_path, 'ark:-'],
stderr=logf,
stdout=subprocess.PIPE)
trans_proc = subprocess.Popen([thirdparty_binary('transform-feats'),
'ark:' + config.fmllr_path, 'ark:-',
'ark,scp:{},{}'.format(ark_path, scp_path)],
stdin=deltas_proc.stdout,
stderr=logf)
trans_proc.communicate()
else:
deltas_proc = subprocess.Popen([thirdparty_binary('add-deltas'),
'scp:' + normed_scp_path, 'ark,scp:{},{}'.format(ark_path, scp_path)],
stderr=logf)
deltas_proc.communicate()
def add_deltas(directory, num_jobs, config):
child_env = os.environ.copy()
jobs = [(directory, x, config)
for x in range(num_jobs)]
with mp.Pool(processes=num_jobs, initializer=init, initargs=(child_env,)) as pool:
results = [pool.apply_async(add_deltas_func, args=i) for i in jobs]
output = [p.get() for p in results]
def apply_lda_func(directory, job_name, config):
normed_scp_path = os.path.join(directory, config.raw_feature_id + '.{}.scp'.format(job_name))
ark_path = os.path.join(directory, config.feature_id + '.{}.ark'.format(job_name))
scp_path = os.path.join(directory, config.feature_id + '.{}.scp'.format(job_name))
ivector_scp_path = os.path.join(directory, 'ivector.{}.scp'.format(job_name))
with open(os.path.join(directory, 'log', 'lda.{}.log'.format(job_name)), 'a') as logf:
if os.path.exists(config.lda_path):
splice_feats_proc = subprocess.Popen([thirdparty_binary('splice-feats'),
'--left-context={}'.format(config.splice_left_context),
'--right-context={}'.format(config.splice_right_context),
'scp:' + normed_scp_path,
'ark:-'],
stdout=subprocess.PIPE,
stderr=logf)
if config.ivectors and os.path.exists(ivector_scp_path):
transform_feats_proc = subprocess.Popen([thirdparty_binary("transform-feats"),
config.lda_path,
'ark:-',
'ark:-'],
stdin=splice_feats_proc.stdout,
stdout=subprocess.PIPE,
stderr=logf)
paste_proc = subprocess.Popen([thirdparty_binary('paste-feats'),
'ark:-',
'scp:' + ivector_scp_path,
'ark,scp:{},{}'.format(ark_path, scp_path)],
stdin=transform_feats_proc.stdout,
stderr=logf)
paste_proc.communicate()
else:
transform_feats_proc = subprocess.Popen([thirdparty_binary("transform-feats"),
config.lda_path,
'ark:-',
'ark,scp:{},{}'.format(ark_path, scp_path)],
stdin=splice_feats_proc.stdout,
stderr=logf)
transform_feats_proc.communicate()
else:
logf.write('could not find "{}"\n'.format(config.lda_path))
splice_feats_proc = subprocess.Popen([thirdparty_binary('splice-feats'),
'--left-context={}'.format(config.splice_left_context),
'--right-context={}'.format(config.splice_right_context),
'scp:' + normed_scp_path,
'ark,scp:{},{}'.format(ark_path, scp_path)],
stderr=logf)
splice_feats_proc.communicate()
def apply_lda(directory, num_jobs, config):
jobs = [(directory, x, config)
for x in range(num_jobs)]
with mp.Pool(processes=num_jobs, initializer=init, initargs=(os.environ.copy(),)) as pool:
results = [pool.apply_async(apply_lda_func, args=i) for i in jobs]
output = [p.get() for p in results]
| [
"os.path.exists",
"os.path.join",
"multiprocessing.Pool",
"os.environ.copy"
]
| [((272, 302), 'os.path.join', 'os.path.join', (['directory', '"""log"""'], {}), "(directory, 'log')\n", (284, 302), False, 'import os\n'), ((3022, 3039), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (3037, 3039), False, 'import os\n'), ((5060, 5077), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (5075, 5077), False, 'import os\n'), ((6912, 6929), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (6927, 6929), False, 'import os\n'), ((731, 759), 'os.path.exists', 'os.path.exists', (['segment_path'], {}), '(segment_path)\n', (745, 759), False, 'import os\n'), ((3057, 3092), 'os.path.join', 'os.path.join', (['mfcc_directory', '"""log"""'], {}), "(mfcc_directory, 'log')\n", (3069, 3092), False, 'import os\n'), ((3313, 3381), 'multiprocessing.Pool', 'mp.Pool', ([], {'processes': 'num_jobs', 'initializer': 'init', 'initargs': '(child_env,)'}), '(processes=num_jobs, initializer=init, initargs=(child_env,))\n', (3320, 3381), True, 'import multiprocessing as mp\n'), ((5160, 5228), 'multiprocessing.Pool', 'mp.Pool', ([], {'processes': 'num_jobs', 'initializer': 'init', 'initargs': '(child_env,)'}), '(processes=num_jobs, initializer=init, initargs=(child_env,))\n', (5167, 5228), True, 'import multiprocessing as mp\n'), ((7012, 7080), 'multiprocessing.Pool', 'mp.Pool', ([], {'processes': 'num_jobs', 'initializer': 'init', 'initargs': '(child_env,)'}), '(processes=num_jobs, initializer=init, initargs=(child_env,))\n', (7019, 7080), True, 'import multiprocessing as mp\n'), ((7717, 7748), 'os.path.exists', 'os.path.exists', (['config.lda_path'], {}), '(config.lda_path)\n', (7731, 7748), False, 'import os\n'), ((4459, 4490), 'os.path.exists', 'os.path.exists', (['normed_scp_path'], {}), '(normed_scp_path)\n', (4473, 4490), False, 'import os\n'), ((5825, 5858), 'os.path.exists', 'os.path.exists', (['config.fmllr_path'], {}), '(config.fmllr_path)\n', (5839, 5858), False, 'import os\n'), ((8355, 8387), 'os.path.exists', 'os.path.exists', (['ivector_scp_path'], {}), '(ivector_scp_path)\n', (8369, 8387), False, 'import os\n'), ((10821, 10838), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (10836, 10838), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""sb-fastapi CLI root."""
import logging
import click
from sb_backend.cli.commands.serve import serve
@click.group()
@click.option(
"-v",
"--verbose",
help="Enable verbose logging.",
is_flag=True,
default=False,
)
def cli(**options):
"""sb-fastapi CLI root."""
if options["verbose"]:
level = logging.DEBUG
else:
level = logging.INFO
logging.basicConfig(
level=level,
format="[%(asctime)s] [%(process)s] [%(levelname)s] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S %z",
)
cli.add_command(serve)
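# Illustrative invocation (assuming this group is exposed as the project's console
# script, e.g. `sb-fastapi`): `sb-fastapi --verbose serve`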
| [
"click.group",
"click.option",
"logging.basicConfig"
]
| [((131, 144), 'click.group', 'click.group', ([], {}), '()\n', (142, 144), False, 'import click\n'), ((146, 243), 'click.option', 'click.option', (['"""-v"""', '"""--verbose"""'], {'help': '"""Enable verbose logging."""', 'is_flag': '(True)', 'default': '(False)'}), "('-v', '--verbose', help='Enable verbose logging.', is_flag=\n True, default=False)\n", (158, 243), False, 'import click\n'), ((414, 554), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'level', 'format': '"""[%(asctime)s] [%(process)s] [%(levelname)s] %(message)s"""', 'datefmt': '"""%Y-%m-%d %H:%M:%S %z"""'}), "(level=level, format=\n '[%(asctime)s] [%(process)s] [%(levelname)s] %(message)s', datefmt=\n '%Y-%m-%d %H:%M:%S %z')\n", (433, 554), False, 'import logging\n')] |
import dash
from dash import html
app = dash.Dash(__name__)
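# The layout below is a single html.Div tree: an H1 title, an H2 subtitle,
# a paragraph, and an unordered list whose last item links to the cited article.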
app.layout = html.Div(children=[html.H1('Data Science',
style = {'textAlign': 'center',
'color': '#0FD08D',
'font-size': '50px'}),
html.H2('La carrera mas sexy del siglo XXI',
style = {'textAlign': 'center',
'color' : '#009A64'}),
html.P('Factores clave:'),
html.Ul(children = [html.Li('Factor 1'),
html.Li('Factor 2'),
html.Li('Factor 3'),
html.Li(['Source: ',
html.A('https://www.excelsior.com.mx/nacional/ciencia-de-datos-la-carrera-mas-sexy-del-xxi-en-la-unam/1323946',
href = 'https://www.excelsior.com.mx/nacional/ciencia-de-datos-la-carrera-mas-sexy-del-xxi-en-la-unam/1323946')
])
])
])
if __name__ == '__main__':
app.run_server(debug=True) | [
"dash.html.H2",
"dash.html.Li",
"dash.html.H1",
"dash.html.P",
"dash.html.A",
"dash.Dash"
]
| [((41, 60), 'dash.Dash', 'dash.Dash', (['__name__'], {}), '(__name__)\n', (50, 60), False, 'import dash\n'), ((94, 193), 'dash.html.H1', 'html.H1', (['"""Data Science"""'], {'style': "{'textAlign': 'center', 'color': '#0FD08D', 'font-size': '50px'}"}), "('Data Science', style={'textAlign': 'center', 'color': '#0FD08D',\n 'font-size': '50px'})\n", (101, 193), False, 'from dash import html\n'), ((361, 460), 'dash.html.H2', 'html.H2', (['"""La carrera mas sexy del siglo XXI"""'], {'style': "{'textAlign': 'center', 'color': '#009A64'}"}), "('La carrera mas sexy del siglo XXI', style={'textAlign': 'center',\n 'color': '#009A64'})\n", (368, 460), False, 'from dash import html\n'), ((581, 606), 'dash.html.P', 'html.P', (['"""Factores clave:"""'], {}), "('Factores clave:')\n", (587, 606), False, 'from dash import html\n'), ((660, 679), 'dash.html.Li', 'html.Li', (['"""Factor 1"""'], {}), "('Factor 1')\n", (667, 679), False, 'from dash import html\n'), ((729, 748), 'dash.html.Li', 'html.Li', (['"""Factor 2"""'], {}), "('Factor 2')\n", (736, 748), False, 'from dash import html\n'), ((798, 817), 'dash.html.Li', 'html.Li', (['"""Factor 3"""'], {}), "('Factor 3')\n", (805, 817), False, 'from dash import html\n'), ((944, 1185), 'dash.html.A', 'html.A', (['"""https://www.excelsior.com.mx/nacional/ciencia-de-datos-la-carrera-mas-sexy-del-xxi-en-la-unam/1323946"""'], {'href': '"""https://www.excelsior.com.mx/nacional/ciencia-de-datos-la-carrera-mas-sexy-del-xxi-en-la-unam/1323946"""'}), "(\n 'https://www.excelsior.com.mx/nacional/ciencia-de-datos-la-carrera-mas-sexy-del-xxi-en-la-unam/1323946'\n , href=\n 'https://www.excelsior.com.mx/nacional/ciencia-de-datos-la-carrera-mas-sexy-del-xxi-en-la-unam/1323946'\n )\n", (950, 1185), False, 'from dash import html\n')] |
#Main Program
from Class import Barang
import Menu
histori = list()
listBarang = [
Barang('Rinso', 5000, 20),
Barang('Sabun', 3000, 20),
Barang('Pulpen', 2500, 20),
Barang('Tisu', 10000, 20),
Barang('Penggaris', 1000, 20)
]
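# Interactive menu loop: each option dispatches to the matching Menu.menuN
# handler, passing the shared inventory list and the transaction history;
# option 0 breaks out of the loop and ends the program.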
while True:
print('''
Menu
1. Tampilkan Barang
2. Tambahkan Barang
3. Tambah Stock Barang
4. Hapus Barang
5. Cari Barang Berdasarkan Keyword
6. Hitung Barang Belanjaan
7. Histori Keluar Masuk Barang
0. Keluar Program
''')
choice = input('Masukan No Menu: ')
if choice == '1':
Menu.menu1(listBarang)
elif choice == '2':
Menu.menu2(listBarang, histori)
elif choice == '3':
Menu.menu3(listBarang, histori)
elif choice == '4':
Menu.menu4(listBarang, histori)
elif choice == '5':
Menu.menu5(listBarang)
elif choice == '6':
Menu.menu6(listBarang, histori)
elif choice == '7':
Menu.menu7(histori)
elif choice == '0':
print('Keluar Program')
break
else:
print('Invalid Input!') | [
"Menu.menu7",
"Menu.menu2",
"Menu.menu4",
"Class.Barang",
"Menu.menu3",
"Menu.menu1",
"Menu.menu5",
"Menu.menu6"
]
| [((90, 115), 'Class.Barang', 'Barang', (['"""Rinso"""', '(5000)', '(20)'], {}), "('Rinso', 5000, 20)\n", (96, 115), False, 'from Class import Barang\n'), ((118, 143), 'Class.Barang', 'Barang', (['"""Sabun"""', '(3000)', '(20)'], {}), "('Sabun', 3000, 20)\n", (124, 143), False, 'from Class import Barang\n'), ((146, 172), 'Class.Barang', 'Barang', (['"""Pulpen"""', '(2500)', '(20)'], {}), "('Pulpen', 2500, 20)\n", (152, 172), False, 'from Class import Barang\n'), ((175, 200), 'Class.Barang', 'Barang', (['"""Tisu"""', '(10000)', '(20)'], {}), "('Tisu', 10000, 20)\n", (181, 200), False, 'from Class import Barang\n'), ((203, 232), 'Class.Barang', 'Barang', (['"""Penggaris"""', '(1000)', '(20)'], {}), "('Penggaris', 1000, 20)\n", (209, 232), False, 'from Class import Barang\n'), ((548, 570), 'Menu.menu1', 'Menu.menu1', (['listBarang'], {}), '(listBarang)\n', (558, 570), False, 'import Menu\n'), ((596, 627), 'Menu.menu2', 'Menu.menu2', (['listBarang', 'histori'], {}), '(listBarang, histori)\n', (606, 627), False, 'import Menu\n'), ((653, 684), 'Menu.menu3', 'Menu.menu3', (['listBarang', 'histori'], {}), '(listBarang, histori)\n', (663, 684), False, 'import Menu\n'), ((710, 741), 'Menu.menu4', 'Menu.menu4', (['listBarang', 'histori'], {}), '(listBarang, histori)\n', (720, 741), False, 'import Menu\n'), ((767, 789), 'Menu.menu5', 'Menu.menu5', (['listBarang'], {}), '(listBarang)\n', (777, 789), False, 'import Menu\n'), ((815, 846), 'Menu.menu6', 'Menu.menu6', (['listBarang', 'histori'], {}), '(listBarang, histori)\n', (825, 846), False, 'import Menu\n'), ((872, 891), 'Menu.menu7', 'Menu.menu7', (['histori'], {}), '(histori)\n', (882, 891), False, 'import Menu\n')] |
#!/usr/bin/env python3
"""get tag from http://demo.illustration2vec.net/."""
# note:
# - error 'ERROR: Request Entity Too Large' for file 1.1 mb
# <span style="color:red;">ERROR: Request Entity Too Large</span>
from collections import OrderedDict
from pathlib import Path
from pprint import pformat
import imghdr
import logging
import os
import shutil
import time
import urllib
import hashlib
import click
import requests
import structlog
import peewee
from PIL import Image
from i2vec_cli import models
from i2vec_cli.requests_session import Session, convert_raw_to_hydrus
from i2vec_cli.sha256 import sha256_checksum
from i2vec_cli.utils import user_data_dir, thumb_folder
def is_url(path):
"""Return True if path is url, False otherwise."""
scheme = urllib.parse.urlparse(path).scheme
if scheme in ('http', 'https'):
return True
return False
def is_ext_equal(file_ext, imghdr_ext):
"""compare file extension with result from imghdr_ext."""
if not imghdr_ext:
return False
if file_ext.lower() == '.{}'.format(imghdr_ext):
return True
if file_ext.lower() in ('.jpg', '.jpeg') and imghdr_ext == 'jpeg':
return True
return False
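# Illustrative results (not part of the original source):
#   is_ext_equal('.JPG', 'jpeg') -> True   (jpg/jpeg count as the same type)
#   is_ext_equal('.png', 'png')  -> True
#   is_ext_equal('.png', 'jpeg') -> False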
def download(url, no_clobber):
"""download url.
Args:
url: URL to be downloaded.
no_clobber: Skip download if file already exist.
Returns:
Downloaded filename or existing file if `no_clobber` is `True`
"""
log = structlog.getLogger()
basename = os.path.basename(url)
if os.path.isfile(basename) and no_clobber:
return basename
response = requests.get(url, stream=True)
with open(basename, 'wb') as out_file:
shutil.copyfileobj(response.raw, out_file)
name, ext = os.path.splitext(basename)
imghdr_ext = imghdr.what(basename)
ext_equal = is_ext_equal(file_ext=ext, imghdr_ext=imghdr_ext)
if not imghdr_ext:
log.debug("imghdr can't recognize file", file=basename)
return basename
else:
new_basename = '{}.{}'.format(name, imghdr_ext)
new_basename_exist = os.path.isfile(new_basename)
if ext_equal:
log.debug('Extension is equal', file_ext=ext, imghdr_ext=imghdr_ext)
return basename
elif not ext_equal:
if new_basename_exist and not no_clobber:
log.debug('Replace existing file', old=basename, new=new_basename)
shutil.move(basename, new_basename)
elif not new_basename_exist:
log.debug('Rename file ext', file=basename, new_ext=imghdr_ext)
shutil.move(basename, new_basename)
else:
log.debug('Not replace/rename file', no_clobber=no_clobber, new_basename=new_basename)
return new_basename
else:
log.debug(
'Unknown condition',
file=basename,
ext_equal=ext_equal,
new_basename_exist=new_basename_exist,
imghdr_ext=imghdr_ext
)
    # just return the base name if any error happens
return basename
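# Illustrative behaviour (the URL is hypothetical): download('http://host/img', no_clobber=False)
# saves the file as 'img', asks imghdr for the real image type, and renames the
# file to e.g. 'img.png' when the detected type does not match the extension.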
def validate_close_delay(ctx, param, value):
"""validate close delay."""
try:
value = int(value)
except Exception as e:
raise click.BadParameter(
'Error when validate close delay: value={}, error={}'.format(value, e))
if value >= -1:
return value
else:
raise click.BadParameter('Close delay have to be bigger or equal than -1')
def delay_close(close_delay):
"""delay when closing the program."""
log = structlog.getLogger()
if close_delay == -1:
click.pause()
elif close_delay == 0:
log.debug('No close delay')
elif close_delay > 0:
time.sleep(close_delay)
else:
log.error('Invalid close delay', v=close_delay)
def md5_checksum(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def create_thumbnail(path, thumb_path):
"""create thumbnail."""
size = 320, 320
try:
im = Image.open(path)
im.thumbnail(size)
im.save(thumb_path, "JPEG")
except IOError:
raise IOError("cannot create thumbnail for", path)
def get_print_result(path, db_path, format, session):
"""get print result."""
# compatibility
    p = path
    log = structlog.getLogger()
sha256 = sha256_checksum(p)
md5 = md5_checksum(p)
thumb_path = os.path.join(user_data_dir, 'thumb', '{}.jpg'.format(sha256))
try:
load_res = models.load_result(db=db_path, sha256=sha256, md5=md5)
except models.Image.DoesNotExist:
load_res = None
if load_res:
tags = {'prediction': load_res}
else:
tags = session.get_tags(path=p)
try:
models.save_result(
db=db_path, sha256=sha256, md5=md5, prediction=tags['prediction'])
except peewee.IntegrityError as e:
log.debug(str(e))
    except KeyError:
log.debug(str(tags))
if not os.path.isfile(thumb_path):
create_thumbnail(p, thumb_path)
if format == 'dict':
return tags
if format == 'hydrus':
return convert_raw_to_hydrus(tags)
else:
return pformat(tags['prediction'])
@click.command()
@click.option('--format', type=click.Choice(['raw', 'hydrus']), default='raw')
@click.option('-d', '--debug', is_flag=True, help="Enable debug.")
@click.option('-nc', '--no-clobber', is_flag=True, help="Skip download url when file exist.")
@click.option(
'--close-delay', default=0, help="Close delay of the program.", callback=validate_close_delay)
@click.option(
'--driver', default=None, help="Driver for browser (deprecated).",
type=click.Choice(['firefox', 'phantomjs', 'chrome', 'zope.testbrowser', 'django']))
@click.option('--dump-html', is_flag=True, help="Dump html table for debugging (deprecated).")
@click.argument('path', nargs=-1)
def main(format, path, debug, no_clobber, close_delay, driver=None, dump_html=False):
"""get tag from illustration2vec."""
if debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
structlog.configure_once(logger_factory=structlog.stdlib.LoggerFactory())
log = structlog.getLogger()
if not path:
raise ValueError('PATH required.')
# init folder
os.makedirs(user_data_dir, exist_ok=True)
os.makedirs(thumb_folder, exist_ok=True)
# database
db_path = os.path.join(user_data_dir, 'main.db')
if not os.path.isfile(db_path):
Path(db_path).touch()
models.database.init(db_path)
try:
models.init_all_tables()
except peewee.OperationalError:
log.debug('Table already created')
session = Session(driver=driver)
try:
for p in path:
if os.path.isfile(p):
print('path:{}'.format(os.path.basename(p)))
elif is_url(p):
print('url:{}'.format(p))
p = download(p, no_clobber=no_clobber)
else:
log.error('Unknown path format or path is not exist', path=p)
continue
result = get_print_result(
path=p, db_path=db_path, format=format, session=session)
print(result)
finally:
delay_close(close_delay)
if hasattr(session, 'browser'):
session.browser.quit()
if __name__ == '__main__':
main()
| [
"click.Choice",
"i2vec_cli.requests_session.Session",
"i2vec_cli.models.init_all_tables",
"time.sleep",
"i2vec_cli.models.database.init",
"i2vec_cli.sha256.sha256_checksum",
"click.BadParameter",
"shutil.move",
"click.option",
"pathlib.Path",
"i2vec_cli.requests_session.convert_raw_to_hydrus",
"click.command",
"click.argument",
"hashlib.md5",
"shutil.copyfileobj",
"os.path.splitext",
"requests.get",
"pprint.pformat",
"os.path.isfile",
"click.pause",
"logging.basicConfig",
"structlog.getLogger",
"structlog.stdlib.LoggerFactory",
"PIL.Image.open",
"urllib.parse.urlparse",
"os.makedirs",
"i2vec_cli.models.load_result",
"os.path.join",
"imghdr.what",
"os.path.basename",
"i2vec_cli.models.save_result"
]
| [((5265, 5280), 'click.command', 'click.command', ([], {}), '()\n', (5278, 5280), False, 'import click\n'), ((5361, 5426), 'click.option', 'click.option', (['"""-d"""', '"""--debug"""'], {'is_flag': '(True)', 'help': '"""Enable debug."""'}), "('-d', '--debug', is_flag=True, help='Enable debug.')\n", (5373, 5426), False, 'import click\n'), ((5428, 5525), 'click.option', 'click.option', (['"""-nc"""', '"""--no-clobber"""'], {'is_flag': '(True)', 'help': '"""Skip download url when file exist."""'}), "('-nc', '--no-clobber', is_flag=True, help=\n 'Skip download url when file exist.')\n", (5440, 5525), False, 'import click\n'), ((5522, 5633), 'click.option', 'click.option', (['"""--close-delay"""'], {'default': '(0)', 'help': '"""Close delay of the program."""', 'callback': 'validate_close_delay'}), "('--close-delay', default=0, help='Close delay of the program.',\n callback=validate_close_delay)\n", (5534, 5633), False, 'import click\n'), ((5811, 5909), 'click.option', 'click.option', (['"""--dump-html"""'], {'is_flag': '(True)', 'help': '"""Dump html table for debugging (deprecated)."""'}), "('--dump-html', is_flag=True, help=\n 'Dump html table for debugging (deprecated).')\n", (5823, 5909), False, 'import click\n'), ((5906, 5938), 'click.argument', 'click.argument', (['"""path"""'], {'nargs': '(-1)'}), "('path', nargs=-1)\n", (5920, 5938), False, 'import click\n'), ((1462, 1483), 'structlog.getLogger', 'structlog.getLogger', ([], {}), '()\n', (1481, 1483), False, 'import structlog\n'), ((1500, 1521), 'os.path.basename', 'os.path.basename', (['url'], {}), '(url)\n', (1516, 1521), False, 'import os\n'), ((1610, 1640), 'requests.get', 'requests.get', (['url'], {'stream': '(True)'}), '(url, stream=True)\n', (1622, 1640), False, 'import requests\n'), ((1751, 1777), 'os.path.splitext', 'os.path.splitext', (['basename'], {}), '(basename)\n', (1767, 1777), False, 'import os\n'), ((1795, 1816), 'imghdr.what', 'imghdr.what', (['basename'], {}), '(basename)\n', (1806, 1816), False, 'import imghdr\n'), ((3505, 3526), 'structlog.getLogger', 'structlog.getLogger', ([], {}), '()\n', (3524, 3526), False, 'import structlog\n'), ((3804, 3817), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (3815, 3817), False, 'import hashlib\n'), ((4374, 4392), 'i2vec_cli.sha256.sha256_checksum', 'sha256_checksum', (['p'], {}), '(p)\n', (4389, 4392), False, 'from i2vec_cli.sha256 import sha256_checksum\n'), ((6275, 6296), 'structlog.getLogger', 'structlog.getLogger', ([], {}), '()\n', (6294, 6296), False, 'import structlog\n'), ((6381, 6422), 'os.makedirs', 'os.makedirs', (['user_data_dir'], {'exist_ok': '(True)'}), '(user_data_dir, exist_ok=True)\n', (6392, 6422), False, 'import os\n'), ((6427, 6467), 'os.makedirs', 'os.makedirs', (['thumb_folder'], {'exist_ok': '(True)'}), '(thumb_folder, exist_ok=True)\n', (6438, 6467), False, 'import os\n'), ((6498, 6536), 'os.path.join', 'os.path.join', (['user_data_dir', '"""main.db"""'], {}), "(user_data_dir, 'main.db')\n", (6510, 6536), False, 'import os\n'), ((6607, 6636), 'i2vec_cli.models.database.init', 'models.database.init', (['db_path'], {}), '(db_path)\n', (6627, 6636), False, 'from i2vec_cli import models\n'), ((6773, 6795), 'i2vec_cli.requests_session.Session', 'Session', ([], {'driver': 'driver'}), '(driver=driver)\n', (6780, 6795), False, 'from i2vec_cli.requests_session import Session, convert_raw_to_hydrus\n'), ((765, 792), 'urllib.parse.urlparse', 'urllib.parse.urlparse', (['path'], {}), '(path)\n', (786, 792), False, 'import urllib\n'), ((1529, 1553), 
'os.path.isfile', 'os.path.isfile', (['basename'], {}), '(basename)\n', (1543, 1553), False, 'import os\n'), ((1692, 1734), 'shutil.copyfileobj', 'shutil.copyfileobj', (['response.raw', 'out_file'], {}), '(response.raw, out_file)\n', (1710, 1734), False, 'import shutil\n'), ((2090, 2118), 'os.path.isfile', 'os.path.isfile', (['new_basename'], {}), '(new_basename)\n', (2104, 2118), False, 'import os\n'), ((3352, 3420), 'click.BadParameter', 'click.BadParameter', (['"""Close delay have to be bigger or equal than -1"""'], {}), "('Close delay have to be bigger or equal than -1')\n", (3370, 3420), False, 'import click\n'), ((3561, 3574), 'click.pause', 'click.pause', ([], {}), '()\n', (3572, 3574), False, 'import click\n'), ((4084, 4100), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (4094, 4100), False, 'from PIL import Image\n'), ((4526, 4580), 'i2vec_cli.models.load_result', 'models.load_result', ([], {'db': 'db_path', 'sha256': 'sha256', 'md5': 'md5'}), '(db=db_path, sha256=sha256, md5=md5)\n', (4544, 4580), False, 'from i2vec_cli import models\n'), ((5025, 5051), 'os.path.isfile', 'os.path.isfile', (['thumb_path'], {}), '(thumb_path)\n', (5039, 5051), False, 'import os\n'), ((5181, 5208), 'i2vec_cli.requests_session.convert_raw_to_hydrus', 'convert_raw_to_hydrus', (['tags'], {}), '(tags)\n', (5202, 5208), False, 'from i2vec_cli.requests_session import Session, convert_raw_to_hydrus\n'), ((5234, 5261), 'pprint.pformat', 'pformat', (["tags['prediction']"], {}), "(tags['prediction'])\n", (5241, 5261), False, 'from pprint import pformat\n'), ((6088, 6128), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (6107, 6128), False, 'import logging\n'), ((6147, 6186), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (6166, 6186), False, 'import logging\n'), ((6548, 6571), 'os.path.isfile', 'os.path.isfile', (['db_path'], {}), '(db_path)\n', (6562, 6571), False, 'import os\n'), ((6654, 6678), 'i2vec_cli.models.init_all_tables', 'models.init_all_tables', ([], {}), '()\n', (6676, 6678), False, 'from i2vec_cli import models\n'), ((5312, 5343), 'click.Choice', 'click.Choice', (["['raw', 'hydrus']"], {}), "(['raw', 'hydrus'])\n", (5324, 5343), False, 'import click\n'), ((5730, 5808), 'click.Choice', 'click.Choice', (["['firefox', 'phantomjs', 'chrome', 'zope.testbrowser', 'django']"], {}), "(['firefox', 'phantomjs', 'chrome', 'zope.testbrowser', 'django'])\n", (5742, 5808), False, 'import click\n'), ((4775, 4865), 'i2vec_cli.models.save_result', 'models.save_result', ([], {'db': 'db_path', 'sha256': 'sha256', 'md5': 'md5', 'prediction': "tags['prediction']"}), "(db=db_path, sha256=sha256, md5=md5, prediction=tags[\n 'prediction'])\n", (4793, 4865), False, 'from i2vec_cli import models\n'), ((6231, 6263), 'structlog.stdlib.LoggerFactory', 'structlog.stdlib.LoggerFactory', ([], {}), '()\n', (6261, 6263), False, 'import structlog\n'), ((6843, 6860), 'os.path.isfile', 'os.path.isfile', (['p'], {}), '(p)\n', (6857, 6860), False, 'import os\n'), ((2404, 2439), 'shutil.move', 'shutil.move', (['basename', 'new_basename'], {}), '(basename, new_basename)\n', (2415, 2439), False, 'import shutil\n'), ((3672, 3695), 'time.sleep', 'time.sleep', (['close_delay'], {}), '(close_delay)\n', (3682, 3695), False, 'import time\n'), ((6581, 6594), 'pathlib.Path', 'Path', (['db_path'], {}), '(db_path)\n', (6585, 6594), False, 'from pathlib import Path\n'), ((2565, 2600), 'shutil.move', 'shutil.move', 
(['basename', 'new_basename'], {}), '(basename, new_basename)\n', (2576, 2600), False, 'import shutil\n'), ((6901, 6920), 'os.path.basename', 'os.path.basename', (['p'], {}), '(p)\n', (6917, 6920), False, 'import os\n')] |
"""Functions for builtin CherryPy tools."""
import logging
import re
from hashlib import md5
import six
from six.moves import urllib
import cherrypy
from cherrypy._cpcompat import text_or_bytes
from cherrypy.lib import httputil as _httputil
from cherrypy.lib import is_iterator
# Conditional HTTP request support #
def validate_etags(autotags=False, debug=False):
"""Validate the current ETag against If-Match, If-None-Match headers.
If autotags is True, an ETag response-header value will be provided
from an MD5 hash of the response body (unless some other code has
already provided an ETag header). If False (the default), the ETag
will not be automatic.
WARNING: the autotags feature is not designed for URL's which allow
methods other than GET. For example, if a POST to the same URL returns
no content, the automatic ETag will be incorrect, breaking a fundamental
use for entity tags in a possibly destructive fashion. Likewise, if you
raise 304 Not Modified, the response body will be empty, the ETag hash
will be incorrect, and your application will break.
See :rfc:`2616` Section 14.24.
"""
response = cherrypy.serving.response
# Guard against being run twice.
if hasattr(response, 'ETag'):
return
status, reason, msg = _httputil.valid_status(response.status)
etag = response.headers.get('ETag')
# Automatic ETag generation. See warning in docstring.
if etag:
if debug:
cherrypy.log('ETag already set: %s' % etag, 'TOOLS.ETAGS')
elif not autotags:
if debug:
cherrypy.log('Autotags off', 'TOOLS.ETAGS')
elif status != 200:
if debug:
cherrypy.log('Status not 200', 'TOOLS.ETAGS')
else:
etag = response.collapse_body()
etag = '"%s"' % md5(etag).hexdigest()
if debug:
cherrypy.log('Setting ETag: %s' % etag, 'TOOLS.ETAGS')
response.headers['ETag'] = etag
response.ETag = etag
# "If the request would, without the If-Match header field, result in
# anything other than a 2xx or 412 status, then the If-Match header
# MUST be ignored."
if debug:
cherrypy.log('Status: %s' % status, 'TOOLS.ETAGS')
if status >= 200 and status <= 299:
request = cherrypy.serving.request
conditions = request.headers.elements('If-Match') or []
conditions = [str(x) for x in conditions]
if debug:
cherrypy.log('If-Match conditions: %s' % repr(conditions),
'TOOLS.ETAGS')
if conditions and not (conditions == ['*'] or etag in conditions):
raise cherrypy.HTTPError(412, 'If-Match failed: ETag %r did '
'not match %r' % (etag, conditions))
conditions = request.headers.elements('If-None-Match') or []
conditions = [str(x) for x in conditions]
if debug:
cherrypy.log('If-None-Match conditions: %s' % repr(conditions),
'TOOLS.ETAGS')
if conditions == ['*'] or etag in conditions:
if debug:
cherrypy.log('request.method: %s' %
request.method, 'TOOLS.ETAGS')
if request.method in ('GET', 'HEAD'):
raise cherrypy.HTTPRedirect([], 304)
else:
raise cherrypy.HTTPError(412, 'If-None-Match failed: ETag %r '
'matched %r' % (etag, conditions))
def validate_since():
"""Validate the current Last-Modified against If-Modified-Since headers.
If no code has set the Last-Modified response header, then no validation
will be performed.
"""
response = cherrypy.serving.response
lastmod = response.headers.get('Last-Modified')
if lastmod:
status, reason, msg = _httputil.valid_status(response.status)
request = cherrypy.serving.request
since = request.headers.get('If-Unmodified-Since')
if since and since != lastmod:
if (status >= 200 and status <= 299) or status == 412:
raise cherrypy.HTTPError(412)
since = request.headers.get('If-Modified-Since')
if since and since == lastmod:
if (status >= 200 and status <= 299) or status == 304:
if request.method in ('GET', 'HEAD'):
raise cherrypy.HTTPRedirect([], 304)
else:
raise cherrypy.HTTPError(412)
# Tool code #
def allow(methods=None, debug=False):
"""Raise 405 if request.method not in methods (default ['GET', 'HEAD']).
The given methods are case-insensitive, and may be in any order.
If only one method is allowed, you may supply a single string;
if more than one, supply a list of strings.
Regardless of whether the current method is allowed or not, this
also emits an 'Allow' response header, containing the given methods.
"""
if not isinstance(methods, (tuple, list)):
methods = [methods]
methods = [m.upper() for m in methods if m]
if not methods:
methods = ['GET', 'HEAD']
elif 'GET' in methods and 'HEAD' not in methods:
methods.append('HEAD')
cherrypy.response.headers['Allow'] = ', '.join(methods)
if cherrypy.request.method not in methods:
if debug:
cherrypy.log('request.method %r not in methods %r' %
(cherrypy.request.method, methods), 'TOOLS.ALLOW')
raise cherrypy.HTTPError(405)
else:
if debug:
cherrypy.log('request.method %r in methods %r' %
(cherrypy.request.method, methods), 'TOOLS.ALLOW')
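# Illustrative CherryPy config for this tool (the values are an assumption,
# not taken from this file):
#   {'/': {'tools.allow.on': True, 'tools.allow.methods': ['GET', 'HEAD', 'POST']}}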
def proxy(base=None, local='X-Forwarded-Host', remote='X-Forwarded-For',
scheme='X-Forwarded-Proto', debug=False):
"""Change the base URL (scheme://host[:port][/path]).
For running a CP server behind Apache, lighttpd, or other HTTP server.
For Apache and lighttpd, you should leave the 'local' argument at the
default value of 'X-Forwarded-Host'. For Squid, you probably want to set
tools.proxy.local = 'Origin'.
If you want the new request.base to include path info (not just the host),
you must explicitly set base to the full base path, and ALSO set 'local'
to '', so that the X-Forwarded-Host request header (which never includes
path info) does not override it. Regardless, the value for 'base' MUST
NOT end in a slash.
cherrypy.request.remote.ip (the IP address of the client) will be
rewritten if the header specified by the 'remote' arg is valid.
By default, 'remote' is set to 'X-Forwarded-For'. If you do not
want to rewrite remote.ip, set the 'remote' arg to an empty string.
"""
request = cherrypy.serving.request
if scheme:
s = request.headers.get(scheme, None)
if debug:
cherrypy.log('Testing scheme %r:%r' % (scheme, s), 'TOOLS.PROXY')
if s == 'on' and 'ssl' in scheme.lower():
# This handles e.g. webfaction's 'X-Forwarded-Ssl: on' header
scheme = 'https'
else:
# This is for lighttpd/pound/Mongrel's 'X-Forwarded-Proto: https'
scheme = s
if not scheme:
scheme = request.base[:request.base.find('://')]
if local:
lbase = request.headers.get(local, None)
if debug:
cherrypy.log('Testing local %r:%r' % (local, lbase), 'TOOLS.PROXY')
if lbase is not None:
base = lbase.split(',')[0]
if not base:
default = urllib.parse.urlparse(request.base).netloc
base = request.headers.get('Host', default)
if base.find('://') == -1:
# add http:// or https:// if needed
base = scheme + '://' + base
request.base = base
if remote:
xff = request.headers.get(remote)
if debug:
cherrypy.log('Testing remote %r:%r' % (remote, xff), 'TOOLS.PROXY')
if xff:
if remote == 'X-Forwarded-For':
# Grab the first IP in a comma-separated list. Ref #1268.
xff = next(ip.strip() for ip in xff.split(','))
request.remote.ip = xff
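# Illustrative CherryPy config for this tool (the hostname is hypothetical):
#   {'/': {'tools.proxy.on': True, 'tools.proxy.base': 'https://www.example.com',
#          'tools.proxy.local': ''}}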
def ignore_headers(headers=('Range',), debug=False):
"""Delete request headers whose field names are included in 'headers'.
This is a useful tool for working behind certain HTTP servers;
for example, Apache duplicates the work that CP does for 'Range'
headers, and will doubly-truncate the response.
"""
request = cherrypy.serving.request
for name in headers:
if name in request.headers:
if debug:
cherrypy.log('Ignoring request header %r' % name,
'TOOLS.IGNORE_HEADERS')
del request.headers[name]
def response_headers(headers=None, debug=False):
"""Set headers on the response."""
if debug:
cherrypy.log('Setting response headers: %s' % repr(headers),
'TOOLS.RESPONSE_HEADERS')
for name, value in (headers or []):
cherrypy.serving.response.headers[name] = value
response_headers.failsafe = True
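# Illustrative CherryPy config for this tool (the header values are an assumption):
#   {'/': {'tools.response_headers.on': True,
#          'tools.response_headers.headers': [('Cache-Control', 'no-cache')]}}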
def referer(pattern, accept=True, accept_missing=False, error=403,
message='Forbidden Referer header.', debug=False):
"""Raise HTTPError if Referer header does/does not match the given pattern.
pattern
A regular expression pattern to test against the Referer.
accept
If True, the Referer must match the pattern; if False,
the Referer must NOT match the pattern.
accept_missing
If True, permit requests with no Referer header.
error
The HTTP error code to return to the client on failure.
message
A string to include in the response body on failure.
"""
try:
ref = cherrypy.serving.request.headers['Referer']
match = bool(re.match(pattern, ref))
if debug:
cherrypy.log('Referer %r matches %r' % (ref, pattern),
'TOOLS.REFERER')
if accept == match:
return
except KeyError:
if debug:
cherrypy.log('No Referer header', 'TOOLS.REFERER')
if accept_missing:
return
raise cherrypy.HTTPError(error, message)
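# Illustrative CherryPy config for this tool (the pattern is hypothetical):
#   {'/': {'tools.referer.on': True,
#          'tools.referer.pattern': r'^https?://[^/]*example\.com'}}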
class SessionAuth(object):
"""Assert that the user is logged in."""
session_key = 'username'
debug = False
def check_username_and_password(self, username, password):
pass
def anonymous(self):
"""Provide a temporary user name for anonymous users."""
pass
def on_login(self, username):
pass
def on_logout(self, username):
pass
def on_check(self, username):
pass
def login_screen(self, from_page='..', username='', error_msg='',
**kwargs):
return (six.text_type("""<html><body>
Message: %(error_msg)s
<form method="post" action="do_login">
Login: <input type="text" name="username" value="%(username)s" size="10" />
<br />
Password: <input type="password" name="password" size="10" />
<br />
<input type="hidden" name="from_page" value="%(from_page)s" />
<br />
<input type="submit" />
</form>
</body></html>""") % vars()).encode('utf-8')
def do_login(self, username, password, from_page='..', **kwargs):
"""Login. May raise redirect, or return True if request handled."""
response = cherrypy.serving.response
error_msg = self.check_username_and_password(username, password)
if error_msg:
body = self.login_screen(from_page, username, error_msg)
response.body = body
if 'Content-Length' in response.headers:
# Delete Content-Length header so finalize() recalcs it.
del response.headers['Content-Length']
return True
else:
cherrypy.serving.request.login = username
cherrypy.session[self.session_key] = username
self.on_login(username)
raise cherrypy.HTTPRedirect(from_page or '/')
def do_logout(self, from_page='..', **kwargs):
"""Logout. May raise redirect, or return True if request handled."""
sess = cherrypy.session
username = sess.get(self.session_key)
sess[self.session_key] = None
if username:
cherrypy.serving.request.login = None
self.on_logout(username)
raise cherrypy.HTTPRedirect(from_page)
def do_check(self):
"""Assert username. Raise redirect, or return True if request handled.
"""
sess = cherrypy.session
request = cherrypy.serving.request
response = cherrypy.serving.response
username = sess.get(self.session_key)
if not username:
sess[self.session_key] = username = self.anonymous()
self._debug_message('No session[username], trying anonymous')
if not username:
url = cherrypy.url(qs=request.query_string)
self._debug_message(
'No username, routing to login_screen with from_page %(url)r',
locals(),
)
response.body = self.login_screen(url)
if 'Content-Length' in response.headers:
# Delete Content-Length header so finalize() recalcs it.
del response.headers['Content-Length']
return True
self._debug_message('Setting request.login to %(username)r', locals())
request.login = username
self.on_check(username)
def _debug_message(self, template, context={}):
if not self.debug:
return
cherrypy.log(template % context, 'TOOLS.SESSAUTH')
def run(self):
request = cherrypy.serving.request
response = cherrypy.serving.response
path = request.path_info
if path.endswith('login_screen'):
self._debug_message('routing %(path)r to login_screen', locals())
response.body = self.login_screen()
return True
elif path.endswith('do_login'):
if request.method != 'POST':
response.headers['Allow'] = 'POST'
self._debug_message('do_login requires POST')
raise cherrypy.HTTPError(405)
self._debug_message('routing %(path)r to do_login', locals())
return self.do_login(**request.params)
elif path.endswith('do_logout'):
if request.method != 'POST':
response.headers['Allow'] = 'POST'
raise cherrypy.HTTPError(405)
self._debug_message('routing %(path)r to do_logout', locals())
return self.do_logout(**request.params)
else:
self._debug_message('No special path, running do_check')
return self.do_check()
def session_auth(**kwargs):
sa = SessionAuth()
for k, v in kwargs.items():
setattr(sa, k, v)
return sa.run()
session_auth.__doc__ = (
"""Session authentication hook.
Any attribute of the SessionAuth class may be overridden via a keyword arg
to this function:
""" + '\n'.join(['%s: %s' % (k, type(getattr(SessionAuth, k)).__name__)
for k in dir(SessionAuth) if not k.startswith('__')])
)
def log_traceback(severity=logging.ERROR, debug=False):
"""Write the last error's traceback to the cherrypy error log."""
cherrypy.log('', 'HTTP', severity=severity, traceback=True)
def log_request_headers(debug=False):
"""Write request headers to the cherrypy error log."""
h = [' %s: %s' % (k, v) for k, v in cherrypy.serving.request.header_list]
cherrypy.log('\nRequest Headers:\n' + '\n'.join(h), 'HTTP')
def log_hooks(debug=False):
"""Write request.hooks to the cherrypy error log."""
request = cherrypy.serving.request
msg = []
# Sort by the standard points if possible.
from cherrypy import _cprequest
points = _cprequest.hookpoints
for k in request.hooks.keys():
if k not in points:
points.append(k)
for k in points:
msg.append(' %s:' % k)
v = request.hooks.get(k, [])
v.sort()
for h in v:
msg.append(' %r' % h)
cherrypy.log('\nRequest Hooks for ' + cherrypy.url() +
':\n' + '\n'.join(msg), 'HTTP')
def redirect(url='', internal=True, debug=False):
"""Raise InternalRedirect or HTTPRedirect to the given url."""
if debug:
cherrypy.log('Redirecting %sto: %s' %
({True: 'internal ', False: ''}[internal], url),
'TOOLS.REDIRECT')
if internal:
raise cherrypy.InternalRedirect(url)
else:
raise cherrypy.HTTPRedirect(url)
def trailing_slash(missing=True, extra=False, status=None, debug=False):
"""Redirect if path_info has (missing|extra) trailing slash."""
request = cherrypy.serving.request
pi = request.path_info
if debug:
cherrypy.log('is_index: %r, missing: %r, extra: %r, path_info: %r' %
(request.is_index, missing, extra, pi),
'TOOLS.TRAILING_SLASH')
if request.is_index is True:
if missing:
if not pi.endswith('/'):
new_url = cherrypy.url(pi + '/', request.query_string)
raise cherrypy.HTTPRedirect(new_url, status=status or 301)
elif request.is_index is False:
if extra:
# If pi == '/', don't redirect to ''!
if pi.endswith('/') and pi != '/':
new_url = cherrypy.url(pi[:-1], request.query_string)
raise cherrypy.HTTPRedirect(new_url, status=status or 301)
def flatten(debug=False):
"""Wrap response.body in a generator that recursively iterates over body.
This allows cherrypy.response.body to consist of 'nested generators';
that is, a set of generators that yield generators.
"""
def flattener(input):
numchunks = 0
for x in input:
if not is_iterator(x):
numchunks += 1
yield x
else:
for y in flattener(x):
numchunks += 1
yield y
if debug:
cherrypy.log('Flattened %d chunks' % numchunks, 'TOOLS.FLATTEN')
response = cherrypy.serving.response
response.body = flattener(response.body)
def accept(media=None, debug=False):
"""Return the client's preferred media-type (from the given Content-Types).
If 'media' is None (the default), no test will be performed.
If 'media' is provided, it should be the Content-Type value (as a string)
or values (as a list or tuple of strings) which the current resource
can emit. The client's acceptable media ranges (as declared in the
Accept request header) will be matched in order to these Content-Type
values; the first such string is returned. That is, the return value
will always be one of the strings provided in the 'media' arg (or None
if 'media' is None).
If no match is found, then HTTPError 406 (Not Acceptable) is raised.
Note that most web browsers send */* as a (low-quality) acceptable
media range, which should match any Content-Type. In addition, "...if
no Accept header field is present, then it is assumed that the client
accepts all media types."
Matching types are checked in order of client preference first,
and then in the order of the given 'media' values.
Note that this function does not honor accept-params (other than "q").
"""
if not media:
return
if isinstance(media, text_or_bytes):
media = [media]
request = cherrypy.serving.request
# Parse the Accept request header, and try to match one
# of the requested media-ranges (in order of preference).
ranges = request.headers.elements('Accept')
if not ranges:
# Any media type is acceptable.
if debug:
cherrypy.log('No Accept header elements', 'TOOLS.ACCEPT')
return media[0]
else:
# Note that 'ranges' is sorted in order of preference
for element in ranges:
if element.qvalue > 0:
if element.value == '*/*':
# Matches any type or subtype
if debug:
cherrypy.log('Match due to */*', 'TOOLS.ACCEPT')
return media[0]
elif element.value.endswith('/*'):
# Matches any subtype
mtype = element.value[:-1] # Keep the slash
for m in media:
if m.startswith(mtype):
if debug:
cherrypy.log('Match due to %s' % element.value,
'TOOLS.ACCEPT')
return m
else:
# Matches exact value
if element.value in media:
if debug:
cherrypy.log('Match due to %s' % element.value,
'TOOLS.ACCEPT')
return element.value
# No suitable media-range found.
ah = request.headers.get('Accept')
if ah is None:
msg = 'Your client did not send an Accept header.'
else:
msg = 'Your client sent this Accept header: %s.' % ah
msg += (' But this resource only emits these media types: %s.' %
', '.join(media))
raise cherrypy.HTTPError(406, msg)
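# Illustrative CherryPy config for this tool (the media types are an assumption):
#   {'/': {'tools.accept.on': True,
#          'tools.accept.media': ['text/html', 'application/json']}}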
class MonitoredHeaderMap(_httputil.HeaderMap):
def transform_key(self, key):
self.accessed_headers.add(key)
return super(MonitoredHeaderMap, self).transform_key(key)
def __init__(self):
self.accessed_headers = set()
super(MonitoredHeaderMap, self).__init__()
def autovary(ignore=None, debug=False):
"""Auto-populate the Vary response header based on request.header access.
"""
request = cherrypy.serving.request
req_h = request.headers
request.headers = MonitoredHeaderMap()
request.headers.update(req_h)
if ignore is None:
ignore = set(['Content-Disposition', 'Content-Length', 'Content-Type'])
def set_response_header():
resp_h = cherrypy.serving.response.headers
v = set([e.value for e in resp_h.elements('Vary')])
if debug:
cherrypy.log(
'Accessed headers: %s' % request.headers.accessed_headers,
'TOOLS.AUTOVARY')
v = v.union(request.headers.accessed_headers)
v = v.difference(ignore)
v = list(v)
v.sort()
resp_h['Vary'] = ', '.join(v)
request.hooks.attach('before_finalize', set_response_header, 95)
def convert_params(exception=ValueError, error=400):
"""Convert request params based on function annotations, with error handling.
exception
Exception class to catch.
status
The HTTP error code to return to the client on failure.
"""
request = cherrypy.serving.request
types = request.handler.callable.__annotations__
with cherrypy.HTTPError.handle(exception, error):
for key in set(types).intersection(request.params):
request.params[key] = types[key](request.params[key])
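# Illustrative handler relying on this hook (hypothetical; how the function is
# registered as a tool is not shown in this file):
#   def resource(self, limit: int, price: float):
#       ...
# With the hook attached, request.params['limit'] and ['price'] arrive already
# converted via the annotations, and a failed conversion raises HTTPError 400.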
| [
"cherrypy.InternalRedirect",
"cherrypy.log",
"hashlib.md5",
"six.moves.urllib.parse.urlparse",
"re.match",
"cherrypy.url",
"cherrypy.HTTPError.handle",
"cherrypy.lib.is_iterator",
"six.text_type",
"cherrypy.lib.httputil.valid_status",
"cherrypy.HTTPError",
"cherrypy.HTTPRedirect"
]
| [((1355, 1394), 'cherrypy.lib.httputil.valid_status', '_httputil.valid_status', (['response.status'], {}), '(response.status)\n', (1377, 1394), True, 'from cherrypy.lib import httputil as _httputil\n'), ((10356, 10390), 'cherrypy.HTTPError', 'cherrypy.HTTPError', (['error', 'message'], {}), '(error, message)\n', (10374, 10390), False, 'import cherrypy\n'), ((15535, 15594), 'cherrypy.log', 'cherrypy.log', (['""""""', '"""HTTP"""'], {'severity': 'severity', 'traceback': '(True)'}), "('', 'HTTP', severity=severity, traceback=True)\n", (15547, 15594), False, 'import cherrypy\n'), ((21666, 21694), 'cherrypy.HTTPError', 'cherrypy.HTTPError', (['(406)', 'msg'], {}), '(406, msg)\n', (21684, 21694), False, 'import cherrypy\n'), ((2235, 2285), 'cherrypy.log', 'cherrypy.log', (["('Status: %s' % status)", '"""TOOLS.ETAGS"""'], {}), "('Status: %s' % status, 'TOOLS.ETAGS')\n", (2247, 2285), False, 'import cherrypy\n'), ((3903, 3942), 'cherrypy.lib.httputil.valid_status', '_httputil.valid_status', (['response.status'], {}), '(response.status)\n', (3925, 3942), True, 'from cherrypy.lib import httputil as _httputil\n'), ((5618, 5641), 'cherrypy.HTTPError', 'cherrypy.HTTPError', (['(405)'], {}), '(405)\n', (5636, 5641), False, 'import cherrypy\n'), ((12559, 12591), 'cherrypy.HTTPRedirect', 'cherrypy.HTTPRedirect', (['from_page'], {}), '(from_page)\n', (12580, 12591), False, 'import cherrypy\n'), ((13779, 13829), 'cherrypy.log', 'cherrypy.log', (['(template % context)', '"""TOOLS.SESSAUTH"""'], {}), "(template % context, 'TOOLS.SESSAUTH')\n", (13791, 13829), False, 'import cherrypy\n'), ((16607, 16720), 'cherrypy.log', 'cherrypy.log', (["('Redirecting %sto: %s' % ({(True): 'internal ', (False): ''}[internal], url))", '"""TOOLS.REDIRECT"""'], {}), "('Redirecting %sto: %s' % ({(True): 'internal ', (False): ''}[\n internal], url), 'TOOLS.REDIRECT')\n", (16619, 16720), False, 'import cherrypy\n'), ((16785, 16815), 'cherrypy.InternalRedirect', 'cherrypy.InternalRedirect', (['url'], {}), '(url)\n', (16810, 16815), False, 'import cherrypy\n'), ((16840, 16866), 'cherrypy.HTTPRedirect', 'cherrypy.HTTPRedirect', (['url'], {}), '(url)\n', (16861, 16866), False, 'import cherrypy\n'), ((17099, 17236), 'cherrypy.log', 'cherrypy.log', (["('is_index: %r, missing: %r, extra: %r, path_info: %r' % (request.is_index,\n missing, extra, pi))", '"""TOOLS.TRAILING_SLASH"""'], {}), "('is_index: %r, missing: %r, extra: %r, path_info: %r' % (\n request.is_index, missing, extra, pi), 'TOOLS.TRAILING_SLASH')\n", (17111, 17236), False, 'import cherrypy\n'), ((23272, 23315), 'cherrypy.HTTPError.handle', 'cherrypy.HTTPError.handle', (['exception', 'error'], {}), '(exception, error)\n', (23297, 23315), False, 'import cherrypy\n'), ((1539, 1597), 'cherrypy.log', 'cherrypy.log', (["('ETag already set: %s' % etag)", '"""TOOLS.ETAGS"""'], {}), "('ETag already set: %s' % etag, 'TOOLS.ETAGS')\n", (1551, 1597), False, 'import cherrypy\n'), ((2706, 2799), 'cherrypy.HTTPError', 'cherrypy.HTTPError', (['(412)', "('If-Match failed: ETag %r did not match %r' % (etag, conditions))"], {}), "(412, 'If-Match failed: ETag %r did not match %r' % (etag,\n conditions))\n", (2724, 2799), False, 'import cherrypy\n'), ((5475, 5583), 'cherrypy.log', 'cherrypy.log', (["('request.method %r not in methods %r' % (cherrypy.request.method, methods))", '"""TOOLS.ALLOW"""'], {}), "('request.method %r not in methods %r' % (cherrypy.request.\n method, methods), 'TOOLS.ALLOW')\n", (5487, 5583), False, 'import cherrypy\n'), ((5682, 5785), 'cherrypy.log', 'cherrypy.log', 
(["('request.method %r in methods %r' % (cherrypy.request.method, methods))", '"""TOOLS.ALLOW"""'], {}), "('request.method %r in methods %r' % (cherrypy.request.method,\n methods), 'TOOLS.ALLOW')\n", (5694, 5785), False, 'import cherrypy\n'), ((7006, 7071), 'cherrypy.log', 'cherrypy.log', (["('Testing scheme %r:%r' % (scheme, s))", '"""TOOLS.PROXY"""'], {}), "('Testing scheme %r:%r' % (scheme, s), 'TOOLS.PROXY')\n", (7018, 7071), False, 'import cherrypy\n'), ((7510, 7577), 'cherrypy.log', 'cherrypy.log', (["('Testing local %r:%r' % (local, lbase))", '"""TOOLS.PROXY"""'], {}), "('Testing local %r:%r' % (local, lbase), 'TOOLS.PROXY')\n", (7522, 7577), False, 'import cherrypy\n'), ((7682, 7717), 'six.moves.urllib.parse.urlparse', 'urllib.parse.urlparse', (['request.base'], {}), '(request.base)\n', (7703, 7717), False, 'from six.moves import urllib\n'), ((8003, 8070), 'cherrypy.log', 'cherrypy.log', (["('Testing remote %r:%r' % (remote, xff))", '"""TOOLS.PROXY"""'], {}), "('Testing remote %r:%r' % (remote, xff), 'TOOLS.PROXY')\n", (8015, 8070), False, 'import cherrypy\n'), ((9999, 10021), 're.match', 're.match', (['pattern', 'ref'], {}), '(pattern, ref)\n', (10007, 10021), False, 'import re\n'), ((10053, 10124), 'cherrypy.log', 'cherrypy.log', (["('Referer %r matches %r' % (ref, pattern))", '"""TOOLS.REFERER"""'], {}), "('Referer %r matches %r' % (ref, pattern), 'TOOLS.REFERER')\n", (10065, 10124), False, 'import cherrypy\n'), ((12152, 12191), 'cherrypy.HTTPRedirect', 'cherrypy.HTTPRedirect', (["(from_page or '/')"], {}), "(from_page or '/')\n", (12173, 12191), False, 'import cherrypy\n'), ((13082, 13119), 'cherrypy.url', 'cherrypy.url', ([], {'qs': 'request.query_string'}), '(qs=request.query_string)\n', (13094, 13119), False, 'import cherrypy\n'), ((18363, 18427), 'cherrypy.log', 'cherrypy.log', (["('Flattened %d chunks' % numchunks)", '"""TOOLS.FLATTEN"""'], {}), "('Flattened %d chunks' % numchunks, 'TOOLS.FLATTEN')\n", (18375, 18427), False, 'import cherrypy\n'), ((20097, 20154), 'cherrypy.log', 'cherrypy.log', (['"""No Accept header elements"""', '"""TOOLS.ACCEPT"""'], {}), "('No Accept header elements', 'TOOLS.ACCEPT')\n", (20109, 20154), False, 'import cherrypy\n'), ((22547, 22640), 'cherrypy.log', 'cherrypy.log', (["('Accessed headers: %s' % request.headers.accessed_headers)", '"""TOOLS.AUTOVARY"""'], {}), "('Accessed headers: %s' % request.headers.accessed_headers,\n 'TOOLS.AUTOVARY')\n", (22559, 22640), False, 'import cherrypy\n'), ((1651, 1694), 'cherrypy.log', 'cherrypy.log', (['"""Autotags off"""', '"""TOOLS.ETAGS"""'], {}), "('Autotags off', 'TOOLS.ETAGS')\n", (1663, 1694), False, 'import cherrypy\n'), ((3182, 3248), 'cherrypy.log', 'cherrypy.log', (["('request.method: %s' % request.method)", '"""TOOLS.ETAGS"""'], {}), "('request.method: %s' % request.method, 'TOOLS.ETAGS')\n", (3194, 3248), False, 'import cherrypy\n'), ((3350, 3380), 'cherrypy.HTTPRedirect', 'cherrypy.HTTPRedirect', (['[]', '(304)'], {}), '([], 304)\n', (3371, 3380), False, 'import cherrypy\n'), ((3421, 3513), 'cherrypy.HTTPError', 'cherrypy.HTTPError', (['(412)', "('If-None-Match failed: ETag %r matched %r' % (etag, conditions))"], {}), "(412, 'If-None-Match failed: ETag %r matched %r' % (etag,\n conditions))\n", (3439, 3513), False, 'import cherrypy\n'), ((4175, 4198), 'cherrypy.HTTPError', 'cherrypy.HTTPError', (['(412)'], {}), '(412)\n', (4193, 4198), False, 'import cherrypy\n'), ((8770, 8843), 'cherrypy.log', 'cherrypy.log', (["('Ignoring request header %r' % name)", '"""TOOLS.IGNORE_HEADERS"""'], {}), 
"('Ignoring request header %r' % name, 'TOOLS.IGNORE_HEADERS')\n", (8782, 8843), False, 'import cherrypy\n'), ((10248, 10298), 'cherrypy.log', 'cherrypy.log', (['"""No Referer header"""', '"""TOOLS.REFERER"""'], {}), "('No Referer header', 'TOOLS.REFERER')\n", (10260, 10298), False, 'import cherrypy\n'), ((17390, 17434), 'cherrypy.url', 'cherrypy.url', (["(pi + '/')", 'request.query_string'], {}), "(pi + '/', request.query_string)\n", (17402, 17434), False, 'import cherrypy\n'), ((17457, 17509), 'cherrypy.HTTPRedirect', 'cherrypy.HTTPRedirect', (['new_url'], {'status': '(status or 301)'}), '(new_url, status=status or 301)\n', (17478, 17509), False, 'import cherrypy\n'), ((18142, 18156), 'cherrypy.lib.is_iterator', 'is_iterator', (['x'], {}), '(x)\n', (18153, 18156), False, 'from cherrypy.lib import is_iterator\n'), ((1749, 1794), 'cherrypy.log', 'cherrypy.log', (['"""Status not 200"""', '"""TOOLS.ETAGS"""'], {}), "('Status not 200', 'TOOLS.ETAGS')\n", (1761, 1794), False, 'import cherrypy\n'), ((1921, 1975), 'cherrypy.log', 'cherrypy.log', (["('Setting ETag: %s' % etag)", '"""TOOLS.ETAGS"""'], {}), "('Setting ETag: %s' % etag, 'TOOLS.ETAGS')\n", (1933, 1975), False, 'import cherrypy\n'), ((4443, 4473), 'cherrypy.HTTPRedirect', 'cherrypy.HTTPRedirect', (['[]', '(304)'], {}), '([], 304)\n', (4464, 4473), False, 'import cherrypy\n'), ((4522, 4545), 'cherrypy.HTTPError', 'cherrypy.HTTPError', (['(412)'], {}), '(412)\n', (4540, 4545), False, 'import cherrypy\n'), ((10959, 11361), 'six.text_type', 'six.text_type', (['"""<html><body>\nMessage: %(error_msg)s\n<form method="post" action="do_login">\n Login: <input type="text" name="username" value="%(username)s" size="10" />\n <br />\n Password: <input type="password" name="password" size="10" />\n <br />\n <input type="hidden" name="from_page" value="%(from_page)s" />\n <br />\n <input type="submit" />\n</form>\n</body></html>"""'], {}), '(\n """<html><body>\nMessage: %(error_msg)s\n<form method="post" action="do_login">\n Login: <input type="text" name="username" value="%(username)s" size="10" />\n <br />\n Password: <input type="password" name="password" size="10" />\n <br />\n <input type="hidden" name="from_page" value="%(from_page)s" />\n <br />\n <input type="submit" />\n</form>\n</body></html>"""\n )\n', (10972, 11361), False, 'import six\n'), ((14380, 14403), 'cherrypy.HTTPError', 'cherrypy.HTTPError', (['(405)'], {}), '(405)\n', (14398, 14403), False, 'import cherrypy\n'), ((16400, 16414), 'cherrypy.url', 'cherrypy.url', ([], {}), '()\n', (16412, 16414), False, 'import cherrypy\n'), ((17687, 17730), 'cherrypy.url', 'cherrypy.url', (['pi[:-1]', 'request.query_string'], {}), '(pi[:-1], request.query_string)\n', (17699, 17730), False, 'import cherrypy\n'), ((17753, 17805), 'cherrypy.HTTPRedirect', 'cherrypy.HTTPRedirect', (['new_url'], {'status': '(status or 301)'}), '(new_url, status=status or 301)\n', (17774, 17805), False, 'import cherrypy\n'), ((14684, 14707), 'cherrypy.HTTPError', 'cherrypy.HTTPError', (['(405)'], {}), '(405)\n', (14702, 14707), False, 'import cherrypy\n'), ((20464, 20512), 'cherrypy.log', 'cherrypy.log', (['"""Match due to */*"""', '"""TOOLS.ACCEPT"""'], {}), "('Match due to */*', 'TOOLS.ACCEPT')\n", (20476, 20512), False, 'import cherrypy\n'), ((1869, 1878), 'hashlib.md5', 'md5', (['etag'], {}), '(etag)\n', (1872, 1878), False, 'from hashlib import md5\n'), ((21180, 21243), 'cherrypy.log', 'cherrypy.log', (["('Match due to %s' % element.value)", '"""TOOLS.ACCEPT"""'], {}), "('Match due to %s' % element.value, 
'TOOLS.ACCEPT')\n", (21192, 21243), False, 'import cherrypy\n'), ((20861, 20924), 'cherrypy.log', 'cherrypy.log', (["('Match due to %s' % element.value)", '"""TOOLS.ACCEPT"""'], {}), "('Match due to %s' % element.value, 'TOOLS.ACCEPT')\n", (20873, 20924), False, 'import cherrypy\n')] |
'''Utility functions'''
import multiprocessing
from .globalVariables import *
def readMathIOmicaData(fileName):
'''Read text files exported by MathIOmica and convert to Python data
Parameters:
fileName: str
Path of directories and name of the file containing data
Returns:
data
Python data
Usage:
data = readMathIOmicaData("../../MathIOmica/MathIOmica/MathIOmicaData/ExampleData/rnaExample")
'''
if os.path.isfile(fileName):
with open(fileName, 'r') as tempFile:
data = tempFile.read()
data = data.replace('\n','').replace('{','(').replace('}',')').replace('->',':').replace('|>','}')
data = data.replace('<|','{').replace('^','*').replace('`','*').replace('Missing[]','"Missing[]"')
data = data.replace("\\",'')
    else:
        print('File not found (%s)'%(fileName))
        return None
    returning = None
    try:
        returning = eval(data)
    except Exception:
        print('Error occurred while converting data (%s)'%(fileName))
return returning
def runCPUs(NumberOfAvailableCPUs, func, list_of_tuples_of_func_params):
"""Parallelize function call with multiprocessing.Pool.
Parameters:
NumberOfAvailableCPUs: int
Number of processes to create
func: function
Function to apply, must take at most one argument
list_of_tuples_of_func_params: list
Function parameters
Returns:
2d numpy.array
Results of func in a numpy array
Usage:
results = runCPUs(4, pAutocorrelation, [(times[i], data[i], allTimes) for i in range(10)])
"""
instPool = multiprocessing.Pool(processes = NumberOfAvailableCPUs)
return_values = instPool.map(func, list_of_tuples_of_func_params)
instPool.close()
instPool.join()
return np.vstack(return_values)
def createReverseDictionary(inputDictionary):
"""Efficient way to create a reverse dictionary from a dictionary.
Utilizes Pandas.Dataframe.groupby and Numpy arrays indexing.
Parameters:
inputDictionary: dictionary
Dictionary to reverse
Returns:
dictionary
Reversed dictionary
Usage:
revDict = createReverseDictionary(Dict)
"""
keys, values = np.array(list(inputDictionary.keys())), np.array(list(inputDictionary.values()))
df = pd.DataFrame(np.array([[keys[i], value] for i in range(len(keys)) for value in values[i]]))
dfGrouped = df.groupby(df.columns[1])
keys, values = list(dfGrouped.indices.keys()), list(dfGrouped.indices.values())
GOs = df.values.T[0]
return dict(zip(keys, [GOs[value].tolist() for value in values]))
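# Illustrative example (not from the original source):
#   createReverseDictionary({'a': [1, 2], 'b': [2]})  ->  {1: ['a'], 2: ['a', 'b']}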
def createDirectories(path):
"""Create a path of directories, unless the path already exists.
Parameters:
path: str
Path directory
Returns:
None
Usage:
createDirectories("/pathToFolder1/pathToSubFolder2")
"""
if path=='':
return None
if not os.path.exists(path):
os.makedirs(path)
return None
| [
"multiprocessing.Pool"
]
| [((1738, 1791), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'NumberOfAvailableCPUs'}), '(processes=NumberOfAvailableCPUs)\n', (1758, 1791), False, 'import multiprocessing\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from unittest import skipIf
try:
from django.core.urlresolvers import reverse
except ModuleNotFoundError:
from django.urls import reverse
from django.db import transaction
from aldryn_reversion.core import create_revision as aldryn_create_revision
from parler.utils.context import switch_language
import six
from . import NewsBlogTestCase
from aldryn_newsblog.cms_appconfig import NewsBlogConfig
from ..settings import ENABLE_REVERSION
if ENABLE_REVERSION:
try:
from reversion import create_revision
from reversion import default_revision_manager
except ImportError:
from reversion.revisions import create_revision
from reversion.revisions import default_revision_manager
@skipIf(not ENABLE_REVERSION, 'django-reversion not enabled')
class TestVersioning(NewsBlogTestCase):
def create_revision(self, article, content=None, language=None, **kwargs):
with transaction.atomic():
with create_revision():
for k, v in six.iteritems(kwargs):
setattr(article, k, v)
if content:
plugins = article.content.get_plugins()
plugin = plugins[0].get_plugin_instance()[0]
plugin.body = content
plugin.save()
# TODO: Cover both cases (plugin modification/recreation)
# if content:
# article.content.get_plugins().delete()
# api.add_plugin(article.content, 'TextPlugin',
# self.language, body=content)
article.save()
def revert_to(self, article, revision):
(default_revision_manager.get_for_object(article)[revision]
.revision.revert())
def test_revert_revision(self):
title1 = self.rand_str(prefix='title1_')
title2 = self.rand_str(prefix='title2_')
content0 = self.rand_str(prefix='content0_')
content1 = self.rand_str(prefix='content1_')
content2 = self.rand_str(prefix='content2_')
article = self.create_article(content=content0)
# Revision 1
self.create_revision(article, title=title1, content=content1)
response = self.client.get(article.get_absolute_url())
self.assertContains(response, title1)
self.assertContains(response, content1)
self.assertNotContains(response, content0)
# Revision 2
self.create_revision(article, title=title2, content=content2)
response = self.client.get(article.get_absolute_url())
self.assertContains(response, title2)
self.assertContains(response, content2)
self.assertNotContains(response, content1)
# Revert to revision 1
self.revert_to(article, 1)
response = self.client.get(article.get_absolute_url())
self.assertContains(response, title1)
self.assertContains(response, content1)
self.assertNotContains(response, content0)
self.assertNotContains(response, content2)
def test_revert_translated_revision(self):
title1_en = self.rand_str(prefix='title1_en_')
title1_de = self.rand_str(prefix='title1_de_')
title2_en = self.rand_str(prefix='title2_en_')
title2_de = self.rand_str(prefix='title2_de_')
article = self.create_article()
# Revision 1
article.set_current_language('en')
self.create_revision(article, title=title1_en)
article.set_current_language('de')
self.create_revision(article, title=title1_de)
with switch_language(article, 'en'):
response = self.client.get(article.get_absolute_url())
self.assertContains(response, title1_en)
with switch_language(article, 'de'):
response = self.client.get(article.get_absolute_url())
self.assertContains(response, title1_de)
# Revision 2a (modify just EN)
article.set_current_language('en')
self.create_revision(article, title=title2_en)
response = self.client.get(article.get_absolute_url())
self.assertContains(response, title2_en)
with switch_language(article, 'de'):
response = self.client.get(article.get_absolute_url())
self.assertContains(response, title1_de)
# Revision 2b (modify just DE)
article.set_current_language('de')
self.create_revision(article, title=title2_de)
with switch_language(article, 'en'):
response = self.client.get(article.get_absolute_url())
self.assertContains(response, title2_en)
with switch_language(article, 'de'):
response = self.client.get(article.get_absolute_url())
self.assertContains(response, title2_de)
# Revert to revision 2a (EN=2, DE=1)
self.revert_to(article, 1)
with switch_language(article, 'en'):
response = self.client.get(article.get_absolute_url())
self.assertContains(response, title2_en)
with switch_language(article, 'de'):
response = self.client.get(article.get_absolute_url())
self.assertContains(response, title1_de)
# Revert to revision 1 (EN=1, DE=1)
self.revert_to(article, 2)
with switch_language(article, 'en'):
response = self.client.get(article.get_absolute_url())
self.assertContains(response, title1_en)
with switch_language(article, 'de'):
response = self.client.get(article.get_absolute_url())
self.assertContains(response, title1_de)
def test_edit_plugin_directly(self):
content0 = self.rand_str(prefix='content0_')
content1 = self.rand_str(prefix='content1_')
content2 = self.rand_str(prefix='content2_')
article = self.create_article(content=content0)
# Revision 1
self.create_revision(article, content=content1)
self.assertEqual(
len(default_revision_manager.get_for_object(article)), 1)
# Revision 2
with transaction.atomic():
plugins = article.content.get_plugins()
plugin = plugins[0].get_plugin_instance()[0]
plugin.body = content2
plugin.save()
aldryn_create_revision(article)
self.assertEqual(
len(default_revision_manager.get_for_object(article)), 2)
response = self.client.get(article.get_absolute_url())
self.assertContains(response, content2)
self.assertNotContains(response, content1)
# Revert to revision 1
self.revert_to(article, 1)
response = self.client.get(article.get_absolute_url())
self.assertContains(response, content1)
self.assertNotContains(response, content2)
def test_blog_config_recovery_accessible(self):
with transaction.atomic():
with create_revision():
new_conf = NewsBlogConfig(
namespace='test_revocery_admin_url', paginate_by=15)
new_conf.save()
new_config_version = (default_revision_manager
.get_for_object(new_conf)[0])
new_config_pk = new_conf.pk
self.assertEqual(NewsBlogConfig.objects.filter(
pk=new_config_pk).count(), 1)
new_conf.delete()
self.assertEqual(NewsBlogConfig.objects.filter(
pk=new_config_pk).count(), 0)
        # check that there is a way to access the recovery view
obj = new_config_version.object_version.object
opts = obj._meta
url = reverse(
'admin:{0}_{1}_{2}'.format(
opts.app_label,
obj._meta.model_name,
'recover'),
args=[new_config_version.pk])
        # Just in case, check the length; at this step either a
        # NoReverseMatch or some other error should already have occurred,
        # so if no exception is raised, it is a good sign
self.assertGreater(len(url), 4)
| [
"aldryn_reversion.core.create_revision",
"django.db.transaction.atomic",
"reversion.revisions.default_revision_manager.get_for_object",
"unittest.skipIf",
"parler.utils.context.switch_language",
"aldryn_newsblog.cms_appconfig.NewsBlogConfig",
"six.iteritems",
"aldryn_newsblog.cms_appconfig.NewsBlogConfig.objects.filter",
"reversion.revisions.create_revision"
]
| [((791, 851), 'unittest.skipIf', 'skipIf', (['(not ENABLE_REVERSION)', '"""django-reversion not enabled"""'], {}), "(not ENABLE_REVERSION, 'django-reversion not enabled')\n", (797, 851), False, 'from unittest import skipIf\n'), ((984, 1004), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (1002, 1004), False, 'from django.db import transaction\n'), ((3684, 3714), 'parler.utils.context.switch_language', 'switch_language', (['article', '"""en"""'], {}), "(article, 'en')\n", (3699, 3714), False, 'from parler.utils.context import switch_language\n'), ((3850, 3880), 'parler.utils.context.switch_language', 'switch_language', (['article', '"""de"""'], {}), "(article, 'de')\n", (3865, 3880), False, 'from parler.utils.context import switch_language\n'), ((4267, 4297), 'parler.utils.context.switch_language', 'switch_language', (['article', '"""de"""'], {}), "(article, 'de')\n", (4282, 4297), False, 'from parler.utils.context import switch_language\n'), ((4571, 4601), 'parler.utils.context.switch_language', 'switch_language', (['article', '"""en"""'], {}), "(article, 'en')\n", (4586, 4601), False, 'from parler.utils.context import switch_language\n'), ((4737, 4767), 'parler.utils.context.switch_language', 'switch_language', (['article', '"""de"""'], {}), "(article, 'de')\n", (4752, 4767), False, 'from parler.utils.context import switch_language\n'), ((4984, 5014), 'parler.utils.context.switch_language', 'switch_language', (['article', '"""en"""'], {}), "(article, 'en')\n", (4999, 5014), False, 'from parler.utils.context import switch_language\n'), ((5150, 5180), 'parler.utils.context.switch_language', 'switch_language', (['article', '"""de"""'], {}), "(article, 'de')\n", (5165, 5180), False, 'from parler.utils.context import switch_language\n'), ((5396, 5426), 'parler.utils.context.switch_language', 'switch_language', (['article', '"""en"""'], {}), "(article, 'en')\n", (5411, 5426), False, 'from parler.utils.context import switch_language\n'), ((5562, 5592), 'parler.utils.context.switch_language', 'switch_language', (['article', '"""de"""'], {}), "(article, 'de')\n", (5577, 5592), False, 'from parler.utils.context import switch_language\n'), ((6182, 6202), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (6200, 6202), False, 'from django.db import transaction\n'), ((6386, 6417), 'aldryn_reversion.core.create_revision', 'aldryn_create_revision', (['article'], {}), '(article)\n', (6408, 6417), True, 'from aldryn_reversion.core import create_revision as aldryn_create_revision\n'), ((6974, 6994), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (6992, 6994), False, 'from django.db import transaction\n'), ((7210, 7259), 'reversion.revisions.default_revision_manager.get_for_object', 'default_revision_manager.get_for_object', (['new_conf'], {}), '(new_conf)\n', (7249, 7259), False, 'from reversion.revisions import default_revision_manager\n'), ((1023, 1040), 'reversion.revisions.create_revision', 'create_revision', ([], {}), '()\n', (1038, 1040), False, 'from reversion.revisions import create_revision\n'), ((1070, 1091), 'six.iteritems', 'six.iteritems', (['kwargs'], {}), '(kwargs)\n', (1083, 1091), False, 'import six\n'), ((6093, 6141), 'reversion.revisions.default_revision_manager.get_for_object', 'default_revision_manager.get_for_object', (['article'], {}), '(article)\n', (6132, 6141), False, 'from reversion.revisions import default_revision_manager\n'), ((6461, 6509), 'reversion.revisions.default_revision_manager.get_for_object', 
'default_revision_manager.get_for_object', (['article'], {}), '(article)\n', (6500, 6509), False, 'from reversion.revisions import default_revision_manager\n'), ((7013, 7030), 'reversion.revisions.create_revision', 'create_revision', ([], {}), '()\n', (7028, 7030), False, 'from reversion.revisions import create_revision\n'), ((7059, 7126), 'aldryn_newsblog.cms_appconfig.NewsBlogConfig', 'NewsBlogConfig', ([], {'namespace': '"""test_revocery_admin_url"""', 'paginate_by': '(15)'}), "(namespace='test_revocery_admin_url', paginate_by=15)\n", (7073, 7126), False, 'from aldryn_newsblog.cms_appconfig import NewsBlogConfig\n'), ((7356, 7403), 'aldryn_newsblog.cms_appconfig.NewsBlogConfig.objects.filter', 'NewsBlogConfig.objects.filter', ([], {'pk': 'new_config_pk'}), '(pk=new_config_pk)\n', (7385, 7403), False, 'from aldryn_newsblog.cms_appconfig import NewsBlogConfig\n'), ((7480, 7527), 'aldryn_newsblog.cms_appconfig.NewsBlogConfig.objects.filter', 'NewsBlogConfig.objects.filter', ([], {'pk': 'new_config_pk'}), '(pk=new_config_pk)\n', (7509, 7527), False, 'from aldryn_newsblog.cms_appconfig import NewsBlogConfig\n'), ((1749, 1797), 'reversion.revisions.default_revision_manager.get_for_object', 'default_revision_manager.get_for_object', (['article'], {}), '(article)\n', (1788, 1797), False, 'from reversion.revisions import default_revision_manager\n')] |
# Library for the dynamics of a lumen network
# The lumen are 2 dimensional and symmetric and connected with 1 dimensional tubes
#
# Created by <NAME>, 2018
# Modified by <NAME>--Serandour on 8/04/2019
"""
network.py conf.init
Defines the class network and associated functions
Imports
-------
Libraries : numpy, os, math
Created by <NAME>
Modified by <NAME> on 8/06/2018
Modified by <NAME>--Serandour on 8/04/2019
"""
import numpy as np
import math
import os
class network:
def __init__(self, network_folder, out_path, t_step, tube_radius = 0.01, friction = 1, swelling = False, swelling_rate=0., save_area_dat=False):
"""
Initialization of the object network
All properties needed for the simulation are read and initialized
Input
-----
network_folder : str
out_path : str, path-like
t_step : float
Time step of the simulation. Note that if the simulation is adaptative, this time step will change.
tube_radius : float, optional, default = 0.01
Radius of the tube connecting lumens. Define the condition for empty lumens.
friction : float, optional, default = 1
Friction constant for the fluid circulating through pipes.
swelling : bool, optional, default = False
Swelling option for the simulation. True if swelling is included, False otherwise.
swelling_rate : float, optional, default = 0.
            Swelling rate value in case the swelling is considered. Make sure the rate is not too big to avoid non-converging simulations.
save_area_dat : bool, optional, default = False
Save area option. True if areas are saved in area.dat, False otherwise.
"""
self.network_folder = network_folder
# Reading properties of the lumen
self.gamma_lumen, self.gamma_contact, self.area = np.loadtxt(os.path.join(network_folder, 'lumen.dat'), dtype = float, usecols = [0,2,3], unpack = True)
# Reading links between two lumen
self.lumen_lumen = self.read_lumen_lumen(os.path.join(network_folder, 'lumen_lumen.dat'))
# Reading links between bridge and lumen
self.bridge_lumen, self.num_bridges = self.read_bridge_lumen(os.path.join(network_folder, 'bridge_lumen.dat'))
# Reading links between two bridges
self.bridge_bridge, self.num_bridges = self.read_bridge_bridge(os.path.join(network_folder, 'bridge_bridge.dat'), self.num_bridges)
# Surface tension ratio
self.alpha = self.gamma_contact/(2*self.gamma_lumen)
self.delta = np.full(len(self.alpha), 1) # Possibility of asymmetric lumen is not included
# Resistances
self.tube_radius = tube_radius # Radius of the tube connecting the lumen and the bridges
self.friction = friction # Friction coefficient; friction * length = resistance
# Opening angle of the lumen (angle between curvature and tube)
self.theta = self.set_theta()
# Area factor for expressing the pressure in terms of the area instead of the radius
self.area_factor = self.set_area_factor()
# Ending time: time at which only one lumen is remaining
self.end_time = 0
# Time step for the output of the area evolution
self.time_step = t_step
# Creating output file for the area evolution, events, error messages
self.save_area(start = True, out_path = out_path)
self.save_event('', start = True, out_path = out_path)
self.save_error('', start = True, out_path = out_path)
# Area distribution after only one lumen is remaining
self.final_area = []
# Current time step of the simulation
self.current_time = 0
# List of empty lumen (area < tube_radius **2)
self.empty_list = np.zeros(len(self.alpha))
# Swelling
self.swelling_bool = swelling
self.swelling_rate = swelling_rate
# Save area
self.save_area_dat = save_area_dat
############################################################################################################################
########################################################## Dynamics ########################################################
############################################################################################################################
def flux(self, t, state):
"""
Determines the flux/ area change for each lumen of the network, main function of network.py
Input
-----
self : network object
Needs to be called by a class object
t : float
            Actual time step (not needed for the calculation of the flux, but required by the integration method used in network_simulation.py)
state : float array
The current area of the lumens
Returns
-------
flux : float array
Contains the area change for each lumen in dt
"""
# Initialization of the array containing the area change (index == lumen ID)
flux = []
self.current_time = t
for i in range(len(self.alpha)):
flux.append(0)
# If only one lumen remains -> End of simulation, flux is zero (needed as for the integration method used, no dynamic stop is possible)
if(np.sum(self.empty_list) >= len(self.alpha) - 1):
if(self.end_time == 0):
# Setting the end time for the output file area.log
self.end_time = t
# more than one lumen remaining: calculation of the flux
else:
# Adapting network to new state: Empty lumen are removed and graph is reconnected
self.area = state
self.remove_empty_lumen()
# Area change between directly connected lumen
flux = self.flux_lumen(flux)
# Calculating artificial pressure at each bridge; linear system of equations, with flux(bridge) = 0, the bridge does not gain or loose area
pressure_bridges = self.pressure_bridges()
# Area change between lumen-bridges
flux = self.flux_bridges(flux, pressure_bridges)
# Area change due to swelling
if self.swelling_bool :
flux = self.flux_swelling(flux)
# Saving area for the time step given in the configuration file
if self.save_area_dat :
self.save_area()
self.t_old = t
if(np.abs(np.sum(flux)) > self.tube_radius ** 2):
error = 'total flux is non-zero: total flux = %f' % (np.sum(flux))
self.save_error(error)
return flux
def flux_lumen(self,flux):
"""
Determines the flux/ area change for each lumen due to the connection between lumen and lumen
Input
-----
self network object
needs to be called by a class object
flux float array
vector containing the area change for each lumen; index = lumen ID
Returns
-------
flux float array
area changes due to lumen-lumen connection added to the vector passed
"""
# for each connection between two lumen
for line in range(len(self.lumen_lumen)):
lumen_1 = int (self.lumen_lumen[line][0]) # first lumen
lumen_2 = int (self.lumen_lumen[line][1]) # second lumen
# flux from lumen 2 to lumen 1
fl = (self.pressure(lumen_2) - self.pressure(lumen_1))*self.friction/self.lumen_lumen[line][2]
flux[lumen_1] += fl
flux[lumen_2] -= fl
return flux
def pressure_bridges(self):
"""
Determines the pressure at each bridge
for each bridge the total flux is 0, meaning that the bridge does not gain or loose area
this gives a linear equation system, which can be solved
The connections are taken from the files bridge_lumen.dat and bridge_bridge.dat
For Information about the equations see the documentation to the code
Input
-----
self : network object
Needs to be called by a class object
Returns
-------
pressure_bridges : float array
Pressure at each bridge
"""
        R_sum = np.zeros(self.num_bridges, dtype = float)         # sum of the resistances around one bridge
P_over_R_sum = np.zeros(self.num_bridges, dtype = float) # sum of pressure over resistance between one bridge and all directly connected lumen
matrix_bridges = np.zeros([self.num_bridges, self.num_bridges], dtype= float) # matrix to calculate the pressure at each bridge
# For each connection between bridge and lumen
for line in self.bridge_lumen:
bridge = int(line[0])
lumen = int(line[1])
R_sum[bridge] += 1./line[2]*self.friction
P_over_R_sum[bridge] += self.pressure(lumen)/line[2]*self.friction
# For each connection between bridge and bridge
for line in self.bridge_bridge:
bridge1 = int(line[0])
bridge2 = int(line[1])
matrix_bridges[bridge1][bridge2] = 1./line[2]*self.friction
matrix_bridges[bridge2][bridge1] = 1./line[2]*self.friction
R_sum[bridge1] += 1./line[2]*self.friction
R_sum[bridge2] += 1./line[2]*self.friction
for line in range(self.num_bridges):
matrix_bridges[line][line] = -R_sum[line]
# Solving linear problem with the pressure at each bridge as solution
pressure_bridges = np.linalg.solve(matrix_bridges, -P_over_R_sum)
return pressure_bridges;
def flux_bridges(self, flux, pressure_bridges):
"""
Determines the flux/ area change for each lumen due to the connection between lumen and bridge
Input
-----
self : network object
Needs to be called by a class object
Returns
-------
flux : float array
Area changes due to bridge-lumen connection added to the vector passed
"""
# Area change in one bridge; should be 0; calculated as control value
flux_bridge = np.zeros(self.num_bridges, dtype = float)
# For each connection between bridge and bridge
for line in self.bridge_bridge:
bridge1 = int(line[0])
bridge2 = int(line[1])
fb = (pressure_bridges[bridge2] - pressure_bridges[bridge1])*self.friction/line[2]
flux_bridge[bridge1] += fb
flux_bridge[bridge2] -= fb
# For each connection between bridge and lumen
for line in self.bridge_lumen:
bridge = int(line[0])
lumen = int(line[1])
fl = (pressure_bridges[bridge] - self.pressure(lumen))*self.friction/line[2]
flux[lumen] += fl
flux_bridge[bridge] -= fl
for i in range(len(flux_bridge)):
if (np.abs(flux_bridge[i]) > self.tube_radius ** 2):
error = 'total flux of bridge %d is non-zero: total flux = %f' % (i,flux_bridge[i])
self.save_error(error)
return flux
def flux_swelling(self, flux) :
"""
        Determines the flux/ area change for each lumen due to swelling
Input
-----
self : network object
Needs to be called by a class object
Returns
-------
flux : float array
            Area changes due to swelling added to the vector passed
"""
# for each lumen (lumen is the index of the lumen's area)
for lumen in range(len(self.area)) :
# if not empty
if not self.area[lumen] < 2*self.tube_radius ** 2 :
# then add the swelling contribution
flux[lumen] += self.swelling(lumen)
return flux
############################################################################################################################
###################################################### Removing Functions #####################################################
############################################################################################################################
def remove_empty_lumen(self):
"""
Determines and removes empty lumen
Calls a function to obtain a list of empty lumen and passes the list to a function to remove them and reconnect the network
Input
-----
self : network object
Needs to be called by a class object
Returns
-------
no return
"""
empty_lumen_list = []
# Creating a list of empty lumen
empty_lumen_list = self.get_empty_lumen()
# Removing empty lumen and reconnecting the network
if (len(empty_lumen_list) > 0 ):
event = 'empty lumen: ' + ' '.join(map(str, empty_lumen_list))
#print event
self.save_event(event)
self.remove_lumen(empty_lumen_list)
return;
def remove_lumen(self, lumen_to_remove):
"""
Removes the lumen that are passed and connects the neighbors of these lumen
Input
-----
self : network object
Needs to be called by a class object
lumen_to_remove : int list
List of lumen to be removed
Returns
-------
no return
"""
# For each lumen that has to be removed
for lumen in lumen_to_remove:
neighbours = self.get_neighbours(lumen) # List of connected lumen
bridges = self.get_bridges(lumen) # List of connected bridges
self.save_event('lumen ' + str(lumen) + ' neighbours ' + str(neighbours))
self.save_event('lumen ' + str(lumen) + ' bridges ' + str(bridges))
# Lumen had two connections, this means that it disappears and the two connected parts get directly connected, the resistance for the new link is the sum of the resistance of the two previous connections
test=True
if(len(neighbours) + len(bridges) == 2):
# Lumen was connected to two lumen -> new connection between lumen and lumen
if(len(neighbours) == 2):
self.create_link([neighbours[0][0], neighbours[1][0], neighbours[0][1] + neighbours[1][1]])
#print 'lumen_lumen connexion (' + str(neighbours[0][0]) + ', ' + str(neighbours[1][0]) + ')'
# Lumen was connected to a lumen and a bridge -> new connection between lumen and bridge
if(len(neighbours) == 1 and len(bridges)==1):
self.create_bridge_lumen([bridges[0][0], neighbours[0][0], bridges[0][1] + neighbours[0][1]])
#print 'lumen_bridge connexion (' + str(bridges[0][0]) + ', ' + str(neighbours[0][0]) + ')'
# Lumen was connected to two bridges -> new connection between bridge and bridge
if(len(bridges)==2):
self.create_bridge_bridge([bridges[0][0], bridges[1][0], bridges[0][1] + bridges[1][1]])
#print 'bridge_bridge connexion (' + str(bridges[0][0]) + ', ' + str(bridges[1][0]) + ')'
self.create_bridge(neighbours, bridges, lumid=lumen)
# Lumen had more than two connections -> becomes a bridge, the resistances remain the same but the connections are changed to connections to a bridge
if(len(neighbours) + len(bridges) > 2):
self.create_bridge(neighbours, bridges, lumid=lumen)
return;
def remove_link(self, lumen_1, lumen_2):
"""
Removes a connection between two lumen
Input
-----
self : network object
Needs to be called by a class object
lumen_1 : int
First lumen of the connection
        lumen_2 : int
Second lumen of the connection
Returns
-------
no return
"""
# Due to data structure first lumen must be smaller than second lumen
if(lumen_1 > lumen_2):
n = lumen_1
lumen_1 = lumen_2
lumen_2 = n
# Find connection in lumen_lumen file and remove it
line = 0
# For each line in lumen_lumen until connection is found
while (line < len(self.lumen_lumen)):
# If connection is found removing it
if(self.lumen_lumen[line][0] == lumen_1 and self.lumen_lumen[line][1] == lumen_2):
event = 'link lumen %d to lumen %d removed' % (lumen_1, lumen_2)
#print event
self.save_event(event)
link = [lumen_1, lumen_2, self.lumen_lumen[line][2]]
self.lumen_lumen.remove(link)
break;
# Look at next line
else: line += 1
############################################################################################################################
###################################################### Get Functions #####################################################
############################################################################################################################
def get_empty_lumen(self):
"""
Gets the IDs of the empty lumen
Empty means that the area is smaller than the tube_radius^2
Input
-----
self : network object
Needs to be called by a class object
Returns
-------
empty_lumen_list : int list
Contains the IDs of the empty lumens
"""
empty_lumen_list = []
# For each lumen ID
for i in range(len(self.area)):
# If area is smaller than the treshhold
if(self.area[i] < self.tube_radius ** 2 and self.empty_list[i] == 0):
self.empty_list[i] = 1
self.area[i] = 0
empty_lumen_list.append(i)
return empty_lumen_list
def get_neighbours(self, lumen):
"""
Gets the lumen that are directly connected to the lumen passed on and deletes the connections
Input
-----
self : network object
Needs to be called by a class object
lumen : int
ID of a lumen
Returns
-------
neighbour_list : int list
ID of all lumen that are directly connected to the lumen passed on
"""
neighbour_list = []
line = 0
# Going through links in lumen_lumen.dat
while line < len(self.lumen_lumen) and self.lumen_lumen[line][0] < lumen :
if self.lumen_lumen[line][1] == lumen :
neighbour_list.append([self.lumen_lumen[line][0], self.lumen_lumen[line][2]])
event = 'link lumen %d to lumen %d removed' % (self.lumen_lumen[line][0], lumen)
self.save_event(event)
link = [self.lumen_lumen[line][0], self.lumen_lumen[line][1], self.lumen_lumen[line][2]]
self.lumen_lumen.remove(link)
else : line += 1
while line < len(self.lumen_lumen) and self.lumen_lumen[line][0] < lumen :
line += 1
while(line < len(self.lumen_lumen) and self.lumen_lumen[line][0] == lumen):
neighbour_list.append([self.lumen_lumen[line][1], self.lumen_lumen[line][2]])
event = 'link lumen %d to lumen %d removed' % (lumen, self.lumen_lumen[line][1])
self.save_event(event)
link = [self.lumen_lumen[line][0], self.lumen_lumen[line][1], self.lumen_lumen[line][2]]
self.lumen_lumen.remove(link)
return neighbour_list
def get_bridges(self, lumen):
"""
Gets the bridges that are directly connected to the lumen passed on
Input
-----
self : network object
Needs to be called by a class object
lumen : int
ID of a lumen
Returns
-------
        bridge_list : int list
            ID and length of all bridges that are directly connected to the lumen passed on
"""
bridge_list = []
line = 0
# Going through the links in bridge_lumen.dat
while(line < len(self.bridge_lumen)):
if (self.bridge_lumen[line][1] == lumen):
bridge_list.append([self.bridge_lumen[line][0], self.bridge_lumen[line][2]])
event = 'link bridge %d to lumen %d removed' % (self.bridge_lumen[line][0], lumen)
self.save_event(event)
self.bridge_lumen.remove(self.bridge_lumen[line])
else: line += 1
return bridge_list
############################################################################################################################
#################################################### Creating Functions ###################################################
############################################################################################################################
def create_link(self, link):
"""
Creates a link between two lumen in lumen_lumen.dat
Input
-----
self : network object
Needs to be called by a class object
link : float array
[ID lumen1, ID lumen2, length]
Returns
-------
no return
"""
# no self-loops allowed
        if(len(link) == 3 and link[0] != link[1]):
            # Ensuring: lumen_1 < lumen_2
            if(link[0] < link[1]):
lumen_1 = link[0]
lumen_2 = link[1]
else:
lumen_1 = link[1]
lumen_2 = link[0]
length = link[2]
line = 0
# Finding line in lumen_lumen.dat, to keep the sorting
while(line < len(self.lumen_lumen) and lumen_1 > self.lumen_lumen[line][0]): line += 1
if(line < len(self.lumen_lumen) - 1):
while(line < len(self.lumen_lumen) and lumen_2 > self.lumen_lumen[line][1] and lumen_1 == self.lumen_lumen[line][0]): line += 1
# Creating the link in lumen_lumen.dat
self.lumen_lumen.append([lumen_1,lumen_2, length])
self.lumen_lumen.sort()
event = 'link lumen %d to lumen %d created' % (lumen_1,lumen_2)
self.save_event(event)
return;
def create_bridge_lumen(self, link):
"""
Creates a link between a lumen and a bridge in bridge_lumen.dat
Input
-----
self : network object
Needs to be called by a class object
link : float array
[ID bridge, ID lumen, length]
Returns
-------
no return
"""
bridge = link[0]
lumen = link[1]
length = link[2]
line = 0
# Creating the link in bridge_lumen.dat
self.bridge_lumen.append(link)
self.bridge_lumen.sort()
event = 'link bridge %d to lumen %d created' % (bridge,lumen)
self.save_event(event)
return;
def create_bridge_bridge(self, link):
"""
Creates a link between two bridges in bridge_bridge.dat
Input
-----
self : network object
Needs to be called by a class object
link : float array
[ID bridge1, ID bridge2, length]
Returns
-------
no return
"""
if(link[0] == link[1]): return;
if(link[0] < link[1]):
bridge_1 = link[0]
bridge_2 = link[1]
else:
bridge_1 = link[1]
bridge_2 = link[0]
length = link[2]
line = 0
# Creating the link in bridge_bridge.dat
self.bridge_bridge.append([bridge_1,bridge_2, length])
self.bridge_bridge.sort()
event = 'link bridge %d to bridge %d created' % (bridge_1,bridge_2)
self.save_event(event)
return;
def create_bridge(self, lumen, bridge, lumid):
"""
Creates a new bridge connected with the lumen and bridges passed on
Input
-----
self : network object
Needs to be called by a class object
lumen : int list
[[lumen ID, length], [lumen ID, length],.....]
lumen IDs to which the new bridge should be connected to
bridge : int list
[[bridge ID, length], [bridge ID, length],.....]
bridge IDs to which the new bridge should be connected to
Returns
-------
no return
"""
#####
bridge_conversionfile = os.path.join(self.network_folder,'bridgesconversion.txt')
# ID of the new bridge
bridge_number = self.num_bridges
# Bridge ID counter, contains the ID of the next new bridge
self.num_bridges += 1
event = 'new bridge %d' % (bridge_number) + ' (' + str(lumid) + ')'
self.save_event(event)
line = 0
lumen.sort()
bridge.sort()
# For each lumen that should be connected to the new bridge
for i in range(len(lumen)):
new_link = [bridge_number, lumen[i][0], lumen[i][1]]
# Create link in bridge_lumen.dat
self.create_bridge_lumen(new_link)
# For each lumen that should be connected to the new bridge
for i in range(len(bridge)):
new_link = [bridge[i][0], bridge_number, bridge[i][1]]
# Create link in bridge_bridge.dat
self.create_bridge_bridge(new_link)
open(bridge_conversionfile, 'a').write(str(bridge_number) + ' ' + str(lumid)+ '\n')
return;
############################################################################################################################
################################ Geometric Functions for area and Pressure ###############################################
############################################################################################################################
def set_theta(self):
"""
Sets the angle theta
Calculates the angle theta, angle between the lumen and the tube
Input
-----
self : network object
Needs to be called by a class object
Returns
-------
theta : float list
Theta value for each lumen
"""
theta = []
for i in range(len(self.alpha)):
            #cos = (2*self.alpha[i]-(4*self.alpha[i]**2-self.delta[i]**2+1)/(4*self.alpha[i]))/self.delta[i] ## Old version, for asymmetric lumen
#theta.append(math.acos(cos))
theta.append(np.arccos(self.alpha[i]))
return theta;
def set_area_factor(self):
"""
Sets the area factor, needed to express the pressure in terms of the area instead of the curvature radius
Input
-----
self : network object
Needs to be called by a class object
Returns
-------
area_factor : float list
Area factor for each lumen
"""
area_factor = []
for i in range(len(self.alpha)):
area_factor.append(np.sqrt((2*self.theta[i]-np.sin(2*self.theta[i]))))
return area_factor;
def opening_radius(self, lumen):
"""
Calculates the length/2 parallel to the 'tube' where the membrane is not attached for a given lumen
Input
-----
lumen : int
ID of the lumen
Returns
-------
radius : float
Length/2 of the opening radius
"""
return np.sqrt(2*self.area[lumen]/(2*self.theta[lumen]-np.sin(2*self.theta[lumen])))*np.sin(self.theta[lumen])
def get_area(self, lumen):
"""
Calculates the area in one half of the lumen (for symmetric lumen)
Input
-----
lumen : int
ID of the lumen
Returns
-------
area : float
Area/2 of the lumen
"""
area = self.area[lumen]
return area
def pressure(self,lumen):
"""
Calculates the pressure inside the lumen (for symmetric lumen)
Input
-----
lumen : int
ID of the lumen
Returns
-------
pressure : float
Pressure of the lumen
"""
area = self.get_area(lumen)
# Avoid dividing by zero
if(area < 0.1 * self.tube_radius**2 ):
error = 'division by zero in pressure: lumen ID: %d' % (lumen)
self.save_error(error)
pressure = self.gamma_lumen[lumen]*self.area_factor[lumen]/np.sqrt(area)
return pressure
############################################################################################################################
################################################# Reading Functions ########################################################
############################################################################################################################
def read_lumen_lumen(self, lumen_lumen_file):
"""
Reading the file with links between two lumens
Input
-----
lumen_lumen_file : str
File path to file with the links between two lumens
Returns
-------
lumen_lumen : float list [lumen1, lumen2, length]
Information about the links between two lumens
"""
if (os.path.getsize(lumen_lumen_file)>0): # If the file is not empty
lumen_1, lumen_2 = np.loadtxt(lumen_lumen_file, dtype = int, usecols = [0,1], unpack = True)
length = np.loadtxt(lumen_lumen_file, dtype = float, usecols = [2])
lumen_lumen = np.column_stack([lumen_1, lumen_2, length]).tolist()
else:
lumen_lumen = []
return lumen_lumen
def read_bridge_lumen(self, bridge_lumen_file):
"""
Reading the file with links between bridge and lumen
Input
-----
bridge_lumen_file : str
File path to file with the links between bridge and lumen
Returns
-------
bridge_lumen : float list [bridge, lumen, length]
Information about the links between bridge and lumen
num_bridges : int
Number of bridge_lumen links
"""
with open(bridge_lumen_file, 'r') as f:
lines = f.read().splitlines()
last_line = lines[-1]
if ('#' in last_line): # If the file is empty
bridge_lumen = []
num_bridges = 0 # number of existing bridges
else:
bridge, lumen = np.loadtxt(bridge_lumen_file, dtype = int, usecols = [0,1], unpack = True)
length = np.loadtxt(bridge_lumen_file, dtype = float, usecols = [2])
bridge_lumen = np.column_stack([bridge, lumen, length]).tolist()
num_bridges = max(bridge)+1 # number of existing bridges
return bridge_lumen, num_bridges
def read_bridge_bridge(self, bridge_bridge_file, num_bridges):
"""
Reading the file with links between two bridge
Input
-----
bridge_bridge_file : str
File path to file with the links between two bridge
Returns
-------
bridge_bridge : float list [bridge1, bridge2, length]
Information about the links between two bridge
num : int
Number of bridge_bridge links
"""
with open(bridge_bridge_file, 'r') as f:
lines = f.read().splitlines()
last_line = lines[-1]
        if ('#' in last_line): # If the file is empty
bridge_bridge = []
num = num_bridges
else:
bridge1, bridge2 = np.loadtxt(bridge_bridge_file, dtype = int, usecols = [0,1], unpack = True)
length = np.loadtxt(bridge_bridge_file, dtype = float, usecols = [2])
bridge_bridge = np.column_stack([bridge1, bridge2, length]).tolist()
            num = num_bridges
            if (max(bridge2)+1 > num_bridges): num = max(bridge2)+1
return bridge_bridge, num
############################################################################################################################
################################################# Output functions #########################################################
############################################################################################################################
def save_event(self, event, start = False, out_path = ''):
"""
Saves each event in the output folder in the file event.dat
Events like a lumen disappearing, reconnections in the graph
Input
-----
event : str
Message of the event
start : boolean
True: File is created
False: the message is stored in the file
Returns
------
no return
"""
if(start):
header_event = '# Saves each event during the simulation; event is a disappearing lumen, graph reconnection \n'
self.file_event = os.path.join(out_path, 'event.dat')
fevent = open(self.file_event, 'w')
fevent.write(header_event)
fevent.close()
else:
fevent = open(self.file_event, 'a')
fevent.write('%.5f' % self.current_time)
fevent.write(' ')
fevent.write(event)
fevent.write('\n')
fevent.close()
return;
def save_error(self, error, start = False, out_path = ''):
"""
Saves errors in the output folder in the file error.dat
Errors like volume loss
Input
-----
error : string
Message of the event
start : boolean
True: File is created
False: the message is stored in the file
Returns
------
no return
"""
if(start):
header_error = '# Saves each warning like volume loss \n'
self.file_error = os.path.join(out_path, 'error.dat')
ferror = open(self.file_error, 'w')
ferror.write(header_error)
ferror.close()
else:
ferror = open(self.file_error, 'a')
ferror.write('%.5f' % self.current_time)
ferror.write(' ')
ferror.write(error)
ferror.write('\n')
ferror.close()
return;
def save_area(self, start = False, out_path = ''):
"""
Saves the volume evolution in the output folder in the file area.dat
Input
-----
start : boolean
True: File is created
False: the message is stored in the file
Returns
------
no return
"""
if(start):
header_volume = '# Saves the volume evolution of each lumen for the time step %f \n' %(self.time_step)
self.file_area = os.path.join(out_path, 'area.dat')
farea = open(self.file_area, 'w')
farea.write(header_volume)
farea.close()
self.t_old = 0
else:
farea = open(self.file_area, 'a')
farea.write('%.5f' % self.current_time)
farea.write(' ')
farea.write(' '.join(map(str, self.area)))
farea.write('\n')
farea.close()
return;
############################################################################################################################
################################################# Swelling functions #######################################################
############################################################################################################################
def swelling(self, lumen) :
"""
self.swelling(lumen)
        Calculates the input flux for the area of a given lumen, due to swelling.
Input
-----
lumen : int
Index of the lumen
"""
area = self.get_area(lumen)
theta = self.theta[lumen]
flux_swelling = self.swelling_rate * 4 * theta * np.sqrt(area)/ self.area_factor[lumen]
#print flux_swelling
return flux_swelling
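# --- Hedged usage sketch (appended; not part of the original library) ---
# Shows how the class above is typically driven. The folder and step size are
# placeholders: 'network_folder' must contain lumen.dat, lumen_lumen.dat,
# bridge_lumen.dat and bridge_bridge.dat in the formats parsed by the read_*
# methods, and 'out_path' must be an existing directory for the output files.
if __name__ == '__main__':
    net = network(network_folder='example_network', out_path='out',
                  t_step=0.01, swelling=False, save_area_dat=True)
    # One explicit Euler step of the area dynamics; in practice an ODE
    # integrator (e.g. scipy.integrate.solve_ivp) would call net.flux instead.
    dA_dt = net.flux(0.0, net.area)
    net.area = net.area + net.time_step * np.asarray(dA_dt)
    print(net.area)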
| [
"numpy.abs",
"os.path.getsize",
"numpy.linalg.solve",
"numpy.sqrt",
"numpy.arccos",
"os.path.join",
"numpy.column_stack",
"numpy.sum",
"numpy.zeros",
"numpy.sin",
"numpy.loadtxt"
]
| [((8944, 8983), 'numpy.zeros', 'np.zeros', (['self.num_bridges'], {'dtype': 'float'}), '(self.num_bridges, dtype=float)\n', (8952, 8983), True, 'import numpy as np\n'), ((9052, 9091), 'numpy.zeros', 'np.zeros', (['self.num_bridges'], {'dtype': 'float'}), '(self.num_bridges, dtype=float)\n', (9060, 9091), True, 'import numpy as np\n'), ((9205, 9264), 'numpy.zeros', 'np.zeros', (['[self.num_bridges, self.num_bridges]'], {'dtype': 'float'}), '([self.num_bridges, self.num_bridges], dtype=float)\n', (9213, 9264), True, 'import numpy as np\n'), ((10261, 10307), 'numpy.linalg.solve', 'np.linalg.solve', (['matrix_bridges', '(-P_over_R_sum)'], {}), '(matrix_bridges, -P_over_R_sum)\n', (10276, 10307), True, 'import numpy as np\n'), ((10950, 10989), 'numpy.zeros', 'np.zeros', (['self.num_bridges'], {'dtype': 'float'}), '(self.num_bridges, dtype=float)\n', (10958, 10989), True, 'import numpy as np\n'), ((26410, 26468), 'os.path.join', 'os.path.join', (['self.network_folder', '"""bridgesconversion.txt"""'], {}), "(self.network_folder, 'bridgesconversion.txt')\n", (26422, 26468), False, 'import os\n'), ((2057, 2098), 'os.path.join', 'os.path.join', (['network_folder', '"""lumen.dat"""'], {}), "(network_folder, 'lumen.dat')\n", (2069, 2098), False, 'import os\n'), ((2250, 2297), 'os.path.join', 'os.path.join', (['network_folder', '"""lumen_lumen.dat"""'], {}), "(network_folder, 'lumen_lumen.dat')\n", (2262, 2297), False, 'import os\n'), ((2426, 2474), 'os.path.join', 'os.path.join', (['network_folder', '"""bridge_lumen.dat"""'], {}), "(network_folder, 'bridge_lumen.dat')\n", (2438, 2474), False, 'import os\n'), ((2592, 2641), 'os.path.join', 'os.path.join', (['network_folder', '"""bridge_bridge.dat"""'], {}), "(network_folder, 'bridge_bridge.dat')\n", (2604, 2641), False, 'import os\n'), ((5691, 5714), 'numpy.sum', 'np.sum', (['self.empty_list'], {}), '(self.empty_list)\n', (5697, 5714), True, 'import numpy as np\n'), ((29767, 29792), 'numpy.sin', 'np.sin', (['self.theta[lumen]'], {}), '(self.theta[lumen])\n', (29773, 29792), True, 'import numpy as np\n'), ((30856, 30869), 'numpy.sqrt', 'np.sqrt', (['area'], {}), '(area)\n', (30863, 30869), True, 'import numpy as np\n'), ((31757, 31790), 'os.path.getsize', 'os.path.getsize', (['lumen_lumen_file'], {}), '(lumen_lumen_file)\n', (31772, 31790), False, 'import os\n'), ((31853, 31921), 'numpy.loadtxt', 'np.loadtxt', (['lumen_lumen_file'], {'dtype': 'int', 'usecols': '[0, 1]', 'unpack': '(True)'}), '(lumen_lumen_file, dtype=int, usecols=[0, 1], unpack=True)\n', (31863, 31921), True, 'import numpy as np\n'), ((31948, 32002), 'numpy.loadtxt', 'np.loadtxt', (['lumen_lumen_file'], {'dtype': 'float', 'usecols': '[2]'}), '(lumen_lumen_file, dtype=float, usecols=[2])\n', (31958, 32002), True, 'import numpy as np\n'), ((33042, 33111), 'numpy.loadtxt', 'np.loadtxt', (['bridge_lumen_file'], {'dtype': 'int', 'usecols': '[0, 1]', 'unpack': '(True)'}), '(bridge_lumen_file, dtype=int, usecols=[0, 1], unpack=True)\n', (33052, 33111), True, 'import numpy as np\n'), ((33138, 33193), 'numpy.loadtxt', 'np.loadtxt', (['bridge_lumen_file'], {'dtype': 'float', 'usecols': '[2]'}), '(bridge_lumen_file, dtype=float, usecols=[2])\n', (33148, 33193), True, 'import numpy as np\n'), ((34228, 34298), 'numpy.loadtxt', 'np.loadtxt', (['bridge_bridge_file'], {'dtype': 'int', 'usecols': '[0, 1]', 'unpack': '(True)'}), '(bridge_bridge_file, dtype=int, usecols=[0, 1], unpack=True)\n', (34238, 34298), True, 'import numpy as np\n'), ((34325, 34381), 'numpy.loadtxt', 'np.loadtxt', 
(['bridge_bridge_file'], {'dtype': 'float', 'usecols': '[2]'}), '(bridge_bridge_file, dtype=float, usecols=[2])\n', (34335, 34381), True, 'import numpy as np\n'), ((35641, 35676), 'os.path.join', 'os.path.join', (['out_path', '"""event.dat"""'], {}), "(out_path, 'event.dat')\n", (35653, 35676), False, 'import os\n'), ((36646, 36681), 'os.path.join', 'os.path.join', (['out_path', '"""error.dat"""'], {}), "(out_path, 'error.dat')\n", (36658, 36681), False, 'import os\n'), ((37605, 37639), 'os.path.join', 'os.path.join', (['out_path', '"""area.dat"""'], {}), "(out_path, 'area.dat')\n", (37617, 37639), False, 'import os\n'), ((6949, 6961), 'numpy.sum', 'np.sum', (['flux'], {}), '(flux)\n', (6955, 6961), True, 'import numpy as np\n'), ((7054, 7066), 'numpy.sum', 'np.sum', (['flux'], {}), '(flux)\n', (7060, 7066), True, 'import numpy as np\n'), ((11730, 11752), 'numpy.abs', 'np.abs', (['flux_bridge[i]'], {}), '(flux_bridge[i])\n', (11736, 11752), True, 'import numpy as np\n'), ((28579, 28603), 'numpy.arccos', 'np.arccos', (['self.alpha[i]'], {}), '(self.alpha[i])\n', (28588, 28603), True, 'import numpy as np\n'), ((38854, 38867), 'numpy.sqrt', 'np.sqrt', (['area'], {}), '(area)\n', (38861, 38867), True, 'import numpy as np\n'), ((32033, 32076), 'numpy.column_stack', 'np.column_stack', (['[lumen_1, lumen_2, length]'], {}), '([lumen_1, lumen_2, length])\n', (32048, 32076), True, 'import numpy as np\n'), ((33225, 33265), 'numpy.column_stack', 'np.column_stack', (['[bridge, lumen, length]'], {}), '([bridge, lumen, length])\n', (33240, 33265), True, 'import numpy as np\n'), ((34414, 34457), 'numpy.column_stack', 'np.column_stack', (['[bridge1, bridge2, length]'], {}), '([bridge1, bridge2, length])\n', (34429, 34457), True, 'import numpy as np\n'), ((29209, 29234), 'numpy.sin', 'np.sin', (['(2 * self.theta[i])'], {}), '(2 * self.theta[i])\n', (29215, 29234), True, 'import numpy as np\n'), ((29737, 29766), 'numpy.sin', 'np.sin', (['(2 * self.theta[lumen])'], {}), '(2 * self.theta[lumen])\n', (29743, 29766), True, 'import numpy as np\n')] |
# Illustrate upsampling in 2d
# Code from <NAME>
# https://machinelearningmastery.com/generative_adversarial_networks/
import tensorflow as tf
from tensorflow import keras
from numpy import asarray
#from keras.models import Sequential
from tensorflow.keras.models import Sequential
#from keras.layers import UpSampling2D
from tensorflow.keras.layers import UpSampling2D
X = asarray([[1, 2],
[3, 4]])
X = asarray([[1, 2, 3],
[4, 5, 6],
[7,8,9]])
print(X)
nr = X.shape[0]
nc = X.shape[1]
# reshape input data into one sample with a single channel
X = X.reshape((1, nr, nc, 1))
model = Sequential()
model.add(UpSampling2D(input_shape=(nr, nc, 1))) # nearest neighbor
yhat = model.predict(X)
yhat = yhat.reshape((2*nr, 2*nc))
print(yhat)
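# (Added) cross-check, assuming TF2 eager execution: tf.image.resize with
# method='nearest' should reproduce the UpSampling2D output above.
yhat_tf = tf.image.resize(tf.cast(X, tf.float32), [2*nr, 2*nc], method='nearest')
print(yhat_tf.numpy().reshape((2*nr, 2*nc)))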
model = Sequential()
model.add(UpSampling2D(input_shape=(nr, nc, 1), interpolation='bilinear')) # bilinear
yhat = model.predict(X)
yhat = yhat.reshape((2*nr, 2*nc))
print(yhat)
| [
"tensorflow.keras.layers.UpSampling2D",
"numpy.asarray",
"tensorflow.keras.models.Sequential"
]
| [((380, 405), 'numpy.asarray', 'asarray', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (387, 405), False, 'from numpy import asarray\n'), ((415, 457), 'numpy.asarray', 'asarray', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]'], {}), '([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n', (422, 457), False, 'from numpy import asarray\n'), ((616, 628), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (626, 628), False, 'from tensorflow.keras.models import Sequential\n'), ((777, 789), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (787, 789), False, 'from tensorflow.keras.models import Sequential\n'), ((639, 676), 'tensorflow.keras.layers.UpSampling2D', 'UpSampling2D', ([], {'input_shape': '(nr, nc, 1)'}), '(input_shape=(nr, nc, 1))\n', (651, 676), False, 'from tensorflow.keras.layers import UpSampling2D\n'), ((800, 863), 'tensorflow.keras.layers.UpSampling2D', 'UpSampling2D', ([], {'input_shape': '(nc, nc, 1)', 'interpolation': '"""bilinear"""'}), "(input_shape=(nc, nc, 1), interpolation='bilinear')\n", (812, 863), False, 'from tensorflow.keras.layers import UpSampling2D\n')] |
from flask import Flask, Response
from flask_basicauth import BasicAuth
from flask_cors import CORS, cross_origin
import os
#from flask_admin import Admin,AdminIndexView
#from flask_admin.contrib.sqla import ModelView
from flask_sqlalchemy import SQLAlchemy as _BaseSQLAlchemy
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from werkzeug.exceptions import HTTPException
from flask_login import LoginManager
from itsdangerous import URLSafeSerializer
# import psycopg2
# import pymysql
# import logging
# import warnings
# warnings.filterwarnings("ignore")
# Initializing Flask App
app = Flask(__name__)
app.secret_key="Vampire"
# This video demonstrates why we use CORS in our Flask App - https://www.youtube.com/watch?v=vWl5XcvQBx0
CORS(app)
app.config.from_object("config.DevelopmentConfig")
class SQLAlchemy(_BaseSQLAlchemy):
"""
This class is defined so that we can set "pool_pre_ping" to True.
pool_pre_ping is a boolean flag, which when set to True,
will enable the connection pool 'pre-ping' feature
that tests connections for liveness upon each checkout.
This prevents from dropping of database connection with our app.
This class inherits the original SQLAlchemy class,
and nothing else is changed except pool_pre_ping flag
https://docs.sqlalchemy.org/en/13/core/pooling.html#dealing-with-disconnects
https://github.com/pallets/flask-sqlalchemy/issues/589
"""
def apply_pool_defaults(self, app, options):
super(SQLAlchemy, self).apply_pool_defaults(app, options)
options["pool_pre_ping"] = True
# Creating and Initializing db object of SQLAlchemy class
db = SQLAlchemy(app)
db.init_app(app)
migrate = Migrate(app, db, render_as_batch=True)
with app.app_context():
if db.engine.url.drivername == 'sqlite':
migrate.init_app(app, db, render_as_batch=True)
else:
migrate.init_app(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
# Creating serializer object of URLSafeSerializer class for serializing session_token
serializer = URLSafeSerializer(app.secret_key)
# Here we set session_token as our user_loader.
from bookstore.client.views import client
from bookstore.admin.views import admin
app.register_blueprint(client)
app.register_blueprint(admin)
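# --- Hedged aside (appended; not part of the original app) ---
# Recent Flask-SQLAlchemy releases (>= 2.4) can enable the same liveness check
# purely through configuration instead of subclassing SQLAlchemy, e.g.:
#     app.config['SQLALCHEMY_ENGINE_OPTIONS'] = {'pool_pre_ping': True}
# The subclass above is kept because it also works on older releases.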
| [
"flask_cors.CORS",
"flask.Flask",
"itsdangerous.URLSafeSerializer",
"flask_script.Manager",
"flask_migrate.Migrate"
]
| [((631, 646), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (636, 646), False, 'from flask import Flask, Response\n'), ((781, 790), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (785, 790), False, 'from flask_cors import CORS, cross_origin\n'), ((1685, 1723), 'flask_migrate.Migrate', 'Migrate', (['app', 'db'], {'render_as_batch': '(True)'}), '(app, db, render_as_batch=True)\n', (1692, 1723), False, 'from flask_migrate import Migrate, MigrateCommand\n'), ((1887, 1899), 'flask_script.Manager', 'Manager', (['app'], {}), '(app)\n', (1894, 1899), False, 'from flask_script import Manager\n'), ((2046, 2079), 'itsdangerous.URLSafeSerializer', 'URLSafeSerializer', (['app.secret_key'], {}), '(app.secret_key)\n', (2063, 2079), False, 'from itsdangerous import URLSafeSerializer\n')] |
# Python
import unittest
from copy import deepcopy
from unittest.mock import Mock
# ATS
from pyats.topology import Device
# Genie
from genie.libs.ops.dot1x.ios.dot1x import Dot1X
from genie.libs.ops.dot1x.ios.tests.dot1x_output import Dot1xOutput
# Parser
from genie.libs.parser.ios.show_dot1x import ShowDot1xAllDetail, \
ShowDot1xAllStatistics, \
ShowDot1xAllSummary, \
ShowDot1xAllCount
class test_dot1x(unittest.TestCase):
def setUp(self):
self.device = Device(name='aDevice')
self.device.os = 'ios'
self.device.custom['abstraction'] = {'order':['os']}
self.device.mapping={}
self.device.mapping['cli']='cli'
# Give the device as a connection type
# This is done in order to call the parser on the output provided
self.device.connectionmgr.connections['cli'] = self.device
def test_complete_output(self):
self.maxDiff = None
dot1x = Dot1X(device=self.device)
# Get outputs
dot1x.maker.outputs[ShowDot1xAllDetail] = \
{'': Dot1xOutput.ShowDot1xAllDetail}
dot1x.maker.outputs[ShowDot1xAllStatistics] = \
{'': Dot1xOutput.ShowDot1xAllStatistics}
dot1x.maker.outputs[ShowDot1xAllSummary] = \
{'': Dot1xOutput.ShowDot1xAllSummary}
dot1x.maker.outputs[ShowDot1xAllCount] = \
{'': Dot1xOutput.ShowDot1xAllCount}
# Learn the feature
dot1x.learn()
# Verify Ops was created successfully
self.assertEqual(dot1x.info, Dot1xOutput.Dot1x_info)
# Check Selected Attributes
self.assertEqual(dot1x.info['version'], 3)
# info - mdot1x default
self.assertEqual(dot1x.info['interfaces']['GigabitEthernet1/0/9']\
['max_start'], 3)
def test_empty_output(self):
self.maxDiff = None
dot1x = Dot1X(device=self.device)
dot1x.maker.outputs[ShowDot1xAllDetail] = \
{'': {}}
dot1x.maker.outputs[ShowDot1xAllStatistics] = \
{'': {}}
dot1x.maker.outputs[ShowDot1xAllSummary] = \
{'': {}}
dot1x.maker.outputs[ShowDot1xAllCount] = \
{'': {}}
# Learn the feature
dot1x.learn()
# Check no attribute not found
with self.assertRaises(AttributeError):
dot1x.info['version']
def test_incomplete_output(self):
self.maxDiff = None
dot1x = Dot1X(device=self.device)
# Get outputs
dot1x.maker.outputs[ShowDot1xAllDetail] = \
{'': Dot1xOutput.ShowDot1xAllDetail}
dot1x.maker.outputs[ShowDot1xAllStatistics] = \
{'': Dot1xOutput.ShowDot1xAllStatistics}
dot1x.maker.outputs[ShowDot1xAllSummary] = \
{'': Dot1xOutput.ShowDot1xAllSummary}
dot1x.maker.outputs[ShowDot1xAllCount] = \
{'': {}}
# Learn the feature
dot1x.learn()
# Delete missing specific attribute values
expect_dict = deepcopy(Dot1xOutput.Dot1x_info)
del(expect_dict['sessions'])
# Verify Ops was created successfully
self.assertEqual(dot1x.info, expect_dict)
if __name__ == '__main__':
unittest.main()
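# --- Hedged usage sketch (appended; not part of the original test module) ---
# Outside of unit tests the same Ops object is learned from a live, connected
# pyATS/Genie device instead of mocked parser outputs; the testbed file and
# device name below are placeholders.
#
#     from genie.testbed import load
#     testbed = load('testbed.yaml')
#     device = testbed.devices['aDevice']
#     device.connect()
#     dot1x = Dot1X(device=device)
#     dot1x.learn()
#     print(dot1x.info)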
| [
"unittest.main",
"genie.libs.ops.dot1x.ios.dot1x.Dot1X",
"pyats.topology.Device",
"copy.deepcopy"
]
| [((3479, 3494), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3492, 3494), False, 'import unittest\n'), ((611, 633), 'pyats.topology.Device', 'Device', ([], {'name': '"""aDevice"""'}), "(name='aDevice')\n", (617, 633), False, 'from pyats.topology import Device\n'), ((1067, 1092), 'genie.libs.ops.dot1x.ios.dot1x.Dot1X', 'Dot1X', ([], {'device': 'self.device'}), '(device=self.device)\n', (1072, 1092), False, 'from genie.libs.ops.dot1x.ios.dot1x import Dot1X\n'), ((2038, 2063), 'genie.libs.ops.dot1x.ios.dot1x.Dot1X', 'Dot1X', ([], {'device': 'self.device'}), '(device=self.device)\n', (2043, 2063), False, 'from genie.libs.ops.dot1x.ios.dot1x import Dot1X\n'), ((2666, 2691), 'genie.libs.ops.dot1x.ios.dot1x.Dot1X', 'Dot1X', ([], {'device': 'self.device'}), '(device=self.device)\n', (2671, 2691), False, 'from genie.libs.ops.dot1x.ios.dot1x import Dot1X\n'), ((3263, 3295), 'copy.deepcopy', 'deepcopy', (['Dot1xOutput.Dot1x_info'], {}), '(Dot1xOutput.Dot1x_info)\n', (3271, 3295), False, 'from copy import deepcopy\n')] |
# ======================================================================
# copyright 2020. Triad National Security, LLC. All rights
# reserved. This program was produced under U.S. Government contract
# 89233218CNA000001 for Los Alamos National Laboratory (LANL), which
# is operated by Triad National Security, LLC for the U.S. Department
# of Energy/National Nuclear Security Administration. All rights in
# the program are reserved by Triad National Security, LLC, and the
# U.S. Department of Energy/National Nuclear Security
# Administration. The Government is granted for itself and others
# acting on its behalf a nonexclusive, paid-up, irrevocable worldwide
# license in this material to reproduce, prepare derivative works,
# distribute copies to the public, perform publicly and display
# publicly, and to permit others to do so.
# ======================================================================
# Authors: <NAME> (<EMAIL>)
# Purpose:
# Provides a check of whether a coordinate transformation of the metric
# from code coordinates to Kerr-Schild coordinates produces correct
# metric, consistent with the closed form (as in e.g. Eq.(3)
# McKinney & Gammie 2004, https://arxiv.org/abs/astro-ph/0404512)
#
# Functions:
# - print_matrix
# - check_transformation_matrices
#
from math import *
import numpy as np
def print_matrix(matrix,fmt="%19.11e",tostdout=True) -> str:
"""Pretty-prints a matrix to a string (optinally, to stdout)
Parameters
----------
matrix : numpy.array([N,M])
matrix to print
fmt : str
C-style format of each element (default: "%19.11e")
tostdout : bool
output to stdout (default: true)
Returns
-------
str
formatted output string
"""
N = matrix.shape[0]
M = matrix.shape[1]
s = "["
for i in range(N):
s+= "["
for j in range(M):
s+= (fmt % matrix[i,j])
if j < M - 1: s += ", "
s+= "]"
if i < N - 1: s += ",\n "
s+="]"
if tostdout: print(s)
return s
def check_transformation_matrices(geom, a, ir, jth,
verbose=True, tol=1e-12) -> bool:
"""Transforms the metric to spherical KS and compares with analytic formula
Test 1: covariant metric, gcov, at A = {ir, jth}
1.1 sample gcov and Lambda_h2bl_cov at A
      1.2 transform gcov to gks using transformation matrices
1.3 compare to expected values at {r,th} at A
Parameters
----------
geom : dictionary
nubhlight geom object
a : Float
dimensionless Kerr spin parameter
ir : Integer
index of sample point in radial direction
jth : Integer
index of sample point in angular theta-direction
verbose : bool
output steps to stdout
tol : Float
tolerance to relative error (wrt det g)
Returns
-------
bool
True if all checks passed
Examples
--------
import hdf5_to_dict as io
hdr = io.load_hdr("dump_00000010.h5")
geom = io.load_geom(hdr,recalc=True)
    check_transformation_matrices(geom, a, -1, 64)
"""
# sample gcov and h2bl at point A
gcov_A = geom['gcov'][ir,jth]
h2bl_A = geom['Lambda_h2bl_cov'][ir,jth]
# sample r and theta, compute BL metric-related quantities
r = geom['r'][ir,jth,0]; r2 = r*r
a2 = a*a
th= geom['th'][ir,jth,0]
sth2= sin(th)**2
Delta= r2 - 2*r + a2
Sigma= r2 + a2*cos(th)**2
A = (r2 + a2)**2 - a2*Delta*sin(th)**2
if verbose:
print ("r = %19.11e" % r)
print ("theta = %19.11e" % th)
print ("a = %19.11e" % a)
print ("Delta = %19.11e" % Delta)
print ("Sigma = %19.11e" % Sigma)
print ("A = %19.11e" % A)
# output metric
print ("gcov_A = ")
print_matrix (gcov_A)
print ("")
# output transformation matrix
print ("h2bl_A = ")
print_matrix (h2bl_A)
print ("")
# compute BL metric at A
gks_A = np.zeros([4,4])
for i in range(4):
for j in range(4):
for k in range(4):
for l in range(4):
gks_A[i,j] = gks_A[i,j] + h2bl_A[k,i]*h2bl_A[l,j]*gcov_A[k,l]
if verbose:
print ("gks_A = ")
print_matrix (gks_A)
print("")
# expected values at {r, th}
g_tt = -1. + 2.*r/Sigma
g_rr = 1. + 2.*r/Sigma
g_ff = sth2*(Sigma + a2*g_rr*sth2)
g_thth = Sigma
g_tr = 2*r/Sigma
g_tf = -2*a*r*sth2/Sigma
g_rf = -a*g_rr*sth2
det_g = -Sigma**2*sth2
if verbose:
print ("Expected:")
print (" g_tt = %19.11e" % g_tt )
print (" g_rr = %19.11e" % g_rr )
print (" g_thth = %19.11e" % g_thth)
print (" g_ff = %19.11e" % g_ff )
print (" g_tr = %19.11e" % g_tr )
print (" g_rf = %19.11e" % g_rf )
print (" g_tf = %19.11e" % g_tf )
print ("")
# check gks_A
gks_expected = np.array(
[[ g_tt, g_tr, 0.0, g_tf],
[ g_tr, g_rr, 0.0, g_rf],
[ 0.0, 0.0, g_thth, 0.0],
[ g_tf, g_rf, 0.0, g_ff]]
)
passed = True
for i in range(4):
for j in range(4):
if abs(gks_A[i,j] - gks_expected[i,j])/abs(det_g) > tol:
passed = False
if verbose:
print (f"WARNING: Significant mismatch in gks_A[{i},{j}]:")
print (" -- expected: %19.11e" % gks_expected[i,j])
print (" -- actual: %19.11e" % gks_A[i,j])
return passed
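# --- Hedged self-test (appended; not part of the original script) ---
# For spin a = 0 one may take the code coordinates identical to spherical KS,
# so Lambda_h2bl is the identity and gcov is already the closed-form metric;
# check_transformation_matrices must then return True. The geom dictionary is
# synthetic and only mimics the nubhlight layout assumed above
# (gcov/Lambda of shape [N1,N2,4,4], r/th of shape [N1,N2,1]).
def _synthetic_ks_geom(r, th):
    sth2 = sin(th)**2
    g = np.zeros([4, 4])
    g[0, 0] = -1. + 2./r          # g_tt for a = 0 (Sigma = r^2)
    g[1, 1] = 1. + 2./r           # g_rr
    g[2, 2] = r*r                 # g_thth
    g[3, 3] = r*r*sth2            # g_ff
    g[0, 1] = g[1, 0] = 2./r      # g_tr
    return {'gcov': g.reshape(1, 1, 4, 4),
            'Lambda_h2bl_cov': np.eye(4).reshape(1, 1, 4, 4),
            'r': np.full((1, 1, 1), r),
            'th': np.full((1, 1, 1), th)}

if __name__ == '__main__':
    geom = _synthetic_ks_geom(r=10., th=pi/3)
    assert check_transformation_matrices(geom, a=0., ir=0, jth=0, verbose=False)
    print("a = 0 self-consistency check passed")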
| [
"numpy.array",
"numpy.zeros"
]
| [((3787, 3803), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (3795, 3803), True, 'import numpy as np\n'), ((4682, 4796), 'numpy.array', 'np.array', (['[[g_tt, g_tr, 0.0, g_tf], [g_tr, g_rr, 0.0, g_rf], [0.0, 0.0, g_thth, 0.0],\n [g_tf, g_rf, 0.0, g_ff]]'], {}), '([[g_tt, g_tr, 0.0, g_tf], [g_tr, g_rr, 0.0, g_rf], [0.0, 0.0,\n g_thth, 0.0], [g_tf, g_rf, 0.0, g_ff]])\n', (4690, 4796), True, 'import numpy as np\n')] |
import io
import os
import numpy as np
import pandas
import json
import logging #<== Optional. Log to console, file, kafka
from pipeline_monitor import prometheus_monitor as monitor #<== Optional. Monitor runtime metrics
from pipeline_logger import log
import tensorflow as tf
from tensorflow.contrib import predictor
from keras.models import Sequential, load_model
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
from collections import OrderedDict
_logger = logging.getLogger('pipeline-logger')
_logger.setLevel(logging.INFO)
_logger_stream_handler = logging.StreamHandler()
_logger_stream_handler.setLevel(logging.INFO)
_logger.addHandler(_logger_stream_handler)
__all__ = ['invoke'] #<== Optional. Being a good Python citizen.
_labels = { #<== Optional. Used for metrics/labels
'name': 'injection',
'tag': 'v1',
'type': 'tensorflow',
'runtime': 'python',
'chip': 'cpu',
}
def _initialize_upon_import(): #<== Optional. Called once upon server startup
''' Initialize / Restore Model Object.
'''
model = load_model('securitai-lstm-model.h5')
model.load_weights('securitai-lstm-weights.h5')
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
# This is called unconditionally at *module import time*...
_model = _initialize_upon_import()
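# _initialize_upon_import() above runs eagerly, once, at module import time, so every
# later invoke() call reuses the same in-memory model. A lazy-loading variant (a sketch
# only, not used by this module) would defer the load until the first request:
#
#   _model = None
#   def _get_model():
#       global _model
#       if _model is None:
#           _model = _initialize_upon_import()
#       return _model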
#@log(labels=_labels, logger=_logger) #<== Optional. Sample and compare predictions
def invoke(request): #<== Required. Called on every prediction
'''Where the magic happens...'''
with monitor(labels=_labels, name="transform_request"): #<== Optional. Expose fine-grained metrics
transformed_request = _transform_request(request) #<== Optional. Transform input (json) into TensorFlow (tensor)
with monitor(labels=_labels, name="invoke"): #<== Optional. Calls _model.predict()
response = _model.predict(transformed_request)
with monitor(labels=_labels, name="transform_response"): #<== Optional. Transform TensorFlow (tensor) into output (json)
transformed_response = _transform_response(response)
return transformed_response #<== Required. Returns the predicted value(s)
def _transform_request(request):
request_str = request.decode('utf-8')
# tokenize the csv request and create json
X = pandas.read_csv(io.StringIO(request_str), engine='python', quotechar='|', header=None).values[:,0]
for index, item in enumerate(X):
reqJson = json.loads(item, object_pairs_hook=OrderedDict)
del reqJson['http']['timestamp']
del reqJson['http']['headers']
del reqJson['http']['source']
del reqJson['http']['route']
del reqJson['http']['responsePayload']
X[index] = json.dumps(reqJson, separators=(',', ':'))
tokenizer = Tokenizer(filters='\t\n', char_level=True)
tokenizer.fit_on_texts(X)
# this used to be [log_entry]
seq = tokenizer.texts_to_sequences([request_str])
max_log_length = 1024
log_entry_processed = sequence.pad_sequences(seq, maxlen=max_log_length)
return log_entry_processed
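# The transform above fits a character-level Tokenizer on the request and pads the id
# sequence to max_log_length, the fixed input width the LSTM expects. A minimal
# standalone illustration (sample text and maxlen are assumptions, run separately):
#
#   from keras.preprocessing.text import Tokenizer
#   from keras.preprocessing import sequence
#   t = Tokenizer(filters='\t\n', char_level=True)
#   t.fit_on_texts(['GET /index.html'])
#   ids = t.texts_to_sequences(['GET /'])
#   padded = sequence.pad_sequences(ids, maxlen=16)  # shape (1, 16), zero-padded on the left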
def _transform_response(response):
return response[0]
if __name__ == '__main__':
with open('./pipeline_test_request.csv', 'rb') as fb:
request_bytes = fb.read()
response_bytes = invoke(request_bytes)
print(response_bytes)
| [
"logging.getLogger",
"json.loads",
"logging.StreamHandler",
"keras.preprocessing.text.Tokenizer",
"keras.models.load_model",
"json.dumps",
"io.StringIO",
"pipeline_monitor.prometheus_monitor",
"keras.preprocessing.sequence.pad_sequences"
]
| [((556, 592), 'logging.getLogger', 'logging.getLogger', (['"""pipeline-logger"""'], {}), "('pipeline-logger')\n", (573, 592), False, 'import logging\n'), ((649, 672), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (670, 672), False, 'import logging\n'), ((1310, 1347), 'keras.models.load_model', 'load_model', (['"""securitai-lstm-model.h5"""'], {}), "('securitai-lstm-model.h5')\n", (1320, 1347), False, 'from keras.models import Sequential, load_model\n'), ((3172, 3214), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'filters': '"""\t\n"""', 'char_level': '(True)'}), "(filters='\\t\\n', char_level=True)\n", (3181, 3214), False, 'from keras.preprocessing.text import Tokenizer\n'), ((3385, 3435), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['seq'], {'maxlen': 'max_log_length'}), '(seq, maxlen=max_log_length)\n', (3407, 3435), False, 'from keras.preprocessing import sequence\n'), ((1872, 1921), 'pipeline_monitor.prometheus_monitor', 'monitor', ([], {'labels': '_labels', 'name': '"""transform_request"""'}), "(labels=_labels, name='transform_request')\n", (1879, 1921), True, 'from pipeline_monitor import prometheus_monitor as monitor\n'), ((2107, 2145), 'pipeline_monitor.prometheus_monitor', 'monitor', ([], {'labels': '_labels', 'name': '"""invoke"""'}), "(labels=_labels, name='invoke')\n", (2114, 2145), True, 'from pipeline_monitor import prometheus_monitor as monitor\n'), ((2265, 2315), 'pipeline_monitor.prometheus_monitor', 'monitor', ([], {'labels': '_labels', 'name': '"""transform_response"""'}), "(labels=_labels, name='transform_response')\n", (2272, 2315), True, 'from pipeline_monitor import prometheus_monitor as monitor\n'), ((2843, 2890), 'json.loads', 'json.loads', (['item'], {'object_pairs_hook': 'OrderedDict'}), '(item, object_pairs_hook=OrderedDict)\n', (2853, 2890), False, 'import json\n'), ((3112, 3154), 'json.dumps', 'json.dumps', (['reqJson'], {'separators': "(',', ':')"}), "(reqJson, separators=(',', ':'))\n", (3122, 3154), False, 'import json\n'), ((2705, 2729), 'io.StringIO', 'io.StringIO', (['request_str'], {}), '(request_str)\n', (2716, 2729), False, 'import io\n')] |
from datetime import timedelta
from typing import Union, List, Optional
import click
import pandas as pd
from flask import current_app as app
from flask.cli import with_appcontext
from flexmeasures import Sensor
from flexmeasures.data import db
from flexmeasures.data.schemas.generic_assets import GenericAssetIdField
from flexmeasures.data.schemas.sensors import SensorIdField
from flexmeasures.data.models.generic_assets import GenericAsset
from flexmeasures.data.models.time_series import TimedBelief
from flexmeasures.data.utils import save_to_db
@click.group("edit")
def fm_edit_data():
"""FlexMeasures: Edit data."""
@fm_edit_data.command("attribute")
@with_appcontext
@click.option(
"--asset-id",
"assets",
required=False,
multiple=True,
type=GenericAssetIdField(),
help="Add/edit attribute to this asset. Follow up with the asset's ID.",
)
@click.option(
"--sensor-id",
"sensors",
required=False,
multiple=True,
type=SensorIdField(),
help="Add/edit attribute to this sensor. Follow up with the sensor's ID.",
)
@click.option(
"--attribute",
"attribute_key",
required=True,
help="Add/edit this attribute. Follow up with the name of the attribute.",
)
@click.option(
"--float",
"attribute_float_value",
required=False,
type=float,
help="Set the attribute to this float value.",
)
@click.option(
"--bool",
"attribute_bool_value",
required=False,
type=bool,
help="Set the attribute to this bool value.",
)
@click.option(
"--str",
"attribute_str_value",
required=False,
type=str,
help="Set the attribute to this string value.",
)
@click.option(
"--int",
"attribute_int_value",
required=False,
type=int,
help="Set the attribute to this integer value.",
)
@click.option(
"--null",
"attribute_null_value",
required=False,
is_flag=True,
default=False,
help="Set the attribute to a null value.",
)
def edit_attribute(
attribute_key: str,
assets: List[GenericAsset],
sensors: List[Sensor],
attribute_null_value: bool,
attribute_float_value: Optional[float] = None,
attribute_bool_value: Optional[bool] = None,
attribute_str_value: Optional[str] = None,
attribute_int_value: Optional[int] = None,
):
"""Edit (or add) an asset attribute or sensor attribute."""
if not assets and not sensors:
raise ValueError("Missing flag: pass at least one --asset-id or --sensor-id.")
# Parse attribute value
attribute_value = parse_attribute_value(
attribute_float_value=attribute_float_value,
attribute_bool_value=attribute_bool_value,
attribute_str_value=attribute_str_value,
attribute_int_value=attribute_int_value,
attribute_null_value=attribute_null_value,
)
# Set attribute
for asset in assets:
asset.attributes[attribute_key] = attribute_value
db.session.add(asset)
for sensor in sensors:
sensor.attributes[attribute_key] = attribute_value
db.session.add(sensor)
db.session.commit()
print("Successfully edited/added attribute.")
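# A hedged CLI usage sketch (IDs and the attribute name are illustrative; the group is
# attached to the Flask CLI further below via app.cli.add_command):
#
#   flexmeasures edit attribute --sensor-id 2 --attribute capacity_in_mw --float 0.5
#
# Exactly one of --float/--bool/--str/--int/--null may be passed; otherwise
# parse_attribute_value() raises "Cannot set multiple values simultaneously."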
@fm_edit_data.command("resample-data")
@with_appcontext
@click.option(
"--sensor-id",
"sensor_ids",
multiple=True,
required=True,
help="Resample data for this sensor. Follow up with the sensor's ID. This argument can be given multiple times.",
)
@click.option(
"--event-resolution",
"event_resolution_in_minutes",
type=int,
required=True,
help="New event resolution as an integer number of minutes.",
)
@click.option(
"--from",
"start_str",
required=False,
    help="Resample only data from this datetime onwards. Follow up with a timezone-aware datetime in ISO 8601 format.",
)
@click.option(
"--until",
"end_str",
required=False,
    help="Resample only data until this datetime. Follow up with a timezone-aware datetime in ISO 8601 format.",
)
@click.option(
"--skip-integrity-check",
is_flag=True,
help="Whether to skip checking the resampled time series data for each sensor."
" By default, an excerpt and the mean value of the original"
" and resampled data will be shown for manual approval.",
)
def resample_sensor_data(
sensor_ids: List[int],
event_resolution_in_minutes: int,
start_str: Optional[str] = None,
end_str: Optional[str] = None,
skip_integrity_check: bool = False,
):
"""Assign a new event resolution to an existing sensor and resample its data accordingly."""
event_resolution = timedelta(minutes=event_resolution_in_minutes)
event_starts_after = pd.Timestamp(start_str) # note that "" or None becomes NaT
event_ends_before = pd.Timestamp(end_str)
for sensor_id in sensor_ids:
sensor = Sensor.query.get(sensor_id)
if sensor.event_resolution == event_resolution:
print(f"{sensor} already has the desired event resolution.")
continue
df_original = sensor.search_beliefs(
most_recent_beliefs_only=False,
event_starts_after=event_starts_after,
event_ends_before=event_ends_before,
).sort_values("event_start")
df_resampled = df_original.resample_events(event_resolution).sort_values(
"event_start"
)
if not skip_integrity_check:
message = ""
if sensor.event_resolution < event_resolution:
message += f"Downsampling {sensor} to {event_resolution} will result in a loss of data. "
click.confirm(
message
+ f"Data before:\n{df_original}\nData after:\n{df_resampled}\nMean before: {df_original['event_value'].mean()}\nMean after: {df_resampled['event_value'].mean()}\nContinue?",
abort=True,
)
# Update sensor
sensor.event_resolution = event_resolution
db.session.add(sensor)
# Update sensor data
query = TimedBelief.query.filter(TimedBelief.sensor == sensor)
if not pd.isnull(event_starts_after):
query = query.filter(TimedBelief.event_start >= event_starts_after)
if not pd.isnull(event_ends_before):
query = query.filter(
TimedBelief.event_start + sensor.event_resolution <= event_ends_before
)
query.delete()
save_to_db(df_resampled, bulk_save_objects=True)
db.session.commit()
print("Successfully resampled sensor data.")
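# A hedged CLI usage sketch (sensor IDs, resolution and datetimes are illustrative):
#
#   flexmeasures edit resample-data --sensor-id 1 --sensor-id 2 --event-resolution 15 \
#       --from 2022-01-01T00:00:00+01:00 --until 2022-02-01T00:00:00+01:00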
app.cli.add_command(fm_edit_data)
def parse_attribute_value(
attribute_null_value: bool,
attribute_float_value: Optional[float] = None,
attribute_bool_value: Optional[bool] = None,
attribute_str_value: Optional[str] = None,
attribute_int_value: Optional[int] = None,
) -> Union[float, int, bool, str, None]:
"""Parse attribute value."""
if not single_true(
[attribute_null_value]
+ [
v is not None
for v in [
attribute_float_value,
attribute_bool_value,
attribute_str_value,
attribute_int_value,
]
]
):
raise ValueError("Cannot set multiple values simultaneously.")
if attribute_null_value:
return None
elif attribute_float_value is not None:
return float(attribute_float_value)
elif attribute_bool_value is not None:
return bool(attribute_bool_value)
elif attribute_int_value is not None:
return int(attribute_int_value)
return attribute_str_value
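# For illustration (values are arbitrary): parse_attribute_value(attribute_null_value=True)
# returns None; parse_attribute_value(attribute_null_value=False, attribute_float_value=0.5)
# returns 0.5; passing several values at once raises the ValueError above.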
def single_true(iterable) -> bool:
i = iter(iterable)
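    # the first any() consumes the iterator up to and including the first truthy item;
    # the second any() continues from there and must find no further truthy item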
return any(i) and not any(i)
| [
"pandas.isnull",
"click.group",
"click.option",
"pandas.Timestamp",
"flexmeasures.data.schemas.generic_assets.GenericAssetIdField",
"flexmeasures.Sensor.query.get",
"flexmeasures.data.schemas.sensors.SensorIdField",
"flexmeasures.data.models.time_series.TimedBelief.query.filter",
"flexmeasures.data.db.session.add",
"flask.current_app.cli.add_command",
"flexmeasures.data.utils.save_to_db",
"datetime.timedelta",
"flexmeasures.data.db.session.commit"
]
| [((556, 575), 'click.group', 'click.group', (['"""edit"""'], {}), "('edit')\n", (567, 575), False, 'import click\n'), ((1078, 1217), 'click.option', 'click.option', (['"""--attribute"""', '"""attribute_key"""'], {'required': '(True)', 'help': '"""Add/edit this attribute. Follow up with the name of the attribute."""'}), "('--attribute', 'attribute_key', required=True, help=\n 'Add/edit this attribute. Follow up with the name of the attribute.')\n", (1090, 1217), False, 'import click\n'), ((1233, 1360), 'click.option', 'click.option', (['"""--float"""', '"""attribute_float_value"""'], {'required': '(False)', 'type': 'float', 'help': '"""Set the attribute to this float value."""'}), "('--float', 'attribute_float_value', required=False, type=float,\n help='Set the attribute to this float value.')\n", (1245, 1360), False, 'import click\n'), ((1381, 1504), 'click.option', 'click.option', (['"""--bool"""', '"""attribute_bool_value"""'], {'required': '(False)', 'type': 'bool', 'help': '"""Set the attribute to this bool value."""'}), "('--bool', 'attribute_bool_value', required=False, type=bool,\n help='Set the attribute to this bool value.')\n", (1393, 1504), False, 'import click\n'), ((1525, 1648), 'click.option', 'click.option', (['"""--str"""', '"""attribute_str_value"""'], {'required': '(False)', 'type': 'str', 'help': '"""Set the attribute to this string value."""'}), "('--str', 'attribute_str_value', required=False, type=str, help\n ='Set the attribute to this string value.')\n", (1537, 1648), False, 'import click\n'), ((1668, 1792), 'click.option', 'click.option', (['"""--int"""', '"""attribute_int_value"""'], {'required': '(False)', 'type': 'int', 'help': '"""Set the attribute to this integer value."""'}), "('--int', 'attribute_int_value', required=False, type=int, help\n ='Set the attribute to this integer value.')\n", (1680, 1792), False, 'import click\n'), ((1812, 1950), 'click.option', 'click.option', (['"""--null"""', '"""attribute_null_value"""'], {'required': '(False)', 'is_flag': '(True)', 'default': '(False)', 'help': '"""Set the attribute to a null value."""'}), "('--null', 'attribute_null_value', required=False, is_flag=True,\n default=False, help='Set the attribute to a null value.')\n", (1824, 1950), False, 'import click\n'), ((3210, 3409), 'click.option', 'click.option', (['"""--sensor-id"""', '"""sensor_ids"""'], {'multiple': '(True)', 'required': '(True)', 'help': '"""Resample data for this sensor. Follow up with the sensor\'s ID. This argument can be given multiple times."""'}), '(\'--sensor-id\', \'sensor_ids\', multiple=True, required=True,\n help=\n "Resample data for this sensor. Follow up with the sensor\'s ID. This argument can be given multiple times."\n )\n', (3222, 3409), False, 'import click\n'), ((3420, 3581), 'click.option', 'click.option', (['"""--event-resolution"""', '"""event_resolution_in_minutes"""'], {'type': 'int', 'required': '(True)', 'help': '"""New event resolution as an integer number of minutes."""'}), "('--event-resolution', 'event_resolution_in_minutes', type=int,\n required=True, help='New event resolution as an integer number of minutes.'\n )\n", (3432, 3581), False, 'import click\n'), ((3597, 3774), 'click.option', 'click.option', (['"""--from"""', '"""start_str"""'], {'required': '(False)', 'help': '"""Resample only data from this datetime onwards. Follow up with a timezone-aware datetime in ISO 6801 format."""'}), "('--from', 'start_str', required=False, help=\n 'Resample only data from this datetime onwards. 
Follow up with a timezone-aware datetime in ISO 6801 format.'\n )\n", (3609, 3774), False, 'import click\n'), ((3785, 3954), 'click.option', 'click.option', (['"""--until"""', '"""end_str"""'], {'required': '(False)', 'help': '"""Resample only data until this datetime. Follow up with a timezone-aware datetime in ISO 6801 format."""'}), "('--until', 'end_str', required=False, help=\n 'Resample only data until this datetime. Follow up with a timezone-aware datetime in ISO 6801 format.'\n )\n", (3797, 3954), False, 'import click\n'), ((3965, 4220), 'click.option', 'click.option', (['"""--skip-integrity-check"""'], {'is_flag': '(True)', 'help': '"""Whether to skip checking the resampled time series data for each sensor. By default, an excerpt and the mean value of the original and resampled data will be shown for manual approval."""'}), "('--skip-integrity-check', is_flag=True, help=\n 'Whether to skip checking the resampled time series data for each sensor. By default, an excerpt and the mean value of the original and resampled data will be shown for manual approval.'\n )\n", (3977, 4220), False, 'import click\n'), ((6495, 6528), 'flask.current_app.cli.add_command', 'app.cli.add_command', (['fm_edit_data'], {}), '(fm_edit_data)\n', (6514, 6528), True, 'from flask import current_app as app\n'), ((3081, 3100), 'flexmeasures.data.db.session.commit', 'db.session.commit', ([], {}), '()\n', (3098, 3100), False, 'from flexmeasures.data import db\n'), ((4566, 4612), 'datetime.timedelta', 'timedelta', ([], {'minutes': 'event_resolution_in_minutes'}), '(minutes=event_resolution_in_minutes)\n', (4575, 4612), False, 'from datetime import timedelta\n'), ((4638, 4661), 'pandas.Timestamp', 'pd.Timestamp', (['start_str'], {}), '(start_str)\n', (4650, 4661), True, 'import pandas as pd\n'), ((4722, 4743), 'pandas.Timestamp', 'pd.Timestamp', (['end_str'], {}), '(end_str)\n', (4734, 4743), True, 'import pandas as pd\n'), ((6424, 6443), 'flexmeasures.data.db.session.commit', 'db.session.commit', ([], {}), '()\n', (6441, 6443), False, 'from flexmeasures.data import db\n'), ((2938, 2959), 'flexmeasures.data.db.session.add', 'db.session.add', (['asset'], {}), '(asset)\n', (2952, 2959), False, 'from flexmeasures.data import db\n'), ((3054, 3076), 'flexmeasures.data.db.session.add', 'db.session.add', (['sensor'], {}), '(sensor)\n', (3068, 3076), False, 'from flexmeasures.data import db\n'), ((780, 801), 'flexmeasures.data.schemas.generic_assets.GenericAssetIdField', 'GenericAssetIdField', ([], {}), '()\n', (799, 801), False, 'from flexmeasures.data.schemas.generic_assets import GenericAssetIdField\n'), ((979, 994), 'flexmeasures.data.schemas.sensors.SensorIdField', 'SensorIdField', ([], {}), '()\n', (992, 994), False, 'from flexmeasures.data.schemas.sensors import SensorIdField\n'), ((4794, 4821), 'flexmeasures.Sensor.query.get', 'Sensor.query.get', (['sensor_id'], {}), '(sensor_id)\n', (4810, 4821), False, 'from flexmeasures import Sensor\n'), ((5910, 5932), 'flexmeasures.data.db.session.add', 'db.session.add', (['sensor'], {}), '(sensor)\n', (5924, 5932), False, 'from flexmeasures.data import db\n'), ((5979, 6033), 'flexmeasures.data.models.time_series.TimedBelief.query.filter', 'TimedBelief.query.filter', (['(TimedBelief.sensor == sensor)'], {}), '(TimedBelief.sensor == sensor)\n', (6003, 6033), False, 'from flexmeasures.data.models.time_series import TimedBelief\n'), ((6371, 6419), 'flexmeasures.data.utils.save_to_db', 'save_to_db', (['df_resampled'], {'bulk_save_objects': '(True)'}), '(df_resampled, 
bulk_save_objects=True)\n', (6381, 6419), False, 'from flexmeasures.data.utils import save_to_db\n'), ((6049, 6078), 'pandas.isnull', 'pd.isnull', (['event_starts_after'], {}), '(event_starts_after)\n', (6058, 6078), True, 'import pandas as pd\n'), ((6175, 6203), 'pandas.isnull', 'pd.isnull', (['event_ends_before'], {}), '(event_ends_before)\n', (6184, 6203), True, 'import pandas as pd\n')] |
#!/usr/bin/env python3.6
# -*- encoding=utf8 -*-
import pyquery
"""
Required fields:
    title, publication date, category, tags, content, images
Required field information:
    1. site root URL
    2. parser name
    3. parser type
        1. PARSER_PASSAGE_URL      article URL
        2. PARSER_PASSAGE_TITLE    article title
        3. PARSER_PASSAGE_DATE     publication date
        4. PARSER_PASSAGE_CATEGORY article category
        5. PARSER_PASSAGE_TAG      article tags
        6. PARSER_PASSAGE_CONTENT  article content
        7. PARSER_PASSAGE_IMGURL   image URLs inside the article
"""
class Parser(object):
def __init__ (self):
self._webURL = ''
self._parserName = 'base_parser'
def _parser_passage_url (self, doc: str) -> (bool, str):
return
def _parser_passage_title (self, doc: str) -> (bool, str):
return
def _parser_passage_date (self, doc: str) -> (bool, str):
return
def _parser_passage_category (self, doc: str) -> (bool, str):
return
def _parser_passage_tag (self, doc: str) -> (bool, str):
return
def _parser_passage_content (self, doc: str) -> (bool, str):
return
def _parser_passage_img_url (self, doc: str) -> (bool, str, bytes):
return
def get_parser_name (self):
return self._parserName
@staticmethod
def _parser (doc: str, rule: str):
return pyquery.PyQuery(doc).find(rule)
    def parse(self, doc: str, rule='', parse_type=-1):
        if doc == '' or doc is None:
            return (False, '')
        if self.PARSER_PASSAGE_URL == parse_type:
            return self._parser_passage_url(doc)
        elif self.PARSER_PASSAGE_TITLE == parse_type:
            return self._parser_passage_title(doc)
        elif self.PARSER_PASSAGE_DATE == parse_type:
            return self._parser_passage_date(doc)
        elif self.PARSER_PASSAGE_CATEGORY == parse_type:
            return self._parser_passage_category(doc)
        elif self.PARSER_PASSAGE_TAG == parse_type:
            return self._parser_passage_tag(doc)
        elif self.PARSER_PASSAGE_CONTENT == parse_type:
            return self._parser_passage_content(doc)
        elif self.PARSER_PASSAGE_IMGURL == parse_type:
            return self._parser_passage_img_url(doc)
        else:
            return Parser._parser(doc, rule)
PARSER_PASSAGE_URL = 1
PARSER_PASSAGE_TITLE = 2
PARSER_PASSAGE_DATE = 3
PARSER_PASSAGE_CATEGORY = 4
PARSER_PASSAGE_TAG = 5
PARSER_PASSAGE_CONTENT = 6
PARSER_PASSAGE_IMGURL = 7
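# A hedged sketch of a concrete subclass (the site URL and CSS rule are assumptions,
# not part of this module):
#
#   class ExampleBlogParser(Parser):
#       def __init__(self):
#           super().__init__()
#           self._webURL = 'https://example.com'
#           self._parserName = 'example_blog_parser'
#       def _parser_passage_title(self, doc: str) -> (bool, str):
#           title = Parser._parser(doc, 'h1.entry-title').text()
#           return (title != '', title)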
| [
"pyquery.PyQuery"
]
| [((1374, 1394), 'pyquery.PyQuery', 'pyquery.PyQuery', (['doc'], {}), '(doc)\n', (1389, 1394), False, 'import pyquery\n')] |
import os
import json
import gzip
from copy import deepcopy, copy
import numpy as np
import csv
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, RandomSampler
from transformers.tokenization_utils import trim_batch
class LabelSmoothingLoss(nn.Module):
def __init__(self, label_smooth, tgt_vocab_size, ignore_index=-100):
assert 0. < label_smooth <= 1.
self.ignore_index = ignore_index
super(LabelSmoothingLoss, self).__init__()
smoothing_value = label_smooth / (tgt_vocab_size - 2)
one_hot = torch.full((tgt_vocab_size,), smoothing_value)
one_hot[self.ignore_index] = 0
self.register_buffer('one_hot', one_hot.unsqueeze(0).unsqueeze(0))
self.confidence = 1.0 - label_smooth
self.lossfct = torch.nn.KLDivLoss(reduction='none')
def forward(self, pred, target):
"""
Args:
pred: [bsz, seq_len, vocab_size]
target: [bsz, seq_len]
Returns:
"""
model_prob = self.one_hot.repeat(target.size(0), target.size(1), 1) # [bsz, seq_len, vocab_size]
model_prob.scatter_(2, target.unsqueeze(2), self.confidence)
model_prob.masked_fill_((target == self.ignore_index).unsqueeze(2), 0)
pred_prob = F.log_softmax(pred, dim=2)
#return F.kl_div(pred_prob, model_prob, reduction='mean')
loss = self.lossfct(pred_prob, model_prob)
loss = torch.sum(loss, dim=2).masked_fill_((target == self.ignore_index), 0)
avg_loss = torch.sum(loss) / torch.sum((target != self.ignore_index).to(torch.float))
return avg_loss
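# A minimal usage sketch (shapes and the pad id are assumptions; note that ignore_index
# must be a valid vocabulary index, since one_hot[ignore_index] is zeroed above):
#   criterion = LabelSmoothingLoss(label_smooth=0.1, tgt_vocab_size=8, ignore_index=0)
#   logits = torch.randn(2, 5, 8)             # [bsz, seq_len, vocab_size]
#   targets = torch.randint(1, 8, (2, 5))     # [bsz, seq_len]; 0 is treated as padding
#   loss = criterion(logits, targets)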
# Special symbols
SOS_token = "<SOS>" # start of sentence
EOS_token = "<EOS>" # end of sentence
PAD_token = SOS_token # padding symbol
INPUT_TOKENS_SCAN = ['jump', 'opposite', 'right', 'twice', 'and', 'turn', 'thrice', 'run', 'after', 'around', 'left', 'walk', 'look']
OUTPUT_TOKENS_SCAN = ['I_TURN_RIGHT', 'I_JUMP', 'I_TURN_LEFT', 'I_RUN', 'I_WALK', 'I_LOOK']
# ACTION_TO_TEXT = {'I_TURN_RIGHT': 'right', 'I_JUMP': 'jump', 'I_TURN_LEFT': 'left', 'I_RUN': 'run', 'I_WALK': 'walk', 'I_LOOK': 'look'}
class Lang:
# Class for converting strings/words to numerical indices, and vice versa.
# Should use separate class for input language (English) and output language (actions)
#
def __init__(self, symbols, io_type):
# symbols : list of all possible symbols
n = len(symbols)
self.symbols = [_s.strip('\n') for _s in symbols]
self.io_type = io_type
if SOS_token not in self.symbols:
assert EOS_token not in self.symbols
self.index2symbol = {n: SOS_token, n+1: EOS_token}
self.symbol2index = {SOS_token: n, EOS_token: n + 1}
self.sos_id, self.eos_id = n, n + 1
else:
self.index2symbol = {}
self.symbol2index = {}
self.sos_id, self.eos_id = 0, 1
self.pad_token_id = self.sos_id
for idx,s in enumerate(self.symbols):
self.index2symbol[idx] = s
self.symbol2index[s] = idx
self.n_symbols = len(self.index2symbol)
def variableFromSymbols(self, mylist, add_eos=True):
# Convert a list of symbols to a tensor of indices (adding a EOS token at end)
#
# Input
# mylist : list of m symbols
# add_eos : true/false, if true add the EOS symbol at end
#
# Output
# output : [m or m+1 LongTensor] indices of each symbol (plus EOS if appropriate)
mylist = copy(mylist)
if add_eos:
mylist.append(EOS_token)
indices = [self.symbol2index[s] for s in mylist]
output = torch.LongTensor(indices)
#if USE_CUDA:
output = output.cuda()
return output
def symbolsFromVector(self, v):
# Convert indices to symbols, breaking where we get a EOS token
#
# Input
# v : list of m indices
#
# Output
# mylist : list of m or m-1 symbols (excluding EOS)
mylist = []
for x in v:
s = self.index2symbol[x]
if s == EOS_token:
break
mylist.append(s)
return mylist
def encode_scan_file(self, data, max_length):
encoded_data = []
for dp in data:
input, output = dp[0], dp[1]
if self.io_type == 'input':
raw = input
else:
assert self.io_type == 'output'
raw = output
encoded = self.variableFromSymbols(raw.split(' '))
encoded_data.append(encoded)
return encoded_data
def encode_scan_file_2_seg(self, data, max_length, cutoffs):
encoded_data_1, encoded_data_2 = [], []
for _id, dp in enumerate(data):
input, output, cutoff = dp[0], dp[1], cutoffs[_id]
assert self.io_type == 'output'
raw = output
encoded_1 = self.variableFromSymbols(raw.split(' ')[:cutoff])
encoded_2 = self.variableFromSymbols(raw.split(' ')[cutoff:])
encoded_data_1.append(encoded_1)
encoded_data_2.append(encoded_2)
return encoded_data_1, encoded_data_2
def encode_cfq_file(self, data, max_length):
encoded_data = []
for dp in data:
input, output = dp['query_ids'], dp['sparql_ids']
if self.io_type == 'input':
raw = input
else:
assert self.io_type == 'output'
raw = output + [self.eos_id]
encoded = torch.LongTensor(raw).cuda()
encoded_data.append(encoded)
return encoded_data
def encode_cogs_file(self, data, max_length):
encoded_data = []
for dp in data:
input, output = dp['src'], dp['trg']
if self.io_type == 'input':
raw = input
else:
assert self.io_type == 'output'
raw = output
encoded = self.variableFromSymbols(raw.split(' '))
encoded_data.append(encoded)
return encoded_data
def decode(self, ids):
out = self.symbolsFromVector(ids.cpu().numpy())
if out == []:
return out
if out[0] in ['<SOS>', '<SOS_2>']:
out = out[1:]
return out
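# A minimal usage sketch (requires CUDA, since variableFromSymbols moves tensors to the GPU):
#   out_lang = Lang(OUTPUT_TOKENS_SCAN, io_type='output')
#   ids = out_lang.variableFromSymbols(['I_JUMP', 'I_WALK'])  # appends <EOS>
#   out_lang.decode(ids)                                       # -> ['I_JUMP', 'I_WALK']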
def calculate_accuracy(preds, gts):
assert len(preds) == len(gts)
match = 0
for pred, gt in zip(preds, gts):
if pred == gt:
match += 1
return match / len(preds)
def encode_file(tokenizer, data_path, max_length, pad_to_max_length=True, return_tensors="pt", max_examples=None):
examples = []
if data_path[-3:] == '.gz':
print('Data file is gzipped')
f = gzip.open(data_path, "rt")
else:
print('Data file is plain text')
print(data_path)
f = open(data_path, "r", encoding='utf-8')
for i, text in enumerate(f.readlines()):
tokenized = tokenizer.batch_encode_plus( [text + ' </s>'], max_length=max_length,
pad_to_max_length=pad_to_max_length, return_tensors=return_tensors )
if max_examples and i >= max_examples:
break
examples.append(tokenized)
f.close()
return examples
# def encode_file_iterator(tokenizer, data_path, max_length, pad_to_max_length=True, return_tensors="pt", max_examples=None):
# '''
# This provides a low-memory usage way of iterating thru all of the source/target lines for processing by JIT loader.
# '''
# if data_path[-3:] == '.gz':
# print('Data file is gzipped')
# f = gzip.open(data_path, "rt")
# else:
# print('Data file is plain text')
# f = open(data_path, "r", encoding='utf-8')
#
# for i, text in enumerate(f):
#
# tokenized = tokenizer.batch_encode_plus( [text + ' </s>'], max_length=max_length,
# pad_to_max_length=pad_to_max_length, return_tensors=return_tensors )
#
# yield tokenized
#
# if max_examples and i >= max_examples:
# break
#
# f.close()
# def convert_scan_actions_to_text(actions):
# return ' '.join([ACTION_TO_TEXT[_action] for _action in actions.split(' ')])
# def encode_scan_file(tokenizer, data, io_type, max_length, pad_to_max_length=True, return_tensors="pt", max_examples=None):
# examples = []
# # a = tokenizer.batch_encode_plus( ['right jump left run walk look' + ' <s> </s>'], max_length=max_length,
# # pad_to_max_length=pad_to_max_length, return_tensors=return_tensors )
# # print(a)
# # exit()
# for dp in data:
# input, output = dp[0], dp[1]
# if io_type == 'input':
# raw = input
# else:
# assert io_type == 'output'
# raw = convert_scan_actions_to_text(output)
#
# tokenized = tokenizer.batch_encode_plus( [raw + ' </s>'], max_length=max_length,
# pad_to_max_length=pad_to_max_length, return_tensors=return_tensors )
#
# if max_examples and i >= max_examples:
# break
# examples.append(tokenized)
#
# return examples
def load_scan_file(mytype, split):
# Load SCAN dataset from file
#
# Input
# mytype : type of SCAN experiment
# split : 'train' or 'test'
#
# Output
# commands : list of input/output strings (as tuples)
assert mytype in ['simple', 'addprim_jump', 'length', 'addprim_turn_left', 'all', 'template_around_right', 'viz',
'examine', 'template_jump_around_right', 'template_right', 'template_around_right',
'mcd1', 'mcd2', 'mcd3', 'mcd1.1', 'mcd1.2', 'debug', 'attn_vis']
assert split in ['train', 'test', 'val']
if split == 'val' and mytype not in ['mcd1', 'mcd2', 'mcd3', 'mcd1.1', 'mcd1.2']:
split = 'test'
fn = 'data/scan/tasks_' + split + '_' + mytype + '.txt'
fid = open(fn, 'r')
lines = fid.readlines()
fid.close()
lines = [l.strip() for l in lines]
lines = [l.lstrip('IN: ') for l in lines]
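    # note: str.lstrip strips a *set* of characters, not a prefix; this is safe here
    # because no SCAN command begins with 'I', 'N', ':' or a space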
commands = [l.split(' OUT: ') for l in lines]
return commands
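# Each returned element is an [input, output] pair; for illustration only (actual
# contents depend on the task file):
#   commands = load_scan_file('addprim_jump', 'train')
#   commands[0]  # e.g. ['walk opposite right', 'I_TURN_RIGHT I_TURN_RIGHT I_WALK']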
class CompositionDataset(Dataset):
def __init__(
self,
src_lang,
trg_lang,
data_dir,
type_path,
sub_task,
max_source_length=20,
max_target_length=20,
tokenized=False,
):
super().__init__()
self.max_source_length = max_source_length
self.max_target_length = max_target_length
self.tokenized = tokenized
self.src_lang = src_lang
self.trg_lang = trg_lang
def __len__(self):
if self.tokenized:
return len(self.dataset)
else:
return len(self.source)
def __getitem__(self, index):
if self.tokenized:
dp = self.dataset[index]
source_ids, src_mask, target_ids = dp[0], dp[1], dp[2]
source_ids = source_ids[:self.max_source_length]
#src_mask = src_mask[:self.max_source_length]
target_ids = target_ids[:self.max_target_length]
else:
source_ids = self.source[index]
target_ids = self.target[index]
return {"source_ids": source_ids, "target_ids": target_ids}
@staticmethod
def trim_seq2seq_batch(batch, src_pad_token_id, trg_pad_token_id, trim_y=True):
if trim_y:
y = trim_batch(batch["target_ids"], trg_pad_token_id)
else:
y = batch["target_ids"]
source_ids, source_mask = trim_batch(batch["source_ids"], src_pad_token_id, attention_mask=batch["source_mask"])
return source_ids, source_mask, y
def pad_to_max_len(self, ids, max_len, pad_token_id):
ids_length = ids.size(0)
if ids_length == max_len:
return ids
pad_tokens = torch.tensor([pad_token_id] * (max_len - ids_length))
# if ids.type() == 'torch.cuda.FloatTensor':
# print(ids)
# exit()
padded_ids = torch.cat([ids, pad_tokens.cuda()])
return padded_ids
def create_mask(self, ids, max_len):
ids_length = ids.size(0)
mask = torch.tensor([1] * ids_length + [0] * (max_len - ids_length)).cuda()
return mask
def collate_fn(self, batch):
max_src_len = max(map(len, [x["source_ids"] for x in batch]))
max_trg_len = max(map(len, [x["target_ids"] for x in batch]))
src_mask = torch.stack([self.create_mask(x["source_ids"], max_src_len) for x in batch])
src_ids = torch.stack([self.pad_to_max_len(x["source_ids"], max_src_len, self.src_lang.pad_token_id) for x in batch])
#masks = torch.stack([x["source_mask"] for x in batch])
trg_ids = torch.stack([self.pad_to_max_len(x["target_ids"], max_trg_len, self.trg_lang.pad_token_id) for x in batch])
y = trim_batch(trg_ids, self.trg_lang.pad_token_id)
src_ids, src_mask = trim_batch(src_ids, self.src_lang.pad_token_id, attention_mask=src_mask)
return {"source_ids": src_ids, "source_mask": src_mask, "target_ids": y}
class ScanDataset(CompositionDataset):
def __init__(
self,
src_lang,
trg_lang,
data_dir="./data/scan/",
type_path="train",
sub_task="addprim_jump",
max_source_length=20,
max_target_length=20,
tokenized=False,
):
super().__init__(src_lang, trg_lang, data_dir, type_path, sub_task, max_source_length,
max_target_length, tokenized)
scan_data = load_scan_file(sub_task, type_path)
print(len(scan_data))
all_scan_dict = self.convert_to_dict(load_scan_file('all', 'train'))
self.action_count_labels, self.action_group_labels, self.action_type_labels = self.construct_count_label(scan_data, all_scan_dict)
if not tokenized:
self.source = self.src_lang.encode_scan_file(scan_data, max_source_length)
self.target = self.trg_lang.encode_scan_file(scan_data, max_target_length)
else:
self.dataset = torch.load(os.path.join(data_dir, type_path))
def construct_count_label(self, raw_data, all_data_dict):
all_count_labels = []
count_label_scheme = "v1"
group_label_scheme = "v2"
type_label_scheme = "v2"
all_action_group_labels, all_action_type_labels = [], []
# Group 1: single prim (jump), Group 2: prim + direction (jump left), Group 3: prim opposite, Group 4: prim around
#no_skip_id = np.random.randint(0, len(raw_data), int(len(raw_data)*0.05))
#no_skip_id = np.random.choice(range(len(raw_data)), int(len(raw_data)*0.07), replace=False)
# no_skip_id = np.random.choice(range(len(raw_data)), 10, replace=False)
skip_cnt, sup_cnt = 0, 0
for _id, dp in enumerate(raw_data):
input_text, output_text = dp[0], dp[1]
input_tok, output_tok = input_text.split(' '), output_text.split(' ')
count_labels, group_labels, type_labels = [], [], []
first_part_output_text, second_part_output_text = '', ''
if 'and' in input_tok:
first_part_input_tok = input_tok[:input_tok.index('and')]
second_part_input_tok = input_tok[input_tok.index('and')+1:]
first_part_output_text = all_data_dict[' '.join(first_part_input_tok)]
second_part_output_text = all_data_dict[' '.join(second_part_input_tok)]
elif 'after' in input_tok:
second_part_input_tok = input_tok[:input_tok.index('after')]
first_part_input_tok = input_tok[input_tok.index('after') + 1:]
first_part_output_text = all_data_dict[' '.join(first_part_input_tok)]
second_part_output_text = all_data_dict[' '.join(second_part_input_tok)]
else:
first_part_input_tok, second_part_input_tok = input_tok, []
first_part_output_text = output_text
first_part_output_tok, second_part_output_tok = first_part_output_text.split(' '), second_part_output_text.split(' ')
if second_part_output_text == '':
second_part_output_tok = []
assert len(first_part_output_tok) + len(second_part_output_tok) == len(output_tok), \
(len(first_part_output_tok), len(second_part_output_tok), len(output_tok), first_part_output_text, second_part_output_text, output_text)
### 1. Build the action count labels ###
if count_label_scheme == 'v1':
### For the first part output
if 'twice' in first_part_input_tok:
if 'after' in input_tok:
count_labels += ([4] * int(len(first_part_output_tok) / 2) + [3] * int(len(first_part_output_tok) / 2))
else:
count_labels += ([1] * int(len(first_part_output_tok) / 2) + [0] * int(len(first_part_output_tok) / 2))
# count_labels += ([1] + [0] * (int(len(first_part_output_tok) / 2) - 1)) * 2
elif 'thrice' in first_part_input_tok:
if 'after' in input_tok:
count_labels += ([5] * int(len(first_part_output_tok) / 3) + [4] * int(len(first_part_output_tok) / 3) + \
[3] * int(len(first_part_output_tok) / 3))
else:
count_labels += ([2] * int(len(first_part_output_tok) / 3) + [1] * int(len(first_part_output_tok) / 3) + \
[0] * int(len(first_part_output_tok) / 3))
# count_labels += ([1] + [0] * (int(len(first_part_output_tok) / 3) - 1)) * 3
else:
if 'after' in input_tok:
count_labels += ([3] * len(first_part_output_tok))
else:
count_labels += ([0] * len(first_part_output_tok))
# count_labels += ([1] + [0] * (int(len(first_part_output_tok)) - 1))
### For the second part output
if len(second_part_output_tok) > 0:
if 'twice' in second_part_input_tok:
if 'after' in input_tok:
count_labels += ([1] * int(len(second_part_output_tok) / 2) + [0] * int(len(second_part_output_tok) / 2))
else:
count_labels += ([4] * int(len(second_part_output_tok) / 2) + [3] * int(len(second_part_output_tok) / 2))
# count_labels += ([1] + [0] * (int(len(second_part_output_tok) / 2) - 1)) * 2
elif 'thrice' in second_part_input_tok:
if 'after' in input_tok:
count_labels += ([2] * int(len(second_part_output_tok) / 3) + [1] * int(len(second_part_output_tok) / 3) + \
[0] * int(len(second_part_output_tok) / 3))
else:
count_labels += ([5] * int(len(second_part_output_tok) / 3) + [4] * int(len(second_part_output_tok) / 3) + \
[3] * int(len(second_part_output_tok) / 3))
# count_labels += ([1] + [0] * (int(len(second_part_output_tok) / 3) - 1)) * 3
else:
if 'after' in input_tok:
count_labels += ([0] * len(second_part_output_tok))
else:
count_labels += ([3] * len(second_part_output_tok))
# count_labels += ([1] + [0] * (int(len(second_part_output_tok)) - 1))
elif count_label_scheme == 'v2':
### For the first part output
if 'twice' in first_part_input_tok:
count_labels += ([1] * int(len(first_part_output_tok) / 2) + [0] * int(
len(first_part_output_tok) / 2))
elif 'thrice' in first_part_input_tok:
count_labels += ([2] * int(len(first_part_output_tok) / 3) + [1] * int(
len(first_part_output_tok) / 3) + \
[0] * int(len(first_part_output_tok) / 3))
else:
count_labels += ([0] * len(first_part_output_tok))
### For the second part output
if len(second_part_output_tok) > 0:
if 'twice' in second_part_input_tok:
count_labels += ([1] * int(len(second_part_output_tok) / 2) + [0] * int(
len(second_part_output_tok) / 2))
elif 'thrice' in second_part_input_tok:
count_labels += ([2] * int(len(second_part_output_tok) / 3) + [1] * int(
len(second_part_output_tok) / 3) + [0] * int(len(second_part_output_tok) / 3))
else:
count_labels += ([0] * len(second_part_output_tok))
elif count_label_scheme == 'v3':
### For the first part output
if 'thrice' in first_part_input_tok and 'thrice' in second_part_input_tok:
start_count = 5
elif ('thrice' in first_part_input_tok and 'twice' in second_part_input_tok) or \
('twice' in first_part_input_tok and 'thrice' in second_part_input_tok):
start_count = 4
elif ('twice' in first_part_input_tok and 'twice' in second_part_input_tok) or \
('thrice' in first_part_input_tok) or ('thrice' in second_part_input_tok):
start_count = 3
elif 'twice' in first_part_input_tok or 'twice' in second_part_input_tok:
start_count = 2
else:
start_count = 1
if 'twice' in first_part_input_tok:
if 'after' in input_tok:
count_labels += ([start_count] * int(len(first_part_output_tok) / 2) + [start_count-1] * int(len(first_part_output_tok) / 2))
else:
count_labels += ([1] * int(len(first_part_output_tok) / 2) + [0] * int(len(first_part_output_tok) / 2))
# count_labels += ([1] + [0] * (int(len(first_part_output_tok) / 2) - 1)) * 2
elif 'thrice' in first_part_input_tok:
if 'after' in input_tok:
count_labels += ([start_count] * int(len(first_part_output_tok) / 3) + [start_count-1] * int(len(first_part_output_tok) / 3) + \
[start_count-2] * int(len(first_part_output_tok) / 3))
else:
count_labels += ([2] * int(len(first_part_output_tok) / 3) + [1] * int(len(first_part_output_tok) / 3) + \
[0] * int(len(first_part_output_tok) / 3))
# count_labels += ([1] + [0] * (int(len(first_part_output_tok) / 3) - 1)) * 3
else:
if 'after' in input_tok:
count_labels += ([start_count] * len(first_part_output_tok))
else:
count_labels += ([0] * len(first_part_output_tok))
# count_labels += ([1] + [0] * (int(len(first_part_output_tok)) - 1))
### For the second part output
if len(second_part_output_tok) > 0:
if 'twice' in second_part_input_tok:
if 'after' in input_tok:
count_labels += ([1] * int(len(second_part_output_tok) / 2) + [0] * int(len(second_part_output_tok) / 2))
else:
count_labels += ([start_count] * int(len(second_part_output_tok) / 2) + [start_count-1] * int(len(second_part_output_tok) / 2))
# count_labels += ([1] + [0] * (int(len(second_part_output_tok) / 2) - 1)) * 2
elif 'thrice' in second_part_input_tok:
if 'after' in input_tok:
count_labels += ([2] * int(len(second_part_output_tok) / 3) + [1] * int(len(second_part_output_tok) / 3) + \
[0] * int(len(second_part_output_tok) / 3))
else:
count_labels += ([start_count] * int(len(second_part_output_tok) / 3) + [start_count-1] * int(len(second_part_output_tok) / 3) + \
[start_count-2] * int(len(second_part_output_tok) / 3))
# count_labels += ([1] + [0] * (int(len(second_part_output_tok) / 3) - 1)) * 3
else:
if 'after' in input_tok:
count_labels += ([0] * len(second_part_output_tok))
else:
count_labels += ([start_count] * len(second_part_output_tok))
# count_labels += ([1] + [0] * (int(len(second_part_output_tok)) - 1))
elif count_label_scheme == 'v3.1':
### For the first part output
if 'thrice' in first_part_input_tok and 'thrice' in second_part_input_tok:
start_count = 5
elif ('thrice' in first_part_input_tok and 'twice' in second_part_input_tok) or \
('twice' in first_part_input_tok and 'thrice' in second_part_input_tok):
start_count = 4
elif ('twice' in first_part_input_tok and 'twice' in second_part_input_tok) or \
('thrice' in first_part_input_tok) or ('thrice' in second_part_input_tok):
start_count = 3
elif 'twice' in first_part_input_tok or 'twice' in second_part_input_tok:
start_count = 2
else:
start_count = 1
if 'twice' in first_part_input_tok:
count_labels += ([start_count] * int(len(first_part_output_tok) / 2) + [start_count - 1] * int(
len(first_part_output_tok) / 2))
# count_labels += ([1] + [0] * (int(len(first_part_output_tok) / 2) - 1)) * 2
elif 'thrice' in first_part_input_tok:
count_labels += ([start_count] * int(len(first_part_output_tok) / 3) + [start_count - 1] * int(
len(first_part_output_tok) / 3) + \
[start_count - 2] * int(len(first_part_output_tok) / 3))
else:
count_labels += ([start_count] * len(first_part_output_tok))
### For the second part output
if len(second_part_output_tok) > 0:
if 'twice' in second_part_input_tok:
count_labels += ([1] * int(len(second_part_output_tok) / 2) + [0] * int(
len(second_part_output_tok) / 2))
# count_labels += ([1] + [0] * (int(len(second_part_output_tok) / 2) - 1)) * 2
elif 'thrice' in second_part_input_tok:
count_labels += ([2] * int(len(second_part_output_tok) / 3) + [1] * int(
len(second_part_output_tok) / 3) + \
[0] * int(len(second_part_output_tok) / 3))
else:
count_labels += ([0] * len(second_part_output_tok))
else:
### For the first part output
if 'twice' in first_part_input_tok:
if 'after' in input_tok:
new_count_labels = list(range(int(len(first_part_output_tok) / 2)))[::-1] * 2
else:
new_count_labels = list(range(int(len(first_part_output_tok) / 2)))[::-1] * 2
elif 'thrice' in first_part_input_tok:
if 'after' in input_tok:
new_count_labels = list(range(int(len(first_part_output_tok) / 3)))[::-1] * 3
else:
new_count_labels = list(range(int(len(first_part_output_tok) / 3)))[::-1] * 3
else:
if 'after' in input_tok:
new_count_labels = list(range(len(first_part_output_tok)))[::-1]
else:
new_count_labels = list(range(len(first_part_output_tok)))[::-1]
count_labels += new_count_labels
### For the second part output
if len(second_part_output_tok) > 0:
if 'twice' in second_part_input_tok:
if 'after' in input_tok:
new_count_labels = list(range(int(len(second_part_output_tok) / 2)))[::-1] * 2
new_count_labels = [_c + 8 for _c in new_count_labels]
else:
new_count_labels = list(range(int(len(second_part_output_tok) / 2)))[::-1] * 2
new_count_labels = [_c + 8 for _c in new_count_labels]
elif 'thrice' in second_part_input_tok:
if 'after' in input_tok:
new_count_labels = list(range(int(len(second_part_output_tok) / 3)))[::-1] * 3
new_count_labels = [_c + 8 for _c in new_count_labels]
else:
new_count_labels = list(range(int(len(second_part_output_tok) / 3)))[::-1] * 3
new_count_labels = [_c + 8 for _c in new_count_labels]
else:
if 'after' in input_tok:
new_count_labels = list(range(len(second_part_output_tok)))[::-1]
new_count_labels = [_c + 8 for _c in new_count_labels]
else:
new_count_labels = list(range(len(second_part_output_tok)))[::-1]
new_count_labels = [_c + 8 for _c in new_count_labels]
count_labels += new_count_labels
# count_labels = []
# count_labels += list(range(len(first_part_output_tok)))[::-1]
# count_labels += list(range(len(second_part_output_tok)))[::-1]
assert len(count_labels) == len(output_tok), (len(count_labels), len(output_tok), input_text, first_part_input_tok, count_labels, output_tok,
first_part_output_text, first_part_output_tok, second_part_output_text, second_part_output_tok)
count_labels.append(-1) # For the EOS token
# count_labels.append(7) # For the EOS token
### 2. Build the action group labels ###
if group_label_scheme == 'v1': ## As used in exp 9.0-9.4
if 'around' in first_part_input_tok:
if 'after' in input_tok:
group_labels += ([4] * len(first_part_output_tok))
else:
group_labels += ([0] * len(first_part_output_tok))
elif 'opposite' in first_part_input_tok:
if 'after' in input_tok:
group_labels += ([5] * len(first_part_output_tok))
else:
group_labels += ([1] * len(first_part_output_tok))
elif 'left' in first_part_input_tok or 'right' in first_part_input_tok:
if 'after' in input_tok:
group_labels += ([6] * len(first_part_output_tok))
else:
group_labels += ([2] * len(first_part_output_tok))
else:
if 'after' in input_tok:
group_labels += ([7] * len(first_part_output_tok))
else:
group_labels += ([3] * len(first_part_output_tok))
if 'around' in second_part_input_tok:
if 'after' in input_tok:
group_labels += ([0] * len(second_part_output_tok))
else:
group_labels += ([4] * len(second_part_output_tok))
elif 'opposite' in second_part_input_tok:
if 'after' in input_tok:
group_labels += ([1] * len(second_part_output_tok))
else:
group_labels += ([5] * len(second_part_output_tok))
elif 'left' in second_part_input_tok or 'right' in second_part_input_tok:
if 'after' in input_tok:
group_labels += ([2] * len(second_part_output_tok))
else:
group_labels += ([6] * len(second_part_output_tok))
else:
if 'after' in input_tok:
group_labels += ([3] * len(second_part_output_tok))
else:
group_labels += ([7] * len(second_part_output_tok))
else:
### For the first part output
if 'twice' in first_part_input_tok:
if 'after' in input_tok:
new_group_labels = list(range(int(len(first_part_output_tok) / 2)))[::-1] * 2
new_group_labels = [_c + 8 for _c in new_group_labels]
else:
new_group_labels = list(range(int(len(first_part_output_tok) / 2)))[::-1] * 2
elif 'thrice' in first_part_input_tok:
if 'after' in input_tok:
new_group_labels = list(range(int(len(first_part_output_tok) / 3)))[::-1] * 3
new_group_labels = [_c + 8 for _c in new_group_labels]
else:
new_group_labels = list(range(int(len(first_part_output_tok) / 3)))[::-1] * 3
else:
if 'after' in input_tok:
new_group_labels = list(range(len(first_part_output_tok)))[::-1]
new_group_labels = [_c + 8 for _c in new_group_labels]
else:
new_group_labels = list(range(len(first_part_output_tok)))[::-1]
group_labels += new_group_labels
### For the second part output
if len(second_part_output_tok) > 0:
if 'twice' in second_part_input_tok:
if 'after' in input_tok:
new_group_labels = list(range(int(len(second_part_output_tok) / 2)))[::-1] * 2
else:
new_group_labels = list(range(int(len(second_part_output_tok) / 2)))[::-1] * 2
new_group_labels = [_c + 8 for _c in new_group_labels]
elif 'thrice' in second_part_input_tok:
if 'after' in input_tok:
new_group_labels = list(range(int(len(second_part_output_tok) / 3)))[::-1] * 3
else:
new_group_labels = list(range(int(len(second_part_output_tok) / 3)))[::-1] * 3
new_group_labels = [_c + 8 for _c in new_group_labels]
else:
if 'after' in input_tok:
new_group_labels = list(range(len(second_part_output_tok)))[::-1]
else:
new_group_labels = list(range(len(second_part_output_tok)))[::-1]
new_group_labels = [_c + 8 for _c in new_group_labels]
group_labels += new_group_labels
assert len(group_labels) == len(output_tok)
group_labels.append(-1) # For the EOS token
# group_labels.append(17) # For the EOS token
### 3. Build the action type labels ###
### For the first part output
if type_label_scheme == 'v1':
if 'around' in first_part_input_tok:
new_type_labels = [3] * len(first_part_output_tok)
elif 'opposite' in first_part_input_tok:
new_type_labels = [2] * len(first_part_output_tok)
elif 'left' in first_part_input_tok or 'right' in first_part_input_tok:
new_type_labels = [1] * len(first_part_output_tok)
else:
new_type_labels = [0] * len(first_part_output_tok)
# if 'after' in input_tok:
# new_type_labels = [_c + 4 for _c in new_type_labels]
type_labels += new_type_labels
### For the second part output
if len(second_part_output_tok) > 0:
if 'around' in second_part_input_tok:
new_type_labels = [3] * len(second_part_output_tok)
elif 'opposite' in second_part_input_tok:
new_type_labels = [2] * len(second_part_output_tok)
elif 'left' in second_part_input_tok or 'right' in second_part_input_tok:
new_type_labels = [1] * len(second_part_output_tok)
else:
new_type_labels = [0] * len(second_part_output_tok)
# if 'after' not in input_tok:
# new_type_labels = [_c + 4 for _c in new_type_labels]
type_labels += new_type_labels
elif type_label_scheme == 'v2':
if 'twice' in first_part_input_tok:
type_labels += ([1] * int(len(first_part_output_tok) / 2) + [0] * int(
len(first_part_output_tok) / 2))
elif 'thrice' in first_part_input_tok:
type_labels += ([2] * int(len(first_part_output_tok) / 3) + [1] * int(
len(first_part_output_tok) / 3) + \
[0] * int(len(first_part_output_tok) / 3))
else:
type_labels += ([0] * len(first_part_output_tok))
### For the second part output
if len(second_part_output_tok) > 0:
if 'twice' in second_part_input_tok:
type_labels += ([1] * int(len(second_part_output_tok) / 2) + [0] * int(
len(second_part_output_tok) / 2))
elif 'thrice' in second_part_input_tok:
type_labels += ([2] * int(len(second_part_output_tok) / 3) + [1] * int(
len(second_part_output_tok) / 3) + [0] * int(len(second_part_output_tok) / 3))
else:
type_labels += ([0] * len(second_part_output_tok))
assert len(type_labels) == len(output_tok)
type_labels.append(-1) # For the EOS token
# group_labels.append(17) # For the EOS token
# if _id not in no_skip_id:
# count_labels = [-1] * len(count_labels)
# group_labels = [-1] * len(group_labels)
# skip_cnt += 1
# else:
# sup_cnt += 1
all_action_type_labels.append(torch.tensor(type_labels).cuda())
all_count_labels.append(torch.tensor(count_labels).cuda())
all_action_group_labels.append(torch.tensor(group_labels).cuda())
print(skip_cnt, sup_cnt)
return all_count_labels, all_action_group_labels, all_action_type_labels
def convert_to_dict(self, raw_data):
dict_data = {}
for dp in raw_data:
input, output = dp[0], dp[1]
assert input not in dict_data
dict_data[input] = output
return dict_data
def __getitem__(self, index):
if self.tokenized:
dp = self.dataset[index]
source_ids, src_mask, target_ids = dp[0], dp[1], dp[2]
source_ids = source_ids[:self.max_source_length]
#src_mask = src_mask[:self.max_source_length]
target_ids = target_ids[:self.max_target_length]
else:
source_ids = self.source[index]
target_ids = self.target[index]
count_labels = self.action_count_labels[index]
group_labels = self.action_group_labels[index]
type_labels = self.action_type_labels[index]
return {"source_ids": source_ids, "target_ids": target_ids, "action_count_labels": count_labels,
"action_group_labels": group_labels, "action_type_labels": type_labels}
@staticmethod
def trim_seq2seq_batch(batch, src_pad_token_id, trg_pad_token_id, trim_y=True):
if trim_y:
y = trim_batch(batch["target_ids"], trg_pad_token_id)
else:
y = batch["target_ids"]
source_ids, source_mask = trim_batch(batch["source_ids"], src_pad_token_id, attention_mask=batch["source_mask"])
return source_ids, source_mask, y
def collate_fn(self, batch):
max_src_len = max(map(len, [x["source_ids"] for x in batch]))
max_trg_len = max(map(len, [x["target_ids"] for x in batch]))
src_mask = torch.stack([self.create_mask(x["source_ids"], max_src_len) for x in batch])
trg_mask = torch.stack([self.create_mask(x["target_ids"], max_trg_len) for x in batch])
src_ids = torch.stack([self.pad_to_max_len(x["source_ids"], max_src_len, self.src_lang.pad_token_id) for x in batch])
#masks = torch.stack([x["source_mask"] for x in batch])
trg_ids = torch.stack([self.pad_to_max_len(x["target_ids"], max_trg_len, self.trg_lang.pad_token_id) for x in batch])
action_count_labels = torch.stack([self.pad_to_max_len(x["action_count_labels"], max_trg_len, -1) for x in batch])
action_group_labels = torch.stack([self.pad_to_max_len(x["action_group_labels"], max_trg_len, -1) for x in batch])
action_type_labels = torch.stack(
[self.pad_to_max_len(x["action_type_labels"], max_trg_len, -1) for x in batch])
y = trim_batch(trg_ids, self.trg_lang.pad_token_id)
#action_count_labels = trim_batch(action_count_labels, -1)
# _src_ids, src_mask = trim_batch(src_ids, self.src_lang.pad_token_id, attention_mask=src_mask)
# print(_src_ids.size(), src_ids.size())
return {"source_ids": src_ids, "source_mask": src_mask, "target_ids": y, "target_mask": trg_mask,
"action_count_labels": action_count_labels, "action_group_labels": action_group_labels,
"action_type_labels": action_type_labels}
| [
"transformers.tokenization_utils.trim_batch",
"torch.full",
"gzip.open",
"torch.LongTensor",
"torch.nn.KLDivLoss",
"os.path.join",
"torch.tensor",
"torch.sum",
"torch.nn.functional.log_softmax",
"copy.copy"
]
| [((607, 653), 'torch.full', 'torch.full', (['(tgt_vocab_size,)', 'smoothing_value'], {}), '((tgt_vocab_size,), smoothing_value)\n', (617, 653), False, 'import torch\n'), ((837, 873), 'torch.nn.KLDivLoss', 'torch.nn.KLDivLoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (855, 873), False, 'import torch\n'), ((1322, 1348), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['pred'], {'dim': '(2)'}), '(pred, dim=2)\n', (1335, 1348), True, 'import torch.nn.functional as F\n'), ((3589, 3601), 'copy.copy', 'copy', (['mylist'], {}), '(mylist)\n', (3593, 3601), False, 'from copy import deepcopy, copy\n'), ((3733, 3758), 'torch.LongTensor', 'torch.LongTensor', (['indices'], {}), '(indices)\n', (3749, 3758), False, 'import torch\n'), ((6816, 6842), 'gzip.open', 'gzip.open', (['data_path', '"""rt"""'], {}), "(data_path, 'rt')\n", (6825, 6842), False, 'import gzip\n'), ((11591, 11682), 'transformers.tokenization_utils.trim_batch', 'trim_batch', (["batch['source_ids']", 'src_pad_token_id'], {'attention_mask': "batch['source_mask']"}), "(batch['source_ids'], src_pad_token_id, attention_mask=batch[\n 'source_mask'])\n", (11601, 11682), False, 'from transformers.tokenization_utils import trim_batch\n'), ((11890, 11943), 'torch.tensor', 'torch.tensor', (['([pad_token_id] * (max_len - ids_length))'], {}), '([pad_token_id] * (max_len - ids_length))\n', (11902, 11943), False, 'import torch\n'), ((12905, 12952), 'transformers.tokenization_utils.trim_batch', 'trim_batch', (['trg_ids', 'self.trg_lang.pad_token_id'], {}), '(trg_ids, self.trg_lang.pad_token_id)\n', (12915, 12952), False, 'from transformers.tokenization_utils import trim_batch\n'), ((12981, 13053), 'transformers.tokenization_utils.trim_batch', 'trim_batch', (['src_ids', 'self.src_lang.pad_token_id'], {'attention_mask': 'src_mask'}), '(src_ids, self.src_lang.pad_token_id, attention_mask=src_mask)\n', (12991, 13053), False, 'from transformers.tokenization_utils import trim_batch\n'), ((41258, 41349), 'transformers.tokenization_utils.trim_batch', 'trim_batch', (["batch['source_ids']", 'src_pad_token_id'], {'attention_mask': "batch['source_mask']"}), "(batch['source_ids'], src_pad_token_id, attention_mask=batch[\n 'source_mask'])\n", (41268, 41349), False, 'from transformers.tokenization_utils import trim_batch\n'), ((42463, 42510), 'transformers.tokenization_utils.trim_batch', 'trim_batch', (['trg_ids', 'self.trg_lang.pad_token_id'], {}), '(trg_ids, self.trg_lang.pad_token_id)\n', (42473, 42510), False, 'from transformers.tokenization_utils import trim_batch\n'), ((1571, 1586), 'torch.sum', 'torch.sum', (['loss'], {}), '(loss)\n', (1580, 1586), False, 'import torch\n'), ((11457, 11506), 'transformers.tokenization_utils.trim_batch', 'trim_batch', (["batch['target_ids']", 'trg_pad_token_id'], {}), "(batch['target_ids'], trg_pad_token_id)\n", (11467, 11506), False, 'from transformers.tokenization_utils import trim_batch\n'), ((41124, 41173), 'transformers.tokenization_utils.trim_batch', 'trim_batch', (["batch['target_ids']", 'trg_pad_token_id'], {}), "(batch['target_ids'], trg_pad_token_id)\n", (41134, 41173), False, 'from transformers.tokenization_utils import trim_batch\n'), ((1482, 1504), 'torch.sum', 'torch.sum', (['loss'], {'dim': '(2)'}), '(loss, dim=2)\n', (1491, 1504), False, 'import torch\n'), ((12216, 12277), 'torch.tensor', 'torch.tensor', (['([1] * ids_length + [0] * (max_len - ids_length))'], {}), '([1] * ids_length + [0] * (max_len - ids_length))\n', (12228, 12277), False, 'import torch\n'), ((14135, 14168), 
'os.path.join', 'os.path.join', (['data_dir', 'type_path'], {}), '(data_dir, type_path)\n', (14147, 14168), False, 'import os\n'), ((5641, 5662), 'torch.LongTensor', 'torch.LongTensor', (['raw'], {}), '(raw)\n', (5657, 5662), False, 'import torch\n'), ((39632, 39657), 'torch.tensor', 'torch.tensor', (['type_labels'], {}), '(type_labels)\n', (39644, 39657), False, 'import torch\n'), ((39702, 39728), 'torch.tensor', 'torch.tensor', (['count_labels'], {}), '(count_labels)\n', (39714, 39728), False, 'import torch\n'), ((39780, 39806), 'torch.tensor', 'torch.tensor', (['group_labels'], {}), '(group_labels)\n', (39792, 39806), False, 'import torch\n')] |
from bench import bench
print(bench(100, '''
def fib(n):
return n if n < 2 else fib(n-1) + fib(n-2)
''', '''
fib(20)
'''))
| [
"bench.bench"
]
| [((31, 128), 'bench.bench', 'bench', (['(100)', '"""\ndef fib(n):\n return n if n < 2 else fib(n-1) + fib(n-2)\n"""', '"""\n fib(20)\n"""'], {}), '(100, """\ndef fib(n):\n return n if n < 2 else fib(n-1) + fib(n-2)\n""",\n \'\\n fib(20)\\n\')\n', (36, 128), False, 'from bench import bench\n')] |
# coding=utf-8
from nlpir.native.nlpir_base import NLPIRBase
from ctypes import c_bool, c_char_p, c_int, POINTER, Structure, c_float
class StDoc(Structure):
__fields__ = [
("sTitle", c_char_p),
("sContent", c_char_p),
("sAuthor", c_char_p),
("sBoard", c_char_p),
("sDatatype", c_char_p)
]
class Classifier(NLPIRBase):
@property
def dll_name(self):
return "LJClassifier"
@NLPIRBase.byte_str_transform
def init_lib(self, data_path: str, encode: int, license_code: str) -> int:
"""
Call **classifier_init**
:param data_path:
:param encode:
:param license_code:
:return: 1 success 0 fail
"""
return self.get_func("classifier_init", [c_char_p, c_char_p, c_int, c_char_p], c_bool)(
"rulelist.xml", data_path, encode, license_code)
@NLPIRBase.byte_str_transform
def exit_lib(self) -> bool:
"""
Call **classifier_exit**
:return: exit success or not
"""
return self.get_func("classifier_exit", None, None)()
@NLPIRBase.byte_str_transform
def get_last_error_msg(self) -> str:
return self.get_func("classifier_GetLastErrorMsg", None, c_char_p)()
@NLPIRBase.byte_str_transform
def exec_1(self, data: StDoc, out_type: int = 0):
"""
Call **classifier_exec1**
        Classify the input document structure.
        :param data: the document structure
        :param out_type: whether the output includes confidence scores; 0 = no confidence, 1 = with confidence
        :return: topic category string; categories are separated by \t and sorted by confidence in descending order
            e.g. "TopNews Sensitive Litigation", "TopNews 1.00 Sensitive 0.95 Litigation 0.82"
"""
return self.get_func("classifier_exec1", [POINTER(StDoc), c_int], c_char_p)(data, out_type)
@NLPIRBase.byte_str_transform
def exec(self, title: str, content: str, out_type: int):
"""
Call **classifier_exec**
        Classify the input document.
        :param title: the document title
        :param content: the document content
        :param out_type: whether the output includes confidence scores, same as :func:`exec_1`
        :return: same as :func:`exec_1`
"""
return self.get_func("classifier_exec", [c_char_p, c_char_p, c_int], c_char_p)(title, content, out_type)
@NLPIRBase.byte_str_transform
def exec_file(self, filename: str, out_type: int) -> str:
"""
Call **classifier_execFile**
        :param filename: the input file name
        :param out_type: whether the output includes confidence scores; 0 = no confidence, 1 = with confidence
        :return: topic category string; categories are separated by \t and sorted by confidence in descending order
            e.g. "TopNews Sensitive Litigation", "TopNews 1.00 Sensitive 0.95 Litigation 0.82"
"""
return self.get_func("classifier_execFile", [c_char_p, c_int], c_char_p)(filename, out_type)
@NLPIRBase.byte_str_transform
def detail(self, class_name: str):
"""
Call **classifier_detail**
        For the current document, given a result class name, return the matching details.
        :param class_name: the result class name
        :return: the matching details, for example:
            ::
                RULE3:
                    SUBRULE1: insider 1
                    SUBRULE2: stock-market 1 fund 3 stock 8
                    SUBRULE3: book-digest 2
"""
return self.get_func("classifier_detail", [c_char_p], c_char_p)(class_name)
@NLPIRBase.byte_str_transform
def set_sim_thresh(self, sim: float):
"""
Call **classifier_setsimthresh**
        Set the similarity threshold.
        :param sim: the threshold value
:return:
"""
return self.get_func("classifier_setsimthresh", [c_float])(sim)
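# Illustrative usage sketch only, not part of the original wrapper: the
# constructor arguments (if any), data path, encode flag, license code, and
# sample text are assumptions, and running it requires the native LJClassifier
# library to be installed.
if __name__ == "__main__":
    classifier = Classifier()
    if classifier.init_lib("./Data", 1, ""):
        print(classifier.exec("Sample title", "Sample body text", 1))
        classifier.exit_lib()
    else:
        print(classifier.get_last_error_msg())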
| [
"ctypes.POINTER"
]
| [((1655, 1669), 'ctypes.POINTER', 'POINTER', (['StDoc'], {}), '(StDoc)\n', (1662, 1669), False, 'from ctypes import c_bool, c_char_p, c_int, POINTER, Structure, c_float\n')] |
# -*- coding: utf-8 -*-
import pytest
from mock import Mock
from bravado_core.exception import SwaggerMappingError
from bravado_core.operation import Operation
from bravado_core.param import get_param_type_spec
from bravado_core.param import Param
from bravado_core.spec import Spec
@pytest.fixture
def body_param_spec():
return {
'name': 'body',
'in': 'body',
'description': 'pet id',
'required': True,
'schema': {
'type': 'string',
},
}
def test_location_is_body(empty_swagger_spec, body_param_spec):
param = Param(empty_swagger_spec, Mock(spec=Operation), body_param_spec)
assert body_param_spec['schema'] == get_param_type_spec(param)
def test_location_is_not_body(empty_swagger_spec):
for location in ('path', 'query', 'header', 'formData',):
param_spec = {
'name': 'petId',
'in': location,
'description': 'ID of pet that needs to be updated',
'required': True,
'type': 'string',
}
param = Param(empty_swagger_spec, Mock(spec=Operation), param_spec)
assert param_spec == get_param_type_spec(param)
def test_location_invalid(empty_swagger_spec, body_param_spec):
body_param_spec['in'] = 'foo'
param = Param(empty_swagger_spec, Mock(spec=Operation), body_param_spec)
with pytest.raises(SwaggerMappingError) as excinfo:
get_param_type_spec(param)
assert 'location foo' in str(excinfo.value)
def test_ref(minimal_swagger_dict, body_param_spec):
minimal_swagger_dict['parameters'] = {
'PetIdParam': body_param_spec,
}
param_ref_spec = {'$ref': '#/parameters/PetIdParam'}
swagger_spec = Spec(minimal_swagger_dict)
param = Param(swagger_spec, Mock(spec=Operation), param_ref_spec)
assert {'type': 'string'} == get_param_type_spec(param)
| [
"bravado_core.spec.Spec",
"bravado_core.param.get_param_type_spec",
"pytest.raises",
"mock.Mock"
]
| [((1718, 1744), 'bravado_core.spec.Spec', 'Spec', (['minimal_swagger_dict'], {}), '(minimal_swagger_dict)\n', (1722, 1744), False, 'from bravado_core.spec import Spec\n'), ((614, 634), 'mock.Mock', 'Mock', ([], {'spec': 'Operation'}), '(spec=Operation)\n', (618, 634), False, 'from mock import Mock\n'), ((693, 719), 'bravado_core.param.get_param_type_spec', 'get_param_type_spec', (['param'], {}), '(param)\n', (712, 719), False, 'from bravado_core.param import get_param_type_spec\n'), ((1320, 1340), 'mock.Mock', 'Mock', ([], {'spec': 'Operation'}), '(spec=Operation)\n', (1324, 1340), False, 'from mock import Mock\n'), ((1369, 1403), 'pytest.raises', 'pytest.raises', (['SwaggerMappingError'], {}), '(SwaggerMappingError)\n', (1382, 1403), False, 'import pytest\n'), ((1424, 1450), 'bravado_core.param.get_param_type_spec', 'get_param_type_spec', (['param'], {}), '(param)\n', (1443, 1450), False, 'from bravado_core.param import get_param_type_spec\n'), ((1777, 1797), 'mock.Mock', 'Mock', ([], {'spec': 'Operation'}), '(spec=Operation)\n', (1781, 1797), False, 'from mock import Mock\n'), ((1848, 1874), 'bravado_core.param.get_param_type_spec', 'get_param_type_spec', (['param'], {}), '(param)\n', (1867, 1874), False, 'from bravado_core.param import get_param_type_spec\n'), ((1092, 1112), 'mock.Mock', 'Mock', ([], {'spec': 'Operation'}), '(spec=Operation)\n', (1096, 1112), False, 'from mock import Mock\n'), ((1155, 1181), 'bravado_core.param.get_param_type_spec', 'get_param_type_spec', (['param'], {}), '(param)\n', (1174, 1181), False, 'from bravado_core.param import get_param_type_spec\n')] |
# Copyright (c) 2019 Princeton University
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from markdown import markdown
import base64
import json
def main(params):
try:
md = json.loads(base64.decodebytes(params["__ow_body"].encode("utf-8")))["markdown"].encode("utf-8")
md_text = base64.decodebytes(md).decode("utf-8")
except KeyError:
return {'Error' : 'Possibly lacking markdown parameter in request.'}
test_id = params["__ow_query"].split("&")[0]
html = markdown(md_text)
return {"result": "ok", "html_response": html, "testid": test_id}
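# Illustrative local invocation, not part of the original handler: it builds
# the doubly base64-encoded payload the function expects and calls main()
# directly; the sample markdown and query string are made up.
if __name__ == "__main__":
    sample_md = "# Hello\n\nSome *markdown* text."
    inner = base64.encodebytes(sample_md.encode("utf-8")).decode("utf-8")
    body = base64.encodebytes(json.dumps({"markdown": inner}).encode("utf-8")).decode("utf-8")
    print(main({"__ow_body": body, "__ow_query": "test123&other=1"}))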
| [
"markdown.markdown",
"base64.decodebytes"
]
| [((593, 610), 'markdown.markdown', 'markdown', (['md_text'], {}), '(md_text)\n', (601, 610), False, 'from markdown import markdown\n'), ((394, 416), 'base64.decodebytes', 'base64.decodebytes', (['md'], {}), '(md)\n', (412, 416), False, 'import base64\n')] |
from pydub import AudioSegment
import os
import math
from pathlib import Path
'''
Splice wav files into multiple segments.
'''
LENGTH = 3 # Set splice length in seconds
def splice(audioPath, outputPath):
# try:
# os.mkdir('Spliced Spectrogram training') # Need to figure out where to put this
# except OSError:
# print("Creation of the directory failed")
audio = AudioSegment.from_wav(audioPath)
count = math.ceil(audio.duration_seconds/LENGTH) # Do we want the last part of audio?
t1 = 0
t2 = LENGTH*1000
for i in range(count):
newAudio = audio[t1:t2]
newPath = outputPath+Path(audioPath).stem+'_splice'+str(i)+'.wav'
newAudio.export(newPath, format="wav")
t1 = t2
t2 = t2 + LENGTH*1000
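# Illustrative usage sketch, not part of the original module; the paths below
# are hypothetical. Note that outputPath is used as a raw string prefix, so a
# trailing separator should be included.
if __name__ == "__main__":
    splice("recordings/sample.wav", "spliced/")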
| [
"math.ceil",
"pydub.AudioSegment.from_wav",
"pathlib.Path"
]
| [((396, 428), 'pydub.AudioSegment.from_wav', 'AudioSegment.from_wav', (['audioPath'], {}), '(audioPath)\n', (417, 428), False, 'from pydub import AudioSegment\n'), ((441, 483), 'math.ceil', 'math.ceil', (['(audio.duration_seconds / LENGTH)'], {}), '(audio.duration_seconds / LENGTH)\n', (450, 483), False, 'import math\n'), ((640, 655), 'pathlib.Path', 'Path', (['audioPath'], {}), '(audioPath)\n', (644, 655), False, 'from pathlib import Path\n')] |
from typing import Dict, List
from arango.cursor import Cursor
from django.http.response import Http404
from django.shortcuts import get_object_or_404
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.request import Request
from rest_framework_extensions.mixins import NestedViewSetMixin
from multinet.api.models import Workspace, WorkspaceRole
from multinet.api.utils.arango import ArangoQuery
class MultinetPagination(LimitOffsetPagination):
default_limit = 100
class ArangoPagination(LimitOffsetPagination):
"""Override the LimitOffsetPagination class to allow for use with arango cursors."""
def _set_pre_query_params(self, request):
self.limit = self.get_limit(request)
if self.limit is None:
return None
self.offset = self.get_offset(request)
self.request = request
def _set_post_query_params(self):
if self.count > self.limit and self.template is not None:
self.display_page_controls = True
def paginate_queryset(self, query: ArangoQuery, request: Request) -> List[Dict]:
self._set_pre_query_params(request)
paginated_query = query.paginate(self.limit, self.offset)
cur: Cursor = paginated_query.execute(full_count=True)
self.count = cur.statistics()['fullCount']
self._set_post_query_params()
return list(cur)
class WorkspaceChildMixin(NestedViewSetMixin):
def get_queryset(self):
"""
        Get the queryset for workspace child endpoints.
        Check that the requesting user has appropriate permissions for the associated workspace.
"""
child_objects = super().get_queryset()
# prevent warning for schema generation incompatibility
if getattr(self, 'swagger_fake_view', False):
return child_objects.none()
parent_query_dict = self.get_parents_query_dict()
workspace = get_object_or_404(
Workspace.objects.select_related('owner'), name=parent_query_dict['workspace__name']
)
# No user or user permission required for public workspaces
if workspace.public:
return child_objects
# Private workspace
request_user = self.request.user
if not request_user.is_authenticated: # anonymous user
raise Http404
workspace_role = WorkspaceRole.objects.filter(
workspace=workspace, user=request_user
).first()
# If the user is at least a reader or the owner, grant access
if workspace_role is not None or workspace.owner == request_user:
return child_objects
# Read access denied
raise Http404
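# Hedged, non-runnable sketch (all names below are assumptions, not part of
# this module): one way a DRF viewset might combine the mixin and paginators
# defined above.
#
#     class RowsViewSet(WorkspaceChildMixin, ReadOnlyModelViewSet):
#         pagination_class = MultinetPagination
#
#         def list(self, request, *args, **kwargs):
#             paginator = ArangoPagination()
#             rows = paginator.paginate_queryset(some_arango_query, request)
#             return paginator.get_paginated_response(rows)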
| [
"multinet.api.models.Workspace.objects.select_related",
"multinet.api.models.WorkspaceRole.objects.filter"
]
| [((1961, 2002), 'multinet.api.models.Workspace.objects.select_related', 'Workspace.objects.select_related', (['"""owner"""'], {}), "('owner')\n", (1993, 2002), False, 'from multinet.api.models import Workspace, WorkspaceRole\n'), ((2373, 2441), 'multinet.api.models.WorkspaceRole.objects.filter', 'WorkspaceRole.objects.filter', ([], {'workspace': 'workspace', 'user': 'request_user'}), '(workspace=workspace, user=request_user)\n', (2401, 2441), False, 'from multinet.api.models import Workspace, WorkspaceRole\n')] |
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import logging
import os
import sys
def ParseArgs():
parser = argparse.ArgumentParser(
description='Host file generator for CELab E2E tests')
all_tokens = ['project_id', 'storage_bucket', 'storage_prefix']
template_help = 'The full path to the *.host.textpb template file to use. '
template_help += 'Must contain the following tokens: %s' % all_tokens
parser.add_argument(
'--template', metavar='<host_file>', required=True, help=template_help)
parser.add_argument(
'--projects',
metavar='<projectA;projectB;...>',
dest="projects",
required=True,
help='The values to replace "<project_id>" with.')
parser.add_argument(
'--storage_bucket',
metavar='<token>',
dest="storage_bucket",
required=True,
help='The value to replace "<storage_bucket>" with.')
parser.add_argument(
'--storage_prefix',
metavar='<token>',
dest="storage_prefix",
required=True,
help='The value to replace "<storage_prefix>" with.')
parser.add_argument(
'--destination_dir',
metavar='<path>',
dest='destination',
required=True,
action='store',
help='Where to collect extra logs on test failures')
return parser.parse_args()
def ConfigureLogging(args):
logfmt = '%(asctime)s %(filename)s:%(lineno)s: [%(levelname)s] %(message)s'
datefmt = '%Y/%m/%d %H:%M:%S'
logging.basicConfig(level=logging.INFO, format=logfmt, datefmt=datefmt)
if __name__ == '__main__':
args = ParseArgs()
ConfigureLogging(args)
logging.info("Arguments: %s" % args)
if not os.path.exists(args.template):
raise ValueError('Template host file not found: %s' % args.template)
if not os.path.exists(args.destination):
raise ValueError('Destination directory not found: %s' % args.destination)
# Generate all the host files based off the arguments passed.
with open(args.template, 'r') as f:
template = f.read()
for project_id in args.projects.split(';'):
filename = "%s.host.textpb" % project_id
destination = os.path.join(args.destination, filename)
with open(destination, 'w') as f:
logging.info("Generating %s" % destination)
content = template.replace("<project_id>", project_id)
content = content.replace("<storage_bucket>", args.storage_bucket)
content = content.replace("<storage_prefix>", args.storage_prefix)
f.write(content)
sys.exit(0)
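# Example invocation (hypothetical script name, paths, and values, shown for
# illustration only):
#   python generate_host_files.py \
#       --template templates/base.host.textpb \
#       --projects "project-a;project-b" \
#       --storage_bucket my-bucket \
#       --storage_prefix e2e-logs \
#       --destination_dir out/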
| [
"logging.basicConfig",
"os.path.exists",
"argparse.ArgumentParser",
"os.path.join",
"sys.exit",
"logging.info"
]
| [((245, 323), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Host file generator for CELab E2E tests"""'}), "(description='Host file generator for CELab E2E tests')\n", (268, 323), False, 'import argparse\n'), ((1577, 1648), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': 'logfmt', 'datefmt': 'datefmt'}), '(level=logging.INFO, format=logfmt, datefmt=datefmt)\n', (1596, 1648), False, 'import logging\n'), ((1728, 1764), 'logging.info', 'logging.info', (["('Arguments: %s' % args)"], {}), "('Arguments: %s' % args)\n", (1740, 1764), False, 'import logging\n'), ((2601, 2612), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2609, 2612), False, 'import sys\n'), ((1775, 1804), 'os.path.exists', 'os.path.exists', (['args.template'], {}), '(args.template)\n', (1789, 1804), False, 'import os\n'), ((1889, 1921), 'os.path.exists', 'os.path.exists', (['args.destination'], {}), '(args.destination)\n', (1903, 1921), False, 'import os\n'), ((2239, 2279), 'os.path.join', 'os.path.join', (['args.destination', 'filename'], {}), '(args.destination, filename)\n', (2251, 2279), False, 'import os\n'), ((2324, 2367), 'logging.info', 'logging.info', (["('Generating %s' % destination)"], {}), "('Generating %s' % destination)\n", (2336, 2367), False, 'import logging\n')] |
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from django.urls import reverse
from django_extensions.db.models import TimeStampedModel
from mptt.models import MPTTModel, TreeForeignKey
from .managers import UserProfileManager, DepartmentManager, PositionManager
User = settings.AUTH_USER_MODEL
class Department(MPTTModel, TimeStampedModel):
"""
Departments in an organisation
"""
name = models.CharField(_("Name"), max_length=255)
description = models.TextField(_("Description"), blank=True, default="")
parent = TreeForeignKey('self', verbose_name=_("Parent"), null=True,
blank=True, related_name='children', db_index=True,
on_delete=models.PROTECT,
help_text=_("The parent department"))
customer = models.ForeignKey(
'customers.Customer', verbose_name=_("Customer"),
on_delete=models.PROTECT)
manager = models.ForeignKey(
User, verbose_name=_("Manager"), on_delete=models.PROTECT,
blank=True, null=True)
active = models.BooleanField(_("Active"), default=True)
objects = DepartmentManager()
class Meta:
verbose_name = _("Department")
verbose_name_plural = _("Departments")
ordering = ['name']
def get_absolute_url(self):
return "#"
def get_edit_url(self):
return reverse('users:departments_edit', args=[self.pk])
def get_delete_url(self):
return reverse('users:departments_delete', args=[self.pk])
def get_list_url(self):
return reverse('users:departments_list')
def __str__(self):
return self.name
class Position(MPTTModel, TimeStampedModel):
"""
Job positions in an organisation
"""
name = models.CharField(_("Name"), max_length=255)
description = models.TextField(_("Description"), blank=True, default="")
department = models.ForeignKey(
Department, verbose_name=_("Department"), on_delete=models.PROTECT)
parent = TreeForeignKey('self', verbose_name=_("Reports To"), null=True,
blank=True, related_name='children', db_index=True,
on_delete=models.PROTECT,
help_text=_("The parent Job Position"))
supervisor = models.ForeignKey(
User, verbose_name=_("Supervisor"), on_delete=models.PROTECT,
blank=True, null=True)
customer = models.ForeignKey(
'customers.Customer', verbose_name=_("Customer"),
on_delete=models.PROTECT)
active = models.BooleanField(_("Active"), default=True)
objects = PositionManager()
class Meta:
verbose_name = _("Job Positions")
verbose_name_plural = _("Job Positions")
ordering = ['name']
def get_absolute_url(self):
return "#"
def get_edit_url(self):
return reverse('users:positions_edit', args=[self.pk])
def get_delete_url(self):
return reverse('users:positions_delete', args=[self.pk])
def get_list_url(self):
return reverse('users:positions_list')
def __str__(self):
return "{} - {}".format(self.department.name, self.name)
@python_2_unicode_compatible
class UserProfile(models.Model):
"""
Model used to store more information on users
"""
ADMIN = '1'
MEMBER = '2'
EDITOR = '3'
MEMBER_ROLE_CHOICES = (
(ADMIN, _('Admin')),
(EDITOR, _('Editor')),
(MEMBER, _('Member')),
)
created_on = models.DateTimeField(_("Created on"), auto_now_add=True)
updated_on = models.DateTimeField(_("Updated on"), auto_now=True)
user = models.OneToOneField(User, verbose_name=_("User"))
position = models.ForeignKey(Position, verbose_name=_(
"job Position"), on_delete=models.SET_NULL, blank=True, null=True,
default=None)
customer = models.ForeignKey('customers.Customer', verbose_name=_(
"Customer"), on_delete=models.SET_NULL, blank=True, null=True,
default=None)
role = models.CharField(
_("Role"), max_length=1, choices=MEMBER_ROLE_CHOICES, blank=False,
default=MEMBER)
active = models.BooleanField(
_("Active"), default=True, help_text="Is the staff member actively "
"employed?")
objects = UserProfileManager()
class Meta:
verbose_name = _("Staff Member")
verbose_name_plural = _("Staff Members")
ordering = ['user__first_name', 'user__last_name', 'user__email']
def get_name(self):
if self.user.get_full_name():
return self.user.get_full_name()
if self.user.email:
return self.user.email
return self.user.username
def get_initials(self):
if self.user.first_name and self.user.last_name:
return "{}{}".format(self.user.first_name[0],
self.user.last_name[0])
if self.user.first_name:
return self.user.first_name[0]
if self.user.last_name:
return self.user.last_name[0]
return self.user.email[0]
def is_admin(self):
return self.role == self.ADMIN
def is_editor(self):
return self.role == self.EDITOR
def can_edit(self):
return self.role == self.EDITOR or self.role == self.ADMIN
def get_subordinates(self):
"""
Returns a queryset of UserProfile objects which report to this
userprofile
"""
if self.position:
queryset = UserProfile.objects.active().exclude(
id=self.id).filter(
models.Q(
position__supervisor=self.user) | models.Q(
position__department__manager=self.user) | models.Q(
position__parent=self.position))
else:
queryset = UserProfile.objects.active().exclude(
id=self.id).filter(
models.Q(
position__supervisor=self.user) | models.Q(
position__department__manager=self.user))
# get job positions of subs
subordinate_positions = Position.objects.filter(
userprofile__in=queryset)
# get any position that may report to these positions
# list of position ids of Positions that report to
# subordinate_positions
reporting_jp_ids = []
for sub_p in subordinate_positions:
reporting_jps = sub_p.get_descendants(include_self=False)
if reporting_jps is not None:
reporting_jp_ids = reporting_jp_ids + list(
reporting_jps.values_list('id', flat=True))
reporting_jp_ids = list(set(reporting_jp_ids))
        # get user profiles with positions that report to subordinate_positions
reporting_profiles = UserProfile.objects.active().filter(
position__id__in=reporting_jp_ids)
queryset = queryset.union(reporting_profiles)
# unions result in weird filtering so we create a new queryset
queryset_ids = list(set([x.id for x in queryset]))
if queryset_ids:
queryset = UserProfile.objects.filter(id__in=queryset_ids)
else:
queryset = UserProfile.objects.none()
return queryset
def has_subordinates(self):
return self.get_subordinates().exists()
def get_department(self):
if self.position is not None:
return self.position.department.name
return None
def get_absolute_url(self):
return "#"
def get_edit_url(self):
return reverse('users:userprofiles_edit', args=[self.pk])
def get_delete_url(self):
return "#"
def get_list_url(self):
return reverse('users:userprofiles_list')
def __str__(self):
return _("{user}").format(user=self.get_name())
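# Illustrative sketch only (assumes a configured Django project with seeded
# data; the username below is hypothetical):
#
#     profile = UserProfile.objects.get(user__username="jane")
#     if profile.has_subordinates():
#         for report in profile.get_subordinates():
#             print(report.get_name(), report.get_department())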
| [
"django.db.models.Q",
"django.utils.translation.ugettext_lazy",
"django.urls.reverse"
]
| [((560, 569), 'django.utils.translation.ugettext_lazy', '_', (['"""Name"""'], {}), "('Name')\n", (561, 569), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((622, 638), 'django.utils.translation.ugettext_lazy', '_', (['"""Description"""'], {}), "('Description')\n", (623, 638), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1227, 1238), 'django.utils.translation.ugettext_lazy', '_', (['"""Active"""'], {}), "('Active')\n", (1228, 1238), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1329, 1344), 'django.utils.translation.ugettext_lazy', '_', (['"""Department"""'], {}), "('Department')\n", (1330, 1344), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1375, 1391), 'django.utils.translation.ugettext_lazy', '_', (['"""Departments"""'], {}), "('Departments')\n", (1376, 1391), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1516, 1565), 'django.urls.reverse', 'reverse', (['"""users:departments_edit"""'], {'args': '[self.pk]'}), "('users:departments_edit', args=[self.pk])\n", (1523, 1565), False, 'from django.urls import reverse\n'), ((1612, 1663), 'django.urls.reverse', 'reverse', (['"""users:departments_delete"""'], {'args': '[self.pk]'}), "('users:departments_delete', args=[self.pk])\n", (1619, 1663), False, 'from django.urls import reverse\n'), ((1708, 1741), 'django.urls.reverse', 'reverse', (['"""users:departments_list"""'], {}), "('users:departments_list')\n", (1715, 1741), False, 'from django.urls import reverse\n'), ((1920, 1929), 'django.utils.translation.ugettext_lazy', '_', (['"""Name"""'], {}), "('Name')\n", (1921, 1929), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1982, 1998), 'django.utils.translation.ugettext_lazy', '_', (['"""Description"""'], {}), "('Description')\n", (1983, 1998), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2711, 2722), 'django.utils.translation.ugettext_lazy', '_', (['"""Active"""'], {}), "('Active')\n", (2712, 2722), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2811, 2829), 'django.utils.translation.ugettext_lazy', '_', (['"""Job Positions"""'], {}), "('Job Positions')\n", (2812, 2829), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2860, 2878), 'django.utils.translation.ugettext_lazy', '_', (['"""Job Positions"""'], {}), "('Job Positions')\n", (2861, 2878), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3003, 3050), 'django.urls.reverse', 'reverse', (['"""users:positions_edit"""'], {'args': '[self.pk]'}), "('users:positions_edit', args=[self.pk])\n", (3010, 3050), False, 'from django.urls import reverse\n'), ((3097, 3146), 'django.urls.reverse', 'reverse', (['"""users:positions_delete"""'], {'args': '[self.pk]'}), "('users:positions_delete', args=[self.pk])\n", (3104, 3146), False, 'from django.urls import reverse\n'), ((3191, 3222), 'django.urls.reverse', 'reverse', (['"""users:positions_list"""'], {}), "('users:positions_list')\n", (3198, 3222), False, 'from django.urls import reverse\n'), ((3659, 3674), 'django.utils.translation.ugettext_lazy', '_', (['"""Created on"""'], {}), "('Created on')\n", (3660, 3674), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3733, 3748), 'django.utils.translation.ugettext_lazy', '_', (['"""Updated on"""'], {}), "('Updated on')\n", (3734, 3748), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4184, 4193), 'django.utils.translation.ugettext_lazy', '_', 
(['"""Role"""'], {}), "('Role')\n", (4185, 4193), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4317, 4328), 'django.utils.translation.ugettext_lazy', '_', (['"""Active"""'], {}), "('Active')\n", (4318, 4328), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4520, 4537), 'django.utils.translation.ugettext_lazy', '_', (['"""Staff Member"""'], {}), "('Staff Member')\n", (4521, 4537), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4568, 4586), 'django.utils.translation.ugettext_lazy', '_', (['"""Staff Members"""'], {}), "('Staff Members')\n", (4569, 4586), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7777, 7827), 'django.urls.reverse', 'reverse', (['"""users:userprofiles_edit"""'], {'args': '[self.pk]'}), "('users:userprofiles_edit', args=[self.pk])\n", (7784, 7827), False, 'from django.urls import reverse\n'), ((7922, 7956), 'django.urls.reverse', 'reverse', (['"""users:userprofiles_list"""'], {}), "('users:userprofiles_list')\n", (7929, 7956), False, 'from django.urls import reverse\n'), ((713, 724), 'django.utils.translation.ugettext_lazy', '_', (['"""Parent"""'], {}), "('Parent')\n", (714, 724), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((909, 935), 'django.utils.translation.ugettext_lazy', '_', (['"""The parent department"""'], {}), "('The parent department')\n", (910, 935), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1014, 1027), 'django.utils.translation.ugettext_lazy', '_', (['"""Customer"""'], {}), "('Customer')\n", (1015, 1027), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1123, 1135), 'django.utils.translation.ugettext_lazy', '_', (['"""Manager"""'], {}), "('Manager')\n", (1124, 1135), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2093, 2108), 'django.utils.translation.ugettext_lazy', '_', (['"""Department"""'], {}), "('Department')\n", (2094, 2108), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2185, 2200), 'django.utils.translation.ugettext_lazy', '_', (['"""Reports To"""'], {}), "('Reports To')\n", (2186, 2200), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2385, 2413), 'django.utils.translation.ugettext_lazy', '_', (['"""The parent Job Position"""'], {}), "('The parent Job Position')\n", (2386, 2413), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2478, 2493), 'django.utils.translation.ugettext_lazy', '_', (['"""Supervisor"""'], {}), "('Supervisor')\n", (2479, 2493), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2629, 2642), 'django.utils.translation.ugettext_lazy', '_', (['"""Customer"""'], {}), "('Customer')\n", (2630, 2642), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3539, 3549), 'django.utils.translation.ugettext_lazy', '_', (['"""Admin"""'], {}), "('Admin')\n", (3540, 3549), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3569, 3580), 'django.utils.translation.ugettext_lazy', '_', (['"""Editor"""'], {}), "('Editor')\n", (3570, 3580), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3600, 3611), 'django.utils.translation.ugettext_lazy', '_', (['"""Member"""'], {}), "('Member')\n", (3601, 3611), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3816, 3825), 'django.utils.translation.ugettext_lazy', '_', (['"""User"""'], {}), "('User')\n", (3817, 3825), True, 'from django.utils.translation import 
ugettext_lazy as _\n'), ((3883, 3900), 'django.utils.translation.ugettext_lazy', '_', (['"""job Position"""'], {}), "('job Position')\n", (3884, 3900), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4051, 4064), 'django.utils.translation.ugettext_lazy', '_', (['"""Customer"""'], {}), "('Customer')\n", (4052, 4064), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7996, 8007), 'django.utils.translation.ugettext_lazy', '_', (['"""{user}"""'], {}), "('{user}')\n", (7997, 8007), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5897, 5937), 'django.db.models.Q', 'models.Q', ([], {'position__parent': 'self.position'}), '(position__parent=self.position)\n', (5905, 5937), False, 'from django.db import models\n'), ((6087, 6127), 'django.db.models.Q', 'models.Q', ([], {'position__supervisor': 'self.user'}), '(position__supervisor=self.user)\n', (6095, 6127), False, 'from django.db import models\n'), ((6151, 6200), 'django.db.models.Q', 'models.Q', ([], {'position__department__manager': 'self.user'}), '(position__department__manager=self.user)\n', (6159, 6200), False, 'from django.db import models\n'), ((5760, 5800), 'django.db.models.Q', 'models.Q', ([], {'position__supervisor': 'self.user'}), '(position__supervisor=self.user)\n', (5768, 5800), False, 'from django.db import models\n'), ((5824, 5873), 'django.db.models.Q', 'models.Q', ([], {'position__department__manager': 'self.user'}), '(position__department__manager=self.user)\n', (5832, 5873), False, 'from django.db import models\n')] |
import unittest
from gamesopt.train import train, TrainConfig
class TestOptimizer(unittest.TestCase):
def test_sgda(self):
config = TrainConfig(num_iter=2)
train(config) | [
"gamesopt.train.train",
"gamesopt.train.TrainConfig"
]
| [((145, 168), 'gamesopt.train.TrainConfig', 'TrainConfig', ([], {'num_iter': '(2)'}), '(num_iter=2)\n', (156, 168), False, 'from gamesopt.train import train, TrainConfig\n'), ((177, 190), 'gamesopt.train.train', 'train', (['config'], {}), '(config)\n', (182, 190), False, 'from gamesopt.train import train, TrainConfig\n')] |
import bpy
from bpy.props import *
from ...nodes.BASE.node_base import RenderNodeBase
class RenderNodeGetListIndex(RenderNodeBase):
"""A simple input node"""
bl_idname = 'RenderNodeGetListIndex'
bl_label = 'Get List Index'
def init(self, context):
self.create_output('RenderNodeSocketInt', "index", 'Index')
def process(self,context,id,path):
node = self.id_data.nodes.get(bpy.context.window_manager.rsn_active_list)
if not node or node.bl_idname != 'RenderNodeTaskRenderListNode': return
self.outputs[0].set_value(node.active_index)
def register():
bpy.utils.register_class(RenderNodeGetListIndex)
def unregister():
bpy.utils.unregister_class(RenderNodeGetListIndex)
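# Illustrative only: normally the add-on package calls register() on load (an
# assumption about the surrounding project); when run directly from Blender's
# text editor the usual pattern is:
if __name__ == "__main__":
    register()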
| [
"bpy.utils.unregister_class",
"bpy.utils.register_class"
]
| [((614, 662), 'bpy.utils.register_class', 'bpy.utils.register_class', (['RenderNodeGetListIndex'], {}), '(RenderNodeGetListIndex)\n', (638, 662), False, 'import bpy\n'), ((687, 737), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['RenderNodeGetListIndex'], {}), '(RenderNodeGetListIndex)\n', (713, 737), False, 'import bpy\n')] |
from keras.models import load_model
import cv2
import pickle
import keras.backend as K
import numpy as np
from src.model_path import MODEL_PATH
'''def predict(self, cell):
model = load_model('./model/Model.h5')
f = K.function([model.layers[0].input, K.learning_phase()],[model.layers[-1].output])
rescaled_cell = self.rescale(cell)
result = []
for _ in range(10):
result.append(f([rescaled_cell, 1]))
result = np.array(result)
prediction = result.mean(axis=0)
uncertainty = result.var(axis=0)
if uncertainty.argmax() > 3:
new_prediction = 0
print(prediction.argmax(),uncertainty.argmax(),new_prediction)
else:
print(prediction.argmax(),uncertainty.argmax())'''
class recognizeDigit:
def __init__(self, cell):
self._prediction = self.predict(cell)
def predict(self, cell):
model = load_model(MODEL_PATH)
rescaled_cell = self.rescale(cell)
pred = model.predict(rescaled_cell)
return pred.argmax()
def rescale(self, cell):
resized_cell = cv2.resize(cell, (28, 28))
return resized_cell.reshape(1, resized_cell.shape[0], resized_cell.shape[1], 1)
@property
def prediction(self):
return self._prediction | [
"cv2.resize",
"keras.models.load_model"
]
| [((839, 861), 'keras.models.load_model', 'load_model', (['MODEL_PATH'], {}), '(MODEL_PATH)\n', (849, 861), False, 'from keras.models import load_model\n'), ((1013, 1039), 'cv2.resize', 'cv2.resize', (['cell', '(28, 28)'], {}), '(cell, (28, 28))\n', (1023, 1039), False, 'import cv2\n')] |
# -*- coding: utf-8 -*-
"""A module for plotting penguins data for modelling with scikit-learn."""
# Imports ---------------------------------------------------------------------
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Constants -------------------------------------------------------------------
SPECIES_COLORS = {
'Adelie': '#4daf4a',
'Gentoo': '#ffb000',
'Chinstrap': '#0084f7'
}
X_AXIS = [30, 60]
Y_AXIS = [12, 22]
# Set style -------------------------------------------------------------------
# Load the style from a file
plt.style.use('./style/eda.mplstyle')
# Alternatively, load the style from the library in ~/.matplotlib/stylelib
# plt.style.use(['eda'])
# Functions -------------------------------------------------------------------
def get_contour_data(model, pipeline, n_points=1000):
"""Create the data used to show the boundary of the decision function."""
x0s = np.linspace(X_AXIS[0], X_AXIS[1], n_points)
x1s = np.linspace(Y_AXIS[0], Y_AXIS[1], n_points)
x0, x1 = np.meshgrid(x0s, x1s)
X = np.c_[x0.ravel(), x1.ravel()]
df_X = pd.DataFrame(X, columns=['bill_length_mm', 'bill_depth_mm'])
X = pipeline.transform(df_X)
y_pred = model.predict(X).reshape(x0.shape)
y_decision = model.decision_function(X).reshape(x0.shape)
return x0, x1, y_pred, y_decision
def get_target_colors(target):
"""Create a dictionary of colors to use in binary classification plots."""
return {
target : '#984ea3',
'Other': '#ff7f00'
}
# Plots -----------------------------------------------------------------------
def plot_example():
plt.style.reload_library()
plt.style.use(['eda'])
fig, ax = plt.subplots()
ax.set_title('Some random words of the title')
ax.scatter(np.random.normal(0,1,10), np.random.normal(0,1,10))
fig.savefig('plots/test.svg', format='svg')
fig.savefig('plots/test.png', format='png')
plt.close()
def plot_target_by_features(df):
"""Plot the different target species."""
fig, ax = plt.subplots()
ax.set_title(
label='Palmer penguins by species and bill characteristics',
loc='center')
ax.get_xaxis().set_major_formatter(
mpl.ticker.FormatStrFormatter('%.0f'))
ax.set_xlim(X_AXIS[0], X_AXIS[1])
ax.set_xlabel('Bill length (mm)')
ax.get_yaxis().set_major_formatter(
mpl.ticker.FormatStrFormatter('%.0f'))
ax.set_ylim(Y_AXIS[0], Y_AXIS[1])
ax.set_ylabel('Bill depth (mm)')
grouped = df.groupby('species')
for key, group in grouped:
ax.scatter(
group['bill_length_mm'],
group['bill_depth_mm'],
c=SPECIES_COLORS[key],
s=40,
label=key,
alpha=0.55)
ax.legend(loc='lower left', handletextpad=0.2)
fig.savefig('plots/target-by-features.png', format='png')
plt.close()
def plot_model(df, model, pipeline, f_score, target, title, filename):
"""Plot the results of a binary classification model."""
fig, ax = plt.subplots()
ax.set_title(title, loc='center')
ax.get_xaxis().set_major_formatter(
mpl.ticker.FormatStrFormatter('%.0f'))
ax.set_xlim(X_AXIS[0], X_AXIS[1])
ax.set_xlabel('Bill length (mm)')
ax.get_yaxis().set_major_formatter(
mpl.ticker.FormatStrFormatter('%.0f'))
ax.set_ylim(Y_AXIS[0], Y_AXIS[1])
ax.set_ylabel('Bill depth (mm)')
# Plot the boundary of the decision function
x0, x1, y_pred, y_decision = get_contour_data(model, pipeline)
ax.contourf(x0, x1, y_pred, cmap=plt.cm.PuOr, alpha=0.2)
# This plots the decision score, if needed
# ax.contourf(x0, x1, y_decision, cmap=plt.cm.PuOr, alpha=0.1)
df = df.copy()
df['species'] = df['target'].apply(lambda t: target if t == 1 else 'Other')
colors = get_target_colors(target)
grouped = df.groupby('species')
for key, group in grouped:
ax.scatter(
group['bill_length_mm'],
group['bill_depth_mm'],
c=colors[key],
s=40,
label=key,
alpha=0.55)
ax.legend(loc='lower left', handletextpad=0.2)
bbox_style = {
'boxstyle': 'round',
'facecolor': '#ffffff',
'edgecolor': '#d4d4d4',
'alpha': 0.8
}
ax.text(53, 12.415, '$F_1$ score: {0}'.format(f_score), bbox=bbox_style)
fig.savefig('plots/{0}.png'.format(filename), format='png')
plt.close() | [
"numpy.random.normal",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.close",
"numpy.linspace",
"matplotlib.pyplot.style.reload_library",
"matplotlib.ticker.FormatStrFormatter",
"pandas.DataFrame",
"numpy.meshgrid",
"matplotlib.pyplot.subplots"
]
| [((608, 645), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""./style/eda.mplstyle"""'], {}), "('./style/eda.mplstyle')\n", (621, 645), True, 'import matplotlib.pyplot as plt\n'), ((977, 1020), 'numpy.linspace', 'np.linspace', (['X_AXIS[0]', 'X_AXIS[1]', 'n_points'], {}), '(X_AXIS[0], X_AXIS[1], n_points)\n', (988, 1020), True, 'import numpy as np\n'), ((1031, 1074), 'numpy.linspace', 'np.linspace', (['Y_AXIS[0]', 'Y_AXIS[1]', 'n_points'], {}), '(Y_AXIS[0], Y_AXIS[1], n_points)\n', (1042, 1074), True, 'import numpy as np\n'), ((1088, 1109), 'numpy.meshgrid', 'np.meshgrid', (['x0s', 'x1s'], {}), '(x0s, x1s)\n', (1099, 1109), True, 'import numpy as np\n'), ((1159, 1219), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {'columns': "['bill_length_mm', 'bill_depth_mm']"}), "(X, columns=['bill_length_mm', 'bill_depth_mm'])\n", (1171, 1219), True, 'import pandas as pd\n'), ((1700, 1726), 'matplotlib.pyplot.style.reload_library', 'plt.style.reload_library', ([], {}), '()\n', (1724, 1726), True, 'import matplotlib.pyplot as plt\n'), ((1731, 1753), 'matplotlib.pyplot.style.use', 'plt.style.use', (["['eda']"], {}), "(['eda'])\n", (1744, 1753), True, 'import matplotlib.pyplot as plt\n'), ((1768, 1782), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1780, 1782), True, 'import matplotlib.pyplot as plt\n'), ((2001, 2012), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2010, 2012), True, 'import matplotlib.pyplot as plt\n'), ((2109, 2123), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2121, 2123), True, 'import matplotlib.pyplot as plt\n'), ((2951, 2962), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2960, 2962), True, 'import matplotlib.pyplot as plt\n'), ((3113, 3127), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3125, 3127), True, 'import matplotlib.pyplot as plt\n'), ((4533, 4544), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4542, 4544), True, 'import matplotlib.pyplot as plt\n'), ((1849, 1875), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(10)'], {}), '(0, 1, 10)\n', (1865, 1875), True, 'import numpy as np\n'), ((1875, 1901), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(10)'], {}), '(0, 1, 10)\n', (1891, 1901), True, 'import numpy as np\n'), ((2284, 2321), 'matplotlib.ticker.FormatStrFormatter', 'mpl.ticker.FormatStrFormatter', (['"""%.0f"""'], {}), "('%.0f')\n", (2313, 2321), True, 'import matplotlib as mpl\n'), ((2448, 2485), 'matplotlib.ticker.FormatStrFormatter', 'mpl.ticker.FormatStrFormatter', (['"""%.0f"""'], {}), "('%.0f')\n", (2477, 2485), True, 'import matplotlib as mpl\n'), ((3216, 3253), 'matplotlib.ticker.FormatStrFormatter', 'mpl.ticker.FormatStrFormatter', (['"""%.0f"""'], {}), "('%.0f')\n", (3245, 3253), True, 'import matplotlib as mpl\n'), ((3380, 3417), 'matplotlib.ticker.FormatStrFormatter', 'mpl.ticker.FormatStrFormatter', (['"""%.0f"""'], {}), "('%.0f')\n", (3409, 3417), True, 'import matplotlib as mpl\n')] |
import unittest
from forestgame.game.world import World
class WorldTest(unittest.TestCase):
def test_world_inits_to_empty_data(self):
world = World(None, "1", "0", 0, 0, [], [])
self.assertEqual(0, world.get_size_x())
self.assertEqual(0, world.get_size_y())
self.assertEqual([], world.get_tile_data())
def test_world_with_tiles_inits__with_tiles_to_empty_data(self):
world = World(None, "1", "0", 3, 3, [(1, 1, 0)], [])
expected_tile_data = [
[1, 1, 1],
[1, 0, 1],
[1, 1, 1],
]
self.assertEqual(expected_tile_data, world.get_tile_data())
self.assertEqual(3, world.get_size_x())
self.assertEqual(3, world.get_size_y())
def test_set_size_from_zero_initialsies_from_forest(self):
world = World(None, "1", "0", 0, 0, [], [])
world.set_size(3, 3)
expected_tile_data = [
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
]
self.assertEqual(expected_tile_data, world.get_tile_data())
self.assertEqual(3, world.get_size_x())
self.assertEqual(3, world.get_size_y())
def test_set_size_with_larger_x_y_pads_with_forest(self):
world = World(None, "1", "0", 0, 0, [], [])
world.set_size(2, 2)
world.set_size(3, 3)
expected_tile_data = [
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
]
self.assertEqual(expected_tile_data, world.get_tile_data())
self.assertEqual(3, world.get_size_x())
self.assertEqual(3, world.get_size_y())
def test_set_size_with_larger_x_pads_with_forest(self):
world = World(None, "1", "0", 0, 0, [], [])
world.set_size(2, 3)
world.set_size(3, 3)
expected_tile_data = [
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
]
self.assertEqual(expected_tile_data, world.get_tile_data())
self.assertEqual(3, world.get_size_x())
self.assertEqual(3, world.get_size_y())
def test_set_size_with_larger_y_pads_with_forest(self):
world = World(None, "1", "0", 0, 0, [], [])
world.set_size(3, 2)
world.set_size(3, 3)
expected_tile_data = [
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
]
self.assertEqual(expected_tile_data, world.get_tile_data())
self.assertEqual(3, world.get_size_x())
self.assertEqual(3, world.get_size_y())
def test_set_size_with_smaller_x_y_removes_data(self):
world = World(None, "1", "0", 0, 0, [], [])
world.set_size(3, 3)
world.set_size(2, 2)
expected_tile_data = [
[1, 1],
[1, 1],
]
self.assertEqual(expected_tile_data, world.get_tile_data())
self.assertEqual(2, world.get_size_x())
self.assertEqual(2, world.get_size_y())
def test_set_size_with_smaller_x_removes_data(self):
world = World(None, "1", "0", 0, 0, [], [])
world.set_size(3, 3)
world.set_size(2, 3)
expected_tile_data = [
[1, 1],
[1, 1],
[1, 1],
]
self.assertEqual(expected_tile_data, world.get_tile_data())
self.assertEqual(2, world.get_size_x())
self.assertEqual(3, world.get_size_y())
def test_set_size_with_smaller_y_removes_data(self):
world = World(None, "1", "0", 0, 0, [], [])
world.set_size(3, 3)
world.set_size(3, 2)
expected_tile_data = [
[1, 1, 1],
[1, 1, 1],
]
self.assertEqual(expected_tile_data, world.get_tile_data())
self.assertEqual(3, world.get_size_x())
self.assertEqual(2, world.get_size_y())
def test_set_size_with_same_x_y_does_nothing(self):
world = World(None, "1", "0", 0, 0, [], [])
world.set_size(3, 3)
world.set_size(3, 3)
expected_tile_data = [
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
]
self.assertEqual(expected_tile_data, world.get_tile_data())
self.assertEqual(3, world.get_size_x())
self.assertEqual(3, world.get_size_y())
# set tile range checks
def test_set_tile_changes_tile_data(self):
world = World(None, "1", "0", 0, 0, [], [])
world.set_size(5, 5)
world.set_tile_at(2, 3, 0)
self.assertEqual(0, world.get_tile_at(2, 3))
expected_tile_data = [
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 0, 1, 1],
[1, 1, 1, 1, 1]
]
self.assertEqual(expected_tile_data, world.get_tile_data())
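# Added for convenience, not part of the original file: lets the test module be
# run directly as a script.
if __name__ == '__main__':
  unittest.main()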
| [
"forestgame.game.world.World"
]
| [((150, 185), 'forestgame.game.world.World', 'World', (['None', '"""1"""', '"""0"""', '(0)', '(0)', '[]', '[]'], {}), "(None, '1', '0', 0, 0, [], [])\n", (155, 185), False, 'from forestgame.game.world import World\n'), ((403, 447), 'forestgame.game.world.World', 'World', (['None', '"""1"""', '"""0"""', '(3)', '(3)', '[(1, 1, 0)]', '[]'], {}), "(None, '1', '0', 3, 3, [(1, 1, 0)], [])\n", (408, 447), False, 'from forestgame.game.world import World\n'), ((758, 793), 'forestgame.game.world.World', 'World', (['None', '"""1"""', '"""0"""', '(0)', '(0)', '[]', '[]'], {}), "(None, '1', '0', 0, 0, [], [])\n", (763, 793), False, 'from forestgame.game.world import World\n'), ((1130, 1165), 'forestgame.game.world.World', 'World', (['None', '"""1"""', '"""0"""', '(0)', '(0)', '[]', '[]'], {}), "(None, '1', '0', 0, 0, [], [])\n", (1135, 1165), False, 'from forestgame.game.world import World\n'), ((1525, 1560), 'forestgame.game.world.World', 'World', (['None', '"""1"""', '"""0"""', '(0)', '(0)', '[]', '[]'], {}), "(None, '1', '0', 0, 0, [], [])\n", (1530, 1560), False, 'from forestgame.game.world import World\n'), ((1920, 1955), 'forestgame.game.world.World', 'World', (['None', '"""1"""', '"""0"""', '(0)', '(0)', '[]', '[]'], {}), "(None, '1', '0', 0, 0, [], [])\n", (1925, 1955), False, 'from forestgame.game.world import World\n'), ((2314, 2349), 'forestgame.game.world.World', 'World', (['None', '"""1"""', '"""0"""', '(0)', '(0)', '[]', '[]'], {}), "(None, '1', '0', 0, 0, [], [])\n", (2319, 2349), False, 'from forestgame.game.world import World\n'), ((2683, 2718), 'forestgame.game.world.World', 'World', (['None', '"""1"""', '"""0"""', '(0)', '(0)', '[]', '[]'], {}), "(None, '1', '0', 0, 0, [], [])\n", (2688, 2718), False, 'from forestgame.game.world import World\n'), ((3066, 3101), 'forestgame.game.world.World', 'World', (['None', '"""1"""', '"""0"""', '(0)', '(0)', '[]', '[]'], {}), "(None, '1', '0', 0, 0, [], [])\n", (3071, 3101), False, 'from forestgame.game.world import World\n'), ((3440, 3475), 'forestgame.game.world.World', 'World', (['None', '"""1"""', '"""0"""', '(0)', '(0)', '[]', '[]'], {}), "(None, '1', '0', 0, 0, [], [])\n", (3445, 3475), False, 'from forestgame.game.world import World\n'), ((3849, 3884), 'forestgame.game.world.World', 'World', (['None', '"""1"""', '"""0"""', '(0)', '(0)', '[]', '[]'], {}), "(None, '1', '0', 0, 0, [], [])\n", (3854, 3884), False, 'from forestgame.game.world import World\n')] |
# Copyright 2021 <NAME> <<EMAIL>>
# SPDX-license-identifier: 0BSD
import string
from loguru import logger
try:
import cell_pos
from exceptions import InvconvMissingHeaders
import ftype
import msg_handler
except ModuleNotFoundError:
import invconv.cell_pos as cell_pos
from invconv.exceptions import InvconvMissingHeaders
import invconv.ftype as ftype
import invconv.msg_handler as msg_handler
used = True
try:
from openpyxl import load_workbook
except ModuleNotFoundError:
used = False
# load_workbook is used repeatedly with similar settings
# every time.
WB_SETTINGS = {
"read_only": True,
"keep_vba": False,
"data_only": True,
"keep_links": False,
}
class XlsxDataTuple(ftype.BasicFtypeDataClass):
def __init__(self, filename, wsname, headers):
self.filename = filename
self.wsname = wsname
self.headers = headers
self.cur_row = None
self.cur_col = None
super().__init__(
filename=self.filename, sectionname=self.wsname, headers=self.headers
)
# Set relevant values and gets the number of operations
# to be performed based on the dimensions.
def set_oper_num(self, min_row, max_row, max_col):
self.min_row = min_row
self.min_col = 1
self.max_row = max_row
self.max_col = max_col
delta_col = self.max_col - self.min_col + 1
delta_row = self.max_row - self.min_row + 1
self.num_oper = delta_col * delta_row
return self.num_oper
def load_workbook(self):
return load_workbook(self.filename, **WB_SETTINGS)
def parser(self):
if self.cur_row is None:
self.cur_row = self.min_row
if self.cur_col is None:
self.cur_col = self.min_col
if self.cur_col > self.max_col:
self.cur_col = self.min_col
self.cur_row += 1
if self.cur_row > self.max_row:
self.cur_row = None
self.cur_col = None
return None
col_letter = cell_pos.get_col_letter(self.cur_col)
row_str = str(self.cur_row)
wb = self.load_workbook()
ws = wb[self.wsname]
cell_val = ws[col_letter + row_str].value
return_str = str(cell_val)
if cell_val is None:
return_str = ""
if return_str == "#REF!":
logger.warning(
string.Template(
'Unknown reference found at $cell_pos in $id. Defaulting to "unknown".'
).substitute(
cell_pos=col_letter + row_str,
id=msg_handler.get_id((self.filename, self.wsname), "WS"),
)
)
return_str = "unknown"
self.cur_col += 1
wb.close()
return return_str
# Will store a file, worksheet tuple-like class
# with additional data accessible.
xlsx_data_list = ftype.FtypeDataList()
# Contains just a list of file, worksheet tuples.
xlsx_tuple_list = []
# xlsx files always start counting at 1.
INVALID_ROW = 0
def start(input_files):
# Gets the name of worksheets and
# adds it to xlsx_tuple_list.
get_worksheets(input_files)
# Sometimes, openpyxl can't get
# the proper dimensions of a worksheet,
# so it handles that. It also deals with
# headers in the worksheets and removes
# blank cells from the size of the sheet.
set_data()
# Check if some file worksheet pairs don't
# have a valid header.
if not xlsx_data_list:
raise InvconvMissingHeaders
# Can't directly check for membership of
# items from xlsx_tuple_list in xlsx_data_list,
# for they are different types.
for file_section in xlsx_tuple_list:
found_file_section = False
for data_file_section in xlsx_data_list:
            # The first element in the if statement
# has to be XlsxDataTuple, as it
# contains a __eq__() function
# that should work in this case.
if data_file_section == file_section:
found_file_section = True
break
if not found_file_section:
logger.error(
f"{msg_handler.get_id(file_section, 'ws')} contains no valid headers."
)
msg_handler.does_continue()
return xlsx_data_list
def get_worksheets(input_files):
for input_file in input_files:
wb = load_workbook(input_file, **WB_SETTINGS)
sheetname_list = wb.sheetnames
for sheetname in sheetname_list:
xlsx_tuple_list.append((input_file, sheetname))
wb.close()
def set_data():
for filename, wsname in xlsx_tuple_list:
wb = load_workbook(filename, **WB_SETTINGS)
ws = wb[wsname]
# max_col and max_row can be None.
cur_max_col = ws.max_column
cur_max_row = ws.max_row
# Close workbook right away so
        # it won't remain open in case the script
# gets closed or crashes.
wb.close()
max_col = get_max_col(filename, wsname, cur_max_col)
max_row = get_max_row(filename, wsname, cur_max_row)
# Get the row where a header was found.
header_row = get_header_row(filename, wsname, max_row)
# check_header_row() ensures that a non-blank row
# is after header row. If not, it might not
# actually be a header row.
if (
header_row == INVALID_ROW
or header_row == max_row
or not check_header_row(filename, wsname, max_col, header_row)
):
continue
# The first row after the header_row.
min_row = header_row + 1
header_list = get_header_list(filename, wsname, max_col, header_row)
if max_col > len(header_list):
            logger.info(
                string.Template(
                    "Reducing max column length of $id from $cur_col to $new_col due to None in $cell_pos."
                ).substitute(
                    id=msg_handler.get_id((filename, wsname), "WS"),
                    cur_col=max_col,
                    new_col=len(header_list),
                    cell_pos=cell_pos.get_col_letter(len(header_list) + 1) + str(header_row),
                )
            )
max_col = len(header_list)
DataTuple = XlsxDataTuple(filename, wsname, header_list)
DataTuple.set_oper_num(min_row, max_row, max_col)
xlsx_data_list.append(DataTuple)
def get_max_col(filename, wsname, max_col):
xlsx_id = msg_handler.get_id((filename, wsname), "WS")
while (not isinstance(max_col, int)) or (max_col <= INVALID_ROW):
logger.error(f"Max col for {xlsx_id} is {str(max_col)}.")
msg_handler.does_continue()
try:
logger.info("User providing number of columns (starting at 1).")
max_col = int(
input("Please provide the number of columns (starting at 1) > ")
)
except (ValueError, TypeError):
logger.log("FAILURE", "Input could not be converted to int.")
max_col = None
if (isinstance(max_col, int)) and (max_col <= 0):
logger.log("FAILURE", "Input is less than one.")
return max_col
def get_max_row(filename, wsname, max_row):
xlsx_id = msg_handler.get_id((filename, wsname))
while (not isinstance(max_row, int)) or (max_row <= 0):
logger.error(f"Max row for {xlsx_id} is {str(max_row)}.")
msg_handler.does_continue()
try:
logger.info("User providing number of rows (starting at 1).")
max_row = int(input("Please provide the number of rows (starting at 1) > "))
except (ValueError, TypeError):
logger.log("FAILURE", "Input could not be converted to int.")
max_row = None
if (isinstance(max_row, int)) and (max_row <= 0):
logger.log("FAILURE", "Input is less than one.")
return max_row
def get_header_row(filename, wsname, max_row):
wb = load_workbook(filename, **WB_SETTINGS)
ws = wb[wsname]
# header_row starts at 1,
# so a value of 0 indicates
# it wasn't found.
header_row = INVALID_ROW
for row in cell_pos.row_iter(max_row):
row_str = str(row)
# A row with just a title would not fill up the entire max_column.
# As a result, there would be None at either the first or second
# position.
cell1 = ws["A" + row_str].value
cell2 = ws["B" + row_str].value
if cell1 is not None and cell2 is not None:
header_row = row
break
wb.close()
return header_row
def check_header_row(filename, wsname, max_col, header_row):
wb = load_workbook(filename, **WB_SETTINGS)
ws = wb[wsname]
# Check the row after the header row
# for content.
post_header_row = header_row + 1
row_str = str(post_header_row)
# List of items in row.
row_list = []
for col in cell_pos.col_iter(max_col):
col_letter = cell_pos.get_col_letter(col)
row_list.append(str(ws[col_letter + row_str].value))
wb.close()
# Ensure the row is not blank.
if row_list.count("None") != len(row_list):
return True
return False
def get_header_list(filename, wsname, max_col, header_row):
wb = load_workbook(filename, **WB_SETTINGS)
ws = wb[wsname]
header_list = []
row_str = str(header_row)
for col in cell_pos.col_iter(max_col):
col_letter = cell_pos.get_col_letter(col)
header_item = ws[col_letter + row_str].value
# Assuming the header doesn't have blank
# items between entries. Only at the end.
if header_item is None:
logger.warning(
f"Blank header {col_letter+row_str} in {msg_handler.get_id((filename, wsname), 'WS')} will be ignored."
)
break
header_list.append(header_item)
wb.close()
return header_list
if used:
ftype.add("xlsx", start)
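# Illustrative sketch only: invconv normally drives start() through the ftype
# registration above; the xlsx filename below is hypothetical.
if __name__ == "__main__":
    if used:
        for data_tuple in start(["inventory.xlsx"]):
            print(data_tuple.filename, data_tuple.wsname, data_tuple.headers)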
| [
"invconv.msg_handler.get_id",
"invconv.msg_handler.does_continue",
"string.Template",
"loguru.logger.log",
"loguru.logger.info",
"openpyxl.load_workbook",
"invconv.cell_pos.row_iter",
"invconv.cell_pos.get_col_letter",
"invconv.ftype.FtypeDataList",
"invconv.cell_pos.col_iter",
"invconv.ftype.add"
]
| [((2927, 2948), 'invconv.ftype.FtypeDataList', 'ftype.FtypeDataList', ([], {}), '()\n', (2946, 2948), True, 'import invconv.ftype as ftype\n'), ((6258, 6302), 'invconv.msg_handler.get_id', 'msg_handler.get_id', (['(filename, wsname)', '"""WS"""'], {}), "((filename, wsname), 'WS')\n", (6276, 6302), True, 'import invconv.msg_handler as msg_handler\n'), ((7026, 7064), 'invconv.msg_handler.get_id', 'msg_handler.get_id', (['(filename, wsname)'], {}), '((filename, wsname))\n', (7044, 7064), True, 'import invconv.msg_handler as msg_handler\n'), ((7740, 7778), 'openpyxl.load_workbook', 'load_workbook', (['filename'], {}), '(filename, **WB_SETTINGS)\n', (7753, 7778), False, 'from openpyxl import load_workbook\n'), ((7929, 7955), 'invconv.cell_pos.row_iter', 'cell_pos.row_iter', (['max_row'], {}), '(max_row)\n', (7946, 7955), True, 'import invconv.cell_pos as cell_pos\n'), ((8440, 8478), 'openpyxl.load_workbook', 'load_workbook', (['filename'], {}), '(filename, **WB_SETTINGS)\n', (8453, 8478), False, 'from openpyxl import load_workbook\n'), ((8694, 8720), 'invconv.cell_pos.col_iter', 'cell_pos.col_iter', (['max_col'], {}), '(max_col)\n', (8711, 8720), True, 'import invconv.cell_pos as cell_pos\n'), ((9039, 9077), 'openpyxl.load_workbook', 'load_workbook', (['filename'], {}), '(filename, **WB_SETTINGS)\n', (9052, 9077), False, 'from openpyxl import load_workbook\n'), ((9165, 9191), 'invconv.cell_pos.col_iter', 'cell_pos.col_iter', (['max_col'], {}), '(max_col)\n', (9182, 9191), True, 'import invconv.cell_pos as cell_pos\n'), ((9701, 9725), 'invconv.ftype.add', 'ftype.add', (['"""xlsx"""', 'start'], {}), "('xlsx', start)\n", (9710, 9725), True, 'import invconv.ftype as ftype\n'), ((1589, 1632), 'openpyxl.load_workbook', 'load_workbook', (['self.filename'], {}), '(self.filename, **WB_SETTINGS)\n', (1602, 1632), False, 'from openpyxl import load_workbook\n'), ((2061, 2098), 'invconv.cell_pos.get_col_letter', 'cell_pos.get_col_letter', (['self.cur_col'], {}), '(self.cur_col)\n', (2084, 2098), True, 'import invconv.cell_pos as cell_pos\n'), ((4439, 4479), 'openpyxl.load_workbook', 'load_workbook', (['input_file'], {}), '(input_file, **WB_SETTINGS)\n', (4452, 4479), False, 'from openpyxl import load_workbook\n'), ((4715, 4753), 'openpyxl.load_workbook', 'load_workbook', (['filename'], {}), '(filename, **WB_SETTINGS)\n', (4728, 4753), False, 'from openpyxl import load_workbook\n'), ((6447, 6474), 'invconv.msg_handler.does_continue', 'msg_handler.does_continue', ([], {}), '()\n', (6472, 6474), True, 'import invconv.msg_handler as msg_handler\n'), ((7199, 7226), 'invconv.msg_handler.does_continue', 'msg_handler.does_continue', ([], {}), '()\n', (7224, 7226), True, 'import invconv.msg_handler as msg_handler\n'), ((8743, 8771), 'invconv.cell_pos.get_col_letter', 'cell_pos.get_col_letter', (['col'], {}), '(col)\n', (8766, 8771), True, 'import invconv.cell_pos as cell_pos\n'), ((9214, 9242), 'invconv.cell_pos.get_col_letter', 'cell_pos.get_col_letter', (['col'], {}), '(col)\n', (9237, 9242), True, 'import invconv.cell_pos as cell_pos\n'), ((4302, 4329), 'invconv.msg_handler.does_continue', 'msg_handler.does_continue', ([], {}), '()\n', (4327, 4329), True, 'import invconv.msg_handler as msg_handler\n'), ((6500, 6564), 'loguru.logger.info', 'logger.info', (['"""User providing number of columns (starting at 1)."""'], {}), "('User providing number of columns (starting at 1).')\n", (6511, 6564), False, 'from loguru import logger\n'), ((6898, 6946), 'loguru.logger.log', 'logger.log', (['"""FAILURE"""', '"""Input is 
less than one."""'], {}), "('FAILURE', 'Input is less than one.')\n", (6908, 6946), False, 'from loguru import logger\n'), ((7252, 7313), 'loguru.logger.info', 'logger.info', (['"""User providing number of rows (starting at 1)."""'], {}), "('User providing number of rows (starting at 1).')\n", (7263, 7313), False, 'from loguru import logger\n'), ((7614, 7662), 'loguru.logger.log', 'logger.log', (['"""FAILURE"""', '"""Input is less than one."""'], {}), "('FAILURE', 'Input is less than one.')\n", (7624, 7662), False, 'from loguru import logger\n'), ((5838, 5952), 'string.Template', 'string.Template', (['"""Reducing max column length of $id from $cur_col to $new_col due to None in $cell_pos."""'], {}), "(\n 'Reducing max column length of $id from $cur_col to $new_col due to None in $cell_pos.'\n )\n", (5853, 5952), False, 'import string\n'), ((6739, 6800), 'loguru.logger.log', 'logger.log', (['"""FAILURE"""', '"""Input could not be converted to int."""'], {}), "('FAILURE', 'Input could not be converted to int.')\n", (6749, 6800), False, 'from loguru import logger\n'), ((7455, 7516), 'loguru.logger.log', 'logger.log', (['"""FAILURE"""', '"""Input could not be converted to int."""'], {}), "('FAILURE', 'Input could not be converted to int.')\n", (7465, 7516), False, 'from loguru import logger\n'), ((2418, 2511), 'string.Template', 'string.Template', (['"""Unknown reference found at $cell_pos in $id. Defaulting to "unknown"."""'], {}), '(\n \'Unknown reference found at $cell_pos in $id. Defaulting to "unknown".\')\n', (2433, 2511), False, 'import string\n'), ((2631, 2685), 'invconv.msg_handler.get_id', 'msg_handler.get_id', (['(self.filename, self.wsname)', '"""WS"""'], {}), "((self.filename, self.wsname), 'WS')\n", (2649, 2685), True, 'import invconv.msg_handler as msg_handler\n'), ((4208, 4246), 'invconv.msg_handler.get_id', 'msg_handler.get_id', (['file_section', '"""ws"""'], {}), "(file_section, 'ws')\n", (4226, 4246), True, 'import invconv.msg_handler as msg_handler\n'), ((9511, 9555), 'invconv.msg_handler.get_id', 'msg_handler.get_id', (['(filename, wsname)', '"""WS"""'], {}), "((filename, wsname), 'WS')\n", (9529, 9555), True, 'import invconv.msg_handler as msg_handler\n')] |
import os
from tkinter import *
import tkinter.filedialog as tkfd
from PIL import Image
import numpy as np
import solvers.generation_solver.image_seperation as IS
def layer_interface(img_num):
layer_names = []
layer_nums = []
for k in range(img_num):
master = Toplevel()
master.title(f"Image number {k+1}")
master.geometry("+300+200")
# input image and layer
img_label = Label(master, text="Image").grid(row=0)
layer_label = Label(master, text="Layer").grid(row=1)
entry_img = Entry(master, width=30)
entry_layer = Entry(master, width=30)
entry_img.grid(row=0, column=1)
entry_layer.grid(row=1, column=1)
if k == img_num - 1:
Button(master, text='Done', command=master.quit).grid(row=2, column=2, sticky=W, pady=4)
else:
Button(master, text='Next', command=master.quit).grid(row=2, column=2, sticky=W, pady=4)
img_path = "inputs/images/"
img_path = os.path.join(os.path.dirname(__file__), img_path)
path = tkfd.askopenfilename(initialdir = img_path, title = "Select file", filetypes = (("png files","*.png"),("all files","*.*")))
entry_img.insert('0', os.path.basename(path))
image = Image.open(path)
img = PhotoImage(file=path)
width, height = img.width(), img.height()
if width > 250:
scale_w = int(round(width / 250, 0))
scale_h = int(round(height / 250, 0))
img = img.subsample(scale_w, scale_h)
if width < 250:
scale_w = int(round(250 / width, 0))
scale_h = int(round(250 / height, 0))
img = img.zoom(scale_w, scale_h)
Label(master, image=img).grid(row=2, column=1)
mainloop()
img_name = entry_img.get()
img_layer = entry_layer.get()
layer_names.append(img_name)
layer_nums.append(img_layer)
return layer_names, layer_nums
def show_interface():
root = Tk()
root.geometry("+300+300")
Label(root, text="Graph", font=("", 14, "bold", "underline"), fg='#696969').grid(row=0, sticky='w')
entry_graph = Entry(root, width=15)
entry_graph.grid(row=0, column=1)
graph_path = "connectivity/"
graph_path = os.path.join(os.path.dirname(__file__), graph_path)
path = tkfd.askopenfilename(initialdir = graph_path, title = "Select file", filetypes = (("pkl files", "*.pkl"), ("all files","*.*")))
entry_graph.insert('0', os.path.basename(path))
# input No. image and button
Label(root, text="Input image", font=("", 14, "bold", "underline"), fg='#696969').grid(row=1, sticky='w')
entry_file = Entry(root, width=15)
entry_file.grid(row=1, column=1)
entry_path = "inputs/images/"
entry_path = os.path.join(os.path.dirname(__file__), entry_path)
input_path = tkfd.askopenfilename(initialdir=entry_path, title="Select input image", filetypes=(("png files", "*.png"), ("jpg files", "*.jpg")))
entry_file.insert('0', os.path.basename(input_path))
Button(root, text='Next', command=root.quit).grid(row=1, column=2, sticky='e', pady=4)
# input background color
Label(root, text="").grid(row=2, column=1)
Label(root, text="Background color", font=("", 14, "bold", "underline"), fg='#696969').grid(row=3, sticky='w')
Label(root, text="R", fg='#4f4f4f').grid(row=4, column=0)
Label(root, text="G", fg='#4f4f4f').grid(row=4, column=1)
Label(root, text="B", fg='#4f4f4f').grid(row=4, column=2)
entry_r = Entry(root, width=15)
entry_g = Entry(root, width=15)
entry_b = Entry(root, width=15)
entry_r.grid(row=5, column=0)
entry_g.grid(row=5, column=1)
entry_b.grid(row=5, column=2)
# input rotation and scaling
Label(root, text="").grid(row=6, column=1)
Label(root, text="Rotation degree", font=("", 14, "bold", "underline"), fg='#696969').grid(row=7, sticky='w')
entry_degree = Entry(root, width=15, textvariable=StringVar(root, value='0'))
entry_degree.grid(row=7, column=1)
Label(root, text="Scale", font=("", 14, "bold", "underline"), fg='#696969').grid(row=7, column=2)
entry_scale = Entry(root, width=15, textvariable=StringVar(root, value='1'))
entry_scale.grid(row=7, column=3)
# input translation
Label(root, text="").grid(row=8, column=1)
Label(root, text="x translation", font=("", 14, "bold", "underline"), fg='#696969').grid(row=9, sticky='w')
entry_x = Entry(root, width=15, textvariable=StringVar(root, value='0'))
entry_x.grid(row=9, column=1)
Label(root, text="y translation", font=("", 14, "bold", "underline"), fg='#696969').grid(row=9, column=2)
entry_y = Entry(root, width=15, textvariable=StringVar(root, value='0'))
entry_y.grid(row=9, column=3)
Label(root, text="").grid(row=9, column=1)
mainloop()
img_path = input_path
print(img_path)
img_num = IS.seperate_color(img_path, "./cache/")
r, g, b = entry_r.get(), entry_g.get(), entry_b.get()
if len(r) == 0:
r = 0
if len(g) == 0:
g = 0
if len(b) == 0:
b = 0
if r == 0 and g == 0 and b == 0:
rgb = []
else:
rgb = np.array((int(r), int(g), int(b)))
layer_names, layer_nums = layer_interface(img_num)
return entry_graph.get(), img_num, layer_names, layer_nums, rgb, int(entry_degree.get()), float(entry_scale.get()), int(entry_x.get()), int(entry_y.get())
if __name__ == '__main__':
print(show_interface()) | [
"PIL.Image.open",
"os.path.dirname",
"solvers.generation_solver.image_seperation.seperate_color",
"os.path.basename",
"tkinter.filedialog.askopenfilename"
]
| [((2350, 2477), 'tkinter.filedialog.askopenfilename', 'tkfd.askopenfilename', ([], {'initialdir': 'graph_path', 'title': '"""Select file"""', 'filetypes': "(('pkl files', '*.pkl'), ('all files', '*.*'))"}), "(initialdir=graph_path, title='Select file', filetypes=\n (('pkl files', '*.pkl'), ('all files', '*.*')))\n", (2370, 2477), True, 'import tkinter.filedialog as tkfd\n'), ((2870, 3005), 'tkinter.filedialog.askopenfilename', 'tkfd.askopenfilename', ([], {'initialdir': 'entry_path', 'title': '"""Select input image"""', 'filetypes': "(('png files', '*.png'), ('jpg files', '*.jpg'))"}), "(initialdir=entry_path, title='Select input image',\n filetypes=(('png files', '*.png'), ('jpg files', '*.jpg')))\n", (2890, 3005), True, 'import tkinter.filedialog as tkfd\n'), ((4914, 4953), 'solvers.generation_solver.image_seperation.seperate_color', 'IS.seperate_color', (['img_path', '"""./cache/"""'], {}), "(img_path, './cache/')\n", (4931, 4953), True, 'import solvers.generation_solver.image_seperation as IS\n'), ((1067, 1192), 'tkinter.filedialog.askopenfilename', 'tkfd.askopenfilename', ([], {'initialdir': 'img_path', 'title': '"""Select file"""', 'filetypes': "(('png files', '*.png'), ('all files', '*.*'))"}), "(initialdir=img_path, title='Select file', filetypes=((\n 'png files', '*.png'), ('all files', '*.*')))\n", (1087, 1192), True, 'import tkinter.filedialog as tkfd\n'), ((1270, 1286), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (1280, 1286), False, 'from PIL import Image\n'), ((2300, 2325), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2315, 2325), False, 'import os\n'), ((2506, 2528), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (2522, 2528), False, 'import os\n'), ((2814, 2839), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2829, 2839), False, 'import os\n'), ((3029, 3057), 'os.path.basename', 'os.path.basename', (['input_path'], {}), '(input_path)\n', (3045, 3057), False, 'import os\n'), ((1015, 1040), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1030, 1040), False, 'import os\n'), ((1221, 1243), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (1237, 1243), False, 'import os\n')] |
# coding=utf-8
# Copyright 2020 TF.Text Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# encoding=utf-8
r"""Tests for BertTokenizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_map_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
from tensorflow_text.python.ops import bert_tokenizer
def _utf8(x):
return x.encode('utf-8')
# TODO(thuang513): It appears there isn't a Ragged version of substr; consider
# checking this into core TF.
def _ragged_substr(text_input, begin, end):
text_input_flat = None
if ragged_tensor.is_ragged(text_input):
text_input_flat = text_input.flat_values
else:
text_input_flat = text_input
def _ragged_tile(x):
input_text, indices = x
multiple = math_ops.reduce_sum(indices.row_lengths())
return array_ops.tile([input_text], [multiple])
broadcasted_text = ragged_map_ops.map_fn(
_ragged_tile,
(text_input_flat, begin),
dtype=ragged_tensor.RaggedTensorType(dtype=dtypes.string, ragged_rank=1),
infer_shape=False,
)
size = math_ops.sub(
array_ops.squeeze(end.flat_values), array_ops.squeeze(begin.flat_values))
new_tokens = string_ops.substr_v2(broadcasted_text,
array_ops.squeeze(begin.flat_values), size)
return begin.with_flat_values(new_tokens.flat_values)
_VOCAB = [
b'[unused1]',
b'[unused23]',
b"'",
b'##%',
b'##af',
b'##book',
b'##c',
b'##fr',
b'##hey',
b'##is',
b'##o',
b'##ost',
b'##s',
b'##tri',
b'##y',
b'$',
b'%',
b'&',
b'(',
b')',
b'*',
b'-',
b'.',
b'20',
b':',
b'?',
b'[CLS]',
b'[SEP]',
_utf8(u'國'),
_utf8(u'暐'),
_utf8(u'瀚'),
_utf8(u'韓'),
_utf8(u'食'),
_utf8(u'黃'),
_utf8(u'🤔'),
_utf8(u'🤣'),
b'^',
b'a',
b'ago',
b'among',
b'an',
b'and',
b'are',
b'aren',
b'awesome',
b'between',
b'candy',
b'china',
b'companies',
b'company',
b'crushed',
b'dug',
b'earnings',
b'engaged',
b'even',
b'few',
b'forecast',
b'getting',
b'had',
b'han',
b'has',
b'hers',
b'high',
b'hit',
b'hs',
b'hurting',
b'in',
b'indie',
b'is',
b'isn',
b'ka',
b'ku',
b'major',
b'maker',
b'moth',
b'nearly',
b'new',
b'now',
b'president',
b'record',
b'regulators',
b'reported',
b'rift',
b'rust',
b'sales',
b'shares',
b'slightly',
b'sprint',
b'states',
b'stock',
b't',
b'taste',
b'tension',
b'that',
b'the',
b'this',
b'today',
b'told',
b'topped',
b'trade',
b'trump',
b'united',
b'up',
b'weeks',
b'what',
b'why',
b'with',
b'year',
b'yo',
b'yu',
_utf8(u'\u7231'),
_utf8(u'\u4e0a'),
_utf8(u'\u4e00'),
_utf8(u'\u4e2a'),
_utf8(u'\u4e0d'),
_utf8(u'\u56de'),
_utf8(u'\u5bb6'),
_utf8(u'\u7684'),
_utf8(u'\u4eba'),
]
def _create_table(vocab, num_oov=1):
init = lookup_ops.KeyValueTensorInitializer(
vocab,
math_ops.range(
array_ops.size(vocab, out_type=dtypes.int64), dtype=dtypes.int64),
key_dtype=dtypes.string,
value_dtype=dtypes.int64)
return lookup_ops.StaticVocabularyTableV1(
init, num_oov, lookup_key_dtype=dtypes.string)
class BertTokenizerTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def test_bert_tokenizer_outputs(self):
text_inputs = constant_op.constant([_utf8('Test')])
vocab = _VOCAB
table = _create_table(vocab, 2)
self.evaluate(table.initializer)
tokenizer = bert_tokenizer.BertTokenizer(
table,
token_out_type=dtypes.int32)
results = tokenizer.tokenize(text_inputs)
self.assertAllEqual(results.dtype, dtypes.int32)
@parameterized.parameters([
dict(
text_inputs=[
_utf8(u'taste the rustisc indiefrost'),
_utf8(u'Han Kuo-yu (韓國食)🤔'),
_utf8(u'Añade la información del formulario y tus preguntas'),
],
expected_tokens=[[b'taste', b'the', b'rustisc', b'indiefrost'],
[
b'Han', b'Kuo', b'-', b'yu', b'(',
b'\xe9\x9f\x93', b'\xe5\x9c\x8b',
b'\xe9\xa3\x9f', b')', b'\xf0\x9f\xa4\x94'
],
[
b'A\xc3\xb1ade', b'la', b'informaci\xc3\xb3n',
b'del', b'formulario', b'y', b'tus', b'preguntas'
]],
),
dict(
text_inputs=[
_utf8(u'UNwant\u00E9d,running'),
_utf8(u'Añade la información del formulario y tus preguntas'),
],
expected_tokens=[[b'unwanted', b',', b'running'],
[
b'anade', b'la', b'informacion', b'del',
b'formulario', b'y', b'tus', b'preguntas'
]],
lower_case=True,
),
dict(
text_inputs=[
_utf8(u'Añade la información del formulario y tus preguntas')
],
expected_tokens=[[
b'An\xcc\x83ade', b'la', b'informacio\xcc\x81n', b'del',
b'formulario', b'y', b'tus', b'preguntas'
]],
normalization_form='NFD',
),
# Test CJK are tokenized by unicode characters
dict(
text_inputs=[
_utf8(u'香港では4日'),
_utf8(u'영어독해 자만심 왜 문제일까'),
_utf8(u'據港媒《東網》報導')
],
expected_tokens=[
[_utf8(u'香'),
_utf8(u'港'),
_utf8(u'では4'),
_utf8(u'日')],
[
_utf8(u'영어독해'),
_utf8(u'자만심'),
_utf8(u'왜'),
_utf8(u'문제일까'),
],
[
_utf8(u'據'),
_utf8(u'港'),
_utf8(u'媒'),
_utf8(u'《'),
_utf8(u'東'),
_utf8(u'網'),
_utf8(u'》'),
_utf8(u'報'),
_utf8(u'導')
],
],
normalization_form=None,
),
# Test Katakana followed by Hiragana.
dict(
text_inputs=[_utf8(u'のテキストとして')],
expected_tokens=[
[_utf8(u'のテキストとして')],
],
normalization_form=None,
),
])
@test_util.run_in_graph_and_eager_modes
def test_basic_tokenize(self,
text_inputs,
expected_tokens,
lower_case=False,
normalization_form='NFC'):
text_inputs = ragged_factory_ops.constant(text_inputs)
tokenizer = bert_tokenizer.BasicTokenizer(
lower_case=lower_case, normalization_form=normalization_form)
tokens = tokenizer.tokenize(text_inputs)
self.assertAllEqual(tokens, expected_tokens)
@parameterized.parameters([
dict(
text_inputs=[
b'taste the rustisc indiefrost',
_utf8(u'Han Kuo-yu (韓國食)🤔'),
_utf8(u'dugtrio had an awesome 🤣 dugbook'),
b'yo^what$is*up?',
b'mothaf*&%ka',
],
expected=[[[b'taste'], [b'the'], [b'rust', b'##is', b'##c'],
[b'indie', b'##fr', b'##ost']],
[[b'han'], [b'ku', b'##o'], [b'-'], [b'yu'], [b'('],
[_utf8(u'韓')], [_utf8(u'國')], [_utf8(u'食')], [b')'],
[_utf8(u'🤔')]],
[[b'dug', b'##tri', b'##o'], [b'had'], [b'an'],
[b'awesome'], [_utf8(u'🤣')], [b'dug', b'##book']],
[[b'yo'], [b'^'], [b'what'], [b'$'], [b'is'], [b'*'],
[b'up'], [b'?']],
[[b'moth', b'##af'], [b'*'], [b'&'], [b'%'], [b'ka']]],
expected_extracted=[[[b'taste'], [b'the'], [b'rust', b'is', b'c'],
[b'indie', b'fr', b'ost']],
[[b'Han'], [b'Ku', b'o'], [b'-'], [b'yu'], [b'('],
[_utf8(u'韓')], [_utf8(u'國')], [_utf8(u'食')],
[b')'], [_utf8(u'🤔')]],
[[b'dug', b'tri', b'o'], [b'had'], [b'an'],
[b'awesome'], [_utf8(u'🤣')], [b'dug', b'book']],
[[b'yo'], [b'^'], [b'what'], [b'$'], [b'is'],
[b'*'], [b'up'], [b'?']],
[[b'moth', b'af'], [b'*'], [b'&'], [b'%'],
[b'ka']]],
lower_case=True,
),
# Test when we are expecting multiple OOV vocab ids and tf.string just
# maps out [UNK] token.
dict(
text_inputs=[
b'mothaf*&%ka cantfindme whodis',
],
expected=[[[b'moth', b'##af'], [b'*'], [b'&'], [b'%'], [b'ka'],
[b'[UNK]'], [b'[UNK]']]],
expected_extracted=[[[b'moth', b'af'], [b'*'], [b'&'], [b'%'],
[b'ka'], [b'cantfindme'], [b'whodis']]],
lower_case=True,
num_oov=2,
),
dict(
text_inputs=[
b'candy',
],
expected=[[[b'candy']]],
lower_case=True,
num_oov=2,
),
dict(
text_inputs=[
_utf8(u'爱上一个不回家的人'),
],
expected=[[[_utf8(u'爱')], [_utf8(u'上')], [_utf8(u'一')], [_utf8(u'个')],
[_utf8(u'不')], [_utf8(u'回')], [_utf8(u'家')], [_utf8(u'的')],
[_utf8(u'人')]]],
lower_case=True,
num_oov=2,
),
# Test 'preserve_unused_token' option
dict(
text_inputs=[
b'taste the rustisc indiefrost [unused1]',
_utf8(u'爱上一个不回家的人[unused23]'),
],
expected=[[[b'taste'], [b'the'], [b'rust', b'##is', b'##c'],
[b'indie', b'##fr', b'##ost'], [b'[unused1]']],
[[_utf8(u'爱')], [_utf8(u'上')], [_utf8(u'一')], [_utf8(u'个')],
[_utf8(u'不')], [_utf8(u'回')], [_utf8(u'家')], [_utf8(u'的')],
[_utf8(u'人')], [b'[unused23]']]],
preserve_unused_token=True,
),
])
@test_util.run_in_graph_and_eager_modes
def test_bert_tokenizer(self,
text_inputs,
expected,
vocab=None,
expected_extracted=None,
lower_case=True,
num_oov=1,
preserve_unused_token=False):
text_inputs = constant_op.constant(text_inputs)
if not vocab:
vocab = _VOCAB
table = _create_table(vocab, num_oov)
self.evaluate(table.initializer)
tokenizer = bert_tokenizer.BertTokenizer(
table,
token_out_type=dtypes.string,
lower_case=lower_case,
preserve_unused_token=preserve_unused_token)
results = tokenizer.tokenize(text_inputs)
self.assertAllEqual(results, expected)
# Verify that the int ids are the same.
expected_rt = ragged_factory_ops.constant(expected)
expected_int = table.lookup(expected_rt.flat_values)
expected_int_rt = ragged_tensor.RaggedTensor.from_nested_row_splits(
expected_int, expected_rt.nested_row_splits)
int_tokenizer = bert_tokenizer.BertTokenizer(
vocab_lookup_table=table,
token_out_type=dtypes.int64,
lower_case=lower_case,
preserve_unused_token=preserve_unused_token)
results_int = int_tokenizer.tokenize(text_inputs)
self.assertAllEqual(results_int, expected_int_rt)
# Verify that the offsets can extract the expected tokens
_, begin, end = tokenizer.tokenize_with_offsets(text_inputs)
extracted_wordpieces = _ragged_substr(text_inputs, begin, end)
if expected_extracted:
self.assertAllEqual(extracted_wordpieces, expected_extracted)
else:
# The extracted won't have any wordpieces with '##' prefix. Strip them
# out.
stripped_prefix_flat = string_ops.regex_replace(expected_rt.flat_values,
'##', '')
stripped_prefix = expected_rt.with_flat_values(stripped_prefix_flat)
self.assertAllEqual(extracted_wordpieces, stripped_prefix)
if __name__ == '__main__':
test.main()
| [
"tensorflow.python.ops.string_ops.regex_replace",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.python.ops.lookup_ops.StaticVocabularyTableV1",
"tensorflow.python.ops.ragged.ragged_tensor.is_ragged",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_nested_row_splits",
"tensorflow.python.ops.array_ops.size",
"tensorflow.python.ops.ragged.ragged_factory_ops.constant",
"tensorflow_text.python.ops.bert_tokenizer.BasicTokenizer",
"tensorflow.python.ops.ragged.ragged_tensor.RaggedTensorType",
"tensorflow_text.python.ops.bert_tokenizer.BertTokenizer",
"tensorflow.python.ops.array_ops.tile",
"tensorflow.python.platform.test.main"
]
| [((1633, 1668), 'tensorflow.python.ops.ragged.ragged_tensor.is_ragged', 'ragged_tensor.is_ragged', (['text_input'], {}), '(text_input)\n', (1656, 1668), False, 'from tensorflow.python.ops.ragged import ragged_tensor\n'), ((4382, 4468), 'tensorflow.python.ops.lookup_ops.StaticVocabularyTableV1', 'lookup_ops.StaticVocabularyTableV1', (['init', 'num_oov'], {'lookup_key_dtype': 'dtypes.string'}), '(init, num_oov, lookup_key_dtype=dtypes.\n string)\n', (4416, 4468), False, 'from tensorflow.python.ops import lookup_ops\n'), ((13676, 13687), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (13685, 13687), False, 'from tensorflow.python.platform import test\n'), ((1877, 1917), 'tensorflow.python.ops.array_ops.tile', 'array_ops.tile', (['[input_text]', '[multiple]'], {}), '([input_text], [multiple])\n', (1891, 1917), False, 'from tensorflow.python.ops import array_ops\n'), ((2153, 2187), 'tensorflow.python.ops.array_ops.squeeze', 'array_ops.squeeze', (['end.flat_values'], {}), '(end.flat_values)\n', (2170, 2187), False, 'from tensorflow.python.ops import array_ops\n'), ((2189, 2225), 'tensorflow.python.ops.array_ops.squeeze', 'array_ops.squeeze', (['begin.flat_values'], {}), '(begin.flat_values)\n', (2206, 2225), False, 'from tensorflow.python.ops import array_ops\n'), ((2317, 2353), 'tensorflow.python.ops.array_ops.squeeze', 'array_ops.squeeze', (['begin.flat_values'], {}), '(begin.flat_values)\n', (2334, 2353), False, 'from tensorflow.python.ops import array_ops\n'), ((4758, 4822), 'tensorflow_text.python.ops.bert_tokenizer.BertTokenizer', 'bert_tokenizer.BertTokenizer', (['table'], {'token_out_type': 'dtypes.int32'}), '(table, token_out_type=dtypes.int32)\n', (4786, 4822), False, 'from tensorflow_text.python.ops import bert_tokenizer\n'), ((7955, 7995), 'tensorflow.python.ops.ragged.ragged_factory_ops.constant', 'ragged_factory_ops.constant', (['text_inputs'], {}), '(text_inputs)\n', (7982, 7995), False, 'from tensorflow.python.ops.ragged import ragged_factory_ops\n'), ((8012, 8108), 'tensorflow_text.python.ops.bert_tokenizer.BasicTokenizer', 'bert_tokenizer.BasicTokenizer', ([], {'lower_case': 'lower_case', 'normalization_form': 'normalization_form'}), '(lower_case=lower_case, normalization_form=\n normalization_form)\n', (8041, 8108), False, 'from tensorflow_text.python.ops import bert_tokenizer\n'), ((11950, 11983), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['text_inputs'], {}), '(text_inputs)\n', (11970, 11983), False, 'from tensorflow.python.framework import constant_op\n'), ((12118, 12255), 'tensorflow_text.python.ops.bert_tokenizer.BertTokenizer', 'bert_tokenizer.BertTokenizer', (['table'], {'token_out_type': 'dtypes.string', 'lower_case': 'lower_case', 'preserve_unused_token': 'preserve_unused_token'}), '(table, token_out_type=dtypes.string,\n lower_case=lower_case, preserve_unused_token=preserve_unused_token)\n', (12146, 12255), False, 'from tensorflow_text.python.ops import bert_tokenizer\n'), ((12437, 12474), 'tensorflow.python.ops.ragged.ragged_factory_ops.constant', 'ragged_factory_ops.constant', (['expected'], {}), '(expected)\n', (12464, 12474), False, 'from tensorflow.python.ops.ragged import ragged_factory_ops\n'), ((12554, 12653), 'tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_nested_row_splits', 'ragged_tensor.RaggedTensor.from_nested_row_splits', (['expected_int', 'expected_rt.nested_row_splits'], {}), '(expected_int, expected_rt\n .nested_row_splits)\n', (12603, 12653), False, 'from 
tensorflow.python.ops.ragged import ragged_tensor\n'), ((12678, 12839), 'tensorflow_text.python.ops.bert_tokenizer.BertTokenizer', 'bert_tokenizer.BertTokenizer', ([], {'vocab_lookup_table': 'table', 'token_out_type': 'dtypes.int64', 'lower_case': 'lower_case', 'preserve_unused_token': 'preserve_unused_token'}), '(vocab_lookup_table=table, token_out_type=\n dtypes.int64, lower_case=lower_case, preserve_unused_token=\n preserve_unused_token)\n', (12706, 12839), False, 'from tensorflow_text.python.ops import bert_tokenizer\n'), ((2027, 2093), 'tensorflow.python.ops.ragged.ragged_tensor.RaggedTensorType', 'ragged_tensor.RaggedTensorType', ([], {'dtype': 'dtypes.string', 'ragged_rank': '(1)'}), '(dtype=dtypes.string, ragged_rank=1)\n', (2057, 2093), False, 'from tensorflow.python.ops.ragged import ragged_tensor\n'), ((4243, 4287), 'tensorflow.python.ops.array_ops.size', 'array_ops.size', (['vocab'], {'out_type': 'dtypes.int64'}), '(vocab, out_type=dtypes.int64)\n', (4257, 4287), False, 'from tensorflow.python.ops import array_ops\n'), ((13391, 13450), 'tensorflow.python.ops.string_ops.regex_replace', 'string_ops.regex_replace', (['expected_rt.flat_values', '"""##"""', '""""""'], {}), "(expected_rt.flat_values, '##', '')\n", (13415, 13450), False, 'from tensorflow.python.ops import string_ops\n')] |
import funcvote as vote
votes = input("투표내용 >>>")
# print(votes)
# print(type(votes))
result = vote.str2int(votes)
print(vote.countvotes(result))
result = vote.countvotes(result)
vote.printvote(result)
# 투표 초안 | [
"funcvote.str2int",
"funcvote.printvote",
"funcvote.countvotes"
]
| [((97, 116), 'funcvote.str2int', 'vote.str2int', (['votes'], {}), '(votes)\n', (109, 116), True, 'import funcvote as vote\n'), ((158, 181), 'funcvote.countvotes', 'vote.countvotes', (['result'], {}), '(result)\n', (173, 181), True, 'import funcvote as vote\n'), ((183, 205), 'funcvote.printvote', 'vote.printvote', (['result'], {}), '(result)\n', (197, 205), True, 'import funcvote as vote\n'), ((124, 147), 'funcvote.countvotes', 'vote.countvotes', (['result'], {}), '(result)\n', (139, 147), True, 'import funcvote as vote\n')] |
"""
libs.strings
By default, uses `en-gb.json` file inside the `strings` top-level folder.
If language changes, set `libs.strings.default_locale` and run `libs.strings.refresh()`.
"""
import json
default_locale = "en-us"
cached_strings = {}
def refresh():
global cached_strings
with open(f"strings/{default_locale}.json") as f:
cached_strings = json.load(f)
def gettext(name):
return cached_strings[name]
refresh()
| [
"json.load"
]
| [((366, 378), 'json.load', 'json.load', (['f'], {}), '(f)\n', (375, 378), False, 'import json\n')] |
# Copyright 2013-2021 The Salish Sea MEOPAR Contributors
# and The University of British Columbia
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NEMO-Cmd command plug-in for deflate sub-command.
Deflate variables in netCDF files using Lempel-Ziv compression.
"""
import logging
import math
import multiprocessing
from pathlib import Path
import shlex
import subprocess
import time
import attr
import cliff.command
logger = logging.getLogger(__name__)
class Deflate(cliff.command.Command):
"""Deflate variables in netCDF files using Lempel-Ziv compression."""
def get_parser(self, prog_name):
parser = super(Deflate, self).get_parser(prog_name)
parser.description = """
Deflate variables in netCDF files using Lempel-Ziv compression.
Converts files to netCDF-4 format.
The deflated file replaces the original file.
This command is effectively the same as running
ncks -4 -L -O FILEPATH FILEPATH
for each FILEPATH.
"""
parser.add_argument(
"filepaths",
nargs="+",
type=Path,
metavar="FILEPATH",
help="Path/name of file to be deflated.",
)
parser.add_argument(
"-j",
"--jobs",
type=int,
default=math.floor(multiprocessing.cpu_count() / 2),
help=(
"Maximum number of concurrent deflation processes allowed. "
"Defaults to 1/2 the number of cores detected."
),
)
return parser
def take_action(self, parsed_args):
"""Execute the :command:`nemo deflate` sub-command.
Deflate variables in netCDF files using Lempel-Ziv compression.
Converts files to netCDF-4 format.
The deflated file replaces the original file.
This command is effectively the same as
:command:`ncks -4 -L -O filename filename`.
"""
deflate(parsed_args.filepaths, parsed_args.jobs)
@attr.s
class DeflateJob(object):
"""netCDF file deflation job."""
#: Path/name of the netCDF file to deflate.
filepath = attr.ib()
#: Lempel-Ziv compression level to use.
dfl_lvl = attr.ib(default=4)
#: Deflation job subprocess object.
process = attr.ib(default=None)
#: Deflation job process PID.
pid = attr.ib(default=None)
#: Deflation job process return code.
returncode = attr.ib(default=None)
def start(self):
"""Start the deflation job in a subprocess.
Cache the subprocess object and its process id as job attributes.
"""
cmd = "nccopy -s -4 -d{0.dfl_lvl} {0.filepath} {0.filepath}.nccopy.tmp".format(
self
)
self.process = subprocess.Popen(
shlex.split(cmd),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
self.pid = self.process.pid
logger.debug("deflating {0.filepath} in process {0.pid}".format(self))
@property
def done(self):
"""Return a boolean indicating whether or not the job has finished.
Cache the subprocess return code as a job attribute.
"""
finished = False
self.returncode = self.process.poll()
if self.returncode is not None:
if self.returncode == 0:
Path("{0.filepath}.nccopy.tmp".format(self)).rename(self.filepath)
finished = True
logger.debug(
"deflating {0.filepath} finished "
"with return code {0.returncode}".format(self)
)
return finished
def deflate(filepaths, max_concurrent_jobs):
"""Deflate variables in each of the netCDF files in filepaths using
Lempel-Ziv compression.
Converts files to netCDF-4 format.
The deflated file replaces the original file.
:param sequence filepaths: Paths/names of files to be deflated.
:param int max_concurrent_jobs: Maximum number of concurrent deflation
processes allowed.
"""
logger.info(
"Deflating in up to {} concurrent sub-processes".format(
int(max_concurrent_jobs)
)
)
jobs = [DeflateJob(fp) for fp in filepaths if fp.exists()]
jobs_in_progress = _launch_initial_jobs(jobs, max_concurrent_jobs)
while jobs or jobs_in_progress:
time.sleep(1)
_poll_and_launch(jobs, jobs_in_progress)
def _launch_initial_jobs(jobs, max_concurrent_jobs):
jobs_in_progress = {}
for process in range(int(max_concurrent_jobs)):
try:
job = jobs.pop(0)
except IndexError:
break
else:
job.start()
jobs_in_progress[job.pid] = job
return jobs_in_progress
def _poll_and_launch(jobs, jobs_in_progress):
for running_job in jobs_in_progress.copy().values():
if running_job.done:
result, _ = running_job.process.communicate()
logger.error(result) if result else logger.info(
"netCDF4 deflated {.filepath}".format(running_job)
)
jobs_in_progress.pop(running_job.pid)
try:
job = jobs.pop(0)
except IndexError:
continue
else:
job.start()
jobs_in_progress[job.pid] = job
| [
"logging.getLogger",
"shlex.split",
"multiprocessing.cpu_count",
"time.sleep",
"attr.ib"
]
| [((924, 951), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (941, 951), False, 'import logging\n'), ((2661, 2670), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2668, 2670), False, 'import attr\n'), ((2729, 2747), 'attr.ib', 'attr.ib', ([], {'default': '(4)'}), '(default=4)\n', (2736, 2747), False, 'import attr\n'), ((2802, 2823), 'attr.ib', 'attr.ib', ([], {'default': 'None'}), '(default=None)\n', (2809, 2823), False, 'import attr\n'), ((2868, 2889), 'attr.ib', 'attr.ib', ([], {'default': 'None'}), '(default=None)\n', (2875, 2889), False, 'import attr\n'), ((2949, 2970), 'attr.ib', 'attr.ib', ([], {'default': 'None'}), '(default=None)\n', (2956, 2970), False, 'import attr\n'), ((4902, 4915), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4912, 4915), False, 'import time\n'), ((3300, 3316), 'shlex.split', 'shlex.split', (['cmd'], {}), '(cmd)\n', (3311, 3316), False, 'import shlex\n'), ((1843, 1870), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (1868, 1870), False, 'import multiprocessing\n')] |
import unittest
from flask_monitoringdashboard.core.profiler.util.stringHash import StringHash
class TestStringHash(unittest.TestCase):
def test_stringhash(self):
string_hash = StringHash()
self.assertEqual(string_hash.hash('abc'), 0)
self.assertEqual(string_hash.hash('def'), 1)
self.assertEqual(string_hash.hash('abc'), 0)
def test_unhash(self):
string_hash = StringHash()
self.assertEqual(string_hash.unhash(string_hash.hash('abc')), 'abc')
self.assertRaises(ValueError, string_hash.unhash, 'unknown')
| [
"flask_monitoringdashboard.core.profiler.util.stringHash.StringHash"
]
| [((192, 204), 'flask_monitoringdashboard.core.profiler.util.stringHash.StringHash', 'StringHash', ([], {}), '()\n', (202, 204), False, 'from flask_monitoringdashboard.core.profiler.util.stringHash import StringHash\n'), ((415, 427), 'flask_monitoringdashboard.core.profiler.util.stringHash.StringHash', 'StringHash', ([], {}), '()\n', (425, 427), False, 'from flask_monitoringdashboard.core.profiler.util.stringHash import StringHash\n')] |
from django.shortcuts import render, get_object_or_404
from .models import News
# Create your views here.
def index(request):
latest_news_list = News.objects.order_by('-pub_date')[:10]
context = {'latest_news_list': latest_news_list}
return render(request, 'news/index.html', context)
def detail(request, news_id):
new = get_object_or_404(News, pk=news_id)
return render(request, 'news/detail.html', {'new': new})
| [
"django.shortcuts.render",
"django.shortcuts.get_object_or_404"
]
| [((254, 297), 'django.shortcuts.render', 'render', (['request', '"""news/index.html"""', 'context'], {}), "(request, 'news/index.html', context)\n", (260, 297), False, 'from django.shortcuts import render, get_object_or_404\n'), ((340, 375), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['News'], {'pk': 'news_id'}), '(News, pk=news_id)\n', (357, 375), False, 'from django.shortcuts import render, get_object_or_404\n'), ((387, 436), 'django.shortcuts.render', 'render', (['request', '"""news/detail.html"""', "{'new': new}"], {}), "(request, 'news/detail.html', {'new': new})\n", (393, 436), False, 'from django.shortcuts import render, get_object_or_404\n')] |