Fields per record: repo | path | func_name | language | code | sha | url | partition

repo: callowayproject/django-categories | path: categories/migration.py | func_name: migrate_app | language: python

def migrate_app(sender, *args, **kwargs):
    """
    Migrate all registered models of this app.
    """
    from .registration import registry
    if 'app_config' not in kwargs:
        return
    app_config = kwargs['app_config']
    app_name = app_config.label
    fields = [fld for fld in list(registry._field_registry.keys()) if fld.startswith(app_name)]
    sid = transaction.savepoint()
    for fld in fields:
        model_name, field_name = fld.split('.')[1:]
        if field_exists(app_name, model_name, field_name):
            continue
        model = app_config.get_model(model_name)
        try:
            with connection.schema_editor() as schema_editor:
                schema_editor.add_field(model, registry._field_registry[fld])
            if sid:
                transaction.savepoint_commit(sid)
        except ProgrammingError:
            if sid:
                transaction.savepoint_rollback(sid)
            continue
"""
Migrate all models of this app registered
"""
from .registration import registry
if 'app_config' not in kwargs:
return
app_config = kwargs['app_config']
app_name = app_config.label
fields = [fld for fld in list(registry._field_registry.keys()) if fld.startswith(app_name)]
sid = transaction.savepoint()
for fld in fields:
model_name, field_name = fld.split('.')[1:]
if field_exists(app_name, model_name, field_name):
continue
model = app_config.get_model(model_name)
try:
with connection.schema_editor() as schema_editor:
schema_editor.add_field(model, registry._field_registry[fld])
if sid:
transaction.savepoint_commit(sid)
except ProgrammingError:
if sid:
transaction.savepoint_rollback(sid)
continue | [
"def",
"migrate_app",
"(",
"sender",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
".",
"registration",
"import",
"registry",
"if",
"'app_config'",
"not",
"in",
"kwargs",
":",
"return",
"app_config",
"=",
"kwargs",
"[",
"'app_config'",
"]",
"app_name",
"=",
"app_config",
".",
"label",
"fields",
"=",
"[",
"fld",
"for",
"fld",
"in",
"list",
"(",
"registry",
".",
"_field_registry",
".",
"keys",
"(",
")",
")",
"if",
"fld",
".",
"startswith",
"(",
"app_name",
")",
"]",
"sid",
"=",
"transaction",
".",
"savepoint",
"(",
")",
"for",
"fld",
"in",
"fields",
":",
"model_name",
",",
"field_name",
"=",
"fld",
".",
"split",
"(",
"'.'",
")",
"[",
"1",
":",
"]",
"if",
"field_exists",
"(",
"app_name",
",",
"model_name",
",",
"field_name",
")",
":",
"continue",
"model",
"=",
"app_config",
".",
"get_model",
"(",
"model_name",
")",
"try",
":",
"with",
"connection",
".",
"schema_editor",
"(",
")",
"as",
"schema_editor",
":",
"schema_editor",
".",
"add_field",
"(",
"model",
",",
"registry",
".",
"_field_registry",
"[",
"fld",
"]",
")",
"if",
"sid",
":",
"transaction",
".",
"savepoint_commit",
"(",
"sid",
")",
"except",
"ProgrammingError",
":",
"if",
"sid",
":",
"transaction",
".",
"savepoint_rollback",
"(",
"sid",
")",
"continue"
] | Migrate all models of this app registered | [
"Migrate",
"all",
"models",
"of",
"this",
"app",
"registered"
] | 3765851320a79b12c6d3306f3784a2302ea64812 | https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/migration.py#L39-L66 | train |
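
This handler expects an app_config keyword, which Django's post_migrate signal provides; a minimal wiring sketch (the signal choice and placement are assumptions, not part of the record above):

# Hypothetical wiring, e.g. inside an AppConfig.ready() method.
from django.db.models.signals import post_migrate

# post_migrate passes app_config as a keyword argument, which is
# exactly what migrate_app looks up in kwargs.
post_migrate.connect(migrate_app)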

repo: callowayproject/django-categories | path: categories/models.py | func_name: Category.get_absolute_url | language: python

def get_absolute_url(self):
    """Return the URL path for this category."""
    from django.urls import NoReverseMatch

    if self.alternate_url:
        return self.alternate_url
    try:
        prefix = reverse('categories_tree_list')
    except NoReverseMatch:
        prefix = '/'
    ancestors = list(self.get_ancestors()) + [self]
    return prefix + '/'.join([force_text(i.slug) for i in ancestors]) + '/'
"""Return a path"""
from django.urls import NoReverseMatch
if self.alternate_url:
return self.alternate_url
try:
prefix = reverse('categories_tree_list')
except NoReverseMatch:
prefix = '/'
ancestors = list(self.get_ancestors()) + [self, ]
return prefix + '/'.join([force_text(i.slug) for i in ancestors]) + '/' | [
"def",
"get_absolute_url",
"(",
"self",
")",
":",
"from",
"django",
".",
"urls",
"import",
"NoReverseMatch",
"if",
"self",
".",
"alternate_url",
":",
"return",
"self",
".",
"alternate_url",
"try",
":",
"prefix",
"=",
"reverse",
"(",
"'categories_tree_list'",
")",
"except",
"NoReverseMatch",
":",
"prefix",
"=",
"'/'",
"ancestors",
"=",
"list",
"(",
"self",
".",
"get_ancestors",
"(",
")",
")",
"+",
"[",
"self",
",",
"]",
"return",
"prefix",
"+",
"'/'",
".",
"join",
"(",
"[",
"force_text",
"(",
"i",
".",
"slug",
")",
"for",
"i",
"in",
"ancestors",
"]",
")",
"+",
"'/'"
] | Return a path | [
"Return",
"a",
"path"
] | 3765851320a79b12c6d3306f3784a2302ea64812 | https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/models.py#L56-L67 | train |
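
To illustrate the slug-joining at the end, assume a three-level tree and no 'categories_tree_list' URL configured (names hypothetical):

# Tree: Grandparent > Parent > Child, slugs derived from the names.
# reverse() raises NoReverseMatch, so prefix falls back to '/':
child.get_absolute_url()   # -> '/grandparent/parent/child/'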

repo: callowayproject/django-categories | path: categories/models.py | func_name: CategoryRelationManager.get_content_type | language: python

def get_content_type(self, content_type):
    """
    Get all the items of the given content type related to this item.
    """
    qs = self.get_queryset()
    return qs.filter(content_type__name=content_type)
"""
Get all the items of the given content type related to this item.
"""
qs = self.get_queryset()
return qs.filter(content_type__name=content_type) | [
"def",
"get_content_type",
"(",
"self",
",",
"content_type",
")",
":",
"qs",
"=",
"self",
".",
"get_queryset",
"(",
")",
"return",
"qs",
".",
"filter",
"(",
"content_type__name",
"=",
"content_type",
")"
] | Get all the items of the given content type related to this item. | [
"Get",
"all",
"the",
"items",
"of",
"the",
"given",
"content",
"type",
"related",
"to",
"this",
"item",
"."
] | 3765851320a79b12c6d3306f3784a2302ea64812 | https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/models.py#L109-L114 | train |

repo: callowayproject/django-categories | path: categories/models.py | func_name: CategoryRelationManager.get_relation_type | language: python

def get_relation_type(self, relation_type):
    """
    Get all the items of the given relationship type related to this item.
    """
    qs = self.get_queryset()
    return qs.filter(relation_type=relation_type)
"""
Get all the items of the given relationship type related to this item.
"""
qs = self.get_queryset()
return qs.filter(relation_type=relation_type) | [
"def",
"get_relation_type",
"(",
"self",
",",
"relation_type",
")",
":",
"qs",
"=",
"self",
".",
"get_queryset",
"(",
")",
"return",
"qs",
".",
"filter",
"(",
"relation_type",
"=",
"relation_type",
")"
] | Get all the items of the given relationship type related to this item. | [
"Get",
"all",
"the",
"items",
"of",
"the",
"given",
"relationship",
"type",
"related",
"to",
"this",
"item",
"."
] | 3765851320a79b12c6d3306f3784a2302ea64812 | https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/models.py#L116-L121 | train |

repo: callowayproject/django-categories | path: categories/apps.py | func_name: handle_class_prepared | language: python

def handle_class_prepared(sender, **kwargs):
    """
    Check whether this class needs fields registered.
    """
    from .settings import M2M_REGISTRY, FK_REGISTRY
    from .registration import registry

    sender_app = sender._meta.app_label
    sender_name = sender._meta.model_name
    for key, val in list(FK_REGISTRY.items()):
        app_name, model_name = key.split('.')
        if app_name == sender_app and sender_name == model_name:
            registry.register_model(app_name, sender, 'ForeignKey', val)
    for key, val in list(M2M_REGISTRY.items()):
        app_name, model_name = key.split('.')
        if app_name == sender_app and sender_name == model_name:
            registry.register_model(app_name, sender, 'ManyToManyField', val)
"""
See if this class needs registering of fields
"""
from .settings import M2M_REGISTRY, FK_REGISTRY
from .registration import registry
sender_app = sender._meta.app_label
sender_name = sender._meta.model_name
for key, val in list(FK_REGISTRY.items()):
app_name, model_name = key.split('.')
if app_name == sender_app and sender_name == model_name:
registry.register_model(app_name, sender, 'ForeignKey', val)
for key, val in list(M2M_REGISTRY.items()):
app_name, model_name = key.split('.')
if app_name == sender_app and sender_name == model_name:
registry.register_model(app_name, sender, 'ManyToManyField', val) | [
"def",
"handle_class_prepared",
"(",
"sender",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
".",
"settings",
"import",
"M2M_REGISTRY",
",",
"FK_REGISTRY",
"from",
".",
"registration",
"import",
"registry",
"sender_app",
"=",
"sender",
".",
"_meta",
".",
"app_label",
"sender_name",
"=",
"sender",
".",
"_meta",
".",
"model_name",
"for",
"key",
",",
"val",
"in",
"list",
"(",
"FK_REGISTRY",
".",
"items",
"(",
")",
")",
":",
"app_name",
",",
"model_name",
"=",
"key",
".",
"split",
"(",
"'.'",
")",
"if",
"app_name",
"==",
"sender_app",
"and",
"sender_name",
"==",
"model_name",
":",
"registry",
".",
"register_model",
"(",
"app_name",
",",
"sender",
",",
"'ForeignKey'",
",",
"val",
")",
"for",
"key",
",",
"val",
"in",
"list",
"(",
"M2M_REGISTRY",
".",
"items",
"(",
")",
")",
":",
"app_name",
",",
"model_name",
"=",
"key",
".",
"split",
"(",
"'.'",
")",
"if",
"app_name",
"==",
"sender_app",
"and",
"sender_name",
"==",
"model_name",
":",
"registry",
".",
"register_model",
"(",
"app_name",
",",
"sender",
",",
"'ManyToManyField'",
",",
"val",
")"
] | See if this class needs registering of fields | [
"See",
"if",
"this",
"class",
"needs",
"registering",
"of",
"fields"
] | 3765851320a79b12c6d3306f3784a2302ea64812 | https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/apps.py#L20-L37 | train |
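
A sketch of how this receiver fits together with the registries; the connect call uses Django's real class_prepared signal, while the dict shapes are assumptions inferred from the key.split('.') handling above:

from django.db.models.signals import class_prepared

# Assumed registry shape: '<app_label>.<model_name>' -> field name, e.g.
# FK_REGISTRY = {'flatpages.flatpage': 'category'}
# M2M_REGISTRY = {'flatpages.flatpage': 'categories'}

class_prepared.connect(handle_class_prepared)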

repo: callowayproject/django-categories | path: categories/editor/tree_editor.py | func_name: TreeEditor.get_queryset | language: python

def get_queryset(self, request):
    """
    Returns a QuerySet of all model instances that can be edited by the
    admin site. This is used by changelist_view.
    """
    qs = self.model._default_manager.get_queryset()
    qs.__class__ = TreeEditorQuerySet
    return qs
"""
Returns a QuerySet of all model instances that can be edited by the
admin site. This is used by changelist_view.
"""
qs = self.model._default_manager.get_queryset()
qs.__class__ = TreeEditorQuerySet
return qs | [
"def",
"get_queryset",
"(",
"self",
",",
"request",
")",
":",
"qs",
"=",
"self",
".",
"model",
".",
"_default_manager",
".",
"get_queryset",
"(",
")",
"qs",
".",
"__class__",
"=",
"TreeEditorQuerySet",
"return",
"qs"
] | Returns a QuerySet of all model instances that can be edited by the
admin site. This is used by changelist_view. | [
"Returns",
"a",
"QuerySet",
"of",
"all",
"model",
"instances",
"that",
"can",
"be",
"edited",
"by",
"the",
"admin",
"site",
".",
"This",
"is",
"used",
"by",
"changelist_view",
"."
] | 3765851320a79b12c6d3306f3784a2302ea64812 | https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/editor/tree_editor.py#L283-L290 | train |

repo: callowayproject/django-categories | path: categories/base.py | func_name: CategoryBaseAdmin.deactivate | language: python

def deactivate(self, request, queryset):
    """
    Set active to False for the selected items and their children.
    """
    selected_cats = self.model.objects.filter(
        pk__in=[int(x) for x in request.POST.getlist('_selected_action')])
    for item in selected_cats:
        if item.active:
            item.active = False
            item.save()
            item.children.all().update(active=False)
"""
Set active to False for selected items
"""
selected_cats = self.model.objects.filter(
pk__in=[int(x) for x in request.POST.getlist('_selected_action')])
for item in selected_cats:
if item.active:
item.active = False
item.save()
item.children.all().update(active=False) | [
"def",
"deactivate",
"(",
"self",
",",
"request",
",",
"queryset",
")",
":",
"selected_cats",
"=",
"self",
".",
"model",
".",
"objects",
".",
"filter",
"(",
"pk__in",
"=",
"[",
"int",
"(",
"x",
")",
"for",
"x",
"in",
"request",
".",
"POST",
".",
"getlist",
"(",
"'_selected_action'",
")",
"]",
")",
"for",
"item",
"in",
"selected_cats",
":",
"if",
"item",
".",
"active",
":",
"item",
".",
"active",
"=",
"False",
"item",
".",
"save",
"(",
")",
"item",
".",
"children",
".",
"all",
"(",
")",
".",
"update",
"(",
"active",
"=",
"False",
")"
] | Set active to False for selected items | [
"Set",
"active",
"to",
"False",
"for",
"selected",
"items"
] | 3765851320a79b12c6d3306f3784a2302ea64812 | https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/base.py#L144-L155 | train |

repo: callowayproject/django-categories | path: categories/management/commands/import_categories.py | func_name: Command.get_indent | language: python

def get_indent(self, string):
    """
    Return the leading whitespace of the string: a single tab,
    or the run of spaces at the start.
    """
    indent_amt = 0
    if string[0] == '\t':
        return '\t'
    for char in string:
        if char == ' ':
            indent_amt += 1
        else:
            return ' ' * indent_amt
"""
Look through the string and count the spaces
"""
indent_amt = 0
if string[0] == '\t':
return '\t'
for char in string:
if char == ' ':
indent_amt += 1
else:
return ' ' * indent_amt | [
"def",
"get_indent",
"(",
"self",
",",
"string",
")",
":",
"indent_amt",
"=",
"0",
"if",
"string",
"[",
"0",
"]",
"==",
"'\\t'",
":",
"return",
"'\\t'",
"for",
"char",
"in",
"string",
":",
"if",
"char",
"==",
"' '",
":",
"indent_amt",
"+=",
"1",
"else",
":",
"return",
"' '",
"*",
"indent_amt"
] | Look through the string and count the spaces | [
"Look",
"through",
"the",
"string",
"and",
"count",
"the",
"spaces"
] | 3765851320a79b12c6d3306f3784a2302ea64812 | https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/management/commands/import_categories.py#L16-L28 | train |
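
A few illustrative inputs and the indent unit they yield (strings made up; Command is the import_categories command above):

cmd = Command()
cmd.get_indent('\tChild')     # -> '\t'
cmd.get_indent('    Child')   # -> '    ' (four spaces)
cmd.get_indent('Top level')   # -> '' (no leading whitespace)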

repo: callowayproject/django-categories | path: categories/management/commands/import_categories.py | func_name: Command.make_category | language: python

def make_category(self, string, parent=None, order=1):
    """
    Make and save a category object from a string
    """
    cat = Category(
        name=string.strip(),
        slug=slugify(SLUG_TRANSLITERATOR(string.strip()))[:49],
        # parent=parent,  (parenting is handled by insert_node below)
        order=order
    )
    cat._tree_manager.insert_node(cat, parent, 'last-child', True)
    cat.save()
    if parent:
        parent.rght = cat.rght + 1
        parent.save()
    return cat
"""
Make and save a category object from a string
"""
cat = Category(
name=string.strip(),
slug=slugify(SLUG_TRANSLITERATOR(string.strip()))[:49],
# arent=parent,
order=order
)
cat._tree_manager.insert_node(cat, parent, 'last-child', True)
cat.save()
if parent:
parent.rght = cat.rght + 1
parent.save()
return cat | [
"def",
"make_category",
"(",
"self",
",",
"string",
",",
"parent",
"=",
"None",
",",
"order",
"=",
"1",
")",
":",
"cat",
"=",
"Category",
"(",
"name",
"=",
"string",
".",
"strip",
"(",
")",
",",
"slug",
"=",
"slugify",
"(",
"SLUG_TRANSLITERATOR",
"(",
"string",
".",
"strip",
"(",
")",
")",
")",
"[",
":",
"49",
"]",
",",
"# arent=parent,",
"order",
"=",
"order",
")",
"cat",
".",
"_tree_manager",
".",
"insert_node",
"(",
"cat",
",",
"parent",
",",
"'last-child'",
",",
"True",
")",
"cat",
".",
"save",
"(",
")",
"if",
"parent",
":",
"parent",
".",
"rght",
"=",
"cat",
".",
"rght",
"+",
"1",
"parent",
".",
"save",
"(",
")",
"return",
"cat"
] | Make and save a category object from a string | [
"Make",
"and",
"save",
"a",
"category",
"object",
"from",
"a",
"string"
] | 3765851320a79b12c6d3306f3784a2302ea64812 | https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/management/commands/import_categories.py#L31-L46 | train |

repo: callowayproject/django-categories | path: categories/management/commands/import_categories.py | func_name: Command.parse_lines | language: python

def parse_lines(self, lines):
    """
    Do the work of parsing each line
    """
    indent = ''
    level = 0
    if lines[0][0] == ' ' or lines[0][0] == '\t':
        raise CommandError("The first line in the file cannot start with a space or tab.")
    # This keeps track of the current parents at a given level
    current_parents = {0: None}
    for line in lines:
        if len(line) == 0:
            continue
        if line[0] == ' ' or line[0] == '\t':
            if indent == '':
                indent = self.get_indent(line)
            elif not line[0] in indent:
                raise CommandError("You can't mix spaces and tabs for indents")
            level = line.count(indent)
            current_parents[level] = self.make_category(line, parent=current_parents[level - 1])
        else:
            # We are back to a zero level, so reset the whole thing
            current_parents = {0: self.make_category(line)}
    current_parents[0]._tree_manager.rebuild()
"""
Do the work of parsing each line
"""
indent = ''
level = 0
if lines[0][0] == ' ' or lines[0][0] == '\t':
raise CommandError("The first line in the file cannot start with a space or tab.")
# This keeps track of the current parents at a given level
current_parents = {0: None}
for line in lines:
if len(line) == 0:
continue
if line[0] == ' ' or line[0] == '\t':
if indent == '':
indent = self.get_indent(line)
elif not line[0] in indent:
raise CommandError("You can't mix spaces and tabs for indents")
level = line.count(indent)
current_parents[level] = self.make_category(line, parent=current_parents[level - 1])
else:
# We are back to a zero level, so reset the whole thing
current_parents = {0: self.make_category(line)}
current_parents[0]._tree_manager.rebuild() | [
"def",
"parse_lines",
"(",
"self",
",",
"lines",
")",
":",
"indent",
"=",
"''",
"level",
"=",
"0",
"if",
"lines",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"' '",
"or",
"lines",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"'\\t'",
":",
"raise",
"CommandError",
"(",
"\"The first line in the file cannot start with a space or tab.\"",
")",
"# This keeps track of the current parents at a given level",
"current_parents",
"=",
"{",
"0",
":",
"None",
"}",
"for",
"line",
"in",
"lines",
":",
"if",
"len",
"(",
"line",
")",
"==",
"0",
":",
"continue",
"if",
"line",
"[",
"0",
"]",
"==",
"' '",
"or",
"line",
"[",
"0",
"]",
"==",
"'\\t'",
":",
"if",
"indent",
"==",
"''",
":",
"indent",
"=",
"self",
".",
"get_indent",
"(",
"line",
")",
"elif",
"not",
"line",
"[",
"0",
"]",
"in",
"indent",
":",
"raise",
"CommandError",
"(",
"\"You can't mix spaces and tabs for indents\"",
")",
"level",
"=",
"line",
".",
"count",
"(",
"indent",
")",
"current_parents",
"[",
"level",
"]",
"=",
"self",
".",
"make_category",
"(",
"line",
",",
"parent",
"=",
"current_parents",
"[",
"level",
"-",
"1",
"]",
")",
"else",
":",
"# We are back to a zero level, so reset the whole thing",
"current_parents",
"=",
"{",
"0",
":",
"self",
".",
"make_category",
"(",
"line",
")",
"}",
"current_parents",
"[",
"0",
"]",
".",
"_tree_manager",
".",
"rebuild",
"(",
")"
] | Do the work of parsing each line | [
"Do",
"the",
"work",
"of",
"parsing",
"each",
"line"
] | 3765851320a79b12c6d3306f3784a2302ea64812 | https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/management/commands/import_categories.py#L48-L74 | train |
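
To make the level computation concrete, a hypothetical input file, one category per line, children indented one unit deeper than their parent:

Sports
    Soccer
        Premier League
    Tennis
Arts
    Music

'Premier League' lands at level 2 because line.count(indent) finds the four-space unit twice; each unindented line resets current_parents with a new root.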

repo: callowayproject/django-categories | path: categories/management/commands/import_categories.py | func_name: Command.handle | language: python

def handle(self, *file_paths, **options):
    """
    Handle the basic import
    """
    import os

    for file_path in file_paths:
        if not os.path.isfile(file_path):
            print("File %s not found." % file_path)
            continue
        with open(file_path, 'r') as f:
            data = f.readlines()
        self.parse_lines(data)
"""
Handle the basic import
"""
import os
for file_path in file_paths:
if not os.path.isfile(file_path):
print("File %s not found." % file_path)
continue
f = open(file_path, 'r')
data = f.readlines()
f.close()
self.parse_lines(data) | [
"def",
"handle",
"(",
"self",
",",
"*",
"file_paths",
",",
"*",
"*",
"options",
")",
":",
"import",
"os",
"for",
"file_path",
"in",
"file_paths",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"file_path",
")",
":",
"print",
"(",
"\"File %s not found.\"",
"%",
"file_path",
")",
"continue",
"f",
"=",
"open",
"(",
"file_path",
",",
"'r'",
")",
"data",
"=",
"f",
".",
"readlines",
"(",
")",
"f",
".",
"close",
"(",
")",
"self",
".",
"parse_lines",
"(",
"data",
")"
] | Handle the basic import | [
"Handle",
"the",
"basic",
"import"
] | 3765851320a79b12c6d3306f3784a2302ea64812 | https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/management/commands/import_categories.py#L76-L90 | train |
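
Together with get_indent, make_category and parse_lines above, this gives an indentation-driven importer; a typical invocation would be (file name hypothetical):

python manage.py import_categories my_categories.txt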

repo: callowayproject/django-categories | path: categories/templatetags/category_tags.py | func_name: get_cat_model | language: python

def get_cat_model(model):
    """
    Return a model class from a string or class
    """
    model_class = None  # guard against falling through both branches below
    try:
        if isinstance(model, string_types):
            model_class = apps.get_model(*model.split("."))
        elif issubclass(model, CategoryBase):
            model_class = model
        if model_class is None:
            raise TypeError
    except TypeError:
        raise TemplateSyntaxError("Unknown model submitted: %s" % model)
    return model_class
"""
Return a class from a string or class
"""
try:
if isinstance(model, string_types):
model_class = apps.get_model(*model.split("."))
elif issubclass(model, CategoryBase):
model_class = model
if model_class is None:
raise TypeError
except TypeError:
raise TemplateSyntaxError("Unknown model submitted: %s" % model)
return model_class | [
"def",
"get_cat_model",
"(",
"model",
")",
":",
"try",
":",
"if",
"isinstance",
"(",
"model",
",",
"string_types",
")",
":",
"model_class",
"=",
"apps",
".",
"get_model",
"(",
"*",
"model",
".",
"split",
"(",
"\".\"",
")",
")",
"elif",
"issubclass",
"(",
"model",
",",
"CategoryBase",
")",
":",
"model_class",
"=",
"model",
"if",
"model_class",
"is",
"None",
":",
"raise",
"TypeError",
"except",
"TypeError",
":",
"raise",
"TemplateSyntaxError",
"(",
"\"Unknown model submitted: %s\"",
"%",
"model",
")",
"return",
"model_class"
] | Return a class from a string or class | [
"Return",
"a",
"class",
"from",
"a",
"string",
"or",
"class"
] | 3765851320a79b12c6d3306f3784a2302ea64812 | https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/templatetags/category_tags.py#L29-L42 | train |

repo: callowayproject/django-categories | path: categories/templatetags/category_tags.py | func_name: get_category | language: python

def get_category(category_string, model=Category):
    """
    Convert a string, including a path, and return the Category object
    """
    model_class = get_cat_model(model)
    category = str(category_string).strip("'\"")
    category = category.strip('/')
    cat_list = category.split('/')
    if len(cat_list) == 0:
        return None
    try:
        categories = model_class.objects.filter(name=cat_list[-1], level=len(cat_list) - 1)
        if len(cat_list) == 1 and len(categories) > 1:
            return None
        # If there is only one, use it. If there is more than one, check
        # if the parent matches the parent passed in the string
        if len(categories) == 1:
            return categories[0]
        else:
            for item in categories:
                if item.parent.name == cat_list[-2]:
                    return item
    except model_class.DoesNotExist:
        return None
"""
Convert a string, including a path, and return the Category object
"""
model_class = get_cat_model(model)
category = str(category_string).strip("'\"")
category = category.strip('/')
cat_list = category.split('/')
if len(cat_list) == 0:
return None
try:
categories = model_class.objects.filter(name=cat_list[-1], level=len(cat_list) - 1)
if len(cat_list) == 1 and len(categories) > 1:
return None
# If there is only one, use it. If there is more than one, check
# if the parent matches the parent passed in the string
if len(categories) == 1:
return categories[0]
else:
for item in categories:
if item.parent.name == cat_list[-2]:
return item
except model_class.DoesNotExist:
return None | [
"def",
"get_category",
"(",
"category_string",
",",
"model",
"=",
"Category",
")",
":",
"model_class",
"=",
"get_cat_model",
"(",
"model",
")",
"category",
"=",
"str",
"(",
"category_string",
")",
".",
"strip",
"(",
"\"'\\\"\"",
")",
"category",
"=",
"category",
".",
"strip",
"(",
"'/'",
")",
"cat_list",
"=",
"category",
".",
"split",
"(",
"'/'",
")",
"if",
"len",
"(",
"cat_list",
")",
"==",
"0",
":",
"return",
"None",
"try",
":",
"categories",
"=",
"model_class",
".",
"objects",
".",
"filter",
"(",
"name",
"=",
"cat_list",
"[",
"-",
"1",
"]",
",",
"level",
"=",
"len",
"(",
"cat_list",
")",
"-",
"1",
")",
"if",
"len",
"(",
"cat_list",
")",
"==",
"1",
"and",
"len",
"(",
"categories",
")",
">",
"1",
":",
"return",
"None",
"# If there is only one, use it. If there is more than one, check",
"# if the parent matches the parent passed in the string",
"if",
"len",
"(",
"categories",
")",
"==",
"1",
":",
"return",
"categories",
"[",
"0",
"]",
"else",
":",
"for",
"item",
"in",
"categories",
":",
"if",
"item",
".",
"parent",
".",
"name",
"==",
"cat_list",
"[",
"-",
"2",
"]",
":",
"return",
"item",
"except",
"model_class",
".",
"DoesNotExist",
":",
"return",
"None"
] | Convert a string, including a path, and return the Category object | [
"Convert",
"a",
"string",
"including",
"a",
"path",
"and",
"return",
"the",
"Category",
"object"
] | 3765851320a79b12c6d3306f3784a2302ea64812 | https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/templatetags/category_tags.py#L45-L69 | train |
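
A resolution sketch with hypothetical data, showing how the path disambiguates same-named categories:

get_category("/Grandparent/Parent")
# filters on name='Parent' and level=1; if several match,
# returns the one whose parent is named 'Grandparent'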

repo: callowayproject/django-categories | path: categories/templatetags/category_tags.py | func_name: get_category_drilldown | language: python

def get_category_drilldown(parser, token):
    """
    Retrieves the specified category, its ancestors and its immediate children
    as an iterable.

    Syntax::

        {% get_category_drilldown "category name" [using "app.Model"] as varname %}

    Example::

        {% get_category_drilldown "/Grandparent/Parent" [using "app.Model"] as family %}

    or ::

        {% get_category_drilldown category_obj as family %}

    Sets family to::

        Grandparent, Parent, Child 1, Child 2, Child n
    """
    bits = token.split_contents()
    error_str = '%(tagname)s tag should be in the format {%% %(tagname)s ' \
                '"category name" [using "app.Model"] as varname %%} or ' \
                '{%% %(tagname)s category_obj as varname %%}.'
    if len(bits) not in (4, 6):
        raise template.TemplateSyntaxError(error_str % {'tagname': bits[0]})
    if len(bits) == 4:
        if bits[2] != 'as':
            raise template.TemplateSyntaxError(error_str % {'tagname': bits[0]})
        varname = bits[3].strip("'\"")
        model = "categories.category"
    if len(bits) == 6:
        if bits[2] not in ('using', 'as') or bits[4] not in ('using', 'as'):
            raise template.TemplateSyntaxError(error_str % {'tagname': bits[0]})
        if bits[2] == 'as':
            varname = bits[3].strip("'\"")
            model = bits[5].strip("'\"")
        if bits[2] == 'using':
            varname = bits[5].strip("'\"")
            model = bits[3].strip("'\"")
    category = FilterExpression(bits[1], parser)
    return CategoryDrillDownNode(category, varname, model)
"""
Retrieves the specified category, its ancestors and its immediate children
as an iterable.
Syntax::
{% get_category_drilldown "category name" [using "app.Model"] as varname %}
Example::
{% get_category_drilldown "/Grandparent/Parent" [using "app.Model"] as family %}
or ::
{% get_category_drilldown category_obj as family %}
Sets family to::
Grandparent, Parent, Child 1, Child 2, Child n
"""
bits = token.split_contents()
error_str = '%(tagname)s tag should be in the format {%% %(tagname)s ' \
'"category name" [using "app.Model"] as varname %%} or ' \
'{%% %(tagname)s category_obj as varname %%}.'
if len(bits) == 4:
if bits[2] != 'as':
raise template.TemplateSyntaxError(error_str % {'tagname': bits[0]})
if bits[2] == 'as':
varname = bits[3].strip("'\"")
model = "categories.category"
if len(bits) == 6:
if bits[2] not in ('using', 'as') or bits[4] not in ('using', 'as'):
raise template.TemplateSyntaxError(error_str % {'tagname': bits[0]})
if bits[2] == 'as':
varname = bits[3].strip("'\"")
model = bits[5].strip("'\"")
if bits[2] == 'using':
varname = bits[5].strip("'\"")
model = bits[3].strip("'\"")
category = FilterExpression(bits[1], parser)
return CategoryDrillDownNode(category, varname, model) | [
"def",
"get_category_drilldown",
"(",
"parser",
",",
"token",
")",
":",
"bits",
"=",
"token",
".",
"split_contents",
"(",
")",
"error_str",
"=",
"'%(tagname)s tag should be in the format {%% %(tagname)s '",
"'\"category name\" [using \"app.Model\"] as varname %%} or '",
"'{%% %(tagname)s category_obj as varname %%}.'",
"if",
"len",
"(",
"bits",
")",
"==",
"4",
":",
"if",
"bits",
"[",
"2",
"]",
"!=",
"'as'",
":",
"raise",
"template",
".",
"TemplateSyntaxError",
"(",
"error_str",
"%",
"{",
"'tagname'",
":",
"bits",
"[",
"0",
"]",
"}",
")",
"if",
"bits",
"[",
"2",
"]",
"==",
"'as'",
":",
"varname",
"=",
"bits",
"[",
"3",
"]",
".",
"strip",
"(",
"\"'\\\"\"",
")",
"model",
"=",
"\"categories.category\"",
"if",
"len",
"(",
"bits",
")",
"==",
"6",
":",
"if",
"bits",
"[",
"2",
"]",
"not",
"in",
"(",
"'using'",
",",
"'as'",
")",
"or",
"bits",
"[",
"4",
"]",
"not",
"in",
"(",
"'using'",
",",
"'as'",
")",
":",
"raise",
"template",
".",
"TemplateSyntaxError",
"(",
"error_str",
"%",
"{",
"'tagname'",
":",
"bits",
"[",
"0",
"]",
"}",
")",
"if",
"bits",
"[",
"2",
"]",
"==",
"'as'",
":",
"varname",
"=",
"bits",
"[",
"3",
"]",
".",
"strip",
"(",
"\"'\\\"\"",
")",
"model",
"=",
"bits",
"[",
"5",
"]",
".",
"strip",
"(",
"\"'\\\"\"",
")",
"if",
"bits",
"[",
"2",
"]",
"==",
"'using'",
":",
"varname",
"=",
"bits",
"[",
"5",
"]",
".",
"strip",
"(",
"\"'\\\"\"",
")",
"model",
"=",
"bits",
"[",
"3",
"]",
".",
"strip",
"(",
"\"'\\\"\"",
")",
"category",
"=",
"FilterExpression",
"(",
"bits",
"[",
"1",
"]",
",",
"parser",
")",
"return",
"CategoryDrillDownNode",
"(",
"category",
",",
"varname",
",",
"model",
")"
] | Retrieves the specified category, its ancestors and its immediate children
as an iterable.
Syntax::
{% get_category_drilldown "category name" [using "app.Model"] as varname %}
Example::
{% get_category_drilldown "/Grandparent/Parent" [using "app.Model"] as family %}
or ::
{% get_category_drilldown category_obj as family %}
Sets family to::
Grandparent, Parent, Child 1, Child 2, Child n | [
"Retrieves",
"the",
"specified",
"category",
"its",
"ancestors",
"and",
"its",
"immediate",
"children",
"as",
"an",
"iterable",
"."
] | 3765851320a79b12c6d3306f3784a2302ea64812 | https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/templatetags/category_tags.py#L95-L136 | train |

repo: callowayproject/django-categories | path: categories/templatetags/category_tags.py | func_name: get_top_level_categories | language: python

def get_top_level_categories(parser, token):
    """
    Retrieves an alphabetical list of all the categories that have no parents.

    Syntax::

        {% get_top_level_categories [using "app.Model"] as categories %}

    Returns a list of categories [<category>, <category>, <category>, ...]
    """
    bits = token.split_contents()
    usage = 'Usage: {%% %s [using "app.Model"] as <variable> %%}' % bits[0]
    if len(bits) == 3:
        if bits[1] != 'as':
            raise template.TemplateSyntaxError(usage)
        varname = bits[2]
        model = "categories.category"
    elif len(bits) == 5:
        if bits[1] not in ('as', 'using') or bits[3] not in ('as', 'using'):
            raise template.TemplateSyntaxError(usage)
        if bits[1] == 'using':
            model = bits[2].strip("'\"")
            varname = bits[4].strip("'\"")
        else:
            model = bits[4].strip("'\"")
            varname = bits[2].strip("'\"")
    else:
        raise template.TemplateSyntaxError(usage)
    return TopLevelCategoriesNode(varname, model)
"""
Retrieves an alphabetical list of all the categories that have no parents.
Syntax::
{% get_top_level_categories [using "app.Model"] as categories %}
Returns an list of categories [<category>, <category>, <category, ...]
"""
bits = token.split_contents()
usage = 'Usage: {%% %s [using "app.Model"] as <variable> %%}' % bits[0]
if len(bits) == 3:
if bits[1] != 'as':
raise template.TemplateSyntaxError(usage)
varname = bits[2]
model = "categories.category"
elif len(bits) == 5:
if bits[1] not in ('as', 'using') and bits[3] not in ('as', 'using'):
raise template.TemplateSyntaxError(usage)
if bits[1] == 'using':
model = bits[2].strip("'\"")
varname = bits[4].strip("'\"")
else:
model = bits[4].strip("'\"")
varname = bits[2].strip("'\"")
return TopLevelCategoriesNode(varname, model) | [
"def",
"get_top_level_categories",
"(",
"parser",
",",
"token",
")",
":",
"bits",
"=",
"token",
".",
"split_contents",
"(",
")",
"usage",
"=",
"'Usage: {%% %s [using \"app.Model\"] as <variable> %%}'",
"%",
"bits",
"[",
"0",
"]",
"if",
"len",
"(",
"bits",
")",
"==",
"3",
":",
"if",
"bits",
"[",
"1",
"]",
"!=",
"'as'",
":",
"raise",
"template",
".",
"TemplateSyntaxError",
"(",
"usage",
")",
"varname",
"=",
"bits",
"[",
"2",
"]",
"model",
"=",
"\"categories.category\"",
"elif",
"len",
"(",
"bits",
")",
"==",
"5",
":",
"if",
"bits",
"[",
"1",
"]",
"not",
"in",
"(",
"'as'",
",",
"'using'",
")",
"and",
"bits",
"[",
"3",
"]",
"not",
"in",
"(",
"'as'",
",",
"'using'",
")",
":",
"raise",
"template",
".",
"TemplateSyntaxError",
"(",
"usage",
")",
"if",
"bits",
"[",
"1",
"]",
"==",
"'using'",
":",
"model",
"=",
"bits",
"[",
"2",
"]",
".",
"strip",
"(",
"\"'\\\"\"",
")",
"varname",
"=",
"bits",
"[",
"4",
"]",
".",
"strip",
"(",
"\"'\\\"\"",
")",
"else",
":",
"model",
"=",
"bits",
"[",
"4",
"]",
".",
"strip",
"(",
"\"'\\\"\"",
")",
"varname",
"=",
"bits",
"[",
"2",
"]",
".",
"strip",
"(",
"\"'\\\"\"",
")",
"return",
"TopLevelCategoriesNode",
"(",
"varname",
",",
"model",
")"
] | Retrieves an alphabetical list of all the categories that have no parents.
Syntax::
{% get_top_level_categories [using "app.Model"] as categories %}
Returns an list of categories [<category>, <category>, <category, ...] | [
"Retrieves",
"an",
"alphabetical",
"list",
"of",
"all",
"the",
"categories",
"that",
"have",
"no",
"parents",
"."
] | 3765851320a79b12c6d3306f3784a2302ea64812 | https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/templatetags/category_tags.py#L237-L264 | train |

repo: callowayproject/django-categories | path: categories/templatetags/category_tags.py | func_name: tree_queryset | language: python

def tree_queryset(value):
    """
    Converts a normal queryset from an MPTT model to include all the ancestors
    so a filtered subset of items can be formatted correctly
    """
    from copy import deepcopy

    from django.db.models.query import QuerySet

    if not isinstance(value, QuerySet):
        return value

    qs = value
    qs2 = deepcopy(qs)
    # Reaching into the bowels of query sets to find out whether the qs is
    # actually filtered and we need to do the INCLUDE_ANCESTORS dance at all.
    # INCLUDE_ANCESTORS is quite expensive, so don't do it if not needed.
    is_filtered = bool(qs.query.where.children)
    if is_filtered:
        include_pages = set()
        # Order by 'rght' will return the tree's deepest nodes first;
        # this cuts down the number of queries considerably since all ancestors
        # will already be in include_pages when they are checked, thus not
        # triggering additional queries.
        for p in qs2.order_by('rght').iterator():
            if p.parent_id and p.parent_id not in include_pages and p.id not in include_pages:
                ancestor_id_list = p.get_ancestors().values_list('id', flat=True)
                include_pages.update(ancestor_id_list)
        if include_pages:
            qs = qs | qs.model._default_manager.filter(id__in=include_pages)
        qs = qs.distinct()
    return qs
"""
Converts a normal queryset from an MPTT model to include all the ancestors
so a filtered subset of items can be formatted correctly
"""
from django.db.models.query import QuerySet
from copy import deepcopy
if not isinstance(value, QuerySet):
return value
qs = value
qs2 = deepcopy(qs)
# Reaching into the bowels of query sets to find out whether the qs is
# actually filtered and we need to do the INCLUDE_ANCESTORS dance at all.
# INCLUDE_ANCESTORS is quite expensive, so don't do it if not needed.
is_filtered = bool(qs.query.where.children)
if is_filtered:
include_pages = set()
# Order by 'rght' will return the tree deepest nodes first;
# this cuts down the number of queries considerably since all ancestors
# will already be in include_pages when they are checked, thus not
# trigger additional queries.
for p in qs2.order_by('rght').iterator():
if p.parent_id and p.parent_id not in include_pages and p.id not in include_pages:
ancestor_id_list = p.get_ancestors().values_list('id', flat=True)
include_pages.update(ancestor_id_list)
if include_pages:
qs = qs | qs.model._default_manager.filter(id__in=include_pages)
qs = qs.distinct()
return qs | [
"def",
"tree_queryset",
"(",
"value",
")",
":",
"from",
"django",
".",
"db",
".",
"models",
".",
"query",
"import",
"QuerySet",
"from",
"copy",
"import",
"deepcopy",
"if",
"not",
"isinstance",
"(",
"value",
",",
"QuerySet",
")",
":",
"return",
"value",
"qs",
"=",
"value",
"qs2",
"=",
"deepcopy",
"(",
"qs",
")",
"# Reaching into the bowels of query sets to find out whether the qs is",
"# actually filtered and we need to do the INCLUDE_ANCESTORS dance at all.",
"# INCLUDE_ANCESTORS is quite expensive, so don't do it if not needed.",
"is_filtered",
"=",
"bool",
"(",
"qs",
".",
"query",
".",
"where",
".",
"children",
")",
"if",
"is_filtered",
":",
"include_pages",
"=",
"set",
"(",
")",
"# Order by 'rght' will return the tree deepest nodes first;",
"# this cuts down the number of queries considerably since all ancestors",
"# will already be in include_pages when they are checked, thus not",
"# trigger additional queries.",
"for",
"p",
"in",
"qs2",
".",
"order_by",
"(",
"'rght'",
")",
".",
"iterator",
"(",
")",
":",
"if",
"p",
".",
"parent_id",
"and",
"p",
".",
"parent_id",
"not",
"in",
"include_pages",
"and",
"p",
".",
"id",
"not",
"in",
"include_pages",
":",
"ancestor_id_list",
"=",
"p",
".",
"get_ancestors",
"(",
")",
".",
"values_list",
"(",
"'id'",
",",
"flat",
"=",
"True",
")",
"include_pages",
".",
"update",
"(",
"ancestor_id_list",
")",
"if",
"include_pages",
":",
"qs",
"=",
"qs",
"|",
"qs",
".",
"model",
".",
"_default_manager",
".",
"filter",
"(",
"id__in",
"=",
"include_pages",
")",
"qs",
"=",
"qs",
".",
"distinct",
"(",
")",
"return",
"qs"
] | Converts a normal queryset from an MPTT model to include all the ancestors
so a filtered subset of items can be formatted correctly | [
"Converts",
"a",
"normal",
"queryset",
"from",
"an",
"MPTT",
"model",
"to",
"include",
"all",
"the",
"ancestors",
"so",
"a",
"filtered",
"subset",
"of",
"items",
"can",
"be",
"formatted",
"correctly"
] | 3765851320a79b12c6d3306f3784a2302ea64812 | https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/templatetags/category_tags.py#L346-L377 | train |

repo: maweigert/gputools | path: gputools/convolve/convolve.py | func_name: convolve | language: python

def convolve(data, h, res_g=None, sub_blocks=None):
    """
    convolves 1d-3d data with kernel h

    data and h can either be numpy arrays or gpu buffer objects (OCLArray,
    which must be float32 then)

    boundary conditions are clamping to zero at edge.
    """
    if not len(data.shape) in [1, 2, 3]:
        raise ValueError("dim = %s not supported" % (len(data.shape)))

    if len(data.shape) != len(h.shape):
        raise ValueError("dimension of data (%s) and h (%s) are different" % (len(data.shape), len(h.shape)))

    if isinstance(data, OCLArray) and isinstance(h, OCLArray):
        return _convolve_buf(data, h, res_g)
    elif isinstance(data, np.ndarray) and isinstance(h, np.ndarray):
        if sub_blocks == (1,) * len(data.shape) or sub_blocks is None:
            return _convolve_np(data, h)
        else:
            # cut the image into tiles and operate on each of them
            N_sub = [int(np.ceil(1. * n / s)) for n, s in zip(data.shape, sub_blocks)]
            Npads = [int(s / 2) for s in h.shape]
            res = np.empty(data.shape, np.float32)
            for data_tile, data_s_src, data_s_dest \
                    in tile_iterator(data, blocksize=N_sub,
                                     padsize=Npads,
                                     mode="constant"):
                res_tile = _convolve_np(data_tile.copy(), h)
                res[data_s_src] = res_tile[data_s_dest]
            return res
    else:
        raise TypeError("unknown types (%s, %s)" % (type(data), type(h)))
"""
convolves 1d-3d data with kernel h
data and h can either be numpy arrays or gpu buffer objects (OCLArray,
which must be float32 then)
boundary conditions are clamping to zero at edge.
"""
if not len(data.shape) in [1, 2, 3]:
raise ValueError("dim = %s not supported" % (len(data.shape)))
if len(data.shape) != len(h.shape):
raise ValueError("dimemnsion of data (%s) and h (%s) are different" % (len(data.shape), len(h.shape)))
if isinstance(data, OCLArray) and isinstance(h, OCLArray):
return _convolve_buf(data, h, res_g)
elif isinstance(data, np.ndarray) and isinstance(h, np.ndarray):
if sub_blocks == (1,) * len(data.shape) or sub_blocks is None:
return _convolve_np(data, h)
else:
# cut the image into tile and operate on every of them
N_sub = [int(np.ceil(1. * n / s)) for n, s in zip(data.shape, sub_blocks)]
Npads = [int(s / 2) for s in h.shape]
res = np.empty(data.shape, np.float32)
for data_tile, data_s_src, data_s_dest \
in tile_iterator(data, blocksize=N_sub,
padsize=Npads,
mode="constant"):
res_tile = _convolve_np(data_tile.copy(),
h)
res[data_s_src] = res_tile[data_s_dest]
return res
else:
raise TypeError("unknown types (%s, %s)" % (type(data), type(h))) | [
"def",
"convolve",
"(",
"data",
",",
"h",
",",
"res_g",
"=",
"None",
",",
"sub_blocks",
"=",
"None",
")",
":",
"if",
"not",
"len",
"(",
"data",
".",
"shape",
")",
"in",
"[",
"1",
",",
"2",
",",
"3",
"]",
":",
"raise",
"ValueError",
"(",
"\"dim = %s not supported\"",
"%",
"(",
"len",
"(",
"data",
".",
"shape",
")",
")",
")",
"if",
"len",
"(",
"data",
".",
"shape",
")",
"!=",
"len",
"(",
"h",
".",
"shape",
")",
":",
"raise",
"ValueError",
"(",
"\"dimemnsion of data (%s) and h (%s) are different\"",
"%",
"(",
"len",
"(",
"data",
".",
"shape",
")",
",",
"len",
"(",
"h",
".",
"shape",
")",
")",
")",
"if",
"isinstance",
"(",
"data",
",",
"OCLArray",
")",
"and",
"isinstance",
"(",
"h",
",",
"OCLArray",
")",
":",
"return",
"_convolve_buf",
"(",
"data",
",",
"h",
",",
"res_g",
")",
"elif",
"isinstance",
"(",
"data",
",",
"np",
".",
"ndarray",
")",
"and",
"isinstance",
"(",
"h",
",",
"np",
".",
"ndarray",
")",
":",
"if",
"sub_blocks",
"==",
"(",
"1",
",",
")",
"*",
"len",
"(",
"data",
".",
"shape",
")",
"or",
"sub_blocks",
"is",
"None",
":",
"return",
"_convolve_np",
"(",
"data",
",",
"h",
")",
"else",
":",
"# cut the image into tile and operate on every of them",
"N_sub",
"=",
"[",
"int",
"(",
"np",
".",
"ceil",
"(",
"1.",
"*",
"n",
"/",
"s",
")",
")",
"for",
"n",
",",
"s",
"in",
"zip",
"(",
"data",
".",
"shape",
",",
"sub_blocks",
")",
"]",
"Npads",
"=",
"[",
"int",
"(",
"s",
"/",
"2",
")",
"for",
"s",
"in",
"h",
".",
"shape",
"]",
"res",
"=",
"np",
".",
"empty",
"(",
"data",
".",
"shape",
",",
"np",
".",
"float32",
")",
"for",
"data_tile",
",",
"data_s_src",
",",
"data_s_dest",
"in",
"tile_iterator",
"(",
"data",
",",
"blocksize",
"=",
"N_sub",
",",
"padsize",
"=",
"Npads",
",",
"mode",
"=",
"\"constant\"",
")",
":",
"res_tile",
"=",
"_convolve_np",
"(",
"data_tile",
".",
"copy",
"(",
")",
",",
"h",
")",
"res",
"[",
"data_s_src",
"]",
"=",
"res_tile",
"[",
"data_s_dest",
"]",
"return",
"res",
"else",
":",
"raise",
"TypeError",
"(",
"\"unknown types (%s, %s)\"",
"%",
"(",
"type",
"(",
"data",
")",
",",
"type",
"(",
"h",
")",
")",
")"
] | convolves 1d-3d data with kernel h
data and h can either be numpy arrays or gpu buffer objects (OCLArray,
which must be float32 then)
boundary conditions are clamping to zero at edge. | [
"convolves",
"1d",
"-",
"3d",
"data",
"with",
"kernel",
"h"
] | 6ab26efeb05dceef74cf13aadeeeb9b009b529dd | https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/convolve/convolve.py#L18-L54 | train |
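
A short usage sketch for the numpy path, assuming convolve is importable from the gputools package (array contents made up):

import numpy as np
from gputools import convolve   # assumed top-level export

data = np.random.rand(256, 256).astype(np.float32)
h = np.ones((7, 7), np.float32) / 49.              # normalized box kernel

res = convolve(data, h)                            # one GPU pass
res_tiled = convolve(data, h, sub_blocks=(2, 2))   # 2x2 tiles, lower peak memory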

repo: maweigert/gputools | path: gputools/convolve/convolve.py | func_name: _convolve3_old | language: python

def _convolve3_old(data, h, dev=None):
    """convolves 3d data with kernel h on the GPU Device dev
    boundary conditions are clamping to edge.
    h is converted to float32

    if dev == None the default one is used
    """
    if dev is None:
        dev = get_device()
    if dev is None:
        raise ValueError("no OpenCLDevice found...")

    dtype = data.dtype.type
    dtypes_options = {np.float32: "",
                      np.uint16: "-D SHORTTYPE"}

    if dtype not in dtypes_options:
        raise TypeError("data type %s not supported yet, please convert to:" % dtype, list(dtypes_options.keys()))

    prog = OCLProgram(abspath("kernels/convolve3.cl"),
                      build_options=dtypes_options[dtype])

    hbuf = OCLArray.from_array(h.astype(np.float32))
    img = OCLImage.from_array(data)
    res = OCLArray.empty(data.shape, dtype=np.float32)

    Ns = [np.int32(n) for n in data.shape + h.shape]

    prog.run_kernel("convolve3d", img.shape, None,
                    img, hbuf.data, res.data,
                    *Ns)
    return res.get()
"""convolves 3d data with kernel h on the GPU Device dev
boundary conditions are clamping to edge.
h is converted to float32
if dev == None the default one is used
"""
if dev is None:
dev = get_device()
if dev is None:
raise ValueError("no OpenCLDevice found...")
dtype = data.dtype.type
dtypes_options = {np.float32: "",
np.uint16: "-D SHORTTYPE"}
if not dtype in dtypes_options:
raise TypeError("data type %s not supported yet, please convert to:" % dtype, list(dtypes_options.keys()))
prog = OCLProgram(abspath("kernels/convolve3.cl"),
build_options=dtypes_options[dtype])
hbuf = OCLArray.from_array(h.astype(np.float32))
img = OCLImage.from_array(data)
res = OCLArray.empty(data.shape, dtype=np.float32)
Ns = [np.int32(n) for n in data.shape + h.shape]
prog.run_kernel("convolve3d", img.shape, None,
img, hbuf.data, res.data,
*Ns)
return res.get() | [
"def",
"_convolve3_old",
"(",
"data",
",",
"h",
",",
"dev",
"=",
"None",
")",
":",
"if",
"dev",
"is",
"None",
":",
"dev",
"=",
"get_device",
"(",
")",
"if",
"dev",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"no OpenCLDevice found...\"",
")",
"dtype",
"=",
"data",
".",
"dtype",
".",
"type",
"dtypes_options",
"=",
"{",
"np",
".",
"float32",
":",
"\"\"",
",",
"np",
".",
"uint16",
":",
"\"-D SHORTTYPE\"",
"}",
"if",
"not",
"dtype",
"in",
"dtypes_options",
":",
"raise",
"TypeError",
"(",
"\"data type %s not supported yet, please convert to:\"",
"%",
"dtype",
",",
"list",
"(",
"dtypes_options",
".",
"keys",
"(",
")",
")",
")",
"prog",
"=",
"OCLProgram",
"(",
"abspath",
"(",
"\"kernels/convolve3.cl\"",
")",
",",
"build_options",
"=",
"dtypes_options",
"[",
"dtype",
"]",
")",
"hbuf",
"=",
"OCLArray",
".",
"from_array",
"(",
"h",
".",
"astype",
"(",
"np",
".",
"float32",
")",
")",
"img",
"=",
"OCLImage",
".",
"from_array",
"(",
"data",
")",
"res",
"=",
"OCLArray",
".",
"empty",
"(",
"data",
".",
"shape",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"Ns",
"=",
"[",
"np",
".",
"int32",
"(",
"n",
")",
"for",
"n",
"in",
"data",
".",
"shape",
"+",
"h",
".",
"shape",
"]",
"prog",
".",
"run_kernel",
"(",
"\"convolve3d\"",
",",
"img",
".",
"shape",
",",
"None",
",",
"img",
",",
"hbuf",
".",
"data",
",",
"res",
".",
"data",
",",
"*",
"Ns",
")",
"return",
"res",
".",
"get",
"(",
")"
] | convolves 3d data with kernel h on the GPU Device dev
boundary conditions are clamping to edge.
h is converted to float32
if dev == None the default one is used | [
"convolves",
"3d",
"data",
"with",
"kernel",
"h",
"on",
"the",
"GPU",
"Device",
"dev",
"boundary",
"conditions",
"are",
"clamping",
"to",
"edge",
".",
"h",
"is",
"converted",
"to",
"float32"
] | 6ab26efeb05dceef74cf13aadeeeb9b009b529dd | https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/convolve/convolve.py#L116-L151 | train |

repo: maweigert/gputools | path: gputools/transforms/scale.py | func_name: _scale_shape | language: python

def _scale_shape(dshape, scale=(1, 1, 1)):
    """returns the shape after scaling (should be the same as ndimage.zoom)"""
    nshape = np.round(np.array(dshape) * np.array(scale))
    return tuple(nshape.astype(np.int))
"""returns the shape after scaling (should be the same as ndimage.zoom"""
nshape = np.round(np.array(dshape) * np.array(scale))
return tuple(nshape.astype(np.int)) | [
"def",
"_scale_shape",
"(",
"dshape",
",",
"scale",
"=",
"(",
"1",
",",
"1",
",",
"1",
")",
")",
":",
"nshape",
"=",
"np",
".",
"round",
"(",
"np",
".",
"array",
"(",
"dshape",
")",
"*",
"np",
".",
"array",
"(",
"scale",
")",
")",
"return",
"tuple",
"(",
"nshape",
".",
"astype",
"(",
"np",
".",
"int",
")",
")"
] | returns the shape after scaling (should be the same as ndimage.zoom | [
"returns",
"the",
"shape",
"after",
"scaling",
"(",
"should",
"be",
"the",
"same",
"as",
"ndimage",
".",
"zoom"
] | 6ab26efeb05dceef74cf13aadeeeb9b009b529dd | https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/transforms/scale.py#L17-L20 | train |
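
For instance (values chosen for illustration):

_scale_shape((128, 100, 50), scale=(0.5, 0.5, 2))
# -> (64, 50, 100), the same output shape as ndimage.zoom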

repo: maweigert/gputools | path: gputools/fft/fftshift.py | func_name: fftshift | language: python

def fftshift(arr_obj, axes=None, res_g=None, return_buffer=False):
    """
    gpu version of fftshift for numpy arrays or OCLArrays

    Parameters
    ----------
    arr_obj: numpy array or OCLArray (float32/complex64)
        the array to be fftshifted
    axes: list or None
        the axes over which to shift (like np.fft.fftshift)
        if None, all axes are taken
    res_g:
        if given, fills it with the result (has to be same shape and dtype as arr_obj)
        else internally creates a new one

    Returns
    -------
    if return_buffer, returns the result as (well :) OCLArray
    else returns the result as numpy array
    """
    if axes is None:
        axes = list(range(arr_obj.ndim))

    if isinstance(arr_obj, OCLArray):
        if not arr_obj.dtype.type in DTYPE_KERNEL_NAMES:
            raise NotImplementedError("only works for float32 or complex64")
    elif isinstance(arr_obj, np.ndarray):
        if np.iscomplexobj(arr_obj):
            arr_obj = OCLArray.from_array(arr_obj.astype(np.complex64, copy=False))
        else:
            arr_obj = OCLArray.from_array(arr_obj.astype(np.float32, copy=False))
    else:
        raise ValueError("unknown type (%s)" % (type(arr_obj)))

    if not np.all([arr_obj.shape[a] % 2 == 0 for a in axes]):
        raise NotImplementedError("only works on axes of even dimensions")

    if res_g is None:
        res_g = OCLArray.empty_like(arr_obj)

    # iterate over all axes
    # FIXME: this is still rather inefficient
    in_g = arr_obj
    for ax in axes:
        _fftshift_single(in_g, res_g, ax)
        in_g = res_g

    if return_buffer:
        return res_g
    else:
        return res_g.get()
"""
gpu version of fftshift for numpy arrays or OCLArrays
Parameters
----------
arr_obj: numpy array or OCLArray (float32/complex64)
the array to be fftshifted
axes: list or None
the axes over which to shift (like np.fft.fftshift)
if None, all axes are taken
res_g:
if given, fills it with the result (has to be same shape and dtype as arr_obj)
else internally creates a new one
Returns
-------
if return_buffer, returns the result as (well :) OCLArray
else returns the result as numpy array
"""
if axes is None:
axes = list(range(arr_obj.ndim))
if isinstance(arr_obj, OCLArray):
if not arr_obj.dtype.type in DTYPE_KERNEL_NAMES:
raise NotImplementedError("only works for float32 or complex64")
elif isinstance(arr_obj, np.ndarray):
if np.iscomplexobj(arr_obj):
arr_obj = OCLArray.from_array(arr_obj.astype(np.complex64,copy = False))
else:
arr_obj = OCLArray.from_array(arr_obj.astype(np.float32,copy = False))
else:
raise ValueError("unknown type (%s)"%(type(arr_obj)))
if not np.all([arr_obj.shape[a]%2==0 for a in axes]):
raise NotImplementedError("only works on axes of even dimensions")
if res_g is None:
res_g = OCLArray.empty_like(arr_obj)
# iterate over all axes
# FIXME: this is still rather inefficient
in_g = arr_obj
for ax in axes:
_fftshift_single(in_g, res_g, ax)
in_g = res_g
if return_buffer:
return res_g
else:
return res_g.get() | [
"def",
"fftshift",
"(",
"arr_obj",
",",
"axes",
"=",
"None",
",",
"res_g",
"=",
"None",
",",
"return_buffer",
"=",
"False",
")",
":",
"if",
"axes",
"is",
"None",
":",
"axes",
"=",
"list",
"(",
"range",
"(",
"arr_obj",
".",
"ndim",
")",
")",
"if",
"isinstance",
"(",
"arr_obj",
",",
"OCLArray",
")",
":",
"if",
"not",
"arr_obj",
".",
"dtype",
".",
"type",
"in",
"DTYPE_KERNEL_NAMES",
":",
"raise",
"NotImplementedError",
"(",
"\"only works for float32 or complex64\"",
")",
"elif",
"isinstance",
"(",
"arr_obj",
",",
"np",
".",
"ndarray",
")",
":",
"if",
"np",
".",
"iscomplexobj",
"(",
"arr_obj",
")",
":",
"arr_obj",
"=",
"OCLArray",
".",
"from_array",
"(",
"arr_obj",
".",
"astype",
"(",
"np",
".",
"complex64",
",",
"copy",
"=",
"False",
")",
")",
"else",
":",
"arr_obj",
"=",
"OCLArray",
".",
"from_array",
"(",
"arr_obj",
".",
"astype",
"(",
"np",
".",
"float32",
",",
"copy",
"=",
"False",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"unknown type (%s)\"",
"%",
"(",
"type",
"(",
"arr_obj",
")",
")",
")",
"if",
"not",
"np",
".",
"all",
"(",
"[",
"arr_obj",
".",
"shape",
"[",
"a",
"]",
"%",
"2",
"==",
"0",
"for",
"a",
"in",
"axes",
"]",
")",
":",
"raise",
"NotImplementedError",
"(",
"\"only works on axes of even dimensions\"",
")",
"if",
"res_g",
"is",
"None",
":",
"res_g",
"=",
"OCLArray",
".",
"empty_like",
"(",
"arr_obj",
")",
"# iterate over all axes",
"# FIXME: this is still rather inefficient",
"in_g",
"=",
"arr_obj",
"for",
"ax",
"in",
"axes",
":",
"_fftshift_single",
"(",
"in_g",
",",
"res_g",
",",
"ax",
")",
"in_g",
"=",
"res_g",
"if",
"return_buffer",
":",
"return",
"res_g",
"else",
":",
"return",
"res_g",
".",
"get",
"(",
")"
] | gpu version of fftshift for numpy arrays or OCLArrays
Parameters
----------
arr_obj: numpy array or OCLArray (float32/complex64)
the array to be fftshifted
axes: list or None
the axes over which to shift (like np.fft.fftshift)
if None, all axes are taken
res_g:
if given, fills it with the result (has to be same shape and dtype as arr_obj)
else internally creates a new one
Returns
-------
if return_buffer, returns the result as (well :) OCLArray
else returns the result as numpy array | [
"gpu",
"version",
"of",
"fftshift",
"for",
"numpy",
"arrays",
"or",
"OCLArrays"
] | 6ab26efeb05dceef74cf13aadeeeb9b009b529dd | https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/fft/fftshift.py#L27-L80 | train |
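
A minimal check against numpy's reference implementation, assuming fftshift is importable from gputools; note the even-axes restriction above:

import numpy as np
from gputools import fftshift   # assumed top-level export

x = np.random.rand(64, 64).astype(np.float32)
assert np.allclose(fftshift(x), np.fft.fftshift(x))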

repo: maweigert/gputools | path: gputools/fft/fftshift.py | func_name: _fftshift_single | language: python

def _fftshift_single(d_g, res_g, ax=0):
    """
    basic fftshift of an OCLArray along a single axis

    shape(d_g) = [N_0, N_1, ..., N, ..., N_{k-1}, N_k]
               = [N1, N, N2]

    then we can address each element in the flat buffer by

        index = i + N2*j + N2*N*k

    where i = 0 .. N2-1, j = 0 .. N-1, k = 0 .. N1-1,
    and the swap of elements is performed on the index j
    """
    dtype_kernel_name = {np.float32: "fftshift_1_f",
                         np.complex64: "fftshift_1_c"}

    N = d_g.shape[ax]
    N1 = 1 if ax == 0 else np.prod(d_g.shape[:ax])
    N2 = 1 if ax == len(d_g.shape) - 1 else np.prod(d_g.shape[ax + 1:])

    dtype = d_g.dtype.type

    prog = OCLProgram(abspath("kernels/fftshift.cl"))
    prog.run_kernel(dtype_kernel_name[dtype], (N2, N // 2, N1), None,
                    d_g.data, res_g.data,
                    np.int32(N),
                    np.int32(N2))
    return res_g
"""
basic fftshift of an OCLArray
shape(d_g) = [N_0,N_1...., N, .... N_{k-1, N_k]
= [N1, N, N2]
the we can address each element in the flat buffer by
index = i + N2*j + N2*N*k
where i = 1 .. N2
j = 1 .. N
k = 1 .. N1
and the swap of elements is performed on the index j
"""
dtype_kernel_name = {np.float32:"fftshift_1_f",
np.complex64:"fftshift_1_c"
}
N = d_g.shape[ax]
N1 = 1 if ax==0 else np.prod(d_g.shape[:ax])
N2 = 1 if ax == len(d_g.shape)-1 else np.prod(d_g.shape[ax+1:])
dtype = d_g.dtype.type
prog = OCLProgram(abspath("kernels/fftshift.cl"))
prog.run_kernel(dtype_kernel_name[dtype],(N2,N//2,N1),None,
d_g.data, res_g.data,
np.int32(N),
np.int32(N2))
return res_g | [
"def",
"_fftshift_single",
"(",
"d_g",
",",
"res_g",
",",
"ax",
"=",
"0",
")",
":",
"dtype_kernel_name",
"=",
"{",
"np",
".",
"float32",
":",
"\"fftshift_1_f\"",
",",
"np",
".",
"complex64",
":",
"\"fftshift_1_c\"",
"}",
"N",
"=",
"d_g",
".",
"shape",
"[",
"ax",
"]",
"N1",
"=",
"1",
"if",
"ax",
"==",
"0",
"else",
"np",
".",
"prod",
"(",
"d_g",
".",
"shape",
"[",
":",
"ax",
"]",
")",
"N2",
"=",
"1",
"if",
"ax",
"==",
"len",
"(",
"d_g",
".",
"shape",
")",
"-",
"1",
"else",
"np",
".",
"prod",
"(",
"d_g",
".",
"shape",
"[",
"ax",
"+",
"1",
":",
"]",
")",
"dtype",
"=",
"d_g",
".",
"dtype",
".",
"type",
"prog",
"=",
"OCLProgram",
"(",
"abspath",
"(",
"\"kernels/fftshift.cl\"",
")",
")",
"prog",
".",
"run_kernel",
"(",
"dtype_kernel_name",
"[",
"dtype",
"]",
",",
"(",
"N2",
",",
"N",
"//",
"2",
",",
"N1",
")",
",",
"None",
",",
"d_g",
".",
"data",
",",
"res_g",
".",
"data",
",",
"np",
".",
"int32",
"(",
"N",
")",
",",
"np",
".",
"int32",
"(",
"N2",
")",
")",
"return",
"res_g"
] | basic fftshift of an OCLArray
shape(d_g) = [N_0,N_1...., N, .... N_{k-1, N_k]
= [N1, N, N2]
the we can address each element in the flat buffer by
index = i + N2*j + N2*N*k
where i = 1 .. N2
j = 1 .. N
k = 1 .. N1
and the swap of elements is performed on the index j | [
"basic",
"fftshift",
"of",
"an",
"OCLArray"
] | 6ab26efeb05dceef74cf13aadeeeb9b009b529dd | https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/fft/fftshift.py#L83-L119 | train |
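
A quick numpy check of the flat-index formula in the docstring (shape and positions chosen arbitrarily):

import numpy as np

a = np.arange(2 * 4 * 3).reshape(2, 4, 3)   # (N1, N, N2), shift axis in the middle
N1, N, N2 = a.shape
k, j, i = 1, 2, 1                           # positions along N1, N, N2
assert a.flat[i + N2 * j + N2 * N * k] == a[k, j, i]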
maweigert/gputools | gputools/fft/oclfft_convolve.py | fft_convolve | def fft_convolve(data, h, res_g = None,
plan = None, inplace = False,
kernel_is_fft = False,
kernel_is_fftshifted = False):
""" convolves data with kernel h via FFTs
data should be either a numpy array or an OCLArray (see doc for fft)
both data and h should have the same shape
if data/h are OCLArrays, then:
- type should be complex64
- shapes should be equal and a power of two
- h is assumed to be already fftshifted
(otherwise set kernel_is_fftshifted to True)
"""
if isinstance(data,np.ndarray):
return _fft_convolve_numpy(data, h,
plan = plan,
kernel_is_fft = kernel_is_fft,
kernel_is_fftshifted = kernel_is_fftshifted)
elif isinstance(data,OCLArray):
return _fft_convolve_gpu(data,h, res_g = res_g,
plan = plan, inplace = inplace,
kernel_is_fft = kernel_is_fft)
else:
raise TypeError("array argument (1) has bad type: %s"%type(data)) | python | def fft_convolve(data, h, res_g = None,
plan = None, inplace = False,
kernel_is_fft = False,
kernel_is_fftshifted = False):
""" convolves data with kernel h via FFTs
data should be either a numpy array or an OCLArray (see doc for fft)
both data and h should have the same shape
if data/h are OCLArrays, then:
- type should be complex64
- shapes should be equal and a power of two
- h is assumed to be already fftshifted
(otherwise set kernel_is_fftshifted to True)
"""
if isinstance(data,np.ndarray):
return _fft_convolve_numpy(data, h,
plan = plan,
kernel_is_fft = kernel_is_fft,
kernel_is_fftshifted = kernel_is_fftshifted)
elif isinstance(data,OCLArray):
return _fft_convolve_gpu(data,h, res_g = res_g,
plan = plan, inplace = inplace,
kernel_is_fft = kernel_is_fft)
else:
raise TypeError("array argument (1) has bad type: %s"%type(data)) | [
"def",
"fft_convolve",
"(",
"data",
",",
"h",
",",
"res_g",
"=",
"None",
",",
"plan",
"=",
"None",
",",
"inplace",
"=",
"False",
",",
"kernel_is_fft",
"=",
"False",
",",
"kernel_is_fftshifted",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"np",
".",
"ndarray",
")",
":",
"return",
"_fft_convolve_numpy",
"(",
"data",
",",
"h",
",",
"plan",
"=",
"plan",
",",
"kernel_is_fft",
"=",
"kernel_is_fft",
",",
"kernel_is_fftshifted",
"=",
"kernel_is_fftshifted",
")",
"elif",
"isinstance",
"(",
"data",
",",
"OCLArray",
")",
":",
"return",
"_fft_convolve_gpu",
"(",
"data",
",",
"h",
",",
"res_g",
"=",
"res_g",
",",
"plan",
"=",
"plan",
",",
"inplace",
"=",
"inplace",
",",
"kernel_is_fft",
"=",
"kernel_is_fft",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"array argument (1) has bad type: %s\"",
"%",
"type",
"(",
"data",
")",
")"
] | convolves data with kernel h via FFTs
data should be either a numpy array or an OCLArray (see doc for fft)
both data and h should have the same shape
if data/h are OCLArrays, then:
- type should be complex64
- shapes should be equal and a power of two
- h is assumed to be already fftshifted
(otherwise set kernel_is_fftshifted to True) | [
"convolves",
"data",
"with",
"kernel",
"h",
"via",
"FFTs"
] | 6ab26efeb05dceef74cf13aadeeeb9b009b529dd | https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/fft/oclfft_convolve.py#L15-L45 | train |
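Usage sketch for fft_convolve (top-level import assumed): data and kernel must share a shape, so the kernel is typically embedded centered in a zero array of the data's shape:

import numpy as np
from gputools import fft_convolve   # assumed re-export at the package root

data = np.random.rand(128, 128).astype(np.float32)
h = np.zeros_like(data)
h[62:67, 62:67] = 1. / 25           # centered 5x5 box kernel
res = fft_convolve(data, h)         # h is fftshifted internally by default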
maweigert/gputools | gputools/fft/oclfft_convolve.py | _fft_convolve_numpy | def _fft_convolve_numpy(data, h, plan = None,
kernel_is_fft = False,
kernel_is_fftshifted = False):
""" convolving via opencl fft for numpy arrays
data and h must have the same size
"""
if data.shape != h.shape:
raise ValueError("data and kernel must have same size! %s vs %s "%(str(data.shape),str(h.shape)))
data_g = OCLArray.from_array(data.astype(np.complex64))
if not kernel_is_fftshifted:
h = np.fft.fftshift(h)
h_g = OCLArray.from_array(h.astype(np.complex64))
res_g = OCLArray.empty_like(data_g)
_fft_convolve_gpu(data_g,h_g,res_g = res_g,
plan = plan,
kernel_is_fft = kernel_is_fft)
res = abs(res_g.get())
del data_g
del h_g
del res_g
return res | python | def _fft_convolve_numpy(data, h, plan = None,
kernel_is_fft = False,
kernel_is_fftshifted = False):
""" convolving via opencl fft for numpy arrays
data and h must have the same size
"""
if data.shape != h.shape:
raise ValueError("data and kernel must have same size! %s vs %s "%(str(data.shape),str(h.shape)))
data_g = OCLArray.from_array(data.astype(np.complex64))
if not kernel_is_fftshifted:
h = np.fft.fftshift(h)
h_g = OCLArray.from_array(h.astype(np.complex64))
res_g = OCLArray.empty_like(data_g)
_fft_convolve_gpu(data_g,h_g,res_g = res_g,
plan = plan,
kernel_is_fft = kernel_is_fft)
res = abs(res_g.get())
del data_g
del h_g
del res_g
return res | [
"def",
"_fft_convolve_numpy",
"(",
"data",
",",
"h",
",",
"plan",
"=",
"None",
",",
"kernel_is_fft",
"=",
"False",
",",
"kernel_is_fftshifted",
"=",
"False",
")",
":",
"if",
"data",
".",
"shape",
"!=",
"h",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"\"data and kernel must have same size! %s vs %s \"",
"%",
"(",
"str",
"(",
"data",
".",
"shape",
")",
",",
"str",
"(",
"h",
".",
"shape",
")",
")",
")",
"data_g",
"=",
"OCLArray",
".",
"from_array",
"(",
"data",
".",
"astype",
"(",
"np",
".",
"complex64",
")",
")",
"if",
"not",
"kernel_is_fftshifted",
":",
"h",
"=",
"np",
".",
"fft",
".",
"fftshift",
"(",
"h",
")",
"h_g",
"=",
"OCLArray",
".",
"from_array",
"(",
"h",
".",
"astype",
"(",
"np",
".",
"complex64",
")",
")",
"res_g",
"=",
"OCLArray",
".",
"empty_like",
"(",
"data_g",
")",
"_fft_convolve_gpu",
"(",
"data_g",
",",
"h_g",
",",
"res_g",
"=",
"res_g",
",",
"plan",
"=",
"plan",
",",
"kernel_is_fft",
"=",
"kernel_is_fft",
")",
"res",
"=",
"abs",
"(",
"res_g",
".",
"get",
"(",
")",
")",
"del",
"data_g",
"del",
"h_g",
"del",
"res_g",
"return",
"res"
] | convolving via opencl fft for numpy arrays
data and h must have the same size | [
"convolving",
"via",
"opencl",
"fft",
"for",
"numpy",
"arrays"
] | 6ab26efeb05dceef74cf13aadeeeb9b009b529dd | https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/fft/oclfft_convolve.py#L49-L80 | train |
maweigert/gputools | gputools/fft/oclfft_convolve.py | _fft_convolve_gpu | def _fft_convolve_gpu(data_g, h_g, res_g = None,
plan = None, inplace = False,
kernel_is_fft = False):
""" fft convolve for gpu buffer
"""
assert_bufs_type(np.complex64,data_g,h_g)
if data_g.shape != h_g.shape:
raise ValueError("data and kernel must have same size! %s vs %s "%(str(data_g.shape),str(h_g.shape)))
if plan is None:
plan = fft_plan(data_g.shape)
if inplace:
res_g = data_g
else:
if res_g is None:
res_g = OCLArray.empty(data_g.shape,data_g.dtype)
res_g.copy_buffer(data_g)
if not kernel_is_fft:
kern_g = OCLArray.empty(h_g.shape,h_g.dtype)
kern_g.copy_buffer(h_g)
fft(kern_g,inplace=True, plan = plan)
else:
kern_g = h_g
fft(res_g,inplace=True, plan = plan)
#multiply in fourier domain
_complex_multiply_kernel(res_g,kern_g)
fft(res_g,inplace = True, inverse = True, plan = plan)
return res_g | python | def _fft_convolve_gpu(data_g, h_g, res_g = None,
plan = None, inplace = False,
kernel_is_fft = False):
""" fft convolve for gpu buffer
"""
assert_bufs_type(np.complex64,data_g,h_g)
if data_g.shape != h_g.shape:
raise ValueError("data and kernel must have same size! %s vs %s "%(str(data_g.shape),str(h_g.shape)))
if plan is None:
plan = fft_plan(data_g.shape)
if inplace:
res_g = data_g
else:
if res_g is None:
res_g = OCLArray.empty(data_g.shape,data_g.dtype)
res_g.copy_buffer(data_g)
if not kernel_is_fft:
kern_g = OCLArray.empty(h_g.shape,h_g.dtype)
kern_g.copy_buffer(h_g)
fft(kern_g,inplace=True, plan = plan)
else:
kern_g = h_g
fft(res_g,inplace=True, plan = plan)
#multiply in fourier domain
_complex_multiply_kernel(res_g,kern_g)
fft(res_g,inplace = True, inverse = True, plan = plan)
return res_g | [
"def",
"_fft_convolve_gpu",
"(",
"data_g",
",",
"h_g",
",",
"res_g",
"=",
"None",
",",
"plan",
"=",
"None",
",",
"inplace",
"=",
"False",
",",
"kernel_is_fft",
"=",
"False",
")",
":",
"assert_bufs_type",
"(",
"np",
".",
"complex64",
",",
"data_g",
",",
"h_g",
")",
"if",
"data_g",
".",
"shape",
"!=",
"h_g",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"\"data and kernel must have same size! %s vs %s \"",
"%",
"(",
"str",
"(",
"data_g",
".",
"shape",
")",
",",
"str",
"(",
"h_g",
".",
"shape",
")",
")",
")",
"if",
"plan",
"is",
"None",
":",
"plan",
"=",
"fft_plan",
"(",
"data_g",
".",
"shape",
")",
"if",
"inplace",
":",
"res_g",
"=",
"data_g",
"else",
":",
"if",
"res_g",
"is",
"None",
":",
"res_g",
"=",
"OCLArray",
".",
"empty",
"(",
"data_g",
".",
"shape",
",",
"data_g",
".",
"dtype",
")",
"res_g",
".",
"copy_buffer",
"(",
"data_g",
")",
"if",
"not",
"kernel_is_fft",
":",
"kern_g",
"=",
"OCLArray",
".",
"empty",
"(",
"h_g",
".",
"shape",
",",
"h_g",
".",
"dtype",
")",
"kern_g",
".",
"copy_buffer",
"(",
"h_g",
")",
"fft",
"(",
"kern_g",
",",
"inplace",
"=",
"True",
",",
"plan",
"=",
"plan",
")",
"else",
":",
"kern_g",
"=",
"h_g",
"fft",
"(",
"res_g",
",",
"inplace",
"=",
"True",
",",
"plan",
"=",
"plan",
")",
"#multiply in fourier domain",
"_complex_multiply_kernel",
"(",
"res_g",
",",
"kern_g",
")",
"fft",
"(",
"res_g",
",",
"inplace",
"=",
"True",
",",
"inverse",
"=",
"True",
",",
"plan",
"=",
"plan",
")",
"return",
"res_g"
] | fft convolve for gpu buffer | [
"fft",
"convolve",
"for",
"gpu",
"buffer"
] | 6ab26efeb05dceef74cf13aadeeeb9b009b529dd | https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/fft/oclfft_convolve.py#L83-L124 | train |
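The GPU path above is the plain convolution theorem; a pure-numpy rendering of the same three steps (forward FFT of both operands, pointwise product, inverse FFT) for reference:

import numpy as np

def fft_convolve_reference(data, h):
    # h is assumed already fftshifted, mirroring the GPU code path
    return np.fft.ifftn(np.fft.fftn(data) * np.fft.fftn(h))

data = np.random.rand(32, 32)
h = np.fft.fftshift(np.ones((32, 32)) / 32.0 ** 2)
out = np.abs(fft_convolve_reference(data, h))   # abs, as _fft_convolve_numpy does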
maweigert/gputools | gputools/convolve/median_filter.py | median_filter | def median_filter(data, size=3, cval = 0, res_g=None, sub_blocks=None):
"""
median filter of given size
Parameters
----------
data: 2 or 3 dimensional ndarray or OCLArray of type float32
input data
size: scalar, tuple
the size of the patch to consider
cval: scalar,
the constant value for out of border access (cf mode = "constant")
res_g: OCLArray
store result in buffer if given
sub_blocks:
perform over subblock tiling (only if data is ndarray)
Returns
-------
filtered image or None (if OCLArray)
"""
if data.ndim == 2:
_filt = make_filter(_median_filter_gpu_2d())
elif data.ndim == 3:
_filt = make_filter(_median_filter_gpu_3d())
else:
raise ValueError("currently only 2 or 3 dimensional data is supported")
return _filt(data=data, size=size, cval = cval, res_g=res_g, sub_blocks=sub_blocks) | python | def median_filter(data, size=3, cval = 0, res_g=None, sub_blocks=None):
"""
median filter of given size
Parameters
----------
data: 2 or 3 dimensional ndarray or OCLArray of type float32
input data
size: scalar, tuple
the size of the patch to consider
cval: scalar,
the constant value for out of border access (cf mode = "constant")
res_g: OCLArray
store result in buffer if given
sub_blocks:
perform over subblock tiling (only if data is ndarray)
Returns
-------
filtered image or None (if OCLArray)
"""
if data.ndim == 2:
_filt = make_filter(_median_filter_gpu_2d())
elif data.ndim == 3:
_filt = make_filter(_median_filter_gpu_3d())
else:
raise ValueError("currently only 2 or 3 dimensional data is supported")
return _filt(data=data, size=size, cval = cval, res_g=res_g, sub_blocks=sub_blocks) | [
"def",
"median_filter",
"(",
"data",
",",
"size",
"=",
"3",
",",
"cval",
"=",
"0",
",",
"res_g",
"=",
"None",
",",
"sub_blocks",
"=",
"None",
")",
":",
"if",
"data",
".",
"ndim",
"==",
"2",
":",
"_filt",
"=",
"make_filter",
"(",
"_median_filter_gpu_2d",
"(",
")",
")",
"elif",
"data",
".",
"ndim",
"==",
"3",
":",
"_filt",
"=",
"make_filter",
"(",
"_median_filter_gpu_3d",
"(",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"currently only 2 or 3 dimensional data is supported\"",
")",
"return",
"_filt",
"(",
"data",
"=",
"data",
",",
"size",
"=",
"size",
",",
"cval",
"=",
"cval",
",",
"res_g",
"=",
"res_g",
",",
"sub_blocks",
"=",
"sub_blocks",
")"
] | median filter of given size
Parameters
----------
data: 2 or 3 dimensional ndarray or OCLArray of type float32
input data
size: scalar, tuple
the size of the patch to consider
cval: scalar,
the constant value for out of border access (cf mode = "constant")
res_g: OCLArray
store result in buffer if given
sub_blocks:
perform over subblock tiling (only if data is ndarray)
Returns
-------
filtered image or None (if OCLArray) | [
"median",
"filter",
"of",
"given",
"size"
] | 6ab26efeb05dceef74cf13aadeeeb9b009b529dd | https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/convolve/median_filter.py#L112-L141 | train |
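Usage sketch (top-level import assumed): size may be a scalar or a per-axis tuple, and sub_blocks tiles the work so large volumes fit into GPU memory:

import numpy as np
from gputools import median_filter   # assumed re-export at the package root

img = np.random.rand(256, 256).astype(np.float32)
den = median_filter(img, size=3)     # 3x3 median
vol = np.random.rand(64, 128, 128).astype(np.float32)
den3 = median_filter(vol, size=(3, 5, 5), sub_blocks=(2, 1, 1))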
maweigert/gputools | gputools/transforms/transformations.py | rotate | def rotate(data, axis=(1., 0, 0), angle=0., center=None, mode="constant", interpolation="linear"):
"""
rotates data around axis by a given angle
Parameters
----------
data: ndarray
3d array
axis: tuple
axis about which to rotate, given as (x, y, z)
angle: float
center: tuple or None
origin of rotation (cz,cy,cx) in pixels
if None, center is the middle of data
mode: string
boundary mode, one of the following:
'constant'
pads with zeros
'edge'
pads with edge values
'wrap'
pads with the repeated version of the input
interpolation, string
interpolation mode, one of the following
'linear'
'nearest'
Returns
-------
res: ndarray
rotated array (same shape as input)
"""
if center is None:
center = tuple([s // 2 for s in data.shape])
cx, cy, cz = center
m = np.dot(mat4_translate(cx, cy, cz),
np.dot(mat4_rotate(angle, *axis),
mat4_translate(-cx, -cy, -cz)))
m = np.linalg.inv(m)
return affine(data, m, mode=mode, interpolation=interpolation) | python | def rotate(data, axis=(1., 0, 0), angle=0., center=None, mode="constant", interpolation="linear"):
"""
rotates data around axis by a given angle
Parameters
----------
data: ndarray
3d array
axis: tuple
axis about which to rotate, given as (x, y, z)
angle: float
center: tuple or None
origin of rotation (cz,cy,cx) in pixels
if None, center is the middle of data
mode: string
boundary mode, one of the following:
'constant'
pads with zeros
'edge'
pads with edge values
'wrap'
pads with the repeated version of the input
interpolation, string
interpolation mode, one of the following
'linear'
'nearest'
Returns
-------
res: ndarray
rotated array (same shape as input)
"""
if center is None:
center = tuple([s // 2 for s in data.shape])
cx, cy, cz = center
m = np.dot(mat4_translate(cx, cy, cz),
np.dot(mat4_rotate(angle, *axis),
mat4_translate(-cx, -cy, -cz)))
m = np.linalg.inv(m)
return affine(data, m, mode=mode, interpolation=interpolation) | [
"def",
"rotate",
"(",
"data",
",",
"axis",
"=",
"(",
"1.",
",",
"0",
",",
"0",
")",
",",
"angle",
"=",
"0.",
",",
"center",
"=",
"None",
",",
"mode",
"=",
"\"constant\"",
",",
"interpolation",
"=",
"\"linear\"",
")",
":",
"if",
"center",
"is",
"None",
":",
"center",
"=",
"tuple",
"(",
"[",
"s",
"//",
"2",
"for",
"s",
"in",
"data",
".",
"shape",
"]",
")",
"cx",
",",
"cy",
",",
"cz",
"=",
"center",
"m",
"=",
"np",
".",
"dot",
"(",
"mat4_translate",
"(",
"cx",
",",
"cy",
",",
"cz",
")",
",",
"np",
".",
"dot",
"(",
"mat4_rotate",
"(",
"angle",
",",
"*",
"axis",
")",
",",
"mat4_translate",
"(",
"-",
"cx",
",",
"-",
"cy",
",",
"-",
"cz",
")",
")",
")",
"m",
"=",
"np",
".",
"linalg",
".",
"inv",
"(",
"m",
")",
"return",
"affine",
"(",
"data",
",",
"m",
",",
"mode",
"=",
"mode",
",",
"interpolation",
"=",
"interpolation",
")"
] | rotates data around axis by a given angle
Parameters
----------
data: ndarray
3d array
axis: tuple
axis about which to rotate, given as (x, y, z)
angle: float
center: tuple or None
origin of rotation (cz,cy,cx) in pixels
if None, center is the middle of data
mode: string
boundary mode, one of the following:
'constant'
pads with zeros
'edge'
pads with edge values
'wrap'
pads with the repeated version of the input
interpolation, string
interpolation mode, one of the following
'linear'
'nearest'
Returns
-------
res: ndarray
rotated array (same shape as input) | [
"rotates",
"data",
"around",
"axis",
"by",
"a",
"given",
"angle"
] | 6ab26efeb05dceef74cf13aadeeeb9b009b529dd | https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/transforms/transformations.py#L128-L171 | train |
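The matrix assembled inside rotate is the usual translate / rotate / translate-back composition, inverted because the resampler maps output coordinates back to input coordinates. A numpy sketch of the same construction for a z-axis rotation (the mat4_* helpers are written out here, since gputools' own ones are not part of this entry):

import numpy as np

def mat4_translate(tx, ty, tz):
    m = np.eye(4)
    m[:3, 3] = (tx, ty, tz)
    return m

def mat4_rotate_z(angle):                     # illustrative stand-in for mat4_rotate
    ca, sa = np.cos(angle), np.sin(angle)
    m = np.eye(4)
    m[0, 0], m[0, 1] = ca, -sa
    m[1, 0], m[1, 1] = sa, ca
    return m

center = (16, 16, 16)
m = mat4_translate(*center) @ mat4_rotate_z(0.3) @ mat4_translate(*[-v for v in center])
m_inv = np.linalg.inv(m)                      # what the resampler actually consumes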
maweigert/gputools | gputools/transforms/transformations.py | map_coordinates | def map_coordinates(data, coordinates, interpolation="linear",
mode='constant'):
"""
Map data to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input.
should correspond to scipy.ndimage.map_coordinates
Parameters
----------
data
coordinates
output
interpolation
mode
cval
prefilter
Returns
-------
"""
if not (isinstance(data, np.ndarray) and data.ndim in (2, 3)):
raise ValueError("input data has to be a 2d or 3d array!")
coordinates = np.asarray(coordinates, np.int32)
if not (coordinates.shape[0] == data.ndim):
raise ValueError("coordinate has to be of shape (data.ndim,m) ")
interpolation_defines = {"linear": ["-D", "SAMPLER_FILTER=CLK_FILTER_LINEAR"],
"nearest": ["-D", "SAMPLER_FILTER=CLK_FILTER_NEAREST"]}
mode_defines = {"constant": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP"],
"wrap": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_REPEAT"],
"edge": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP_TO_EDGE"]
}
if not interpolation in interpolation_defines:
raise KeyError(
"interpolation = '%s' not defined ,valid: %s" % (interpolation, list(interpolation_defines.keys())))
if not mode in mode_defines:
raise KeyError("mode = '%s' not defined ,valid: %s" % (mode, list(mode_defines.keys())))
if not data.dtype.type in cl_buffer_datatype_dict:
raise KeyError("dtype %s not supported yet (%s)" % (data.dtype.type, tuple(cl_buffer_datatype_dict.keys())))
dtype_defines = ["-D", "DTYPE=%s" % cl_buffer_datatype_dict[data.dtype.type]]
d_im = OCLImage.from_array(data)
coordinates_g = OCLArray.from_array(coordinates.astype(np.float32, copy=False))
res_g = OCLArray.empty(coordinates.shape[1], data.dtype)
prog = OCLProgram(abspath("kernels/map_coordinates.cl")
, build_options=interpolation_defines[interpolation] +
mode_defines[mode] + dtype_defines)
kernel = "map_coordinates{ndim}".format(ndim=data.ndim)
prog.run_kernel(kernel,
(coordinates.shape[-1],), None,
d_im, res_g.data, coordinates_g.data)
return res_g.get() | python | def map_coordinates(data, coordinates, interpolation="linear",
mode='constant'):
"""
Map data to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input.
should correspond to scipy.ndimage.map_coordinates
Parameters
----------
data
coordinates
output
interpolation
mode
cval
prefilter
Returns
-------
"""
if not (isinstance(data, np.ndarray) and data.ndim in (2, 3)):
raise ValueError("input data has to be a 2d or 3d array!")
coordinates = np.asarray(coordinates, np.int32)
if not (coordinates.shape[0] == data.ndim):
raise ValueError("coordinate has to be of shape (data.ndim,m) ")
interpolation_defines = {"linear": ["-D", "SAMPLER_FILTER=CLK_FILTER_LINEAR"],
"nearest": ["-D", "SAMPLER_FILTER=CLK_FILTER_NEAREST"]}
mode_defines = {"constant": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP"],
"wrap": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_REPEAT"],
"edge": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP_TO_EDGE"]
}
if not interpolation in interpolation_defines:
raise KeyError(
"interpolation = '%s' not defined ,valid: %s" % (interpolation, list(interpolation_defines.keys())))
if not mode in mode_defines:
raise KeyError("mode = '%s' not defined ,valid: %s" % (mode, list(mode_defines.keys())))
if not data.dtype.type in cl_buffer_datatype_dict:
raise KeyError("dtype %s not supported yet (%s)" % (data.dtype.type, tuple(cl_buffer_datatype_dict.keys())))
dtype_defines = ["-D", "DTYPE=%s" % cl_buffer_datatype_dict[data.dtype.type]]
d_im = OCLImage.from_array(data)
coordinates_g = OCLArray.from_array(coordinates.astype(np.float32, copy=False))
res_g = OCLArray.empty(coordinates.shape[1], data.dtype)
prog = OCLProgram(abspath("kernels/map_coordinates.cl")
, build_options=interpolation_defines[interpolation] +
mode_defines[mode] + dtype_defines)
kernel = "map_coordinates{ndim}".format(ndim=data.ndim)
prog.run_kernel(kernel,
(coordinates.shape[-1],), None,
d_im, res_g.data, coordinates_g.data)
return res_g.get() | [
"def",
"map_coordinates",
"(",
"data",
",",
"coordinates",
",",
"interpolation",
"=",
"\"linear\"",
",",
"mode",
"=",
"'constant'",
")",
":",
"if",
"not",
"(",
"isinstance",
"(",
"data",
",",
"np",
".",
"ndarray",
")",
"and",
"data",
".",
"ndim",
"in",
"(",
"2",
",",
"3",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"input data has to be a 2d or 3d array!\"",
")",
"coordinates",
"=",
"np",
".",
"asarray",
"(",
"coordinates",
",",
"np",
".",
"int32",
")",
"if",
"not",
"(",
"coordinates",
".",
"shape",
"[",
"0",
"]",
"==",
"data",
".",
"ndim",
")",
":",
"raise",
"ValueError",
"(",
"\"coordinate has to be of shape (data.ndim,m) \"",
")",
"interpolation_defines",
"=",
"{",
"\"linear\"",
":",
"[",
"\"-D\"",
",",
"\"SAMPLER_FILTER=CLK_FILTER_LINEAR\"",
"]",
",",
"\"nearest\"",
":",
"[",
"\"-D\"",
",",
"\"SAMPLER_FILTER=CLK_FILTER_NEAREST\"",
"]",
"}",
"mode_defines",
"=",
"{",
"\"constant\"",
":",
"[",
"\"-D\"",
",",
"\"SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP\"",
"]",
",",
"\"wrap\"",
":",
"[",
"\"-D\"",
",",
"\"SAMPLER_ADDRESS=CLK_ADDRESS_REPEAT\"",
"]",
",",
"\"edge\"",
":",
"[",
"\"-D\"",
",",
"\"SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP_TO_EDGE\"",
"]",
"}",
"if",
"not",
"interpolation",
"in",
"interpolation_defines",
":",
"raise",
"KeyError",
"(",
"\"interpolation = '%s' not defined ,valid: %s\"",
"%",
"(",
"interpolation",
",",
"list",
"(",
"interpolation_defines",
".",
"keys",
"(",
")",
")",
")",
")",
"if",
"not",
"mode",
"in",
"mode_defines",
":",
"raise",
"KeyError",
"(",
"\"mode = '%s' not defined ,valid: %s\"",
"%",
"(",
"mode",
",",
"list",
"(",
"mode_defines",
".",
"keys",
"(",
")",
")",
")",
")",
"if",
"not",
"data",
".",
"dtype",
".",
"type",
"in",
"cl_buffer_datatype_dict",
":",
"raise",
"KeyError",
"(",
"\"dtype %s not supported yet (%s)\"",
"%",
"(",
"data",
".",
"dtype",
".",
"type",
",",
"tuple",
"(",
"cl_buffer_datatype_dict",
".",
"keys",
"(",
")",
")",
")",
")",
"dtype_defines",
"=",
"[",
"\"-D\"",
",",
"\"DTYPE=%s\"",
"%",
"cl_buffer_datatype_dict",
"[",
"data",
".",
"dtype",
".",
"type",
"]",
"]",
"d_im",
"=",
"OCLImage",
".",
"from_array",
"(",
"data",
")",
"coordinates_g",
"=",
"OCLArray",
".",
"from_array",
"(",
"coordinates",
".",
"astype",
"(",
"np",
".",
"float32",
",",
"copy",
"=",
"False",
")",
")",
"res_g",
"=",
"OCLArray",
".",
"empty",
"(",
"coordinates",
".",
"shape",
"[",
"1",
"]",
",",
"data",
".",
"dtype",
")",
"prog",
"=",
"OCLProgram",
"(",
"abspath",
"(",
"\"kernels/map_coordinates.cl\"",
")",
",",
"build_options",
"=",
"interpolation_defines",
"[",
"interpolation",
"]",
"+",
"mode_defines",
"[",
"mode",
"]",
"+",
"dtype_defines",
")",
"kernel",
"=",
"\"map_coordinates{ndim}\"",
".",
"format",
"(",
"ndim",
"=",
"data",
".",
"ndim",
")",
"prog",
".",
"run_kernel",
"(",
"kernel",
",",
"(",
"coordinates",
".",
"shape",
"[",
"-",
"1",
"]",
",",
")",
",",
"None",
",",
"d_im",
",",
"res_g",
".",
"data",
",",
"coordinates_g",
".",
"data",
")",
"return",
"res_g",
".",
"get",
"(",
")"
] | Map data to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input.
should correspond to scipy.ndimage.map_coordinates
Parameters
----------
data
coordinates
output
interpolation
mode
cval
prefilter
Returns
------- | [
"Map",
"data",
"to",
"new",
"coordinates",
"by",
"interpolation",
".",
"The",
"array",
"of",
"coordinates",
"is",
"used",
"to",
"find",
"for",
"each",
"point",
"in",
"the",
"output",
"the",
"corresponding",
"coordinates",
"in",
"the",
"input",
"."
] | 6ab26efeb05dceef74cf13aadeeeb9b009b529dd | https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/transforms/transformations.py#L174-L237 | train |
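Usage sketch mirroring scipy.ndimage.map_coordinates: coordinates has shape (data.ndim, m), one column per sampling point (top-level import assumed; note the implementation above first casts coordinates through int32):

import numpy as np
from gputools import map_coordinates   # assumed re-export at the package root

data = np.random.rand(64, 64).astype(np.float32)
coords = np.array([[10, 20, 30],       # axis-0 (row) coordinates
                   [ 5, 40, 12]])      # axis-1 (column) coordinates
vals = map_coordinates(data, coords)   # shape (3,), one value per column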
maweigert/gputools | gputools/utils/utils.py | pad_to_shape | def pad_to_shape(d, dshape, mode = "constant"):
"""
pad array d to shape dshape
"""
if d.shape == dshape:
return d
diff = np.array(dshape)- np.array(d.shape)
#first shrink
slices = tuple(slice(-x//2,x//2) if x<0 else slice(None,None) for x in diff)
res = d[slices]
#then pad
# return np.pad(res,[(n/2,n-n/2) if n>0 else (0,0) for n in diff],mode=mode)
return np.pad(res,[(int(np.ceil(d/2.)),d-int(np.ceil(d/2.))) if d>0 else (0,0) for d in diff],mode=mode) | python | def pad_to_shape(d, dshape, mode = "constant"):
"""
pad array d to shape dshape
"""
if d.shape == dshape:
return d
diff = np.array(dshape)- np.array(d.shape)
#first shrink
slices = tuple(slice(-x//2,x//2) if x<0 else slice(None,None) for x in diff)
res = d[slices]
#then pad
# return np.pad(res,[(n/2,n-n/2) if n>0 else (0,0) for n in diff],mode=mode)
return np.pad(res,[(int(np.ceil(d/2.)),d-int(np.ceil(d/2.))) if d>0 else (0,0) for d in diff],mode=mode) | [
"def",
"pad_to_shape",
"(",
"d",
",",
"dshape",
",",
"mode",
"=",
"\"constant\"",
")",
":",
"if",
"d",
".",
"shape",
"==",
"dshape",
":",
"return",
"d",
"diff",
"=",
"np",
".",
"array",
"(",
"dshape",
")",
"-",
"np",
".",
"array",
"(",
"d",
".",
"shape",
")",
"#first shrink",
"slices",
"=",
"tuple",
"(",
"slice",
"(",
"-",
"x",
"//",
"2",
",",
"x",
"//",
"2",
")",
"if",
"x",
"<",
"0",
"else",
"slice",
"(",
"None",
",",
"None",
")",
"for",
"x",
"in",
"diff",
")",
"res",
"=",
"d",
"[",
"slices",
"]",
"#then pad",
"# return np.pad(res,[(n/2,n-n/2) if n>0 else (0,0) for n in diff],mode=mode)",
"return",
"np",
".",
"pad",
"(",
"res",
",",
"[",
"(",
"int",
"(",
"np",
".",
"ceil",
"(",
"d",
"/",
"2.",
")",
")",
",",
"d",
"-",
"int",
"(",
"np",
".",
"ceil",
"(",
"d",
"/",
"2.",
")",
")",
")",
"if",
"d",
">",
"0",
"else",
"(",
"0",
",",
"0",
")",
"for",
"d",
"in",
"diff",
"]",
",",
"mode",
"=",
"mode",
")"
] | pad array d to shape dshape | [
"pad",
"array",
"d",
"to",
"shape",
"dshape"
] | 6ab26efeb05dceef74cf13aadeeeb9b009b529dd | https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/utils/utils.py#L4-L17 | train |
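pad_to_shape both crops and pads: negative entries of the shape difference are cropped first, then the rest is padded with ceil(d/2) elements in front and the remainder behind. The split can be checked directly with the expression from the code:

import numpy as np

a = np.ones((3, 5))
diff = np.array((6, 4)) - np.array(a.shape)   # (3, -1): grow axis 0, shrink axis 1
pads = [(int(np.ceil(d / 2.)), d - int(np.ceil(d / 2.))) if d > 0 else (0, 0)
        for d in diff]
assert pads == [(2, 1), (0, 0)]               # 2 rows before, 1 after on axis 0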
maweigert/gputools | gputools/utils/utils.py | pad_to_power2 | def pad_to_power2(data, axis = None, mode="constant"):
"""
pad data to a shape of power 2
if axis is None, all axes are padded
"""
if axis is None:
axis = list(range(data.ndim))
if np.all([_is_power2(n) for i, n in enumerate(data.shape) if i in axis]):
return data
else:
return pad_to_shape(data,[(_next_power_of_2(n) if i in axis else n) for i,n in enumerate(data.shape)], mode) | python | def pad_to_power2(data, axis = None, mode="constant"):
"""
pad data to a shape of power 2
if axis is None, all axes are padded
"""
if axis is None:
axis = list(range(data.ndim))
if np.all([_is_power2(n) for i, n in enumerate(data.shape) if i in axis]):
return data
else:
return pad_to_shape(data,[(_next_power_of_2(n) if i in axis else n) for i,n in enumerate(data.shape)], mode) | [
"def",
"pad_to_power2",
"(",
"data",
",",
"axis",
"=",
"None",
",",
"mode",
"=",
"\"constant\"",
")",
":",
"if",
"axis",
"is",
"None",
":",
"axis",
"=",
"list",
"(",
"range",
"(",
"data",
".",
"ndim",
")",
")",
"if",
"np",
".",
"all",
"(",
"[",
"_is_power2",
"(",
"n",
")",
"for",
"i",
",",
"n",
"in",
"enumerate",
"(",
"data",
".",
"shape",
")",
"if",
"i",
"in",
"axis",
"]",
")",
":",
"return",
"data",
"else",
":",
"return",
"pad_to_shape",
"(",
"data",
",",
"[",
"(",
"_next_power_of_2",
"(",
"n",
")",
"if",
"i",
"in",
"axis",
"else",
"n",
")",
"for",
"i",
",",
"n",
"in",
"enumerate",
"(",
"data",
".",
"shape",
")",
"]",
",",
"mode",
")"
] | pad data to a shape of power 2
if axis is None, all axes are padded | [
"pad",
"data",
"to",
"a",
"shape",
"of",
"power",
"2",
"if",
"axis",
"==",
"None",
"all",
"axis",
"are",
"padded"
] | 6ab26efeb05dceef74cf13aadeeeb9b009b529dd | https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/utils/utils.py#L27-L38 | train |
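The helpers _is_power2 and _next_power_of_2 are not part of this entry; common one-liners consistent with the behaviour used above would be:

def _is_power2(n):
    return n > 0 and (n & (n - 1)) == 0          # exactly one bit set

def _next_power_of_2(n):
    return 1 if n <= 1 else 2 ** (n - 1).bit_length()

assert _next_power_of_2(100) == 128 and _is_power2(128)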
maweigert/gputools | gputools/convolve/generic_separable_filters.py | max_filter | def max_filter(data, size=7, res_g=None, sub_blocks=(1, 1, 1)):
"""
maximum filter of given size
Parameters
----------
data: 2 or 3 dimensional ndarray or OCLArray of type float32
input data
size: scalar, tuple
the size of the patch to consider
res_g: OCLArray
store result in buffer if given
sub_blocks:
perform over subblock tiling (only if data is ndarray)
Returns
-------
filtered image or None (if OCLArray)
"""
if data.ndim == 2:
_filt = make_filter(_generic_filter_gpu_2d(FUNC = "(val>res?val:res)", DEFAULT = "-INFINITY"))
elif data.ndim == 3:
_filt = make_filter(_generic_filter_gpu_3d(FUNC = "(val>res?val:res)", DEFAULT = "-INFINITY"))
return _filt(data = data, size = size, res_g = res_g, sub_blocks=sub_blocks) | python | def max_filter(data, size=7, res_g=None, sub_blocks=(1, 1, 1)):
"""
maximum filter of given size
Parameters
----------
data: 2 or 3 dimensional ndarray or OCLArray of type float32
input data
size: scalar, tuple
the size of the patch to consider
res_g: OCLArray
store result in buffer if given
sub_blocks:
perform over subblock tiling (only if data is ndarray)
Returns
-------
filtered image or None (if OCLArray)
"""
if data.ndim == 2:
_filt = make_filter(_generic_filter_gpu_2d(FUNC = "(val>res?val:res)", DEFAULT = "-INFINITY"))
elif data.ndim == 3:
_filt = make_filter(_generic_filter_gpu_3d(FUNC = "(val>res?val:res)", DEFAULT = "-INFINITY"))
return _filt(data = data, size = size, res_g = res_g, sub_blocks=sub_blocks) | [
"def",
"max_filter",
"(",
"data",
",",
"size",
"=",
"7",
",",
"res_g",
"=",
"None",
",",
"sub_blocks",
"=",
"(",
"1",
",",
"1",
",",
"1",
")",
")",
":",
"if",
"data",
".",
"ndim",
"==",
"2",
":",
"_filt",
"=",
"make_filter",
"(",
"_generic_filter_gpu_2d",
"(",
"FUNC",
"=",
"\"(val>res?val:res)\"",
",",
"DEFAULT",
"=",
"\"-INFINITY\"",
")",
")",
"elif",
"data",
".",
"ndim",
"==",
"3",
":",
"_filt",
"=",
"make_filter",
"(",
"_generic_filter_gpu_3d",
"(",
"FUNC",
"=",
"\"(val>res?val:res)\"",
",",
"DEFAULT",
"=",
"\"-INFINITY\"",
")",
")",
"return",
"_filt",
"(",
"data",
"=",
"data",
",",
"size",
"=",
"size",
",",
"res_g",
"=",
"res_g",
",",
"sub_blocks",
"=",
"sub_blocks",
")"
] | maximum filter of given size
Parameters
----------
data: 2 or 3 dimensional ndarray or OCLArray of type float32
input data
size: scalar, tuple
the size of the patch to consider
res_g: OCLArray
store result in buffer if given
sub_blocks:
perform over subblock tiling (only if data is ndarray)
Returns
-------
filtered image or None (if OCLArray) | [
"maximum",
"filter",
"of",
"given",
"size"
] | 6ab26efeb05dceef74cf13aadeeeb9b009b529dd | https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/convolve/generic_separable_filters.py#L115-L139 | train |
maweigert/gputools | gputools/convolve/generic_separable_filters.py | min_filter | def min_filter(data, size=7, res_g=None, sub_blocks=(1, 1, 1)):
"""
minimum filter of given size
Parameters
----------
data: 2 or 3 dimensional ndarray or OCLArray of type float32
input data
size: scalar, tuple
the size of the patch to consider
res_g: OCLArray
store result in buffer if given
sub_blocks:
perform over subblock tiling (only if data is ndarray)
Returns
-------
filtered image or None (if OCLArray)
"""
if data.ndim == 2:
_filt = make_filter(_generic_filter_gpu_2d(FUNC="(val<res?val:res)", DEFAULT="INFINITY"))
elif data.ndim == 3:
_filt = make_filter(_generic_filter_gpu_3d(FUNC="(val<res?val:res)", DEFAULT="INFINITY"))
else:
raise ValueError("currently only 2 or 3 dimensional data is supported")
return _filt(data=data, size=size, res_g=res_g, sub_blocks=sub_blocks) | python | def min_filter(data, size=7, res_g=None, sub_blocks=(1, 1, 1)):
"""
minimum filter of given size
Parameters
----------
data: 2 or 3 dimensional ndarray or OCLArray of type float32
input data
size: scalar, tuple
the size of the patch to consider
res_g: OCLArray
store result in buffer if given
sub_blocks:
perform over subblock tiling (only if data is ndarray)
Returns
-------
filtered image or None (if OCLArray)
"""
if data.ndim == 2:
_filt = make_filter(_generic_filter_gpu_2d(FUNC="(val<res?val:res)", DEFAULT="INFINITY"))
elif data.ndim == 3:
_filt = make_filter(_generic_filter_gpu_3d(FUNC="(val<res?val:res)", DEFAULT="INFINITY"))
else:
raise ValueError("currently only 2 or 3 dimensional data is supported")
return _filt(data=data, size=size, res_g=res_g, sub_blocks=sub_blocks) | [
"def",
"min_filter",
"(",
"data",
",",
"size",
"=",
"7",
",",
"res_g",
"=",
"None",
",",
"sub_blocks",
"=",
"(",
"1",
",",
"1",
",",
"1",
")",
")",
":",
"if",
"data",
".",
"ndim",
"==",
"2",
":",
"_filt",
"=",
"make_filter",
"(",
"_generic_filter_gpu_2d",
"(",
"FUNC",
"=",
"\"(val<res?val:res)\"",
",",
"DEFAULT",
"=",
"\"INFINITY\"",
")",
")",
"elif",
"data",
".",
"ndim",
"==",
"3",
":",
"_filt",
"=",
"make_filter",
"(",
"_generic_filter_gpu_3d",
"(",
"FUNC",
"=",
"\"(val<res?val:res)\"",
",",
"DEFAULT",
"=",
"\"INFINITY\"",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"currently only 2 or 3 dimensional data is supported\"",
")",
"return",
"_filt",
"(",
"data",
"=",
"data",
",",
"size",
"=",
"size",
",",
"res_g",
"=",
"res_g",
",",
"sub_blocks",
"=",
"sub_blocks",
")"
] | minimum filter of given size
Parameters
----------
data: 2 or 3 dimensional ndarray or OCLArray of type float32
input data
size: scalar, tuple
the size of the patch to consider
res_g: OCLArray
store result in buffer if given
sub_blocks:
perform over subblock tiling (only if data is ndarray)
Returns
-------
filtered image or None (if OCLArray) | [
"minimum",
"filter",
"of",
"given",
"size"
] | 6ab26efeb05dceef74cf13aadeeeb9b009b529dd | https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/convolve/generic_separable_filters.py#L142-L167 | train |
maweigert/gputools | gputools/convolve/generic_separable_filters.py | uniform_filter | def uniform_filter(data, size=7, res_g=None, sub_blocks=(1, 1, 1), normalized = True):
"""
mean filter of given size
Parameters
----------
data: 2 or 3 dimensional ndarray or OCLArray of type float32
input data
size: scalar, tuple
the size of the patch to consider
res_g: OCLArray
store result in buffer if given
sub_blocks:
perform over subblock tiling (only if data is ndarray)
normalized: bool
if True, the filter corresponds to mean
if False, the filter corresponds to sum
Returns
-------
filtered image or None (if OCLArray)
"""
if normalized:
if np.isscalar(size):
norm = size
else:
norm = np.int32(np.prod(size))**(1./len(size))
FUNC = "res+val/%s"%norm
else:
FUNC = "res+val"
if data.ndim == 2:
_filt = make_filter(_generic_filter_gpu_2d(FUNC=FUNC, DEFAULT="0"))
elif data.ndim == 3:
_filt = make_filter(_generic_filter_gpu_3d(FUNC=FUNC, DEFAULT="0"))
res = _filt(data=data, size=size, res_g=res_g, sub_blocks=sub_blocks)
return res | python | def uniform_filter(data, size=7, res_g=None, sub_blocks=(1, 1, 1), normalized = True):
"""
mean filter of given size
Parameters
----------
data: 2 or 3 dimensional ndarray or OCLArray of type float32
input data
size: scalar, tuple
the size of the patch to consider
res_g: OCLArray
store result in buffer if given
sub_blocks:
perform over subblock tiling (only if data is ndarray)
normalized: bool
if True, the filter corresponds to mean
if False, the filter corresponds to sum
Returns
-------
filtered image or None (if OCLArray)
"""
if normalized:
if np.isscalar(size):
norm = size
else:
norm = np.int32(np.prod(size))**(1./len(size))
FUNC = "res+val/%s"%norm
else:
FUNC = "res+val"
if data.ndim == 2:
_filt = make_filter(_generic_filter_gpu_2d(FUNC=FUNC, DEFAULT="0"))
elif data.ndim == 3:
_filt = make_filter(_generic_filter_gpu_3d(FUNC=FUNC, DEFAULT="0"))
res = _filt(data=data, size=size, res_g=res_g, sub_blocks=sub_blocks)
return res | [
"def",
"uniform_filter",
"(",
"data",
",",
"size",
"=",
"7",
",",
"res_g",
"=",
"None",
",",
"sub_blocks",
"=",
"(",
"1",
",",
"1",
",",
"1",
")",
",",
"normalized",
"=",
"True",
")",
":",
"if",
"normalized",
":",
"if",
"np",
".",
"isscalar",
"(",
"size",
")",
":",
"norm",
"=",
"size",
"else",
":",
"norm",
"=",
"np",
".",
"int32",
"(",
"np",
".",
"prod",
"(",
"size",
")",
")",
"**",
"(",
"1.",
"/",
"len",
"(",
"size",
")",
")",
"FUNC",
"=",
"\"res+val/%s\"",
"%",
"norm",
"else",
":",
"FUNC",
"=",
"\"res+val\"",
"if",
"data",
".",
"ndim",
"==",
"2",
":",
"_filt",
"=",
"make_filter",
"(",
"_generic_filter_gpu_2d",
"(",
"FUNC",
"=",
"FUNC",
",",
"DEFAULT",
"=",
"\"0\"",
")",
")",
"elif",
"data",
".",
"ndim",
"==",
"3",
":",
"_filt",
"=",
"make_filter",
"(",
"_generic_filter_gpu_3d",
"(",
"FUNC",
"=",
"FUNC",
",",
"DEFAULT",
"=",
"\"0\"",
")",
")",
"res",
"=",
"_filt",
"(",
"data",
"=",
"data",
",",
"size",
"=",
"size",
",",
"res_g",
"=",
"res_g",
",",
"sub_blocks",
"=",
"sub_blocks",
")",
"return",
"res"
] | mean filter of given size
Parameters
----------
data: 2 or 3 dimensional ndarray or OCLArray of type float32
input data
size: scalar, tuple
the size of the patch to consider
res_g: OCLArray
store result in buffer if given
sub_blocks:
perform over subblock tiling (only if data is ndarray)
normalized: bool
if True, the filter corresponds to mean
if False, the filter corresponds to sum
Returns
-------
filtered image or None (if OCLArray) | [
"mean",
"filter",
"of",
"given",
"size"
] | 6ab26efeb05dceef74cf13aadeeeb9b009b529dd | https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/convolve/generic_separable_filters.py#L171-L210 | train |
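The per-pass normalization divides by prod(size)**(1/ndim), so after ndim separable passes the total divisor is prod(size) -- the full patch size. A quick numeric check of that identity:

import numpy as np

size = (3, 5, 7)                                          # per-axis window sizes
per_pass = np.int32(np.prod(size)) ** (1. / len(size))    # as in the source
assert np.isclose(per_pass ** len(size), np.prod(size))   # 3*5*7 == 105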
maweigert/gputools | gputools/convolve/generic_separable_filters.py | _gauss_filter | def _gauss_filter(data, sigma=4, res_g=None, sub_blocks=(1, 1, 1)):
"""
gaussian filter of given size
Parameters
----------
data: 2 or 3 dimensional ndarray or OCLArray of type float32
input data
sigma: tuple
the standard deviation of the gaussian per axis (the kernel radius is int(4*sigma + 0.5))
res_g: OCLArray
store result in buffer if given
sub_blocks:
perform over subblock tiling (only if data is ndarray)
Returns
-------
filtered image or None (if OCLArray)
"""
truncate = 4.
radius = tuple(int(truncate*s +0.5) for s in sigma)
size = tuple(2*r+1 for r in radius)
s = sigma[0]
if data.ndim == 2:
_filt = make_filter(_generic_filter_gpu_2d(FUNC="res+(val*native_exp((float)(-(ht-%s)*(ht-%s)/2/%s/%s)))"%(size[0]//2,size[0]//2,s,s), DEFAULT="0.f"))
elif data.ndim == 3:
_filt = make_filter(_generic_filter_gpu_3d(FUNC="res+(val*native_exp((float)(-(ht-%s)*(ht-%s)/2/%s/%s)))"%(size[0]//2,size[0]//2,s,s), DEFAULT="0.f"))
else:
raise ValueError("currently only 2 or 3 dimensional data is supported")
return _filt(data=data, size=size, res_g=res_g, sub_blocks=sub_blocks) | python | def _gauss_filter(data, sigma=4, res_g=None, sub_blocks=(1, 1, 1)):
"""
gaussian filter of given size
Parameters
----------
data: 2 or 3 dimensional ndarray or OCLArray of type float32
input data
sigma: tuple
the standard deviation of the gaussian per axis (the kernel radius is int(4*sigma + 0.5))
res_g: OCLArray
store result in buffer if given
sub_blocks:
perform over subblock tiling (only if data is ndarray)
Returns
-------
filtered image or None (if OCLArray)
"""
truncate = 4.
radius = tuple(int(truncate*s +0.5) for s in sigma)
size = tuple(2*r+1 for r in radius)
s = sigma[0]
if data.ndim == 2:
_filt = make_filter(_generic_filter_gpu_2d(FUNC="res+(val*native_exp((float)(-(ht-%s)*(ht-%s)/2/%s/%s)))"%(size[0]//2,size[0]//2,s,s), DEFAULT="0.f"))
elif data.ndim == 3:
_filt = make_filter(_generic_filter_gpu_3d(FUNC="res+(val*native_exp((float)(-(ht-%s)*(ht-%s)/2/%s/%s)))"%(size[0]//2,size[0]//2,s,s), DEFAULT="0.f"))
else:
raise ValueError("currently only 2 or 3 dimensional data is supported")
return _filt(data=data, size=size, res_g=res_g, sub_blocks=sub_blocks) | [
"def",
"_gauss_filter",
"(",
"data",
",",
"sigma",
"=",
"4",
",",
"res_g",
"=",
"None",
",",
"sub_blocks",
"=",
"(",
"1",
",",
"1",
",",
"1",
")",
")",
":",
"truncate",
"=",
"4.",
"radius",
"=",
"tuple",
"(",
"int",
"(",
"truncate",
"*",
"s",
"+",
"0.5",
")",
"for",
"s",
"in",
"sigma",
")",
"size",
"=",
"tuple",
"(",
"2",
"*",
"r",
"+",
"1",
"for",
"r",
"in",
"radius",
")",
"s",
"=",
"sigma",
"[",
"0",
"]",
"if",
"data",
".",
"ndim",
"==",
"2",
":",
"_filt",
"=",
"make_filter",
"(",
"_generic_filter_gpu_2d",
"(",
"FUNC",
"=",
"\"res+(val*native_exp((float)(-(ht-%s)*(ht-%s)/2/%s/%s)))\"",
"%",
"(",
"size",
"[",
"0",
"]",
"//",
"2",
",",
"size",
"[",
"0",
"]",
"//",
"2",
",",
"s",
",",
"s",
")",
",",
"DEFAULT",
"=",
"\"0.f\"",
")",
")",
"elif",
"data",
".",
"ndim",
"==",
"3",
":",
"_filt",
"=",
"make_filter",
"(",
"_generic_filter_gpu_3d",
"(",
"FUNC",
"=",
"\"res+(val*native_exp((float)(-(ht-%s)*(ht-%s)/2/%s/%s)))\"",
"%",
"(",
"size",
"[",
"0",
"]",
"//",
"2",
",",
"size",
"[",
"0",
"]",
"//",
"2",
",",
"s",
",",
"s",
")",
",",
"DEFAULT",
"=",
"\"0.f\"",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"currently only 2 or 3 dimensional data is supported\"",
")",
"return",
"_filt",
"(",
"data",
"=",
"data",
",",
"size",
"=",
"size",
",",
"res_g",
"=",
"res_g",
",",
"sub_blocks",
"=",
"sub_blocks",
")"
] | gaussian filter of given size
Parameters
----------
data: 2 or 3 dimensional ndarray or OCLArray of type float32
input data
sigma: tuple
the standard deviation of the gaussian per axis (the kernel radius is int(4*sigma + 0.5))
res_g: OCLArray
store result in buffer if given
sub_blocks:
perform over subblock tiling (only if data is ndarray)
Returns
-------
filtered image or None (if OCLArray) | [
"gaussian",
"filter",
"of",
"given",
"size"
] | 6ab26efeb05dceef74cf13aadeeeb9b009b529dd | https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/convolve/generic_separable_filters.py#L216-L248 | train |
maweigert/gputools | gputools/separable/separable_approx.py | _separable_series2 | def _separable_series2(h, N=1):
""" finds separable approximations to the 2d function 2d h
returns res = (hx, hy)[N]
s.t. h \approx sum_i outer(res[i,0],res[i,1])
"""
if min(h.shape)<N:
raise ValueError("smallest dimension of h is smaller than approximation order! (%s < %s)"%(min(h.shape),N))
U, S, V = linalg.svd(h)
hx = [-U[:, n] * np.sqrt(S[n]) for n in range(N)]
hy = [-V[n, :] * np.sqrt(S[n]) for n in range(N)]
return np.array(list(zip(hx, hy))) | python | def _separable_series2(h, N=1):
""" finds separable approximations to the 2d function 2d h
returns res = (hx, hy)[N]
s.t. h \approx sum_i outer(res[i,0],res[i,1])
"""
if min(h.shape)<N:
raise ValueError("smallest dimension of h is smaller than approximation order! (%s < %s)"%(min(h.shape),N))
U, S, V = linalg.svd(h)
hx = [-U[:, n] * np.sqrt(S[n]) for n in range(N)]
hy = [-V[n, :] * np.sqrt(S[n]) for n in range(N)]
return np.array(list(zip(hx, hy))) | [
"def",
"_separable_series2",
"(",
"h",
",",
"N",
"=",
"1",
")",
":",
"if",
"min",
"(",
"h",
".",
"shape",
")",
"<",
"N",
":",
"raise",
"ValueError",
"(",
"\"smallest dimension of h is smaller than approximation order! (%s < %s)\"",
"%",
"(",
"min",
"(",
"h",
".",
"shape",
")",
",",
"N",
")",
")",
"U",
",",
"S",
",",
"V",
"=",
"linalg",
".",
"svd",
"(",
"h",
")",
"hx",
"=",
"[",
"-",
"U",
"[",
":",
",",
"n",
"]",
"*",
"np",
".",
"sqrt",
"(",
"S",
"[",
"n",
"]",
")",
"for",
"n",
"in",
"range",
"(",
"N",
")",
"]",
"hy",
"=",
"[",
"-",
"V",
"[",
"n",
",",
":",
"]",
"*",
"np",
".",
"sqrt",
"(",
"S",
"[",
"n",
"]",
")",
"for",
"n",
"in",
"range",
"(",
"N",
")",
"]",
"return",
"np",
".",
"array",
"(",
"list",
"(",
"zip",
"(",
"hx",
",",
"hy",
")",
")",
")"
] | finds separable approximations to the 2d function h
returns res = (hx, hy)[N]
s.t. h \approx sum_i outer(res[i,0],res[i,1]) | [
"finds",
"separable",
"approximations",
"to",
"the",
"2d",
"function",
"2d",
"h"
] | 6ab26efeb05dceef74cf13aadeeeb9b009b529dd | https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/separable/separable_approx.py#L16-L29 | train |
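The SVD construction above is easy to verify: with the full series, summing the outer products of the factor pairs reconstructs h up to floating point (each pair carries sqrt(S[n]), and the two minus signs cancel in the product):

import numpy as np

h = np.random.rand(6, 8)
U, S, V = np.linalg.svd(h)        # numpy returns V transposed, as the code expects
N = min(h.shape)
hx = [-U[:, n] * np.sqrt(S[n]) for n in range(N)]
hy = [-V[n, :] * np.sqrt(S[n]) for n in range(N)]
assert np.allclose(sum(np.outer(x, y) for x, y in zip(hx, hy)), h)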
maweigert/gputools | gputools/separable/separable_approx.py | _separable_approx2 | def _separable_approx2(h, N=1):
""" returns the N first approximations to the 2d function h
whose sum should be h
"""
return np.cumsum([np.outer(fy, fx) for fy, fx in _separable_series2(h, N)], 0) | python | def _separable_approx2(h, N=1):
""" returns the N first approximations to the 2d function h
whose sum should be h
"""
return np.cumsum([np.outer(fy, fx) for fy, fx in _separable_series2(h, N)], 0) | [
"def",
"_separable_approx2",
"(",
"h",
",",
"N",
"=",
"1",
")",
":",
"return",
"np",
".",
"cumsum",
"(",
"[",
"np",
".",
"outer",
"(",
"fy",
",",
"fx",
")",
"for",
"fy",
",",
"fx",
"in",
"_separable_series2",
"(",
"h",
",",
"N",
")",
"]",
",",
"0",
")"
] | returns the first N cumulative approximations to the 2d function h,
the last of which tends to h as N grows | [
"returns",
"the",
"N",
"first",
"approximations",
"to",
"the",
"2d",
"function",
"h",
"whose",
"sum",
"should",
"be",
"h"
] | 6ab26efeb05dceef74cf13aadeeeb9b009b529dd | https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/separable/separable_approx.py#L32-L36 | train |
maweigert/gputools | gputools/separable/separable_approx.py | _separable_approx3 | def _separable_approx3(h, N=1):
""" returns the N first approximations to the 3d function h
"""
return np.cumsum([np.einsum("i,j,k", fz, fy, fx) for fz, fy, fx in _separable_series3(h, N)], 0) | python | def _separable_approx3(h, N=1):
""" returns the N first approximations to the 3d function h
"""
return np.cumsum([np.einsum("i,j,k", fz, fy, fx) for fz, fy, fx in _separable_series3(h, N)], 0) | [
"def",
"_separable_approx3",
"(",
"h",
",",
"N",
"=",
"1",
")",
":",
"return",
"np",
".",
"cumsum",
"(",
"[",
"np",
".",
"einsum",
"(",
"\"i,j,k\"",
",",
"fz",
",",
"fy",
",",
"fx",
")",
"for",
"fz",
",",
"fy",
",",
"fx",
"in",
"_separable_series3",
"(",
"h",
",",
"N",
")",
"]",
",",
"0",
")"
] | returns the first N cumulative approximations to the 3d function h | [
"returns",
"the",
"N",
"first",
"approximations",
"to",
"the",
"3d",
"function",
"h"
] | 6ab26efeb05dceef74cf13aadeeeb9b009b529dd | https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/separable/separable_approx.py#L85-L88 | train |
maweigert/gputools | gputools/separable/separable_approx.py | separable_approx | def separable_approx(h, N=1):
"""
finds the k-th rank approximation to h, where k = 1..N
similar to separable_series
Parameters
----------
h: ndarray
input array (2 or 3 dimensional)
N: int
order of approximation
Returns
-------
all N approximations; res[i] is the i-th approximation
"""
if h.ndim == 2:
return _separable_approx2(h, N)
elif h.ndim == 3:
return _separable_approx3(h, N)
else:
raise ValueError("unsupported array dimension: %s (only 2d or 3d) " % h.ndim) | python | def separable_approx(h, N=1):
"""
finds the k-th rank approximation to h, where k = 1..N
similar to separable_series
Parameters
----------
h: ndarray
input array (2 or 3 dimensional)
N: int
order of approximation
Returns
-------
all N approximations; res[i] is the i-th approximation
"""
if h.ndim == 2:
return _separable_approx2(h, N)
elif h.ndim == 3:
return _separable_approx3(h, N)
else:
raise ValueError("unsupported array dimension: %s (only 2d or 3d) " % h.ndim) | [
"def",
"separable_approx",
"(",
"h",
",",
"N",
"=",
"1",
")",
":",
"if",
"h",
".",
"ndim",
"==",
"2",
":",
"return",
"_separable_approx2",
"(",
"h",
",",
"N",
")",
"elif",
"h",
".",
"ndim",
"==",
"3",
":",
"return",
"_separable_approx3",
"(",
"h",
",",
"N",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"unsupported array dimension: %s (only 2d or 3d) \"",
"%",
"h",
".",
"ndim",
")"
] | finds the k-th rank approximation to h, where k = 1..N
similar to separable_series
Parameters
----------
h: ndarray
input array (2 or 3 dimensional)
N: int
order of approximation
Returns
-------
all N approximations; res[i] is the i-th approximation | [
"finds",
"the",
"k",
"-",
"th",
"rank",
"approximation",
"to",
"h",
"where",
"k",
"=",
"1",
"..",
"N"
] | 6ab26efeb05dceef74cf13aadeeeb9b009b529dd | https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/separable/separable_approx.py#L127-L150 | train |
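Usage sketch (top-level import is an assumption): the result stacks the cumulative approximations, so res[0] is the rank-1 approximation and res[-1] the best one:

import numpy as np
from gputools import separable_approx   # assumed re-export at the package root

h = np.random.rand(16, 16)
res = separable_approx(h, N=4)                  # res.shape == (4, 16, 16)
errs = [np.linalg.norm(h - r) for r in res]     # errors shrink as the rank grows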
alculquicondor/psqlparse | psqlparse/nodes/nodes.py | Node.tables | def tables(self):
"""
Generic method that does a depth-first search on the node attributes.
Child classes should override this method for better performance.
"""
_tables = set()
for attr in six.itervalues(self.__dict__):
if isinstance(attr, list):
for item in attr:
if isinstance(item, Node):
_tables |= item.tables()
elif isinstance(attr, Node):
_tables |= attr.tables()
return _tables | python | def tables(self):
"""
Generic method that does a depth-first search on the node attributes.
Child classes should override this method for better performance.
"""
_tables = set()
for attr in six.itervalues(self.__dict__):
if isinstance(attr, list):
for item in attr:
if isinstance(item, Node):
_tables |= item.tables()
elif isinstance(attr, Node):
_tables |= attr.tables()
return _tables | [
"def",
"tables",
"(",
"self",
")",
":",
"_tables",
"=",
"set",
"(",
")",
"for",
"attr",
"in",
"six",
".",
"itervalues",
"(",
"self",
".",
"__dict__",
")",
":",
"if",
"isinstance",
"(",
"attr",
",",
"list",
")",
":",
"for",
"item",
"in",
"attr",
":",
"if",
"isinstance",
"(",
"item",
",",
"Node",
")",
":",
"_tables",
"|=",
"item",
".",
"tables",
"(",
")",
"elif",
"isinstance",
"(",
"attr",
",",
"Node",
")",
":",
"_tables",
"|=",
"attr",
".",
"tables",
"(",
")",
"return",
"_tables"
] | Generic method that does a depth-first search on the node attributes.
Child classes should override this method for better performance. | [
"Generic",
"method",
"that",
"does",
"a",
"depth",
"-",
"first",
"search",
"on",
"the",
"node",
"attributes",
"."
] | 9c2af04f45ddc4068d7fd87580612457d374e97d | https://github.com/alculquicondor/psqlparse/blob/9c2af04f45ddc4068d7fd87580612457d374e97d/psqlparse/nodes/nodes.py#L6-L22 | train |
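A sketch of the override the docstring recommends: a leaf node that knows its table can answer in O(1) instead of walking every attribute (TableRef is illustrative, not an actual psqlparse class; the import path is inferred from the entry's file path):

from psqlparse.nodes import Node     # import path assumed

class TableRef(Node):                # hypothetical leaf node
    def __init__(self, relname):
        self.relname = relname

    def tables(self):
        return {self.relname}        # short-circuits the generic depth-first search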
sloria/konch | docopt.py | Pattern.fix_identities | def fix_identities(self, uniq=None):
"""Make pattern-tree tips point to same object if they are equal."""
if not hasattr(self, 'children'):
return self
uniq = list(set(self.flat())) if uniq is None else uniq
for i, child in enumerate(self.children):
if not hasattr(child, 'children'):
assert child in uniq
self.children[i] = uniq[uniq.index(child)]
else:
child.fix_identities(uniq) | python | def fix_identities(self, uniq=None):
"""Make pattern-tree tips point to same object if they are equal."""
if not hasattr(self, 'children'):
return self
uniq = list(set(self.flat())) if uniq is None else uniq
for i, child in enumerate(self.children):
if not hasattr(child, 'children'):
assert child in uniq
self.children[i] = uniq[uniq.index(child)]
else:
child.fix_identities(uniq) | [
"def",
"fix_identities",
"(",
"self",
",",
"uniq",
"=",
"None",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'children'",
")",
":",
"return",
"self",
"uniq",
"=",
"list",
"(",
"set",
"(",
"self",
".",
"flat",
"(",
")",
")",
")",
"if",
"uniq",
"is",
"None",
"else",
"uniq",
"for",
"i",
",",
"child",
"in",
"enumerate",
"(",
"self",
".",
"children",
")",
":",
"if",
"not",
"hasattr",
"(",
"child",
",",
"'children'",
")",
":",
"assert",
"child",
"in",
"uniq",
"self",
".",
"children",
"[",
"i",
"]",
"=",
"uniq",
"[",
"uniq",
".",
"index",
"(",
"child",
")",
"]",
"else",
":",
"child",
".",
"fix_identities",
"(",
"uniq",
")"
] | Make pattern-tree tips point to same object if they are equal. | [
"Make",
"pattern",
"-",
"tree",
"tips",
"point",
"to",
"same",
"object",
"if",
"they",
"are",
"equal",
"."
] | 15160bd0a0cac967eeeab84794bd6cdd0b5b637d | https://github.com/sloria/konch/blob/15160bd0a0cac967eeeab84794bd6cdd0b5b637d/docopt.py#L46-L56 | train |
sloria/konch | setup.py | find_version | def find_version(fname):
"""Attempts to find the version number in the file names fname.
Raises RuntimeError if not found.
"""
version = ""
with open(fname, "r") as fp:
reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
for line in fp:
m = reg.match(line)
if m:
version = m.group(1)
break
if not version:
raise RuntimeError("Cannot find version information")
return version | python | def find_version(fname):
"""Attempts to find the version number in the file names fname.
Raises RuntimeError if not found.
"""
version = ""
with open(fname, "r") as fp:
reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
for line in fp:
m = reg.match(line)
if m:
version = m.group(1)
break
if not version:
raise RuntimeError("Cannot find version information")
return version | [
"def",
"find_version",
"(",
"fname",
")",
":",
"version",
"=",
"\"\"",
"with",
"open",
"(",
"fname",
",",
"\"r\"",
")",
"as",
"fp",
":",
"reg",
"=",
"re",
".",
"compile",
"(",
"r'__version__ = [\\'\"]([^\\'\"]*)[\\'\"]'",
")",
"for",
"line",
"in",
"fp",
":",
"m",
"=",
"reg",
".",
"match",
"(",
"line",
")",
"if",
"m",
":",
"version",
"=",
"m",
".",
"group",
"(",
"1",
")",
"break",
"if",
"not",
"version",
":",
"raise",
"RuntimeError",
"(",
"\"Cannot find version information\"",
")",
"return",
"version"
] | Attempts to find the version number in the file named fname.
Raises RuntimeError if not found. | [
"Attempts",
"to",
"find",
"the",
"version",
"number",
"in",
"the",
"file",
"names",
"fname",
".",
"Raises",
"RuntimeError",
"if",
"not",
"found",
"."
] | 15160bd0a0cac967eeeab84794bd6cdd0b5b637d | https://github.com/sloria/konch/blob/15160bd0a0cac967eeeab84794bd6cdd0b5b637d/setup.py#L46-L60 | train |
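A self-contained check of find_version against a throwaway module (uses the function defined in the entry above; the regex only matches a module-level __version__ = '...' line):

import os, tempfile

with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
    f.write('__version__ = "4.2.1"\n')
try:
    assert find_version(f.name) == "4.2.1"
finally:
    os.unlink(f.name)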
sloria/konch | konch.py | format_context | def format_context(
context: Context, formatter: typing.Union[str, Formatter] = "full"
) -> str:
"""Output the a context dictionary as a string."""
if not context:
return ""
if callable(formatter):
formatter_func = formatter
else:
if formatter in CONTEXT_FORMATTERS:
formatter_func = CONTEXT_FORMATTERS[formatter]
else:
raise ValueError(f'Invalid context format: "{formatter}"')
return formatter_func(context) | python | def format_context(
context: Context, formatter: typing.Union[str, Formatter] = "full"
) -> str:
"""Output the a context dictionary as a string."""
if not context:
return ""
if callable(formatter):
formatter_func = formatter
else:
if formatter in CONTEXT_FORMATTERS:
formatter_func = CONTEXT_FORMATTERS[formatter]
else:
raise ValueError(f'Invalid context format: "{formatter}"')
return formatter_func(context) | [
"def",
"format_context",
"(",
"context",
":",
"Context",
",",
"formatter",
":",
"typing",
".",
"Union",
"[",
"str",
",",
"Formatter",
"]",
"=",
"\"full\"",
")",
"->",
"str",
":",
"if",
"not",
"context",
":",
"return",
"\"\"",
"if",
"callable",
"(",
"formatter",
")",
":",
"formatter_func",
"=",
"formatter",
"else",
":",
"if",
"formatter",
"in",
"CONTEXT_FORMATTERS",
":",
"formatter_func",
"=",
"CONTEXT_FORMATTERS",
"[",
"formatter",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"f'Invalid context format: \"{formatter}\"'",
")",
"return",
"formatter_func",
"(",
"context",
")"
] | Output the context dictionary as a string. | [
"Output",
"the",
"a",
"context",
"dictionary",
"as",
"a",
"string",
"."
] | 15160bd0a0cac967eeeab84794bd6cdd0b5b637d | https://github.com/sloria/konch/blob/15160bd0a0cac967eeeab84794bd6cdd0b5b637d/konch.py#L245-L259 | train |
sloria/konch | konch.py | make_banner | def make_banner(
text: typing.Optional[str] = None,
context: typing.Optional[Context] = None,
banner_template: typing.Optional[str] = None,
context_format: ContextFormat = "full",
) -> str:
"""Generates a full banner with version info, the given text, and a
formatted list of context variables.
"""
banner_text = text or speak()
banner_template = banner_template or BANNER_TEMPLATE
ctx = format_context(context or {}, formatter=context_format)
out = banner_template.format(version=sys.version, text=banner_text, context=ctx)
return out | python | def make_banner(
text: typing.Optional[str] = None,
context: typing.Optional[Context] = None,
banner_template: typing.Optional[str] = None,
context_format: ContextFormat = "full",
) -> str:
"""Generates a full banner with version info, the given text, and a
formatted list of context variables.
"""
banner_text = text or speak()
banner_template = banner_template or BANNER_TEMPLATE
ctx = format_context(context or {}, formatter=context_format)
out = banner_template.format(version=sys.version, text=banner_text, context=ctx)
return out | [
"def",
"make_banner",
"(",
"text",
":",
"typing",
".",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"context",
":",
"typing",
".",
"Optional",
"[",
"Context",
"]",
"=",
"None",
",",
"banner_template",
":",
"typing",
".",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"context_format",
":",
"ContextFormat",
"=",
"\"full\"",
",",
")",
"->",
"str",
":",
"banner_text",
"=",
"text",
"or",
"speak",
"(",
")",
"banner_template",
"=",
"banner_template",
"or",
"BANNER_TEMPLATE",
"ctx",
"=",
"format_context",
"(",
"context",
"or",
"{",
"}",
",",
"formatter",
"=",
"context_format",
")",
"out",
"=",
"banner_template",
".",
"format",
"(",
"version",
"=",
"sys",
".",
"version",
",",
"text",
"=",
"banner_text",
",",
"context",
"=",
"ctx",
")",
"return",
"out"
] | Generates a full banner with version info, the given text, and a
formatted list of context variables. | [
"Generates",
"a",
"full",
"banner",
"with",
"version",
"info",
"the",
"given",
"text",
"and",
"a",
"formatted",
"list",
"of",
"context",
"variables",
"."
] | 15160bd0a0cac967eeeab84794bd6cdd0b5b637d | https://github.com/sloria/konch/blob/15160bd0a0cac967eeeab84794bd6cdd0b5b637d/konch.py#L269-L282 | train |
sloria/konch | konch.py | config | def config(config_dict: typing.Mapping) -> Config:
"""Configures the konch shell. This function should be called in a
.konchrc file.
:param dict config_dict: Dict that may contain 'context', 'banner', and/or
'shell' (default shell class to use).
"""
logger.debug(f"Updating with {config_dict}")
_cfg.update(config_dict)
return _cfg | python | def config(config_dict: typing.Mapping) -> Config:
"""Configures the konch shell. This function should be called in a
.konchrc file.
:param dict config_dict: Dict that may contain 'context', 'banner', and/or
'shell' (default shell class to use).
"""
logger.debug(f"Updating with {config_dict}")
_cfg.update(config_dict)
return _cfg | [
"def",
"config",
"(",
"config_dict",
":",
"typing",
".",
"Mapping",
")",
"->",
"Config",
":",
"logger",
".",
"debug",
"(",
"f\"Updating with {config_dict}\"",
")",
"_cfg",
".",
"update",
"(",
"config_dict",
")",
"return",
"_cfg"
] | Configures the konch shell. This function should be called in a
.konchrc file.
:param dict config_dict: Dict that may contain 'context', 'banner', and/or
'shell' (default shell class to use). | [
"Configures",
"the",
"konch",
"shell",
".",
"This",
"function",
"should",
"be",
"called",
"in",
"a",
".",
"konchrc",
"file",
"."
] | 15160bd0a0cac967eeeab84794bd6cdd0b5b637d | https://github.com/sloria/konch/blob/15160bd0a0cac967eeeab84794bd6cdd0b5b637d/konch.py#L828-L837 | train |
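For context, a sketch of the `.konchrc` file in which `config()` is meant to be called. The `context` and `banner` keys are the ones named in the docstring; the specific values are arbitrary examples, and running it assumes konch is installed.

```python
# .konchrc -- read by konch at startup; the context entries are examples.
import os
import sys

import konch

konch.config({
    "context": {"os": os, "sys": sys},
    "banner": "Project shell with os and sys preloaded",
})
```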
sloria/konch | konch.py | named_config | def named_config(name: str, config_dict: typing.Mapping) -> None:
"""Adds a named config to the config registry. The first argument
may either be a string or a collection of strings.
This function should be called in a .konchrc file.
"""
names = (
name
if isinstance(name, Iterable) and not isinstance(name, (str, bytes))
else [name]
)
for each in names:
_config_registry[each] = Config(**config_dict) | python | def named_config(name: str, config_dict: typing.Mapping) -> None:
"""Adds a named config to the config registry. The first argument
may either be a string or a collection of strings.
This function should be called in a .konchrc file.
"""
names = (
name
if isinstance(name, Iterable) and not isinstance(name, (str, bytes))
else [name]
)
for each in names:
_config_registry[each] = Config(**config_dict) | [
"def",
"named_config",
"(",
"name",
":",
"str",
",",
"config_dict",
":",
"typing",
".",
"Mapping",
")",
"->",
"None",
":",
"names",
"=",
"(",
"name",
"if",
"isinstance",
"(",
"name",
",",
"Iterable",
")",
"and",
"not",
"isinstance",
"(",
"name",
",",
"(",
"str",
",",
"bytes",
")",
")",
"else",
"[",
"name",
"]",
")",
"for",
"each",
"in",
"names",
":",
"_config_registry",
"[",
"each",
"]",
"=",
"Config",
"(",
"*",
"*",
"config_dict",
")"
] | Adds a named config to the config registry. The first argument
may either be a string or a collection of strings.
This function should be called in a .konchrc file. | [
"Adds",
"a",
"named",
"config",
"to",
"the",
"config",
"registry",
".",
"The",
"first",
"argument",
"may",
"either",
"be",
"a",
"string",
"or",
"a",
"collection",
"of",
"strings",
"."
] | 15160bd0a0cac967eeeab84794bd6cdd0b5b637d | https://github.com/sloria/konch/blob/15160bd0a0cac967eeeab84794bd6cdd0b5b637d/konch.py#L840-L852 | train |
sloria/konch | konch.py | __ensure_directory_in_path | def __ensure_directory_in_path(filename: Path) -> None:
"""Ensures that a file's directory is in the Python path.
"""
directory = Path(filename).parent.resolve()
if directory not in sys.path:
logger.debug(f"Adding {directory} to sys.path")
sys.path.insert(0, str(directory)) | python | def __ensure_directory_in_path(filename: Path) -> None:
"""Ensures that a file's directory is in the Python path.
"""
directory = Path(filename).parent.resolve()
if directory not in sys.path:
logger.debug(f"Adding {directory} to sys.path")
sys.path.insert(0, str(directory)) | [
"def",
"__ensure_directory_in_path",
"(",
"filename",
":",
"Path",
")",
"->",
"None",
":",
"directory",
"=",
"Path",
"(",
"filename",
")",
".",
"parent",
".",
"resolve",
"(",
")",
"if",
"directory",
"not",
"in",
"sys",
".",
"path",
":",
"logger",
".",
"debug",
"(",
"f\"Adding {directory} to sys.path\"",
")",
"sys",
".",
"path",
".",
"insert",
"(",
"0",
",",
"str",
"(",
"directory",
")",
")"
] | Ensures that a file's directory is in the Python path. | [
"Ensures",
"that",
"a",
"file",
"s",
"directory",
"is",
"in",
"the",
"Python",
"path",
"."
] | 15160bd0a0cac967eeeab84794bd6cdd0b5b637d | https://github.com/sloria/konch/blob/15160bd0a0cac967eeeab84794bd6cdd0b5b637d/konch.py#L861-L867 | train |
sloria/konch | konch.py | use_file | def use_file(
filename: typing.Union[Path, str, None], trust: bool = False
) -> typing.Union[types.ModuleType, None]:
"""Load filename as a python file. Import ``filename`` and return it
as a module.
"""
config_file = filename or resolve_path(CONFIG_FILE)
def preview_unauthorized() -> None:
if not config_file:
return None
print(SEPARATOR, file=sys.stderr)
with Path(config_file).open("r", encoding="utf-8") as fp:
for line in fp:
print(line, end="", file=sys.stderr)
print(SEPARATOR, file=sys.stderr)
if config_file and not Path(config_file).exists():
print_error(f'"{filename}" not found.')
sys.exit(1)
if config_file and Path(config_file).exists():
if not trust:
with AuthFile.load() as authfile:
try:
authfile.check(Path(config_file))
except KonchrcChangedError:
print_error(f'"{config_file}" has changed since you last used it.')
preview_unauthorized()
if confirm("Would you like to authorize it?"):
authfile.allow(Path(config_file))
print()
else:
sys.exit(1)
except KonchrcNotAuthorizedError:
print_error(f'"{config_file}" is blocked.')
preview_unauthorized()
if confirm("Would you like to authorize it?"):
authfile.allow(Path(config_file))
print()
else:
sys.exit(1)
logger.info(f"Using {config_file}")
# Ensure that relative imports are possible
__ensure_directory_in_path(Path(config_file))
mod = None
try:
mod = imp.load_source("konchrc", str(config_file))
except UnboundLocalError: # File not found
pass
else:
return mod
if not config_file:
print_warning("No konch config file found.")
else:
print_warning(f'"{config_file}" not found.')
return None | python | def use_file(
filename: typing.Union[Path, str, None], trust: bool = False
) -> typing.Union[types.ModuleType, None]:
"""Load filename as a python file. Import ``filename`` and return it
as a module.
"""
config_file = filename or resolve_path(CONFIG_FILE)
def preview_unauthorized() -> None:
if not config_file:
return None
print(SEPARATOR, file=sys.stderr)
with Path(config_file).open("r", encoding="utf-8") as fp:
for line in fp:
print(line, end="", file=sys.stderr)
print(SEPARATOR, file=sys.stderr)
if config_file and not Path(config_file).exists():
print_error(f'"{filename}" not found.')
sys.exit(1)
if config_file and Path(config_file).exists():
if not trust:
with AuthFile.load() as authfile:
try:
authfile.check(Path(config_file))
except KonchrcChangedError:
print_error(f'"{config_file}" has changed since you last used it.')
preview_unauthorized()
if confirm("Would you like to authorize it?"):
authfile.allow(Path(config_file))
print()
else:
sys.exit(1)
except KonchrcNotAuthorizedError:
print_error(f'"{config_file}" is blocked.')
preview_unauthorized()
if confirm("Would you like to authorize it?"):
authfile.allow(Path(config_file))
print()
else:
sys.exit(1)
logger.info(f"Using {config_file}")
# Ensure that relative imports are possible
__ensure_directory_in_path(Path(config_file))
mod = None
try:
mod = imp.load_source("konchrc", str(config_file))
except UnboundLocalError: # File not found
pass
else:
return mod
if not config_file:
print_warning("No konch config file found.")
else:
print_warning(f'"{config_file}" not found.')
return None | [
"def",
"use_file",
"(",
"filename",
":",
"typing",
".",
"Union",
"[",
"Path",
",",
"str",
",",
"None",
"]",
",",
"trust",
":",
"bool",
"=",
"False",
")",
"->",
"typing",
".",
"Union",
"[",
"types",
".",
"ModuleType",
",",
"None",
"]",
":",
"config_file",
"=",
"filename",
"or",
"resolve_path",
"(",
"CONFIG_FILE",
")",
"def",
"preview_unauthorized",
"(",
")",
"->",
"None",
":",
"if",
"not",
"config_file",
":",
"return",
"None",
"print",
"(",
"SEPARATOR",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"with",
"Path",
"(",
"config_file",
")",
".",
"open",
"(",
"\"r\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"fp",
":",
"for",
"line",
"in",
"fp",
":",
"print",
"(",
"line",
",",
"end",
"=",
"\"\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"print",
"(",
"SEPARATOR",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"if",
"config_file",
"and",
"not",
"Path",
"(",
"config_file",
")",
".",
"exists",
"(",
")",
":",
"print_error",
"(",
"f'\"{filename}\" not found.'",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"if",
"config_file",
"and",
"Path",
"(",
"config_file",
")",
".",
"exists",
"(",
")",
":",
"if",
"not",
"trust",
":",
"with",
"AuthFile",
".",
"load",
"(",
")",
"as",
"authfile",
":",
"try",
":",
"authfile",
".",
"check",
"(",
"Path",
"(",
"config_file",
")",
")",
"except",
"KonchrcChangedError",
":",
"print_error",
"(",
"f'\"{config_file}\" has changed since you last used it.'",
")",
"preview_unauthorized",
"(",
")",
"if",
"confirm",
"(",
"\"Would you like to authorize it?\"",
")",
":",
"authfile",
".",
"allow",
"(",
"Path",
"(",
"config_file",
")",
")",
"print",
"(",
")",
"else",
":",
"sys",
".",
"exit",
"(",
"1",
")",
"except",
"KonchrcNotAuthorizedError",
":",
"print_error",
"(",
"f'\"{config_file}\" is blocked.'",
")",
"preview_unauthorized",
"(",
")",
"if",
"confirm",
"(",
"\"Would you like to authorize it?\"",
")",
":",
"authfile",
".",
"allow",
"(",
"Path",
"(",
"config_file",
")",
")",
"print",
"(",
")",
"else",
":",
"sys",
".",
"exit",
"(",
"1",
")",
"logger",
".",
"info",
"(",
"f\"Using {config_file}\"",
")",
"# Ensure that relative imports are possible",
"__ensure_directory_in_path",
"(",
"Path",
"(",
"config_file",
")",
")",
"mod",
"=",
"None",
"try",
":",
"mod",
"=",
"imp",
".",
"load_source",
"(",
"\"konchrc\"",
",",
"str",
"(",
"config_file",
")",
")",
"except",
"UnboundLocalError",
":",
"# File not found",
"pass",
"else",
":",
"return",
"mod",
"if",
"not",
"config_file",
":",
"print_warning",
"(",
"\"No konch config file found.\"",
")",
"else",
":",
"print_warning",
"(",
"f'\"{config_file}\" not found.'",
")",
"return",
"None"
] | Load filename as a python file. Import ``filename`` and return it
as a module. | [
"Load",
"filename",
"as",
"a",
"python",
"file",
".",
"Import",
"filename",
"and",
"return",
"it",
"as",
"a",
"module",
"."
] | 15160bd0a0cac967eeeab84794bd6cdd0b5b637d | https://github.com/sloria/konch/blob/15160bd0a0cac967eeeab84794bd6cdd0b5b637d/konch.py#L898-L954 | train |
sloria/konch | konch.py | resolve_path | def resolve_path(filename: Path) -> typing.Union[Path, None]:
"""Find a file by walking up parent directories until the file is found.
Return the absolute path of the file.
"""
current = Path.cwd()
# Stop search at home directory
sentinel_dir = Path.home().parent.resolve()
while current != sentinel_dir:
target = Path(current) / Path(filename)
if target.exists():
return target.resolve()
else:
current = current.parent.resolve()
return None | python | def resolve_path(filename: Path) -> typing.Union[Path, None]:
"""Find a file by walking up parent directories until the file is found.
Return the absolute path of the file.
"""
current = Path.cwd()
# Stop search at home directory
sentinel_dir = Path.home().parent.resolve()
while current != sentinel_dir:
target = Path(current) / Path(filename)
if target.exists():
return target.resolve()
else:
current = current.parent.resolve()
return None | [
"def",
"resolve_path",
"(",
"filename",
":",
"Path",
")",
"->",
"typing",
".",
"Union",
"[",
"Path",
",",
"None",
"]",
":",
"current",
"=",
"Path",
".",
"cwd",
"(",
")",
"# Stop search at home directory",
"sentinel_dir",
"=",
"Path",
".",
"home",
"(",
")",
".",
"parent",
".",
"resolve",
"(",
")",
"while",
"current",
"!=",
"sentinel_dir",
":",
"target",
"=",
"Path",
"(",
"current",
")",
"/",
"Path",
"(",
"filename",
")",
"if",
"target",
".",
"exists",
"(",
")",
":",
"return",
"target",
".",
"resolve",
"(",
")",
"else",
":",
"current",
"=",
"current",
".",
"parent",
".",
"resolve",
"(",
")",
"return",
"None"
] | Find a file by walking up parent directories until the file is found.
Return the absolute path of the file. | [
"Find",
"a",
"file",
"by",
"walking",
"up",
"parent",
"directories",
"until",
"the",
"file",
"is",
"found",
".",
"Return",
"the",
"absolute",
"path",
"of",
"the",
"file",
"."
] | 15160bd0a0cac967eeeab84794bd6cdd0b5b637d | https://github.com/sloria/konch/blob/15160bd0a0cac967eeeab84794bd6cdd0b5b637d/konch.py#L957-L971 | train |
sloria/konch | konch.py | parse_args | def parse_args(argv: typing.Optional[typing.Sequence] = None) -> typing.Dict[str, str]:
"""Exposes the docopt command-line arguments parser.
Return a dictionary of arguments.
"""
return docopt(__doc__, argv=argv, version=__version__) | python | def parse_args(argv: typing.Optional[typing.Sequence] = None) -> typing.Dict[str, str]:
"""Exposes the docopt command-line arguments parser.
Return a dictionary of arguments.
"""
return docopt(__doc__, argv=argv, version=__version__) | [
"def",
"parse_args",
"(",
"argv",
":",
"typing",
".",
"Optional",
"[",
"typing",
".",
"Sequence",
"]",
"=",
"None",
")",
"->",
"typing",
".",
"Dict",
"[",
"str",
",",
"str",
"]",
":",
"return",
"docopt",
"(",
"__doc__",
",",
"argv",
"=",
"argv",
",",
"version",
"=",
"__version__",
")"
] | Exposes the docopt command-line arguments parser.
Return a dictionary of arguments. | [
"Exposes",
"the",
"docopt",
"command",
"-",
"line",
"arguments",
"parser",
".",
"Return",
"a",
"dictionary",
"of",
"arguments",
"."
] | 15160bd0a0cac967eeeab84794bd6cdd0b5b637d | https://github.com/sloria/konch/blob/15160bd0a0cac967eeeab84794bd6cdd0b5b637d/konch.py#L1132-L1136 | train |
sloria/konch | konch.py | main | def main(argv: typing.Optional[typing.Sequence] = None) -> typing.NoReturn:
"""Main entry point for the konch CLI."""
args = parse_args(argv)
if args["--debug"]:
logging.basicConfig(
format="%(levelname)s %(filename)s: %(message)s", level=logging.DEBUG
)
logger.debug(args)
config_file: typing.Union[Path, None]
if args["init"]:
config_file = Path(args["<config_file>"] or CONFIG_FILE)
init_config(config_file)
else:
config_file = Path(args["<config_file>"]) if args["<config_file>"] else None
if args["edit"]:
edit_config(config_file)
elif args["allow"]:
allow_config(config_file)
elif args["deny"]:
deny_config(config_file)
mod = use_file(Path(args["--file"]) if args["--file"] else None)
if hasattr(mod, "setup"):
mod.setup() # type: ignore
if args["--name"]:
if args["--name"] not in _config_registry:
print_error(f'Invalid --name: "{args["--name"]}"')
sys.exit(1)
config_dict = _config_registry[args["--name"]]
logger.debug(f'Using named config: "{args["--name"]}"')
logger.debug(config_dict)
else:
config_dict = _cfg
# Allow default shell to be overriden by command-line argument
shell_name = args["--shell"]
if shell_name:
config_dict["shell"] = SHELL_MAP.get(shell_name.lower(), AutoShell)
logger.debug(f"Starting with config {config_dict}")
start(**config_dict)
if hasattr(mod, "teardown"):
mod.teardown() # type: ignore
sys.exit(0) | python | def main(argv: typing.Optional[typing.Sequence] = None) -> typing.NoReturn:
"""Main entry point for the konch CLI."""
args = parse_args(argv)
if args["--debug"]:
logging.basicConfig(
format="%(levelname)s %(filename)s: %(message)s", level=logging.DEBUG
)
logger.debug(args)
config_file: typing.Union[Path, None]
if args["init"]:
config_file = Path(args["<config_file>"] or CONFIG_FILE)
init_config(config_file)
else:
config_file = Path(args["<config_file>"]) if args["<config_file>"] else None
if args["edit"]:
edit_config(config_file)
elif args["allow"]:
allow_config(config_file)
elif args["deny"]:
deny_config(config_file)
mod = use_file(Path(args["--file"]) if args["--file"] else None)
if hasattr(mod, "setup"):
mod.setup() # type: ignore
if args["--name"]:
if args["--name"] not in _config_registry:
print_error(f'Invalid --name: "{args["--name"]}"')
sys.exit(1)
config_dict = _config_registry[args["--name"]]
logger.debug(f'Using named config: "{args["--name"]}"')
logger.debug(config_dict)
else:
config_dict = _cfg
# Allow default shell to be overriden by command-line argument
shell_name = args["--shell"]
if shell_name:
config_dict["shell"] = SHELL_MAP.get(shell_name.lower(), AutoShell)
logger.debug(f"Starting with config {config_dict}")
start(**config_dict)
if hasattr(mod, "teardown"):
mod.teardown() # type: ignore
sys.exit(0) | [
"def",
"main",
"(",
"argv",
":",
"typing",
".",
"Optional",
"[",
"typing",
".",
"Sequence",
"]",
"=",
"None",
")",
"->",
"typing",
".",
"NoReturn",
":",
"args",
"=",
"parse_args",
"(",
"argv",
")",
"if",
"args",
"[",
"\"--debug\"",
"]",
":",
"logging",
".",
"basicConfig",
"(",
"format",
"=",
"\"%(levelname)s %(filename)s: %(message)s\"",
",",
"level",
"=",
"logging",
".",
"DEBUG",
")",
"logger",
".",
"debug",
"(",
"args",
")",
"config_file",
":",
"typing",
".",
"Union",
"[",
"Path",
",",
"None",
"]",
"if",
"args",
"[",
"\"init\"",
"]",
":",
"config_file",
"=",
"Path",
"(",
"args",
"[",
"\"<config_file>\"",
"]",
"or",
"CONFIG_FILE",
")",
"init_config",
"(",
"config_file",
")",
"else",
":",
"config_file",
"=",
"Path",
"(",
"args",
"[",
"\"<config_file>\"",
"]",
")",
"if",
"args",
"[",
"\"<config_file>\"",
"]",
"else",
"None",
"if",
"args",
"[",
"\"edit\"",
"]",
":",
"edit_config",
"(",
"config_file",
")",
"elif",
"args",
"[",
"\"allow\"",
"]",
":",
"allow_config",
"(",
"config_file",
")",
"elif",
"args",
"[",
"\"deny\"",
"]",
":",
"deny_config",
"(",
"config_file",
")",
"mod",
"=",
"use_file",
"(",
"Path",
"(",
"args",
"[",
"\"--file\"",
"]",
")",
"if",
"args",
"[",
"\"--file\"",
"]",
"else",
"None",
")",
"if",
"hasattr",
"(",
"mod",
",",
"\"setup\"",
")",
":",
"mod",
".",
"setup",
"(",
")",
"# type: ignore",
"if",
"args",
"[",
"\"--name\"",
"]",
":",
"if",
"args",
"[",
"\"--name\"",
"]",
"not",
"in",
"_config_registry",
":",
"print_error",
"(",
"f'Invalid --name: \"{args[\"--name\"]}\"'",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"config_dict",
"=",
"_config_registry",
"[",
"args",
"[",
"\"--name\"",
"]",
"]",
"logger",
".",
"debug",
"(",
"f'Using named config: \"{args[\"--name\"]}\"'",
")",
"logger",
".",
"debug",
"(",
"config_dict",
")",
"else",
":",
"config_dict",
"=",
"_cfg",
"# Allow default shell to be overriden by command-line argument",
"shell_name",
"=",
"args",
"[",
"\"--shell\"",
"]",
"if",
"shell_name",
":",
"config_dict",
"[",
"\"shell\"",
"]",
"=",
"SHELL_MAP",
".",
"get",
"(",
"shell_name",
".",
"lower",
"(",
")",
",",
"AutoShell",
")",
"logger",
".",
"debug",
"(",
"f\"Starting with config {config_dict}\"",
")",
"start",
"(",
"*",
"*",
"config_dict",
")",
"if",
"hasattr",
"(",
"mod",
",",
"\"teardown\"",
")",
":",
"mod",
".",
"teardown",
"(",
")",
"# type: ignore",
"sys",
".",
"exit",
"(",
"0",
")"
] | Main entry point for the konch CLI. | [
"Main",
"entry",
"point",
"for",
"the",
"konch",
"CLI",
"."
] | 15160bd0a0cac967eeeab84794bd6cdd0b5b637d | https://github.com/sloria/konch/blob/15160bd0a0cac967eeeab84794bd6cdd0b5b637d/konch.py#L1139-L1184 | train |
sloria/konch | konch.py | IPythonShell.init_autoreload | def init_autoreload(mode: int) -> None:
"""Load and initialize the IPython autoreload extension."""
from IPython.extensions import autoreload
ip = get_ipython() # type: ignore # noqa: F821
autoreload.load_ipython_extension(ip)
ip.magics_manager.magics["line"]["autoreload"](str(mode)) | python | def init_autoreload(mode: int) -> None:
"""Load and initialize the IPython autoreload extension."""
from IPython.extensions import autoreload
ip = get_ipython() # type: ignore # noqa: F821
autoreload.load_ipython_extension(ip)
ip.magics_manager.magics["line"]["autoreload"](str(mode)) | [
"def",
"init_autoreload",
"(",
"mode",
":",
"int",
")",
"->",
"None",
":",
"from",
"IPython",
".",
"extensions",
"import",
"autoreload",
"ip",
"=",
"get_ipython",
"(",
")",
"# type: ignore # noqa: F821",
"autoreload",
".",
"load_ipython_extension",
"(",
"ip",
")",
"ip",
".",
"magics_manager",
".",
"magics",
"[",
"\"line\"",
"]",
"[",
"\"autoreload\"",
"]",
"(",
"str",
"(",
"mode",
")",
")"
] | Load and initialize the IPython autoreload extension. | [
"Load",
"and",
"initialize",
"the",
"IPython",
"autoreload",
"extension",
"."
] | 15160bd0a0cac967eeeab84794bd6cdd0b5b637d | https://github.com/sloria/konch/blob/15160bd0a0cac967eeeab84794bd6cdd0b5b637d/konch.py#L427-L433 | train |
JamesPHoughton/pysd | pysd/py_backend/vensim/table2py.py | read_tabular | def read_tabular(table_file, sheetname='Sheet1'):
"""
Reads a vensim syntax model which has been formatted as a table.
This is useful in contexts where model building is performed
without the aid of Vensim.
Parameters
----------
table_file: .csv, .tab or .xls(x) file
Table should have columns titled as in the table below
| Variable | Equation | Units | Min | Max | Comment |
| :------- | :------- | :---- | :-- | :-- | :--------------- |
| Age | 5 | Yrs | 0 | inf | How old are you? |
| ... | ... | ... | ... | ... | ... |
sheetname: basestring
if the model is specified in an excel file, what sheet?
Returns
-------
PySD Model Object
Notes
-----
Creates an intermediate file in vensim `.mdl` syntax, just so that
the existing vensim parsing machinery can be used.
"""
if isinstance(table_file, str):
extension = table_file.split('.')[-1]
if extension in ['xls', 'xlsx']:
table = pd.read_excel(table_file, sheetname=sheetname)
elif extension == 'csv':
table = pd.read_csv(table_file, encoding='UTF-8')
elif extension == 'tab':
table = pd.read_csv(table_file, sep='\t', encoding='UTF-8')
else:
raise ValueError('Unknown file or table type')
else:
raise ValueError('Unknown file or table type')
if not set(table.columns).issuperset({'Variable', 'Equation'}):
raise ValueError('Table must contain at least columns "Variable" and "Equation"')
if "Units" not in set(table.columns):
warnings.warn('Column for "Units" not found', RuntimeWarning, stacklevel=2)
table['Units'] = ''
if "Min" not in set(table.columns):
warnings.warn('Column for "Min" not found', RuntimeWarning, stacklevel=2)
table['Min'] = ''
if "Max" not in set(table.columns):
warnings.warn('Column for "Max" not found', RuntimeWarning, stacklevel=2)
table['Max'] = ''
mdl_file = table_file.replace(extension, 'mdl')
with open(mdl_file, 'w', encoding='UTF-8') as outfile:
for element in table.to_dict(orient='records'):
outfile.write(
"%(Variable)s = \n"
"\t %(Equation)s \n"
"\t~\t %(Units)s [%(Min)s, %(Max)s] \n"
"\t~\t %(Comment)s \n\t|\n\n" % element
)
outfile.write(u'\\\---/// Sketch information - this is where sketch stuff would go.')
return read_vensim(mdl_file) | python | def read_tabular(table_file, sheetname='Sheet1'):
"""
Reads a vensim syntax model which has been formatted as a table.
This is useful in contexts where model building is performed
without the aid of Vensim.
Parameters
----------
table_file: .csv, .tab or .xls(x) file
Table should have columns titled as in the table below
| Variable | Equation | Units | Min | Max | Comment |
| :------- | :------- | :---- | :-- | :-- | :--------------- |
| Age | 5 | Yrs | 0 | inf | How old are you? |
| ... | ... | ... | ... | ... | ... |
sheetname: basestring
if the model is specified in an excel file, what sheet?
Returns
-------
PySD Model Object
Notes
-----
Creates an intermediate file in vensim `.mdl` syntax, just so that
the existing vensim parsing machinery can be used.
"""
if isinstance(table_file, str):
extension = table_file.split('.')[-1]
if extension in ['xls', 'xlsx']:
table = pd.read_excel(table_file, sheetname=sheetname)
elif extension == 'csv':
table = pd.read_csv(table_file, encoding='UTF-8')
elif extension == 'tab':
table = pd.read_csv(table_file, sep='\t', encoding='UTF-8')
else:
raise ValueError('Unknown file or table type')
else:
raise ValueError('Unknown file or table type')
if not set(table.columns).issuperset({'Variable', 'Equation'}):
raise ValueError('Table must contain at least columns "Variable" and "Equation"')
if "Units" not in set(table.columns):
warnings.warn('Column for "Units" not found', RuntimeWarning, stacklevel=2)
table['Units'] = ''
if "Min" not in set(table.columns):
warnings.warn('Column for "Min" not found', RuntimeWarning, stacklevel=2)
table['Min'] = ''
if "Max" not in set(table.columns):
warnings.warn('Column for "Max" not found', RuntimeWarning, stacklevel=2)
table['Max'] = ''
mdl_file = table_file.replace(extension, 'mdl')
with open(mdl_file, 'w', encoding='UTF-8') as outfile:
for element in table.to_dict(orient='records'):
outfile.write(
"%(Variable)s = \n"
"\t %(Equation)s \n"
"\t~\t %(Units)s [%(Min)s, %(Max)s] \n"
"\t~\t %(Comment)s \n\t|\n\n" % element
)
outfile.write(u'\\\---/// Sketch information - this is where sketch stuff would go.')
return read_vensim(mdl_file) | [
"def",
"read_tabular",
"(",
"table_file",
",",
"sheetname",
"=",
"'Sheet1'",
")",
":",
"if",
"isinstance",
"(",
"table_file",
",",
"str",
")",
":",
"extension",
"=",
"table_file",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"if",
"extension",
"in",
"[",
"'xls'",
",",
"'xlsx'",
"]",
":",
"table",
"=",
"pd",
".",
"read_excel",
"(",
"table_file",
",",
"sheetname",
"=",
"sheetname",
")",
"elif",
"extension",
"==",
"'csv'",
":",
"table",
"=",
"pd",
".",
"read_csv",
"(",
"table_file",
",",
"encoding",
"=",
"'UTF-8'",
")",
"elif",
"extension",
"==",
"'tab'",
":",
"table",
"=",
"pd",
".",
"read_csv",
"(",
"table_file",
",",
"sep",
"=",
"'\\t'",
",",
"encoding",
"=",
"'UTF-8'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unknown file or table type'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unknown file or table type'",
")",
"if",
"not",
"set",
"(",
"table",
".",
"columns",
")",
".",
"issuperset",
"(",
"{",
"'Variable'",
",",
"'Equation'",
"}",
")",
":",
"raise",
"ValueError",
"(",
"'Table must contain at least columns \"Variable\" and \"Equation\"'",
")",
"if",
"\"Units\"",
"not",
"in",
"set",
"(",
"table",
".",
"columns",
")",
":",
"warnings",
".",
"warn",
"(",
"'Column for \"Units\" not found'",
",",
"RuntimeWarning",
",",
"stacklevel",
"=",
"2",
")",
"table",
"[",
"'Units'",
"]",
"=",
"''",
"if",
"\"Min\"",
"not",
"in",
"set",
"(",
"table",
".",
"columns",
")",
":",
"warnings",
".",
"warn",
"(",
"'Column for \"Min\" not found'",
",",
"RuntimeWarning",
",",
"stacklevel",
"=",
"2",
")",
"table",
"[",
"'Min'",
"]",
"=",
"''",
"if",
"\"Max\"",
"not",
"in",
"set",
"(",
"table",
".",
"columns",
")",
":",
"warnings",
".",
"warn",
"(",
"'Column for \"Max\" not found'",
",",
"RuntimeWarning",
",",
"stacklevel",
"=",
"2",
")",
"table",
"[",
"'Max'",
"]",
"=",
"''",
"mdl_file",
"=",
"table_file",
".",
"replace",
"(",
"extension",
",",
"'mdl'",
")",
"with",
"open",
"(",
"mdl_file",
",",
"'w'",
",",
"encoding",
"=",
"'UTF-8'",
")",
"as",
"outfile",
":",
"for",
"element",
"in",
"table",
".",
"to_dict",
"(",
"orient",
"=",
"'records'",
")",
":",
"outfile",
".",
"write",
"(",
"\"%(Variable)s = \\n\"",
"\"\\t %(Equation)s \\n\"",
"\"\\t~\\t %(Units)s [%(Min)s, %(Max)s] \\n\"",
"\"\\t~\\t %(Comment)s \\n\\t|\\n\\n\"",
"%",
"element",
")",
"outfile",
".",
"write",
"(",
"u'\\\\\\---/// Sketch information - this is where sketch stuff would go.'",
")",
"return",
"read_vensim",
"(",
"mdl_file",
")"
] | Reads a vensim syntax model which has been formatted as a table.
This is useful in contexts where model building is performed
without the aid of Vensim.
Parameters
----------
table_file: .csv, .tab or .xls(x) file
Table should have columns titled as in the table below
| Variable | Equation | Units | Min | Max | Comment |
| :------- | :------- | :---- | :-- | :-- | :--------------- |
| Age | 5 | Yrs | 0 | inf | How old are you? |
| ... | ... | ... | ... | ... | ... |
sheetname: basestring
if the model is specified in an excel file, what sheet?
Returns
-------
PySD Model Object
Notes
-----
Creates an intermediate file in vensim `.mdl` syntax, just so that
the existing vensim parsing machinery can be used. | [
"Reads",
"a",
"vensim",
"syntax",
"model",
"which",
"has",
"been",
"formatted",
"as",
"a",
"table",
"."
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/vensim/table2py.py#L6-L80 | train |
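A sketch of the tabular layout `read_tabular` expects, built with pandas and written to CSV. Note that the write loop above interpolates a `%(Comment)s` field but never warns about or defaults a missing `Comment` column, so in practice that column is required too. The final call is commented out because it needs pysd's Vensim translation machinery at runtime.

```python
import pandas as pd

# A two-variable model table in the column layout documented above.
# Variable names and equations are made-up examples.
table = pd.DataFrame([
    {"Variable": "characteristic time", "Equation": "10",
     "Units": "Minutes", "Min": "0", "Max": "inf", "Comment": "time constant"},
    {"Variable": "room temperature", "Equation": "70",
     "Units": "Degrees", "Min": "0", "Max": "inf", "Comment": "ambient"},
])
table.to_csv("teacup_table.csv", index=False, encoding="UTF-8")

# model = read_tabular("teacup_table.csv")  # requires pysd's Vensim translator
```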
JamesPHoughton/pysd | pysd/pysd.py | read_xmile | def read_xmile(xmile_file):
""" Construct a model object from `.xmile` file. """
from . import py_backend
from .py_backend.xmile.xmile2py import translate_xmile
py_model_file = translate_xmile(xmile_file)
model = load(py_model_file)
model.xmile_file = xmile_file
return model | python | def read_xmile(xmile_file):
""" Construct a model object from `.xmile` file. """
from . import py_backend
from .py_backend.xmile.xmile2py import translate_xmile
py_model_file = translate_xmile(xmile_file)
model = load(py_model_file)
model.xmile_file = xmile_file
return model | [
"def",
"read_xmile",
"(",
"xmile_file",
")",
":",
"from",
".",
"import",
"py_backend",
"from",
".",
"py_backend",
".",
"xmile",
".",
"xmile2py",
"import",
"translate_xmile",
"py_model_file",
"=",
"translate_xmile",
"(",
"xmile_file",
")",
"model",
"=",
"load",
"(",
"py_model_file",
")",
"model",
".",
"xmile_file",
"=",
"xmile_file",
"return",
"model"
] | Construct a model object from `.xmile` file. | [
"Construct",
"a",
"model",
"object",
"from",
".",
"xmile",
"file",
"."
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/pysd.py#L16-L23 | train |
JamesPHoughton/pysd | pysd/pysd.py | read_vensim | def read_vensim(mdl_file):
"""
Construct a model from Vensim `.mdl` file.
Parameters
----------
mdl_file : <string>
The relative path filename for a raw Vensim `.mdl` file
Returns
-------
model: a PySD class object
Elements from the python model are loaded into the PySD class and ready to run
Examples
--------
>>> model = read_vensim('../tests/test-models/samples/teacup/teacup.mdl')
"""
from .py_backend.vensim.vensim2py import translate_vensim
from .py_backend import functions
py_model_file = translate_vensim(mdl_file)
model = functions.Model(py_model_file)
model.mdl_file = mdl_file
return model | python | def read_vensim(mdl_file):
"""
Construct a model from Vensim `.mdl` file.
Parameters
----------
mdl_file : <string>
The relative path filename for a raw Vensim `.mdl` file
Returns
-------
model: a PySD class object
Elements from the python model are loaded into the PySD class and ready to run
Examples
--------
>>> model = read_vensim('../tests/test-models/samples/teacup/teacup.mdl')
"""
from .py_backend.vensim.vensim2py import translate_vensim
from .py_backend import functions
py_model_file = translate_vensim(mdl_file)
model = functions.Model(py_model_file)
model.mdl_file = mdl_file
return model | [
"def",
"read_vensim",
"(",
"mdl_file",
")",
":",
"from",
".",
"py_backend",
".",
"vensim",
".",
"vensim2py",
"import",
"translate_vensim",
"from",
".",
"py_backend",
"import",
"functions",
"py_model_file",
"=",
"translate_vensim",
"(",
"mdl_file",
")",
"model",
"=",
"functions",
".",
"Model",
"(",
"py_model_file",
")",
"model",
".",
"mdl_file",
"=",
"mdl_file",
"return",
"model"
] | Construct a model from Vensim `.mdl` file.
Parameters
----------
mdl_file : <string>
The relative path filename for a raw Vensim `.mdl` file
Returns
-------
model: a PySD class object
Elements from the python model are loaded into the PySD class and ready to run
Examples
--------
>>> model = read_vensim('../tests/test-models/samples/teacup/teacup.mdl') | [
"Construct",
"a",
"model",
"from",
"Vensim",
".",
"mdl",
"file",
"."
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/pysd.py#L25-L49 | train |
JamesPHoughton/pysd | pysd/py_backend/functions.py | cache | def cache(horizon):
"""
Put a wrapper around a model function
Decorators with parameters are tricky: you have to
essentially create a decorator that returns a decorator,
which itself then returns the function wrapper.
Parameters
----------
horizon: string
- 'step' means cache just until the next timestep
- 'run' means cache until the next initialization of the model
Returns
-------
new_func: decorated function
function wrapping the original function, handling caching
"""
def cache_step(func):
""" Decorator for caching at a step level"""
@wraps(func)
def cached(*args):
"""Step wise cache function"""
try: # fails if cache is out of date or not instantiated
data = func.__globals__['__data']
assert cached.cache_t == data['time']()
assert hasattr(cached, 'cache_val')
assert cached.cache_val is not None
except (AssertionError, AttributeError):
cached.cache_val = func(*args)
data = func.__globals__['__data']
cached.cache_t = data['time']()
return cached.cache_val
return cached
def cache_run(func):
""" Decorator for caching at the run level"""
@wraps(func)
def cached(*args):
"""Run wise cache function"""
try: # fails if cache is not instantiated
return cached.cache_val
except AttributeError:
cached.cache_val = func(*args)
return cached.cache_val
return cached
if horizon == 'step':
return cache_step
elif horizon == 'run':
return cache_run
else:
raise (AttributeError('Bad horizon for cache decorator')) | python | def cache(horizon):
"""
Put a wrapper around a model function
Decorators with parameters are tricky: you have to
essentially create a decorator that returns a decorator,
which itself then returns the function wrapper.
Parameters
----------
horizon: string
- 'step' means cache just until the next timestep
- 'run' means cache until the next initialization of the model
Returns
-------
new_func: decorated function
function wrapping the original function, handling caching
"""
def cache_step(func):
""" Decorator for caching at a step level"""
@wraps(func)
def cached(*args):
"""Step wise cache function"""
try: # fails if cache is out of date or not instantiated
data = func.__globals__['__data']
assert cached.cache_t == data['time']()
assert hasattr(cached, 'cache_val')
assert cached.cache_val is not None
except (AssertionError, AttributeError):
cached.cache_val = func(*args)
data = func.__globals__['__data']
cached.cache_t = data['time']()
return cached.cache_val
return cached
def cache_run(func):
""" Decorator for caching at the run level"""
@wraps(func)
def cached(*args):
"""Run wise cache function"""
try: # fails if cache is not instantiated
return cached.cache_val
except AttributeError:
cached.cache_val = func(*args)
return cached.cache_val
return cached
if horizon == 'step':
return cache_step
elif horizon == 'run':
return cache_run
else:
raise (AttributeError('Bad horizon for cache decorator')) | [
"def",
"cache",
"(",
"horizon",
")",
":",
"def",
"cache_step",
"(",
"func",
")",
":",
"\"\"\" Decorator for caching at a step level\"\"\"",
"@",
"wraps",
"(",
"func",
")",
"def",
"cached",
"(",
"*",
"args",
")",
":",
"\"\"\"Step wise cache function\"\"\"",
"try",
":",
"# fails if cache is out of date or not instantiated",
"data",
"=",
"func",
".",
"__globals__",
"[",
"'__data'",
"]",
"assert",
"cached",
".",
"cache_t",
"==",
"data",
"[",
"'time'",
"]",
"(",
")",
"assert",
"hasattr",
"(",
"cached",
",",
"'cache_val'",
")",
"assert",
"cached",
".",
"cache_val",
"is",
"not",
"None",
"except",
"(",
"AssertionError",
",",
"AttributeError",
")",
":",
"cached",
".",
"cache_val",
"=",
"func",
"(",
"*",
"args",
")",
"data",
"=",
"func",
".",
"__globals__",
"[",
"'__data'",
"]",
"cached",
".",
"cache_t",
"=",
"data",
"[",
"'time'",
"]",
"(",
")",
"return",
"cached",
".",
"cache_val",
"return",
"cached",
"def",
"cache_run",
"(",
"func",
")",
":",
"\"\"\" Decorator for caching at the run level\"\"\"",
"@",
"wraps",
"(",
"func",
")",
"def",
"cached",
"(",
"*",
"args",
")",
":",
"\"\"\"Run wise cache function\"\"\"",
"try",
":",
"# fails if cache is not instantiated",
"return",
"cached",
".",
"cache_val",
"except",
"AttributeError",
":",
"cached",
".",
"cache_val",
"=",
"func",
"(",
"*",
"args",
")",
"return",
"cached",
".",
"cache_val",
"return",
"cached",
"if",
"horizon",
"==",
"'step'",
":",
"return",
"cache_step",
"elif",
"horizon",
"==",
"'run'",
":",
"return",
"cache_run",
"else",
":",
"raise",
"(",
"AttributeError",
"(",
"'Bad horizon for cache decorator'",
")",
")"
] | Put a wrapper around a model function
Decorators with parameters are tricky: you have to
essentially create a decorator that returns a decorator,
which itself then returns the function wrapper.
Parameters
----------
horizon: string
- 'step' means cache just until the next timestep
- 'run' means cache until the next initialization of the model
Returns
-------
new_func: decorated function
function wrapping the original function, handling caching | [
"Put",
"a",
"wrapper",
"around",
"a",
"model",
"function"
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/functions.py#L44-L105 | train |
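A self-contained sketch of the step-level caching idea, simplified from the decorator above: recompute only when the model clock has advanced. The `Clock` helper stands in for pysd's time machinery, which the real decorator reaches through `func.__globals__['__data']`.

```python
from functools import wraps

class Clock:
    """Stand-in for the model's time() function."""
    def __init__(self):
        self.t = 0.0
    def __call__(self):
        return self.t

time = Clock()
calls = []

def cache_step(func):
    @wraps(func)
    def cached(*args):
        # Recompute only when the cached timestamp is stale (or unset).
        if getattr(cached, "cache_t", None) != time():
            cached.cache_val = func(*args)
            cached.cache_t = time()
        return cached.cache_val
    return cached

@cache_step
def expensive():
    calls.append(1)
    return time() ** 2

expensive(); expensive()  # same timestep: evaluated once
time.t = 1.0
expensive()               # clock advanced: evaluated again
print(len(calls))         # -> 2
```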
JamesPHoughton/pysd | pysd/py_backend/functions.py | ramp | def ramp(time, slope, start, finish=0):
"""
Implements vensim's and xmile's RAMP function
Parameters
----------
time: function
The current time of the simulation
slope: float
The slope of the ramp starting at zero at time start
start: float
Time at which the ramp begins
finish: float
Optional. Time at which the ramp ends
Returns
-------
response: float
If prior to ramp start, returns zero
If after ramp ends, returns top of ramp
Examples
--------
"""
t = time()
if t < start:
return 0
else:
if finish <= 0:
return slope * (t - start)
elif t > finish:
return slope * (finish - start)
else:
return slope * (t - start) | python | def ramp(time, slope, start, finish=0):
"""
Implements vensim's and xmile's RAMP function
Parameters
----------
time: function
The current time of the simulation
slope: float
The slope of the ramp starting at zero at time start
start: float
Time at which the ramp begins
finish: float
Optional. Time at which the ramp ends
Returns
-------
response: float
If prior to ramp start, returns zero
If after ramp ends, returns top of ramp
Examples
--------
"""
t = time()
if t < start:
return 0
else:
if finish <= 0:
return slope * (t - start)
elif t > finish:
return slope * (finish - start)
else:
return slope * (t - start) | [
"def",
"ramp",
"(",
"time",
",",
"slope",
",",
"start",
",",
"finish",
"=",
"0",
")",
":",
"t",
"=",
"time",
"(",
")",
"if",
"t",
"<",
"start",
":",
"return",
"0",
"else",
":",
"if",
"finish",
"<=",
"0",
":",
"return",
"slope",
"*",
"(",
"t",
"-",
"start",
")",
"elif",
"t",
">",
"finish",
":",
"return",
"slope",
"*",
"(",
"finish",
"-",
"start",
")",
"else",
":",
"return",
"slope",
"*",
"(",
"t",
"-",
"start",
")"
] | Implements vensim's and xmile's RAMP function
Parameters
----------
time: function
The current time of the simulation
slope: float
The slope of the ramp starting at zero at time start
start: float
Time at which the ramp begins
finish: float
Optional. Time at which the ramp ends
Returns
-------
response: float
If prior to ramp start, returns zero
If after ramp ends, returns top of ramp
Examples
-------- | [
"Implements",
"vensim",
"s",
"and",
"xmile",
"s",
"RAMP",
"function"
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/functions.py#L803-L837 | train |
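A quick numeric check of the RAMP shape: zero before `start`, linear in between, flat after `finish`. The function is restated with the same logic so the snippet runs on its own.

```python
def ramp(time, slope, start, finish=0):
    # Same piecewise logic as the pysd function above, restated standalone.
    t = time()
    if t < start:
        return 0
    if finish <= 0:
        return slope * (t - start)
    if t > finish:
        return slope * (finish - start)
    return slope * (t - start)

for t in (0, 5, 7, 12):
    print(t, ramp(lambda: t, slope=2, start=5, finish=10))
# 0 -> 0, 5 -> 0, 7 -> 4, 12 -> 10
```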
JamesPHoughton/pysd | pysd/py_backend/functions.py | pulse | def pulse(time, start, duration):
""" Implements vensim's PULSE function
In range [-inf, start) returns 0
In range [start, start + duration) returns 1
In range [start + duration, +inf] returns 0
"""
t = time()
return 1 if start <= t < start + duration else 0 | python | def pulse(time, start, duration):
""" Implements vensim's PULSE function
In range [-inf, start) returns 0
In range [start, start + duration) returns 1
In range [start + duration, +inf] returns 0
"""
t = time()
return 1 if start <= t < start + duration else 0 | [
"def",
"pulse",
"(",
"time",
",",
"start",
",",
"duration",
")",
":",
"t",
"=",
"time",
"(",
")",
"return",
"1",
"if",
"start",
"<=",
"t",
"<",
"start",
"+",
"duration",
"else",
"0"
] | Implements vensim's PULSE function
In range (-inf, start) returns 0
In range [start, start + duration) returns 1
In range [start + duration, +inf) returns 0 | [
"Implements",
"vensim",
"s",
"PULSE",
"function"
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/functions.py#L859-L867 | train |
JamesPHoughton/pysd | pysd/py_backend/functions.py | pulse_train | def pulse_train(time, start, duration, repeat_time, end):
""" Implements vensim's PULSE TRAIN function
In range [-inf, start) returns 0
In range [start + n * repeat_time, start + n * repeat_time + duration) return 1
In range [start + n * repeat_time + duration, start + (n+1) * repeat_time) return 0
"""
t = time()
if start <= t < end:
return 1 if (t - start) % repeat_time < duration else 0
else:
return 0 | python | def pulse_train(time, start, duration, repeat_time, end):
""" Implements vensim's PULSE TRAIN function
In range [-inf, start) returns 0
In range [start + n * repeat_time, start + n * repeat_time + duration) return 1
In range [start + n * repeat_time + duration, start + (n+1) * repeat_time) return 0
"""
t = time()
if start <= t < end:
return 1 if (t - start) % repeat_time < duration else 0
else:
return 0 | [
"def",
"pulse_train",
"(",
"time",
",",
"start",
",",
"duration",
",",
"repeat_time",
",",
"end",
")",
":",
"t",
"=",
"time",
"(",
")",
"if",
"start",
"<=",
"t",
"<",
"end",
":",
"return",
"1",
"if",
"(",
"t",
"-",
"start",
")",
"%",
"repeat_time",
"<",
"duration",
"else",
"0",
"else",
":",
"return",
"0"
] | Implements vensim's PULSE TRAIN function
In range (-inf, start) returns 0
In range [start + n * repeat_time, start + n * repeat_time + duration) returns 1
In range [start + n * repeat_time + duration, start + (n+1) * repeat_time) returns 0 | [
"Implements",
"vensim",
"s",
"PULSE",
"TRAIN",
"function"
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/functions.py#L870-L881 | train |
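Tabulating PULSE and PULSE TRAIN over integer timesteps makes the half-open `[on, off)` windows visible. For brevity these variants take the current time directly instead of a `time()` callable; otherwise the logic matches the functions above.

```python
def pulse(t, start, duration):
    return 1 if start <= t < start + duration else 0

def pulse_train(t, start, duration, repeat_time, end):
    # On for `duration` at the start of each repeat window, off past `end`.
    if start <= t < end:
        return 1 if (t - start) % repeat_time < duration else 0
    return 0

print([pulse(t, start=2, duration=3) for t in range(8)])
# -> [0, 0, 1, 1, 1, 0, 0, 0]
print([pulse_train(t, start=1, duration=2, repeat_time=4, end=10)
       for t in range(12)])
# -> [0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0]
```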
JamesPHoughton/pysd | pysd/py_backend/functions.py | lookup_extrapolation | def lookup_extrapolation(x, xs, ys):
"""
Intermediate values are calculated with linear interpolation between the intermediate points.
Out-of-range values are calculated with linear extrapolation from the last two values at either end.
"""
length = len(xs)
if x < xs[0]:
dx = xs[1] - xs[0]
dy = ys[1] - ys[0]
k = dy / dx
return ys[0] + (x - xs[0]) * k
if x > xs[length - 1]:
dx = xs[length - 1] - xs[length - 2]
dy = ys[length - 1] - ys[length - 2]
k = dy / dx
return ys[length - 1] + (x - xs[length - 1]) * k
return np.interp(x, xs, ys) | python | def lookup_extrapolation(x, xs, ys):
"""
Intermediate values are calculated with linear interpolation between the intermediate points.
Out-of-range values are calculated with linear extrapolation from the last two values at either end.
"""
length = len(xs)
if x < xs[0]:
dx = xs[1] - xs[0]
dy = ys[1] - ys[0]
k = dy / dx
return ys[0] + (x - xs[0]) * k
if x > xs[length - 1]:
dx = xs[length - 1] - xs[length - 2]
dy = ys[length - 1] - ys[length - 2]
k = dy / dx
return ys[length - 1] + (x - xs[length - 1]) * k
return np.interp(x, xs, ys) | [
"def",
"lookup_extrapolation",
"(",
"x",
",",
"xs",
",",
"ys",
")",
":",
"length",
"=",
"len",
"(",
"xs",
")",
"if",
"x",
"<",
"xs",
"[",
"0",
"]",
":",
"dx",
"=",
"xs",
"[",
"1",
"]",
"-",
"xs",
"[",
"0",
"]",
"dy",
"=",
"ys",
"[",
"1",
"]",
"-",
"ys",
"[",
"0",
"]",
"k",
"=",
"dy",
"/",
"dx",
"return",
"ys",
"[",
"0",
"]",
"+",
"(",
"x",
"-",
"xs",
"[",
"0",
"]",
")",
"*",
"k",
"if",
"x",
">",
"xs",
"[",
"length",
"-",
"1",
"]",
":",
"dx",
"=",
"xs",
"[",
"length",
"-",
"1",
"]",
"-",
"xs",
"[",
"length",
"-",
"2",
"]",
"dy",
"=",
"ys",
"[",
"length",
"-",
"1",
"]",
"-",
"ys",
"[",
"length",
"-",
"2",
"]",
"k",
"=",
"dy",
"/",
"dx",
"return",
"ys",
"[",
"length",
"-",
"1",
"]",
"+",
"(",
"x",
"-",
"xs",
"[",
"length",
"-",
"1",
"]",
")",
"*",
"k",
"return",
"np",
".",
"interp",
"(",
"x",
",",
"xs",
",",
"ys",
")"
] | Intermediate values are calculated with linear interpolation between the intermediate points.
Out-of-range values are calculated with linear extrapolation from the last two values at either end. | [
"Intermediate",
"values",
"are",
"calculated",
"with",
"linear",
"interpolation",
"between",
"the",
"intermediate",
"points",
".",
"Out",
"-",
"of",
"-",
"range",
"values",
"are",
"calculated",
"with",
"linear",
"extrapolation",
"from",
"the",
"last",
"two",
"values",
"at",
"either",
"end",
"."
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/functions.py#L917-L933 | train |
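A worked check of the interpolate-inside / extrapolate-outside behaviour: inside the table `np.interp` does linear interpolation, and outside it the slope of the last two points at that end is extended. The function is restated compactly so the example is self-contained.

```python
import numpy as np

def lookup_extrapolation(x, xs, ys):
    # Below the range: extend the slope of the first two points.
    if x < xs[0]:
        k = (ys[1] - ys[0]) / (xs[1] - xs[0])
        return ys[0] + (x - xs[0]) * k
    # Above the range: extend the slope of the last two points.
    if x > xs[-1]:
        k = (ys[-1] - ys[-2]) / (xs[-1] - xs[-2])
        return ys[-1] + (x - xs[-1]) * k
    return np.interp(x, xs, ys)

xs = [0.0, 1.0, 2.0]
ys = [0.0, 10.0, 15.0]
print(lookup_extrapolation(-1.0, xs, ys))  # -10.0: slope 10 below the range
print(lookup_extrapolation(0.5, xs, ys))   # 5.0: linear interpolation
print(lookup_extrapolation(3.0, xs, ys))   # 20.0: slope 5 above the range
```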
JamesPHoughton/pysd | pysd/py_backend/functions.py | xidz | def xidz(numerator, denominator, value_if_denom_is_zero):
"""
Implements Vensim's XIDZ function.
This function executes a division, robust to denominator being zero.
In the case of zero denominator, the final argument is returned.
Parameters
----------
numerator: float
denominator: float
Components of the division operation
value_if_denom_is_zero: float
The value to return if the denominator is zero
Returns
-------
numerator / denominator if abs(denominator) > 1e-6
otherwise, returns value_if_denom_is_zero
"""
small = 1e-6 # What is considered zero according to Vensim Help
if abs(denominator) < small:
return value_if_denom_is_zero
else:
return numerator * 1.0 / denominator | python | def xidz(numerator, denominator, value_if_denom_is_zero):
"""
Implements Vensim's XIDZ function.
This function executes a division, robust to denominator being zero.
In the case of zero denominator, the final argument is returned.
Parameters
----------
numerator: float
denominator: float
Components of the division operation
value_if_denom_is_zero: float
The value to return if the denominator is zero
Returns
-------
numerator / denominator if abs(denominator) > 1e-6
otherwise, returns value_if_denom_is_zero
"""
small = 1e-6 # What is considered zero according to Vensim Help
if abs(denominator) < small:
return value_if_denom_is_zero
else:
return numerator * 1.0 / denominator | [
"def",
"xidz",
"(",
"numerator",
",",
"denominator",
",",
"value_if_denom_is_zero",
")",
":",
"small",
"=",
"1e-6",
"# What is considered zero according to Vensim Help",
"if",
"abs",
"(",
"denominator",
")",
"<",
"small",
":",
"return",
"value_if_denom_is_zero",
"else",
":",
"return",
"numerator",
"*",
"1.0",
"/",
"denominator"
] | Implements Vensim's XIDZ function.
This function executes a division, robust to denominator being zero.
In the case of zero denominator, the final argument is returned.
Parameters
----------
numerator: float
denominator: float
Components of the division operation
value_if_denom_is_zero: float
The value to return if the denominator is zero
Returns
-------
numerator / denominator if abs(denominator) > 1e-6
otherwise, returns value_if_denom_is_zero | [
"Implements",
"Vensim",
"s",
"XIDZ",
"function",
".",
"This",
"function",
"executes",
"a",
"division",
"robust",
"to",
"denominator",
"being",
"zero",
".",
"In",
"the",
"case",
"of",
"zero",
"denominator",
"the",
"final",
"argument",
"is",
"returned",
"."
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/functions.py#L950-L973 | train |
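XIDZ in one line, with the same 1e-6 zero threshold as above; the examples show ordinary division, the fallback for an exact zero, and the fallback for a denominator below the threshold.

```python
def xidz(numerator, denominator, value_if_denom_is_zero):
    # |denominator| < 1e-6 counts as zero, matching the Vensim convention.
    return (value_if_denom_is_zero if abs(denominator) < 1e-6
            else numerator / denominator)

print(xidz(10, 4, 0))      # -> 2.5
print(xidz(10, 0, 0))      # -> 0
print(xidz(10, 1e-9, -1))  # -> -1 (denominator treated as zero)
```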
JamesPHoughton/pysd | pysd/py_backend/functions.py | Macro.initialize | def initialize(self, initialization_order=None):
"""
This function tries to initialize the stateful objects.
In the case where an initialization function for `Stock A` depends on
the value of `Stock B`, if we try to initialize `Stock A` before `Stock B`
then we will get an error, as the value will not yet exist.
In this case, just skip initializing `Stock A` for now, and
go on to the other state initializations. Then come back to it and try again.
"""
# Initialize time
if self.time is None:
if self.time_initialization is None:
self.time = Time()
else:
self.time = self.time_initialization()
# if self.time is None:
# self.time = time
# self.components.time = self.time
# self.components.functions.time = self.time # rewrite functions so we don't need this
self.components._init_outer_references({
'scope': self,
'time': self.time
})
remaining = set(self._stateful_elements)
while remaining:
progress = set()
for element in remaining:
try:
element.initialize()
progress.add(element)
except (KeyError, TypeError, AttributeError):
pass
if progress:
remaining.difference_update(progress)
else:
raise KeyError('Unresolvable Reference: Probable circular initialization' +
'\n'.join([repr(e) for e in remaining])) | python | def initialize(self, initialization_order=None):
"""
This function tries to initialize the stateful objects.
In the case where an initialization function for `Stock A` depends on
the value of `Stock B`, if we try to initialize `Stock A` before `Stock B`
then we will get an error, as the value will not yet exist.
In this case, just skip initializing `Stock A` for now, and
go on to the other state initializations. Then come back to it and try again.
"""
# Initialize time
if self.time is None:
if self.time_initialization is None:
self.time = Time()
else:
self.time = self.time_initialization()
# if self.time is None:
# self.time = time
# self.components.time = self.time
# self.components.functions.time = self.time # rewrite functions so we don't need this
self.components._init_outer_references({
'scope': self,
'time': self.time
})
remaining = set(self._stateful_elements)
while remaining:
progress = set()
for element in remaining:
try:
element.initialize()
progress.add(element)
except (KeyError, TypeError, AttributeError):
pass
if progress:
remaining.difference_update(progress)
else:
raise KeyError('Unresolvable Reference: Probable circular initialization' +
'\n'.join([repr(e) for e in remaining])) | [
"def",
"initialize",
"(",
"self",
",",
"initialization_order",
"=",
"None",
")",
":",
"# Initialize time",
"if",
"self",
".",
"time",
"is",
"None",
":",
"if",
"self",
".",
"time_initialization",
"is",
"None",
":",
"self",
".",
"time",
"=",
"Time",
"(",
")",
"else",
":",
"self",
".",
"time",
"=",
"self",
".",
"time_initialization",
"(",
")",
"# if self.time is None:",
"# self.time = time",
"# self.components.time = self.time",
"# self.components.functions.time = self.time # rewrite functions so we don't need this",
"self",
".",
"components",
".",
"_init_outer_references",
"(",
"{",
"'scope'",
":",
"self",
",",
"'time'",
":",
"self",
".",
"time",
"}",
")",
"remaining",
"=",
"set",
"(",
"self",
".",
"_stateful_elements",
")",
"while",
"remaining",
":",
"progress",
"=",
"set",
"(",
")",
"for",
"element",
"in",
"remaining",
":",
"try",
":",
"element",
".",
"initialize",
"(",
")",
"progress",
".",
"add",
"(",
"element",
")",
"except",
"(",
"KeyError",
",",
"TypeError",
",",
"AttributeError",
")",
":",
"pass",
"if",
"progress",
":",
"remaining",
".",
"difference_update",
"(",
"progress",
")",
"else",
":",
"raise",
"KeyError",
"(",
"'Unresolvable Reference: Probable circular initialization'",
"+",
"'\\n'",
".",
"join",
"(",
"[",
"repr",
"(",
"e",
")",
"for",
"e",
"in",
"remaining",
"]",
")",
")"
] | This function tries to initialize the stateful objects.
In the case where an initialization function for `Stock A` depends on
the value of `Stock B`, if we try to initialize `Stock A` before `Stock B`
then we will get an error, as the value will not yet exist.
In this case, just skip initializing `Stock A` for now, and
go on to the other state initializations. Then come back to it and try again. | [
"This",
"function",
"tries",
"to",
"initialize",
"the",
"stateful",
"objects",
"."
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/functions.py#L320-L363 | train |
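A generic sketch of the retry-until-progress loop used by `initialize` above: attempt every remaining element each pass, treat a dependency-not-ready exception as "try again later", and fail only when a full pass makes no progress, which suggests a cycle. The `Stock` class here is a toy stand-in for pysd's stateful elements.

```python
class Stock:
    """Toy stateful element whose init function may need other values."""
    def __init__(self, name, init, deps=()):
        self.name, self._init, self.deps = name, init, deps
        self.value = None

    def initialize(self, ready):
        if any(d not in ready for d in self.deps):
            raise KeyError(self.name)  # a dependency is not initialized yet
        self.value = self._init(ready)

def try_init(stock, ready):
    try:
        stock.initialize(ready)
    except KeyError:
        return False
    ready[stock.name] = stock.value
    return True

def initialize_all(stocks):
    ready, remaining = {}, set(stocks)
    while remaining:
        progress = {s for s in remaining if try_init(s, ready)}
        if not progress:
            raise KeyError("probable circular initialization: "
                           + ", ".join(s.name for s in remaining))
        remaining -= progress
    return ready

a = Stock("A", lambda r: r["B"] + 1, deps=("B",))
b = Stock("B", lambda r: 10)
print(initialize_all([a, b]))  # -> {'B': 10, 'A': 11}
```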
JamesPHoughton/pysd | pysd/py_backend/functions.py | Macro.set_components | def set_components(self, params):
""" Set the value of exogenous model elements.
Element values can be passed as keyword=value pairs in the function call.
Values can be numeric type or pandas Series.
Series will be interpolated by integrator.
Examples
--------
>>> model.set_components({'birth_rate': 10})
>>> model.set_components({'Birth Rate': 10})
>>> br = pandas.Series(index=range(30), data=np.sin(range(30)))
>>> model.set_components({'birth_rate': br})
"""
# It might make sense to allow the params argument to take a pandas series, where
# the indices of the series are variable names. This would make it easier to
# do a Pandas apply on a DataFrame of parameter values. However, this may conflict
# with a pandas series being passed in as a dictionary element.
for key, value in params.items():
if isinstance(value, pd.Series):
new_function = self._timeseries_component(value)
elif callable(value):
new_function = value
else:
new_function = self._constant_component(value)
func_name = utils.get_value_by_insensitive_key_or_value(key, self.components._namespace)
if func_name is None:
raise NameError('%s is not recognized as a model component' % key)
if '_integ_' + func_name in dir(self.components): # this won't handle other statefuls...
warnings.warn("Replacing the equation of stock {} with params".format(key),
stacklevel=2)
setattr(self.components, func_name, new_function) | python | def set_components(self, params):
""" Set the value of exogenous model elements.
Element values can be passed as keyword=value pairs in the function call.
Values can be numeric type or pandas Series.
Series will be interpolated by integrator.
Examples
--------
>>> model.set_components({'birth_rate': 10})
>>> model.set_components({'Birth Rate': 10})
>>> br = pandas.Series(index=range(30), data=np.sin(range(30)))
>>> model.set_components({'birth_rate': br})
"""
# It might make sense to allow the params argument to take a pandas series, where
# the indices of the series are variable names. This would make it easier to
# do a Pandas apply on a DataFrame of parameter values. However, this may conflict
# with a pandas series being passed in as a dictionary element.
for key, value in params.items():
if isinstance(value, pd.Series):
new_function = self._timeseries_component(value)
elif callable(value):
new_function = value
else:
new_function = self._constant_component(value)
func_name = utils.get_value_by_insensitive_key_or_value(key, self.components._namespace)
if func_name is None:
raise NameError('%s is not recognized as a model component' % key)
if '_integ_' + func_name in dir(self.components): # this won't handle other statefuls...
warnings.warn("Replacing the equation of stock {} with params".format(key),
stacklevel=2)
setattr(self.components, func_name, new_function) | [
"def",
"set_components",
"(",
"self",
",",
"params",
")",
":",
"# It might make sense to allow the params argument to take a pandas series, where",
"# the indices of the series are variable names. This would make it easier to",
"# do a Pandas apply on a DataFrame of parameter values. However, this may conflict",
"# with a pandas series being passed in as a dictionary element.",
"for",
"key",
",",
"value",
"in",
"params",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"pd",
".",
"Series",
")",
":",
"new_function",
"=",
"self",
".",
"_timeseries_component",
"(",
"value",
")",
"elif",
"callable",
"(",
"value",
")",
":",
"new_function",
"=",
"value",
"else",
":",
"new_function",
"=",
"self",
".",
"_constant_component",
"(",
"value",
")",
"func_name",
"=",
"utils",
".",
"get_value_by_insensitive_key_or_value",
"(",
"key",
",",
"self",
".",
"components",
".",
"_namespace",
")",
"if",
"func_name",
"is",
"None",
":",
"raise",
"NameError",
"(",
"'%s is not recognized as a model component'",
"%",
"key",
")",
"if",
"'_integ_'",
"+",
"func_name",
"in",
"dir",
"(",
"self",
".",
"components",
")",
":",
"# this won't handle other statefuls...",
"warnings",
".",
"warn",
"(",
"\"Replacing the equation of stock {} with params\"",
".",
"format",
"(",
"key",
")",
",",
"stacklevel",
"=",
"2",
")",
"setattr",
"(",
"self",
".",
"components",
",",
"func_name",
",",
"new_function",
")"
] | Set the value of exogenous model elements.
Element values can be passed as keyword=value pairs in the function call.
Values can be numeric type or pandas Series.
Series will be interpolated by integrator.
Examples
--------
>>> model.set_components({'birth_rate': 10})
>>> model.set_components({'Birth Rate': 10})
>>> br = pandas.Series(index=range(30), data=np.sin(range(30)))
>>> model.set_components({'birth_rate': br}) | [
"Set",
"the",
"value",
"of",
"exogenous",
"model",
"elements",
".",
"Element",
"values",
"can",
"be",
"passed",
"as",
"keyword",
"=",
"value",
"pairs",
"in",
"the",
"function",
"call",
".",
"Values",
"can",
"be",
"numeric",
"type",
"or",
"pandas",
"Series",
".",
"Series",
"will",
"be",
"interpolated",
"by",
"integrator",
"."
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/functions.py#L376-L415 | train |
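A distilled, standalone sketch of the three replacement paths above. The helper names below are illustrative stand-ins for the private _constant_component and _timeseries_component closures, not PySD's actual implementations:

import numpy as np
import pandas as pd

def constant_component(value):
    # a numeric parameter becomes a zero-argument callable
    return lambda: value

def timeseries_component(series, time):
    # a pandas Series becomes a callable interpolated at the current model time
    return lambda: np.interp(time(), series.index, series.values)

time = lambda: 3.0  # stand-in for the model clock
print(constant_component(10)())                                            # 10
print(timeseries_component(pd.Series([0.0, 10.0], index=[0, 5]), time)())  # 6.0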
JamesPHoughton/pysd | pysd/py_backend/functions.py | Macro._timeseries_component | def _timeseries_component(self, series):
""" Internal function for creating a timeseries model element """
# this is only called if the set_components function recognizes a pandas series
# Todo: raise a warning if extrapolating from the end of the series.
return lambda: np.interp(self.time(), series.index, series.values) | python | def _timeseries_component(self, series):
""" Internal function for creating a timeseries model element """
# this is only called if the set_components function recognizes a pandas series
# Todo: raise a warning if extrapolating from the end of the series.
return lambda: np.interp(self.time(), series.index, series.values) | [
"def",
"_timeseries_component",
"(",
"self",
",",
"series",
")",
":",
"# this is only called if the set_components function recognizes a pandas series",
"# Todo: raise a warning if extrapolating from the end of the series.",
"return",
"lambda",
":",
"np",
".",
"interp",
"(",
"self",
".",
"time",
"(",
")",
",",
"series",
".",
"index",
",",
"series",
".",
"values",
")"
] | Internal function for creating a timeseries model element | [
"Internal",
"function",
"for",
"creating",
"a",
"timeseries",
"model",
"element"
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/functions.py#L417-L421 | train |
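A quick check on the Todo above: np.interp does not extrapolate beyond the series' index range; it holds the endpoint values, so querying past the end silently returns the last value (the numbers below are illustrative):

import numpy as np
import pandas as pd

series = pd.Series([0.0, 10.0, 20.0], index=[0, 5, 10])
print(np.interp(7.5, series.index, series.values))   # 15.0 -- interpolated
print(np.interp(50.0, series.index, series.values))  # 20.0 -- clamped, not extrapolated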
JamesPHoughton/pysd | pysd/py_backend/functions.py | Macro.set_state | def set_state(self, t, state):
""" Set the system state.
Parameters
----------
t : numeric
The system time
state : dict
A (possibly partial) dictionary of the system state.
The keys to this dictionary may be either pysafe names or original model file names
"""
self.time.update(t)
for key, value in state.items():
# TODO Implement map with reference between component and stateful element?
component_name = utils.get_value_by_insensitive_key_or_value(key, self.components._namespace)
if component_name is not None:
stateful_name = '_integ_%s' % component_name
else:
component_name = key
stateful_name = key
# Try to update stateful component
if hasattr(self.components, stateful_name):
try:
element = getattr(self.components, stateful_name)
element.update(value)
except AttributeError:
print("'%s' has no state elements, assignment failed" % key)
raise
else:
# Try to override component
try:
setattr(self.components, component_name, self._constant_component(value))
except AttributeError:
print("'%s' has no component, assignment failed" % key)
raise | python | def set_state(self, t, state):
""" Set the system state.
Parameters
----------
t : numeric
The system time
state : dict
A (possibly partial) dictionary of the system state.
The keys to this dictionary may be either pysafe names or original model file names
"""
self.time.update(t)
for key, value in state.items():
# TODO Implement map with reference between component and stateful element?
component_name = utils.get_value_by_insensitive_key_or_value(key, self.components._namespace)
if component_name is not None:
stateful_name = '_integ_%s' % component_name
else:
component_name = key
stateful_name = key
# Try to update stateful component
if hasattr(self.components, stateful_name):
try:
element = getattr(self.components, stateful_name)
element.update(value)
except AttributeError:
print("'%s' has no state elements, assignment failed" % key)
raise
else:
# Try to override component
try:
setattr(self.components, component_name, self._constant_component(value))
except AttributeError:
print("'%s' has no component, assignment failed" % key)
raise | [
"def",
"set_state",
"(",
"self",
",",
"t",
",",
"state",
")",
":",
"self",
".",
"time",
".",
"update",
"(",
"t",
")",
"for",
"key",
",",
"value",
"in",
"state",
".",
"items",
"(",
")",
":",
"# TODO Implement map with reference between component and stateful element?",
"component_name",
"=",
"utils",
".",
"get_value_by_insensitive_key_or_value",
"(",
"key",
",",
"self",
".",
"components",
".",
"_namespace",
")",
"if",
"component_name",
"is",
"not",
"None",
":",
"stateful_name",
"=",
"'_integ_%s'",
"%",
"component_name",
"else",
":",
"component_name",
"=",
"key",
"stateful_name",
"=",
"key",
"# Try to update stateful component",
"if",
"hasattr",
"(",
"self",
".",
"components",
",",
"stateful_name",
")",
":",
"try",
":",
"element",
"=",
"getattr",
"(",
"self",
".",
"components",
",",
"stateful_name",
")",
"element",
".",
"update",
"(",
"value",
")",
"except",
"AttributeError",
":",
"print",
"(",
"\"'%s' has no state elements, assignment failed\"",
")",
"raise",
"else",
":",
"# Try to override component",
"try",
":",
"setattr",
"(",
"self",
".",
"components",
",",
"component_name",
",",
"self",
".",
"_constant_component",
"(",
"value",
")",
")",
"except",
"AttributeError",
":",
"print",
"(",
"\"'%s' has no component, assignment failed\"",
")",
"raise"
] | Set the system state.
Parameters
----------
t : numeric
The system time
state : dict
A (possibly partial) dictionary of the system state.
The keys to this dictionary may be either pysafe names or original model file names | [
"Set",
"the",
"system",
"state",
"."
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/functions.py#L427-L464 | train |
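A toy illustration of the '_integ_' naming convention set_state relies on. IntegSketch is a stand-in for PySD's real stateful Integ object, and the variable name is invented:

class IntegSketch(object):
    def __init__(self, value):
        self.state = value
    def update(self, value):
        self.state = value

class Components(object):
    pass

components = Components()
components._integ_teacup_temperature = IntegSketch(180.0)

key = 'teacup_temperature'
stateful_name = '_integ_%s' % key
getattr(components, stateful_name).update(50.0)  # what set_state does for stocks
print(components._integ_teacup_temperature.state)  # 50.0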
JamesPHoughton/pysd | pysd/py_backend/functions.py | Macro.clear_caches | def clear_caches(self):
""" Clears the Caches for all model elements """
for element_name in dir(self.components):
element = getattr(self.components, element_name)
if hasattr(element, 'cache_val'):
delattr(element, 'cache_val') | python | def clear_caches(self):
""" Clears the Caches for all model elements """
for element_name in dir(self.components):
element = getattr(self.components, element_name)
if hasattr(element, 'cache_val'):
delattr(element, 'cache_val') | [
"def",
"clear_caches",
"(",
"self",
")",
":",
"for",
"element_name",
"in",
"dir",
"(",
"self",
".",
"components",
")",
":",
"element",
"=",
"getattr",
"(",
"self",
".",
"components",
",",
"element_name",
")",
"if",
"hasattr",
"(",
"element",
",",
"'cache_val'",
")",
":",
"delattr",
"(",
"element",
",",
"'cache_val'",
")"
] | Clears the Caches for all model elements | [
"Clears",
"the",
"Caches",
"for",
"all",
"model",
"elements"
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/functions.py#L466-L471 | train |
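A minimal illustration of the caching convention clear_caches depends on: cached callables stash their value in a cache_val attribute, so deleting that attribute forces recomputation on the next call (the class below is a sketch, not PySD's actual cache decorator):

class CachedElement(object):
    def __call__(self):
        if not hasattr(self, 'cache_val'):
            self.cache_val = self.compute()
        return self.cache_val
    def compute(self):
        return 42

elem = CachedElement()
elem()  # computes and caches 42
if hasattr(elem, 'cache_val'):
    delattr(elem, 'cache_val')  # exactly what clear_caches does per element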
JamesPHoughton/pysd | pysd/py_backend/functions.py | Macro.doc | def doc(self):
"""
Formats a table of documentation strings to help users remember variable names, and
understand how they are translated into python safe names.
Returns
-------
docs_df: pandas dataframe
Dataframe with columns for the model components:
- Real names
- Python safe identifiers (as used in model.components)
- Units string
- Documentation strings from the original model file
"""
collector = []
for name, varname in self.components._namespace.items():
try:
docstring = getattr(self.components, varname).__doc__
lines = docstring.split('\n')
collector.append({'Real Name': name,
'Py Name': varname,
'Eqn': lines[2].replace("Original Eqn:", "").strip(),
'Unit': lines[3].replace("Units:", "").strip(),
'Lims': lines[4].replace("Limits:", "").strip(),
'Type': lines[5].replace("Type:", "").strip(),
'Comment': '\n'.join(lines[7:]).strip()})
except:
pass
docs_df = _pd.DataFrame(collector)
docs_df.fillna('None', inplace=True)
order = ['Real Name', 'Py Name', 'Unit', 'Lims', 'Type', 'Eqn', 'Comment']
return docs_df[order].sort_values(by='Real Name').reset_index(drop=True) | python | def doc(self):
"""
Formats a table of documentation strings to help users remember variable names, and
understand how they are translated into python safe names.
Returns
-------
docs_df: pandas dataframe
Dataframe with columns for the model components:
- Real names
- Python safe identifiers (as used in model.components)
- Units string
- Documentation strings from the original model file
"""
collector = []
for name, varname in self.components._namespace.items():
try:
docstring = getattr(self.components, varname).__doc__
lines = docstring.split('\n')
collector.append({'Real Name': name,
'Py Name': varname,
'Eqn': lines[2].replace("Original Eqn:", "").strip(),
'Unit': lines[3].replace("Units:", "").strip(),
'Lims': lines[4].replace("Limits:", "").strip(),
'Type': lines[5].replace("Type:", "").strip(),
'Comment': '\n'.join(lines[7:]).strip()})
except:
pass
docs_df = _pd.DataFrame(collector)
docs_df.fillna('None', inplace=True)
order = ['Real Name', 'Py Name', 'Unit', 'Lims', 'Type', 'Eqn', 'Comment']
return docs_df[order].sort_values(by='Real Name').reset_index(drop=True) | [
"def",
"doc",
"(",
"self",
")",
":",
"collector",
"=",
"[",
"]",
"for",
"name",
",",
"varname",
"in",
"self",
".",
"components",
".",
"_namespace",
".",
"items",
"(",
")",
":",
"try",
":",
"docstring",
"=",
"getattr",
"(",
"self",
".",
"components",
",",
"varname",
")",
".",
"__doc__",
"lines",
"=",
"docstring",
".",
"split",
"(",
"'\\n'",
")",
"collector",
".",
"append",
"(",
"{",
"'Real Name'",
":",
"name",
",",
"'Py Name'",
":",
"varname",
",",
"'Eqn'",
":",
"lines",
"[",
"2",
"]",
".",
"replace",
"(",
"\"Original Eqn:\"",
",",
"\"\"",
")",
".",
"strip",
"(",
")",
",",
"'Unit'",
":",
"lines",
"[",
"3",
"]",
".",
"replace",
"(",
"\"Units:\"",
",",
"\"\"",
")",
".",
"strip",
"(",
")",
",",
"'Lims'",
":",
"lines",
"[",
"4",
"]",
".",
"replace",
"(",
"\"Limits:\"",
",",
"\"\"",
")",
".",
"strip",
"(",
")",
",",
"'Type'",
":",
"lines",
"[",
"5",
"]",
".",
"replace",
"(",
"\"Type:\"",
",",
"\"\"",
")",
".",
"strip",
"(",
")",
",",
"'Comment'",
":",
"'\\n'",
".",
"join",
"(",
"lines",
"[",
"7",
":",
"]",
")",
".",
"strip",
"(",
")",
"}",
")",
"except",
":",
"pass",
"docs_df",
"=",
"_pd",
".",
"DataFrame",
"(",
"collector",
")",
"docs_df",
".",
"fillna",
"(",
"'None'",
",",
"inplace",
"=",
"True",
")",
"order",
"=",
"[",
"'Real Name'",
",",
"'Py Name'",
",",
"'Unit'",
",",
"'Lims'",
",",
"'Type'",
",",
"'Eqn'",
",",
"'Comment'",
"]",
"return",
"docs_df",
"[",
"order",
"]",
".",
"sort_values",
"(",
"by",
"=",
"'Real Name'",
")",
".",
"reset_index",
"(",
"drop",
"=",
"True",
")"
] | Formats a table of documentation strings to help users remember variable names, and
understand how they are translated into python safe names.
Returns
-------
docs_df: pandas dataframe
Dataframe with columns for the model components:
- Real names
- Python safe identifiers (as used in model.components)
- Units string
- Documentation strings from the original model file | [
"Formats",
"a",
"table",
"of",
"documentation",
"strings",
"to",
"help",
"users",
"remember",
"variable",
"names",
"and",
"understand",
"how",
"they",
"are",
"translated",
"into",
"python",
"safe",
"names",
"."
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/functions.py#L473-L506 | train |
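A quick check of the line-indexed parsing above, using a made-up docstring laid out the way the indices in doc() imply the PySD builder emits them (the exact layout is an assumption inferred from this function, not taken from the builder):

docstring = """
Teacup Temperature
Original Eqn: INTEG(-heat loss to room, 180)
Units: Degrees
Limits: (0, 300)
Type: component

The temperature of the teacup."""

lines = docstring.split('\n')
print(lines[2].replace("Original Eqn:", "").strip())  # INTEG(-heat loss to room, 180)
print(lines[3].replace("Units:", "").strip())         # Degrees
print('\n'.join(lines[7:]).strip())                   # The temperature of the teacup.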
JamesPHoughton/pysd | pysd/py_backend/functions.py | Model.initialize | def initialize(self):
""" Initializes the simulation model """
self.time.update(self.components.initial_time())
self.time.stage = 'Initialization'
super(Model, self).initialize() | python | def initialize(self):
""" Initializes the simulation model """
self.time.update(self.components.initial_time())
self.time.stage = 'Initialization'
super(Model, self).initialize() | [
"def",
"initialize",
"(",
"self",
")",
":",
"self",
".",
"time",
".",
"update",
"(",
"self",
".",
"components",
".",
"initial_time",
"(",
")",
")",
"self",
".",
"time",
".",
"stage",
"=",
"'Initialization'",
"super",
"(",
"Model",
",",
"self",
")",
".",
"initialize",
"(",
")"
] | Initializes the simulation model | [
"Initializes",
"the",
"simulation",
"model"
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/functions.py#L547-L551 | train |
JamesPHoughton/pysd | pysd/py_backend/functions.py | Model._format_return_timestamps | def _format_return_timestamps(self, return_timestamps=None):
"""
Format the passed in return timestamps value as a numpy array.
If no value is passed, build up array of timestamps based upon
model start and end times, and the 'saveper' value.
"""
if return_timestamps is None:
# Build based upon model file Start, Stop times and Saveper
# Vensim's standard is to expect that the data set includes the `final time`,
# so we have to add an extra period to make sure we get that value in what
# numpy's `arange` gives us.
return_timestamps_array = np.arange(
self.components.initial_time(),
self.components.final_time() + self.components.saveper(),
self.components.saveper(), dtype=np.float64
)
elif inspect.isclass(range) and isinstance(return_timestamps, range):
return_timestamps_array = np.array(return_timestamps, ndmin=1)
elif isinstance(return_timestamps, (list, int, float, np.ndarray)):
return_timestamps_array = np.array(return_timestamps, ndmin=1)
elif isinstance(return_timestamps, _pd.Series):
return_timestamps_array = return_timestamps.as_matrix()
else:
raise TypeError('`return_timestamps` expects a list, array, pandas Series, '
'or numeric value')
return return_timestamps_array | python | def _format_return_timestamps(self, return_timestamps=None):
"""
Format the passed in return timestamps value as a numpy array.
If no value is passed, build up array of timestamps based upon
model start and end times, and the 'saveper' value.
"""
if return_timestamps is None:
# Build based upon model file Start, Stop times and Saveper
# Vensim's standard is to expect that the data set includes the `final time`,
# so we have to add an extra period to make sure we get that value in what
# numpy's `arange` gives us.
return_timestamps_array = np.arange(
self.components.initial_time(),
self.components.final_time() + self.components.saveper(),
self.components.saveper(), dtype=np.float64
)
elif inspect.isclass(range) and isinstance(return_timestamps, range):
return_timestamps_array = np.array(return_timestamps, ndmin=1)
elif isinstance(return_timestamps, (list, int, float, np.ndarray)):
return_timestamps_array = np.array(return_timestamps, ndmin=1)
elif isinstance(return_timestamps, _pd.Series):
return_timestamps_array = return_timestamps.as_matrix()
else:
raise TypeError('`return_timestamps` expects a list, array, pandas Series, '
'or numeric value')
return return_timestamps_array | [
"def",
"_format_return_timestamps",
"(",
"self",
",",
"return_timestamps",
"=",
"None",
")",
":",
"if",
"return_timestamps",
"is",
"None",
":",
"# Build based upon model file Start, Stop times and Saveper",
"# Vensim's standard is to expect that the data set includes the `final time`,",
"# so we have to add an extra period to make sure we get that value in what",
"# numpy's `arange` gives us.",
"return_timestamps_array",
"=",
"np",
".",
"arange",
"(",
"self",
".",
"components",
".",
"initial_time",
"(",
")",
",",
"self",
".",
"components",
".",
"final_time",
"(",
")",
"+",
"self",
".",
"components",
".",
"saveper",
"(",
")",
",",
"self",
".",
"components",
".",
"saveper",
"(",
")",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"elif",
"inspect",
".",
"isclass",
"(",
"range",
")",
"and",
"isinstance",
"(",
"return_timestamps",
",",
"range",
")",
":",
"return_timestamps_array",
"=",
"np",
".",
"array",
"(",
"return_timestamps",
",",
"ndmin",
"=",
"1",
")",
"elif",
"isinstance",
"(",
"return_timestamps",
",",
"(",
"list",
",",
"int",
",",
"float",
",",
"np",
".",
"ndarray",
")",
")",
":",
"return_timestamps_array",
"=",
"np",
".",
"array",
"(",
"return_timestamps",
",",
"ndmin",
"=",
"1",
")",
"elif",
"isinstance",
"(",
"return_timestamps",
",",
"_pd",
".",
"Series",
")",
":",
"return_timestamps_array",
"=",
"return_timestamps",
".",
"as_matrix",
"(",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'`return_timestamps` expects a list, array, pandas Series, '",
"'or numeric value'",
")",
"return",
"return_timestamps_array"
] | Format the passed in return timestamps value as a numpy array.
If no value is passed, build up array of timestamps based upon
model start and end times, and the 'saveper' value. | [
"Format",
"the",
"passed",
"in",
"return",
"timestamps",
"value",
"as",
"a",
"numpy",
"array",
".",
"If",
"no",
"value",
"is",
"passed",
"build",
"up",
"array",
"of",
"timestamps",
"based",
"upon",
"model",
"start",
"and",
"end",
"times",
"and",
"the",
"saveper",
"value",
"."
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/functions.py#L588-L613 | train |
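Why the extra saveper is added in the default branch: np.arange excludes its stop value, so the stop is pushed out by one save period to make sure the final time lands in the output. The numbers below are stand-ins for the model's INITIAL TIME, FINAL TIME and SAVEPER:

import numpy as np

initial_time, final_time, saveper = 0.0, 10.0, 2.5
stamps = np.arange(initial_time, final_time + saveper, saveper, dtype=np.float64)
print(stamps)  # [ 0.   2.5  5.   7.5 10. ] -- the final time is included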
JamesPHoughton/pysd | pysd/py_backend/functions.py | Model.run | def run(self, params=None, return_columns=None, return_timestamps=None,
initial_condition='original', reload=False):
""" Simulate the model's behavior over time.
Return a pandas dataframe with timestamps as rows,
model elements as columns.
Parameters
----------
params : dictionary
Keys are strings of model component names.
Values are numeric or pandas Series.
Numeric values represent constants over the model integration.
Timeseries will be interpolated to give time-varying input.
return_timestamps : list, numeric, numpy array(1-D)
Timestamps in model execution at which to return state information.
Defaults to model-file specified timesteps.
return_columns : list of string model component names
Returned dataframe will have corresponding columns.
Defaults to model stock values.
initial_condition : 'original'/'o', 'current'/'c', (t, {state})
The starting time, and the state of the system (the values of all the stocks)
at that starting time.
* 'original' (default) uses model-file specified initial condition
* 'current' uses the state of the model after the previous execution
* (t, {state}) lets the user specify a starting time and (possibly partial)
list of stock values.
reload : bool
If true, reloads the model from the translated model file before making changes
Examples
--------
>>> model.run(params={'exogenous_constant': 42})
>>> model.run(params={'exogenous_variable': timeseries_input})
>>> model.run(return_timestamps=[1, 2, 3.1415, 4, 10])
>>> model.run(return_timestamps=10)
>>> model.run(return_timestamps=np.linspace(1, 10, 20))
See Also
--------
pysd.set_components : handles setting model parameters
pysd.set_initial_condition : handles setting initial conditions
"""
if reload:
self.reload()
if params:
self.set_components(params)
self.set_initial_condition(initial_condition)
return_timestamps = self._format_return_timestamps(return_timestamps)
t_series = self._build_euler_timeseries(return_timestamps)
if return_columns is None:
return_columns = self._default_return_columns()
self.time.stage = 'Run'
self.clear_caches()
capture_elements, return_addresses = utils.get_return_elements(
return_columns, self.components._namespace, self.components._subscript_dict)
res = self._integrate(t_series, capture_elements, return_timestamps)
return_df = utils.make_flat_df(res, return_addresses)
return_df.index = return_timestamps
return return_df | python | def run(self, params=None, return_columns=None, return_timestamps=None,
initial_condition='original', reload=False):
""" Simulate the model's behavior over time.
Return a pandas dataframe with timestamps as rows,
model elements as columns.
Parameters
----------
params : dictionary
Keys are strings of model component names.
Values are numeric or pandas Series.
Numeric values represent constants over the model integration.
Timeseries will be interpolated to give time-varying input.
return_timestamps : list, numeric, numpy array(1-D)
Timestamps in model execution at which to return state information.
Defaults to model-file specified timesteps.
return_columns : list of string model component names
Returned dataframe will have corresponding columns.
Defaults to model stock values.
initial_condition : 'original'/'o', 'current'/'c', (t, {state})
The starting time, and the state of the system (the values of all the stocks)
at that starting time.
* 'original' (default) uses model-file specified initial condition
* 'current' uses the state of the model after the previous execution
* (t, {state}) lets the user specify a starting time and (possibly partial)
list of stock values.
reload : bool
If true, reloads the model from the translated model file before making changes
Examples
--------
>>> model.run(params={'exogenous_constant': 42})
>>> model.run(params={'exogenous_variable': timeseries_input})
>>> model.run(return_timestamps=[1, 2, 3.1415, 4, 10])
>>> model.run(return_timestamps=10)
>>> model.run(return_timestamps=np.linspace(1, 10, 20))
See Also
--------
pysd.set_components : handles setting model parameters
pysd.set_initial_condition : handles setting initial conditions
"""
if reload:
self.reload()
if params:
self.set_components(params)
self.set_initial_condition(initial_condition)
return_timestamps = self._format_return_timestamps(return_timestamps)
t_series = self._build_euler_timeseries(return_timestamps)
if return_columns is None:
return_columns = self._default_return_columns()
self.time.stage = 'Run'
self.clear_caches()
capture_elements, return_addresses = utils.get_return_elements(
return_columns, self.components._namespace, self.components._subscript_dict)
res = self._integrate(t_series, capture_elements, return_timestamps)
return_df = utils.make_flat_df(res, return_addresses)
return_df.index = return_timestamps
return return_df | [
"def",
"run",
"(",
"self",
",",
"params",
"=",
"None",
",",
"return_columns",
"=",
"None",
",",
"return_timestamps",
"=",
"None",
",",
"initial_condition",
"=",
"'original'",
",",
"reload",
"=",
"False",
")",
":",
"if",
"reload",
":",
"self",
".",
"reload",
"(",
")",
"if",
"params",
":",
"self",
".",
"set_components",
"(",
"params",
")",
"self",
".",
"set_initial_condition",
"(",
"initial_condition",
")",
"return_timestamps",
"=",
"self",
".",
"_format_return_timestamps",
"(",
"return_timestamps",
")",
"t_series",
"=",
"self",
".",
"_build_euler_timeseries",
"(",
"return_timestamps",
")",
"if",
"return_columns",
"is",
"None",
":",
"return_columns",
"=",
"self",
".",
"_default_return_columns",
"(",
")",
"self",
".",
"time",
".",
"stage",
"=",
"'Run'",
"self",
".",
"clear_caches",
"(",
")",
"capture_elements",
",",
"return_addresses",
"=",
"utils",
".",
"get_return_elements",
"(",
"return_columns",
",",
"self",
".",
"components",
".",
"_namespace",
",",
"self",
".",
"components",
".",
"_subscript_dict",
")",
"res",
"=",
"self",
".",
"_integrate",
"(",
"t_series",
",",
"capture_elements",
",",
"return_timestamps",
")",
"return_df",
"=",
"utils",
".",
"make_flat_df",
"(",
"res",
",",
"return_addresses",
")",
"return_df",
".",
"index",
"=",
"return_timestamps",
"return",
"return_df"
] | Simulate the model's behavior over time.
Return a pandas dataframe with timestamps as rows,
model elements as columns.
Parameters
----------
params : dictionary
Keys are strings of model component names.
Values are numeric or pandas Series.
Numeric values represent constants over the model integration.
Timeseries will be interpolated to give time-varying input.
return_timestamps : list, numeric, numpy array(1-D)
Timestamps in model execution at which to return state information.
Defaults to model-file specified timesteps.
return_columns : list of string model component names
Returned dataframe will have corresponding columns.
Defaults to model stock values.
initial_condition : 'original'/'o', 'current'/'c', (t, {state})
The starting time, and the state of the system (the values of all the stocks)
at that starting time.
* 'original' (default) uses model-file specified initial condition
* 'current' uses the state of the model after the previous execution
* (t, {state}) lets the user specify a starting time and (possibly partial)
list of stock values.
reload : bool
If true, reloads the model from the translated model file before making changes
Examples
--------
>>> model.run(params={'exogenous_constant': 42})
>>> model.run(params={'exogenous_variable': timeseries_input})
>>> model.run(return_timestamps=[1, 2, 3.1415, 4, 10])
>>> model.run(return_timestamps=10)
>>> model.run(return_timestamps=np.linspace(1, 10, 20))
See Also
--------
pysd.set_components : handles setting model parameters
pysd.set_initial_condition : handles setting initial conditions | [
"Simulate",
"the",
"model",
"s",
"behavior",
"over",
"time",
".",
"Return",
"a",
"pandas",
"dataframe",
"with",
"timestamps",
"as",
"rows",
"model",
"elements",
"as",
"columns",
"."
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/functions.py#L615-L690 | train |
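A hypothetical end-to-end usage, assuming a translated Vensim teacup model on disk (the file and variable names here are invented; pysd.read_vensim is the public entry point that yields a Model instance):

import pysd

model = pysd.read_vensim('Teacup.mdl')
result = model.run(params={'Room Temperature': 20},
                   return_timestamps=range(0, 30),
                   return_columns=['Teacup Temperature'])
print(result.head())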
JamesPHoughton/pysd | pysd/py_backend/functions.py | Model._default_return_columns | def _default_return_columns(self):
"""
Return a list of the model elements that does not include lookup functions
or other functions that take parameters.
"""
return_columns = []
parsed_expr = []
for key, value in self.components._namespace.items():
if hasattr(self.components, value):
sig = signature(getattr(self.components, value))
# The `*args` reference handles the py2.7 decorator.
if len(set(sig.parameters) - {'args'}) == 0:
expr = self.components._namespace[key]
if not expr in parsed_expr:
return_columns.append(key)
parsed_expr.append(expr)
return return_columns | python | def _default_return_columns(self):
"""
Return a list of the model elements that does not include lookup functions
or other functions that take parameters.
"""
return_columns = []
parsed_expr = []
for key, value in self.components._namespace.items():
if hasattr(self.components, value):
sig = signature(getattr(self.components, value))
# The `*args` reference handles the py2.7 decorator.
if len(set(sig.parameters) - {'args'}) == 0:
expr = self.components._namespace[key]
if not expr in parsed_expr:
return_columns.append(key)
parsed_expr.append(expr)
return return_columns | [
"def",
"_default_return_columns",
"(",
"self",
")",
":",
"return_columns",
"=",
"[",
"]",
"parsed_expr",
"=",
"[",
"]",
"for",
"key",
",",
"value",
"in",
"self",
".",
"components",
".",
"_namespace",
".",
"items",
"(",
")",
":",
"if",
"hasattr",
"(",
"self",
".",
"components",
",",
"value",
")",
":",
"sig",
"=",
"signature",
"(",
"getattr",
"(",
"self",
".",
"components",
",",
"value",
")",
")",
"# The `*args` reference handles the py2.7 decorator.",
"if",
"len",
"(",
"set",
"(",
"sig",
".",
"parameters",
")",
"-",
"{",
"'args'",
"}",
")",
"==",
"0",
":",
"expr",
"=",
"self",
".",
"components",
".",
"_namespace",
"[",
"key",
"]",
"if",
"not",
"expr",
"in",
"parsed_expr",
":",
"return_columns",
".",
"append",
"(",
"key",
")",
"parsed_expr",
".",
"append",
"(",
"expr",
")",
"return",
"return_columns"
] | Return a list of the model elements that does not include lookup functions
or other functions that take parameters. | [
"Return",
"a",
"list",
"of",
"the",
"model",
"elements",
"that",
"does",
"not",
"include",
"lookup",
"functions",
"or",
"other",
"functions",
"that",
"take",
"parameters",
"."
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/functions.py#L698-L716 | train |
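A minimal illustration of the zero-argument filter above: elements whose callables take no parameters (ignoring the *args left by the py2.7 decorator) count as plain model variables, while parameterized callables such as lookups are excluded. The two functions here are invented examples:

from inspect import signature

def constant():   # no parameters -> would be returned
    return 3

def lookup(x):    # takes an argument -> would be filtered out
    return x * 2

for fn in (constant, lookup):
    takes_no_params = len(set(signature(fn).parameters) - {'args'}) == 0
    print(fn.__name__, takes_no_params)  # constant True / lookup False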
JamesPHoughton/pysd | pysd/py_backend/functions.py | Model.set_initial_condition | def set_initial_condition(self, initial_condition):
""" Set the initial conditions of the integration.
Parameters
----------
initial_condition : <string> or <tuple>
Takes on one of the following sets of values:
* 'original'/'o' : Reset to the model-file specified initial condition.
* 'current'/'c' : Use the current state of the system to start
the next simulation. This includes the simulation time, so this
initial condition must be paired with new return timestamps
* (t, {state}) : Lets the user specify a starting time and list of stock values.
>>> model.set_initial_condition('original')
>>> model.set_initial_condition('current')
>>> model.set_initial_condition((10, {'teacup_temperature': 50}))
See Also
--------
PySD.set_state()
"""
if isinstance(initial_condition, tuple):
# Todo: check the values more than just seeing if they are a tuple.
self.set_state(*initial_condition)
elif isinstance(initial_condition, str):
if initial_condition.lower() in ['original', 'o']:
self.initialize()
elif initial_condition.lower() in ['current', 'c']:
pass
else:
raise ValueError('Valid initial condition strings include: \n' +
' "original"/"o", \n' +
' "current"/"c"')
else:
raise TypeError('Check documentation for valid entries') | python | def set_initial_condition(self, initial_condition):
""" Set the initial conditions of the integration.
Parameters
----------
initial_condition : <string> or <tuple>
Takes on one of the following sets of values:
* 'original'/'o' : Reset to the model-file specified initial condition.
* 'current'/'c' : Use the current state of the system to start
the next simulation. This includes the simulation time, so this
initial condition must be paired with new return timestamps
* (t, {state}) : Lets the user specify a starting time and list of stock values.
>>> model.set_initial_condition('original')
>>> model.set_initial_condition('current')
>>> model.set_initial_condition((10, {'teacup_temperature': 50}))
See Also
--------
PySD.set_state()
"""
if isinstance(initial_condition, tuple):
# Todo: check the values more than just seeing if they are a tuple.
self.set_state(*initial_condition)
elif isinstance(initial_condition, str):
if initial_condition.lower() in ['original', 'o']:
self.initialize()
elif initial_condition.lower() in ['current', 'c']:
pass
else:
raise ValueError('Valid initial condition strings include: \n' +
' "original"/"o", \n' +
' "current"/"c"')
else:
raise TypeError('Check documentation for valid entries') | [
"def",
"set_initial_condition",
"(",
"self",
",",
"initial_condition",
")",
":",
"if",
"isinstance",
"(",
"initial_condition",
",",
"tuple",
")",
":",
"# Todo: check the values more than just seeing if they are a tuple.",
"self",
".",
"set_state",
"(",
"*",
"initial_condition",
")",
"elif",
"isinstance",
"(",
"initial_condition",
",",
"str",
")",
":",
"if",
"initial_condition",
".",
"lower",
"(",
")",
"in",
"[",
"'original'",
",",
"'o'",
"]",
":",
"self",
".",
"initialize",
"(",
")",
"elif",
"initial_condition",
".",
"lower",
"(",
")",
"in",
"[",
"'current'",
",",
"'c'",
"]",
":",
"pass",
"else",
":",
"raise",
"ValueError",
"(",
"'Valid initial condition strings include: \\n'",
"+",
"' \"original\"/\"o\", \\n'",
"+",
"' \"current\"/\"c\"'",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'Check documentation for valid entries'",
")"
] | Set the initial conditions of the integration.
Parameters
----------
initial_condition : <string> or <tuple>
Takes on one of the following sets of values:
* 'original'/'o' : Reset to the model-file specified initial condition.
* 'current'/'c' : Use the current state of the system to start
the next simulation. This includes the simulation time, so this
initial condition must be paired with new return timestamps
* (t, {state}) : Lets the user specify a starting time and list of stock values.
>>> model.set_initial_condition('original')
>>> model.set_initial_condition('current')
>>> model.set_initial_condition((10, {'teacup_temperature': 50}))
See Also
--------
PySD.set_state() | [
"Set",
"the",
"initial",
"conditions",
"of",
"the",
"integration",
"."
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/functions.py#L718-L755 | train |
JamesPHoughton/pysd | pysd/py_backend/functions.py | Model._euler_step | def _euler_step(self, dt):
""" Performs a single step in the euler integration,
updating stateful components
Parameters
----------
dt : float
The amount by which to increase time in this step
"""
self.state = self.state + self.ddt() * dt | python | def _euler_step(self, dt):
""" Performs a single step in the euler integration,
updating stateful components
Parameters
----------
dt : float
The amount by which to increase time in this step
"""
self.state = self.state + self.ddt() * dt | [
"def",
"_euler_step",
"(",
"self",
",",
"dt",
")",
":",
"self",
".",
"state",
"=",
"self",
".",
"state",
"+",
"self",
".",
"ddt",
"(",
")",
"*",
"dt"
] | Performs a single step in the euler integration,
updating stateful components
Parameters
----------
dt : float
The amount by which to increase time in this step | [
"Performs",
"a",
"single",
"step",
"in",
"the",
"euler",
"integration",
"updating",
"stateful",
"components"
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/functions.py#L757-L766 | train |
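The one-line Euler update above, written out for a scalar exponential-decay stock. PySD applies the same state + ddt() * dt update to its whole state vector; the values here are invented:

state = 100.0

def ddt(state):
    return -0.1 * state  # d(state)/dt for exponential decay

dt = 0.25
for _ in range(4):       # integrate from t=0 to t=1
    state = state + ddt(state) * dt
print(state)             # ~90.37; the exact answer 100*exp(-0.1) is ~90.48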
JamesPHoughton/pysd | pysd/py_backend/functions.py | Model._integrate | def _integrate(self, time_steps, capture_elements, return_timestamps):
"""
Performs euler integration
Parameters
----------
time_steps: iterable
the time steps that the integrator progresses over
capture_elements: list
which model elements to capture - uses pysafe names
return_timestamps:
for which subset of 'timesteps' should values be returned?
Returns
-------
outputs: list of dictionaries
"""
# Todo: consider adding the timestamp to the return elements, and using that as the index
outputs = []
for t2 in time_steps[1:]:
if self.time() in return_timestamps:
outputs.append({key: getattr(self.components, key)() for key in capture_elements})
self._euler_step(t2 - self.time())
self.time.update(t2) # this will clear the stepwise caches
# need to add one more time step, because we run only the state updates in the previous
# loop and thus may be one short.
if self.time() in return_timestamps:
outputs.append({key: getattr(self.components, key)() for key in capture_elements})
return outputs | python | def _integrate(self, time_steps, capture_elements, return_timestamps):
"""
Performs euler integration
Parameters
----------
time_steps: iterable
the time steps that the integrator progresses over
capture_elements: list
which model elements to capture - uses pysafe names
return_timestamps:
for which subset of 'timesteps' should values be returned?
Returns
-------
outputs: list of dictionaries
"""
# Todo: consider adding the timestamp to the return elements, and using that as the index
outputs = []
for t2 in time_steps[1:]:
if self.time() in return_timestamps:
outputs.append({key: getattr(self.components, key)() for key in capture_elements})
self._euler_step(t2 - self.time())
self.time.update(t2) # this will clear the stepwise caches
# need to add one more time step, because we run only the state updates in the previous
# loop and thus may be one short.
if self.time() in return_timestamps:
outputs.append({key: getattr(self.components, key)() for key in capture_elements})
return outputs | [
"def",
"_integrate",
"(",
"self",
",",
"time_steps",
",",
"capture_elements",
",",
"return_timestamps",
")",
":",
"# Todo: consider adding the timestamp to the return elements, and using that as the index",
"outputs",
"=",
"[",
"]",
"for",
"t2",
"in",
"time_steps",
"[",
"1",
":",
"]",
":",
"if",
"self",
".",
"time",
"(",
")",
"in",
"return_timestamps",
":",
"outputs",
".",
"append",
"(",
"{",
"key",
":",
"getattr",
"(",
"self",
".",
"components",
",",
"key",
")",
"(",
")",
"for",
"key",
"in",
"capture_elements",
"}",
")",
"self",
".",
"_euler_step",
"(",
"t2",
"-",
"self",
".",
"time",
"(",
")",
")",
"self",
".",
"time",
".",
"update",
"(",
"t2",
")",
"# this will clear the stepwise caches",
"# need to add one more time step, because we run only the state updates in the previous",
"# loop and thus may be one short.",
"if",
"self",
".",
"time",
"(",
")",
"in",
"return_timestamps",
":",
"outputs",
".",
"append",
"(",
"{",
"key",
":",
"getattr",
"(",
"self",
".",
"components",
",",
"key",
")",
"(",
")",
"for",
"key",
"in",
"capture_elements",
"}",
")",
"return",
"outputs"
] | Performs euler integration
Parameters
----------
time_steps: iterable
the time steps that the integrator progresses over
capture_elements: list
which model elements to capture - uses pysafe names
return_timestamps:
for which subset of 'timesteps' should values be returned?
Returns
-------
outputs: list of dictionaries | [
"Performs",
"euler",
"integration"
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/functions.py#L768-L800 | train |
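A toy version of the capture loop, showing why the trailing check is needed: the loop body only records the state at the start of each step, so the final timestamp has to be picked up after the loop (all values here are invented):

time_steps = [0, 1, 2, 3, 4]
return_timestamps = {0, 2, 4}
state = {'stock': 0.0}
outputs = []
t = time_steps[0]
for t2 in time_steps[1:]:
    if t in return_timestamps:
        outputs.append(dict(state))
    state['stock'] += 1.0 * (t2 - t)  # stand-in euler step
    t = t2
if t in return_timestamps:            # the extra check for the final time
    outputs.append(dict(state))
print(outputs)  # [{'stock': 0.0}, {'stock': 2.0}, {'stock': 4.0}]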
JamesPHoughton/pysd | pysd/py_backend/builder.py | merge_partial_elements | def merge_partial_elements(element_list):
"""
merges model elements which collectively all define the model component,
mostly for multidimensional subscripts
Parameters
----------
element_list: list of dictionaries
partial element definitions, possibly sharing a 'py_name'
Returns
-------
list of merged element dictionaries, one per unique 'py_name'
"""
outs = dict() # output data structure
for element in element_list:
if element['py_expr'] != "None": # skip elements with no expression
name = element['py_name']
if name not in outs:
# Use 'expr' for Vensim models, and 'eqn' for Xmile (This makes the Vensim equation prettier.)
eqn = element['expr'] if 'expr' in element else element['eqn']
outs[name] = {
'py_name': element['py_name'],
'real_name': element['real_name'],
'doc': element['doc'],
'py_expr': [element['py_expr']], # in a list
'unit': element['unit'],
'subs': [element['subs']],
'lims': element['lims'],
'eqn': eqn,
'kind': element['kind'],
'arguments': element['arguments']
}
else:
outs[name]['doc'] = outs[name]['doc'] or element['doc']
outs[name]['unit'] = outs[name]['unit'] or element['unit']
outs[name]['lims'] = outs[name]['lims'] or element['lims']
outs[name]['eqn'] = outs[name]['eqn'] or element['eqn']
outs[name]['py_expr'] += [element['py_expr']]
outs[name]['subs'] += [element['subs']]
outs[name]['arguments'] = element['arguments']
return list(outs.values()) | python | def merge_partial_elements(element_list):
"""
merges model elements which collectively all define the model component,
mostly for multidimensional subscripts
Parameters
----------
element_list: list of dictionaries
partial element definitions, possibly sharing a 'py_name'
Returns
-------
list of merged element dictionaries, one per unique 'py_name'
"""
outs = dict() # output data structure
for element in element_list:
if element['py_expr'] != "None": # skip elements with no expression
name = element['py_name']
if name not in outs:
# Use 'expr' for Vensim models, and 'eqn' for Xmile (This makes the Vensim equation prettier.)
eqn = element['expr'] if 'expr' in element else element['eqn']
outs[name] = {
'py_name': element['py_name'],
'real_name': element['real_name'],
'doc': element['doc'],
'py_expr': [element['py_expr']], # in a list
'unit': element['unit'],
'subs': [element['subs']],
'lims': element['lims'],
'eqn': eqn,
'kind': element['kind'],
'arguments': element['arguments']
}
else:
outs[name]['doc'] = outs[name]['doc'] or element['doc']
outs[name]['unit'] = outs[name]['unit'] or element['unit']
outs[name]['lims'] = outs[name]['lims'] or element['lims']
outs[name]['eqn'] = outs[name]['eqn'] or element['eqn']
outs[name]['py_expr'] += [element['py_expr']]
outs[name]['subs'] += [element['subs']]
outs[name]['arguments'] = element['arguments']
return list(outs.values()) | [
"def",
"merge_partial_elements",
"(",
"element_list",
")",
":",
"outs",
"=",
"dict",
"(",
")",
"# output data structure",
"for",
"element",
"in",
"element_list",
":",
"if",
"element",
"[",
"'py_expr'",
"]",
"!=",
"\"None\"",
":",
"# skip elements with no expression",
"name",
"=",
"element",
"[",
"'py_name'",
"]",
"if",
"name",
"not",
"in",
"outs",
":",
"# Use 'expr' for Vensim models, and 'eqn' for Xmile (This makes the Vensim equation prettier.)",
"eqn",
"=",
"element",
"[",
"'expr'",
"]",
"if",
"'expr'",
"in",
"element",
"else",
"element",
"[",
"'eqn'",
"]",
"outs",
"[",
"name",
"]",
"=",
"{",
"'py_name'",
":",
"element",
"[",
"'py_name'",
"]",
",",
"'real_name'",
":",
"element",
"[",
"'real_name'",
"]",
",",
"'doc'",
":",
"element",
"[",
"'doc'",
"]",
",",
"'py_expr'",
":",
"[",
"element",
"[",
"'py_expr'",
"]",
"]",
",",
"# in a list",
"'unit'",
":",
"element",
"[",
"'unit'",
"]",
",",
"'subs'",
":",
"[",
"element",
"[",
"'subs'",
"]",
"]",
",",
"'lims'",
":",
"element",
"[",
"'lims'",
"]",
",",
"'eqn'",
":",
"eqn",
",",
"'kind'",
":",
"element",
"[",
"'kind'",
"]",
",",
"'arguments'",
":",
"element",
"[",
"'arguments'",
"]",
"}",
"else",
":",
"outs",
"[",
"name",
"]",
"[",
"'doc'",
"]",
"=",
"outs",
"[",
"name",
"]",
"[",
"'doc'",
"]",
"or",
"element",
"[",
"'doc'",
"]",
"outs",
"[",
"name",
"]",
"[",
"'unit'",
"]",
"=",
"outs",
"[",
"name",
"]",
"[",
"'unit'",
"]",
"or",
"element",
"[",
"'unit'",
"]",
"outs",
"[",
"name",
"]",
"[",
"'lims'",
"]",
"=",
"outs",
"[",
"name",
"]",
"[",
"'lims'",
"]",
"or",
"element",
"[",
"'lims'",
"]",
"outs",
"[",
"name",
"]",
"[",
"'eqn'",
"]",
"=",
"outs",
"[",
"name",
"]",
"[",
"'eqn'",
"]",
"or",
"element",
"[",
"'eqn'",
"]",
"outs",
"[",
"name",
"]",
"[",
"'py_expr'",
"]",
"+=",
"[",
"element",
"[",
"'py_expr'",
"]",
"]",
"outs",
"[",
"name",
"]",
"[",
"'subs'",
"]",
"+=",
"[",
"element",
"[",
"'subs'",
"]",
"]",
"outs",
"[",
"name",
"]",
"[",
"'arguments'",
"]",
"=",
"element",
"[",
"'arguments'",
"]",
"return",
"list",
"(",
"outs",
".",
"values",
"(",
")",
")"
] | merges model elements which collectively all define the model component,
mostly for multidimensional subscripts
Parameters
----------
element_list: list of dictionaries
partial element definitions, possibly sharing a 'py_name'
Returns
-------
list of merged element dictionaries, one per unique 'py_name' | [
"merges",
"model",
"elements",
"which",
"collectively",
"all",
"define",
"the",
"model",
"component",
"mostly",
"for",
"multidimensional",
"subscripts"
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/builder.py#L187-L229 | train |
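A small input/output demonstration of the merge, assuming the function above is in scope (all field values are invented; note how py_expr and subs collect into parallel lists while the scalar fields keep the first truthy value):

partials = [
    {'py_name': 'pop', 'real_name': 'Population', 'doc': '', 'py_expr': '10',
     'unit': 'people', 'subs': ['urban'], 'lims': '', 'eqn': '',
     'expr': 'pop[urban]=10', 'kind': 'constant', 'arguments': ''},
    {'py_name': 'pop', 'real_name': 'Population', 'doc': '', 'py_expr': '4',
     'unit': 'people', 'subs': ['rural'], 'lims': '', 'eqn': '',
     'expr': 'pop[rural]=4', 'kind': 'constant', 'arguments': ''},
]
merged = merge_partial_elements(partials)
print(merged[0]['py_expr'])  # ['10', '4']
print(merged[0]['subs'])     # [['urban'], ['rural']]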
JamesPHoughton/pysd | pysd/py_backend/builder.py | add_n_delay | def add_n_delay(delay_input, delay_time, initial_value, order, subs, subscript_dict):
"""
Creates code to instantiate a stateful 'Delay' object,
and provides reference to that object's output.
The name of the stateful object is based upon the passed in parameters, so if
there are multiple places where identical delay functions are referenced, the
translated python file will only maintain one stateful object, and reference it
multiple times.
Parameters
----------
delay_input: <string>
Reference to the model component that is the input to the delay
delay_time: <string>
Can be a number (in string format) or a reference to another model element
which will calculate the delay. This is calculated throughout the simulation
at runtime.
initial_value: <string>
This is used to initialize the stocks that are present in the delay. We
initialize the stocks with equal values so that the outflow in the first
timestep is equal to this value.
order: string
The number of stocks in the delay pipeline. As we construct the delays at
build time, this must be an integer and cannot be calculated from other
model components. Anything else will yield a ValueError.
Returns
-------
reference: basestring
reference to the delay object `__call__` method, which will return the output
of the delay process
new_structure: list
list of element construction dictionaries for the builder to assemble
"""
# the py name has to be unique to all the passed parameters, or if there are two things
# that delay the output by different amounts, they'll overwrite the original function...
stateful = {
'py_name': utils.make_python_identifier('_delay_%s_%s_%s_%s' % (delay_input,
delay_time,
initial_value,
order))[0],
'real_name': 'Delay of %s' % delay_input,
'doc': 'Delay time: %s \n Delay initial value %s \n Delay order %s' % (
delay_time, initial_value, order),
'py_expr': 'functions.Delay(lambda: %s, lambda: %s, lambda: %s, lambda: %s)' % (
delay_input, delay_time, initial_value, order),
'unit': 'None',
'lims': 'None',
'eqn': 'None',
'subs': '',
'kind': 'stateful',
'arguments': ''
}
return "%s()" % stateful['py_name'], [stateful] | python | def add_n_delay(delay_input, delay_time, initial_value, order, subs, subscript_dict):
"""
Creates code to instantiate a stateful 'Delay' object,
and provides reference to that object's output.
The name of the stateful object is based upon the passed in parameters, so if
there are multiple places where identical delay functions are referenced, the
translated python file will only maintain one stateful object, and reference it
multiple times.
Parameters
----------
delay_input: <string>
Reference to the model component that is the input to the delay
delay_time: <string>
Can be a number (in string format) or a reference to another model element
which will calculate the delay. This is calculated throughout the simulation
at runtime.
initial_value: <string>
This is used to initialize the stocks that are present in the delay. We
initialize the stocks with equal values so that the outflow in the first
timestep is equal to this value.
order: string
The number of stocks in the delay pipeline. As we construct the delays at
build time, this must be an integer and cannot be calculated from other
model components. Anything else will yield a ValueError.
Returns
-------
reference: basestring
reference to the delay object `__call__` method, which will return the output
of the delay process
new_structure: list
list of element construction dictionaries for the builder to assemble
"""
# the py name has to be unique to all the passed parameters, or if there are two things
# that delay the output by different amounts, they'll overwrite the original function...
stateful = {
'py_name': utils.make_python_identifier('_delay_%s_%s_%s_%s' % (delay_input,
delay_time,
initial_value,
order))[0],
'real_name': 'Delay of %s' % delay_input,
'doc': 'Delay time: %s \n Delay initial value %s \n Delay order %s' % (
delay_time, initial_value, order),
'py_expr': 'functions.Delay(lambda: %s, lambda: %s, lambda: %s, lambda: %s)' % (
delay_input, delay_time, initial_value, order),
'unit': 'None',
'lims': 'None',
'eqn': 'None',
'subs': '',
'kind': 'stateful',
'arguments': ''
}
return "%s()" % stateful['py_name'], [stateful] | [
"def",
"add_n_delay",
"(",
"delay_input",
",",
"delay_time",
",",
"initial_value",
",",
"order",
",",
"subs",
",",
"subscript_dict",
")",
":",
"# the py name has to be unique to all the passed parameters, or if there are two things",
"# that delay the output by different amounts, they'll overwrite the original function...",
"stateful",
"=",
"{",
"'py_name'",
":",
"utils",
".",
"make_python_identifier",
"(",
"'_delay_%s_%s_%s_%s'",
"%",
"(",
"delay_input",
",",
"delay_time",
",",
"initial_value",
",",
"order",
")",
")",
"[",
"0",
"]",
",",
"'real_name'",
":",
"'Delay of %s'",
"%",
"delay_input",
",",
"'doc'",
":",
"'Delay time: %s \\n Delay initial value %s \\n Delay order %s'",
"%",
"(",
"delay_time",
",",
"initial_value",
",",
"order",
")",
",",
"'py_expr'",
":",
"'functions.Delay(lambda: %s, lambda: %s, lambda: %s, lambda: %s)'",
"%",
"(",
"delay_input",
",",
"delay_time",
",",
"initial_value",
",",
"order",
")",
",",
"'unit'",
":",
"'None'",
",",
"'lims'",
":",
"'None'",
",",
"'eqn'",
":",
"'None'",
",",
"'subs'",
":",
"''",
",",
"'kind'",
":",
"'stateful'",
",",
"'arguments'",
":",
"''",
"}",
"return",
"\"%s()\"",
"%",
"stateful",
"[",
"'py_name'",
"]",
",",
"[",
"stateful",
"]"
] | Creates code to instantiate a stateful 'Delay' object,
and provides reference to that object's output.
The name of the stateful object is based upon the passed in parameters, so if
there are multiple places where identical delay functions are referenced, the
translated python file will only maintain one stateful object, and reference it
multiple times.
Parameters
----------
delay_input: <string>
Reference to the model component that is the input to the delay
delay_time: <string>
Can be a number (in string format) or a reference to another model element
which will calculate the delay. This is calculated throughout the simulation
at runtime.
initial_value: <string>
This is used to initialize the stocks that are present in the delay. We
initialize the stocks with equal values so that the outflow in the first
timestep is equal to this value.
order: string
The number of stocks in the delay pipeline. As we construct the delays at
build time, this must be an integer and cannot be calculated from other
model components. Anything else will yield a ValueError.
Returns
-------
reference: basestring
reference to the delay object `__call__` method, which will return the output
of the delay process
new_structure: list
list of element construction dictionaries for the builder to assemble | [
"Creates",
"code",
"to",
"instantiate",
"a",
"stateful",
"Delay",
"object",
"and",
"provides",
"reference",
"to",
"that",
"object",
"s",
"output",
"."
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/builder.py#L340-L401 | train |
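A hedged peek at what the builder emits for a third-order delay. This assumes it is run where add_n_delay and utils are importable (pysd/py_backend); the sanitized py_name shown in the comment is approximate, since it depends on utils.make_python_identifier:

ref, structure = add_n_delay(delay_input='inflow()', delay_time='5',
                             initial_value='0', order='3',
                             subs='', subscript_dict={})
print(ref)                      # e.g. '_delay_inflow___5_0_3()'
print(structure[0]['py_expr'])  # 'functions.Delay(lambda: inflow(), lambda: 5, lambda: 0, lambda: 3)'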
JamesPHoughton/pysd | pysd/py_backend/builder.py | add_n_smooth | def add_n_smooth(smooth_input, smooth_time, initial_value, order, subs, subscript_dict):
"""Constructs stock and flow chains that implement the calculation of
a smoothing function.
Parameters
----------
smooth_input: <string>
Reference to the model component that is the input to the smoothing function
smooth_time: <string>
Can be a number (in string format) or a reference to another model element
which will calculate the delay. This is calculated throughout the simulation
at runtime.
initial_value: <string>
This is used to initialize the stocks that are present in the delay. We
initialize the stocks with equal values so that the outflow in the first
timestep is equal to this value.
order: string
The number of stocks in the delay pipeline. As we construct the delays at
build time, this must be an integer and cannot be calculated from other
model components. Anything else will yield a ValueError.
subs: list of strings
List of strings of subscript indices that correspond to the
list of expressions, and collectively define the shape of the output
See `builder.add_flaux` for more info
Returns
-------
reference: basestring
reference to the smooth object `__call__` method, which will return the output
of the smooth process
new_structure: list
list of element construction dictionaries for the builder to assemble
"""
stateful = {
'py_name': utils.make_python_identifier('_smooth_%s_%s_%s_%s' % (smooth_input,
smooth_time,
initial_value,
order))[0],
'real_name': 'Smooth of %s' % smooth_input,
'doc': 'Smooth time: %s \n Smooth initial value %s \n Smooth order %s' % (
smooth_time, initial_value, order),
'py_expr': 'functions.Smooth(lambda: %s, lambda: %s, lambda: %s, lambda: %s)' % (
smooth_input, smooth_time, initial_value, order),
'unit': 'None',
'lims': 'None',
'eqn': 'None',
'subs': '',
'kind': 'stateful',
'arguments': ''
}
return "%s()" % stateful['py_name'], [stateful] | python | def add_n_smooth(smooth_input, smooth_time, initial_value, order, subs, subscript_dict):
"""Constructs stock and flow chains that implement the calculation of
a smoothing function.
Parameters
----------
smooth_input: <string>
Reference to the model component that is the input to the smoothing function
smooth_time: <string>
Can be a number (in string format) or a reference to another model element
which will calculate the delay. This is calculated throughout the simulation
at runtime.
initial_value: <string>
This is used to initialize the stocks that are present in the delay. We
initialize the stocks with equal values so that the outflow in the first
timestep is equal to this value.
order: string
The number of stocks in the delay pipeline. As we construct the delays at
build time, this must be an integer and cannot be calculated from other
model components. Anything else will yield a ValueError.
subs: list of strings
List of strings of subscript indices that correspond to the
list of expressions, and collectively define the shape of the output
See `builder.add_flaux` for more info
Returns
-------
reference: basestring
reference to the smooth object `__call__` method, which will return the output
of the smooth process
new_structure: list
list of element construction dictionaries for the builder to assemble
"""
stateful = {
'py_name': utils.make_python_identifier('_smooth_%s_%s_%s_%s' % (smooth_input,
smooth_time,
initial_value,
order))[0],
'real_name': 'Smooth of %s' % smooth_input,
'doc': 'Smooth time: %s \n Smooth initial value %s \n Smooth order %s' % (
smooth_time, initial_value, order),
'py_expr': 'functions.Smooth(lambda: %s, lambda: %s, lambda: %s, lambda: %s)' % (
smooth_input, smooth_time, initial_value, order),
'unit': 'None',
'lims': 'None',
'eqn': 'None',
'subs': '',
'kind': 'stateful',
'arguments': ''
}
return "%s()" % stateful['py_name'], [stateful] | [
"def",
"add_n_smooth",
"(",
"smooth_input",
",",
"smooth_time",
",",
"initial_value",
",",
"order",
",",
"subs",
",",
"subscript_dict",
")",
":",
"stateful",
"=",
"{",
"'py_name'",
":",
"utils",
".",
"make_python_identifier",
"(",
"'_smooth_%s_%s_%s_%s'",
"%",
"(",
"smooth_input",
",",
"smooth_time",
",",
"initial_value",
",",
"order",
")",
")",
"[",
"0",
"]",
",",
"'real_name'",
":",
"'Smooth of %s'",
"%",
"smooth_input",
",",
"'doc'",
":",
"'Smooth time: %s \\n Smooth initial value %s \\n Smooth order %s'",
"%",
"(",
"smooth_time",
",",
"initial_value",
",",
"order",
")",
",",
"'py_expr'",
":",
"'functions.Smooth(lambda: %s, lambda: %s, lambda: %s, lambda: %s)'",
"%",
"(",
"smooth_input",
",",
"smooth_time",
",",
"initial_value",
",",
"order",
")",
",",
"'unit'",
":",
"'None'",
",",
"'lims'",
":",
"'None'",
",",
"'eqn'",
":",
"'None'",
",",
"'subs'",
":",
"''",
",",
"'kind'",
":",
"'stateful'",
",",
"'arguments'",
":",
"''",
"}",
"return",
"\"%s()\"",
"%",
"stateful",
"[",
"'py_name'",
"]",
",",
"[",
"stateful",
"]"
] | Constructs stock and flow chains that implement the calculation of
a smoothing function.
Parameters
----------
smooth_input: <string>
Reference to the model component that is the input to the smoothing function
smooth_time: <string>
Can be a number (in string format) or a reference to another model element
which will calculate the delay. This is calculated throughout the simulation
at runtime.
initial_value: <string>
This is used to initialize the stocks that are present in the delay. We
initialize the stocks with equal values so that the outflow in the first
timestep is equal to this value.
order: string
The number of stocks in the delay pipeline. As we construct the delays at
build time, this must be an integer and cannot be calculated from other
model components. Anything else will yield a ValueError.
subs: list of strings
List of strings of subscript indices that correspond to the
list of expressions, and collectively define the shape of the output
See `builder.add_flaux` for more info
Returns
-------
reference: basestring
reference to the smooth object `__call__` method, which will return the output
of the smooth process
new_structure: list
list of element construction dictionaries for the builder to assemble | [
"Constructs",
"stock",
"and",
"flow",
"chains",
"that",
"implement",
"the",
"calculation",
"of",
"a",
"smoothing",
"function",
"."
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/builder.py#L404-L461 | train |
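What the generated functions.Smooth object integrates, reduced to a scalar, first-order toy: an exponential smooth of a step input, advanced with the same Euler update the model uses (all numbers invented):

smooth_time, dt = 4.0, 0.25
inp = lambda t: 1.0 if t >= 1 else 0.0  # step input at t=1
state = 0.0
for step in range(int(10 / dt)):        # integrate from t=0 to t=10
    t = step * dt
    state += (inp(t) - state) / smooth_time * dt
print(round(state, 3))                  # approaches 1.0 from below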
JamesPHoughton/pysd | pysd/py_backend/builder.py | add_initial | def add_initial(initial_input):
"""
Constructs a stateful object for handling vensim's 'Initial' functionality
Parameters
----------
initial_input: basestring
The expression which will be evaluated, and the first value of which returned
Returns
-------
reference: basestring
reference to the Initial object `__call__` method,
which will return the first calculated value of `initial_input`
new_structure: list
list of element construction dictionaries for the builder to assemble
"""
stateful = {
'py_name': utils.make_python_identifier('_initial_%s' % initial_input)[0],
'real_name': 'Initial of %s' % initial_input,
'doc': 'Returns the value taken on during the initialization phase',
'py_expr': 'functions.Initial(lambda: %s)' % (
initial_input),
'unit': 'None',
'lims': 'None',
'eqn': 'None',
'subs': '',
'kind': 'stateful',
'arguments': ''
}
return "%s()" % stateful['py_name'], [stateful] | python | def add_initial(initial_input):
"""
Constructs a stateful object for handling vensim's 'Initial' functionality
Parameters
----------
initial_input: basestring
The expression which will be evaluated, and the first value of which returned
Returns
-------
reference: basestring
reference to the Initial object `__call__` method,
which will return the first calculated value of `initial_input`
new_structure: list
list of element construction dictionaries for the builder to assemble
"""
stateful = {
'py_name': utils.make_python_identifier('_initial_%s' % initial_input)[0],
'real_name': 'Initial of %s' % initial_input,
'doc': 'Returns the value taken on during the initialization phase',
'py_expr': 'functions.Initial(lambda: %s)' % (
initial_input),
'unit': 'None',
'lims': 'None',
'eqn': 'None',
'subs': '',
'kind': 'stateful',
'arguments': ''
}
return "%s()" % stateful['py_name'], [stateful] | [
"def",
"add_initial",
"(",
"initial_input",
")",
":",
"stateful",
"=",
"{",
"'py_name'",
":",
"utils",
".",
"make_python_identifier",
"(",
"'_initial_%s'",
"%",
"initial_input",
")",
"[",
"0",
"]",
",",
"'real_name'",
":",
"'Smooth of %s'",
"%",
"initial_input",
",",
"'doc'",
":",
"'Returns the value taken on during the initialization phase'",
",",
"'py_expr'",
":",
"'functions.Initial(lambda: %s)'",
"%",
"(",
"initial_input",
")",
",",
"'unit'",
":",
"'None'",
",",
"'lims'",
":",
"'None'",
",",
"'eqn'",
":",
"'None'",
",",
"'subs'",
":",
"''",
",",
"'kind'",
":",
"'stateful'",
",",
"'arguments'",
":",
"''",
"}",
"return",
"\"%s()\"",
"%",
"stateful",
"[",
"'py_name'",
"]",
",",
"[",
"stateful",
"]"
] | Constructs a stateful object for handling vensim's 'Initial' functionality
Parameters
----------
initial_input: basestring
The expression which will be evaluated, and the first value of which returned
Returns
-------
reference: basestring
reference to the Initial object `__call__` method,
which will return the first calculated value of `initial_input`
new_structure: list
list of element construction dictionaries for the builder to assemble | [
"Constructs",
"a",
"stateful",
"object",
"for",
"handling",
"vensim",
"s",
"Initial",
"functionality"
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/builder.py#L511-L544 | train |
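The `add_initial` record above wraps an expression in a `functions.Initial` object so the model returns its initialization-time value. A minimal sketch of calling it (the import path is assumed from the record's `path` field; the argument is an illustrative expression string):

```python
# Hedged sketch of add_initial; the outputs shown in comments follow directly
# from the code in the record, but the import path and argument are assumptions.
from pysd.py_backend.builder import add_initial

expr, structure = add_initial('teacup_temperature()')
print(expr)                     # "_initial_teacup_temperature()"
print(structure[0]['py_expr'])  # "functions.Initial(lambda: teacup_temperature())"
```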
JamesPHoughton/pysd | pysd/py_backend/builder.py | add_macro | def add_macro(macro_name, filename, arg_names, arg_vals):
"""
Constructs a stateful object instantiating a 'Macro'
Parameters
----------
macro_name: basestring
python safe name for macro
filename: basestring
filepath to macro definition
func_args: dict
dictionary of values to be passed to macro
{key: function}
Returns
-------
reference: basestring
reference to the Initial object `__call__` method,
which will return the first calculated value of `initial_input`
new_structure: list
list of element construction dictionaries for the builder to assemble
"""
func_args = '{ %s }' % ', '.join(["'%s': lambda: %s" % (key, val) for key, val in
zip(arg_names, arg_vals)])
stateful = {
'py_name': '_macro_' + macro_name + '_' + '_'.join(
[utils.make_python_identifier(f)[0] for f in arg_vals]),
'real_name': 'Macro Instantiation of ' + macro_name,
'doc': 'Instantiates the Macro',
'py_expr': "functions.Macro('%s', %s, '%s', time_initialization=lambda: __data['time'])" % (filename, func_args, macro_name),
'unit': 'None',
'lims': 'None',
'eqn': 'None',
'subs': '',
'kind': 'stateful',
'arguments': ''
}
return "%s()" % stateful['py_name'], [stateful] | python | def add_macro(macro_name, filename, arg_names, arg_vals):
"""
Constructs a stateful object instantiating a 'Macro'
Parameters
----------
macro_name: basestring
python safe name for macro
filename: basestring
filepath to macro definition
func_args: dict
dictionary of values to be passed to macro
{key: function}
Returns
-------
reference: basestring
reference to the Initial object `__call__` method,
which will return the first calculated value of `initial_input`
new_structure: list
list of element construction dictionaries for the builder to assemble
"""
func_args = '{ %s }' % ', '.join(["'%s': lambda: %s" % (key, val) for key, val in
zip(arg_names, arg_vals)])
stateful = {
'py_name': '_macro_' + macro_name + '_' + '_'.join(
[utils.make_python_identifier(f)[0] for f in arg_vals]),
'real_name': 'Macro Instantiation of ' + macro_name,
'doc': 'Instantiates the Macro',
'py_expr': "functions.Macro('%s', %s, '%s', time_initialization=lambda: __data['time'])" % (filename, func_args, macro_name),
'unit': 'None',
'lims': 'None',
'eqn': 'None',
'subs': '',
'kind': 'stateful',
'arguments': ''
}
return "%s()" % stateful['py_name'], [stateful] | [
"def",
"add_macro",
"(",
"macro_name",
",",
"filename",
",",
"arg_names",
",",
"arg_vals",
")",
":",
"func_args",
"=",
"'{ %s }'",
"%",
"', '",
".",
"join",
"(",
"[",
"\"'%s': lambda: %s\"",
"%",
"(",
"key",
",",
"val",
")",
"for",
"key",
",",
"val",
"in",
"zip",
"(",
"arg_names",
",",
"arg_vals",
")",
"]",
")",
"stateful",
"=",
"{",
"'py_name'",
":",
"'_macro_'",
"+",
"macro_name",
"+",
"'_'",
"+",
"'_'",
".",
"join",
"(",
"[",
"utils",
".",
"make_python_identifier",
"(",
"f",
")",
"[",
"0",
"]",
"for",
"f",
"in",
"arg_vals",
"]",
")",
",",
"'real_name'",
":",
"'Macro Instantiation of '",
"+",
"macro_name",
",",
"'doc'",
":",
"'Instantiates the Macro'",
",",
"'py_expr'",
":",
"\"functions.Macro('%s', %s, '%s', time_initialization=lambda: __data['time'])\"",
"%",
"(",
"filename",
",",
"func_args",
",",
"macro_name",
")",
",",
"'unit'",
":",
"'None'",
",",
"'lims'",
":",
"'None'",
",",
"'eqn'",
":",
"'None'",
",",
"'subs'",
":",
"''",
",",
"'kind'",
":",
"'stateful'",
",",
"'arguments'",
":",
"''",
"}",
"return",
"\"%s()\"",
"%",
"stateful",
"[",
"'py_name'",
"]",
",",
"[",
"stateful",
"]"
] | Constructs a stateful object instantiating a 'Macro'
Parameters
----------
macro_name: basestring
python safe name for macro
filename: basestring
filepath to macro definition
func_args: dict
dictionary of values to be passed to macro
{key: function}
Returns
-------
reference: basestring
reference to the Initial object `__call__` method,
which will return the first calculated value of `initial_input`
new_structure: list
list of element construction dictionaries for the builder to assemble | [
"Constructs",
"a",
"stateful",
"object",
"instantiating",
"a",
"Macro"
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/builder.py#L547-L588 | train |
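The `add_macro` record above binds argument expressions into lambdas and instantiates a `functions.Macro`. A minimal sketch (macro name, file name, and argument lists are illustrative; the import path is assumed from the record's `path` field):

```python
# Hedged sketch of add_macro; all names here are invented for illustration.
from pysd.py_backend.builder import add_macro

expr, structure = add_macro(macro_name='expression_macro',
                            filename='macro_model.py',
                            arg_names=['input'], arg_vals=['stock()'])
print(expr)  # "_macro_expression_macro_stock()"
# structure[0]['py_expr'] instantiates the macro roughly as
# functions.Macro('macro_model.py', {'input': lambda: stock()}, 'expression_macro', ...)
```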
JamesPHoughton/pysd | pysd/py_backend/builder.py | add_incomplete | def add_incomplete(var_name, dependencies):
"""
Incomplete functions don't really need to be 'builders' as they
add no new real structure, but it's helpful to have a function
in which we can raise a warning about the incomplete equation
at translate time.
"""
warnings.warn('%s has no equation specified' % var_name,
SyntaxWarning, stacklevel=2)
# first arg is `self` reference
return "functions.incomplete(%s)" % ', '.join(dependencies[1:]), [] | python | def add_incomplete(var_name, dependencies):
"""
Incomplete functions don't really need to be 'builders' as they
add no new real structure, but it's helpful to have a function
in which we can raise a warning about the incomplete equation
at translate time.
"""
warnings.warn('%s has no equation specified' % var_name,
SyntaxWarning, stacklevel=2)
# first arg is `self` reference
return "functions.incomplete(%s)" % ', '.join(dependencies[1:]), [] | [
"def",
"add_incomplete",
"(",
"var_name",
",",
"dependencies",
")",
":",
"warnings",
".",
"warn",
"(",
"'%s has no equation specified'",
"%",
"var_name",
",",
"SyntaxWarning",
",",
"stacklevel",
"=",
"2",
")",
"# first arg is `self` reference",
"return",
"\"functions.incomplete(%s)\"",
"%",
"', '",
".",
"join",
"(",
"dependencies",
"[",
"1",
":",
"]",
")",
",",
"[",
"]"
] | Incomplete functions don't really need to be 'builders' as they
add no new real structure, but it's helpful to have a function
in which we can raise a warning about the incomplete equation
at translate time. | [
"Incomplete",
"functions",
"don",
"t",
"really",
"need",
"to",
"be",
"builders",
"as",
"they",
"add",
"no",
"new",
"real",
"structure",
"but",
"it",
"s",
"helpful",
"to",
"have",
"a",
"function",
"in",
"which",
"we",
"can",
"raise",
"a",
"warning",
"about",
"the",
"incomplete",
"equation",
"at",
"translate",
"time",
"."
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/builder.py#L591-L602 | train |
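As the docstring says, `add_incomplete` only raises a warning at translate time and defers the failure to runtime via `functions.incomplete`. A minimal sketch (argument values are illustrative; the import path is assumed from the record's `path` field):

```python
# Hedged sketch: capture the SyntaxWarning that add_incomplete emits.
import warnings
from pysd.py_backend.builder import add_incomplete

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    expr, structure = add_incomplete('broken_var', ['self', 'a()', 'b()'])

print(expr)       # "functions.incomplete(a(), b())" -- the first (self) dependency is dropped
print(structure)  # [] -- no new model structure is added
print(caught[0].message)  # broken_var has no equation specified
```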
JamesPHoughton/pysd | pysd/py_backend/vensim/vensim2py.py | get_model_elements | def get_model_elements(model_str):
"""
Takes in a string representing model text and splits it into elements
I think we're making the assumption that all newline characters are removed...
Parameters
----------
model_str : string
Returns
-------
entries : array of dictionaries
Each dictionary contains the components of a different model element, separated into the
equation, units, and docstring.
Examples
--------
# Basic Parsing:
>>> get_model_elements(r'a~b~c| d~e~f| g~h~i|')
[{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'f', 'unit': 'e', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}]
# Special characters are escaped within double-quotes:
>>> get_model_elements(r'a~b~c| d~e"~"~f| g~h~i|')
[{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'f', 'unit': 'e"~"', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}]
>>> get_model_elements(r'a~b~c| d~e~"|"f| g~h~i|')
[{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': '"|"f', 'unit': 'e', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}]
# Double-quotes within escape groups are themselves escaped with backslashes:
>>> get_model_elements(r'a~b~c| d~e"\\\"~"~f| g~h~i|')
[{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'f', 'unit': 'e"\\\\"~"', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}]
>>> get_model_elements(r'a~b~c| d~e~"\\\"|"f| g~h~i|')
[{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': '"\\\\"|"f', 'unit': 'e', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}]
>>> get_model_elements(r'a~b~c| d~e"x\\nx"~f| g~h~|')
[{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'f', 'unit': 'e"x\\\\nx"', 'eqn': 'd'}, {'doc': '', 'unit': 'h', 'eqn': 'g'}]
# Todo: Handle model-level or section-level documentation
>>> get_model_elements(r'*** .model doc ***~ Docstring!| d~e~f| g~h~i|')
[{'doc': 'Docstring!', 'unit': '', 'eqn': ''}, {'doc': 'f', 'unit': 'e', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}]
# Handle control sections, returning appropriate docstring pieces
>>> get_model_elements(r'a~b~c| ****.Control***~ Simulation Control Parameters | g~h~i|')
[{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}]
# Handle the model display elements (ignore them)
>>> get_model_elements(r'a~b~c| d~e~f| \\\---///junk|junk~junk')
[{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'f', 'unit': 'e', 'eqn': 'd'}]
Notes
-----
- Tildes and pipes are not allowed in element docstrings, but we should still handle them there
"""
model_structure_grammar = _include_common_grammar(r"""
model = (entry / section)+ sketch?
entry = element "~" element "~" element ("~" element)? "|"
section = element "~" element "|"
sketch = ~r".*" #anything
# Either an escape group, or a character that is not tilde or pipe
element = (escape_group / ~r"[^~|]")*
""")
parser = parsimonious.Grammar(model_structure_grammar)
tree = parser.parse(model_str)
class ModelParser(parsimonious.NodeVisitor):
def __init__(self, ast):
self.entries = []
self.visit(ast)
def visit_entry(self, n, vc):
units, lims = parse_units(vc[2].strip())
self.entries.append({'eqn': vc[0].strip(),
'unit': units,
'lims': str(lims),
'doc': vc[4].strip(),
'kind': 'entry'})
def visit_section(self, n, vc):
if vc[2].strip() != "Simulation Control Parameters":
self.entries.append({'eqn': '',
'unit': '',
'lims': '',
'doc': vc[2].strip(),
'kind': 'section'})
def generic_visit(self, n, vc):
return ''.join(filter(None, vc)) or n.text or ''
return ModelParser(tree).entries | python | def get_model_elements(model_str):
"""
Takes in a string representing model text and splits it into elements
I think we're making the assumption that all newline characters are removed...
Parameters
----------
model_str : string
Returns
-------
entries : array of dictionaries
Each dictionary contains the components of a different model element, separated into the
equation, units, and docstring.
Examples
--------
# Basic Parsing:
>>> get_model_elements(r'a~b~c| d~e~f| g~h~i|')
[{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'f', 'unit': 'e', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}]
# Special characters are escaped within double-quotes:
>>> get_model_elements(r'a~b~c| d~e"~"~f| g~h~i|')
[{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'f', 'unit': 'e"~"', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}]
>>> get_model_elements(r'a~b~c| d~e~"|"f| g~h~i|')
[{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': '"|"f', 'unit': 'e', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}]
# Double-quotes within escape groups are themselves escaped with backslashes:
>>> get_model_elements(r'a~b~c| d~e"\\\"~"~f| g~h~i|')
[{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'f', 'unit': 'e"\\\\"~"', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}]
>>> get_model_elements(r'a~b~c| d~e~"\\\"|"f| g~h~i|')
[{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': '"\\\\"|"f', 'unit': 'e', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}]
>>> get_model_elements(r'a~b~c| d~e"x\\nx"~f| g~h~|')
[{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'f', 'unit': 'e"x\\\\nx"', 'eqn': 'd'}, {'doc': '', 'unit': 'h', 'eqn': 'g'}]
# Todo: Handle model-level or section-level documentation
>>> get_model_elements(r'*** .model doc ***~ Docstring!| d~e~f| g~h~i|')
[{'doc': 'Docstring!', 'unit': '', 'eqn': ''}, {'doc': 'f', 'unit': 'e', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}]
# Handle control sections, returning appropriate docstring pieces
>>> get_model_elements(r'a~b~c| ****.Control***~ Simulation Control Parameters | g~h~i|')
[{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}]
# Handle the model display elements (ignore them)
>>> get_model_elements(r'a~b~c| d~e~f| \\\---///junk|junk~junk')
[{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'f', 'unit': 'e', 'eqn': 'd'}]
Notes
-----
- Tildes and pipes are not allowed in element docstrings, but we should still handle them there
"""
model_structure_grammar = _include_common_grammar(r"""
model = (entry / section)+ sketch?
entry = element "~" element "~" element ("~" element)? "|"
section = element "~" element "|"
sketch = ~r".*" #anything
# Either an escape group, or a character that is not tilde or pipe
element = (escape_group / ~r"[^~|]")*
""")
parser = parsimonious.Grammar(model_structure_grammar)
tree = parser.parse(model_str)
class ModelParser(parsimonious.NodeVisitor):
def __init__(self, ast):
self.entries = []
self.visit(ast)
def visit_entry(self, n, vc):
units, lims = parse_units(vc[2].strip())
self.entries.append({'eqn': vc[0].strip(),
'unit': units,
'lims': str(lims),
'doc': vc[4].strip(),
'kind': 'entry'})
def visit_section(self, n, vc):
if vc[2].strip() != "Simulation Control Parameters":
self.entries.append({'eqn': '',
'unit': '',
'lims': '',
'doc': vc[2].strip(),
'kind': 'section'})
def generic_visit(self, n, vc):
return ''.join(filter(None, vc)) or n.text or ''
return ModelParser(tree).entries | [
"def",
"get_model_elements",
"(",
"model_str",
")",
":",
"model_structure_grammar",
"=",
"_include_common_grammar",
"(",
"r\"\"\"\n model = (entry / section)+ sketch?\n entry = element \"~\" element \"~\" element (\"~\" element)? \"|\"\n section = element \"~\" element \"|\"\n sketch = ~r\".*\" #anything\n\n # Either an escape group, or a character that is not tilde or pipe\n element = (escape_group / ~r\"[^~|]\")*\n \"\"\"",
")",
"parser",
"=",
"parsimonious",
".",
"Grammar",
"(",
"model_structure_grammar",
")",
"tree",
"=",
"parser",
".",
"parse",
"(",
"model_str",
")",
"class",
"ModelParser",
"(",
"parsimonious",
".",
"NodeVisitor",
")",
":",
"def",
"__init__",
"(",
"self",
",",
"ast",
")",
":",
"self",
".",
"entries",
"=",
"[",
"]",
"self",
".",
"visit",
"(",
"ast",
")",
"def",
"visit_entry",
"(",
"self",
",",
"n",
",",
"vc",
")",
":",
"units",
",",
"lims",
"=",
"parse_units",
"(",
"vc",
"[",
"2",
"]",
".",
"strip",
"(",
")",
")",
"self",
".",
"entries",
".",
"append",
"(",
"{",
"'eqn'",
":",
"vc",
"[",
"0",
"]",
".",
"strip",
"(",
")",
",",
"'unit'",
":",
"units",
",",
"'lims'",
":",
"str",
"(",
"lims",
")",
",",
"'doc'",
":",
"vc",
"[",
"4",
"]",
".",
"strip",
"(",
")",
",",
"'kind'",
":",
"'entry'",
"}",
")",
"def",
"visit_section",
"(",
"self",
",",
"n",
",",
"vc",
")",
":",
"if",
"vc",
"[",
"2",
"]",
".",
"strip",
"(",
")",
"!=",
"\"Simulation Control Parameters\"",
":",
"self",
".",
"entries",
".",
"append",
"(",
"{",
"'eqn'",
":",
"''",
",",
"'unit'",
":",
"''",
",",
"'lims'",
":",
"''",
",",
"'doc'",
":",
"vc",
"[",
"2",
"]",
".",
"strip",
"(",
")",
",",
"'kind'",
":",
"'section'",
"}",
")",
"def",
"generic_visit",
"(",
"self",
",",
"n",
",",
"vc",
")",
":",
"return",
"''",
".",
"join",
"(",
"filter",
"(",
"None",
",",
"vc",
")",
")",
"or",
"n",
".",
"text",
"or",
"''",
"return",
"ModelParser",
"(",
"tree",
")",
".",
"entries"
] | Takes in a string representing model text and splits it into elements
I think we're making the assumption that all newline characters are removed...
Parameters
----------
model_str : string
Returns
-------
entries : array of dictionaries
Each dictionary contains the components of a different model element, separated into the
equation, units, and docstring.
Examples
--------
# Basic Parsing:
>>> get_model_elements(r'a~b~c| d~e~f| g~h~i|')
[{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'f', 'unit': 'e', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}]
# Special characters are escaped within double-quotes:
>>> get_model_elements(r'a~b~c| d~e"~"~f| g~h~i|')
[{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'f', 'unit': 'e"~"', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}]
>>> get_model_elements(r'a~b~c| d~e~"|"f| g~h~i|')
[{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': '"|"f', 'unit': 'e', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}]
# Double-quotes within escape groups are themselves escaped with backslashes:
>>> get_model_elements(r'a~b~c| d~e"\\\"~"~f| g~h~i|')
[{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'f', 'unit': 'e"\\\\"~"', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}]
>>> get_model_elements(r'a~b~c| d~e~"\\\"|"f| g~h~i|')
[{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': '"\\\\"|"f', 'unit': 'e', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}]
>>> get_model_elements(r'a~b~c| d~e"x\\nx"~f| g~h~|')
[{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'f', 'unit': 'e"x\\\\nx"', 'eqn': 'd'}, {'doc': '', 'unit': 'h', 'eqn': 'g'}]
# Todo: Handle model-level or section-level documentation
>>> get_model_elements(r'*** .model doc ***~ Docstring!| d~e~f| g~h~i|')
[{'doc': 'Docstring!', 'unit': '', 'eqn': ''}, {'doc': 'f', 'unit': 'e', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}]
# Handle control sections, returning appropriate docstring pieces
>>> get_model_elements(r'a~b~c| ****.Control***~ Simulation Control Parameters | g~h~i|')
[{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}]
# Handle the model display elements (ignore them)
>>> get_model_elements(r'a~b~c| d~e~f| \\\---///junk|junk~junk')
[{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'f', 'unit': 'e', 'eqn': 'd'}]
Notes
-----
- Tildes and pipes are not allowed in element docstrings, but we should still handle them there | [
"Takes",
"in",
"a",
"string",
"representing",
"model",
"text",
"and",
"splits",
"it",
"into",
"elements"
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/vensim/vensim2py.py#L87-L181 | train |
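The parser in the record above splits a whole Vensim model string into tilde/pipe-delimited entries. A minimal sketch in the style of its doctests (the import path is assumed from the record's `path` field; the model string is illustrative):

```python
# Hedged sketch of get_model_elements on a single three-part entry.
from pysd.py_backend.vensim.vensim2py import get_model_elements

entries = get_model_elements(r'Teacup Temperature = 180~Degrees~The temperature of the tea.|')
print(entries)
# [{'eqn': 'Teacup Temperature = 180', 'unit': 'Degrees', 'lims': '(None, None)',
#   'doc': 'The temperature of the tea.', 'kind': 'entry'}]
```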
JamesPHoughton/pysd | pysd/py_backend/vensim/vensim2py.py | get_equation_components | def get_equation_components(equation_str):
"""
Breaks down a string representing only the equation part of a model element.
Recognizes the various types of model elements that may exist, and identifies them.
Parameters
----------
equation_str : basestring
the first section in each model element - the full equation.
Returns
-------
Returns a dictionary containing the following:
real_name: basestring
The name of the element as given in the original vensim file
subs: list of strings
list of subscripts or subscript elements
expr: basestring
kind: basestring
What type of equation have we found?
- *component* - normal model expression or constant
- *lookup* - a lookup table
- *subdef* - a subscript definition
Examples
--------
>>> get_equation_components(r'constant = 25')
{'expr': '25', 'kind': 'component', 'subs': [], 'real_name': 'constant'}
Notes
-----
in this function we dont create python identifiers, we use real names.
This is so that when everything comes back together, we can manage
any potential namespace conflicts properly
"""
component_structure_grammar = _include_common_grammar(r"""
entry = component / subscript_definition / lookup_definition
component = name _ subscriptlist? _ "=" _ expression
subscript_definition = name _ ":" _ subscript _ ("," _ subscript)*
lookup_definition = name _ &"(" _ expression # uses lookahead assertion to capture whole group
name = basic_id / escape_group
subscriptlist = '[' _ subscript _ ("," _ subscript)* _ ']'
expression = ~r".*" # expression could be anything, at this point.
subscript = basic_id / escape_group
""")
# replace any amount of whitespace with a single space
equation_str = equation_str.replace('\\t', ' ')
equation_str = re.sub(r"\s+", ' ', equation_str)
parser = parsimonious.Grammar(component_structure_grammar)
tree = parser.parse(equation_str)
class ComponentParser(parsimonious.NodeVisitor):
def __init__(self, ast):
self.subscripts = []
self.real_name = None
self.expression = None
self.kind = None
self.visit(ast)
def visit_subscript_definition(self, n, vc):
self.kind = 'subdef'
def visit_lookup_definition(self, n, vc):
self.kind = 'lookup'
def visit_component(self, n, vc):
self.kind = 'component'
def visit_name(self, n, vc):
(name,) = vc
self.real_name = name.strip()
def visit_subscript(self, n, vc):
(subscript,) = vc
self.subscripts.append(subscript.strip())
def visit_expression(self, n, vc):
self.expression = n.text.strip()
def generic_visit(self, n, vc):
return ''.join(filter(None, vc)) or n.text
def visit__(self, n, vc):
return ' '
parse_object = ComponentParser(tree)
return {'real_name': parse_object.real_name,
'subs': parse_object.subscripts,
'expr': parse_object.expression,
'kind': parse_object.kind} | python | def get_equation_components(equation_str):
"""
Breaks down a string representing only the equation part of a model element.
Recognizes the various types of model elements that may exist, and identifies them.
Parameters
----------
equation_str : basestring
the first section in each model element - the full equation.
Returns
-------
Returns a dictionary containing the following:
real_name: basestring
The name of the element as given in the original vensim file
subs: list of strings
list of subscripts or subscript elements
expr: basestring
kind: basestring
What type of equation have we found?
- *component* - normal model expression or constant
- *lookup* - a lookup table
- *subdef* - a subscript definition
Examples
--------
>>> get_equation_components(r'constant = 25')
{'expr': '25', 'kind': 'component', 'subs': [], 'real_name': 'constant'}
Notes
-----
in this function we don't create python identifiers, we use real names.
This is so that when everything comes back together, we can manage
any potential namespace conflicts properly
"""
component_structure_grammar = _include_common_grammar(r"""
entry = component / subscript_definition / lookup_definition
component = name _ subscriptlist? _ "=" _ expression
subscript_definition = name _ ":" _ subscript _ ("," _ subscript)*
lookup_definition = name _ &"(" _ expression # uses lookahead assertion to capture whole group
name = basic_id / escape_group
subscriptlist = '[' _ subscript _ ("," _ subscript)* _ ']'
expression = ~r".*" # expression could be anything, at this point.
subscript = basic_id / escape_group
""")
# replace any amount of whitespace with a single space
equation_str = equation_str.replace('\\t', ' ')
equation_str = re.sub(r"\s+", ' ', equation_str)
parser = parsimonious.Grammar(component_structure_grammar)
tree = parser.parse(equation_str)
class ComponentParser(parsimonious.NodeVisitor):
def __init__(self, ast):
self.subscripts = []
self.real_name = None
self.expression = None
self.kind = None
self.visit(ast)
def visit_subscript_definition(self, n, vc):
self.kind = 'subdef'
def visit_lookup_definition(self, n, vc):
self.kind = 'lookup'
def visit_component(self, n, vc):
self.kind = 'component'
def visit_name(self, n, vc):
(name,) = vc
self.real_name = name.strip()
def visit_subscript(self, n, vc):
(subscript,) = vc
self.subscripts.append(subscript.strip())
def visit_expression(self, n, vc):
self.expression = n.text.strip()
def generic_visit(self, n, vc):
return ''.join(filter(None, vc)) or n.text
def visit__(self, n, vc):
return ' '
parse_object = ComponentParser(tree)
return {'real_name': parse_object.real_name,
'subs': parse_object.subscripts,
'expr': parse_object.expression,
'kind': parse_object.kind} | [
"def",
"get_equation_components",
"(",
"equation_str",
")",
":",
"component_structure_grammar",
"=",
"_include_common_grammar",
"(",
"r\"\"\"\n entry = component / subscript_definition / lookup_definition\n component = name _ subscriptlist? _ \"=\" _ expression\n subscript_definition = name _ \":\" _ subscript _ (\",\" _ subscript)*\n lookup_definition = name _ &\"(\" _ expression # uses lookahead assertion to capture whole group\n\n name = basic_id / escape_group\n subscriptlist = '[' _ subscript _ (\",\" _ subscript)* _ ']'\n expression = ~r\".*\" # expression could be anything, at this point.\n\n subscript = basic_id / escape_group\n \"\"\"",
")",
"# replace any amount of whitespace with a single space",
"equation_str",
"=",
"equation_str",
".",
"replace",
"(",
"'\\\\t'",
",",
"' '",
")",
"equation_str",
"=",
"re",
".",
"sub",
"(",
"r\"\\s+\"",
",",
"' '",
",",
"equation_str",
")",
"parser",
"=",
"parsimonious",
".",
"Grammar",
"(",
"component_structure_grammar",
")",
"tree",
"=",
"parser",
".",
"parse",
"(",
"equation_str",
")",
"class",
"ComponentParser",
"(",
"parsimonious",
".",
"NodeVisitor",
")",
":",
"def",
"__init__",
"(",
"self",
",",
"ast",
")",
":",
"self",
".",
"subscripts",
"=",
"[",
"]",
"self",
".",
"real_name",
"=",
"None",
"self",
".",
"expression",
"=",
"None",
"self",
".",
"kind",
"=",
"None",
"self",
".",
"visit",
"(",
"ast",
")",
"def",
"visit_subscript_definition",
"(",
"self",
",",
"n",
",",
"vc",
")",
":",
"self",
".",
"kind",
"=",
"'subdef'",
"def",
"visit_lookup_definition",
"(",
"self",
",",
"n",
",",
"vc",
")",
":",
"self",
".",
"kind",
"=",
"'lookup'",
"def",
"visit_component",
"(",
"self",
",",
"n",
",",
"vc",
")",
":",
"self",
".",
"kind",
"=",
"'component'",
"def",
"visit_name",
"(",
"self",
",",
"n",
",",
"vc",
")",
":",
"(",
"name",
",",
")",
"=",
"vc",
"self",
".",
"real_name",
"=",
"name",
".",
"strip",
"(",
")",
"def",
"visit_subscript",
"(",
"self",
",",
"n",
",",
"vc",
")",
":",
"(",
"subscript",
",",
")",
"=",
"vc",
"self",
".",
"subscripts",
".",
"append",
"(",
"subscript",
".",
"strip",
"(",
")",
")",
"def",
"visit_expression",
"(",
"self",
",",
"n",
",",
"vc",
")",
":",
"self",
".",
"expression",
"=",
"n",
".",
"text",
".",
"strip",
"(",
")",
"def",
"generic_visit",
"(",
"self",
",",
"n",
",",
"vc",
")",
":",
"return",
"''",
".",
"join",
"(",
"filter",
"(",
"None",
",",
"vc",
")",
")",
"or",
"n",
".",
"text",
"def",
"visit__",
"(",
"self",
",",
"n",
",",
"vc",
")",
":",
"return",
"' '",
"parse_object",
"=",
"ComponentParser",
"(",
"tree",
")",
"return",
"{",
"'real_name'",
":",
"parse_object",
".",
"real_name",
",",
"'subs'",
":",
"parse_object",
".",
"subscripts",
",",
"'expr'",
":",
"parse_object",
".",
"expression",
",",
"'kind'",
":",
"parse_object",
".",
"kind",
"}"
] | Breaks down a string representing only the equation part of a model element.
Recognizes the various types of model elements that may exist, and identifies them.
Parameters
----------
equation_str : basestring
the first section in each model element - the full equation.
Returns
-------
Returns a dictionary containing the following:
real_name: basestring
The name of the element as given in the original vensim file
subs: list of strings
list of subscripts or subscript elements
expr: basestring
kind: basestring
What type of equation have we found?
- *component* - normal model expression or constant
- *lookup* - a lookup table
- *subdef* - a subscript definition
Examples
--------
>>> get_equation_components(r'constant = 25')
{'expr': '25', 'kind': 'component', 'subs': [], 'real_name': 'constant'}
Notes
-----
in this function we dont create python identifiers, we use real names.
This is so that when everything comes back together, we can manage
any potential namespace conflicts properly | [
"Breaks",
"down",
"a",
"string",
"representing",
"only",
"the",
"equation",
"part",
"of",
"a",
"model",
"element",
".",
"Recognizes",
"the",
"various",
"types",
"of",
"model",
"elements",
"that",
"may",
"exist",
"and",
"identifies",
"them",
"."
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/vensim/vensim2py.py#L206-L305 | train |
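A minimal sketch of the equation parser above, extending its docstring example to a subscripted component (the import path is assumed from the record's `path` field; the subscripted input is illustrative and relies on `basic_id`, defined in the shared grammar outside this record):

```python
# Hedged sketch of get_equation_components.
from pysd.py_backend.vensim.vensim2py import get_equation_components

print(get_equation_components(r'constant = 25'))
# {'real_name': 'constant', 'subs': [], 'expr': '25', 'kind': 'component'}

print(get_equation_components(r'Stock[Dim1] = INTEG(inflow, 0)'))
# {'real_name': 'Stock', 'subs': ['Dim1'], 'expr': 'INTEG(inflow, 0)', 'kind': 'component'}
```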
JamesPHoughton/pysd | pysd/py_backend/vensim/vensim2py.py | parse_units | def parse_units(units_str):
"""
Extract and parse the units
Extract the bounds over which the expression is assumed to apply.
Parameters
----------
units_str
Returns
-------
Examples
--------
>>> parse_units('Widgets/Month [-10,10,1]')
('Widgets/Month', (-10,10,1))
>>> parse_units('Month [0,?]')
('Month', (0, None))
>>> parse_units('Widgets [0,100]')
('Widgets', (0, 100))
>>> parse_units('Widgets')
('Widgets', (None, None))
>>> parse_units('[0, 100]')
('', (0, 100))
"""
if not len(units_str):
return units_str, (None, None)
if units_str[-1] == ']':
units, lims = units_str.rsplit('[') # type: str, str
else:
units = units_str
lims = '?, ?]'
lims = tuple([float(x) if x.strip() != '?' else None for x in lims.strip(']').split(',')])
return units.strip(), lims | python | def parse_units(units_str):
"""
Extract and parse the units
Extract the bounds over which the expression is assumed to apply.
Parameters
----------
units_str
Returns
-------
Examples
--------
>>> parse_units('Widgets/Month [-10,10,1]')
('Widgets/Month', (-10,10,1))
>>> parse_units('Month [0,?]')
('Month', (0, None))
>>> parse_units('Widgets [0,100]')
('Widgets', (0, 100))
>>> parse_units('Widgets')
('Widgets', (None, None))
>>> parse_units('[0, 100]')
('', (0, 100))
"""
if not len(units_str):
return units_str, (None, None)
if units_str[-1] == ']':
units, lims = units_str.rsplit('[') # type: str, str
else:
units = units_str
lims = '?, ?]'
lims = tuple([float(x) if x.strip() != '?' else None for x in lims.strip(']').split(',')])
return units.strip(), lims | [
"def",
"parse_units",
"(",
"units_str",
")",
":",
"if",
"not",
"len",
"(",
"units_str",
")",
":",
"return",
"units_str",
",",
"(",
"None",
",",
"None",
")",
"if",
"units_str",
"[",
"-",
"1",
"]",
"==",
"']'",
":",
"units",
",",
"lims",
"=",
"units_str",
".",
"rsplit",
"(",
"'['",
")",
"# type: str, str",
"else",
":",
"units",
"=",
"units_str",
"lims",
"=",
"'?, ?]'",
"lims",
"=",
"tuple",
"(",
"[",
"float",
"(",
"x",
")",
"if",
"x",
".",
"strip",
"(",
")",
"!=",
"'?'",
"else",
"None",
"for",
"x",
"in",
"lims",
".",
"strip",
"(",
"']'",
")",
".",
"split",
"(",
"','",
")",
"]",
")",
"return",
"units",
".",
"strip",
"(",
")",
",",
"lims"
] | Extract and parse the units
Extract the bounds over which the expression is assumed to apply.
Parameters
----------
units_str
Returns
-------
Examples
--------
>>> parse_units('Widgets/Month [-10,10,1]')
('Widgets/Month', (-10,10,1))
>>> parse_units('Month [0,?]')
('Month', (0, None))
>>> parse_units('Widgets [0,100]')
('Widgets', (0, 100))
>>> parse_units('Widgets')
('Widgets', (None, None))
>>> parse_units('[0, 100]')
('', (0, 100)) | [
"Extract",
"and",
"parse",
"the",
"units",
"Extract",
"the",
"bounds",
"over",
"which",
"the",
"expression",
"is",
"assumed",
"to",
"apply",
"."
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/vensim/vensim2py.py#L308-L349 | train |
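A runnable sketch of `parse_units` based on its own doctests; note that the implementation returns float bounds, so the comments here show the exact values (the import path is assumed from the record's `path` field):

```python
# Hedged sketch of parse_units on the doctest inputs.
from pysd.py_backend.vensim.vensim2py import parse_units

print(parse_units('Widgets/Month [-10,10,1]'))  # ('Widgets/Month', (-10.0, 10.0, 1.0))
print(parse_units('Month [0,?]'))               # ('Month', (0.0, None))
print(parse_units('Widgets'))                   # ('Widgets', (None, None))
```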
JamesPHoughton/pysd | pysd/py_backend/vensim/vensim2py.py | parse_lookup_expression | def parse_lookup_expression(element):
""" This syntax parses lookups that are defined with their own element """
lookup_grammar = r"""
lookup = _ "(" range? _ ( "(" _ number _ "," _ number _ ")" _ ","? _ )+ ")"
number = ("+"/"-")? ~r"\d+\.?\d*(e[+-]\d+)?"
_ = ~r"[\s\\]*" # whitespace character
range = _ "[" ~r"[^\]]*" "]" _ ","
"""
parser = parsimonious.Grammar(lookup_grammar)
tree = parser.parse(element['expr'])
class LookupParser(parsimonious.NodeVisitor):
def __init__(self, ast):
self.translation = ""
self.new_structure = []
self.visit(ast)
def visit__(self, n, vc):
# remove whitespace
return ''
def visit_lookup(self, n, vc):
pairs = max(vc, key=len)
mixed_list = pairs.replace('(', '').replace(')', '').split(',')
xs = mixed_list[::2]
ys = mixed_list[1::2]
string = "functions.lookup(x, [%(xs)s], [%(ys)s])" % {
'xs': ','.join(xs),
'ys': ','.join(ys)
}
self.translation = string
def generic_visit(self, n, vc):
return ''.join(filter(None, vc)) or n.text
parse_object = LookupParser(tree)
return {'py_expr': parse_object.translation,
'arguments': 'x'} | python | def parse_lookup_expression(element):
""" This syntax parses lookups that are defined with their own element """
lookup_grammar = r"""
lookup = _ "(" range? _ ( "(" _ number _ "," _ number _ ")" _ ","? _ )+ ")"
number = ("+"/"-")? ~r"\d+\.?\d*(e[+-]\d+)?"
_ = ~r"[\s\\]*" # whitespace character
range = _ "[" ~r"[^\]]*" "]" _ ","
"""
parser = parsimonious.Grammar(lookup_grammar)
tree = parser.parse(element['expr'])
class LookupParser(parsimonious.NodeVisitor):
def __init__(self, ast):
self.translation = ""
self.new_structure = []
self.visit(ast)
def visit__(self, n, vc):
# remove whitespace
return ''
def visit_lookup(self, n, vc):
pairs = max(vc, key=len)
mixed_list = pairs.replace('(', '').replace(')', '').split(',')
xs = mixed_list[::2]
ys = mixed_list[1::2]
string = "functions.lookup(x, [%(xs)s], [%(ys)s])" % {
'xs': ','.join(xs),
'ys': ','.join(ys)
}
self.translation = string
def generic_visit(self, n, vc):
return ''.join(filter(None, vc)) or n.text
parse_object = LookupParser(tree)
return {'py_expr': parse_object.translation,
'arguments': 'x'} | [
"def",
"parse_lookup_expression",
"(",
"element",
")",
":",
"lookup_grammar",
"=",
"r\"\"\"\n lookup = _ \"(\" range? _ ( \"(\" _ number _ \",\" _ number _ \")\" _ \",\"? _ )+ \")\"\n number = (\"+\"/\"-\")? ~r\"\\d+\\.?\\d*(e[+-]\\d+)?\"\n _ = ~r\"[\\s\\\\]*\" # whitespace character\n\trange = _ \"[\" ~r\"[^\\]]*\" \"]\" _ \",\"\n \"\"\"",
"parser",
"=",
"parsimonious",
".",
"Grammar",
"(",
"lookup_grammar",
")",
"tree",
"=",
"parser",
".",
"parse",
"(",
"element",
"[",
"'expr'",
"]",
")",
"class",
"LookupParser",
"(",
"parsimonious",
".",
"NodeVisitor",
")",
":",
"def",
"__init__",
"(",
"self",
",",
"ast",
")",
":",
"self",
".",
"translation",
"=",
"\"\"",
"self",
".",
"new_structure",
"=",
"[",
"]",
"self",
".",
"visit",
"(",
"ast",
")",
"def",
"visit__",
"(",
"self",
",",
"n",
",",
"vc",
")",
":",
"# remove whitespace",
"return",
"''",
"def",
"visit_lookup",
"(",
"self",
",",
"n",
",",
"vc",
")",
":",
"pairs",
"=",
"max",
"(",
"vc",
",",
"key",
"=",
"len",
")",
"mixed_list",
"=",
"pairs",
".",
"replace",
"(",
"'('",
",",
"''",
")",
".",
"replace",
"(",
"')'",
",",
"''",
")",
".",
"split",
"(",
"','",
")",
"xs",
"=",
"mixed_list",
"[",
":",
":",
"2",
"]",
"ys",
"=",
"mixed_list",
"[",
"1",
":",
":",
"2",
"]",
"string",
"=",
"\"functions.lookup(x, [%(xs)s], [%(ys)s])\"",
"%",
"{",
"'xs'",
":",
"','",
".",
"join",
"(",
"xs",
")",
",",
"'ys'",
":",
"','",
".",
"join",
"(",
"ys",
")",
"}",
"self",
".",
"translation",
"=",
"string",
"def",
"generic_visit",
"(",
"self",
",",
"n",
",",
"vc",
")",
":",
"return",
"''",
".",
"join",
"(",
"filter",
"(",
"None",
",",
"vc",
")",
")",
"or",
"n",
".",
"text",
"parse_object",
"=",
"LookupParser",
"(",
"tree",
")",
"return",
"{",
"'py_expr'",
":",
"parse_object",
".",
"translation",
",",
"'arguments'",
":",
"'x'",
"}"
] | This syntax parses lookups that are defined with their own element | [
"This",
"syntax",
"parses",
"lookups",
"that",
"are",
"defined",
"with",
"their",
"own",
"element"
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/vensim/vensim2py.py#L807-L845 | train |
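A minimal sketch of `parse_lookup_expression` above; the element dict is illustrative (only its `'expr'` key is read), and the import path is assumed from the record's `path` field:

```python
# Hedged sketch: the x values of the pairs become xs, the y values become ys.
from pysd.py_backend.vensim.vensim2py import parse_lookup_expression

element = {'expr': '( (0,0), (1,1), (2,4) )'}
print(parse_lookup_expression(element))
# {'py_expr': 'functions.lookup(x, [0,1,2], [0,1,4])', 'arguments': 'x'}
```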
JamesPHoughton/pysd | pysd/py_backend/utils.py | dict_find | def dict_find(in_dict, value):
""" Helper function for looking up directory keys by their values.
This isn't robust to repeated values
Parameters
----------
in_dict : dictionary
A dictionary containing `value`
value : any type
What we wish to find in the dictionary
Returns
-------
key: basestring
The key at which the value can be found
Examples
--------
>>> dict_find({'Key1': 'A', 'Key2': 'B'}, 'B')
'Key2'
"""
# Todo: make this robust to repeated values
# Todo: make this robust to missing values
return list(in_dict.keys())[list(in_dict.values()).index(value)] | python | def dict_find(in_dict, value):
""" Helper function for looking up directory keys by their values.
This isn't robust to repeated values
Parameters
----------
in_dict : dictionary
A dictionary containing `value`
value : any type
What we wish to find in the dictionary
Returns
-------
key: basestring
The key at which the value can be found
Examples
--------
>>> dict_find({'Key1': 'A', 'Key2': 'B'}, 'B')
'Key2'
"""
# Todo: make this robust to repeated values
# Todo: make this robust to missing values
return list(in_dict.keys())[list(in_dict.values()).index(value)] | [
"def",
"dict_find",
"(",
"in_dict",
",",
"value",
")",
":",
"# Todo: make this robust to repeated values",
"# Todo: make this robust to missing values",
"return",
"list",
"(",
"in_dict",
".",
"keys",
"(",
")",
")",
"[",
"list",
"(",
"in_dict",
".",
"values",
"(",
")",
")",
".",
"index",
"(",
"value",
")",
"]"
] | Helper function for looking up dictionary keys by their values.
This isn't robust to repeated values
Parameters
----------
in_dict : dictionary
A dictionary containing `value`
value : any type
What we wish to find in the dictionary
Returns
-------
key: basestring
The key at which the value can be found
Examples
--------
>>> dict_find({'Key1': 'A', 'Key2': 'B'}, 'B')
'Key2' | [
"Helper",
"function",
"for",
"looking",
"up",
"directory",
"keys",
"by",
"their",
"values",
".",
"This",
"isn",
"t",
"robust",
"to",
"repeated",
"values"
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/utils.py#L9-L34 | train |
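A self-contained re-run of the docstring example for `dict_find` (the import path is assumed from the record's `path` field):

```python
# Hedged sketch of dict_find; as its Todos note, a repeated value returns only
# the first matching key, and a missing value raises ValueError from list.index().
from pysd.py_backend.utils import dict_find

print(dict_find({'Key1': 'A', 'Key2': 'B'}, 'B'))  # 'Key2'
```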
JamesPHoughton/pysd | pysd/py_backend/utils.py | find_subscript_name | def find_subscript_name(subscript_dict, element):
"""
Given a subscript dictionary, and a member of a subscript family,
return the first key of which the member is within the value list.
If element is already a subscript name, return that
Parameters
----------
subscript_dict: dictionary
Follows the {'subscript name':['list','of','subscript','elements']} format
element: string
Returns
-------
Examples:
>>> find_subscript_name({'Dim1': ['A', 'B'],
... 'Dim2': ['C', 'D', 'E'],
... 'Dim3': ['F', 'G', 'H', 'I']},
... 'D')
'Dim2'
"""
if element in subscript_dict.keys():
return element
for name, elements in subscript_dict.items():
if element in elements:
return name | python | def find_subscript_name(subscript_dict, element):
"""
Given a subscript dictionary, and a member of a subscript family,
return the first key of which the member is within the value list.
If element is already a subscript name, return that
Parameters
----------
subscript_dict: dictionary
Follows the {'subscript name':['list','of','subscript','elements']} format
element: string
Returns
-------
Examples:
>>> find_subscript_name({'Dim1': ['A', 'B'],
... 'Dim2': ['C', 'D', 'E'],
... 'Dim3': ['F', 'G', 'H', 'I']},
... 'D')
'Dim2'
"""
if element in subscript_dict.keys():
return element
for name, elements in subscript_dict.items():
if element in elements:
return name | [
"def",
"find_subscript_name",
"(",
"subscript_dict",
",",
"element",
")",
":",
"if",
"element",
"in",
"subscript_dict",
".",
"keys",
"(",
")",
":",
"return",
"element",
"for",
"name",
",",
"elements",
"in",
"subscript_dict",
".",
"items",
"(",
")",
":",
"if",
"element",
"in",
"elements",
":",
"return",
"name"
] | Given a subscript dictionary, and a member of a subscript family,
return the first key of which the member is within the value list.
If element is already a subscript name, return that
Parameters
----------
subscript_dict: dictionary
Follows the {'subscript name':['list','of','subscript','elements']} format
element: string
Returns
-------
Examples:
>>> find_subscript_name({'Dim1': ['A', 'B'],
... 'Dim2': ['C', 'D', 'E'],
... 'Dim3': ['F', 'G', 'H', 'I']},
... 'D')
'Dim2' | [
"Given",
"a",
"subscript",
"dictionary",
"and",
"a",
"member",
"of",
"a",
"subscript",
"family",
"return",
"the",
"first",
"key",
"of",
"which",
"the",
"member",
"is",
"within",
"the",
"value",
"list",
".",
"If",
"element",
"is",
"already",
"a",
"subscript",
"name",
"return",
"that"
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/utils.py#L65-L93 | train |
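A re-run of the docstring example for `find_subscript_name`, plus the already-a-family-name case the docstring mentions; note the function falls through and returns None implicitly when the element is found nowhere (the import path is assumed from the record's `path` field):

```python
# Hedged sketch of find_subscript_name.
from pysd.py_backend.utils import find_subscript_name

subscript_dict = {'Dim1': ['A', 'B'], 'Dim2': ['C', 'D', 'E'], 'Dim3': ['F', 'G', 'H', 'I']}
print(find_subscript_name(subscript_dict, 'D'))     # 'Dim2'
print(find_subscript_name(subscript_dict, 'Dim3'))  # 'Dim3' (already a family name)
```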
JamesPHoughton/pysd | pysd/py_backend/utils.py | make_coord_dict | def make_coord_dict(subs, subscript_dict, terse=True):
"""
This is for assisting with the lookup of a particular element, such that the output
of this function would take the place of %s in this expression
`variable.loc[%s]`
Parameters
----------
subs: list of strings
coordinates, either as names of dimensions, or positions within a dimension
subscript_dict: dict
the full dictionary of subscript names and values
terse: Binary Flag
- If true, includes only elements that do not cover the full range of values in their
respective dimension
- If false, returns all dimensions
Returns
-------
coordinates: dictionary
Coordinates needed to access the xarray quantities we're interested in.
Examples
--------
>>> make_coord_dict(['Dim1', 'D'], {'Dim1': ['A', 'B', 'C'], 'Dim2': ['D', 'E', 'F']})
{'Dim2': ['D']}
>>> make_coord_dict(['Dim1', 'D'], {'Dim1': ['A', 'B', 'C'], 'Dim2':['D', 'E', 'F']},
>>> terse=False)
{'Dim2': ['D'], 'Dim1': ['A', 'B', 'C']}
"""
sub_elems_list = [y for x in subscript_dict.values() for y in x]
coordinates = {}
for sub in subs:
if sub in sub_elems_list:
name = find_subscript_name(subscript_dict, sub)
coordinates[name] = [sub]
elif not terse:
coordinates[sub] = subscript_dict[sub]
return coordinates | python | def make_coord_dict(subs, subscript_dict, terse=True):
"""
This is for assisting with the lookup of a particular element, such that the output
of this function would take the place of %s in this expression
`variable.loc[%s]`
Parameters
----------
subs: list of strings
coordinates, either as names of dimensions, or positions within a dimension
subscript_dict: dict
the full dictionary of subscript names and values
terse: Binary Flag
- If true, includes only elements that do not cover the full range of values in their
respective dimension
- If false, returns all dimensions
Returns
-------
coordinates: dictionary
Coordinates needed to access the xarray quantities we're interested in.
Examples
--------
>>> make_coord_dict(['Dim1', 'D'], {'Dim1': ['A', 'B', 'C'], 'Dim2': ['D', 'E', 'F']})
{'Dim2': ['D']}
>>> make_coord_dict(['Dim1', 'D'], {'Dim1': ['A', 'B', 'C'], 'Dim2':['D', 'E', 'F']},
>>> terse=False)
{'Dim2': ['D'], 'Dim1': ['A', 'B', 'C']}
"""
sub_elems_list = [y for x in subscript_dict.values() for y in x]
coordinates = {}
for sub in subs:
if sub in sub_elems_list:
name = find_subscript_name(subscript_dict, sub)
coordinates[name] = [sub]
elif not terse:
coordinates[sub] = subscript_dict[sub]
return coordinates | [
"def",
"make_coord_dict",
"(",
"subs",
",",
"subscript_dict",
",",
"terse",
"=",
"True",
")",
":",
"sub_elems_list",
"=",
"[",
"y",
"for",
"x",
"in",
"subscript_dict",
".",
"values",
"(",
")",
"for",
"y",
"in",
"x",
"]",
"coordinates",
"=",
"{",
"}",
"for",
"sub",
"in",
"subs",
":",
"if",
"sub",
"in",
"sub_elems_list",
":",
"name",
"=",
"find_subscript_name",
"(",
"subscript_dict",
",",
"sub",
")",
"coordinates",
"[",
"name",
"]",
"=",
"[",
"sub",
"]",
"elif",
"not",
"terse",
":",
"coordinates",
"[",
"sub",
"]",
"=",
"subscript_dict",
"[",
"sub",
"]",
"return",
"coordinates"
] | This is for assisting with the lookup of a particular element, such that the output
of this function would take the place of %s in this expression
`variable.loc[%s]`
Parameters
----------
subs: list of strings
coordinates, either as names of dimensions, or positions within a dimension
subscript_dict: dict
the full dictionary of subscript names and values
terse: Binary Flag
- If true, includes only elements that do not cover the full range of values in their
respective dimension
- If false, returns all dimensions
Returns
-------
coordinates: dictionary
Coordinates needed to access the xarray quantities we're interested in.
Examples
--------
>>> make_coord_dict(['Dim1', 'D'], {'Dim1': ['A', 'B', 'C'], 'Dim2': ['D', 'E', 'F']})
{'Dim2': ['D']}
>>> make_coord_dict(['Dim1', 'D'], {'Dim1': ['A', 'B', 'C'], 'Dim2':['D', 'E', 'F']},
>>> terse=False)
{'Dim2': ['D'], 'Dim1': ['A', 'B', 'C']} | [
"This",
"is",
"for",
"assisting",
"with",
"the",
"lookup",
"of",
"a",
"particular",
"element",
"such",
"that",
"the",
"output",
"of",
"this",
"function",
"would",
"take",
"the",
"place",
"of",
"%s",
"in",
"this",
"expression"
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/utils.py#L96-L135 | train |
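A re-run of the docstring examples for `make_coord_dict`, showing the effect of the `terse` flag (the import path is assumed from the record's `path` field):

```python
# Hedged sketch of make_coord_dict.
from pysd.py_backend.utils import make_coord_dict

subscript_dict = {'Dim1': ['A', 'B', 'C'], 'Dim2': ['D', 'E', 'F']}
print(make_coord_dict(['Dim1', 'D'], subscript_dict))
# {'Dim2': ['D']}
print(make_coord_dict(['Dim1', 'D'], subscript_dict, terse=False))
# {'Dim2': ['D'], 'Dim1': ['A', 'B', 'C']}
```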
JamesPHoughton/pysd | pysd/py_backend/utils.py | make_python_identifier | def make_python_identifier(string, namespace=None, reserved_words=None,
convert='drop', handle='force'):
"""
Takes an arbitrary string and creates a valid Python identifier.
If the input string is in the namespace, return its value.
If the python identifier created is already in the namespace,
but the input string is not (ie, two similar strings resolve to
the same python identifier)
or if the identifier is a reserved word in the reserved_words
list, or is a python default reserved word,
adds _1, or if _1 is in the namespace, _2, etc.
Parameters
----------
string : <basestring>
The text to be converted into a valid python identifier
namespace : <dictionary>
Map of existing translations into python safe identifiers.
This is to ensure that two strings are not translated into
the same python identifier
reserved_words : <list of strings>
List of words that are reserved (because they have other meanings
in this particular program, such as also being the names of
libraries, etc.
convert : <string>
Tells the function what to do with characters that are not
valid in python identifiers
- 'hex' implies that they will be converted to their hexadecimal
representation. This is handy if you have variables that
have a lot of reserved characters, or you don't want the
name to be dependent on when things were added to the
namespace
- 'drop' implies that they will just be dropped altogether
handle : <string>
Tells the function how to deal with namespace conflicts
- 'force' will create a representation which is not in conflict
by appending _n to the resulting variable where n is
the lowest number necessary to avoid a conflict
- 'throw' will raise an exception
Returns
-------
identifier : <string>
A valid python identifier based on the input string
namespace : <dictionary>
An updated map of the translations of words to python identifiers,
including the passed in 'string'.
Examples
--------
>>> make_python_identifier('Capital')
('capital', {'Capital': 'capital'})
>>> make_python_identifier('multiple words')
('multiple_words', {'multiple words': 'multiple_words'})
>>> make_python_identifier('multiple spaces')
('multiple_spaces', {'multiple spaces': 'multiple_spaces'})
When the name is a python keyword, add '_1' to differentiate it
>>> make_python_identifier('for')
('for_1', {'for': 'for_1'})
Remove leading and trailing whitespace
>>> make_python_identifier(' whitespace ')
('whitespace', {' whitespace ': 'whitespace'})
Remove most special characters outright:
>>> make_python_identifier('H@t tr!ck')
('ht_trck', {'H@t tr!ck': 'ht_trck'})
Replace special characters with their hex representations
>>> make_python_identifier('H@t tr!ck', convert='hex')
('h40t_tr21ck', {'H@t tr!ck': 'h40t_tr21ck'})
remove leading digits
>>> make_python_identifier('123abc')
('abc', {'123abc': 'abc'})
already in namespace
>>> make_python_identifier('Variable$', namespace={'Variable$': 'variable'})
('variable', {'Variable$': 'variable'})
namespace conflicts
>>> make_python_identifier('Variable$', namespace={'Variable@': 'variable'})
('variable_1', {'Variable@': 'variable', 'Variable$': 'variable_1'})
>>> make_python_identifier('Variable$', namespace={'Variable@': 'variable',
>>> 'Variable%': 'variable_1'})
('variable_2', {'Variable@': 'variable', 'Variable%': 'variable_1', 'Variable$': 'variable_2'})
throw exception instead
>>> make_python_identifier('Variable$', namespace={'Variable@': 'variable'}, handle='throw')
Traceback (most recent call last):
...
NameError: variable already exists in namespace or is a reserved word
References
----------
Identifiers must follow the convention outlined here:
https://docs.python.org/2/reference/lexical_analysis.html#identifiers
"""
if namespace is None:
namespace = dict()
if reserved_words is None:
reserved_words = list()
if string in namespace:
return namespace[string], namespace
# create a working copy (and make it lowercase, while we're at it)
s = string.lower()
# remove leading and trailing whitespace
s = s.strip()
# Make spaces into underscores
s = re.sub('[\\s\\t\\n]+', '_', s)
if convert == 'hex':
# Convert invalid characters to hex. Note: \p{l} designates all Unicode letter characters (any language),
# \p{m} designates all mark symbols (e.g., vowel marks in Indian scripts, such as the final)
# and \p{n} designates all numbers. We allow any of these to be present in the regex.
s = ''.join([c.encode("hex") if re.findall('[^\p{l}\p{m}\p{n}_]', c) else c for c in s])
elif convert == 'drop':
# Remove invalid characters
s = re.sub('[^\p{l}\p{m}\p{n}_]', '', s)
# Remove leading characters until we find a letter or underscore. Only letters can be leading characters.
s = re.sub('^[^\p{l}_]+', '', s)
# Check that the string is not a python identifier
while (s in keyword.kwlist or
s in namespace.values() or
s in reserved_words):
if handle == 'throw':
raise NameError(s + ' already exists in namespace or is a reserved word')
if handle == 'force':
if re.match(".*?_\d+$", s):
i = re.match(".*?_(\d+)$", s).groups()[0]
s = s.strip('_' + i) + '_' + str(int(i) + 1)
else:
s += '_1'
namespace[string] = s
return s, namespace | python | def make_python_identifier(string, namespace=None, reserved_words=None,
convert='drop', handle='force'):
"""
Takes an arbitrary string and creates a valid Python identifier.
If the input string is in the namespace, return its value.
If the python identifier created is already in the namespace,
but the input string is not (ie, two similar strings resolve to
the same python identifier)
or if the identifier is a reserved word in the reserved_words
list, or is a python default reserved word,
adds _1, or if _1 is in the namespace, _2, etc.
Parameters
----------
string : <basestring>
The text to be converted into a valid python identifier
namespace : <dictionary>
Map of existing translations into python safe identifiers.
This is to ensure that two strings are not translated into
the same python identifier
reserved_words : <list of strings>
List of words that are reserved (because they have other meanings
in this particular program, such as also being the names of
libraries, etc.
convert : <string>
Tells the function what to do with characters that are not
valid in python identifiers
- 'hex' implies that they will be converted to their hexadecimal
representation. This is handy if you have variables that
have a lot of reserved characters, or you don't want the
name to be dependent on when things were added to the
namespace
- 'drop' implies that they will just be dropped altogether
handle : <string>
Tells the function how to deal with namespace conflicts
- 'force' will create a representation which is not in conflict
by appending _n to the resulting variable where n is
the lowest number necessary to avoid a conflict
- 'throw' will raise an exception
Returns
-------
identifier : <string>
A valid python identifier based on the input string
namespace : <dictionary>
An updated map of the translations of words to python identifiers,
including the passed in 'string'.
Examples
--------
>>> make_python_identifier('Capital')
('capital', {'Capital': 'capital'})
>>> make_python_identifier('multiple words')
('multiple_words', {'multiple words': 'multiple_words'})
>>> make_python_identifier('multiple spaces')
('multiple_spaces', {'multiple spaces': 'multiple_spaces'})
When the name is a python keyword, add '_1' to differentiate it
>>> make_python_identifier('for')
('for_1', {'for': 'for_1'})
Remove leading and trailing whitespace
>>> make_python_identifier(' whitespace ')
('whitespace', {' whitespace ': 'whitespace'})
Remove most special characters outright:
>>> make_python_identifier('H@t tr!ck')
('ht_trck', {'H@t tr!ck': 'ht_trck'})
Replace special characters with their hex representations
>>> make_python_identifier('H@t tr!ck', convert='hex')
('h40t_tr21ck', {'H@t tr!ck': 'h40t_tr21ck'})
remove leading digits
>>> make_python_identifier('123abc')
('abc', {'123abc': 'abc'})
already in namespace
>>> make_python_identifier('Variable$', namespace={'Variable$': 'variable'})
('variable', {'Variable$': 'variable'})
namespace conflicts
>>> make_python_identifier('Variable$', namespace={'Variable@': 'variable'})
('variable_1', {'Variable@': 'variable', 'Variable$': 'variable_1'})
>>> make_python_identifier('Variable$', namespace={'Variable@': 'variable',
>>> 'Variable%': 'variable_1'})
('variable_2', {'Variable@': 'variable', 'Variable%': 'variable_1', 'Variable$': 'variable_2'})
throw exception instead
>>> make_python_identifier('Variable$', namespace={'Variable@': 'variable'}, handle='throw')
Traceback (most recent call last):
...
NameError: variable already exists in namespace or is a reserved word
References
----------
Identifiers must follow the convention outlined here:
https://docs.python.org/2/reference/lexical_analysis.html#identifiers
"""
if namespace is None:
namespace = dict()
if reserved_words is None:
reserved_words = list()
if string in namespace:
return namespace[string], namespace
# create a working copy (and make it lowercase, while we're at it)
s = string.lower()
# remove leading and trailing whitespace
s = s.strip()
# Make spaces into underscores
s = re.sub('[\\s\\t\\n]+', '_', s)
if convert == 'hex':
# Convert invalid characters to hex. Note: \p{l} designates all Unicode letter characters (any language),
# \p{m} designates all mark symbols (e.g., vowel marks in Indian scripts, such as the final)
# and \p{n} designates all numbers. We allow any of these to be present in the regex.
s = ''.join([c.encode("hex") if re.findall('[^\p{l}\p{m}\p{n}_]', c) else c for c in s])
elif convert == 'drop':
# Remove invalid characters
s = re.sub('[^\p{l}\p{m}\p{n}_]', '', s)
# Remove leading characters until we find a letter or underscore. Only letters can be leading characters.
s = re.sub('^[^\p{l}_]+', '', s)
# Check that the string is not a python identifier
while (s in keyword.kwlist or
s in namespace.values() or
s in reserved_words):
if handle == 'throw':
raise NameError(s + ' already exists in namespace or is a reserved word')
if handle == 'force':
if re.match(".*?_\d+$", s):
i = re.match(".*?_(\d+)$", s).groups()[0]
s = s.strip('_' + i) + '_' + str(int(i) + 1)
else:
s += '_1'
namespace[string] = s
return s, namespace | [
"def",
"make_python_identifier",
"(",
"string",
",",
"namespace",
"=",
"None",
",",
"reserved_words",
"=",
"None",
",",
"convert",
"=",
"'drop'",
",",
"handle",
"=",
"'force'",
")",
":",
"if",
"namespace",
"is",
"None",
":",
"namespace",
"=",
"dict",
"(",
")",
"if",
"reserved_words",
"is",
"None",
":",
"reserved_words",
"=",
"list",
"(",
")",
"if",
"string",
"in",
"namespace",
":",
"return",
"namespace",
"[",
"string",
"]",
",",
"namespace",
"# create a working copy (and make it lowercase, while we're at it)",
"s",
"=",
"string",
".",
"lower",
"(",
")",
"# remove leading and trailing whitespace",
"s",
"=",
"s",
".",
"strip",
"(",
")",
"# Make spaces into underscores",
"s",
"=",
"re",
".",
"sub",
"(",
"'[\\\\s\\\\t\\\\n]+'",
",",
"'_'",
",",
"s",
")",
"if",
"convert",
"==",
"'hex'",
":",
"# Convert invalid characters to hex. Note: \\p{l} designates all Unicode letter characters (any language),",
"# \\p{m} designates all mark symbols (e.g., vowel marks in Indian scrips, such as the final)",
"# and \\p{n} designates all numbers. We allow any of these to be present in the regex.",
"s",
"=",
"''",
".",
"join",
"(",
"[",
"c",
".",
"encode",
"(",
"\"hex\"",
")",
"if",
"re",
".",
"findall",
"(",
"'[^\\p{l}\\p{m}\\p{n}_]'",
",",
"c",
")",
"else",
"c",
"for",
"c",
"in",
"s",
"]",
")",
"elif",
"convert",
"==",
"'drop'",
":",
"# Remove invalid characters",
"s",
"=",
"re",
".",
"sub",
"(",
"'[^\\p{l}\\p{m}\\p{n}_]'",
",",
"''",
",",
"s",
")",
"# Remove leading characters until we find a letter or underscore. Only letters can be leading characters.",
"s",
"=",
"re",
".",
"sub",
"(",
"'^[^\\p{l}_]+'",
",",
"''",
",",
"s",
")",
"# Check that the string is not a python identifier",
"while",
"(",
"s",
"in",
"keyword",
".",
"kwlist",
"or",
"s",
"in",
"namespace",
".",
"values",
"(",
")",
"or",
"s",
"in",
"reserved_words",
")",
":",
"if",
"handle",
"==",
"'throw'",
":",
"raise",
"NameError",
"(",
"s",
"+",
"' already exists in namespace or is a reserved word'",
")",
"if",
"handle",
"==",
"'force'",
":",
"if",
"re",
".",
"match",
"(",
"\".*?_\\d+$\"",
",",
"s",
")",
":",
"i",
"=",
"re",
".",
"match",
"(",
"\".*?_(\\d+)$\"",
",",
"s",
")",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"s",
"=",
"s",
".",
"strip",
"(",
"'_'",
"+",
"i",
")",
"+",
"'_'",
"+",
"str",
"(",
"int",
"(",
"i",
")",
"+",
"1",
")",
"else",
":",
"s",
"+=",
"'_1'",
"namespace",
"[",
"string",
"]",
"=",
"s",
"return",
"s",
",",
"namespace"
] | Takes an arbitrary string and creates a valid Python identifier.
If the input string is in the namespace, return its value.
If the python identifier created is already in the namespace,
but the input string is not (ie, two similar strings resolve to
the same python identifier)
or if the identifier is a reserved word in the reserved_words
list, or is a python default reserved word,
adds _1, or if _1 is in the namespace, _2, etc.
Parameters
----------
string : <basestring>
The text to be converted into a valid python identifier
namespace : <dictionary>
Map of existing translations into python safe identifiers.
This is to ensure that two strings are not translated into
the same python identifier
reserved_words : <list of strings>
List of words that are reserved (because they have other meanings
in this particular program, such as also being the names of
libraries, etc.)
convert : <string>
Tells the function what to do with characters that are not
valid in python identifiers
- 'hex' implies that they will be converted to their hexadecimal
representation. This is handy if you have variables that
have a lot of reserved characters, or you don't want the
name to be dependent on when things were added to the
namespace
- 'drop' implies that they will just be dropped altogether
handle : <string>
Tells the function how to deal with namespace conflicts
- 'force' will create a representation which is not in conflict
by appending _n to the resulting variable where n is
the lowest number necessary to avoid a conflict
- 'throw' will raise an exception
Returns
-------
identifier : <string>
A valid python identifier based on the input string
namespace : <dictionary>
An updated map of the translations of words to python identifiers,
including the passed in 'string'.
Examples
--------
>>> make_python_identifier('Capital')
('capital', {'Capital': 'capital'})
>>> make_python_identifier('multiple words')
('multiple_words', {'multiple words': 'multiple_words'})
>>> make_python_identifier('multiple spaces')
('multiple_spaces', {'multiple spaces': 'multiple_spaces'})
When the name is a python keyword, add '_1' to differentiate it
>>> make_python_identifier('for')
('for_1', {'for': 'for_1'})
Remove leading and trailing whitespace
>>> make_python_identifier(' whitespace ')
('whitespace', {' whitespace ': 'whitespace'})
Remove most special characters outright:
>>> make_python_identifier('H@t tr!ck')
('ht_trck', {'H@t tr!ck': 'ht_trck'})
Replace special characters with their hex representations
>>> make_python_identifier('H@t tr!ck', convert='hex')
('h40t_tr21ck', {'H@t tr!ck': 'h40t_tr21ck'})
remove leading digits
>>> make_python_identifier('123abc')
('abc', {'123abc': 'abc'})
already in namespace
>>> make_python_identifier('Variable$', namespace={'Variable$': 'variable'})
('variable', {'Variable$': 'variable'})
namespace conflicts
>>> make_python_identifier('Variable$', namespace={'Variable@': 'variable'})
('variable_1', {'Variable@': 'variable', 'Variable$': 'variable_1'})
>>> make_python_identifier('Variable$', namespace={'Variable@': 'variable',
... 'Variable%': 'variable_1'})
('variable_2', {'Variable@': 'variable', 'Variable%': 'variable_1', 'Variable$': 'variable_2'})
throw exception instead
>>> make_python_identifier('Variable$', namespace={'Variable@': 'variable'}, handle='throw')
Traceback (most recent call last):
...
NameError: variable already exists in namespace or is a reserved word
References
----------
Identifiers must follow the convention outlined here:
https://docs.python.org/2/reference/lexical_analysis.html#identifiers | [
"Takes",
"an",
"arbitrary",
"string",
"and",
"creates",
"a",
"valid",
"Python",
"identifier",
"."
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/utils.py#L138-L291 | train |
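A minimal usage sketch for make_python_identifier, assuming the function is importable from the file path in this record; note that the \p{...} character classes in its body require the third-party regex module rather than the stdlib re, and c.encode("hex") only exists on Python 2.

# Hypothetical usage; the import path is inferred from this record's file path.
from pysd.py_backend.utils import make_python_identifier

namespace = {}
name, namespace = make_python_identifier('Heat Loss to Room', namespace)
print(name)  # 'heat_loss_to_room'
# A second string that collapses to the same identifier is forced apart with '_1'.
name2, namespace = make_python_identifier('Heat Loss to Room!', namespace)
print(name2)  # 'heat_loss_to_room_1'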
JamesPHoughton/pysd | pysd/py_backend/utils.py | make_flat_df | def make_flat_df(frames, return_addresses):
"""
Takes a list of dictionaries, each representing what is returned from the
model at a particular time, and creates a dataframe whose columns correspond
to the keys of `return_addresses`
Parameters
----------
frames: list of dictionaries
each dictionary represents the result of a particular time in the model
return_addresses: a dictionary,
keys will be column names of the resulting dataframe, and are what the
user passed in as 'return_columns'. Values are a tuple:
(py_name, {coords dictionary}) which tells us where to look for the value
to put in that specific column.
Returns
-------
pandas.DataFrame
A dataframe whose columns correspond to the keys of `return_addresses`.
"""
# Todo: could also try a list comprehension here, or parallel apply
visited = list(map(lambda x: visit_addresses(x, return_addresses), frames))
return pd.DataFrame(visited) | python | def make_flat_df(frames, return_addresses):
"""
Takes a list of dictionaries, each representing what is returned from the
model at a particular time, and creates a dataframe whose columns correspond
to the keys of `return_addresses`
Parameters
----------
frames: list of dictionaries
each dictionary represents the result of a particular time in the model
return_addresses: a dictionary,
keys will be column names of the resulting dataframe, and are what the
user passed in as 'return_columns'. Values are a tuple:
(py_name, {coords dictionary}) which tells us where to look for the value
to put in that specific column.
Returns
-------
pandas.DataFrame
A dataframe whose columns correspond to the keys of `return_addresses`.
"""
# Todo: could also try a list comprehension here, or parallel apply
visited = list(map(lambda x: visit_addresses(x, return_addresses), frames))
return pd.DataFrame(visited) | [
"def",
"make_flat_df",
"(",
"frames",
",",
"return_addresses",
")",
":",
"# Todo: could also try a list comprehension here, or parallel apply",
"visited",
"=",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"visit_addresses",
"(",
"x",
",",
"return_addresses",
")",
",",
"frames",
")",
")",
"return",
"pd",
".",
"DataFrame",
"(",
"visited",
")"
] | Takes a list of dictionaries, each representing what is returned from the
model at a particular time, and creates a dataframe whose columns correspond
to the keys of `return_addresses`
Parameters
----------
frames: list of dictionaries
each dictionary represents the result of a particular time in the model
return_addresses: a dictionary,
keys will be column names of the resulting dataframe, and are what the
user passed in as 'return_columns'. Values are a tuple:
(py_name, {coords dictionary}) which tells us where to look for the value
to put in that specific column.
Returns
-------
pandas.DataFrame
A dataframe whose columns correspond to the keys of `return_addresses`. | [
"Takes",
"a",
"list",
"of",
"dictionaries",
"each",
"representing",
"what",
"is",
"returned",
"from",
"the",
"model",
"at",
"a",
"particular",
"time",
"and",
"creates",
"a",
"dataframe",
"whose",
"columns",
"correspond",
"to",
"the",
"keys",
"of",
"return",
"addresses"
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/utils.py#L344-L367 | train |
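A sketch of the inputs make_flat_df expects; the frame keys and column names below are illustrative, and the import path is inferred from this record.

from pysd.py_backend.utils import make_flat_df

frames = [{'teacup_temperature': 180.0, 'time': 0.0},
          {'teacup_temperature': 175.5, 'time': 1.0}]
# Empty coords dicts mean each address is a plain scalar lookup.
return_addresses = {'Teacup Temperature': ('teacup_temperature', {}),
                    'Time': ('time', {})}
df = make_flat_df(frames, return_addresses)
print(df)  # two rows with columns 'Teacup Temperature' and 'Time'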
JamesPHoughton/pysd | pysd/py_backend/utils.py | visit_addresses | def visit_addresses(frame, return_addresses):
"""
Visits all of the addresses, returns a new dict
which contains just the addressed elements
Parameters
----------
frame
return_addresses: a dictionary,
keys will be column names of the resulting dataframe, and are what the
user passed in as 'return_columns'. Values are a tuple:
(py_name, {coords dictionary}) which tells us where to look for the value
to put in that specific column.
Returns
-------
outdict: dictionary
"""
outdict = dict()
for real_name, (pyname, address) in return_addresses.items():
if address:
xrval = frame[pyname].loc[address]
if xrval.size > 1:
outdict[real_name] = xrval
else:
outdict[real_name] = float(np.squeeze(xrval.values))
else:
outdict[real_name] = frame[pyname]
return outdict | python | def visit_addresses(frame, return_addresses):
"""
Visits all of the addresses, returns a new dict
which contains just the addressed elements
Parameters
----------
frame
return_addresses: a dictionary,
keys will be column names of the resulting dataframe, and are what the
user passed in as 'return_columns'. Values are a tuple:
(py_name, {coords dictionary}) which tells us where to look for the value
to put in that specific column.
Returns
-------
outdict: dictionary
"""
outdict = dict()
for real_name, (pyname, address) in return_addresses.items():
if address:
xrval = frame[pyname].loc[address]
if xrval.size > 1:
outdict[real_name] = xrval
else:
outdict[real_name] = float(np.squeeze(xrval.values))
else:
outdict[real_name] = frame[pyname]
return outdict | [
"def",
"visit_addresses",
"(",
"frame",
",",
"return_addresses",
")",
":",
"outdict",
"=",
"dict",
"(",
")",
"for",
"real_name",
",",
"(",
"pyname",
",",
"address",
")",
"in",
"return_addresses",
".",
"items",
"(",
")",
":",
"if",
"address",
":",
"xrval",
"=",
"frame",
"[",
"pyname",
"]",
".",
"loc",
"[",
"address",
"]",
"if",
"xrval",
".",
"size",
">",
"1",
":",
"outdict",
"[",
"real_name",
"]",
"=",
"xrval",
"else",
":",
"outdict",
"[",
"real_name",
"]",
"=",
"float",
"(",
"np",
".",
"squeeze",
"(",
"xrval",
".",
"values",
")",
")",
"else",
":",
"outdict",
"[",
"real_name",
"]",
"=",
"frame",
"[",
"pyname",
"]",
"return",
"outdict"
] | Visits all of the addresses, returns a new dict
which contains just the addressed elements
Parameters
----------
frame
return_addresses: a dictionary,
keys will be column names of the resulting dataframe, and are what the
user passed in as 'return_columns'. Values are a tuple:
(py_name, {coords dictionary}) which tells us where to look for the value
to put in that specific column.
Returns
-------
outdict: dictionary | [
"Visits",
"all",
"of",
"the",
"addresses",
"returns",
"a",
"new",
"dict",
"which",
"contains",
"just",
"the",
"addressed",
"elements"
] | bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda | https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/utils.py#L370-L401 | train |
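A sketch of a subscripted lookup through visit_addresses, assuming xarray is installed; the variable and coordinate names are illustrative, and the import path is inferred from this record.

import xarray as xr
from pysd.py_backend.utils import visit_addresses

frame = {'stock': xr.DataArray([[1.0, 2.0], [3.0, 4.0]],
                               coords={'dim1': ['a', 'b'], 'dim2': ['x', 'y']},
                               dims=['dim1', 'dim2'])}
addresses = {'Stock[a, x]': ('stock', {'dim1': 'a', 'dim2': 'x'}),
             'Stock[a, :]': ('stock', {'dim1': 'a'})}
out = visit_addresses(frame, addresses)
print(out['Stock[a, x]'])  # 1.0 -- size-1 selections are squeezed to a float
print(out['Stock[a, :]'])  # a 2-element DataArray, passed through as-is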
hirokiky/django-basicauth | basicauth/basicauthutils.py | validate_request | def validate_request(request):
"""Check an incoming request.
Returns:
- True if authentication passed, False otherwise
- On success, sets request.META['REMOTE_USER'] to the authenticated username.
"""
if getattr(settings, 'BASICAUTH_DISABLE', False):
# Basic auth is disabled for this environment
return True
if 'HTTP_AUTHORIZATION' not in request.META:
return False
authorization_header = request.META['HTTP_AUTHORIZATION']
ret = extract_basicauth(authorization_header)
if not ret:
return False
username, password = ret
raw_pass = settings.BASICAUTH_USERS.get(username)
if raw_pass is None:
return False
# To avoid timing attacks
# https://security.stackexchange.com/questions/83660/simple-string-comparisons-not-secure-against-timing-attacks
if not constant_time_compare(raw_pass, password):
return False
request.META['REMOTE_USER'] = username
return True | python | def validate_request(request):
"""Check an incoming request.
Returns:
- True if authentication passed, False otherwise
- On success, sets request.META['REMOTE_USER'] to the authenticated username.
"""
if getattr(settings, 'BASICAUTH_DISABLE', False):
# Basic auth is disabled for this environment
return True
if 'HTTP_AUTHORIZATION' not in request.META:
return False
authorization_header = request.META['HTTP_AUTHORIZATION']
ret = extract_basicauth(authorization_header)
if not ret:
return False
username, password = ret
raw_pass = settings.BASICAUTH_USERS.get(username)
if raw_pass is None:
return False
# To avoid timing attacks
# https://security.stackexchange.com/questions/83660/simple-string-comparisons-not-secure-against-timing-attacks
if not constant_time_compare(raw_pass, password):
return False
request.META['REMOTE_USER'] = username
return True | [
"def",
"validate_request",
"(",
"request",
")",
":",
"if",
"getattr",
"(",
"settings",
",",
"'BASICAUTH_DISABLE'",
",",
"False",
")",
":",
"# Not to use this env",
"return",
"True",
"if",
"'HTTP_AUTHORIZATION'",
"not",
"in",
"request",
".",
"META",
":",
"return",
"False",
"authorization_header",
"=",
"request",
".",
"META",
"[",
"'HTTP_AUTHORIZATION'",
"]",
"ret",
"=",
"extract_basicauth",
"(",
"authorization_header",
")",
"if",
"not",
"ret",
":",
"return",
"False",
"username",
",",
"password",
"=",
"ret",
"raw_pass",
"=",
"settings",
".",
"BASICAUTH_USERS",
".",
"get",
"(",
"username",
")",
"if",
"raw_pass",
"is",
"None",
":",
"return",
"False",
"# To avoid timing atacks",
"# https://security.stackexchange.com/questions/83660/simple-string-comparisons-not-secure-against-timing-attacks",
"if",
"not",
"constant_time_compare",
"(",
"raw_pass",
",",
"password",
")",
":",
"return",
"False",
"request",
".",
"META",
"[",
"'REMOTE_USER'",
"]",
"=",
"username",
"return",
"True"
] | Check an incoming request.
Returns:
- True if authentication passed, False otherwise
- On success, sets request.META['REMOTE_USER'] to the authenticated username. | [
"Check",
"an",
"incoming",
"request",
"."
] | dcc956ef1507f289bb50dce770e13c114ebd9a9b | https://github.com/hirokiky/django-basicauth/blob/dcc956ef1507f289bb50dce770e13c114ebd9a9b/basicauth/basicauthutils.py#L38-L69 | train |
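A sketch of exercising validate_request with Django's RequestFactory; it assumes a configured project whose settings define BASICAUTH_USERS = {'admin': 'secret'} (credentials illustrative), with the import path taken from this record.

import base64
from django.test import RequestFactory
from basicauth.basicauthutils import validate_request

token = base64.b64encode(b'admin:secret').decode('ascii')
request = RequestFactory().get('/', HTTP_AUTHORIZATION='Basic ' + token)
if validate_request(request):
    print(request.META['REMOTE_USER'])  # 'admin'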
google/ipaddr-py | ipaddr.py | _find_address_range | def _find_address_range(addresses):
"""Find a sequence of addresses.
Args:
addresses: a list of IPv4 or IPv6 addresses.
Returns:
A tuple containing the first and last IP addresses in the sequence,
and the index of the last IP address in the sequence.
"""
first = last = addresses[0]
last_index = 0
for ip in addresses[1:]:
if ip._ip == last._ip + 1:
last = ip
last_index += 1
else:
break
return (first, last, last_index) | python | def _find_address_range(addresses):
"""Find a sequence of addresses.
Args:
addresses: a list of IPv4 or IPv6 addresses.
Returns:
A tuple containing the first and last IP addresses in the sequence,
and the index of the last IP address in the sequence.
"""
first = last = addresses[0]
last_index = 0
for ip in addresses[1:]:
if ip._ip == last._ip + 1:
last = ip
last_index += 1
else:
break
return (first, last, last_index) | [
"def",
"_find_address_range",
"(",
"addresses",
")",
":",
"first",
"=",
"last",
"=",
"addresses",
"[",
"0",
"]",
"last_index",
"=",
"0",
"for",
"ip",
"in",
"addresses",
"[",
"1",
":",
"]",
":",
"if",
"ip",
".",
"_ip",
"==",
"last",
".",
"_ip",
"+",
"1",
":",
"last",
"=",
"ip",
"last_index",
"+=",
"1",
"else",
":",
"break",
"return",
"(",
"first",
",",
"last",
",",
"last_index",
")"
] | Find a sequence of addresses.
Args:
addresses: a list of IPv4 or IPv6 addresses.
Returns:
A tuple containing the first and last IP addresses in the sequence,
and the index of the last IP address in the sequence. | [
"Find",
"a",
"sequence",
"of",
"addresses",
"."
] | 99e55513666db1276596d74f24863e056ca50851 | https://github.com/google/ipaddr-py/blob/99e55513666db1276596d74f24863e056ca50851/ipaddr.py#L157-L176 | train |
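An illustration of the run detection; _find_address_range is module-private in ipaddr, so this is shown for understanding rather than as public API.

import ipaddr

addrs = [ipaddr.IPv4Address('192.0.2.%d' % i) for i in (1, 2, 3, 5)]
first, last, last_index = ipaddr._find_address_range(addrs)
print(first, last, last_index)  # 192.0.2.1 192.0.2.3 2 -- the run breaks before .5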
google/ipaddr-py | ipaddr.py | _BaseNet._prefix_from_prefix_int | def _prefix_from_prefix_int(self, prefixlen):
"""Validate and return a prefix length integer.
Args:
prefixlen: An integer containing the prefix length.
Returns:
The input, possibly converted from long to int.
Raises:
NetmaskValueError: If the input is not an integer, or out of range.
"""
if not isinstance(prefixlen, (int, long)):
raise NetmaskValueError('%r is not an integer' % prefixlen)
prefixlen = int(prefixlen)
if not (0 <= prefixlen <= self._max_prefixlen):
raise NetmaskValueError('%d is not a valid prefix length' %
prefixlen)
return prefixlen | python | def _prefix_from_prefix_int(self, prefixlen):
"""Validate and return a prefix length integer.
Args:
prefixlen: An integer containing the prefix length.
Returns:
The input, possibly converted from long to int.
Raises:
NetmaskValueError: If the input is not an integer, or out of range.
"""
if not isinstance(prefixlen, (int, long)):
raise NetmaskValueError('%r is not an integer' % prefixlen)
prefixlen = int(prefixlen)
if not (0 <= prefixlen <= self._max_prefixlen):
raise NetmaskValueError('%d is not a valid prefix length' %
prefixlen)
return prefixlen | [
"def",
"_prefix_from_prefix_int",
"(",
"self",
",",
"prefixlen",
")",
":",
"if",
"not",
"isinstance",
"(",
"prefixlen",
",",
"(",
"int",
",",
"long",
")",
")",
":",
"raise",
"NetmaskValueError",
"(",
"'%r is not an integer'",
"%",
"prefixlen",
")",
"prefixlen",
"=",
"int",
"(",
"prefixlen",
")",
"if",
"not",
"(",
"0",
"<=",
"prefixlen",
"<=",
"self",
".",
"_max_prefixlen",
")",
":",
"raise",
"NetmaskValueError",
"(",
"'%d is not a valid prefix length'",
"%",
"prefixlen",
")",
"return",
"prefixlen"
] | Validate and return a prefix length integer.
Args:
prefixlen: An integer containing the prefix length.
Returns:
The input, possibly converted from long to int.
Raises:
NetmaskValueError: If the input is not an integer, or out of range. | [
"Validate",
"and",
"return",
"a",
"prefix",
"length",
"integer",
"."
] | 99e55513666db1276596d74f24863e056ca50851 | https://github.com/google/ipaddr-py/blob/99e55513666db1276596d74f24863e056ca50851/ipaddr.py#L887-L905 | train |
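An illustration reached through a network object, since the validator is a _BaseNet method; the bare `long` check ties this code to Python 2.

import ipaddr

net = ipaddr.IPv4Network('192.0.2.0/24')
print(net._prefix_from_prefix_int(24))  # 24
try:
    net._prefix_from_prefix_int(33)
except ipaddr.NetmaskValueError as err:
    print(err)  # 33 is not a valid prefix length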
raimon49/pip-licenses | piplicenses.py | output_colored | def output_colored(code, text, is_bold=False):
"""
Wrap text in an ANSI color escape sequence
"""
if is_bold:
code = '1;%s' % code
return '\033[%sm%s\033[0m' % (code, text) | python | def output_colored(code, text, is_bold=False):
"""
Wrap text in an ANSI color escape sequence
"""
if is_bold:
code = '1;%s' % code
return '\033[%sm%s\033[0m' % (code, text) | [
"def",
"output_colored",
"(",
"code",
",",
"text",
",",
"is_bold",
"=",
"False",
")",
":",
"if",
"is_bold",
":",
"code",
"=",
"'1;%s'",
"%",
"code",
"return",
"'\\033[%sm%s\\033[0m'",
"%",
"(",
"code",
",",
"text",
")"
] | Wrap text in an ANSI color escape sequence | [
"Create",
"function",
"to",
"output",
"with",
"color",
"sequence"
] | 879eddd9d75228ba7d6529bd3050d11ae6bf1712 | https://github.com/raimon49/pip-licenses/blob/879eddd9d75228ba7d6529bd3050d11ae6bf1712/piplicenses.py#L504-L511 | train |
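A quick demonstration, assuming output_colored is importable from piplicenses and the terminal understands ANSI escapes; 32 is the ANSI code for green and 31 for red.

from piplicenses import output_colored

print(output_colored('32', 'MIT License'))            # green
print(output_colored('31', 'UNKNOWN', is_bold=True))  # bold red ('1;31')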
nickjj/flask-webpack | flask_webpack/__init__.py | Webpack._set_asset_paths | def _set_asset_paths(self, app):
"""
Read in the manifest json file which acts as a manifest for assets.
This allows us to get the asset path as well as hashed names.
:param app: Flask application
:return: None
"""
webpack_stats = app.config['WEBPACK_MANIFEST_PATH']
try:
with app.open_resource(webpack_stats, 'r') as stats_json:
stats = json.load(stats_json)
if app.config['WEBPACK_ASSETS_URL']:
self.assets_url = app.config['WEBPACK_ASSETS_URL']
else:
self.assets_url = stats['publicPath']
self.assets = stats['assets']
except IOError:
raise RuntimeError(
"Flask-Webpack requires 'WEBPACK_MANIFEST_PATH' to be set and "
"it must point to a valid json file.") | python | def _set_asset_paths(self, app):
"""
Read in the manifest json file which acts as a manifest for assets.
This allows us to get the asset path as well as hashed names.
:param app: Flask application
:return: None
"""
webpack_stats = app.config['WEBPACK_MANIFEST_PATH']
try:
with app.open_resource(webpack_stats, 'r') as stats_json:
stats = json.load(stats_json)
if app.config['WEBPACK_ASSETS_URL']:
self.assets_url = app.config['WEBPACK_ASSETS_URL']
else:
self.assets_url = stats['publicPath']
self.assets = stats['assets']
except IOError:
raise RuntimeError(
"Flask-Webpack requires 'WEBPACK_MANIFEST_PATH' to be set and "
"it must point to a valid json file.") | [
"def",
"_set_asset_paths",
"(",
"self",
",",
"app",
")",
":",
"webpack_stats",
"=",
"app",
".",
"config",
"[",
"'WEBPACK_MANIFEST_PATH'",
"]",
"try",
":",
"with",
"app",
".",
"open_resource",
"(",
"webpack_stats",
",",
"'r'",
")",
"as",
"stats_json",
":",
"stats",
"=",
"json",
".",
"load",
"(",
"stats_json",
")",
"if",
"app",
".",
"config",
"[",
"'WEBPACK_ASSETS_URL'",
"]",
":",
"self",
".",
"assets_url",
"=",
"app",
".",
"config",
"[",
"'WEBPACK_ASSETS_URL'",
"]",
"else",
":",
"self",
".",
"assets_url",
"=",
"stats",
"[",
"'publicPath'",
"]",
"self",
".",
"assets",
"=",
"stats",
"[",
"'assets'",
"]",
"except",
"IOError",
":",
"raise",
"RuntimeError",
"(",
"\"Flask-Webpack requires 'WEBPACK_MANIFEST_PATH' to be set and \"",
"\"it must point to a valid json file.\"",
")"
] | Read in the manifest json file which acts as a manifest for assets.
This allows us to get the asset path as well as hashed names.
:param app: Flask application
:return: None | [
"Read",
"in",
"the",
"manifest",
"json",
"file",
"which",
"acts",
"as",
"a",
"manifest",
"for",
"assets",
".",
"This",
"allows",
"us",
"to",
"get",
"the",
"asset",
"path",
"as",
"well",
"as",
"hashed",
"names",
"."
] | 241617c6ce0fd9ec11f507204958ddd0ec467634 | https://github.com/nickjj/flask-webpack/blob/241617c6ce0fd9ec11f507204958ddd0ec467634/flask_webpack/__init__.py#L47-L70 | train |
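A configuration sketch for the lookup above; the manifest path and contents are illustrative, and the extension is assumed to run _set_asset_paths during initialization.

from flask import Flask
from flask_webpack import Webpack

app = Flask(__name__)
app.config['WEBPACK_MANIFEST_PATH'] = 'build/manifest.json'
app.config['WEBPACK_ASSETS_URL'] = None  # fall back to the manifest's publicPath
# build/manifest.json is expected to look like:
# {"publicPath": "/static/", "assets": {"app.js": "app.abc123.js"}}
webpack = Webpack(app)  # raises the RuntimeError above if the file is missing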
nickjj/flask-webpack | flask_webpack/__init__.py | Webpack.javascript_tag | def javascript_tag(self, *args):
"""
Convenience tag to output 1 or more javascript tags.
:param args: 1 or more javascript file names
:return: Script tag(s) containing the asset
"""
tags = []
for arg in args:
asset_path = self.asset_url_for('{0}.js'.format(arg))
if asset_path:
tags.append('<script src="{0}"></script>'.format(asset_path))
return '\n'.join(tags) | python | def javascript_tag(self, *args):
"""
Convenience tag to output 1 or more javascript tags.
:param args: 1 or more javascript file names
:return: Script tag(s) containing the asset
"""
tags = []
for arg in args:
asset_path = self.asset_url_for('{0}.js'.format(arg))
if asset_path:
tags.append('<script src="{0}"></script>'.format(asset_path))
return '\n'.join(tags) | [
"def",
"javascript_tag",
"(",
"self",
",",
"*",
"args",
")",
":",
"tags",
"=",
"[",
"]",
"for",
"arg",
"in",
"args",
":",
"asset_path",
"=",
"self",
".",
"asset_url_for",
"(",
"'{0}.js'",
".",
"format",
"(",
"arg",
")",
")",
"if",
"asset_path",
":",
"tags",
".",
"append",
"(",
"'<script src=\"{0}\"></script>'",
".",
"format",
"(",
"asset_path",
")",
")",
"return",
"'\\n'",
".",
"join",
"(",
"tags",
")"
] | Convenience tag to output 1 or more javascript tags.
:param args: 1 or more javascript file names
:return: Script tag(s) containing the asset | [
"Convenience",
"tag",
"to",
"output",
"1",
"or",
"more",
"javascript",
"tags",
"."
] | 241617c6ce0fd9ec11f507204958ddd0ec467634 | https://github.com/nickjj/flask-webpack/blob/241617c6ce0fd9ec11f507204958ddd0ec467634/flask_webpack/__init__.py#L81-L95 | train |
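A direct call for illustration (templates would invoke the registered helper instead); the instance is hand-primed rather than loaded from a real manifest, and instantiating Webpack without an app is assumed to be supported.

from flask_webpack import Webpack

webpack = Webpack()
webpack.assets_url = '/static/'
webpack.assets = {'app.js': 'app.abc123.js', 'vendor.js': 'vendor.def456.js'}
print(webpack.javascript_tag('app', 'vendor'))
# <script src="/static/app.abc123.js"></script>
# <script src="/static/vendor.def456.js"></script>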
nickjj/flask-webpack | flask_webpack/__init__.py | Webpack.asset_url_for | def asset_url_for(self, asset):
"""
Lookup the hashed asset path of a file name unless it starts with
something that resembles a web address, then take it as is.
:param asset: A logical path to an asset
:type asset: str
:return: Asset path or None if not found
"""
if '//' in asset:
return asset
if asset not in self.assets:
return None
return '{0}{1}'.format(self.assets_url, self.assets[asset]) | python | def asset_url_for(self, asset):
"""
Lookup the hashed asset path of a file name unless it starts with
something that resembles a web address, then take it as is.
:param asset: A logical path to an asset
:type asset: str
:return: Asset path or None if not found
"""
if '//' in asset:
return asset
if asset not in self.assets:
return None
return '{0}{1}'.format(self.assets_url, self.assets[asset]) | [
"def",
"asset_url_for",
"(",
"self",
",",
"asset",
")",
":",
"if",
"'//'",
"in",
"asset",
":",
"return",
"asset",
"if",
"asset",
"not",
"in",
"self",
".",
"assets",
":",
"return",
"None",
"return",
"'{0}{1}'",
".",
"format",
"(",
"self",
".",
"assets_url",
",",
"self",
".",
"assets",
"[",
"asset",
"]",
")"
] | Lookup the hashed asset path of a file name unless it starts with
something that resembles a web address, then take it as is.
:param asset: A logical path to an asset
:type asset: str
:return: Asset path or None if not found | [
"Lookup",
"the",
"hashed",
"asset",
"path",
"of",
"a",
"file",
"name",
"unless",
"it",
"starts",
"with",
"something",
"that",
"resembles",
"a",
"web",
"address",
"then",
"take",
"it",
"as",
"is",
"."
] | 241617c6ce0fd9ec11f507204958ddd0ec467634 | https://github.com/nickjj/flask-webpack/blob/241617c6ce0fd9ec11f507204958ddd0ec467634/flask_webpack/__init__.py#L114-L129 | train |
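The same hand-primed setup shows the three lookup outcomes: a known asset, a web address passed through, and a miss.

from flask_webpack import Webpack

webpack = Webpack()
webpack.assets_url = '/static/'
webpack.assets = {'app.js': 'app.abc123.js'}
print(webpack.asset_url_for('app.js'))                    # /static/app.abc123.js
print(webpack.asset_url_for('//cdn.example.com/lib.js'))  # returned unchanged
print(webpack.asset_url_for('missing.js'))                # None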
hishnash/djangochannelsrestframework | djangochannelsrestframework/observer/observer.py | ModelObserver.pre_change_receiver | def pre_change_receiver(self, instance: Model, action: Action):
"""
Entry point for triggering the old_binding from save signals.
"""
if action == Action.CREATE:
group_names = set()
else:
group_names = set(self.group_names(instance))
# use a thread local dict to be safe...
if not hasattr(instance, '__instance_groups'):
instance.__instance_groups = threading.local()
instance.__instance_groups.observers = {}
if not hasattr(instance.__instance_groups, 'observers'):
instance.__instance_groups.observers = {}
instance.__instance_groups.observers[self] = group_names | python | def pre_change_receiver(self, instance: Model, action: Action):
"""
Entry point for triggering the old_binding from save signals.
"""
if action == Action.CREATE:
group_names = set()
else:
group_names = set(self.group_names(instance))
# use a thread local dict to be safe...
if not hasattr(instance, '__instance_groups'):
instance.__instance_groups = threading.local()
instance.__instance_groups.observers = {}
if not hasattr(instance.__instance_groups, 'observers'):
instance.__instance_groups.observers = {}
instance.__instance_groups.observers[self] = group_names | [
"def",
"pre_change_receiver",
"(",
"self",
",",
"instance",
":",
"Model",
",",
"action",
":",
"Action",
")",
":",
"if",
"action",
"==",
"Action",
".",
"CREATE",
":",
"group_names",
"=",
"set",
"(",
")",
"else",
":",
"group_names",
"=",
"set",
"(",
"self",
".",
"group_names",
"(",
"instance",
")",
")",
"# use a thread local dict to be safe...",
"if",
"not",
"hasattr",
"(",
"instance",
",",
"'__instance_groups'",
")",
":",
"instance",
".",
"__instance_groups",
"=",
"threading",
".",
"local",
"(",
")",
"instance",
".",
"__instance_groups",
".",
"observers",
"=",
"{",
"}",
"if",
"not",
"hasattr",
"(",
"instance",
".",
"__instance_groups",
",",
"'observers'",
")",
":",
"instance",
".",
"__instance_groups",
".",
"observers",
"=",
"{",
"}",
"instance",
".",
"__instance_groups",
".",
"observers",
"[",
"self",
"]",
"=",
"group_names"
] | Entry point for triggering the old_binding from save signals. | [
"Entry",
"point",
"for",
"triggering",
"the",
"old_binding",
"from",
"save",
"signals",
"."
] | 19fdec7efd785b1a94d19612a8de934e1948e344 | https://github.com/hishnash/djangochannelsrestframework/blob/19fdec7efd785b1a94d19612a8de934e1948e344/djangochannelsrestframework/observer/observer.py#L171-L187 | train |
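A conceptual sketch of the calling order only; ModelObserver connects equivalent receivers to Django's model signals itself, and `observer` (a ModelObserver instance), `Action`, and the Comment model below are assumptions.

from django.db.models.signals import pre_save, post_save

def on_pre_save(sender, instance, **kwargs):
    action = Action.CREATE if instance._state.adding else Action.UPDATE
    observer.pre_change_receiver(instance, action)   # snapshot current groups

def on_post_save(sender, instance, created, **kwargs):
    action = Action.CREATE if created else Action.UPDATE
    observer.post_change_receiver(instance, action)  # diff against the snapshot

pre_save.connect(on_pre_save, sender=Comment)
post_save.connect(on_post_save, sender=Comment)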
hishnash/djangochannelsrestframework | djangochannelsrestframework/observer/observer.py | ModelObserver.post_change_receiver | def post_change_receiver(self, instance: Model, action: Action, **kwargs):
"""
Triggers the old_binding to possibly send to its group.
"""
try:
old_group_names = instance.__instance_groups.observers[self]
except (ValueError, KeyError):
old_group_names = set()
if action == Action.DELETE:
new_group_names = set()
else:
new_group_names = set(self.group_names(instance))
# if post delete, new_group_names should be []
# Django DDP had used the ordering of DELETE, UPDATE then CREATE for good reasons.
self.send_messages(
instance,
old_group_names - new_group_names,
Action.DELETE,
**kwargs
)
# the object has been updated so that its groups are not the same.
self.send_messages(
instance,
old_group_names & new_group_names,
Action.UPDATE,
**kwargs
)
#
self.send_messages(
instance,
new_group_names - old_group_names,
Action.CREATE,
**kwargs
) | python | def post_change_receiver(self, instance: Model, action: Action, **kwargs):
"""
Triggers the old_binding to possibly send to its group.
"""
try:
old_group_names = instance.__instance_groups.observers[self]
except (ValueError, KeyError):
old_group_names = set()
if action == Action.DELETE:
new_group_names = set()
else:
new_group_names = set(self.group_names(instance))
# if post delete, new_group_names should be []
# Django DDP had used the ordering of DELETE, UPDATE then CREATE for good reasons.
self.send_messages(
instance,
old_group_names - new_group_names,
Action.DELETE,
**kwargs
)
# the object has been updated so that its groups are not the same.
self.send_messages(
instance,
old_group_names & new_group_names,
Action.UPDATE,
**kwargs
)
#
self.send_messages(
instance,
new_group_names - old_group_names,
Action.CREATE,
**kwargs
) | [
"def",
"post_change_receiver",
"(",
"self",
",",
"instance",
":",
"Model",
",",
"action",
":",
"Action",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"old_group_names",
"=",
"instance",
".",
"__instance_groups",
".",
"observers",
"[",
"self",
"]",
"except",
"(",
"ValueError",
",",
"KeyError",
")",
":",
"old_group_names",
"=",
"set",
"(",
")",
"if",
"action",
"==",
"Action",
".",
"DELETE",
":",
"new_group_names",
"=",
"set",
"(",
")",
"else",
":",
"new_group_names",
"=",
"set",
"(",
"self",
".",
"group_names",
"(",
"instance",
")",
")",
"# if post delete, new_group_names should be []",
"# Django DDP had used the ordering of DELETE, UPDATE then CREATE for good reasons.",
"self",
".",
"send_messages",
"(",
"instance",
",",
"old_group_names",
"-",
"new_group_names",
",",
"Action",
".",
"DELETE",
",",
"*",
"*",
"kwargs",
")",
"# the object has been updated so that its groups are not the same.",
"self",
".",
"send_messages",
"(",
"instance",
",",
"old_group_names",
"&",
"new_group_names",
",",
"Action",
".",
"UPDATE",
",",
"*",
"*",
"kwargs",
")",
"#",
"self",
".",
"send_messages",
"(",
"instance",
",",
"new_group_names",
"-",
"old_group_names",
",",
"Action",
".",
"CREATE",
",",
"*",
"*",
"kwargs",
")"
] | Triggers the old_binding to possibly send to its group. | [
"Triggers",
"the",
"old_binding",
"to",
"possibly",
"send",
"to",
"its",
"group",
"."
] | 19fdec7efd785b1a94d19612a8de934e1948e344 | https://github.com/hishnash/djangochannelsrestframework/blob/19fdec7efd785b1a94d19612a8de934e1948e344/djangochannelsrestframework/observer/observer.py#L189-L226 | train |
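A pure-set illustration of the three-way diff driving the sends above; the group names are illustrative.

old_group_names = {'pk__1', 'user__alice'}
new_group_names = {'pk__1', 'user__bob'}
print(old_group_names - new_group_names)  # {'user__alice'} -> DELETE messages
print(old_group_names & new_group_names)  # {'pk__1'}       -> UPDATE messages
print(new_group_names - old_group_names)  # {'user__bob'}   -> CREATE messages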
hishnash/djangochannelsrestframework | djangochannelsrestframework/generics.py | GenericAsyncAPIConsumer.get_queryset | def get_queryset(self, **kwargs) -> QuerySet:
"""
Get the list of items for this view.
This must be an iterable, and may be a queryset.
Defaults to using `self.queryset`.
This method should always be used rather than accessing `self.queryset`
directly, as `self.queryset` gets evaluated only once, and those results
are cached for all subsequent requests.
You may want to override this if you need to provide different
querysets depending on the incoming request.
(Eg. return a list of items that is specific to the user)
"""
assert self.queryset is not None, (
"'%s' should either include a `queryset` attribute, "
"or override the `get_queryset()` method."
% self.__class__.__name__
)
queryset = self.queryset
if isinstance(queryset, QuerySet):
# Ensure queryset is re-evaluated on each request.
queryset = queryset.all()
return queryset | python | def get_queryset(self, **kwargs) -> QuerySet:
"""
Get the list of items for this view.
This must be an iterable, and may be a queryset.
Defaults to using `self.queryset`.
This method should always be used rather than accessing `self.queryset`
directly, as `self.queryset` gets evaluated only once, and those results
are cached for all subsequent requests.
You may want to override this if you need to provide different
querysets depending on the incoming request.
(Eg. return a list of items that is specific to the user)
"""
assert self.queryset is not None, (
"'%s' should either include a `queryset` attribute, "
"or override the `get_queryset()` method."
% self.__class__.__name__
)
queryset = self.queryset
if isinstance(queryset, QuerySet):
# Ensure queryset is re-evaluated on each request.
queryset = queryset.all()
return queryset | [
"def",
"get_queryset",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
"->",
"QuerySet",
":",
"assert",
"self",
".",
"queryset",
"is",
"not",
"None",
",",
"(",
"\"'%s' should either include a `queryset` attribute, \"",
"\"or override the `get_queryset()` method.\"",
"%",
"self",
".",
"__class__",
".",
"__name__",
")",
"queryset",
"=",
"self",
".",
"queryset",
"if",
"isinstance",
"(",
"queryset",
",",
"QuerySet",
")",
":",
"# Ensure queryset is re-evaluated on each request.",
"queryset",
"=",
"queryset",
".",
"all",
"(",
")",
"return",
"queryset"
] | Get the list of items for this view.
This must be an iterable, and may be a queryset.
Defaults to using `self.queryset`.
This method should always be used rather than accessing `self.queryset`
directly, as `self.queryset` gets evaluated only once, and those results
are cached for all subsequent requests.
You may want to override this if you need to provide different
querysets depending on the incoming request.
(Eg. return a list of items that is specific to the user) | [
"Get",
"the",
"list",
"of",
"items",
"for",
"this",
"view",
".",
"This",
"must",
"be",
"an",
"iterable",
"and",
"may",
"be",
"a",
"queryset",
".",
"Defaults",
"to",
"using",
"self",
".",
"queryset",
"."
] | 19fdec7efd785b1a94d19612a8de934e1948e344 | https://github.com/hishnash/djangochannelsrestframework/blob/19fdec7efd785b1a94d19612a8de934e1948e344/djangochannelsrestframework/generics.py#L34-L59 | train |
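A sketch of overriding get_queryset to scope results per connection; the Comment model, CommentSerializer, and scope usage are placeholders, not part of this record.

from djangochannelsrestframework.generics import GenericAsyncAPIConsumer

class CommentConsumer(GenericAsyncAPIConsumer):
    serializer_class = CommentSerializer  # placeholder

    def get_queryset(self, **kwargs):
        # Re-evaluated per request, so it can depend on the connected user.
        return Comment.objects.filter(author=self.scope['user'])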
hishnash/djangochannelsrestframework | djangochannelsrestframework/generics.py | GenericAsyncAPIConsumer.get_serializer_class | def get_serializer_class(self, **kwargs) -> Type[Serializer]:
"""
Return the class to use for the serializer.
Defaults to using `self.serializer_class`.
You may want to override this if you need to provide different
serializations depending on the incoming request.
(Eg. admins get full serialization, others get basic serialization)
"""
assert self.serializer_class is not None, (
"'%s' should either include a `serializer_class` attribute, "
"or override the `get_serializer_class()` method."
% self.__class__.__name__
)
return self.serializer_class | python | def get_serializer_class(self, **kwargs) -> Type[Serializer]:
"""
Return the class to use for the serializer.
Defaults to using `self.serializer_class`.
You may want to override this if you need to provide different
serializations depending on the incoming request.
(Eg. admins get full serialization, others get basic serialization)
"""
assert self.serializer_class is not None, (
"'%s' should either include a `serializer_class` attribute, "
"or override the `get_serializer_class()` method."
% self.__class__.__name__
)
return self.serializer_class | [
"def",
"get_serializer_class",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
"->",
"Type",
"[",
"Serializer",
"]",
":",
"assert",
"self",
".",
"serializer_class",
"is",
"not",
"None",
",",
"(",
"\"'%s' should either include a `serializer_class` attribute, \"",
"\"or override the `get_serializer_class()` method.\"",
"%",
"self",
".",
"__class__",
".",
"__name__",
")",
"return",
"self",
".",
"serializer_class"
] | Return the class to use for the serializer.
Defaults to using `self.serializer_class`.
You may want to override this if you need to provide different
serializations depending on the incoming request.
(Eg. admins get full serialization, others get basic serialization) | [
"Return",
"the",
"class",
"to",
"use",
"for",
"the",
"serializer",
".",
"Defaults",
"to",
"using",
"self",
".",
"serializer_class",
"."
] | 19fdec7efd785b1a94d19612a8de934e1948e344 | https://github.com/hishnash/djangochannelsrestframework/blob/19fdec7efd785b1a94d19612a8de934e1948e344/djangochannelsrestframework/generics.py#L109-L125 | train |
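A companion sketch choosing the serializer by user role; the model and serializer names are placeholders.

from djangochannelsrestframework.generics import GenericAsyncAPIConsumer

class AdminAwareConsumer(GenericAsyncAPIConsumer):
    queryset = Comment.objects.all()  # placeholder model

    def get_serializer_class(self, **kwargs):
        if self.scope['user'].is_staff:
            return FullCommentSerializer   # placeholder serializers
        return BasicCommentSerializer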
hishnash/djangochannelsrestframework | djangochannelsrestframework/consumers.py | view_as_consumer | def view_as_consumer(
wrapped_view: typing.Callable[[HttpRequest], HttpResponse],
mapped_actions: typing.Optional[
typing.Dict[str, str]
]=None) -> Type[AsyncConsumer]:
"""
Wrap a django View so that it will be triggered by actions over this json
websocket consumer.
"""
if mapped_actions is None:
mapped_actions = {
'create': 'PUT',
'update': 'PATCH',
'list': 'GET',
'retrieve': 'GET'
}
class DjangoViewWrapper(DjangoViewAsConsumer):
view = wrapped_view
actions = mapped_actions
return DjangoViewWrapper | python | def view_as_consumer(
wrapped_view: typing.Callable[[HttpRequest], HttpResponse],
mapped_actions: typing.Optional[
typing.Dict[str, str]
]=None) -> Type[AsyncConsumer]:
"""
Wrap a django View so that it will be triggered by actions over this json
websocket consumer.
"""
if mapped_actions is None:
mapped_actions = {
'create': 'PUT',
'update': 'PATCH',
'list': 'GET',
'retrieve': 'GET'
}
class DjangoViewWrapper(DjangoViewAsConsumer):
view = wrapped_view
actions = mapped_actions
return DjangoViewWrapper | [
"def",
"view_as_consumer",
"(",
"wrapped_view",
":",
"typing",
".",
"Callable",
"[",
"[",
"HttpRequest",
"]",
",",
"HttpResponse",
"]",
",",
"mapped_actions",
":",
"typing",
".",
"Optional",
"[",
"typing",
".",
"Dict",
"[",
"str",
",",
"str",
"]",
"]",
"=",
"None",
")",
"->",
"Type",
"[",
"AsyncConsumer",
"]",
":",
"if",
"mapped_actions",
"is",
"None",
":",
"mapped_actions",
"=",
"{",
"'create'",
":",
"'PUT'",
",",
"'update'",
":",
"'PATCH'",
",",
"'list'",
":",
"'GET'",
",",
"'retrieve'",
":",
"'GET'",
"}",
"class",
"DjangoViewWrapper",
"(",
"DjangoViewAsConsumer",
")",
":",
"view",
"=",
"wrapped_view",
"actions",
"=",
"mapped_actions",
"return",
"DjangoViewWrapper"
] | Wrap a django View so that it will be triggered by actions over this json
websocket consumer. | [
"Wrap",
"a",
"django",
"View",
"so",
"that",
"it",
"will",
"be",
"triggered",
"by",
"actions",
"over",
"this",
"json",
"websocket",
"consumer",
"."
] | 19fdec7efd785b1a94d19612a8de934e1948e344 | https://github.com/hishnash/djangochannelsrestframework/blob/19fdec7efd785b1a94d19612a8de934e1948e344/djangochannelsrestframework/consumers.py#L303-L324 | train |
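A routing sketch; the URL and view names are illustrative, and Channels 2-style routing is assumed (newer Channels versions also need .as_asgi()).

from django.urls import path
from djangochannelsrestframework.consumers import view_as_consumer

websocket_urlpatterns = [
    path('ws/users/', view_as_consumer(UserView.as_view())),  # UserView assumed
]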