repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition |
---|---|---|---|---|---|---|---|---|---|---|---|
opendatateam/udata | udata/commands/__init__.py | error | def error(msg, details=None):
'''Display an error message with optional details'''
msg = '{0} {1}'.format(red(KO), white(safe_unicode(msg)))
msg = safe_unicode(msg)
if details:
msg = b'\n'.join((msg, safe_unicode(details)))
echo(format_multiline(msg)) | python | def error(msg, details=None):
'''Display an error message with optional details'''
msg = '{0} {1}'.format(red(KO), white(safe_unicode(msg)))
msg = safe_unicode(msg)
if details:
msg = b'\n'.join((msg, safe_unicode(details)))
echo(format_multiline(msg)) | [
"def",
"error",
"(",
"msg",
",",
"details",
"=",
"None",
")",
":",
"msg",
"=",
"'{0} {1}'",
".",
"format",
"(",
"red",
"(",
"KO",
")",
",",
"white",
"(",
"safe_unicode",
"(",
"msg",
")",
")",
")",
"msg",
"=",
"safe_unicode",
"(",
"msg",
")",
"if",
"details",
":",
"msg",
"=",
"b'\\n'",
".",
"join",
"(",
"(",
"msg",
",",
"safe_unicode",
"(",
"details",
")",
")",
")",
"echo",
"(",
"format_multiline",
"(",
"msg",
")",
")"
] | Display an error message with optional details | [
"Display",
"an",
"error",
"message",
"with",
"optional",
"details"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/__init__.py#L62-L68 | train |
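
A minimal usage sketch of the `error` helper above, assuming udata is installed and the function is imported from `udata.commands` as the path column indicates; the failing path is hypothetical:

```python
from udata.commands import error

try:
    open('/nonexistent/config.cfg')  # hypothetical path, fails on purpose
except IOError as exc:
    # Prints a red KO marker and the message in white, with the
    # details joined on the following line.
    error('Unable to load configuration', details=str(exc))
```
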
opendatateam/udata | udata/commands/__init__.py | UdataGroup.main | def main(self, *args, **kwargs):
'''
Instantiate ScriptInfo before parent does
to ensure the `settings` parameter is available to `create_app`
'''
obj = kwargs.get('obj')
if obj is None:
obj = ScriptInfo(create_app=self.create_app)
# This is the important line: allows create_app to access the settings
obj.settings = kwargs.pop('settings', 'udata.settings.Defaults')
kwargs['obj'] = obj
return super(UdataGroup, self).main(*args, **kwargs) | python | def main(self, *args, **kwargs):
'''
Instantiate ScriptInfo before parent does
to ensure the `settings` parameter is available to `create_app`
'''
obj = kwargs.get('obj')
if obj is None:
obj = ScriptInfo(create_app=self.create_app)
# This is the important line: allows create_app to access the settings
obj.settings = kwargs.pop('settings', 'udata.settings.Defaults')
kwargs['obj'] = obj
return super(UdataGroup, self).main(*args, **kwargs) | [
"def",
"main",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"obj",
"=",
"kwargs",
".",
"get",
"(",
"'obj'",
")",
"if",
"obj",
"is",
"None",
":",
"obj",
"=",
"ScriptInfo",
"(",
"create_app",
"=",
"self",
".",
"create_app",
")",
"# This is the import line: allows create_app to access the settings",
"obj",
".",
"settings",
"=",
"kwargs",
".",
"pop",
"(",
"'settings'",
",",
"'udata.settings.Defaults'",
")",
"kwargs",
"[",
"'obj'",
"]",
"=",
"obj",
"return",
"super",
"(",
"UdataGroup",
",",
"self",
")",
".",
"main",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Instantiate ScriptInfo before parent does
to ensure the `settings` parameter is available to `create_app` | [
"Instanciate",
"ScriptInfo",
"before",
"parent",
"does",
"to",
"ensure",
"the",
"settings",
"parameters",
"is",
"available",
"to",
"create_app"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/__init__.py#L240-L251 | train |
opendatateam/udata | udata/entrypoints.py | get_plugins_dists | def get_plugins_dists(app, name=None):
'''Return a list of Distributions with enabled udata plugins'''
if name:
plugins = set(e.name for e in iter_all(name) if e.name in app.config['PLUGINS'])
else:
plugins = set(app.config['PLUGINS'])
return [
d for d in known_dists()
if any(set(v.keys()) & plugins for v in d.get_entry_map().values())
] | python | def get_plugins_dists(app, name=None):
'''Return a list of Distributions with enabled udata plugins'''
if name:
plugins = set(e.name for e in iter_all(name) if e.name in app.config['PLUGINS'])
else:
plugins = set(app.config['PLUGINS'])
return [
d for d in known_dists()
if any(set(v.keys()) & plugins for v in d.get_entry_map().values())
] | [
"def",
"get_plugins_dists",
"(",
"app",
",",
"name",
"=",
"None",
")",
":",
"if",
"name",
":",
"plugins",
"=",
"set",
"(",
"e",
".",
"name",
"for",
"e",
"in",
"iter_all",
"(",
"name",
")",
"if",
"e",
".",
"name",
"in",
"app",
".",
"config",
"[",
"'PLUGINS'",
"]",
")",
"else",
":",
"plugins",
"=",
"set",
"(",
"app",
".",
"config",
"[",
"'PLUGINS'",
"]",
")",
"return",
"[",
"d",
"for",
"d",
"in",
"known_dists",
"(",
")",
"if",
"any",
"(",
"set",
"(",
"v",
".",
"keys",
"(",
")",
")",
"&",
"plugins",
"for",
"v",
"in",
"d",
".",
"get_entry_map",
"(",
")",
".",
"values",
"(",
")",
")",
"]"
] | Return a list of Distributions with enabled udata plugins | [
"Return",
"a",
"list",
"of",
"Distributions",
"with",
"enabled",
"udata",
"plugins"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/entrypoints.py#L64-L73 | train |
opendatateam/udata | udata/routing.py | lazy_raise_or_redirect | def lazy_raise_or_redirect():
'''
Raise exception lazily to ensure request.endpoint is set
Also perform redirect if needed
'''
if not request.view_args:
return
for name, value in request.view_args.items():
if isinstance(value, NotFound):
request.routing_exception = value
break
elif isinstance(value, LazyRedirect):
new_args = request.view_args
new_args[name] = value.arg
new_url = url_for(request.endpoint, **new_args)
return redirect(new_url) | python | def lazy_raise_or_redirect():
'''
Raise exception lazily to ensure request.endpoint is set
Also perform redirect if needed
'''
if not request.view_args:
return
for name, value in request.view_args.items():
if isinstance(value, NotFound):
request.routing_exception = value
break
elif isinstance(value, LazyRedirect):
new_args = request.view_args
new_args[name] = value.arg
new_url = url_for(request.endpoint, **new_args)
return redirect(new_url) | [
"def",
"lazy_raise_or_redirect",
"(",
")",
":",
"if",
"not",
"request",
".",
"view_args",
":",
"return",
"for",
"name",
",",
"value",
"in",
"request",
".",
"view_args",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"NotFound",
")",
":",
"request",
".",
"routing_exception",
"=",
"value",
"break",
"elif",
"isinstance",
"(",
"value",
",",
"LazyRedirect",
")",
":",
"new_args",
"=",
"request",
".",
"view_args",
"new_args",
"[",
"name",
"]",
"=",
"value",
".",
"arg",
"new_url",
"=",
"url_for",
"(",
"request",
".",
"endpoint",
",",
"*",
"*",
"new_args",
")",
"return",
"redirect",
"(",
"new_url",
")"
] | Raise exception lazily to ensure request.endpoint is set
Also perform redirect if needed | [
"Raise",
"exception",
"lazily",
"to",
"ensure",
"request",
".",
"endpoint",
"is",
"set",
"Also",
"perform",
"redirect",
"if",
"needed"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/routing.py#L199-L214 | train |
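
The hook above only does something once URL matching has populated `request.view_args`, so it is presumably registered as a `before_request` handler; a hedged wiring sketch (the registration point is an assumption, not shown in this file):

```python
# Assumed wiring: run the hook before every request, after routing
# has filled request.view_args.
app.before_request(lazy_raise_or_redirect)
```
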
opendatateam/udata | udata/routing.py | TerritoryConverter.to_python | def to_python(self, value):
"""
`value` has slashes in it, that's why we inherit from `PathConverter`.
E.g.: `commune/13200@latest/`, `departement/13@1860-07-01/` or
`region/76@2016-01-01/Auvergne-Rhone-Alpes/`.
Note that the slug is not significant but cannot be omitted.
"""
if '/' not in value:
return
level, code = value.split('/')[:2] # Ignore optional slug
geoid = GeoZone.SEPARATOR.join([level, code])
zone = GeoZone.objects.resolve(geoid)
if not zone and GeoZone.SEPARATOR not in level:
# Try implicit default prefix
level = GeoZone.SEPARATOR.join([self.DEFAULT_PREFIX, level])
geoid = GeoZone.SEPARATOR.join([level, code])
zone = GeoZone.objects.resolve(geoid)
return zone or NotFound() | python | def to_python(self, value):
"""
`value` has slashes in it, that's why we inherit from `PathConverter`.
E.g.: `commune/13200@latest/`, `departement/13@1860-07-01/` or
`region/76@2016-01-01/Auvergne-Rhone-Alpes/`.
Note that the slug is not significant but cannot be omitted.
"""
if '/' not in value:
return
level, code = value.split('/')[:2] # Ignore optional slug
geoid = GeoZone.SEPARATOR.join([level, code])
zone = GeoZone.objects.resolve(geoid)
if not zone and GeoZone.SEPARATOR not in level:
# Try implicit default prefix
level = GeoZone.SEPARATOR.join([self.DEFAULT_PREFIX, level])
geoid = GeoZone.SEPARATOR.join([level, code])
zone = GeoZone.objects.resolve(geoid)
return zone or NotFound() | [
"def",
"to_python",
"(",
"self",
",",
"value",
")",
":",
"if",
"'/'",
"not",
"in",
"value",
":",
"return",
"level",
",",
"code",
"=",
"value",
".",
"split",
"(",
"'/'",
")",
"[",
":",
"2",
"]",
"# Ignore optional slug",
"geoid",
"=",
"GeoZone",
".",
"SEPARATOR",
".",
"join",
"(",
"[",
"level",
",",
"code",
"]",
")",
"zone",
"=",
"GeoZone",
".",
"objects",
".",
"resolve",
"(",
"geoid",
")",
"if",
"not",
"zone",
"and",
"GeoZone",
".",
"SEPARATOR",
"not",
"in",
"level",
":",
"# Try implicit default prefix",
"level",
"=",
"GeoZone",
".",
"SEPARATOR",
".",
"join",
"(",
"[",
"self",
".",
"DEFAULT_PREFIX",
",",
"level",
"]",
")",
"geoid",
"=",
"GeoZone",
".",
"SEPARATOR",
".",
"join",
"(",
"[",
"level",
",",
"code",
"]",
")",
"zone",
"=",
"GeoZone",
".",
"objects",
".",
"resolve",
"(",
"geoid",
")",
"return",
"zone",
"or",
"NotFound",
"(",
")"
] | `value` has slashes in it, that's why we inherit from `PathConverter`.
E.g.: `commune/13200@latest/`, `departement/13@1860-07-01/` or
`region/76@2016-01-01/Auvergne-Rhone-Alpes/`.
Note that the slug is not significant but cannot be omitted. | [
"value",
"has",
"slashs",
"in",
"it",
"that",
"s",
"why",
"we",
"inherit",
"from",
"PathConverter",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/routing.py#L152-L175 | train |
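
The geoid construction in `to_python` can be illustrated without a database; a standalone sketch mirroring the split-and-prefix logic, with `SEPARATOR` and `DEFAULT_PREFIX` values assumed for illustration (the real method first tries to resolve the geoid as-is before falling back to the default prefix):

```python
SEPARATOR = ':'        # assumed GeoZone.SEPARATOR value
DEFAULT_PREFIX = 'fr'  # assumed TerritoryConverter.DEFAULT_PREFIX value

def build_geoid(value):
    # Mirror of to_python's parsing, minus the database resolution.
    if '/' not in value:
        return None
    level, code = value.split('/')[:2]  # trailing slug is ignored
    if SEPARATOR not in level:          # fallback: implicit default prefix
        level = SEPARATOR.join([DEFAULT_PREFIX, level])
    return SEPARATOR.join([level, code])

print(build_geoid('commune/13200@latest/aix-en-provence'))
# -> fr:commune:13200@latest
```
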
opendatateam/udata | udata/routing.py | TerritoryConverter.to_url | def to_url(self, obj):
"""
Reconstruct the URL from level name, code or datagouv id and slug.
"""
level_name = getattr(obj, 'level_name', None)
if not level_name:
raise ValueError('Unable to serialize "%s" to url' % obj)
code = getattr(obj, 'code', None)
slug = getattr(obj, 'slug', None)
validity = getattr(obj, 'validity', None)
if code and slug:
return '{level_name}/{code}@{start_date}/{slug}'.format(
level_name=level_name,
code=code,
start_date=getattr(validity, 'start', None) or 'latest',
slug=slug
)
else:
raise ValueError('Unable to serialize "%s" to url' % obj) | python | def to_url(self, obj):
"""
Reconstruct the URL from level name, code or datagouv id and slug.
"""
level_name = getattr(obj, 'level_name', None)
if not level_name:
raise ValueError('Unable to serialize "%s" to url' % obj)
code = getattr(obj, 'code', None)
slug = getattr(obj, 'slug', None)
validity = getattr(obj, 'validity', None)
if code and slug:
return '{level_name}/{code}@{start_date}/{slug}'.format(
level_name=level_name,
code=code,
start_date=getattr(validity, 'start', None) or 'latest',
slug=slug
)
else:
raise ValueError('Unable to serialize "%s" to url' % obj) | [
"def",
"to_url",
"(",
"self",
",",
"obj",
")",
":",
"level_name",
"=",
"getattr",
"(",
"obj",
",",
"'level_name'",
",",
"None",
")",
"if",
"not",
"level_name",
":",
"raise",
"ValueError",
"(",
"'Unable to serialize \"%s\" to url'",
"%",
"obj",
")",
"code",
"=",
"getattr",
"(",
"obj",
",",
"'code'",
",",
"None",
")",
"slug",
"=",
"getattr",
"(",
"obj",
",",
"'slug'",
",",
"None",
")",
"validity",
"=",
"getattr",
"(",
"obj",
",",
"'validity'",
",",
"None",
")",
"if",
"code",
"and",
"slug",
":",
"return",
"'{level_name}/{code}@{start_date}/{slug}'",
".",
"format",
"(",
"level_name",
"=",
"level_name",
",",
"code",
"=",
"code",
",",
"start_date",
"=",
"getattr",
"(",
"validity",
",",
"'start'",
",",
"None",
")",
"or",
"'latest'",
",",
"slug",
"=",
"slug",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unable to serialize \"%s\" to url'",
"%",
"obj",
")"
] | Reconstruct the URL from level name, code or datagouv id and slug. | [
"Reconstruct",
"the",
"URL",
"from",
"level",
"name",
"code",
"or",
"datagouv",
"id",
"and",
"slug",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/routing.py#L177-L196 | train |
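
A standalone sketch of the URL template `to_url` applies, reusing the example values from the `to_python` docstring:

```python
level_name, code, slug = 'commune', '13200', 'aix-en-provence'
validity_start = None  # no validity -> fall back to 'latest'

url = '{level_name}/{code}@{start_date}/{slug}'.format(
    level_name=level_name,
    code=code,
    start_date=validity_start or 'latest',
    slug=slug,
)
print(url)  # commune/13200@latest/aix-en-provence
```
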
opendatateam/udata | udata/tasks.py | job | def job(name, **kwargs):
'''A shortcut decorator for declaring jobs'''
return task(name=name, schedulable=True, base=JobTask,
bind=True, **kwargs) | python | def job(name, **kwargs):
'''A shortcut decorator for declaring jobs'''
return task(name=name, schedulable=True, base=JobTask,
bind=True, **kwargs) | [
"def",
"job",
"(",
"name",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"task",
"(",
"name",
"=",
"name",
",",
"schedulable",
"=",
"True",
",",
"base",
"=",
"JobTask",
",",
"bind",
"=",
"True",
",",
"*",
"*",
"kwargs",
")"
] | A shortcut decorator for declaring jobs | [
"A",
"shortcut",
"decorator",
"for",
"declaring",
"jobs"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/tasks.py#L80-L83 | train |
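
A hedged usage sketch of the `job` decorator: the task name and body are hypothetical, and `self` is available because the decorator passes `bind=True`:

```python
import logging

from udata.tasks import job

log = logging.getLogger(__name__)

@job('purge-deleted-datasets')  # hypothetical task name
def purge_deleted_datasets(self):
    # `self` is the bound JobTask instance (bind=True above),
    # and the task is schedulable (schedulable=True).
    log.info('Purging deleted datasets')
```
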
opendatateam/udata | udata/tasks.py | Scheduler.apply_async | def apply_async(self, entry, **kwargs):
'''A MongoScheduler storing the last task_id'''
result = super(Scheduler, self).apply_async(entry, **kwargs)
entry._task.last_run_id = result.id
return result | python | def apply_async(self, entry, **kwargs):
'''A MongoScheduler storing the last task_id'''
result = super(Scheduler, self).apply_async(entry, **kwargs)
entry._task.last_run_id = result.id
return result | [
"def",
"apply_async",
"(",
"self",
",",
"entry",
",",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"super",
"(",
"Scheduler",
",",
"self",
")",
".",
"apply_async",
"(",
"entry",
",",
"*",
"*",
"kwargs",
")",
"entry",
".",
"_task",
".",
"last_run_id",
"=",
"result",
".",
"id",
"return",
"result"
] | A MongoScheduler storing the last task_id | [
"A",
"MongoScheduler",
"storing",
"the",
"last",
"task_id"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/tasks.py#L40-L44 | train |
opendatateam/udata | udata/commands/info.py | config | def config():
'''Display some details about the local configuration'''
if hasattr(current_app, 'settings_file'):
log.info('Loaded configuration from %s', current_app.settings_file)
log.info(white('Current configuration'))
for key in sorted(current_app.config):
if key.startswith('__') or not key.isupper():
continue
echo('{0}: {1}'.format(white(key), current_app.config[key])) | python | def config():
'''Display some details about the local configuration'''
if hasattr(current_app, 'settings_file'):
log.info('Loaded configuration from %s', current_app.settings_file)
log.info(white('Current configuration'))
for key in sorted(current_app.config):
if key.startswith('__') or not key.isupper():
continue
echo('{0}: {1}'.format(white(key), current_app.config[key])) | [
"def",
"config",
"(",
")",
":",
"if",
"hasattr",
"(",
"current_app",
",",
"'settings_file'",
")",
":",
"log",
".",
"info",
"(",
"'Loaded configuration from %s'",
",",
"current_app",
".",
"settings_file",
")",
"log",
".",
"info",
"(",
"white",
"(",
"'Current configuration'",
")",
")",
"for",
"key",
"in",
"sorted",
"(",
"current_app",
".",
"config",
")",
":",
"if",
"key",
".",
"startswith",
"(",
"'__'",
")",
"or",
"not",
"key",
".",
"isupper",
"(",
")",
":",
"continue",
"echo",
"(",
"'{0}: {1}'",
".",
"format",
"(",
"white",
"(",
"key",
")",
",",
"current_app",
".",
"config",
"[",
"key",
"]",
")",
")"
] | Display some details about the local configuration | [
"Display",
"some",
"details",
"about",
"the",
"local",
"configuration"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/info.py#L34-L43 | train |
opendatateam/udata | udata/commands/info.py | plugins | def plugins():
'''Display some details about the local plugins'''
plugins = current_app.config['PLUGINS']
for name, description in entrypoints.ENTRYPOINTS.items():
echo('{0} ({1})'.format(white(description), name))
if name == 'udata.themes':
actives = [current_app.config['THEME']]
elif name == 'udata.avatars':
actives = [avatar_config('provider')]
else:
actives = plugins
for ep in sorted(entrypoints.iter_all(name), key=by_name):
echo('> {0}: {1}'.format(ep.name, is_active(ep, actives))) | python | def plugins():
'''Display some details about the local plugins'''
plugins = current_app.config['PLUGINS']
for name, description in entrypoints.ENTRYPOINTS.items():
echo('{0} ({1})'.format(white(description), name))
if name == 'udata.themes':
actives = [current_app.config['THEME']]
elif name == 'udata.avatars':
actives = [avatar_config('provider')]
else:
actives = plugins
for ep in sorted(entrypoints.iter_all(name), key=by_name):
echo('> {0}: {1}'.format(ep.name, is_active(ep, actives))) | [
"def",
"plugins",
"(",
")",
":",
"plugins",
"=",
"current_app",
".",
"config",
"[",
"'PLUGINS'",
"]",
"for",
"name",
",",
"description",
"in",
"entrypoints",
".",
"ENTRYPOINTS",
".",
"items",
"(",
")",
":",
"echo",
"(",
"'{0} ({1})'",
".",
"format",
"(",
"white",
"(",
"description",
")",
",",
"name",
")",
")",
"if",
"name",
"==",
"'udata.themes'",
":",
"actives",
"=",
"[",
"current_app",
".",
"config",
"[",
"'THEME'",
"]",
"]",
"elif",
"name",
"==",
"'udata.avatars'",
":",
"actives",
"=",
"[",
"avatar_config",
"(",
"'provider'",
")",
"]",
"else",
":",
"actives",
"=",
"plugins",
"for",
"ep",
"in",
"sorted",
"(",
"entrypoints",
".",
"iter_all",
"(",
"name",
")",
",",
"key",
"=",
"by_name",
")",
":",
"echo",
"(",
"'> {0}: {1}'",
".",
"format",
"(",
"ep",
".",
"name",
",",
"is_active",
"(",
"ep",
",",
"actives",
")",
")",
")"
] | Display some details about the local plugins | [
"Display",
"some",
"details",
"about",
"the",
"local",
"plugins"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/commands/info.py#L47-L59 | train |
opendatateam/udata | udata/frontend/views.py | BaseView.can | def can(self, *args, **kwargs):
'''Override this method to implement custom contextual permissions'''
if isinstance(self.require, auth.Permission):
return self.require.can()
elif callable(self.require):
return self.require()
elif isinstance(self.require, bool):
return self.require
else:
return True | python | def can(self, *args, **kwargs):
'''Override this method to implement custom contextual permissions'''
if isinstance(self.require, auth.Permission):
return self.require.can()
elif callable(self.require):
return self.require()
elif isinstance(self.require, bool):
return self.require
else:
return True | [
"def",
"can",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"require",
",",
"auth",
".",
"Permission",
")",
":",
"return",
"self",
".",
"require",
".",
"can",
"(",
")",
"elif",
"callable",
"(",
"self",
".",
"require",
")",
":",
"return",
"self",
".",
"require",
"(",
")",
"elif",
"isinstance",
"(",
"self",
".",
"require",
",",
"bool",
")",
":",
"return",
"self",
".",
"require",
"else",
":",
"return",
"True"
] | Override this method to implement custom contextual permissions | [
"Overwrite",
"this",
"method",
"to",
"implement",
"custom",
"contextual",
"permissions"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/frontend/views.py#L36-L45 | train |
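
A standalone sketch of the dispatch implemented by `can`, with a stand-in `Permission` class so the three accepted `require` forms can be exercised without udata:

```python
class Permission(object):
    # Stand-in for udata's auth.Permission.
    def __init__(self, granted):
        self.granted = granted

    def can(self):
        return self.granted

def resolve_require(require):
    # Same dispatch order as BaseView.can above.
    if isinstance(require, Permission):
        return require.can()
    elif callable(require):
        return require()
    elif isinstance(require, bool):
        return require
    else:
        return True

print(resolve_require(Permission(False)))  # False: permission denied
print(resolve_require(lambda: True))       # True: the callable's result
print(resolve_require(None))               # True: no requirement set
```
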
opendatateam/udata | udata/core/storages/utils.py | extension | def extension(filename):
'''Properly extract the extension from filename'''
filename = os.path.basename(filename)
extension = None
while '.' in filename:
filename, ext = os.path.splitext(filename)
if ext.startswith('.'):
ext = ext[1:]
extension = ext if not extension else ext + '.' + extension
return extension | python | def extension(filename):
'''Properly extract the extension from filename'''
filename = os.path.basename(filename)
extension = None
while '.' in filename:
filename, ext = os.path.splitext(filename)
if ext.startswith('.'):
ext = ext[1:]
extension = ext if not extension else ext + '.' + extension
return extension | [
"def",
"extension",
"(",
"filename",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"filename",
")",
"extension",
"=",
"None",
"while",
"'.'",
"in",
"filename",
":",
"filename",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"if",
"ext",
".",
"startswith",
"(",
"'.'",
")",
":",
"ext",
"=",
"ext",
"[",
"1",
":",
"]",
"extension",
"=",
"ext",
"if",
"not",
"extension",
"else",
"ext",
"+",
"'.'",
"+",
"extension",
"return",
"extension"
] | Properly extract the extension from filename | [
"Properly",
"extract",
"the",
"extension",
"from",
"filename"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/storages/utils.py#L48-L59 | train |
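
The loop above keeps compound extensions intact; a few quick checks, runnable once the function is imported from the module shown in the path column:

```python
from udata.core.storages.utils import extension

assert extension('data.csv') == 'csv'
assert extension('archive.tar.gz') == 'tar.gz'        # compound extension kept
assert extension('/tmp/export/dump.json.bz2') == 'json.bz2'
assert extension('README') is None                    # no dot, no extension
```
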
opendatateam/udata | udata/theme/__init__.py | theme_static_with_version | def theme_static_with_version(ctx, filename, external=False):
'''Override the default theme static to add cache burst'''
if current_app.theme_manager.static_folder:
url = assets.cdn_for('_themes.static',
filename=current.identifier + '/' + filename,
_external=external)
else:
url = assets.cdn_for('_themes.static',
themeid=current.identifier,
filename=filename,
_external=external)
if url.endswith('/'): # this is a directory, no need for cache burst
return url
if current_app.config['DEBUG']:
burst = time()
else:
burst = current.entrypoint.dist.version
return '{url}?_={burst}'.format(url=url, burst=burst) | python | def theme_static_with_version(ctx, filename, external=False):
'''Override the default theme static to add cache burst'''
if current_app.theme_manager.static_folder:
url = assets.cdn_for('_themes.static',
filename=current.identifier + '/' + filename,
_external=external)
else:
url = assets.cdn_for('_themes.static',
themeid=current.identifier,
filename=filename,
_external=external)
if url.endswith('/'): # this is a directory, no need for cache burst
return url
if current_app.config['DEBUG']:
burst = time()
else:
burst = current.entrypoint.dist.version
return '{url}?_={burst}'.format(url=url, burst=burst) | [
"def",
"theme_static_with_version",
"(",
"ctx",
",",
"filename",
",",
"external",
"=",
"False",
")",
":",
"if",
"current_app",
".",
"theme_manager",
".",
"static_folder",
":",
"url",
"=",
"assets",
".",
"cdn_for",
"(",
"'_themes.static'",
",",
"filename",
"=",
"current",
".",
"identifier",
"+",
"'/'",
"+",
"filename",
",",
"_external",
"=",
"external",
")",
"else",
":",
"url",
"=",
"assets",
".",
"cdn_for",
"(",
"'_themes.static'",
",",
"themeid",
"=",
"current",
".",
"identifier",
",",
"filename",
"=",
"filename",
",",
"_external",
"=",
"external",
")",
"if",
"url",
".",
"endswith",
"(",
"'/'",
")",
":",
"# this is a directory, no need for cache burst",
"return",
"url",
"if",
"current_app",
".",
"config",
"[",
"'DEBUG'",
"]",
":",
"burst",
"=",
"time",
"(",
")",
"else",
":",
"burst",
"=",
"current",
".",
"entrypoint",
".",
"dist",
".",
"version",
"return",
"'{url}?_={burst}'",
".",
"format",
"(",
"url",
"=",
"url",
",",
"burst",
"=",
"burst",
")"
] | Override the default theme static to add cache burst | [
"Override",
"the",
"default",
"theme",
"static",
"to",
"add",
"cache",
"burst"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/theme/__init__.py#L48-L65 | train |
opendatateam/udata | udata/theme/__init__.py | render | def render(template, **context):
'''
Render a template with uData frontend specifics
* Theme
'''
theme = current_app.config['THEME']
return render_theme_template(get_theme(theme), template, **context) | python | def render(template, **context):
'''
Render a template with uData frontend specifics
* Theme
'''
theme = current_app.config['THEME']
return render_theme_template(get_theme(theme), template, **context) | [
"def",
"render",
"(",
"template",
",",
"*",
"*",
"context",
")",
":",
"theme",
"=",
"current_app",
".",
"config",
"[",
"'THEME'",
"]",
"return",
"render_theme_template",
"(",
"get_theme",
"(",
"theme",
")",
",",
"template",
",",
"*",
"*",
"context",
")"
] | Render a template with uData frontend specifics
* Theme | [
"Render",
"a",
"template",
"with",
"uData",
"frontend",
"specifics"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/theme/__init__.py#L141-L148 | train |
opendatateam/udata | udata/theme/__init__.py | context | def context(name):
'''A decorator for theme context processors'''
def wrapper(func):
g.theme.context_processors[name] = func
return func
return wrapper | python | def context(name):
'''A decorator for theme context processors'''
def wrapper(func):
g.theme.context_processors[name] = func
return func
return wrapper | [
"def",
"context",
"(",
"name",
")",
":",
"def",
"wrapper",
"(",
"func",
")",
":",
"g",
".",
"theme",
".",
"context_processors",
"[",
"name",
"]",
"=",
"func",
"return",
"func",
"return",
"wrapper"
] | A decorator for theme context processors | [
"A",
"decorator",
"for",
"theme",
"context",
"processors"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/theme/__init__.py#L159-L164 | train |
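
A hedged usage sketch of the decorator: a theme registering an extra context processor for one of its templates. The processor signature (take and return the context dict) is an assumption based on common usage, and the template name and variable are hypothetical:

```python
from udata import theme

@theme.context('home')  # hypothetical template name
def home_context(context):
    context['featured_count'] = 3  # hypothetical extra variable
    return context
```
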
opendatateam/udata | udata/theme/__init__.py | ConfigurableTheme.variant | def variant(self):
'''Get the current theme variant'''
variant = current_app.config['THEME_VARIANT']
if variant not in self.variants:
log.warning('Unknown theme variant: %s', variant)
return 'default'
else:
return variant | python | def variant(self):
'''Get the current theme variant'''
variant = current_app.config['THEME_VARIANT']
if variant not in self.variants:
log.warning('Unknown theme variant: %s', variant)
return 'default'
else:
return variant | [
"def",
"variant",
"(",
"self",
")",
":",
"variant",
"=",
"current_app",
".",
"config",
"[",
"'THEME_VARIANT'",
"]",
"if",
"variant",
"not",
"in",
"self",
".",
"variants",
":",
"log",
".",
"warning",
"(",
"'Unkown theme variant: %s'",
",",
"variant",
")",
"return",
"'default'",
"else",
":",
"return",
"variant"
] | Get the current theme variant | [
"Get",
"the",
"current",
"theme",
"variant"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/theme/__init__.py#L110-L117 | train |
opendatateam/udata | udata/core/dataset/views.py | resource_redirect | def resource_redirect(id):
'''
Redirect to the latest version of a resource given its identifier.
'''
resource = get_resource(id)
return redirect(resource.url.strip()) if resource else abort(404) | python | def resource_redirect(id):
'''
Redirect to the latest version of a resource given its identifier.
'''
resource = get_resource(id)
return redirect(resource.url.strip()) if resource else abort(404) | [
"def",
"resource_redirect",
"(",
"id",
")",
":",
"resource",
"=",
"get_resource",
"(",
"id",
")",
"return",
"redirect",
"(",
"resource",
".",
"url",
".",
"strip",
"(",
")",
")",
"if",
"resource",
"else",
"abort",
"(",
"404",
")"
] | Redirect to the latest version of a resource given its identifier. | [
"Redirect",
"to",
"the",
"latest",
"version",
"of",
"a",
"resource",
"given",
"its",
"identifier",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/views.py#L130-L135 | train |
opendatateam/udata | udata/core/dataset/views.py | group_resources_by_type | def group_resources_by_type(resources):
"""Group a list of `resources` by `type` with order"""
groups = defaultdict(list)
for resource in resources:
groups[getattr(resource, 'type')].append(resource)
ordered = OrderedDict()
for rtype, rtype_label in RESOURCE_TYPES.items():
if groups[rtype]:
ordered[(rtype, rtype_label)] = groups[rtype]
return ordered | python | def group_resources_by_type(resources):
"""Group a list of `resources` by `type` with order"""
groups = defaultdict(list)
for resource in resources:
groups[getattr(resource, 'type')].append(resource)
ordered = OrderedDict()
for rtype, rtype_label in RESOURCE_TYPES.items():
if groups[rtype]:
ordered[(rtype, rtype_label)] = groups[rtype]
return ordered | [
"def",
"group_resources_by_type",
"(",
"resources",
")",
":",
"groups",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"resource",
"in",
"resources",
":",
"groups",
"[",
"getattr",
"(",
"resource",
",",
"'type'",
")",
"]",
".",
"append",
"(",
"resource",
")",
"ordered",
"=",
"OrderedDict",
"(",
")",
"for",
"rtype",
",",
"rtype_label",
"in",
"RESOURCE_TYPES",
".",
"items",
"(",
")",
":",
"if",
"groups",
"[",
"rtype",
"]",
":",
"ordered",
"[",
"(",
"rtype",
",",
"rtype_label",
")",
"]",
"=",
"groups",
"[",
"rtype",
"]",
"return",
"ordered"
] | Group a list of `resources` by `type` with order | [
"Group",
"a",
"list",
"of",
"resources",
"by",
"type",
"with",
"order"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/views.py#L166-L175 | train |
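
The group-then-order pattern above in isolation, with a hypothetical two-entry `RESOURCE_TYPES`:

```python
from collections import OrderedDict, defaultdict

RESOURCE_TYPES = OrderedDict([('main', 'Main file'), ('api', 'API')])
resources = [{'type': 'api'}, {'type': 'main'}, {'type': 'main'}]

# Bucket resources by type, then emit buckets in RESOURCE_TYPES order,
# skipping empty types -- same shape as group_resources_by_type above.
groups = defaultdict(list)
for resource in resources:
    groups[resource['type']].append(resource)

ordered = OrderedDict(
    ((rtype, label), groups[rtype])
    for rtype, label in RESOURCE_TYPES.items()
    if groups[rtype]
)
print(list(ordered))
# [('main', 'Main file'), ('api', 'API')]
```
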
opendatateam/udata | udata/core/metrics/__init__.py | Metric.aggregate | def aggregate(self, start, end):
'''
This method encapsulates the metric aggregation logic.
Override this method when you inherit this class.
By default, it takes the last value.
'''
last = self.objects(
level='daily', date__lte=self.iso(end),
date__gte=self.iso(start)).order_by('-date').first()
return last.values[self.name] | python | def aggregate(self, start, end):
'''
This method encapsulates the metric aggregation logic.
Override this method when you inherit this class.
By default, it takes the last value.
'''
last = self.objects(
level='daily', date__lte=self.iso(end),
date__gte=self.iso(start)).order_by('-date').first()
return last.values[self.name] | [
"def",
"aggregate",
"(",
"self",
",",
"start",
",",
"end",
")",
":",
"last",
"=",
"self",
".",
"objects",
"(",
"level",
"=",
"'daily'",
",",
"date__lte",
"=",
"self",
".",
"iso",
"(",
"end",
")",
",",
"date__gte",
"=",
"self",
".",
"iso",
"(",
"start",
")",
")",
".",
"order_by",
"(",
"'-date'",
")",
".",
"first",
"(",
")",
"return",
"last",
".",
"values",
"[",
"self",
".",
"name",
"]"
] | This method encapsulates the metric aggregation logic.
Override this method when you inherit this class.
By default, it takes the last value. | [
"This",
"method",
"encpsualte",
"the",
"metric",
"aggregation",
"logic",
".",
"Override",
"this",
"method",
"when",
"you",
"inherit",
"this",
"class",
".",
"By",
"default",
"it",
"takes",
"the",
"last",
"value",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/metrics/__init__.py#L92-L101 | train |
opendatateam/udata | udata/harvest/actions.py | paginate_sources | def paginate_sources(owner=None, page=1, page_size=DEFAULT_PAGE_SIZE):
'''Paginate harvest sources'''
sources = _sources_queryset(owner=owner)
page = max(page or 1, 1)
return sources.paginate(page, page_size) | python | def paginate_sources(owner=None, page=1, page_size=DEFAULT_PAGE_SIZE):
'''Paginate harvest sources'''
sources = _sources_queryset(owner=owner)
page = max(page or 1, 1)
return sources.paginate(page, page_size) | [
"def",
"paginate_sources",
"(",
"owner",
"=",
"None",
",",
"page",
"=",
"1",
",",
"page_size",
"=",
"DEFAULT_PAGE_SIZE",
")",
":",
"sources",
"=",
"_sources_queryset",
"(",
"owner",
"=",
"owner",
")",
"page",
"=",
"max",
"(",
"page",
"or",
"1",
",",
"1",
")",
"return",
"sources",
".",
"paginate",
"(",
"page",
",",
"page_size",
")"
] | Paginate harvest sources | [
"Paginate",
"harvest",
"sources"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/actions.py#L45-L49 | train |
opendatateam/udata | udata/harvest/actions.py | update_source | def update_source(ident, data):
'''Update a harvest source'''
source = get_source(ident)
source.modify(**data)
signals.harvest_source_updated.send(source)
return source | python | def update_source(ident, data):
'''Update a harvest source'''
source = get_source(ident)
source.modify(**data)
signals.harvest_source_updated.send(source)
return source | [
"def",
"update_source",
"(",
"ident",
",",
"data",
")",
":",
"source",
"=",
"get_source",
"(",
"ident",
")",
"source",
".",
"modify",
"(",
"*",
"*",
"data",
")",
"signals",
".",
"harvest_source_updated",
".",
"send",
"(",
"source",
")",
"return",
"source"
] | Update a harvest source | [
"Update",
"an",
"harvest",
"source"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/actions.py#L90-L95 | train |
opendatateam/udata | udata/harvest/actions.py | validate_source | def validate_source(ident, comment=None):
'''Validate a source for automatic harvesting'''
source = get_source(ident)
source.validation.on = datetime.now()
source.validation.comment = comment
source.validation.state = VALIDATION_ACCEPTED
if current_user.is_authenticated:
source.validation.by = current_user._get_current_object()
source.save()
schedule(ident, cron=current_app.config['HARVEST_DEFAULT_SCHEDULE'])
launch(ident)
return source | python | def validate_source(ident, comment=None):
'''Validate a source for automatic harvesting'''
source = get_source(ident)
source.validation.on = datetime.now()
source.validation.comment = comment
source.validation.state = VALIDATION_ACCEPTED
if current_user.is_authenticated:
source.validation.by = current_user._get_current_object()
source.save()
schedule(ident, cron=current_app.config['HARVEST_DEFAULT_SCHEDULE'])
launch(ident)
return source | [
"def",
"validate_source",
"(",
"ident",
",",
"comment",
"=",
"None",
")",
":",
"source",
"=",
"get_source",
"(",
"ident",
")",
"source",
".",
"validation",
".",
"on",
"=",
"datetime",
".",
"now",
"(",
")",
"source",
".",
"validation",
".",
"comment",
"=",
"comment",
"source",
".",
"validation",
".",
"state",
"=",
"VALIDATION_ACCEPTED",
"if",
"current_user",
".",
"is_authenticated",
":",
"source",
".",
"validation",
".",
"by",
"=",
"current_user",
".",
"_get_current_object",
"(",
")",
"source",
".",
"save",
"(",
")",
"schedule",
"(",
"ident",
",",
"cron",
"=",
"current_app",
".",
"config",
"[",
"'HARVEST_DEFAULT_SCHEDULE'",
"]",
")",
"launch",
"(",
"ident",
")",
"return",
"source"
] | Validate a source for automatic harvesting | [
"Validate",
"a",
"source",
"for",
"automatic",
"harvesting"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/actions.py#L98-L109 | train |
opendatateam/udata | udata/harvest/actions.py | reject_source | def reject_source(ident, comment):
'''Reject a source for automatic harvesting'''
source = get_source(ident)
source.validation.on = datetime.now()
source.validation.comment = comment
source.validation.state = VALIDATION_REFUSED
if current_user.is_authenticated:
source.validation.by = current_user._get_current_object()
source.save()
return source | python | def reject_source(ident, comment):
'''Reject a source for automatic harvesting'''
source = get_source(ident)
source.validation.on = datetime.now()
source.validation.comment = comment
source.validation.state = VALIDATION_REFUSED
if current_user.is_authenticated:
source.validation.by = current_user._get_current_object()
source.save()
return source | [
"def",
"reject_source",
"(",
"ident",
",",
"comment",
")",
":",
"source",
"=",
"get_source",
"(",
"ident",
")",
"source",
".",
"validation",
".",
"on",
"=",
"datetime",
".",
"now",
"(",
")",
"source",
".",
"validation",
".",
"comment",
"=",
"comment",
"source",
".",
"validation",
".",
"state",
"=",
"VALIDATION_REFUSED",
"if",
"current_user",
".",
"is_authenticated",
":",
"source",
".",
"validation",
".",
"by",
"=",
"current_user",
".",
"_get_current_object",
"(",
")",
"source",
".",
"save",
"(",
")",
"return",
"source"
] | Reject a source for automatic harvesting | [
"Reject",
"a",
"source",
"for",
"automatic",
"harvesting"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/actions.py#L112-L121 | train |
opendatateam/udata | udata/harvest/actions.py | delete_source | def delete_source(ident):
'''Delete a harvest source'''
source = get_source(ident)
source.deleted = datetime.now()
source.save()
signals.harvest_source_deleted.send(source)
return source | python | def delete_source(ident):
'''Delete a harvest source'''
source = get_source(ident)
source.deleted = datetime.now()
source.save()
signals.harvest_source_deleted.send(source)
return source | [
"def",
"delete_source",
"(",
"ident",
")",
":",
"source",
"=",
"get_source",
"(",
"ident",
")",
"source",
".",
"deleted",
"=",
"datetime",
".",
"now",
"(",
")",
"source",
".",
"save",
"(",
")",
"signals",
".",
"harvest_source_deleted",
".",
"send",
"(",
"source",
")",
"return",
"source"
] | Delete a harvest source | [
"Delete",
"an",
"harvest",
"source"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/actions.py#L124-L130 | train |
opendatateam/udata | udata/harvest/actions.py | run | def run(ident):
'''Launch or resume harvesting for a given source if none is running'''
source = get_source(ident)
cls = backends.get(current_app, source.backend)
backend = cls(source)
backend.harvest() | python | def run(ident):
'''Launch or resume harvesting for a given source if none is running'''
source = get_source(ident)
cls = backends.get(current_app, source.backend)
backend = cls(source)
backend.harvest() | [
"def",
"run",
"(",
"ident",
")",
":",
"source",
"=",
"get_source",
"(",
"ident",
")",
"cls",
"=",
"backends",
".",
"get",
"(",
"current_app",
",",
"source",
".",
"backend",
")",
"backend",
"=",
"cls",
"(",
"source",
")",
"backend",
".",
"harvest",
"(",
")"
] | Launch or resume harvesting for a given source if none is running | [
"Launch",
"or",
"resume",
"an",
"harvesting",
"for",
"a",
"given",
"source",
"if",
"none",
"is",
"running"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/actions.py#L138-L143 | train |
opendatateam/udata | udata/harvest/actions.py | preview | def preview(ident):
'''Preview harvesting for a given source'''
source = get_source(ident)
cls = backends.get(current_app, source.backend)
max_items = current_app.config['HARVEST_PREVIEW_MAX_ITEMS']
backend = cls(source, dryrun=True, max_items=max_items)
return backend.harvest() | python | def preview(ident):
'''Preview harvesting for a given source'''
source = get_source(ident)
cls = backends.get(current_app, source.backend)
max_items = current_app.config['HARVEST_PREVIEW_MAX_ITEMS']
backend = cls(source, dryrun=True, max_items=max_items)
return backend.harvest() | [
"def",
"preview",
"(",
"ident",
")",
":",
"source",
"=",
"get_source",
"(",
"ident",
")",
"cls",
"=",
"backends",
".",
"get",
"(",
"current_app",
",",
"source",
".",
"backend",
")",
"max_items",
"=",
"current_app",
".",
"config",
"[",
"'HARVEST_PREVIEW_MAX_ITEMS'",
"]",
"backend",
"=",
"cls",
"(",
"source",
",",
"dryrun",
"=",
"True",
",",
"max_items",
"=",
"max_items",
")",
"return",
"backend",
".",
"harvest",
"(",
")"
] | Preview harvesting for a given source | [
"Preview",
"an",
"harvesting",
"for",
"a",
"given",
"source"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/actions.py#L151-L157 | train |
opendatateam/udata | udata/harvest/actions.py | preview_from_config | def preview_from_config(name, url, backend,
description=None,
frequency=DEFAULT_HARVEST_FREQUENCY,
owner=None,
organization=None,
config=None,
):
'''Preview harvesting from a source created with the given parameters'''
if owner and not isinstance(owner, User):
owner = User.get(owner)
if organization and not isinstance(organization, Organization):
organization = Organization.get(organization)
source = HarvestSource(
name=name,
url=url,
backend=backend,
description=description,
frequency=frequency or DEFAULT_HARVEST_FREQUENCY,
owner=owner,
organization=organization,
config=config,
)
cls = backends.get(current_app, source.backend)
max_items = current_app.config['HARVEST_PREVIEW_MAX_ITEMS']
backend = cls(source, dryrun=True, max_items=max_items)
return backend.harvest() | python | def preview_from_config(name, url, backend,
description=None,
frequency=DEFAULT_HARVEST_FREQUENCY,
owner=None,
organization=None,
config=None,
):
'''Preview harvesting from a source created with the given parameters'''
if owner and not isinstance(owner, User):
owner = User.get(owner)
if organization and not isinstance(organization, Organization):
organization = Organization.get(organization)
source = HarvestSource(
name=name,
url=url,
backend=backend,
description=description,
frequency=frequency or DEFAULT_HARVEST_FREQUENCY,
owner=owner,
organization=organization,
config=config,
)
cls = backends.get(current_app, source.backend)
max_items = current_app.config['HARVEST_PREVIEW_MAX_ITEMS']
backend = cls(source, dryrun=True, max_items=max_items)
return backend.harvest() | [
"def",
"preview_from_config",
"(",
"name",
",",
"url",
",",
"backend",
",",
"description",
"=",
"None",
",",
"frequency",
"=",
"DEFAULT_HARVEST_FREQUENCY",
",",
"owner",
"=",
"None",
",",
"organization",
"=",
"None",
",",
"config",
"=",
"None",
",",
")",
":",
"if",
"owner",
"and",
"not",
"isinstance",
"(",
"owner",
",",
"User",
")",
":",
"owner",
"=",
"User",
".",
"get",
"(",
"owner",
")",
"if",
"organization",
"and",
"not",
"isinstance",
"(",
"organization",
",",
"Organization",
")",
":",
"organization",
"=",
"Organization",
".",
"get",
"(",
"organization",
")",
"source",
"=",
"HarvestSource",
"(",
"name",
"=",
"name",
",",
"url",
"=",
"url",
",",
"backend",
"=",
"backend",
",",
"description",
"=",
"description",
",",
"frequency",
"=",
"frequency",
"or",
"DEFAULT_HARVEST_FREQUENCY",
",",
"owner",
"=",
"owner",
",",
"organization",
"=",
"organization",
",",
"config",
"=",
"config",
",",
")",
"cls",
"=",
"backends",
".",
"get",
"(",
"current_app",
",",
"source",
".",
"backend",
")",
"max_items",
"=",
"current_app",
".",
"config",
"[",
"'HARVEST_PREVIEW_MAX_ITEMS'",
"]",
"backend",
"=",
"cls",
"(",
"source",
",",
"dryrun",
"=",
"True",
",",
"max_items",
"=",
"max_items",
")",
"return",
"backend",
".",
"harvest",
"(",
")"
] | Preview harvesting from a source created with the given parameters | [
"Preview",
"an",
"harvesting",
"from",
"a",
"source",
"created",
"with",
"the",
"given",
"parameters"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/actions.py#L160-L187 | train |
opendatateam/udata | udata/harvest/actions.py | schedule | def schedule(ident, cron=None, minute='*', hour='*',
day_of_week='*', day_of_month='*', month_of_year='*'):
'''Schedule harvesting on a source given a crontab'''
source = get_source(ident)
if cron:
minute, hour, day_of_month, month_of_year, day_of_week = cron.split()
crontab = PeriodicTask.Crontab(
minute=str(minute),
hour=str(hour),
day_of_week=str(day_of_week),
day_of_month=str(day_of_month),
month_of_year=str(month_of_year)
)
if source.periodic_task:
source.periodic_task.modify(crontab=crontab)
else:
source.modify(periodic_task=PeriodicTask.objects.create(
task='harvest',
name='Harvest {0}'.format(source.name),
description='Periodic Harvesting',
enabled=True,
args=[str(source.id)],
crontab=crontab,
))
signals.harvest_source_scheduled.send(source)
return source | python | def schedule(ident, cron=None, minute='*', hour='*',
day_of_week='*', day_of_month='*', month_of_year='*'):
'''Schedule harvesting on a source given a crontab'''
source = get_source(ident)
if cron:
minute, hour, day_of_month, month_of_year, day_of_week = cron.split()
crontab = PeriodicTask.Crontab(
minute=str(minute),
hour=str(hour),
day_of_week=str(day_of_week),
day_of_month=str(day_of_month),
month_of_year=str(month_of_year)
)
if source.periodic_task:
source.periodic_task.modify(crontab=crontab)
else:
source.modify(periodic_task=PeriodicTask.objects.create(
task='harvest',
name='Harvest {0}'.format(source.name),
description='Periodic Harvesting',
enabled=True,
args=[str(source.id)],
crontab=crontab,
))
signals.harvest_source_scheduled.send(source)
return source | [
"def",
"schedule",
"(",
"ident",
",",
"cron",
"=",
"None",
",",
"minute",
"=",
"'*'",
",",
"hour",
"=",
"'*'",
",",
"day_of_week",
"=",
"'*'",
",",
"day_of_month",
"=",
"'*'",
",",
"month_of_year",
"=",
"'*'",
")",
":",
"source",
"=",
"get_source",
"(",
"ident",
")",
"if",
"cron",
":",
"minute",
",",
"hour",
",",
"day_of_month",
",",
"month_of_year",
",",
"day_of_week",
"=",
"cron",
".",
"split",
"(",
")",
"crontab",
"=",
"PeriodicTask",
".",
"Crontab",
"(",
"minute",
"=",
"str",
"(",
"minute",
")",
",",
"hour",
"=",
"str",
"(",
"hour",
")",
",",
"day_of_week",
"=",
"str",
"(",
"day_of_week",
")",
",",
"day_of_month",
"=",
"str",
"(",
"day_of_month",
")",
",",
"month_of_year",
"=",
"str",
"(",
"month_of_year",
")",
")",
"if",
"source",
".",
"periodic_task",
":",
"source",
".",
"periodic_task",
".",
"modify",
"(",
"crontab",
"=",
"crontab",
")",
"else",
":",
"source",
".",
"modify",
"(",
"periodic_task",
"=",
"PeriodicTask",
".",
"objects",
".",
"create",
"(",
"task",
"=",
"'harvest'",
",",
"name",
"=",
"'Harvest {0}'",
".",
"format",
"(",
"source",
".",
"name",
")",
",",
"description",
"=",
"'Periodic Harvesting'",
",",
"enabled",
"=",
"True",
",",
"args",
"=",
"[",
"str",
"(",
"source",
".",
"id",
")",
"]",
",",
"crontab",
"=",
"crontab",
",",
")",
")",
"signals",
".",
"harvest_source_scheduled",
".",
"send",
"(",
"source",
")",
"return",
"source"
] | Schedule harvesting on a source given a crontab | [
"Schedule",
"an",
"harvesting",
"on",
"a",
"source",
"given",
"a",
"crontab"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/actions.py#L190-L218 | train |
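
A hedged usage sketch of the two calling conventions `schedule` accepts; the source identifier is a placeholder. Note the five crontab fields follow the standard minute/hour/day-of-month/month/day-of-week order:

```python
from udata.harvest.actions import schedule

# 1. A five-field crontab string:
schedule('my-source-id', cron='0 3 * * 1')  # every Monday at 03:00

# 2. Explicit keyword fields (unspecified ones default to '*'):
schedule('my-source-id', minute='0', hour='3', day_of_week='1')
```
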
opendatateam/udata | udata/harvest/actions.py | unschedule | def unschedule(ident):
'''Unschedule harvesting on a source'''
source = get_source(ident)
if not source.periodic_task:
msg = 'Harvesting on source {0} is not scheduled'.format(source.name)
raise ValueError(msg)
source.periodic_task.delete()
signals.harvest_source_unscheduled.send(source)
return source | python | def unschedule(ident):
'''Unschedule harvesting on a source'''
source = get_source(ident)
if not source.periodic_task:
msg = 'Harvesting on source {0} is not scheduled'.format(source.name)
raise ValueError(msg)
source.periodic_task.delete()
signals.harvest_source_unscheduled.send(source)
return source | [
"def",
"unschedule",
"(",
"ident",
")",
":",
"source",
"=",
"get_source",
"(",
"ident",
")",
"if",
"not",
"source",
".",
"periodic_task",
":",
"msg",
"=",
"'Harvesting on source {0} is ot scheduled'",
".",
"format",
"(",
"source",
".",
"name",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"source",
".",
"periodic_task",
".",
"delete",
"(",
")",
"signals",
".",
"harvest_source_unscheduled",
".",
"send",
"(",
"source",
")",
"return",
"source"
] | Unschedule harvesting on a source | [
"Unschedule",
"an",
"harvesting",
"on",
"a",
"source"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/actions.py#L221-L230 | train |
opendatateam/udata | udata/harvest/actions.py | attach | def attach(domain, filename):
'''Attach existing datasets to their harvest remote id before harvesting.
The expected csv file format is the following:
- a column with header "local" and the local IDs or slugs
- a column with header "remote" and the remote IDs
The delimiter should be ";". Column order
and extra columns do not matter.
'''
count = 0
errors = 0
with open(filename) as csvfile:
reader = csv.DictReader(csvfile,
delimiter=b';',
quotechar=b'"')
for row in reader:
try:
dataset = Dataset.objects.get(id=ObjectId(row['local']))
except: # noqa (Never stop on failure)
log.warning('Unable to attach dataset : %s', row['local'])
errors += 1
continue
# Detach previously attached dataset
Dataset.objects(**{
'extras__harvest:domain': domain,
'extras__harvest:remote_id': row['remote']
}).update(**{
'unset__extras__harvest:domain': True,
'unset__extras__harvest:remote_id': True
})
dataset.extras['harvest:domain'] = domain
dataset.extras['harvest:remote_id'] = row['remote']
dataset.last_modified = datetime.now()
dataset.save()
count += 1
return AttachResult(count, errors) | python | def attach(domain, filename):
'''Attach existing datasets to their harvest remote id before harvesting.
The expected csv file format is the following:
- a column with header "local" and the local IDs or slugs
- a column with header "remote" and the remote IDs
The delimiter should be ";". Column order
and extra columns do not matter.
'''
count = 0
errors = 0
with open(filename) as csvfile:
reader = csv.DictReader(csvfile,
delimiter=b';',
quotechar=b'"')
for row in reader:
try:
dataset = Dataset.objects.get(id=ObjectId(row['local']))
except: # noqa (Never stop on failure)
log.warning('Unable to attach dataset : %s', row['local'])
errors += 1
continue
# Detach previously attached dataset
Dataset.objects(**{
'extras__harvest:domain': domain,
'extras__harvest:remote_id': row['remote']
}).update(**{
'unset__extras__harvest:domain': True,
'unset__extras__harvest:remote_id': True
})
dataset.extras['harvest:domain'] = domain
dataset.extras['harvest:remote_id'] = row['remote']
dataset.last_modified = datetime.now()
dataset.save()
count += 1
return AttachResult(count, errors) | [
"def",
"attach",
"(",
"domain",
",",
"filename",
")",
":",
"count",
"=",
"0",
"errors",
"=",
"0",
"with",
"open",
"(",
"filename",
")",
"as",
"csvfile",
":",
"reader",
"=",
"csv",
".",
"DictReader",
"(",
"csvfile",
",",
"delimiter",
"=",
"b';'",
",",
"quotechar",
"=",
"b'\"'",
")",
"for",
"row",
"in",
"reader",
":",
"try",
":",
"dataset",
"=",
"Dataset",
".",
"objects",
".",
"get",
"(",
"id",
"=",
"ObjectId",
"(",
"row",
"[",
"'local'",
"]",
")",
")",
"except",
":",
"# noqa (Never stop on failure)",
"log",
".",
"warning",
"(",
"'Unable to attach dataset : %s'",
",",
"row",
"[",
"'local'",
"]",
")",
"errors",
"+=",
"1",
"continue",
"# Detach previously attached dataset",
"Dataset",
".",
"objects",
"(",
"*",
"*",
"{",
"'extras__harvest:domain'",
":",
"domain",
",",
"'extras__harvest:remote_id'",
":",
"row",
"[",
"'remote'",
"]",
"}",
")",
".",
"update",
"(",
"*",
"*",
"{",
"'unset__extras__harvest:domain'",
":",
"True",
",",
"'unset__extras__harvest:remote_id'",
":",
"True",
"}",
")",
"dataset",
".",
"extras",
"[",
"'harvest:domain'",
"]",
"=",
"domain",
"dataset",
".",
"extras",
"[",
"'harvest:remote_id'",
"]",
"=",
"row",
"[",
"'remote'",
"]",
"dataset",
".",
"last_modified",
"=",
"datetime",
".",
"now",
"(",
")",
"dataset",
".",
"save",
"(",
")",
"count",
"+=",
"1",
"return",
"AttachResult",
"(",
"count",
",",
"errors",
")"
] | Attach existing datasets to their harvest remote id before harvesting.
The expected csv file format is the following:
- a column with header "local" and the local IDs or slugs
- a column with header "remote" and the remote IDs
The delimiter should be ";". Column order
and extra columns do not matter. | [
"Attach",
"existing",
"dataset",
"to",
"their",
"harvest",
"remote",
"id",
"before",
"harvesting",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/actions.py#L236-L276 | train |
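
A hedged end-to-end sketch of the CSV contract described in the docstring; the domain, IDs and filename are all placeholders, and `AttachResult` is assumed to be the 2-tuple its construction suggests:

```python
from udata.harvest.actions import attach

with open('mapping.csv', 'w') as f:
    f.write('local;remote\n')                          # required headers
    f.write('5a1b2c3d4e5f60708090a0b1;remote-42\n')    # placeholder IDs

# Assumed 2-tuple unpacking: (attached count, error count).
attached, errors = attach('data.example.org', 'mapping.csv')
print(attached, errors)
```
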
opendatateam/udata | udata/models/extras_fields.py | ExtrasField.register | def register(self, key, dbtype):
'''Register a DB type to add a constraint on a given extra key'''
if not issubclass(dbtype, (BaseField, EmbeddedDocument)):
msg = 'ExtrasField can only register MongoEngine fields'
raise TypeError(msg)
self.registered[key] = dbtype | python | def register(self, key, dbtype):
'''Register a DB type to add a constraint on a given extra key'''
if not issubclass(dbtype, (BaseField, EmbeddedDocument)):
msg = 'ExtrasField can only register MongoEngine fields'
raise TypeError(msg)
self.registered[key] = dbtype | [
"def",
"register",
"(",
"self",
",",
"key",
",",
"dbtype",
")",
":",
"if",
"not",
"issubclass",
"(",
"dbtype",
",",
"(",
"BaseField",
",",
"EmbeddedDocument",
")",
")",
":",
"msg",
"=",
"'ExtrasField can only register MongoEngine fields'",
"raise",
"TypeError",
"(",
"msg",
")",
"self",
".",
"registered",
"[",
"key",
"]",
"=",
"dbtype"
] | Register a DB type to add a constraint on a given extra key | [
"Register",
"a",
"DB",
"type",
"to",
"add",
"constraint",
"on",
"a",
"given",
"extra",
"key"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/models/extras_fields.py#L22-L27 | train |
opendatateam/udata | udata/core/user/commands.py | delete | def delete():
'''Delete an existing user'''
email = click.prompt('Email')
user = User.objects(email=email).first()
if not user:
exit_with_error('Invalid user')
user.delete()
success('User deleted successfully') | python | def delete():
'''Delete an existing user'''
email = click.prompt('Email')
user = User.objects(email=email).first()
if not user:
exit_with_error('Invalid user')
user.delete()
success('User deleted successfully') | [
"def",
"delete",
"(",
")",
":",
"email",
"=",
"click",
".",
"prompt",
"(",
"'Email'",
")",
"user",
"=",
"User",
".",
"objects",
"(",
"email",
"=",
"email",
")",
".",
"first",
"(",
")",
"if",
"not",
"user",
":",
"exit_with_error",
"(",
"'Invalid user'",
")",
"user",
".",
"delete",
"(",
")",
"success",
"(",
"'User deleted successfully'",
")"
] | Delete an existing user | [
"Delete",
"an",
"existing",
"user"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/user/commands.py#L66-L73 | train |
opendatateam/udata | udata/core/user/commands.py | set_admin | def set_admin(email):
'''Set a user as administrator'''
user = datastore.get_user(email)
log.info('Adding admin role to user %s (%s)', user.fullname, user.email)
role = datastore.find_or_create_role('admin')
datastore.add_role_to_user(user, role)
success('User %s (%s) is now administrator' % (user.fullname, user.email)) | python | def set_admin(email):
'''Set a user as administrator'''
user = datastore.get_user(email)
log.info('Adding admin role to user %s (%s)', user.fullname, user.email)
role = datastore.find_or_create_role('admin')
datastore.add_role_to_user(user, role)
success('User %s (%s) is now administrator' % (user.fullname, user.email)) | [
"def",
"set_admin",
"(",
"email",
")",
":",
"user",
"=",
"datastore",
".",
"get_user",
"(",
"email",
")",
"log",
".",
"info",
"(",
"'Adding admin role to user %s (%s)'",
",",
"user",
".",
"fullname",
",",
"user",
".",
"email",
")",
"role",
"=",
"datastore",
".",
"find_or_create_role",
"(",
"'admin'",
")",
"datastore",
".",
"add_role_to_user",
"(",
"user",
",",
"role",
")",
"success",
"(",
"'User %s (%s) is now administrator'",
"%",
"(",
"user",
".",
"fullname",
",",
"user",
".",
"email",
")",
")"
] | Set a user as administrator | [
"Set",
"an",
"user",
"as",
"administrator"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/user/commands.py#L78-L84 | train |
opendatateam/udata | udata/core/storages/api.py | combine_chunks | def combine_chunks(storage, args, prefix=None):
'''
Combine a chunked file into a whole file again.
Goes through each part, in order,
and appends that part's bytes to another destination file.
Chunks are stored in the chunks storage.
'''
uuid = args['uuid']
# Normalize filename including extension
target = utils.normalize(args['filename'])
if prefix:
target = os.path.join(prefix, target)
with storage.open(target, 'wb') as out:
for i in xrange(args['totalparts']):
partname = chunk_filename(uuid, i)
out.write(chunks.read(partname))
chunks.delete(partname)
chunks.delete(chunk_filename(uuid, META))
return target | python | def combine_chunks(storage, args, prefix=None):
'''
Combine a chunked file into a whole file again.
Goes through each part, in order,
and appends that part's bytes to another destination file.
Chunks are stored in the chunks storage.
'''
uuid = args['uuid']
# Normalize filename including extension
target = utils.normalize(args['filename'])
if prefix:
target = os.path.join(prefix, target)
with storage.open(target, 'wb') as out:
for i in xrange(args['totalparts']):
partname = chunk_filename(uuid, i)
out.write(chunks.read(partname))
chunks.delete(partname)
chunks.delete(chunk_filename(uuid, META))
return target | [
"def",
"combine_chunks",
"(",
"storage",
",",
"args",
",",
"prefix",
"=",
"None",
")",
":",
"uuid",
"=",
"args",
"[",
"'uuid'",
"]",
"# Normalize filename including extension",
"target",
"=",
"utils",
".",
"normalize",
"(",
"args",
"[",
"'filename'",
"]",
")",
"if",
"prefix",
":",
"target",
"=",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
",",
"target",
")",
"with",
"storage",
".",
"open",
"(",
"target",
",",
"'wb'",
")",
"as",
"out",
":",
"for",
"i",
"in",
"xrange",
"(",
"args",
"[",
"'totalparts'",
"]",
")",
":",
"partname",
"=",
"chunk_filename",
"(",
"uuid",
",",
"i",
")",
"out",
".",
"write",
"(",
"chunks",
".",
"read",
"(",
"partname",
")",
")",
"chunks",
".",
"delete",
"(",
"partname",
")",
"chunks",
".",
"delete",
"(",
"chunk_filename",
"(",
"uuid",
",",
"META",
")",
")",
"return",
"target"
] | Combine a chunked file into a whole file again.
Goes through each part, in order,
and appends that part's bytes to another destination file.
Chunks are stored in the chunks storage. | [
"Combine",
"a",
"chunked",
"file",
"into",
"a",
"whole",
"file",
"again",
".",
"Goes",
"through",
"each",
"part",
"in",
"order",
"and",
"appends",
"that",
"part",
"s",
"bytes",
"to",
"another",
"destination",
"file",
".",
"Chunks",
"are",
"stored",
"in",
"the",
"chunks",
"storage",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/storages/api.py#L111-L129 | train |
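The record above glues chunked uploads back together through udata's storage abstraction. Below is a minimal, self-contained sketch of the same ordered-append pattern using only the standard library; the chunk naming scheme and directory layout are illustrative assumptions, not udata's actual storage API.

import os
import tempfile

def chunk_filename(uuid, part):
    # Hypothetical naming scheme: one file per part under a shared prefix.
    return '{0}.part{1}'.format(uuid, part)

def combine_chunks(chunk_dir, uuid, totalparts, target):
    # Append each part, in order, to the destination file,
    # deleting every consumed chunk as we go.
    with open(target, 'wb') as out:
        for i in range(totalparts):
            part = os.path.join(chunk_dir, chunk_filename(uuid, i))
            with open(part, 'rb') as chunk:
                out.write(chunk.read())
            os.remove(part)
    return target

# Usage: write three fake chunks, then reassemble them.
tmp = tempfile.mkdtemp()
for i, payload in enumerate([b'foo', b'bar', b'baz']):
    with open(os.path.join(tmp, chunk_filename('abc', i)), 'wb') as f:
        f.write(payload)
whole = combine_chunks(tmp, 'abc', 3, os.path.join(tmp, 'whole.bin'))
assert open(whole, 'rb').read() == b'foobarbaz'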
opendatateam/udata | udata/models/__init__.py | UDataMongoEngine.resolve_model | def resolve_model(self, model):
'''
Resolve a model given a name or dict with `class` entry.
:raises ValueError: model specification is wrong or does not exist
'''
if not model:
raise ValueError('Unsupported model specifications')
if isinstance(model, basestring):
classname = model
elif isinstance(model, dict) and 'class' in model:
classname = model['class']
else:
raise ValueError('Unsupported model specifications')
try:
return get_document(classname)
except self.NotRegistered:
message = 'Model "{0}" does not exist'.format(classname)
raise ValueError(message) | python | def resolve_model(self, model):
'''
Resolve a model given a name or dict with `class` entry.
:raises ValueError: model specification is wrong or does not exist
'''
if not model:
raise ValueError('Unsupported model specifications')
if isinstance(model, basestring):
classname = model
elif isinstance(model, dict) and 'class' in model:
classname = model['class']
else:
raise ValueError('Unsupported model specifications')
try:
return get_document(classname)
except self.NotRegistered:
message = 'Model "{0}" does not exist'.format(classname)
raise ValueError(message) | [
"def",
"resolve_model",
"(",
"self",
",",
"model",
")",
":",
"if",
"not",
"model",
":",
"raise",
"ValueError",
"(",
"'Unsupported model specifications'",
")",
"if",
"isinstance",
"(",
"model",
",",
"basestring",
")",
":",
"classname",
"=",
"model",
"elif",
"isinstance",
"(",
"model",
",",
"dict",
")",
"and",
"'class'",
"in",
"model",
":",
"classname",
"=",
"model",
"[",
"'class'",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'Unsupported model specifications'",
")",
"try",
":",
"return",
"get_document",
"(",
"classname",
")",
"except",
"self",
".",
"NotRegistered",
":",
"message",
"=",
"'Model \"{0}\" does not exist'",
".",
"format",
"(",
"classname",
")",
"raise",
"ValueError",
"(",
"message",
")"
] | Resolve a model given a name or dict with `class` entry.
:raises ValueError: model specification is wrong or does not exist | [
"Resolve",
"a",
"model",
"given",
"a",
"name",
"or",
"dict",
"with",
"class",
"entry",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/models/__init__.py#L60-L79 | train |
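`resolve_model` is essentially a name-or-dict lookup against mongoengine's document registry. A hedged, dependency-free sketch of the same resolution logic against a plain dict (the registry contents are made up, and the dict stands in for `get_document`):

REGISTRY = {'Dataset': type('Dataset', (), {}),
            'Reuse': type('Reuse', (), {})}

def resolve_model(model):
    if not model:
        raise ValueError('Unsupported model specifications')
    if isinstance(model, str):
        classname = model
    elif isinstance(model, dict) and 'class' in model:
        classname = model['class']
    else:
        raise ValueError('Unsupported model specifications')
    try:
        return REGISTRY[classname]  # stands in for get_document(classname)
    except KeyError:
        raise ValueError('Model "{0}" does not exist'.format(classname))

assert resolve_model('Dataset').__name__ == 'Dataset'
assert resolve_model({'class': 'Reuse'}).__name__ == 'Reuse'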
opendatateam/udata | udata/frontend/helpers.py | tooltip_ellipsis | def tooltip_ellipsis(source, length=0):
''' return the plain text representation of markdown encoded text. That
is the text without any HTML tags. If ``length`` is 0 then it
will not be truncated.'''
try:
length = int(length)
except ValueError: # invalid literal for int()
return source # Fail silently.
ellipsis = '<a href v-tooltip title="{0}">...</a>'.format(source)
return Markup((source[:length] + ellipsis)
if len(source) > length and length > 0 else source) | python | def tooltip_ellipsis(source, length=0):
''' return the plain text representation of markdown encoded text. That
is the text without any HTML tags. If ``length`` is 0 then it
will not be truncated.'''
try:
length = int(length)
except ValueError: # invalid literal for int()
return source # Fail silently.
ellipsis = '<a href v-tooltip title="{0}">...</a>'.format(source)
return Markup((source[:length] + ellipsis)
if len(source) > length and length > 0 else source) | [
"def",
"tooltip_ellipsis",
"(",
"source",
",",
"length",
"=",
"0",
")",
":",
"try",
":",
"length",
"=",
"int",
"(",
"length",
")",
"except",
"ValueError",
":",
"# invalid literal for int()",
"return",
"source",
"# Fail silently.",
"ellipsis",
"=",
"'<a href v-tooltip title=\"{0}\">...</a>'",
".",
"format",
"(",
"source",
")",
"return",
"Markup",
"(",
"(",
"source",
"[",
":",
"length",
"]",
"+",
"ellipsis",
")",
"if",
"len",
"(",
"source",
")",
">",
"length",
"and",
"length",
">",
"0",
"else",
"source",
")"
] | return the plain text representation of markdown encoded text. That
is the text without any HTML tags. If ``length`` is 0 then it
will not be truncated. | [
"return",
"the",
"plain",
"text",
"representation",
"of",
"markdown",
"encoded",
"text",
".",
"That",
"is",
"the",
"texted",
"without",
"any",
"html",
"tags",
".",
"If",
"length",
"is",
"0",
"then",
"it",
"will",
"not",
"be",
"truncated",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/frontend/helpers.py#L254-L264 | train |
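To see the truncation rule in isolation: only strings longer than a positive `length` get cut, and the full text is carried into the tooltip. The sketch below assumes the markupsafe package (the library behind Jinja2's `Markup`) and additionally escapes the interpolated text, which the snippet above does not:

from markupsafe import Markup

def tooltip_ellipsis(source, length=0):
    try:
        length = int(length)
    except ValueError:
        return source  # fail silently on a bad length
    tooltip = Markup('<a href v-tooltip title="{0}">...</a>').format(source)
    if 0 < length < len(source):
        return Markup('{0}{1}').format(source[:length], tooltip)
    return source

print(tooltip_ellipsis('A fairly long description', 8))
# -> A fairly<a href v-tooltip title="A fairly long description">...</a>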
opendatateam/udata | udata/frontend/helpers.py | filesize | def filesize(value):
'''Display a human readable filesize'''
suffix = 'o'
for unit in '', 'K', 'M', 'G', 'T', 'P', 'E', 'Z':
if abs(value) < 1024.0:
return "%3.1f%s%s" % (value, unit, suffix)
value /= 1024.0
return "%.1f%s%s" % (value, 'Y', suffix) | python | def filesize(value):
'''Display a human readable filesize'''
suffix = 'o'
for unit in '', 'K', 'M', 'G', 'T', 'P', 'E', 'Z':
if abs(value) < 1024.0:
return "%3.1f%s%s" % (value, unit, suffix)
value /= 1024.0
return "%.1f%s%s" % (value, 'Y', suffix) | [
"def",
"filesize",
"(",
"value",
")",
":",
"suffix",
"=",
"'o'",
"for",
"unit",
"in",
"''",
",",
"'K'",
",",
"'M'",
",",
"'G'",
",",
"'T'",
",",
"'P'",
",",
"'E'",
",",
"'Z'",
":",
"if",
"abs",
"(",
"value",
")",
"<",
"1024.0",
":",
"return",
"\"%3.1f%s%s\"",
"%",
"(",
"value",
",",
"unit",
",",
"suffix",
")",
"value",
"/=",
"1024.0",
"return",
"\"%.1f%s%s\"",
"%",
"(",
"value",
",",
"'Y'",
",",
"suffix",
")"
] | Display a human readable filesize | [
"Display",
"a",
"human",
"readable",
"filesize"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/frontend/helpers.py#L399-L406 | train |
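A few worked values make the loop concrete: the value is divided by 1024 until it drops below the next unit boundary, and the trailing 'o' stands for octet, the French convention for bytes. The function is copied here verbatim so the example runs stand-alone:

def filesize(value):
    suffix = 'o'  # octet, i.e. byte
    for unit in '', 'K', 'M', 'G', 'T', 'P', 'E', 'Z':
        if abs(value) < 1024.0:
            return '%3.1f%s%s' % (value, unit, suffix)
        value /= 1024.0
    return '%.1f%s%s' % (value, 'Y', suffix)

assert filesize(512) == '512.0o'
assert filesize(2048) == '2.0Ko'
assert filesize(5 * 1024 ** 3) == '5.0Go'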
opendatateam/udata | udata/rdf.py | negociate_content | def negociate_content(default='json-ld'):
'''Perform a content negotiation on the format given the Accept header'''
mimetype = request.accept_mimetypes.best_match(ACCEPTED_MIME_TYPES.keys())
return ACCEPTED_MIME_TYPES.get(mimetype, default) | python | def negociate_content(default='json-ld'):
'''Perform a content negotiation on the format given the Accept header'''
mimetype = request.accept_mimetypes.best_match(ACCEPTED_MIME_TYPES.keys())
return ACCEPTED_MIME_TYPES.get(mimetype, default) | [
"def",
"negociate_content",
"(",
"default",
"=",
"'json-ld'",
")",
":",
"mimetype",
"=",
"request",
".",
"accept_mimetypes",
".",
"best_match",
"(",
"ACCEPTED_MIME_TYPES",
".",
"keys",
"(",
")",
")",
"return",
"ACCEPTED_MIME_TYPES",
".",
"get",
"(",
"mimetype",
",",
"default",
")"
] | Perform a content negotiation on the format given the Accept header | [
"Perform",
"a",
"content",
"negociation",
"on",
"the",
"format",
"given",
"the",
"Accept",
"header"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/rdf.py#L97-L100 | train |
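Outside a Flask request context the same negotiation can be exercised directly with werkzeug's `MIMEAccept`, the class behind `request.accept_mimetypes`. The mapping below is a hypothetical subset of the real `ACCEPTED_MIME_TYPES`:

from werkzeug.datastructures import MIMEAccept

ACCEPTED_MIME_TYPES = {
    'application/ld+json': 'json-ld',
    'text/turtle': 'ttl',
    'application/rdf+xml': 'xml',
}

def negociate_content(accept, default='json-ld'):
    mimetype = accept.best_match(ACCEPTED_MIME_TYPES.keys())
    return ACCEPTED_MIME_TYPES.get(mimetype, default)

# Client prefers turtle over JSON-LD:
accept = MIMEAccept([('text/turtle', 1), ('application/ld+json', 0.5)])
assert negociate_content(accept) == 'ttl'
# No parseable preference falls back to the default:
assert negociate_content(MIMEAccept([])) == 'json-ld'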
opendatateam/udata | udata/rdf.py | url_from_rdf | def url_from_rdf(rdf, prop):
'''
Try to extract a URL from a resource property.
It can be expressed in many forms as a URIRef or a Literal
'''
value = rdf.value(prop)
if isinstance(value, (URIRef, Literal)):
return value.toPython()
elif isinstance(value, RdfResource):
return value.identifier.toPython() | python | def url_from_rdf(rdf, prop):
'''
Try to extract a URL from a resource property.
It can be expressed in many forms as a URIRef or a Literal
'''
value = rdf.value(prop)
if isinstance(value, (URIRef, Literal)):
return value.toPython()
elif isinstance(value, RdfResource):
return value.identifier.toPython() | [
"def",
"url_from_rdf",
"(",
"rdf",
",",
"prop",
")",
":",
"value",
"=",
"rdf",
".",
"value",
"(",
"prop",
")",
"if",
"isinstance",
"(",
"value",
",",
"(",
"URIRef",
",",
"Literal",
")",
")",
":",
"return",
"value",
".",
"toPython",
"(",
")",
"elif",
"isinstance",
"(",
"value",
",",
"RdfResource",
")",
":",
"return",
"value",
".",
"identifier",
".",
"toPython",
"(",
")"
] | Try to extract a URL from a resource property.
It can be expressed in many forms as a URIRef or a Literal | [
"Try",
"to",
"extract",
"An",
"URL",
"from",
"a",
"resource",
"property",
".",
"It",
"can",
"be",
"expressed",
"in",
"many",
"forms",
"as",
"a",
"URIRef",
"or",
"a",
"Literal"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/rdf.py#L224-L233 | train |
opendatateam/udata | udata/rdf.py | graph_response | def graph_response(graph, format):
'''
Return a proper Flask response for an RDF resource given an expected format.
'''
fmt = guess_format(format)
if not fmt:
abort(404)
headers = {
'Content-Type': RDF_MIME_TYPES[fmt]
}
kwargs = {}
if fmt == 'json-ld':
kwargs['context'] = context
if isinstance(graph, RdfResource):
graph = graph.graph
return graph.serialize(format=fmt, **kwargs), 200, headers | python | def graph_response(graph, format):
'''
Return a proper Flask response for an RDF resource given an expected format.
'''
fmt = guess_format(format)
if not fmt:
abort(404)
headers = {
'Content-Type': RDF_MIME_TYPES[fmt]
}
kwargs = {}
if fmt == 'json-ld':
kwargs['context'] = context
if isinstance(graph, RdfResource):
graph = graph.graph
return graph.serialize(format=fmt, **kwargs), 200, headers | [
"def",
"graph_response",
"(",
"graph",
",",
"format",
")",
":",
"fmt",
"=",
"guess_format",
"(",
"format",
")",
"if",
"not",
"fmt",
":",
"abort",
"(",
"404",
")",
"headers",
"=",
"{",
"'Content-Type'",
":",
"RDF_MIME_TYPES",
"[",
"fmt",
"]",
"}",
"kwargs",
"=",
"{",
"}",
"if",
"fmt",
"==",
"'json-ld'",
":",
"kwargs",
"[",
"'context'",
"]",
"=",
"context",
"if",
"isinstance",
"(",
"graph",
",",
"RdfResource",
")",
":",
"graph",
"=",
"graph",
".",
"graph",
"return",
"graph",
".",
"serialize",
"(",
"format",
"=",
"fmt",
",",
"*",
"*",
"kwargs",
")",
",",
"200",
",",
"headers"
] | Return a proper Flask response for an RDF resource given an expected format. | [
"Return",
"a",
"proper",
"flask",
"response",
"for",
"a",
"RDF",
"resource",
"given",
"an",
"expected",
"format",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/rdf.py#L236-L251 | train |
opendatateam/udata | udata/core/spatial/models.py | GeoZoneQuerySet.valid_at | def valid_at(self, valid_date):
'''Limit current QuerySet to zones valid at a given date'''
is_valid = db.Q(validity__end__gt=valid_date,
validity__start__lte=valid_date)
no_validity = db.Q(validity=None)
return self(is_valid | no_validity) | python | def valid_at(self, valid_date):
'''Limit current QuerySet to zones valid at a given date'''
is_valid = db.Q(validity__end__gt=valid_date,
validity__start__lte=valid_date)
no_validity = db.Q(validity=None)
return self(is_valid | no_validity) | [
"def",
"valid_at",
"(",
"self",
",",
"valid_date",
")",
":",
"is_valid",
"=",
"db",
".",
"Q",
"(",
"validity__end__gt",
"=",
"valid_date",
",",
"validity__start__lte",
"=",
"valid_date",
")",
"no_validity",
"=",
"db",
".",
"Q",
"(",
"validity",
"=",
"None",
")",
"return",
"self",
"(",
"is_valid",
"|",
"no_validity",
")"
] | Limit current QuerySet to zones valid at a given date | [
"Limit",
"current",
"QuerySet",
"to",
"zone",
"valid",
"at",
"a",
"given",
"date"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/spatial/models.py#L44-L49 | train |
opendatateam/udata | udata/core/spatial/models.py | GeoZoneQuerySet.resolve | def resolve(self, geoid, id_only=False):
'''
Resolve a GeoZone given a GeoID.
The start date is resolved from the given GeoID,
i.e. it finds whether there is a zone valid at the geoid validity,
resolves the `latest` alias
or uses `latest` when no validity is given.
If `id_only` is True,
the result will be the resolved GeoID
instead of the resolved zone.
'''
level, code, validity = geoids.parse(geoid)
qs = self(level=level, code=code)
if id_only:
qs = qs.only('id')
if validity == 'latest':
result = qs.latest()
else:
result = qs.valid_at(validity).first()
return result.id if id_only and result else result | python | def resolve(self, geoid, id_only=False):
'''
Resolve a GeoZone given a GeoID.
The start date is resolved from the given GeoID,
i.e. it finds whether there is a zone valid at the geoid validity,
resolves the `latest` alias
or uses `latest` when no validity is given.
If `id_only` is True,
the result will be the resolved GeoID
instead of the resolved zone.
'''
level, code, validity = geoids.parse(geoid)
qs = self(level=level, code=code)
if id_only:
qs = qs.only('id')
if validity == 'latest':
result = qs.latest()
else:
result = qs.valid_at(validity).first()
return result.id if id_only and result else result | [
"def",
"resolve",
"(",
"self",
",",
"geoid",
",",
"id_only",
"=",
"False",
")",
":",
"level",
",",
"code",
",",
"validity",
"=",
"geoids",
".",
"parse",
"(",
"geoid",
")",
"qs",
"=",
"self",
"(",
"level",
"=",
"level",
",",
"code",
"=",
"code",
")",
"if",
"id_only",
":",
"qs",
"=",
"qs",
".",
"only",
"(",
"'id'",
")",
"if",
"validity",
"==",
"'latest'",
":",
"result",
"=",
"qs",
".",
"latest",
"(",
")",
"else",
":",
"result",
"=",
"qs",
".",
"valid_at",
"(",
"validity",
")",
".",
"first",
"(",
")",
"return",
"result",
".",
"id",
"if",
"id_only",
"and",
"result",
"else",
"result"
] | Resolve a GeoZone given a GeoID.
The start date is resolved from the given GeoID,
i.e. it finds whether there is a zone valid at the geoid validity,
resolves the `latest` alias
or uses `latest` when no validity is given.
If `id_only` is True,
the result will be the resolved GeoID
instead of the resolved zone. | [
"Resolve",
"a",
"GeoZone",
"given",
"a",
"GeoID",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/spatial/models.py#L60-L81 | train |
opendatateam/udata | udata/core/spatial/models.py | GeoZone.keys_values | def keys_values(self):
"""Key values might be a list or not, always return a list."""
keys_values = []
for value in self.keys.values():
if isinstance(value, list):
keys_values += value
elif isinstance(value, basestring) and not value.startswith('-'):
# Avoid -99. Should be fixed in geozones
keys_values.append(value)
elif isinstance(value, int) and value >= 0:
# Avoid -99. Should be fixed in geozones
keys_values.append(str(value))
return keys_values | python | def keys_values(self):
"""Key values might be a list or not, always return a list."""
keys_values = []
for value in self.keys.values():
if isinstance(value, list):
keys_values += value
elif isinstance(value, basestring) and not value.startswith('-'):
# Avoid -99. Should be fixed in geozones
keys_values.append(value)
elif isinstance(value, int) and value >= 0:
# Avoid -99. Should be fixed in geozones
keys_values.append(str(value))
return keys_values | [
"def",
"keys_values",
"(",
"self",
")",
":",
"keys_values",
"=",
"[",
"]",
"for",
"value",
"in",
"self",
".",
"keys",
".",
"values",
"(",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"keys_values",
"+=",
"value",
"elif",
"isinstance",
"(",
"value",
",",
"basestring",
")",
"and",
"not",
"value",
".",
"startswith",
"(",
"'-'",
")",
":",
"# Avoid -99. Should be fixed in geozones",
"keys_values",
".",
"append",
"(",
"value",
")",
"elif",
"isinstance",
"(",
"value",
",",
"int",
")",
"and",
"value",
">=",
"0",
":",
"# Avoid -99. Should be fixed in geozones",
"keys_values",
".",
"append",
"(",
"str",
"(",
"value",
")",
")",
"return",
"keys_values"
] | Key values might be a list or not, always return a list. | [
"Key",
"values",
"might",
"be",
"a",
"list",
"or",
"not",
"always",
"return",
"a",
"list",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/spatial/models.py#L134-L146 | train |
opendatateam/udata | udata/core/spatial/models.py | GeoZone.level_i18n_name | def level_i18n_name(self):
"""In use within templates for dynamic translations."""
for level, name in spatial_granularities:
if self.level == level:
return name
return self.level_name | python | def level_i18n_name(self):
"""In use within templates for dynamic translations."""
for level, name in spatial_granularities:
if self.level == level:
return name
return self.level_name | [
"def",
"level_i18n_name",
"(",
"self",
")",
":",
"for",
"level",
",",
"name",
"in",
"spatial_granularities",
":",
"if",
"self",
".",
"level",
"==",
"level",
":",
"return",
"name",
"return",
"self",
".",
"level_name"
] | In use within templates for dynamic translations. | [
"In",
"use",
"within",
"templates",
"for",
"dynamic",
"translations",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/spatial/models.py#L164-L169 | train |
opendatateam/udata | udata/core/spatial/models.py | GeoZone.ancestors_objects | def ancestors_objects(self):
"""Ancestors objects sorted by name."""
ancestors_objects = []
for ancestor in self.ancestors:
try:
ancestor_object = GeoZone.objects.get(id=ancestor)
except GeoZone.DoesNotExist:
continue
ancestors_objects.append(ancestor_object)
ancestors_objects.sort(key=lambda a: a.name)
return ancestors_objects | python | def ancestors_objects(self):
"""Ancestors objects sorted by name."""
ancestors_objects = []
for ancestor in self.ancestors:
try:
ancestor_object = GeoZone.objects.get(id=ancestor)
except GeoZone.DoesNotExist:
continue
ancestors_objects.append(ancestor_object)
ancestors_objects.sort(key=lambda a: a.name)
return ancestors_objects | [
"def",
"ancestors_objects",
"(",
"self",
")",
":",
"ancestors_objects",
"=",
"[",
"]",
"for",
"ancestor",
"in",
"self",
".",
"ancestors",
":",
"try",
":",
"ancestor_object",
"=",
"GeoZone",
".",
"objects",
".",
"get",
"(",
"id",
"=",
"ancestor",
")",
"except",
"GeoZone",
".",
"DoesNotExist",
":",
"continue",
"ancestors_objects",
".",
"append",
"(",
"ancestor_object",
")",
"ancestors_objects",
".",
"sort",
"(",
"key",
"=",
"lambda",
"a",
":",
"a",
".",
"name",
")",
"return",
"ancestors_objects"
] | Ancestors objects sorted by name. | [
"Ancestors",
"objects",
"sorted",
"by",
"name",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/spatial/models.py#L172-L182 | train |
opendatateam/udata | udata/core/spatial/models.py | GeoZone.child_level | def child_level(self):
"""Return the child level given handled levels."""
HANDLED_LEVELS = current_app.config.get('HANDLED_LEVELS')
try:
return HANDLED_LEVELS[HANDLED_LEVELS.index(self.level) - 1]
except (IndexError, ValueError):
return None | python | def child_level(self):
"""Return the child level given handled levels."""
HANDLED_LEVELS = current_app.config.get('HANDLED_LEVELS')
try:
return HANDLED_LEVELS[HANDLED_LEVELS.index(self.level) - 1]
except (IndexError, ValueError):
return None | [
"def",
"child_level",
"(",
"self",
")",
":",
"HANDLED_LEVELS",
"=",
"current_app",
".",
"config",
".",
"get",
"(",
"'HANDLED_LEVELS'",
")",
"try",
":",
"return",
"HANDLED_LEVELS",
"[",
"HANDLED_LEVELS",
".",
"index",
"(",
"self",
".",
"level",
")",
"-",
"1",
"]",
"except",
"(",
"IndexError",
",",
"ValueError",
")",
":",
"return",
"None"
] | Return the child level given handled levels. | [
"Return",
"the",
"child",
"level",
"given",
"handled",
"levels",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/spatial/models.py#L185-L191 | train |
opendatateam/udata | udata/harvest/backends/base.py | BaseBackend.harvest | def harvest(self):
'''Start the harvesting process'''
if self.perform_initialization() is not None:
self.process_items()
self.finalize()
return self.job | python | def harvest(self):
'''Start the harvesting process'''
if self.perform_initialization() is not None:
self.process_items()
self.finalize()
return self.job | [
"def",
"harvest",
"(",
"self",
")",
":",
"if",
"self",
".",
"perform_initialization",
"(",
")",
"is",
"not",
"None",
":",
"self",
".",
"process_items",
"(",
")",
"self",
".",
"finalize",
"(",
")",
"return",
"self",
".",
"job"
] | Start the harvesting process | [
"Start",
"the",
"harvesting",
"process"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/backends/base.py#L127-L132 | train |
opendatateam/udata | udata/harvest/backends/base.py | BaseBackend.perform_initialization | def perform_initialization(self):
'''Initialize the harvesting for a given job'''
log.debug('Initializing backend')
factory = HarvestJob if self.dryrun else HarvestJob.objects.create
self.job = factory(status='initializing',
started=datetime.now(),
source=self.source)
before_harvest_job.send(self)
try:
self.initialize()
self.job.status = 'initialized'
if not self.dryrun:
self.job.save()
except HarvestValidationError as e:
log.info('Initialization failed for "%s" (%s)',
safe_unicode(self.source.name), self.source.backend)
error = HarvestError(message=safe_unicode(e))
self.job.errors.append(error)
self.job.status = 'failed'
self.end()
return
except Exception as e:
self.job.status = 'failed'
error = HarvestError(message=safe_unicode(e))
self.job.errors.append(error)
self.end()
msg = 'Initialization failed for "{0.name}" ({0.backend})'
log.exception(msg.format(self.source))
return
if self.max_items:
self.job.items = self.job.items[:self.max_items]
if self.job.items:
log.debug('Queued %s items', len(self.job.items))
return len(self.job.items) | python | def perform_initialization(self):
'''Initialize the harvesting for a given job'''
log.debug('Initializing backend')
factory = HarvestJob if self.dryrun else HarvestJob.objects.create
self.job = factory(status='initializing',
started=datetime.now(),
source=self.source)
before_harvest_job.send(self)
try:
self.initialize()
self.job.status = 'initialized'
if not self.dryrun:
self.job.save()
except HarvestValidationError as e:
log.info('Initialization failed for "%s" (%s)',
safe_unicode(self.source.name), self.source.backend)
error = HarvestError(message=safe_unicode(e))
self.job.errors.append(error)
self.job.status = 'failed'
self.end()
return
except Exception as e:
self.job.status = 'failed'
error = HarvestError(message=safe_unicode(e))
self.job.errors.append(error)
self.end()
msg = 'Initialization failed for "{0.name}" ({0.backend})'
log.exception(msg.format(self.source))
return
if self.max_items:
self.job.items = self.job.items[:self.max_items]
if self.job.items:
log.debug('Queued %s items', len(self.job.items))
return len(self.job.items) | [
"def",
"perform_initialization",
"(",
"self",
")",
":",
"log",
".",
"debug",
"(",
"'Initializing backend'",
")",
"factory",
"=",
"HarvestJob",
"if",
"self",
".",
"dryrun",
"else",
"HarvestJob",
".",
"objects",
".",
"create",
"self",
".",
"job",
"=",
"factory",
"(",
"status",
"=",
"'initializing'",
",",
"started",
"=",
"datetime",
".",
"now",
"(",
")",
",",
"source",
"=",
"self",
".",
"source",
")",
"before_harvest_job",
".",
"send",
"(",
"self",
")",
"try",
":",
"self",
".",
"initialize",
"(",
")",
"self",
".",
"job",
".",
"status",
"=",
"'initialized'",
"if",
"not",
"self",
".",
"dryrun",
":",
"self",
".",
"job",
".",
"save",
"(",
")",
"except",
"HarvestValidationError",
"as",
"e",
":",
"log",
".",
"info",
"(",
"'Initialization failed for \"%s\" (%s)'",
",",
"safe_unicode",
"(",
"self",
".",
"source",
".",
"name",
")",
",",
"self",
".",
"source",
".",
"backend",
")",
"error",
"=",
"HarvestError",
"(",
"message",
"=",
"safe_unicode",
"(",
"e",
")",
")",
"self",
".",
"job",
".",
"errors",
".",
"append",
"(",
"error",
")",
"self",
".",
"job",
".",
"status",
"=",
"'failed'",
"self",
".",
"end",
"(",
")",
"return",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"job",
".",
"status",
"=",
"'failed'",
"error",
"=",
"HarvestError",
"(",
"message",
"=",
"safe_unicode",
"(",
"e",
")",
")",
"self",
".",
"job",
".",
"errors",
".",
"append",
"(",
"error",
")",
"self",
".",
"end",
"(",
")",
"msg",
"=",
"'Initialization failed for \"{0.name}\" ({0.backend})'",
"log",
".",
"exception",
"(",
"msg",
".",
"format",
"(",
"self",
".",
"source",
")",
")",
"return",
"if",
"self",
".",
"max_items",
":",
"self",
".",
"job",
".",
"items",
"=",
"self",
".",
"job",
".",
"items",
"[",
":",
"self",
".",
"max_items",
"]",
"if",
"self",
".",
"job",
".",
"items",
":",
"log",
".",
"debug",
"(",
"'Queued %s items'",
",",
"len",
"(",
"self",
".",
"job",
".",
"items",
")",
")",
"return",
"len",
"(",
"self",
".",
"job",
".",
"items",
")"
] | Initialize the harvesting for a given job | [
"Initialize",
"the",
"harvesting",
"for",
"a",
"given",
"job"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/backends/base.py#L134-L172 | train |
opendatateam/udata | udata/harvest/backends/base.py | BaseBackend.validate | def validate(self, data, schema):
'''Perform a data validation against a given schema.
:param data: an object to validate
:param schema: a Voluptuous schema to validate against
'''
try:
return schema(data)
except MultipleInvalid as ie:
errors = []
for error in ie.errors:
if error.path:
field = '.'.join(str(p) for p in error.path)
path = error.path
value = data
while path:
attr = path.pop(0)
try:
if isinstance(value, (list, tuple)):
attr = int(attr)
value = value[attr]
except Exception:
value = None
txt = safe_unicode(error).replace('for dictionary value', '')
txt = txt.strip()
if isinstance(error, RequiredFieldInvalid):
msg = '[{0}] {1}'
else:
msg = '[{0}] {1}: {2}'
try:
msg = msg.format(field, txt, str(value))
except Exception:
msg = '[{0}] {1}'.format(field, txt)
else:
msg = str(error)
errors.append(msg)
msg = '\n- '.join(['Validation error:'] + errors)
raise HarvestValidationError(msg) | python | def validate(self, data, schema):
'''Perform a data validation against a given schema.
:param data: an object to validate
:param schema: a Voluptuous schema to validate against
'''
try:
return schema(data)
except MultipleInvalid as ie:
errors = []
for error in ie.errors:
if error.path:
field = '.'.join(str(p) for p in error.path)
path = error.path
value = data
while path:
attr = path.pop(0)
try:
if isinstance(value, (list, tuple)):
attr = int(attr)
value = value[attr]
except Exception:
value = None
txt = safe_unicode(error).replace('for dictionary value', '')
txt = txt.strip()
if isinstance(error, RequiredFieldInvalid):
msg = '[{0}] {1}'
else:
msg = '[{0}] {1}: {2}'
try:
msg = msg.format(field, txt, str(value))
except Exception:
msg = '[{0}] {1}'.format(field, txt)
else:
msg = str(error)
errors.append(msg)
msg = '\n- '.join(['Validation error:'] + errors)
raise HarvestValidationError(msg) | [
"def",
"validate",
"(",
"self",
",",
"data",
",",
"schema",
")",
":",
"try",
":",
"return",
"schema",
"(",
"data",
")",
"except",
"MultipleInvalid",
"as",
"ie",
":",
"errors",
"=",
"[",
"]",
"for",
"error",
"in",
"ie",
".",
"errors",
":",
"if",
"error",
".",
"path",
":",
"field",
"=",
"'.'",
".",
"join",
"(",
"str",
"(",
"p",
")",
"for",
"p",
"in",
"error",
".",
"path",
")",
"path",
"=",
"error",
".",
"path",
"value",
"=",
"data",
"while",
"path",
":",
"attr",
"=",
"path",
".",
"pop",
"(",
"0",
")",
"try",
":",
"if",
"isinstance",
"(",
"value",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"attr",
"=",
"int",
"(",
"attr",
")",
"value",
"=",
"value",
"[",
"attr",
"]",
"except",
"Exception",
":",
"value",
"=",
"None",
"txt",
"=",
"safe_unicode",
"(",
"error",
")",
".",
"replace",
"(",
"'for dictionary value'",
",",
"''",
")",
"txt",
"=",
"txt",
".",
"strip",
"(",
")",
"if",
"isinstance",
"(",
"error",
",",
"RequiredFieldInvalid",
")",
":",
"msg",
"=",
"'[{0}] {1}'",
"else",
":",
"msg",
"=",
"'[{0}] {1}: {2}'",
"try",
":",
"msg",
"=",
"msg",
".",
"format",
"(",
"field",
",",
"txt",
",",
"str",
"(",
"value",
")",
")",
"except",
"Exception",
":",
"msg",
"=",
"'[{0}] {1}'",
".",
"format",
"(",
"field",
",",
"txt",
")",
"else",
":",
"msg",
"=",
"str",
"(",
"error",
")",
"errors",
".",
"append",
"(",
"msg",
")",
"msg",
"=",
"'\\n- '",
".",
"join",
"(",
"[",
"'Validation error:'",
"]",
"+",
"errors",
")",
"raise",
"HarvestValidationError",
"(",
"msg",
")"
] | Perform a data validation against a given schema.
:param data: an object to validate
:param schema: a Voluptuous schema to validate against | [
"Perform",
"a",
"data",
"validation",
"against",
"a",
"given",
"schema",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/backends/base.py#L265-L304 | train |
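The error-flattening loop above is mostly about turning Voluptuous's `error.path` lists into dotted field names. A condensed sketch, assuming the voluptuous package, showing the shape of the report for a payload with two problems:

from voluptuous import MultipleInvalid, Required, Schema

schema = Schema({Required('name'): str, 'tags': [str]})

def validate(data, schema):
    try:
        return schema(data)
    except MultipleInvalid as ie:
        # Join each error path into a dotted field name.
        errors = ['[{0}] {1}'.format('.'.join(str(p) for p in e.path), e.msg)
                  for e in ie.errors]
        raise ValueError('\n- '.join(['Validation error:'] + errors))

try:
    validate({'tags': ['ok', 42]}, schema)
except ValueError as e:
    print(e)  # one line for the missing 'name', one for 'tags.1'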
opendatateam/udata | udata/core/badges/api.py | add | def add(obj):
'''
Handle a badge add API.
- Expecting badge fields as payload
- Return the badge as payload
- Return 200 if the badge is already set
- Return 201 if the badge is added
'''
Form = badge_form(obj.__class__)
form = api.validate(Form)
kind = form.kind.data
badge = obj.get_badge(kind)
if badge:
return badge
else:
return obj.add_badge(kind), 201 | python | def add(obj):
'''
Handle a badge add API.
- Expecting badge fields as payload
- Return the badge as payload
- Return 200 if the badge is already set
- Return 201 if the badge is added
'''
Form = badge_form(obj.__class__)
form = api.validate(Form)
kind = form.kind.data
badge = obj.get_badge(kind)
if badge:
return badge
else:
return obj.add_badge(kind), 201 | [
"def",
"add",
"(",
"obj",
")",
":",
"Form",
"=",
"badge_form",
"(",
"obj",
".",
"__class__",
")",
"form",
"=",
"api",
".",
"validate",
"(",
"Form",
")",
"kind",
"=",
"form",
".",
"kind",
".",
"data",
"badge",
"=",
"obj",
".",
"get_badge",
"(",
"kind",
")",
"if",
"badge",
":",
"return",
"badge",
"else",
":",
"return",
"obj",
".",
"add_badge",
"(",
"kind",
")",
",",
"201"
] | Handle a badge add API.
- Expecting badge fields as payload
- Return the badge as payload
- Return 200 if the badge is already set
- Return 201 if the badge is added | [
"Handle",
"a",
"badge",
"add",
"API",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/badges/api.py#L16-L32 | train |
opendatateam/udata | udata/core/badges/api.py | remove | def remove(obj, kind):
'''
Handle badge removal API
- Returns 404 if the badge for this kind is absent
- Returns 204 on success
'''
if not obj.get_badge(kind):
api.abort(404, 'Badge does not exists')
obj.remove_badge(kind)
return '', 204 | python | def remove(obj, kind):
'''
Handle badge removal API
- Returns 404 if the badge for this kind is absent
- Returns 204 on success
'''
if not obj.get_badge(kind):
api.abort(404, 'Badge does not exists')
obj.remove_badge(kind)
return '', 204 | [
"def",
"remove",
"(",
"obj",
",",
"kind",
")",
":",
"if",
"not",
"obj",
".",
"get_badge",
"(",
"kind",
")",
":",
"api",
".",
"abort",
"(",
"404",
",",
"'Badge does not exists'",
")",
"obj",
".",
"remove_badge",
"(",
"kind",
")",
"return",
"''",
",",
"204"
] | Handle badge removal API
- Returns 404 if the badge for this kind is absent
- Returns 204 on success | [
"Handle",
"badge",
"removal",
"API"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/badges/api.py#L35-L45 | train |
opendatateam/udata | udata/features/territories/__init__.py | check_for_territories | def check_for_territories(query):
"""
Return a geozone queryset of territories given the `query`.
Results are sorted by population and area (biggest first).
"""
if not query or not current_app.config.get('ACTIVATE_TERRITORIES'):
return []
dbqs = db.Q()
query = query.lower()
is_digit = query.isdigit()
query_length = len(query)
for level in current_app.config.get('HANDLED_LEVELS'):
if level == 'country':
continue # Level not fully handled yet.
q = db.Q(level=level)
if (query_length == 2 and level == 'fr:departement' and
(is_digit or query in ('2a', '2b'))):
# Counties + Corsica.
q &= db.Q(code=query)
elif query_length == 3 and level == 'fr:departement' and is_digit:
# French DROM-COM.
q &= db.Q(code=query)
elif query_length == 5 and level == 'fr:commune' and (
is_digit or query.startswith('2a') or query.startswith('2b')):
# INSEE code then postal codes with Corsica exceptions.
q &= db.Q(code=query) | db.Q(keys__postal__contains=query)
elif query_length >= 4:
# Check names starting with query or exact match.
q &= db.Q(name__istartswith=query) | db.Q(name__iexact=query)
else:
continue
# Meta Q object, ready to be passed to a queryset.
dbqs |= q
if dbqs.empty:
return []
# Sort matching results by population and area.
return GeoZone.objects(dbqs).order_by('-population', '-area') | python | def check_for_territories(query):
"""
Return a geozone queryset of territories given the `query`.
Results are sorted by population and area (biggest first).
"""
if not query or not current_app.config.get('ACTIVATE_TERRITORIES'):
return []
dbqs = db.Q()
query = query.lower()
is_digit = query.isdigit()
query_length = len(query)
for level in current_app.config.get('HANDLED_LEVELS'):
if level == 'country':
continue # Level not fully handled yet.
q = db.Q(level=level)
if (query_length == 2 and level == 'fr:departement' and
(is_digit or query in ('2a', '2b'))):
# Counties + Corsica.
q &= db.Q(code=query)
elif query_length == 3 and level == 'fr:departement' and is_digit:
# French DROM-COM.
q &= db.Q(code=query)
elif query_length == 5 and level == 'fr:commune' and (
is_digit or query.startswith('2a') or query.startswith('2b')):
# INSEE code then postal codes with Corsica exceptions.
q &= db.Q(code=query) | db.Q(keys__postal__contains=query)
elif query_length >= 4:
# Check names starting with query or exact match.
q &= db.Q(name__istartswith=query) | db.Q(name__iexact=query)
else:
continue
# Meta Q object, ready to be passed to a queryset.
dbqs |= q
if dbqs.empty:
return []
# Sort matching results by population and area.
return GeoZone.objects(dbqs).order_by('-population', '-area') | [
"def",
"check_for_territories",
"(",
"query",
")",
":",
"if",
"not",
"query",
"or",
"not",
"current_app",
".",
"config",
".",
"get",
"(",
"'ACTIVATE_TERRITORIES'",
")",
":",
"return",
"[",
"]",
"dbqs",
"=",
"db",
".",
"Q",
"(",
")",
"query",
"=",
"query",
".",
"lower",
"(",
")",
"is_digit",
"=",
"query",
".",
"isdigit",
"(",
")",
"query_length",
"=",
"len",
"(",
"query",
")",
"for",
"level",
"in",
"current_app",
".",
"config",
".",
"get",
"(",
"'HANDLED_LEVELS'",
")",
":",
"if",
"level",
"==",
"'country'",
":",
"continue",
"# Level not fully handled yet.",
"q",
"=",
"db",
".",
"Q",
"(",
"level",
"=",
"level",
")",
"if",
"(",
"query_length",
"==",
"2",
"and",
"level",
"==",
"'fr:departement'",
"and",
"(",
"is_digit",
"or",
"query",
"in",
"(",
"'2a'",
",",
"'2b'",
")",
")",
")",
":",
"# Counties + Corsica.",
"q",
"&=",
"db",
".",
"Q",
"(",
"code",
"=",
"query",
")",
"elif",
"query_length",
"==",
"3",
"and",
"level",
"==",
"'fr:departement'",
"and",
"is_digit",
":",
"# French DROM-COM.",
"q",
"&=",
"db",
".",
"Q",
"(",
"code",
"=",
"query",
")",
"elif",
"query_length",
"==",
"5",
"and",
"level",
"==",
"'fr:commune'",
"and",
"(",
"is_digit",
"or",
"query",
".",
"startswith",
"(",
"'2a'",
")",
"or",
"query",
".",
"startswith",
"(",
"'2b'",
")",
")",
":",
"# INSEE code then postal codes with Corsica exceptions.",
"q",
"&=",
"db",
".",
"Q",
"(",
"code",
"=",
"query",
")",
"|",
"db",
".",
"Q",
"(",
"keys__postal__contains",
"=",
"query",
")",
"elif",
"query_length",
">=",
"4",
":",
"# Check names starting with query or exact match.",
"q",
"&=",
"db",
".",
"Q",
"(",
"name__istartswith",
"=",
"query",
")",
"|",
"db",
".",
"Q",
"(",
"name__iexact",
"=",
"query",
")",
"else",
":",
"continue",
"# Meta Q object, ready to be passed to a queryset.",
"dbqs",
"|=",
"q",
"if",
"dbqs",
".",
"empty",
":",
"return",
"[",
"]",
"# Sort matching results by population and area.",
"return",
"GeoZone",
".",
"objects",
"(",
"dbqs",
")",
".",
"order_by",
"(",
"'-population'",
",",
"'-area'",
")"
] | Return a geozone queryset of territories given the `query`.
Results are sorted by population and area (biggest first). | [
"Return",
"a",
"geozone",
"queryset",
"of",
"territories",
"given",
"the",
"query",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/features/territories/__init__.py#L9-L50 | train |
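The interesting part of the function above is how per-level clauses accumulate into one meta query. Building mongoengine `Q` nodes needs no database connection, so the composition can be sketched in isolation; the levels and fields below mirror two of the branches, everything else is an assumption:

from mongoengine import Q

query = '75056'
dbqs = Q()  # empty meta query
# 5-digit query: INSEE code or postal code at the commune level.
dbqs |= Q(level='fr:commune') & (
    Q(code=query) | Q(keys__postal__contains=query))
# 2-digit prefix: department code (Corsica aside).
dbqs |= Q(level='fr:departement') & Q(code=query[:2])
# GeoZone.objects(dbqs).order_by('-population', '-area') would execute it.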
opendatateam/udata | udata/core/spatial/geoids.py | build | def build(level, code, validity=None):
'''Serialize a GeoID from its parts'''
spatial = ':'.join((level, code))
if not validity:
return spatial
elif isinstance(validity, basestring):
return '@'.join((spatial, validity))
elif isinstance(validity, datetime):
return '@'.join((spatial, validity.date().isoformat()))
elif isinstance(validity, date):
return '@'.join((spatial, validity.isoformat()))
else:
msg = 'Unknown GeoID validity type: {0}'
raise GeoIDError(msg.format(type(validity).__name__)) | python | def build(level, code, validity=None):
'''Serialize a GeoID from its parts'''
spatial = ':'.join((level, code))
if not validity:
return spatial
elif isinstance(validity, basestring):
return '@'.join((spatial, validity))
elif isinstance(validity, datetime):
return '@'.join((spatial, validity.date().isoformat()))
elif isinstance(validity, date):
return '@'.join((spatial, validity.isoformat()))
else:
msg = 'Unknown GeoID validity type: {0}'
raise GeoIDError(msg.format(type(validity).__name__)) | [
"def",
"build",
"(",
"level",
",",
"code",
",",
"validity",
"=",
"None",
")",
":",
"spatial",
"=",
"':'",
".",
"join",
"(",
"(",
"level",
",",
"code",
")",
")",
"if",
"not",
"validity",
":",
"return",
"spatial",
"elif",
"isinstance",
"(",
"validity",
",",
"basestring",
")",
":",
"return",
"'@'",
".",
"join",
"(",
"(",
"spatial",
",",
"validity",
")",
")",
"elif",
"isinstance",
"(",
"validity",
",",
"datetime",
")",
":",
"return",
"'@'",
".",
"join",
"(",
"(",
"spatial",
",",
"validity",
".",
"date",
"(",
")",
".",
"isoformat",
"(",
")",
")",
")",
"elif",
"isinstance",
"(",
"validity",
",",
"date",
")",
":",
"return",
"'@'",
".",
"join",
"(",
"(",
"spatial",
",",
"validity",
".",
"isoformat",
"(",
")",
")",
")",
"else",
":",
"msg",
"=",
"'Unknown GeoID validity type: {0}'",
"raise",
"GeoIDError",
"(",
"msg",
".",
"format",
"(",
"type",
"(",
"validity",
")",
".",
"__name__",
")",
")"
] | Serialize a GeoID from its parts | [
"Serialize",
"a",
"GeoID",
"from",
"its",
"parts"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/spatial/geoids.py#L41-L54 | train |
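A GeoID is therefore `level:code[@validity]`, with the level itself allowed to contain colons (`fr:commune`). The round-trip below pairs a simplified serializer with a hypothetical `parse`; udata's real `geoids.parse` may differ in details such as a `latest` default, and this version only handles str or `datetime.date` validities:

from datetime import date

def build(level, code, validity=None):
    spatial = ':'.join((level, code))
    if not validity:
        return spatial
    if isinstance(validity, date):  # simplified: no datetime branch
        validity = validity.isoformat()
    return '@'.join((spatial, validity))

def parse(geoid):
    # Split the validity off first, then the code off the right,
    # since the level may contain ':' itself.
    spatial, _, validity = geoid.partition('@')
    level, _, code = spatial.rpartition(':')
    return level, code, validity or None

geoid = build('fr:commune', '75056', date(2016, 1, 1))
assert geoid == 'fr:commune:75056@2016-01-01'
assert parse(geoid) == ('fr:commune', '75056', '2016-01-01')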
opendatateam/udata | udata/core/spatial/geoids.py | from_zone | def from_zone(zone):
'''Build a GeoID from a given zone'''
validity = zone.validity.start if zone.validity else None
return build(zone.level, zone.code, validity) | python | def from_zone(zone):
'''Build a GeoID from a given zone'''
validity = zone.validity.start if zone.validity else None
return build(zone.level, zone.code, validity) | [
"def",
"from_zone",
"(",
"zone",
")",
":",
"validity",
"=",
"zone",
".",
"validity",
".",
"start",
"if",
"zone",
".",
"validity",
"else",
"None",
"return",
"build",
"(",
"zone",
".",
"level",
",",
"zone",
".",
"code",
",",
"validity",
")"
] | Build a GeoID from a given zone | [
"Build",
"a",
"GeoID",
"from",
"a",
"given",
"zone"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/spatial/geoids.py#L57-L60 | train |
opendatateam/udata | udata/core/dataset/rdf.py | temporal_from_rdf | def temporal_from_rdf(period_of_time):
'''Failsafe parsing of a temporal coverage'''
try:
if isinstance(period_of_time, Literal):
return temporal_from_literal(str(period_of_time))
elif isinstance(period_of_time, RdfResource):
return temporal_from_resource(period_of_time)
except Exception:
# There are a lot of cases where parsing could/should fail
# but we never want to break the whole dataset parsing
# so we log the error for future investigation and improvement
log.warning('Unable to parse temporal coverage', exc_info=True) | python | def temporal_from_rdf(period_of_time):
'''Failsafe parsing of a temporal coverage'''
try:
if isinstance(period_of_time, Literal):
return temporal_from_literal(str(period_of_time))
elif isinstance(period_of_time, RdfResource):
return temporal_from_resource(period_of_time)
except Exception:
# There are a lot of cases where parsing could/should fail
# but we never want to break the whole dataset parsing
# so we log the error for future investigation and improvement
log.warning('Unable to parse temporal coverage', exc_info=True) | [
"def",
"temporal_from_rdf",
"(",
"period_of_time",
")",
":",
"try",
":",
"if",
"isinstance",
"(",
"period_of_time",
",",
"Literal",
")",
":",
"return",
"temporal_from_literal",
"(",
"str",
"(",
"period_of_time",
")",
")",
"elif",
"isinstance",
"(",
"period_of_time",
",",
"RdfResource",
")",
":",
"return",
"temporal_from_resource",
"(",
"period_of_time",
")",
"except",
"Exception",
":",
"# There are a lot of cases where parsing could/should fail",
"# but we never want to break the whole dataset parsing",
"# so we log the error for future investigation and improvement",
"log",
".",
"warning",
"(",
"'Unable to parse temporal coverage'",
",",
"exc_info",
"=",
"True",
")"
] | Failsafe parsing of a temporal coverage | [
"Failsafe",
"parsing",
"of",
"a",
"temporal",
"coverage"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/rdf.py#L284-L295 | train |
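The failsafe contract matters more than the parsing itself: a malformed coverage must never break the whole dataset. A stand-alone sketch with a deliberately naive literal format ('start/end' ISO dates, an assumption; the real `temporal_from_literal` handles more shapes):

import logging
from datetime import date

log = logging.getLogger(__name__)

def temporal_from_literal(text):
    # 'YYYY-MM-DD/YYYY-MM-DD' only, for the sake of the sketch.
    start, end = text.split('/')
    return (date(*map(int, start.split('-'))),
            date(*map(int, end.split('-'))))

def temporal_from_rdf(literal):
    try:
        return temporal_from_literal(literal)
    except Exception:
        # Log for later investigation, but keep the harvest going.
        log.warning('Unable to parse temporal coverage', exc_info=True)

assert temporal_from_rdf('2016-01-01/2016-12-31') == (date(2016, 1, 1),
                                                      date(2016, 12, 31))
assert temporal_from_rdf('not-a-period') is None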
opendatateam/udata | udata/core/dataset/rdf.py | title_from_rdf | def title_from_rdf(rdf, url):
'''
Try to extract a distribution title from a property.
As it's not a mandatory property,
it falls back on building a title from the URL,
then the format and as a last resort a generic resource name.
'''
title = rdf_value(rdf, DCT.title)
if title:
return title
if url:
last_part = url.split('/')[-1]
if '.' in last_part and '?' not in last_part:
return last_part
fmt = rdf_value(rdf, DCT.term('format'))
lang = current_app.config['DEFAULT_LANGUAGE']
with i18n.language(lang):
if fmt:
return i18n._('{format} resource').format(format=fmt.lower())
else:
return i18n._('Nameless resource') | python | def title_from_rdf(rdf, url):
'''
Try to extract a distribution title from a property.
As it's not a mandatory property,
it falls back on building a title from the URL,
then the format and as a last resort a generic resource name.
'''
title = rdf_value(rdf, DCT.title)
if title:
return title
if url:
last_part = url.split('/')[-1]
if '.' in last_part and '?' not in last_part:
return last_part
fmt = rdf_value(rdf, DCT.term('format'))
lang = current_app.config['DEFAULT_LANGUAGE']
with i18n.language(lang):
if fmt:
return i18n._('{format} resource').format(format=fmt.lower())
else:
return i18n._('Nameless resource') | [
"def",
"title_from_rdf",
"(",
"rdf",
",",
"url",
")",
":",
"title",
"=",
"rdf_value",
"(",
"rdf",
",",
"DCT",
".",
"title",
")",
"if",
"title",
":",
"return",
"title",
"if",
"url",
":",
"last_part",
"=",
"url",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"if",
"'.'",
"in",
"last_part",
"and",
"'?'",
"not",
"in",
"last_part",
":",
"return",
"last_part",
"fmt",
"=",
"rdf_value",
"(",
"rdf",
",",
"DCT",
".",
"term",
"(",
"'format'",
")",
")",
"lang",
"=",
"current_app",
".",
"config",
"[",
"'DEFAULT_LANGUAGE'",
"]",
"with",
"i18n",
".",
"language",
"(",
"lang",
")",
":",
"if",
"fmt",
":",
"return",
"i18n",
".",
"_",
"(",
"'{format} resource'",
")",
".",
"format",
"(",
"format",
"=",
"fmt",
".",
"lower",
"(",
")",
")",
"else",
":",
"return",
"i18n",
".",
"_",
"(",
"'Nameless resource'",
")"
] | Try to extract a distribution title from a property.
As it's not a mandatory property,
it falls back on building a title from the URL,
then the format and as a last resort a generic resource name. | [
"Try",
"to",
"extract",
"a",
"distribution",
"title",
"from",
"a",
"property",
".",
"As",
"it",
"s",
"not",
"a",
"mandatory",
"property",
"it",
"fallback",
"on",
"building",
"a",
"title",
"from",
"the",
"URL",
"then",
"the",
"format",
"and",
"in",
"last",
"ressort",
"a",
"generic",
"resource",
"name",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/dataset/rdf.py#L313-L333 | train |
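Stripped of RDF and i18n, the fallback chain is plain string logic: take the URL's basename when it looks like a file name, else derive a label from the format, else a generic name. A self-contained sketch (English-only, so the `i18n` calls are dropped):

def title_from_url(url, fmt=None):
    if url:
        last_part = url.split('/')[-1]
        # Looks like a file name: has an extension, no query string.
        if '.' in last_part and '?' not in last_part:
            return last_part
    if fmt:
        return '{0} resource'.format(fmt.lower())
    return 'Nameless resource'

assert title_from_url('http://example.org/data/population.csv') == 'population.csv'
assert title_from_url('http://example.org/api?fmt=csv', 'CSV') == 'csv resource'
assert title_from_url(None) == 'Nameless resource'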
opendatateam/udata | udata/core/reuse/forms.py | check_url_does_not_exists | def check_url_does_not_exists(form, field):
'''Ensure a reuse URL is not yet registered'''
if field.data != field.object_data and Reuse.url_exists(field.data):
raise validators.ValidationError(_('This URL is already registered')) | python | def check_url_does_not_exists(form, field):
'''Ensure a reuse URL is not yet registered'''
if field.data != field.object_data and Reuse.url_exists(field.data):
raise validators.ValidationError(_('This URL is already registered')) | [
"def",
"check_url_does_not_exists",
"(",
"form",
",",
"field",
")",
":",
"if",
"field",
".",
"data",
"!=",
"field",
".",
"object_data",
"and",
"Reuse",
".",
"url_exists",
"(",
"field",
".",
"data",
")",
":",
"raise",
"validators",
".",
"ValidationError",
"(",
"_",
"(",
"'This URL is already registered'",
")",
")"
] | Ensure a reuse URL is not yet registered | [
"Ensure",
"a",
"reuse",
"URL",
"is",
"not",
"yet",
"registered"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/reuse/forms.py#L13-L16 | train |
opendatateam/udata | udata/search/fields.py | obj_to_string | def obj_to_string(obj):
'''Render an object into a unicode string if possible'''
if not obj:
return None
elif isinstance(obj, bytes):
return obj.decode('utf-8')
elif isinstance(obj, basestring):
return obj
elif is_lazy_string(obj):
return obj.value
elif hasattr(obj, '__html__'):
return obj.__html__()
else:
return str(obj) | python | def obj_to_string(obj):
'''Render an object into a unicode string if possible'''
if not obj:
return None
elif isinstance(obj, bytes):
return obj.decode('utf-8')
elif isinstance(obj, basestring):
return obj
elif is_lazy_string(obj):
return obj.value
elif hasattr(obj, '__html__'):
return obj.__html__()
else:
return str(obj) | [
"def",
"obj_to_string",
"(",
"obj",
")",
":",
"if",
"not",
"obj",
":",
"return",
"None",
"elif",
"isinstance",
"(",
"obj",
",",
"bytes",
")",
":",
"return",
"obj",
".",
"decode",
"(",
"'utf-8'",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"basestring",
")",
":",
"return",
"obj",
"elif",
"is_lazy_string",
"(",
"obj",
")",
":",
"return",
"obj",
".",
"value",
"elif",
"hasattr",
"(",
"obj",
",",
"'__html__'",
")",
":",
"return",
"obj",
".",
"__html__",
"(",
")",
"else",
":",
"return",
"str",
"(",
"obj",
")"
] | Render an object into a unicode string if possible | [
"Render",
"an",
"object",
"into",
"a",
"unicode",
"string",
"if",
"possible"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/fields.py#L41-L54 | train |
opendatateam/udata | udata/search/fields.py | TermsFacet.add_filter | def add_filter(self, filter_values):
"""Improve the original one to deal with OR cases."""
field = self._params['field']
# Build an `AND` query on values without the OR operator,
# and an `OR` query for each value containing the OR operator.
filters = [
Q('bool', should=[
Q('term', **{field: v}) for v in value.split(OR_SEPARATOR)
])
if OR_SEPARATOR in value else
Q('term', **{field: value})
for value in filter_values
]
return Q('bool', must=filters) if len(filters) > 1 else filters[0] | python | def add_filter(self, filter_values):
"""Improve the original one to deal with OR cases."""
field = self._params['field']
# Build an `AND` query on values without the OR operator,
# and an `OR` query for each value containing the OR operator.
filters = [
Q('bool', should=[
Q('term', **{field: v}) for v in value.split(OR_SEPARATOR)
])
if OR_SEPARATOR in value else
Q('term', **{field: value})
for value in filter_values
]
return Q('bool', must=filters) if len(filters) > 1 else filters[0] | [
"def",
"add_filter",
"(",
"self",
",",
"filter_values",
")",
":",
"field",
"=",
"self",
".",
"_params",
"[",
"'field'",
"]",
"# Build a `AND` query on values wihtout the OR operator.",
"# and a `OR` query for each value containing the OR operator.",
"filters",
"=",
"[",
"Q",
"(",
"'bool'",
",",
"should",
"=",
"[",
"Q",
"(",
"'term'",
",",
"*",
"*",
"{",
"field",
":",
"v",
"}",
")",
"for",
"v",
"in",
"value",
".",
"split",
"(",
"OR_SEPARATOR",
")",
"]",
")",
"if",
"OR_SEPARATOR",
"in",
"value",
"else",
"Q",
"(",
"'term'",
",",
"*",
"*",
"{",
"field",
":",
"value",
"}",
")",
"for",
"value",
"in",
"filter_values",
"]",
"return",
"Q",
"(",
"'bool'",
",",
"must",
"=",
"filters",
")",
"if",
"len",
"(",
"filters",
")",
">",
"1",
"else",
"filters",
"[",
"0",
"]"
] | Improve the original one to deal with OR cases. | [
"Improve",
"the",
"original",
"one",
"to",
"deal",
"with",
"OR",
"cases",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/fields.py#L87-L100 | train |
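Feeding the method a concrete input shows the AND-of-ORs it produces. elasticsearch-dsl `Q` objects build entirely offline, so this runs without a cluster; the field name is an assumption:

from elasticsearch_dsl import Q

OR_SEPARATOR = '|'
FIELD = 'tags'

def add_filter(filter_values):
    filters = [
        Q('bool', should=[Q('term', **{FIELD: v})
                          for v in value.split(OR_SEPARATOR)])
        if OR_SEPARATOR in value else Q('term', **{FIELD: value})
        for value in filter_values
    ]
    return Q('bool', must=filters) if len(filters) > 1 else filters[0]

# 'health' AND ('paris' OR 'lyon'):
print(add_filter(['health', 'paris|lyon']).to_dict())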
opendatateam/udata | udata/search/fields.py | ModelTermsFacet.get_values | def get_values(self, data, filter_values):
"""
Turn the raw bucket data into a list of tuples containing the object,
number of documents and a flag indicating whether this value has been
selected or not.
"""
values = super(ModelTermsFacet, self).get_values(data, filter_values)
ids = [key for (key, doc_count, selected) in values]
# Perform a model resolution: models are fetched from the DB
# We use the model field to cast IDs
ids = [self.model_field.to_mongo(id) for id in ids]
objects = self.model.objects.in_bulk(ids)
return [
(objects.get(self.model_field.to_mongo(key)), doc_count, selected)
for (key, doc_count, selected) in values
] | python | def get_values(self, data, filter_values):
"""
Turn the raw bucket data into a list of tuples containing the object,
number of documents and a flag indicating whether this value has been
selected or not.
"""
values = super(ModelTermsFacet, self).get_values(data, filter_values)
ids = [key for (key, doc_count, selected) in values]
# Perform a model resolution: models are fetched from the DB
# We use the model field to cast IDs
ids = [self.model_field.to_mongo(id) for id in ids]
objects = self.model.objects.in_bulk(ids)
return [
(objects.get(self.model_field.to_mongo(key)), doc_count, selected)
for (key, doc_count, selected) in values
] | [
"def",
"get_values",
"(",
"self",
",",
"data",
",",
"filter_values",
")",
":",
"values",
"=",
"super",
"(",
"ModelTermsFacet",
",",
"self",
")",
".",
"get_values",
"(",
"data",
",",
"filter_values",
")",
"ids",
"=",
"[",
"key",
"for",
"(",
"key",
",",
"doc_count",
",",
"selected",
")",
"in",
"values",
"]",
"# Perform a model resolution: models are feched from DB",
"# We use model field to cast IDs",
"ids",
"=",
"[",
"self",
".",
"model_field",
".",
"to_mongo",
"(",
"id",
")",
"for",
"id",
"in",
"ids",
"]",
"objects",
"=",
"self",
".",
"model",
".",
"objects",
".",
"in_bulk",
"(",
"ids",
")",
"return",
"[",
"(",
"objects",
".",
"get",
"(",
"self",
".",
"model_field",
".",
"to_mongo",
"(",
"key",
")",
")",
",",
"doc_count",
",",
"selected",
")",
"for",
"(",
"key",
",",
"doc_count",
",",
"selected",
")",
"in",
"values",
"]"
] | Turn the raw bucket data into a list of tuples containing the object,
number of documents and a flag indicating whether this value has been
selected or not. | [
"Turn",
"the",
"raw",
"bucket",
"data",
"into",
"a",
"list",
"of",
"tuples",
"containing",
"the",
"object",
"number",
"of",
"documents",
"and",
"a",
"flag",
"indicating",
"whether",
"this",
"value",
"has",
"been",
"selected",
"or",
"not",
"."
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/search/fields.py#L135-L151 | train |
opendatateam/udata | udata/core/organization/forms.py | OrganizationForm.save | def save(self, commit=True, **kwargs):
'''Register the current user as admin on creation'''
org = super(OrganizationForm, self).save(commit=False, **kwargs)
if not org.id:
user = current_user._get_current_object()
member = Member(user=user, role='admin')
org.members.append(member)
if commit:
org.save()
return org | python | def save(self, commit=True, **kwargs):
'''Register the current user as admin on creation'''
org = super(OrganizationForm, self).save(commit=False, **kwargs)
if not org.id:
user = current_user._get_current_object()
member = Member(user=user, role='admin')
org.members.append(member)
if commit:
org.save()
return org | [
"def",
"save",
"(",
"self",
",",
"commit",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"org",
"=",
"super",
"(",
"OrganizationForm",
",",
"self",
")",
".",
"save",
"(",
"commit",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
"if",
"not",
"org",
".",
"id",
":",
"user",
"=",
"current_user",
".",
"_get_current_object",
"(",
")",
"member",
"=",
"Member",
"(",
"user",
"=",
"user",
",",
"role",
"=",
"'admin'",
")",
"org",
".",
"members",
".",
"append",
"(",
"member",
")",
"if",
"commit",
":",
"org",
".",
"save",
"(",
")",
"return",
"org"
] | Register the current user as admin on creation | [
"Register",
"the",
"current",
"user",
"as",
"admin",
"on",
"creation"
] | f016585af94b0ff6bd73738c700324adc8ba7f8f | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/organization/forms.py#L36-L48 | train |
antonagestam/collectfast | collectfast/etag.py | get_cache_key | def get_cache_key(path):
"""
Create a cache key by concatenating the prefix with a hash of the path.
"""
# Python 2/3 support for path hashing
try:
path_hash = hashlib.md5(path).hexdigest()
except TypeError:
path_hash = hashlib.md5(path.encode('utf-8')).hexdigest()
return settings.cache_key_prefix + path_hash | python | def get_cache_key(path):
"""
Create a cache key by concatenating the prefix with a hash of the path.
"""
# Python 2/3 support for path hashing
try:
path_hash = hashlib.md5(path).hexdigest()
except TypeError:
path_hash = hashlib.md5(path.encode('utf-8')).hexdigest()
return settings.cache_key_prefix + path_hash | [
"def",
"get_cache_key",
"(",
"path",
")",
":",
"# Python 2/3 support for path hashing",
"try",
":",
"path_hash",
"=",
"hashlib",
".",
"md5",
"(",
"path",
")",
".",
"hexdigest",
"(",
")",
"except",
"TypeError",
":",
"path_hash",
"=",
"hashlib",
".",
"md5",
"(",
"path",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"hexdigest",
"(",
")",
"return",
"settings",
".",
"cache_key_prefix",
"+",
"path_hash"
] | Create a cache key by concatenating the prefix with a hash of the path. | [
"Create",
"a",
"cache",
"key",
"by",
"concatenating",
"the",
"prefix",
"with",
"a",
"hash",
"of",
"the",
"path",
"."
] | fb9d7976da2a2578528fa6f3bbd053ee87475ecb | https://github.com/antonagestam/collectfast/blob/fb9d7976da2a2578528fa6f3bbd053ee87475ecb/collectfast/etag.py#L28-L37 | train |
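A minimal standalone sketch of the key derivation above; `make_cache_key` and the 'collectfast_asset_' prefix are illustrative names, not the library's actual defaults:

import hashlib

def make_cache_key(path, prefix='collectfast_asset_'):
    # Encode up front so one code path covers both Python 2 and 3 inputs.
    return prefix + hashlib.md5(path.encode('utf-8')).hexdigest()

print(make_cache_key('css/app.css'))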
antonagestam/collectfast | collectfast/etag.py | get_remote_etag | def get_remote_etag(storage, prefixed_path):
"""
Get etag of path from S3 using boto or boto3.
"""
normalized_path = safe_join(storage.location, prefixed_path).replace(
'\\', '/')
try:
return storage.bucket.get_key(normalized_path).etag
except AttributeError:
pass
try:
return storage.bucket.Object(normalized_path).e_tag
except:
pass
return None | python | def get_remote_etag(storage, prefixed_path):
"""
Get etag of path from S3 using boto or boto3.
"""
normalized_path = safe_join(storage.location, prefixed_path).replace(
'\\', '/')
try:
return storage.bucket.get_key(normalized_path).etag
except AttributeError:
pass
try:
return storage.bucket.Object(normalized_path).e_tag
except:
pass
return None | [
"def",
"get_remote_etag",
"(",
"storage",
",",
"prefixed_path",
")",
":",
"normalized_path",
"=",
"safe_join",
"(",
"storage",
".",
"location",
",",
"prefixed_path",
")",
".",
"replace",
"(",
"'\\\\'",
",",
"'/'",
")",
"try",
":",
"return",
"storage",
".",
"bucket",
".",
"get_key",
"(",
"normalized_path",
")",
".",
"etag",
"except",
"AttributeError",
":",
"pass",
"try",
":",
"return",
"storage",
".",
"bucket",
".",
"Object",
"(",
"normalized_path",
")",
".",
"e_tag",
"except",
":",
"pass",
"return",
"None"
] | Get etag of path from S3 using boto or boto3. | [
"Get",
"etag",
"of",
"path",
"from",
"S3",
"using",
"boto",
"or",
"boto3",
"."
] | fb9d7976da2a2578528fa6f3bbd053ee87475ecb | https://github.com/antonagestam/collectfast/blob/fb9d7976da2a2578528fa6f3bbd053ee87475ecb/collectfast/etag.py#L40-L54 | train |
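The .replace('\\', '/') in get_remote_etag matters on Windows, where path joins can produce backslashes while S3 keys always use forward slashes. A self-contained illustration; the helper name is hypothetical:

import posixpath

def normalize_key(location, path):
    # Hypothetical helper: force S3-style forward slashes in the key.
    return posixpath.join(location, path.replace('\\', '/'))

print(normalize_key('static', 'css\\app.css'))  # static/css/app.css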
antonagestam/collectfast | collectfast/etag.py | get_etag | def get_etag(storage, path, prefixed_path):
"""
Get etag of path from cache or S3 - in that order.
"""
cache_key = get_cache_key(path)
etag = cache.get(cache_key, False)
if etag is False:
etag = get_remote_etag(storage, prefixed_path)
cache.set(cache_key, etag)
return etag | python | def get_etag(storage, path, prefixed_path):
"""
Get etag of path from cache or S3 - in that order.
"""
cache_key = get_cache_key(path)
etag = cache.get(cache_key, False)
if etag is False:
etag = get_remote_etag(storage, prefixed_path)
cache.set(cache_key, etag)
return etag | [
"def",
"get_etag",
"(",
"storage",
",",
"path",
",",
"prefixed_path",
")",
":",
"cache_key",
"=",
"get_cache_key",
"(",
"path",
")",
"etag",
"=",
"cache",
".",
"get",
"(",
"cache_key",
",",
"False",
")",
"if",
"etag",
"is",
"False",
":",
"etag",
"=",
"get_remote_etag",
"(",
"storage",
",",
"prefixed_path",
")",
"cache",
".",
"set",
"(",
"cache_key",
",",
"etag",
")",
"return",
"etag"
] | Get etag of path from cache or S3 - in that order. | [
"Get",
"etag",
"of",
"path",
"from",
"cache",
"or",
"S3",
"-",
"in",
"that",
"order",
"."
] | fb9d7976da2a2578528fa6f3bbd053ee87475ecb | https://github.com/antonagestam/collectfast/blob/fb9d7976da2a2578528fa6f3bbd053ee87475ecb/collectfast/etag.py#L57-L66 | train |
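Note the sentinel choice in get_etag: False marks "never looked up", so a remote answer of None is still cached and S3 is not queried again. A dict-backed sketch of the same cache-aside pattern, with illustrative names:

_cache = {}

def cached_lookup(key, fetch_remote):
    value = _cache.get(key, False)  # False means "never looked up"
    if value is False:
        value = fetch_remote(key)   # may legitimately return None
        _cache[key] = value
    return value

print(cached_lookup('k', lambda k: None))  # None, and now cached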
antonagestam/collectfast | collectfast/etag.py | get_file_hash | def get_file_hash(storage, path):
"""
Create md5 hash from file contents.
"""
contents = storage.open(path).read()
file_hash = hashlib.md5(contents).hexdigest()
# Check if content should be gzipped and hash gzipped content
content_type = mimetypes.guess_type(path)[0] or 'application/octet-stream'
if settings.is_gzipped and content_type in settings.gzip_content_types:
cache_key = get_cache_key('gzip_hash_%s' % file_hash)
file_hash = cache.get(cache_key, False)
if file_hash is False:
buffer = BytesIO()
zf = gzip.GzipFile(
mode='wb', compresslevel=6, fileobj=buffer, mtime=0.0)
zf.write(force_bytes(contents))
zf.close()
file_hash = hashlib.md5(buffer.getvalue()).hexdigest()
cache.set(cache_key, file_hash)
return '"%s"' % file_hash | python | def get_file_hash(storage, path):
"""
Create md5 hash from file contents.
"""
contents = storage.open(path).read()
file_hash = hashlib.md5(contents).hexdigest()
# Check if content should be gzipped and hash gzipped content
content_type = mimetypes.guess_type(path)[0] or 'application/octet-stream'
if settings.is_gzipped and content_type in settings.gzip_content_types:
cache_key = get_cache_key('gzip_hash_%s' % file_hash)
file_hash = cache.get(cache_key, False)
if file_hash is False:
buffer = BytesIO()
zf = gzip.GzipFile(
mode='wb', compresslevel=6, fileobj=buffer, mtime=0.0)
zf.write(force_bytes(contents))
zf.close()
file_hash = hashlib.md5(buffer.getvalue()).hexdigest()
cache.set(cache_key, file_hash)
return '"%s"' % file_hash | [
"def",
"get_file_hash",
"(",
"storage",
",",
"path",
")",
":",
"contents",
"=",
"storage",
".",
"open",
"(",
"path",
")",
".",
"read",
"(",
")",
"file_hash",
"=",
"hashlib",
".",
"md5",
"(",
"contents",
")",
".",
"hexdigest",
"(",
")",
"# Check if content should be gzipped and hash gzipped content",
"content_type",
"=",
"mimetypes",
".",
"guess_type",
"(",
"path",
")",
"[",
"0",
"]",
"or",
"'application/octet-stream'",
"if",
"settings",
".",
"is_gzipped",
"and",
"content_type",
"in",
"settings",
".",
"gzip_content_types",
":",
"cache_key",
"=",
"get_cache_key",
"(",
"'gzip_hash_%s'",
"%",
"file_hash",
")",
"file_hash",
"=",
"cache",
".",
"get",
"(",
"cache_key",
",",
"False",
")",
"if",
"file_hash",
"is",
"False",
":",
"buffer",
"=",
"BytesIO",
"(",
")",
"zf",
"=",
"gzip",
".",
"GzipFile",
"(",
"mode",
"=",
"'wb'",
",",
"compresslevel",
"=",
"6",
",",
"fileobj",
"=",
"buffer",
",",
"mtime",
"=",
"0.0",
")",
"zf",
".",
"write",
"(",
"force_bytes",
"(",
"contents",
")",
")",
"zf",
".",
"close",
"(",
")",
"file_hash",
"=",
"hashlib",
".",
"md5",
"(",
"buffer",
".",
"getvalue",
"(",
")",
")",
".",
"hexdigest",
"(",
")",
"cache",
".",
"set",
"(",
"cache_key",
",",
"file_hash",
")",
"return",
"'\"%s\"'",
"%",
"file_hash"
] | Create md5 hash from file contents. | [
"Create",
"md5",
"hash",
"from",
"file",
"contents",
"."
] | fb9d7976da2a2578528fa6f3bbd053ee87475ecb | https://github.com/antonagestam/collectfast/blob/fb9d7976da2a2578528fa6f3bbd053ee87475ecb/collectfast/etag.py#L76-L97 | train |
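The mtime=0.0 argument in get_file_hash is what keeps the gzipped hash deterministic: gzip embeds a timestamp in its header, so identical bytes compressed at different times would otherwise hash differently. A standalone check:

import gzip
import hashlib
from io import BytesIO

def gzip_md5(data):
    buf = BytesIO()
    # Pin the header timestamp so repeated runs produce identical bytes.
    with gzip.GzipFile(mode='wb', compresslevel=6, fileobj=buf, mtime=0.0) as zf:
        zf.write(data)
    return hashlib.md5(buf.getvalue()).hexdigest()

assert gzip_md5(b'hello') == gzip_md5(b'hello')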
antonagestam/collectfast | collectfast/etag.py | has_matching_etag | def has_matching_etag(remote_storage, source_storage, path, prefixed_path):
"""
Compare etag of path in source storage with remote.
"""
storage_etag = get_etag(remote_storage, path, prefixed_path)
local_etag = get_file_hash(source_storage, path)
return storage_etag == local_etag | python | def has_matching_etag(remote_storage, source_storage, path, prefixed_path):
"""
Compare etag of path in source storage with remote.
"""
storage_etag = get_etag(remote_storage, path, prefixed_path)
local_etag = get_file_hash(source_storage, path)
return storage_etag == local_etag | [
"def",
"has_matching_etag",
"(",
"remote_storage",
",",
"source_storage",
",",
"path",
",",
"prefixed_path",
")",
":",
"storage_etag",
"=",
"get_etag",
"(",
"remote_storage",
",",
"path",
",",
"prefixed_path",
")",
"local_etag",
"=",
"get_file_hash",
"(",
"source_storage",
",",
"path",
")",
"return",
"storage_etag",
"==",
"local_etag"
] | Compare etag of path in source storage with remote. | [
"Compare",
"etag",
"of",
"path",
"in",
"source",
"storage",
"with",
"remote",
"."
] | fb9d7976da2a2578528fa6f3bbd053ee87475ecb | https://github.com/antonagestam/collectfast/blob/fb9d7976da2a2578528fa6f3bbd053ee87475ecb/collectfast/etag.py#L100-L106 | train |
antonagestam/collectfast | collectfast/etag.py | should_copy_file | def should_copy_file(remote_storage, path, prefixed_path, source_storage):
"""
Returns True if the file should be copied, otherwise False.
"""
if has_matching_etag(
remote_storage, source_storage, path, prefixed_path):
logger.info("%s: Skipping based on matching file hashes" % path)
return False
# Invalidate cached versions of lookup before copy
destroy_etag(path)
logger.info("%s: Hashes did not match" % path)
return True | python | def should_copy_file(remote_storage, path, prefixed_path, source_storage):
"""
Returns True if the file should be copied, otherwise False.
"""
if has_matching_etag(
remote_storage, source_storage, path, prefixed_path):
logger.info("%s: Skipping based on matching file hashes" % path)
return False
# Invalidate cached versions of lookup before copy
destroy_etag(path)
logger.info("%s: Hashes did not match" % path)
return True | [
"def",
"should_copy_file",
"(",
"remote_storage",
",",
"path",
",",
"prefixed_path",
",",
"source_storage",
")",
":",
"if",
"has_matching_etag",
"(",
"remote_storage",
",",
"source_storage",
",",
"path",
",",
"prefixed_path",
")",
":",
"logger",
".",
"info",
"(",
"\"%s: Skipping based on matching file hashes\"",
"%",
"path",
")",
"return",
"False",
"# Invalidate cached versions of lookup before copy",
"destroy_etag",
"(",
"path",
")",
"logger",
".",
"info",
"(",
"\"%s: Hashes did not match\"",
"%",
"path",
")",
"return",
"True"
] | Returns True if the file should be copied, otherwise False. | [
"Returns",
"True",
"if",
"the",
"file",
"should",
"be",
"copied",
"otherwise",
"False",
"."
] | fb9d7976da2a2578528fa6f3bbd053ee87475ecb | https://github.com/antonagestam/collectfast/blob/fb9d7976da2a2578528fa6f3bbd053ee87475ecb/collectfast/etag.py#L109-L121 | train |
antonagestam/collectfast | collectfast/management/commands/collectstatic.py | Command.set_options | def set_options(self, **options):
"""
Set options and handle deprecation.
"""
ignore_etag = options.pop('ignore_etag', False)
disable = options.pop('disable_collectfast', False)
if ignore_etag:
warnings.warn(
"--ignore-etag is deprecated since 0.5.0, use "
"--disable-collectfast instead.")
if ignore_etag or disable:
self.collectfast_enabled = False
super(Command, self).set_options(**options) | python | def set_options(self, **options):
"""
Set options and handle deprecation.
"""
ignore_etag = options.pop('ignore_etag', False)
disable = options.pop('disable_collectfast', False)
if ignore_etag:
warnings.warn(
"--ignore-etag is deprecated since 0.5.0, use "
"--disable-collectfast instead.")
if ignore_etag or disable:
self.collectfast_enabled = False
super(Command, self).set_options(**options) | [
"def",
"set_options",
"(",
"self",
",",
"*",
"*",
"options",
")",
":",
"ignore_etag",
"=",
"options",
".",
"pop",
"(",
"'ignore_etag'",
",",
"False",
")",
"disable",
"=",
"options",
".",
"pop",
"(",
"'disable_collectfast'",
",",
"False",
")",
"if",
"ignore_etag",
":",
"warnings",
".",
"warn",
"(",
"\"--ignore-etag is deprecated since 0.5.0, use \"",
"\"--disable-collectfast instead.\"",
")",
"if",
"ignore_etag",
"or",
"disable",
":",
"self",
".",
"collectfast_enabled",
"=",
"False",
"super",
"(",
"Command",
",",
"self",
")",
".",
"set_options",
"(",
"*",
"*",
"options",
")"
] | Set options and handle deprecation. | [
"Set",
"options",
"and",
"handle",
"deprecation",
"."
] | fb9d7976da2a2578528fa6f3bbd053ee87475ecb | https://github.com/antonagestam/collectfast/blob/fb9d7976da2a2578528fa6f3bbd053ee87475ecb/collectfast/management/commands/collectstatic.py#L44-L56 | train |
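A generic version of the deprecation shim in set_options (the function name is illustrative): accept the old flag, warn, and fold it into the new switch.

import warnings

def resolve_disable(ignore_etag=False, disable=False):
    # The old flag still works but warns; both routes end in one switch.
    if ignore_etag:
        warnings.warn('--ignore-etag is deprecated since 0.5.0, '
                      'use --disable-collectfast instead.')
    return ignore_etag or disable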
antonagestam/collectfast | collectfast/management/commands/collectstatic.py | Command.handle | def handle(self, **options):
"""
Override handle to suppress summary output
"""
super(Command, self).handle(**options)
return "{} static file{} copied.".format(
self.num_copied_files,
'' if self.num_copied_files == 1 else 's') | python | def handle(self, **options):
"""
Override handle to suppress summary output
"""
super(Command, self).handle(**options)
return "{} static file{} copied.".format(
self.num_copied_files,
'' if self.num_copied_files == 1 else 's') | [
"def",
"handle",
"(",
"self",
",",
"*",
"*",
"options",
")",
":",
"super",
"(",
"Command",
",",
"self",
")",
".",
"handle",
"(",
"*",
"*",
"options",
")",
"return",
"\"{} static file{} copied.\"",
".",
"format",
"(",
"self",
".",
"num_copied_files",
",",
"''",
"if",
"self",
".",
"num_copied_files",
"==",
"1",
"else",
"'s'",
")"
] | Override handle to suppress summary output | [
"Override",
"handle",
"to",
"supress",
"summary",
"output"
] | fb9d7976da2a2578528fa6f3bbd053ee87475ecb | https://github.com/antonagestam/collectfast/blob/fb9d7976da2a2578528fa6f3bbd053ee87475ecb/collectfast/management/commands/collectstatic.py#L68-L75 | train |
antonagestam/collectfast | collectfast/management/commands/collectstatic.py | Command.do_copy_file | def do_copy_file(self, args):
"""
Determine if file should be copied or not and handle exceptions.
"""
path, prefixed_path, source_storage = args
reset_connection(self.storage)
if self.collectfast_enabled and not self.dry_run:
try:
if not should_copy_file(
self.storage, path, prefixed_path, source_storage):
return False
except Exception as e:
if settings.debug:
raise
# Ignore errors and let default collectstatic handle copy
self.stdout.write(smart_str(
"Ignored error in Collectfast:\n%s\n--> Continuing using "
"default collectstatic." % e))
self.num_copied_files += 1
return super(Command, self).copy_file(
path, prefixed_path, source_storage) | python | def do_copy_file(self, args):
"""
Determine if file should be copied or not and handle exceptions.
"""
path, prefixed_path, source_storage = args
reset_connection(self.storage)
if self.collectfast_enabled and not self.dry_run:
try:
if not should_copy_file(
self.storage, path, prefixed_path, source_storage):
return False
except Exception as e:
if settings.debug:
raise
# Ignore errors and let default collectstatic handle copy
self.stdout.write(smart_str(
"Ignored error in Collectfast:\n%s\n--> Continuing using "
"default collectstatic." % e))
self.num_copied_files += 1
return super(Command, self).copy_file(
path, prefixed_path, source_storage) | [
"def",
"do_copy_file",
"(",
"self",
",",
"args",
")",
":",
"path",
",",
"prefixed_path",
",",
"source_storage",
"=",
"args",
"reset_connection",
"(",
"self",
".",
"storage",
")",
"if",
"self",
".",
"collectfast_enabled",
"and",
"not",
"self",
".",
"dry_run",
":",
"try",
":",
"if",
"not",
"should_copy_file",
"(",
"self",
".",
"storage",
",",
"path",
",",
"prefixed_path",
",",
"source_storage",
")",
":",
"return",
"False",
"except",
"Exception",
"as",
"e",
":",
"if",
"settings",
".",
"debug",
":",
"raise",
"# Ignore errors and let default collectstatic handle copy",
"self",
".",
"stdout",
".",
"write",
"(",
"smart_str",
"(",
"\"Ignored error in Collectfast:\\n%s\\n--> Continuing using \"",
"\"default collectstatic.\"",
"%",
"e",
")",
")",
"self",
".",
"num_copied_files",
"+=",
"1",
"return",
"super",
"(",
"Command",
",",
"self",
")",
".",
"copy_file",
"(",
"path",
",",
"prefixed_path",
",",
"source_storage",
")"
] | Determine if file should be copied or not and handle exceptions. | [
"Determine",
"if",
"file",
"should",
"be",
"copied",
"or",
"not",
"and",
"handle",
"exceptions",
"."
] | fb9d7976da2a2578528fa6f3bbd053ee87475ecb | https://github.com/antonagestam/collectfast/blob/fb9d7976da2a2578528fa6f3bbd053ee87475ecb/collectfast/management/commands/collectstatic.py#L77-L100 | train |
antonagestam/collectfast | collectfast/management/commands/collectstatic.py | Command.copy_file | def copy_file(self, path, prefixed_path, source_storage):
"""
Appends path to task queue if threads are enabled, otherwise copies
the file with a blocking call.
"""
args = (path, prefixed_path, source_storage)
if settings.threads:
self.tasks.append(args)
else:
self.do_copy_file(args) | python | def copy_file(self, path, prefixed_path, source_storage):
"""
Appends path to task queue if threads are enabled, otherwise copies
the file with a blocking call.
"""
args = (path, prefixed_path, source_storage)
if settings.threads:
self.tasks.append(args)
else:
self.do_copy_file(args) | [
"def",
"copy_file",
"(",
"self",
",",
"path",
",",
"prefixed_path",
",",
"source_storage",
")",
":",
"args",
"=",
"(",
"path",
",",
"prefixed_path",
",",
"source_storage",
")",
"if",
"settings",
".",
"threads",
":",
"self",
".",
"tasks",
".",
"append",
"(",
"args",
")",
"else",
":",
"self",
".",
"do_copy_file",
"(",
"args",
")"
] | Appends path to task queue if threads are enabled, otherwise copies
the file with a blocking call. | [
"Appends",
"path",
"to",
"task",
"queue",
"if",
"threads",
"are",
"enabled",
"otherwise",
"copies",
"the",
"file",
"with",
"a",
"blocking",
"call",
"."
] | fb9d7976da2a2578528fa6f3bbd053ee87475ecb | https://github.com/antonagestam/collectfast/blob/fb9d7976da2a2578528fa6f3bbd053ee87475ecb/collectfast/management/commands/collectstatic.py#L102-L111 | train |
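copy_file only queues work when threads are enabled. A hedged sketch of how such a task list might later be drained with a pool; the pool size and the `drain` helper are assumptions, not Collectfast internals shown here:

from multiprocessing.pool import ThreadPool

def drain(tasks, do_copy_file, threads=10):
    pool = ThreadPool(threads)
    try:
        # Each task is a (path, prefixed_path, source_storage) tuple.
        pool.map(do_copy_file, tasks)
    finally:
        pool.close()
        pool.join()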
antonagestam/collectfast | collectfast/management/commands/collectstatic.py | Command.delete_file | def delete_file(self, path, prefixed_path, source_storage):
"""
Override delete_file to skip modified time and exists lookups.
"""
if not self.collectfast_enabled:
return super(Command, self).delete_file(
path, prefixed_path, source_storage)
if not self.dry_run:
self.log("Deleting '%s'" % path)
self.storage.delete(prefixed_path)
else:
self.log("Pretending to delete '%s'" % path)
return True | python | def delete_file(self, path, prefixed_path, source_storage):
"""
Override delete_file to skip modified time and exists lookups.
"""
if not self.collectfast_enabled:
return super(Command, self).delete_file(
path, prefixed_path, source_storage)
if not self.dry_run:
self.log("Deleting '%s'" % path)
self.storage.delete(prefixed_path)
else:
self.log("Pretending to delete '%s'" % path)
return True | [
"def",
"delete_file",
"(",
"self",
",",
"path",
",",
"prefixed_path",
",",
"source_storage",
")",
":",
"if",
"not",
"self",
".",
"collectfast_enabled",
":",
"return",
"super",
"(",
"Command",
",",
"self",
")",
".",
"delete_file",
"(",
"path",
",",
"prefixed_path",
",",
"source_storage",
")",
"if",
"not",
"self",
".",
"dry_run",
":",
"self",
".",
"log",
"(",
"\"Deleting '%s'\"",
"%",
"path",
")",
"self",
".",
"storage",
".",
"delete",
"(",
"prefixed_path",
")",
"else",
":",
"self",
".",
"log",
"(",
"\"Pretending to delete '%s'\"",
"%",
"path",
")",
"return",
"True"
] | Override delete_file to skip modified time and exists lookups. | [
"Override",
"delete_file",
"to",
"skip",
"modified",
"time",
"and",
"exists",
"lookups",
"."
] | fb9d7976da2a2578528fa6f3bbd053ee87475ecb | https://github.com/antonagestam/collectfast/blob/fb9d7976da2a2578528fa6f3bbd053ee87475ecb/collectfast/management/commands/collectstatic.py#L113-L125 | train |
satellogic/telluric | telluric/georaster.py | join | def join(rasters):
"""
This method takes a list of rasters and returns a raster that is constructed from all of them
"""
raster = rasters[0]  # using the first raster to understand what type of data we have
mask_band = None
nodata = None
with raster._raster_opener(raster.source_file) as r:
nodata = r.nodata
mask_flags = r.mask_flag_enums
per_dataset_mask = all([rasterio.enums.MaskFlags.per_dataset in flags for flags in mask_flags])
if per_dataset_mask and nodata is None:
mask_band = 0
return GeoRaster2.from_rasters(rasters, relative_to_vrt=False, nodata=nodata, mask_band=mask_band) | python | def join(rasters):
"""
This method takes a list of rasters and returns a raster that is constructed from all of them
"""
raster = rasters[0]  # using the first raster to understand what type of data we have
mask_band = None
nodata = None
with raster._raster_opener(raster.source_file) as r:
nodata = r.nodata
mask_flags = r.mask_flag_enums
per_dataset_mask = all([rasterio.enums.MaskFlags.per_dataset in flags for flags in mask_flags])
if per_dataset_mask and nodata is None:
mask_band = 0
return GeoRaster2.from_rasters(rasters, relative_to_vrt=False, nodata=nodata, mask_band=mask_band) | [
"def",
"join",
"(",
"rasters",
")",
":",
"raster",
"=",
"rasters",
"[",
"0",
"]",
"# using the first raster to understand what is the type of data we have",
"mask_band",
"=",
"None",
"nodata",
"=",
"None",
"with",
"raster",
".",
"_raster_opener",
"(",
"raster",
".",
"source_file",
")",
"as",
"r",
":",
"nodata",
"=",
"r",
".",
"nodata",
"mask_flags",
"=",
"r",
".",
"mask_flag_enums",
"per_dataset_mask",
"=",
"all",
"(",
"[",
"rasterio",
".",
"enums",
".",
"MaskFlags",
".",
"per_dataset",
"in",
"flags",
"for",
"flags",
"in",
"mask_flags",
"]",
")",
"if",
"per_dataset_mask",
"and",
"nodata",
"is",
"None",
":",
"mask_band",
"=",
"0",
"return",
"GeoRaster2",
".",
"from_rasters",
"(",
"rasters",
",",
"relative_to_vrt",
"=",
"False",
",",
"nodata",
"=",
"nodata",
",",
"mask_band",
"=",
"mask_band",
")"
] | This method takes a list of rasters and returns a raster that is constructed from all of them | [
"This",
"method",
"takes",
"a",
"list",
"of",
"rasters",
"and",
"returns",
"a",
"raster",
"that",
"is",
"constructed",
"of",
"all",
"of",
"them"
] | e752cd3ee71e339f79717e526fde362e80055d9e | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L95-L109 | train |
satellogic/telluric | telluric/georaster.py | merge_all | def merge_all(rasters, roi=None, dest_resolution=None, merge_strategy=MergeStrategy.UNION,
shape=None, ul_corner=None, crs=None, pixel_strategy=PixelStrategy.FIRST,
resampling=Resampling.nearest):
"""Merge a list of rasters, cropping by a region of interest.
There are cases where the roi is not precise enough; for these cases one can use
the upper left corner, the shape and crs to precisely define the roi.
When roi is provided, the ul_corner, shape and crs are ignored
"""
first_raster = rasters[0]
if roi:
crs = crs or roi.crs
dest_resolution = dest_resolution or _dest_resolution(first_raster, crs)
# Create empty raster
empty = GeoRaster2.empty_from_roi(
roi, resolution=dest_resolution, band_names=first_raster.band_names,
dtype=first_raster.dtype, shape=shape, ul_corner=ul_corner, crs=crs)
# Create a list of single band rasters
all_band_names, projected_rasters = _prepare_rasters(rasters, merge_strategy, empty,
resampling=resampling)
assert len(projected_rasters) == len(rasters)
prepared_rasters = _apply_pixel_strategy(projected_rasters, pixel_strategy)
# Extend the rasters list with only those that have the requested bands
prepared_rasters = _explode_rasters(prepared_rasters, all_band_names)
if all_band_names:
# Merge common bands
prepared_rasters = _merge_common_bands(prepared_rasters)
# Merge all bands
raster = reduce(_stack_bands, prepared_rasters)
return empty.copy_with(image=raster.image, band_names=raster.band_names)
else:
raise ValueError("result contains no bands, use another merge strategy") | python | def merge_all(rasters, roi=None, dest_resolution=None, merge_strategy=MergeStrategy.UNION,
shape=None, ul_corner=None, crs=None, pixel_strategy=PixelStrategy.FIRST,
resampling=Resampling.nearest):
"""Merge a list of rasters, cropping by a region of interest.
There are cases where the roi is not precise enough; for these cases one can use
the upper left corner, the shape and crs to precisely define the roi.
When roi is provided, the ul_corner, shape and crs are ignored
"""
first_raster = rasters[0]
if roi:
crs = crs or roi.crs
dest_resolution = dest_resolution or _dest_resolution(first_raster, crs)
# Create empty raster
empty = GeoRaster2.empty_from_roi(
roi, resolution=dest_resolution, band_names=first_raster.band_names,
dtype=first_raster.dtype, shape=shape, ul_corner=ul_corner, crs=crs)
# Create a list of single band rasters
all_band_names, projected_rasters = _prepare_rasters(rasters, merge_strategy, empty,
resampling=resampling)
assert len(projected_rasters) == len(rasters)
prepared_rasters = _apply_pixel_strategy(projected_rasters, pixel_strategy)
# Extend the rasters list with only those that have the requested bands
prepared_rasters = _explode_rasters(prepared_rasters, all_band_names)
if all_band_names:
# Merge common bands
prepared_rasters = _merge_common_bands(prepared_rasters)
# Merge all bands
raster = reduce(_stack_bands, prepared_rasters)
return empty.copy_with(image=raster.image, band_names=raster.band_names)
else:
raise ValueError("result contains no bands, use another merge strategy") | [
"def",
"merge_all",
"(",
"rasters",
",",
"roi",
"=",
"None",
",",
"dest_resolution",
"=",
"None",
",",
"merge_strategy",
"=",
"MergeStrategy",
".",
"UNION",
",",
"shape",
"=",
"None",
",",
"ul_corner",
"=",
"None",
",",
"crs",
"=",
"None",
",",
"pixel_strategy",
"=",
"PixelStrategy",
".",
"FIRST",
",",
"resampling",
"=",
"Resampling",
".",
"nearest",
")",
":",
"first_raster",
"=",
"rasters",
"[",
"0",
"]",
"if",
"roi",
":",
"crs",
"=",
"crs",
"or",
"roi",
".",
"crs",
"dest_resolution",
"=",
"dest_resolution",
"or",
"_dest_resolution",
"(",
"first_raster",
",",
"crs",
")",
"# Create empty raster",
"empty",
"=",
"GeoRaster2",
".",
"empty_from_roi",
"(",
"roi",
",",
"resolution",
"=",
"dest_resolution",
",",
"band_names",
"=",
"first_raster",
".",
"band_names",
",",
"dtype",
"=",
"first_raster",
".",
"dtype",
",",
"shape",
"=",
"shape",
",",
"ul_corner",
"=",
"ul_corner",
",",
"crs",
"=",
"crs",
")",
"# Create a list of single band rasters",
"all_band_names",
",",
"projected_rasters",
"=",
"_prepare_rasters",
"(",
"rasters",
",",
"merge_strategy",
",",
"empty",
",",
"resampling",
"=",
"resampling",
")",
"assert",
"len",
"(",
"projected_rasters",
")",
"==",
"len",
"(",
"rasters",
")",
"prepared_rasters",
"=",
"_apply_pixel_strategy",
"(",
"projected_rasters",
",",
"pixel_strategy",
")",
"# Extend the rasters list with only those that have the requested bands",
"prepared_rasters",
"=",
"_explode_rasters",
"(",
"prepared_rasters",
",",
"all_band_names",
")",
"if",
"all_band_names",
":",
"# Merge common bands",
"prepared_rasters",
"=",
"_merge_common_bands",
"(",
"prepared_rasters",
")",
"# Merge all bands",
"raster",
"=",
"reduce",
"(",
"_stack_bands",
",",
"prepared_rasters",
")",
"return",
"empty",
".",
"copy_with",
"(",
"image",
"=",
"raster",
".",
"image",
",",
"band_names",
"=",
"raster",
".",
"band_names",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"result contains no bands, use another merge strategy\"",
")"
] | Merge a list of rasters, cropping by a region of interest.
There are cases where the roi is not precise enough; for these cases one can use
the upper left corner, the shape and crs to precisely define the roi.
When roi is provided, the ul_corner, shape and crs are ignored | [
"Merge",
"a",
"list",
"of",
"rasters",
"cropping",
"by",
"a",
"region",
"of",
"interest",
".",
"There",
"are",
"cases",
"that",
"the",
"roi",
"is",
"not",
"precise",
"enough",
"for",
"this",
"cases",
"one",
"can",
"use",
"the",
"upper",
"left",
"corner",
"the",
"shape",
"and",
"crs",
"to",
"precisely",
"define",
"the",
"roi",
".",
"When",
"roi",
"is",
"provided",
"the",
"ul_corner",
"shape",
"and",
"crs",
"are",
"ignored"
] | e752cd3ee71e339f79717e526fde362e80055d9e | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L120-L161 | train |
satellogic/telluric | telluric/georaster.py | _merge_common_bands | def _merge_common_bands(rasters):
# type: (List[_Raster]) -> List[_Raster]
"""Combine the common bands.
"""
# Compute band order
all_bands = IndexedSet([rs.band_names[0] for rs in rasters])
def key(rs):
return all_bands.index(rs.band_names[0])
rasters_final = [] # type: List[_Raster]
for band_name, rasters_group in groupby(sorted(rasters, key=key), key=key):
rasters_final.append(reduce(_fill_pixels, rasters_group))
return rasters_final | python | def _merge_common_bands(rasters):
# type: (List[_Raster]) -> List[_Raster]
"""Combine the common bands.
"""
# Compute band order
all_bands = IndexedSet([rs.band_names[0] for rs in rasters])
def key(rs):
return all_bands.index(rs.band_names[0])
rasters_final = [] # type: List[_Raster]
for band_name, rasters_group in groupby(sorted(rasters, key=key), key=key):
rasters_final.append(reduce(_fill_pixels, rasters_group))
return rasters_final | [
"def",
"_merge_common_bands",
"(",
"rasters",
")",
":",
"# type: (List[_Raster]) -> List[_Raster]",
"# Compute band order",
"all_bands",
"=",
"IndexedSet",
"(",
"[",
"rs",
".",
"band_names",
"[",
"0",
"]",
"for",
"rs",
"in",
"rasters",
"]",
")",
"def",
"key",
"(",
"rs",
")",
":",
"return",
"all_bands",
".",
"index",
"(",
"rs",
".",
"band_names",
"[",
"0",
"]",
")",
"rasters_final",
"=",
"[",
"]",
"# type: List[_Raster]",
"for",
"band_name",
",",
"rasters_group",
"in",
"groupby",
"(",
"sorted",
"(",
"rasters",
",",
"key",
"=",
"key",
")",
",",
"key",
"=",
"key",
")",
":",
"rasters_final",
".",
"append",
"(",
"reduce",
"(",
"_fill_pixels",
",",
"rasters_group",
")",
")",
"return",
"rasters_final"
] | Combine the common bands. | [
"Combine",
"the",
"common",
"bands",
"."
] | e752cd3ee71e339f79717e526fde362e80055d9e | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L197-L212 | train |
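A toy version of the grouping step above, using (band, value) pairs in place of rasters: band order is first-seen order (a plain dict stands in for IndexedSet), and each group is folded left to right.

from functools import reduce
from itertools import groupby

pairs = [('red', 1), ('nir', 2), ('red', 3)]
order = {}
for band, _ in pairs:
    order.setdefault(band, len(order))  # first-seen index per band

def key(p):
    return order[p[0]]

merged = [reduce(lambda a, b: (a[0], a[1] + b[1]), group)
          for _, group in groupby(sorted(pairs, key=key), key=key)]
print(merged)  # [('red', 4), ('nir', 2)]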
satellogic/telluric | telluric/georaster.py | _explode_raster | def _explode_raster(raster, band_names=[]):
# type: (_Raster, Iterable[str]) -> List[_Raster]
"""Splits a raster into multiband rasters.
"""
# Using band_names=[] does no harm because we are not mutating it in place
# and it makes MyPy happy
if not band_names:
band_names = raster.band_names
else:
band_names = list(IndexedSet(raster.band_names).intersection(band_names))
return [_Raster(image=raster.bands_data([band_name]), band_names=[band_name]) for band_name in band_names] | python | def _explode_raster(raster, band_names=[]):
# type: (_Raster, Iterable[str]) -> List[_Raster]
"""Splits a raster into multiband rasters.
"""
# Using band_names=[] does no harm because we are not mutating it in place
# and it makes MyPy happy
if not band_names:
band_names = raster.band_names
else:
band_names = list(IndexedSet(raster.band_names).intersection(band_names))
return [_Raster(image=raster.bands_data([band_name]), band_names=[band_name]) for band_name in band_names] | [
"def",
"_explode_raster",
"(",
"raster",
",",
"band_names",
"=",
"[",
"]",
")",
":",
"# type: (_Raster, Iterable[str]) -> List[_Raster]",
"# Using band_names=[] does no harm because we are not mutating it in place",
"# and it makes MyPy happy",
"if",
"not",
"band_names",
":",
"band_names",
"=",
"raster",
".",
"band_names",
"else",
":",
"band_names",
"=",
"list",
"(",
"IndexedSet",
"(",
"raster",
".",
"band_names",
")",
".",
"intersection",
"(",
"band_names",
")",
")",
"return",
"[",
"_Raster",
"(",
"image",
"=",
"raster",
".",
"bands_data",
"(",
"[",
"band_name",
"]",
")",
",",
"band_names",
"=",
"[",
"band_name",
"]",
")",
"for",
"band_name",
"in",
"band_names",
"]"
] | Splits a multiband raster into single-band rasters. | [
"Splits",
"a",
"raster",
"into",
"multiband",
"rasters",
"."
] | e752cd3ee71e339f79717e526fde362e80055d9e | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L244-L256 | train |
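The band_names=[] default in _explode_raster is safe only because the list is never mutated; for contrast, the classic pitfall the inline comment alludes to:

def bad(acc=[]):        # the default list is created once and shared
    acc.append(1)
    return acc

print(bad(), bad())     # [1, 1] [1, 1] -- state leaks across calls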
satellogic/telluric | telluric/georaster.py | _fill_pixels | def _fill_pixels(one, other):
# type: (_Raster, _Raster) -> _Raster
"""Merges two single band rasters with the same band by filling the pixels according to depth.
"""
assert len(one.band_names) == len(other.band_names) == 1, "Rasters are not single band"
# We raise an error if the intersection is empty.
# Other options include returning an "empty" raster or just None.
# The problem with the former is that GeoRaster2 expects a 2D or 3D
# numpy array, so there is no obvious way to signal that this raster
# has no bands. Also, returning a (1, 1, 0) numpy array is useless
# for future concatenation, so the expected shape should be used
# instead. The problem with the latter is that it breaks concatenation
# anyway and requires special attention. Suggestions welcome.
if one.band_names != other.band_names:
raise ValueError("rasters have no bands in common, use another merge strategy")
new_image = one.image.copy()
other_image = other.image
# The values that I want to mask are the ones that:
# * Were already masked in the other array, _or_
# * Were already unmasked in the one array, so I don't overwrite them
other_values_mask = (np.ma.getmaskarray(other_image)[0] | (~np.ma.getmaskarray(one.image)[0]))
# Reshape the mask to fit the future array
other_values_mask = other_values_mask[None, ...]
# Overwrite the values that I don't want to mask
new_image[~other_values_mask] = other_image[~other_values_mask]
# In other words, the values that I wanted to write are the ones that:
# * Were already masked in the one array, _and_
# * Were not masked in the other array
# The reason for using the inverted form is to retain the semantics
# of "masked=True" that apply for masked arrays. The same logic
# could be written, using the De Morgan's laws, as
# other_values_mask = (one.image.mask[0] & (~other_image.mask[0])
# other_values_mask = other_values_mask[None, ...]
# new_image[other_values_mask] = other_image[other_values_mask]
# but here the word "mask" does not mean the same as in masked arrays.
return _Raster(image=new_image, band_names=one.band_names) | python | def _fill_pixels(one, other):
# type: (_Raster, _Raster) -> _Raster
"""Merges two single band rasters with the same band by filling the pixels according to depth.
"""
assert len(one.band_names) == len(other.band_names) == 1, "Rasters are not single band"
# We raise an error if the intersection is empty.
# Other options include returning an "empty" raster or just None.
# The problem with the former is that GeoRaster2 expects a 2D or 3D
# numpy array, so there is no obvious way to signal that this raster
# has no bands. Also, returning a (1, 1, 0) numpy array is useless
# for future concatenation, so the expected shape should be used
# instead. The problem with the latter is that it breaks concatenation
# anyway and requires special attention. Suggestions welcome.
if one.band_names != other.band_names:
raise ValueError("rasters have no bands in common, use another merge strategy")
new_image = one.image.copy()
other_image = other.image
# The values that I want to mask are the ones that:
# * Were already masked in the other array, _or_
# * Were already unmasked in the one array, so I don't overwrite them
other_values_mask = (np.ma.getmaskarray(other_image)[0] | (~np.ma.getmaskarray(one.image)[0]))
# Reshape the mask to fit the future array
other_values_mask = other_values_mask[None, ...]
# Overwrite the values that I don't want to mask
new_image[~other_values_mask] = other_image[~other_values_mask]
# In other words, the values that I wanted to write are the ones that:
# * Were already masked in the one array, _and_
# * Were not masked in the other array
# The reason for using the inverted form is to retain the semantics
# of "masked=True" that apply for masked arrays. The same logic
# could be written, using the De Morgan's laws, as
# other_values_mask = (one.image.mask[0] & (~other_image.mask[0])
# other_values_mask = other_values_mask[None, ...]
# new_image[other_values_mask] = other_image[other_values_mask]
# but here the word "mask" does not mean the same as in masked arrays.
return _Raster(image=new_image, band_names=one.band_names) | [
"def",
"_fill_pixels",
"(",
"one",
",",
"other",
")",
":",
"# type: (_Raster, _Raster) -> _Raster",
"assert",
"len",
"(",
"one",
".",
"band_names",
")",
"==",
"len",
"(",
"other",
".",
"band_names",
")",
"==",
"1",
",",
"\"Rasters are not single band\"",
"# We raise an error in the intersection is empty.",
"# Other options include returning an \"empty\" raster or just None.",
"# The problem with the former is that GeoRaster2 expects a 2D or 3D",
"# numpy array, so there is no obvious way to signal that this raster",
"# has no bands. Also, returning a (1, 1, 0) numpy array is useless",
"# for future concatenation, so the expected shape should be used",
"# instead. The problem with the latter is that it breaks concatenation",
"# anyway and requires special attention. Suggestions welcome.",
"if",
"one",
".",
"band_names",
"!=",
"other",
".",
"band_names",
":",
"raise",
"ValueError",
"(",
"\"rasters have no bands in common, use another merge strategy\"",
")",
"new_image",
"=",
"one",
".",
"image",
".",
"copy",
"(",
")",
"other_image",
"=",
"other",
".",
"image",
"# The values that I want to mask are the ones that:",
"# * Were already masked in the other array, _or_",
"# * Were already unmasked in the one array, so I don't overwrite them",
"other_values_mask",
"=",
"(",
"np",
".",
"ma",
".",
"getmaskarray",
"(",
"other_image",
")",
"[",
"0",
"]",
"|",
"(",
"~",
"np",
".",
"ma",
".",
"getmaskarray",
"(",
"one",
".",
"image",
")",
"[",
"0",
"]",
")",
")",
"# Reshape the mask to fit the future array",
"other_values_mask",
"=",
"other_values_mask",
"[",
"None",
",",
"...",
"]",
"# Overwrite the values that I don't want to mask",
"new_image",
"[",
"~",
"other_values_mask",
"]",
"=",
"other_image",
"[",
"~",
"other_values_mask",
"]",
"# In other words, the values that I wanted to write are the ones that:",
"# * Were already masked in the one array, _and_",
"# * Were not masked in the other array",
"# The reason for using the inverted form is to retain the semantics",
"# of \"masked=True\" that apply for masked arrays. The same logic",
"# could be written, using the De Morgan's laws, as",
"# other_values_mask = (one.image.mask[0] & (~other_image.mask[0])",
"# other_values_mask = other_values_mask[None, ...]",
"# new_image[other_values_mask] = other_image[other_values_mask]",
"# but here the word \"mask\" does not mean the same as in masked arrays.",
"return",
"_Raster",
"(",
"image",
"=",
"new_image",
",",
"band_names",
"=",
"one",
".",
"band_names",
")"
] | Merges two single band rasters with the same band by filling the pixels according to depth. | [
"Merges",
"two",
"single",
"band",
"rasters",
"with",
"the",
"same",
"band",
"by",
"filling",
"the",
"pixels",
"according",
"to",
"depth",
"."
] | e752cd3ee71e339f79717e526fde362e80055d9e | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L286-L329 | train |
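A tiny numpy demonstration of the fill rule, reduced to a single 2D band plane and written in the De Morgan form mentioned in the comments: a pixel is taken from `other` only if it is masked in `one` and unmasked in `other`.

import numpy as np

one = np.ma.masked_array([[1, 2]], mask=[[True, False]])
other = np.ma.masked_array([[9, 9]], mask=[[False, False]])

take = np.ma.getmaskarray(one) & ~np.ma.getmaskarray(other)
filled = one.copy()
filled[take] = other[take]
print(filled)  # [[9 2]] -- only the pixel masked in `one` was filled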
satellogic/telluric | telluric/georaster.py | _stack_bands | def _stack_bands(one, other):
# type: (_Raster, _Raster) -> _Raster
"""Merges two rasters with non overlapping bands by stacking the bands.
"""
assert set(one.band_names).intersection(set(other.band_names)) == set()
# We raise an error if the bands are the same. See above.
if one.band_names == other.band_names:
raise ValueError("rasters have the same bands, use another merge strategy")
# Apply "or" to the mask in the same way rasterio does, see
# https://mapbox.github.io/rasterio/topics/masks.html#dataset-masks
# In other words, mask the values that are already masked in either
# of the two rasters, since one mask per band is not supported
new_mask = np.ma.getmaskarray(one.image)[0] | np.ma.getmaskarray(other.image)[0]
# Concatenate the data along the band axis and apply the mask
new_image = np.ma.masked_array(
np.concatenate([
one.image.data,
other.image.data
]),
mask=[new_mask] * (one.image.shape[0] + other.image.shape[0])
)
new_bands = one.band_names + other.band_names
# We don't copy image and mask here, due to performance issues;
# this output should not be used without eventually being copied.
# In this context we are copying the object at the end of merge_all, merge_first and merge
return _Raster(image=new_image, band_names=new_bands) | python | def _stack_bands(one, other):
# type: (_Raster, _Raster) -> _Raster
"""Merges two rasters with non overlapping bands by stacking the bands.
"""
assert set(one.band_names).intersection(set(other.band_names)) == set()
# We raise an error if the bands are the same. See above.
if one.band_names == other.band_names:
raise ValueError("rasters have the same bands, use another merge strategy")
# Apply "or" to the mask in the same way rasterio does, see
# https://mapbox.github.io/rasterio/topics/masks.html#dataset-masks
# In other words, mask the values that are already masked in either
# of the two rasters, since one mask per band is not supported
new_mask = np.ma.getmaskarray(one.image)[0] | np.ma.getmaskarray(other.image)[0]
# Concatenate the data along the band axis and apply the mask
new_image = np.ma.masked_array(
np.concatenate([
one.image.data,
other.image.data
]),
mask=[new_mask] * (one.image.shape[0] + other.image.shape[0])
)
new_bands = one.band_names + other.band_names
# We don't copy image and mask here, due to performance issues;
# this output should not be used without eventually being copied.
# In this context we are copying the object at the end of merge_all, merge_first and merge
return _Raster(image=new_image, band_names=new_bands) | [
"def",
"_stack_bands",
"(",
"one",
",",
"other",
")",
":",
"# type: (_Raster, _Raster) -> _Raster",
"assert",
"set",
"(",
"one",
".",
"band_names",
")",
".",
"intersection",
"(",
"set",
"(",
"other",
".",
"band_names",
")",
")",
"==",
"set",
"(",
")",
"# We raise an error in the bands are the same. See above.",
"if",
"one",
".",
"band_names",
"==",
"other",
".",
"band_names",
":",
"raise",
"ValueError",
"(",
"\"rasters have the same bands, use another merge strategy\"",
")",
"# Apply \"or\" to the mask in the same way rasterio does, see",
"# https://mapbox.github.io/rasterio/topics/masks.html#dataset-masks",
"# In other words, mask the values that are already masked in either",
"# of the two rasters, since one mask per band is not supported",
"new_mask",
"=",
"np",
".",
"ma",
".",
"getmaskarray",
"(",
"one",
".",
"image",
")",
"[",
"0",
"]",
"|",
"np",
".",
"ma",
".",
"getmaskarray",
"(",
"other",
".",
"image",
")",
"[",
"0",
"]",
"# Concatenate the data along the band axis and apply the mask",
"new_image",
"=",
"np",
".",
"ma",
".",
"masked_array",
"(",
"np",
".",
"concatenate",
"(",
"[",
"one",
".",
"image",
".",
"data",
",",
"other",
".",
"image",
".",
"data",
"]",
")",
",",
"mask",
"=",
"[",
"new_mask",
"]",
"*",
"(",
"one",
".",
"image",
".",
"shape",
"[",
"0",
"]",
"+",
"other",
".",
"image",
".",
"shape",
"[",
"0",
"]",
")",
")",
"new_bands",
"=",
"one",
".",
"band_names",
"+",
"other",
".",
"band_names",
"# We don't copy image and mask here, due to performance issues,",
"# this output should not use without eventually being copied",
"# In this context we are copying the object in the end of merge_all merge_first and merge",
"return",
"_Raster",
"(",
"image",
"=",
"new_image",
",",
"band_names",
"=",
"new_bands",
")"
] | Merges two rasters with non-overlapping bands by stacking the bands. | [
"Merges",
"two",
"rasters",
"with",
"non",
"overlapping",
"bands",
"by",
"stacking",
"the",
"bands",
"."
] | e752cd3ee71e339f79717e526fde362e80055d9e | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L332-L362 | train |
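The same idea in bare numpy: data arrays are concatenated along the band axis, and a single shared mask (the union of the two first-band masks) is replicated for every output band.

import numpy as np

a = np.ma.masked_array(np.ones((1, 2, 2)))
b = np.ma.masked_array(np.full((1, 2, 2), 5.0),
                       mask=[[[True, False], [False, False]]])

shared = np.ma.getmaskarray(a)[0] | np.ma.getmaskarray(b)[0]
stacked = np.ma.masked_array(np.concatenate([a.data, b.data]),
                             mask=[shared] * 2)
print(stacked.shape)  # (2, 2, 2); pixel (0, 0) is masked in both bands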
satellogic/telluric | telluric/georaster.py | merge_two | def merge_two(one, other, merge_strategy=MergeStrategy.UNION, silent=False, pixel_strategy=PixelStrategy.FIRST):
# type: (GeoRaster2, GeoRaster2, MergeStrategy, bool, PixelStrategy) -> GeoRaster2
"""Merge two rasters into one.
Parameters
----------
one : GeoRaster2
Left raster to merge.
other : GeoRaster2
Right raster to merge.
merge_strategy : MergeStrategy, optional
Merge strategy, from :py:data:`telluric.georaster.MergeStrategy` (default to "union").
silent : bool, optional
Whether to raise errors or return some result, default to False (raise errors).
pixel_strategy: PixelStrategy, optional
Pixel strategy, from :py:data:`telluric.georaster.PixelStrategy` (default to "top").
Returns
-------
GeoRaster2
"""
other_res = _prepare_other_raster(one, other)
if other_res is None:
if silent:
return one
else:
raise ValueError("rasters do not intersect")
else:
other = other.copy_with(image=other_res.image, band_names=other_res.band_names) # To make MyPy happy
# Create a list of single band rasters
# Cropping won't happen twice, since other was already cropped
all_band_names, projected_rasters = _prepare_rasters([other], merge_strategy, first=one)
if not all_band_names and not silent:
raise ValueError("rasters have no bands in common, use another merge strategy")
prepared_rasters = _apply_pixel_strategy(projected_rasters, pixel_strategy)
prepared_rasters = _explode_rasters(prepared_rasters, all_band_names)
# Merge common bands
prepared_rasters = _merge_common_bands(_explode_raster(one, all_band_names) + prepared_rasters)
# Merge all bands
raster = reduce(_stack_bands, prepared_rasters)
return one.copy_with(image=raster.image, band_names=raster.band_names) | python | def merge_two(one, other, merge_strategy=MergeStrategy.UNION, silent=False, pixel_strategy=PixelStrategy.FIRST):
# type: (GeoRaster2, GeoRaster2, MergeStrategy, bool, PixelStrategy) -> GeoRaster2
"""Merge two rasters into one.
Parameters
----------
one : GeoRaster2
Left raster to merge.
other : GeoRaster2
Right raster to merge.
merge_strategy : MergeStrategy, optional
Merge strategy, from :py:data:`telluric.georaster.MergeStrategy` (default to "union").
silent : bool, optional
Whether to raise errors or return some result, default to False (raise errors).
pixel_strategy: PixelStrategy, optional
Pixel strategy, from :py:data:`telluric.georaster.PixelStrategy` (default to "top").
Returns
-------
GeoRaster2
"""
other_res = _prepare_other_raster(one, other)
if other_res is None:
if silent:
return one
else:
raise ValueError("rasters do not intersect")
else:
other = other.copy_with(image=other_res.image, band_names=other_res.band_names) # To make MyPy happy
# Create a list of single band rasters
# Cropping won't happen twice, since other was already cropped
all_band_names, projected_rasters = _prepare_rasters([other], merge_strategy, first=one)
if not all_band_names and not silent:
raise ValueError("rasters have no bands in common, use another merge strategy")
prepared_rasters = _apply_pixel_strategy(projected_rasters, pixel_strategy)
prepared_rasters = _explode_rasters(prepared_rasters, all_band_names)
# Merge common bands
prepared_rasters = _merge_common_bands(_explode_raster(one, all_band_names) + prepared_rasters)
# Merge all bands
raster = reduce(_stack_bands, prepared_rasters)
return one.copy_with(image=raster.image, band_names=raster.band_names) | [
"def",
"merge_two",
"(",
"one",
",",
"other",
",",
"merge_strategy",
"=",
"MergeStrategy",
".",
"UNION",
",",
"silent",
"=",
"False",
",",
"pixel_strategy",
"=",
"PixelStrategy",
".",
"FIRST",
")",
":",
"# type: (GeoRaster2, GeoRaster2, MergeStrategy, bool, PixelStrategy) -> GeoRaster2",
"other_res",
"=",
"_prepare_other_raster",
"(",
"one",
",",
"other",
")",
"if",
"other_res",
"is",
"None",
":",
"if",
"silent",
":",
"return",
"one",
"else",
":",
"raise",
"ValueError",
"(",
"\"rasters do not intersect\"",
")",
"else",
":",
"other",
"=",
"other",
".",
"copy_with",
"(",
"image",
"=",
"other_res",
".",
"image",
",",
"band_names",
"=",
"other_res",
".",
"band_names",
")",
"# To make MyPy happy",
"# Create a list of single band rasters",
"# Cropping won't happen twice, since other was already cropped",
"all_band_names",
",",
"projected_rasters",
"=",
"_prepare_rasters",
"(",
"[",
"other",
"]",
",",
"merge_strategy",
",",
"first",
"=",
"one",
")",
"if",
"not",
"all_band_names",
"and",
"not",
"silent",
":",
"raise",
"ValueError",
"(",
"\"rasters have no bands in common, use another merge strategy\"",
")",
"prepared_rasters",
"=",
"_apply_pixel_strategy",
"(",
"projected_rasters",
",",
"pixel_strategy",
")",
"prepared_rasters",
"=",
"_explode_rasters",
"(",
"prepared_rasters",
",",
"all_band_names",
")",
"# Merge common bands",
"prepared_rasters",
"=",
"_merge_common_bands",
"(",
"_explode_raster",
"(",
"one",
",",
"all_band_names",
")",
"+",
"prepared_rasters",
")",
"# Merge all bands",
"raster",
"=",
"reduce",
"(",
"_stack_bands",
",",
"prepared_rasters",
")",
"return",
"one",
".",
"copy_with",
"(",
"image",
"=",
"raster",
".",
"image",
",",
"band_names",
"=",
"raster",
".",
"band_names",
")"
] | Merge two rasters into one.
Parameters
----------
one : GeoRaster2
Left raster to merge.
other : GeoRaster2
Right raster to merge.
merge_strategy : MergeStrategy, optional
Merge strategy, from :py:data:`telluric.georaster.MergeStrategy` (defaults to "union").
silent : bool, optional
Whether to raise errors or return some result, defaults to False (raise errors).
pixel_strategy: PixelStrategy, optional
Pixel strategy, from :py:data:`telluric.georaster.PixelStrategy` (defaults to "top").
Returns
-------
GeoRaster2 | [
"Merge",
"two",
"rasters",
"into",
"one",
"."
] | e752cd3ee71e339f79717e526fde362e80055d9e | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L365-L414 | train |
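A hedged usage sketch for merge_two; the file names are placeholders and telluric must be installed, so treat this as illustrative rather than a verbatim recipe:

from telluric.georaster import GeoRaster2, MergeStrategy, merge_two

one = GeoRaster2.open('scene_a.tif')
other = GeoRaster2.open('scene_b.tif')
# silent=True returns `one` unchanged instead of raising
# when the rasters do not intersect.
merged = merge_two(one, other, merge_strategy=MergeStrategy.UNION, silent=True)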
satellogic/telluric | telluric/georaster.py | _Raster._set_image | def _set_image(self, image, nodata=None):
"""
Set self._image.
:param image: supported: np.ma.array, np.array, TODO: PIL image
:param nodata: if provided image is array (not masked array), treat pixels with value=nodata as nodata
:return:
"""
# convert to masked array:
if isinstance(image, np.ma.core.MaskedArray):
masked = image
elif isinstance(image, np.core.ndarray):
masked = self._build_masked_array(image, nodata)
else:
raise GeoRaster2NotImplementedError('only ndarray or masked array supported, got %s' % type(image))
# make sure array is 3d:
if len(masked.shape) == 3:
self._image = masked
elif len(masked.shape) == 2:
self._image = masked[np.newaxis, :, :]
else:
raise GeoRaster2Error('expected 2d or 3d image, got shape=%s' % masked.shape)
# update shape
if self._shape is None:
self._set_shape(self._image.shape)
self._image_after_load_validations()
if self._image_readonly:
self._image.setflags(write=0) | python | def _set_image(self, image, nodata=None):
"""
Set self._image.
:param image: supported: np.ma.array, np.array, TODO: PIL image
:param nodata: if provided image is array (not masked array), treat pixels with value=nodata as nodata
:return:
"""
# convert to masked array:
if isinstance(image, np.ma.core.MaskedArray):
masked = image
elif isinstance(image, np.core.ndarray):
masked = self._build_masked_array(image, nodata)
else:
raise GeoRaster2NotImplementedError('only ndarray or masked array supported, got %s' % type(image))
# make sure array is 3d:
if len(masked.shape) == 3:
self._image = masked
elif len(masked.shape) == 2:
self._image = masked[np.newaxis, :, :]
else:
raise GeoRaster2Error('expected 2d or 3d image, got shape=%s' % masked.shape)
# update shape
if self._shape is None:
self._set_shape(self._image.shape)
self._image_after_load_validations()
if self._image_readonly:
self._image.setflags(write=0) | [
"def",
"_set_image",
"(",
"self",
",",
"image",
",",
"nodata",
"=",
"None",
")",
":",
"# convert to masked array:",
"if",
"isinstance",
"(",
"image",
",",
"np",
".",
"ma",
".",
"core",
".",
"MaskedArray",
")",
":",
"masked",
"=",
"image",
"elif",
"isinstance",
"(",
"image",
",",
"np",
".",
"core",
".",
"ndarray",
")",
":",
"masked",
"=",
"self",
".",
"_build_masked_array",
"(",
"image",
",",
"nodata",
")",
"else",
":",
"raise",
"GeoRaster2NotImplementedError",
"(",
"'only ndarray or masked array supported, got %s'",
"%",
"type",
"(",
"image",
")",
")",
"# make sure array is 3d:",
"if",
"len",
"(",
"masked",
".",
"shape",
")",
"==",
"3",
":",
"self",
".",
"_image",
"=",
"masked",
"elif",
"len",
"(",
"masked",
".",
"shape",
")",
"==",
"2",
":",
"self",
".",
"_image",
"=",
"masked",
"[",
"np",
".",
"newaxis",
",",
":",
",",
":",
"]",
"else",
":",
"raise",
"GeoRaster2Error",
"(",
"'expected 2d or 3d image, got shape=%s'",
"%",
"masked",
".",
"shape",
")",
"# update shape",
"if",
"self",
".",
"_shape",
"is",
"None",
":",
"self",
".",
"_set_shape",
"(",
"self",
".",
"_image",
".",
"shape",
")",
"self",
".",
"_image_after_load_validations",
"(",
")",
"if",
"self",
".",
"_image_readonly",
":",
"self",
".",
"_image",
".",
"setflags",
"(",
"write",
"=",
"0",
")"
] | Set self._image.
:param image: supported: np.ma.array, np.array, TODO: PIL image
:param nodata: if provided image is array (not masked array), treat pixels with value=nodata as nodata
:return: | [
"Set",
"self",
".",
"_image",
"."
] | e752cd3ee71e339f79717e526fde362e80055d9e | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L467-L497 | train |
satellogic/telluric | telluric/georaster.py | GeoRaster2.from_wms | def from_wms(cls, filename, vector, resolution, destination_file=None):
"""Create georaster from the web service definition file."""
doc = wms_vrt(filename,
bounds=vector,
resolution=resolution).tostring()
filename = cls._save_to_destination_file(doc, destination_file)
return GeoRaster2.open(filename) | python | def from_wms(cls, filename, vector, resolution, destination_file=None):
"""Create georaster from the web service definition file."""
doc = wms_vrt(filename,
bounds=vector,
resolution=resolution).tostring()
filename = cls._save_to_destination_file(doc, destination_file)
return GeoRaster2.open(filename) | [
"def",
"from_wms",
"(",
"cls",
",",
"filename",
",",
"vector",
",",
"resolution",
",",
"destination_file",
"=",
"None",
")",
":",
"doc",
"=",
"wms_vrt",
"(",
"filename",
",",
"bounds",
"=",
"vector",
",",
"resolution",
"=",
"resolution",
")",
".",
"tostring",
"(",
")",
"filename",
"=",
"cls",
".",
"_save_to_destination_file",
"(",
"doc",
",",
"destination_file",
")",
"return",
"GeoRaster2",
".",
"open",
"(",
"filename",
")"
] | Create georaster from the web service definition file. | [
"Create",
"georaster",
"from",
"the",
"web",
"service",
"definition",
"file",
"."
] | e752cd3ee71e339f79717e526fde362e80055d9e | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L626-L632 | train |
satellogic/telluric | telluric/georaster.py | GeoRaster2.from_rasters | def from_rasters(cls, rasters, relative_to_vrt=True, destination_file=None, nodata=None, mask_band=None):
"""Create georaster out of a list of rasters."""
if isinstance(rasters, list):
doc = raster_list_vrt(rasters, relative_to_vrt, nodata, mask_band).tostring()
else:
doc = raster_collection_vrt(rasters, relative_to_vrt, nodata, mask_band).tostring()
filename = cls._save_to_destination_file(doc, destination_file)
return GeoRaster2.open(filename) | python | def from_rasters(cls, rasters, relative_to_vrt=True, destination_file=None, nodata=None, mask_band=None):
"""Create georaster out of a list of rasters."""
if isinstance(rasters, list):
doc = raster_list_vrt(rasters, relative_to_vrt, nodata, mask_band).tostring()
else:
doc = raster_collection_vrt(rasters, relative_to_vrt, nodata, mask_band).tostring()
filename = cls._save_to_destination_file(doc, destination_file)
return GeoRaster2.open(filename) | [
"def",
"from_rasters",
"(",
"cls",
",",
"rasters",
",",
"relative_to_vrt",
"=",
"True",
",",
"destination_file",
"=",
"None",
",",
"nodata",
"=",
"None",
",",
"mask_band",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"rasters",
",",
"list",
")",
":",
"doc",
"=",
"raster_list_vrt",
"(",
"rasters",
",",
"relative_to_vrt",
",",
"nodata",
",",
"mask_band",
")",
".",
"tostring",
"(",
")",
"else",
":",
"doc",
"=",
"raster_collection_vrt",
"(",
"rasters",
",",
"relative_to_vrt",
",",
"nodata",
",",
"mask_band",
")",
".",
"tostring",
"(",
")",
"filename",
"=",
"cls",
".",
"_save_to_destination_file",
"(",
"doc",
",",
"destination_file",
")",
"return",
"GeoRaster2",
".",
"open",
"(",
"filename",
")"
] | Create georaster out of a list of rasters. | [
"Create",
"georaster",
"out",
"of",
"a",
"list",
"of",
"rasters",
"."
] | e752cd3ee71e339f79717e526fde362e80055d9e | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L635-L642 | train |
satellogic/telluric | telluric/georaster.py | GeoRaster2.open | def open(cls, filename, band_names=None, lazy_load=True, mutable=False, **kwargs):
"""
Read a georaster from a file.
:param filename: url
:param band_names: list of strings, or string.
if None - will try to read from image, otherwise - these will be ['0', ..]
:param lazy_load: if True - do not load anything
:return: GeoRaster2
"""
if mutable:
geo_raster = MutableGeoRaster(filename=filename, band_names=band_names, **kwargs)
else:
geo_raster = cls(filename=filename, band_names=band_names, **kwargs)
if not lazy_load:
geo_raster._populate_from_rasterio_object(read_image=True)
return geo_raster | python | def open(cls, filename, band_names=None, lazy_load=True, mutable=False, **kwargs):
"""
Read a georaster from a file.
:param filename: url
:param band_names: list of strings, or string.
if None - will try to read from image, otherwise - these will be ['0', ..]
:param lazy_load: if True - do not load anything
:return: GeoRaster2
"""
if mutable:
geo_raster = MutableGeoRaster(filename=filename, band_names=band_names, **kwargs)
else:
geo_raster = cls(filename=filename, band_names=band_names, **kwargs)
if not lazy_load:
geo_raster._populate_from_rasterio_object(read_image=True)
return geo_raster | [
"def",
"open",
"(",
"cls",
",",
"filename",
",",
"band_names",
"=",
"None",
",",
"lazy_load",
"=",
"True",
",",
"mutable",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"mutable",
":",
"geo_raster",
"=",
"MutableGeoRaster",
"(",
"filename",
"=",
"filename",
",",
"band_names",
"=",
"band_names",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"geo_raster",
"=",
"cls",
"(",
"filename",
"=",
"filename",
",",
"band_names",
"=",
"band_names",
",",
"*",
"*",
"kwargs",
")",
"if",
"not",
"lazy_load",
":",
"geo_raster",
".",
"_populate_from_rasterio_object",
"(",
"read_image",
"=",
"True",
")",
"return",
"geo_raster"
] | Read a georaster from a file.
:param filename: url
:param band_names: list of strings, or string.
if None - will try to read from image, otherwise - these will be ['0', ..]
:param lazy_load: if True - do not load anything
:return: GeoRaster2 | [
"Read",
"a",
"georaster",
"from",
"a",
"file",
"."
] | e752cd3ee71e339f79717e526fde362e80055d9e | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L645-L661 | train |
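A hypothetical sketch contrasting the open flags above ('scene.tif' is a placeholder file). With the default lazy_load=True, no pixels are read until a property is first accessed:

from telluric.georaster import GeoRaster2

lazy = GeoRaster2.open('scene.tif')                    # no pixel I/O yet
eager = GeoRaster2.open('scene.tif', lazy_load=False)  # reads pixels now
mutable = GeoRaster2.open('scene.tif', mutable=True)   # MutableGeoRaster
print(lazy.crs, eager.shape)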
satellogic/telluric | telluric/georaster.py | GeoRaster2.tags | def tags(cls, filename, namespace=None):
"""Extract tags from file."""
return cls._raster_opener(filename).tags(ns=namespace) | python | def tags(cls, filename, namespace=None):
"""Extract tags from file."""
return cls._raster_opener(filename).tags(ns=namespace) | [
"def",
"tags",
"(",
"cls",
",",
"filename",
",",
"namespace",
"=",
"None",
")",
":",
"return",
"cls",
".",
"_raster_opener",
"(",
"filename",
")",
".",
"tags",
"(",
"ns",
"=",
"namespace",
")"
] | Extract tags from file. | [
"Extract",
"tags",
"from",
"file",
"."
] | e752cd3ee71e339f79717e526fde362e80055d9e | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L724-L726 | train |
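A hypothetical sketch of the tags classmethod above; since it is a classmethod, file metadata can be read without constructing a raster object ('scene.tif' is a placeholder path):

from telluric.georaster import GeoRaster2

print(GeoRaster2.tags('scene.tif'))                              # default namespace
print(GeoRaster2.tags('scene.tif', namespace='IMAGE_STRUCTURE'))  # a GDAL metadata domain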
satellogic/telluric | telluric/georaster.py | GeoRaster2.image | def image(self):
"""Raster bitmap in numpy array."""
if self._image is None:
self._populate_from_rasterio_object(read_image=True)
return self._image | python | def image(self):
"""Raster bitmap in numpy array."""
if self._image is None:
self._populate_from_rasterio_object(read_image=True)
return self._image | [
"def",
"image",
"(",
"self",
")",
":",
"if",
"self",
".",
"_image",
"is",
"None",
":",
"self",
".",
"_populate_from_rasterio_object",
"(",
"read_image",
"=",
"True",
")",
"return",
"self",
".",
"_image"
] | Raster bitmap in numpy array. | [
"Raster",
"bitmap",
"in",
"numpy",
"array",
"."
] | e752cd3ee71e339f79717e526fde362e80055d9e | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L729-L733 | train |
satellogic/telluric | telluric/georaster.py | GeoRaster2.crs | def crs(self): # type: () -> CRS
"""Raster crs."""
if self._crs is None:
self._populate_from_rasterio_object(read_image=False)
return self._crs | python | def crs(self): # type: () -> CRS
"""Raster crs."""
if self._crs is None:
self._populate_from_rasterio_object(read_image=False)
return self._crs | [
"def",
"crs",
"(",
"self",
")",
":",
"# type: () -> CRS",
"if",
"self",
".",
"_crs",
"is",
"None",
":",
"self",
".",
"_populate_from_rasterio_object",
"(",
"read_image",
"=",
"False",
")",
"return",
"self",
".",
"_crs"
] | Raster crs. | [
"Raster",
"crs",
"."
] | e752cd3ee71e339f79717e526fde362e80055d9e | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L758-L762 | train |
satellogic/telluric | telluric/georaster.py | GeoRaster2.shape | def shape(self):
"""Raster shape."""
if self._shape is None:
self._populate_from_rasterio_object(read_image=False)
return self._shape | python | def shape(self):
"""Raster shape."""
if self._shape is None:
self._populate_from_rasterio_object(read_image=False)
return self._shape | [
"def",
"shape",
"(",
"self",
")",
":",
"if",
"self",
".",
"_shape",
"is",
"None",
":",
"self",
".",
"_populate_from_rasterio_object",
"(",
"read_image",
"=",
"False",
")",
"return",
"self",
".",
"_shape"
] | Raster shape. | [
"Raster",
"shape",
"."
] | e752cd3ee71e339f79717e526fde362e80055d9e | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L765-L769 | train |
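A hypothetical sketch of the lazy accessors in the entries above (.image, .crs, .shape): each triggers _populate_from_rasterio_object on first use and is cached on the instance afterwards ('scene.tif' is a placeholder path):

from telluric.georaster import GeoRaster2

raster = GeoRaster2.open('scene.tif')
print(raster.crs)      # metadata only (read_image=False)
print(raster.shape)    # already populated, no further I/O
pixels = raster.image  # first pixel read (read_image=True), a masked array
print(pixels.shape)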
satellogic/telluric | telluric/georaster.py | GeoRaster2.source_file | def source_file(self):
""" When using open, returns the filename used
"""
if self._filename is None:
self._filename = self._as_in_memory_geotiff()._filename
return self._filename | python | def source_file(self):
""" When using open, returns the filename used
"""
if self._filename is None:
self._filename = self._as_in_memory_geotiff()._filename
return self._filename | [
"def",
"source_file",
"(",
"self",
")",
":",
"if",
"self",
".",
"_filename",
"is",
"None",
":",
"self",
".",
"_filename",
"=",
"self",
".",
"_as_in_memory_geotiff",
"(",
")",
".",
"_filename",
"return",
"self",
".",
"_filename"
] | When using open, returns the filename used | [
"When",
"using",
"open",
"returns",
"the",
"filename",
"used"
] | e752cd3ee71e339f79717e526fde362e80055d9e | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L788-L793 | train |
satellogic/telluric | telluric/georaster.py | GeoRaster2.blockshapes | def blockshapes(self):
"""Raster all bands block shape."""
if self._blockshapes is None:
if self._filename:
self._populate_from_rasterio_object(read_image=False)
else:
# if no file is attached to the raster set the shape of each band to be the data array size
self._blockshapes = [(self.height, self.width) for z in range(self.num_bands)]
return self._blockshapes | python | def blockshapes(self):
"""Raster all bands block shape."""
if self._blockshapes is None:
if self._filename:
self._populate_from_rasterio_object(read_image=False)
else:
# if no file is attached to the raster set the shape of each band to be the data array size
self._blockshapes = [(self.height, self.width) for z in range(self.num_bands)]
return self._blockshapes | [
"def",
"blockshapes",
"(",
"self",
")",
":",
"if",
"self",
".",
"_blockshapes",
"is",
"None",
":",
"if",
"self",
".",
"_filename",
":",
"self",
".",
"_populate_from_rasterio_object",
"(",
"read_image",
"=",
"False",
")",
"else",
":",
"# if no file is attached to the raster set the shape of each band to be the data array size",
"self",
".",
"_blockshapes",
"=",
"[",
"(",
"self",
".",
"height",
",",
"self",
".",
"width",
")",
"for",
"z",
"in",
"range",
"(",
"self",
".",
"num_bands",
")",
"]",
"return",
"self",
".",
"_blockshapes"
] | Block shape of each raster band. | [
"Raster",
"all",
"bands",
"block",
"shape",
"."
] | e752cd3ee71e339f79717e526fde362e80055d9e | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L822-L830 | train |
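A hypothetical sketch of blockshapes ('scene.tif' is a placeholder path). For a file-backed raster the block shapes come from the file; for an in-memory raster each band reports the full array size, matching the fallback branch above:

from telluric.georaster import GeoRaster2

raster = GeoRaster2.open('scene.tif')
print(raster.blockshapes)  # e.g. [(512, 512)] * raster.num_bands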
satellogic/telluric | telluric/georaster.py | GeoRaster2.get | def get(self, point):
"""
Get the pixel values at the requested point.
:param point: A GeoVector(POINT) with the coordinates of the values to get
:return: numpy array of values
"""
if not (isinstance(point, GeoVector) and point.type == 'Point'):
raise TypeError('expect GeoVector(Point), got %s' % (point,))
target = self.to_raster(point)
return self.image[:, int(target.y), int(target.x)] | python | def get(self, point):
"""
Get the pixel values at the requested point.
:param point: A GeoVector(POINT) with the coordinates of the values to get
:return: numpy array of values
"""
if not (isinstance(point, GeoVector) and point.type == 'Point'):
raise TypeError('expect GeoVector(Point), got %s' % (point,))
target = self.to_raster(point)
return self.image[:, int(target.y), int(target.x)] | [
"def",
"get",
"(",
"self",
",",
"point",
")",
":",
"if",
"not",
"(",
"isinstance",
"(",
"point",
",",
"GeoVector",
")",
"and",
"point",
".",
"type",
"==",
"'Point'",
")",
":",
"raise",
"TypeError",
"(",
"'expect GeoVector(Point), got %s'",
"%",
"(",
"point",
",",
")",
")",
"target",
"=",
"self",
".",
"to_raster",
"(",
"point",
")",
"return",
"self",
".",
"image",
"[",
":",
",",
"int",
"(",
"target",
".",
"y",
")",
",",
"int",
"(",
"target",
".",
"x",
")",
"]"
] | Get the pixel values at the requested point.
:param point: A GeoVector(POINT) with the coordinates of the values to get
:return: numpy array of values | [
"Get",
"the",
"pixel",
"values",
"at",
"the",
"requested",
"point",
"."
] | e752cd3ee71e339f79717e526fde362e80055d9e | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L978-L989 | train |
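A hypothetical sketch of point sampling with get. The import paths telluric.vectors.GeoVector and telluric.constants.WGS84_CRS are assumptions, as are the file path and coordinates; the point must fall inside the raster footprint:

from shapely.geometry import Point
from telluric.georaster import GeoRaster2
from telluric.vectors import GeoVector
from telluric.constants import WGS84_CRS

raster = GeoRaster2.open('scene.tif')
point = GeoVector(Point(-73.99, 40.72), WGS84_CRS)
values = raster.get(point)  # one value per band
print(dict(zip(raster.band_names, values)))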
satellogic/telluric | telluric/georaster.py | GeoRaster2.copy | def copy(self, mutable=False):
"""Return a copy of this GeoRaster with no modifications.
Can be used to create a mutable copy of the GeoRaster"""
if self.not_loaded():
_cls = self.__class__
if mutable:
_cls = MutableGeoRaster
return _cls.open(self._filename)
return self.copy_with(mutable=mutable) | python | def copy(self, mutable=False):
"""Return a copy of this GeoRaster with no modifications.
Can be used to create a mutable copy of the GeoRaster"""
if self.not_loaded():
_cls = self.__class__
if mutable:
_cls = MutableGeoRaster
return _cls.open(self._filename)
return self.copy_with(mutable=mutable) | [
"def",
"copy",
"(",
"self",
",",
"mutable",
"=",
"False",
")",
":",
"if",
"self",
".",
"not_loaded",
"(",
")",
":",
"_cls",
"=",
"self",
".",
"__class__",
"if",
"mutable",
":",
"_cls",
"=",
"MutableGeoRaster",
"return",
"_cls",
".",
"open",
"(",
"self",
".",
"_filename",
")",
"return",
"self",
".",
"copy_with",
"(",
"mutable",
"=",
"mutable",
")"
] | Return a copy of this GeoRaster with no modifications.
Can be used to create a mutable copy of the GeoRaster | [
"Return",
"a",
"copy",
"of",
"this",
"GeoRaster",
"with",
"no",
"modifications",
"."
] | e752cd3ee71e339f79717e526fde362e80055d9e | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L1198-L1209 | train |
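A hypothetical sketch of copy ('scene.tif' is a placeholder path). A not-yet-loaded raster is simply reopened, as MutableGeoRaster when mutable=True; a loaded one goes through copy_with:

from telluric.georaster import GeoRaster2

raster = GeoRaster2.open('scene.tif')
editable = raster.copy(mutable=True)  # MutableGeoRaster instance
frozen = editable.copy()              # immutable copy again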
satellogic/telluric | telluric/georaster.py | GeoRaster2._resize | def _resize(self, ratio_x, ratio_y, resampling):
"""Return raster resized by ratio."""
new_width = int(np.ceil(self.width * ratio_x))
new_height = int(np.ceil(self.height * ratio_y))
dest_affine = self.affine * Affine.scale(1 / ratio_x, 1 / ratio_y)
if self.not_loaded():
window = rasterio.windows.Window(0, 0, self.width, self.height)
resized_raster = self.get_window(window, xsize=new_width, ysize=new_height, resampling=resampling)
else:
resized_raster = self._reproject(new_width, new_height, dest_affine, resampling=resampling)
return resized_raster | python | def _resize(self, ratio_x, ratio_y, resampling):
"""Return raster resized by ratio."""
new_width = int(np.ceil(self.width * ratio_x))
new_height = int(np.ceil(self.height * ratio_y))
dest_affine = self.affine * Affine.scale(1 / ratio_x, 1 / ratio_y)
if self.not_loaded():
window = rasterio.windows.Window(0, 0, self.width, self.height)
resized_raster = self.get_window(window, xsize=new_width, ysize=new_height, resampling=resampling)
else:
resized_raster = self._reproject(new_width, new_height, dest_affine, resampling=resampling)
return resized_raster | [
"def",
"_resize",
"(",
"self",
",",
"ratio_x",
",",
"ratio_y",
",",
"resampling",
")",
":",
"new_width",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"self",
".",
"width",
"*",
"ratio_x",
")",
")",
"new_height",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"self",
".",
"height",
"*",
"ratio_y",
")",
")",
"dest_affine",
"=",
"self",
".",
"affine",
"*",
"Affine",
".",
"scale",
"(",
"1",
"/",
"ratio_x",
",",
"1",
"/",
"ratio_y",
")",
"if",
"self",
".",
"not_loaded",
"(",
")",
":",
"window",
"=",
"rasterio",
".",
"windows",
".",
"Window",
"(",
"0",
",",
"0",
",",
"self",
".",
"width",
",",
"self",
".",
"height",
")",
"resized_raster",
"=",
"self",
".",
"get_window",
"(",
"window",
",",
"xsize",
"=",
"new_width",
",",
"ysize",
"=",
"new_height",
",",
"resampling",
"=",
"resampling",
")",
"else",
":",
"resized_raster",
"=",
"self",
".",
"_reproject",
"(",
"new_width",
",",
"new_height",
",",
"dest_affine",
",",
"resampling",
"=",
"resampling",
")",
"return",
"resized_raster"
] | Return raster resized by ratio. | [
"Return",
"raster",
"resized",
"by",
"ratio",
"."
] | e752cd3ee71e339f79717e526fde362e80055d9e | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L1277-L1288 | train |
satellogic/telluric | telluric/georaster.py | GeoRaster2.to_pillow_image | def to_pillow_image(self, return_mask=False):
"""Return Pillow. Image, and optionally also mask."""
img = np.rollaxis(np.rollaxis(self.image.data, 2), 2)
img = Image.fromarray(img[:, :, 0]) if img.shape[2] == 1 else Image.fromarray(img)
if return_mask:
mask = np.ma.getmaskarray(self.image)
mask = Image.fromarray(np.rollaxis(np.rollaxis(mask, 2), 2).astype(np.uint8)[:, :, 0])
return img, mask
else:
return img | python | def to_pillow_image(self, return_mask=False):
"""Return Pillow. Image, and optionally also mask."""
img = np.rollaxis(np.rollaxis(self.image.data, 2), 2)
img = Image.fromarray(img[:, :, 0]) if img.shape[2] == 1 else Image.fromarray(img)
if return_mask:
mask = np.ma.getmaskarray(self.image)
mask = Image.fromarray(np.rollaxis(np.rollaxis(mask, 2), 2).astype(np.uint8)[:, :, 0])
return img, mask
else:
return img | [
"def",
"to_pillow_image",
"(",
"self",
",",
"return_mask",
"=",
"False",
")",
":",
"img",
"=",
"np",
".",
"rollaxis",
"(",
"np",
".",
"rollaxis",
"(",
"self",
".",
"image",
".",
"data",
",",
"2",
")",
",",
"2",
")",
"img",
"=",
"Image",
".",
"fromarray",
"(",
"img",
"[",
":",
",",
":",
",",
"0",
"]",
")",
"if",
"img",
".",
"shape",
"[",
"2",
"]",
"==",
"1",
"else",
"Image",
".",
"fromarray",
"(",
"img",
")",
"if",
"return_mask",
":",
"mask",
"=",
"np",
".",
"ma",
".",
"getmaskarray",
"(",
"self",
".",
"image",
")",
"mask",
"=",
"Image",
".",
"fromarray",
"(",
"np",
".",
"rollaxis",
"(",
"np",
".",
"rollaxis",
"(",
"mask",
",",
"2",
")",
",",
"2",
")",
".",
"astype",
"(",
"np",
".",
"uint8",
")",
"[",
":",
",",
":",
",",
"0",
"]",
")",
"return",
"img",
",",
"mask",
"else",
":",
"return",
"img"
] | Return Pillow Image, and optionally also mask. | [
"Return",
"Pillow",
".",
"Image",
"and",
"optionally",
"also",
"mask",
"."
] | e752cd3ee71e339f79717e526fde362e80055d9e | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L1290-L1299 | train |
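A hypothetical sketch of to_pillow_image, assuming an 8-bit RGB raster at a placeholder path so the array converts cleanly to a Pillow image:

from telluric.georaster import GeoRaster2

raster = GeoRaster2.open('scene.tif')
img, mask = raster.to_pillow_image(return_mask=True)
img.thumbnail((256, 256))  # standard Pillow API from here on
img.save('thumbnail.png')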
satellogic/telluric | telluric/georaster.py | GeoRaster2.from_bytes | def from_bytes(cls, image_bytes, affine, crs, band_names=None):
"""Create GeoRaster from image BytesIo object.
:param image_bytes: io.BytesIO object
:param affine: raster's affine
:param crs: raster's crs
:param band_names: e.g. ['red', 'blue'] or 'red'
"""
b = io.BytesIO(image_bytes)
image = imageio.imread(b)
roll = np.rollaxis(image, 2)
if band_names is None:
band_names = [0, 1, 2]
elif isinstance(band_names, str):
band_names = [band_names]
return GeoRaster2(image=roll[:3, :, :], affine=affine, crs=crs, band_names=band_names) | python | def from_bytes(cls, image_bytes, affine, crs, band_names=None):
"""Create GeoRaster from image BytesIo object.
:param image_bytes: io.BytesIO object
:param affine: raster's affine
:param crs: raster's crs
:param band_names: e.g. ['red', 'blue'] or 'red'
"""
b = io.BytesIO(image_bytes)
image = imageio.imread(b)
roll = np.rollaxis(image, 2)
if band_names is None:
band_names = [0, 1, 2]
elif isinstance(band_names, str):
band_names = [band_names]
return GeoRaster2(image=roll[:3, :, :], affine=affine, crs=crs, band_names=band_names) | [
"def",
"from_bytes",
"(",
"cls",
",",
"image_bytes",
",",
"affine",
",",
"crs",
",",
"band_names",
"=",
"None",
")",
":",
"b",
"=",
"io",
".",
"BytesIO",
"(",
"image_bytes",
")",
"image",
"=",
"imageio",
".",
"imread",
"(",
"b",
")",
"roll",
"=",
"np",
".",
"rollaxis",
"(",
"image",
",",
"2",
")",
"if",
"band_names",
"is",
"None",
":",
"band_names",
"=",
"[",
"0",
",",
"1",
",",
"2",
"]",
"elif",
"isinstance",
"(",
"band_names",
",",
"str",
")",
":",
"band_names",
"=",
"[",
"band_names",
"]",
"return",
"GeoRaster2",
"(",
"image",
"=",
"roll",
"[",
":",
"3",
",",
":",
",",
":",
"]",
",",
"affine",
"=",
"affine",
",",
"crs",
"=",
"crs",
",",
"band_names",
"=",
"band_names",
")"
] | Create GeoRaster from image BytesIO object.
:param image_bytes: io.BytesIO object
:param affine: raster's affine
:param crs: raster's crs
:param band_names: e.g. ['red', 'blue'] or 'red' | [
"Create",
"GeoRaster",
"from",
"image",
"BytesIo",
"object",
"."
] | e752cd3ee71e339f79717e526fde362e80055d9e | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L1506-L1522 | train |
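A hypothetical sketch of from_bytes. The origin and pixel size in the Affine below are made-up values, 'tile.png' is a placeholder RGB image, and WEB_MERCATOR_CRS is assumed to live in telluric.constants:

from affine import Affine
from telluric.georaster import GeoRaster2
from telluric.constants import WEB_MERCATOR_CRS

with open('tile.png', 'rb') as f:
    png_bytes = f.read()

affine = Affine.translation(-8238310.0, 4970072.0) * Affine.scale(0.6, -0.6)
raster = GeoRaster2.from_bytes(png_bytes, affine=affine, crs=WEB_MERCATOR_CRS,
                               band_names=['red', 'green', 'blue'])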
satellogic/telluric | telluric/georaster.py | GeoRaster2._repr_html_ | def _repr_html_(self):
"""Required for jupyter notebook to show raster as an interactive map."""
TileServer.run_tileserver(self, self.footprint())
capture = "raster: %s" % self._filename
mp = TileServer.folium_client(self, self.footprint(), capture=capture)
return mp._repr_html_() | python | def _repr_html_(self):
"""Required for jupyter notebook to show raster as an interactive map."""
TileServer.run_tileserver(self, self.footprint())
capture = "raster: %s" % self._filename
mp = TileServer.folium_client(self, self.footprint(), capture=capture)
return mp._repr_html_() | [
"def",
"_repr_html_",
"(",
"self",
")",
":",
"TileServer",
".",
"run_tileserver",
"(",
"self",
",",
"self",
".",
"footprint",
"(",
")",
")",
"capture",
"=",
"\"raster: %s\"",
"%",
"self",
".",
"_filename",
"mp",
"=",
"TileServer",
".",
"folium_client",
"(",
"self",
",",
"self",
".",
"footprint",
"(",
")",
",",
"capture",
"=",
"capture",
")",
"return",
"mp",
".",
"_repr_html_",
"(",
")"
] | Required for jupyter notebook to show raster as an interactive map. | [
"Required",
"for",
"jupyter",
"notebook",
"to",
"show",
"raster",
"as",
"an",
"interactive",
"map",
"."
] | e752cd3ee71e339f79717e526fde362e80055d9e | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L1524-L1529 | train |
satellogic/telluric | telluric/georaster.py | GeoRaster2.image_corner | def image_corner(self, corner):
"""Return image corner in pixels, as shapely.Point."""
if corner not in self.corner_types():
raise GeoRaster2Error('corner %s invalid, expected: %s' % (corner, self.corner_types()))
x = 0 if corner[1] == 'l' else self.width
y = 0 if corner[0] == 'u' else self.height
return Point(x, y) | python | def image_corner(self, corner):
"""Return image corner in pixels, as shapely.Point."""
if corner not in self.corner_types():
raise GeoRaster2Error('corner %s invalid, expected: %s' % (corner, self.corner_types()))
x = 0 if corner[1] == 'l' else self.width
y = 0 if corner[0] == 'u' else self.height
return Point(x, y) | [
"def",
"image_corner",
"(",
"self",
",",
"corner",
")",
":",
"if",
"corner",
"not",
"in",
"self",
".",
"corner_types",
"(",
")",
":",
"raise",
"GeoRaster2Error",
"(",
"'corner %s invalid, expected: %s'",
"%",
"(",
"corner",
",",
"self",
".",
"corner_types",
"(",
")",
")",
")",
"x",
"=",
"0",
"if",
"corner",
"[",
"1",
"]",
"==",
"'l'",
"else",
"self",
".",
"width",
"y",
"=",
"0",
"if",
"corner",
"[",
"0",
"]",
"==",
"'u'",
"else",
"self",
".",
"height",
"return",
"Point",
"(",
"x",
",",
"y",
")"
] | Return image corner in pixels, as shapely.Point. | [
"Return",
"image",
"corner",
"in",
"pixels",
"as",
"shapely",
".",
"Point",
"."
] | e752cd3ee71e339f79717e526fde362e80055d9e | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L1555-L1562 | train |
satellogic/telluric | telluric/georaster.py | GeoRaster2.center | def center(self):
"""Return footprint center in world coordinates, as GeoVector."""
image_center = Point(self.width / 2, self.height / 2)
return self.to_world(image_center) | python | def center(self):
"""Return footprint center in world coordinates, as GeoVector."""
image_center = Point(self.width / 2, self.height / 2)
return self.to_world(image_center) | [
"def",
"center",
"(",
"self",
")",
":",
"image_center",
"=",
"Point",
"(",
"self",
".",
"width",
"/",
"2",
",",
"self",
".",
"height",
"/",
"2",
")",
"return",
"self",
".",
"to_world",
"(",
"image_center",
")"
] | Return footprint center in world coordinates, as GeoVector. | [
"Return",
"footprint",
"center",
"in",
"world",
"coordinates",
"as",
"GeoVector",
"."
] | e752cd3ee71e339f79717e526fde362e80055d9e | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L1576-L1579 | train |
satellogic/telluric | telluric/georaster.py | GeoRaster2.bounds | def bounds(self):
"""Return image rectangle in pixels, as shapely.Polygon."""
corners = [self.image_corner(corner) for corner in self.corner_types()]
return Polygon([[corner.x, corner.y] for corner in corners]) | python | def bounds(self):
"""Return image rectangle in pixels, as shapely.Polygon."""
corners = [self.image_corner(corner) for corner in self.corner_types()]
return Polygon([[corner.x, corner.y] for corner in corners]) | [
"def",
"bounds",
"(",
"self",
")",
":",
"corners",
"=",
"[",
"self",
".",
"image_corner",
"(",
"corner",
")",
"for",
"corner",
"in",
"self",
".",
"corner_types",
"(",
")",
"]",
"return",
"Polygon",
"(",
"[",
"[",
"corner",
".",
"x",
",",
"corner",
".",
"y",
"]",
"for",
"corner",
"in",
"corners",
"]",
")"
] | Return image rectangle in pixels, as shapely.Polygon. | [
"Return",
"image",
"rectangle",
"in",
"pixels",
"as",
"shapely",
".",
"Polygon",
"."
] | e752cd3ee71e339f79717e526fde362e80055d9e | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L1581-L1584 | train |
satellogic/telluric | telluric/georaster.py | GeoRaster2._calc_footprint | def _calc_footprint(self):
"""Return rectangle in world coordinates, as GeoVector."""
corners = [self.corner(corner) for corner in self.corner_types()]
coords = []
for corner in corners:
shape = corner.get_shape(corner.crs)
coords.append([shape.x, shape.y])
shp = Polygon(coords)
# TODO use GeoVector.from_bounds
self._footprint = GeoVector(shp, self.crs)
return self._footprint | python | def _calc_footprint(self):
"""Return rectangle in world coordinates, as GeoVector."""
corners = [self.corner(corner) for corner in self.corner_types()]
coords = []
for corner in corners:
shape = corner.get_shape(corner.crs)
coords.append([shape.x, shape.y])
shp = Polygon(coords)
# TODO use GeoVector.from_bounds
self._footprint = GeoVector(shp, self.crs)
return self._footprint | [
"def",
"_calc_footprint",
"(",
"self",
")",
":",
"corners",
"=",
"[",
"self",
".",
"corner",
"(",
"corner",
")",
"for",
"corner",
"in",
"self",
".",
"corner_types",
"(",
")",
"]",
"coords",
"=",
"[",
"]",
"for",
"corner",
"in",
"corners",
":",
"shape",
"=",
"corner",
".",
"get_shape",
"(",
"corner",
".",
"crs",
")",
"coords",
".",
"append",
"(",
"[",
"shape",
".",
"x",
",",
"shape",
".",
"y",
"]",
")",
"shp",
"=",
"Polygon",
"(",
"coords",
")",
"# TODO use GeoVector.from_bounds",
"self",
".",
"_footprint",
"=",
"GeoVector",
"(",
"shp",
",",
"self",
".",
"crs",
")",
"return",
"self",
".",
"_footprint"
] | Return rectangle in world coordinates, as GeoVector. | [
"Return",
"rectangle",
"in",
"world",
"coordinates",
"as",
"GeoVector",
"."
] | e752cd3ee71e339f79717e526fde362e80055d9e | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L1586-L1597 | train |
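A hypothetical sketch relating the geometry accessors above ('scene.tif' is a placeholder path). footprint(), which caches _calc_footprint's result, and center() return world-coordinate GeoVectors, while bounds() stays in pixel space:

from telluric.georaster import GeoRaster2

raster = GeoRaster2.open('scene.tif')
print(raster.footprint())  # GeoVector polygon in raster.crs
print(raster.center())     # GeoVector at the image middle
print(raster.bounds())     # shapely Polygon in pixels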
satellogic/telluric | telluric/georaster.py | GeoRaster2.to_raster | def to_raster(self, vector):
"""Return the vector in pixel coordinates, as shapely.Geometry."""
return transform(vector.get_shape(vector.crs), vector.crs, self.crs, dst_affine=~self.affine) | python | def to_raster(self, vector):
"""Return the vector in pixel coordinates, as shapely.Geometry."""
return transform(vector.get_shape(vector.crs), vector.crs, self.crs, dst_affine=~self.affine) | [
"def",
"to_raster",
"(",
"self",
",",
"vector",
")",
":",
"return",
"transform",
"(",
"vector",
".",
"get_shape",
"(",
"vector",
".",
"crs",
")",
",",
"vector",
".",
"crs",
",",
"self",
".",
"crs",
",",
"dst_affine",
"=",
"~",
"self",
".",
"affine",
")"
] | Return the vector in pixel coordinates, as shapely.Geometry. | [
"Return",
"the",
"vector",
"in",
"pixel",
"coordinates",
"as",
"shapely",
".",
"Geometry",
"."
] | e752cd3ee71e339f79717e526fde362e80055d9e | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L1611-L1613 | train |
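A hypothetical sketch of the world-to-pixel direction, the same conversion GeoRaster2.get performs internally via ~self.affine. The GeoVector/constants import paths, file path, and coordinates are assumptions:

from shapely.geometry import Point
from telluric.georaster import GeoRaster2
from telluric.vectors import GeoVector
from telluric.constants import WGS84_CRS

raster = GeoRaster2.open('scene.tif')
vector = GeoVector(Point(-73.99, 40.72), WGS84_CRS)
pixel = raster.to_raster(vector)  # shapely Point in (col, row) space
print(int(pixel.x), int(pixel.y))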