id | repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url |
---|---|---|---|---|---|---|---|---|---|---|---|
5,200 | jbittel/django-mama-cas | mama_cas/cas.py | logout_user | def logout_user(request):
"""End a single sign-on session for the current user."""
logger.debug("Logout request received for %s" % request.user)
if is_authenticated(request.user):
ServiceTicket.objects.consume_tickets(request.user)
ProxyTicket.objects.consume_tickets(request.user)
ProxyGrantingTicket.objects.consume_tickets(request.user)
ServiceTicket.objects.request_sign_out(request.user)
logger.info("Single sign-on session ended for %s" % request.user)
logout(request)
messages.success(request, _('You have been successfully logged out')) | python | def logout_user(request):
"""End a single sign-on session for the current user."""
logger.debug("Logout request received for %s" % request.user)
if is_authenticated(request.user):
ServiceTicket.objects.consume_tickets(request.user)
ProxyTicket.objects.consume_tickets(request.user)
ProxyGrantingTicket.objects.consume_tickets(request.user)
ServiceTicket.objects.request_sign_out(request.user)
logger.info("Single sign-on session ended for %s" % request.user)
logout(request)
messages.success(request, _('You have been successfully logged out')) | [
"def",
"logout_user",
"(",
"request",
")",
":",
"logger",
".",
"debug",
"(",
"\"Logout request received for %s\"",
"%",
"request",
".",
"user",
")",
"if",
"is_authenticated",
"(",
"request",
".",
"user",
")",
":",
"ServiceTicket",
".",
"objects",
".",
"consume_tickets",
"(",
"request",
".",
"user",
")",
"ProxyTicket",
".",
"objects",
".",
"consume_tickets",
"(",
"request",
".",
"user",
")",
"ProxyGrantingTicket",
".",
"objects",
".",
"consume_tickets",
"(",
"request",
".",
"user",
")",
"ServiceTicket",
".",
"objects",
".",
"request_sign_out",
"(",
"request",
".",
"user",
")",
"logger",
".",
"info",
"(",
"\"Single sign-on session ended for %s\"",
"%",
"request",
".",
"user",
")",
"logout",
"(",
"request",
")",
"messages",
".",
"success",
"(",
"request",
",",
"_",
"(",
"'You have been successfully logged out'",
")",
")"
] | End a single sign-on session for the current user. | [
"End",
"a",
"single",
"sign",
"-",
"on",
"session",
"for",
"the",
"current",
"user",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/cas.py#L94-L106 |
5,201 | jbittel/django-mama-cas | mama_cas/callbacks.py | user_name_attributes | def user_name_attributes(user, service):
"""Return all available user name related fields and methods."""
attributes = {}
attributes['username'] = user.get_username()
attributes['full_name'] = user.get_full_name()
attributes['short_name'] = user.get_short_name()
return attributes | python | def user_name_attributes(user, service):
"""Return all available user name related fields and methods."""
attributes = {}
attributes['username'] = user.get_username()
attributes['full_name'] = user.get_full_name()
attributes['short_name'] = user.get_short_name()
return attributes | [
"def",
"user_name_attributes",
"(",
"user",
",",
"service",
")",
":",
"attributes",
"=",
"{",
"}",
"attributes",
"[",
"'username'",
"]",
"=",
"user",
".",
"get_username",
"(",
")",
"attributes",
"[",
"'full_name'",
"]",
"=",
"user",
".",
"get_full_name",
"(",
")",
"attributes",
"[",
"'short_name'",
"]",
"=",
"user",
".",
"get_short_name",
"(",
")",
"return",
"attributes"
] | Return all available user name related fields and methods. | [
"Return",
"all",
"available",
"user",
"name",
"related",
"fields",
"and",
"methods",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/callbacks.py#L1-L7 |
5,202 | jbittel/django-mama-cas | mama_cas/callbacks.py | user_model_attributes | def user_model_attributes(user, service):
"""
Return all fields on the user object that are not in the list
of fields to ignore.
"""
ignore_fields = ['id', 'password']
attributes = {}
for field in user._meta.fields:
if field.name not in ignore_fields:
attributes[field.name] = getattr(user, field.name)
return attributes | python | def user_model_attributes(user, service):
"""
Return all fields on the user object that are not in the list
of fields to ignore.
"""
ignore_fields = ['id', 'password']
attributes = {}
for field in user._meta.fields:
if field.name not in ignore_fields:
attributes[field.name] = getattr(user, field.name)
return attributes | [
"def",
"user_model_attributes",
"(",
"user",
",",
"service",
")",
":",
"ignore_fields",
"=",
"[",
"'id'",
",",
"'password'",
"]",
"attributes",
"=",
"{",
"}",
"for",
"field",
"in",
"user",
".",
"_meta",
".",
"fields",
":",
"if",
"field",
".",
"name",
"not",
"in",
"ignore_fields",
":",
"attributes",
"[",
"field",
".",
"name",
"]",
"=",
"getattr",
"(",
"user",
",",
"field",
".",
"name",
")",
"return",
"attributes"
] | Return all fields on the user object that are not in the list
of fields to ignore. | [
"Return",
"all",
"fields",
"on",
"the",
"user",
"object",
"that",
"are",
"not",
"in",
"the",
"list",
"of",
"fields",
"to",
"ignore",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/callbacks.py#L10-L20 |
5,203 | jbittel/django-mama-cas | mama_cas/utils.py | add_query_params | def add_query_params(url, params):
"""
Inject additional query parameters into an existing URL. If
parameters already exist with the same name, they will be
overwritten. Parameters with empty values are ignored. Return
the modified URL as a string.
"""
def encode(s):
return force_bytes(s, settings.DEFAULT_CHARSET)
params = dict([(encode(k), encode(v)) for k, v in params.items() if v])
parts = list(urlparse(url))
query = dict(parse_qsl(parts[4]))
query.update(params)
parts[4] = urlencode(query)
return urlunparse(parts) | python | def add_query_params(url, params):
"""
Inject additional query parameters into an existing URL. If
parameters already exist with the same name, they will be
overwritten. Parameters with empty values are ignored. Return
the modified URL as a string.
"""
def encode(s):
return force_bytes(s, settings.DEFAULT_CHARSET)
params = dict([(encode(k), encode(v)) for k, v in params.items() if v])
parts = list(urlparse(url))
query = dict(parse_qsl(parts[4]))
query.update(params)
parts[4] = urlencode(query)
return urlunparse(parts) | [
"def",
"add_query_params",
"(",
"url",
",",
"params",
")",
":",
"def",
"encode",
"(",
"s",
")",
":",
"return",
"force_bytes",
"(",
"s",
",",
"settings",
".",
"DEFAULT_CHARSET",
")",
"params",
"=",
"dict",
"(",
"[",
"(",
"encode",
"(",
"k",
")",
",",
"encode",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"params",
".",
"items",
"(",
")",
"if",
"v",
"]",
")",
"parts",
"=",
"list",
"(",
"urlparse",
"(",
"url",
")",
")",
"query",
"=",
"dict",
"(",
"parse_qsl",
"(",
"parts",
"[",
"4",
"]",
")",
")",
"query",
".",
"update",
"(",
"params",
")",
"parts",
"[",
"4",
"]",
"=",
"urlencode",
"(",
"query",
")",
"return",
"urlunparse",
"(",
"parts",
")"
] | Inject additional query parameters into an existing URL. If
parameters already exist with the same name, they will be
overwritten. Parameters with empty values are ignored. Return
the modified URL as a string. | [
"Inject",
"additional",
"query",
"parameters",
"into",
"an",
"existing",
"URL",
".",
"If",
"parameters",
"already",
"exist",
"with",
"the",
"same",
"name",
"they",
"will",
"be",
"overwritten",
".",
"Parameters",
"with",
"empty",
"values",
"are",
"ignored",
".",
"Return",
"the",
"modified",
"URL",
"as",
"a",
"string",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/utils.py#L23-L38 |
5,204 | jbittel/django-mama-cas | mama_cas/utils.py | match_service | def match_service(service1, service2):
"""
Compare two service URLs. Return ``True`` if the scheme, hostname,
optional port and path match.
"""
s1, s2 = urlparse(service1), urlparse(service2)
try:
return (s1.scheme, s1.netloc, s1.path) == (s2.scheme, s2.netloc, s2.path)
except ValueError:
return False | python | def match_service(service1, service2):
"""
Compare two service URLs. Return ``True`` if the scheme, hostname,
optional port and path match.
"""
s1, s2 = urlparse(service1), urlparse(service2)
try:
return (s1.scheme, s1.netloc, s1.path) == (s2.scheme, s2.netloc, s2.path)
except ValueError:
return False | [
"def",
"match_service",
"(",
"service1",
",",
"service2",
")",
":",
"s1",
",",
"s2",
"=",
"urlparse",
"(",
"service1",
")",
",",
"urlparse",
"(",
"service2",
")",
"try",
":",
"return",
"(",
"s1",
".",
"scheme",
",",
"s1",
".",
"netloc",
",",
"s1",
".",
"path",
")",
"==",
"(",
"s2",
".",
"scheme",
",",
"s2",
".",
"netloc",
",",
"s2",
".",
"path",
")",
"except",
"ValueError",
":",
"return",
"False"
] | Compare two service URLs. Return ``True`` if the scheme, hostname,
optional port and path match. | [
"Compare",
"two",
"service",
"URLs",
".",
"Return",
"True",
"if",
"the",
"scheme",
"hostname",
"optional",
"port",
"and",
"path",
"match",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/utils.py#L58-L67 |
5,205 | jbittel/django-mama-cas | mama_cas/utils.py | redirect | def redirect(to, *args, **kwargs):
"""
Similar to the Django ``redirect`` shortcut but with altered
functionality. If an optional ``params`` argument is provided, the
dictionary items will be injected as query parameters on the
redirection URL.
"""
params = kwargs.pop('params', {})
try:
to = reverse(to, args=args, kwargs=kwargs)
except NoReverseMatch:
if '/' not in to and '.' not in to:
to = reverse('cas_login')
elif not service_allowed(to):
raise PermissionDenied()
if params:
to = add_query_params(to, params)
logger.debug("Redirecting to %s" % to)
return HttpResponseRedirect(to) | python | def redirect(to, *args, **kwargs):
"""
Similar to the Django ``redirect`` shortcut but with altered
functionality. If an optional ``params`` argument is provided, the
dictionary items will be injected as query parameters on the
redirection URL.
"""
params = kwargs.pop('params', {})
try:
to = reverse(to, args=args, kwargs=kwargs)
except NoReverseMatch:
if '/' not in to and '.' not in to:
to = reverse('cas_login')
elif not service_allowed(to):
raise PermissionDenied()
if params:
to = add_query_params(to, params)
logger.debug("Redirecting to %s" % to)
return HttpResponseRedirect(to) | [
"def",
"redirect",
"(",
"to",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"params",
"=",
"kwargs",
".",
"pop",
"(",
"'params'",
",",
"{",
"}",
")",
"try",
":",
"to",
"=",
"reverse",
"(",
"to",
",",
"args",
"=",
"args",
",",
"kwargs",
"=",
"kwargs",
")",
"except",
"NoReverseMatch",
":",
"if",
"'/'",
"not",
"in",
"to",
"and",
"'.'",
"not",
"in",
"to",
":",
"to",
"=",
"reverse",
"(",
"'cas_login'",
")",
"elif",
"not",
"service_allowed",
"(",
"to",
")",
":",
"raise",
"PermissionDenied",
"(",
")",
"if",
"params",
":",
"to",
"=",
"add_query_params",
"(",
"to",
",",
"params",
")",
"logger",
".",
"debug",
"(",
"\"Redirecting to %s\"",
"%",
"to",
")",
"return",
"HttpResponseRedirect",
"(",
"to",
")"
] | Similar to the Django ``redirect`` shortcut but with altered
functionality. If an optional ``params`` argument is provided, the
dictionary items will be injected as query parameters on the
redirection URL. | [
"Similar",
"to",
"the",
"Django",
"redirect",
"shortcut",
"but",
"with",
"altered",
"functionality",
".",
"If",
"an",
"optional",
"params",
"argument",
"is",
"provided",
"the",
"dictionary",
"items",
"will",
"be",
"injected",
"as",
"query",
"parameters",
"on",
"the",
"redirection",
"URL",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/utils.py#L70-L91 |
5,206 | jbittel/django-mama-cas | mama_cas/services/backends.py | ServiceConfig.get_config | def get_config(self, service, setting):
"""
Access the configuration for a given service and setting. If the
service is not found, return a default value.
"""
try:
return self.get_service(service)[setting]
except KeyError:
return getattr(self, setting + '_DEFAULT') | python | def get_config(self, service, setting):
"""
Access the configuration for a given service and setting. If the
service is not found, return a default value.
"""
try:
return self.get_service(service)[setting]
except KeyError:
return getattr(self, setting + '_DEFAULT') | [
"def",
"get_config",
"(",
"self",
",",
"service",
",",
"setting",
")",
":",
"try",
":",
"return",
"self",
".",
"get_service",
"(",
"service",
")",
"[",
"setting",
"]",
"except",
"KeyError",
":",
"return",
"getattr",
"(",
"self",
",",
"setting",
"+",
"'_DEFAULT'",
")"
] | Access the configuration for a given service and setting. If the
service is not found, return a default value. | [
"Access",
"the",
"configuration",
"for",
"a",
"given",
"service",
"and",
"setting",
".",
"If",
"the",
"service",
"is",
"not",
"found",
"return",
"a",
"default",
"value",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/services/backends.py#L47-L55 |
5,207 | jbittel/django-mama-cas | mama_cas/response.py | SamlValidationResponse.get_status | def get_status(self, status_value, message=None):
"""
Build a Status XML block for a SAML 1.1 Response.
"""
status = etree.Element('Status')
status_code = etree.SubElement(status, 'StatusCode')
status_code.set('Value', 'samlp:' + status_value)
if message:
status_message = etree.SubElement(status, 'StatusMessage')
status_message.text = message
return status | python | def get_status(self, status_value, message=None):
"""
Build a Status XML block for a SAML 1.1 Response.
"""
status = etree.Element('Status')
status_code = etree.SubElement(status, 'StatusCode')
status_code.set('Value', 'samlp:' + status_value)
if message:
status_message = etree.SubElement(status, 'StatusMessage')
status_message.text = message
return status | [
"def",
"get_status",
"(",
"self",
",",
"status_value",
",",
"message",
"=",
"None",
")",
":",
"status",
"=",
"etree",
".",
"Element",
"(",
"'Status'",
")",
"status_code",
"=",
"etree",
".",
"SubElement",
"(",
"status",
",",
"'StatusCode'",
")",
"status_code",
".",
"set",
"(",
"'Value'",
",",
"'samlp:'",
"+",
"status_value",
")",
"if",
"message",
":",
"status_message",
"=",
"etree",
".",
"SubElement",
"(",
"status",
",",
"'StatusMessage'",
")",
"status_message",
".",
"text",
"=",
"message",
"return",
"status"
] | Build a Status XML block for a SAML 1.1 Response. | [
"Build",
"a",
"Status",
"XML",
"block",
"for",
"a",
"SAML",
"1",
".",
"1",
"Response",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/response.py#L185-L195 |
5,208 | jbittel/django-mama-cas | mama_cas/response.py | SamlValidationResponse.get_assertion | def get_assertion(self, ticket, attributes):
"""
Build a SAML 1.1 Assertion XML block.
"""
assertion = etree.Element('Assertion')
assertion.set('xmlns', 'urn:oasis:names:tc:SAML:1.0:assertion')
assertion.set('AssertionID', self.generate_id())
assertion.set('IssueInstant', self.instant())
assertion.set('Issuer', 'localhost')
assertion.set('MajorVersion', '1')
assertion.set('MinorVersion', '1')
assertion.append(self.get_conditions(ticket.service))
subject = self.get_subject(ticket.user.get_username())
if attributes:
assertion.append(self.get_attribute_statement(subject, attributes))
assertion.append(self.get_authentication_statement(subject, ticket))
return assertion | python | def get_assertion(self, ticket, attributes):
"""
Build a SAML 1.1 Assertion XML block.
"""
assertion = etree.Element('Assertion')
assertion.set('xmlns', 'urn:oasis:names:tc:SAML:1.0:assertion')
assertion.set('AssertionID', self.generate_id())
assertion.set('IssueInstant', self.instant())
assertion.set('Issuer', 'localhost')
assertion.set('MajorVersion', '1')
assertion.set('MinorVersion', '1')
assertion.append(self.get_conditions(ticket.service))
subject = self.get_subject(ticket.user.get_username())
if attributes:
assertion.append(self.get_attribute_statement(subject, attributes))
assertion.append(self.get_authentication_statement(subject, ticket))
return assertion | [
"def",
"get_assertion",
"(",
"self",
",",
"ticket",
",",
"attributes",
")",
":",
"assertion",
"=",
"etree",
".",
"Element",
"(",
"'Assertion'",
")",
"assertion",
".",
"set",
"(",
"'xmlns'",
",",
"'urn:oasis:names:tc:SAML:1.0:assertion'",
")",
"assertion",
".",
"set",
"(",
"'AssertionID'",
",",
"self",
".",
"generate_id",
"(",
")",
")",
"assertion",
".",
"set",
"(",
"'IssueInstant'",
",",
"self",
".",
"instant",
"(",
")",
")",
"assertion",
".",
"set",
"(",
"'Issuer'",
",",
"'localhost'",
")",
"assertion",
".",
"set",
"(",
"'MajorVersion'",
",",
"'1'",
")",
"assertion",
".",
"set",
"(",
"'MinorVersion'",
",",
"'1'",
")",
"assertion",
".",
"append",
"(",
"self",
".",
"get_conditions",
"(",
"ticket",
".",
"service",
")",
")",
"subject",
"=",
"self",
".",
"get_subject",
"(",
"ticket",
".",
"user",
".",
"get_username",
"(",
")",
")",
"if",
"attributes",
":",
"assertion",
".",
"append",
"(",
"self",
".",
"get_attribute_statement",
"(",
"subject",
",",
"attributes",
")",
")",
"assertion",
".",
"append",
"(",
"self",
".",
"get_authentication_statement",
"(",
"subject",
",",
"ticket",
")",
")",
"return",
"assertion"
] | Build a SAML 1.1 Assertion XML block. | [
"Build",
"a",
"SAML",
"1",
".",
"1",
"Assertion",
"XML",
"block",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/response.py#L197-L214 |
5,209 | jbittel/django-mama-cas | mama_cas/response.py | SamlValidationResponse.get_conditions | def get_conditions(self, service_id):
"""
Build a Conditions XML block for a SAML 1.1 Assertion.
"""
conditions = etree.Element('Conditions')
conditions.set('NotBefore', self.instant())
conditions.set('NotOnOrAfter', self.instant(offset=30))
restriction = etree.SubElement(conditions, 'AudienceRestrictionCondition')
audience = etree.SubElement(restriction, 'Audience')
audience.text = service_id
return conditions | python | def get_conditions(self, service_id):
"""
Build a Conditions XML block for a SAML 1.1 Assertion.
"""
conditions = etree.Element('Conditions')
conditions.set('NotBefore', self.instant())
conditions.set('NotOnOrAfter', self.instant(offset=30))
restriction = etree.SubElement(conditions, 'AudienceRestrictionCondition')
audience = etree.SubElement(restriction, 'Audience')
audience.text = service_id
return conditions | [
"def",
"get_conditions",
"(",
"self",
",",
"service_id",
")",
":",
"conditions",
"=",
"etree",
".",
"Element",
"(",
"'Conditions'",
")",
"conditions",
".",
"set",
"(",
"'NotBefore'",
",",
"self",
".",
"instant",
"(",
")",
")",
"conditions",
".",
"set",
"(",
"'NotOnOrAfter'",
",",
"self",
".",
"instant",
"(",
"offset",
"=",
"30",
")",
")",
"restriction",
"=",
"etree",
".",
"SubElement",
"(",
"conditions",
",",
"'AudienceRestrictionCondition'",
")",
"audience",
"=",
"etree",
".",
"SubElement",
"(",
"restriction",
",",
"'Audience'",
")",
"audience",
".",
"text",
"=",
"service_id",
"return",
"conditions"
] | Build a Conditions XML block for a SAML 1.1 Assertion. | [
"Build",
"a",
"Conditions",
"XML",
"block",
"for",
"a",
"SAML",
"1",
".",
"1",
"Assertion",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/response.py#L216-L226 |
5,210 | jbittel/django-mama-cas | mama_cas/response.py | SamlValidationResponse.get_attribute_statement | def get_attribute_statement(self, subject, attributes):
"""
Build an AttributeStatement XML block for a SAML 1.1 Assertion.
"""
attribute_statement = etree.Element('AttributeStatement')
attribute_statement.append(subject)
for name, value in attributes.items():
attribute = etree.SubElement(attribute_statement, 'Attribute')
attribute.set('AttributeName', name)
attribute.set('AttributeNamespace', self.namespace)
if isinstance(value, list):
for v in value:
attribute_value = etree.SubElement(attribute, 'AttributeValue')
attribute_value.text = force_text(v)
else:
attribute_value = etree.SubElement(attribute, 'AttributeValue')
attribute_value.text = force_text(value)
return attribute_statement | python | def get_attribute_statement(self, subject, attributes):
"""
Build an AttributeStatement XML block for a SAML 1.1 Assertion.
"""
attribute_statement = etree.Element('AttributeStatement')
attribute_statement.append(subject)
for name, value in attributes.items():
attribute = etree.SubElement(attribute_statement, 'Attribute')
attribute.set('AttributeName', name)
attribute.set('AttributeNamespace', self.namespace)
if isinstance(value, list):
for v in value:
attribute_value = etree.SubElement(attribute, 'AttributeValue')
attribute_value.text = force_text(v)
else:
attribute_value = etree.SubElement(attribute, 'AttributeValue')
attribute_value.text = force_text(value)
return attribute_statement | [
"def",
"get_attribute_statement",
"(",
"self",
",",
"subject",
",",
"attributes",
")",
":",
"attribute_statement",
"=",
"etree",
".",
"Element",
"(",
"'AttributeStatement'",
")",
"attribute_statement",
".",
"append",
"(",
"subject",
")",
"for",
"name",
",",
"value",
"in",
"attributes",
".",
"items",
"(",
")",
":",
"attribute",
"=",
"etree",
".",
"SubElement",
"(",
"attribute_statement",
",",
"'Attribute'",
")",
"attribute",
".",
"set",
"(",
"'AttributeName'",
",",
"name",
")",
"attribute",
".",
"set",
"(",
"'AttributeNamespace'",
",",
"self",
".",
"namespace",
")",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"for",
"v",
"in",
"value",
":",
"attribute_value",
"=",
"etree",
".",
"SubElement",
"(",
"attribute",
",",
"'AttributeValue'",
")",
"attribute_value",
".",
"text",
"=",
"force_text",
"(",
"v",
")",
"else",
":",
"attribute_value",
"=",
"etree",
".",
"SubElement",
"(",
"attribute",
",",
"'AttributeValue'",
")",
"attribute_value",
".",
"text",
"=",
"force_text",
"(",
"value",
")",
"return",
"attribute_statement"
] | Build an AttributeStatement XML block for a SAML 1.1 Assertion. | [
"Build",
"an",
"AttributeStatement",
"XML",
"block",
"for",
"a",
"SAML",
"1",
".",
"1",
"Assertion",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/response.py#L228-L245 |
5,211 | jbittel/django-mama-cas | mama_cas/response.py | SamlValidationResponse.get_authentication_statement | def get_authentication_statement(self, subject, ticket):
"""
Build an AuthenticationStatement XML block for a SAML 1.1
Assertion.
"""
authentication_statement = etree.Element('AuthenticationStatement')
authentication_statement.set('AuthenticationInstant',
self.instant(instant=ticket.consumed))
authentication_statement.set('AuthenticationMethod',
self.authn_method_password)
authentication_statement.append(subject)
return authentication_statement | python | def get_authentication_statement(self, subject, ticket):
"""
Build an AuthenticationStatement XML block for a SAML 1.1
Assertion.
"""
authentication_statement = etree.Element('AuthenticationStatement')
authentication_statement.set('AuthenticationInstant',
self.instant(instant=ticket.consumed))
authentication_statement.set('AuthenticationMethod',
self.authn_method_password)
authentication_statement.append(subject)
return authentication_statement | [
"def",
"get_authentication_statement",
"(",
"self",
",",
"subject",
",",
"ticket",
")",
":",
"authentication_statement",
"=",
"etree",
".",
"Element",
"(",
"'AuthenticationStatement'",
")",
"authentication_statement",
".",
"set",
"(",
"'AuthenticationInstant'",
",",
"self",
".",
"instant",
"(",
"instant",
"=",
"ticket",
".",
"consumed",
")",
")",
"authentication_statement",
".",
"set",
"(",
"'AuthenticationMethod'",
",",
"self",
".",
"authn_method_password",
")",
"authentication_statement",
".",
"append",
"(",
"subject",
")",
"return",
"authentication_statement"
] | Build an AuthenticationStatement XML block for a SAML 1.1
Assertion. | [
"Build",
"an",
"AuthenticationStatement",
"XML",
"block",
"for",
"a",
"SAML",
"1",
".",
"1",
"Assertion",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/response.py#L247-L258 |
5,212 | jbittel/django-mama-cas | mama_cas/response.py | SamlValidationResponse.get_subject | def get_subject(self, identifier):
"""
Build a Subject XML block for a SAML 1.1
AuthenticationStatement or AttributeStatement.
"""
subject = etree.Element('Subject')
name = etree.SubElement(subject, 'NameIdentifier')
name.text = identifier
subject_confirmation = etree.SubElement(subject, 'SubjectConfirmation')
method = etree.SubElement(subject_confirmation, 'ConfirmationMethod')
method.text = self.confirmation_method
return subject | python | def get_subject(self, identifier):
"""
Build a Subject XML block for a SAML 1.1
AuthenticationStatement or AttributeStatement.
"""
subject = etree.Element('Subject')
name = etree.SubElement(subject, 'NameIdentifier')
name.text = identifier
subject_confirmation = etree.SubElement(subject, 'SubjectConfirmation')
method = etree.SubElement(subject_confirmation, 'ConfirmationMethod')
method.text = self.confirmation_method
return subject | [
"def",
"get_subject",
"(",
"self",
",",
"identifier",
")",
":",
"subject",
"=",
"etree",
".",
"Element",
"(",
"'Subject'",
")",
"name",
"=",
"etree",
".",
"SubElement",
"(",
"subject",
",",
"'NameIdentifier'",
")",
"name",
".",
"text",
"=",
"identifier",
"subject_confirmation",
"=",
"etree",
".",
"SubElement",
"(",
"subject",
",",
"'SubjectConfirmation'",
")",
"method",
"=",
"etree",
".",
"SubElement",
"(",
"subject_confirmation",
",",
"'ConfirmationMethod'",
")",
"method",
".",
"text",
"=",
"self",
".",
"confirmation_method",
"return",
"subject"
] | Build a Subject XML block for a SAML 1.1
AuthenticationStatement or AttributeStatement. | [
"Build",
"a",
"Subject",
"XML",
"block",
"for",
"a",
"SAML",
"1",
".",
"1",
"AuthenticationStatement",
"or",
"AttributeStatement",
"."
] | 03935d97442b46d8127ab9e1cd8deb96953fe156 | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/response.py#L260-L271 |
5,213 | sarugaku/vistir | src/vistir/compat.py | is_bytes | def is_bytes(string):
"""Check if a string is a bytes instance
:param Union[str, bytes] string: A string that may be string or bytes like
:return: Whether the provided string is a bytes type or not
:rtype: bool
"""
if six.PY3 and isinstance(string, (bytes, memoryview, bytearray)): # noqa
return True
elif six.PY2 and isinstance(string, (buffer, bytearray)): # noqa
return True
return False | python | def is_bytes(string):
"""Check if a string is a bytes instance
:param Union[str, bytes] string: A string that may be string or bytes like
:return: Whether the provided string is a bytes type or not
:rtype: bool
"""
if six.PY3 and isinstance(string, (bytes, memoryview, bytearray)): # noqa
return True
elif six.PY2 and isinstance(string, (buffer, bytearray)): # noqa
return True
return False | [
"def",
"is_bytes",
"(",
"string",
")",
":",
"if",
"six",
".",
"PY3",
"and",
"isinstance",
"(",
"string",
",",
"(",
"bytes",
",",
"memoryview",
",",
"bytearray",
")",
")",
":",
"# noqa",
"return",
"True",
"elif",
"six",
".",
"PY2",
"and",
"isinstance",
"(",
"string",
",",
"(",
"buffer",
",",
"bytearray",
")",
")",
":",
"# noqa",
"return",
"True",
"return",
"False"
] | Check if a string is a bytes instance
:param Union[str, bytes] string: A string that may be string or bytes like
:return: Whether the provided string is a bytes type or not
:rtype: bool | [
"Check",
"if",
"a",
"string",
"is",
"a",
"bytes",
"instance"
] | 96c008ee62a43608d1e70797f74634cb66a004c1 | https://github.com/sarugaku/vistir/blob/96c008ee62a43608d1e70797f74634cb66a004c1/src/vistir/compat.py#L205-L216 |
5,214 | sarugaku/vistir | src/vistir/misc.py | partialclass | def partialclass(cls, *args, **kwargs):
"""Returns a partially instantiated class
:return: A partial class instance
:rtype: cls
>>> source = partialclass(Source, url="https://pypi.org/simple")
>>> source
<class '__main__.Source'>
>>> source(name="pypi")
>>> source.__dict__
mappingproxy({'__module__': '__main__', '__dict__': <attribute '__dict__' of 'Source' objects>, '__weakref__': <attribute '__weakref__' of 'Source' objects>, '__doc__': None, '__init__': functools.partialmethod(<function Source.__init__ at 0x7f23af429bf8>, , url='https://pypi.org/simple')})
>>> new_source = source(name="pypi")
>>> new_source
<__main__.Source object at 0x7f23af189b38>
>>> new_source.__dict__
{'url': 'https://pypi.org/simple', 'verify_ssl': True, 'name': 'pypi'}
"""
name_attrs = [
n
for n in (getattr(cls, name, str(cls)) for name in ("__name__", "__qualname__"))
if n is not None
]
name_attrs = name_attrs[0]
type_ = type(
name_attrs, (cls,), {"__init__": partialmethod(cls.__init__, *args, **kwargs)}
)
# Swiped from attrs.make_class
try:
type_.__module__ = sys._getframe(1).f_globals.get("__name__", "__main__")
except (AttributeError, ValueError): # pragma: no cover
pass # pragma: no cover
return type_ | python | def partialclass(cls, *args, **kwargs):
"""Returns a partially instantiated class
:return: A partial class instance
:rtype: cls
>>> source = partialclass(Source, url="https://pypi.org/simple")
>>> source
<class '__main__.Source'>
>>> source(name="pypi")
>>> source.__dict__
mappingproxy({'__module__': '__main__', '__dict__': <attribute '__dict__' of 'Source' objects>, '__weakref__': <attribute '__weakref__' of 'Source' objects>, '__doc__': None, '__init__': functools.partialmethod(<function Source.__init__ at 0x7f23af429bf8>, , url='https://pypi.org/simple')})
>>> new_source = source(name="pypi")
>>> new_source
<__main__.Source object at 0x7f23af189b38>
>>> new_source.__dict__
{'url': 'https://pypi.org/simple', 'verify_ssl': True, 'name': 'pypi'}
"""
name_attrs = [
n
for n in (getattr(cls, name, str(cls)) for name in ("__name__", "__qualname__"))
if n is not None
]
name_attrs = name_attrs[0]
type_ = type(
name_attrs, (cls,), {"__init__": partialmethod(cls.__init__, *args, **kwargs)}
)
# Swiped from attrs.make_class
try:
type_.__module__ = sys._getframe(1).f_globals.get("__name__", "__main__")
except (AttributeError, ValueError): # pragma: no cover
pass # pragma: no cover
return type_ | [
"def",
"partialclass",
"(",
"cls",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"name_attrs",
"=",
"[",
"n",
"for",
"n",
"in",
"(",
"getattr",
"(",
"cls",
",",
"name",
",",
"str",
"(",
"cls",
")",
")",
"for",
"name",
"in",
"(",
"\"__name__\"",
",",
"\"__qualname__\"",
")",
")",
"if",
"n",
"is",
"not",
"None",
"]",
"name_attrs",
"=",
"name_attrs",
"[",
"0",
"]",
"type_",
"=",
"type",
"(",
"name_attrs",
",",
"(",
"cls",
",",
")",
",",
"{",
"\"__init__\"",
":",
"partialmethod",
"(",
"cls",
".",
"__init__",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"}",
")",
"# Swiped from attrs.make_class",
"try",
":",
"type_",
".",
"__module__",
"=",
"sys",
".",
"_getframe",
"(",
"1",
")",
".",
"f_globals",
".",
"get",
"(",
"\"__name__\"",
",",
"\"__main__\"",
")",
"except",
"(",
"AttributeError",
",",
"ValueError",
")",
":",
"# pragma: no cover",
"pass",
"# pragma: no cover",
"return",
"type_"
] | Returns a partially instantiated class
:return: A partial class instance
:rtype: cls
>>> source = partialclass(Source, url="https://pypi.org/simple")
>>> source
<class '__main__.Source'>
>>> source(name="pypi")
>>> source.__dict__
mappingproxy({'__module__': '__main__', '__dict__': <attribute '__dict__' of 'Source' objects>, '__weakref__': <attribute '__weakref__' of 'Source' objects>, '__doc__': None, '__init__': functools.partialmethod(<function Source.__init__ at 0x7f23af429bf8>, , url='https://pypi.org/simple')})
>>> new_source = source(name="pypi")
>>> new_source
<__main__.Source object at 0x7f23af189b38>
>>> new_source.__dict__
{'url': 'https://pypi.org/simple', 'verify_ssl': True, 'name': 'pypi'} | [
"Returns",
"a",
"partially",
"instantiated",
"class"
] | 96c008ee62a43608d1e70797f74634cb66a004c1 | https://github.com/sarugaku/vistir/blob/96c008ee62a43608d1e70797f74634cb66a004c1/src/vistir/misc.py#L371-L404 |
5,215 | sarugaku/vistir | src/vistir/misc.py | replace_with_text_stream | def replace_with_text_stream(stream_name):
"""Given a stream name, replace the target stream with a text-converted equivalent
:param str stream_name: The name of a target stream, such as **stdout** or **stderr**
:return: None
"""
new_stream = TEXT_STREAMS.get(stream_name)
if new_stream is not None:
new_stream = new_stream()
setattr(sys, stream_name, new_stream)
return None | python | def replace_with_text_stream(stream_name):
"""Given a stream name, replace the target stream with a text-converted equivalent
:param str stream_name: The name of a target stream, such as **stdout** or **stderr**
:return: None
"""
new_stream = TEXT_STREAMS.get(stream_name)
if new_stream is not None:
new_stream = new_stream()
setattr(sys, stream_name, new_stream)
return None | [
"def",
"replace_with_text_stream",
"(",
"stream_name",
")",
":",
"new_stream",
"=",
"TEXT_STREAMS",
".",
"get",
"(",
"stream_name",
")",
"if",
"new_stream",
"is",
"not",
"None",
":",
"new_stream",
"=",
"new_stream",
"(",
")",
"setattr",
"(",
"sys",
",",
"stream_name",
",",
"new_stream",
")",
"return",
"None"
] | Given a stream name, replace the target stream with a text-converted equivalent
:param str stream_name: The name of a target stream, such as **stdout** or **stderr**
:return: None | [
"Given",
"a",
"stream",
"name",
"replace",
"the",
"target",
"stream",
"with",
"a",
"text",
"-",
"converted",
"equivalent"
] | 96c008ee62a43608d1e70797f74634cb66a004c1 | https://github.com/sarugaku/vistir/blob/96c008ee62a43608d1e70797f74634cb66a004c1/src/vistir/misc.py#L913-L923 |
5,216 | sarugaku/vistir | src/vistir/backports/tempfile.py | _sanitize_params | def _sanitize_params(prefix, suffix, dir):
"""Common parameter processing for most APIs in this module."""
output_type = _infer_return_type(prefix, suffix, dir)
if suffix is None:
suffix = output_type()
if prefix is None:
if output_type is str:
prefix = "tmp"
else:
prefix = os.fsencode("tmp")
if dir is None:
if output_type is str:
dir = gettempdir()
else:
dir = fs_encode(gettempdir())
return prefix, suffix, dir, output_type | python | def _sanitize_params(prefix, suffix, dir):
"""Common parameter processing for most APIs in this module."""
output_type = _infer_return_type(prefix, suffix, dir)
if suffix is None:
suffix = output_type()
if prefix is None:
if output_type is str:
prefix = "tmp"
else:
prefix = os.fsencode("tmp")
if dir is None:
if output_type is str:
dir = gettempdir()
else:
dir = fs_encode(gettempdir())
return prefix, suffix, dir, output_type | [
"def",
"_sanitize_params",
"(",
"prefix",
",",
"suffix",
",",
"dir",
")",
":",
"output_type",
"=",
"_infer_return_type",
"(",
"prefix",
",",
"suffix",
",",
"dir",
")",
"if",
"suffix",
"is",
"None",
":",
"suffix",
"=",
"output_type",
"(",
")",
"if",
"prefix",
"is",
"None",
":",
"if",
"output_type",
"is",
"str",
":",
"prefix",
"=",
"\"tmp\"",
"else",
":",
"prefix",
"=",
"os",
".",
"fsencode",
"(",
"\"tmp\"",
")",
"if",
"dir",
"is",
"None",
":",
"if",
"output_type",
"is",
"str",
":",
"dir",
"=",
"gettempdir",
"(",
")",
"else",
":",
"dir",
"=",
"fs_encode",
"(",
"gettempdir",
"(",
")",
")",
"return",
"prefix",
",",
"suffix",
",",
"dir",
",",
"output_type"
] | Common parameter processing for most APIs in this module. | [
"Common",
"parameter",
"processing",
"for",
"most",
"APIs",
"in",
"this",
"module",
"."
] | 96c008ee62a43608d1e70797f74634cb66a004c1 | https://github.com/sarugaku/vistir/blob/96c008ee62a43608d1e70797f74634cb66a004c1/src/vistir/backports/tempfile.py#L55-L70 |
5,217 | danielholmstrom/dictalchemy | dictalchemy/utils.py | fromdict | def fromdict(model, data, exclude=None, exclude_underscore=None,
allow_pk=None, follow=None, include=None, only=None):
"""Update a model from a dict
Works almost identically as :meth:`dictalchemy.utils.asdict`. However, it
will not create missing instances or update collections.
This method updates the following properties on a model:
* Simple columns
* Synonyms
* Simple 1-m relationships
:param data: dict of data
:param exclude: list of properties that should be excluded
:param exclude_underscore: If True underscore properties will be excluded,\
if set to None model.dictalchemy_exclude_underscore will be used.
:param allow_pk: If True any column that refers to the primary key will \
be excluded. Defaults model.dictalchemy_fromdict_allow_pk or \
dictable.constants.fromdict_allow_pk. If set to True a primary \
key can still be excluded with the `exclude` parameter.
:param follow: Dict of relations that should be followed, the key is the \
arguments passed to the relation. Relations only works on simple \
relations, not on lists.
:param include: List of properties that should be included. This list \
will override anything in the exclude list. It will not override \
allow_pk.
:param only: List of the only properties that should be set. This \
will not override `allow_pk` or `follow`.
:raises: :class:`dictalchemy.errors.DictalchemyError` If a primary key is \
in data and allow_pk is False
:returns: The model
"""
follow = arg_to_dict(follow)
info = inspect(model)
columns = [c.key for c in info.mapper.column_attrs]
synonyms = [c.key for c in info.mapper.synonyms]
relations = [c.key for c in info.mapper.relationships]
primary_keys = [c.key for c in info.mapper.primary_key]
if allow_pk is None:
allow_pk = getattr(model, 'dictalchemy_fromdict_allow_pk',
constants.default_fromdict_allow_pk)
if only:
valid_keys = only
else:
exclude = exclude or []
exclude += getattr(model, 'dictalchemy_exclude',
constants.default_exclude) or []
if exclude_underscore is None:
exclude_underscore = getattr(model,
'dictalchemy_exclude_underscore',
constants.default_exclude_underscore)
if exclude_underscore:
# Exclude all properties starting with underscore
exclude += [k.key for k in info.mapper.attrs if k.key[0] == '_']
include = (include or []) + (getattr(model,
'dictalchemy_fromdict_include',
getattr(model,
'dictalchemy_include',
None)) or [])
valid_keys = [k for k in columns + synonyms
if k not in exclude] + include
# Keys that will be updated
update_keys = set(valid_keys) & set(data.keys())
# Check for primary keys
data_primary_key = update_keys & set(primary_keys)
if len(data_primary_key) and not allow_pk:
msg = ("Primary keys({0}) cannot be updated by fromdict."
"Set 'dictalchemy_fromdict_allow_pk' to True in your Model"
" or pass 'allow_pk=True'.").format(','.join(data_primary_key))
raise errors.DictalchemyError(msg)
# Update columns and synonyms
for k in update_keys:
setattr(model, k, data[k])
# Update simple relations
for (k, args) in follow.iteritems():
if k not in data:
continue
if k not in relations:
raise errors.MissingRelationError(k)
rel = getattr(model, k)
if hasattr(rel, 'fromdict'):
rel.fromdict(data[k], **args)
return model | python | def fromdict(model, data, exclude=None, exclude_underscore=None,
allow_pk=None, follow=None, include=None, only=None):
"""Update a model from a dict
Works almost identically as :meth:`dictalchemy.utils.asdict`. However, it
will not create missing instances or update collections.
This method updates the following properties on a model:
* Simple columns
* Synonyms
* Simple 1-m relationships
:param data: dict of data
:param exclude: list of properties that should be excluded
:param exclude_underscore: If True underscore properties will be excluded,\
if set to None model.dictalchemy_exclude_underscore will be used.
:param allow_pk: If True any column that refers to the primary key will \
be excluded. Defaults model.dictalchemy_fromdict_allow_pk or \
dictable.constants.fromdict_allow_pk. If set to True a primary \
key can still be excluded with the `exclude` parameter.
:param follow: Dict of relations that should be followed, the key is the \
arguments passed to the relation. Relations only works on simple \
relations, not on lists.
:param include: List of properties that should be included. This list \
will override anything in the exclude list. It will not override \
allow_pk.
:param only: List of the only properties that should be set. This \
will not override `allow_pk` or `follow`.
:raises: :class:`dictalchemy.errors.DictalchemyError` If a primary key is \
in data and allow_pk is False
:returns: The model
"""
follow = arg_to_dict(follow)
info = inspect(model)
columns = [c.key for c in info.mapper.column_attrs]
synonyms = [c.key for c in info.mapper.synonyms]
relations = [c.key for c in info.mapper.relationships]
primary_keys = [c.key for c in info.mapper.primary_key]
if allow_pk is None:
allow_pk = getattr(model, 'dictalchemy_fromdict_allow_pk',
constants.default_fromdict_allow_pk)
if only:
valid_keys = only
else:
exclude = exclude or []
exclude += getattr(model, 'dictalchemy_exclude',
constants.default_exclude) or []
if exclude_underscore is None:
exclude_underscore = getattr(model,
'dictalchemy_exclude_underscore',
constants.default_exclude_underscore)
if exclude_underscore:
# Exclude all properties starting with underscore
exclude += [k.key for k in info.mapper.attrs if k.key[0] == '_']
include = (include or []) + (getattr(model,
'dictalchemy_fromdict_include',
getattr(model,
'dictalchemy_include',
None)) or [])
valid_keys = [k for k in columns + synonyms
if k not in exclude] + include
# Keys that will be updated
update_keys = set(valid_keys) & set(data.keys())
# Check for primary keys
data_primary_key = update_keys & set(primary_keys)
if len(data_primary_key) and not allow_pk:
msg = ("Primary keys({0}) cannot be updated by fromdict."
"Set 'dictalchemy_fromdict_allow_pk' to True in your Model"
" or pass 'allow_pk=True'.").format(','.join(data_primary_key))
raise errors.DictalchemyError(msg)
# Update columns and synonyms
for k in update_keys:
setattr(model, k, data[k])
# Update simple relations
for (k, args) in follow.iteritems():
if k not in data:
continue
if k not in relations:
raise errors.MissingRelationError(k)
rel = getattr(model, k)
if hasattr(rel, 'fromdict'):
rel.fromdict(data[k], **args)
return model | [
"def",
"fromdict",
"(",
"model",
",",
"data",
",",
"exclude",
"=",
"None",
",",
"exclude_underscore",
"=",
"None",
",",
"allow_pk",
"=",
"None",
",",
"follow",
"=",
"None",
",",
"include",
"=",
"None",
",",
"only",
"=",
"None",
")",
":",
"follow",
"=",
"arg_to_dict",
"(",
"follow",
")",
"info",
"=",
"inspect",
"(",
"model",
")",
"columns",
"=",
"[",
"c",
".",
"key",
"for",
"c",
"in",
"info",
".",
"mapper",
".",
"column_attrs",
"]",
"synonyms",
"=",
"[",
"c",
".",
"key",
"for",
"c",
"in",
"info",
".",
"mapper",
".",
"synonyms",
"]",
"relations",
"=",
"[",
"c",
".",
"key",
"for",
"c",
"in",
"info",
".",
"mapper",
".",
"relationships",
"]",
"primary_keys",
"=",
"[",
"c",
".",
"key",
"for",
"c",
"in",
"info",
".",
"mapper",
".",
"primary_key",
"]",
"if",
"allow_pk",
"is",
"None",
":",
"allow_pk",
"=",
"getattr",
"(",
"model",
",",
"'dictalchemy_fromdict_allow_pk'",
",",
"constants",
".",
"default_fromdict_allow_pk",
")",
"if",
"only",
":",
"valid_keys",
"=",
"only",
"else",
":",
"exclude",
"=",
"exclude",
"or",
"[",
"]",
"exclude",
"+=",
"getattr",
"(",
"model",
",",
"'dictalchemy_exclude'",
",",
"constants",
".",
"default_exclude",
")",
"or",
"[",
"]",
"if",
"exclude_underscore",
"is",
"None",
":",
"exclude_underscore",
"=",
"getattr",
"(",
"model",
",",
"'dictalchemy_exclude_underscore'",
",",
"constants",
".",
"default_exclude_underscore",
")",
"if",
"exclude_underscore",
":",
"# Exclude all properties starting with underscore",
"exclude",
"+=",
"[",
"k",
".",
"key",
"for",
"k",
"in",
"info",
".",
"mapper",
".",
"attrs",
"if",
"k",
".",
"key",
"[",
"0",
"]",
"==",
"'_'",
"]",
"include",
"=",
"(",
"include",
"or",
"[",
"]",
")",
"+",
"(",
"getattr",
"(",
"model",
",",
"'dictalchemy_fromdict_include'",
",",
"getattr",
"(",
"model",
",",
"'dictalchemy_include'",
",",
"None",
")",
")",
"or",
"[",
"]",
")",
"valid_keys",
"=",
"[",
"k",
"for",
"k",
"in",
"columns",
"+",
"synonyms",
"if",
"k",
"not",
"in",
"exclude",
"]",
"+",
"include",
"# Keys that will be updated",
"update_keys",
"=",
"set",
"(",
"valid_keys",
")",
"&",
"set",
"(",
"data",
".",
"keys",
"(",
")",
")",
"# Check for primary keys",
"data_primary_key",
"=",
"update_keys",
"&",
"set",
"(",
"primary_keys",
")",
"if",
"len",
"(",
"data_primary_key",
")",
"and",
"not",
"allow_pk",
":",
"msg",
"=",
"(",
"\"Primary keys({0}) cannot be updated by fromdict.\"",
"\"Set 'dictalchemy_fromdict_allow_pk' to True in your Model\"",
"\" or pass 'allow_pk=True'.\"",
")",
".",
"format",
"(",
"','",
".",
"join",
"(",
"data_primary_key",
")",
")",
"raise",
"errors",
".",
"DictalchemyError",
"(",
"msg",
")",
"# Update columns and synonyms",
"for",
"k",
"in",
"update_keys",
":",
"setattr",
"(",
"model",
",",
"k",
",",
"data",
"[",
"k",
"]",
")",
"# Update simple relations",
"for",
"(",
"k",
",",
"args",
")",
"in",
"follow",
".",
"iteritems",
"(",
")",
":",
"if",
"k",
"not",
"in",
"data",
":",
"continue",
"if",
"k",
"not",
"in",
"relations",
":",
"raise",
"errors",
".",
"MissingRelationError",
"(",
"k",
")",
"rel",
"=",
"getattr",
"(",
"model",
",",
"k",
")",
"if",
"hasattr",
"(",
"rel",
",",
"'fromdict'",
")",
":",
"rel",
".",
"fromdict",
"(",
"data",
"[",
"k",
"]",
",",
"*",
"*",
"args",
")",
"return",
"model"
] | Update a model from a dict
Works almost identically as :meth:`dictalchemy.utils.asdict`. However, it
will not create missing instances or update collections.
This method updates the following properties on a model:
* Simple columns
* Synonyms
* Simple 1-m relationships
:param data: dict of data
:param exclude: list of properties that should be excluded
:param exclude_underscore: If True underscore properties will be excluded,\
if set to None model.dictalchemy_exclude_underscore will be used.
:param allow_pk: If True any column that refers to the primary key will \
be excluded. Defaults model.dictalchemy_fromdict_allow_pk or \
dictable.constants.fromdict_allow_pk. If set to True a primary \
key can still be excluded with the `exclude` parameter.
:param follow: Dict of relations that should be followed, the key is the \
arguments passed to the relation. Relations only works on simple \
relations, not on lists.
:param include: List of properties that should be included. This list \
will override anything in the exclude list. It will not override \
allow_pk.
:param only: List of the only properties that should be set. This \
will not override `allow_pk` or `follow`.
:raises: :class:`dictalchemy.errors.DictalchemyError` If a primary key is \
in data and allow_pk is False
:returns: The model | [
"Update",
"a",
"model",
"from",
"a",
"dict"
] | 038b8822b0ed66feef78a80b3af8f3a09f795b5a | https://github.com/danielholmstrom/dictalchemy/blob/038b8822b0ed66feef78a80b3af8f3a09f795b5a/dictalchemy/utils.py#L186-L283 |
5,218 | danielholmstrom/dictalchemy | dictalchemy/utils.py | make_class_dictable | def make_class_dictable(
cls,
exclude=constants.default_exclude,
exclude_underscore=constants.default_exclude_underscore,
fromdict_allow_pk=constants.default_fromdict_allow_pk,
include=None,
asdict_include=None,
fromdict_include=None):
"""Make a class dictable
Useful for when the Base class is already defined, for example when using
Flask-SQLAlchemy.
Warning: This method will overwrite existing attributes if they exist.
:param exclude: Will be set as dictalchemy_exclude on the class
:param exclude_underscore: Will be set as dictalchemy_exclude_underscore \
on the class
:param fromdict_allow_pk: Will be set as dictalchemy_fromdict_allow_pk\
on the class
:param include: Will be set as dictalchemy_include on the class.
:param asdict_include: Will be set as `dictalchemy_asdict_include` on the \
class. If not None it will override `dictalchemy_include`.
:param fromdict_include: Will be set as `dictalchemy_fromdict_include` on \
the class. If not None it will override `dictalchemy_include`.
:returns: The class
"""
setattr(cls, 'dictalchemy_exclude', exclude)
setattr(cls, 'dictalchemy_exclude_underscore', exclude_underscore)
setattr(cls, 'dictalchemy_fromdict_allow_pk', fromdict_allow_pk)
setattr(cls, 'asdict', asdict)
setattr(cls, 'fromdict', fromdict)
setattr(cls, '__iter__', iter)
setattr(cls, 'dictalchemy_include', include)
setattr(cls, 'dictalchemy_asdict_include', asdict_include)
setattr(cls, 'dictalchemy_fromdict_include', fromdict_include)
return cls | python | def make_class_dictable(
cls,
exclude=constants.default_exclude,
exclude_underscore=constants.default_exclude_underscore,
fromdict_allow_pk=constants.default_fromdict_allow_pk,
include=None,
asdict_include=None,
fromdict_include=None):
"""Make a class dictable
Useful for when the Base class is already defined, for example when using
Flask-SQLAlchemy.
Warning: This method will overwrite existing attributes if they exist.
:param exclude: Will be set as dictalchemy_exclude on the class
:param exclude_underscore: Will be set as dictalchemy_exclude_underscore \
on the class
:param fromdict_allow_pk: Will be set as dictalchemy_fromdict_allow_pk\
on the class
:param include: Will be set as dictalchemy_include on the class.
:param asdict_include: Will be set as `dictalchemy_asdict_include` on the \
class. If not None it will override `dictalchemy_include`.
:param fromdict_include: Will be set as `dictalchemy_fromdict_include` on \
the class. If not None it will override `dictalchemy_include`.
:returns: The class
"""
setattr(cls, 'dictalchemy_exclude', exclude)
setattr(cls, 'dictalchemy_exclude_underscore', exclude_underscore)
setattr(cls, 'dictalchemy_fromdict_allow_pk', fromdict_allow_pk)
setattr(cls, 'asdict', asdict)
setattr(cls, 'fromdict', fromdict)
setattr(cls, '__iter__', iter)
setattr(cls, 'dictalchemy_include', include)
setattr(cls, 'dictalchemy_asdict_include', asdict_include)
setattr(cls, 'dictalchemy_fromdict_include', fromdict_include)
return cls | [
"def",
"make_class_dictable",
"(",
"cls",
",",
"exclude",
"=",
"constants",
".",
"default_exclude",
",",
"exclude_underscore",
"=",
"constants",
".",
"default_exclude_underscore",
",",
"fromdict_allow_pk",
"=",
"constants",
".",
"default_fromdict_allow_pk",
",",
"include",
"=",
"None",
",",
"asdict_include",
"=",
"None",
",",
"fromdict_include",
"=",
"None",
")",
":",
"setattr",
"(",
"cls",
",",
"'dictalchemy_exclude'",
",",
"exclude",
")",
"setattr",
"(",
"cls",
",",
"'dictalchemy_exclude_underscore'",
",",
"exclude_underscore",
")",
"setattr",
"(",
"cls",
",",
"'dictalchemy_fromdict_allow_pk'",
",",
"fromdict_allow_pk",
")",
"setattr",
"(",
"cls",
",",
"'asdict'",
",",
"asdict",
")",
"setattr",
"(",
"cls",
",",
"'fromdict'",
",",
"fromdict",
")",
"setattr",
"(",
"cls",
",",
"'__iter__'",
",",
"iter",
")",
"setattr",
"(",
"cls",
",",
"'dictalchemy_include'",
",",
"include",
")",
"setattr",
"(",
"cls",
",",
"'dictalchemy_asdict_include'",
",",
"asdict_include",
")",
"setattr",
"(",
"cls",
",",
"'dictalchemy_fromdict_include'",
",",
"fromdict_include",
")",
"return",
"cls"
] | Make a class dictable
Useful for when the Base class is already defined, for example when using
Flask-SQLAlchemy.
Warning: This method will overwrite existing attributes if they exist.
:param exclude: Will be set as dictalchemy_exclude on the class
:param exclude_underscore: Will be set as dictalchemy_exclude_underscore \
on the class
:param fromdict_allow_pk: Will be set as dictalchemy_fromdict_allow_pk\
on the class
:param include: Will be set as dictalchemy_include on the class.
:param asdict_include: Will be set as `dictalchemy_asdict_include` on the \
class. If not None it will override `dictalchemy_include`.
:param fromdict_include: Will be set as `dictalchemy_fromdict_include` on \
the class. If not None it will override `dictalchemy_include`.
:returns: The class | [
"Make",
"a",
"class",
"dictable"
] | 038b8822b0ed66feef78a80b3af8f3a09f795b5a | https://github.com/danielholmstrom/dictalchemy/blob/038b8822b0ed66feef78a80b3af8f3a09f795b5a/dictalchemy/utils.py#L295-L333 |
5,219 | simon-anders/htseq | python2/HTSeq/__init__.py | parse_GFF_attribute_string | def parse_GFF_attribute_string(attrStr, extra_return_first_value=False):
"""Parses a GFF attribute string and returns it as a dictionary.
If 'extra_return_first_value' is set, a pair is returned: the dictionary
and the value of the first attribute. This might be useful if this is the
ID.
"""
if attrStr.endswith("\n"):
attrStr = attrStr[:-1]
d = {}
first_val = "_unnamed_"
for (i, attr) in itertools.izip(
itertools.count(),
_HTSeq.quotesafe_split(attrStr)):
if _re_attr_empty.match(attr):
continue
if attr.count('"') not in (0, 2):
raise ValueError(
"The attribute string seems to contain mismatched quotes.")
mo = _re_attr_main.match(attr)
if not mo:
raise ValueError("Failure parsing GFF attribute line")
val = mo.group(2)
if val.startswith('"') and val.endswith('"'):
val = val[1:-1]
d[intern(mo.group(1))] = intern(val)
if extra_return_first_value and i == 0:
first_val = val
if extra_return_first_value:
return (d, first_val)
else:
return d | python | def parse_GFF_attribute_string(attrStr, extra_return_first_value=False):
"""Parses a GFF attribute string and returns it as a dictionary.
If 'extra_return_first_value' is set, a pair is returned: the dictionary
and the value of the first attribute. This might be useful if this is the
ID.
"""
if attrStr.endswith("\n"):
attrStr = attrStr[:-1]
d = {}
first_val = "_unnamed_"
for (i, attr) in itertools.izip(
itertools.count(),
_HTSeq.quotesafe_split(attrStr)):
if _re_attr_empty.match(attr):
continue
if attr.count('"') not in (0, 2):
raise ValueError(
"The attribute string seems to contain mismatched quotes.")
mo = _re_attr_main.match(attr)
if not mo:
raise ValueError("Failure parsing GFF attribute line")
val = mo.group(2)
if val.startswith('"') and val.endswith('"'):
val = val[1:-1]
d[intern(mo.group(1))] = intern(val)
if extra_return_first_value and i == 0:
first_val = val
if extra_return_first_value:
return (d, first_val)
else:
return d | [
"def",
"parse_GFF_attribute_string",
"(",
"attrStr",
",",
"extra_return_first_value",
"=",
"False",
")",
":",
"if",
"attrStr",
".",
"endswith",
"(",
"\"\\n\"",
")",
":",
"attrStr",
"=",
"attrStr",
"[",
":",
"-",
"1",
"]",
"d",
"=",
"{",
"}",
"first_val",
"=",
"\"_unnamed_\"",
"for",
"(",
"i",
",",
"attr",
")",
"in",
"itertools",
".",
"izip",
"(",
"itertools",
".",
"count",
"(",
")",
",",
"_HTSeq",
".",
"quotesafe_split",
"(",
"attrStr",
")",
")",
":",
"if",
"_re_attr_empty",
".",
"match",
"(",
"attr",
")",
":",
"continue",
"if",
"attr",
".",
"count",
"(",
"'\"'",
")",
"not",
"in",
"(",
"0",
",",
"2",
")",
":",
"raise",
"ValueError",
"(",
"\"The attribute string seems to contain mismatched quotes.\"",
")",
"mo",
"=",
"_re_attr_main",
".",
"match",
"(",
"attr",
")",
"if",
"not",
"mo",
":",
"raise",
"ValueError",
"(",
"\"Failure parsing GFF attribute line\"",
")",
"val",
"=",
"mo",
".",
"group",
"(",
"2",
")",
"if",
"val",
".",
"startswith",
"(",
"'\"'",
")",
"and",
"val",
".",
"endswith",
"(",
"'\"'",
")",
":",
"val",
"=",
"val",
"[",
"1",
":",
"-",
"1",
"]",
"d",
"[",
"intern",
"(",
"mo",
".",
"group",
"(",
"1",
")",
")",
"]",
"=",
"intern",
"(",
"val",
")",
"if",
"extra_return_first_value",
"and",
"i",
"==",
"0",
":",
"first_val",
"=",
"val",
"if",
"extra_return_first_value",
":",
"return",
"(",
"d",
",",
"first_val",
")",
"else",
":",
"return",
"d"
] | Parses a GFF attribute string and returns it as a dictionary.
If 'extra_return_first_value' is set, a pair is returned: the dictionary
and the value of the first attribute. This might be useful if this is the
ID. | [
"Parses",
"a",
"GFF",
"attribute",
"string",
"and",
"returns",
"it",
"as",
"a",
"dictionary",
"."
] | 6f7d66e757e610228c33ebf2bb5dc8cc5051c7f0 | https://github.com/simon-anders/htseq/blob/6f7d66e757e610228c33ebf2bb5dc8cc5051c7f0/python2/HTSeq/__init__.py#L144-L175 |
5,220 | simon-anders/htseq | python2/HTSeq/__init__.py | pair_SAM_alignments | def pair_SAM_alignments(
alignments,
bundle=False,
primary_only=False):
'''Iterate over SAM alignments, name-sorted paired-end
Args:
alignments (iterator of SAM/BAM alignments): the alignments to wrap
bundle (bool): if True, bundle all alignments from one read pair into a
single yield. If False (default), each pair of alignments is
yielded separately.
primary_only (bool): for each read, consider only the primary line
(SAM flag 0x900 = 0). The SAM specification requires one and only
one of those for each read.
Yields:
2-tuples with each pair of alignments or, if bundle==True, each bundled
list of alignments.
'''
mate_missing_count = [0]
def process_list(almnt_list):
'''Transform a list of alignments with the same read name into pairs
Args:
almnt_list (list): alignments to process
Yields:
each pair of alignments.
This function is needed because each line of a BAM file is not a read
but an alignment. For uniquely mapped and unmapped reads, those two are
the same. For multimapped reads, however, there can be more than one
alignment for each read. Also, it is normal for a mapper to uniquely
map one read and multimap its mate.
This function goes down the list of alignments for a given read name
and tries to find the first mate. So if read 1 is uniquely mapped but
read 2 is mapped 4 times, only (read 1, read 2 - first occurrence) will
yield; the other 3 alignments of read 2 are ignored.
'''
while len(almnt_list) > 0:
a1 = almnt_list.pop(0)
# Find its mate
for a2 in almnt_list:
if a1.pe_which == a2.pe_which:
continue
if a1.aligned != a2.mate_aligned or a1.mate_aligned != a2.aligned:
continue
if not (a1.aligned and a2.aligned):
break
if a1.iv.chrom == a2.mate_start.chrom and a1.iv.start == a2.mate_start.pos and \
a2.iv.chrom == a1.mate_start.chrom and a2.iv.start == a1.mate_start.pos:
break
else:
if a1.mate_aligned:
mate_missing_count[0] += 1
if mate_missing_count[0] == 1:
warnings.warn(
"Read " + a1.read.name + " claims to have an aligned mate " +
"which could not be found in an adjacent line.")
a2 = None
if a2 is not None:
almnt_list.remove(a2)
if a1.pe_which == "first":
yield (a1, a2)
else:
assert a1.pe_which == "second"
yield (a2, a1)
almnt_list = []
current_name = None
for almnt in alignments:
if not almnt.paired_end:
raise ValueError(
"'pair_alignments' needs a sequence of paired-end alignments")
if almnt.pe_which == "unknown":
raise ValueError(
"Paired-end read found with 'unknown' 'pe_which' status.")
# FIXME: almnt.not_primary_alignment currently means secondary
if primary_only and (almnt.not_primary_alignment or almnt.supplementary):
continue
if almnt.read.name == current_name:
almnt_list.append(almnt)
else:
if bundle:
yield list(process_list(almnt_list))
else:
for p in process_list(almnt_list):
yield p
current_name = almnt.read.name
almnt_list = [almnt]
if bundle:
yield list(process_list(almnt_list))
else:
for p in process_list(almnt_list):
yield p
if mate_missing_count[0] > 1:
warnings.warn("%d reads with missing mate encountered." %
mate_missing_count[0]) | python | def pair_SAM_alignments(
alignments,
bundle=False,
primary_only=False):
'''Iterate over SAM alignments, name-sorted paired-end
Args:
alignments (iterator of SAM/BAM alignments): the alignments to wrap
bundle (bool): if True, bundle all alignments from one read pair into a
single yield. If False (default), each pair of alignments is
yielded separately.
primary_only (bool): for each read, consider only the primary line
(SAM flag 0x900 = 0). The SAM specification requires one and only
one of those for each read.
Yields:
2-tuples with each pair of alignments or, if bundle==True, each bundled
list of alignments.
'''
mate_missing_count = [0]
def process_list(almnt_list):
'''Transform a list of alignments with the same read name into pairs
Args:
almnt_list (list): alignments to process
Yields:
each pair of alignments.
This function is needed because each line of a BAM file is not a read
but an alignment. For uniquely mapped and unmapped reads, those two are
the same. For multimapped reads, however, there can be more than one
alignment for each read. Also, it is normal for a mapper to uniquely
map one read and multimap its mate.
This function goes down the list of alignments for a given read name
and tries to find the first mate. So if read 1 is uniquely mapped but
read 2 is mapped 4 times, only (read 1, read 2 - first occurrence) will
yield; the other 3 alignments of read 2 are ignored.
'''
while len(almnt_list) > 0:
a1 = almnt_list.pop(0)
# Find its mate
for a2 in almnt_list:
if a1.pe_which == a2.pe_which:
continue
if a1.aligned != a2.mate_aligned or a1.mate_aligned != a2.aligned:
continue
if not (a1.aligned and a2.aligned):
break
if a1.iv.chrom == a2.mate_start.chrom and a1.iv.start == a2.mate_start.pos and \
a2.iv.chrom == a1.mate_start.chrom and a2.iv.start == a1.mate_start.pos:
break
else:
if a1.mate_aligned:
mate_missing_count[0] += 1
if mate_missing_count[0] == 1:
warnings.warn(
"Read " + a1.read.name + " claims to have an aligned mate " +
"which could not be found in an adjacent line.")
a2 = None
if a2 is not None:
almnt_list.remove(a2)
if a1.pe_which == "first":
yield (a1, a2)
else:
assert a1.pe_which == "second"
yield (a2, a1)
almnt_list = []
current_name = None
for almnt in alignments:
if not almnt.paired_end:
raise ValueError(
"'pair_alignments' needs a sequence of paired-end alignments")
if almnt.pe_which == "unknown":
raise ValueError(
"Paired-end read found with 'unknown' 'pe_which' status.")
# FIXME: almnt.not_primary_alignment currently means secondary
if primary_only and (almnt.not_primary_alignment or almnt.supplementary):
continue
if almnt.read.name == current_name:
almnt_list.append(almnt)
else:
if bundle:
yield list(process_list(almnt_list))
else:
for p in process_list(almnt_list):
yield p
current_name = almnt.read.name
almnt_list = [almnt]
if bundle:
yield list(process_list(almnt_list))
else:
for p in process_list(almnt_list):
yield p
if mate_missing_count[0] > 1:
warnings.warn("%d reads with missing mate encountered." %
mate_missing_count[0]) | [
"def",
"pair_SAM_alignments",
"(",
"alignments",
",",
"bundle",
"=",
"False",
",",
"primary_only",
"=",
"False",
")",
":",
"mate_missing_count",
"=",
"[",
"0",
"]",
"def",
"process_list",
"(",
"almnt_list",
")",
":",
"'''Transform a list of alignment with the same read name into pairs\n\n Args:\n almnt_list (list): alignments to process\n\n Yields:\n each pair of alignments.\n\n This function is needed because each line of a BAM file is not a read\n but an alignment. For uniquely mapped and unmapped reads, those two are\n the same. For multimapped reads, however, there can be more than one\n alignment for each read. Also, it is normal for a mapper to uniquely\n map one read and multimap its mate.\n\n This function goes down the list of alignments for a given read name\n and tries to find the first mate. So if read 1 is uniquely mapped but\n read 2 is mapped 4 times, only (read 1, read 2 - first occurrence) will\n yield; the other 3 alignments of read 2 are ignored.\n '''",
"while",
"len",
"(",
"almnt_list",
")",
">",
"0",
":",
"a1",
"=",
"almnt_list",
".",
"pop",
"(",
"0",
")",
"# Find its mate",
"for",
"a2",
"in",
"almnt_list",
":",
"if",
"a1",
".",
"pe_which",
"==",
"a2",
".",
"pe_which",
":",
"continue",
"if",
"a1",
".",
"aligned",
"!=",
"a2",
".",
"mate_aligned",
"or",
"a1",
".",
"mate_aligned",
"!=",
"a2",
".",
"aligned",
":",
"continue",
"if",
"not",
"(",
"a1",
".",
"aligned",
"and",
"a2",
".",
"aligned",
")",
":",
"break",
"if",
"a1",
".",
"iv",
".",
"chrom",
"==",
"a2",
".",
"mate_start",
".",
"chrom",
"and",
"a1",
".",
"iv",
".",
"start",
"==",
"a2",
".",
"mate_start",
".",
"pos",
"and",
"a2",
".",
"iv",
".",
"chrom",
"==",
"a1",
".",
"mate_start",
".",
"chrom",
"and",
"a2",
".",
"iv",
".",
"start",
"==",
"a1",
".",
"mate_start",
".",
"pos",
":",
"break",
"else",
":",
"if",
"a1",
".",
"mate_aligned",
":",
"mate_missing_count",
"[",
"0",
"]",
"+=",
"1",
"if",
"mate_missing_count",
"[",
"0",
"]",
"==",
"1",
":",
"warnings",
".",
"warn",
"(",
"\"Read \"",
"+",
"a1",
".",
"read",
".",
"name",
"+",
"\" claims to have an aligned mate \"",
"+",
"\"which could not be found in an adjacent line.\"",
")",
"a2",
"=",
"None",
"if",
"a2",
"is",
"not",
"None",
":",
"almnt_list",
".",
"remove",
"(",
"a2",
")",
"if",
"a1",
".",
"pe_which",
"==",
"\"first\"",
":",
"yield",
"(",
"a1",
",",
"a2",
")",
"else",
":",
"assert",
"a1",
".",
"pe_which",
"==",
"\"second\"",
"yield",
"(",
"a2",
",",
"a1",
")",
"almnt_list",
"=",
"[",
"]",
"current_name",
"=",
"None",
"for",
"almnt",
"in",
"alignments",
":",
"if",
"not",
"almnt",
".",
"paired_end",
":",
"raise",
"ValueError",
"(",
"\"'pair_alignments' needs a sequence of paired-end alignments\"",
")",
"if",
"almnt",
".",
"pe_which",
"==",
"\"unknown\"",
":",
"raise",
"ValueError",
"(",
"\"Paired-end read found with 'unknown' 'pe_which' status.\"",
")",
"# FIXME: almnt.not_primary_alignment currently means secondary",
"if",
"primary_only",
"and",
"(",
"almnt",
".",
"not_primary_alignment",
"or",
"almnt",
".",
"supplementary",
")",
":",
"continue",
"if",
"almnt",
".",
"read",
".",
"name",
"==",
"current_name",
":",
"almnt_list",
".",
"append",
"(",
"almnt",
")",
"else",
":",
"if",
"bundle",
":",
"yield",
"list",
"(",
"process_list",
"(",
"almnt_list",
")",
")",
"else",
":",
"for",
"p",
"in",
"process_list",
"(",
"almnt_list",
")",
":",
"yield",
"p",
"current_name",
"=",
"almnt",
".",
"read",
".",
"name",
"almnt_list",
"=",
"[",
"almnt",
"]",
"if",
"bundle",
":",
"yield",
"list",
"(",
"process_list",
"(",
"almnt_list",
")",
")",
"else",
":",
"for",
"p",
"in",
"process_list",
"(",
"almnt_list",
")",
":",
"yield",
"p",
"if",
"mate_missing_count",
"[",
"0",
"]",
">",
"1",
":",
"warnings",
".",
"warn",
"(",
"\"%d reads with missing mate encountered.\"",
"%",
"mate_missing_count",
"[",
"0",
"]",
")"
] | Iterate over SAM alignments, name-sorted paired-end
Args:
alignments (iterator of SAM/BAM alignments): the alignments to wrap
bundle (bool): if True, bundle all alignments from one read pair into a
single yield. If False (default), each pair of alignments is
yielded separately.
primary_only (bool): for each read, consider only the primary line
(SAM flag 0x900 = 0). The SAM specification requires one and only
one of those for each read.
Yields:
2-tuples with each pair of alignments or, if bundle==True, each bundled
list of alignments. | [
"Iterate",
"over",
"SAM",
"aligments",
"name",
"-",
"sorted",
"paired",
"-",
"end"
] | 6f7d66e757e610228c33ebf2bb5dc8cc5051c7f0 | https://github.com/simon-anders/htseq/blob/6f7d66e757e610228c33ebf2bb5dc8cc5051c7f0/python2/HTSeq/__init__.py#L634-L736 |
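A hedged usage sketch for `pair_SAM_alignments`: it expects a name-sorted paired-end BAM, typically wrapped in `HTSeq.BAM_Reader` (the file name below is hypothetical):

```python
import HTSeq

bam = HTSeq.BAM_Reader("name_sorted_pairs.bam")  # hypothetical input file

for first, second in HTSeq.pair_SAM_alignments(bam):
    # Either slot can be None when the mate could not be found nearby.
    if first is None or second is None:
        continue
    if first.aligned and second.aligned:
        print(first.read.name, first.iv, second.iv)
```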
5,221 | simon-anders/htseq | python3/HTSeq/__init__.py | pair_SAM_alignments_with_buffer | def pair_SAM_alignments_with_buffer(
alignments,
max_buffer_size=30000000,
primary_only=False):
'''Iterate over SAM alignments with buffer, position-sorted paired-end
Args:
alignments (iterator of SAM/BAM alignments): the alignments to wrap
max_buffer_size (int): maximal number of alignments to keep in memory.
primary_only (bool): for each read, consider only the primary line
(SAM flag 0x900 = 0). The SAM specification requires one and only
one of those for each read.
Yields:
2-tuples with each pair of alignments.
'''
almnt_buffer = {}
ambiguous_pairing_counter = 0
for almnt in alignments:
if not almnt.paired_end:
raise ValueError(
"Sequence of paired-end alignments expected, but got single-end alignment.")
if almnt.pe_which == "unknown":
raise ValueError(
"Cannot process paired-end alignment found with 'unknown' 'pe_which' status.")
# FIXME: almnt.not_primary_alignment currently means secondary
if primary_only and (almnt.not_primary_alignment or almnt.supplementary):
continue
matekey = (
almnt.read.name,
"second" if almnt.pe_which == "first" else "first",
almnt.mate_start.chrom if almnt.mate_aligned else None,
almnt.mate_start.pos if almnt.mate_aligned else None,
almnt.iv.chrom if almnt.aligned else None,
almnt.iv.start if almnt.aligned else None,
-almnt.inferred_insert_size if almnt.aligned and almnt.mate_aligned else None)
if matekey in almnt_buffer:
if len(almnt_buffer[matekey]) == 1:
mate = almnt_buffer[matekey][0]
del almnt_buffer[matekey]
else:
mate = almnt_buffer[matekey].pop(0)
if ambiguous_pairing_counter == 0:
ambiguous_pairing_first_occurance = matekey
ambiguous_pairing_counter += 1
if almnt.pe_which == "first":
yield (almnt, mate)
else:
yield (mate, almnt)
else:
almntkey = (
almnt.read.name, almnt.pe_which,
almnt.iv.chrom if almnt.aligned else None,
almnt.iv.start if almnt.aligned else None,
almnt.mate_start.chrom if almnt.mate_aligned else None,
almnt.mate_start.pos if almnt.mate_aligned else None,
almnt.inferred_insert_size if almnt.aligned and almnt.mate_aligned else None)
if almntkey not in almnt_buffer:
almnt_buffer[almntkey] = [almnt]
else:
almnt_buffer[almntkey].append(almnt)
if len(almnt_buffer) > max_buffer_size:
raise ValueError(
"Maximum alignment buffer size exceeded while pairing SAM alignments.")
if len(almnt_buffer) > 0:
warnings.warn(
"Mate records missing for %d records; first such record: %s." %
(len(almnt_buffer), str(list(almnt_buffer.values())[0][0])))
for almnt_list in list(almnt_buffer.values()):
for almnt in almnt_list:
if almnt.pe_which == "first":
yield (almnt, None)
else:
yield (None, almnt)
if ambiguous_pairing_counter > 0:
warnings.warn(
"Mate pairing was ambiguous for %d records; mate key for first such record: %s." %
(ambiguous_pairing_counter, str(ambiguous_pairing_first_occurance))) | python | def pair_SAM_alignments_with_buffer(
alignments,
max_buffer_size=30000000,
primary_only=False):
'''Iterate over SAM alignments with buffer, position-sorted paired-end
Args:
alignments (iterator of SAM/BAM alignments): the alignments to wrap
max_buffer_size (int): maximal number of alignments to keep in memory.
primary_only (bool): for each read, consider only the primary line
(SAM flag 0x900 = 0). The SAM specification requires one and only
one of those for each read.
Yields:
2-tuples with each pair of alignments.
'''
almnt_buffer = {}
ambiguous_pairing_counter = 0
for almnt in alignments:
if not almnt.paired_end:
raise ValueError(
"Sequence of paired-end alignments expected, but got single-end alignment.")
if almnt.pe_which == "unknown":
raise ValueError(
"Cannot process paired-end alignment found with 'unknown' 'pe_which' status.")
# FIXME: almnt.not_primary_alignment currently means secondary
if primary_only and (almnt.not_primary_alignment or almnt.supplementary):
continue
matekey = (
almnt.read.name,
"second" if almnt.pe_which == "first" else "first",
almnt.mate_start.chrom if almnt.mate_aligned else None,
almnt.mate_start.pos if almnt.mate_aligned else None,
almnt.iv.chrom if almnt.aligned else None,
almnt.iv.start if almnt.aligned else None,
-almnt.inferred_insert_size if almnt.aligned and almnt.mate_aligned else None)
if matekey in almnt_buffer:
if len(almnt_buffer[matekey]) == 1:
mate = almnt_buffer[matekey][0]
del almnt_buffer[matekey]
else:
mate = almnt_buffer[matekey].pop(0)
if ambiguous_pairing_counter == 0:
ambiguous_pairing_first_occurance = matekey
ambiguous_pairing_counter += 1
if almnt.pe_which == "first":
yield (almnt, mate)
else:
yield (mate, almnt)
else:
almntkey = (
almnt.read.name, almnt.pe_which,
almnt.iv.chrom if almnt.aligned else None,
almnt.iv.start if almnt.aligned else None,
almnt.mate_start.chrom if almnt.mate_aligned else None,
almnt.mate_start.pos if almnt.mate_aligned else None,
almnt.inferred_insert_size if almnt.aligned and almnt.mate_aligned else None)
if almntkey not in almnt_buffer:
almnt_buffer[almntkey] = [almnt]
else:
almnt_buffer[almntkey].append(almnt)
if len(almnt_buffer) > max_buffer_size:
raise ValueError(
"Maximum alignment buffer size exceeded while pairing SAM alignments.")
if len(almnt_buffer) > 0:
warnings.warn(
"Mate records missing for %d records; first such record: %s." %
(len(almnt_buffer), str(list(almnt_buffer.values())[0][0])))
for almnt_list in list(almnt_buffer.values()):
for almnt in almnt_list:
if almnt.pe_which == "first":
yield (almnt, None)
else:
yield (None, almnt)
if ambiguous_pairing_counter > 0:
warnings.warn(
"Mate pairing was ambiguous for %d records; mate key for first such record: %s." %
(ambiguous_pairing_counter, str(ambiguous_pairing_first_occurance))) | [
"def",
"pair_SAM_alignments_with_buffer",
"(",
"alignments",
",",
"max_buffer_size",
"=",
"30000000",
",",
"primary_only",
"=",
"False",
")",
":",
"almnt_buffer",
"=",
"{",
"}",
"ambiguous_pairing_counter",
"=",
"0",
"for",
"almnt",
"in",
"alignments",
":",
"if",
"not",
"almnt",
".",
"paired_end",
":",
"raise",
"ValueError",
"(",
"\"Sequence of paired-end alignments expected, but got single-end alignment.\"",
")",
"if",
"almnt",
".",
"pe_which",
"==",
"\"unknown\"",
":",
"raise",
"ValueError",
"(",
"\"Cannot process paired-end alignment found with 'unknown' 'pe_which' status.\"",
")",
"# FIXME: almnt.not_primary_alignment currently means secondary",
"if",
"primary_only",
"and",
"(",
"almnt",
".",
"not_primary_alignment",
"or",
"almnt",
".",
"supplementary",
")",
":",
"continue",
"matekey",
"=",
"(",
"almnt",
".",
"read",
".",
"name",
",",
"\"second\"",
"if",
"almnt",
".",
"pe_which",
"==",
"\"first\"",
"else",
"\"first\"",
",",
"almnt",
".",
"mate_start",
".",
"chrom",
"if",
"almnt",
".",
"mate_aligned",
"else",
"None",
",",
"almnt",
".",
"mate_start",
".",
"pos",
"if",
"almnt",
".",
"mate_aligned",
"else",
"None",
",",
"almnt",
".",
"iv",
".",
"chrom",
"if",
"almnt",
".",
"aligned",
"else",
"None",
",",
"almnt",
".",
"iv",
".",
"start",
"if",
"almnt",
".",
"aligned",
"else",
"None",
",",
"-",
"almnt",
".",
"inferred_insert_size",
"if",
"almnt",
".",
"aligned",
"and",
"almnt",
".",
"mate_aligned",
"else",
"None",
")",
"if",
"matekey",
"in",
"almnt_buffer",
":",
"if",
"len",
"(",
"almnt_buffer",
"[",
"matekey",
"]",
")",
"==",
"1",
":",
"mate",
"=",
"almnt_buffer",
"[",
"matekey",
"]",
"[",
"0",
"]",
"del",
"almnt_buffer",
"[",
"matekey",
"]",
"else",
":",
"mate",
"=",
"almnt_buffer",
"[",
"matekey",
"]",
".",
"pop",
"(",
"0",
")",
"if",
"ambiguous_pairing_counter",
"==",
"0",
":",
"ambiguous_pairing_first_occurance",
"=",
"matekey",
"ambiguous_pairing_counter",
"+=",
"1",
"if",
"almnt",
".",
"pe_which",
"==",
"\"first\"",
":",
"yield",
"(",
"almnt",
",",
"mate",
")",
"else",
":",
"yield",
"(",
"mate",
",",
"almnt",
")",
"else",
":",
"almntkey",
"=",
"(",
"almnt",
".",
"read",
".",
"name",
",",
"almnt",
".",
"pe_which",
",",
"almnt",
".",
"iv",
".",
"chrom",
"if",
"almnt",
".",
"aligned",
"else",
"None",
",",
"almnt",
".",
"iv",
".",
"start",
"if",
"almnt",
".",
"aligned",
"else",
"None",
",",
"almnt",
".",
"mate_start",
".",
"chrom",
"if",
"almnt",
".",
"mate_aligned",
"else",
"None",
",",
"almnt",
".",
"mate_start",
".",
"pos",
"if",
"almnt",
".",
"mate_aligned",
"else",
"None",
",",
"almnt",
".",
"inferred_insert_size",
"if",
"almnt",
".",
"aligned",
"and",
"almnt",
".",
"mate_aligned",
"else",
"None",
")",
"if",
"almntkey",
"not",
"in",
"almnt_buffer",
":",
"almnt_buffer",
"[",
"almntkey",
"]",
"=",
"[",
"almnt",
"]",
"else",
":",
"almnt_buffer",
"[",
"almntkey",
"]",
".",
"append",
"(",
"almnt",
")",
"if",
"len",
"(",
"almnt_buffer",
")",
">",
"max_buffer_size",
":",
"raise",
"ValueError",
"(",
"\"Maximum alignment buffer size exceeded while pairing SAM alignments.\"",
")",
"if",
"len",
"(",
"almnt_buffer",
")",
">",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"Mate records missing for %d records; first such record: %s.\"",
"%",
"(",
"len",
"(",
"almnt_buffer",
")",
",",
"str",
"(",
"list",
"(",
"almnt_buffer",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
"[",
"0",
"]",
")",
")",
")",
"for",
"almnt_list",
"in",
"list",
"(",
"almnt_buffer",
".",
"values",
"(",
")",
")",
":",
"for",
"almnt",
"in",
"almnt_list",
":",
"if",
"almnt",
".",
"pe_which",
"==",
"\"first\"",
":",
"yield",
"(",
"almnt",
",",
"None",
")",
"else",
":",
"yield",
"(",
"None",
",",
"almnt",
")",
"if",
"ambiguous_pairing_counter",
">",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"Mate pairing was ambiguous for %d records; mate key for first such record: %s.\"",
"%",
"(",
"ambiguous_pairing_counter",
",",
"str",
"(",
"ambiguous_pairing_first_occurance",
")",
")",
")"
] | Iterate over SAM alignments with buffer, position-sorted paired-end
Args:
alignments (iterator of SAM/BAM alignments): the alignments to wrap
max_buffer_size (int): maximal number of alignments to keep in memory.
primary_only (bool): for each read, consider only the primary line
(SAM flag 0x900 = 0). The SAM specification requires one and only
one of those for each read.
Yields:
2-tuples with each pair of alignments. | [
"Iterate",
"over",
"SAM",
"aligments",
"with",
"buffer",
"position",
"-",
"sorted",
"paired",
"-",
"end"
] | 6f7d66e757e610228c33ebf2bb5dc8cc5051c7f0 | https://github.com/simon-anders/htseq/blob/6f7d66e757e610228c33ebf2bb5dc8cc5051c7f0/python3/HTSeq/__init__.py#L742-L824 |
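Same idea for the buffered variant, which targets position-sorted input and trades memory (bounded by `max_buffer_size`) for not requiring a name sort; again a hedged sketch with a hypothetical file name:

```python
import HTSeq

bam = HTSeq.BAM_Reader("coordinate_sorted.bam")  # hypothetical input file

for first, second in HTSeq.pair_SAM_alignments_with_buffer(bam, max_buffer_size=10**6):
    if first is not None and second is not None:
        print(first.read.name)
```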
5,222 | sqreen/PyMiniRacer | py_mini_racer/extension/v8_build.py | ensure_v8_src | def ensure_v8_src():
""" Ensure that v8 src are presents and up-to-date
"""
path = local_path('v8')
if not os.path.isdir(path):
fetch_v8(path)
else:
update_v8(path)
checkout_v8_version(local_path("v8/v8"), V8_VERSION)
dependencies_sync(path) | python | def ensure_v8_src():
""" Ensure that v8 src are presents and up-to-date
"""
path = local_path('v8')
if not os.path.isdir(path):
fetch_v8(path)
else:
update_v8(path)
checkout_v8_version(local_path("v8/v8"), V8_VERSION)
dependencies_sync(path) | [
"def",
"ensure_v8_src",
"(",
")",
":",
"path",
"=",
"local_path",
"(",
"'v8'",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"fetch_v8",
"(",
"path",
")",
"else",
":",
"update_v8",
"(",
"path",
")",
"checkout_v8_version",
"(",
"local_path",
"(",
"\"v8/v8\"",
")",
",",
"V8_VERSION",
")",
"dependencies_sync",
"(",
"path",
")"
] | Ensure that v8 src are present and up-to-date | [
"Ensure",
"that",
"v8",
"src",
"are",
"presents",
"and",
"up",
"-",
"to",
"-",
"date"
] | 86747cddb13895ccaba990704ad68e5e059587f9 | https://github.com/sqreen/PyMiniRacer/blob/86747cddb13895ccaba990704ad68e5e059587f9/py_mini_racer/extension/v8_build.py#L57-L68 |
5,223 | sqreen/PyMiniRacer | wheel_pymalloc.py | get_filenames | def get_filenames(directory):
"""Get all the file to copy"""
for filename in os.listdir(directory):
if re.search(r"cp\d{2}mu?-manylinux1_\S+\.whl", filename):
yield filename | python | def get_filenames(directory):
"""Get all the file to copy"""
for filename in os.listdir(directory):
if re.search(r"cp\d{2}mu?-manylinux1_\S+\.whl", filename):
yield filename | [
"def",
"get_filenames",
"(",
"directory",
")",
":",
"for",
"filename",
"in",
"os",
".",
"listdir",
"(",
"directory",
")",
":",
"if",
"re",
".",
"search",
"(",
"r\"cp\\d{2}mu?-manylinux1_\\S+\\.whl\"",
",",
"filename",
")",
":",
"yield",
"filename"
] | Get all the files to copy | [
"Get",
"all",
"the",
"file",
"to",
"copy"
] | 86747cddb13895ccaba990704ad68e5e059587f9 | https://github.com/sqreen/PyMiniRacer/blob/86747cddb13895ccaba990704ad68e5e059587f9/wheel_pymalloc.py#L11-L15 |
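The filter above hinges entirely on the `cp\d{2}mu?-manylinux1` pattern; this standalone, stdlib-only check (sample file names invented) shows which wheel names pass:

```python
import re

pattern = re.compile(r"cp\d{2}mu?-manylinux1_\S+\.whl")

samples = [
    "py_mini_racer-0.1.0-cp27-cp27mu-manylinux1_x86_64.whl",  # matches: wide-unicode CPython 2.7
    "py_mini_racer-0.1.0-cp36-cp36m-manylinux1_x86_64.whl",   # matches: CPython 3.6 with pymalloc
    "py_mini_racer-0.1.0-py2.py3-none-any.whl",               # no match: not a manylinux1 CPython tag
]
for name in samples:
    print(name, "->", bool(pattern.search(name)))
```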
5,224 | sqreen/PyMiniRacer | wheel_pymalloc.py | copy_file | def copy_file(filename):
"""Copy the file and put the correct tag"""
print("Updating file %s" % filename)
out_dir = os.path.abspath(DIRECTORY)
tags = filename[:-4].split("-")
tags[-2] = tags[-2].replace("m", "")
new_name = "-".join(tags) + ".whl"
wheel_flag = "-".join(tags[2:])
with InWheelCtx(os.path.join(DIRECTORY, filename)) as ctx:
info_fname = os.path.join(_dist_info_dir(ctx.path), 'WHEEL')
infos = pkginfo.read_pkg_info(info_fname)
print("Changing Tag %s to %s" % (infos["Tag"], wheel_flag))
del infos['Tag']
infos.add_header('Tag', wheel_flag)
pkginfo.write_pkg_info(info_fname, infos)
ctx.out_wheel = os.path.join(out_dir, new_name)
print("Saving new wheel into %s" % ctx.out_wheel) | python | def copy_file(filename):
"""Copy the file and put the correct tag"""
print("Updating file %s" % filename)
out_dir = os.path.abspath(DIRECTORY)
tags = filename[:-4].split("-")
tags[-2] = tags[-2].replace("m", "")
new_name = "-".join(tags) + ".whl"
wheel_flag = "-".join(tags[2:])
with InWheelCtx(os.path.join(DIRECTORY, filename)) as ctx:
info_fname = os.path.join(_dist_info_dir(ctx.path), 'WHEEL')
infos = pkginfo.read_pkg_info(info_fname)
print("Changing Tag %s to %s" % (infos["Tag"], wheel_flag))
del infos['Tag']
infos.add_header('Tag', wheel_flag)
pkginfo.write_pkg_info(info_fname, infos)
ctx.out_wheel = os.path.join(out_dir, new_name)
print("Saving new wheel into %s" % ctx.out_wheel) | [
"def",
"copy_file",
"(",
"filename",
")",
":",
"print",
"(",
"\"Updating file %s\"",
"%",
"filename",
")",
"out_dir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"DIRECTORY",
")",
"tags",
"=",
"filename",
"[",
":",
"-",
"4",
"]",
".",
"split",
"(",
"\"-\"",
")",
"tags",
"[",
"-",
"2",
"]",
"=",
"tags",
"[",
"-",
"2",
"]",
".",
"replace",
"(",
"\"m\"",
",",
"\"\"",
")",
"new_name",
"=",
"\"-\"",
".",
"join",
"(",
"tags",
")",
"+",
"\".whl\"",
"wheel_flag",
"=",
"\"-\"",
".",
"join",
"(",
"tags",
"[",
"2",
":",
"]",
")",
"with",
"InWheelCtx",
"(",
"os",
".",
"path",
".",
"join",
"(",
"DIRECTORY",
",",
"filename",
")",
")",
"as",
"ctx",
":",
"info_fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"_dist_info_dir",
"(",
"ctx",
".",
"path",
")",
",",
"'WHEEL'",
")",
"infos",
"=",
"pkginfo",
".",
"read_pkg_info",
"(",
"info_fname",
")",
"print",
"(",
"\"Changing Tag %s to %s\"",
"%",
"(",
"infos",
"[",
"\"Tag\"",
"]",
",",
"wheel_flag",
")",
")",
"del",
"infos",
"[",
"'Tag'",
"]",
"infos",
".",
"add_header",
"(",
"'Tag'",
",",
"wheel_flag",
")",
"pkginfo",
".",
"write_pkg_info",
"(",
"info_fname",
",",
"infos",
")",
"ctx",
".",
"out_wheel",
"=",
"os",
".",
"path",
".",
"join",
"(",
"out_dir",
",",
"new_name",
")",
"print",
"(",
"\"Saving new wheel into %s\"",
"%",
"ctx",
".",
"out_wheel",
")"
] | Copy the file and put the correct tag | [
"Copy",
"the",
"file",
"and",
"put",
"the",
"correct",
"tag"
] | 86747cddb13895ccaba990704ad68e5e059587f9 | https://github.com/sqreen/PyMiniRacer/blob/86747cddb13895ccaba990704ad68e5e059587f9/wheel_pymalloc.py#L17-L40 |
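The heart of `copy_file` is plain string surgery on the wheel's tag triple; a standalone sketch of just that step, with a made-up file name (the real function additionally rewrites the `Tag` header in the WHEEL metadata through auditwheel's `InWheelCtx`):

```python
filename = "py_mini_racer-0.1.0-cp36-cp36m-manylinux1_x86_64.whl"  # hypothetical

tags = filename[:-4].split("-")
tags[-2] = tags[-2].replace("m", "")   # cp36m -> cp36: drop the pymalloc ABI flag
new_name = "-".join(tags) + ".whl"
wheel_flag = "-".join(tags[2:])        # value written into the WHEEL Tag header

print(new_name)    # py_mini_racer-0.1.0-cp36-cp36-manylinux1_x86_64.whl
print(wheel_flag)  # cp36-cp36-manylinux1_x86_64
```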
5,225 | sqreen/PyMiniRacer | py_mini_racer/py_mini_racer.py | is_unicode | def is_unicode(value):
""" Check if a value is a valid unicode string, compatible with python 2 and python 3
>>> is_unicode(u'foo')
True
>>> is_unicode(u'✌')
True
>>> is_unicode(b'foo')
False
>>> is_unicode(42)
False
>>> is_unicode(('abc',))
False
"""
python_version = sys.version_info[0]
if python_version == 2:
return isinstance(value, unicode)
elif python_version == 3:
return isinstance(value, str)
else:
raise NotImplementedError() | python | def is_unicode(value):
""" Check if a value is a valid unicode string, compatible with python 2 and python 3
>>> is_unicode(u'foo')
True
>>> is_unicode(u'✌')
True
>>> is_unicode(b'foo')
False
>>> is_unicode(42)
False
>>> is_unicode(('abc',))
False
"""
python_version = sys.version_info[0]
if python_version == 2:
return isinstance(value, unicode)
elif python_version == 3:
return isinstance(value, str)
else:
raise NotImplementedError() | [
"def",
"is_unicode",
"(",
"value",
")",
":",
"python_version",
"=",
"sys",
".",
"version_info",
"[",
"0",
"]",
"if",
"python_version",
"==",
"2",
":",
"return",
"isinstance",
"(",
"value",
",",
"unicode",
")",
"elif",
"python_version",
"==",
"3",
":",
"return",
"isinstance",
"(",
"value",
",",
"str",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
")"
] | Check if a value is a valid unicode string, compatible with python 2 and python 3
>>> is_unicode(u'foo')
True
>>> is_unicode(u'✌')
True
>>> is_unicode(b'foo')
False
>>> is_unicode(42)
False
>>> is_unicode(('abc',))
False | [
"Check",
"if",
"a",
"value",
"is",
"a",
"valid",
"unicode",
"string",
"compatible",
"with",
"python",
"2",
"and",
"python",
"3"
] | 86747cddb13895ccaba990704ad68e5e059587f9 | https://github.com/sqreen/PyMiniRacer/blob/86747cddb13895ccaba990704ad68e5e059587f9/py_mini_racer/py_mini_racer.py#L59-L80 |
5,226 | sqreen/PyMiniRacer | py_mini_racer/py_mini_racer.py | MiniRacer.execute | def execute(self, js_str, timeout=0, max_memory=0):
""" Exec the given JS value """
wrapped = "(function(){return (%s)})()" % js_str
return self.eval(wrapped, timeout, max_memory) | python | def execute(self, js_str, timeout=0, max_memory=0):
""" Exec the given JS value """
wrapped = "(function(){return (%s)})()" % js_str
return self.eval(wrapped, timeout, max_memory) | [
"def",
"execute",
"(",
"self",
",",
"js_str",
",",
"timeout",
"=",
"0",
",",
"max_memory",
"=",
"0",
")",
":",
"wrapped",
"=",
"\"(function(){return (%s)})()\"",
"%",
"js_str",
"return",
"self",
".",
"eval",
"(",
"wrapped",
",",
"timeout",
",",
"max_memory",
")"
] | Exec the given JS value | [
"Exec",
"the",
"given",
"JS",
"value"
] | 86747cddb13895ccaba990704ad68e5e059587f9 | https://github.com/sqreen/PyMiniRacer/blob/86747cddb13895ccaba990704ad68e5e059587f9/py_mini_racer/py_mini_racer.py#L136-L140 |
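Basic usage of the two evaluation entry points (assumes the package and its compiled extension are installed, e.g. via `pip install py-mini-racer`):

```python
from py_mini_racer import py_mini_racer

ctx = py_mini_racer.MiniRacer()
print(ctx.eval("1 + 1"))            # 2
print(ctx.execute("{answer: 42}"))  # {'answer': 42} -- execute() wraps the source
                                    # in (function(){return (...)})(), so a bare
                                    # object literal evaluates as an expression
```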
5,227 | sqreen/PyMiniRacer | py_mini_racer/py_mini_racer.py | MiniRacer.eval | def eval(self, js_str, timeout=0, max_memory=0):
""" Eval the JavaScript string """
if is_unicode(js_str):
bytes_val = js_str.encode("utf8")
else:
bytes_val = js_str
res = None
self.lock.acquire()
try:
res = self.ext.mr_eval_context(self.ctx,
bytes_val,
len(bytes_val),
ctypes.c_ulong(timeout),
ctypes.c_size_t(max_memory))
if bool(res) is False:
raise JSConversionException()
python_value = res.contents.to_python()
return python_value
finally:
self.lock.release()
if res is not None:
self.free(res) | python | def eval(self, js_str, timeout=0, max_memory=0):
""" Eval the JavaScript string """
if is_unicode(js_str):
bytes_val = js_str.encode("utf8")
else:
bytes_val = js_str
res = None
self.lock.acquire()
try:
res = self.ext.mr_eval_context(self.ctx,
bytes_val,
len(bytes_val),
ctypes.c_ulong(timeout),
ctypes.c_size_t(max_memory))
if bool(res) is False:
raise JSConversionException()
python_value = res.contents.to_python()
return python_value
finally:
self.lock.release()
if res is not None:
self.free(res) | [
"def",
"eval",
"(",
"self",
",",
"js_str",
",",
"timeout",
"=",
"0",
",",
"max_memory",
"=",
"0",
")",
":",
"if",
"is_unicode",
"(",
"js_str",
")",
":",
"bytes_val",
"=",
"js_str",
".",
"encode",
"(",
"\"utf8\"",
")",
"else",
":",
"bytes_val",
"=",
"js_str",
"res",
"=",
"None",
"self",
".",
"lock",
".",
"acquire",
"(",
")",
"try",
":",
"res",
"=",
"self",
".",
"ext",
".",
"mr_eval_context",
"(",
"self",
".",
"ctx",
",",
"bytes_val",
",",
"len",
"(",
"bytes_val",
")",
",",
"ctypes",
".",
"c_ulong",
"(",
"timeout",
")",
",",
"ctypes",
".",
"c_size_t",
"(",
"max_memory",
")",
")",
"if",
"bool",
"(",
"res",
")",
"is",
"False",
":",
"raise",
"JSConversionException",
"(",
")",
"python_value",
"=",
"res",
".",
"contents",
".",
"to_python",
"(",
")",
"return",
"python_value",
"finally",
":",
"self",
".",
"lock",
".",
"release",
"(",
")",
"if",
"res",
"is",
"not",
"None",
":",
"self",
".",
"free",
"(",
"res",
")"
] | Eval the JavaScript string | [
"Eval",
"the",
"JavaScript",
"string"
] | 86747cddb13895ccaba990704ad68e5e059587f9 | https://github.com/sqreen/PyMiniRacer/blob/86747cddb13895ccaba990704ad68e5e059587f9/py_mini_racer/py_mini_racer.py#L142-L166 |
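The `timeout` and `max_memory` knobs turn runaway scripts into Python exceptions; a small sketch (the 100 ms budget is arbitrary):

```python
from py_mini_racer import py_mini_racer

ctx = py_mini_racer.MiniRacer()
try:
    ctx.eval("while (true) {}", timeout=100)  # milliseconds
except py_mini_racer.JSTimeoutException:
    print("script was killed after exceeding its 100 ms budget")
```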
5,228 | sqreen/PyMiniRacer | py_mini_racer/py_mini_racer.py | MiniRacer.call | def call(self, identifier, *args, **kwargs):
""" Call the named function with provided arguments
You can pass a custom JSON encoder by passing it in the encoder
keyword only argument.
"""
encoder = kwargs.get('encoder', None)
timeout = kwargs.get('timeout', 0)
max_memory = kwargs.get('max_memory', 0)
json_args = json.dumps(args, separators=(',', ':'), cls=encoder)
js = "{identifier}.apply(this, {json_args})"
return self.eval(js.format(identifier=identifier, json_args=json_args), timeout, max_memory) | python | def call(self, identifier, *args, **kwargs):
""" Call the named function with provided arguments
You can pass a custom JSON encoder by passing it in the encoder
keyword only argument.
"""
encoder = kwargs.get('encoder', None)
timeout = kwargs.get('timeout', 0)
max_memory = kwargs.get('max_memory', 0)
json_args = json.dumps(args, separators=(',', ':'), cls=encoder)
js = "{identifier}.apply(this, {json_args})"
return self.eval(js.format(identifier=identifier, json_args=json_args), timeout, max_memory) | [
"def",
"call",
"(",
"self",
",",
"identifier",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"encoder",
"=",
"kwargs",
".",
"get",
"(",
"'encoder'",
",",
"None",
")",
"timeout",
"=",
"kwargs",
".",
"get",
"(",
"'timeout'",
",",
"0",
")",
"max_memory",
"=",
"kwargs",
".",
"get",
"(",
"'max_memory'",
",",
"0",
")",
"json_args",
"=",
"json",
".",
"dumps",
"(",
"args",
",",
"separators",
"=",
"(",
"','",
",",
"':'",
")",
",",
"cls",
"=",
"encoder",
")",
"js",
"=",
"\"{identifier}.apply(this, {json_args})\"",
"return",
"self",
".",
"eval",
"(",
"js",
".",
"format",
"(",
"identifier",
"=",
"identifier",
",",
"json_args",
"=",
"json_args",
")",
",",
"timeout",
",",
"max_memory",
")"
] | Call the named function with provided arguments
You can pass a custom JSON encoder by passing it in the encoder
keyword only argument. | [
"Call",
"the",
"named",
"function",
"with",
"provided",
"arguments",
"You",
"can",
"pass",
"a",
"custom",
"JSON",
"encoder",
"by",
"passing",
"it",
"in",
"the",
"encoder",
"keyword",
"only",
"argument",
"."
] | 86747cddb13895ccaba990704ad68e5e059587f9 | https://github.com/sqreen/PyMiniRacer/blob/86747cddb13895ccaba990704ad68e5e059587f9/py_mini_racer/py_mini_racer.py#L168-L180 |
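`call` serializes the Python arguments to JSON and applies the named JS function, so a round trip looks like this:

```python
from py_mini_racer import py_mini_racer

ctx = py_mini_racer.MiniRacer()
ctx.eval("function add(a, b) { return a + b; }")
print(ctx.call("add", 21, 21))  # 42

# Arguments json.dumps cannot serialize need the encoder keyword,
# e.g. ctx.call("fmt", some_value, encoder=MyJSONEncoder)  # hypothetical names
```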
5,229 | sqreen/PyMiniRacer | py_mini_racer/py_mini_racer.py | MiniRacer.heap_stats | def heap_stats(self):
""" Return heap statistics """
self.lock.acquire()
res = self.ext.mr_heap_stats(self.ctx)
self.lock.release()
python_value = res.contents.to_python()
self.free(res)
return python_value | python | def heap_stats(self):
""" Return heap statistics """
self.lock.acquire()
res = self.ext.mr_heap_stats(self.ctx)
self.lock.release()
python_value = res.contents.to_python()
self.free(res)
return python_value | [
"def",
"heap_stats",
"(",
"self",
")",
":",
"self",
".",
"lock",
".",
"acquire",
"(",
")",
"res",
"=",
"self",
".",
"ext",
".",
"mr_heap_stats",
"(",
"self",
".",
"ctx",
")",
"self",
".",
"lock",
".",
"release",
"(",
")",
"python_value",
"=",
"res",
".",
"contents",
".",
"to_python",
"(",
")",
"self",
".",
"free",
"(",
"res",
")",
"return",
"python_value"
] | Return heap statistics | [
"Return",
"heap",
"statistics"
] | 86747cddb13895ccaba990704ad68e5e059587f9 | https://github.com/sqreen/PyMiniRacer/blob/86747cddb13895ccaba990704ad68e5e059587f9/py_mini_racer/py_mini_racer.py#L182-L191 |
5,230 | sqreen/PyMiniRacer | py_mini_racer/py_mini_racer.py | MiniRacer.heap_snapshot | def heap_snapshot(self):
""" Return heap snapshot """
self.lock.acquire()
res = self.ext.mr_heap_snapshot(self.ctx)
self.lock.release()
python_value = res.contents.to_python()
self.free(res)
return python_value | python | def heap_snapshot(self):
""" Return heap snapshot """
self.lock.acquire()
res = self.ext.mr_heap_snapshot(self.ctx)
self.lock.release()
python_value = res.contents.to_python()
self.free(res)
return python_value | [
"def",
"heap_snapshot",
"(",
"self",
")",
":",
"self",
".",
"lock",
".",
"acquire",
"(",
")",
"res",
"=",
"self",
".",
"ext",
".",
"mr_heap_snapshot",
"(",
"self",
".",
"ctx",
")",
"self",
".",
"lock",
".",
"release",
"(",
")",
"python_value",
"=",
"res",
".",
"contents",
".",
"to_python",
"(",
")",
"self",
".",
"free",
"(",
"res",
")",
"return",
"python_value"
] | Return heap snapshot | [
"Return",
"heap",
"snapshot"
] | 86747cddb13895ccaba990704ad68e5e059587f9 | https://github.com/sqreen/PyMiniRacer/blob/86747cddb13895ccaba990704ad68e5e059587f9/py_mini_racer/py_mini_racer.py#L196-L205 |
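Both introspection helpers above funnel through the same result marshalling; a quick, hedged look (the exact shape of the payload depends on the bundled V8 build):

```python
from py_mini_racer import py_mini_racer

ctx = py_mini_racer.MiniRacer()
print(ctx.heap_stats())  # V8 heap counters such as used_heap_size
```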
5,231 | sqreen/PyMiniRacer | py_mini_racer/py_mini_racer.py | PythonValue.to_python | def to_python(self):
""" Return an object as native Python """
result = None
if self.type == PythonTypes.null:
result = None
elif self.type == PythonTypes.bool:
result = self.value == 1
elif self.type == PythonTypes.integer:
if self.value is None:
result = 0
else:
result = ctypes.c_int32(self.value).value
elif self.type == PythonTypes.double:
result = self._double_value()
elif self.type == PythonTypes.str_utf8:
buf = ctypes.c_char_p(self.value)
ptr = ctypes.cast(buf, ctypes.POINTER(ctypes.c_char))
result = ptr[0:self.len].decode("utf8")
elif self.type == PythonTypes.array:
if self.len == 0:
return []
ary = []
ary_addr = ctypes.c_void_p.from_address(self.value)
ptr_to_ary = ctypes.pointer(ary_addr)
for i in range(self.len):
pval = PythonValue.from_address(ptr_to_ary[i])
ary.append(pval.to_python())
result = ary
elif self.type == PythonTypes.hash:
if self.len == 0:
return {}
res = {}
hash_ary_addr = ctypes.c_void_p.from_address(self.value)
ptr_to_hash = ctypes.pointer(hash_ary_addr)
for i in range(self.len):
pkey = PythonValue.from_address(ptr_to_hash[i*2])
pval = PythonValue.from_address(ptr_to_hash[i*2+1])
res[pkey.to_python()] = pval.to_python()
result = res
elif self.type == PythonTypes.function:
result = JSFunction()
elif self.type == PythonTypes.parse_exception:
msg = ctypes.c_char_p(self.value).value
raise JSParseException(msg)
elif self.type == PythonTypes.execute_exception:
msg = ctypes.c_char_p(self.value).value
raise JSEvalException(msg.decode('utf-8', errors='replace'))
elif self.type == PythonTypes.oom_exception:
msg = ctypes.c_char_p(self.value).value
raise JSOOMException(msg)
elif self.type == PythonTypes.timeout_exception:
msg = ctypes.c_char_p(self.value).value
raise JSTimeoutException(msg)
elif self.type == PythonTypes.date:
timestamp = self._double_value()
# JS timestamps are in milliseconds; in Python we use seconds
result = datetime.datetime.utcfromtimestamp(timestamp / 1000.)
else:
raise WrongReturnTypeException("unknown type %d" % self.type)
return result | python | def to_python(self):
""" Return an object as native Python """
result = None
if self.type == PythonTypes.null:
result = None
elif self.type == PythonTypes.bool:
result = self.value == 1
elif self.type == PythonTypes.integer:
if self.value is None:
result = 0
else:
result = ctypes.c_int32(self.value).value
elif self.type == PythonTypes.double:
result = self._double_value()
elif self.type == PythonTypes.str_utf8:
buf = ctypes.c_char_p(self.value)
ptr = ctypes.cast(buf, ctypes.POINTER(ctypes.c_char))
result = ptr[0:self.len].decode("utf8")
elif self.type == PythonTypes.array:
if self.len == 0:
return []
ary = []
ary_addr = ctypes.c_void_p.from_address(self.value)
ptr_to_ary = ctypes.pointer(ary_addr)
for i in range(self.len):
pval = PythonValue.from_address(ptr_to_ary[i])
ary.append(pval.to_python())
result = ary
elif self.type == PythonTypes.hash:
if self.len == 0:
return {}
res = {}
hash_ary_addr = ctypes.c_void_p.from_address(self.value)
ptr_to_hash = ctypes.pointer(hash_ary_addr)
for i in range(self.len):
pkey = PythonValue.from_address(ptr_to_hash[i*2])
pval = PythonValue.from_address(ptr_to_hash[i*2+1])
res[pkey.to_python()] = pval.to_python()
result = res
elif self.type == PythonTypes.function:
result = JSFunction()
elif self.type == PythonTypes.parse_exception:
msg = ctypes.c_char_p(self.value).value
raise JSParseException(msg)
elif self.type == PythonTypes.execute_exception:
msg = ctypes.c_char_p(self.value).value
raise JSEvalException(msg.decode('utf-8', errors='replace'))
elif self.type == PythonTypes.oom_exception:
msg = ctypes.c_char_p(self.value).value
raise JSOOMException(msg)
elif self.type == PythonTypes.timeout_exception:
msg = ctypes.c_char_p(self.value).value
raise JSTimeoutException(msg)
elif self.type == PythonTypes.date:
timestamp = self._double_value()
# JS timestamps are in milliseconds; in Python we use seconds
result = datetime.datetime.utcfromtimestamp(timestamp / 1000.)
else:
raise WrongReturnTypeException("unknown type %d" % self.type)
return result | [
"def",
"to_python",
"(",
"self",
")",
":",
"result",
"=",
"None",
"if",
"self",
".",
"type",
"==",
"PythonTypes",
".",
"null",
":",
"result",
"=",
"None",
"elif",
"self",
".",
"type",
"==",
"PythonTypes",
".",
"bool",
":",
"result",
"=",
"self",
".",
"value",
"==",
"1",
"elif",
"self",
".",
"type",
"==",
"PythonTypes",
".",
"integer",
":",
"if",
"self",
".",
"value",
"is",
"None",
":",
"result",
"=",
"0",
"else",
":",
"result",
"=",
"ctypes",
".",
"c_int32",
"(",
"self",
".",
"value",
")",
".",
"value",
"elif",
"self",
".",
"type",
"==",
"PythonTypes",
".",
"double",
":",
"result",
"=",
"self",
".",
"_double_value",
"(",
")",
"elif",
"self",
".",
"type",
"==",
"PythonTypes",
".",
"str_utf8",
":",
"buf",
"=",
"ctypes",
".",
"c_char_p",
"(",
"self",
".",
"value",
")",
"ptr",
"=",
"ctypes",
".",
"cast",
"(",
"buf",
",",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_char",
")",
")",
"result",
"=",
"ptr",
"[",
"0",
":",
"self",
".",
"len",
"]",
".",
"decode",
"(",
"\"utf8\"",
")",
"elif",
"self",
".",
"type",
"==",
"PythonTypes",
".",
"array",
":",
"if",
"self",
".",
"len",
"==",
"0",
":",
"return",
"[",
"]",
"ary",
"=",
"[",
"]",
"ary_addr",
"=",
"ctypes",
".",
"c_void_p",
".",
"from_address",
"(",
"self",
".",
"value",
")",
"ptr_to_ary",
"=",
"ctypes",
".",
"pointer",
"(",
"ary_addr",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"len",
")",
":",
"pval",
"=",
"PythonValue",
".",
"from_address",
"(",
"ptr_to_ary",
"[",
"i",
"]",
")",
"ary",
".",
"append",
"(",
"pval",
".",
"to_python",
"(",
")",
")",
"result",
"=",
"ary",
"elif",
"self",
".",
"type",
"==",
"PythonTypes",
".",
"hash",
":",
"if",
"self",
".",
"len",
"==",
"0",
":",
"return",
"{",
"}",
"res",
"=",
"{",
"}",
"hash_ary_addr",
"=",
"ctypes",
".",
"c_void_p",
".",
"from_address",
"(",
"self",
".",
"value",
")",
"ptr_to_hash",
"=",
"ctypes",
".",
"pointer",
"(",
"hash_ary_addr",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"len",
")",
":",
"pkey",
"=",
"PythonValue",
".",
"from_address",
"(",
"ptr_to_hash",
"[",
"i",
"*",
"2",
"]",
")",
"pval",
"=",
"PythonValue",
".",
"from_address",
"(",
"ptr_to_hash",
"[",
"i",
"*",
"2",
"+",
"1",
"]",
")",
"res",
"[",
"pkey",
".",
"to_python",
"(",
")",
"]",
"=",
"pval",
".",
"to_python",
"(",
")",
"result",
"=",
"res",
"elif",
"self",
".",
"type",
"==",
"PythonTypes",
".",
"function",
":",
"result",
"=",
"JSFunction",
"(",
")",
"elif",
"self",
".",
"type",
"==",
"PythonTypes",
".",
"parse_exception",
":",
"msg",
"=",
"ctypes",
".",
"c_char_p",
"(",
"self",
".",
"value",
")",
".",
"value",
"raise",
"JSParseException",
"(",
"msg",
")",
"elif",
"self",
".",
"type",
"==",
"PythonTypes",
".",
"execute_exception",
":",
"msg",
"=",
"ctypes",
".",
"c_char_p",
"(",
"self",
".",
"value",
")",
".",
"value",
"raise",
"JSEvalException",
"(",
"msg",
".",
"decode",
"(",
"'utf-8'",
",",
"errors",
"=",
"'replace'",
")",
")",
"elif",
"self",
".",
"type",
"==",
"PythonTypes",
".",
"oom_exception",
":",
"msg",
"=",
"ctypes",
".",
"c_char_p",
"(",
"self",
".",
"value",
")",
".",
"value",
"raise",
"JSOOMException",
"(",
"msg",
")",
"elif",
"self",
".",
"type",
"==",
"PythonTypes",
".",
"timeout_exception",
":",
"msg",
"=",
"ctypes",
".",
"c_char_p",
"(",
"self",
".",
"value",
")",
".",
"value",
"raise",
"JSTimeoutException",
"(",
"msg",
")",
"elif",
"self",
".",
"type",
"==",
"PythonTypes",
".",
"date",
":",
"timestamp",
"=",
"self",
".",
"_double_value",
"(",
")",
"# JS timestamp are milliseconds, in python we are in seconds",
"result",
"=",
"datetime",
".",
"datetime",
".",
"utcfromtimestamp",
"(",
"timestamp",
"/",
"1000.",
")",
"else",
":",
"raise",
"WrongReturnTypeException",
"(",
"\"unknown type %d\"",
"%",
"self",
".",
"type",
")",
"return",
"result"
] | Return an object as native Python | [
"Return",
"an",
"object",
"as",
"native",
"Python"
] | 86747cddb13895ccaba990704ad68e5e059587f9 | https://github.com/sqreen/PyMiniRacer/blob/86747cddb13895ccaba990704ad68e5e059587f9/py_mini_racer/py_mini_racer.py#L248-L308 |
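Seen from the caller's side, `to_python` is what makes mixed JS values land as native types; an illustrative round trip (numeric results may come back as int or float depending on how V8 classifies them):

```python
import datetime
from py_mini_racer import py_mini_racer

ctx = py_mini_racer.MiniRacer()
value = ctx.eval("[null, true, 2.5, 'text', {a: [1, 2]}, new Date(0)]")
print(value)
# Expected roughly:
# [None, True, 2.5, 'text', {'a': [1, 2]}, datetime.datetime(1970, 1, 1, 0, 0)]
assert value[-1] == datetime.datetime(1970, 1, 1)
```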
5,232 | sqreen/PyMiniRacer | setup.py | libv8_object | def libv8_object(object_name):
""" Return a path for object_name which is OS independent
"""
filename = join(V8_LIB_DIRECTORY, 'out.gn/x64.release/obj/{}'.format(object_name))
if not isfile(filename):
filename = join(local_path('vendor/v8/out.gn/libv8/obj/{}'.format(object_name)))
if not isfile(filename):
filename = join(V8_LIB_DIRECTORY, 'out.gn/x64.release/obj/{}'.format(object_name))
return filename | python | def libv8_object(object_name):
""" Return a path for object_name which is OS independent
"""
filename = join(V8_LIB_DIRECTORY, 'out.gn/x64.release/obj/{}'.format(object_name))
if not isfile(filename):
filename = join(local_path('vendor/v8/out.gn/libv8/obj/{}'.format(object_name)))
if not isfile(filename):
filename = join(V8_LIB_DIRECTORY, 'out.gn/x64.release/obj/{}'.format(object_name))
return filename | [
"def",
"libv8_object",
"(",
"object_name",
")",
":",
"filename",
"=",
"join",
"(",
"V8_LIB_DIRECTORY",
",",
"'out.gn/x64.release/obj/{}'",
".",
"format",
"(",
"object_name",
")",
")",
"if",
"not",
"isfile",
"(",
"filename",
")",
":",
"filename",
"=",
"join",
"(",
"local_path",
"(",
"'vendor/v8/out.gn/libv8/obj/{}'",
".",
"format",
"(",
"object_name",
")",
")",
")",
"if",
"not",
"isfile",
"(",
"filename",
")",
":",
"filename",
"=",
"join",
"(",
"V8_LIB_DIRECTORY",
",",
"'out.gn/x64.release/obj/{}'",
".",
"format",
"(",
"object_name",
")",
")",
"return",
"filename"
] | Return a path for object_name which is OS independent | [
"Return",
"a",
"path",
"for",
"object_name",
"which",
"is",
"OS",
"independent"
] | 86747cddb13895ccaba990704ad68e5e059587f9 | https://github.com/sqreen/PyMiniRacer/blob/86747cddb13895ccaba990704ad68e5e059587f9/setup.py#L88-L100 |
5,233 | sqreen/PyMiniRacer | setup.py | get_static_lib_paths | def get_static_lib_paths():
""" Return the required static libraries path
"""
libs = []
is_linux = sys.platform.startswith('linux')
if is_linux:
libs += ['-Wl,--start-group']
libs += get_raw_static_lib_path()
if is_linux:
libs += ['-Wl,--end-group']
return libs | python | def get_static_lib_paths():
""" Return the required static libraries path
"""
libs = []
is_linux = sys.platform.startswith('linux')
if is_linux:
libs += ['-Wl,--start-group']
libs += get_raw_static_lib_path()
if is_linux:
libs += ['-Wl,--end-group']
return libs | [
"def",
"get_static_lib_paths",
"(",
")",
":",
"libs",
"=",
"[",
"]",
"is_linux",
"=",
"sys",
".",
"platform",
".",
"startswith",
"(",
"'linux'",
")",
"if",
"is_linux",
":",
"libs",
"+=",
"[",
"'-Wl,--start-group'",
"]",
"libs",
"+=",
"get_raw_static_lib_path",
"(",
")",
"if",
"is_linux",
":",
"libs",
"+=",
"[",
"'-Wl,--end-group'",
"]",
"return",
"libs"
] | Return the required static libraries path | [
"Return",
"the",
"required",
"static",
"libraries",
"path"
] | 86747cddb13895ccaba990704ad68e5e059587f9 | https://github.com/sqreen/PyMiniRacer/blob/86747cddb13895ccaba990704ad68e5e059587f9/setup.py#L110-L120 |
5,234 | ajbosco/dag-factory | dagfactory/dagbuilder.py | DagBuilder.get_dag_params | def get_dag_params(self) -> Dict[str, Any]:
"""
Merges default config with dag config, sets dag_id, and extrapolates dag_start_date
:returns: dict of dag parameters
"""
try:
dag_params: Dict[str, Any] = utils.merge_configs(self.dag_config, self.default_config)
except Exception as e:
raise Exception(f"Failed to merge config with default config, err: {e}")
dag_params["dag_id"]: str = self.dag_name
try:
# ensure that default_args dictionary contains key "start_date" with "datetime" value in specified timezone
dag_params["default_args"]["start_date"]: datetime = utils.get_start_date(
date_value=dag_params["default_args"]["start_date"],
timezone=dag_params["default_args"].get("timezone", "UTC"),
)
except KeyError as e:
raise Exception(f"{self.dag_name} config is missing start_date, err: {e}")
return dag_params | python | def get_dag_params(self) -> Dict[str, Any]:
"""
Merges default config with dag config, sets dag_id, and extrapolates dag_start_date
:returns: dict of dag parameters
"""
try:
dag_params: Dict[str, Any] = utils.merge_configs(self.dag_config, self.default_config)
except Exception as e:
raise Exception(f"Failed to merge config with default config, err: {e}")
dag_params["dag_id"]: str = self.dag_name
try:
# ensure that default_args dictionary contains key "start_date" with "datetime" value in specified timezone
dag_params["default_args"]["start_date"]: datetime = utils.get_start_date(
date_value=dag_params["default_args"]["start_date"],
timezone=dag_params["default_args"].get("timezone", "UTC"),
)
except KeyError as e:
raise Exception(f"{self.dag_name} config is missing start_date, err: {e}")
return dag_params | [
"def",
"get_dag_params",
"(",
"self",
")",
"->",
"Dict",
"[",
"str",
",",
"Any",
"]",
":",
"try",
":",
"dag_params",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
"=",
"utils",
".",
"merge_configs",
"(",
"self",
".",
"dag_config",
",",
"self",
".",
"default_config",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"Exception",
"(",
"f\"Failed to merge config with default config, err: {e}\"",
")",
"dag_params",
"[",
"\"dag_id\"",
"]",
":",
"str",
"=",
"self",
".",
"dag_name",
"try",
":",
"# ensure that default_args dictionary contains key \"start_date\" with \"datetime\" value in specified timezone",
"dag_params",
"[",
"\"default_args\"",
"]",
"[",
"\"start_date\"",
"]",
":",
"datetime",
"=",
"utils",
".",
"get_start_date",
"(",
"date_value",
"=",
"dag_params",
"[",
"\"default_args\"",
"]",
"[",
"\"start_date\"",
"]",
",",
"timezone",
"=",
"dag_params",
"[",
"\"default_args\"",
"]",
".",
"get",
"(",
"\"timezone\"",
",",
"\"UTC\"",
")",
",",
")",
"except",
"KeyError",
"as",
"e",
":",
"raise",
"Exception",
"(",
"f\"{self.dag_name} config is missing start_date, err: {e}\"",
")",
"return",
"dag_params"
] | Merges default config with dag config, sets dag_id, and extrapolates dag_start_date
:returns: dict of dag parameters | [
"Merges",
"default",
"config",
"with",
"dag",
"config",
"sets",
"dag_id",
"and",
"extropolates",
"dag_start_date"
] | cc7cfe74e62f82859fe38d527e95311a2805723b | https://github.com/ajbosco/dag-factory/blob/cc7cfe74e62f82859fe38d527e95311a2805723b/dagfactory/dagbuilder.py#L24-L43 |
5,235 | ajbosco/dag-factory | dagfactory/dagbuilder.py | DagBuilder.make_task | def make_task(operator: str, task_params: Dict[str, Any]) -> BaseOperator:
"""
Takes an operator and params and creates an instance of that operator.
:returns: instance of operator object
"""
try:
# class is a Callable https://stackoverflow.com/a/34578836/3679900
operator_obj: Callable[..., BaseOperator] = import_string(operator)
except Exception as e:
raise Exception(f"Failed to import operator: {operator}. err: {e}")
try:
task: BaseOperator = operator_obj(**task_params)
except Exception as e:
raise Exception(f"Failed to create {operator_obj} task. err: {e}")
return task | python | def make_task(operator: str, task_params: Dict[str, Any]) -> BaseOperator:
"""
Takes an operator and params and creates an instance of that operator.
:returns: instance of operator object
"""
try:
# class is a Callable https://stackoverflow.com/a/34578836/3679900
operator_obj: Callable[..., BaseOperator] = import_string(operator)
except Exception as e:
raise Exception(f"Failed to import operator: {operator}. err: {e}")
try:
task: BaseOperator = operator_obj(**task_params)
except Exception as e:
raise Exception(f"Failed to create {operator_obj} task. err: {e}")
return task | [
"def",
"make_task",
"(",
"operator",
":",
"str",
",",
"task_params",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
")",
"->",
"BaseOperator",
":",
"try",
":",
"# class is a Callable https://stackoverflow.com/a/34578836/3679900",
"operator_obj",
":",
"Callable",
"[",
"...",
",",
"BaseOperator",
"]",
"=",
"import_string",
"(",
"operator",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"Exception",
"(",
"f\"Failed to import operator: {operator}. err: {e}\"",
")",
"try",
":",
"task",
":",
"BaseOperator",
"=",
"operator_obj",
"(",
"*",
"*",
"task_params",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"Exception",
"(",
"f\"Failed to create {operator_obj} task. err: {e}\"",
")",
"return",
"task"
] | Takes an operator and params and creates an instance of that operator.
:returns: instance of operator object | [
"Takes",
"an",
"operator",
"and",
"params",
"and",
"creates",
"an",
"instance",
"of",
"that",
"operator",
"."
] | cc7cfe74e62f82859fe38d527e95311a2805723b | https://github.com/ajbosco/dag-factory/blob/cc7cfe74e62f82859fe38d527e95311a2805723b/dagfactory/dagbuilder.py#L46-L61 |
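A hedged sketch of the dynamic dispatch: `make_task` imports the operator class from its dotted path and instantiates it with the remaining params. This assumes an Airflow 1.x installation (where `airflow.operators.bash_operator.BashOperator` exists):

```python
from dagfactory.dagbuilder import DagBuilder

task = DagBuilder.make_task(
    operator="airflow.operators.bash_operator.BashOperator",
    task_params={"task_id": "say_hello", "bash_command": "echo hello"},
)
print(type(task).__name__)  # BashOperator
```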
5,236 | ajbosco/dag-factory | dagfactory/dagbuilder.py | DagBuilder.build | def build(self) -> Dict[str, Union[str, DAG]]:
"""
Generates a DAG from the DAG parameters.
:returns: dict with dag_id and DAG object
:type: Dict[str, Union[str, DAG]]
"""
dag_params: Dict[str, Any] = self.get_dag_params()
dag: DAG = DAG(
dag_id=dag_params["dag_id"],
schedule_interval=dag_params["schedule_interval"],
description=dag_params.get("description", ""),
max_active_runs=dag_params.get(
"max_active_runs",
configuration.conf.getint("core", "max_active_runs_per_dag"),
),
default_args=dag_params.get("default_args", {}),
)
tasks: Dict[str, Dict[str, Any]] = dag_params["tasks"]
# create dictionary to track tasks and set dependencies
tasks_dict: Dict[str, BaseOperator] = {}
for task_name, task_conf in tasks.items():
task_conf["task_id"]: str = task_name
operator: str = task_conf["operator"]
task_conf["dag"]: DAG = dag
params: Dict[str, Any] = {k: v for k, v in task_conf.items() if k not in SYSTEM_PARAMS}
task: BaseOperator = DagBuilder.make_task(operator=operator, task_params=params)
tasks_dict[task.task_id]: BaseOperator = task
# set task dependencies after creating tasks
for task_name, task_conf in tasks.items():
if task_conf.get("dependencies"):
source_task: BaseOperator = tasks_dict[task_name]
for dep in task_conf["dependencies"]:
dep_task: BaseOperator = tasks_dict[dep]
source_task.set_upstream(dep_task)
return {"dag_id": dag_params["dag_id"], "dag": dag} | python | def build(self) -> Dict[str, Union[str, DAG]]:
"""
Generates a DAG from the DAG parameters.
:returns: dict with dag_id and DAG object
:type: Dict[str, Union[str, DAG]]
"""
dag_params: Dict[str, Any] = self.get_dag_params()
dag: DAG = DAG(
dag_id=dag_params["dag_id"],
schedule_interval=dag_params["schedule_interval"],
description=dag_params.get("description", ""),
max_active_runs=dag_params.get(
"max_active_runs",
configuration.conf.getint("core", "max_active_runs_per_dag"),
),
default_args=dag_params.get("default_args", {}),
)
tasks: Dict[str, Dict[str, Any]] = dag_params["tasks"]
# create dictionary to track tasks and set dependencies
tasks_dict: Dict[str, BaseOperator] = {}
for task_name, task_conf in tasks.items():
task_conf["task_id"]: str = task_name
operator: str = task_conf["operator"]
task_conf["dag"]: DAG = dag
params: Dict[str, Any] = {k: v for k, v in task_conf.items() if k not in SYSTEM_PARAMS}
task: BaseOperator = DagBuilder.make_task(operator=operator, task_params=params)
tasks_dict[task.task_id]: BaseOperator = task
# set task dependencies after creating tasks
for task_name, task_conf in tasks.items():
if task_conf.get("dependencies"):
source_task: BaseOperator = tasks_dict[task_name]
for dep in task_conf["dependencies"]:
dep_task: BaseOperator = tasks_dict[dep]
source_task.set_upstream(dep_task)
return {"dag_id": dag_params["dag_id"], "dag": dag} | [
"def",
"build",
"(",
"self",
")",
"->",
"Dict",
"[",
"str",
",",
"Union",
"[",
"str",
",",
"DAG",
"]",
"]",
":",
"dag_params",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
"=",
"self",
".",
"get_dag_params",
"(",
")",
"dag",
":",
"DAG",
"=",
"DAG",
"(",
"dag_id",
"=",
"dag_params",
"[",
"\"dag_id\"",
"]",
",",
"schedule_interval",
"=",
"dag_params",
"[",
"\"schedule_interval\"",
"]",
",",
"description",
"=",
"dag_params",
".",
"get",
"(",
"\"description\"",
",",
"\"\"",
")",
",",
"max_active_runs",
"=",
"dag_params",
".",
"get",
"(",
"\"max_active_runs\"",
",",
"configuration",
".",
"conf",
".",
"getint",
"(",
"\"core\"",
",",
"\"max_active_runs_per_dag\"",
")",
",",
")",
",",
"default_args",
"=",
"dag_params",
".",
"get",
"(",
"\"default_args\"",
",",
"{",
"}",
")",
",",
")",
"tasks",
":",
"Dict",
"[",
"str",
",",
"Dict",
"[",
"str",
",",
"Any",
"]",
"]",
"=",
"dag_params",
"[",
"\"tasks\"",
"]",
"# create dictionary to track tasks and set dependencies",
"tasks_dict",
":",
"Dict",
"[",
"str",
",",
"BaseOperator",
"]",
"=",
"{",
"}",
"for",
"task_name",
",",
"task_conf",
"in",
"tasks",
".",
"items",
"(",
")",
":",
"task_conf",
"[",
"\"task_id\"",
"]",
":",
"str",
"=",
"task_name",
"operator",
":",
"str",
"=",
"task_conf",
"[",
"\"operator\"",
"]",
"task_conf",
"[",
"\"dag\"",
"]",
":",
"DAG",
"=",
"dag",
"params",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"task_conf",
".",
"items",
"(",
")",
"if",
"k",
"not",
"in",
"SYSTEM_PARAMS",
"}",
"task",
":",
"BaseOperator",
"=",
"DagBuilder",
".",
"make_task",
"(",
"operator",
"=",
"operator",
",",
"task_params",
"=",
"params",
")",
"tasks_dict",
"[",
"task",
".",
"task_id",
"]",
":",
"BaseOperator",
"=",
"task",
"# set task dependencies after creating tasks",
"for",
"task_name",
",",
"task_conf",
"in",
"tasks",
".",
"items",
"(",
")",
":",
"if",
"task_conf",
".",
"get",
"(",
"\"dependencies\"",
")",
":",
"source_task",
":",
"BaseOperator",
"=",
"tasks_dict",
"[",
"task_name",
"]",
"for",
"dep",
"in",
"task_conf",
"[",
"\"dependencies\"",
"]",
":",
"dep_task",
":",
"BaseOperator",
"=",
"tasks_dict",
"[",
"dep",
"]",
"source_task",
".",
"set_upstream",
"(",
"dep_task",
")",
"return",
"{",
"\"dag_id\"",
":",
"dag_params",
"[",
"\"dag_id\"",
"]",
",",
"\"dag\"",
":",
"dag",
"}"
] | Generates a DAG from the DAG parameters.
:returns: dict with dag_id and DAG object
:type: Dict[str, Union[str, DAG]] | [
"Generates",
"a",
"DAG",
"from",
"the",
"DAG",
"parameters",
"."
] | cc7cfe74e62f82859fe38d527e95311a2805723b | https://github.com/ajbosco/dag-factory/blob/cc7cfe74e62f82859fe38d527e95311a2805723b/dagfactory/dagbuilder.py#L63-L101 |
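The `build` method above reads a fixed set of keys out of the parsed DAG parameters. A minimal sketch of the dictionary shape it expects, inferred from the keys accessed in the code; all concrete values are invented for illustration:

# Hypothetical input for DagBuilder.build(); only the keys read above
# ("dag_id", "schedule_interval", "tasks", "operator", "dependencies",
# plus optional "description", "max_active_runs", "default_args") matter.
example_dag_params = {
    "dag_id": "example_dag",
    "schedule_interval": "0 3 * * *",
    "default_args": {"owner": "example_owner", "start_date": "2018-01-01"},
    "tasks": {
        "extract": {
            "operator": "airflow.operators.bash_operator.BashOperator",
            "bash_command": "echo extract",
        },
        "load": {
            "operator": "airflow.operators.bash_operator.BashOperator",
            "bash_command": "echo load",
            "dependencies": ["extract"],  # creates extract -> load
        },
    },
}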
5,237 | ajbosco/dag-factory | dagfactory/utils.py | merge_configs | def merge_configs(config: Dict[str, Any], default_config: Dict[str, Any]) -> Dict[str, Any]:
"""
Merges a `default` config with DAG config. Used to set default values
for a group of DAGs.
:param config: config to merge in default values
:type config: Dict[str, Any]
:param default_config: config to merge default values from
:type default_config: Dict[str, Any]
:returns: dict with merged configs
:type: Dict[str, Any]
"""
for key in default_config:
if key in config:
if isinstance(config[key], dict) and isinstance(default_config[key], dict):
merge_configs(config[key], default_config[key])
else:
config[key]: Any = default_config[key]
return config | python | def merge_configs(config: Dict[str, Any], default_config: Dict[str, Any]) -> Dict[str, Any]:
"""
Merges a `default` config with DAG config. Used to set default values
for a group of DAGs.
:param config: config to merge in default values
:type config: Dict[str, Any]
:param default_config: config to merge default values from
:type default_config: Dict[str, Any]
:returns: dict with merged configs
:type: Dict[str, Any]
"""
for key in default_config:
if key in config:
if isinstance(config[key], dict) and isinstance(default_config[key], dict):
merge_configs(config[key], default_config[key])
else:
config[key]: Any = default_config[key]
return config | [
"def",
"merge_configs",
"(",
"config",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
",",
"default_config",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
")",
"->",
"Dict",
"[",
"str",
",",
"Any",
"]",
":",
"for",
"key",
"in",
"default_config",
":",
"if",
"key",
"in",
"config",
":",
"if",
"isinstance",
"(",
"config",
"[",
"key",
"]",
",",
"dict",
")",
"and",
"isinstance",
"(",
"default_config",
"[",
"key",
"]",
",",
"dict",
")",
":",
"merge_configs",
"(",
"config",
"[",
"key",
"]",
",",
"default_config",
"[",
"key",
"]",
")",
"else",
":",
"config",
"[",
"key",
"]",
":",
"Any",
"=",
"default_config",
"[",
"key",
"]",
"return",
"config"
] | Merges a `default` config with DAG config. Used to set default values
for a group of DAGs.
:param config: config to merge in default values
:type config: Dict[str, Any]
:param default_config: config to merge default values from
:type default_config: Dict[str, Any]
:returns: dict with merged configs
:type: Dict[str, Any] | [
"Merges",
"a",
"default",
"config",
"with",
"DAG",
"config",
".",
"Used",
"to",
"set",
"default",
"values",
"for",
"a",
"group",
"of",
"DAGs",
"."
] | cc7cfe74e62f82859fe38d527e95311a2805723b | https://github.com/ajbosco/dag-factory/blob/cc7cfe74e62f82859fe38d527e95311a2805723b/dagfactory/utils.py#L68-L86 |
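Because `merge_configs` recurses into nested dictionaries, per-DAG settings override defaults at the leaf level rather than replacing whole sub-dicts. A quick illustrative call (values invented):

config = {"default_args": {"owner": "custom_owner"}}
defaults = {"default_args": {"owner": "default_owner", "retries": 1},
            "schedule_interval": "@daily"}
merged = merge_configs(config, defaults)
# merged == {"default_args": {"owner": "custom_owner", "retries": 1},
#            "schedule_interval": "@daily"}
# Note that config is mutated in place and also returned.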
5,238 | ajbosco/dag-factory | dagfactory/dagfactory.py | DagFactory._load_config | def _load_config(config_filepath: str) -> Dict[str, Any]:
"""
Loads YAML config file to dictionary
:returns: dict from YAML config file
"""
try:
config: Dict[str, Any] = yaml.load(stream=open(config_filepath, "r"))
except Exception as e:
raise Exception(f"Invalid DAG Factory config file; err: {e}")
return config | python | def _load_config(config_filepath: str) -> Dict[str, Any]:
"""
Loads YAML config file to dictionary
:returns: dict from YAML config file
"""
try:
config: Dict[str, Any] = yaml.load(stream=open(config_filepath, "r"))
except Exception as e:
raise Exception(f"Invalid DAG Factory config file; err: {e}")
return config | [
"def",
"_load_config",
"(",
"config_filepath",
":",
"str",
")",
"->",
"Dict",
"[",
"str",
",",
"Any",
"]",
":",
"try",
":",
"config",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
"=",
"yaml",
".",
"load",
"(",
"stream",
"=",
"open",
"(",
"config_filepath",
",",
"\"r\"",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"Exception",
"(",
"f\"Invalid DAG Factory config file; err: {e}\"",
")",
"return",
"config"
] | Loads YAML config file to dictionary
:returns: dict from YAML config file | [
"Loads",
"YAML",
"config",
"file",
"to",
"dictionary"
] | cc7cfe74e62f82859fe38d527e95311a2805723b | https://github.com/ajbosco/dag-factory/blob/cc7cfe74e62f82859fe38d527e95311a2805723b/dagfactory/dagfactory.py#L30-L40 |
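One caveat worth noting: `yaml.load` is called here without an explicit `Loader`, which PyYAML 5.1+ flags as unsafe because full loading can construct arbitrary Python objects, and the file handle opened inline is never closed. A safer equivalent sketch (a suggested pattern, not the project's code):

import yaml

def load_config_safely(config_filepath: str) -> dict:
    # safe_load restricts parsing to standard YAML tags
    with open(config_filepath, "r") as stream:
        return yaml.safe_load(stream)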
5,239 | ajbosco/dag-factory | dagfactory/dagfactory.py | DagFactory.get_dag_configs | def get_dag_configs(self) -> Dict[str, Dict[str, Any]]:
"""
Returns configuration for each DAG in the factory
:returns: dict with configuration for dags
"""
return {dag: self.config[dag] for dag in self.config.keys() if dag != "default"} | python | def get_dag_configs(self) -> Dict[str, Dict[str, Any]]:
"""
Returns configuration for each DAG in the factory
:returns: dict with configuration for dags
"""
return {dag: self.config[dag] for dag in self.config.keys() if dag != "default"} | [
"def",
"get_dag_configs",
"(",
"self",
")",
"->",
"Dict",
"[",
"str",
",",
"Dict",
"[",
"str",
",",
"Any",
"]",
"]",
":",
"return",
"{",
"dag",
":",
"self",
".",
"config",
"[",
"dag",
"]",
"for",
"dag",
"in",
"self",
".",
"config",
".",
"keys",
"(",
")",
"if",
"dag",
"!=",
"\"default\"",
"}"
] | Returns configuration for each DAG in the factory
:returns: dict with configuration for dags | [
"Returns",
"configuration",
"for",
"each",
"the",
"DAG",
"in",
"factory"
] | cc7cfe74e62f82859fe38d527e95311a2805723b | https://github.com/ajbosco/dag-factory/blob/cc7cfe74e62f82859fe38d527e95311a2805723b/dagfactory/dagfactory.py#L42-L48 |
5,240 | ajbosco/dag-factory | dagfactory/dagfactory.py | DagFactory.generate_dags | def generate_dags(self, globals: Dict[str, Any]) -> None:
"""
Generates DAGs from YAML config
:param globals: The globals() from the file used to generate DAGs. The dag_id
must be passed into globals() for Airflow to import
"""
dag_configs: Dict[str, Dict[str, Any]] = self.get_dag_configs()
default_config: Dict[str, Any] = self.get_default_config()
for dag_name, dag_config in dag_configs.items():
dag_builder: DagBuilder = DagBuilder(dag_name=dag_name,
dag_config=dag_config,
default_config=default_config)
try:
dag: Dict[str, Union[str, DAG]] = dag_builder.build()
except Exception as e:
raise Exception(
f"Failed to generate dag {dag_name}. make sure config is properly populated. err:{e}"
)
globals[dag["dag_id"]]: DAG = dag["dag"] | python | def generate_dags(self, globals: Dict[str, Any]) -> None:
"""
Generates DAGs from YAML config
:param globals: The globals() from the file used to generate DAGs. The dag_id
must be passed into globals() for Airflow to import
"""
dag_configs: Dict[str, Dict[str, Any]] = self.get_dag_configs()
default_config: Dict[str, Any] = self.get_default_config()
for dag_name, dag_config in dag_configs.items():
dag_builder: DagBuilder = DagBuilder(dag_name=dag_name,
dag_config=dag_config,
default_config=default_config)
try:
dag: Dict[str, Union[str, DAG]] = dag_builder.build()
except Exception as e:
raise Exception(
f"Failed to generate dag {dag_name}. make sure config is properly populated. err:{e}"
)
globals[dag["dag_id"]]: DAG = dag["dag"] | [
"def",
"generate_dags",
"(",
"self",
",",
"globals",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
")",
"->",
"None",
":",
"dag_configs",
":",
"Dict",
"[",
"str",
",",
"Dict",
"[",
"str",
",",
"Any",
"]",
"]",
"=",
"self",
".",
"get_dag_configs",
"(",
")",
"default_config",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
"=",
"self",
".",
"get_default_config",
"(",
")",
"for",
"dag_name",
",",
"dag_config",
"in",
"dag_configs",
".",
"items",
"(",
")",
":",
"dag_builder",
":",
"DagBuilder",
"=",
"DagBuilder",
"(",
"dag_name",
"=",
"dag_name",
",",
"dag_config",
"=",
"dag_config",
",",
"default_config",
"=",
"default_config",
")",
"try",
":",
"dag",
":",
"Dict",
"[",
"str",
",",
"Union",
"[",
"str",
",",
"DAG",
"]",
"]",
"=",
"dag_builder",
".",
"build",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"Exception",
"(",
"f\"Failed to generate dag {dag_name}. make sure config is properly populated. err:{e}\"",
")",
"globals",
"[",
"dag",
"[",
"\"dag_id\"",
"]",
"]",
":",
"DAG",
"=",
"dag",
"[",
"\"dag\"",
"]"
] | Generates DAGs from YAML config
:param globals: The globals() from the file used to generate DAGs. The dag_id
must be passed into globals() for Airflow to import | [
"Generates",
"DAGs",
"from",
"YAML",
"config"
] | cc7cfe74e62f82859fe38d527e95311a2805723b | https://github.com/ajbosco/dag-factory/blob/cc7cfe74e62f82859fe38d527e95311a2805723b/dagfactory/dagfactory.py#L58-L78 |
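Since Airflow discovers DAGs by scanning module-level globals, a DAG definition file using this factory only needs a couple of lines; a minimal sketch (the YAML path is an assumption):

# Hypothetical dags/example_dag_factory.py
import dagfactory

dag_factory = dagfactory.DagFactory("/opt/airflow/dags/config/example_dags.yml")
dag_factory.generate_dags(globals())  # each generated dag_id lands in this module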
5,241 | fronzbot/blinkpy | blinkpy/sync_module.py | BlinkSyncModule.attributes | def attributes(self):
"""Return sync attributes."""
attr = {
'name': self.name,
'id': self.sync_id,
'network_id': self.network_id,
'serial': self.serial,
'status': self.status,
'region': self.region,
'region_id': self.region_id,
}
return attr | python | def attributes(self):
"""Return sync attributes."""
attr = {
'name': self.name,
'id': self.sync_id,
'network_id': self.network_id,
'serial': self.serial,
'status': self.status,
'region': self.region,
'region_id': self.region_id,
}
return attr | [
"def",
"attributes",
"(",
"self",
")",
":",
"attr",
"=",
"{",
"'name'",
":",
"self",
".",
"name",
",",
"'id'",
":",
"self",
".",
"sync_id",
",",
"'network_id'",
":",
"self",
".",
"network_id",
",",
"'serial'",
":",
"self",
".",
"serial",
",",
"'status'",
":",
"self",
".",
"status",
",",
"'region'",
":",
"self",
".",
"region",
",",
"'region_id'",
":",
"self",
".",
"region_id",
",",
"}",
"return",
"attr"
] | Return sync attributes. | [
"Return",
"sync",
"attributes",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/sync_module.py#L41-L52 |
5,242 | fronzbot/blinkpy | blinkpy/sync_module.py | BlinkSyncModule.arm | def arm(self, value):
"""Arm or disarm system."""
if value:
return api.request_system_arm(self.blink, self.network_id)
return api.request_system_disarm(self.blink, self.network_id) | python | def arm(self, value):
"""Arm or disarm system."""
if value:
return api.request_system_arm(self.blink, self.network_id)
return api.request_system_disarm(self.blink, self.network_id) | [
"def",
"arm",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
":",
"return",
"api",
".",
"request_system_arm",
"(",
"self",
".",
"blink",
",",
"self",
".",
"network_id",
")",
"return",
"api",
".",
"request_system_disarm",
"(",
"self",
".",
"blink",
",",
"self",
".",
"network_id",
")"
] | Arm or disarm system. | [
"Arm",
"or",
"disarm",
"system",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/sync_module.py#L73-L78 |
5,243 | fronzbot/blinkpy | blinkpy/sync_module.py | BlinkSyncModule.start | def start(self):
"""Initialize the system."""
response = api.request_syncmodule(self.blink,
self.network_id,
force=True)
try:
self.summary = response['syncmodule']
self.network_id = self.summary['network_id']
except (TypeError, KeyError):
_LOGGER.error(("Could not retrieve sync module information "
"with response: %s"), response, exc_info=True)
return False
try:
self.sync_id = self.summary['id']
self.serial = self.summary['serial']
self.status = self.summary['status']
except KeyError:
_LOGGER.error("Could not extract some sync module info: %s",
response,
exc_info=True)
self.network_info = api.request_network_status(self.blink,
self.network_id)
self.check_new_videos()
try:
for camera_config in self.camera_list:
if 'name' not in camera_config:
break
name = camera_config['name']
self.cameras[name] = BlinkCamera(self)
self.motion[name] = False
camera_info = self.get_camera_info(camera_config['id'])
self.cameras[name].update(camera_info,
force_cache=True,
force=True)
except KeyError:
_LOGGER.error("Could not create cameras instances for %s",
self.name,
exc_info=True)
return False
return True | python | def start(self):
"""Initialize the system."""
response = api.request_syncmodule(self.blink,
self.network_id,
force=True)
try:
self.summary = response['syncmodule']
self.network_id = self.summary['network_id']
except (TypeError, KeyError):
_LOGGER.error(("Could not retrieve sync module information "
"with response: %s"), response, exc_info=True)
return False
try:
self.sync_id = self.summary['id']
self.serial = self.summary['serial']
self.status = self.summary['status']
except KeyError:
_LOGGER.error("Could not extract some sync module info: %s",
response,
exc_info=True)
self.network_info = api.request_network_status(self.blink,
self.network_id)
self.check_new_videos()
try:
for camera_config in self.camera_list:
if 'name' not in camera_config:
break
name = camera_config['name']
self.cameras[name] = BlinkCamera(self)
self.motion[name] = False
camera_info = self.get_camera_info(camera_config['id'])
self.cameras[name].update(camera_info,
force_cache=True,
force=True)
except KeyError:
_LOGGER.error("Could not create cameras instances for %s",
self.name,
exc_info=True)
return False
return True | [
"def",
"start",
"(",
"self",
")",
":",
"response",
"=",
"api",
".",
"request_syncmodule",
"(",
"self",
".",
"blink",
",",
"self",
".",
"network_id",
",",
"force",
"=",
"True",
")",
"try",
":",
"self",
".",
"summary",
"=",
"response",
"[",
"'syncmodule'",
"]",
"self",
".",
"network_id",
"=",
"self",
".",
"summary",
"[",
"'network_id'",
"]",
"except",
"(",
"TypeError",
",",
"KeyError",
")",
":",
"_LOGGER",
".",
"error",
"(",
"(",
"\"Could not retrieve sync module information \"",
"\"with response: %s\"",
")",
",",
"response",
",",
"exc_info",
"=",
"True",
")",
"return",
"False",
"try",
":",
"self",
".",
"sync_id",
"=",
"self",
".",
"summary",
"[",
"'id'",
"]",
"self",
".",
"serial",
"=",
"self",
".",
"summary",
"[",
"'serial'",
"]",
"self",
".",
"status",
"=",
"self",
".",
"summary",
"[",
"'status'",
"]",
"except",
"KeyError",
":",
"_LOGGER",
".",
"error",
"(",
"\"Could not extract some sync module info: %s\"",
",",
"response",
",",
"exc_info",
"=",
"True",
")",
"self",
".",
"network_info",
"=",
"api",
".",
"request_network_status",
"(",
"self",
".",
"blink",
",",
"self",
".",
"network_id",
")",
"self",
".",
"check_new_videos",
"(",
")",
"try",
":",
"for",
"camera_config",
"in",
"self",
".",
"camera_list",
":",
"if",
"'name'",
"not",
"in",
"camera_config",
":",
"break",
"name",
"=",
"camera_config",
"[",
"'name'",
"]",
"self",
".",
"cameras",
"[",
"name",
"]",
"=",
"BlinkCamera",
"(",
"self",
")",
"self",
".",
"motion",
"[",
"name",
"]",
"=",
"False",
"camera_info",
"=",
"self",
".",
"get_camera_info",
"(",
"camera_config",
"[",
"'id'",
"]",
")",
"self",
".",
"cameras",
"[",
"name",
"]",
".",
"update",
"(",
"camera_info",
",",
"force_cache",
"=",
"True",
",",
"force",
"=",
"True",
")",
"except",
"KeyError",
":",
"_LOGGER",
".",
"error",
"(",
"\"Could not create cameras instances for %s\"",
",",
"self",
".",
"name",
",",
"exc_info",
"=",
"True",
")",
"return",
"False",
"return",
"True"
] | Initialize the system. | [
"Initialize",
"the",
"system",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/sync_module.py#L80-L123 |
5,244 | fronzbot/blinkpy | blinkpy/sync_module.py | BlinkSyncModule.get_events | def get_events(self, **kwargs):
"""Retrieve events from server."""
force = kwargs.pop('force', False)
response = api.request_sync_events(self.blink,
self.network_id,
force=force)
try:
return response['event']
except (TypeError, KeyError):
_LOGGER.error("Could not extract events: %s",
response,
exc_info=True)
return False | python | def get_events(self, **kwargs):
"""Retrieve events from server."""
force = kwargs.pop('force', False)
response = api.request_sync_events(self.blink,
self.network_id,
force=force)
try:
return response['event']
except (TypeError, KeyError):
_LOGGER.error("Could not extract events: %s",
response,
exc_info=True)
return False | [
"def",
"get_events",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"force",
"=",
"kwargs",
".",
"pop",
"(",
"'force'",
",",
"False",
")",
"response",
"=",
"api",
".",
"request_sync_events",
"(",
"self",
".",
"blink",
",",
"self",
".",
"network_id",
",",
"force",
"=",
"force",
")",
"try",
":",
"return",
"response",
"[",
"'event'",
"]",
"except",
"(",
"TypeError",
",",
"KeyError",
")",
":",
"_LOGGER",
".",
"error",
"(",
"\"Could not extract events: %s\"",
",",
"response",
",",
"exc_info",
"=",
"True",
")",
"return",
"False"
] | Retrieve events from server. | [
"Retrieve",
"events",
"from",
"server",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/sync_module.py#L125-L137 |
5,245 | fronzbot/blinkpy | blinkpy/sync_module.py | BlinkSyncModule.get_camera_info | def get_camera_info(self, camera_id):
"""Retrieve camera information."""
response = api.request_camera_info(self.blink,
self.network_id,
camera_id)
try:
return response['camera'][0]
except (TypeError, KeyError):
_LOGGER.error("Could not extract camera info: %s",
response,
exc_info=True)
return [] | python | def get_camera_info(self, camera_id):
"""Retrieve camera information."""
response = api.request_camera_info(self.blink,
self.network_id,
camera_id)
try:
return response['camera'][0]
except (TypeError, KeyError):
_LOGGER.error("Could not extract camera info: %s",
response,
exc_info=True)
return [] | [
"def",
"get_camera_info",
"(",
"self",
",",
"camera_id",
")",
":",
"response",
"=",
"api",
".",
"request_camera_info",
"(",
"self",
".",
"blink",
",",
"self",
".",
"network_id",
",",
"camera_id",
")",
"try",
":",
"return",
"response",
"[",
"'camera'",
"]",
"[",
"0",
"]",
"except",
"(",
"TypeError",
",",
"KeyError",
")",
":",
"_LOGGER",
".",
"error",
"(",
"\"Could not extract camera info: %s\"",
",",
"response",
",",
"exc_info",
"=",
"True",
")",
"return",
"[",
"]"
] | Retrieve camera information. | [
"Retrieve",
"camera",
"information",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/sync_module.py#L139-L150 |
5,246 | fronzbot/blinkpy | blinkpy/sync_module.py | BlinkSyncModule.refresh | def refresh(self, force_cache=False):
"""Get all blink cameras and pulls their most recent status."""
self.network_info = api.request_network_status(self.blink,
self.network_id)
self.check_new_videos()
for camera_name in self.cameras.keys():
camera_id = self.cameras[camera_name].camera_id
camera_info = self.get_camera_info(camera_id)
self.cameras[camera_name].update(camera_info,
force_cache=force_cache) | python | def refresh(self, force_cache=False):
"""Get all blink cameras and pulls their most recent status."""
self.network_info = api.request_network_status(self.blink,
self.network_id)
self.check_new_videos()
for camera_name in self.cameras.keys():
camera_id = self.cameras[camera_name].camera_id
camera_info = self.get_camera_info(camera_id)
self.cameras[camera_name].update(camera_info,
force_cache=force_cache) | [
"def",
"refresh",
"(",
"self",
",",
"force_cache",
"=",
"False",
")",
":",
"self",
".",
"network_info",
"=",
"api",
".",
"request_network_status",
"(",
"self",
".",
"blink",
",",
"self",
".",
"network_id",
")",
"self",
".",
"check_new_videos",
"(",
")",
"for",
"camera_name",
"in",
"self",
".",
"cameras",
".",
"keys",
"(",
")",
":",
"camera_id",
"=",
"self",
".",
"cameras",
"[",
"camera_name",
"]",
".",
"camera_id",
"camera_info",
"=",
"self",
".",
"get_camera_info",
"(",
"camera_id",
")",
"self",
".",
"cameras",
"[",
"camera_name",
"]",
".",
"update",
"(",
"camera_info",
",",
"force_cache",
"=",
"force_cache",
")"
] | Get all blink cameras and pull their most recent status. | [
"Get",
"all",
"blink",
"cameras",
"and",
"pulls",
"their",
"most",
"recent",
"status",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/sync_module.py#L152-L161 |
5,247 | fronzbot/blinkpy | blinkpy/sync_module.py | BlinkSyncModule.check_new_videos | def check_new_videos(self):
"""Check if new videos since last refresh."""
resp = api.request_videos(self.blink,
time=self.blink.last_refresh,
page=0)
for camera in self.cameras.keys():
self.motion[camera] = False
try:
info = resp['videos']
except (KeyError, TypeError):
_LOGGER.warning("Could not check for motion. Response: %s", resp)
return False
for entry in info:
try:
name = entry['camera_name']
clip = entry['address']
timestamp = entry['created_at']
self.motion[name] = True
self.last_record[name] = {'clip': clip, 'time': timestamp}
except KeyError:
_LOGGER.debug("No new videos since last refresh.")
return True | python | def check_new_videos(self):
"""Check if new videos since last refresh."""
resp = api.request_videos(self.blink,
time=self.blink.last_refresh,
page=0)
for camera in self.cameras.keys():
self.motion[camera] = False
try:
info = resp['videos']
except (KeyError, TypeError):
_LOGGER.warning("Could not check for motion. Response: %s", resp)
return False
for entry in info:
try:
name = entry['camera_name']
clip = entry['address']
timestamp = entry['created_at']
self.motion[name] = True
self.last_record[name] = {'clip': clip, 'time': timestamp}
except KeyError:
_LOGGER.debug("No new videos since last refresh.")
return True | [
"def",
"check_new_videos",
"(",
"self",
")",
":",
"resp",
"=",
"api",
".",
"request_videos",
"(",
"self",
".",
"blink",
",",
"time",
"=",
"self",
".",
"blink",
".",
"last_refresh",
",",
"page",
"=",
"0",
")",
"for",
"camera",
"in",
"self",
".",
"cameras",
".",
"keys",
"(",
")",
":",
"self",
".",
"motion",
"[",
"camera",
"]",
"=",
"False",
"try",
":",
"info",
"=",
"resp",
"[",
"'videos'",
"]",
"except",
"(",
"KeyError",
",",
"TypeError",
")",
":",
"_LOGGER",
".",
"warning",
"(",
"\"Could not check for motion. Response: %s\"",
",",
"resp",
")",
"return",
"False",
"for",
"entry",
"in",
"info",
":",
"try",
":",
"name",
"=",
"entry",
"[",
"'camera_name'",
"]",
"clip",
"=",
"entry",
"[",
"'address'",
"]",
"timestamp",
"=",
"entry",
"[",
"'created_at'",
"]",
"self",
".",
"motion",
"[",
"name",
"]",
"=",
"True",
"self",
".",
"last_record",
"[",
"name",
"]",
"=",
"{",
"'clip'",
":",
"clip",
",",
"'time'",
":",
"timestamp",
"}",
"except",
"KeyError",
":",
"_LOGGER",
".",
"debug",
"(",
"\"No new videos since last refresh.\"",
")",
"return",
"True"
] | Check for new videos since last refresh. | [
"Check",
"if",
"new",
"videos",
"since",
"last",
"refresh",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/sync_module.py#L163-L188 |
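The loop above only trusts fields it can read, so a malformed entry is skipped at the per-entry KeyError rather than failing the whole poll. The response shape it assumes, with the three keys it extracts (values invented for illustration):

# Shape consumed by check_new_videos(); values are illustrative only.
resp = {
    "videos": [
        {
            "camera_name": "front_door",
            "address": "/api/v2/accounts/clip_1234.mp4",
            "created_at": "2019-01-01T12:00:00+00:00",
        },
    ]
}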
5,248 | fronzbot/blinkpy | blinkpy/camera.py | BlinkCamera.attributes | def attributes(self):
"""Return dictionary of all camera attributes."""
attributes = {
'name': self.name,
'camera_id': self.camera_id,
'serial': self.serial,
'temperature': self.temperature,
'temperature_c': self.temperature_c,
'temperature_calibrated': self.temperature_calibrated,
'battery': self.battery,
'thumbnail': self.thumbnail,
'video': self.clip,
'motion_enabled': self.motion_enabled,
'motion_detected': self.motion_detected,
'wifi_strength': self.wifi_strength,
'network_id': self.sync.network_id,
'sync_module': self.sync.name,
'last_record': self.last_record
}
return attributes | python | def attributes(self):
"""Return dictionary of all camera attributes."""
attributes = {
'name': self.name,
'camera_id': self.camera_id,
'serial': self.serial,
'temperature': self.temperature,
'temperature_c': self.temperature_c,
'temperature_calibrated': self.temperature_calibrated,
'battery': self.battery,
'thumbnail': self.thumbnail,
'video': self.clip,
'motion_enabled': self.motion_enabled,
'motion_detected': self.motion_detected,
'wifi_strength': self.wifi_strength,
'network_id': self.sync.network_id,
'sync_module': self.sync.name,
'last_record': self.last_record
}
return attributes | [
"def",
"attributes",
"(",
"self",
")",
":",
"attributes",
"=",
"{",
"'name'",
":",
"self",
".",
"name",
",",
"'camera_id'",
":",
"self",
".",
"camera_id",
",",
"'serial'",
":",
"self",
".",
"serial",
",",
"'temperature'",
":",
"self",
".",
"temperature",
",",
"'temperature_c'",
":",
"self",
".",
"temperature_c",
",",
"'temperature_calibrated'",
":",
"self",
".",
"temperature_calibrated",
",",
"'battery'",
":",
"self",
".",
"battery",
",",
"'thumbnail'",
":",
"self",
".",
"thumbnail",
",",
"'video'",
":",
"self",
".",
"clip",
",",
"'motion_enabled'",
":",
"self",
".",
"motion_enabled",
",",
"'motion_detected'",
":",
"self",
".",
"motion_detected",
",",
"'wifi_strength'",
":",
"self",
".",
"wifi_strength",
",",
"'network_id'",
":",
"self",
".",
"sync",
".",
"network_id",
",",
"'sync_module'",
":",
"self",
".",
"sync",
".",
"name",
",",
"'last_record'",
":",
"self",
".",
"last_record",
"}",
"return",
"attributes"
] | Return dictionary of all camera attributes. | [
"Return",
"dictionary",
"of",
"all",
"camera",
"attributes",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/camera.py#L34-L53 |
5,249 | fronzbot/blinkpy | blinkpy/camera.py | BlinkCamera.snap_picture | def snap_picture(self):
"""Take a picture with camera to create a new thumbnail."""
return api.request_new_image(self.sync.blink,
self.network_id,
self.camera_id) | python | def snap_picture(self):
"""Take a picture with camera to create a new thumbnail."""
return api.request_new_image(self.sync.blink,
self.network_id,
self.camera_id) | [
"def",
"snap_picture",
"(",
"self",
")",
":",
"return",
"api",
".",
"request_new_image",
"(",
"self",
".",
"sync",
".",
"blink",
",",
"self",
".",
"network_id",
",",
"self",
".",
"camera_id",
")"
] | Take a picture with camera to create a new thumbnail. | [
"Take",
"a",
"picture",
"with",
"camera",
"to",
"create",
"a",
"new",
"thumbnail",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/camera.py#L79-L83 |
5,250 | fronzbot/blinkpy | blinkpy/camera.py | BlinkCamera.set_motion_detect | def set_motion_detect(self, enable):
"""Set motion detection."""
if enable:
return api.request_motion_detection_enable(self.sync.blink,
self.network_id,
self.camera_id)
return api.request_motion_detection_disable(self.sync.blink,
self.network_id,
self.camera_id) | python | def set_motion_detect(self, enable):
"""Set motion detection."""
if enable:
return api.request_motion_detection_enable(self.sync.blink,
self.network_id,
self.camera_id)
return api.request_motion_detection_disable(self.sync.blink,
self.network_id,
self.camera_id) | [
"def",
"set_motion_detect",
"(",
"self",
",",
"enable",
")",
":",
"if",
"enable",
":",
"return",
"api",
".",
"request_motion_detection_enable",
"(",
"self",
".",
"sync",
".",
"blink",
",",
"self",
".",
"network_id",
",",
"self",
".",
"camera_id",
")",
"return",
"api",
".",
"request_motion_detection_disable",
"(",
"self",
".",
"sync",
".",
"blink",
",",
"self",
".",
"network_id",
",",
"self",
".",
"camera_id",
")"
] | Set motion detection. | [
"Set",
"motion",
"detection",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/camera.py#L85-L93 |
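Given a populated system, toggling motion detection is a single call; a minimal usage sketch (the camera name is an assumption):

camera = blink.cameras["front_door"]   # hypothetical camera name
camera.set_motion_detect(True)         # arm motion detection
camera.set_motion_detect(False)        # disarm it again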
5,251 | fronzbot/blinkpy | blinkpy/camera.py | BlinkCamera.update | def update(self, config, force_cache=False, **kwargs):
"""Update camera info."""
# force = kwargs.pop('force', False)
self.name = config['name']
self.camera_id = str(config['id'])
self.network_id = str(config['network_id'])
self.serial = config['serial']
self.motion_enabled = config['enabled']
self.battery_voltage = config['battery_voltage']
self.battery_state = config['battery_state']
self.temperature = config['temperature']
self.wifi_strength = config['wifi_strength']
# Retrieve calibrated temperature from special endpoint
resp = api.request_camera_sensors(self.sync.blink,
self.network_id,
self.camera_id)
try:
self.temperature_calibrated = resp['temp']
except KeyError:
self.temperature_calibrated = self.temperature
_LOGGER.warning("Could not retrieve calibrated temperature.")
# Check if thumbnail exists in config, if not try to
# get it from the homescreen info in the sync module
# otherwise set it to None and log an error
new_thumbnail = None
if config['thumbnail']:
thumb_addr = config['thumbnail']
else:
thumb_addr = self.get_thumb_from_homescreen()
if thumb_addr is not None:
new_thumbnail = "{}{}.jpg".format(self.sync.urls.base_url,
thumb_addr)
try:
self.motion_detected = self.sync.motion[self.name]
except KeyError:
self.motion_detected = False
clip_addr = None
if self.name in self.sync.last_record:
clip_addr = self.sync.last_record[self.name]['clip']
self.last_record = self.sync.last_record[self.name]['time']
self.clip = "{}{}".format(self.sync.urls.base_url,
clip_addr)
# If the thumbnail or clip have changed, update the cache
update_cached_image = False
if new_thumbnail != self.thumbnail or self._cached_image is None:
update_cached_image = True
self.thumbnail = new_thumbnail
update_cached_video = False
if self._cached_video is None or self.motion_detected:
update_cached_video = True
if new_thumbnail is not None and (update_cached_image or force_cache):
self._cached_image = api.http_get(self.sync.blink,
url=self.thumbnail,
stream=True,
json=False)
if clip_addr is not None and (update_cached_video or force_cache):
self._cached_video = api.http_get(self.sync.blink,
url=self.clip,
stream=True,
json=False) | python | def update(self, config, force_cache=False, **kwargs):
"""Update camera info."""
# force = kwargs.pop('force', False)
self.name = config['name']
self.camera_id = str(config['id'])
self.network_id = str(config['network_id'])
self.serial = config['serial']
self.motion_enabled = config['enabled']
self.battery_voltage = config['battery_voltage']
self.battery_state = config['battery_state']
self.temperature = config['temperature']
self.wifi_strength = config['wifi_strength']
# Retrieve calibrated temperature from special endpoint
resp = api.request_camera_sensors(self.sync.blink,
self.network_id,
self.camera_id)
try:
self.temperature_calibrated = resp['temp']
except KeyError:
self.temperature_calibrated = self.temperature
_LOGGER.warning("Could not retrieve calibrated temperature.")
# Check if thumbnail exists in config, if not try to
# get it from the homescreen info in the sync module
# otherwise set it to None and log an error
new_thumbnail = None
if config['thumbnail']:
thumb_addr = config['thumbnail']
else:
thumb_addr = self.get_thumb_from_homescreen()
if thumb_addr is not None:
new_thumbnail = "{}{}.jpg".format(self.sync.urls.base_url,
thumb_addr)
try:
self.motion_detected = self.sync.motion[self.name]
except KeyError:
self.motion_detected = False
clip_addr = None
if self.name in self.sync.last_record:
clip_addr = self.sync.last_record[self.name]['clip']
self.last_record = self.sync.last_record[self.name]['time']
self.clip = "{}{}".format(self.sync.urls.base_url,
clip_addr)
# If the thumbnail or clip have changed, update the cache
update_cached_image = False
if new_thumbnail != self.thumbnail or self._cached_image is None:
update_cached_image = True
self.thumbnail = new_thumbnail
update_cached_video = False
if self._cached_video is None or self.motion_detected:
update_cached_video = True
if new_thumbnail is not None and (update_cached_image or force_cache):
self._cached_image = api.http_get(self.sync.blink,
url=self.thumbnail,
stream=True,
json=False)
if clip_addr is not None and (update_cached_video or force_cache):
self._cached_video = api.http_get(self.sync.blink,
url=self.clip,
stream=True,
json=False) | [
"def",
"update",
"(",
"self",
",",
"config",
",",
"force_cache",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"# force = kwargs.pop('force', False)",
"self",
".",
"name",
"=",
"config",
"[",
"'name'",
"]",
"self",
".",
"camera_id",
"=",
"str",
"(",
"config",
"[",
"'id'",
"]",
")",
"self",
".",
"network_id",
"=",
"str",
"(",
"config",
"[",
"'network_id'",
"]",
")",
"self",
".",
"serial",
"=",
"config",
"[",
"'serial'",
"]",
"self",
".",
"motion_enabled",
"=",
"config",
"[",
"'enabled'",
"]",
"self",
".",
"battery_voltage",
"=",
"config",
"[",
"'battery_voltage'",
"]",
"self",
".",
"battery_state",
"=",
"config",
"[",
"'battery_state'",
"]",
"self",
".",
"temperature",
"=",
"config",
"[",
"'temperature'",
"]",
"self",
".",
"wifi_strength",
"=",
"config",
"[",
"'wifi_strength'",
"]",
"# Retrieve calibrated temperature from special endpoint",
"resp",
"=",
"api",
".",
"request_camera_sensors",
"(",
"self",
".",
"sync",
".",
"blink",
",",
"self",
".",
"network_id",
",",
"self",
".",
"camera_id",
")",
"try",
":",
"self",
".",
"temperature_calibrated",
"=",
"resp",
"[",
"'temp'",
"]",
"except",
"KeyError",
":",
"self",
".",
"temperature_calibrated",
"=",
"self",
".",
"temperature",
"_LOGGER",
".",
"warning",
"(",
"\"Could not retrieve calibrated temperature.\"",
")",
"# Check if thumbnail exists in config, if not try to",
"# get it from the homescreen info in teh sync module",
"# otherwise set it to None and log an error",
"new_thumbnail",
"=",
"None",
"if",
"config",
"[",
"'thumbnail'",
"]",
":",
"thumb_addr",
"=",
"config",
"[",
"'thumbnail'",
"]",
"else",
":",
"thumb_addr",
"=",
"self",
".",
"get_thumb_from_homescreen",
"(",
")",
"if",
"thumb_addr",
"is",
"not",
"None",
":",
"new_thumbnail",
"=",
"\"{}{}.jpg\"",
".",
"format",
"(",
"self",
".",
"sync",
".",
"urls",
".",
"base_url",
",",
"thumb_addr",
")",
"try",
":",
"self",
".",
"motion_detected",
"=",
"self",
".",
"sync",
".",
"motion",
"[",
"self",
".",
"name",
"]",
"except",
"KeyError",
":",
"self",
".",
"motion_detected",
"=",
"False",
"clip_addr",
"=",
"None",
"if",
"self",
".",
"name",
"in",
"self",
".",
"sync",
".",
"last_record",
":",
"clip_addr",
"=",
"self",
".",
"sync",
".",
"last_record",
"[",
"self",
".",
"name",
"]",
"[",
"'clip'",
"]",
"self",
".",
"last_record",
"=",
"self",
".",
"sync",
".",
"last_record",
"[",
"self",
".",
"name",
"]",
"[",
"'time'",
"]",
"self",
".",
"clip",
"=",
"\"{}{}\"",
".",
"format",
"(",
"self",
".",
"sync",
".",
"urls",
".",
"base_url",
",",
"clip_addr",
")",
"# If the thumbnail or clip have changed, update the cache",
"update_cached_image",
"=",
"False",
"if",
"new_thumbnail",
"!=",
"self",
".",
"thumbnail",
"or",
"self",
".",
"_cached_image",
"is",
"None",
":",
"update_cached_image",
"=",
"True",
"self",
".",
"thumbnail",
"=",
"new_thumbnail",
"update_cached_video",
"=",
"False",
"if",
"self",
".",
"_cached_video",
"is",
"None",
"or",
"self",
".",
"motion_detected",
":",
"update_cached_video",
"=",
"True",
"if",
"new_thumbnail",
"is",
"not",
"None",
"and",
"(",
"update_cached_image",
"or",
"force_cache",
")",
":",
"self",
".",
"_cached_image",
"=",
"api",
".",
"http_get",
"(",
"self",
".",
"sync",
".",
"blink",
",",
"url",
"=",
"self",
".",
"thumbnail",
",",
"stream",
"=",
"True",
",",
"json",
"=",
"False",
")",
"if",
"clip_addr",
"is",
"not",
"None",
"and",
"(",
"update_cached_video",
"or",
"force_cache",
")",
":",
"self",
".",
"_cached_video",
"=",
"api",
".",
"http_get",
"(",
"self",
".",
"sync",
".",
"blink",
",",
"url",
"=",
"self",
".",
"clip",
",",
"stream",
"=",
"True",
",",
"json",
"=",
"False",
")"
] | Update camera info. | [
"Update",
"camera",
"info",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/camera.py#L95-L162 |
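`update` consumes the camera config dict returned by `get_camera_info` and reads a fixed set of keys. The minimal shape it requires, taken from the attribute reads above (values invented):

# Minimal config accepted by BlinkCamera.update(); values are illustrative.
camera_info = {
    "name": "front_door",
    "id": 1111,
    "network_id": 5555,
    "serial": "X0000001",
    "enabled": True,
    "battery_voltage": 165,
    "battery_state": "ok",
    "temperature": 68,
    "wifi_strength": 4,
    "thumbnail": "/media/production/thumbnail_0001",
}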
5,252 | fronzbot/blinkpy | blinkpy/camera.py | BlinkCamera.image_to_file | def image_to_file(self, path):
"""
Write image to file.
:param path: Path to write file
"""
_LOGGER.debug("Writing image from %s to %s", self.name, path)
response = self._cached_image
if response.status_code == 200:
with open(path, 'wb') as imgfile:
copyfileobj(response.raw, imgfile)
else:
_LOGGER.error("Cannot write image to file, response %s",
response.status_code,
exc_info=True) | python | def image_to_file(self, path):
"""
Write image to file.
:param path: Path to write file
"""
_LOGGER.debug("Writing image from %s to %s", self.name, path)
response = self._cached_image
if response.status_code == 200:
with open(path, 'wb') as imgfile:
copyfileobj(response.raw, imgfile)
else:
_LOGGER.error("Cannot write image to file, response %s",
response.status_code,
exc_info=True) | [
"def",
"image_to_file",
"(",
"self",
",",
"path",
")",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Writing image from %s to %s\"",
",",
"self",
".",
"name",
",",
"path",
")",
"response",
"=",
"self",
".",
"_cached_image",
"if",
"response",
".",
"status_code",
"==",
"200",
":",
"with",
"open",
"(",
"path",
",",
"'wb'",
")",
"as",
"imgfile",
":",
"copyfileobj",
"(",
"response",
".",
"raw",
",",
"imgfile",
")",
"else",
":",
"_LOGGER",
".",
"error",
"(",
"\"Cannot write image to file, response %s\"",
",",
"response",
".",
"status_code",
",",
"exc_info",
"=",
"True",
")"
] | Write image to file.
:param path: Path to write file | [
"Write",
"image",
"to",
"file",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/camera.py#L164-L178 |
5,253 | fronzbot/blinkpy | blinkpy/camera.py | BlinkCamera.video_to_file | def video_to_file(self, path):
"""Write video to file.
:param path: Path to write file
"""
_LOGGER.debug("Writing video from %s to %s", self.name, path)
response = self._cached_video
if response is None:
_LOGGER.error("No saved video exist for %s.",
self.name,
exc_info=True)
return
with open(path, 'wb') as vidfile:
copyfileobj(response.raw, vidfile) | python | def video_to_file(self, path):
"""Write video to file.
:param path: Path to write file
"""
_LOGGER.debug("Writing video from %s to %s", self.name, path)
response = self._cached_video
if response is None:
_LOGGER.error("No saved video exist for %s.",
self.name,
exc_info=True)
return
with open(path, 'wb') as vidfile:
copyfileobj(response.raw, vidfile) | [
"def",
"video_to_file",
"(",
"self",
",",
"path",
")",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Writing video from %s to %s\"",
",",
"self",
".",
"name",
",",
"path",
")",
"response",
"=",
"self",
".",
"_cached_video",
"if",
"response",
"is",
"None",
":",
"_LOGGER",
".",
"error",
"(",
"\"No saved video exist for %s.\"",
",",
"self",
".",
"name",
",",
"exc_info",
"=",
"True",
")",
"return",
"with",
"open",
"(",
"path",
",",
"'wb'",
")",
"as",
"vidfile",
":",
"copyfileobj",
"(",
"response",
".",
"raw",
",",
"vidfile",
")"
] | Write video to file.
:param path: Path to write file | [
"Write",
"video",
"to",
"file",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/camera.py#L180-L193 |
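Both writers above stream the cached `requests` response to disk with `copyfileobj`, so nothing is buffered in memory. A usage sketch, assuming a started `Blink` instance whose camera caches have been populated:

camera = blink.cameras["front_door"]         # hypothetical camera name
camera.image_to_file("/tmp/front_door.jpg")  # write the cached thumbnail
camera.video_to_file("/tmp/front_door.mp4")  # write the cached motion clip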
5,254 | fronzbot/blinkpy | blinkpy/camera.py | BlinkCamera.get_thumb_from_homescreen | def get_thumb_from_homescreen(self):
"""Retrieve thumbnail from homescreen."""
for device in self.sync.homescreen['devices']:
try:
device_type = device['device_type']
device_name = device['name']
device_thumb = device['thumbnail']
if device_type == 'camera' and device_name == self.name:
return device_thumb
except KeyError:
pass
_LOGGER.error("Could not find thumbnail for camera %s",
self.name,
exc_info=True)
return None | python | def get_thumb_from_homescreen(self):
"""Retrieve thumbnail from homescreen."""
for device in self.sync.homescreen['devices']:
try:
device_type = device['device_type']
device_name = device['name']
device_thumb = device['thumbnail']
if device_type == 'camera' and device_name == self.name:
return device_thumb
except KeyError:
pass
_LOGGER.error("Could not find thumbnail for camera %s",
self.name,
exc_info=True)
return None | [
"def",
"get_thumb_from_homescreen",
"(",
"self",
")",
":",
"for",
"device",
"in",
"self",
".",
"sync",
".",
"homescreen",
"[",
"'devices'",
"]",
":",
"try",
":",
"device_type",
"=",
"device",
"[",
"'device_type'",
"]",
"device_name",
"=",
"device",
"[",
"'name'",
"]",
"device_thumb",
"=",
"device",
"[",
"'thumbnail'",
"]",
"if",
"device_type",
"==",
"'camera'",
"and",
"device_name",
"==",
"self",
".",
"name",
":",
"return",
"device_thumb",
"except",
"KeyError",
":",
"pass",
"_LOGGER",
".",
"error",
"(",
"\"Could not find thumbnail for camera %s\"",
",",
"self",
".",
"name",
",",
"exc_info",
"=",
"True",
")",
"return",
"None"
] | Retrieve thumbnail from homescreen. | [
"Retrieve",
"thumbnail",
"from",
"homescreen",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/camera.py#L195-L209 |
5,255 | fronzbot/blinkpy | blinkpy/helpers/util.py | get_time | def get_time(time_to_convert=None):
"""Create blink-compatible timestamp."""
if time_to_convert is None:
time_to_convert = time.time()
return time.strftime(TIMESTAMP_FORMAT, time.localtime(time_to_convert)) | python | def get_time(time_to_convert=None):
"""Create blink-compatible timestamp."""
if time_to_convert is None:
time_to_convert = time.time()
return time.strftime(TIMESTAMP_FORMAT, time.localtime(time_to_convert)) | [
"def",
"get_time",
"(",
"time_to_convert",
"=",
"None",
")",
":",
"if",
"time_to_convert",
"is",
"None",
":",
"time_to_convert",
"=",
"time",
".",
"time",
"(",
")",
"return",
"time",
".",
"strftime",
"(",
"TIMESTAMP_FORMAT",
",",
"time",
".",
"localtime",
"(",
"time_to_convert",
")",
")"
] | Create blink-compatible timestamp. | [
"Create",
"blink",
"-",
"compatible",
"timestamp",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/helpers/util.py#L14-L18 |
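`get_time` wraps `time.strftime` with the module-level `TIMESTAMP_FORMAT` (defined elsewhere in helpers/util.py and not shown in this record), so the exact output pattern is not reproduced here; only the call shape is illustrated:

now_stamp = get_time()             # current time, Blink-compatible format
old_stamp = get_time(1546300800)   # format an explicit epoch timestamp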
5,256 | fronzbot/blinkpy | blinkpy/helpers/util.py | merge_dicts | def merge_dicts(dict_a, dict_b):
"""Merge two dictionaries into one."""
duplicates = [val for val in dict_a if val in dict_b]
if duplicates:
_LOGGER.warning(("Duplicates found during merge: %s. "
"Renaming is recommended."), duplicates)
return {**dict_a, **dict_b} | python | def merge_dicts(dict_a, dict_b):
"""Merge two dictionaries into one."""
duplicates = [val for val in dict_a if val in dict_b]
if duplicates:
_LOGGER.warning(("Duplicates found during merge: %s. "
"Renaming is recommended."), duplicates)
return {**dict_a, **dict_b} | [
"def",
"merge_dicts",
"(",
"dict_a",
",",
"dict_b",
")",
":",
"duplicates",
"=",
"[",
"val",
"for",
"val",
"in",
"dict_a",
"if",
"val",
"in",
"dict_b",
"]",
"if",
"duplicates",
":",
"_LOGGER",
".",
"warning",
"(",
"(",
"\"Duplicates found during merge: %s. \"",
"\"Renaming is recommended.\"",
")",
",",
"duplicates",
")",
"return",
"{",
"*",
"*",
"dict_a",
",",
"*",
"*",
"dict_b",
"}"
] | Merge two dictionaries into one. | [
"Merge",
"two",
"dictionaries",
"into",
"one",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/helpers/util.py#L21-L27 |
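Unlike the recursive `merge_configs` seen earlier in the dag-factory records, `merge_dicts` is shallow and merely warns on collisions; because `dict_b` is unpacked last, its values win. For example:

a = {"front_door": 1, "garage": 2}
b = {"garage": 3}
merged = merge_dicts(a, b)
# logs: "Duplicates found during merge: ['garage']. Renaming is recommended."
# merged == {"front_door": 1, "garage": 3}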
5,257 | fronzbot/blinkpy | blinkpy/helpers/util.py | attempt_reauthorization | def attempt_reauthorization(blink):
"""Attempt to refresh auth token and links."""
_LOGGER.info("Auth token expired, attempting reauthorization.")
headers = blink.get_auth_token(is_retry=True)
return headers | python | def attempt_reauthorization(blink):
"""Attempt to refresh auth token and links."""
_LOGGER.info("Auth token expired, attempting reauthorization.")
headers = blink.get_auth_token(is_retry=True)
return headers | [
"def",
"attempt_reauthorization",
"(",
"blink",
")",
":",
"_LOGGER",
".",
"info",
"(",
"\"Auth token expired, attempting reauthorization.\"",
")",
"headers",
"=",
"blink",
".",
"get_auth_token",
"(",
"is_retry",
"=",
"True",
")",
"return",
"headers"
] | Attempt to refresh auth token and links. | [
"Attempt",
"to",
"refresh",
"auth",
"token",
"and",
"links",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/helpers/util.py#L36-L40 |
5,258 | fronzbot/blinkpy | blinkpy/helpers/util.py | http_req | def http_req(blink, url='http://example.com', data=None, headers=None,
reqtype='get', stream=False, json_resp=True, is_retry=False):
"""
Perform server requests and check if reauthorization is necessary.
:param blink: Blink instance
:param url: URL to perform request
:param data: Data to send (default: None)
:param headers: Headers to send (default: None)
:param reqtype: Can be 'get' or 'post' (default: 'get')
:param stream: Stream response? True/FALSE
:param json_resp: Return JSON response? TRUE/False
:param is_retry: Is this a retry attempt? True/FALSE
"""
if reqtype == 'post':
req = Request('POST', url, headers=headers, data=data)
elif reqtype == 'get':
req = Request('GET', url, headers=headers)
else:
_LOGGER.error("Invalid request type: %s", reqtype)
raise BlinkException(ERROR.REQUEST)
prepped = req.prepare()
try:
response = blink.session.send(prepped, stream=stream, timeout=10)
if json_resp and 'code' in response.json():
if is_retry:
_LOGGER.error("Cannot obtain new token for server auth.")
return None
else:
headers = attempt_reauthorization(blink)
if not headers:
raise exceptions.ConnectionError
return http_req(blink, url=url, data=data, headers=headers,
reqtype=reqtype, stream=stream,
json_resp=json_resp, is_retry=True)
except (exceptions.ConnectionError, exceptions.Timeout):
_LOGGER.info("Cannot connect to server with url %s.", url)
if not is_retry:
headers = attempt_reauthorization(blink)
return http_req(blink, url=url, data=data, headers=headers,
reqtype=reqtype, stream=stream,
json_resp=json_resp, is_retry=True)
_LOGGER.error("Endpoint %s failed. Possible issue with Blink servers.",
url)
return None
if json_resp:
return response.json()
return response | python | def http_req(blink, url='http://example.com', data=None, headers=None,
reqtype='get', stream=False, json_resp=True, is_retry=False):
"""
Perform server requests and check if reauthorization is necessary.
:param blink: Blink instance
:param url: URL to perform request
:param data: Data to send (default: None)
:param headers: Headers to send (default: None)
:param reqtype: Can be 'get' or 'post' (default: 'get')
:param stream: Stream response? True/FALSE
:param json_resp: Return JSON response? TRUE/False
:param is_retry: Is this a retry attempt? True/FALSE
"""
if reqtype == 'post':
req = Request('POST', url, headers=headers, data=data)
elif reqtype == 'get':
req = Request('GET', url, headers=headers)
else:
_LOGGER.error("Invalid request type: %s", reqtype)
raise BlinkException(ERROR.REQUEST)
prepped = req.prepare()
try:
response = blink.session.send(prepped, stream=stream, timeout=10)
if json_resp and 'code' in response.json():
if is_retry:
_LOGGER.error("Cannot obtain new token for server auth.")
return None
else:
headers = attempt_reauthorization(blink)
if not headers:
raise exceptions.ConnectionError
return http_req(blink, url=url, data=data, headers=headers,
reqtype=reqtype, stream=stream,
json_resp=json_resp, is_retry=True)
except (exceptions.ConnectionError, exceptions.Timeout):
_LOGGER.info("Cannot connect to server with url %s.", url)
if not is_retry:
headers = attempt_reauthorization(blink)
return http_req(blink, url=url, data=data, headers=headers,
reqtype=reqtype, stream=stream,
json_resp=json_resp, is_retry=True)
_LOGGER.error("Endpoint %s failed. Possible issue with Blink servers.",
url)
return None
if json_resp:
return response.json()
return response | [
"def",
"http_req",
"(",
"blink",
",",
"url",
"=",
"'http://example.com'",
",",
"data",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"reqtype",
"=",
"'get'",
",",
"stream",
"=",
"False",
",",
"json_resp",
"=",
"True",
",",
"is_retry",
"=",
"False",
")",
":",
"if",
"reqtype",
"==",
"'post'",
":",
"req",
"=",
"Request",
"(",
"'POST'",
",",
"url",
",",
"headers",
"=",
"headers",
",",
"data",
"=",
"data",
")",
"elif",
"reqtype",
"==",
"'get'",
":",
"req",
"=",
"Request",
"(",
"'GET'",
",",
"url",
",",
"headers",
"=",
"headers",
")",
"else",
":",
"_LOGGER",
".",
"error",
"(",
"\"Invalid request type: %s\"",
",",
"reqtype",
")",
"raise",
"BlinkException",
"(",
"ERROR",
".",
"REQUEST",
")",
"prepped",
"=",
"req",
".",
"prepare",
"(",
")",
"try",
":",
"response",
"=",
"blink",
".",
"session",
".",
"send",
"(",
"prepped",
",",
"stream",
"=",
"stream",
",",
"timeout",
"=",
"10",
")",
"if",
"json_resp",
"and",
"'code'",
"in",
"response",
".",
"json",
"(",
")",
":",
"if",
"is_retry",
":",
"_LOGGER",
".",
"error",
"(",
"\"Cannot obtain new token for server auth.\"",
")",
"return",
"None",
"else",
":",
"headers",
"=",
"attempt_reauthorization",
"(",
"blink",
")",
"if",
"not",
"headers",
":",
"raise",
"exceptions",
".",
"ConnectionError",
"return",
"http_req",
"(",
"blink",
",",
"url",
"=",
"url",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"headers",
",",
"reqtype",
"=",
"reqtype",
",",
"stream",
"=",
"stream",
",",
"json_resp",
"=",
"json_resp",
",",
"is_retry",
"=",
"True",
")",
"except",
"(",
"exceptions",
".",
"ConnectionError",
",",
"exceptions",
".",
"Timeout",
")",
":",
"_LOGGER",
".",
"info",
"(",
"\"Cannot connect to server with url %s.\"",
",",
"url",
")",
"if",
"not",
"is_retry",
":",
"headers",
"=",
"attempt_reauthorization",
"(",
"blink",
")",
"return",
"http_req",
"(",
"blink",
",",
"url",
"=",
"url",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"headers",
",",
"reqtype",
"=",
"reqtype",
",",
"stream",
"=",
"stream",
",",
"json_resp",
"=",
"json_resp",
",",
"is_retry",
"=",
"True",
")",
"_LOGGER",
".",
"error",
"(",
"\"Endpoint %s failed. Possible issue with Blink servers.\"",
",",
"url",
")",
"return",
"None",
"if",
"json_resp",
":",
"return",
"response",
".",
"json",
"(",
")",
"return",
"response"
] | Perform server requests and check if reauthorization is necessary.
:param blink: Blink instance
:param url: URL to perform request
:param data: Data to send (default: None)
:param headers: Headers to send (default: None)
:param reqtype: Can be 'get' or 'post' (default: 'get')
:param stream: Stream response? True/FALSE
:param json_resp: Return JSON response? TRUE/False
:param is_retry: Is this a retry attempt? True/FALSE | [
"Perform",
"server",
"requests",
"and",
"check",
"if",
"reauthorization",
"neccessary",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/helpers/util.py#L43-L94 |
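The retry path above permits exactly one re-authentication per call: an error `code` in a JSON body or a connection failure triggers `attempt_reauthorization`, and the recursive call pins `is_retry=True` so a second failure returns None instead of recursing forever. A hedged sketch of a direct call (the URL is illustrative; real callers go through the `api` module wrappers):

resp = http_req(blink,
                url="https://rest-prod.immedia-semi.com/homescreen",  # assumed endpoint
                headers=blink._auth_header,
                reqtype="get")  # parsed JSON dict, or None on failure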
5,259 | fronzbot/blinkpy | blinkpy/blinkpy.py | Blink.start | def start(self):
"""
Perform full system setup.
Method logs in and sets auth token, urls, and ids for future requests.
Essentially this is just a wrapper function for ease of use.
"""
if self._username is None or self._password is None:
if not self.login():
return
elif not self.get_auth_token():
return
camera_list = self.get_cameras()
networks = self.get_ids()
for network_name, network_id in networks.items():
if network_id not in camera_list.keys():
camera_list[network_id] = {}
_LOGGER.warning("No cameras found for %s", network_name)
sync_module = BlinkSyncModule(self,
network_name,
network_id,
camera_list[network_id])
sync_module.start()
self.sync[network_name] = sync_module
self.cameras = self.merge_cameras() | python | def start(self):
"""
Perform full system setup.
Method logs in and sets auth token, urls, and ids for future requests.
Essentially this is just a wrapper function for ease of use.
"""
if self._username is None or self._password is None:
if not self.login():
return
elif not self.get_auth_token():
return
camera_list = self.get_cameras()
networks = self.get_ids()
for network_name, network_id in networks.items():
if network_id not in camera_list.keys():
camera_list[network_id] = {}
_LOGGER.warning("No cameras found for %s", network_name)
sync_module = BlinkSyncModule(self,
network_name,
network_id,
camera_list[network_id])
sync_module.start()
self.sync[network_name] = sync_module
self.cameras = self.merge_cameras() | [
"def",
"start",
"(",
"self",
")",
":",
"if",
"self",
".",
"_username",
"is",
"None",
"or",
"self",
".",
"_password",
"is",
"None",
":",
"if",
"not",
"self",
".",
"login",
"(",
")",
":",
"return",
"elif",
"not",
"self",
".",
"get_auth_token",
"(",
")",
":",
"return",
"camera_list",
"=",
"self",
".",
"get_cameras",
"(",
")",
"networks",
"=",
"self",
".",
"get_ids",
"(",
")",
"for",
"network_name",
",",
"network_id",
"in",
"networks",
".",
"items",
"(",
")",
":",
"if",
"network_id",
"not",
"in",
"camera_list",
".",
"keys",
"(",
")",
":",
"camera_list",
"[",
"network_id",
"]",
"=",
"{",
"}",
"_LOGGER",
".",
"warning",
"(",
"\"No cameras found for %s\"",
",",
"network_name",
")",
"sync_module",
"=",
"BlinkSyncModule",
"(",
"self",
",",
"network_name",
",",
"network_id",
",",
"camera_list",
"[",
"network_id",
"]",
")",
"sync_module",
".",
"start",
"(",
")",
"self",
".",
"sync",
"[",
"network_name",
"]",
"=",
"sync_module",
"self",
".",
"cameras",
"=",
"self",
".",
"merge_cameras",
"(",
")"
] | Perform full system setup.
Method logs in and sets auth token, urls, and ids for future requests.
Essentially this is just a wrapper function for ease of use. | [
"Perform",
"full",
"system",
"setup",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/blinkpy.py#L82-L107 |
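Putting the records together, the typical entry point is constructing `Blink` and calling `start()`. A minimal sketch, assuming the constructor accepts username/password keywords matching the private attributes used above; omit them to fall back to the interactive `login()` prompt:

from blinkpy import blinkpy

blink = blinkpy.Blink(username="user@example.com", password="password")  # assumed kwargs
blink.start()  # authenticates, builds sync modules, merges cameras
for name, camera in blink.cameras.items():
    print(name, camera.attributes["battery"])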
5,260 | fronzbot/blinkpy | blinkpy/blinkpy.py | Blink.login | def login(self):
"""Prompt user for username and password."""
self._username = input("Username:")
self._password = getpass.getpass("Password:")
if self.get_auth_token():
_LOGGER.debug("Login successful!")
return True
_LOGGER.warning("Unable to login with %s.", self._username)
return False | python | def login(self):
"""Prompt user for username and password."""
self._username = input("Username:")
self._password = getpass.getpass("Password:")
if self.get_auth_token():
_LOGGER.debug("Login successful!")
return True
_LOGGER.warning("Unable to login with %s.", self._username)
return False | [
"def",
"login",
"(",
"self",
")",
":",
"self",
".",
"_username",
"=",
"input",
"(",
"\"Username:\"",
")",
"self",
".",
"_password",
"=",
"getpass",
".",
"getpass",
"(",
"\"Password:\"",
")",
"if",
"self",
".",
"get_auth_token",
"(",
")",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Login successful!\"",
")",
"return",
"True",
"_LOGGER",
".",
"warning",
"(",
"\"Unable to login with %s.\"",
",",
"self",
".",
"_username",
")",
"return",
"False"
] | Prompt user for username and password. | [
"Prompt",
"user",
"for",
"username",
"and",
"password",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/blinkpy.py#L109-L117 |
5,261 | fronzbot/blinkpy | blinkpy/blinkpy.py | Blink.get_auth_token | def get_auth_token(self, is_retry=False):
"""Retrieve the authentication token from Blink."""
if not isinstance(self._username, str):
raise BlinkAuthenticationException(ERROR.USERNAME)
if not isinstance(self._password, str):
raise BlinkAuthenticationException(ERROR.PASSWORD)
login_urls = [LOGIN_URL, OLD_LOGIN_URL, LOGIN_BACKUP_URL]
response = self.login_request(login_urls, is_retry=is_retry)
if not response:
return False
self._host = "{}.{}".format(self.region_id, BLINK_URL)
self._token = response['authtoken']['authtoken']
self.networks = response['networks']
self._auth_header = {'Host': self._host,
'TOKEN_AUTH': self._token}
self.urls = BlinkURLHandler(self.region_id)
return self._auth_header | python | def get_auth_token(self, is_retry=False):
"""Retrieve the authentication token from Blink."""
if not isinstance(self._username, str):
raise BlinkAuthenticationException(ERROR.USERNAME)
if not isinstance(self._password, str):
raise BlinkAuthenticationException(ERROR.PASSWORD)
login_urls = [LOGIN_URL, OLD_LOGIN_URL, LOGIN_BACKUP_URL]
response = self.login_request(login_urls, is_retry=is_retry)
if not response:
return False
self._host = "{}.{}".format(self.region_id, BLINK_URL)
self._token = response['authtoken']['authtoken']
self.networks = response['networks']
self._auth_header = {'Host': self._host,
'TOKEN_AUTH': self._token}
self.urls = BlinkURLHandler(self.region_id)
return self._auth_header | [
"def",
"get_auth_token",
"(",
"self",
",",
"is_retry",
"=",
"False",
")",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"_username",
",",
"str",
")",
":",
"raise",
"BlinkAuthenticationException",
"(",
"ERROR",
".",
"USERNAME",
")",
"if",
"not",
"isinstance",
"(",
"self",
".",
"_password",
",",
"str",
")",
":",
"raise",
"BlinkAuthenticationException",
"(",
"ERROR",
".",
"PASSWORD",
")",
"login_urls",
"=",
"[",
"LOGIN_URL",
",",
"OLD_LOGIN_URL",
",",
"LOGIN_BACKUP_URL",
"]",
"response",
"=",
"self",
".",
"login_request",
"(",
"login_urls",
",",
"is_retry",
"=",
"is_retry",
")",
"if",
"not",
"response",
":",
"return",
"False",
"self",
".",
"_host",
"=",
"\"{}.{}\"",
".",
"format",
"(",
"self",
".",
"region_id",
",",
"BLINK_URL",
")",
"self",
".",
"_token",
"=",
"response",
"[",
"'authtoken'",
"]",
"[",
"'authtoken'",
"]",
"self",
".",
"networks",
"=",
"response",
"[",
"'networks'",
"]",
"self",
".",
"_auth_header",
"=",
"{",
"'Host'",
":",
"self",
".",
"_host",
",",
"'TOKEN_AUTH'",
":",
"self",
".",
"_token",
"}",
"self",
".",
"urls",
"=",
"BlinkURLHandler",
"(",
"self",
".",
"region_id",
")",
"return",
"self",
".",
"_auth_header"
] | Retrieve the authentication token from Blink. | [
"Retrieve",
"the",
"authentication",
"token",
"from",
"Blink",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/blinkpy.py#L119-L141 |
5,262 | fronzbot/blinkpy | blinkpy/blinkpy.py | Blink.login_request | def login_request(self, login_urls, is_retry=False):
"""Make a login request."""
try:
login_url = login_urls.pop(0)
except IndexError:
_LOGGER.error("Could not login to blink servers.")
return False
_LOGGER.info("Attempting login with %s", login_url)
response = api.request_login(self,
login_url,
self._username,
self._password,
is_retry=is_retry)
try:
if response.status_code != 200:
response = self.login_request(login_urls)
response = response.json()
(self.region_id, self.region), = response['region'].items()
except AttributeError:
_LOGGER.error("Login API endpoint failed with response %s",
response,
exc_info=True)
return False
except KeyError:
_LOGGER.warning("Could not extract region info.")
self.region_id = 'piri'
self.region = 'UNKNOWN'
self._login_url = login_url
return response | python | def login_request(self, login_urls, is_retry=False):
"""Make a login request."""
try:
login_url = login_urls.pop(0)
except IndexError:
_LOGGER.error("Could not login to blink servers.")
return False
_LOGGER.info("Attempting login with %s", login_url)
response = api.request_login(self,
login_url,
self._username,
self._password,
is_retry=is_retry)
try:
if response.status_code != 200:
response = self.login_request(login_urls)
response = response.json()
(self.region_id, self.region), = response['region'].items()
except AttributeError:
_LOGGER.error("Login API endpoint failed with response %s",
response,
exc_info=True)
return False
except KeyError:
_LOGGER.warning("Could not extract region info.")
self.region_id = 'piri'
self.region = 'UNKNOWN'
self._login_url = login_url
return response | [
"def",
"login_request",
"(",
"self",
",",
"login_urls",
",",
"is_retry",
"=",
"False",
")",
":",
"try",
":",
"login_url",
"=",
"login_urls",
".",
"pop",
"(",
"0",
")",
"except",
"IndexError",
":",
"_LOGGER",
".",
"error",
"(",
"\"Could not login to blink servers.\"",
")",
"return",
"False",
"_LOGGER",
".",
"info",
"(",
"\"Attempting login with %s\"",
",",
"login_url",
")",
"response",
"=",
"api",
".",
"request_login",
"(",
"self",
",",
"login_url",
",",
"self",
".",
"_username",
",",
"self",
".",
"_password",
",",
"is_retry",
"=",
"is_retry",
")",
"try",
":",
"if",
"response",
".",
"status_code",
"!=",
"200",
":",
"response",
"=",
"self",
".",
"login_request",
"(",
"login_urls",
")",
"response",
"=",
"response",
".",
"json",
"(",
")",
"(",
"self",
".",
"region_id",
",",
"self",
".",
"region",
")",
",",
"=",
"response",
"[",
"'region'",
"]",
".",
"items",
"(",
")",
"except",
"AttributeError",
":",
"_LOGGER",
".",
"error",
"(",
"\"Login API endpoint failed with response %s\"",
",",
"response",
",",
"exc_info",
"=",
"True",
")",
"return",
"False",
"except",
"KeyError",
":",
"_LOGGER",
".",
"warning",
"(",
"\"Could not extract region info.\"",
")",
"self",
".",
"region_id",
"=",
"'piri'",
"self",
".",
"region",
"=",
"'UNKNOWN'",
"self",
".",
"_login_url",
"=",
"login_url",
"return",
"response"
] | Make a login request. | [
"Make",
"a",
"login",
"request",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/blinkpy.py#L143-L176 |
5,263 | fronzbot/blinkpy | blinkpy/blinkpy.py | Blink.get_ids | def get_ids(self):
"""Set the network ID and Account ID."""
response = api.request_networks(self)
all_networks = []
network_dict = {}
for network, status in self.networks.items():
if status['onboarded']:
all_networks.append('{}'.format(network))
network_dict[status['name']] = network
# For the first onboarded network we find, grab the account id
for resp in response['networks']:
if str(resp['id']) in all_networks:
self.account_id = resp['account_id']
break
self.network_ids = all_networks
return network_dict | python | def get_ids(self):
"""Set the network ID and Account ID."""
response = api.request_networks(self)
all_networks = []
network_dict = {}
for network, status in self.networks.items():
if status['onboarded']:
all_networks.append('{}'.format(network))
network_dict[status['name']] = network
# For the first onboarded network we find, grab the account id
for resp in response['networks']:
if str(resp['id']) in all_networks:
self.account_id = resp['account_id']
break
self.network_ids = all_networks
return network_dict | [
"def",
"get_ids",
"(",
"self",
")",
":",
"response",
"=",
"api",
".",
"request_networks",
"(",
"self",
")",
"all_networks",
"=",
"[",
"]",
"network_dict",
"=",
"{",
"}",
"for",
"network",
",",
"status",
"in",
"self",
".",
"networks",
".",
"items",
"(",
")",
":",
"if",
"status",
"[",
"'onboarded'",
"]",
":",
"all_networks",
".",
"append",
"(",
"'{}'",
".",
"format",
"(",
"network",
")",
")",
"network_dict",
"[",
"status",
"[",
"'name'",
"]",
"]",
"=",
"network",
"# For the first onboarded network we find, grab the account id",
"for",
"resp",
"in",
"response",
"[",
"'networks'",
"]",
":",
"if",
"str",
"(",
"resp",
"[",
"'id'",
"]",
")",
"in",
"all_networks",
":",
"self",
".",
"account_id",
"=",
"resp",
"[",
"'account_id'",
"]",
"break",
"self",
".",
"network_ids",
"=",
"all_networks",
"return",
"network_dict"
] | Set the network ID and Account ID. | [
"Set",
"the",
"network",
"ID",
"and",
"Account",
"ID",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/blinkpy.py#L178-L195 |
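An illustrative sketch of how get_ids() is typically consumed, assuming blink is an authenticated Blink instance; the network name and id values are hypothetical:

# get_ids() returns a {network name: network id} mapping and also caches
# blink.account_id and blink.network_ids as side effects
networks = blink.get_ids()  # e.g. {'Home': '1234'} (hypothetical values)
for name, network_id in networks.items():
    print(name, network_id)
print(blink.account_id, blink.network_ids)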
5,264 | fronzbot/blinkpy | blinkpy/blinkpy.py | Blink.get_cameras | def get_cameras(self):
"""Retrieve a camera list for each onboarded network."""
response = api.request_homescreen(self)
try:
all_cameras = {}
for camera in response['cameras']:
camera_network = str(camera['network_id'])
camera_name = camera['name']
camera_id = camera['id']
camera_info = {'name': camera_name, 'id': camera_id}
if camera_network not in all_cameras:
all_cameras[camera_network] = []
all_cameras[camera_network].append(camera_info)
return all_cameras
except KeyError:
_LOGGER.error("Initialization failue. Could not retrieve cameras.")
return {} | python | def get_cameras(self):
"""Retrieve a camera list for each onboarded network."""
response = api.request_homescreen(self)
try:
all_cameras = {}
for camera in response['cameras']:
camera_network = str(camera['network_id'])
camera_name = camera['name']
camera_id = camera['id']
camera_info = {'name': camera_name, 'id': camera_id}
if camera_network not in all_cameras:
all_cameras[camera_network] = []
all_cameras[camera_network].append(camera_info)
return all_cameras
except KeyError:
_LOGGER.error("Initialization failue. Could not retrieve cameras.")
return {} | [
"def",
"get_cameras",
"(",
"self",
")",
":",
"response",
"=",
"api",
".",
"request_homescreen",
"(",
"self",
")",
"try",
":",
"all_cameras",
"=",
"{",
"}",
"for",
"camera",
"in",
"response",
"[",
"'cameras'",
"]",
":",
"camera_network",
"=",
"str",
"(",
"camera",
"[",
"'network_id'",
"]",
")",
"camera_name",
"=",
"camera",
"[",
"'name'",
"]",
"camera_id",
"=",
"camera",
"[",
"'id'",
"]",
"camera_info",
"=",
"{",
"'name'",
":",
"camera_name",
",",
"'id'",
":",
"camera_id",
"}",
"if",
"camera_network",
"not",
"in",
"all_cameras",
":",
"all_cameras",
"[",
"camera_network",
"]",
"=",
"[",
"]",
"all_cameras",
"[",
"camera_network",
"]",
".",
"append",
"(",
"camera_info",
")",
"return",
"all_cameras",
"except",
"KeyError",
":",
"_LOGGER",
".",
"error",
"(",
"\"Initialization failue. Could not retrieve cameras.\"",
")",
"return",
"{",
"}"
] | Retrieve a camera list for each onboarded network. | [
"Retrieve",
"a",
"camera",
"list",
"for",
"each",
"onboarded",
"network",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/blinkpy.py#L197-L214 |
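A sketch of the structure get_cameras() returns, assuming an authenticated blink instance; keys are network ids rendered as strings and values are lists of per-camera dicts, as built in the loop above:

cameras = blink.get_cameras()
# e.g. {'1234': [{'name': 'Front Door', 'id': 111}]} (hypothetical values)
for network_id, cam_list in cameras.items():
    for cam in cam_list:
        print(network_id, cam['name'], cam['id'])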
5,265 | fronzbot/blinkpy | blinkpy/blinkpy.py | Blink.refresh | def refresh(self, force_cache=False):
"""
Perform a system refresh.
:param force_cache: Force an update of the camera cache
"""
if self.check_if_ok_to_update() or force_cache:
for sync_name, sync_module in self.sync.items():
_LOGGER.debug("Attempting refresh of sync %s", sync_name)
sync_module.refresh(force_cache=force_cache)
if not force_cache:
# Prevents rapid clearing of motion detect property
self.last_refresh = int(time.time())
return True
return False | python | def refresh(self, force_cache=False):
"""
Perform a system refresh.
:param force_cache: Force an update of the camera cache
"""
if self.check_if_ok_to_update() or force_cache:
for sync_name, sync_module in self.sync.items():
_LOGGER.debug("Attempting refresh of sync %s", sync_name)
sync_module.refresh(force_cache=force_cache)
if not force_cache:
# Prevents rapid clearing of motion detect property
self.last_refresh = int(time.time())
return True
return False | [
"def",
"refresh",
"(",
"self",
",",
"force_cache",
"=",
"False",
")",
":",
"if",
"self",
".",
"check_if_ok_to_update",
"(",
")",
"or",
"force_cache",
":",
"for",
"sync_name",
",",
"sync_module",
"in",
"self",
".",
"sync",
".",
"items",
"(",
")",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Attempting refresh of sync %s\"",
",",
"sync_name",
")",
"sync_module",
".",
"refresh",
"(",
"force_cache",
"=",
"force_cache",
")",
"if",
"not",
"force_cache",
":",
"# Prevents rapid clearing of motion detect property",
"self",
".",
"last_refresh",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"return",
"True",
"return",
"False"
] | Perform a system refresh.
:param force_cache: Force an update of the camera cache | [
"Perform",
"a",
"system",
"refresh",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/blinkpy.py#L217-L231 |
5,266 | fronzbot/blinkpy | blinkpy/blinkpy.py | Blink.check_if_ok_to_update | def check_if_ok_to_update(self):
"""Check if it is ok to perform an http request."""
current_time = int(time.time())
last_refresh = self.last_refresh
if last_refresh is None:
last_refresh = 0
if current_time >= (last_refresh + self.refresh_rate):
return True
return False | python | def check_if_ok_to_update(self):
"""Check if it is ok to perform an http request."""
current_time = int(time.time())
last_refresh = self.last_refresh
if last_refresh is None:
last_refresh = 0
if current_time >= (last_refresh + self.refresh_rate):
return True
return False | [
"def",
"check_if_ok_to_update",
"(",
"self",
")",
":",
"current_time",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"last_refresh",
"=",
"self",
".",
"last_refresh",
"if",
"last_refresh",
"is",
"None",
":",
"last_refresh",
"=",
"0",
"if",
"current_time",
">=",
"(",
"last_refresh",
"+",
"self",
".",
"refresh_rate",
")",
":",
"return",
"True",
"return",
"False"
] | Check if it is ok to perform an http request. | [
"Check",
"if",
"it",
"is",
"ok",
"to",
"perform",
"an",
"http",
"request",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/blinkpy.py#L233-L241 |
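The throttle above is plain epoch arithmetic; a worked sketch with hypothetical timestamps shows when an update is allowed:

refresh_rate = 30         # seconds between permitted requests
last_refresh = 1000000    # hypothetical epoch of the previous refresh
for current_time in (1000020, 1000030):
    # mirrors check_if_ok_to_update: allowed once the interval has elapsed
    print(current_time, current_time >= last_refresh + refresh_rate)
# prints False for 1000020 and True for 1000030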
5,267 | fronzbot/blinkpy | blinkpy/blinkpy.py | Blink.merge_cameras | def merge_cameras(self):
"""Merge all sync camera dicts into one."""
combined = CaseInsensitiveDict({})
for sync in self.sync:
combined = merge_dicts(combined, self.sync[sync].cameras)
return combined | python | def merge_cameras(self):
"""Merge all sync camera dicts into one."""
combined = CaseInsensitiveDict({})
for sync in self.sync:
combined = merge_dicts(combined, self.sync[sync].cameras)
return combined | [
"def",
"merge_cameras",
"(",
"self",
")",
":",
"combined",
"=",
"CaseInsensitiveDict",
"(",
"{",
"}",
")",
"for",
"sync",
"in",
"self",
".",
"sync",
":",
"combined",
"=",
"merge_dicts",
"(",
"combined",
",",
"self",
".",
"sync",
"[",
"sync",
"]",
".",
"cameras",
")",
"return",
"combined"
] | Merge all sync camera dicts into one. | [
"Merge",
"all",
"sync",
"camera",
"dicts",
"into",
"one",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/blinkpy.py#L243-L248 |
5,268 | fronzbot/blinkpy | blinkpy/blinkpy.py | Blink.download_videos | def download_videos(self, path, since=None, camera='all', stop=10):
"""
Download all videos from server since specified time.
:param path: Path to write files. /path/<cameraname>_<recorddate>.mp4
:param since: Date and time to get videos from.
Ex: "2018/07/28 12:33:00" to retrieve videos since
July 28th 2018 at 12:33:00
:param camera: Camera name to retrieve. Defaults to "all".
Use a list for multiple cameras.
:param stop: Page to stop on (~25 items per page. Default page 10).
"""
if since is None:
since_epochs = self.last_refresh
else:
parsed_datetime = parse(since, fuzzy=True)
since_epochs = parsed_datetime.timestamp()
formatted_date = get_time(time_to_convert=since_epochs)
_LOGGER.info("Retrieving videos since %s", formatted_date)
if not isinstance(camera, list):
camera = [camera]
for page in range(1, stop):
response = api.request_videos(self, time=since_epochs, page=page)
_LOGGER.debug("Processing page %s", page)
try:
result = response['videos']
if not result:
raise IndexError
except (KeyError, IndexError):
_LOGGER.info("No videos found on page %s. Exiting.", page)
break
self._parse_downloaded_items(result, camera, path) | python | def download_videos(self, path, since=None, camera='all', stop=10):
"""
Download all videos from server since specified time.
:param path: Path to write files. /path/<cameraname>_<recorddate>.mp4
:param since: Date and time to get videos from.
Ex: "2018/07/28 12:33:00" to retrieve videos since
July 28th 2018 at 12:33:00
:param camera: Camera name to retrieve. Defaults to "all".
Use a list for multiple cameras.
:param stop: Page to stop on (~25 items per page. Default page 10).
"""
if since is None:
since_epochs = self.last_refresh
else:
parsed_datetime = parse(since, fuzzy=True)
since_epochs = parsed_datetime.timestamp()
formatted_date = get_time(time_to_convert=since_epochs)
_LOGGER.info("Retrieving videos since %s", formatted_date)
if not isinstance(camera, list):
camera = [camera]
for page in range(1, stop):
response = api.request_videos(self, time=since_epochs, page=page)
_LOGGER.debug("Processing page %s", page)
try:
result = response['videos']
if not result:
raise IndexError
except (KeyError, IndexError):
_LOGGER.info("No videos found on page %s. Exiting.", page)
break
self._parse_downloaded_items(result, camera, path) | [
"def",
"download_videos",
"(",
"self",
",",
"path",
",",
"since",
"=",
"None",
",",
"camera",
"=",
"'all'",
",",
"stop",
"=",
"10",
")",
":",
"if",
"since",
"is",
"None",
":",
"since_epochs",
"=",
"self",
".",
"last_refresh",
"else",
":",
"parsed_datetime",
"=",
"parse",
"(",
"since",
",",
"fuzzy",
"=",
"True",
")",
"since_epochs",
"=",
"parsed_datetime",
".",
"timestamp",
"(",
")",
"formatted_date",
"=",
"get_time",
"(",
"time_to_convert",
"=",
"since_epochs",
")",
"_LOGGER",
".",
"info",
"(",
"\"Retrieving videos since %s\"",
",",
"formatted_date",
")",
"if",
"not",
"isinstance",
"(",
"camera",
",",
"list",
")",
":",
"camera",
"=",
"[",
"camera",
"]",
"for",
"page",
"in",
"range",
"(",
"1",
",",
"stop",
")",
":",
"response",
"=",
"api",
".",
"request_videos",
"(",
"self",
",",
"time",
"=",
"since_epochs",
",",
"page",
"=",
"page",
")",
"_LOGGER",
".",
"debug",
"(",
"\"Processing page %s\"",
",",
"page",
")",
"try",
":",
"result",
"=",
"response",
"[",
"'videos'",
"]",
"if",
"not",
"result",
":",
"raise",
"IndexError",
"except",
"(",
"KeyError",
",",
"IndexError",
")",
":",
"_LOGGER",
".",
"info",
"(",
"\"No videos found on page %s. Exiting.\"",
",",
"page",
")",
"break",
"self",
".",
"_parse_downloaded_items",
"(",
"result",
",",
"camera",
",",
"path",
")"
] | Download all videos from server since specified time.
:param path: Path to write files. /path/<cameraname>_<recorddate>.mp4
:param since: Date and time to get videos from.
Ex: "2018/07/28 12:33:00" to retrieve videos since
July 28th 2018 at 12:33:00
:param camera: Camera name to retrieve. Defaults to "all".
Use a list for multiple cameras.
:param stop: Page to stop on (~25 items per page. Default page 10). | [
"Download",
"all",
"videos",
"from",
"server",
"since",
"specified",
"time",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/blinkpy.py#L250-L285 |
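A hedged usage sketch for download_videos(); the path, date and camera name are hypothetical, and per the docstring files land as /path/<cameraname>_<recorddate>.mp4:

blink.download_videos(
    '/tmp/blink_clips',           # hypothetical output directory
    since='2018/07/28 12:33:00',  # parsed fuzzily by dateutil
    camera=['Front Door'],        # or 'all' for every camera
    stop=5,                       # scan at most pages 1-4
)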
5,269 | fronzbot/blinkpy | blinkpy/blinkpy.py | Blink._parse_downloaded_items | def _parse_downloaded_items(self, result, camera, path):
"""Parse downloaded videos."""
for item in result:
try:
created_at = item['created_at']
camera_name = item['camera_name']
is_deleted = item['deleted']
address = item['address']
except KeyError:
_LOGGER.info("Missing clip information, skipping...")
continue
if camera_name not in camera and 'all' not in camera:
_LOGGER.debug("Skipping videos for %s.", camera_name)
continue
if is_deleted:
_LOGGER.debug("%s: %s is marked as deleted.",
camera_name,
address)
continue
clip_address = "{}{}".format(self.urls.base_url, address)
filename = "{}_{}.mp4".format(camera_name, created_at)
filename = os.path.join(path, filename)
if os.path.isfile(filename):
_LOGGER.info("%s already exists, skipping...", filename)
continue
response = api.http_get(self, url=clip_address,
stream=True, json=False)
with open(filename, 'wb') as vidfile:
copyfileobj(response.raw, vidfile)
_LOGGER.info("Downloaded video to %s", filename) | python | def _parse_downloaded_items(self, result, camera, path):
"""Parse downloaded videos."""
for item in result:
try:
created_at = item['created_at']
camera_name = item['camera_name']
is_deleted = item['deleted']
address = item['address']
except KeyError:
_LOGGER.info("Missing clip information, skipping...")
continue
if camera_name not in camera and 'all' not in camera:
_LOGGER.debug("Skipping videos for %s.", camera_name)
continue
if is_deleted:
_LOGGER.debug("%s: %s is marked as deleted.",
camera_name,
address)
continue
clip_address = "{}{}".format(self.urls.base_url, address)
filename = "{}_{}.mp4".format(camera_name, created_at)
filename = os.path.join(path, filename)
if os.path.isfile(filename):
_LOGGER.info("%s already exists, skipping...", filename)
continue
response = api.http_get(self, url=clip_address,
stream=True, json=False)
with open(filename, 'wb') as vidfile:
copyfileobj(response.raw, vidfile)
_LOGGER.info("Downloaded video to %s", filename) | [
"def",
"_parse_downloaded_items",
"(",
"self",
",",
"result",
",",
"camera",
",",
"path",
")",
":",
"for",
"item",
"in",
"result",
":",
"try",
":",
"created_at",
"=",
"item",
"[",
"'created_at'",
"]",
"camera_name",
"=",
"item",
"[",
"'camera_name'",
"]",
"is_deleted",
"=",
"item",
"[",
"'deleted'",
"]",
"address",
"=",
"item",
"[",
"'address'",
"]",
"except",
"KeyError",
":",
"_LOGGER",
".",
"info",
"(",
"\"Missing clip information, skipping...\"",
")",
"continue",
"if",
"camera_name",
"not",
"in",
"camera",
"and",
"'all'",
"not",
"in",
"camera",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Skipping videos for %s.\"",
",",
"camera_name",
")",
"continue",
"if",
"is_deleted",
":",
"_LOGGER",
".",
"debug",
"(",
"\"%s: %s is marked as deleted.\"",
",",
"camera_name",
",",
"address",
")",
"continue",
"clip_address",
"=",
"\"{}{}\"",
".",
"format",
"(",
"self",
".",
"urls",
".",
"base_url",
",",
"address",
")",
"filename",
"=",
"\"{}_{}.mp4\"",
".",
"format",
"(",
"camera_name",
",",
"created_at",
")",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"filename",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
":",
"_LOGGER",
".",
"info",
"(",
"\"%s already exists, skipping...\"",
",",
"filename",
")",
"continue",
"response",
"=",
"api",
".",
"http_get",
"(",
"self",
",",
"url",
"=",
"clip_address",
",",
"stream",
"=",
"True",
",",
"json",
"=",
"False",
")",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"vidfile",
":",
"copyfileobj",
"(",
"response",
".",
"raw",
",",
"vidfile",
")",
"_LOGGER",
".",
"info",
"(",
"\"Downloaded video to %s\"",
",",
"filename",
")"
] | Parse downloaded videos. | [
"Parse",
"downloaded",
"videos",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/blinkpy.py#L287-L322 |

5,270 | fronzbot/blinkpy | blinkpy/api.py | request_login | def request_login(blink, url, username, password, is_retry=False):
"""
Login request.
:param blink: Blink instance.
:param url: Login url.
:param username: Blink username.
:param password: Blink password.
:param is_retry: Is this part of a re-authorization attempt?
"""
headers = {
'Host': DEFAULT_URL,
'Content-Type': 'application/json'
}
data = dumps({
'email': username,
'password': password,
'client_specifier': 'iPhone 9.2 | 2.2 | 222'
})
return http_req(blink, url=url, headers=headers, data=data,
json_resp=False, reqtype='post', is_retry=is_retry) | python | def request_login(blink, url, username, password, is_retry=False):
"""
Login request.
:param blink: Blink instance.
:param url: Login url.
:param username: Blink username.
:param password: Blink password.
:param is_retry: Is this part of a re-authorization attempt?
"""
headers = {
'Host': DEFAULT_URL,
'Content-Type': 'application/json'
}
data = dumps({
'email': username,
'password': password,
'client_specifier': 'iPhone 9.2 | 2.2 | 222'
})
return http_req(blink, url=url, headers=headers, data=data,
json_resp=False, reqtype='post', is_retry=is_retry) | [
"def",
"request_login",
"(",
"blink",
",",
"url",
",",
"username",
",",
"password",
",",
"is_retry",
"=",
"False",
")",
":",
"headers",
"=",
"{",
"'Host'",
":",
"DEFAULT_URL",
",",
"'Content-Type'",
":",
"'application/json'",
"}",
"data",
"=",
"dumps",
"(",
"{",
"'email'",
":",
"username",
",",
"'password'",
":",
"password",
",",
"'client_specifier'",
":",
"'iPhone 9.2 | 2.2 | 222'",
"}",
")",
"return",
"http_req",
"(",
"blink",
",",
"url",
"=",
"url",
",",
"headers",
"=",
"headers",
",",
"data",
"=",
"data",
",",
"json_resp",
"=",
"False",
",",
"reqtype",
"=",
"'post'",
",",
"is_retry",
"=",
"is_retry",
")"
] | Login request.
:param blink: Blink instance.
:param url: Login url.
:param username: Blink username.
:param password: Blink password.
:param is_retry: Is this part of a re-authorization attempt? | [
"Login",
"request",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/api.py#L14-L34 |
5,271 | fronzbot/blinkpy | blinkpy/api.py | request_networks | def request_networks(blink):
"""Request all networks information."""
url = "{}/networks".format(blink.urls.base_url)
return http_get(blink, url) | python | def request_networks(blink):
"""Request all networks information."""
url = "{}/networks".format(blink.urls.base_url)
return http_get(blink, url) | [
"def",
"request_networks",
"(",
"blink",
")",
":",
"url",
"=",
"\"{}/networks\"",
".",
"format",
"(",
"blink",
".",
"urls",
".",
"base_url",
")",
"return",
"http_get",
"(",
"blink",
",",
"url",
")"
] | Request all networks information. | [
"Request",
"all",
"networks",
"information",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/api.py#L37-L40 |
5,272 | fronzbot/blinkpy | blinkpy/api.py | request_network_status | def request_network_status(blink, network):
"""
Request network information.
:param blink: Blink instance.
:param network: Sync module network id.
"""
url = "{}/network/{}".format(blink.urls.base_url, network)
return http_get(blink, url) | python | def request_network_status(blink, network):
"""
Request network information.
:param blink: Blink instance.
:param network: Sync module network id.
"""
url = "{}/network/{}".format(blink.urls.base_url, network)
return http_get(blink, url) | [
"def",
"request_network_status",
"(",
"blink",
",",
"network",
")",
":",
"url",
"=",
"\"{}/network/{}\"",
".",
"format",
"(",
"blink",
".",
"urls",
".",
"base_url",
",",
"network",
")",
"return",
"http_get",
"(",
"blink",
",",
"url",
")"
] | Request network information.
:param blink: Blink instance.
:param network: Sync module network id. | [
"Request",
"network",
"information",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/api.py#L44-L52 |
5,273 | fronzbot/blinkpy | blinkpy/api.py | request_syncmodule | def request_syncmodule(blink, network):
"""
Request sync module info.
:param blink: Blink instance.
:param network: Sync module network id.
"""
url = "{}/network/{}/syncmodules".format(blink.urls.base_url, network)
return http_get(blink, url) | python | def request_syncmodule(blink, network):
"""
Request sync module info.
:param blink: Blink instance.
:param network: Sync module network id.
"""
url = "{}/network/{}/syncmodules".format(blink.urls.base_url, network)
return http_get(blink, url) | [
"def",
"request_syncmodule",
"(",
"blink",
",",
"network",
")",
":",
"url",
"=",
"\"{}/network/{}/syncmodules\"",
".",
"format",
"(",
"blink",
".",
"urls",
".",
"base_url",
",",
"network",
")",
"return",
"http_get",
"(",
"blink",
",",
"url",
")"
] | Request sync module info.
:param blink: Blink instance.
:param network: Sync module network id. | [
"Request",
"sync",
"module",
"info",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/api.py#L56-L64 |
5,274 | fronzbot/blinkpy | blinkpy/api.py | request_system_arm | def request_system_arm(blink, network):
"""
Arm system.
:param blink: Blink instance.
:param network: Sync module network id.
"""
url = "{}/network/{}/arm".format(blink.urls.base_url, network)
return http_post(blink, url) | python | def request_system_arm(blink, network):
"""
Arm system.
:param blink: Blink instance.
:param network: Sync module network id.
"""
url = "{}/network/{}/arm".format(blink.urls.base_url, network)
return http_post(blink, url) | [
"def",
"request_system_arm",
"(",
"blink",
",",
"network",
")",
":",
"url",
"=",
"\"{}/network/{}/arm\"",
".",
"format",
"(",
"blink",
".",
"urls",
".",
"base_url",
",",
"network",
")",
"return",
"http_post",
"(",
"blink",
",",
"url",
")"
] | Arm system.
:param blink: Blink instance.
:param network: Sync module network id. | [
"Arm",
"system",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/api.py#L68-L76 |
5,275 | fronzbot/blinkpy | blinkpy/api.py | request_system_disarm | def request_system_disarm(blink, network):
"""
Disarm system.
:param blink: Blink instance.
:param network: Sync module network id.
"""
url = "{}/network/{}/disarm".format(blink.urls.base_url, network)
return http_post(blink, url) | python | def request_system_disarm(blink, network):
"""
Disarm system.
:param blink: Blink instance.
:param network: Sync module network id.
"""
url = "{}/network/{}/disarm".format(blink.urls.base_url, network)
return http_post(blink, url) | [
"def",
"request_system_disarm",
"(",
"blink",
",",
"network",
")",
":",
"url",
"=",
"\"{}/network/{}/disarm\"",
".",
"format",
"(",
"blink",
".",
"urls",
".",
"base_url",
",",
"network",
")",
"return",
"http_post",
"(",
"blink",
",",
"url",
")"
] | Disarm system.
:param blink: Blink instance.
:param network: Sync module network id. | [
"Disarm",
"system",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/api.py#L80-L88 |
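A sketch of arming and disarming through the api module recorded above, assuming blink is an authenticated Blink instance; the network id '1234' is hypothetical and would normally come from blink.network_ids:

from blinkpy import api

api.request_system_arm(blink, '1234')     # POST .../network/1234/arm
api.request_system_disarm(blink, '1234')  # POST .../network/1234/disarm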
5,276 | fronzbot/blinkpy | blinkpy/api.py | request_command_status | def request_command_status(blink, network, command_id):
"""
Request command status.
:param blink: Blink instance.
:param network: Sync module network id.
:param command_id: Command id to check.
"""
url = "{}/network/{}/command/{}".format(blink.urls.base_url,
network,
command_id)
return http_get(blink, url) | python | def request_command_status(blink, network, command_id):
"""
Request command status.
:param blink: Blink instance.
:param network: Sync module network id.
:param command_id: Command id to check.
"""
url = "{}/network/{}/command/{}".format(blink.urls.base_url,
network,
command_id)
return http_get(blink, url) | [
"def",
"request_command_status",
"(",
"blink",
",",
"network",
",",
"command_id",
")",
":",
"url",
"=",
"\"{}/network/{}/command/{}\"",
".",
"format",
"(",
"blink",
".",
"urls",
".",
"base_url",
",",
"network",
",",
"command_id",
")",
"return",
"http_get",
"(",
"blink",
",",
"url",
")"
] | Request command status.
:param blink: Blink instance.
:param network: Sync module network id.
:param command_id: Command id to check. | [
"Request",
"command",
"status",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/api.py#L91-L102 |
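A polling sketch built on request_command_status(), assuming an authenticated blink instance; the 'id' and 'complete' response fields are assumptions about the Blink server payload, not something these records document:

import time

from blinkpy import api

network_id = '1234'                               # hypothetical network id
resp = api.request_system_arm(blink, network_id)
command_id = resp.get('id')                       # assumed field name
while True:
    status = api.request_command_status(blink, network_id, command_id)
    if status.get('complete'):                    # assumed field name
        break
    time.sleep(1)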
5,277 | fronzbot/blinkpy | blinkpy/api.py | request_homescreen | def request_homescreen(blink):
"""Request homescreen info."""
url = "{}/api/v3/accounts/{}/homescreen".format(blink.urls.base_url,
blink.account_id)
return http_get(blink, url) | python | def request_homescreen(blink):
"""Request homescreen info."""
url = "{}/api/v3/accounts/{}/homescreen".format(blink.urls.base_url,
blink.account_id)
return http_get(blink, url) | [
"def",
"request_homescreen",
"(",
"blink",
")",
":",
"url",
"=",
"\"{}/api/v3/accounts/{}/homescreen\"",
".",
"format",
"(",
"blink",
".",
"urls",
".",
"base_url",
",",
"blink",
".",
"account_id",
")",
"return",
"http_get",
"(",
"blink",
",",
"url",
")"
] | Request homescreen info. | [
"Request",
"homescreen",
"info",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/api.py#L106-L110 |
5,278 | fronzbot/blinkpy | blinkpy/api.py | request_sync_events | def request_sync_events(blink, network):
"""
Request events from sync module.
:param blink: Blink instance.
:param network: Sync module network id.
"""
url = "{}/events/network/{}".format(blink.urls.base_url, network)
return http_get(blink, url) | python | def request_sync_events(blink, network):
"""
Request events from sync module.
:param blink: Blink instance.
:param network: Sync module network id.
"""
url = "{}/events/network/{}".format(blink.urls.base_url, network)
return http_get(blink, url) | [
"def",
"request_sync_events",
"(",
"blink",
",",
"network",
")",
":",
"url",
"=",
"\"{}/events/network/{}\"",
".",
"format",
"(",
"blink",
".",
"urls",
".",
"base_url",
",",
"network",
")",
"return",
"http_get",
"(",
"blink",
",",
"url",
")"
] | Request events from sync module.
:param blink: Blink instance.
:param network: Sync module network id. | [
"Request",
"events",
"from",
"sync",
"module",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/api.py#L114-L122 |
5,279 | fronzbot/blinkpy | blinkpy/api.py | request_video_count | def request_video_count(blink):
"""Request total video count."""
url = "{}/api/v2/videos/count".format(blink.urls.base_url)
return http_get(blink, url) | python | def request_video_count(blink):
"""Request total video count."""
url = "{}/api/v2/videos/count".format(blink.urls.base_url)
return http_get(blink, url) | [
"def",
"request_video_count",
"(",
"blink",
")",
":",
"url",
"=",
"\"{}/api/v2/videos/count\"",
".",
"format",
"(",
"blink",
".",
"urls",
".",
"base_url",
")",
"return",
"http_get",
"(",
"blink",
",",
"url",
")"
] | Request total video count. | [
"Request",
"total",
"video",
"count",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/api.py#L156-L159 |
5,280 | fronzbot/blinkpy | blinkpy/api.py | request_videos | def request_videos(blink, time=None, page=0):
"""
Perform a request for videos.
:param blink: Blink instance.
:param time: Get videos since this time. In epoch seconds.
:param page: Page number to get videos from.
"""
timestamp = get_time(time)
url = "{}/api/v2/videos/changed?since={}&page={}".format(
blink.urls.base_url, timestamp, page)
return http_get(blink, url) | python | def request_videos(blink, time=None, page=0):
"""
Perform a request for videos.
:param blink: Blink instance.
:param time: Get videos since this time. In epoch seconds.
:param page: Page number to get videos from.
"""
timestamp = get_time(time)
url = "{}/api/v2/videos/changed?since={}&page={}".format(
blink.urls.base_url, timestamp, page)
return http_get(blink, url) | [
"def",
"request_videos",
"(",
"blink",
",",
"time",
"=",
"None",
",",
"page",
"=",
"0",
")",
":",
"timestamp",
"=",
"get_time",
"(",
"time",
")",
"url",
"=",
"\"{}/api/v2/videos/changed?since={}&page={}\"",
".",
"format",
"(",
"blink",
".",
"urls",
".",
"base_url",
",",
"timestamp",
",",
"page",
")",
"return",
"http_get",
"(",
"blink",
",",
"url",
")"
] | Perform a request for videos.
:param blink: Blink instance.
:param time: Get videos since this time. In epoch seconds.
:param page: Page number to get videos from. | [
"Perform",
"a",
"request",
"for",
"videos",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/api.py#L162-L173 |
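A paging sketch over request_videos(), mirroring the loop in download_videos() above; since_epochs is a hypothetical epoch timestamp and blink an authenticated instance:

from blinkpy import api

since_epochs = 1532780000  # hypothetical epoch timestamp
for page in range(1, 10):
    resp = api.request_videos(blink, time=since_epochs, page=page)
    videos = resp.get('videos', [])
    if not videos:
        break  # no more results
    for video in videos:
        print(video.get('address'))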
5,281 | fronzbot/blinkpy | blinkpy/api.py | request_cameras | def request_cameras(blink, network):
"""
Request all camera information.
:param blink: Blink instance.
:param network: Sync module network id.
"""
url = "{}/network/{}/cameras".format(blink.urls.base_url, network)
return http_get(blink, url) | python | def request_cameras(blink, network):
"""
Request all camera information.
:param blink: Blink instance.
:param network: Sync module network id.
"""
url = "{}/network/{}/cameras".format(blink.urls.base_url, network)
return http_get(blink, url) | [
"def",
"request_cameras",
"(",
"blink",
",",
"network",
")",
":",
"url",
"=",
"\"{}/network/{}/cameras\"",
".",
"format",
"(",
"blink",
".",
"urls",
".",
"base_url",
",",
"network",
")",
"return",
"http_get",
"(",
"blink",
",",
"url",
")"
] | Request all camera information.
:param blink: Blink instance.
:param network: Sync module network id. | [
"Request",
"all",
"camera",
"information",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/api.py#L177-L185 |
5,282 | fronzbot/blinkpy | blinkpy/api.py | request_camera_sensors | def request_camera_sensors(blink, network, camera_id):
"""
Request camera sensor info for one camera.
:param blink: Blink instance.
:param network: Sync module network id.
:param camera_id: Camera ID of camera to request sensor info from.
"""
url = "{}/network/{}/camera/{}/signals".format(blink.urls.base_url,
network,
camera_id)
return http_get(blink, url) | python | def request_camera_sensors(blink, network, camera_id):
"""
Request camera sensor info for one camera.
:param blink: Blink instance.
:param network: Sync module network id.
:param camera_id: Camera ID of camera to request sensor info from.
"""
url = "{}/network/{}/camera/{}/signals".format(blink.urls.base_url,
network,
camera_id)
return http_get(blink, url) | [
"def",
"request_camera_sensors",
"(",
"blink",
",",
"network",
",",
"camera_id",
")",
":",
"url",
"=",
"\"{}/network/{}/camera/{}/signals\"",
".",
"format",
"(",
"blink",
".",
"urls",
".",
"base_url",
",",
"network",
",",
"camera_id",
")",
"return",
"http_get",
"(",
"blink",
",",
"url",
")"
] | Request camera sensor info for one camera.
:param blink: Blink instance.
:param network: Sync module network id.
:param camera_id: Camera ID of camera to request sesnor info from. | [
"Request",
"camera",
"sensor",
"info",
"for",
"one",
"camera",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/api.py#L202-L213 |
5,283 | fronzbot/blinkpy | blinkpy/api.py | request_motion_detection_enable | def request_motion_detection_enable(blink, network, camera_id):
"""
Enable motion detection for a camera.
:param blink: Blink instance.
:param network: Sync module network id.
:param camera_id: Camera ID of camera to enable.
"""
url = "{}/network/{}/camera/{}/enable".format(blink.urls.base_url,
network,
camera_id)
return http_post(blink, url) | python | def request_motion_detection_enable(blink, network, camera_id):
"""
Enable motion detection for a camera.
:param blink: Blink instance.
:param network: Sync module network id.
:param camera_id: Camera ID of camera to enable.
"""
url = "{}/network/{}/camera/{}/enable".format(blink.urls.base_url,
network,
camera_id)
return http_post(blink, url) | [
"def",
"request_motion_detection_enable",
"(",
"blink",
",",
"network",
",",
"camera_id",
")",
":",
"url",
"=",
"\"{}/network/{}/camera/{}/enable\"",
".",
"format",
"(",
"blink",
".",
"urls",
".",
"base_url",
",",
"network",
",",
"camera_id",
")",
"return",
"http_post",
"(",
"blink",
",",
"url",
")"
] | Enable motion detection for a camera.
:param blink: Blink instance.
:param network: Sync module network id.
:param camera_id: Camera ID of camera to enable. | [
"Enable",
"motion",
"detection",
"for",
"a",
"camera",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/api.py#L217-L228 |
5,284 | fronzbot/blinkpy | blinkpy/api.py | http_get | def http_get(blink, url, stream=False, json=True, is_retry=False):
"""
Perform an http get request.
:param url: URL to perform get request.
:param stream: Stream response? True/FALSE
:param json: Return json response? TRUE/False
:param is_retry: Is this part of a re-auth attempt?
"""
if blink.auth_header is None:
raise BlinkException(ERROR.AUTH_TOKEN)
_LOGGER.debug("Making GET request to %s", url)
return http_req(blink, url=url, headers=blink.auth_header,
reqtype='get', stream=stream, json_resp=json,
is_retry=is_retry) | python | def http_get(blink, url, stream=False, json=True, is_retry=False):
"""
Perform an http get request.
:param url: URL to perform get request.
:param stream: Stream response? True/FALSE
:param json: Return json response? TRUE/False
:param is_retry: Is this part of a re-auth attempt?
"""
if blink.auth_header is None:
raise BlinkException(ERROR.AUTH_TOKEN)
_LOGGER.debug("Making GET request to %s", url)
return http_req(blink, url=url, headers=blink.auth_header,
reqtype='get', stream=stream, json_resp=json,
is_retry=is_retry) | [
"def",
"http_get",
"(",
"blink",
",",
"url",
",",
"stream",
"=",
"False",
",",
"json",
"=",
"True",
",",
"is_retry",
"=",
"False",
")",
":",
"if",
"blink",
".",
"auth_header",
"is",
"None",
":",
"raise",
"BlinkException",
"(",
"ERROR",
".",
"AUTH_TOKEN",
")",
"_LOGGER",
".",
"debug",
"(",
"\"Making GET request to %s\"",
",",
"url",
")",
"return",
"http_req",
"(",
"blink",
",",
"url",
"=",
"url",
",",
"headers",
"=",
"blink",
".",
"auth_header",
",",
"reqtype",
"=",
"'get'",
",",
"stream",
"=",
"stream",
",",
"json_resp",
"=",
"json",
",",
"is_retry",
"=",
"is_retry",
")"
] | Perform an http get request.
:param url: URL to perform get request.
:param stream: Stream response? True/FALSE
:param json: Return json response? TRUE/False
:param is_retry: Is this part of a re-auth attempt? | [
"Perform",
"an",
"http",
"get",
"request",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/api.py#L245-L259 |
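A streaming-download sketch with http_get(), matching how _parse_downloaded_items() saves clips in the records above; clip_url is a hypothetical full address built from blink.urls.base_url and blink is an authenticated instance:

from shutil import copyfileobj

from blinkpy import api

clip_url = blink.urls.base_url + '/media/clip.mp4'  # hypothetical address
response = api.http_get(blink, url=clip_url, stream=True, json=False)
with open('clip.mp4', 'wb') as vidfile:
    copyfileobj(response.raw, vidfile)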
5,285 | fronzbot/blinkpy | blinkpy/api.py | http_post | def http_post(blink, url, is_retry=False):
"""
Perform an http post request.
:param url: URL to perform post request.
:param is_retry: Is this part of a re-auth attempt?
"""
if blink.auth_header is None:
raise BlinkException(ERROR.AUTH_TOKEN)
_LOGGER.debug("Making POST request to %s", url)
return http_req(blink, url=url, headers=blink.auth_header,
reqtype='post', is_retry=is_retry) | python | def http_post(blink, url, is_retry=False):
"""
Perform an http post request.
:param url: URL to perform post request.
:param is_retry: Is this part of a re-auth attempt?
"""
if blink.auth_header is None:
raise BlinkException(ERROR.AUTH_TOKEN)
_LOGGER.debug("Making POST request to %s", url)
return http_req(blink, url=url, headers=blink.auth_header,
reqtype='post', is_retry=is_retry) | [
"def",
"http_post",
"(",
"blink",
",",
"url",
",",
"is_retry",
"=",
"False",
")",
":",
"if",
"blink",
".",
"auth_header",
"is",
"None",
":",
"raise",
"BlinkException",
"(",
"ERROR",
".",
"AUTH_TOKEN",
")",
"_LOGGER",
".",
"debug",
"(",
"\"Making POST request to %s\"",
",",
"url",
")",
"return",
"http_req",
"(",
"blink",
",",
"url",
"=",
"url",
",",
"headers",
"=",
"blink",
".",
"auth_header",
",",
"reqtype",
"=",
"'post'",
",",
"is_retry",
"=",
"is_retry",
")"
] | Perform an http post request.
:param url: URL to perform post request.
:param is_retry: Is this part of a re-auth attempt? | [
"Perform",
"an",
"http",
"post",
"request",
"."
] | bfdc1e47bdd84903f1aca653605846f3c99bcfac | https://github.com/fronzbot/blinkpy/blob/bfdc1e47bdd84903f1aca653605846f3c99bcfac/blinkpy/api.py#L262-L273 |
5,286 | loli/medpy | medpy/filter/image.py | sls | def sls(minuend, subtrahend, metric = "ssd", noise = "global", signed = True,
sn_size = None, sn_footprint = None, sn_mode = "reflect", sn_cval = 0.0,
pn_size = None, pn_footprint = None, pn_mode = "reflect", pn_cval = 0.0):
r"""
Computes the signed local similarity between two images.
Compares a patch around each voxel of the minuend array to a number of patches
centered at the points of a search neighbourhood in the subtrahend. Thus, creates
a multi-dimensional measure of patch similarity between the minuend and a
corresponding search area in the subtrahend.
This filter can also be used to compute local self-similarity, obtaining a
descriptor similar to the one described in [1]_.
Parameters
----------
minuend : array_like
Input array from which to subtract the subtrahend.
subtrahend : array_like
Input array to subtract from the minuend.
metric : {'ssd', 'mi', 'nmi', 'ncc'}, optional
The `metric` parameter determines the metric used to compute the
filter output. Default is 'ssd'.
noise : {'global', 'local'}, optional
The `noise` parameter determines how the noise is handled. If set
to 'global', the variance determining the noise is a scalar, if
set to 'local', it is a Gaussian smoothed field of estimated local
noise. Default is 'global'.
signed : bool, optional
Whether the filter output should be signed or not. If set to 'False',
only the absolute values will be returned. Default is 'True'.
sn_size : scalar or tuple, optional
See sn_footprint, below
sn_footprint : array, optional
The search neighbourhood.
Either `sn_size` or `sn_footprint` must be defined. `sn_size` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
`sn_footprint` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``sn_size=(n,m)`` is equivalent
to ``sn_footprint=np.ones((n,m))``. We adjust `sn_size` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and `sn_size` is 2, then the actual size used is
(2,2,2).
sn_mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `sn_mode` parameter determines how the array borders are
handled, where `sn_cval` is the value when mode is equal to
'constant'. Default is 'reflect'
sn_cval : scalar, optional
Value to fill past edges of input if `sn_mode` is 'constant'. Default
is 0.0
pn_size : scalar or tuple, optional
See pn_footprint, below
pn_footprint : array, optional
The patch over which the distance measure is applied.
Either `pn_size` or `pn_footprint` must be defined. `pn_size` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
`pn_footprint` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``pn_size=(n,m)`` is equivalent
to ``pn_footprint=np.ones((n,m))``. We adjust `pn_size` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and `pn_size` is 2, then the actual size used is
(2,2,2).
pn_mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `pn_mode` parameter determines how the array borders are
handled, where `pn_cval` is the value when mode is equal to
'constant'. Default is 'reflect'
pn_cval : scalar, optional
Value to fill past edges of input if `pn_mode` is 'constant'. Default
is 0.0
Returns
-------
sls : ndarray
The signed local similarity image between subtrahend and minuend.
References
----------
.. [1] Mattias P. Heinrich, Mark Jenkinson, Manav Bhushan, Tahreema Matin, Fergus V. Gleeson, Sir Michael Brady, Julia A. Schnabel
MIND: Modality independent neighbourhood descriptor for multi-modal deformable registration
Medical Image Analysis, Volume 16, Issue 7, October 2012, Pages 1423-1435, ISSN 1361-8415
http://dx.doi.org/10.1016/j.media.2012.05.008
"""
minuend = numpy.asarray(minuend)
subtrahend = numpy.asarray(subtrahend)
if numpy.iscomplexobj(minuend):
raise TypeError('complex type not supported')
if numpy.iscomplexobj(subtrahend):
raise TypeError('complex type not supported')
mshape = [ii for ii in minuend.shape if ii > 0]
sshape = [ii for ii in subtrahend.shape if ii > 0]
if not len(mshape) == len(sshape):
raise RuntimeError("minuend and subtrahend must be of same shape")
if not numpy.all([sm == ss for sm, ss in zip(mshape, sshape)]):
raise RuntimeError("minuend and subtrahend must be of same shape")
sn_footprint = __make_footprint(minuend, sn_size, sn_footprint)
sn_fshape = [ii for ii in sn_footprint.shape if ii > 0]
if len(sn_fshape) != minuend.ndim:
raise RuntimeError('search neighbourhood footprint array has incorrect shape.')
#!TODO: Is this required?
if not sn_footprint.flags.contiguous:
sn_footprint = sn_footprint.copy()
# create a padded copy of the subtrahend; borders are handled according to sn_mode
subtrahend = pad(subtrahend, footprint=sn_footprint, mode=sn_mode, cval=sn_cval)
# compute slicers for position where the search neighbourhood sn_footprint is TRUE
slicers = [[slice(x, (x + 1) - d if 0 != (x + 1) - d else None) for x in range(d)] for d in sn_fshape]
slicers = [sl for sl, tv in zip(itertools.product(*slicers), sn_footprint.flat) if tv]
# compute difference images and sign images for search neighbourhood elements
ssds = [ssd(minuend, subtrahend[slicer], normalized=True, signed=signed, size=pn_size, footprint=pn_footprint, mode=pn_mode, cval=pn_cval) for slicer in slicers]
distance = [x[0] for x in ssds]
distance_sign = [x[1] for x in ssds]
# compute local variance, which constitutes an approximation of local noise, out of patch-distances over the neighbourhood structure
variance = numpy.average(distance, 0)
variance = gaussian_filter(variance, sigma=3) #!TODO: Figure out if a fixed sigma is desirable here... I think that yes
if 'global' == noise:
variance = variance.sum() / float(numpy.product(variance.shape))
# variance[variance < variance_global / 10.] = variance_global / 10. #!TODO: Should I keep this i.e. regularizing the variance to be at least 10% of the global one?
# compute sls
sls = [dist_sign * numpy.exp(-1 * (dist / variance)) for dist_sign, dist in zip(distance_sign, distance)]
# convert into sls image, swapping dimensions to have varying patches in the last dimension
return numpy.rollaxis(numpy.asarray(sls), 0, minuend.ndim + 1) | python | def sls(minuend, subtrahend, metric = "ssd", noise = "global", signed = True,
sn_size = None, sn_footprint = None, sn_mode = "reflect", sn_cval = 0.0,
pn_size = None, pn_footprint = None, pn_mode = "reflect", pn_cval = 0.0):
r"""
Computes the signed local similarity between two images.
Compares a patch around each voxel of the minuend array to a number of patches
centered at the points of a search neighbourhood in the subtrahend. Thus, creates
a multi-dimensional measure of patch similarity between the minuend and a
corresponding search area in the subtrahend.
This filter can also be used to compute local self-similarity, obtaining a
descriptor similar to the one described in [1]_.
Parameters
----------
minuend : array_like
Input array from which to subtract the subtrahend.
subtrahend : array_like
Input array to subtract from the minuend.
metric : {'ssd', 'mi', 'nmi', 'ncc'}, optional
The `metric` parameter determines the metric used to compute the
filter output. Default is 'ssd'.
noise : {'global', 'local'}, optional
The `noise` parameter determines how the noise is handled. If set
to 'global', the variance determining the noise is a scalar, if
set to 'local', it is a Gaussian smoothed field of estimated local
noise. Default is 'global'.
signed : bool, optional
Whether the filter output should be signed or not. If set to 'False',
only the absolute values will be returned. Default is 'True'.
sn_size : scalar or tuple, optional
See sn_footprint, below
sn_footprint : array, optional
The search neighbourhood.
Either `sn_size` or `sn_footprint` must be defined. `sn_size` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
`sn_footprint` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``sn_size=(n,m)`` is equivalent
to ``sn_footprint=np.ones((n,m))``. We adjust `sn_size` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and `sn_size` is 2, then the actual size used is
(2,2,2).
sn_mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `sn_mode` parameter determines how the array borders are
handled, where `sn_cval` is the value when mode is equal to
'constant'. Default is 'reflect'
sn_cval : scalar, optional
Value to fill past edges of input if `sn_mode` is 'constant'. Default
is 0.0
pn_size : scalar or tuple, optional
See pn_footprint, below
pn_footprint : array, optional
The patch over which the distance measure is applied.
Either `pn_size` or `pn_footprint` must be defined. `pn_size` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
`pn_footprint` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``pn_size=(n,m)`` is equivalent
to ``pn_footprint=np.ones((n,m))``. We adjust `pn_size` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and `pn_size` is 2, then the actual size used is
(2,2,2).
pn_mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `pn_mode` parameter determines how the array borders are
handled, where `pn_cval` is the value when mode is equal to
'constant'. Default is 'reflect'
pn_cval : scalar, optional
Value to fill past edges of input if `pn_mode` is 'constant'. Default
is 0.0
Returns
-------
sls : ndarray
The signed local similarity image between subtrahend and minuend.
References
----------
.. [1] Mattias P. Heinrich, Mark Jenkinson, Manav Bhushan, Tahreema Matin, Fergus V. Gleeson, Sir Michael Brady, Julia A. Schnabel
MIND: Modality independent neighbourhood descriptor for multi-modal deformable registration
Medical Image Analysis, Volume 16, Issue 7, October 2012, Pages 1423-1435, ISSN 1361-8415
http://dx.doi.org/10.1016/j.media.2012.05.008
"""
minuend = numpy.asarray(minuend)
subtrahend = numpy.asarray(subtrahend)
if numpy.iscomplexobj(minuend):
raise TypeError('complex type not supported')
if numpy.iscomplexobj(subtrahend):
raise TypeError('complex type not supported')
mshape = [ii for ii in minuend.shape if ii > 0]
sshape = [ii for ii in subtrahend.shape if ii > 0]
if not len(mshape) == len(sshape):
raise RuntimeError("minuend and subtrahend must be of same shape")
if not numpy.all([sm == ss for sm, ss in zip(mshape, sshape)]):
raise RuntimeError("minuend and subtrahend must be of same shape")
sn_footprint = __make_footprint(minuend, sn_size, sn_footprint)
sn_fshape = [ii for ii in sn_footprint.shape if ii > 0]
if len(sn_fshape) != minuend.ndim:
raise RuntimeError('search neighbourhood footprint array has incorrect shape.')
#!TODO: Is this required?
if not sn_footprint.flags.contiguous:
sn_footprint = sn_footprint.copy()
# create a padded copy of the subtrahend; borders are handled according to sn_mode
subtrahend = pad(subtrahend, footprint=sn_footprint, mode=sn_mode, cval=sn_cval)
# compute slicers for position where the search neighbourhood sn_footprint is TRUE
slicers = [[slice(x, (x + 1) - d if 0 != (x + 1) - d else None) for x in range(d)] for d in sn_fshape]
slicers = [sl for sl, tv in zip(itertools.product(*slicers), sn_footprint.flat) if tv]
# compute difference images and sign images for search neighbourhood elements
ssds = [ssd(minuend, subtrahend[slicer], normalized=True, signed=signed, size=pn_size, footprint=pn_footprint, mode=pn_mode, cval=pn_cval) for slicer in slicers]
distance = [x[0] for x in ssds]
distance_sign = [x[1] for x in ssds]
# compute local variance, which constitutes an approximation of local noise, out of patch-distances over the neighbourhood structure
variance = numpy.average(distance, 0)
variance = gaussian_filter(variance, sigma=3) #!TODO: Figure out if a fixed sigma is desirable here... I think that yes
if 'global' == noise:
variance = variance.sum() / float(numpy.product(variance.shape))
# variance[variance < variance_global / 10.] = variance_global / 10. #!TODO: Should I keep this i.e. regularizing the variance to be at least 10% of the global one?
# compute sls
sls = [dist_sign * numpy.exp(-1 * (dist / variance)) for dist_sign, dist in zip(distance_sign, distance)]
# convert into sls image, swapping dimensions to have varying patches in the last dimension
return numpy.rollaxis(numpy.asarray(sls), 0, minuend.ndim + 1) | [
"def",
"sls",
"(",
"minuend",
",",
"subtrahend",
",",
"metric",
"=",
"\"ssd\"",
",",
"noise",
"=",
"\"global\"",
",",
"signed",
"=",
"True",
",",
"sn_size",
"=",
"None",
",",
"sn_footprint",
"=",
"None",
",",
"sn_mode",
"=",
"\"reflect\"",
",",
"sn_cval",
"=",
"0.0",
",",
"pn_size",
"=",
"None",
",",
"pn_footprint",
"=",
"None",
",",
"pn_mode",
"=",
"\"reflect\"",
",",
"pn_cval",
"=",
"0.0",
")",
":",
"minuend",
"=",
"numpy",
".",
"asarray",
"(",
"minuend",
")",
"subtrahend",
"=",
"numpy",
".",
"asarray",
"(",
"subtrahend",
")",
"if",
"numpy",
".",
"iscomplexobj",
"(",
"minuend",
")",
":",
"raise",
"TypeError",
"(",
"'complex type not supported'",
")",
"if",
"numpy",
".",
"iscomplexobj",
"(",
"subtrahend",
")",
":",
"raise",
"TypeError",
"(",
"'complex type not supported'",
")",
"mshape",
"=",
"[",
"ii",
"for",
"ii",
"in",
"minuend",
".",
"shape",
"if",
"ii",
">",
"0",
"]",
"sshape",
"=",
"[",
"ii",
"for",
"ii",
"in",
"subtrahend",
".",
"shape",
"if",
"ii",
">",
"0",
"]",
"if",
"not",
"len",
"(",
"mshape",
")",
"==",
"len",
"(",
"sshape",
")",
":",
"raise",
"RuntimeError",
"(",
"\"minuend and subtrahend must be of same shape\"",
")",
"if",
"not",
"numpy",
".",
"all",
"(",
"[",
"sm",
"==",
"ss",
"for",
"sm",
",",
"ss",
"in",
"zip",
"(",
"mshape",
",",
"sshape",
")",
"]",
")",
":",
"raise",
"RuntimeError",
"(",
"\"minuend and subtrahend must be of same shape\"",
")",
"sn_footprint",
"=",
"__make_footprint",
"(",
"minuend",
",",
"sn_size",
",",
"sn_footprint",
")",
"sn_fshape",
"=",
"[",
"ii",
"for",
"ii",
"in",
"sn_footprint",
".",
"shape",
"if",
"ii",
">",
"0",
"]",
"if",
"len",
"(",
"sn_fshape",
")",
"!=",
"minuend",
".",
"ndim",
":",
"raise",
"RuntimeError",
"(",
"'search neighbourhood footprint array has incorrect shape.'",
")",
"#!TODO: Is this required?",
"if",
"not",
"sn_footprint",
".",
"flags",
".",
"contiguous",
":",
"sn_footprint",
"=",
"sn_footprint",
".",
"copy",
"(",
")",
"# created a padded copy of the subtrahend, whereas the padding mode is always 'reflect' ",
"subtrahend",
"=",
"pad",
"(",
"subtrahend",
",",
"footprint",
"=",
"sn_footprint",
",",
"mode",
"=",
"sn_mode",
",",
"cval",
"=",
"sn_cval",
")",
"# compute slicers for position where the search neighbourhood sn_footprint is TRUE",
"slicers",
"=",
"[",
"[",
"slice",
"(",
"x",
",",
"(",
"x",
"+",
"1",
")",
"-",
"d",
"if",
"0",
"!=",
"(",
"x",
"+",
"1",
")",
"-",
"d",
"else",
"None",
")",
"for",
"x",
"in",
"range",
"(",
"d",
")",
"]",
"for",
"d",
"in",
"sn_fshape",
"]",
"slicers",
"=",
"[",
"sl",
"for",
"sl",
",",
"tv",
"in",
"zip",
"(",
"itertools",
".",
"product",
"(",
"*",
"slicers",
")",
",",
"sn_footprint",
".",
"flat",
")",
"if",
"tv",
"]",
"# compute difference images and sign images for search neighbourhood elements",
"ssds",
"=",
"[",
"ssd",
"(",
"minuend",
",",
"subtrahend",
"[",
"slicer",
"]",
",",
"normalized",
"=",
"True",
",",
"signed",
"=",
"signed",
",",
"size",
"=",
"pn_size",
",",
"footprint",
"=",
"pn_footprint",
",",
"mode",
"=",
"pn_mode",
",",
"cval",
"=",
"pn_cval",
")",
"for",
"slicer",
"in",
"slicers",
"]",
"distance",
"=",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"ssds",
"]",
"distance_sign",
"=",
"[",
"x",
"[",
"1",
"]",
"for",
"x",
"in",
"ssds",
"]",
"# compute local variance, which constitutes an approximation of local noise, out of patch-distances over the neighbourhood structure",
"variance",
"=",
"numpy",
".",
"average",
"(",
"distance",
",",
"0",
")",
"variance",
"=",
"gaussian_filter",
"(",
"variance",
",",
"sigma",
"=",
"3",
")",
"#!TODO: Figure out if a fixed sigma is desirable here... I think that yes",
"if",
"'global'",
"==",
"noise",
":",
"variance",
"=",
"variance",
".",
"sum",
"(",
")",
"/",
"float",
"(",
"numpy",
".",
"product",
"(",
"variance",
".",
"shape",
")",
")",
"# variance[variance < variance_global / 10.] = variance_global / 10. #!TODO: Should I keep this i.e. regularizing the variance to be at least 10% of the global one?",
"# compute sls",
"sls",
"=",
"[",
"dist_sign",
"*",
"numpy",
".",
"exp",
"(",
"-",
"1",
"*",
"(",
"dist",
"/",
"variance",
")",
")",
"for",
"dist_sign",
",",
"dist",
"in",
"zip",
"(",
"distance_sign",
",",
"distance",
")",
"]",
"# convert into sls image, swapping dimensions to have varying patches in the last dimension",
"return",
"numpy",
".",
"rollaxis",
"(",
"numpy",
".",
"asarray",
"(",
"sls",
")",
",",
"0",
",",
"minuend",
".",
"ndim",
"+",
"1",
")"
] | r"""
Computes the signed local similarity between two images.
Compares a patch around each voxel of the minuend array to a number of patches
centered at the points of a search neighbourhood in the subtrahend. Thus, creates
a multi-dimensional measure of patch similarity between the minuend and a
corresponding search area in the subtrahend.
This filter can also be used to compute local self-similarity, obtaining a
descriptor similar to the one described in [1]_.
Parameters
----------
minuend : array_like
Input array from which to subtract the subtrahend.
subtrahend : array_like
Input array to subtract from the minuend.
metric : {'ssd', 'mi', 'nmi', 'ncc'}, optional
The `metric` parameter determines the metric used to compute the
filter output. Default is 'ssd'.
noise : {'global', 'local'}, optional
The `noise` parameter determines how the noise is handled. If set
to 'global', the variance determining the noise is a scalar, if
set to 'local', it is a Gaussian smoothed field of estimated local
noise. Default is 'global'.
signed : bool, optional
Whether the filter output should be signed or not. If set to 'False',
only the absolute values will be returned. Default is 'True'.
sn_size : scalar or tuple, optional
See sn_footprint, below
sn_footprint : array, optional
The search neighbourhood.
Either `sn_size` or `sn_footprint` must be defined. `sn_size` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
`sn_footprint` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``sn_size=(n,m)`` is equivalent
to ``sn_footprint=np.ones((n,m))``. We adjust `sn_size` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and `sn_size` is 2, then the actual size used is
(2,2,2).
sn_mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `sn_mode` parameter determines how the array borders are
handled, where `sn_cval` is the value when mode is equal to
'constant'. Default is 'reflect'
sn_cval : scalar, optional
Value to fill past edges of input if `sn_mode` is 'constant'. Default
is 0.0
pn_size : scalar or tuple, optional
See pn_footprint, below
pn_footprint : array, optional
The patch over which the distance measure is applied.
Either `pn_size` or `pn_footprint` must be defined. `pn_size` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
`pn_footprint` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``pn_size=(n,m)`` is equivalent
to ``pn_footprint=np.ones((n,m))``. We adjust `pn_size` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and `pn_size` is 2, then the actual size used is
(2,2,2).
pn_mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `pn_mode` parameter determines how the array borders are
handled, where `pn_cval` is the value when mode is equal to
'constant'. Default is 'reflect'
pn_cval : scalar, optional
Value to fill past edges of input if `pn_mode` is 'constant'. Default
is 0.0
Returns
-------
sls : ndarray
The signed local similarity image between subtrahend and minuend.
References
----------
.. [1] Mattias P. Heinrich, Mark Jenkinson, Manav Bhushan, Tahreema Matin, Fergus V. Gleeson, Sir Michael Brady, Julia A. Schnabel
MIND: Modality independent neighbourhood descriptor for multi-modal deformable registration
Medical Image Analysis, Volume 16, Issue 7, October 2012, Pages 1423-1435, ISSN 1361-8415
http://dx.doi.org/10.1016/j.media.2012.05.008 | [
"r",
"Computes",
"the",
"signed",
"local",
"similarity",
"between",
"two",
"images",
"."
] | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/filter/image.py#L37-L170 |
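A minimal usage sketch for `sls`. The import path is an assumption taken from the source location shown in this row, and all shapes and neighbourhood sizes are illustrative only:

# hedged sketch -- assumes medpy (with a compatible numpy/scipy) is installed
# and that sls is importable from medpy.filter.image
import numpy
from medpy.filter.image import sls

minuend = numpy.random.rand(32, 32)     # e.g. a fixed image
subtrahend = numpy.random.rand(32, 32)  # a second image of identical shape
# 3x3 search neighbourhood, 5x5 comparison patches
out = sls(minuend, subtrahend, sn_size=3, pn_size=5)
# one similarity value per True entry of the search footprint, stacked last
print(out.shape)  # (32, 32, 9)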
5,287 | loli/medpy | medpy/filter/image.py | average_filter | def average_filter(input, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0):
r"""
Calculates a multi-dimensional average filter.
Parameters
----------
input : array-like
input array to filter
size : scalar or tuple, optional
See footprint, below
footprint : array, optional
Either `size` or `footprint` must be defined. `size` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
`footprint` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``size=(n,m)`` is equivalent
to ``footprint=np.ones((n,m))``. We adjust `size` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and `size` is 2, then the actual size used is
(2,2,2).
output : array, optional
The ``output`` parameter passes an array in which to store the
filter output.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The ``mode`` parameter determines how the array borders are
handled, where ``cval`` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if ``mode`` is 'constant'. Default
is 0.0
origin : scalar, optional
The ``origin`` parameter controls the placement of the filter.
Default 0
Returns
-------
average_filter : ndarray
Returned array of same shape as `input`.
Notes
-----
Convenience implementation employing convolve.
See Also
--------
scipy.ndimage.filters.convolve : Convolve an image with a kernel.
"""
footprint = __make_footprint(input, size, footprint)
filter_size = footprint.sum()
output = _get_output(output, input)
sum_filter(input, footprint=footprint, output=output, mode=mode, cval=cval, origin=origin)
output /= filter_size
return output | python | def average_filter(input, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0):
r"""
Calculates a multi-dimensional average filter.
Parameters
----------
input : array-like
input array to filter
size : scalar or tuple, optional
See footprint, below
footprint : array, optional
Either `size` or `footprint` must be defined. `size` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
`footprint` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``size=(n,m)`` is equivalent
to ``footprint=np.ones((n,m))``. We adjust `size` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and `size` is 2, then the actual size used is
(2,2,2).
output : array, optional
The ``output`` parameter passes an array in which to store the
filter output.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The ``mode`` parameter determines how the array borders are
handled, where ``cval`` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if ``mode`` is 'constant'. Default
is 0.0
origin : scalar, optional
The ``origin`` parameter controls the placement of the filter.
Default 0
Returns
-------
average_filter : ndarray
Returned array of same shape as `input`.
Notes
-----
Convenience implementation employing convolve.
See Also
--------
scipy.ndimage.filters.convolve : Convolve an image with a kernel.
"""
footprint = __make_footprint(input, size, footprint)
filter_size = footprint.sum()
output = _get_output(output, input)
sum_filter(input, footprint=footprint, output=output, mode=mode, cval=cval, origin=origin)
output /= filter_size
return output | [
"def",
"average_filter",
"(",
"input",
",",
"size",
"=",
"None",
",",
"footprint",
"=",
"None",
",",
"output",
"=",
"None",
",",
"mode",
"=",
"\"reflect\"",
",",
"cval",
"=",
"0.0",
",",
"origin",
"=",
"0",
")",
":",
"footprint",
"=",
"__make_footprint",
"(",
"input",
",",
"size",
",",
"footprint",
")",
"filter_size",
"=",
"footprint",
".",
"sum",
"(",
")",
"output",
"=",
"_get_output",
"(",
"output",
",",
"input",
")",
"sum_filter",
"(",
"input",
",",
"footprint",
"=",
"footprint",
",",
"output",
"=",
"output",
",",
"mode",
"=",
"mode",
",",
"cval",
"=",
"cval",
",",
"origin",
"=",
"origin",
")",
"output",
"/=",
"filter_size",
"return",
"output"
] | r"""
Calculates a multi-dimensional average filter.
Parameters
----------
input : array-like
input array to filter
size : scalar or tuple, optional
See footprint, below
footprint : array, optional
Either `size` or `footprint` must be defined. `size` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
`footprint` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``size=(n,m)`` is equivalent
to ``footprint=np.ones((n,m))``. We adjust `size` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and `size` is 2, then the actual size used is
(2,2,2).
output : array, optional
The ``output`` parameter passes an array in which to store the
filter output.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The ``mode`` parameter determines how the array borders are
handled, where ``cval`` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if ``mode`` is 'constant'. Default
is 0.0
origin : scalar, optional
The ``origin`` parameter controls the placement of the filter.
Default 0
Returns
-------
average_filter : ndarray
Returned array of same shape as `input`.
Notes
-----
Convenience implementation employing convolve.
See Also
--------
scipy.ndimage.filters.convolve : Convolve an image with a kernel. | [
"r",
"Calculates",
"a",
"multi",
"-",
"dimensional",
"average",
"filter",
"."
] | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/filter/image.py#L230-L284 |
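A short usage sketch for `average_filter`; the import path is assumed from the source location above, and a float input is used so the in-place division stays exact:

import numpy
from medpy.filter.image import average_filter  # assumed import path

img = numpy.arange(25, dtype=numpy.float64).reshape(5, 5)
smoothed = average_filter(img, size=3)  # 3x3 mean filter, 'reflect' borders
print(smoothed.shape)  # (5, 5): same shape as the input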
5,288 | loli/medpy | medpy/filter/image.py | sum_filter | def sum_filter(input, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0):
r"""
Calculates a multi-dimensional sum filter.
Parameters
----------
input : array-like
input array to filter
size : scalar or tuple, optional
See footprint, below
footprint : array, optional
Either `size` or `footprint` must be defined. `size` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
`footprint` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``size=(n,m)`` is equivalent
to ``footprint=np.ones((n,m))``. We adjust `size` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and `size` is 2, then the actual size used is
(2,2,2).
output : array, optional
The ``output`` parameter passes an array in which to store the
filter output.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The ``mode`` parameter determines how the array borders are
handled, where ``cval`` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if ``mode`` is 'constant'. Default
is 0.0
origin : scalar, optional
The ``origin`` parameter controls the placement of the filter.
Default 0
Returns
-------
sum_filter : ndarray
Returned array of same shape as `input`.
Notes
-----
Convenience implementation employing convolve.
See Also
--------
scipy.ndimage.filters.convolve : Convolve an image with a kernel.
"""
footprint = __make_footprint(input, size, footprint)
slicer = [slice(None, None, -1)] * footprint.ndim
return convolve(input, footprint[slicer], output, mode, cval, origin) | python | def sum_filter(input, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0):
r"""
Calculates a multi-dimensional sum filter.
Parameters
----------
input : array-like
input array to filter
size : scalar or tuple, optional
See footprint, below
footprint : array, optional
Either `size` or `footprint` must be defined. `size` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
`footprint` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``size=(n,m)`` is equivalent
to ``footprint=np.ones((n,m))``. We adjust `size` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and `size` is 2, then the actual size used is
(2,2,2).
output : array, optional
The ``output`` parameter passes an array in which to store the
filter output.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The ``mode`` parameter determines how the array borders are
handled, where ``cval`` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if ``mode`` is 'constant'. Default
is 0.0
origin : scalar, optional
The ``origin`` parameter controls the placement of the filter.
Default 0
Returns
-------
sum_filter : ndarray
Returned array of same shape as `input`.
Notes
-----
Convenience implementation employing convolve.
See Also
--------
scipy.ndimage.filters.convolve : Convolve an image with a kernel.
"""
footprint = __make_footprint(input, size, footprint)
slicer = [slice(None, None, -1)] * footprint.ndim
return convolve(input, footprint[slicer], output, mode, cval, origin) | [
"def",
"sum_filter",
"(",
"input",
",",
"size",
"=",
"None",
",",
"footprint",
"=",
"None",
",",
"output",
"=",
"None",
",",
"mode",
"=",
"\"reflect\"",
",",
"cval",
"=",
"0.0",
",",
"origin",
"=",
"0",
")",
":",
"footprint",
"=",
"__make_footprint",
"(",
"input",
",",
"size",
",",
"footprint",
")",
"slicer",
"=",
"[",
"slice",
"(",
"None",
",",
"None",
",",
"-",
"1",
")",
"]",
"*",
"footprint",
".",
"ndim",
"return",
"convolve",
"(",
"input",
",",
"footprint",
"[",
"slicer",
"]",
",",
"output",
",",
"mode",
",",
"cval",
",",
"origin",
")"
] | r"""
Calculates a multi-dimensional sum filter.
Parameters
----------
input : array-like
input array to filter
size : scalar or tuple, optional
See footprint, below
footprint : array, optional
Either `size` or `footprint` must be defined. `size` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
`footprint` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``size=(n,m)`` is equivalent
to ``footprint=np.ones((n,m))``. We adjust `size` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and `size` is 2, then the actual size used is
(2,2,2).
output : array, optional
The ``output`` parameter passes an array in which to store the
filter output.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The ``mode`` parameter determines how the array borders are
handled, where ``cval`` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if ``mode`` is 'constant'. Default
is 0.0
origin : scalar, optional
The ``origin`` parameter controls the placement of the filter.
Default 0
Returns
-------
sum_filter : ndarray
Returned array of same shape as `input`.
Notes
-----
Convenience implementation employing convolve.
See Also
--------
scipy.ndimage.filters.convolve : Convolve an image with a kernel. | [
"r",
"Calculates",
"a",
"multi",
"-",
"dimensional",
"sum",
"filter",
"."
] | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/filter/image.py#L287-L337 |
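Since `average_filter` above is implemented as `sum_filter` divided by the footprint size, the two can be related directly. A sketch under the same assumed import path:

import numpy
from medpy.filter.image import sum_filter, average_filter  # assumed path

img = numpy.random.rand(10, 10)
total = sum_filter(img, size=3)     # moving-window sum over a 3x3 footprint
mean = average_filter(img, size=3)  # the same window, normalized by 9
print(numpy.allclose(total / 9., mean))  # expected: True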
5,290 | loli/medpy | medpy/features/histogram.py | _gaussian_membership_sigma | def _gaussian_membership_sigma(smoothness, eps = 0.0005): # 275us @ smoothness=10
r"""Compute the sigma required for a gaussian, such that in a neighbourhood of
smoothness the maximum error is 'eps'.
The error is here the difference between the clipped integral and one.
"""
error = 0
deltas = [0.1, 0.01, 0.001, 0.0001]
sigma = smoothness * 0.3
point = -1. * (smoothness + 0.5)
for delta in deltas:
while error < eps:
sigma += delta
error = scipy.stats.norm.cdf(0.5, point, sigma) - scipy.stats.norm.cdf(-0.5, point, sigma) # x, mu, sigma
sigma -= delta
return sigma | python | def _gaussian_membership_sigma(smoothness, eps = 0.0005): # 275us @ smoothness=10
r"""Compute the sigma required for a gaussian, such that in a neighbourhood of
smoothness the maximum error is 'eps'.
The error is here the difference between the clipped integral and one.
"""
error = 0
deltas = [0.1, 0.01, 0.001, 0.0001]
sigma = smoothness * 0.3
point = -1. * (smoothness + 0.5)
for delta in deltas:
while error < eps:
sigma += delta
error = scipy.stats.norm.cdf(0.5, point, sigma) - scipy.stats.norm.cdf(-0.5, point, sigma) # x, mu, sigma
sigma -= delta
return sigma | [
"def",
"_gaussian_membership_sigma",
"(",
"smoothness",
",",
"eps",
"=",
"0.0005",
")",
":",
"# 275us @ smothness=10",
"error",
"=",
"0",
"deltas",
"=",
"[",
"0.1",
",",
"0.01",
",",
"0.001",
",",
"0.0001",
"]",
"sigma",
"=",
"smoothness",
"*",
"0.3",
"point",
"=",
"-",
"1.",
"*",
"(",
"smoothness",
"+",
"0.5",
")",
"for",
"delta",
"in",
"deltas",
":",
"while",
"error",
"<",
"eps",
":",
"sigma",
"+=",
"delta",
"error",
"=",
"scipy",
".",
"stats",
".",
"norm",
".",
"cdf",
"(",
"0.5",
",",
"point",
",",
"sigma",
")",
"-",
"scipy",
".",
"stats",
".",
"norm",
".",
"cdf",
"(",
"-",
"0.5",
",",
"point",
",",
"sigma",
")",
"# x, mu, sigma",
"sigma",
"-=",
"delta",
"return",
"sigma"
] | r"""Compute the sigma required for a gaussian, such that in a neighbourhood of
smoothness the maximum error is 'eps'.
The error is here the difference between the clipped integral and one. | [
"r",
"Compute",
"the",
"sigma",
"required",
"for",
"a",
"gaussian",
"such",
"that",
"in",
"a",
"neighbourhood",
"of",
"smoothness",
"the",
"maximum",
"error",
"is",
"eps",
".",
"The",
"error",
"is",
"here",
"the",
"difference",
"between",
"the",
"clipped",
"integral",
"and",
"one",
"."
] | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/features/histogram.py#L361-L375 |
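A verification sketch for `_gaussian_membership_sigma`. The function is module-internal, so the explicit import below is an assumption; the check recomputes the clipped mass at the far end of the neighbourhood:

import scipy.stats
from medpy.features.histogram import _gaussian_membership_sigma  # assumed

smoothness, eps = 10, 0.0005
sigma = _gaussian_membership_sigma(smoothness, eps)
point = -1. * (smoothness + 0.5)
mass = scipy.stats.norm.cdf(0.5, point, sigma) - scipy.stats.norm.cdf(-0.5, point, sigma)
print(sigma, mass < eps)  # the returned sigma should keep the mass below eps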
5,290 | loli/medpy | doc/numpydoc/numpydoc/plot_directive.py | out_of_date | def out_of_date(original, derived):
"""
Returns True if derived is out-of-date wrt original,
both of which are full file paths.
"""
return (not os.path.exists(derived)
or os.stat(derived).st_mtime < os.stat(original).st_mtime) | python | def out_of_date(original, derived):
"""
Returns True if derived is out-of-date wrt original,
both of which are full file paths.
"""
return (not os.path.exists(derived)
or os.stat(derived).st_mtime < os.stat(original).st_mtime) | [
"def",
"out_of_date",
"(",
"original",
",",
"derived",
")",
":",
"return",
"(",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"derived",
")",
"or",
"os",
".",
"stat",
"(",
"derived",
")",
".",
"st_mtime",
"<",
"os",
".",
"stat",
"(",
"original",
")",
".",
"st_mtime",
")"
] | Returns True if derived is out-of-date wrt original,
both of which are full file paths. | [
"Returns",
"True",
"if",
"derivative",
"is",
"out",
"-",
"of",
"-",
"date",
"wrt",
"original",
"both",
"of",
"which",
"are",
"full",
"file",
"paths",
"."
] | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/doc/numpydoc/numpydoc/plot_directive.py#L481-L487 |
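A tiny usage sketch for `out_of_date`, assuming the function above has been copied into scope (it lives in a Sphinx extension module); the file names are hypothetical:

import os, time

with open('original.txt', 'w') as f:
    f.write('source')
time.sleep(1)
with open('derived.txt', 'w') as f:
    f.write('result')

print(out_of_date('original.txt', 'derived.txt'))  # False: derived is newer
print(out_of_date('original.txt', 'missing.txt'))  # True: derived file absent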
5,291 | loli/medpy | medpy/features/texture.py | local_maxima | def local_maxima(vector,min_distance = 4, brd_mode = "wrap"):
"""
Internal finder for local maxima.
Returns UNSORTED indices of maxima in input vector.
"""
fits = gaussian_filter(numpy.asarray(vector,dtype=numpy.float32),1., mode=brd_mode)
for ii in range(len(fits)):
if fits[ii] == fits[ii-1]:
fits[ii-1] = 0.0
maxfits = maximum_filter(fits, size=min_distance, mode=brd_mode)
maxima_mask = fits == maxfits
maximum = numpy.transpose(maxima_mask.nonzero())
return numpy.asarray(maximum) | python | def local_maxima(vector,min_distance = 4, brd_mode = "wrap"):
"""
Internal finder for local maxima.
Returns UNSORTED indices of maxima in input vector.
"""
fits = gaussian_filter(numpy.asarray(vector,dtype=numpy.float32),1., mode=brd_mode)
for ii in range(len(fits)):
if fits[ii] == fits[ii-1]:
fits[ii-1] = 0.0
maxfits = maximum_filter(fits, size=min_distance, mode=brd_mode)
maxima_mask = fits == maxfits
maximum = numpy.transpose(maxima_mask.nonzero())
return numpy.asarray(maximum) | [
"def",
"local_maxima",
"(",
"vector",
",",
"min_distance",
"=",
"4",
",",
"brd_mode",
"=",
"\"wrap\"",
")",
":",
"fits",
"=",
"gaussian_filter",
"(",
"numpy",
".",
"asarray",
"(",
"vector",
",",
"dtype",
"=",
"numpy",
".",
"float32",
")",
",",
"1.",
",",
"mode",
"=",
"brd_mode",
")",
"for",
"ii",
"in",
"range",
"(",
"len",
"(",
"fits",
")",
")",
":",
"if",
"fits",
"[",
"ii",
"]",
"==",
"fits",
"[",
"ii",
"-",
"1",
"]",
":",
"fits",
"[",
"ii",
"-",
"1",
"]",
"=",
"0.0",
"maxfits",
"=",
"maximum_filter",
"(",
"fits",
",",
"size",
"=",
"min_distance",
",",
"mode",
"=",
"brd_mode",
")",
"maxima_mask",
"=",
"fits",
"==",
"maxfits",
"maximum",
"=",
"numpy",
".",
"transpose",
"(",
"maxima_mask",
".",
"nonzero",
"(",
")",
")",
"return",
"numpy",
".",
"asarray",
"(",
"maximum",
")"
] | Internal finder for local maxima.
Returns UNSORTED indices of maxima in input vector. | [
"Internal",
"finder",
"for",
"local",
"maxima",
".",
"Returns",
"UNSORTED",
"indices",
"of",
"maxima",
"in",
"input",
"vector",
"."
] | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/features/texture.py#L268-L280 |
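A hedged sketch for `local_maxima` (`local_minima` in the next row behaves analogously). The helper is internal to medpy.features.texture, so the import is an assumption, as is the exact peak count:

import numpy
from medpy.features.texture import local_maxima  # assumed import

angles = numpy.linspace(0, 2 * numpy.pi, 120, endpoint=False)
signal = numpy.sin(3 * angles)  # three periods, hence roughly three peaks
peaks = local_maxima(signal, min_distance=4)
print(peaks.ravel())  # UNSORTED indices of the detected maxima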
5,292 | loli/medpy | medpy/features/texture.py | local_minima | def local_minima(vector,min_distance = 4, brd_mode = "wrap"):
"""
Internal finder for local minima.
Returns UNSORTED indices of minima in input vector.
"""
fits = gaussian_filter(numpy.asarray(vector,dtype=numpy.float32),1., mode=brd_mode)
for ii in range(len(fits)):
if fits[ii] == fits[ii-1]:
fits[ii-1] = numpy.pi/2.0
minfits = minimum_filter(fits, size=min_distance, mode=brd_mode)
minima_mask = fits == minfits
minima = numpy.transpose(minima_mask.nonzero())
return numpy.asarray(minima) | python | def local_minima(vector,min_distance = 4, brd_mode = "wrap"):
"""
Internal finder for local minima.
Returns UNSORTED indices of minima in input vector.
"""
fits = gaussian_filter(numpy.asarray(vector,dtype=numpy.float32),1., mode=brd_mode)
for ii in range(len(fits)):
if fits[ii] == fits[ii-1]:
fits[ii-1] = numpy.pi/2.0
minfits = minimum_filter(fits, size=min_distance, mode=brd_mode)
minima_mask = fits == minfits
minima = numpy.transpose(minima_mask.nonzero())
return numpy.asarray(minima) | [
"def",
"local_minima",
"(",
"vector",
",",
"min_distance",
"=",
"4",
",",
"brd_mode",
"=",
"\"wrap\"",
")",
":",
"fits",
"=",
"gaussian_filter",
"(",
"numpy",
".",
"asarray",
"(",
"vector",
",",
"dtype",
"=",
"numpy",
".",
"float32",
")",
",",
"1.",
",",
"mode",
"=",
"brd_mode",
")",
"for",
"ii",
"in",
"range",
"(",
"len",
"(",
"fits",
")",
")",
":",
"if",
"fits",
"[",
"ii",
"]",
"==",
"fits",
"[",
"ii",
"-",
"1",
"]",
":",
"fits",
"[",
"ii",
"-",
"1",
"]",
"=",
"numpy",
".",
"pi",
"/",
"2.0",
"minfits",
"=",
"minimum_filter",
"(",
"fits",
",",
"size",
"=",
"min_distance",
",",
"mode",
"=",
"brd_mode",
")",
"minima_mask",
"=",
"fits",
"==",
"minfits",
"minima",
"=",
"numpy",
".",
"transpose",
"(",
"minima_mask",
".",
"nonzero",
"(",
")",
")",
"return",
"numpy",
".",
"asarray",
"(",
"minima",
")"
] | Internal finder for local minima.
Returns UNSORTED indices of minima in input vector. | [
"Internal",
"finder",
"for",
"local",
"minima",
".",
"Returns",
"UNSORTED",
"indices",
"of",
"minima",
"in",
"input",
"vector",
"."
] | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/features/texture.py#L282-L294 |
5,293 | loli/medpy | medpy/features/texture.py | find_valley_range | def find_valley_range(vector, min_distance = 4):
"""
Internal finder for peaks and valley ranges.
Returns UNSORTED indices of maxima in input vector.
Returns range of valleys before and after maximum
"""
# http://users.monash.edu.au/~dengs/resource/papers/icme08.pdf
# find min and max with mode = wrap
mode = "wrap"
minima = local_minima(vector,min_distance,mode)
maxima = local_maxima(vector,min_distance,mode)
if len(maxima)>len(minima):
if vector[maxima[0]] >= vector[maxima[-1]]:
maxima=maxima[1:]
else:
maxima=maxima[:-1]
if len(maxima)==len(minima):
valley_range = numpy.asarray([minima[ii+1] - minima[ii] for ii in range(len(minima)-1)] + [len(vector)-minima[-1]+minima[0]])
if minima[0] < maxima[0]:
minima = numpy.asarray(list(minima) + [minima[0]])
else:
minima = numpy.asarray(list(minima) + [minima[-1]])
else:
valley_range = numpy.asarray([minima[ii+1] - minima[ii] for ii in range(len(maxima))])
return maxima, minima, valley_range | python | def find_valley_range(vector, min_distance = 4):
"""
Internal finder for peaks and valley ranges.
Returns UNSORTED indices of maxima in input vector.
Returns range of valleys before and after maximum
"""
# http://users.monash.edu.au/~dengs/resource/papers/icme08.pdf
# find min and max with mode = wrap
mode = "wrap"
minima = local_minima(vector,min_distance,mode)
maxima = local_maxima(vector,min_distance,mode)
if len(maxima)>len(minima):
if vector[maxima[0]] >= vector[maxima[-1]]:
maxima=maxima[1:]
else:
maxima=maxima[:-1]
if len(maxima)==len(minima):
valley_range = numpy.asarray([minima[ii+1] - minima[ii] for ii in range(len(minima)-1)] + [len(vector)-minima[-1]+minima[0]])
if minima[0] < maxima[0]:
minima = numpy.asarray(list(minima) + [minima[0]])
else:
minima = numpy.asarray(list(minima) + [minima[-1]])
else:
valley_range = numpy.asarray([minima[ii+1] - minima[ii] for ii in range(len(maxima))])
return maxima, minima, valley_range | [
"def",
"find_valley_range",
"(",
"vector",
",",
"min_distance",
"=",
"4",
")",
":",
"# http://users.monash.edu.au/~dengs/resource/papers/icme08.pdf",
"# find min and max with mode = wrap",
"mode",
"=",
"\"wrap\"",
"minima",
"=",
"local_minima",
"(",
"vector",
",",
"min_distance",
",",
"mode",
")",
"maxima",
"=",
"local_maxima",
"(",
"vector",
",",
"min_distance",
",",
"mode",
")",
"if",
"len",
"(",
"maxima",
")",
">",
"len",
"(",
"minima",
")",
":",
"if",
"vector",
"[",
"maxima",
"[",
"0",
"]",
"]",
">=",
"vector",
"[",
"maxima",
"[",
"-",
"1",
"]",
"]",
":",
"maxima",
"=",
"maxima",
"[",
"1",
":",
"]",
"else",
":",
"maxima",
"=",
"maxima",
"[",
":",
"-",
"1",
"]",
"if",
"len",
"(",
"maxima",
")",
"==",
"len",
"(",
"minima",
")",
":",
"valley_range",
"=",
"numpy",
".",
"asarray",
"(",
"[",
"minima",
"[",
"ii",
"+",
"1",
"]",
"-",
"minima",
"[",
"ii",
"]",
"for",
"ii",
"in",
"range",
"(",
"len",
"(",
"minima",
")",
"-",
"1",
")",
"]",
"+",
"[",
"len",
"(",
"vector",
")",
"-",
"minima",
"[",
"-",
"1",
"]",
"+",
"minima",
"[",
"0",
"]",
"]",
")",
"if",
"minima",
"[",
"0",
"]",
"<",
"maxima",
"[",
"0",
"]",
":",
"minima",
"=",
"numpy",
".",
"asarray",
"(",
"list",
"(",
"minima",
")",
"+",
"[",
"minima",
"[",
"0",
"]",
"]",
")",
"else",
":",
"minima",
"=",
"numpy",
".",
"asarray",
"(",
"list",
"(",
"minima",
")",
"+",
"[",
"minima",
"[",
"-",
"1",
"]",
"]",
")",
"else",
":",
"valley_range",
"=",
"numpy",
".",
"asarray",
"(",
"[",
"minima",
"[",
"ii",
"+",
"1",
"]",
"-",
"minima",
"[",
"ii",
"]",
"for",
"ii",
"in",
"range",
"(",
"len",
"(",
"maxima",
")",
")",
"]",
")",
"return",
"maxima",
",",
"minima",
",",
"valley_range"
] | Internal finder for peaks and valley ranges.
Returns UNSORTED indices of maxima in input vector.
Returns range of valleys before and after maximum | [
"Internal",
"finder",
"peaks",
"and",
"valley",
"ranges",
".",
"Returns",
"UNSORTED",
"indices",
"of",
"maxima",
"in",
"input",
"vector",
".",
"Returns",
"range",
"of",
"valleys",
"before",
"and",
"after",
"maximum"
] | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/features/texture.py#L296-L324 |
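A sketch for `find_valley_range` under the same assumptions as above (importable from medpy.features.texture, illustrative signal):

import numpy
from medpy.features.texture import find_valley_range  # assumed import

angles = numpy.linspace(0, 2 * numpy.pi, 160, endpoint=False)
signal = numpy.cos(4 * angles)  # four peaks and four valleys per cycle
maxima, minima, valley_range = find_valley_range(signal)
# one valley range per retained maximum; the ranges wrap around the vector
# end, so together they should tile the whole vector
print(len(maxima), valley_range.sum())  # roughly: 4 160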
5,294 | loli/medpy | medpy/filter/smoothing.py | gauss_xminus1d | def gauss_xminus1d(img, sigma, dim=2):
r"""
Applies an X-1D gauss to a copy of an XD image, slicing it along dim.
Essentially uses `scipy.ndimage.filters.gaussian_filter`, but
applies it to a dimension less than the image has.
Parameters
----------
img : array_like
The image to smooth.
sigma : integer
The sigma, i.e. the gaussian kernel size, in pixels
dim : integer
The dimension along which to apply the filter.
Returns
-------
gauss_xminus1d : ndarray
The input image ``img`` smoothed by a gaussian kernel along dimension ``dim``.
"""
img = numpy.array(img, copy=False)
return xminus1d(img, gaussian_filter, dim, sigma=sigma) | python | def gauss_xminus1d(img, sigma, dim=2):
r"""
Applies an X-1D gauss to a copy of an XD image, slicing it along dim.
Essentially uses `scipy.ndimage.filters.gaussian_filter`, but
applies it to a dimension less than the image has.
Parameters
----------
img : array_like
The image to smooth.
sigma : integer
The sigma, i.e. the gaussian kernel size, in pixels
dim : integer
The dimension along which to apply the filter.
Returns
-------
gauss_xminus1d : ndarray
The input image ``img`` smoothed by a gaussian kernel along dimension ``dim``.
"""
img = numpy.array(img, copy=False)
return xminus1d(img, gaussian_filter, dim, sigma=sigma) | [
"def",
"gauss_xminus1d",
"(",
"img",
",",
"sigma",
",",
"dim",
"=",
"2",
")",
":",
"img",
"=",
"numpy",
".",
"array",
"(",
"img",
",",
"copy",
"=",
"False",
")",
"return",
"xminus1d",
"(",
"img",
",",
"gaussian_filter",
",",
"dim",
",",
"sigma",
"=",
"sigma",
")"
] | r"""
Applies an X-1D gauss to a copy of an XD image, slicing it along dim.
Essentially uses `scipy.ndimage.filters.gaussian_filter`, but
applies it to a dimension less than the image has.
Parameters
----------
img : array_like
The image to smooth.
sigma : integer
The sigma, i.e. the gaussian kernel size, in pixels
dim : integer
The dimension along which to apply the filter.
Returns
-------
gauss_xminus1d : ndarray
The input image ``img`` smoothed by a gaussian kernel along dimension ``dim``. | [
"r",
"Applies",
"a",
"X",
"-",
"1D",
"gauss",
"to",
"a",
"copy",
"of",
"a",
"XD",
"image",
"slicing",
"it",
"along",
"dim",
"."
] | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/filter/smoothing.py#L34-L56 |
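A minimal sketch for `gauss_xminus1d`; the import path is assumed from the source location above:

import numpy
from medpy.filter.smoothing import gauss_xminus1d  # assumed import

volume = numpy.random.rand(20, 20, 10)
# smooth every 2D slice taken along dimension 2 with a gaussian of sigma 1
smoothed = gauss_xminus1d(volume, sigma=1, dim=2)
print(smoothed.shape)  # (20, 20, 10): the shape is preserved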
5,295 | loli/medpy | medpy/filter/smoothing.py | anisotropic_diffusion | def anisotropic_diffusion(img, niter=1, kappa=50, gamma=0.1, voxelspacing=None, option=1):
r"""
Edge-preserving, XD Anisotropic diffusion.
Parameters
----------
img : array_like
Input image (will be cast to numpy.float).
niter : integer
Number of iterations.
kappa : integer
Conduction coefficient, e.g. 20-100. ``kappa`` controls conduction
as a function of the gradient. If ``kappa`` is low small intensity
gradients are able to block conduction and hence diffusion across
steep edges. A large value reduces the influence of intensity gradients
on conduction.
gamma : float
Controls the speed of diffusion. Pick a value :math:`<= .25` for stability.
voxelspacing : tuple of floats or array_like
The distance between adjacent pixels in all img.ndim directions
option : {1, 2, 3}
Whether to use the Perona Malik diffusion equation No. 1 or No. 2,
or Tukey's biweight function.
Equation 1 favours high contrast edges over low contrast ones, while
equation 2 favours wide regions over smaller ones. See [1]_ for details.
Equation 3 preserves sharper boundaries than previous formulations and
improves the automatic stopping of the diffusion. See [2]_ for details.
Returns
-------
anisotropic_diffusion : ndarray
Diffused image.
Notes
-----
Original MATLAB code by Peter Kovesi,
School of Computer Science & Software Engineering,
The University of Western Australia,
pk @ csse uwa edu au,
<http://www.csse.uwa.edu.au>
Translated to Python and optimised by Alistair Muldal,
Department of Pharmacology,
University of Oxford,
<[email protected]>
Adapted to arbitrary dimensionality and added to the MedPy library by Oskar Maier,
Institute for Medical Informatics,
Universitaet Luebeck,
<[email protected]>
June 2000 original version. -
March 2002 corrected diffusion eqn No 2. -
July 2012 translated to Python -
August 2013 incorporated into MedPy, arbitrary dimensionality -
References
----------
.. [1] P. Perona and J. Malik.
Scale-space and edge detection using anisotropic diffusion.
IEEE Transactions on Pattern Analysis and Machine Intelligence,
12(7):629-639, July 1990.
.. [2] M.J. Black, G. Sapiro, D. Marimont, D. Heeger
Robust anisotropic diffusion.
IEEE Transactions on Image Processing,
7(3):421-432, March 1998.
"""
# define conduction gradients functions
if option == 1:
def condgradient(delta, spacing):
return numpy.exp(-(delta/kappa)**2.)/float(spacing)
elif option == 2:
def condgradient(delta, spacing):
return 1./(1.+(delta/kappa)**2.)/float(spacing)
elif option == 3:
kappa_s = kappa * (2**0.5)
def condgradient(delta, spacing):
top = 0.5*((1.-(delta/kappa_s)**2.)**2.)/float(spacing)
return numpy.where(numpy.abs(delta) <= kappa_s, top, 0)
# initialize output array
out = numpy.array(img, dtype=numpy.float32, copy=True)
# set default voxel spacing if not supplied
if voxelspacing is None:
voxelspacing = tuple([1.] * img.ndim)
# initialize some internal variables
deltas = [numpy.zeros_like(out) for _ in range(out.ndim)]
for _ in range(niter):
# calculate the diffs
for i in range(out.ndim):
slicer = [slice(None, -1) if j == i else slice(None) for j in range(out.ndim)]
deltas[i][slicer] = numpy.diff(out, axis=i)
# update matrices
matrices = [condgradient(delta, spacing) * delta for delta, spacing in zip(deltas, voxelspacing)]
# subtract a copy that has been shifted ('Up/North/West' in 3D case) by one
# pixel. Don't as questions. just do it. trust me.
for i in range(out.ndim):
slicer = [slice(1, None) if j == i else slice(None) for j in range(out.ndim)]
matrices[i][slicer] = numpy.diff(matrices[i], axis=i)
# update the image
out += gamma * (numpy.sum(matrices, axis=0))
return out | python | def anisotropic_diffusion(img, niter=1, kappa=50, gamma=0.1, voxelspacing=None, option=1):
r"""
Edge-preserving, XD Anisotropic diffusion.
Parameters
----------
img : array_like
Input image (will be cast to numpy.float).
niter : integer
Number of iterations.
kappa : integer
Conduction coefficient, e.g. 20-100. ``kappa`` controls conduction
as a function of the gradient. If ``kappa`` is low small intensity
gradients are able to block conduction and hence diffusion across
steep edges. A large value reduces the influence of intensity gradients
on conduction.
gamma : float
Controls the speed of diffusion. Pick a value :math:`<= .25` for stability.
voxelspacing : tuple of floats or array_like
The distance between adjacent pixels in all img.ndim directions
option : {1, 2, 3}
Whether to use the Perona Malik diffusion equation No. 1 or No. 2,
or Tukey's biweight function.
Equation 1 favours high contrast edges over low contrast ones, while
equation 2 favours wide regions over smaller ones. See [1]_ for details.
Equation 3 preserves sharper boundaries than previous formulations and
improves the automatic stopping of the diffusion. See [2]_ for details.
Returns
-------
anisotropic_diffusion : ndarray
Diffused image.
Notes
-----
Original MATLAB code by Peter Kovesi,
School of Computer Science & Software Engineering,
The University of Western Australia,
pk @ csse uwa edu au,
<http://www.csse.uwa.edu.au>
Translated to Python and optimised by Alistair Muldal,
Department of Pharmacology,
University of Oxford,
<[email protected]>
Adapted to arbitrary dimensionality and added to the MedPy library by Oskar Maier,
Institute for Medical Informatics,
Universitaet Luebeck,
<[email protected]>
June 2000 original version. -
March 2002 corrected diffusion eqn No 2. -
July 2012 translated to Python -
August 2013 incorporated into MedPy, arbitrary dimensionality -
References
----------
.. [1] P. Perona and J. Malik.
Scale-space and edge detection using anisotropic diffusion.
IEEE Transactions on Pattern Analysis and Machine Intelligence,
12(7):629-639, July 1990.
.. [2] M.J. Black, G. Sapiro, D. Marimont, D. Heeger
Robust anisotropic diffusion.
IEEE Transactions on Image Processing,
7(3):421-432, March 1998.
"""
# define conduction gradients functions
if option == 1:
def condgradient(delta, spacing):
return numpy.exp(-(delta/kappa)**2.)/float(spacing)
elif option == 2:
def condgradient(delta, spacing):
return 1./(1.+(delta/kappa)**2.)/float(spacing)
elif option == 3:
kappa_s = kappa * (2**0.5)
def condgradient(delta, spacing):
top = 0.5*((1.-(delta/kappa_s)**2.)**2.)/float(spacing)
return numpy.where(numpy.abs(delta) <= kappa_s, top, 0)
# initialize output array
out = numpy.array(img, dtype=numpy.float32, copy=True)
# set default voxel spacing if not supplied
if voxelspacing is None:
voxelspacing = tuple([1.] * img.ndim)
# initialize some internal variables
deltas = [numpy.zeros_like(out) for _ in range(out.ndim)]
for _ in range(niter):
# calculate the diffs
for i in range(out.ndim):
slicer = [slice(None, -1) if j == i else slice(None) for j in range(out.ndim)]
deltas[i][slicer] = numpy.diff(out, axis=i)
# update matrices
matrices = [condgradient(delta, spacing) * delta for delta, spacing in zip(deltas, voxelspacing)]
# subtract a copy that has been shifted ('Up/North/West' in 3D case) by one
# pixel. Don't ask questions, just do it. Trust me.
for i in range(out.ndim):
slicer = [slice(1, None) if j == i else slice(None) for j in range(out.ndim)]
matrices[i][slicer] = numpy.diff(matrices[i], axis=i)
# update the image
out += gamma * (numpy.sum(matrices, axis=0))
return out | [
"def",
"anisotropic_diffusion",
"(",
"img",
",",
"niter",
"=",
"1",
",",
"kappa",
"=",
"50",
",",
"gamma",
"=",
"0.1",
",",
"voxelspacing",
"=",
"None",
",",
"option",
"=",
"1",
")",
":",
"# define conduction gradients functions",
"if",
"option",
"==",
"1",
":",
"def",
"condgradient",
"(",
"delta",
",",
"spacing",
")",
":",
"return",
"numpy",
".",
"exp",
"(",
"-",
"(",
"delta",
"/",
"kappa",
")",
"**",
"2.",
")",
"/",
"float",
"(",
"spacing",
")",
"elif",
"option",
"==",
"2",
":",
"def",
"condgradient",
"(",
"delta",
",",
"spacing",
")",
":",
"return",
"1.",
"/",
"(",
"1.",
"+",
"(",
"delta",
"/",
"kappa",
")",
"**",
"2.",
")",
"/",
"float",
"(",
"spacing",
")",
"elif",
"option",
"==",
"3",
":",
"kappa_s",
"=",
"kappa",
"*",
"(",
"2",
"**",
"0.5",
")",
"def",
"condgradient",
"(",
"delta",
",",
"spacing",
")",
":",
"top",
"=",
"0.5",
"*",
"(",
"(",
"1.",
"-",
"(",
"delta",
"/",
"kappa_s",
")",
"**",
"2.",
")",
"**",
"2.",
")",
"/",
"float",
"(",
"spacing",
")",
"return",
"numpy",
".",
"where",
"(",
"numpy",
".",
"abs",
"(",
"delta",
")",
"<=",
"kappa_s",
",",
"top",
",",
"0",
")",
"# initialize output array",
"out",
"=",
"numpy",
".",
"array",
"(",
"img",
",",
"dtype",
"=",
"numpy",
".",
"float32",
",",
"copy",
"=",
"True",
")",
"# set default voxel spacing if not supplied",
"if",
"voxelspacing",
"is",
"None",
":",
"voxelspacing",
"=",
"tuple",
"(",
"[",
"1.",
"]",
"*",
"img",
".",
"ndim",
")",
"# initialize some internal variables",
"deltas",
"=",
"[",
"numpy",
".",
"zeros_like",
"(",
"out",
")",
"for",
"_",
"in",
"range",
"(",
"out",
".",
"ndim",
")",
"]",
"for",
"_",
"in",
"range",
"(",
"niter",
")",
":",
"# calculate the diffs",
"for",
"i",
"in",
"range",
"(",
"out",
".",
"ndim",
")",
":",
"slicer",
"=",
"[",
"slice",
"(",
"None",
",",
"-",
"1",
")",
"if",
"j",
"==",
"i",
"else",
"slice",
"(",
"None",
")",
"for",
"j",
"in",
"range",
"(",
"out",
".",
"ndim",
")",
"]",
"deltas",
"[",
"i",
"]",
"[",
"slicer",
"]",
"=",
"numpy",
".",
"diff",
"(",
"out",
",",
"axis",
"=",
"i",
")",
"# update matrices",
"matrices",
"=",
"[",
"condgradient",
"(",
"delta",
",",
"spacing",
")",
"*",
"delta",
"for",
"delta",
",",
"spacing",
"in",
"zip",
"(",
"deltas",
",",
"voxelspacing",
")",
"]",
"# subtract a copy that has been shifted ('Up/North/West' in 3D case) by one",
"# pixel. Don't as questions. just do it. trust me.",
"for",
"i",
"in",
"range",
"(",
"out",
".",
"ndim",
")",
":",
"slicer",
"=",
"[",
"slice",
"(",
"1",
",",
"None",
")",
"if",
"j",
"==",
"i",
"else",
"slice",
"(",
"None",
")",
"for",
"j",
"in",
"range",
"(",
"out",
".",
"ndim",
")",
"]",
"matrices",
"[",
"i",
"]",
"[",
"slicer",
"]",
"=",
"numpy",
".",
"diff",
"(",
"matrices",
"[",
"i",
"]",
",",
"axis",
"=",
"i",
")",
"# update the image",
"out",
"+=",
"gamma",
"*",
"(",
"numpy",
".",
"sum",
"(",
"matrices",
",",
"axis",
"=",
"0",
")",
")",
"return",
"out"
] | r"""
Edge-preserving, XD Anisotropic diffusion.
Parameters
----------
img : array_like
Input image (will be cast to numpy.float).
niter : integer
Number of iterations.
kappa : integer
Conduction coefficient, e.g. 20-100. ``kappa`` controls conduction
as a function of the gradient. If ``kappa`` is low small intensity
gradients are able to block conduction and hence diffusion across
steep edges. A large value reduces the influence of intensity gradients
on conduction.
gamma : float
Controls the speed of diffusion. Pick a value :math:`<= .25` for stability.
voxelspacing : tuple of floats or array_like
The distance between adjacent pixels in all img.ndim directions
option : {1, 2, 3}
Whether to use the Perona Malik diffusion equation No. 1 or No. 2,
or Tukey's biweight function.
Equation 1 favours high contrast edges over low contrast ones, while
equation 2 favours wide regions over smaller ones. See [1]_ for details.
Equation 3 preserves sharper boundaries than previous formulations and
improves the automatic stopping of the diffusion. See [2]_ for details.
Returns
-------
anisotropic_diffusion : ndarray
Diffused image.
Notes
-----
Original MATLAB code by Peter Kovesi,
School of Computer Science & Software Engineering,
The University of Western Australia,
pk @ csse uwa edu au,
<http://www.csse.uwa.edu.au>
Translated to Python and optimised by Alistair Muldal,
Department of Pharmacology,
University of Oxford,
<[email protected]>
Adapted to arbitrary dimensionality and added to the MedPy library by Oskar Maier,
Institute for Medical Informatics,
Universitaet Luebeck,
<[email protected]>
June 2000 original version. -
March 2002 corrected diffusion eqn No 2. -
July 2012 translated to Python -
August 2013 incorporated into MedPy, arbitrary dimensionality -
References
----------
.. [1] P. Perona and J. Malik.
Scale-space and edge detection using anisotropic diffusion.
IEEE Transactions on Pattern Analysis and Machine Intelligence,
12(7):629-639, July 1990.
.. [2] M.J. Black, G. Sapiro, D. Marimont, D. Heeger
Robust anisotropic diffusion.
IEEE Transactions on Image Processing,
7(3):421-432, March 1998. | [
"r",
"Edge",
"-",
"preserving",
"XD",
"Anisotropic",
"diffusion",
"."
] | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/filter/smoothing.py#L58-L169 |
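A usage sketch for `anisotropic_diffusion`; the import path is assumed from the source location above and the parameter values are purely illustrative:

import numpy
from medpy.filter.smoothing import anisotropic_diffusion  # assumed import

img = numpy.random.rand(64, 64)
# ten iterations of Perona-Malik equation No. 1 on an anisotropic grid
out = anisotropic_diffusion(img, niter=10, kappa=20, gamma=0.1,
                            voxelspacing=(1.0, 2.0), option=1)
print(out.shape, out.dtype)  # (64, 64) float32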
5,296 | loli/medpy | medpy/graphcut/generate.py | __voxel_4conectedness | def __voxel_4conectedness(shape):
"""
Returns the number of edges for the supplied image shape assuming 4-connectedness.
The name of the function has historical reasons. Essentially it returns the number
of edges assuming 4-connectedness only for 2D. For 3D it assumes 6-connectedness,
etc.
@param shape the shape of the image
@type shape sequence
@return the number of edges
@rtype int
"""
shape = list(shape)
while 1 in shape: shape.remove(1) # empty resp. 1-sized dimensions have to be removed (equal to scipy.squeeze on the array)
return int(round(sum([(dim - 1)/float(dim) for dim in shape]) * scipy.prod(shape))) | python | def __voxel_4conectedness(shape):
"""
Returns the number of edges for the supplied image shape assuming 4-connectedness.
The name of the function has historical reasons. Essentially it returns the number
of edges assuming 4-connectedness only for 2D. For 3D it assumes 6-connectedness,
etc.
@param shape the shape of the image
@type shape sequence
@return the number of edges
@rtype int
"""
shape = list(shape)
while 1 in shape: shape.remove(1) # empty resp. 1-sized dimensions have to be removed (equal to scipy.squeeze on the array)
return int(round(sum([(dim - 1)/float(dim) for dim in shape]) * scipy.prod(shape))) | [
"def",
"__voxel_4conectedness",
"(",
"shape",
")",
":",
"shape",
"=",
"list",
"(",
"shape",
")",
"while",
"1",
"in",
"shape",
":",
"shape",
".",
"remove",
"(",
"1",
")",
"# empty resp. 1-sized dimensions have to be removed (equal to scipy.squeeze on the array)",
"return",
"int",
"(",
"round",
"(",
"sum",
"(",
"[",
"(",
"dim",
"-",
"1",
")",
"/",
"float",
"(",
"dim",
")",
"for",
"dim",
"in",
"shape",
"]",
")",
"*",
"scipy",
".",
"prod",
"(",
"shape",
")",
")",
")"
] | Returns the number of edges for the supplied image shape assuming 4-connectedness.
The name of the function has historical reasons. Essentially it returns the number
of edges assuming 4-connectedness only for 2D. For 3D it assumes 6-connectedness,
etc.
@param shape the shape of the image
@type shape sequence
@return the number of edges
@rtype int | [
"Returns",
"the",
"number",
"of",
"edges",
"for",
"the",
"supplied",
"image",
"shape",
"assuming",
"4",
"-",
"connectedness",
".",
"The",
"name",
"of",
"the",
"function",
"has",
"historical",
"reasons",
".",
"Essentially",
"it",
"returns",
"the",
"number",
"of",
"edges",
"assuming",
"4",
"-",
"connectedness",
"only",
"for",
"2D",
".",
"For",
"3D",
"it",
"assumes",
"6",
"-",
"connectedness",
"etc",
"."
] | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/graphcut/generate.py#L316-L331 |
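A quick check of the edge-count formula above. The function is module-private, so assume it has been copied into scope together with its scipy import (it relies on the legacy scipy.prod alias):

# for a 3x3 grid: sum((d-1)/d) * prod(shape) = (2/3 + 2/3) * 9 = 12 edges,
# i.e. 2 * (3 * 2) horizontal plus vertical connections
print(__voxel_4conectedness((3, 3)))     # 12
print(__voxel_4conectedness((3, 3, 1)))  # 12: singleton dimensions are dropped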
5,297 | loli/medpy | medpy/graphcut/energy_voxel.py | __skeleton_base | def __skeleton_base(graph, image, boundary_term, neighbourhood_function, spacing):
"""
Base of the skeleton for voxel based boundary term calculation.
This function holds the low level procedures shared by nearly all boundary terms.
@param graph An initialized graph.GCGraph object
@type graph graph.GCGraph
@param image The image containing the voxel intensity values
@type image numpy.ndarray
@param boundary_term A function to compute the boundary term over an array of
absolute intensity differences
@type boundary_term function
@param neighbourhood_function A function that takes two arrays of neighbouring pixels
and computes an intensity term from them that is
returned as a single array of the same shape
@type neighbourhood_function function
@param spacing A sequence containing the slice spacing used for weighting the
computed neighbourhood weight value for different dimensions. If
False, no distance based weighting of the graph edges is performed.
@type spacing sequence | False
"""
image = scipy.asarray(image)
image = image.astype(scipy.float_)
# iterate over the image dimensions and for each create the appropriate edges and compute the associated weights
for dim in range(image.ndim):
# construct slice-objects for the current dimension
slices_exclude_last = [slice(None)] * image.ndim
slices_exclude_last[dim] = slice(-1)
slices_exclude_first = [slice(None)] * image.ndim
slices_exclude_first[dim] = slice(1, None)
# compute difference between all layers in the current dimensions direction
neighbourhood_intensity_term = neighbourhood_function(image[slices_exclude_last], image[slices_exclude_first])
# apply boundary term
neighbourhood_intensity_term = boundary_term(neighbourhood_intensity_term)
# compute key offset for relative key difference
offset_key = [1 if i == dim else 0 for i in range(image.ndim)]
offset = __flatten_index(offset_key, image.shape)
# generate index offset function for index dependent offset
idx_offset_divider = (image.shape[dim] - 1) * offset
idx_offset = lambda x: int(x / idx_offset_divider) * offset
# weight the computed distances in dimension dim by the corresponding slice spacing provided
if spacing: neighbourhood_intensity_term /= spacing[dim]
for key, value in enumerate(neighbourhood_intensity_term.ravel()):
# apply index dependent offset
key += idx_offset(key)
# add edges and set the weight
graph.set_nweight(key, key + offset, value, value) | python | def __skeleton_base(graph, image, boundary_term, neighbourhood_function, spacing):
"""
Base of the skeleton for voxel based boundary term calculation.
This function holds the low level procedures shared by nearly all boundary terms.
@param graph An initialized graph.GCGraph object
@type graph graph.GCGraph
@param image The image containing the voxel intensity values
@type image numpy.ndarray
@param boundary_term A function to compute the boundary term over an array of
absolute intensity differences
@type boundary_term function
@param neighbourhood_function A function that takes two arrays of neighbouring pixels
and computes an intensity term from them that is
returned as a single array of the same shape
@type neighbourhood_function function
@param spacing A sequence containing the slice spacing used for weighting the
computed neighbourhood weight value for different dimensions. If
False, no distance based weighting of the graph edges is performed.
@type spacing sequence | False
"""
image = scipy.asarray(image)
image = image.astype(scipy.float_)
# iterate over the image dimensions and for each create the appropriate edges and compute the associated weights
for dim in range(image.ndim):
# construct slice-objects for the current dimension
slices_exclude_last = [slice(None)] * image.ndim
slices_exclude_last[dim] = slice(-1)
slices_exclude_first = [slice(None)] * image.ndim
slices_exclude_first[dim] = slice(1, None)
# compute difference between all layers in the current dimensions direction
neighbourhood_intensity_term = neighbourhood_function(image[slices_exclude_last], image[slices_exclude_first])
# apply boundary term
neighbourhood_intensity_term = boundary_term(neighbourhood_intensity_term)
# compute key offset for relative key difference
offset_key = [1 if i == dim else 0 for i in range(image.ndim)]
offset = __flatten_index(offset_key, image.shape)
# generate index offset function for index dependent offset
idx_offset_divider = (image.shape[dim] - 1) * offset
idx_offset = lambda x: int(x / idx_offset_divider) * offset
# weight the computed distances in dimension dim by the corresponding slice spacing provided
if spacing: neighbourhood_intensity_term /= spacing[dim]
for key, value in enumerate(neighbourhood_intensity_term.ravel()):
# apply index dependent offset
key += idx_offset(key)
# add edges and set the weight
graph.set_nweight(key, key + offset, value, value) | [
"def",
"__skeleton_base",
"(",
"graph",
",",
"image",
",",
"boundary_term",
",",
"neighbourhood_function",
",",
"spacing",
")",
":",
"image",
"=",
"scipy",
".",
"asarray",
"(",
"image",
")",
"image",
"=",
"image",
".",
"astype",
"(",
"scipy",
".",
"float_",
")",
"# iterate over the image dimensions and for each create the appropriate edges and compute the associated weights",
"for",
"dim",
"in",
"range",
"(",
"image",
".",
"ndim",
")",
":",
"# construct slice-objects for the current dimension",
"slices_exclude_last",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"image",
".",
"ndim",
"slices_exclude_last",
"[",
"dim",
"]",
"=",
"slice",
"(",
"-",
"1",
")",
"slices_exclude_first",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"image",
".",
"ndim",
"slices_exclude_first",
"[",
"dim",
"]",
"=",
"slice",
"(",
"1",
",",
"None",
")",
"# compute difference between all layers in the current dimensions direction",
"neighbourhood_intensity_term",
"=",
"neighbourhood_function",
"(",
"image",
"[",
"slices_exclude_last",
"]",
",",
"image",
"[",
"slices_exclude_first",
"]",
")",
"# apply boundary term",
"neighbourhood_intensity_term",
"=",
"boundary_term",
"(",
"neighbourhood_intensity_term",
")",
"# compute key offset for relative key difference",
"offset_key",
"=",
"[",
"1",
"if",
"i",
"==",
"dim",
"else",
"0",
"for",
"i",
"in",
"range",
"(",
"image",
".",
"ndim",
")",
"]",
"offset",
"=",
"__flatten_index",
"(",
"offset_key",
",",
"image",
".",
"shape",
")",
"# generate index offset function for index dependent offset",
"idx_offset_divider",
"=",
"(",
"image",
".",
"shape",
"[",
"dim",
"]",
"-",
"1",
")",
"*",
"offset",
"idx_offset",
"=",
"lambda",
"x",
":",
"int",
"(",
"x",
"/",
"idx_offset_divider",
")",
"*",
"offset",
"# weight the computed distanced in dimension dim by the corresponding slice spacing provided",
"if",
"spacing",
":",
"neighbourhood_intensity_term",
"/=",
"spacing",
"[",
"dim",
"]",
"for",
"key",
",",
"value",
"in",
"enumerate",
"(",
"neighbourhood_intensity_term",
".",
"ravel",
"(",
")",
")",
":",
"# apply index dependent offset",
"key",
"+=",
"idx_offset",
"(",
"key",
")",
"# add edges and set the weight",
"graph",
".",
"set_nweight",
"(",
"key",
",",
"key",
"+",
"offset",
",",
"value",
",",
"value",
")"
] | Base of the skeleton for voxel based boundary term calculation.
This function holds the low level procedures shared by nearly all boundary terms.
@param graph An initialized graph.GCGraph object
@type graph graph.GCGraph
@param image The image containing the voxel intensity values
@type image numpy.ndarray
@param boundary_term A function to compute the boundary term over an array of
absolute intensity differences
@type boundary_term function
@param neighbourhood_function A function that takes two arrays of neighbouring pixels
and computes an intensity term from them that is
returned as a single array of the same shape
@type neighbourhood_function function
@param spacing A sequence containing the slice spacing used for weighting the
computed neighbourhood weight value for different dimensions. If
False, no distance based weighting of the graph edges is performed.
@type spacing sequence | False | [
"Base",
"of",
"the",
"skeleton",
"for",
"voxel",
"based",
"boundary",
"term",
"calculation",
".",
"This",
"function",
"holds",
"the",
"low",
"level",
"procedures",
"shared",
"by",
"nearly",
"all",
"boundary",
"terms",
"."
] | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/graphcut/energy_voxel.py#L590-L640 |
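The core trick in `__skeleton_base` is pairing each voxel with its direct successor along one dimension via two complementary slicings. A standalone sketch of just that pattern (no graph object required):

import numpy

image = numpy.arange(12, dtype=float).reshape(3, 4)
dim = 1
last = [slice(None)] * image.ndim   # drop the last element along dim
last[dim] = slice(-1)
first = [slice(None)] * image.ndim  # drop the first element along dim
first[dim] = slice(1, None)
pairs = numpy.abs(image[tuple(last)] - image[tuple(first)])
print(pairs.shape)  # (3, 3): one difference per neighbouring pair along axis 1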
5,298 | loli/medpy | medpy/metric/image.py | __range | def __range(a, bins):
'''Compute the histogram range of the values in the array a according to
scipy.stats.histogram.'''
a = numpy.asarray(a)
a_max = a.max()
a_min = a.min()
s = 0.5 * (a_max - a_min) / float(bins - 1)
return (a_min - s, a_max + s) | python | def __range(a, bins):
'''Compute the histogram range of the values in the array a according to
scipy.stats.histogram.'''
a = numpy.asarray(a)
a_max = a.max()
a_min = a.min()
s = 0.5 * (a_max - a_min) / float(bins - 1)
return (a_min - s, a_max + s) | [
"def",
"__range",
"(",
"a",
",",
"bins",
")",
":",
"a",
"=",
"numpy",
".",
"asarray",
"(",
"a",
")",
"a_max",
"=",
"a",
".",
"max",
"(",
")",
"a_min",
"=",
"a",
".",
"min",
"(",
")",
"s",
"=",
"0.5",
"*",
"(",
"a_max",
"-",
"a_min",
")",
"/",
"float",
"(",
"bins",
"-",
"1",
")",
"return",
"(",
"a_min",
"-",
"s",
",",
"a_max",
"+",
"s",
")"
] | Compute the histogram range of the values in the array a according to
scipy.stats.histogram. | [
"Compute",
"the",
"histogram",
"range",
"of",
"the",
"values",
"in",
"the",
"array",
"a",
"according",
"to",
"scipy",
".",
"stats",
".",
"histogram",
"."
] | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/metric/image.py#L103-L110 |
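The half-bin padding in __range is easy to verify by hand. The standalone transcription below (renamed hist_range for the example, otherwise the same eight lines) shows that the padded range centres each extreme data point inside the outermost bins, matching the default of the long-since-removed scipy.stats.histogram that the docstring refers to.

import numpy as np

def hist_range(a, bins):
    # Pad min and max by half a bin width so the extreme values land in the
    # middle of the outermost bins rather than exactly on the range edges.
    a = np.asarray(a)
    s = 0.5 * (a.max() - a.min()) / float(bins - 1)
    return (a.min() - s, a.max() + s)

values = np.array([0.0, 2.0, 4.0, 6.0])
print(hist_range(values, bins=4))  # (-1.0, 7.0): four bins of width 2.0,
                                   # with 0, 2, 4 and 6 each centred in one bin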
5,299 | loli/medpy | medpy/features/intensity.py | centerdistance | def centerdistance(image, voxelspacing = None, mask = slice(None)):
r"""
Takes a simple or multi-spectral image and returns its voxel-wise center distance in
mm. A multi-spectral image must be supplied as a list or tuple of its spectra.
Optionally a binary mask can be supplied to select the voxels for which the feature
should be extracted.
The center distance is the exact euclidean distance in mm of each voxels center to
the central point of the overal image volume.
Note that this feature is independent of the actual image content, but depends
solely on its shape. Therefore always a one-dimensional feature is returned, even if
a multi-spectral image has been supplied.
Parameters
----------
image : array_like or list/tuple of array_like
A single image or a list/tuple of images (for multi-spectral case).
voxelspacing : sequence of floats
The side-length of each voxel.
mask : array_like
A binary mask for the image.
Returns
-------
centerdistance : ndarray
The distance of each voxel to the images center.
See Also
--------
centerdistance_xdminus1
"""
if type(image) == tuple or type(image) == list:
image = image[0]
return _extract_feature(_extract_centerdistance, image, mask, voxelspacing = voxelspacing) | python | def centerdistance(image, voxelspacing = None, mask = slice(None)):
r"""
Takes a simple or multi-spectral image and returns its voxel-wise center distance in
mm. A multi-spectral image must be supplied as a list or tuple of its spectra.
Optionally a binary mask can be supplied to select the voxels for which the feature
should be extracted.
The center distance is the exact euclidean distance in mm of each voxels center to
the central point of the overal image volume.
Note that this feature is independent of the actual image content, but depends
solely on its shape. Therefore always a one-dimensional feature is returned, even if
a multi-spectral image has been supplied.
Parameters
----------
image : array_like or list/tuple of array_like
A single image or a list/tuple of images (for multi-spectral case).
voxelspacing : sequence of floats
The side-length of each voxel.
mask : array_like
A binary mask for the image.
Returns
-------
centerdistance : ndarray
The distance of each voxel to the images center.
See Also
--------
centerdistance_xdminus1
"""
if type(image) == tuple or type(image) == list:
image = image[0]
return _extract_feature(_extract_centerdistance, image, mask, voxelspacing = voxelspacing) | [
"def",
"centerdistance",
"(",
"image",
",",
"voxelspacing",
"=",
"None",
",",
"mask",
"=",
"slice",
"(",
"None",
")",
")",
":",
"if",
"type",
"(",
"image",
")",
"==",
"tuple",
"or",
"type",
"(",
"image",
")",
"==",
"list",
":",
"image",
"=",
"image",
"[",
"0",
"]",
"return",
"_extract_feature",
"(",
"_extract_centerdistance",
",",
"image",
",",
"mask",
",",
"voxelspacing",
"=",
"voxelspacing",
")"
] | r"""
Takes a simple or multi-spectral image and returns its voxel-wise center distance in
mm. A multi-spectral image must be supplied as a list or tuple of its spectra.
Optionally a binary mask can be supplied to select the voxels for which the feature
should be extracted.
The center distance is the exact euclidean distance in mm of each voxels center to
the central point of the overal image volume.
Note that this feature is independent of the actual image content, but depends
solely on its shape. Therefore always a one-dimensional feature is returned, even if
a multi-spectral image has been supplied.
Parameters
----------
image : array_like or list/tuple of array_like
A single image or a list/tuple of images (for multi-spectral case).
voxelspacing : sequence of floats
The side-length of each voxel.
mask : array_like
A binary mask for the image.
Returns
-------
centerdistance : ndarray
The distance of each voxel to the images center.
See Also
--------
centerdistance_xdminus1 | [
"r",
"Takes",
"a",
"simple",
"or",
"multi",
"-",
"spectral",
"image",
"and",
"returns",
"its",
"voxel",
"-",
"wise",
"center",
"distance",
"in",
"mm",
".",
"A",
"multi",
"-",
"spectral",
"image",
"must",
"be",
"supplied",
"as",
"a",
"list",
"or",
"tuple",
"of",
"its",
"spectra",
".",
"Optionally",
"a",
"binary",
"mask",
"can",
"be",
"supplied",
"to",
"select",
"the",
"voxels",
"for",
"which",
"the",
"feature",
"should",
"be",
"extracted",
".",
"The",
"center",
"distance",
"is",
"the",
"exact",
"euclidean",
"distance",
"in",
"mm",
"of",
"each",
"voxels",
"center",
"to",
"the",
"central",
"point",
"of",
"the",
"overal",
"image",
"volume",
".",
"Note",
"that",
"this",
"feature",
"is",
"independent",
"of",
"the",
"actual",
"image",
"content",
"but",
"depends",
"solely",
"on",
"its",
"shape",
".",
"Therefore",
"always",
"a",
"one",
"-",
"dimensional",
"feature",
"is",
"returned",
"even",
"if",
"a",
"multi",
"-",
"spectral",
"image",
"has",
"been",
"supplied",
"."
] | 95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5 | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/features/intensity.py#L59-L96 |
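As the docstring notes, centerdistance depends only on the image shape and voxel spacing, so its output can be sketched without any image data. The helper below is an illustrative approximation, not medpy's internals (the actual computation lives in the private _extract_centerdistance, which this row does not show); in particular, placing the volume centre at voxel coordinate (n - 1) / 2 on each axis is an assumption about the centre convention.

import numpy as np

def centerdistance_sketch(shape, voxelspacing=None):
    # Euclidean distance in mm from each voxel centre to the volume centre.
    if voxelspacing is None:
        voxelspacing = [1.0] * len(shape)
    grids = np.indices(shape).astype(float)
    dist_sq = np.zeros(shape)
    for axis, (extent, spacing) in enumerate(zip(shape, voxelspacing)):
        centre = (extent - 1) / 2.0  # assumed centre convention
        dist_sq += ((grids[axis] - centre) * spacing) ** 2
    return np.sqrt(dist_sq)

d = centerdistance_sketch((3, 3), voxelspacing=(1.0, 2.0))
print(d[1, 1])  # 0.0 at the centre voxel
print(d[0, 0])  # ~2.236, i.e. sqrt(1**2 + 2**2) for the corner voxel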