id (int32, 0-252k) | repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (1 class) | code (string, 75-19.8k chars) | code_tokens (sequence) | docstring (string, 3-17.3k chars) | docstring_tokens (sequence) | sha (string, 40 chars) | url (string, 87-242 chars) |
---|---|---|---|---|---|---|---|---|---|---|---|
248,800 | minhhoit/yacms | yacms/generic/fields.py | BaseGenericRelation.contribute_to_class | def contribute_to_class(self, cls, name):
"""
Add each of the names and fields in the ``fields`` attribute
to the model the relationship field is applied to, and set up
the related item save and delete signals for calling
``related_items_changed``.
"""
for field in cls._meta.many_to_many:
if isinstance(field, self.__class__):
e = "Multiple %s fields are not supported (%s.%s, %s.%s)" % (
self.__class__.__name__, cls.__name__, name,
cls.__name__, field.name)
raise ImproperlyConfigured(e)
self.related_field_name = name
super(BaseGenericRelation, self).contribute_to_class(cls, name)
# Not applicable to abstract classes, and in fact will break.
if not cls._meta.abstract:
for (name_string, field) in self.fields.items():
if "%s" in name_string:
name_string = name_string % name
extant_fields = cls._meta._forward_fields_map
if name_string in extant_fields:
continue
if field.verbose_name is None:
field.verbose_name = self.verbose_name
cls.add_to_class(name_string, copy(field))
# Add a getter function to the model we can use to retrieve
# the field/manager by name.
getter_name = "get_%s_name" % self.__class__.__name__.lower()
cls.add_to_class(getter_name, lambda self: name)
sender = self.rel.to
post_save.connect(self._related_items_changed, sender=sender)
post_delete.connect(self._related_items_changed, sender=sender) | python | def contribute_to_class(self, cls, name):
"""
Add each of the names and fields in the ``fields`` attribute
to the model the relationship field is applied to, and set up
the related item save and delete signals for calling
``related_items_changed``.
"""
for field in cls._meta.many_to_many:
if isinstance(field, self.__class__):
e = "Multiple %s fields are not supported (%s.%s, %s.%s)" % (
self.__class__.__name__, cls.__name__, name,
cls.__name__, field.name)
raise ImproperlyConfigured(e)
self.related_field_name = name
super(BaseGenericRelation, self).contribute_to_class(cls, name)
# Not applicable to abstract classes, and in fact will break.
if not cls._meta.abstract:
for (name_string, field) in self.fields.items():
if "%s" in name_string:
name_string = name_string % name
extant_fields = cls._meta._forward_fields_map
if name_string in extant_fields:
continue
if field.verbose_name is None:
field.verbose_name = self.verbose_name
cls.add_to_class(name_string, copy(field))
# Add a getter function to the model we can use to retrieve
# the field/manager by name.
getter_name = "get_%s_name" % self.__class__.__name__.lower()
cls.add_to_class(getter_name, lambda self: name)
sender = self.rel.to
post_save.connect(self._related_items_changed, sender=sender)
post_delete.connect(self._related_items_changed, sender=sender) | [
"def",
"contribute_to_class",
"(",
"self",
",",
"cls",
",",
"name",
")",
":",
"for",
"field",
"in",
"cls",
".",
"_meta",
".",
"many_to_many",
":",
"if",
"isinstance",
"(",
"field",
",",
"self",
".",
"__class__",
")",
":",
"e",
"=",
"\"Multiple %s fields are not supported (%s.%s, %s.%s)\"",
"%",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"cls",
".",
"__name__",
",",
"cls",
".",
"__name__",
",",
"name",
",",
"field",
".",
"name",
")",
"raise",
"ImproperlyConfigured",
"(",
"e",
")",
"self",
".",
"related_field_name",
"=",
"name",
"super",
"(",
"BaseGenericRelation",
",",
"self",
")",
".",
"contribute_to_class",
"(",
"cls",
",",
"name",
")",
"# Not applicable to abstract classes, and in fact will break.",
"if",
"not",
"cls",
".",
"_meta",
".",
"abstract",
":",
"for",
"(",
"name_string",
",",
"field",
")",
"in",
"self",
".",
"fields",
".",
"items",
"(",
")",
":",
"if",
"\"%s\"",
"in",
"name_string",
":",
"name_string",
"=",
"name_string",
"%",
"name",
"extant_fields",
"=",
"cls",
".",
"_meta",
".",
"_forward_fields_map",
"if",
"name_string",
"in",
"extant_fields",
":",
"continue",
"if",
"field",
".",
"verbose_name",
"is",
"None",
":",
"field",
".",
"verbose_name",
"=",
"self",
".",
"verbose_name",
"cls",
".",
"add_to_class",
"(",
"name_string",
",",
"copy",
"(",
"field",
")",
")",
"# Add a getter function to the model we can use to retrieve",
"# the field/manager by name.",
"getter_name",
"=",
"\"get_%s_name\"",
"%",
"self",
".",
"__class__",
".",
"__name__",
".",
"lower",
"(",
")",
"cls",
".",
"add_to_class",
"(",
"getter_name",
",",
"lambda",
"self",
":",
"name",
")",
"sender",
"=",
"self",
".",
"rel",
".",
"to",
"post_save",
".",
"connect",
"(",
"self",
".",
"_related_items_changed",
",",
"sender",
"=",
"sender",
")",
"post_delete",
".",
"connect",
"(",
"self",
".",
"_related_items_changed",
",",
"sender",
"=",
"sender",
")"
] | Add each of the names and fields in the ``fields`` attribute
to the model the relationship field is applied to, and set up
the related item save and delete signals for calling
``related_items_changed``. | [
"Add",
"each",
"of",
"the",
"names",
"and",
"fields",
"in",
"the",
"fields",
"attribute",
"to",
"the",
"model",
"the",
"relationship",
"field",
"is",
"applied",
"to",
"and",
"set",
"up",
"the",
"related",
"item",
"save",
"and",
"delete",
"signals",
"for",
"calling",
"related_items_changed",
"."
] | 2921b706b7107c6e8c5f2bbf790ff11f85a2167f | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/generic/fields.py#L57-L90 |
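A minimal sketch of how `contribute_to_class` is reached: declaring one of the concrete subclasses defined later in this file on a model triggers it at class-creation time. The model below is illustrative, not part of the dataset:

```python
# Illustrative only: BlogPost is a made-up model; CommentsField is a
# concrete BaseGenericRelation subclass from yacms.generic.fields.
from django.db import models
from yacms.generic.fields import CommentsField

class BlogPost(models.Model):
    title = models.CharField(max_length=100)
    # contribute_to_class runs here: it adds the extra columns declared in
    # the field's ``fields`` dict (e.g. a "comments_count" IntegerField)
    # and connects post_save/post_delete on the related comment model.
    comments = CommentsField()
```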
248,801 | minhhoit/yacms | yacms/generic/fields.py | BaseGenericRelation._related_items_changed | def _related_items_changed(self, **kwargs):
"""
Ensure that the given related item is actually for the model
this field applies to, and pass the instance to the real
``related_items_changed`` handler.
"""
for_model = kwargs["instance"].content_type.model_class()
if for_model and issubclass(for_model, self.model):
instance_id = kwargs["instance"].object_pk
try:
instance = for_model.objects.get(id=instance_id)
except self.model.DoesNotExist:
# Instance itself was deleted - signals are irrelevant.
return
if hasattr(instance, "get_content_model"):
instance = instance.get_content_model()
related_manager = getattr(instance, self.related_field_name)
self.related_items_changed(instance, related_manager) | python | def _related_items_changed(self, **kwargs):
"""
Ensure that the given related item is actually for the model
this field applies to, and pass the instance to the real
``related_items_changed`` handler.
"""
for_model = kwargs["instance"].content_type.model_class()
if for_model and issubclass(for_model, self.model):
instance_id = kwargs["instance"].object_pk
try:
instance = for_model.objects.get(id=instance_id)
except self.model.DoesNotExist:
# Instance itself was deleted - signals are irrelevant.
return
if hasattr(instance, "get_content_model"):
instance = instance.get_content_model()
related_manager = getattr(instance, self.related_field_name)
self.related_items_changed(instance, related_manager) | [
"def",
"_related_items_changed",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"for_model",
"=",
"kwargs",
"[",
"\"instance\"",
"]",
".",
"content_type",
".",
"model_class",
"(",
")",
"if",
"for_model",
"and",
"issubclass",
"(",
"for_model",
",",
"self",
".",
"model",
")",
":",
"instance_id",
"=",
"kwargs",
"[",
"\"instance\"",
"]",
".",
"object_pk",
"try",
":",
"instance",
"=",
"for_model",
".",
"objects",
".",
"get",
"(",
"id",
"=",
"instance_id",
")",
"except",
"self",
".",
"model",
".",
"DoesNotExist",
":",
"# Instance itself was deleted - signals are irrelevant.",
"return",
"if",
"hasattr",
"(",
"instance",
",",
"\"get_content_model\"",
")",
":",
"instance",
"=",
"instance",
".",
"get_content_model",
"(",
")",
"related_manager",
"=",
"getattr",
"(",
"instance",
",",
"self",
".",
"related_field_name",
")",
"self",
".",
"related_items_changed",
"(",
"instance",
",",
"related_manager",
")"
] | Ensure that the given related item is actually for the model
this field applies to, and pass the instance to the real
``related_items_changed`` handler. | [
"Ensure",
"that",
"the",
"given",
"related",
"item",
"is",
"actually",
"for",
"the",
"model",
"this",
"field",
"applies",
"to",
"and",
"pass",
"the",
"instance",
"to",
"the",
"real",
"related_items_changed",
"handler",
"."
] | 2921b706b7107c6e8c5f2bbf790ff11f85a2167f | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/generic/fields.py#L92-L109 |
248,802 | minhhoit/yacms | yacms/generic/fields.py | CommentsField.related_items_changed | def related_items_changed(self, instance, related_manager):
"""
Stores the number of comments. A custom ``count_queryset``
method gets checked for, allowing managers to implement
custom count logic.
"""
try:
count = related_manager.count_queryset()
except AttributeError:
count = related_manager.count()
count_field_name = list(self.fields.keys())[0] % \
self.related_field_name
setattr(instance, count_field_name, count)
instance.save() | python | def related_items_changed(self, instance, related_manager):
"""
Stores the number of comments. A custom ``count_queryset``
method gets checked for, allowing managers to implement
custom count logic.
"""
try:
count = related_manager.count_queryset()
except AttributeError:
count = related_manager.count()
count_field_name = list(self.fields.keys())[0] % \
self.related_field_name
setattr(instance, count_field_name, count)
instance.save() | [
"def",
"related_items_changed",
"(",
"self",
",",
"instance",
",",
"related_manager",
")",
":",
"try",
":",
"count",
"=",
"related_manager",
".",
"count_queryset",
"(",
")",
"except",
"AttributeError",
":",
"count",
"=",
"related_manager",
".",
"count",
"(",
")",
"count_field_name",
"=",
"list",
"(",
"self",
".",
"fields",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
"%",
"self",
".",
"related_field_name",
"setattr",
"(",
"instance",
",",
"count_field_name",
",",
"count",
")",
"instance",
".",
"save",
"(",
")"
] | Stores the number of comments. A custom ``count_queryset``
method gets checked for, allowing managers to implement
custom count logic. | [
"Stores",
"the",
"number",
"of",
"comments",
".",
"A",
"custom",
"count_filter",
"queryset",
"gets",
"checked",
"for",
"allowing",
"managers",
"to",
"implement",
"custom",
"count",
"logic",
"."
] | 2921b706b7107c6e8c5f2bbf790ff11f85a2167f | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/generic/fields.py#L138-L151 |
248,803 | minhhoit/yacms | yacms/generic/fields.py | KeywordsField.formfield | def formfield(self, **kwargs):
"""
Provide the custom form widget for the admin, since there
isn't a form field mapped to ``GenericRelation`` model fields.
"""
from yacms.generic.forms import KeywordsWidget
kwargs["widget"] = KeywordsWidget
return super(KeywordsField, self).formfield(**kwargs) | python | def formfield(self, **kwargs):
"""
Provide the custom form widget for the admin, since there
isn't a form field mapped to ``GenericRelation`` model fields.
"""
from yacms.generic.forms import KeywordsWidget
kwargs["widget"] = KeywordsWidget
return super(KeywordsField, self).formfield(**kwargs) | [
"def",
"formfield",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"yacms",
".",
"generic",
".",
"forms",
"import",
"KeywordsWidget",
"kwargs",
"[",
"\"widget\"",
"]",
"=",
"KeywordsWidget",
"return",
"super",
"(",
"KeywordsField",
",",
"self",
")",
".",
"formfield",
"(",
"*",
"*",
"kwargs",
")"
] | Provide the custom form widget for the admin, since there
isn't a form field mapped to ``GenericRelation`` model fields. | [
"Provide",
"the",
"custom",
"form",
"widget",
"for",
"the",
"admin",
"since",
"there",
"isn",
"t",
"a",
"form",
"field",
"mapped",
"to",
"GenericRelation",
"model",
"fields",
"."
] | 2921b706b7107c6e8c5f2bbf790ff11f85a2167f | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/generic/fields.py#L174-L181 |
248,804 | minhhoit/yacms | yacms/generic/fields.py | KeywordsField.save_form_data | def save_form_data(self, instance, data):
"""
The ``KeywordsWidget`` field will return data as a string of
comma separated IDs for the ``Keyword`` model - convert these
into actual ``AssignedKeyword`` instances. Also delete
``Keyword`` instances if their last related ``AssignedKeyword``
instance is being removed.
"""
from yacms.generic.models import Keyword
related_manager = getattr(instance, self.name)
# Get a list of Keyword IDs being removed.
old_ids = [str(a.keyword_id) for a in related_manager.all()]
new_ids = data.split(",")
removed_ids = set(old_ids) - set(new_ids)
# Remove current AssignedKeyword instances.
related_manager.all().delete()
# Convert the data into AssignedKeyword instances.
if data:
data = [related_manager.create(keyword_id=i) for i in new_ids]
# Remove keywords that are no longer assigned to anything.
Keyword.objects.delete_unused(removed_ids)
super(KeywordsField, self).save_form_data(instance, data) | python | def save_form_data(self, instance, data):
"""
The ``KeywordsWidget`` field will return data as a string of
comma separated IDs for the ``Keyword`` model - convert these
into actual ``AssignedKeyword`` instances. Also delete
``Keyword`` instances if their last related ``AssignedKeyword``
instance is being removed.
"""
from yacms.generic.models import Keyword
related_manager = getattr(instance, self.name)
# Get a list of Keyword IDs being removed.
old_ids = [str(a.keyword_id) for a in related_manager.all()]
new_ids = data.split(",")
removed_ids = set(old_ids) - set(new_ids)
# Remove current AssignedKeyword instances.
related_manager.all().delete()
# Convert the data into AssignedKeyword instances.
if data:
data = [related_manager.create(keyword_id=i) for i in new_ids]
# Remove keywords that are no longer assigned to anything.
Keyword.objects.delete_unused(removed_ids)
super(KeywordsField, self).save_form_data(instance, data) | [
"def",
"save_form_data",
"(",
"self",
",",
"instance",
",",
"data",
")",
":",
"from",
"yacms",
".",
"generic",
".",
"models",
"import",
"Keyword",
"related_manager",
"=",
"getattr",
"(",
"instance",
",",
"self",
".",
"name",
")",
"# Get a list of Keyword IDs being removed.",
"old_ids",
"=",
"[",
"str",
"(",
"a",
".",
"keyword_id",
")",
"for",
"a",
"in",
"related_manager",
".",
"all",
"(",
")",
"]",
"new_ids",
"=",
"data",
".",
"split",
"(",
"\",\"",
")",
"removed_ids",
"=",
"set",
"(",
"old_ids",
")",
"-",
"set",
"(",
"new_ids",
")",
"# Remove current AssignedKeyword instances.",
"related_manager",
".",
"all",
"(",
")",
".",
"delete",
"(",
")",
"# Convert the data into AssignedKeyword instances.",
"if",
"data",
":",
"data",
"=",
"[",
"related_manager",
".",
"create",
"(",
"keyword_id",
"=",
"i",
")",
"for",
"i",
"in",
"new_ids",
"]",
"# Remove keywords that are no longer assigned to anything.",
"Keyword",
".",
"objects",
".",
"delete_unused",
"(",
"removed_ids",
")",
"super",
"(",
"KeywordsField",
",",
"self",
")",
".",
"save_form_data",
"(",
"instance",
",",
"data",
")"
] | The ``KeywordsWidget`` field will return data as a string of
comma separated IDs for the ``Keyword`` model - convert these
into actual ``AssignedKeyword`` instances. Also delete
``Keyword`` instances if their last related ``AssignedKeyword``
instance is being removed. | [
"The",
"KeywordsWidget",
"field",
"will",
"return",
"data",
"as",
"a",
"string",
"of",
"comma",
"separated",
"IDs",
"for",
"the",
"Keyword",
"model",
"-",
"convert",
"these",
"into",
"actual",
"AssignedKeyword",
"instances",
".",
"Also",
"delete",
"Keyword",
"instances",
"if",
"their",
"last",
"related",
"AssignedKeyword",
"instance",
"is",
"being",
"removed",
"."
] | 2921b706b7107c6e8c5f2bbf790ff11f85a2167f | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/generic/fields.py#L183-L204 |
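A hedged walk-through of the data flow in `save_form_data`; the IDs below are fabricated:

```python
# The KeywordsWidget posts a comma-separated string of Keyword IDs:
data = "3,7,12"
# save_form_data then:
#   1. records the currently assigned keyword IDs,
#   2. deletes all existing AssignedKeyword rows for the instance,
#   3. creates one AssignedKeyword per posted ID via
#      related_manager.create(keyword_id=i),
#   4. calls Keyword.objects.delete_unused() on IDs that were removed.
```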
248,805 | minhhoit/yacms | yacms/generic/fields.py | KeywordsField.contribute_to_class | def contribute_to_class(self, cls, name):
"""
Swap out any reference to ``KeywordsField`` with the
``KEYWORDS_FIELD_string`` field in ``search_fields``.
"""
super(KeywordsField, self).contribute_to_class(cls, name)
string_field_name = list(self.fields.keys())[0] % \
self.related_field_name
if hasattr(cls, "search_fields") and name in cls.search_fields:
try:
weight = cls.search_fields[name]
except TypeError:
# search_fields is a sequence.
index = cls.search_fields.index(name)
search_fields_type = type(cls.search_fields)
cls.search_fields = list(cls.search_fields)
cls.search_fields[index] = string_field_name
cls.search_fields = search_fields_type(cls.search_fields)
else:
del cls.search_fields[name]
cls.search_fields[string_field_name] = weight | python | def contribute_to_class(self, cls, name):
"""
Swap out any reference to ``KeywordsField`` with the
``KEYWORDS_FIELD_string`` field in ``search_fields``.
"""
super(KeywordsField, self).contribute_to_class(cls, name)
string_field_name = list(self.fields.keys())[0] % \
self.related_field_name
if hasattr(cls, "search_fields") and name in cls.search_fields:
try:
weight = cls.search_fields[name]
except TypeError:
# search_fields is a sequence.
index = cls.search_fields.index(name)
search_fields_type = type(cls.search_fields)
cls.search_fields = list(cls.search_fields)
cls.search_fields[index] = string_field_name
cls.search_fields = search_fields_type(cls.search_fields)
else:
del cls.search_fields[name]
cls.search_fields[string_field_name] = weight | [
"def",
"contribute_to_class",
"(",
"self",
",",
"cls",
",",
"name",
")",
":",
"super",
"(",
"KeywordsField",
",",
"self",
")",
".",
"contribute_to_class",
"(",
"cls",
",",
"name",
")",
"string_field_name",
"=",
"list",
"(",
"self",
".",
"fields",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
"%",
"self",
".",
"related_field_name",
"if",
"hasattr",
"(",
"cls",
",",
"\"search_fields\"",
")",
"and",
"name",
"in",
"cls",
".",
"search_fields",
":",
"try",
":",
"weight",
"=",
"cls",
".",
"search_fields",
"[",
"name",
"]",
"except",
"TypeError",
":",
"# search_fields is a sequence.",
"index",
"=",
"cls",
".",
"search_fields",
".",
"index",
"(",
"name",
")",
"search_fields_type",
"=",
"type",
"(",
"cls",
".",
"search_fields",
")",
"cls",
".",
"search_fields",
"=",
"list",
"(",
"cls",
".",
"search_fields",
")",
"cls",
".",
"search_fields",
"[",
"index",
"]",
"=",
"string_field_name",
"cls",
".",
"search_fields",
"=",
"search_fields_type",
"(",
"cls",
".",
"search_fields",
")",
"else",
":",
"del",
"cls",
".",
"search_fields",
"[",
"name",
"]",
"cls",
".",
"search_fields",
"[",
"string_field_name",
"]",
"=",
"weight"
] | Swap out any reference to ``KeywordsField`` with the
``KEYWORDS_FIELD_string`` field in ``search_fields``. | [
"Swap",
"out",
"any",
"reference",
"to",
"KeywordsField",
"with",
"the",
"KEYWORDS_FIELD_string",
"field",
"in",
"search_fields",
"."
] | 2921b706b7107c6e8c5f2bbf790ff11f85a2167f | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/generic/fields.py#L206-L226 |
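A sketch of the `search_fields` rewrite performed above; the model and its base class are illustrative stand-ins:

```python
class Article(Displayable):               # Displayable is hypothetical here
    keywords = KeywordsField()
    search_fields = ("title", "keywords")

# After class creation the entry is swapped for the generated string column,
# preserving the sequence type:
#   Article.search_fields == ("title", "keywords_string")
# A weights dict works too: {"keywords": 10} becomes {"keywords_string": 10}.
```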
248,806 | minhhoit/yacms | yacms/generic/fields.py | KeywordsField.related_items_changed | def related_items_changed(self, instance, related_manager):
"""
Stores the keywords as a single string for searching.
"""
assigned = related_manager.select_related("keyword")
keywords = " ".join([str(a.keyword) for a in assigned])
string_field_name = list(self.fields.keys())[0] % \
self.related_field_name
if getattr(instance, string_field_name) != keywords:
setattr(instance, string_field_name, keywords)
instance.save() | python | def related_items_changed(self, instance, related_manager):
"""
Stores the keywords as a single string for searching.
"""
assigned = related_manager.select_related("keyword")
keywords = " ".join([str(a.keyword) for a in assigned])
string_field_name = list(self.fields.keys())[0] % \
self.related_field_name
if getattr(instance, string_field_name) != keywords:
setattr(instance, string_field_name, keywords)
instance.save() | [
"def",
"related_items_changed",
"(",
"self",
",",
"instance",
",",
"related_manager",
")",
":",
"assigned",
"=",
"related_manager",
".",
"select_related",
"(",
"\"keyword\"",
")",
"keywords",
"=",
"\" \"",
".",
"join",
"(",
"[",
"str",
"(",
"a",
".",
"keyword",
")",
"for",
"a",
"in",
"assigned",
"]",
")",
"string_field_name",
"=",
"list",
"(",
"self",
".",
"fields",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
"%",
"self",
".",
"related_field_name",
"if",
"getattr",
"(",
"instance",
",",
"string_field_name",
")",
"!=",
"keywords",
":",
"setattr",
"(",
"instance",
",",
"string_field_name",
",",
"keywords",
")",
"instance",
".",
"save",
"(",
")"
] | Stores the keywords as a single string for searching. | [
"Stores",
"the",
"keywords",
"as",
"a",
"single",
"string",
"for",
"searching",
"."
] | 2921b706b7107c6e8c5f2bbf790ff11f85a2167f | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/generic/fields.py#L228-L238 |
248,807 | minhhoit/yacms | yacms/generic/fields.py | RatingField.related_items_changed | def related_items_changed(self, instance, related_manager):
"""
Calculates and saves the average rating.
"""
ratings = [r.value for r in related_manager.all()]
count = len(ratings)
_sum = sum(ratings)
average = _sum / count if count > 0 else 0
setattr(instance, "%s_count" % self.related_field_name, count)
setattr(instance, "%s_sum" % self.related_field_name, _sum)
setattr(instance, "%s_average" % self.related_field_name, average)
instance.save() | python | def related_items_changed(self, instance, related_manager):
"""
Calculates and saves the average rating.
"""
ratings = [r.value for r in related_manager.all()]
count = len(ratings)
_sum = sum(ratings)
average = _sum / count if count > 0 else 0
setattr(instance, "%s_count" % self.related_field_name, count)
setattr(instance, "%s_sum" % self.related_field_name, _sum)
setattr(instance, "%s_average" % self.related_field_name, average)
instance.save() | [
"def",
"related_items_changed",
"(",
"self",
",",
"instance",
",",
"related_manager",
")",
":",
"ratings",
"=",
"[",
"r",
".",
"value",
"for",
"r",
"in",
"related_manager",
".",
"all",
"(",
")",
"]",
"count",
"=",
"len",
"(",
"ratings",
")",
"_sum",
"=",
"sum",
"(",
"ratings",
")",
"average",
"=",
"_sum",
"/",
"count",
"if",
"count",
">",
"0",
"else",
"0",
"setattr",
"(",
"instance",
",",
"\"%s_count\"",
"%",
"self",
".",
"related_field_name",
",",
"count",
")",
"setattr",
"(",
"instance",
",",
"\"%s_sum\"",
"%",
"self",
".",
"related_field_name",
",",
"_sum",
")",
"setattr",
"(",
"instance",
",",
"\"%s_average\"",
"%",
"self",
".",
"related_field_name",
",",
"average",
")",
"instance",
".",
"save",
"(",
")"
] | Calculates and saves the average rating. | [
"Calculates",
"and",
"saves",
"the",
"average",
"rating",
"."
] | 2921b706b7107c6e8c5f2bbf790ff11f85a2167f | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/generic/fields.py#L253-L264 |
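A worked example of the average computation; note the module targets Python 2, where `/` on two ints floors:

```python
ratings = [4, 5, 2]
count = len(ratings)     # 3
_sum = sum(ratings)      # 11
average = _sum / count   # 3 on Python 2 (integer division); 3.67 on Python 3
```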
248,808 | jaraco/jaraco.apt | jaraco/apt.py | parse_new_packages | def parse_new_packages(apt_output, include_automatic=False):
"""
Given the output from an apt or aptitude command, determine which packages
are newly-installed.
"""
pat = r'^The following NEW packages will be installed:[\r\n]+(.*?)[\r\n]\w'
matcher = re.search(pat, apt_output, re.DOTALL | re.MULTILINE)
if not matcher:
return []
new_pkg_text = matcher.group(1)
raw_names = re.findall(r'[\w{}\.+-]+', new_pkg_text)
all_packages = list(map(PackageName.from_apt, raw_names))
manual_packages = [pack for pack in all_packages if not pack.automatic]
return all_packages if include_automatic else manual_packages | python | def parse_new_packages(apt_output, include_automatic=False):
"""
Given the output from an apt or aptitude command, determine which packages
are newly-installed.
"""
pat = r'^The following NEW packages will be installed:[\r\n]+(.*?)[\r\n]\w'
matcher = re.search(pat, apt_output, re.DOTALL | re.MULTILINE)
if not matcher:
return []
new_pkg_text = matcher.group(1)
raw_names = re.findall(r'[\w{}\.+-]+', new_pkg_text)
all_packages = list(map(PackageName.from_apt, raw_names))
manual_packages = [pack for pack in all_packages if not pack.automatic]
return all_packages if include_automatic else manual_packages | [
"def",
"parse_new_packages",
"(",
"apt_output",
",",
"include_automatic",
"=",
"False",
")",
":",
"pat",
"=",
"r'^The following NEW packages will be installed:[\\r\\n]+(.*?)[\\r\\n]\\w'",
"matcher",
"=",
"re",
".",
"search",
"(",
"pat",
",",
"apt_output",
",",
"re",
".",
"DOTALL",
"|",
"re",
".",
"MULTILINE",
")",
"if",
"not",
"matcher",
":",
"return",
"[",
"]",
"new_pkg_text",
"=",
"matcher",
".",
"group",
"(",
"1",
")",
"raw_names",
"=",
"re",
".",
"findall",
"(",
"r'[\\w{}\\.+-]+'",
",",
"new_pkg_text",
")",
"all_packages",
"=",
"list",
"(",
"map",
"(",
"PackageName",
".",
"from_apt",
",",
"raw_names",
")",
")",
"manual_packages",
"=",
"[",
"pack",
"for",
"pack",
"in",
"all_packages",
"if",
"not",
"pack",
".",
"automatic",
"]",
"return",
"all_packages",
"if",
"include_automatic",
"else",
"manual_packages"
] | Given the output from an apt or aptitude command, determine which packages
are newly-installed. | [
"Given",
"the",
"output",
"from",
"an",
"apt",
"or",
"aptitude",
"command",
"determine",
"which",
"packages",
"are",
"newly",
"-",
"installed",
"."
] | bf36fc1966c0a633bc509a37617afd5eead66525 | https://github.com/jaraco/jaraco.apt/blob/bf36fc1966c0a633bc509a37617afd5eead66525/jaraco/apt.py#L20-L33 |
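A minimal usage sketch for `parse_new_packages`; the apt output below is fabricated to match the pattern the regex expects, and the `{a}` tag is how aptitude marks automatically installed dependencies:

```python
from jaraco.apt import parse_new_packages

apt_output = (
    "The following NEW packages will be installed:\n"
    "  libfoo1{a} libbar-dev python-baz\n"
    "0 upgraded, 3 newly installed, 0 to remove.\n"
)

# Default: automatically installed dependencies (tagged {a}) are filtered out.
manual = parse_new_packages(apt_output)
everything = parse_new_packages(apt_output, include_automatic=True)
```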
248,809 | pjuren/pyokit | src/pyokit/scripts/join.py | file_iterator | def file_iterator(filehandle, verbose=False):
"""Iterate over a file and yield stripped lines. Optionally show progress."""
if type(filehandle).__name__ == "str":
filehandle = open(filehandle)
if verbose:
try:
pind = ProgressIndicator(totalToDo=os.path.getsize(filehandle.name),
messagePrefix="completed",
messageSuffix="of processing " +
filehandle.name)
except AttributeError:
sys.stderr.write("BEDIterator -- warning: " +
"unable to show progress for stream")
verbose = False
for line in filehandle:
# chomp just the newline char, leave eveerything else alone, so we can
# handle empty columns in the first and last positions
line = line.rstrip('\n')
if verbose:
pind.done = filehandle.tell()
pind.showProgress()
if line == "":
continue
yield line | python | def file_iterator(filehandle, verbose=False):
"""Iterate over a file and yield stripped lines. Optionally show progress."""
if type(filehandle).__name__ == "str":
filehandle = open(filehandle)
if verbose:
try:
pind = ProgressIndicator(totalToDo=os.path.getsize(filehandle.name),
messagePrefix="completed",
messageSuffix="of processing " +
filehandle.name)
except AttributeError:
sys.stderr.write("BEDIterator -- warning: " +
"unable to show progress for stream")
verbose = False
for line in filehandle:
# chomp just the newline char, leave eveerything else alone, so we can
# handle empty columns in the first and last positions
line = line.rstrip('\n')
if verbose:
pind.done = filehandle.tell()
pind.showProgress()
if line == "":
continue
yield line | [
"def",
"file_iterator",
"(",
"filehandle",
",",
"verbose",
"=",
"False",
")",
":",
"if",
"type",
"(",
"filehandle",
")",
".",
"__name__",
"==",
"\"str\"",
":",
"filehandle",
"=",
"open",
"(",
"filehandle",
")",
"if",
"verbose",
":",
"try",
":",
"pind",
"=",
"ProgressIndicator",
"(",
"totalToDo",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"filehandle",
".",
"name",
")",
",",
"messagePrefix",
"=",
"\"completed\"",
",",
"messageSuffix",
"=",
"\"of processing \"",
"+",
"filehandle",
".",
"name",
")",
"except",
"AttributeError",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"BEDIterator -- warning: \"",
"+",
"\"unable to show progress for stream\"",
")",
"verbose",
"=",
"False",
"for",
"line",
"in",
"filehandle",
":",
"# chomp just the newline char, leave eveerything else alone, so we can",
"# handle empty columns in the first and last positions",
"line",
"=",
"line",
".",
"rstrip",
"(",
"'\\n'",
")",
"if",
"verbose",
":",
"pind",
".",
"done",
"=",
"filehandle",
".",
"tell",
"(",
")",
"pind",
".",
"showProgress",
"(",
")",
"if",
"line",
"==",
"\"\"",
":",
"continue",
"yield",
"line"
] | Iterate over a file and yield stripped lines. Optionally show progress. | [
"Iterate",
"over",
"a",
"file",
"and",
"yield",
"stripped",
"lines",
".",
"Optionally",
"show",
"progress",
"."
] | fddae123b5d817daa39496183f19c000d9c3791f | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/scripts/join.py#L273-L298 |
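A hypothetical caller of `file_iterator` ("records.txt" is a placeholder path). Because only the trailing newline is stripped, empty first and last columns survive a later split:

```python
rows = []
for line in file_iterator("records.txt"):
    rows.append(line.split("\t"))  # blank lines were already skipped
```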
248,810 | pjuren/pyokit | src/pyokit/scripts/join.py | _build_entry | def _build_entry(parts, existing_list_d, key_value, key_field_num,
key_is_field_number, header=None,
output_type=OutputType.error_on_dups,
ignore_missing_keys=False, keep_key_col=False):
"""
Build and add an entry to existing_list_d.
If the key is a field number, the entry added will be a list of lists. The
inner list contains one item per column, and the outer list allows more than
one entry to be stored per key (but if allow_duplicates is false, an
exception will be raised if more than one needs to be stored).
:param parts: the (already tokenized) list of column entries.
:param existing_list_d: a dictionary indexed by key value containing the
already processed entries. The new entry will
be added to this using <key_value> as a key
:param key_value: the key value to use to add the new entry to
<existing_list_d>
:param key_field_num: which column is the key_value from (number,
indexed from 0)
:param key_is_field_number: True if the <key_value> is actually the column
index, rather than a column name
:param header: list giving the names of the columns. Can be None
if columns have no names (no header)
:param dup_method: ...
:param ignore_missing_keys: ...
"""
if key_value.strip() == "":
if ignore_missing_keys:
return
raise MissingKeyError("missing key value")
if key_value in existing_list_d:
if output_type is OutputType.error_on_dups:
raise DuplicateKeyError(key_value + " appears multiple times as key")
elif (output_type is OutputType.all_pairwise_combinations or
output_type is OutputType.column_wise_join):
pass # dups okay for these output methods
else:
raise ValueError("Unknown duplicate handling method")
else:
existing_list_d[key_value] = []
if key_is_field_number:
# the entry in the dictionary is a list, minus the key field, in the
# order they occur.
ne = [parts[i] for i in range(0, len(parts))
if i != key_field_num or keep_key_col]
existing_list_d[key_value].append(ne)
else:
# the entry in the dictionary is another dictionary indexed by
# the header value
ne = {}
for i in range(0, len(parts)):
if i == key_field_num and not keep_key_col:
continue
else:
ne[header[i]] = parts[i]
existing_list_d[key_value].append(ne) | python | def _build_entry(parts, existing_list_d, key_value, key_field_num,
key_is_field_number, header=None,
output_type=OutputType.error_on_dups,
ignore_missing_keys=False, keep_key_col=False):
"""
Build and add an entry to existing_list_d.
If the key is a field number, the entry added will be a list of lists. The
inner list contains one item per column, and the outer list allows more than
one entry to be stored per key (but if allow_duplicates is false, an
exception will be raised if more than one needs to be stored).
:param parts: the (already tokenized) list of column entries.
:param existing_list_d: a dictionary indexed by key value containing the
already processed entries. The new entry will
be added to this using <key_value> as a key
:param key_value: the key value to use to add the new entry to
<existing_list_d>
:param key_field_num: which column is the key_value from (number,
indexed from 0)
:param key_is_field_number: True if the <key_value> is actually the column
index, rather than a column name
:param header: list giving the names of the columns. Can be None
if columns have no names (no header)
:param output_type: ...
:param ignore_missing_keys: ...
"""
if key_value.strip() == "":
if ignore_missing_keys:
return
raise MissingKeyError("missing key value")
if key_value in existing_list_d:
if output_type is OutputType.error_on_dups:
raise DuplicateKeyError(key_value + " appears multiple times as key")
elif (output_type is OutputType.all_pairwise_combinations or
output_type is OutputType.column_wise_join):
pass # dups okay for these output methods
else:
raise ValueError("Unknown duplicate handling method")
else:
existing_list_d[key_value] = []
if key_is_field_number:
# the entry in the dictionary is a list, minus the key field, in the
# order they occur.
ne = [parts[i] for i in range(0, len(parts))
if i != key_field_num or keep_key_col]
existing_list_d[key_value].append(ne)
else:
# the entry in the dictionary is another dictionary indexed by
# the header value
ne = {}
for i in range(0, len(parts)):
if i == key_field_num and not keep_key_col:
continue
else:
ne[header[i]] = parts[i]
existing_list_d[key_value].append(ne) | [
"def",
"_build_entry",
"(",
"parts",
",",
"existing_list_d",
",",
"key_value",
",",
"key_field_num",
",",
"key_is_field_number",
",",
"header",
"=",
"None",
",",
"output_type",
"=",
"OutputType",
".",
"error_on_dups",
",",
"ignore_missing_keys",
"=",
"False",
",",
"keep_key_col",
"=",
"False",
")",
":",
"if",
"key_value",
".",
"strip",
"(",
")",
"==",
"\"\"",
":",
"if",
"ignore_missing_keys",
":",
"return",
"raise",
"MissingKeyError",
"(",
"\"missing key value\"",
")",
"if",
"key_value",
"in",
"existing_list_d",
":",
"if",
"output_type",
"is",
"OutputType",
".",
"error_on_dups",
":",
"raise",
"DuplicateKeyError",
"(",
"key_value",
"+",
"\" appears multiple times as key\"",
")",
"elif",
"(",
"output_type",
"is",
"OutputType",
".",
"all_pairwise_combinations",
"or",
"output_type",
"is",
"OutputType",
".",
"column_wise_join",
")",
":",
"pass",
"# dups okay for these output methods",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown duplicate handling method\"",
")",
"else",
":",
"existing_list_d",
"[",
"key_value",
"]",
"=",
"[",
"]",
"if",
"key_is_field_number",
":",
"# the entry in the dictionary is a list, minus the key field, in the",
"# order they occur.",
"ne",
"=",
"[",
"parts",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"parts",
")",
")",
"if",
"i",
"!=",
"key_field_num",
"or",
"keep_key_col",
"]",
"existing_list_d",
"[",
"key_value",
"]",
".",
"append",
"(",
"ne",
")",
"else",
":",
"# the entry in the dictionary is another dictionary indexed by",
"# the header value",
"ne",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"parts",
")",
")",
":",
"if",
"i",
"==",
"key_field_num",
"and",
"not",
"keep_key_col",
":",
"continue",
"else",
":",
"ne",
"[",
"header",
"[",
"i",
"]",
"]",
"=",
"parts",
"[",
"i",
"]",
"existing_list_d",
"[",
"key_value",
"]",
".",
"append",
"(",
"ne",
")"
] | Build and add an entry to existing_list_d.
If the key is a field number, the entry added will be a list of lists. The
inner list contains one item per column, and the outer list allows more than
one entry to be stored per key (but if allow_duplicates is false, an
exception will be raised if more than one needs to be stored).
:param parts: the (already tokenized) list of column entries.
:param existing_list_d: a dictionary indexed by key value containing the
already processed entries. The new entry will
be added to this using <key_value> as a key
:param key_value: the key value to use to add the new entry to
<existing_list_d>
:param key_field_num: which column is the key_value from (number,
indexed from 0)
:param key_is_field_number: True if the <key_value> is actually the column
index, rather than a column name
:param header: list giving the names of the columns. Can be None
if columns have no names (no header)
:param output_type: ...
:param ignore_missing_keys: ... | [
"Build",
"and",
"add",
"an",
"entry",
"to",
"existing_list_d",
"."
] | fddae123b5d817daa39496183f19c000d9c3791f | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/scripts/join.py#L320-L378 |
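A sketch of the accumulator behaviour of `_build_entry` for a numeric key; `OutputType` is the enum defined alongside it in the same script, and a second row with the same key is only legal for the duplicate-tolerant output modes:

```python
d = {}
_build_entry(["k1", "a", "b"], d, key_value="k1", key_field_num=0,
             key_is_field_number=True)
_build_entry(["k1", "c", "d"], d, key_value="k1", key_field_num=0,
             key_is_field_number=True,
             output_type=OutputType.all_pairwise_combinations)
assert d == {"k1": [["a", "b"], ["c", "d"]]}  # key column dropped from rows
```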
248,811 | pjuren/pyokit | src/pyokit/scripts/join.py | __output_unpaired_vals | def __output_unpaired_vals(d_vals, used_ff_keys, f_f_header, sf_d, s_f_header,
missing_val, out_handler, outfh, delim="\t"):
"""
Use an output handler to output keys that could not be paired.
Go over the keys in d_vals and for any that were not used (i.e. not in
used_ff_keys), build an output line using the values from d_vals,
populate the missing columns with missing_val, and output these using the
provided output handler.
"""
if missing_val is None:
raise MissingValueError("Need missing value to output " +
" unpaired lines")
for k in d_vals:
if k not in used_ff_keys:
f_f_flds = d_vals[k]
if s_f_header is not None:
s_f_flds = [dict(zip(s_f_header, [missing_val] * len(s_f_header)))]
else:
s_f_num_cols = len(sf_d[d_vals.keys()[0]][0])
s_f_flds = [[missing_val] * s_f_num_cols]
out_handler.write_output(outfh, delim, s_f_flds, f_f_flds,
s_f_header, f_f_header) | python | def __output_unpaired_vals(d_vals, used_ff_keys, f_f_header, sf_d, s_f_header,
missing_val, out_handler, outfh, delim="\t"):
"""
Use an output handler to output keys that could not be paired.
Go over the keys in d_vals and for any that were not used (i.e. not in
used_ff_keys), build an output line using the values from d_vals,
populated the missing columns with missing_val, and output these using the
provided output hander.
"""
if missing_val is None:
raise MissingValueError("Need missing value to output " +
" unpaired lines")
for k in d_vals:
if k not in used_ff_keys:
f_f_flds = d_vals[k]
if s_f_header is not None:
s_f_flds = [dict(zip(s_f_header, [missing_val] * len(s_f_header)))]
else:
s_f_num_cols = len(sf_d[d_vals.keys()[0]][0])
s_f_flds = [[missing_val] * s_f_num_cols]
out_handler.write_output(outfh, delim, s_f_flds, f_f_flds,
s_f_header, f_f_header) | [
"def",
"__output_unpaired_vals",
"(",
"d_vals",
",",
"used_ff_keys",
",",
"f_f_header",
",",
"sf_d",
",",
"s_f_header",
",",
"missing_val",
",",
"out_handler",
",",
"outfh",
",",
"delim",
"=",
"\"\\t\"",
")",
":",
"if",
"missing_val",
"is",
"None",
":",
"raise",
"MissingValueError",
"(",
"\"Need missing value to output \"",
"+",
"\" unpaired lines\"",
")",
"for",
"k",
"in",
"d_vals",
":",
"if",
"k",
"not",
"in",
"used_ff_keys",
":",
"f_f_flds",
"=",
"d_vals",
"[",
"k",
"]",
"if",
"s_f_header",
"is",
"not",
"None",
":",
"s_f_flds",
"=",
"[",
"dict",
"(",
"zip",
"(",
"s_f_header",
",",
"[",
"missing_val",
"]",
"*",
"len",
"(",
"s_f_header",
")",
")",
")",
"]",
"else",
":",
"s_f_num_cols",
"=",
"len",
"(",
"sf_d",
"[",
"d_vals",
".",
"keys",
"(",
")",
"[",
"0",
"]",
"]",
"[",
"0",
"]",
")",
"s_f_flds",
"=",
"[",
"[",
"missing_val",
"]",
"*",
"s_f_num_cols",
"]",
"out_handler",
".",
"write_output",
"(",
"outfh",
",",
"delim",
",",
"s_f_flds",
",",
"f_f_flds",
",",
"s_f_header",
",",
"f_f_header",
")"
] | Use an output handler to output keys that could not be paired.
Go over the keys in d_vals and for any that were not used (i.e. not in
used_ff_keys), build an output line using the values from d_vals,
populate the missing columns with missing_val, and output these using the
provided output handler. | [
"Use",
"an",
"output",
"handler",
"to",
"output",
"keys",
"that",
"could",
"not",
"be",
"paired",
"."
] | fddae123b5d817daa39496183f19c000d9c3791f | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/scripts/join.py#L418-L440 |
248,812 | pjuren/pyokit | src/pyokit/scripts/join.py | get_key_field | def get_key_field(ui, ui_option_name, default_val=0, default_is_number=True):
"""
parse an option from a UI object as the name of a key field.
If the named option is not set, return the default values for the tuple.
:return: a tuple of two items, first is the value of the option, second is
a boolean value that indicates whether the value is a column name
or a column number (numbers start at 0).
"""
key = default_val
key_is_field_number = default_is_number
if ui.optionIsSet(ui_option_name):
key = ui.getValue(ui_option_name)
try:
key = int(key) - 1
key_is_field_number = True
except ValueError:
key_is_field_number = False
return key, key_is_field_number | python | def get_key_field(ui, ui_option_name, default_val=0, default_is_number=True):
"""
parse an option from a UI object as the name of a key field.
If the named option is not set, return the default values for the tuple.
:return: a tuple of two items, first is the value of the option, second is
a boolean value that indicates whether the value is a column name
or a column number (numbers start at 0).
"""
key = default_val
key_is_field_number = default_is_number
if ui.optionIsSet(ui_option_name):
key = ui.getValue(ui_option_name)
try:
key = int(key) - 1
key_is_field_number = True
except ValueError:
key_is_field_number = False
return key, key_is_field_number | [
"def",
"get_key_field",
"(",
"ui",
",",
"ui_option_name",
",",
"default_val",
"=",
"0",
",",
"default_is_number",
"=",
"True",
")",
":",
"key",
"=",
"default_val",
"key_is_field_number",
"=",
"default_is_number",
"if",
"ui",
".",
"optionIsSet",
"(",
"ui_option_name",
")",
":",
"key",
"=",
"ui",
".",
"getValue",
"(",
"ui_option_name",
")",
"try",
":",
"key",
"=",
"int",
"(",
"key",
")",
"-",
"1",
"key_is_field_number",
"=",
"True",
"except",
"ValueError",
":",
"key_is_field_number",
"=",
"False",
"return",
"key",
",",
"key_is_field_number"
] | parse an option from a UI object as the name of a key field.
If the named option is not set, return the default values for the tuple.
:return: a tuple of two items, first is the value of the option, second is
a boolean value that indicates whether the value is a column name
or a column number (numbers start at 0). | [
"parse",
"an",
"option",
"from",
"a",
"UI",
"object",
"as",
"the",
"name",
"of",
"a",
"key",
"field",
"."
] | fddae123b5d817daa39496183f19c000d9c3791f | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/scripts/join.py#L511-L530 |
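A sketch of `get_key_field` with a stub standing in for pyokit's CLI object; only the two methods the function touches are stubbed:

```python
class StubUI(object):
    def __init__(self, opts):
        self.opts = opts
    def optionIsSet(self, name):
        return name in self.opts
    def getValue(self, name):
        return self.opts[name]

ui = StubUI({"key-one": "3", "key-two": "gene_name"})
get_key_field(ui, "key-one")     # -> (2, True): numbers become 0-based
get_key_field(ui, "key-two")     # -> ("gene_name", False): a column name
get_key_field(ui, "key-absent")  # -> (0, True): the defaults
```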
248,813 | pjuren/pyokit | src/pyokit/scripts/join.py | populate_unpaired_line | def populate_unpaired_line(d_vals, f_f_header, missing_val=None):
"""
used when a value in d_vals doesn't match anything in the other file.
:return: a list containing a single row of missing values, sized to match
the other file's columns.
"""
if missing_val is None:
raise MissingValueError("Need missing value to output " +
" unpaired lines")
if f_f_header is not None:
f_f_flds = [dict(zip(f_f_header, [missing_val] * len(f_f_header)))]
else:
assert(len(d_vals) > 0)
f_f_num_cols = len(d_vals[d_vals.keys()[0]][0])
f_f_flds = [[missing_val] * f_f_num_cols]
return f_f_flds | python | def populate_unpaired_line(d_vals, f_f_header, missing_val=None):
"""
used when a value in d_vals doesn't match anything in the other file.
:return: a list containing a single row of missing values, sized to match
the other file's columns.
"""
if missing_val is None:
raise MissingValueError("Need missing value to output " +
" unpaired lines")
if f_f_header is not None:
f_f_flds = [dict(zip(f_f_header, [missing_val] * len(f_f_header)))]
else:
assert(len(d_vals) > 0)
f_f_num_cols = len(d_vals[d_vals.keys()[0]][0])
f_f_flds = [[missing_val] * f_f_num_cols]
return f_f_flds | [
"def",
"populate_unpaired_line",
"(",
"d_vals",
",",
"f_f_header",
",",
"missing_val",
"=",
"None",
")",
":",
"if",
"missing_val",
"is",
"None",
":",
"raise",
"MissingValueError",
"(",
"\"Need missing value to output \"",
"+",
"\" unpaired lines\"",
")",
"if",
"f_f_header",
"is",
"not",
"None",
":",
"f_f_flds",
"=",
"[",
"dict",
"(",
"zip",
"(",
"f_f_header",
",",
"[",
"missing_val",
"]",
"*",
"len",
"(",
"f_f_header",
")",
")",
")",
"]",
"else",
":",
"assert",
"(",
"len",
"(",
"d_vals",
")",
">",
"0",
")",
"f_f_num_cols",
"=",
"len",
"(",
"d_vals",
"[",
"d_vals",
".",
"keys",
"(",
")",
"[",
"0",
"]",
"]",
"[",
"0",
"]",
")",
"f_f_flds",
"=",
"[",
"[",
"missing_val",
"]",
"*",
"f_f_num_cols",
"]",
"return",
"f_f_flds"
] | used when a value in d_vals doesn't match anything in the other file.
:return: a list containing a single row of missing values, sized to match
the other file's columns. | [
"used",
"when",
"a",
"value",
"in",
"d_vals",
"doesn",
"t",
"match",
"anything",
"in",
"the",
"other",
"file",
"."
] | fddae123b5d817daa39496183f19c000d9c3791f | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/scripts/join.py#L593-L609 |
248,814 | pjuren/pyokit | src/pyokit/scripts/join.py | build_mock_open_side_effect | def build_mock_open_side_effect(string_d, stream_d):
"""
Build a mock open side effect using a dictionary of content for the files.
:param string_d: keys are file names, values are string file contents
:param stream_d: keys are file names, values are stream of contents
"""
assert(len(set(string_d.keys()).intersection(set(stream_d.keys()))) == 0)
def mock_open_side_effect(*args, **kwargs):
if args[0] in string_d:
return StringIO.StringIO(string_d[args[0]])
elif args[0] in stream_d:
return stream_d[args[0]]
else:
raise IOError("No such file: " + args[0])
return mock_open_side_effect | python | def build_mock_open_side_effect(string_d, stream_d):
"""
Build a mock open side effect using a dictionary of content for the files.
:param string_d: keys are file names, values are string file contents
:param stream_d: keys are file names, values are stream of contents
"""
assert(len(set(string_d.keys()).intersection(set(stream_d.keys()))) == 0)
def mock_open_side_effect(*args, **kwargs):
if args[0] in string_d:
return StringIO.StringIO(string_d[args[0]])
elif args[0] in stream_d:
return stream_d[args[0]]
else:
raise IOError("No such file: " + args[0])
return mock_open_side_effect | [
"def",
"build_mock_open_side_effect",
"(",
"string_d",
",",
"stream_d",
")",
":",
"assert",
"(",
"len",
"(",
"set",
"(",
"string_d",
".",
"keys",
"(",
")",
")",
".",
"intersection",
"(",
"set",
"(",
"stream_d",
".",
"keys",
"(",
")",
")",
")",
")",
"==",
"0",
")",
"def",
"mock_open_side_effect",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"args",
"[",
"0",
"]",
"in",
"string_d",
":",
"return",
"StringIO",
".",
"StringIO",
"(",
"string_d",
"[",
"args",
"[",
"0",
"]",
"]",
")",
"elif",
"args",
"[",
"0",
"]",
"in",
"stream_d",
":",
"return",
"stream_d",
"[",
"args",
"[",
"0",
"]",
"]",
"else",
":",
"raise",
"IOError",
"(",
"\"No such file: \"",
"+",
"args",
"[",
"0",
"]",
")",
"return",
"mock_open_side_effect"
] | Build a mock open side effect using a dictionary of content for the files.
:param string_d: keys are file names, values are string file contents
:param stream_d: keys are file names, values are stream of contents | [
"Build",
"a",
"mock",
"open",
"side",
"effect",
"using",
"a",
"dictionary",
"of",
"content",
"for",
"the",
"files",
"."
] | fddae123b5d817daa39496183f19c000d9c3791f | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/scripts/join.py#L799-L815 |
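A hedged sketch of wiring the side effect into a test; the surrounding module is Python 2 (it uses `StringIO.StringIO`), so the built-in `open` lives in `__builtin__`, and the standalone `mock` package is assumed:

```python
import mock

string_d = {"infile.txt": "a\tb\nc\td\n"}
fake_open = mock.Mock(side_effect=build_mock_open_side_effect(string_d, {}))
with mock.patch("__builtin__.open", fake_open):
    print(open("infile.txt").read())  # -> the canned contents above
    # open("missing.txt") would raise IOError("No such file: missing.txt")
```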
248,815 | pjuren/pyokit | src/pyokit/scripts/join.py | OutputHandlerBase.write_header | def write_header(self, out_strm, delim, f1_num_fields, f2_num_fields,
f1_header=None, f2_header=None, missing_val=None):
"""
Write the header for a joined file. If headers are provided for one or more
of the input files, then a header is generated for the output file.
Otherwise, this does not output anything.
:param out_strm: write to this stream
:param delim:
:param f1_num_fields: the number of columns in the first file
:param f2_num_fields: the number of columns in the second file
:param f1_header:
:param f2_header:
:param missing_val:
"""
mm = f1_header != f2_header
one_none = f1_header is None or f2_header is None
if mm and one_none and missing_val is None:
raise InvalidHeaderError("Cannot generate output header when one " +
"input file is missing a header and no " +
"missing value was provided to replace " +
"unknown entries.")
if f1_header is not None and f2_header is not None:
out_strm.write(delim.join(f1_header) + delim +
delim.join(f2_header) + "\n")
elif f1_header is None and f2_header is not None:
dummy_h = f1_num_fields * [missing_val]
out_strm.write(delim.join(dummy_h) + delim +
delim.join(f2_header) + "\n")
elif f1_header is not None and f2_header is None:
dummy_h = f2_num_fields * [missing_val]
out_strm.write(delim.join(f1_header) + delim +
delim.join(dummy_h) + "\n") | python | def write_header(self, out_strm, delim, f1_num_fields, f2_num_fields,
f1_header=None, f2_header=None, missing_val=None):
"""
Write the header for a joined file. If headers are provided for one or more
of the input files, then a header is generated for the output file.
Otherwise, this does not output anything.
:param out_strm: write to this stream
:param delim:
:param f1_num_fields: the number of columns in the first file
:param f2_num_fields: the number of columns in the second file
:param f1_header:
:param f2_header:
:param missing_val:
"""
mm = f1_header != f2_header
one_none = f1_header is None or f2_header is None
if mm and one_none and missing_val is None:
raise InvalidHeaderError("Cannot generate output header when one " +
"input file is missing a header and no " +
"missing value was provided to replace " +
"unknown entries.")
if f1_header is not None and f2_header is not None:
out_strm.write(delim.join(f1_header) + delim +
delim.join(f2_header) + "\n")
elif f1_header is None and f2_header is not None:
dummy_h = f1_num_fields * [missing_val]
out_strm.write(delim.join(dummy_h) + delim +
delim.join(f2_header) + "\n")
elif f1_header is not None and f2_header is None:
dummy_h = f2_num_fields * [missing_val]
out_strm.write(delim.join(f1_header) + delim +
delim.join(dummy_h) + "\n") | [
"def",
"write_header",
"(",
"self",
",",
"out_strm",
",",
"delim",
",",
"f1_num_fields",
",",
"f2_num_fields",
",",
"f1_header",
"=",
"None",
",",
"f2_header",
"=",
"None",
",",
"missing_val",
"=",
"None",
")",
":",
"mm",
"=",
"f1_header",
"!=",
"f2_header",
"one_none",
"=",
"f1_header",
"is",
"None",
"or",
"f2_header",
"is",
"None",
"if",
"mm",
"and",
"one_none",
"and",
"missing_val",
"is",
"None",
":",
"raise",
"InvalidHeaderError",
"(",
"\"Cannot generate output header when one \"",
"+",
"\"input file is missing a header and no \"",
"+",
"\"missing value was provided to replace \"",
"+",
"\"unknown entries.\"",
")",
"if",
"f1_header",
"is",
"not",
"None",
"and",
"f2_header",
"is",
"not",
"None",
":",
"out_strm",
".",
"write",
"(",
"delim",
".",
"join",
"(",
"f1_header",
")",
"+",
"delim",
"+",
"delim",
".",
"join",
"(",
"f2_header",
")",
"+",
"\"\\n\"",
")",
"elif",
"f1_header",
"is",
"None",
"and",
"f2_header",
"is",
"not",
"None",
":",
"dummy_h",
"=",
"f1_num_fields",
"*",
"[",
"missing_val",
"]",
"out_strm",
".",
"write",
"(",
"delim",
".",
"join",
"(",
"dummy_h",
")",
"+",
"delim",
"+",
"delim",
".",
"join",
"(",
"f2_header",
")",
"+",
"\"\\n\"",
")",
"elif",
"f1_header",
"is",
"not",
"None",
"and",
"f2_header",
"is",
"None",
":",
"dummy_h",
"=",
"f2_num_fields",
"*",
"[",
"missing_val",
"]",
"out_strm",
".",
"write",
"(",
"delim",
".",
"join",
"(",
"f1_header",
")",
"+",
"delim",
"+",
"delim",
".",
"join",
"(",
"dummy_h",
")",
"+",
"\"\\n\"",
")"
] | Write the header for a joined file. If headers are provided for one or more
of the input files, then a header is generated for the output file.
Otherwise, this does not output anything.
:param out_strm: write to this stream
:param delim:
:param f1_num_fields: the number of columns in the first file
:param f2_num_fields: the number of columns in the second file
:param f1_header:
:param f2_header:
:param missing_val: | [
"Write",
"the",
"header",
"for",
"a",
"joined",
"file",
".",
"If",
"headers",
"are",
"provided",
"for",
"one",
"or",
"more",
"of",
"the",
"input",
"files",
"then",
"a",
"header",
"is",
"generated",
"for",
"the",
"output",
"file",
".",
"Otherwise",
"this",
"does",
"not",
"output",
"anything",
"."
] | fddae123b5d817daa39496183f19c000d9c3791f | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/scripts/join.py#L104-L137 |
248,816 | emencia/emencia-django-forum | forum/markup.py | clean_restructuredtext | def clean_restructuredtext(form_instance, content):
"""
RST syntax validation
"""
if content:
errors = SourceReporter(content)
if errors:
raise ValidationError(map(map_parsing_errors, errors))
return content | python | def clean_restructuredtext(form_instance, content):
"""
RST syntax validation
"""
if content:
errors = SourceReporter(content)
if errors:
raise ValidationError(map(map_parsing_errors, errors))
return content | [
"def",
"clean_restructuredtext",
"(",
"form_instance",
",",
"content",
")",
":",
"if",
"content",
":",
"errors",
"=",
"SourceReporter",
"(",
"content",
")",
"if",
"errors",
":",
"raise",
"ValidationError",
"(",
"map",
"(",
"map_parsing_errors",
",",
"errors",
")",
")",
"return",
"content"
] | RST syntax validation | [
"RST",
"syntax",
"validation"
] | cda74ed7e5822675c340ee5ec71548d981bccd3b | https://github.com/emencia/emencia-django-forum/blob/cda74ed7e5822675c340ee5ec71548d981bccd3b/forum/markup.py#L20-L28 |
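A sketch of the intended call site, a Django form's `clean_<field>` hook; the form and field names are illustrative:

```python
from django import forms
from forum.markup import clean_restructuredtext

class ThreadForm(forms.Form):
    text = forms.CharField(widget=forms.Textarea)

    def clean_text(self):
        # Raises ValidationError listing each RST parsing error found.
        return clean_restructuredtext(self, self.cleaned_data["text"])
```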
248,817 | MrKriss/vigilance | vigilance/decorators.py | returns | def returns(*checkers_args):
""" Create a decorator for validating function return values.
Parameters
----------
checkers_args: positional arguments
One or more functions to apply to the output of the decorated function. If a tuple is returned
by the decorated function, multiple functions can be listed and are assumed to match by
position to the elements in the returned tuple.
Examples
--------
@returns(df_checker)
def do_something_with_df(df, *args, **kwargs):
print(df.head())
return df
@returns(df_checker1, df_checker2)
def do_something_with_dfs(df1, df2, *args, **kwargs):
# Do something with both dfs
return (df1, df2)
"""
@decorator
def run_checkers(func, *args, **kwargs):
ret = func(*args, **kwargs)
if type(ret) != tuple:
ret = (ret, )
assert len(ret) == len(checkers_args)
if checkers_args:
for idx, checker_function in enumerate(checkers_args):
if callable(checker_function):
result = checker_function(ret[idx])
return ret
return run_checkers | python | def returns(*checkers_args):
""" Create a decorator for validating function return values.
Parameters
----------
checkers_args: positional arguments
One or more functions to apply to the output of the decorated function. If a tuple is returned
by the decorated function, multiple functions can be listed and are assumed to match by
position to the elements in the returned tuple.
Examples
--------
@returns(df_checker)
def do_something_with_df(df, *args, **kwargs):
print(df.head())
return df
@returns(df_checker1, df_checker2)
def do_something_with_dfs(df1, df2, *args, **kwargs):
# Do something with both dfs
return (df1, df2)
"""
@decorator
def run_checkers(func, *args, **kwargs):
ret = func(*args, **kwargs)
if type(ret) != tuple:
ret = (ret, )
assert len(ret) == len(checkers_args)
if checkers_args:
for idx, checker_function in enumerate(checkers_args):
if callable(checker_function):
result = checker_function(ret[idx])
return ret
return run_checkers | [
"def",
"returns",
"(",
"*",
"checkers_args",
")",
":",
"@",
"decorator",
"def",
"run_checkers",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"ret",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"type",
"(",
"ret",
")",
"!=",
"tuple",
":",
"ret",
"=",
"(",
"ret",
",",
")",
"assert",
"len",
"(",
"ret",
")",
"==",
"len",
"(",
"checkers_args",
")",
"if",
"checkers_args",
":",
"for",
"idx",
",",
"checker_function",
"in",
"enumerate",
"(",
"checkers_args",
")",
":",
"if",
"callable",
"(",
"checker_function",
")",
":",
"result",
"=",
"checker_function",
"(",
"ret",
"[",
"idx",
"]",
")",
"return",
"ret",
"return",
"run_checkers"
] | Create a decorator for validating function return values.
Parameters
----------
checkers_args: positional arguments
One or more functions to apply to the output of the decorated function. If a tuple is returned
by the decorated function, multiple functions can be listed and are assumed to match by
position to the elements in the returned tuple.
Examples
--------
@returns(df_checker)
def do_something_with_df(df, *args, **kwargs):
print(df.head())
return df
@returns(df_checker1, df_checker2)
def do_something_with_dfs(df1, df2, *args, **kwargs):
# Do something with both dfs
return (df1, df2) | [
"Create",
"a",
"decorator",
"for",
"validating",
"function",
"return",
"values",
"."
] | 2946b09f524c042c12d796f111f287866e7a3c67 | https://github.com/MrKriss/vigilance/blob/2946b09f524c042c12d796f111f287866e7a3c67/vigilance/decorators.py#L54-L90 |
248,818 | cirruscluster/cirruscluster | cirruscluster/ext/ansible/callbacks.py | regular_generic_msg | def regular_generic_msg(hostname, result, oneline, caption):
''' output on the result of a module run that is not command '''
if not oneline:
return "%s | %s >> %s\n" % (hostname, caption, utils.jsonify(result,format=True))
else:
return "%s | %s >> %s\n" % (hostname, caption, utils.jsonify(result)) | python | def regular_generic_msg(hostname, result, oneline, caption):
''' output the result of a module run that is not a command '''
if not oneline:
return "%s | %s >> %s\n" % (hostname, caption, utils.jsonify(result,format=True))
else:
return "%s | %s >> %s\n" % (hostname, caption, utils.jsonify(result)) | [
"def",
"regular_generic_msg",
"(",
"hostname",
",",
"result",
",",
"oneline",
",",
"caption",
")",
":",
"if",
"not",
"oneline",
":",
"return",
"\"%s | %s >> %s\\n\"",
"%",
"(",
"hostname",
",",
"caption",
",",
"utils",
".",
"jsonify",
"(",
"result",
",",
"format",
"=",
"True",
")",
")",
"else",
":",
"return",
"\"%s | %s >> %s\\n\"",
"%",
"(",
"hostname",
",",
"caption",
",",
"utils",
".",
"jsonify",
"(",
"result",
")",
")"
] | output the result of a module run that is not a command | [
"output",
"on",
"the",
"result",
"of",
"a",
"module",
"run",
"that",
"is",
"not",
"command"
] | 977409929dd81322d886425cdced10608117d5d7 | https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/callbacks.py#L115-L121 |
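As a rough, runnable approximation of the formatter above — json.dumps stands in for utils.jsonify, which is an assumption about what that helper produces:

import json

def generic_msg_sketch(hostname, result, oneline, caption):
    # indent=4 approximates jsonify(result, format=True)
    body = json.dumps(result) if oneline else json.dumps(result, indent=4)
    return "%s | %s >> %s\n" % (hostname, caption, body)

print(generic_msg_sketch("web1", {"changed": False}, True, "success"))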
248,819 | cirruscluster/cirruscluster | cirruscluster/ext/ansible/callbacks.py | command_generic_msg | def command_generic_msg(hostname, result, oneline, caption):
''' output the result of a command run '''
rc = result.get('rc', '0')
stdout = result.get('stdout','')
stderr = result.get('stderr', '')
msg = result.get('msg', '')
hostname = hostname.encode('utf-8')
caption = caption.encode('utf-8')
if not oneline:
buf = "%s | %s | rc=%s >>\n" % (hostname, caption, result.get('rc',0))
if stdout:
buf += stdout
if stderr:
buf += stderr
if msg:
buf += msg
return buf + "\n"
else:
if stderr:
return "%s | %s | rc=%s | (stdout) %s (stderr) %s" % (hostname, caption, rc, stdout, stderr)
else:
return "%s | %s | rc=%s | (stdout) %s" % (hostname, caption, rc, stdout) | python | def command_generic_msg(hostname, result, oneline, caption):
''' output the result of a command run '''
rc = result.get('rc', '0')
stdout = result.get('stdout','')
stderr = result.get('stderr', '')
msg = result.get('msg', '')
hostname = hostname.encode('utf-8')
caption = caption.encode('utf-8')
if not oneline:
buf = "%s | %s | rc=%s >>\n" % (hostname, caption, result.get('rc',0))
if stdout:
buf += stdout
if stderr:
buf += stderr
if msg:
buf += msg
return buf + "\n"
else:
if stderr:
return "%s | %s | rc=%s | (stdout) %s (stderr) %s" % (hostname, caption, rc, stdout, stderr)
else:
return "%s | %s | rc=%s | (stdout) %s" % (hostname, caption, rc, stdout) | [
"def",
"command_generic_msg",
"(",
"hostname",
",",
"result",
",",
"oneline",
",",
"caption",
")",
":",
"rc",
"=",
"result",
".",
"get",
"(",
"'rc'",
",",
"'0'",
")",
"stdout",
"=",
"result",
".",
"get",
"(",
"'stdout'",
",",
"''",
")",
"stderr",
"=",
"result",
".",
"get",
"(",
"'stderr'",
",",
"''",
")",
"msg",
"=",
"result",
".",
"get",
"(",
"'msg'",
",",
"''",
")",
"hostname",
"=",
"hostname",
".",
"encode",
"(",
"'utf-8'",
")",
"caption",
"=",
"caption",
".",
"encode",
"(",
"'utf-8'",
")",
"if",
"not",
"oneline",
":",
"buf",
"=",
"\"%s | %s | rc=%s >>\\n\"",
"%",
"(",
"hostname",
",",
"caption",
",",
"result",
".",
"get",
"(",
"'rc'",
",",
"0",
")",
")",
"if",
"stdout",
":",
"buf",
"+=",
"stdout",
"if",
"stderr",
":",
"buf",
"+=",
"stderr",
"if",
"msg",
":",
"buf",
"+=",
"msg",
"return",
"buf",
"+",
"\"\\n\"",
"else",
":",
"if",
"stderr",
":",
"return",
"\"%s | %s | rc=%s | (stdout) %s (stderr) %s\"",
"%",
"(",
"hostname",
",",
"caption",
",",
"rc",
",",
"stdout",
",",
"stderr",
")",
"else",
":",
"return",
"\"%s | %s | rc=%s | (stdout) %s\"",
"%",
"(",
"hostname",
",",
"caption",
",",
"rc",
",",
"stdout",
")"
] | output the result of a command run | [
"output",
"the",
"result",
"of",
"a",
"command",
"run"
] | 977409929dd81322d886425cdced10608117d5d7 | https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/callbacks.py#L134-L158 |
248,820 | cirruscluster/cirruscluster | cirruscluster/ext/ansible/callbacks.py | host_report_msg | def host_report_msg(hostname, module_name, result, oneline):
''' summarize the JSON results for a particular host '''
failed = utils.is_failed(result)
msg = ''
if module_name in [ 'command', 'shell', 'raw' ] and 'ansible_job_id' not in result and result.get('parsed',True) != False:
if not failed:
msg = command_generic_msg(hostname, result, oneline, 'success')
else:
msg = command_generic_msg(hostname, result, oneline, 'FAILED')
else:
if not failed:
msg = regular_generic_msg(hostname, result, oneline, 'success')
else:
msg = regular_generic_msg(hostname, result, oneline, 'FAILED')
return msg | python | def host_report_msg(hostname, module_name, result, oneline):
''' summarize the JSON results for a particular host '''
failed = utils.is_failed(result)
msg = ''
if module_name in [ 'command', 'shell', 'raw' ] and 'ansible_job_id' not in result and result.get('parsed',True) != False:
if not failed:
msg = command_generic_msg(hostname, result, oneline, 'success')
else:
msg = command_generic_msg(hostname, result, oneline, 'FAILED')
else:
if not failed:
msg = regular_generic_msg(hostname, result, oneline, 'success')
else:
msg = regular_generic_msg(hostname, result, oneline, 'FAILED')
return msg | [
"def",
"host_report_msg",
"(",
"hostname",
",",
"module_name",
",",
"result",
",",
"oneline",
")",
":",
"failed",
"=",
"utils",
".",
"is_failed",
"(",
"result",
")",
"msg",
"=",
"''",
"if",
"module_name",
"in",
"[",
"'command'",
",",
"'shell'",
",",
"'raw'",
"]",
"and",
"'ansible_job_id'",
"not",
"in",
"result",
"and",
"result",
".",
"get",
"(",
"'parsed'",
",",
"True",
")",
"!=",
"False",
":",
"if",
"not",
"failed",
":",
"msg",
"=",
"command_generic_msg",
"(",
"hostname",
",",
"result",
",",
"oneline",
",",
"'success'",
")",
"else",
":",
"msg",
"=",
"command_generic_msg",
"(",
"hostname",
",",
"result",
",",
"oneline",
",",
"'FAILED'",
")",
"else",
":",
"if",
"not",
"failed",
":",
"msg",
"=",
"regular_generic_msg",
"(",
"hostname",
",",
"result",
",",
"oneline",
",",
"'success'",
")",
"else",
":",
"msg",
"=",
"regular_generic_msg",
"(",
"hostname",
",",
"result",
",",
"oneline",
",",
"'FAILED'",
")",
"return",
"msg"
] | summarize the JSON results for a particular host | [
"summarize",
"the",
"JSON",
"results",
"for",
"a",
"particular",
"host"
] | 977409929dd81322d886425cdced10608117d5d7 | https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/callbacks.py#L160-L175 |
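The dispatch rule above reduces to a small predicate; a self-contained sketch (the function name is hypothetical):

def pick_formatter_sketch(module_name, result):
    # raw command-style modules get the rc/stdout/stderr formatter,
    # everything else gets the generic JSON formatter
    is_command = (module_name in ("command", "shell", "raw")
                  and "ansible_job_id" not in result
                  and result.get("parsed", True) is not False)
    return "command" if is_command else "regular"

print(pick_formatter_sketch("shell", {"rc": 0}))  # command
print(pick_formatter_sketch("setup", {}))         # regular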
248,821 | cirruscluster/cirruscluster | cirruscluster/ext/ansible/callbacks.py | AggregateStats._increment | def _increment(self, what, host):
''' helper function to bump a statistic '''
self.processed[host] = 1
prev = (getattr(self, what)).get(host, 0)
getattr(self, what)[host] = prev+1 | python | def _increment(self, what, host):
''' helper function to bump a statistic '''
self.processed[host] = 1
prev = (getattr(self, what)).get(host, 0)
getattr(self, what)[host] = prev+1 | [
"def",
"_increment",
"(",
"self",
",",
"what",
",",
"host",
")",
":",
"self",
".",
"processed",
"[",
"host",
"]",
"=",
"1",
"prev",
"=",
"(",
"getattr",
"(",
"self",
",",
"what",
")",
")",
".",
"get",
"(",
"host",
",",
"0",
")",
"getattr",
"(",
"self",
",",
"what",
")",
"[",
"host",
"]",
"=",
"prev",
"+",
"1"
] | helper function to bump a statistic | [
"helper",
"function",
"to",
"bump",
"a",
"statistic"
] | 977409929dd81322d886425cdced10608117d5d7 | https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/callbacks.py#L74-L79 |
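The bump pattern in isolation, as a self-contained sketch with only the two attributes it touches:

class StatsSketch:
    def __init__(self):
        self.processed = {}
        self.ok = {}

    def _increment(self, what, host):
        self.processed[host] = 1          # mark the host as seen
        counts = getattr(self, what)      # pick the counter dict by name
        counts[host] = counts.get(host, 0) + 1

stats = StatsSketch()
stats._increment("ok", "web1")
stats._increment("ok", "web1")
print(stats.ok)  # {'web1': 2}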
248,822 | cirruscluster/cirruscluster | cirruscluster/ext/ansible/callbacks.py | AggregateStats.compute | def compute(self, runner_results, setup=False, poll=False, ignore_errors=False):
''' walk through all results and increment stats '''
for (host, value) in runner_results.get('contacted', {}).iteritems():
if not ignore_errors and (('failed' in value and bool(value['failed'])) or
('rc' in value and value['rc'] != 0)):
self._increment('failures', host)
elif 'skipped' in value and bool(value['skipped']):
self._increment('skipped', host)
elif 'changed' in value and bool(value['changed']):
if not setup and not poll:
self._increment('changed', host)
self._increment('ok', host)
else:
if not poll or ('finished' in value and bool(value['finished'])):
self._increment('ok', host)
for (host, value) in runner_results.get('dark', {}).iteritems():
self._increment('dark', host) | python | def compute(self, runner_results, setup=False, poll=False, ignore_errors=False):
''' walk through all results and increment stats '''
for (host, value) in runner_results.get('contacted', {}).iteritems():
if not ignore_errors and (('failed' in value and bool(value['failed'])) or
('rc' in value and value['rc'] != 0)):
self._increment('failures', host)
elif 'skipped' in value and bool(value['skipped']):
self._increment('skipped', host)
elif 'changed' in value and bool(value['changed']):
if not setup and not poll:
self._increment('changed', host)
self._increment('ok', host)
else:
if not poll or ('finished' in value and bool(value['finished'])):
self._increment('ok', host)
for (host, value) in runner_results.get('dark', {}).iteritems():
self._increment('dark', host) | [
"def",
"compute",
"(",
"self",
",",
"runner_results",
",",
"setup",
"=",
"False",
",",
"poll",
"=",
"False",
",",
"ignore_errors",
"=",
"False",
")",
":",
"for",
"(",
"host",
",",
"value",
")",
"in",
"runner_results",
".",
"get",
"(",
"'contacted'",
",",
"{",
"}",
")",
".",
"iteritems",
"(",
")",
":",
"if",
"not",
"ignore_errors",
"and",
"(",
"(",
"'failed'",
"in",
"value",
"and",
"bool",
"(",
"value",
"[",
"'failed'",
"]",
")",
")",
"or",
"(",
"'rc'",
"in",
"value",
"and",
"value",
"[",
"'rc'",
"]",
"!=",
"0",
")",
")",
":",
"self",
".",
"_increment",
"(",
"'failures'",
",",
"host",
")",
"elif",
"'skipped'",
"in",
"value",
"and",
"bool",
"(",
"value",
"[",
"'skipped'",
"]",
")",
":",
"self",
".",
"_increment",
"(",
"'skipped'",
",",
"host",
")",
"elif",
"'changed'",
"in",
"value",
"and",
"bool",
"(",
"value",
"[",
"'changed'",
"]",
")",
":",
"if",
"not",
"setup",
"and",
"not",
"poll",
":",
"self",
".",
"_increment",
"(",
"'changed'",
",",
"host",
")",
"self",
".",
"_increment",
"(",
"'ok'",
",",
"host",
")",
"else",
":",
"if",
"not",
"poll",
"or",
"(",
"'finished'",
"in",
"value",
"and",
"bool",
"(",
"value",
"[",
"'finished'",
"]",
")",
")",
":",
"self",
".",
"_increment",
"(",
"'ok'",
",",
"host",
")",
"for",
"(",
"host",
",",
"value",
")",
"in",
"runner_results",
".",
"get",
"(",
"'dark'",
",",
"{",
"}",
")",
".",
"iteritems",
"(",
")",
":",
"self",
".",
"_increment",
"(",
"'dark'",
",",
"host",
")"
] | walk through all results and increment stats | [
"walk",
"through",
"all",
"results",
"and",
"increment",
"stats"
] | 977409929dd81322d886425cdced10608117d5d7 | https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/callbacks.py#L81-L99 |
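The classification precedence above is easy to misread; a sketch that returns the bucket names a single contacted-host result would increment, mirroring (not reusing) the original logic:

def classify_sketch(value, setup=False, poll=False, ignore_errors=False):
    if not ignore_errors and (value.get("failed") or value.get("rc", 0) != 0):
        return ["failures"]
    if value.get("skipped"):
        return ["skipped"]
    if value.get("changed"):
        # a changed result always counts as ok; 'changed' itself is
        # skipped during setup and poll passes
        return (["changed"] if not setup and not poll else []) + ["ok"]
    if not poll or value.get("finished"):
        return ["ok"]
    return []

print(classify_sketch({"rc": 2}))          # ['failures']
print(classify_sketch({"changed": True}))  # ['changed', 'ok']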
248,823 | cirruscluster/cirruscluster | cirruscluster/ext/ansible/callbacks.py | AggregateStats.summarize | def summarize(self, host):
''' return information about a particular host '''
return dict(
ok = self.ok.get(host, 0),
failures = self.failures.get(host, 0),
unreachable = self.dark.get(host,0),
changed = self.changed.get(host, 0),
skipped = self.skipped.get(host, 0)
) | python | def summarize(self, host):
''' return information about a particular host '''
return dict(
ok = self.ok.get(host, 0),
failures = self.failures.get(host, 0),
unreachable = self.dark.get(host,0),
changed = self.changed.get(host, 0),
skipped = self.skipped.get(host, 0)
) | [
"def",
"summarize",
"(",
"self",
",",
"host",
")",
":",
"return",
"dict",
"(",
"ok",
"=",
"self",
".",
"ok",
".",
"get",
"(",
"host",
",",
"0",
")",
",",
"failures",
"=",
"self",
".",
"failures",
".",
"get",
"(",
"host",
",",
"0",
")",
",",
"unreachable",
"=",
"self",
".",
"dark",
".",
"get",
"(",
"host",
",",
"0",
")",
",",
"changed",
"=",
"self",
".",
"changed",
".",
"get",
"(",
"host",
",",
"0",
")",
",",
"skipped",
"=",
"self",
".",
"skipped",
".",
"get",
"(",
"host",
",",
"0",
")",
")"
] | return information about a particular host | [
"return",
"information",
"about",
"a",
"particular",
"host"
] | 977409929dd81322d886425cdced10608117d5d7 | https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/callbacks.py#L102-L111 |
248,824 | ramrod-project/database-brain | schema/brain/connection.py | validate_get_dbs | def validate_get_dbs(connection):
"""
validates the connection object is capable of read access to rethink
there should be at least one test database by default
:param connection: <rethinkdb.net.DefaultConnection>
:return: <set> list of databases
:raises: ReqlDriverError AssertionError
"""
remote_dbs = set(rethinkdb.db_list().run(connection))
assert remote_dbs
return remote_dbs | python | def validate_get_dbs(connection):
"""
validates the connection object is capable of read access to rethink
there should be at least one test database by default
:param connection: <rethinkdb.net.DefaultConnection>
:return: <set> list of databases
:raises: ReqlDriverError AssertionError
"""
remote_dbs = set(rethinkdb.db_list().run(connection))
assert remote_dbs
return remote_dbs | [
"def",
"validate_get_dbs",
"(",
"connection",
")",
":",
"remote_dbs",
"=",
"set",
"(",
"rethinkdb",
".",
"db_list",
"(",
")",
".",
"run",
"(",
"connection",
")",
")",
"assert",
"remote_dbs",
"return",
"remote_dbs"
] | validates the connection object is capable of read access to rethink
there should be at least one test database by default
:param connection: <rethinkdb.net.DefaultConnection>
:return: <set> list of databases
:raises: ReqlDriverError AssertionError | [
"validates",
"the",
"connection",
"object",
"is",
"capable",
"of",
"read",
"access",
"to",
"rethink"
] | b024cb44f34cabb9d80af38271ddb65c25767083 | https://github.com/ramrod-project/database-brain/blob/b024cb44f34cabb9d80af38271ddb65c25767083/schema/brain/connection.py#L55-L67 |
248,825 | ramrod-project/database-brain | schema/brain/connection.py | validate_brain_requirements | def validate_brain_requirements(connection, remote_dbs, requirements):
"""
validates the rethinkdb has the 'correct' databases and tables
should get remote_dbs from brain.connection.validate_get_dbs
:param connection: <rethinkdb.net.DefaultConnection>
:param remote_dbs: <set> database names present in remote database
:param requirements: <dict> example(brain.connection.SELF_TEST)
:return: <bool> True
:raises: AssertionError or Reql*Error
"""
for database in requirements:
assert (database in remote_dbs), "database {} must exist".format(database)
remote_tables = frozenset(rethinkdb.db(database).table_list().run(connection))
for table in requirements[database]:
assert (table in remote_tables), "{} must exist in {}".format(table, database)
return True | python | def validate_brain_requirements(connection, remote_dbs, requirements):
"""
validates the rethinkdb has the 'correct' databases and tables
should get remote_dbs from brain.connection.validate_get_dbs
:param connection: <rethinkdb.net.DefaultConnection>
:param remote_dbs: <set> database names present in remote database
:param requirements: <dict> example(brain.connection.SELF_TEST)
:return: <bool> True
:raises: AssertionError or Reql*Error
"""
for database in requirements:
assert (database in remote_dbs), "database {} must exist".format(database)
remote_tables = frozenset(rethinkdb.db(database).table_list().run(connection))
for table in requirements[database]:
assert (table in remote_tables), "{} must exist in {}".format(table, database)
return True | [
"def",
"validate_brain_requirements",
"(",
"connection",
",",
"remote_dbs",
",",
"requirements",
")",
":",
"for",
"database",
"in",
"requirements",
":",
"assert",
"(",
"database",
"in",
"remote_dbs",
")",
",",
"\"database {} must exist\"",
".",
"format",
"(",
"database",
")",
"remote_tables",
"=",
"frozenset",
"(",
"rethinkdb",
".",
"db",
"(",
"database",
")",
".",
"table_list",
"(",
")",
".",
"run",
"(",
"connection",
")",
")",
"for",
"table",
"in",
"requirements",
"[",
"database",
"]",
":",
"assert",
"(",
"table",
"in",
"remote_tables",
")",
",",
"\"{} must exist in {}\"",
".",
"format",
"(",
"table",
",",
"database",
")",
"return",
"True"
] | validates the rethinkdb has the 'correct' databases and tables
should get remote_dbs from brain.connection.validate_get_dbs
:param connection: <rethinkdb.net.DefaultConnection>
:param remote_dbs: <set> database names present in remote database
:param requirements: <dict> example(brain.connection.SELF_TEST)
:return: <bool> True
:raises: AssertionError or Reql*Error | [
"validates",
"the",
"rethinkdb",
"has",
"the",
"correct",
"databases",
"and",
"tables",
"should",
"get",
"remote_dbs",
"from",
"brain",
".",
"connection",
".",
"validate_get_dbs"
] | b024cb44f34cabb9d80af38271ddb65c25767083 | https://github.com/ramrod-project/database-brain/blob/b024cb44f34cabb9d80af38271ddb65c25767083/schema/brain/connection.py#L70-L86 |
248,826 | ramrod-project/database-brain | schema/brain/connection.py | brain_post | def brain_post(connection, requirements=None):
"""
Power On Self Test for the brain.
Checks that the brain is appropriately seeded and ready for use.
Raises AssertionErrors if the brain is not ready.
:param connection: <rethinkdb.net.DefaultConnection>
:param requirements:<dict> keys=Required Databases, key-values=Required Tables in each database
:return: <rethinkdb.net.DefaultConnection> if verified
"""
assert isinstance(connection, DefaultConnection)
remote_dbs = validate_get_dbs(connection)
assert validate_brain_requirements(connection, remote_dbs, requirements)
assert validate_write_access(connection)
return connection | python | def brain_post(connection, requirements=None):
"""
Power On Self Test for the brain.
Checks that the brain is appropriately seeded and ready for use.
Raises AssertionErrors if the brain is not ready.
:param connection: <rethinkdb.net.DefaultConnection>
:param requirements:<dict> keys=Required Databases, key-values=Required Tables in each database
:return: <rethinkdb.net.DefaultConnection> if verified
"""
assert isinstance(connection, DefaultConnection)
remote_dbs = validate_get_dbs(connection)
assert validate_brain_requirements(connection, remote_dbs, requirements)
assert validate_write_access(connection)
return connection | [
"def",
"brain_post",
"(",
"connection",
",",
"requirements",
"=",
"None",
")",
":",
"assert",
"isinstance",
"(",
"connection",
",",
"DefaultConnection",
")",
"remote_dbs",
"=",
"validate_get_dbs",
"(",
"connection",
")",
"assert",
"validate_brain_requirements",
"(",
"connection",
",",
"remote_dbs",
",",
"requirements",
")",
"assert",
"validate_write_access",
"(",
"connection",
")",
"return",
"connection"
] | Power On Self Test for the brain.
Checks that the brain is appropriately seeded and ready for use.
Raises AssertionErrors if the brain is not ready.
:param connection: <rethinkdb.net.DefaultConnection>
:param requirements:<dict> keys=Required Databases, key-values=Required Tables in each database
:return: <rethinkdb.net.DefaultConnection> if verified | [
"Power",
"On",
"Self",
"Test",
"for",
"the",
"brain",
"."
] | b024cb44f34cabb9d80af38271ddb65c25767083 | https://github.com/ramrod-project/database-brain/blob/b024cb44f34cabb9d80af38271ddb65c25767083/schema/brain/connection.py#L123-L139 |
248,827 | ramrod-project/database-brain | schema/brain/connection.py | connect | def connect(host=None,
port=rethinkdb.DEFAULT_PORT,
timeout=20,
verify=True,
**kwargs):
"""
RethinkDB semantic connection wrapper
raises <brain.connection.BrainNotReady> if connection verification fails
:param verify: <bool> (default True) whether to run POST
:param timeout: <int> max time (s) to wait for connection
:param kwargs: <dict> passthrough rethinkdb arguments
:return: <rethinkdb.net.DefaultConnection>
"""
if not host:
host = DEFAULT_HOSTS.get(check_stage_env())
connection = None
tries = 0
time_quit = time() + timeout
while not connection and time() <= time_quit:
tries += 1
connection = _attempt_connect(host, port, timeout/3, verify, **kwargs)
if not connection:
sleep(0.5)
if not connection:
raise BrainNotReady(
"Tried ({}:{}) {} times at {} second max timeout".format(host,
port,
tries,
timeout))
return connection | python | def connect(host=None,
port=rethinkdb.DEFAULT_PORT,
timeout=20,
verify=True,
**kwargs):
"""
RethinkDB semantic connection wrapper
raises <brain.connection.BrainNotReady> if connection verification fails
:param verify: <bool> (default True) whether to run POST
:param timeout: <int> max time (s) to wait for connection
:param kwargs: <dict> passthrough rethinkdb arguments
:return: <rethinkdb.net.DefaultConnection>
"""
if not host:
host = DEFAULT_HOSTS.get(check_stage_env())
connection = None
tries = 0
time_quit = time() + timeout
while not connection and time() <= time_quit:
tries += 1
connection = _attempt_connect(host, port, timeout/3, verify, **kwargs)
if not connection:
sleep(0.5)
if not connection:
raise BrainNotReady(
"Tried ({}:{}) {} times at {} second max timeout".format(host,
port,
tries,
timeout))
return connection | [
"def",
"connect",
"(",
"host",
"=",
"None",
",",
"port",
"=",
"rethinkdb",
".",
"DEFAULT_PORT",
",",
"timeout",
"=",
"20",
",",
"verify",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"host",
":",
"host",
"=",
"DEFAULT_HOSTS",
".",
"get",
"(",
"check_stage_env",
"(",
")",
")",
"connection",
"=",
"None",
"tries",
"=",
"0",
"time_quit",
"=",
"time",
"(",
")",
"+",
"timeout",
"while",
"not",
"connection",
"and",
"time",
"(",
")",
"<=",
"time_quit",
":",
"tries",
"+=",
"1",
"connection",
"=",
"_attempt_connect",
"(",
"host",
",",
"port",
",",
"timeout",
"/",
"3",
",",
"verify",
",",
"*",
"*",
"kwargs",
")",
"if",
"not",
"connection",
":",
"sleep",
"(",
"0.5",
")",
"if",
"not",
"connection",
":",
"raise",
"BrainNotReady",
"(",
"\"Tried ({}:{}) {} times at {} second max timeout\"",
".",
"format",
"(",
"host",
",",
"port",
",",
"tries",
",",
"timeout",
")",
")",
"return",
"connection"
] | RethinkDB semantic connection wrapper
raises <brain.connection.BrainNotReady> if connection verification fails
:param verify: <bool> (default True) whether to run POST
:param timeout: <int> max time (s) to wait for connection
:param kwargs: <dict> passthrough rethinkdb arguments
:return: <rethinkdb.net.DefaultConnection> | [
"RethinkDB",
"semantic",
"connection",
"wrapper"
] | b024cb44f34cabb9d80af38271ddb65c25767083 | https://github.com/ramrod-project/database-brain/blob/b024cb44f34cabb9d80af38271ddb65c25767083/schema/brain/connection.py#L142-L173 |
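The retry loop generalizes beyond RethinkDB; a self-contained sketch of the same poll-until-deadline shape, where attempt is any zero-argument callable returning a truthy value on success:

from time import sleep, time

def retry_until_sketch(attempt, timeout=20, interval=0.5):
    deadline = time() + timeout
    tries = 0
    while time() <= deadline:
        tries += 1
        result = attempt()  # truthy result means success
        if result:
            return result
        sleep(interval)
    raise RuntimeError("gave up after %d tries in %ss" % (tries, timeout))

print(retry_until_sketch(lambda: "connected", timeout=1))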
248,828 | kervi/kervi-core | kervi/sensors/__init__.py | Sensor.link_to_dashboard | def link_to_dashboard(self, dashboard_id=None, panel_id=None, **kwargs):
r"""
Links the sensor to a dashboard.
:param dashboard_id: Id of the dashboard to link to.
Enter a * if the sensor should be linked to all dashboards.
:type dashboard_id: ``str``
:param panel_id: Id of the panel to link to.
This is the id of a panel you have added your self to a dashboard or one of the
system panels *sys-header*, *header* or *footer*
:type panel_id: ``str``
:Keyword Arguments:
* **link_to_header** (``str``) -- Link this input to header of the panel.
* **label_icon** (``str``) -- Icon that should be displayed together with label. All Font Awesome icons are valid just enter the name of the icon without *fa-*
* **label** (``str``) -- Label text, default value is the name of the sensor.
* **flat** (``bool``) -- Flat look and feel.
* **inline** (``bool``) -- Display value, sparkline and label in their actual size, otherwise it occupies the entire width of the panel
* **type** (``str``) -- One of the following values *radial_gauge*, *vertical_gauge*, *horizontal_gauge*, *chart* or *value*.
* **show_sparkline** (``bool``) -- Show a sparkline next to the value.
* **icon** (``bool``) -- Icon to show. All Font Awesome icons are valid just enter the name of the icon without *fa-*.
* **show_value** (``bool``) -- Show the numeric value and unit.
* **label** (``str``) -- Label to show default is the name of the sensor.
"""
if self._dimensions == 1:
self._sensor_value.link_to_dashboard(dashboard_id, panel_id, **kwargs)
else:
for dimension in range(0, self._dimensions):
self._sub_sensors[dimension].link_to_dashboard(dashboard_id, panel_id, **kwargs) | python | def link_to_dashboard(self, dashboard_id=None, panel_id=None, **kwargs):
r"""
Links the sensor to a dashboard.
:param dashboard_id: Id of the dashboard to link to.
Enter a * if the sensor should be linked to all dashboards.
:type dashboard_id: ``str``
:param panel_id: Id of the panel to link to.
This is the id of a panel you have added your self to a dashboard or one of the
system panels *sys-header*, *header* or *footer*
:type panel_id: ``str``
:Keyword Arguments:
* **link_to_header** (``str``) -- Link this input to header of the panel.
* **label_icon** (``str``) -- Icon that should be displayed together with label. All Font Awesome icons are valid just enter the name of the icon without *fa-*
* **label** (``str``) -- Label text, default value is the name of the sensor.
* **flat** (``bool``) -- Flat look and feel.
* **inline** (``bool``) -- Display value, sparkline and label in their actual size, otherwise it occupies the entire width of the panel
* **type** (``str``) -- One of the following values *radial_gauge*, *vertical_gauge*, *horizontal_gauge*, *chart* or *value*.
* **show_sparkline** (``bool``) -- Show a sparkline next to the value.
* **icon** (``bool``) -- Icon to show. All Font Awesome icons are valid just enter the name of the icon without *fa-*.
* **show_value** (``bool``) -- Show the numeric value and unit.
* **label** (``str``) -- Label to show default is the name of the sensor.
"""
if self._dimensions == 1:
self._sensor_value.link_to_dashboard(dashboard_id, panel_id, **kwargs)
else:
for dimension in range(0, self._dimensions):
self._sub_sensors[dimension].link_to_dashboard(dashboard_id, panel_id, **kwargs) | [
"def",
"link_to_dashboard",
"(",
"self",
",",
"dashboard_id",
"=",
"None",
",",
"panel_id",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_dimensions",
"==",
"1",
":",
"self",
".",
"_sensor_value",
".",
"link_to_dashboard",
"(",
"dashboard_id",
",",
"panel_id",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"for",
"dimension",
"in",
"range",
"(",
"0",
",",
"self",
".",
"_dimensions",
")",
":",
"self",
".",
"_sub_sensors",
"[",
"dimension",
"]",
".",
"link_to_dashboard",
"(",
"dashboard_id",
",",
"panel_id",
",",
"*",
"*",
"kwargs",
")"
] | r"""
Links the sensor to a dashboard.
:param dashboard_id: Id of the dashboard to link to.
Enter a * if the sensor should be linked to all dashboards.
:type dashboard_id: ``str``
:param panel_id: Id of the panel to link to.
This is the id of a panel you have added your self to a dashboard or one of the
system panels *sys-header*, *header* or *footer*
:type panel_id: ``str``
:Keyword Arguments:
* **link_to_header** (``str``) -- Link this input to header of the panel.
* **label_icon** (``str``) -- Icon that should be displayed together with label. All Font Awesome icons are valid just enter the name of the icon without *fa-*
* **label** (``str``) -- Label text, default value is the name of the sensor.
* **flat** (``bool``) -- Flat look and feel.
* **inline** (``bool``) -- Display value, sparkline and label in their actual size, otherwise it occupies the entire width of the panel
* **type** (``str``) -- One of the following values *radial_gauge*, *vertical_gauge*, *horizontal_gauge*, *chart* or *value*.
* **show_sparkline** (``bool``) -- Show a sparkline next to the value.
* **icon** (``bool``) -- Icon to show. All Font Awesome icons are valid just enter the name of the icon without *fa-*.
* **show_value** (``bool``) -- Show the numeric value and unit.
* **label** (``str``) -- Label to show default is the name of the sensor. | [
"r",
"Links",
"the",
"sensor",
"to",
"a",
"dashboard",
"."
] | 3c1e3c8a17a7b4d085d8a28b99180ff2a96b0e23 | https://github.com/kervi/kervi-core/blob/3c1e3c8a17a7b4d085d8a28b99180ff2a96b0e23/kervi/sensors/__init__.py#L143-L184 |
248,829 | kervi/kervi-core | kervi/sensors/__init__.py | Sensor._new_sensor_reading | def _new_sensor_reading(self, sensor_value):
"""
Call this method to signal a new sensor reading.
This method handles DB storage and triggers different events.
:param value:
New value to be stored in the system.
"""
if not self._active and not self._enabled:
return
if self._dimensions > 1:
for dimension in range(0, self._dimensions):
value = sensor_value[dimension]
self._sub_sensors[dimension]._new_sensor_reading(value)
else:
self._sensor_value.value = sensor_value | python | def _new_sensor_reading(self, sensor_value):
"""
Call this method to signal a new sensor reading.
This method handles DB storage and triggers different events.
:param value:
New value to be stored in the system.
"""
if not self._active and not self._enabled:
return
if self._dimensions > 1:
for dimension in range(0, self._dimensions):
value = sensor_value[dimension]
self._sub_sensors[dimension]._new_sensor_reading(value)
else:
self._sensor_value.value = sensor_value | [
"def",
"_new_sensor_reading",
"(",
"self",
",",
"sensor_value",
")",
":",
"if",
"not",
"self",
".",
"_active",
"and",
"not",
"self",
".",
"_enabled",
":",
"return",
"if",
"self",
".",
"_dimensions",
">",
"1",
":",
"for",
"dimension",
"in",
"range",
"(",
"0",
",",
"self",
".",
"_dimensions",
")",
":",
"value",
"=",
"sensor_value",
"[",
"dimension",
"]",
"self",
".",
"_sub_sensors",
"[",
"dimension",
"]",
".",
"_new_sensor_reading",
"(",
"value",
")",
"else",
":",
"self",
".",
"_sensor_value",
".",
"value",
"=",
"sensor_value"
] | Call this method to signal a new sensor reading.
This method handles DB storage and triggers different events.
:param value:
New value to be stored in the system. | [
"Call",
"this",
"method",
"to",
"signal",
"a",
"new",
"sensor",
"reading",
".",
"This",
"method",
"handles",
"DB",
"storage",
"and",
"triggers",
"different",
"events",
"."
] | 3c1e3c8a17a7b4d085d8a28b99180ff2a96b0e23 | https://github.com/kervi/kervi-core/blob/3c1e3c8a17a7b4d085d8a28b99180ff2a96b0e23/kervi/sensors/__init__.py#L216-L232 |
248,830 | koodaamo/iconframer | iconframer/iconframer.py | process_path | def process_path(label, pth):
"check and expand paths"
if pth is None:
sys.exit("no %s path given" % label)
if pth.startswith("/"):
pass
elif pth[0] in (".", "~"):
pth = os.path.realpath(pth)
else:
pth = os.getcwd() + os.sep + pth
if not os.path.exists(pth):
sys.exit("%s path %s does not exist" % (label, pth))
return pth | python | def process_path(label, pth):
"check and expand paths"
if pth is None:
sys.exit("no %s path given" % label)
if pth.startswith("/"):
pass
elif pth[0] in (".", "~"):
pth = os.path.realpath(pth)
else:
pth = os.getcwd() + os.sep + pth
if not os.path.exists(pth):
sys.exit("%s path %s does not exist" % (label, pth))
return pth | [
"def",
"process_path",
"(",
"label",
",",
"pth",
")",
":",
"if",
"pth",
"is",
"None",
":",
"sys",
".",
"exit",
"(",
"\"no %s path given\"",
"%",
"label",
")",
"if",
"pth",
".",
"startswith",
"(",
"\"/\"",
")",
":",
"pass",
"elif",
"pth",
"[",
"0",
"]",
"in",
"(",
"\".\"",
",",
"\"~\"",
")",
":",
"pth",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"pth",
")",
"else",
":",
"pth",
"=",
"os",
".",
"getcwd",
"(",
")",
"+",
"os",
".",
"sep",
"+",
"pth",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"pth",
")",
":",
"sys",
".",
"exit",
"(",
"\"%s path %s does not exist\"",
"%",
"(",
"label",
",",
"pth",
")",
")",
"return",
"pth"
] | check and expand paths | [
"check",
"and",
"expand",
"paths"
] | 58d71fd78bfe3893a7f20384f429592d033d802a | https://github.com/koodaamo/iconframer/blob/58d71fd78bfe3893a7f20384f429592d033d802a/iconframer/iconframer.py#L23-L39 |
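Usage, assuming process_path above is in scope; tempfile guarantees the existence check passes. Note that os.path.realpath does not expand '~', so '~/conf'-style inputs would additionally need os.path.expanduser:

import tempfile

with tempfile.TemporaryDirectory() as tmp:
    print(process_path("data", tmp))  # absolute paths pass through unchanged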
248,831 | koodaamo/iconframer | iconframer/iconframer.py | convert_svg | def convert_svg(svgstr, size, filepath, target):
"convert to PDF or PNG"
# PREPARE CONVERSION PER TYPE
if target == "PDF":
img = cairo.PDFSurface(filepath, size, size)
elif target == "PNG":
img = cairo.ImageSurface(cairo.FORMAT_ARGB32, size, size)
else:
sys.exit("unknown file type conversion")
# PROCESS
ctx = cairo.Context(img)
handler = rsvg.Handle(None, svgstr)
iw,ih, fw,fh = handler.get_dimension_data()
ctx.translate(0,0)
ctx.scale(size/fw, size/fh) # assumes bigger source SVG template
handler.render_cairo(ctx)
# FINALIZE PER TYPE
if target == "PNG":
img.write_to_png(filepath) | python | def convert_svg(svgstr, size, filepath, target):
"convert to PDF or PNG"
# PREPARE CONVERSION PER TYPE
if target == "PDF":
img = cairo.PDFSurface(filepath, size, size)
elif target == "PNG":
img = cairo.ImageSurface(cairo.FORMAT_ARGB32, size, size)
else:
sys.exit("unknown file type conversion")
# PROCESS
ctx = cairo.Context(img)
handler = rsvg.Handle(None, svgstr)
iw,ih, fw,fh = handler.get_dimension_data()
ctx.translate(0,0)
ctx.scale(size/fw, size/fh) # assumes bigger source SVG template
handler.render_cairo(ctx)
# FINALIZE PER TYPE
if target == "PNG":
img.write_to_png(filepath) | [
"def",
"convert_svg",
"(",
"svgstr",
",",
"size",
",",
"filepath",
",",
"target",
")",
":",
"# PREPARE CONVERSION PER TYPE",
"if",
"target",
"==",
"\"PDF\"",
":",
"img",
"=",
"cairo",
".",
"PDFSurface",
"(",
"filepath",
",",
"size",
",",
"size",
")",
"elif",
"target",
"==",
"\"PNG\"",
":",
"img",
"=",
"cairo",
".",
"ImageSurface",
"(",
"cairo",
".",
"FORMAT_ARGB32",
",",
"size",
",",
"size",
")",
"else",
":",
"system",
".",
"exit",
"(",
"\"unknown file type conversion\"",
")",
"# PROCESS",
"ctx",
"=",
"cairo",
".",
"Context",
"(",
"img",
")",
"handler",
"=",
"rsvg",
".",
"Handle",
"(",
"None",
",",
"svgstr",
")",
"iw",
",",
"ih",
",",
"fw",
",",
"fh",
"=",
"handler",
".",
"get_dimension_data",
"(",
")",
"ctx",
".",
"translate",
"(",
"0",
",",
"0",
")",
"ctx",
".",
"scale",
"(",
"size",
"/",
"fw",
",",
"size",
"/",
"fh",
")",
"# assumes bigger source SVG template",
"handler",
".",
"render_cairo",
"(",
"ctx",
")",
"# FINALIZE PER TYPE",
"if",
"target",
"==",
"\"PNG\"",
":",
"img",
".",
"write_to_png",
"(",
"filepath",
")"
] | convert to PDF or PNG | [
"convert",
"to",
"PDF",
"or",
"PNG"
] | 58d71fd78bfe3893a7f20384f429592d033d802a | https://github.com/koodaamo/iconframer/blob/58d71fd78bfe3893a7f20384f429592d033d802a/iconframer/iconframer.py#L138-L161 |
248,832 | vistoyn/python-foruse | foruse/lib.py | var_dump | def var_dump(*obs):
"""
shows structured information of an object, list, tuple etc
"""
i = 0
for x in obs:
str = var_dump_output(x, 0, ' ', '\n', True)
print (str.strip())
#dump(x, 0, i, '', object)
i += 1 | python | def var_dump(*obs):
"""
shows structured information of an object, list, tuple etc
"""
i = 0
for x in obs:
str = var_dump_output(x, 0, ' ', '\n', True)
print (str.strip())
#dump(x, 0, i, '', object)
i += 1 | [
"def",
"var_dump",
"(",
"*",
"obs",
")",
":",
"i",
"=",
"0",
"for",
"x",
"in",
"obs",
":",
"str",
"=",
"var_dump_output",
"(",
"x",
",",
"0",
",",
"' '",
",",
"'\\n'",
",",
"True",
")",
"print",
"(",
"str",
".",
"strip",
"(",
")",
")",
"#dump(x, 0, i, '', object)",
"i",
"+=",
"1"
] | shows structured information of an object, list, tuple etc | [
"shows",
"structured",
"information",
"of",
"a",
"object",
"list",
"tuple",
"etc"
] | 312588d25ac391aa7b3325b7cb4c8f8188b559c7 | https://github.com/vistoyn/python-foruse/blob/312588d25ac391aa7b3325b7cb4c8f8188b559c7/foruse/lib.py#L427-L438 |
248,833 | rsc-dev/pyciagi | pyciagi/__init__.py | difficulties_by_voivodeship | def difficulties_by_voivodeship(voivodeship, dt=datetime.now()):
"""
Get difficulties in voivodeship.
:param voivodeship: Voivodeship numeric value.
:param dt: Datetime for data. Default: datetime.now()
:return: List of difficulties by voivodeship.
"""
session = requests.Session()
session.headers.update({'User-Agent': USER_AGENT})
session.headers.update({'X-Requested-With': 'XMLHttpRequest'})
session.get('{}/Mapa/'.format(HOST))
url = '{}/Mapa/PodajUtrudnieniaWWojewodztwie?KodWojewodztwa={}&_={}'.format(HOST, str(voivodeship), _datetime_to_asp_date(dt))
response = session.get(url)
json_data = response.json() if len(response.text) > 0 else []
return json_data | python | def difficulties_by_voivodeship(voivodeship, dt=datetime.now()):
"""
Get difficulties in voivodeship.
:param voivodeship: Voivodeship numeric value.
:param dt: Datetime for data. Default: datetime.now()
:return: List of difficulties by voivodeship.
"""
session = requests.Session()
session.headers.update({'User-Agent': USER_AGENT})
session.headers.update({'X-Requested-With': 'XMLHttpRequest'})
session.get('{}/Mapa/'.format(HOST))
url = '{}/Mapa/PodajUtrudnieniaWWojewodztwie?KodWojewodztwa={}&_={}'.format(HOST, str(voivodeship), _datetime_to_asp_date(dt))
response = session.get(url)
json_data = response.json() if len(response.text) > 0 else []
return json_data | [
"def",
"difficulties_by_voivodeship",
"(",
"voivodeship",
",",
"dt",
"=",
"datetime",
".",
"now",
"(",
")",
")",
":",
"session",
"=",
"requests",
".",
"Session",
"(",
")",
"session",
".",
"headers",
".",
"update",
"(",
"{",
"'User-Agent'",
":",
"USER_AGENT",
"}",
")",
"session",
".",
"headers",
".",
"update",
"(",
"{",
"'X-Requested-With'",
":",
"'XMLHttpRequest'",
"}",
")",
"session",
".",
"get",
"(",
"'{}/Mapa/'",
".",
"format",
"(",
"HOST",
")",
")",
"url",
"=",
"'{}/Mapa/PodajUtrudnieniaWWojewodztwie?KodWojewodztwa={}&_={}'",
".",
"format",
"(",
"HOST",
",",
"str",
"(",
"voivodeship",
")",
",",
"_datetime_to_asp_date",
"(",
"dt",
")",
")",
"response",
"=",
"session",
".",
"get",
"(",
"url",
")",
"json_data",
"=",
"response",
".",
"json",
"(",
")",
"if",
"len",
"(",
"response",
".",
"text",
")",
">",
"0",
"else",
"[",
"]",
"return",
"json_data"
] | Get difficulties in voivodeship.
:param voivodeship: Voivodeship numeric value.
:param dt: Datetime for data. Default: datetime.now()
:return: List of difficulties by voivodeship. | [
"Get",
"difficulties",
"in",
"voivodeship",
"."
] | cf430442e7fb6c1126fdb0e0e990fc629ba2df3b | https://github.com/rsc-dev/pyciagi/blob/cf430442e7fb6c1126fdb0e0e990fc629ba2df3b/pyciagi/__init__.py#L39-L58 |
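_datetime_to_asp_date is not shown in the record; a common convention for such `_=` query parameters is a millisecond epoch timestamp (the jQuery-style cache buster), which is the assumption in this sketch. Note also that the dt=datetime.now() default is evaluated once at import time, not on each call:

from datetime import datetime

def datetime_to_millis_sketch(dt):
    # assumption: the service expects milliseconds since the Unix epoch
    return int(dt.timestamp() * 1000)

print(datetime_to_millis_sketch(datetime(2020, 1, 1)))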
248,834 | treycucco/bidon | bidon/util/terminal.py | table_to_string | def table_to_string(headers, table, align="", *, lines=("-", "-+-", " | ")):
"""Write a list of headers and a table of rows to the terminal in a nice format.
Parameters:
- headers: a list of strings that are the headers of the table
- table: a list of lists, the actual data to be printed.
- align: a string whose elements are in the set {'<', '^', '>'}. These define how the values
printed in the cells will align. '<': left, '^': center, '>': right. This argument is
optional and all unspecified columns will align as if '<' were passed for them.
- lines: a tuple of line characters to print for the table: (row_sep, row_intersection, col_sep)
"""
header_separator, header_junction, row_separator = lines
align = ("{0:<<" + str(len(headers)) + "}").format(align or "")
all_lens = [tuple(len(c) for c in r) for r in table]
if headers:
all_lens.append(tuple(len(h) for h in headers))
max_lens = [max(r[i] for r in all_lens) for i in range(len(headers))]
col_outs = ["{{{0}: {1}{2}}}".format(i, align[i], w) for i, w in enumerate(max_lens)]
fmt_str = row_separator.join(col_outs)
if headers:
yield fmt_str.format(*headers)
yield header_junction.join((header_separator * ml for ml in max_lens))
for row in table:
yield fmt_str.format(*row) | python | def table_to_string(headers, table, align="", *, lines=("-", "-+-", " | ")):
"""Write a list of headers and a table of rows to the terminal in a nice format.
Parameters:
- headers: a list of strings that are the headers of the table
- table: a list of lists, the actual data to be printed.
- align: a string whose elements are in the set {'<', '^', '>'}. These define how the values
printed in the cells will align. '<': left, '^': center, '>': right. This argument is
optional and all unspecified columns will align as if '<' were passed for them.
- lines: a tuple of line characters to print for the table: (row_sep, row_intersection, col_sep)
"""
header_separator, header_junction, row_separator = lines
align = ("{0:<<" + str(len(headers)) + "}").format(align or "")
all_lens = [tuple(len(c) for c in r) for r in table]
if headers:
all_lens.append(tuple(len(h) for h in headers))
max_lens = [max(r[i] for r in all_lens) for i in range(len(headers))]
col_outs = ["{{{0}: {1}{2}}}".format(i, align[i], w) for i, w in enumerate(max_lens)]
fmt_str = row_separator.join(col_outs)
if headers:
yield fmt_str.format(*headers)
yield header_junction.join((header_separator * ml for ml in max_lens))
for row in table:
yield fmt_str.format(*row) | [
"def",
"table_to_string",
"(",
"headers",
",",
"table",
",",
"align",
"=",
"\"\"",
",",
"*",
",",
"lines",
"=",
"(",
"\"-\"",
",",
"\"-+-\"",
",",
"\" | \"",
")",
")",
":",
"header_separator",
",",
"header_junction",
",",
"row_separator",
"=",
"lines",
"align",
"=",
"(",
"\"{0:<<\"",
"+",
"str",
"(",
"len",
"(",
"headers",
")",
")",
"+",
"\"}\"",
")",
".",
"format",
"(",
"align",
"or",
"\"\"",
")",
"all_lens",
"=",
"[",
"tuple",
"(",
"len",
"(",
"c",
")",
"for",
"c",
"in",
"r",
")",
"for",
"r",
"in",
"table",
"]",
"if",
"headers",
":",
"all_lens",
".",
"append",
"(",
"tuple",
"(",
"len",
"(",
"h",
")",
"for",
"h",
"in",
"headers",
")",
")",
"max_lens",
"=",
"[",
"max",
"(",
"r",
"[",
"i",
"]",
"for",
"r",
"in",
"all_lens",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"headers",
")",
")",
"]",
"col_outs",
"=",
"[",
"\"{{{0}: {1}{2}}}\"",
".",
"format",
"(",
"i",
",",
"align",
"[",
"i",
"]",
",",
"w",
")",
"for",
"i",
",",
"w",
"in",
"enumerate",
"(",
"max_lens",
")",
"]",
"fmt_str",
"=",
"row_separator",
".",
"join",
"(",
"col_outs",
")",
"if",
"headers",
":",
"yield",
"fmt_str",
".",
"format",
"(",
"*",
"headers",
")",
"yield",
"header_junction",
".",
"join",
"(",
"(",
"header_separator",
"*",
"ml",
"for",
"ml",
"in",
"max_lens",
")",
")",
"for",
"row",
"in",
"table",
":",
"yield",
"fmt_str",
".",
"format",
"(",
"*",
"row",
")"
] | Write a list of headers and a table of rows to the terminal in a nice format.
Parameters:
- headers: a list of strings that are the headers of the table
- table: a list of lists, the actual data to be printed.
- align: a string whose elements are in the set {'<', '^', '>'}. These define how the values
printed in the cells will align. '<': left, '^': center, '>': right. This argument is
optional and all unspecified columns will align as if '<' were passed for them.
- lines: a tuple of line characters to print for the table: (row_sep, row_intersection, col_sep) | [
"Write",
"a",
"list",
"of",
"headers",
"and",
"a",
"table",
"of",
"rows",
"to",
"the",
"terminal",
"in",
"a",
"nice",
"format",
"."
] | d9f24596841d0e69e8ac70a1d1a1deecea95e340 | https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/util/terminal.py#L115-L140 |
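Usage, assuming table_to_string above is in scope; cells must already be strings, since column widths are computed with len():

headers = ["name", "qty"]
rows = [["apples", "3"], ["pears", "12"]]
for line in table_to_string(headers, rows, align="<>"):
    print(line)
# name   | qty
# -------+----
# apples |   3
# pears  |  12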
248,835 | treycucco/bidon | bidon/util/terminal.py | ProgressPrinter.ratio_and_percentage | def ratio_and_percentage(current, total, time_remaining):
"""Returns the progress ratio and percentage."""
return "{} / {} ({}% completed)".format(current, total, int(current / total * 100)) | python | def ratio_and_percentage(current, total, time_remaining):
"""Returns the progress ratio and percentage."""
return "{} / {} ({}% completed)".format(current, total, int(current / total * 100)) | [
"def",
"ratio_and_percentage",
"(",
"current",
",",
"total",
",",
"time_remaining",
")",
":",
"return",
"\"{} / {} ({}% completed)\"",
".",
"format",
"(",
"current",
",",
"total",
",",
"int",
"(",
"current",
"/",
"total",
"*",
"100",
")",
")"
] | Returns the progress ratio and percentage. | [
"Returns",
"the",
"progress",
"ratio",
"and",
"percentage",
"."
] | d9f24596841d0e69e8ac70a1d1a1deecea95e340 | https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/util/terminal.py#L86-L88 |
248,836 | treycucco/bidon | bidon/util/terminal.py | ProgressPrinter.ratio_and_percentage_with_time_remaining | def ratio_and_percentage_with_time_remaining(current, total, time_remaining):
"""Returns the progress ratio, percentage and time remaining."""
return "{} / {} ({}% completed) (~{} remaining)".format(
current,
total,
int(current / total * 100),
time_remaining) | python | def ratio_and_percentage_with_time_remaining(current, total, time_remaining):
"""Returns the progress ratio, percentage and time remaining."""
return "{} / {} ({}% completed) (~{} remaining)".format(
current,
total,
int(current / total * 100),
time_remaining) | [
"def",
"ratio_and_percentage_with_time_remaining",
"(",
"current",
",",
"total",
",",
"time_remaining",
")",
":",
"return",
"\"{} / {} ({}% completed) (~{} remaining)\"",
".",
"format",
"(",
"current",
",",
"total",
",",
"int",
"(",
"current",
"/",
"total",
"*",
"100",
")",
",",
"time_remaining",
")"
] | Returns the progress ratio, percentage and time remaining. | [
"Returns",
"the",
"progress",
"ratio",
"percentage",
"and",
"time",
"remaining",
"."
] | d9f24596841d0e69e8ac70a1d1a1deecea95e340 | https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/util/terminal.py#L101-L107 |
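Both formatters take no self, so they can be called directly on the class; the time string below is an arbitrary example value:

print(ProgressPrinter.ratio_and_percentage(25, 100, None))
# 25 / 100 (25% completed)
print(ProgressPrinter.ratio_and_percentage_with_time_remaining(25, 100, "0:01:30"))
# 25 / 100 (25% completed) (~0:01:30 remaining)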
248,837 | AshleySetter/frange | frange/frange/frange.py | frange.get_generator | def get_generator(self):
"""
Returns a generator for the frange object instance.
Returns
-------
gen : generator
A generator that yields successive samples from start (inclusive)
to stop (exclusive) in steps of size step.
"""
s = self.slice
gen = drange(s.start, s.stop, s.step) # initialises the generator
return gen | python | def get_generator(self):
"""
Returns a generator for the frange object instance.
Returns
-------
gen : generator
A generator that yields successive samples from start (inclusive)
to stop (exclusive) in steps of size step.
"""
s = self.slice
gen = drange(s.start, s.stop, s.step) # initialises the generator
return gen | [
"def",
"get_generator",
"(",
"self",
")",
":",
"s",
"=",
"self",
".",
"slice",
"gen",
"=",
"drange",
"(",
"s",
".",
"start",
",",
"s",
".",
"stop",
",",
"s",
".",
"step",
")",
"# intialises the generator",
"return",
"gen"
] | Returns a generator for the frange object instance.
Returns
-------
gen : generator
A generator that yields successive samples from start (inclusive)
to stop (exclusive) in steps of size step. | [
"Returns",
"a",
"generator",
"for",
"the",
"frange",
"object",
"instance",
"."
] | 6b0412d0584fce04c870af82d3e0bf90ed60fb5b | https://github.com/AshleySetter/frange/blob/6b0412d0584fce04c870af82d3e0bf90ed60fb5b/frange/frange/frange.py#L55-L67 |
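drange itself is not shown in the record; a plausible minimal version, assuming it mirrors range() semantics for floats (start inclusive, stop exclusive):

def drange_sketch(start, stop, step):
    # naive accumulation is fine for a sketch, but repeated float adds
    # drift, so a scaled-integer loop is more robust in practice
    x = start
    while x < stop:
        yield x
        x += step

print(list(drange_sketch(0.0, 1.0, 0.25)))  # [0.0, 0.25, 0.5, 0.75]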
248,838 | solocompt/plugs-filter | plugs_filter/utils.py | get_field_lookups | def get_field_lookups(field_type, nullable):
"""
Return lookup table value and append isnull if
this is a nullable field
"""
return LOOKUP_TABLE.get(field_type) + ['isnull'] if nullable else LOOKUP_TABLE.get(field_type) | python | def get_field_lookups(field_type, nullable):
"""
Return lookup table value and append isnull if
this is a nullable field
"""
return LOOKUP_TABLE.get(field_type) + ['isnull'] if nullable else LOOKUP_TABLE.get(field_type) | [
"def",
"get_field_lookups",
"(",
"field_type",
",",
"nullable",
")",
":",
"return",
"LOOKUP_TABLE",
".",
"get",
"(",
"field_type",
")",
"+",
"[",
"'isnull'",
"]",
"if",
"nullable",
"else",
"LOOKUP_TABLE",
".",
"get",
"(",
"field_type",
")"
] | Return lookup table value and append isnull if
this is a nullable field | [
"Return",
"lookup",
"table",
"value",
"and",
"append",
"isnull",
"if",
"this",
"is",
"a",
"nullable",
"field"
] | cb34c7d662d3f96c07c10b3ed0a34bafef78b52c | https://github.com/solocompt/plugs-filter/blob/cb34c7d662d3f96c07c10b3ed0a34bafef78b52c/plugs_filter/utils.py#L14-L19 |
248,839 | solocompt/plugs-filter | plugs_filter/utils.py | match_field | def match_field(field_class):
"""
Iterates the field_classes and
returns the first match
"""
for cls in field_class.mro():
if cls in list(LOOKUP_TABLE.keys()):
return cls
# could not match the field class
raise Exception('{0} None Found '.format(field_class)) | python | def match_field(field_class):
"""
Iterates the field_classes and
returns the first match
"""
for cls in field_class.mro():
if cls in list(LOOKUP_TABLE.keys()):
return cls
# could not match the field class
raise Exception('{0} None Found '.format(field_class)) | [
"def",
"match_field",
"(",
"field_class",
")",
":",
"for",
"cls",
"in",
"field_class",
".",
"mro",
"(",
")",
":",
"if",
"cls",
"in",
"list",
"(",
"LOOKUP_TABLE",
".",
"keys",
"(",
")",
")",
":",
"return",
"cls",
"# could not match the field class",
"raise",
"Exception",
"(",
"'{0} None Found '",
".",
"format",
"(",
"field_class",
")",
")"
] | Iterates the field_classes and
returns the first match | [
"Iterates",
"the",
"field_classes",
"and",
"returns",
"the",
"first",
"match"
] | cb34c7d662d3f96c07c10b3ed0a34bafef78b52c | https://github.com/solocompt/plugs-filter/blob/cb34c7d662d3f96c07c10b3ed0a34bafef78b52c/plugs_filter/utils.py#L21-L30 |
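A self-contained sketch of the MRO walk, with a hypothetical two-class hierarchy standing in for the field classes and LOOKUP_TABLE:

class Base: pass
class Child(Base): pass

LOOKUP_SKETCH = {Base: ["exact", "in", "isnull"]}  # hypothetical table

def match_field_sketch(field_class):
    for cls in field_class.mro():
        if cls in LOOKUP_SKETCH:
            return cls
    raise Exception("{0} None Found ".format(field_class))

print(match_field_sketch(Child))  # <class '__main__.Base'>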
248,840 | chrisnorman7/confmanager | confmanager/parser.py | parse_template | def parse_template(template, target):
"""Given a dictionary template containing at least most of the relevant information and a dictionary target containing sections, options and values, consolidate target into a new confmanager object and return it."""
c = ConfManager('')
for section in template:
c.add_section(section)
for option, o in template[section].items():
try:
value = type(template[section][option]['value'])(target[section][option])
except KeyError:
value = o['value']
finally:
if 'value' in o:
del o['value']
c.set(section, option, value, **o)
return c | python | def parse_template(template, target):
"""Given a dictionary template containing at least most of the relevant information and a dictionary target containing sections, options and values, consolidate target into a new confmanager object and return it."""
c = ConfManager('')
for section in template:
c.add_section(section)
for option, o in template[section].items():
try:
value = type(template[section][option]['value'])(target[section][option])
except KeyError:
value = o['value']
finally:
if 'value' in o:
del o['value']
c.set(section, option, value, **o)
return c | [
"def",
"parse_template",
"(",
"template",
",",
"target",
")",
":",
"c",
"=",
"ConfManager",
"(",
"''",
")",
"for",
"section",
"in",
"template",
":",
"c",
".",
"add_section",
"(",
"section",
")",
"for",
"option",
",",
"o",
"in",
"template",
"[",
"section",
"]",
".",
"items",
"(",
")",
":",
"try",
":",
"value",
"=",
"type",
"(",
"template",
"[",
"section",
"]",
"[",
"option",
"]",
"[",
"'value'",
"]",
")",
"(",
"target",
"[",
"section",
"]",
"[",
"option",
"]",
")",
"except",
"KeyError",
":",
"value",
"=",
"o",
"[",
"'value'",
"]",
"finally",
":",
"if",
"'value'",
"in",
"o",
":",
"del",
"o",
"[",
"'value'",
"]",
"c",
".",
"set",
"(",
"section",
",",
"option",
",",
"value",
",",
"*",
"*",
"o",
")",
"return",
"c"
] | Given a dictionary template containing at least most of the relevant information and a dictionary target containing sections, options and values, consolidate target into a new confmanager object and return it. | [
"Given",
"a",
"dictionary",
"template",
"containing",
"at",
"least",
"most",
"of",
"the",
"relevant",
"information",
"and",
"a",
"dictionary",
"target",
"containing",
"sections",
"options",
"and",
"values",
"consolidate",
"target",
"into",
"a",
"new",
"confmanager",
"object",
"and",
"return",
"it",
"."
] | 54a3ce0b596f9da32ae82fd4ff4cb46bb1cf23b1 | https://github.com/chrisnorman7/confmanager/blob/54a3ce0b596f9da32ae82fd4ff4cb46bb1cf23b1/confmanager/parser.py#L6-L20 |
248,841 | chrisnorman7/confmanager | confmanager/parser.py | parse_json | def parse_json(target, json, create_sections = False, create_options = False):
"""Given a confmanager object and a dictionary object, import the values from the dictionary into the object, optionally adding sections and options as it goes."""
is_dict = isinstance(json, dict)
for o in json:
if is_dict:
section = o
else:
section = o[0]
if not target.has_section(section):
if create_sections:
target.add_section(section)
else:
continue
for k, v in (json[o].items() if is_dict else o[1]):
if target.has_option(section, k) or create_options:
target.set(section, k, v) # Don't add if it shouldn't be there.
return target | python | def parse_json(target, json, create_sections = False, create_options = False):
"""Given a confmanager object and a dictionary object, import the values from the dictionary into the object, optionally adding sections and options as it goes."""
is_dict = isinstance(json, dict)
for o in json:
if is_dict:
section = o
else:
section = o[0]
if not target.has_section(section):
if create_sections:
target.add_section(section)
else:
continue
for k, v in (json[o].items() if is_dict else o[1]):
if target.has_option(section, k) or create_options:
target.set(section, k, v) # Don't add if it shouldn't be there.
return target | [
"def",
"parse_json",
"(",
"target",
",",
"json",
",",
"create_sections",
"=",
"False",
",",
"create_options",
"=",
"False",
")",
":",
"is_dict",
"=",
"isinstance",
"(",
"json",
",",
"dict",
")",
"for",
"o",
"in",
"json",
":",
"if",
"is_dict",
":",
"section",
"=",
"o",
"else",
":",
"section",
"=",
"o",
"[",
"0",
"]",
"if",
"not",
"target",
".",
"has_section",
"(",
"section",
")",
":",
"if",
"create_sections",
":",
"target",
".",
"add_section",
"(",
"section",
")",
"else",
":",
"continue",
"for",
"k",
",",
"v",
"in",
"(",
"json",
"[",
"o",
"]",
".",
"items",
"(",
")",
"if",
"is_dict",
"else",
"o",
"[",
"1",
"]",
")",
":",
"if",
"target",
".",
"has_option",
"(",
"section",
",",
"k",
")",
"or",
"create_options",
":",
"target",
".",
"set",
"(",
"section",
",",
"k",
",",
"v",
")",
"# Don't add if it shouldn't be there.\r",
"return",
"target"
] | Given a confmanager object and a dictionary object, import the values from the dictionary into the object, optionally adding sections and options as it goes. | [
"Given",
"a",
"confmanager",
"object",
"and",
"a",
"dictionary",
"object",
"import",
"the",
"values",
"from",
"the",
"dictionary",
"into",
"the",
"object",
"optionally",
"adding",
"sections",
"and",
"options",
"as",
"it",
"goes",
"."
] | 54a3ce0b596f9da32ae82fd4ff4cb46bb1cf23b1 | https://github.com/chrisnorman7/confmanager/blob/54a3ce0b596f9da32ae82fd4ff4cb46bb1cf23b1/confmanager/parser.py#L22-L38 |
248,842 | DanCardin/tawdry | tawdry/tawdry.py | generate_sitemap | def generate_sitemap(sitemap: typing.Mapping, prefix: list=None):
"""Create a sitemap template from the given sitemap.
The `sitemap` should be a mapping where the key is a string which
represents a single URI segment, and the value is either another mapping
or a callable (e.g. function) object.
Args:
sitemap: The definition of the routes and their views
prefix: The base url segment which gets prepended to the given map.
Examples:
The sitemap should follow the following format:
>>> {
>>> 'string_literal': {
>>> '': func1,
>>> '{arg}': func2,
>>> },
>>> }
The key points here are thus:
- Any string key not matched by the following rule will be matched
literally
- Any string key surrounded by curly brackets matches a url segment
which represents a parameter whose name is the enclosed string
(i.e. should be a valid keyword argument)
- *note* a side effect of this is that an empty string key will
match all routes leading up to the current given mapping
The above sitemap would compile to the following url mappings:
- /string_literal/ -> calls `func1()`
- /string_literal/{arg}/ -> calls `func2(arg=<the matched value>)`
"""
# Ensures all generated urls are prefixed with the prefix string
if prefix is None:
prefix = []
for segment, sub_segment in sitemap.items():
if isinstance(sub_segment, collections.abc.Mapping):
yield from generate_sitemap(sub_segment, prefix + [segment])
elif isinstance(sub_segment, collections.abc.Callable):
if segment:
prefix = prefix + [segment]
yield (prefix, sub_segment)
else:
raise ValueError('Invalid datatype for sitemap') | python | def generate_sitemap(sitemap: typing.Mapping, prefix: list=None):
"""Create a sitemap template from the given sitemap.
The `sitemap` should be a mapping where the key is a string which
represents a single URI segment, and the value is either another mapping
or a callable (e.g. function) object.
Args:
sitemap: The definition of the routes and their views
prefix: The base url segment which gets prepended to the given map.
Examples:
The sitemap should follow the following format:
>>> {
>>> 'string_literal': {
>>> '': func1,
>>> '{arg}': func2,
>>> },
>>> }
The key points here are thus:
- Any string key not matched by the following rule will be matched
literally
- Any string key surrounded by curly brackets matches a url segment
which represents a parameter whose name is the enclosed string
(i.e. should be a valid keyword argument)
- *note* a side effect of this is that an empty string key will
match all routes leading up to the current given mapping
The above sitemap would compile to the following url mappings:
- /string_literal/ -> calls `func1()`
- /string_literal/{arg}/ -> calls `func2(arg=<the matched value>)`
"""
# Ensures all generated urls are prefixed with the prefix string
if prefix is None:
prefix = []
for segment, sub_segment in sitemap.items():
if isinstance(sub_segment, collections.abc.Mapping):
yield from generate_sitemap(sub_segment, prefix + [segment])
elif isinstance(sub_segment, collections.abc.Callable):
if segment:
prefix = prefix + [segment]
yield (prefix, sub_segment)
else:
raise ValueError('Invalid datatype for sitemap') | [
"def",
"generate_sitemap",
"(",
"sitemap",
":",
"typing",
".",
"Mapping",
",",
"prefix",
":",
"list",
"=",
"None",
")",
":",
"# Ensures all generated urls are prefixed with a the prefix string",
"if",
"prefix",
"is",
"None",
":",
"prefix",
"=",
"[",
"]",
"for",
"segment",
",",
"sub_segment",
"in",
"sitemap",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"sub_segment",
",",
"collections",
".",
"abc",
".",
"Mapping",
")",
":",
"yield",
"from",
"generate_sitemap",
"(",
"sub_segment",
",",
"prefix",
"+",
"[",
"segment",
"]",
")",
"elif",
"isinstance",
"(",
"sub_segment",
",",
"collections",
".",
"abc",
".",
"Callable",
")",
":",
"if",
"segment",
":",
"prefix",
"=",
"prefix",
"+",
"[",
"segment",
"]",
"yield",
"(",
"prefix",
",",
"sub_segment",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Invalid datatype for sitemap'",
")"
] | Create a sitemap template from the given sitemap.
The `sitemap` should be a mapping where the key is a string which
represents a single URI segment, and the value is either another mapping
or a callable (e.g. function) object.
Args:
sitemap: The definition of the routes and their views
prefix: The base url segment which gets prepended to the given map.
Examples:
The sitemap should follow the following format:
>>> {
>>> 'string_literal': {
>>> '': func1,
>>> '{arg}': func2,
>>> },
>>> }
The key points here are thus:
- Any string key not matched by the following rule will be matched
literally
- Any string key surrounded by curly brackets matches a url segment
which represents a parameter whose name is the enclosed string
(i.e. should be a valid keyword argument)
- *note* a side effect of this is that an empty string key will
match all routes leading up to the current given mapping
The above sitemap would compile to the following url mappings:
- /string_literal/ -> calls `func1()`
- /string_literal/{arg}/ -> calls `func2(arg=<the matched value>)` | [
"Create",
"a",
"sitemap",
"template",
"from",
"the",
"given",
"sitemap",
"."
] | 6683b9c54eb9205f7179a854aac1cc0e6ba34be6 | https://github.com/DanCardin/tawdry/blob/6683b9c54eb9205f7179a854aac1cc0e6ba34be6/tawdry/tawdry.py#L13-L58 |
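
A minimal usage sketch for generate_sitemap above; the handlers func1 and func2
are hypothetical stand-ins, and the import path simply mirrors the file layout
shown in the record:

    from tawdry.tawdry import generate_sitemap

    def func1():             # hypothetical view for /string_literal/
        return 'list'

    def func2(arg):          # hypothetical view for /string_literal/{arg}/
        return 'detail: %s' % arg

    sitemap = {
        'string_literal': {
            '': func1,
            '{arg}': func2,
        },
    }

    # Each yielded pair is (segments, view); an empty-string key adds no segment.
    for segments, view in generate_sitemap(sitemap):
        print('/' + '/'.join(segments) + '/', '->', view.__name__)
    # prints /string_literal/ -> func1 and /string_literal/{arg}/ -> func2
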
248,843 | coghost/izen | izen/slnm.py | Robot._mock_input | def _mock_input(self, target, content):
"""
mock human input
:param target: the element to input to
:param content: the content
:return:
"""
content = helper.to_str(content)
for w in content:
target.send_keys(w)
rand_block(0.01, 0.01) | python | def _mock_input(self, target, content):
"""
mock human input
:param target: the element to input to
:param content: the content
:return:
"""
content = helper.to_str(content)
for w in content:
target.send_keys(w)
rand_block(0.01, 0.01) | [
"def",
"_mock_input",
"(",
"self",
",",
"target",
",",
"content",
")",
":",
"content",
"=",
"helper",
".",
"to_str",
"(",
"content",
")",
"for",
"w",
"in",
"content",
":",
"target",
".",
"send_keys",
"(",
"w",
")",
"rand_block",
"(",
"0.01",
",",
"0.01",
")"
] | mock human input
:param target: the element to input to
:param content: the content
:return: | [
"mock",
"human",
"input"
] | 432db017f99dd2ba809e1ba1792145ab6510263d | https://github.com/coghost/izen/blob/432db017f99dd2ba809e1ba1792145ab6510263d/izen/slnm.py#L164-L175 |
248,844 | coghost/izen | izen/slnm.py | ASite.__has_next_page | def __has_next_page(self, current_page_num=0):
""" this is an example for debug purpose only...
"""
try:
next_page = self.robot.get_elements(
self.base.get('next_page'),
multiple=True
)
log.debug('<Site> has {} next page elems'.format(len(next_page)))
if not next_page:
return False
for i, ele in enumerate(next_page):
if ele.get_attribute('innerText') == 'Next':
log.debug('<Site> {} is the right link'.format(i))
self.next_page = ele
break
return True
except Exception as _:
self.next_page = None
return False | python | def __has_next_page(self, current_page_num=0):
""" this is an example for debug purpose only...
"""
try:
next_page = self.robot.get_elements(
self.base.get('next_page'),
multiple=True
)
log.debug('<Site> has {} next page elems'.format(len(next_page)))
if not next_page:
return False
for i, ele in enumerate(next_page):
if ele.get_attribute('innerText') == 'Next':
log.debug('<Site> {} is the right link'.format(i))
self.next_page = ele
break
return True
except Exception as _:
self.next_page = None
return False | [
"def",
"__has_next_page",
"(",
"self",
",",
"current_page_num",
"=",
"0",
")",
":",
"try",
":",
"next_page",
"=",
"self",
".",
"robot",
".",
"get_elements",
"(",
"self",
".",
"base",
".",
"get",
"(",
"'next_page'",
")",
",",
"multiple",
"=",
"True",
")",
"log",
".",
"debug",
"(",
"'<Site> has {} next page elems'",
".",
"format",
"(",
"len",
"(",
"next_page",
")",
")",
")",
"if",
"not",
"next_page",
":",
"return",
"False",
"for",
"i",
",",
"ele",
"in",
"enumerate",
"(",
"next_page",
")",
":",
"if",
"ele",
".",
"get_attribute",
"(",
"'innerText'",
")",
"==",
"'Next'",
":",
"log",
".",
"debug",
"(",
"'<Site> {} is the right link'",
".",
"format",
"(",
"i",
")",
")",
"self",
".",
"next_page",
"=",
"ele",
"break",
"return",
"True",
"except",
"Exception",
"as",
"_",
":",
"self",
".",
"next_page",
"=",
"None",
"return",
"False"
] | this is an example for debugging purposes only... | [
"this",
"is",
"an",
"example",
"for",
"debug",
"purpose",
"only",
"..."
] | 432db017f99dd2ba809e1ba1792145ab6510263d | https://github.com/coghost/izen/blob/432db017f99dd2ba809e1ba1792145ab6510263d/izen/slnm.py#L478-L497 |
248,845 | coghost/izen | izen/slnm.py | ASite.response_result | def response_result(self, **kwargs):
""" default will fetch MAX_AP pages
yield `self.driver.page_source, self.driver.current_url, 1`
after the mock submit, the first page is already crawled,
so start at index 1, and yield the first page first;
when the loop runs out, use the else branch to yield the last page.
Execution reaches this point with the first page already loaded, so we must
yield before performing the `click next page` action. With range(1, page_togo)
the loop exits at page_togo - 1; by then the next page has already been
clicked, so page page_togo has finished loading, hence the yield on else exit.
"""
page_togo = kwargs.get('page_togo', self.max_page_togo)
if page_togo <= 1:
return self.robot.driver.page_source, self.robot.driver.current_url, 1
# Start at `1` because the first page has already been loaded
# End at `page_togo` because at `page_togo - 1` the next page has already been clicked
# So this must not be written as range(0, page_togo) or (1, page_togo + 1)
yield_last = kwargs.get('yield_last', False)
start_yval = 0
for page_done in range(1, page_togo):
# log.debug(self.robot.driver.current_url)
if not yield_last:
yield self.robot.driver.page_source, self.robot.driver.current_url, page_done
# click any popups
self.mock_popovers()
if self.has_next_page(page_done):
start_yval = self.goto_next(start_yval)
else:
# Exit immediately if there is no next page
log.debug('page {} is the last result page!'.format(page_done))
break
else:
if not yield_last:
yield self.robot.driver.page_source, self.robot.driver.current_url, page_togo
if yield_last:
yield self.robot.driver.page_source, self.robot.driver.current_url, page_togo | python | def response_result(self, **kwargs):
""" default will fetch MAX_AP pages
yield `self.driver.page_source, self.driver.current_url, 1`
after the mock submit, the first page is already crawled,
so start at index 1, and yield the first page first;
when the loop runs out, use the else branch to yield the last page.
Execution reaches this point with the first page already loaded, so we must
yield before performing the `click next page` action. With range(1, page_togo)
the loop exits at page_togo - 1; by then the next page has already been
clicked, so page page_togo has finished loading, hence the yield on else exit.
"""
page_togo = kwargs.get('page_togo', self.max_page_togo)
if page_togo <= 1:
return self.robot.driver.page_source, self.robot.driver.current_url, 1
# Start at `1` because the first page has already been loaded
# End at `page_togo` because at `page_togo - 1` the next page has already been clicked
# So this must not be written as range(0, page_togo) or (1, page_togo + 1)
yield_last = kwargs.get('yield_last', False)
start_yval = 0
for page_done in range(1, page_togo):
# log.debug(self.robot.driver.current_url)
if not yield_last:
yield self.robot.driver.page_source, self.robot.driver.current_url, page_done
# click any popups
self.mock_popovers()
if self.has_next_page(page_done):
start_yval = self.goto_next(start_yval)
else:
# Exit immediately if there is no next page
log.debug('page {} is the last result page!'.format(page_done))
break
else:
if not yield_last:
yield self.robot.driver.page_source, self.robot.driver.current_url, page_togo
if yield_last:
yield self.robot.driver.page_source, self.robot.driver.current_url, page_togo | [
"def",
"response_result",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"page_togo",
"=",
"kwargs",
".",
"get",
"(",
"'page_togo'",
",",
"self",
".",
"max_page_togo",
")",
"if",
"page_togo",
"<=",
"1",
":",
"return",
"self",
".",
"robot",
".",
"driver",
".",
"page_source",
",",
"self",
".",
"robot",
".",
"driver",
".",
"current_url",
",",
"1",
"# 从 `1` 开始是由于已经加载了第一页",
"# 到 `page_togo` 结束, 是因为在 `page_togo -1` 时,已经点击了下一页",
"# 因此此处不能写为 range(0, page_togo), 或者(1, page_togo + 1)",
"yield_last",
"=",
"kwargs",
".",
"get",
"(",
"'yield_last'",
",",
"False",
")",
"start_yval",
"=",
"0",
"for",
"page_done",
"in",
"range",
"(",
"1",
",",
"page_togo",
")",
":",
"# log.debug(self.robot.driver.current_url)",
"if",
"not",
"yield_last",
":",
"yield",
"self",
".",
"robot",
".",
"driver",
".",
"page_source",
",",
"self",
".",
"robot",
".",
"driver",
".",
"current_url",
",",
"page_done",
"# click any popups",
"self",
".",
"mock_popovers",
"(",
")",
"if",
"self",
".",
"has_next_page",
"(",
"page_done",
")",
":",
"start_yval",
"=",
"self",
".",
"goto_next",
"(",
"start_yval",
")",
"else",
":",
"# 如果无下一页, 直接退出",
"log",
".",
"debug",
"(",
"'page {} is the last result page!'",
".",
"format",
"(",
"page_done",
")",
")",
"break",
"else",
":",
"if",
"not",
"yield_last",
":",
"yield",
"self",
".",
"robot",
".",
"driver",
".",
"page_source",
",",
"self",
".",
"robot",
".",
"driver",
".",
"current_url",
",",
"page_togo",
"if",
"yield_last",
":",
"yield",
"self",
".",
"robot",
".",
"driver",
".",
"page_source",
",",
"self",
".",
"robot",
".",
"driver",
".",
"current_url",
",",
"page_togo"
] | default will fetch MAX_AP pages
yield `self.driver.page_source, self.driver.current_url, 1`
after the mock submit, the first page is already crawled,
so start at index 1, and yield the first page first;
when the loop runs out, use the else branch to yield the last page.
Execution reaches this point with the first page already loaded, so we must
yield before performing the `click next page` action. With range(1, page_togo)
the loop exits at page_togo - 1; by then the next page has already been
clicked, so page page_togo has finished loading, hence the yield on else exit. | [
"default",
"will",
"fetch",
"MAX_AP",
"pages",
"yield",
"self",
".",
"driver",
".",
"page_source",
"self",
".",
"driver",
".",
"current_url",
"1"
] | 432db017f99dd2ba809e1ba1792145ab6510263d | https://github.com/coghost/izen/blob/432db017f99dd2ba809e1ba1792145ab6510263d/izen/slnm.py#L609-L648 |
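
A hedged driver sketch for response_result; `site` stands for an
already-initialised ASite whose robot has just submitted a search, which is
the state the docstring assumes:

    for html, url, page_num in site.response_result(page_togo=3):
        # each iteration hands back one fully loaded result page
        print('page %d: %s (%d chars)' % (page_num, url, len(html)))
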
248,846 | tBaxter/tango-comments | build/lib/tango_comments/admin.py | CommentsAdmin._bulk_flag | def _bulk_flag(self, request, queryset, action, done_message):
"""
Flag, approve, or remove some comments from an admin action. Actually
calls the `action` argument to perform the heavy lifting.
"""
n_comments = 0
for comment in queryset:
action(request, comment)
n_comments += 1
msg = ungettext('1 comment was successfully %(action)s.',
'%(count)s comments were successfully %(action)s.',
n_comments)
self.message_user(request, msg % {'count': n_comments, 'action': done_message(n_comments)}) | python | def _bulk_flag(self, request, queryset, action, done_message):
"""
Flag, approve, or remove some comments from an admin action. Actually
calls the `action` argument to perform the heavy lifting.
"""
n_comments = 0
for comment in queryset:
action(request, comment)
n_comments += 1
msg = ungettext('1 comment was successfully %(action)s.',
'%(count)s comments were successfully %(action)s.',
n_comments)
self.message_user(request, msg % {'count': n_comments, 'action': done_message(n_comments)}) | [
"def",
"_bulk_flag",
"(",
"self",
",",
"request",
",",
"queryset",
",",
"action",
",",
"done_message",
")",
":",
"n_comments",
"=",
"0",
"for",
"comment",
"in",
"queryset",
":",
"action",
"(",
"request",
",",
"comment",
")",
"n_comments",
"+=",
"1",
"msg",
"=",
"ungettext",
"(",
"'1 comment was successfully %(action)s.'",
",",
"'%(count)s comments were successfully %(action)s.'",
",",
"n_comments",
")",
"self",
".",
"message_user",
"(",
"request",
",",
"msg",
"%",
"{",
"'count'",
":",
"n_comments",
",",
"'action'",
":",
"done_message",
"(",
"n_comments",
")",
"}",
")"
] | Flag, approve, or remove some comments from an admin action. Actually
calls the `action` argument to perform the heavy lifting. | [
"Flag",
"approve",
"or",
"remove",
"some",
"comments",
"from",
"an",
"admin",
"action",
".",
"Actually",
"calls",
"the",
"action",
"argument",
"to",
"perform",
"the",
"heavy",
"lifting",
"."
] | 1fd335c6fc9e81bba158e42e1483f1a149622ab4 | https://github.com/tBaxter/tango-comments/blob/1fd335c6fc9e81bba158e42e1483f1a149622ab4/build/lib/tango_comments/admin.py#L72-L85 |
248,847 | cariad/py-wpconfigr | wpconfigr/wp_config_file.py | WpConfigFile.set | def set(self, key, value):
"""
Updates the value of the given key in the file.
Args:
key (str): Key of the property to update.
value (str): New value of the property.
Return:
bool: Indicates whether or not a change was made.
"""
changed = super().set(key=key, value=value)
if not changed:
return False
self._log.info('Saving configuration to "%s"...', self._filename)
with open(self._filename, 'w') as stream:
stream.write(self.content)
self._log.info('Saved configuration to "%s".', self._filename)
return True | python | def set(self, key, value):
"""
Updates the value of the given key in the file.
Args:
key (str): Key of the property to update.
value (str): New value of the property.
Return:
bool: Indicates whether or not a change was made.
"""
changed = super().set(key=key, value=value)
if not changed:
return False
self._log.info('Saving configuration to "%s"...', self._filename)
with open(self._filename, 'w') as stream:
stream.write(self.content)
self._log.info('Saved configuration to "%s".', self._filename)
return True | [
"def",
"set",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"changed",
"=",
"super",
"(",
")",
".",
"set",
"(",
"key",
"=",
"key",
",",
"value",
"=",
"value",
")",
"if",
"not",
"changed",
":",
"return",
"False",
"self",
".",
"_log",
".",
"info",
"(",
"'Saving configuration to \"%s\"...'",
",",
"self",
".",
"_filename",
")",
"with",
"open",
"(",
"self",
".",
"_filename",
",",
"'w'",
")",
"as",
"stream",
":",
"stream",
".",
"write",
"(",
"self",
".",
"content",
")",
"self",
".",
"_log",
".",
"info",
"(",
"'Saved configuration to \"%s\".'",
",",
"self",
".",
"_filename",
")",
"return",
"True"
] | Updates the value of the given key in the file.
Args:
key (str): Key of the property to update.
value (str): New value of the property.
Return:
bool: Indicates whether or not a change was made. | [
"Updates",
"the",
"value",
"of",
"the",
"given",
"key",
"in",
"the",
"file",
"."
] | 8f25bb849b72ce95957566544a2be8445316c818 | https://github.com/cariad/py-wpconfigr/blob/8f25bb849b72ce95957566544a2be8445316c818/wpconfigr/wp_config_file.py#L29-L52 |
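
A short usage sketch for WpConfigFile.set; it assumes the constructor takes the
path of the wp-config.php file that set() later writes back to, and the path
itself is a placeholder:

    from wpconfigr.wp_config_file import WpConfigFile

    config = WpConfigFile('/var/www/html/wp-config.php')  # placeholder path
    if config.set('WP_DEBUG', True):
        # set() returns True only when the value actually changed on disk
        print('wp-config.php updated')
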
248,848 | mitakas/wallpaper | wallpaper/cubic.py | Cubic.next_color | def next_color(self):
"""
Returns the next color. Currently returns a random
color from the Colorbrewer 11-class diverging BrBG palette.
Returns
-------
next_rgb_color: tuple of ImageColor
"""
next_rgb_color = ImageColor.getrgb(random.choice(BrBG_11.hex_colors))
return next_rgb_color | python | def next_color(self):
"""
Returns the next color. Currently returns a random
color from the Colorbrewer 11-class diverging BrBG palette.
Returns
-------
next_rgb_color: tuple of ImageColor
"""
next_rgb_color = ImageColor.getrgb(random.choice(BrBG_11.hex_colors))
return next_rgb_color | [
"def",
"next_color",
"(",
"self",
")",
":",
"next_rgb_color",
"=",
"ImageColor",
".",
"getrgb",
"(",
"random",
".",
"choice",
"(",
"BrBG_11",
".",
"hex_colors",
")",
")",
"return",
"next_rgb_color"
] | Returns the next color. Currently returns a random
color from the Colorbrewer 11-class diverging BrBG palette.
Returns
-------
next_rgb_color: tuple of ImageColor | [
"Returns",
"the",
"next",
"color",
".",
"Currently",
"returns",
"a",
"random",
"color",
"from",
"the",
"Colorbrewer",
"11",
"-",
"class",
"diverging",
"BrBG",
"palette",
"."
] | 83d90f56cf888d39c98aeb84e0e64d1289e4d0c0 | https://github.com/mitakas/wallpaper/blob/83d90f56cf888d39c98aeb84e0e64d1289e4d0c0/wallpaper/cubic.py#L40-L51 |
248,849 | mitakas/wallpaper | wallpaper/cubic.py | Cubic.paint_cube | def paint_cube(self, x, y):
"""
Paints a cube at a certain position a color.
Parameters
----------
x: int
Horizontal position of the upper left corner of the cube.
y: int
Vertical position of the upper left corner of the cube.
"""
# get the color
color = self.next_color()
# calculate the position
cube_pos = [x, y, x + self.cube_size, y + self.cube_size]
# draw the cube
draw = ImageDraw.Draw(im=self.image)
draw.rectangle(xy=cube_pos, fill=color) | python | def paint_cube(self, x, y):
"""
Paints a cube at a certain position a color.
Parameters
----------
x: int
Horizontal position of the upper left corner of the cube.
y: int
Vertical position of the upper left corner of the cube.
"""
# get the color
color = self.next_color()
# calculate the position
cube_pos = [x, y, x + self.cube_size, y + self.cube_size]
# draw the cube
draw = ImageDraw.Draw(im=self.image)
draw.rectangle(xy=cube_pos, fill=color) | [
"def",
"paint_cube",
"(",
"self",
",",
"x",
",",
"y",
")",
":",
"# get the color",
"color",
"=",
"self",
".",
"next_color",
"(",
")",
"# calculate the position",
"cube_pos",
"=",
"[",
"x",
",",
"y",
",",
"x",
"+",
"self",
".",
"cube_size",
",",
"y",
"+",
"self",
".",
"cube_size",
"]",
"# draw the cube",
"draw",
"=",
"ImageDraw",
".",
"Draw",
"(",
"im",
"=",
"self",
".",
"image",
")",
"draw",
".",
"rectangle",
"(",
"xy",
"=",
"cube_pos",
",",
"fill",
"=",
"color",
")"
] | Paints a cube at a certain position a color.
Parameters
----------
x: int
Horizontal position of the upper left corner of the cube.
y: int
Vertical position of the upper left corner of the cube. | [
"Paints",
"a",
"cube",
"at",
"a",
"certain",
"position",
"a",
"color",
"."
] | 83d90f56cf888d39c98aeb84e0e64d1289e4d0c0 | https://github.com/mitakas/wallpaper/blob/83d90f56cf888d39c98aeb84e0e64d1289e4d0c0/wallpaper/cubic.py#L53-L71 |
248,850 | mitakas/wallpaper | wallpaper/cubic.py | Cubic.paint_pattern | def paint_pattern(self):
"""
Paints all the cubes.
"""
x = 0
while x < self.width:
y = 0
while y < self.height:
self.paint_cube(x, y)
y += self.cube_size
x += self.cube_size | python | def paint_pattern(self):
"""
Paints all the cubes.
"""
x = 0
while x < self.width:
y = 0
while y < self.height:
self.paint_cube(x, y)
y += self.cube_size
x += self.cube_size | [
"def",
"paint_pattern",
"(",
"self",
")",
":",
"x",
"=",
"0",
"while",
"x",
"<",
"self",
".",
"width",
":",
"y",
"=",
"0",
"while",
"y",
"<",
"self",
".",
"height",
":",
"self",
".",
"paint_cube",
"(",
"x",
",",
"y",
")",
"y",
"+=",
"self",
".",
"cube_size",
"x",
"+=",
"self",
".",
"cube_size"
] | Paints all the cubes. | [
"Paints",
"all",
"the",
"cubes",
"."
] | 83d90f56cf888d39c98aeb84e0e64d1289e4d0c0 | https://github.com/mitakas/wallpaper/blob/83d90f56cf888d39c98aeb84e0e64d1289e4d0c0/wallpaper/cubic.py#L73-L84 |
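
The three Cubic methods above boil down to tiling a canvas with randomly
coloured squares. A self-contained sketch of the same technique, swapping
palettable's BrBG_11 for a hard-coded hex palette:

    import random
    from PIL import Image, ImageDraw

    WIDTH, HEIGHT, CUBE = 640, 480, 40
    PALETTE = ['#543005', '#8c510a', '#bf812d', '#c7eae5', '#35978f', '#003c30']

    image = Image.new('RGB', (WIDTH, HEIGHT))
    draw = ImageDraw.Draw(image)
    for x in range(0, WIDTH, CUBE):           # paint_pattern(): walk the grid
        for y in range(0, HEIGHT, CUBE):
            # paint_cube(): one square per cell, coloured at random
            draw.rectangle([x, y, x + CUBE, y + CUBE],
                           fill=random.choice(PALETTE))
    image.save('cubic_demo.png')
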
248,851 | akissa/sachannelupdate | sachannelupdate/transports.py | get_key_files | def get_key_files(kfiles, dirname, names):
"""Return key files"""
for name in names:
fullname = os.path.join(dirname, name)
if os.path.isfile(fullname) and \
(fullname.endswith('_rsa') or
fullname.endswith('_dsa')):
kfiles.put(fullname) | python | def get_key_files(kfiles, dirname, names):
"""Return key files"""
for name in names:
fullname = os.path.join(dirname, name)
if os.path.isfile(fullname) and \
(fullname.endswith('_rsa') or
fullname.endswith('_dsa')):
kfiles.put(fullname) | [
"def",
"get_key_files",
"(",
"kfiles",
",",
"dirname",
",",
"names",
")",
":",
"for",
"name",
"in",
"names",
":",
"fullname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"name",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"fullname",
")",
"and",
"fullname",
".",
"endswith",
"(",
"'_rsa'",
")",
"or",
"fullname",
".",
"endswith",
"(",
"'_dsa'",
")",
":",
"kfiles",
".",
"put",
"(",
"fullname",
")"
] | Return key files | [
"Return",
"key",
"files"
] | a1c3c3d86b874f9c92c2407e2608963165d3ae98 | https://github.com/akissa/sachannelupdate/blob/a1c3c3d86b874f9c92c2407e2608963165d3ae98/sachannelupdate/transports.py#L35-L42 |
248,852 | akissa/sachannelupdate | sachannelupdate/transports.py | get_ssh_keys | def get_ssh_keys(sshdir):
"""Get SSH keys"""
keys = Queue()
for root, _, files in os.walk(os.path.abspath(sshdir)):
if not files:
continue
for filename in files:
fullname = os.path.join(root, filename)
if (os.path.isfile(fullname) and (fullname.endswith('_rsa') or
fullname.endswith('_dsa'))):
keys.put(fullname)
return keys | python | def get_ssh_keys(sshdir):
"""Get SSH keys"""
keys = Queue()
for root, _, files in os.walk(os.path.abspath(sshdir)):
if not files:
continue
for filename in files:
fullname = os.path.join(root, filename)
if (os.path.isfile(fullname) and (fullname.endswith('_rsa') or
fullname.endswith('_dsa'))):
keys.put(fullname)
return keys | [
"def",
"get_ssh_keys",
"(",
"sshdir",
")",
":",
"keys",
"=",
"Queue",
"(",
")",
"for",
"root",
",",
"_",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"sshdir",
")",
")",
":",
"if",
"not",
"files",
":",
"continue",
"for",
"filename",
"in",
"files",
":",
"fullname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"filename",
")",
"if",
"(",
"os",
".",
"path",
".",
"isfile",
"(",
"fullname",
")",
"and",
"fullname",
".",
"endswith",
"(",
"'_rsa'",
")",
"or",
"fullname",
".",
"endswith",
"(",
"'_dsa'",
")",
")",
":",
"keys",
".",
"put",
"(",
"fullname",
")",
"return",
"keys"
] | Get SSH keys | [
"Get",
"SSH",
"keys"
] | a1c3c3d86b874f9c92c2407e2608963165d3ae98 | https://github.com/akissa/sachannelupdate/blob/a1c3c3d86b874f9c92c2407e2608963165d3ae98/sachannelupdate/transports.py#L45-L56 |
248,853 | akissa/sachannelupdate | sachannelupdate/transports.py | get_ssh_dir | def get_ssh_dir(config, username):
"""Get the users ssh dir"""
sshdir = config.get('ssh_config_dir')
if not sshdir:
sshdir = os.path.expanduser('~/.ssh')
if not os.path.isdir(sshdir):
pwentry = getpwnam(username)
sshdir = os.path.join(pwentry.pw_dir, '.ssh')
if not os.path.isdir(sshdir):
sshdir = None
return sshdir | python | def get_ssh_dir(config, username):
"""Get the users ssh dir"""
sshdir = config.get('ssh_config_dir')
if not sshdir:
sshdir = os.path.expanduser('~/.ssh')
if not os.path.isdir(sshdir):
pwentry = getpwnam(username)
sshdir = os.path.join(pwentry.pw_dir, '.ssh')
if not os.path.isdir(sshdir):
sshdir = None
return sshdir | [
"def",
"get_ssh_dir",
"(",
"config",
",",
"username",
")",
":",
"sshdir",
"=",
"config",
".",
"get",
"(",
"'ssh_config_dir'",
")",
"if",
"not",
"sshdir",
":",
"sshdir",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~/.ssh'",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"sshdir",
")",
":",
"pwentry",
"=",
"getpwnam",
"(",
"username",
")",
"sshdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"pwentry",
".",
"pw_dir",
",",
"'.ssh'",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"sshdir",
")",
":",
"sshdir",
"=",
"None",
"return",
"sshdir"
] | Get the user's ssh dir | [
"Get",
"the",
"users",
"ssh",
"dir"
] | a1c3c3d86b874f9c92c2407e2608963165d3ae98 | https://github.com/akissa/sachannelupdate/blob/a1c3c3d86b874f9c92c2407e2608963165d3ae98/sachannelupdate/transports.py#L65-L75 |
248,854 | akissa/sachannelupdate | sachannelupdate/transports.py | get_local_user | def get_local_user(username):
"""Get the local username"""
try:
_ = getpwnam(username)
luser = username
except KeyError:
luser = getuser()
return luser | python | def get_local_user(username):
"""Get the local username"""
try:
_ = getpwnam(username)
luser = username
except KeyError:
luser = getuser()
return luser | [
"def",
"get_local_user",
"(",
"username",
")",
":",
"try",
":",
"_",
"=",
"getpwnam",
"(",
"username",
")",
"luser",
"=",
"username",
"except",
"KeyError",
":",
"luser",
"=",
"getuser",
"(",
")",
"return",
"luser"
] | Get the local username | [
"Get",
"the",
"local",
"username"
] | a1c3c3d86b874f9c92c2407e2608963165d3ae98 | https://github.com/akissa/sachannelupdate/blob/a1c3c3d86b874f9c92c2407e2608963165d3ae98/sachannelupdate/transports.py#L78-L85 |
248,855 | akissa/sachannelupdate | sachannelupdate/transports.py | get_host_keys | def get_host_keys(hostname, sshdir):
"""get host key"""
hostkey = None
try:
host_keys = load_host_keys(os.path.join(sshdir, 'known_hosts'))
except IOError:
host_keys = {}
if hostname in host_keys:
hostkeytype = host_keys[hostname].keys()[0]
hostkey = host_keys[hostname][hostkeytype]
return hostkey | python | def get_host_keys(hostname, sshdir):
"""get host key"""
hostkey = None
try:
host_keys = load_host_keys(os.path.join(sshdir, 'known_hosts'))
except IOError:
host_keys = {}
if hostname in host_keys:
hostkeytype = host_keys[hostname].keys()[0]
hostkey = host_keys[hostname][hostkeytype]
return hostkey | [
"def",
"get_host_keys",
"(",
"hostname",
",",
"sshdir",
")",
":",
"hostkey",
"=",
"None",
"try",
":",
"host_keys",
"=",
"load_host_keys",
"(",
"os",
".",
"path",
".",
"join",
"(",
"sshdir",
",",
"'known_hosts'",
")",
")",
"except",
"IOError",
":",
"host_keys",
"=",
"{",
"}",
"if",
"hostname",
"in",
"host_keys",
":",
"hostkeytype",
"=",
"host_keys",
"[",
"hostname",
"]",
".",
"keys",
"(",
")",
"[",
"0",
"]",
"hostkey",
"=",
"host_keys",
"[",
"hostname",
"]",
"[",
"hostkeytype",
"]",
"return",
"hostkey"
] | get host key | [
"get",
"host",
"key"
] | a1c3c3d86b874f9c92c2407e2608963165d3ae98 | https://github.com/akissa/sachannelupdate/blob/a1c3c3d86b874f9c92c2407e2608963165d3ae98/sachannelupdate/transports.py#L88-L101 |
248,856 | akissa/sachannelupdate | sachannelupdate/transports.py | get_sftp_conn | def get_sftp_conn(config):
"""Make a SFTP connection, returns sftp client and connection objects"""
remote = config.get('remote_location')
parts = urlparse(remote)
if ':' in parts.netloc:
hostname, port = parts.netloc.split(':')
else:
hostname = parts.netloc
port = 22
port = int(port)
username = config.get('remote_username') or getuser()
luser = get_local_user(username)
sshdir = get_ssh_dir(config, luser)
hostkey = get_host_keys(hostname, sshdir)
try:
sftp = None
keys = get_ssh_keys(sshdir)
transport = Transport((hostname, port))
while not keys.empty():
try:
key = PKey.from_private_key_file(keys.get())
transport.connect(
hostkey=hostkey,
username=username,
password=None,
pkey=key)
sftp = SFTPClient.from_transport(transport)
break
except (PasswordRequiredException, SSHException):
pass
if sftp is None:
raise SaChannelUpdateTransportError("SFTP connection failed")
return sftp, transport
except BaseException as msg:
raise SaChannelUpdateTransportError(msg) | python | def get_sftp_conn(config):
"""Make a SFTP connection, returns sftp client and connection objects"""
remote = config.get('remote_location')
parts = urlparse(remote)
if ':' in parts.netloc:
hostname, port = parts.netloc.split(':')
else:
hostname = parts.netloc
port = 22
port = int(port)
username = config.get('remote_username') or getuser()
luser = get_local_user(username)
sshdir = get_ssh_dir(config, luser)
hostkey = get_host_keys(hostname, sshdir)
try:
sftp = None
keys = get_ssh_keys(sshdir)
transport = Transport((hostname, port))
while not keys.empty():
try:
key = PKey.from_private_key_file(keys.get())
transport.connect(
hostkey=hostkey,
username=username,
password=None,
pkey=key)
sftp = SFTPClient.from_transport(transport)
break
except (PasswordRequiredException, SSHException):
pass
if sftp is None:
raise SaChannelUpdateTransportError("SFTP connection failed")
return sftp, transport
except BaseException as msg:
raise SaChannelUpdateTransportError(msg) | [
"def",
"get_sftp_conn",
"(",
"config",
")",
":",
"remote",
"=",
"config",
".",
"get",
"(",
"'remote_location'",
")",
"parts",
"=",
"urlparse",
"(",
"remote",
")",
"if",
"':'",
"in",
"parts",
".",
"netloc",
":",
"hostname",
",",
"port",
"=",
"parts",
".",
"netloc",
".",
"split",
"(",
"':'",
")",
"else",
":",
"hostname",
"=",
"parts",
".",
"netloc",
"port",
"=",
"22",
"port",
"=",
"int",
"(",
"port",
")",
"username",
"=",
"config",
".",
"get",
"(",
"'remote_username'",
")",
"or",
"getuser",
"(",
")",
"luser",
"=",
"get_local_user",
"(",
"username",
")",
"sshdir",
"=",
"get_ssh_dir",
"(",
"config",
",",
"luser",
")",
"hostkey",
"=",
"get_host_keys",
"(",
"hostname",
",",
"sshdir",
")",
"try",
":",
"sftp",
"=",
"None",
"keys",
"=",
"get_ssh_keys",
"(",
"sshdir",
")",
"transport",
"=",
"Transport",
"(",
"(",
"hostname",
",",
"port",
")",
")",
"while",
"not",
"keys",
".",
"empty",
"(",
")",
":",
"try",
":",
"key",
"=",
"PKey",
".",
"from_private_key_file",
"(",
"keys",
".",
"get",
"(",
")",
")",
"transport",
".",
"connect",
"(",
"hostkey",
"=",
"hostkey",
",",
"username",
"=",
"username",
",",
"password",
"=",
"None",
",",
"pkey",
"=",
"key",
")",
"sftp",
"=",
"SFTPClient",
".",
"from_transport",
"(",
"transport",
")",
"break",
"except",
"(",
"PasswordRequiredException",
",",
"SSHException",
")",
":",
"pass",
"if",
"sftp",
"is",
"None",
":",
"raise",
"SaChannelUpdateTransportError",
"(",
"\"SFTP connection failed\"",
")",
"return",
"sftp",
",",
"transport",
"except",
"BaseException",
"as",
"msg",
":",
"raise",
"SaChannelUpdateTransportError",
"(",
"msg",
")"
] | Make an SFTP connection, returns sftp client and connection objects | [
"Make",
"a",
"SFTP",
"connection",
"returns",
"sftp",
"client",
"and",
"connection",
"objects"
] | a1c3c3d86b874f9c92c2407e2608963165d3ae98 | https://github.com/akissa/sachannelupdate/blob/a1c3c3d86b874f9c92c2407e2608963165d3ae98/sachannelupdate/transports.py#L104-L141 |
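
A hedged usage sketch for get_sftp_conn; the host, username, directory and file
paths are all placeholders, and the config keys are exactly the ones the
function reads:

    from sachannelupdate.transports import get_sftp_conn

    config = {
        'remote_location': 'sftp://rules.example.com:22/channel',  # placeholder
        'remote_username': 'saupdate',                             # placeholder
        'ssh_config_dir': '/home/saupdate/.ssh',                   # placeholder
    }
    sftp, transport = get_sftp_conn(config)
    try:
        sftp.put('update.tar.gz', 'channel/update.tar.gz')  # placeholder paths
    finally:
        transport.close()
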
248,857 | chartbeat-labs/swailing | swailing/token_bucket.py | TokenBucket.check_and_consume | def check_and_consume(self):
"""Returns True if there is currently at least one token, and reduces
it by one.
"""
if self._count < 1.0:
self._fill()
consumable = self._count >= 1.0
if consumable:
self._count -= 1.0
self.throttle_count = 0
else:
self.throttle_count += 1
return consumable | python | def check_and_consume(self):
"""Returns True if there is currently at least one token, and reduces
it by one.
"""
if self._count < 1.0:
self._fill()
consumable = self._count >= 1.0
if consumable:
self._count -= 1.0
self.throttle_count = 0
else:
self.throttle_count += 1
return consumable | [
"def",
"check_and_consume",
"(",
"self",
")",
":",
"if",
"self",
".",
"_count",
"<",
"1.0",
":",
"self",
".",
"_fill",
"(",
")",
"consumable",
"=",
"self",
".",
"_count",
">=",
"1.0",
"if",
"consumable",
":",
"self",
".",
"_count",
"-=",
"1.0",
"self",
".",
"throttle_count",
"=",
"0",
"else",
":",
"self",
".",
"throttle_count",
"+=",
"1",
"return",
"consumable"
] | Returns True if there is currently at least one token, and reduces
it by one. | [
"Returns",
"True",
"if",
"there",
"is",
"currently",
"at",
"least",
"one",
"token",
"and",
"reduces",
"it",
"by",
"one",
"."
] | d55e0dd7af59a2ba93f7c9c46ff56f6a4080b222 | https://github.com/chartbeat-labs/swailing/blob/d55e0dd7af59a2ba93f7c9c46ff56f6a4080b222/swailing/token_bucket.py#L19-L35 |
248,858 | chartbeat-labs/swailing | swailing/token_bucket.py | TokenBucket._fill | def _fill(self):
"""Fills bucket with accrued tokens since last fill."""
right_now = time.time()
time_diff = right_now - self._last_fill
if time_diff < 0:
return
self._count = min(
self._count + self._fill_rate * time_diff,
self._capacity,
)
self._last_fill = right_now | python | def _fill(self):
"""Fills bucket with accrued tokens since last fill."""
right_now = time.time()
time_diff = right_now - self._last_fill
if time_diff < 0:
return
self._count = min(
self._count + self._fill_rate * time_diff,
self._capacity,
)
self._last_fill = right_now | [
"def",
"_fill",
"(",
"self",
")",
":",
"right_now",
"=",
"time",
".",
"time",
"(",
")",
"time_diff",
"=",
"right_now",
"-",
"self",
".",
"_last_fill",
"if",
"time_diff",
"<",
"0",
":",
"return",
"self",
".",
"_count",
"=",
"min",
"(",
"self",
".",
"_count",
"+",
"self",
".",
"_fill_rate",
"*",
"time_diff",
",",
"self",
".",
"_capacity",
",",
")",
"self",
".",
"_last_fill",
"=",
"right_now"
] | Fills bucket with accrued tokens since last fill. | [
"Fills",
"bucket",
"with",
"accrued",
"tokens",
"since",
"last",
"fill",
"."
] | d55e0dd7af59a2ba93f7c9c46ff56f6a4080b222 | https://github.com/chartbeat-labs/swailing/blob/d55e0dd7af59a2ba93f7c9c46ff56f6a4080b222/swailing/token_bucket.py#L42-L54 |
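
A usage sketch for the token bucket above; the TokenBucket constructor
signature (capacity, then fill rate in tokens per second) is an assumption
inferred from the attributes _fill() relies on:

    from swailing.token_bucket import TokenBucket

    bucket = TokenBucket(5, 1.0)  # assumed: burst of 5, refills 1 token/sec

    for i in range(100):
        if bucket.check_and_consume():
            print('emitting log line', i)
        else:
            # suppressed; bucket.throttle_count counts the consecutive drops
            pass
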
248,859 | ulf1/oxyba | oxyba/mysql_batch_and_fetch.py | mysql_batch_and_fetch | def mysql_batch_and_fetch(mysql_config, *sql_queries):
"""
Execute a series of SQL statements before the final Select query
Parameters
----------
mysql_config : dict
The user credentials as defined in MySQLdb.connect, e.g.
mysql_config = {'user': 'myname', 'passwd': 'supersecret',
'host': '<ip address or domain>', 'db': '<myschema>'}
sql_queries : list or tuple
A list or tuple of SQL queries, where the last SQL command
has to be the final Select query.
(If a string is provided the semicolon ";" is used to split
the string into a list of strings)
Returns
-------
result_table : tuple
The result table as tuple of tuples.
Sources
-------
* http://mysqlclient.readthedocs.io/user_guide.html
"""
# load modules
import MySQLdb as mydb
import sys
import gc
# ensure that `sqlqueries` is a list/tuple
# split a string into a list
if len(sql_queries) == 1:
if isinstance(sql_queries[0], str):
sql_queries = sql_queries[0].split(";")
if isinstance(sql_queries[0], (list, tuple)):
sql_queries = sql_queries[0]
# connect and execute queries
try:
conn = mydb.connect(**mysql_config)
curs = conn.cursor()
for sql_query in sql_queries:
if len(sql_query) > 0:
curs.execute(sql_query)
result_table = curs.fetchall()
except mydb.Error as err:
print(err)
gc.collect()
sys.exit(1)
else:
if conn:
conn.close()
gc.collect()
return result_table | python | def mysql_batch_and_fetch(mysql_config, *sql_queries):
"""
Execute a series of SQL statements before the final Select query
Parameters
----------
mysql_config : dict
The user credentials as defined in MySQLdb.connect, e.g.
mysql_config = {'user': 'myname', 'passwd': 'supersecret',
'host': '<ip address or domain>', 'db': '<myschema>'}
sql_queries : list or tuple
A list or tuple of SQL queries, where the last SQL command
has to be the final Select query.
(If a string is provided the semicolon ";" is used to split
the string into a list of strings)
Returns
-------
result_table : tuple
The result table as tuple of tuples.
Sources
-------
* http://mysqlclient.readthedocs.io/user_guide.html
"""
# load modules
import MySQLdb as mydb
import sys
import gc
# ensure that `sqlqueries` is a list/tuple
# split a string into a list
if len(sql_queries) == 1:
if isinstance(sql_queries[0], str):
sql_queries = sql_queries[0].split(";")
if isinstance(sql_queries[0], (list, tuple)):
sql_queries = sql_queries[0]
# connect and execute queries
try:
conn = mydb.connect(**mysql_config)
curs = conn.cursor()
for sql_query in sql_queries:
if len(sql_query) > 0:
curs.execute(sql_query)
result_table = curs.fetchall()
except mydb.Error as err:
print(err)
gc.collect()
sys.exit(1)
else:
if conn:
conn.close()
gc.collect()
return result_table | [
"def",
"mysql_batch_and_fetch",
"(",
"mysql_config",
",",
"*",
"sql_queries",
")",
":",
"# load modules",
"import",
"MySQLdb",
"as",
"mydb",
"import",
"sys",
"import",
"gc",
"# ensure that `sqlqueries` is a list/tuple",
"# split a string into a list",
"if",
"len",
"(",
"sql_queries",
")",
"==",
"1",
":",
"if",
"isinstance",
"(",
"sql_queries",
"[",
"0",
"]",
",",
"str",
")",
":",
"sql_queries",
"=",
"sql_queries",
"[",
"0",
"]",
".",
"split",
"(",
"\";\"",
")",
"if",
"isinstance",
"(",
"sql_queries",
"[",
"0",
"]",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"sql_queries",
"=",
"sql_queries",
"[",
"0",
"]",
"# connect and execute queries",
"try",
":",
"conn",
"=",
"mydb",
".",
"connect",
"(",
"*",
"*",
"mysql_config",
")",
"curs",
"=",
"conn",
".",
"cursor",
"(",
")",
"for",
"sql_query",
"in",
"sql_queries",
":",
"if",
"len",
"(",
"sql_query",
")",
">",
"0",
":",
"curs",
".",
"execute",
"(",
"sql_query",
")",
"result_table",
"=",
"curs",
".",
"fetchall",
"(",
")",
"except",
"mydb",
".",
"Error",
"as",
"err",
":",
"print",
"(",
"err",
")",
"gc",
".",
"collect",
"(",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"else",
":",
"if",
"conn",
":",
"conn",
".",
"close",
"(",
")",
"gc",
".",
"collect",
"(",
")",
"return",
"result_table"
] | Execute a series of SQL statements before the final Select query
Parameters
----------
mysql_config : dict
The user credentials as defined in MySQLdb.connect, e.g.
mysql_config = {'user': 'myname', 'passwd': 'supersecret',
'host': '<ip address or domain>', 'db': '<myschema>'}
sql_queries : list or tuple
A list or tuple of SQL queries, where the last SQL command
has to be the final Select query.
(If a string is provided the semicolon ";" is used to split
the string into a list of strings)
Returns
-------
result_table : tuple
The result table as tuple of tuples.
Sources
-------
* http://mysqlclient.readthedocs.io/user_guide.html | [
"Excute",
"a",
"series",
"of",
"SQL",
"statements",
"before",
"the",
"final",
"Select",
"query"
] | b3043116050de275124365cb11e7df91fb40169d | https://github.com/ulf1/oxyba/blob/b3043116050de275124365cb11e7df91fb40169d/oxyba/mysql_batch_and_fetch.py#L2-L57 |
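
A usage sketch for mysql_batch_and_fetch; credentials, host and table names are
placeholders, and the import path mirrors the file layout in the record:

    from oxyba.mysql_batch_and_fetch import mysql_batch_and_fetch

    mysql_config = {'user': 'myname', 'passwd': 'supersecret',
                    'host': 'db.example.com', 'db': 'myschema'}  # placeholders

    rows = mysql_batch_and_fetch(
        mysql_config,
        "SET @cutoff = '2017-01-01'",                          # batch statement
        "SELECT id, name FROM users WHERE created > @cutoff",  # final SELECT
    )
    for row in rows:
        print(row)
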
248,860 | henrysher/kotocore | kotocore/utils/mangle.py | to_camel_case | def to_camel_case(snake_case_name):
"""
Converts snake_cased_names to CamelCaseNames.
:param snake_case_name: The name you'd like to convert from.
:type snake_case_name: string
:returns: A converted string
:rtype: string
"""
bits = snake_case_name.split('_')
return ''.join([bit.capitalize() for bit in bits]) | python | def to_camel_case(snake_case_name):
"""
Converts snake_cased_names to CamelCaseNames.
:param snake_case_name: The name you'd like to convert from.
:type snake_case_name: string
:returns: A converted string
:rtype: string
"""
bits = snake_case_name.split('_')
return ''.join([bit.capitalize() for bit in bits]) | [
"def",
"to_camel_case",
"(",
"snake_case_name",
")",
":",
"bits",
"=",
"snake_case_name",
".",
"split",
"(",
"'_'",
")",
"return",
"''",
".",
"join",
"(",
"[",
"bit",
".",
"capitalize",
"(",
")",
"for",
"bit",
"in",
"bits",
"]",
")"
] | Converts snake_cased_names to CamelCaseNames.
:param snake_case_name: The name you'd like to convert from.
:type snake_case_name: string
:returns: A converted string
:rtype: string | [
"Converts",
"snake_cased_names",
"to",
"CamelCaseNames",
"."
] | c52d2f3878b924ceabca07f61c91abcb1b230ecc | https://github.com/henrysher/kotocore/blob/c52d2f3878b924ceabca07f61c91abcb1b230ecc/kotocore/utils/mangle.py#L19-L30 |
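
For example, assuming the import path mirrors the record's file layout:

    from kotocore.utils.mangle import to_camel_case

    print(to_camel_case('describe_instances'))   # -> DescribeInstances
    print(to_camel_case('to_camel_case'))        # -> ToCamelCase
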
248,861 | henrysher/kotocore | kotocore/utils/mangle.py | html_to_rst | def html_to_rst(html):
"""
Converts the service HTML docs to reStructured Text, for use in docstrings.
:param html: The raw HTML to convert
:type html: string
:returns: A reStructured Text formatted version of the text
:rtype: string
"""
doc = ReSTDocument()
doc.include_doc_string(html)
raw_doc = doc.getvalue()
return raw_doc.decode('utf-8') | python | def html_to_rst(html):
"""
Converts the service HTML docs to reStructured Text, for use in docstrings.
:param html: The raw HTML to convert
:type html: string
:returns: A reStructured Text formatted version of the text
:rtype: string
"""
doc = ReSTDocument()
doc.include_doc_string(html)
raw_doc = doc.getvalue()
return raw_doc.decode('utf-8') | [
"def",
"html_to_rst",
"(",
"html",
")",
":",
"doc",
"=",
"ReSTDocument",
"(",
")",
"doc",
".",
"include_doc_string",
"(",
"html",
")",
"raw_doc",
"=",
"doc",
".",
"getvalue",
"(",
")",
"return",
"raw_doc",
".",
"decode",
"(",
"'utf-8'",
")"
] | Converts the service HTML docs to reStructured Text, for use in docstrings.
:param html: The raw HTML to convert
:type html: string
:returns: A reStructured Text formatted version of the text
:rtype: string | [
"Converts",
"the",
"service",
"HTML",
"docs",
"to",
"reStructured",
"Text",
"for",
"use",
"in",
"docstrings",
"."
] | c52d2f3878b924ceabca07f61c91abcb1b230ecc | https://github.com/henrysher/kotocore/blob/c52d2f3878b924ceabca07f61c91abcb1b230ecc/kotocore/utils/mangle.py#L33-L46 |
248,862 | dirkcuys/s3imageresize | s3imageresize/s3imageresize.py | resize_image_folder | def resize_image_folder(bucket, key_prefix, pil_size):
""" This function resizes all the images in a folder """
con = boto.connect_s3()
b = con.get_bucket(bucket)
for key in b.list(key_prefix):
key = b.get_key(key.name)
if 'image' not in key.content_type:
continue
size = key.get_metadata('size')
if size == str(pil_size):
continue
with tempfile.TemporaryFile() as big, tempfile.TemporaryFile() as small:
# download file and resize
key.get_contents_to_file(big)
big.flush()
big.seek(0)
img = Image.open(big)
img.thumbnail(pil_size, Image.ANTIALIAS)
img.save(small, img.format)
small.flush()
small.seek(0)
key.set_metadata('size', str(pil_size))
key.set_contents_from_file(small, headers={'Content-Type': key.content_type}) | python | def resize_image_folder(bucket, key_prefix, pil_size):
""" This function resizes all the images in a folder """
con = boto.connect_s3()
b = con.get_bucket(bucket)
for key in b.list(key_prefix):
key = b.get_key(key.name)
if 'image' not in key.content_type:
continue
size = key.get_metadata('size')
if size == str(pil_size):
continue
with tempfile.TemporaryFile() as big, tempfile.TemporaryFile() as small:
# download file and resize
key.get_contents_to_file(big)
big.flush()
big.seek(0)
img = Image.open(big)
img.thumbnail(pil_size, Image.ANTIALIAS)
img.save(small, img.format)
small.flush()
small.seek(0)
key.set_metadata('size', str(pil_size))
key.set_contents_from_file(small, headers={'Content-Type': key.content_type}) | [
"def",
"resize_image_folder",
"(",
"bucket",
",",
"key_prefix",
",",
"pil_size",
")",
":",
"con",
"=",
"boto",
".",
"connect_s3",
"(",
")",
"b",
"=",
"con",
".",
"get_bucket",
"(",
"bucket",
")",
"for",
"key",
"in",
"b",
".",
"list",
"(",
"key_prefix",
")",
":",
"key",
"=",
"b",
".",
"get_key",
"(",
"key",
".",
"name",
")",
"if",
"'image'",
"not",
"in",
"key",
".",
"content_type",
":",
"continue",
"size",
"=",
"key",
".",
"get_metadata",
"(",
"'size'",
")",
"if",
"size",
"==",
"str",
"(",
"pil_size",
")",
":",
"continue",
"with",
"tempfile",
".",
"TemporaryFile",
"(",
")",
"as",
"big",
",",
"tempfile",
".",
"TemporaryFile",
"(",
")",
"as",
"small",
":",
"# download file and resize",
"key",
".",
"get_contents_to_file",
"(",
"big",
")",
"big",
".",
"flush",
"(",
")",
"big",
".",
"seek",
"(",
"0",
")",
"img",
"=",
"Image",
".",
"open",
"(",
"big",
")",
"img",
".",
"thumbnail",
"(",
"pil_size",
",",
"Image",
".",
"ANTIALIAS",
")",
"img",
".",
"save",
"(",
"small",
",",
"img",
".",
"format",
")",
"small",
".",
"flush",
"(",
")",
"small",
".",
"seek",
"(",
"0",
")",
"key",
".",
"set_metadata",
"(",
"'size'",
",",
"str",
"(",
"pil_size",
")",
")",
"key",
".",
"set_contents_from_file",
"(",
"small",
",",
"headers",
"=",
"{",
"'Content-Type'",
":",
"key",
".",
"content_type",
"}",
")"
] | This function resizes all the images in a folder | [
"This",
"function",
"resizes",
"all",
"the",
"images",
"in",
"a",
"folder"
] | eb70147ce7d92892b93f29612c695e63b513b3a3 | https://github.com/dirkcuys/s3imageresize/blob/eb70147ce7d92892b93f29612c695e63b513b3a3/s3imageresize/s3imageresize.py#L6-L29 |
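
A usage sketch for resize_image_folder; the bucket name and key prefix are
placeholders, and boto.connect_s3() picks up AWS credentials from the
environment:

    from s3imageresize.s3imageresize import resize_image_folder

    # Shrink every image under uploads/avatars/ to fit inside 256x256,
    # skipping keys whose 'size' metadata already records that bound.
    resize_image_folder('my-media-bucket', 'uploads/avatars/', (256, 256))
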
248,863 | chrisnorman7/confmanager | confmanager/__init__.py | ConfManager.add_section | def add_section(self, section, friendly_name = None):
"""Adds a section and optionally gives it a friendly name.."""
if not isinstance(section, BASESTRING): # Make sure the user isn't expecting to use something stupid as a key.
raise ValueError(section)
# See if we've got this section already:
if section in self.config:
raise DuplicateSectionError(section) # Yep... Kick off.
else:
self.config[section] = OrderedDict() # Nope... Add it
if friendly_name == None:
friendly_name = section.title()
if '&' not in friendly_name:
friendly_name = '&' + friendly_name
self.section_names[section] = friendly_name | python | def add_section(self, section, friendly_name = None):
"""Adds a section and optionally gives it a friendly name.."""
if not isinstance(section, BASESTRING): # Make sure the user isn't expecting to use something stupid as a key.
raise ValueError(section)
# See if we've got this section already:
if section in self.config:
raise DuplicateSectionError(section) # Yep... Kick off.
else:
self.config[section] = OrderedDict() # Nope... Add it
if friendly_name == None:
friendly_name = section.title()
if '&' not in friendly_name:
friendly_name = '&' + friendly_name
self.section_names[section] = friendly_name | [
"def",
"add_section",
"(",
"self",
",",
"section",
",",
"friendly_name",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"section",
",",
"BASESTRING",
")",
":",
"# Make sure the user isn't expecting to use something stupid as a key.\r",
"raise",
"ValueError",
"(",
"section",
")",
"# See if we've got this section already:\r",
"if",
"section",
"in",
"self",
".",
"config",
":",
"raise",
"DuplicateSectionError",
"(",
"section",
")",
"# Yep... Kick off.\r",
"else",
":",
"self",
".",
"config",
"[",
"section",
"]",
"=",
"OrderedDict",
"(",
")",
"# Nope... Ad it\r",
"if",
"friendly_name",
"==",
"None",
":",
"friendly_name",
"=",
"section",
".",
"title",
"(",
")",
"if",
"'&'",
"not",
"in",
"friendly_name",
":",
"friendly_name",
"=",
"'&'",
"+",
"friendly_name",
"self",
".",
"section_names",
"[",
"section",
"]",
"=",
"friendly_name"
] | Adds a section and optionally gives it a friendly name. | [
"Adds",
"a",
"section",
"and",
"optionally",
"gives",
"it",
"a",
"friendly",
"name",
".."
] | 54a3ce0b596f9da32ae82fd4ff4cb46bb1cf23b1 | https://github.com/chrisnorman7/confmanager/blob/54a3ce0b596f9da32ae82fd4ff4cb46bb1cf23b1/confmanager/__init__.py#L39-L52 |
248,864 | chrisnorman7/confmanager | confmanager/__init__.py | ConfManager.toggle | def toggle(self, section, option):
"""Toggles option in section."""
self.set(section, option, not self.get(section, option)) | python | def toggle(self, section, option):
"""Toggles option in section."""
self.set(section, option, not self.get(section, option)) | [
"def",
"toggle",
"(",
"self",
",",
"section",
",",
"option",
")",
":",
"self",
".",
"set",
"(",
"section",
",",
"option",
",",
"not",
"self",
".",
"get",
"(",
"section",
",",
"option",
")",
")"
] | Toggles option in section. | [
"Toggles",
"option",
"in",
"section",
"."
] | 54a3ce0b596f9da32ae82fd4ff4cb46bb1cf23b1 | https://github.com/chrisnorman7/confmanager/blob/54a3ce0b596f9da32ae82fd4ff4cb46bb1cf23b1/confmanager/__init__.py#L125-L127 |
248,865 | chrisnorman7/confmanager | confmanager/__init__.py | ConfManager.get | def get(self, section, option, default = None):
"""Returns the option's value converted into it's intended type. If default is specified, return that on failure, else raise NoOptionError."""
if self.has_section(section):
try:
return self.config[section][option].get('value', None)
except KeyError:
if default == None:
raise NoOptionError(option)
else:
return default
else:
raise NoSectionError(section) | python | def get(self, section, option, default = None):
"""Returns the option's value converted into it's intended type. If default is specified, return that on failure, else raise NoOptionError."""
if self.has_section(section):
try:
return self.config[section][option].get('value', None)
except KeyError:
if default == None:
raise NoOptionError(option)
else:
return default
else:
raise NoSectionError(section) | [
"def",
"get",
"(",
"self",
",",
"section",
",",
"option",
",",
"default",
"=",
"None",
")",
":",
"if",
"self",
".",
"has_section",
"(",
"section",
")",
":",
"try",
":",
"return",
"self",
".",
"config",
"[",
"section",
"]",
"[",
"option",
"]",
".",
"get",
"(",
"'value'",
",",
"None",
")",
"except",
"KeyError",
":",
"if",
"default",
"==",
"None",
":",
"raise",
"NoOptionError",
"(",
"option",
")",
"else",
":",
"return",
"default",
"else",
":",
"raise",
"NoSectionError",
"(",
"section",
")"
] | Returns the option's value converted into its intended type. If default is specified, return that on failure, else raise NoOptionError. | [
"Returns",
"the",
"option",
"s",
"value",
"converted",
"into",
"it",
"s",
"intended",
"type",
".",
"If",
"default",
"is",
"specified",
"return",
"that",
"on",
"failure",
"else",
"raise",
"NoOptionError",
"."
] | 54a3ce0b596f9da32ae82fd4ff4cb46bb1cf23b1 | https://github.com/chrisnorman7/confmanager/blob/54a3ce0b596f9da32ae82fd4ff4cb46bb1cf23b1/confmanager/__init__.py#L137-L148 |
248,866 | chrisnorman7/confmanager | confmanager/__init__.py | ConfManager.get_dump | def get_dump(self):
"""Returns options and values."""
res = []
for section in self.sections():
sec = []
for option in self.options(section):
sec.append([option, self.get(section, option)])
res.append([section, sec])
return res | python | def get_dump(self):
"""Returns options and values."""
res = []
for section in self.sections():
sec = []
for option in self.options(section):
sec.append([option, self.get(section, option)])
res.append([section, sec])
return res | [
"def",
"get_dump",
"(",
"self",
")",
":",
"res",
"=",
"[",
"]",
"for",
"section",
"in",
"self",
".",
"sections",
"(",
")",
":",
"sec",
"=",
"[",
"]",
"for",
"option",
"in",
"self",
".",
"options",
"(",
"section",
")",
":",
"sec",
".",
"append",
"(",
"[",
"option",
",",
"self",
".",
"get",
"(",
"section",
",",
"option",
")",
"]",
")",
"res",
".",
"append",
"(",
"[",
"section",
",",
"sec",
"]",
")",
"return",
"res"
] | Returns options and values. | [
"Returns",
"options",
"and",
"values",
"."
] | 54a3ce0b596f9da32ae82fd4ff4cb46bb1cf23b1 | https://github.com/chrisnorman7/confmanager/blob/54a3ce0b596f9da32ae82fd4ff4cb46bb1cf23b1/confmanager/__init__.py#L165-L173 |
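
A sketch tying the ConfManager methods above together; the no-argument
constructor and set() creating a missing option are assumptions (set's
signature itself is inferred from toggle(), which calls it):

    from confmanager import ConfManager

    conf = ConfManager()                       # assumed constructor
    conf.add_section('display', 'Display Settings')
    conf.set('display', 'fullscreen', False)   # assumed to create the option
    conf.toggle('display', 'fullscreen')       # flips the stored boolean
    print(conf.get('display', 'fullscreen'))   # -> True
    print(conf.get_dump())                     # -> [['display', [['fullscreen', True]]]]
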
248,867 | bmuller/txairbrake | txairbrake/observers.py | AirbrakeLogObserver._onError | def _onError(self, error):
"""
Stop observer, raise exception, then restart. This prevents an infinite ping pong game of exceptions.
"""
self.stop()
self._logModule.err(
error,
"Unhandled error logging exception to %s" % (self.airbrakeURL,))
self.start() | python | def _onError(self, error):
"""
Stop observer, raise exception, then restart. This prevents an infinite ping pong game of exceptions.
"""
self.stop()
self._logModule.err(
error,
"Unhandled error logging exception to %s" % (self.airbrakeURL,))
self.start() | [
"def",
"_onError",
"(",
"self",
",",
"error",
")",
":",
"self",
".",
"stop",
"(",
")",
"self",
".",
"_logModule",
".",
"err",
"(",
"error",
",",
"\"Unhandled error logging exception to %s\"",
"%",
"(",
"self",
".",
"airbrakeURL",
",",
")",
")",
"self",
".",
"start",
"(",
")"
] | Stop observer, raise exception, then restart. This prevents an infinite ping pong game of exceptions. | [
"Stop",
"observer",
"raise",
"exception",
"then",
"restart",
".",
"This",
"prevents",
"an",
"infinite",
"ping",
"pong",
"game",
"of",
"exceptions",
"."
] | 38e65fe2330c6ce7fb788bad0cff0a85cceb1943 | https://github.com/bmuller/txairbrake/blob/38e65fe2330c6ce7fb788bad0cff0a85cceb1943/txairbrake/observers.py#L62-L70 |
248,868 | SanketDG/mexe | mexe.py | parse_arguments | def parse_arguments():
"""
Parses all the command line arguments using argparse and returns them.
"""
parser = argparse.ArgumentParser()
parser.add_argument('file', metavar="FILE", nargs='+',
help='file to be made executable')
parser.add_argument("-p", "--python", metavar="VERSION",
help="python version (2 or 3)")
parser.add_argument('-v', '--version', action='version',
version='%(prog)s ' + __version__, help='show version')
parser.add_argument('-r', '--recursive', action='store_true',
help='recursively iterate the directories')
args = parser.parse_args()
return args | python | def parse_arguments():
"""
Parses all the command line arguments using argparse and returns them.
"""
parser = argparse.ArgumentParser()
parser.add_argument('file', metavar="FILE", nargs='+',
help='file to be made executable')
parser.add_argument("-p", "--python", metavar="VERSION",
help="python version (2 or 3)")
parser.add_argument('-v', '--version', action='version',
version='%(prog)s ' + __version__, help='show version')
parser.add_argument('-r', '--recursive', action='store_true',
help='recursively iterate the directories')
args = parser.parse_args()
return args | [
"def",
"parse_arguments",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"'file'",
",",
"metavar",
"=",
"\"FILE\"",
",",
"nargs",
"=",
"'+'",
",",
"help",
"=",
"'file to be made executable'",
")",
"parser",
".",
"add_argument",
"(",
"\"-p\"",
",",
"\"--python\"",
",",
"metavar",
"=",
"\"VERSION\"",
",",
"help",
"=",
"\"python version (2 or 3)\"",
")",
"parser",
".",
"add_argument",
"(",
"'-v'",
",",
"'--version'",
",",
"action",
"=",
"'version'",
",",
"version",
"=",
"'%(prog)s '",
"+",
"__version__",
",",
"help",
"=",
"'show version'",
")",
"parser",
".",
"add_argument",
"(",
"'-r'",
",",
"'--recursive'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'recursively iterate the directories'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"return",
"args"
] | Parses all the command line arguments using argparse and returns them. | [
"Parses",
"all",
"the",
"command",
"line",
"arguments",
"using",
"argparse",
"and",
"returns",
"them",
"."
] | ad24507b34eabaa1c849de49cba99cc97c2cd759 | https://github.com/SanketDG/mexe/blob/ad24507b34eabaa1c849de49cba99cc97c2cd759/mexe.py#L17-L33 |
248,869 | SanketDG/mexe | mexe.py | contains_shebang | def contains_shebang(f):
"""
Returns true if any shebang line is present in the first line of the file.
"""
first_line = f.readline()
if first_line in shebangs.values():
return True
return False | python | def contains_shebang(f):
"""
Returns true if any shebang line is present in the first line of the file.
"""
first_line = f.readline()
if first_line in shebangs.values():
return True
return False | [
"def",
"contains_shebang",
"(",
"f",
")",
":",
"first_line",
"=",
"f",
".",
"readline",
"(",
")",
"if",
"first_line",
"in",
"shebangs",
".",
"values",
"(",
")",
":",
"return",
"True",
"return",
"False"
] | Returns true if any shebang line is present in the first line of the file. | [
"Returns",
"true",
"if",
"any",
"shebang",
"line",
"is",
"present",
"in",
"the",
"first",
"line",
"of",
"the",
"file",
"."
] | ad24507b34eabaa1c849de49cba99cc97c2cd759 | https://github.com/SanketDG/mexe/blob/ad24507b34eabaa1c849de49cba99cc97c2cd759/mexe.py#L36-L43 |
248,870 | SanketDG/mexe | mexe.py | make_exec | def make_exec(fname, version):
"""
Writes the shebang and makes the file executable.
"""
# if no version is specified, use system default.
if version is None:
version = 'default'
# write the shebang and then make the file executable.
with open(fname, 'rb+') as f:
put_shebang(f, version)
# make the file executable
os.chmod(fname, os.stat(fname).st_mode | 0o0111)
print("{} is now executable".format(fname)) | python | def make_exec(fname, version):
"""
Writes the shebang and makes the file executable.
"""
# if no version is specified, use system default.
if version is None:
version = 'default'
# write the shebang and then make the file executable.
with open(fname, 'rb+') as f:
put_shebang(f, version)
# make the file executable
os.chmod(fname, os.stat(fname).st_mode | 0o0111)
print("{} is now executable".format(fname)) | [
"def",
"make_exec",
"(",
"fname",
",",
"version",
")",
":",
"# if no version is specified, use system default.",
"if",
"version",
"is",
"None",
":",
"version",
"=",
"'default'",
"# write the shebang and then make the file executable.",
"with",
"open",
"(",
"fname",
",",
"'rb+'",
")",
"as",
"f",
":",
"put_shebang",
"(",
"f",
",",
"version",
")",
"# make the file",
"os",
".",
"chmod",
"(",
"fname",
",",
"os",
".",
"stat",
"(",
"fname",
")",
".",
"st_mode",
"|",
"0o0111",
")",
"print",
"(",
"\"{} is now executable\"",
".",
"format",
"(",
"fname",
")",
")"
] | Writes the shebang and makes the file executable. | [
"Writes",
"the",
"shebang",
"and",
"makes",
"the",
"file",
"executable",
"."
] | ad24507b34eabaa1c849de49cba99cc97c2cd759 | https://github.com/SanketDG/mexe/blob/ad24507b34eabaa1c849de49cba99cc97c2cd759/mexe.py#L58-L71 |
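
A sketch of how the mexe helpers above compose; it mirrors what a hypothetical
main() would do with the parsed arguments:

    from mexe import parse_arguments, make_exec

    args = parse_arguments()            # FILE arguments plus optional -p VERSION
    for fname in args.file:
        make_exec(fname, args.python)   # write the shebang, then chmod +x
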
248,871 | heikomuller/sco-datastore | scodata/subject.py | SubjectHandle.data_file | def data_file(self):
"""Original uploaded data file the subject was created from.
Returns
-------
File-type object
Reference to file on local disk
"""
return os.path.join(self.upload_directory, self.properties[datastore.PROPERTY_FILENAME]) | python | def data_file(self):
"""Original uploaded data file the subject was created from.
Returns
-------
File-type object
Reference to file on local disk
"""
return os.path.join(self.upload_directory, self.properties[datastore.PROPERTY_FILENAME]) | [
"def",
"data_file",
"(",
"self",
")",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"upload_directory",
",",
"self",
".",
"properties",
"[",
"datastore",
".",
"PROPERTY_FILENAME",
"]",
")"
] | Original uploaded data file the subject was created from.
Returns
-------
File-type object
Reference to file on local disk | [
"Original",
"uploaded",
"data",
"file",
"the",
"subject",
"was",
"created",
"from",
"."
] | 7180a6b51150667e47629da566aedaa742e39342 | https://github.com/heikomuller/sco-datastore/blob/7180a6b51150667e47629da566aedaa742e39342/scodata/subject.py#L106-L114 |
248,872 | heikomuller/sco-datastore | scodata/subject.py | DefaultSubjectManager.upload_file | def upload_file(self, filename, file_type=FILE_TYPE_FREESURFER_DIRECTORY):
"""Create an anatomy object on local disk from the given file.
Currently, only Freesurfer anatomy directories are supported. Expects a
tar file.
Parameters
----------
filename : string
Name of the (uploaded) file
file_type : string
File type (currently expects FILE_TYPE_FREESURFER_DIRECTORY)
Returns
-------
SubjectHandle
Handle for created subject in database
"""
# We currently only support one file type (i.e., FREESURFER_DIRECTORY).
if file_type != FILE_TYPE_FREESURFER_DIRECTORY:
raise ValueError('Unsupported file type: ' + file_type)
return self.upload_freesurfer_archive(filename) | python | def upload_file(self, filename, file_type=FILE_TYPE_FREESURFER_DIRECTORY):
"""Create an anatomy object on local disk from the given file.
Currently, only Freesurfer anatomy directories are supported. Expects a
tar file.
Parameters
----------
filename : string
Name of the (uploaded) file
file_type : string
File type (currently expects FILE_TYPE_FREESURFER_DIRECTORY)
Returns
-------
SubjectHandle
Handle for created subject in database
"""
# We currently only support one file type (i.e., FREESURFER_DIRECTORY).
if file_type != FILE_TYPE_FREESURFER_DIRECTORY:
raise ValueError('Unsupported file type: ' + file_type)
return self.upload_freesurfer_archive(filename) | [
"def",
"upload_file",
"(",
"self",
",",
"filename",
",",
"file_type",
"=",
"FILE_TYPE_FREESURFER_DIRECTORY",
")",
":",
"# We currently only support one file type (i.e., FREESURFER_DIRECTORY).",
"if",
"file_type",
"!=",
"FILE_TYPE_FREESURFER_DIRECTORY",
":",
"raise",
"ValueError",
"(",
"'Unsupported file type: '",
"+",
"file_type",
")",
"return",
"self",
".",
"upload_freesurfer_archive",
"(",
"filename",
")"
] | Create an anatomy object on local disk from the given file.
Currently, only Freesurfer anatomy directories are supported. Expects a
tar file.
Parameters
----------
filename : string
Name of the (uploaded) file
file_type : string
File type (currently expects FILE_TYPE_FREESURFER_DIRECTORY)
Returns
-------
SubjectHandle
Handle for created subject in database | [
"Create",
"an",
"anatomy",
"object",
"on",
"local",
"disk",
"from",
"the",
"given",
"file",
".",
"Currently",
"only",
"Freesurfer",
"anatomy",
"directories",
"are",
"supported",
".",
"Expects",
"a",
"tar",
"file",
"."
] | 7180a6b51150667e47629da566aedaa742e39342 | https://github.com/heikomuller/sco-datastore/blob/7180a6b51150667e47629da566aedaa742e39342/scodata/subject.py#L193-L213 |
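A brief sketch of calling `upload_file`; the manager constructor signature and archive path are assumptions not shown in this record:

    mgr = DefaultSubjectManager('/data/subjects')   # constructor signature assumed
    subject = mgr.upload_file('subject01.tar.gz')   # only FILE_TYPE_FREESURFER_DIRECTORY is accepted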
248,873 | heikomuller/sco-datastore | scodata/subject.py | DefaultSubjectManager.upload_freesurfer_archive | def upload_freesurfer_archive(self, filename, object_identifier=None, read_only=False):
"""Create an anatomy object on local disk from a Freesurfer anatomy
tar file. If the given file is a Freesurfer file it will be copied to
the created subject's upload directory.
Parameters
----------
filename : string
Name of the (uploaded) file
object_identifier : string
Unique object identifier, optional
read_only : boolean, optional
Optional value for the read-only property
Returns
-------
SubjectHandle
Handle for created subject in database
"""
# At this point we expect the file to be a (compressed) tar archive.
# Extract the archive contents into a new temporary directory
temp_dir = tempfile.mkdtemp()
try:
tf = tarfile.open(name=filename, mode='r')
tf.extractall(path=temp_dir)
except (tarfile.ReadError, IOError) as err:
# Clean up in case there is an error during extraction
shutil.rmtree(temp_dir)
raise ValueError(str(err))
# Find a folder that contains sub-folders 'surf' and 'mri'. These
# are the only folders we keep in the new anatomy folder. Raise an
# error if no such folder exists
freesurf_dir = get_freesurfer_dir(temp_dir)
if not freesurf_dir:
# Remove anatomy directory and extracted files
shutil.rmtree(temp_dir)
raise ValueError('not a valid subject directory')
# Create a new identifier. This identifier will be used as the
# directory name.
if object_identifier is None:
identifier = str(uuid.uuid4()).replace('-', '')
else:
identifier = object_identifier
subject_dir = os.path.join(self.directory, identifier)
# Create the initial set of properties for the new anatomy object. The
# name is derived from the filename minus any known extensions
prop_filename = os.path.basename(os.path.normpath(filename))
prop_name = prop_filename
# Based on the valid list of suffixes the file is either a tar-file
# or a zipped tar-file.
prop_mime = 'application/x-tar' if filename.endswith('.tar') else 'application/x-gzip'
for suffix in ['.tar', '.tgz', '.tar.gz']:
if prop_name.endswith(suffix):
prop_name = prop_name[:-len(suffix)]
break
properties = {
datastore.PROPERTY_FILENAME : prop_filename,
datastore.PROPERTY_FILESIZE : os.path.getsize(filename),
datastore.PROPERTY_FILETYPE : FILE_TYPE_FREESURFER_DIRECTORY,
datastore.PROPERTY_MIMETYPE : prop_mime,
datastore.PROPERTY_NAME : prop_name
}
if read_only:
properties[datastore.PROPERTY_READONLY] = True
# Create the directory for the anatomy object, the unpacked data files
# and the original uploaded file (for download).
os.mkdir(subject_dir)
data_dir = os.path.join(subject_dir, DATA_DIRECTORY)
os.mkdir(data_dir)
upload_dir = os.path.join(subject_dir, UPLOAD_DIRECTORY)
os.mkdir(upload_dir)
# Move all sub-folders from the Freesurfer directory to the new anatomy
# data directory
for f in os.listdir(freesurf_dir):
sub_folder = os.path.join(freesurf_dir, f)
if os.path.isdir(sub_folder):
shutil.move(sub_folder, data_dir)
# Move original upload file to upload directory
shutil.copyfile(filename, os.path.join(upload_dir, prop_filename))
# Remove the temp directory
shutil.rmtree(temp_dir)
# Use current time in UTC as the object's timestamp
obj = SubjectHandle(
identifier,
properties,
subject_dir
)
self.insert_object(obj)
return obj | python | def upload_freesurfer_archive(self, filename, object_identifier=None, read_only=False):
"""Create an anatomy object on local disk from a Freesurfer anatomy
tar file. If the given file is a Freesurfer file it will be copied to
the created subject's upload directory.
Parameters
----------
filename : string
Name of the (uploaded) file
object_identifier : string
Unique object identifier, optional
read_only : boolean, optional
Optional value for the read-only property
Returns
-------
SubjectHandle
Handle for created subject in database
"""
# At this point we expect the file to be a (compressed) tar archive.
# Extract the archive contents into a new temporary directory
temp_dir = tempfile.mkdtemp()
try:
tf = tarfile.open(name=filename, mode='r')
tf.extractall(path=temp_dir)
except (tarfile.ReadError, IOError) as err:
# Clean up in case there is an error during extraction
shutil.rmtree(temp_dir)
raise ValueError(str(err))
# Find a folder that contains sub-folders 'surf' and 'mri'. These
# are the only folders we keep in the new anatomy folder. Raise an
# error if no such folder exists
freesurf_dir = get_freesurfer_dir(temp_dir)
if not freesurf_dir:
# Remove anatomy directory and extracted files
shutil.rmtree(temp_dir)
raise ValueError('not a valid subject directory')
# Create a new identifier. This identifier will be used as the
# directory name.
if object_identifier is None:
identifier = str(uuid.uuid4()).replace('-', '')
else:
identifier = object_identifier
subject_dir = os.path.join(self.directory, identifier)
# Create the initial set of properties for the new anatomy object. The
# name is derived from the filename minus any known extensions
prop_filename = os.path.basename(os.path.normpath(filename))
prop_name = prop_filename
# Based on the valid list of suffixes the file is either a tar-file
# or a zipped tar-file.
prop_mime = 'application/x-tar' if filename.endswith('.tar') else 'application/x-gzip'
for suffix in ['.tar', '.tgz', '.tar.gz']:
if prop_name.endswith(suffix):
prop_name = prop_name[:-len(suffix)]
break
properties = {
datastore.PROPERTY_FILENAME : prop_filename,
datastore.PROPERTY_FILESIZE : os.path.getsize(filename),
datastore.PROPERTY_FILETYPE : FILE_TYPE_FREESURFER_DIRECTORY,
datastore.PROPERTY_MIMETYPE : prop_mime,
datastore.PROPERTY_NAME : prop_name
}
if read_only:
properties[datastore.PROPERTY_READONLY] = True
# Create the directory for the anatomy object, the unpacked data files
# and the original uploaded file (for download).
os.mkdir(subject_dir)
data_dir = os.path.join(subject_dir, DATA_DIRECTORY)
os.mkdir(data_dir)
upload_dir = os.path.join(subject_dir, UPLOAD_DIRECTORY)
os.mkdir(upload_dir)
# Move all sub-folders from the Freesurfer directory to the new anatomy
# data directory
for f in os.listdir(freesurf_dir):
sub_folder = os.path.join(freesurf_dir, f)
if os.path.isdir(sub_folder):
shutil.move(sub_folder, data_dir)
# Move original upload file to upload directory
shutil.copyfile(filename, os.path.join(upload_dir, prop_filename))
# Remove the temp directory
shutil.rmtree(temp_dir)
# Use current time in UTC as the object's timestamp
obj = SubjectHandle(
identifier,
properties,
subject_dir
)
self.insert_object(obj)
return obj | [
"def",
"upload_freesurfer_archive",
"(",
"self",
",",
"filename",
",",
"object_identifier",
"=",
"None",
",",
"read_only",
"=",
"False",
")",
":",
"# At this point we expect the file to be a (compressed) tar archive.",
"# Extract the archive contents into a new temporary directory",
"temp_dir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"try",
":",
"tf",
"=",
"tarfile",
".",
"open",
"(",
"name",
"=",
"filename",
",",
"mode",
"=",
"'r'",
")",
"tf",
".",
"extractall",
"(",
"path",
"=",
"temp_dir",
")",
"except",
"(",
"tarfile",
".",
"ReadError",
",",
"IOError",
")",
"as",
"err",
":",
"# Clean up in case there is an error during extraction",
"shutil",
".",
"rmtree",
"(",
"temp_dir",
")",
"raise",
"ValueError",
"(",
"str",
"(",
"err",
")",
")",
"# Find a folder that contains sub-folders 'surf' and 'mri'. These",
"# are the only folders we keep in the new anatomy folder. Raise an",
"# error if no such folder esists",
"freesurf_dir",
"=",
"get_freesurfer_dir",
"(",
"temp_dir",
")",
"if",
"not",
"freesurf_dir",
":",
"# Remove anatomy directory and extracted files",
"shutil",
".",
"rmtree",
"(",
"temp_dir",
")",
"raise",
"ValueError",
"(",
"'not a valid subject directory'",
")",
"# Create a new identifier. This identifier will be used as the",
"# directory name.",
"if",
"object_identifier",
"is",
"None",
":",
"identifier",
"=",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
".",
"replace",
"(",
"'-'",
",",
"''",
")",
"else",
":",
"identifier",
"=",
"object_identifier",
"subject_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"directory",
",",
"identifier",
")",
"# Create the initial set of properties for the new anatomy object. The",
"# name is derived from the filename minus any known extensions",
"prop_filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"os",
".",
"path",
".",
"normpath",
"(",
"filename",
")",
")",
"prop_name",
"=",
"prop_filename",
"# Based on the valid list of suffixes the file is either a tar-file",
"# or a zipped tar-file.",
"prop_mime",
"=",
"'application/x-tar'",
"if",
"filename",
".",
"endswith",
"(",
"'.tar'",
")",
"else",
"'application/x-gzip'",
"for",
"suffix",
"in",
"[",
"'.tar'",
",",
"'.tgz'",
",",
"'.tar.gz'",
"]",
":",
"if",
"prop_name",
".",
"endswith",
"(",
"suffix",
")",
":",
"prop_name",
"=",
"prop_name",
"[",
":",
"-",
"len",
"(",
"suffix",
")",
"]",
"break",
"properties",
"=",
"{",
"datastore",
".",
"PROPERTY_FILENAME",
":",
"prop_filename",
",",
"datastore",
".",
"PROPERTY_FILESIZE",
":",
"os",
".",
"path",
".",
"getsize",
"(",
"filename",
")",
",",
"datastore",
".",
"PROPERTY_FILETYPE",
":",
"FILE_TYPE_FREESURFER_DIRECTORY",
",",
"datastore",
".",
"PROPERTY_MIMETYPE",
":",
"prop_mime",
",",
"datastore",
".",
"PROPERTY_NAME",
":",
"prop_name",
"}",
"if",
"read_only",
":",
"properties",
"[",
"datastore",
".",
"PROPERTY_READONLY",
"]",
"=",
"True",
"# Create the directory for the anatomy object, the unpacked data files",
"# and the original uploaded file (for download).",
"os",
".",
"mkdir",
"(",
"subject_dir",
")",
"data_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"subject_dir",
",",
"DATA_DIRECTORY",
")",
"os",
".",
"mkdir",
"(",
"data_dir",
")",
"upload_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"subject_dir",
",",
"UPLOAD_DIRECTORY",
")",
"os",
".",
"mkdir",
"(",
"upload_dir",
")",
"# Move all sub-folders from the Freesurfer directory to the new anatomy",
"# data directory",
"for",
"f",
"in",
"os",
".",
"listdir",
"(",
"freesurf_dir",
")",
":",
"sub_folder",
"=",
"os",
".",
"path",
".",
"join",
"(",
"freesurf_dir",
",",
"f",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"sub_folder",
")",
":",
"shutil",
".",
"move",
"(",
"sub_folder",
",",
"data_dir",
")",
"# Move original upload file to upload directory",
"shutil",
".",
"copyfile",
"(",
"filename",
",",
"os",
".",
"path",
".",
"join",
"(",
"upload_dir",
",",
"prop_filename",
")",
")",
"# Remove the temp directory",
"shutil",
".",
"rmtree",
"(",
"temp_dir",
")",
"# Use current time in UTC as the object's timestamp",
"obj",
"=",
"SubjectHandle",
"(",
"identifier",
",",
"properties",
",",
"subject_dir",
")",
"self",
".",
"insert_object",
"(",
"obj",
")",
"return",
"obj"
] | Create an anatomy object on local disk from a Freesurfer anatomy
tar file. If the given file is a Freesurfer file it will be copied to
the created subject's upload directory.
Parameters
----------
filename : string
Name of the (uploaded) file
object_identifier : string
Unique object identifier, optional
read_only : boolean, optional
Optional value for the read-only property
Returns
-------
SubjectHandle
Handle for created subject in database | [
"Create",
"an",
"anatomy",
"object",
"on",
"local",
"disk",
"from",
"a",
"Freesurfer",
"anatomy",
"tar",
"file",
".",
"If",
"the",
"given",
"file",
"is",
"a",
"Freesurfer",
"file",
"it",
"will",
"be",
"copied",
"to",
"the",
"created",
"subject",
"s",
"upload",
"directory",
"."
] | 7180a6b51150667e47629da566aedaa742e39342 | https://github.com/heikomuller/sco-datastore/blob/7180a6b51150667e47629da566aedaa742e39342/scodata/subject.py#L215-L303 |
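A hedged sketch of the happy path for `upload_freesurfer_archive`; the manager construction and archive name are assumptions, and the archive must contain a folder with 'surf' and 'mri' sub-folders or a ValueError is raised:

    mgr = DefaultSubjectManager('/data/subjects')   # constructor signature assumed
    subject = mgr.upload_freesurfer_archive('subject01.tar.gz', read_only=True)
    print(subject.identifier)                       # 32-character hex id derived from uuid4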
248,874 | pjuren/pyokit | src/pyokit/scripts/genomicIntersection.py | getUI | def getUI(args):
"""
build and return a UI object for this script.
:param args: raw arguments to parse
"""
programName = os.path.basename(sys.argv[0])
longDescription = "takes a file with a list of p-values and applies " +\
"Benjamini and Hochberg FDR to convert to q-values "
shortDescription = "takes a file with a list of p-values and applies " +\
"Benjamini and Hochberg FDR to convert to q-values "
ui = CLI(programName, shortDescription, longDescription)
ui.minArgs = 2
ui.maxArgs = 2
ui.addOption(Option(short="o", long="output", argName="filename",
description="output to given file, else stdout",
required=False, type=str))
ui.addOption(Option(short="s", long="stranded",
description="treat regions on separate strands as " +
"disjoint, even if they overlap",
required=False))
ui.addOption(Option(short="v", long="verbose",
description="output additional messages to stderr " +
"about run", required=False))
ui.addOption(Option(short="h", long="help",
description="show this help message ", special=True))
ui.addOption(Option(short="u", long="test",
description="run unit tests ", special=True))
ui.parseCommandLine(args)
return ui | python | def getUI(args):
"""
build and return a UI object for this script.
:param args: raw arguments to parse
"""
programName = os.path.basename(sys.argv[0])
longDescription = "takes a file with a list of p-values and applies " +\
"Benjamini and Hochberg FDR to convert to q-values "
shortDescription = "takes a file with a list of p-values and applies " +\
"Benjamini and Hochberg FDR to convert to q-values "
ui = CLI(programName, shortDescription, longDescription)
ui.minArgs = 2
ui.maxArgs = 2
ui.addOption(Option(short="o", long="output", argName="filename",
description="output to given file, else stdout",
required=False, type=str))
ui.addOption(Option(short="s", long="stranded",
description="treat regions on separate strands as " +
"disjoint, even if they overlap",
required=False))
ui.addOption(Option(short="v", long="verbose",
description="output additional messages to stderr " +
"about run", required=False))
ui.addOption(Option(short="h", long="help",
description="show this help message ", special=True))
ui.addOption(Option(short="u", long="test",
description="run unit tests ", special=True))
ui.parseCommandLine(args)
return ui | [
"def",
"getUI",
"(",
"args",
")",
":",
"programName",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"sys",
".",
"argv",
"[",
"0",
"]",
")",
"longDescription",
"=",
"\"takes a file with a list of p-values and applies \"",
"+",
"\"Benjamini and Hochberg FDR to convert to q-values \"",
"shortDescription",
"=",
"\"takes a file with a list of p-values and applies \"",
"+",
"\"Benjamini and Hochberg FDR to convert to q-values \"",
"ui",
"=",
"CLI",
"(",
"programName",
",",
"shortDescription",
",",
"longDescription",
")",
"ui",
".",
"minArgs",
"=",
"2",
"ui",
".",
"maxArgs",
"=",
"2",
"ui",
".",
"addOption",
"(",
"Option",
"(",
"short",
"=",
"\"o\"",
",",
"long",
"=",
"\"output\"",
",",
"argName",
"=",
"\"filename\"",
",",
"description",
"=",
"\"output to given file, else stdout\"",
",",
"required",
"=",
"False",
",",
"type",
"=",
"str",
")",
")",
"ui",
".",
"addOption",
"(",
"Option",
"(",
"short",
"=",
"\"s\"",
",",
"long",
"=",
"\"stranded\"",
",",
"description",
"=",
"\"treat regions on separate strands as \"",
"+",
"\"disjoint, even if they overlap\"",
",",
"required",
"=",
"False",
")",
")",
"ui",
".",
"addOption",
"(",
"Option",
"(",
"short",
"=",
"\"v\"",
",",
"long",
"=",
"\"verbose\"",
",",
"description",
"=",
"\"output additional messages to stderr \"",
"+",
"\"about run\"",
",",
"required",
"=",
"False",
")",
")",
"ui",
".",
"addOption",
"(",
"Option",
"(",
"short",
"=",
"\"h\"",
",",
"long",
"=",
"\"help\"",
",",
"description",
"=",
"\"show this help message \"",
",",
"special",
"=",
"True",
")",
")",
"ui",
".",
"addOption",
"(",
"Option",
"(",
"short",
"=",
"\"u\"",
",",
"long",
"=",
"\"test\"",
",",
"description",
"=",
"\"run unit tests \"",
",",
"special",
"=",
"True",
")",
")",
"ui",
".",
"parseCommandLine",
"(",
"args",
")",
"return",
"ui"
] | build and return a UI object for this script.
:param args: raw arguments to parse | [
"build",
"and",
"return",
"a",
"UI",
"object",
"for",
"this",
"script",
"."
] | fddae123b5d817daa39496183f19c000d9c3791f | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/scripts/genomicIntersection.py#L47-L78 |
248,875 | pjuren/pyokit | src/pyokit/scripts/genomicIntersection.py | main | def main(args):
"""
main entry point for the GenomicIntIntersection script.
:param args: the arguments for this script, as a list of string. Should
already have had things like the script name stripped. That
is, if there are no args provided, this should be an empty
list.
"""
# get options and arguments
ui = getUI(args)
if ui.optionIsSet("test"):
# just run unit tests
unittest.main(argv=[sys.argv[0]])
elif ui.optionIsSet("help"):
# just show help
ui.usage()
else:
verbose = ui.optionIsSet("verbose")
# stranded?
stranded = ui.optionIsSet("stranded")
if stranded:
sys.stderr.write("Sorry, stranded mode hasn't been implemented yet.")
sys.exit()
# get output handle
out_fh = sys.stdout
if ui.optionIsSet("output"):
out_fh = open(ui.getValue("output"), "w")
# get input file-handles -- we know we'll get exactly two, since we
# specified it in the UI definition
regions_1 = [x for x in BEDIterator(ui.getArgument(0), verbose=verbose)]
regions_2 = [x for x in BEDIterator(ui.getArgument(1), verbose=verbose)]
for r in regionsIntersection(regions_1, regions_2):
out_fh.write(str(r) + "\n") | python | def main(args):
"""
main entry point for the GenomicIntIntersection script.
:param args: the arguments for this script, as a list of string. Should
already have had things like the script name stripped. That
is, if there are no args provided, this should be an empty
list.
"""
# get options and arguments
ui = getUI(args)
if ui.optionIsSet("test"):
# just run unit tests
unittest.main(argv=[sys.argv[0]])
elif ui.optionIsSet("help"):
# just show help
ui.usage()
else:
verbose = ui.optionIsSet("verbose")
# stranded?
stranded = ui.optionIsSet("stranded")
if stranded:
sys.stderr.write("Sorry, stranded mode hasn't been implemented yet.")
sys.exit()
# get output handle
out_fh = sys.stdout
if ui.optionIsSet("output"):
out_fh = open(ui.getValue("output"), "w")
# get input file-handles -- we know we'll get exactly two, since we
# specified it in the UI definition
regions_1 = [x for x in BEDIterator(ui.getArgument(0), verbose=verbose)]
regions_2 = [x for x in BEDIterator(ui.getArgument(1), verbose=verbose)]
for r in regionsIntersection(regions_1, regions_2):
out_fh.write(str(r) + "\n") | [
"def",
"main",
"(",
"args",
")",
":",
"# get options and arguments",
"ui",
"=",
"getUI",
"(",
"args",
")",
"if",
"ui",
".",
"optionIsSet",
"(",
"\"test\"",
")",
":",
"# just run unit tests",
"unittest",
".",
"main",
"(",
"argv",
"=",
"[",
"sys",
".",
"argv",
"[",
"0",
"]",
"]",
")",
"elif",
"ui",
".",
"optionIsSet",
"(",
"\"help\"",
")",
":",
"# just show help",
"ui",
".",
"usage",
"(",
")",
"else",
":",
"verbose",
"=",
"ui",
".",
"optionIsSet",
"(",
"\"verbose\"",
")",
"# stranded?",
"stranded",
"=",
"ui",
".",
"optionIsSet",
"(",
"\"stranded\"",
")",
"if",
"stranded",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"Sorry, stranded mode hasn't been implemented yet.\"",
")",
"sys",
".",
"exit",
"(",
")",
"# get output handle",
"out_fh",
"=",
"sys",
".",
"stdout",
"if",
"ui",
".",
"optionIsSet",
"(",
"\"output\"",
")",
":",
"out_fh",
"=",
"open",
"(",
"ui",
".",
"getValue",
"(",
"\"output\"",
")",
",",
"\"w\"",
")",
"# get input file-handles -- we know we'll get exactly two, since we",
"# specified it in the UI definition",
"regions_1",
"=",
"[",
"x",
"for",
"x",
"in",
"BEDIterator",
"(",
"ui",
".",
"getArgument",
"(",
"0",
")",
",",
"verbose",
"=",
"verbose",
")",
"]",
"regions_2",
"=",
"[",
"x",
"for",
"x",
"in",
"BEDIterator",
"(",
"ui",
".",
"getArgument",
"(",
"1",
")",
",",
"verbose",
"=",
"verbose",
")",
"]",
"for",
"r",
"in",
"regionsIntersection",
"(",
"regions_1",
",",
"regions_2",
")",
":",
"out_fh",
".",
"write",
"(",
"str",
"(",
"r",
")",
"+",
"\"\\n\"",
")"
] | main entry point for the GenomicIntIntersection script.
:param args: the arguments for this script, as a list of string. Should
already have had things like the script name stripped. That
is, if there are no args provided, this should be an empty
list. | [
"main",
"entry",
"point",
"for",
"the",
"GenomicIntIntersection",
"script",
"."
] | fddae123b5d817daa39496183f19c000d9c3791f | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/scripts/genomicIntersection.py#L85-L123 |
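A sketch of driving the script's entry point directly; the BED file names are hypothetical, and the argument list excludes the script name, as the docstring requires:

    main(['regions_a.bed', 'regions_b.bed', '-o', 'overlap.bed'])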
248,876 | elkan1788/ppytools | ppytools/cfgreader.py | ConfReader.getAsTuple | def getAsTuple(self, section):
"""Get section name tuple
:param section: section name
:return: tuple object
"""
keys = self.getKeys(section)
value_dict = self.getValues(section)
return namedtuple(section, keys)(**value_dict) | python | def getAsTuple(self, section):
"""Get section name tuple
:param section: section name
:return: tuple object
"""
keys = self.getKeys(section)
value_dict = self.getValues(section)
return namedtuple(section, keys)(**value_dict) | [
"def",
"getAsTuple",
"(",
"self",
",",
"section",
")",
":",
"keys",
"=",
"self",
".",
"getKeys",
"(",
"section",
")",
"value_dict",
"=",
"self",
".",
"getValues",
"(",
"section",
")",
"return",
"namedtuple",
"(",
"section",
",",
"keys",
")",
"(",
"*",
"*",
"value_dict",
")"
] | Get section name tuple
:param section: section name
:return: tuple object | [
"Get",
"section",
"name",
"tuple"
] | 117aeed9f669ae46e0dd6cb11c5687a5f797816c | https://github.com/elkan1788/ppytools/blob/117aeed9f669ae46e0dd6cb11c5687a5f797816c/ppytools/cfgreader.py#L81-L89 |
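An illustrative sketch of `getAsTuple`, assuming a ConfReader built over an INI file whose [server] section defines host and port keys (the constructor signature and file name are assumptions):

    conf = ConfReader('app.ini')        # hypothetical constructor and file
    server = conf.getAsTuple('server')
    print(server.host, server.port)     # attribute names mirror the section's keys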
248,877 | KnowledgeLinks/rdfframework | rdfframework/configuration/rdfwconfig.py | update_req | def update_req(name, old_req, config={}):
"""
Takes a requirement and updates it based on a specific attribute key
args:
name: the name of the attribute
old_req: the requirement definition
"""
if not name:
return old_req
new_req = copy.deepcopy(old_req)
del_idxs = []
if "req_items" in old_req:
req_key = get_req_key(old_req['req_items'])
for i, item in enumerate(old_req['req_items']):
if name == item[req_key] and item.get("dict_params"):
for param, value in item['dict_params'].items():
new_req['item_dict'][param].update(value)
if item.get("remove_if"):
test_val = get_attr(config, item['remove_if']['attr'])
if test_val == item['remove_if']['value']:
del_idxs.append(i)
for idx in sorted(del_idxs, reverse=True):
del new_req['req_items'][idx]
return new_req | python | def update_req(name, old_req, config={}):
"""
Takes a requirement and updates it based on a specific attribute key
args:
name: the name of the attribute
old_req: the requirement definition
"""
if not name:
return old_req
new_req = copy.deepcopy(old_req)
del_idxs = []
if "req_items" in old_req:
req_key = get_req_key(old_req['req_items'])
for i, item in enumerate(old_req['req_items']):
if name == item[req_key] and item.get("dict_params"):
for param, value in item['dict_params'].items():
new_req['item_dict'][param].update(value)
if item.get("remove_if"):
test_val = get_attr(config, item['remove_if']['attr'])
if test_val == item['remove_if']['value']:
del_idxs.append(i)
for idx in sorted(del_idxs, reverse=True):
del new_req['req_items'][idx]
return new_req | [
"def",
"update_req",
"(",
"name",
",",
"old_req",
",",
"config",
"=",
"{",
"}",
")",
":",
"if",
"not",
"name",
":",
"return",
"old_req",
"new_req",
"=",
"copy",
".",
"deepcopy",
"(",
"old_req",
")",
"del_idxs",
"=",
"[",
"]",
"if",
"\"req_items\"",
"in",
"old_req",
":",
"req_key",
"=",
"get_req_key",
"(",
"old_req",
"[",
"'req_items'",
"]",
")",
"for",
"i",
",",
"item",
"in",
"enumerate",
"(",
"old_req",
"[",
"'req_items'",
"]",
")",
":",
"if",
"name",
"==",
"item",
"[",
"req_key",
"]",
"and",
"item",
".",
"get",
"(",
"\"dict_params\"",
")",
":",
"for",
"param",
",",
"value",
"in",
"item",
"[",
"'dict_params'",
"]",
".",
"items",
"(",
")",
":",
"new_req",
"[",
"'item_dict'",
"]",
"[",
"param",
"]",
".",
"update",
"(",
"value",
")",
"if",
"item",
".",
"get",
"(",
"\"remove_if\"",
")",
":",
"test_val",
"=",
"get_attr",
"(",
"config",
",",
"item",
"[",
"'remove_if'",
"]",
"[",
"'attr'",
"]",
")",
"if",
"test_val",
"==",
"item",
"[",
"'remove_if'",
"]",
"[",
"'value'",
"]",
":",
"del_idxs",
".",
"append",
"(",
"i",
")",
"for",
"idx",
"in",
"sorted",
"(",
"del_idxs",
",",
"reverse",
"=",
"True",
")",
":",
"del",
"new_req",
"[",
"'req_items'",
"]",
"[",
"idx",
"]",
"return",
"new_req"
] | Takes a requirement and updates it based on a specific attribute key
args:
name: the name of the attribute
old_req: the requirement definition | [
"Takes",
"a",
"requirement",
"and",
"updates",
"it",
"based",
"on",
"a",
"specific",
"attribute",
"key"
] | 9ec32dcc4bed51650a4b392cc5c15100fef7923a | https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/configuration/rdfwconfig.py#L1388-L1413 |
248,878 | KnowledgeLinks/rdfframework | rdfframework/configuration/rdfwconfig.py | get_options_from_str | def get_options_from_str(obj_str, **kwargs):
"""
Returns a list of options from a python object string
args:
obj_str: python list of options or a python object path
Example: "rdfframework.connections.ConnManager[{param1}]"
kwargs:
* kwargs used to format the 'obj_str'
"""
if isinstance(obj_str, list):
return obj_str
try:
obj = get_obj_frm_str(obj_str, **kwargs)
if obj:
return list(obj)
except AttributeError:
pass
return [] | python | def get_options_from_str(obj_str, **kwargs):
"""
Returns a list of options from a python object string
args:
obj_str: python list of options or a python object path
Example: "rdfframework.connections.ConnManager[{param1}]"
kwargs:
* kwargs used to format the 'obj_str'
"""
if isinstance(obj_str, list):
return obj_str
try:
obj = get_obj_frm_str(obj_str, **kwargs)
if obj:
return list(obj)
except AttributeError:
pass
return [] | [
"def",
"get_options_from_str",
"(",
"obj_str",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"obj_str",
",",
"list",
")",
":",
"return",
"obj_str",
"try",
":",
"obj",
"=",
"get_obj_frm_str",
"(",
"obj_str",
",",
"*",
"*",
"kwargs",
")",
"if",
"obj",
":",
"return",
"list",
"(",
"obj",
")",
"except",
"AttributeError",
":",
"pass",
"return",
"[",
"]"
] | Returns a list of options from a python object string
args:
obj_str: python list of options or a python object path
Example: "rdfframework.connections.ConnManager[{param1}]"
kwargs:
* kwargs used to format the 'obj_str' | [
"Returns",
"a",
"list",
"of",
"options",
"from",
"a",
"python",
"object",
"string"
] | 9ec32dcc4bed51650a4b392cc5c15100fef7923a | https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/configuration/rdfwconfig.py#L1417-L1436 |
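A quick sketch of the two accepted inputs: a plain list is returned unchanged, while a string is resolved through `get_obj_frm_str` at runtime (the dotted path below is the record's own example and depends on importable objects, so it is left commented):

    get_options_from_str(['a', 'b'])   # -> ['a', 'b'] (returned as-is)
    # string form, resolved at runtime:
    # get_options_from_str('rdfframework.connections.ConnManager[{param1}]', param1='conns')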
248,879 | KnowledgeLinks/rdfframework | rdfframework/configuration/rdfwconfig.py | strip_errors | def strip_errors(obj):
"""
Reads through an error object and replaces the error dict with the
value
args:
obj: the error object/dictionary
"""
rtn_obj = copy.deepcopy(obj)
try:
del rtn_obj["__error_keys__"]
except KeyError:
pass
for key in obj.get('__error_keys__', []):
rtn_obj[key] = rtn_obj[key]['value']
return rtn_obj | python | def strip_errors(obj):
"""
Reads through an error object and replaces the error dict with the
value
args:
obj: the error object/dictionary
"""
rtn_obj = copy.deepcopy(obj)
try:
del rtn_obj["__error_keys__"]
except KeyError:
pass
for key in obj.get('__error_keys__', []):
rtn_obj[key] = rtn_obj[key]['value']
return rtn_obj | [
"def",
"strip_errors",
"(",
"obj",
")",
":",
"rtn_obj",
"=",
"copy",
".",
"deepcopy",
"(",
"obj",
")",
"try",
":",
"del",
"rtn_obj",
"[",
"\"__error_keys__\"",
"]",
"except",
"KeyError",
":",
"pass",
"for",
"key",
"in",
"obj",
".",
"get",
"(",
"'__error_keys__'",
",",
"[",
"]",
")",
":",
"rtn_obj",
"[",
"key",
"]",
"=",
"rtn_obj",
"[",
"key",
"]",
"[",
"'value'",
"]",
"return",
"rtn_obj"
] | Reads through an error object and replaces the error dict with the
value
args:
obj: the error object/dictionary | [
"Reads",
"through",
"and",
"error",
"object",
"and",
"replaces",
"the",
"error",
"dict",
"with",
"the",
"value"
] | 9ec32dcc4bed51650a4b392cc5c15100fef7923a | https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/configuration/rdfwconfig.py#L1439-L1454 |
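Because `strip_errors` is a pure function over plain dicts, a literal example shows the behavior directly: the `__error_keys__` entry is dropped and each listed key is replaced by its 'value':

    obj = {
        '__error_keys__': ['port'],
        'host': 'localhost',
        'port': {'value': 8080, 'msg': 'out of range'},
    }
    strip_errors(obj)   # -> {'host': 'localhost', 'port': 8080}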
248,880 | gabrielfalcao/dominic | dominic/xpath/yappsrt.py | Parser._peek | def _peek(self, *types):
"""Returns the token type for lookahead; if there are any args
then the list of args is the set of token types to allow"""
tok = self._scanner.token(self._pos, types)
return tok[2] | python | def _peek(self, *types):
"""Returns the token type for lookahead; if there are any args
then the list of args is the set of token types to allow"""
tok = self._scanner.token(self._pos, types)
return tok[2] | [
"def",
"_peek",
"(",
"self",
",",
"*",
"types",
")",
":",
"tok",
"=",
"self",
".",
"_scanner",
".",
"token",
"(",
"self",
".",
"_pos",
",",
"types",
")",
"return",
"tok",
"[",
"2",
"]"
] | Returns the token type for lookahead; if there are any args
then the list of args is the set of token types to allow | [
"Returns",
"the",
"token",
"type",
"for",
"lookahead",
";",
"if",
"there",
"are",
"any",
"args",
"then",
"the",
"list",
"of",
"args",
"is",
"the",
"set",
"of",
"token",
"types",
"to",
"allow"
] | a42f418fc288f3b70cb95847b405eaf7b83bb3a0 | https://github.com/gabrielfalcao/dominic/blob/a42f418fc288f3b70cb95847b405eaf7b83bb3a0/dominic/xpath/yappsrt.py#L109-L113 |
248,881 | uw-it-aca/uw-restclients-hfs | uw_hfs/util.py | last_midnight | def last_midnight():
"""
return a datetime of last midnight
"""
now = datetime.now()
return datetime(now.year, now.month, now.day) | python | def last_midnight():
"""
return a datetime of last midnight
"""
now = datetime.now()
return datetime(now.year, now.month, now.day) | [
"def",
"last_midnight",
"(",
")",
":",
"now",
"=",
"datetime",
".",
"now",
"(",
")",
"return",
"datetime",
"(",
"now",
".",
"year",
",",
"now",
".",
"month",
",",
"now",
".",
"day",
")"
] | return a datetime of last midnight | [
"return",
"a",
"datetime",
"of",
"last",
"mid",
"-",
"night"
] | 685c3b16280d9e8b11b0d295c8852fa876f55ad0 | https://github.com/uw-it-aca/uw-restclients-hfs/blob/685c3b16280d9e8b11b0d295c8852fa876f55ad0/uw_hfs/util.py#L116-L121 |
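A one-line check of the truncation behavior; `last_midnight()` is equivalent to zeroing the time fields of "now" (barring a call that straddles midnight):

    from datetime import datetime
    assert last_midnight() == datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)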
248,882 | PSU-OIT-ARC/django-cloak | cloak/management/commands/login.py | Command.handle | def handle(self, *args, **options):
"""
With no arguments, find the first user in the system with the
is_superuser or is_staff flag set to true, or just the first user in
the system period.
With a single argument, look for the user with that value as the
USERNAME_FIELD value.
When a user is found, print out a URL slug you can paste into your
browser to login as the user.
"""
user_model = get_user_model()
if len(args) == 0:
# find the first superuser, or staff member or user
filters = [{"is_superuser": True}, {"is_staff": True}, {}]
user = None
for f in filters:
try:
user = user_model._default_manager.filter(**f).order_by("pk").first()
if user:
break
except FieldError as e:
pass
if user is None:
raise CommandError("No users found!")
elif len(args) == 1:
# find the user with the USERNAME_FIELD equal to the command line
# argument
try:
user = user_model._default_manager.get_by_natural_key(args[0])
except user_model.DoesNotExist as e:
raise CommandError("The user does not exist")
else:
raise CommandError("You passed me too many arguments")
signer = TimestampSigner()
signature = signer.sign(str(user.pk))
self.stdout.write(reverse(login, args=(signature,))) | python | def handle(self, *args, **options):
"""
With no arguments, find the first user in the system with the
is_superuser or is_staff flag set to true, or just the first user in
the system period.
With a single argument, look for the user with that value as the
USERNAME_FIELD value.
When a user is found, print out a URL slug you can paste into your
browser to login as the user.
"""
user_model = get_user_model()
if len(args) == 0:
# find the first superuser, or staff member or user
filters = [{"is_superuser": True}, {"is_staff": True}, {}]
user = None
for f in filters:
try:
user = user_model._default_manager.filter(**f).order_by("pk").first()
if user:
break
except FieldError as e:
pass
if user is None:
raise CommandError("No users found!")
elif len(args) == 1:
# find the user with the USERNAME_FIELD equal to the command line
# argument
try:
user = user_model._default_manager.get_by_natural_key(args[0])
except user_model.DoesNotExist as e:
raise CommandError("The user does not exist")
else:
raise CommandError("You passed me too many arguments")
signer = TimestampSigner()
signature = signer.sign(str(user.pk))
self.stdout.write(reverse(login, args=(signature,))) | [
"def",
"handle",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"options",
")",
":",
"user_model",
"=",
"get_user_model",
"(",
")",
"if",
"len",
"(",
"args",
")",
"==",
"0",
":",
"# find the first superuser, or staff member or user",
"filters",
"=",
"[",
"{",
"\"is_superuser\"",
":",
"True",
"}",
",",
"{",
"\"is_staff\"",
":",
"True",
"}",
",",
"{",
"}",
"]",
"user",
"=",
"None",
"for",
"f",
"in",
"filters",
":",
"try",
":",
"user",
"=",
"user_model",
".",
"_default_manager",
".",
"filter",
"(",
"*",
"*",
"f",
")",
".",
"order_by",
"(",
"\"pk\"",
")",
".",
"first",
"(",
")",
"if",
"user",
":",
"break",
"except",
"FieldError",
"as",
"e",
":",
"pass",
"if",
"user",
"is",
"None",
":",
"raise",
"CommandError",
"(",
"\"No users found!\"",
")",
"elif",
"len",
"(",
"args",
")",
"==",
"1",
":",
"# find the user with the USERNAME_FIELD equal to the command line",
"# argument",
"try",
":",
"user",
"=",
"user_model",
".",
"_default_manager",
".",
"get_by_natural_key",
"(",
"args",
"[",
"0",
"]",
")",
"except",
"user_model",
".",
"DoesNotExist",
"as",
"e",
":",
"raise",
"CommandError",
"(",
"\"The user does not exist\"",
")",
"else",
":",
"raise",
"CommandError",
"(",
"\"You passed me too many arguments\"",
")",
"signer",
"=",
"TimestampSigner",
"(",
")",
"signature",
"=",
"signer",
".",
"sign",
"(",
"str",
"(",
"user",
".",
"pk",
")",
")",
"self",
".",
"stdout",
".",
"write",
"(",
"reverse",
"(",
"login",
",",
"args",
"=",
"(",
"signature",
",",
")",
")",
")"
] | With no arguments, find the first user in the system with the
is_superuser or is_staff flag set to true, or just the first user in
the system period.
With a single argument, look for the user with that value as the
USERNAME_FIELD value.
When a user is found, print out a URL slug you can paste into your
browser to login as the user. | [
"With",
"no",
"arguments",
"find",
"the",
"first",
"user",
"in",
"the",
"system",
"with",
"the",
"is_superuser",
"or",
"is_staff",
"flag",
"set",
"to",
"true",
"or",
"just",
"the",
"first",
"user",
"in",
"the",
"system",
"period",
"."
] | 3f09711837f4fe7b1813692daa064e536135ffa3 | https://github.com/PSU-OIT-ARC/django-cloak/blob/3f09711837f4fe7b1813692daa064e536135ffa3/cloak/management/commands/login.py#L13-L55 |
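A hedged sketch of invoking the management command programmatically; the username is hypothetical, and the command prints the signed URL slug to paste into a browser:

    from django.core.management import call_command

    call_command('login')            # first superuser, staff member, or user found
    call_command('login', 'alice')   # user whose USERNAME_FIELD equals 'alice'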
248,883 | marhag87/pyyamlconfig | pyyamlconfig/pyyamlconfig.py | load_config | def load_config(configfile):
"""
Return a dict with configuration from the supplied yaml file
"""
try:
with open(configfile, 'r') as ymlfile:
try:
config = yaml.load(ymlfile)
return config
except yaml.parser.ParserError:
raise PyYAMLConfigError(
'Could not parse config file: {}'.format(configfile),
)
except IOError:
raise PyYAMLConfigError(
'Could not open config file: {}'.format(configfile),
) | python | def load_config(configfile):
"""
Return a dict with configuration from the supplied yaml file
"""
try:
with open(configfile, 'r') as ymlfile:
try:
config = yaml.load(ymlfile)
return config
except yaml.parser.ParserError:
raise PyYAMLConfigError(
'Could not parse config file: {}'.format(configfile),
)
except IOError:
raise PyYAMLConfigError(
'Could not open config file: {}'.format(configfile),
) | [
"def",
"load_config",
"(",
"configfile",
")",
":",
"try",
":",
"with",
"open",
"(",
"configfile",
",",
"'r'",
")",
"as",
"ymlfile",
":",
"try",
":",
"config",
"=",
"yaml",
".",
"load",
"(",
"ymlfile",
")",
"return",
"config",
"except",
"yaml",
".",
"parser",
".",
"ParserError",
":",
"raise",
"PyYAMLConfigError",
"(",
"'Could not parse config file: {}'",
".",
"format",
"(",
"configfile",
")",
",",
")",
"except",
"IOError",
":",
"raise",
"PyYAMLConfigError",
"(",
"'Could not open config file: {}'",
".",
"format",
"(",
"configfile",
")",
",",
")"
] | Return a dict with configuration from the supplied yaml file | [
"Return",
"a",
"dict",
"with",
"configuration",
"from",
"the",
"supplied",
"yaml",
"file"
] | 4476eb1aadc14bda2ee4af76c996551df4363936 | https://github.com/marhag87/pyyamlconfig/blob/4476eb1aadc14bda2ee4af76c996551df4363936/pyyamlconfig/pyyamlconfig.py#L16-L32 |
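An illustrative sketch of `load_config`, assuming a config.yaml containing a mapping such as `host: localhost` (the path and keys are hypothetical):

    from pyyamlconfig import load_config, PyYAMLConfigError

    try:
        config = load_config('config.yaml')
        print(config.get('host'))
    except PyYAMLConfigError as err:
        print(err)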
248,884 | marhag87/pyyamlconfig | pyyamlconfig/pyyamlconfig.py | write_config | def write_config(configfile, content):
"""
Write dict to a file in yaml format
"""
with open(configfile, 'w+') as ymlfile:
yaml.dump(
content,
ymlfile,
default_flow_style=False,
) | python | def write_config(configfile, content):
"""
Write dict to a file in yaml format
"""
with open(configfile, 'w+') as ymlfile:
yaml.dump(
content,
ymlfile,
default_flow_style=False,
) | [
"def",
"write_config",
"(",
"configfile",
",",
"content",
")",
":",
"with",
"open",
"(",
"configfile",
",",
"'w+'",
")",
"as",
"ymlfile",
":",
"yaml",
".",
"dump",
"(",
"content",
",",
"ymlfile",
",",
"default_flow_style",
"=",
"False",
",",
")"
] | Write dict to a file in yaml format | [
"Write",
"dict",
"to",
"a",
"file",
"in",
"yaml",
"format"
] | 4476eb1aadc14bda2ee4af76c996551df4363936 | https://github.com/marhag87/pyyamlconfig/blob/4476eb1aadc14bda2ee4af76c996551df4363936/pyyamlconfig/pyyamlconfig.py#L35-L44 |
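The matching write-side sketch; assuming the path is writable, the round trip through `load_config` returns the same dict:

    from pyyamlconfig import write_config, load_config

    write_config('config.yaml', {'host': 'localhost', 'port': 8080})
    assert load_config('config.yaml') == {'host': 'localhost', 'port': 8080}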
248,885 | storborg/replaylib | replaylib/__init__.py | start_record | def start_record():
"""
Install an httplib wrapper that records but does not modify calls.
"""
global record, playback, current
if record:
raise StateError("Already recording.")
if playback:
raise StateError("Currently playing back.")
record = True
current = ReplayData()
install(RecordingHTTPConnection, RecordingHTTPSConnection) | python | def start_record():
"""
Install an httplib wrapper that records but does not modify calls.
"""
global record, playback, current
if record:
raise StateError("Already recording.")
if playback:
raise StateError("Currently playing back.")
record = True
current = ReplayData()
install(RecordingHTTPConnection, RecordingHTTPSConnection) | [
"def",
"start_record",
"(",
")",
":",
"global",
"record",
",",
"playback",
",",
"current",
"if",
"record",
":",
"raise",
"StateError",
"(",
"\"Already recording.\"",
")",
"if",
"playback",
":",
"raise",
"StateError",
"(",
"\"Currently playing back.\"",
")",
"record",
"=",
"True",
"current",
"=",
"ReplayData",
"(",
")",
"install",
"(",
"RecordingHTTPConnection",
",",
"RecordingHTTPSConnection",
")"
] | Install an httplib wrapper that records but does not modify calls. | [
"Install",
"an",
"httplib",
"wrapper",
"that",
"records",
"but",
"does",
"not",
"modify",
"calls",
"."
] | 16bc3752bb992e3fb364fce9bd7c3f95e887a42d | https://github.com/storborg/replaylib/blob/16bc3752bb992e3fb364fce9bd7c3f95e887a42d/replaylib/__init__.py#L43-L54 |
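A minimal sketch of the recording guard rails; the module-level flags mean a second call fails fast (the stop/playback API is not shown in this record):

    import replaylib

    replaylib.start_record()
    # httplib traffic issued here is captured into the module-level ReplayData;
    # calling replaylib.start_record() again now raises StateError("Already recording.")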
248,886 | zyga/call | examples/example1.py | check_types | def check_types(func):
"""
Check if annotated function arguments are of the correct type
"""
call = PythonCall(func)
@wraps(func)
def decorator(*args, **kwargs):
parameters = call.bind(args, kwargs)
for arg_name, expected_type in func.__annotations__.items():
if not isinstance(parameters[arg_name], expected_type):
raise TypeError("{} must be a {}".format(
arg_name, expected_type))
return call.apply(args, kwargs)
return decorator | python | def check_types(func):
"""
Check if annotated function arguments are of the correct type
"""
call = PythonCall(func)
@wraps(func)
def decorator(*args, **kwargs):
parameters = call.bind(args, kwargs)
for arg_name, expected_type in func.__annotations__.items():
if not isinstance(parameters[arg_name], expected_type):
raise TypeError("{} must be a {}".format(
arg_name, expected_type))
return call.apply(args, kwargs)
return decorator | [
"def",
"check_types",
"(",
"func",
")",
":",
"call",
"=",
"PythonCall",
"(",
"func",
")",
"@",
"wraps",
"(",
"func",
")",
"def",
"decorator",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"parameters",
"=",
"call",
".",
"bind",
"(",
"args",
",",
"kwargs",
")",
"for",
"arg_name",
",",
"expected_type",
"in",
"func",
".",
"__annotations__",
".",
"items",
"(",
")",
":",
"if",
"not",
"isinstance",
"(",
"parameters",
"[",
"arg_name",
"]",
",",
"expected_type",
")",
":",
"raise",
"TypeError",
"(",
"\"{} must be a {}\"",
".",
"format",
"(",
"arg_name",
",",
"expected_type",
")",
")",
"return",
"call",
".",
"apply",
"(",
"args",
",",
"kwargs",
")",
"return",
"decorator"
] | Check if annotated function arguments are of the correct type | [
"Check",
"if",
"annotated",
"function",
"arguments",
"are",
"of",
"the",
"correct",
"type"
] | dcef9a5aac7f9085bd4829dd6bcedc5fc2945d87 | https://github.com/zyga/call/blob/dcef9a5aac7f9085bd4829dd6bcedc5fc2945d87/examples/example1.py#L20-L34 |
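A worked sketch of the decorator. Only annotated parameters are checked; avoid a return annotation, since the decorator looks every annotation up in the bound parameters:

    @check_types
    def greet(name: str, times: int):
        print(name * times)

    greet('hi', 3)     # prints 'hihihi'
    greet('hi', '3')   # raises TypeError: times must be a <class 'int'>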
248,887 | devricks/soft_drf | soft_drf/api/serializers/base.py | AbsoluteUriMixin.build_absolute_uri | def build_absolute_uri(self, uri):
"""
Return a fully qualified absolute url for the given uri.
"""
request = self.context.get('request', None)
return (
request.build_absolute_uri(uri) if request is not None else uri
) | python | def build_absolute_uri(self, uri):
"""
Return a fully qualified absolute url for the given uri.
"""
request = self.context.get('request', None)
return (
request.build_absolute_uri(uri) if request is not None else uri
) | [
"def",
"build_absolute_uri",
"(",
"self",
",",
"uri",
")",
":",
"request",
"=",
"self",
".",
"context",
".",
"get",
"(",
"'request'",
",",
"None",
")",
"return",
"(",
"request",
".",
"build_absolute_uri",
"(",
"uri",
")",
"if",
"request",
"is",
"not",
"None",
"else",
"uri",
")"
] | Return a fully qualified absolute url for the given uri. | [
"Return",
"a",
"fully",
"qualified",
"absolute",
"url",
"for",
"the",
"given",
"uri",
"."
] | 1869b13f9341bfcebd931059e93de2bc38570da3 | https://github.com/devricks/soft_drf/blob/1869b13f9341bfcebd931059e93de2bc38570da3/soft_drf/api/serializers/base.py#L11-L19 |
248,888 | devricks/soft_drf | soft_drf/api/serializers/base.py | ModelSerializer.get_resource_uri | def get_resource_uri(self, obj):
"""
Return the uri of the given object.
"""
url = 'api:%s:%s-detail' % (
self.api_version,
getattr(
self, 'resource_view_name',
self.Meta.model._meta.model_name
)
)
return reverse(url, request=self.context.get('request', None), kwargs={
self.lookup_field: getattr(obj, self.lookup_field)
}) | python | def get_resource_uri(self, obj):
"""
Return the uri of the given object.
"""
url = 'api:%s:%s-detail' % (
self.api_version,
getattr(
self, 'resource_view_name',
self.Meta.model._meta.model_name
)
)
return reverse(url, request=self.context.get('request', None), kwargs={
self.lookup_field: getattr(obj, self.lookup_field)
}) | [
"def",
"get_resource_uri",
"(",
"self",
",",
"obj",
")",
":",
"url",
"=",
"'api:%s:%s-detail'",
"%",
"(",
"self",
".",
"api_version",
",",
"getattr",
"(",
"self",
",",
"'resource_view_name'",
",",
"self",
".",
"Meta",
".",
"model",
".",
"_meta",
".",
"model_name",
")",
")",
"return",
"reverse",
"(",
"url",
",",
"request",
"=",
"self",
".",
"context",
".",
"get",
"(",
"'request'",
",",
"None",
")",
",",
"kwargs",
"=",
"{",
"self",
".",
"lookup_field",
":",
"getattr",
"(",
"obj",
",",
"self",
".",
"lookup_field",
")",
"}",
")"
] | Return the uri of the given object. | [
"Return",
"the",
"uri",
"of",
"the",
"given",
"object",
"."
] | 1869b13f9341bfcebd931059e93de2bc38570da3 | https://github.com/devricks/soft_drf/blob/1869b13f9341bfcebd931059e93de2bc38570da3/soft_drf/api/serializers/base.py#L78-L92 |
248,889 | asphalt-framework/asphalt-templating | asphalt/templating/api.py | TemplateRendererProxy.render | def render(self, template: str, **vars) -> str:
"""
Render the named template.
The current context will be available to the template as the ``ctx`` variable.
:param template: name of the template file
:param vars: extra template variables
:return: the rendered results
"""
vars.setdefault('ctx', self._ctx)
return self._renderer.render(template, **vars) | python | def render(self, template: str, **vars) -> str:
"""
Render the named template.
The current context will be available to the template as the ``ctx`` variable.
:param template: name of the template file
:param vars: extra template variables
:return: the rendered results
"""
vars.setdefault('ctx', self._ctx)
return self._renderer.render(template, **vars) | [
"def",
"render",
"(",
"self",
",",
"template",
":",
"str",
",",
"*",
"*",
"vars",
")",
"->",
"str",
":",
"vars",
".",
"setdefault",
"(",
"'ctx'",
",",
"self",
".",
"_ctx",
")",
"return",
"self",
".",
"_renderer",
".",
"render",
"(",
"template",
",",
"*",
"*",
"vars",
")"
] | Render the named template.
The current context will be available to the template as the ``ctx`` variable.
:param template: name of the template file
:param vars: extra template variables
:return: the rendered results | [
"Render",
"the",
"named",
"template",
"."
] | e5f836290820aa295b048b17b96d3896d5f1eeac | https://github.com/asphalt-framework/asphalt-templating/blob/e5f836290820aa295b048b17b96d3896d5f1eeac/asphalt/templating/api.py#L53-L65 |
248,890 | asphalt-framework/asphalt-templating | asphalt/templating/api.py | TemplateRendererProxy.render_string | def render_string(self, source: str, **vars) -> str:
"""
Render the template contained in the given string.
The current context will be available to the template as the ``ctx`` variable.
:param source: content of the template to render
:param vars: extra variables made available to the template
:return: the rendered results
"""
vars.setdefault('ctx', self._ctx)
return self._renderer.render_string(source, **vars) | python | def render_string(self, source: str, **vars) -> str:
"""
Render the template contained in the given string.
The current context will be available to the template as the ``ctx`` variable.
:param source: content of the template to render
:param vars: extra variables made available to the template
:return: the rendered results
"""
vars.setdefault('ctx', self._ctx)
return self._renderer.render_string(source, **vars) | [
"def",
"render_string",
"(",
"self",
",",
"source",
":",
"str",
",",
"*",
"*",
"vars",
")",
"->",
"str",
":",
"vars",
".",
"setdefault",
"(",
"'ctx'",
",",
"self",
".",
"_ctx",
")",
"return",
"self",
".",
"_renderer",
".",
"render_string",
"(",
"source",
",",
"*",
"*",
"vars",
")"
] | Render the template contained in the given string.
The current context will be available to the template as the ``ctx`` variable.
:param source: content of the template to render
:param vars: extra variables made available to the template
:return: the rendered results | [
"Render",
"the",
"template",
"contained",
"in",
"the",
"given",
"string",
"."
] | e5f836290820aa295b048b17b96d3896d5f1eeac | https://github.com/asphalt-framework/asphalt-templating/blob/e5f836290820aa295b048b17b96d3896d5f1eeac/asphalt/templating/api.py#L67-L79 |
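A hedged sketch of the proxy in use; `renderer` is assumed to be a TemplateRendererProxy obtained from the context, and the template syntax depends on the configured backend:

    html = renderer.render_string('Hello, {name}!', name='World')  # backend-dependent syntax
    # `ctx` is injected automatically unless the caller supplies its own.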
248,891 | jmgilman/Neolib | neolib/pyamf/remoting/__init__.py | _write_header | def _write_header(name, header, required, stream, encoder, strict=False):
"""
Write AMF message header.
@param name: Name of the header.
@param header: Header value.
@param required: Whether understanding this header is required (?).
@param stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>} that
will receive the encoded header.
@param encoder: An encoder capable of encoding C{AMF0}.
@param strict: Use strict encoding policy. Default is C{False}. Will write
the correct header length after writing the header.
"""
stream.write_ushort(len(name))
stream.write_utf8_string(name)
stream.write_uchar(required)
write_pos = stream.tell()
stream.write_ulong(0)
old_pos = stream.tell()
encoder.writeElement(header)
new_pos = stream.tell()
if strict:
stream.seek(write_pos)
stream.write_ulong(new_pos - old_pos)
stream.seek(new_pos) | python | def _write_header(name, header, required, stream, encoder, strict=False):
"""
Write AMF message header.
@param name: Name of the header.
@param header: Header value.
@param required: Whether understanding this header is required (?).
@param stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>} that
will receive the encoded header.
@param encoder: An encoder capable of encoding C{AMF0}.
@param strict: Use strict encoding policy. Default is C{False}. Will write
the correct header length after writing the header.
"""
stream.write_ushort(len(name))
stream.write_utf8_string(name)
stream.write_uchar(required)
write_pos = stream.tell()
stream.write_ulong(0)
old_pos = stream.tell()
encoder.writeElement(header)
new_pos = stream.tell()
if strict:
stream.seek(write_pos)
stream.write_ulong(new_pos - old_pos)
stream.seek(new_pos) | [
"def",
"_write_header",
"(",
"name",
",",
"header",
",",
"required",
",",
"stream",
",",
"encoder",
",",
"strict",
"=",
"False",
")",
":",
"stream",
".",
"write_ushort",
"(",
"len",
"(",
"name",
")",
")",
"stream",
".",
"write_utf8_string",
"(",
"name",
")",
"stream",
".",
"write_uchar",
"(",
"required",
")",
"write_pos",
"=",
"stream",
".",
"tell",
"(",
")",
"stream",
".",
"write_ulong",
"(",
"0",
")",
"old_pos",
"=",
"stream",
".",
"tell",
"(",
")",
"encoder",
".",
"writeElement",
"(",
"header",
")",
"new_pos",
"=",
"stream",
".",
"tell",
"(",
")",
"if",
"strict",
":",
"stream",
".",
"seek",
"(",
"write_pos",
")",
"stream",
".",
"write_ulong",
"(",
"new_pos",
"-",
"old_pos",
")",
"stream",
".",
"seek",
"(",
"new_pos",
")"
] | Write AMF message header.
@param name: Name of the header.
@param header: Header value.
@param required: Whether understanding this header is required (?).
@param stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>} that
will receive the encoded header.
@param encoder: An encoder capable of encoding C{AMF0}.
@param strict: Use strict encoding policy. Default is C{False}. Will write
the correct header length after writing the header. | [
"Write",
"AMF",
"message",
"header",
"."
] | 228fafeaed0f3195676137732384a14820ae285c | https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/remoting/__init__.py#L373-L400 |
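A sketch of calling the helper directly; the stream and AMF0 encoder module paths are assumptions, and with strict=True the 4-byte length placeholder is back-patched after the header value is encoded:

    from pyamf import amf0, util

    stream = util.BufferedByteStream()
    encoder = amf0.Encoder(stream)   # encoder construction assumed
    _write_header('Credentials', {'userid': 'guest'}, False, stream, encoder, strict=True)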
248,892 | jmgilman/Neolib | neolib/pyamf/remoting/__init__.py | _read_body | def _read_body(stream, decoder, strict=False, logger=None):
"""
Read an AMF message body from the stream.
@type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
@param decoder: An AMF0 decoder.
@param strict: Use strict decoding policy. Default is `False`.
@param logger: Used to log interesting events whilst reading a remoting
body.
@type logger: A C{logging.Logger} instance or C{None}.
@return: A C{tuple} containing the C{id} of the request and the L{Request}
or L{Response}
"""
def _read_args():
# we have to go through this insanity because it seems that amf0
# does not keep the array of args in the object references lookup
type_byte = stream.peek(1)
if type_byte == '\x11':
if not decoder.use_amf3:
raise pyamf.DecodeError(
"Unexpected AMF3 type with incorrect message type")
return decoder.readElement()
if type_byte != '\x0a':
raise pyamf.DecodeError("Array type required for request body")
stream.read(1)
x = stream.read_ulong()
return [decoder.readElement() for i in xrange(x)]
target = stream.read_utf8_string(stream.read_ushort())
response = stream.read_utf8_string(stream.read_ushort())
status = STATUS_OK
is_request = True
for code, s in STATUS_CODES.iteritems():
if not target.endswith(s):
continue
is_request = False
status = code
target = target[:0 - len(s)]
if logger:
logger.debug('Remoting target: %r' % (target,))
data_len = stream.read_ulong()
pos = stream.tell()
if is_request:
data = _read_args()
else:
data = decoder.readElement()
if strict and pos + data_len != stream.tell():
raise pyamf.DecodeError("Data read from stream does not match body "
"length (%d != %d)" % (pos + data_len, stream.tell(),))
if is_request:
return response, Request(target, body=data)
if status == STATUS_ERROR and isinstance(data, pyamf.ASObject):
data = get_fault(data)
return target, Response(data, status) | python | def _read_body(stream, decoder, strict=False, logger=None):
"""
Read an AMF message body from the stream.
@type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
@param decoder: An AMF0 decoder.
@param strict: Use strict decoding policy. Default is `False`.
@param logger: Used to log interesting events whilst reading a remoting
body.
@type logger: A C{logging.Logger} instance or C{None}.
@return: A C{tuple} containing the C{id} of the request and the L{Request}
or L{Response}
"""
def _read_args():
# we have to go through this insanity because it seems that amf0
# does not keep the array of args in the object references lookup
type_byte = stream.peek(1)
if type_byte == '\x11':
if not decoder.use_amf3:
raise pyamf.DecodeError(
"Unexpected AMF3 type with incorrect message type")
return decoder.readElement()
if type_byte != '\x0a':
raise pyamf.DecodeError("Array type required for request body")
stream.read(1)
x = stream.read_ulong()
return [decoder.readElement() for i in xrange(x)]
target = stream.read_utf8_string(stream.read_ushort())
response = stream.read_utf8_string(stream.read_ushort())
status = STATUS_OK
is_request = True
for code, s in STATUS_CODES.iteritems():
if not target.endswith(s):
continue
is_request = False
status = code
target = target[:0 - len(s)]
if logger:
logger.debug('Remoting target: %r' % (target,))
data_len = stream.read_ulong()
pos = stream.tell()
if is_request:
data = _read_args()
else:
data = decoder.readElement()
if strict and pos + data_len != stream.tell():
raise pyamf.DecodeError("Data read from stream does not match body "
"length (%d != %d)" % (pos + data_len, stream.tell(),))
if is_request:
return response, Request(target, body=data)
if status == STATUS_ERROR and isinstance(data, pyamf.ASObject):
data = get_fault(data)
return target, Response(data, status) | [
"def",
"_read_body",
"(",
"stream",
",",
"decoder",
",",
"strict",
"=",
"False",
",",
"logger",
"=",
"None",
")",
":",
"def",
"_read_args",
"(",
")",
":",
"# we have to go through this insanity because it seems that amf0",
"# does not keep the array of args in the object references lookup",
"type_byte",
"=",
"stream",
".",
"peek",
"(",
"1",
")",
"if",
"type_byte",
"==",
"'\\x11'",
":",
"if",
"not",
"decoder",
".",
"use_amf3",
":",
"raise",
"pyamf",
".",
"DecodeError",
"(",
"\"Unexpected AMF3 type with incorrect message type\"",
")",
"return",
"decoder",
".",
"readElement",
"(",
")",
"if",
"type_byte",
"!=",
"'\\x0a'",
":",
"raise",
"pyamf",
".",
"DecodeError",
"(",
"\"Array type required for request body\"",
")",
"stream",
".",
"read",
"(",
"1",
")",
"x",
"=",
"stream",
".",
"read_ulong",
"(",
")",
"return",
"[",
"decoder",
".",
"readElement",
"(",
")",
"for",
"i",
"in",
"xrange",
"(",
"x",
")",
"]",
"target",
"=",
"stream",
".",
"read_utf8_string",
"(",
"stream",
".",
"read_ushort",
"(",
")",
")",
"response",
"=",
"stream",
".",
"read_utf8_string",
"(",
"stream",
".",
"read_ushort",
"(",
")",
")",
"status",
"=",
"STATUS_OK",
"is_request",
"=",
"True",
"for",
"code",
",",
"s",
"in",
"STATUS_CODES",
".",
"iteritems",
"(",
")",
":",
"if",
"not",
"target",
".",
"endswith",
"(",
"s",
")",
":",
"continue",
"is_request",
"=",
"False",
"status",
"=",
"code",
"target",
"=",
"target",
"[",
":",
"0",
"-",
"len",
"(",
"s",
")",
"]",
"if",
"logger",
":",
"logger",
".",
"debug",
"(",
"'Remoting target: %r'",
"%",
"(",
"target",
",",
")",
")",
"data_len",
"=",
"stream",
".",
"read_ulong",
"(",
")",
"pos",
"=",
"stream",
".",
"tell",
"(",
")",
"if",
"is_request",
":",
"data",
"=",
"_read_args",
"(",
")",
"else",
":",
"data",
"=",
"decoder",
".",
"readElement",
"(",
")",
"if",
"strict",
"and",
"pos",
"+",
"data_len",
"!=",
"stream",
".",
"tell",
"(",
")",
":",
"raise",
"pyamf",
".",
"DecodeError",
"(",
"\"Data read from stream does not match body \"",
"\"length (%d != %d)\"",
"%",
"(",
"pos",
"+",
"data_len",
",",
"stream",
".",
"tell",
"(",
")",
",",
")",
")",
"if",
"is_request",
":",
"return",
"response",
",",
"Request",
"(",
"target",
",",
"body",
"=",
"data",
")",
"if",
"status",
"==",
"STATUS_ERROR",
"and",
"isinstance",
"(",
"data",
",",
"pyamf",
".",
"ASObject",
")",
":",
"data",
"=",
"get_fault",
"(",
"data",
")",
"return",
"target",
",",
"Response",
"(",
"data",
",",
"status",
")"
] | Read an AMF message body from the stream.
@type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
@param decoder: An AMF0 decoder.
@param strict: Use strict decoding policy. Default is `False`.
@param logger: Used to log interesting events whilst reading a remoting
body.
@type logger: A C{logging.Logger} instance or C{None}.
@return: A C{tuple} containing the C{id} of the request and the L{Request}
or L{Response} | [
"Read",
"an",
"AMF",
"message",
"body",
"from",
"the",
"stream",
"."
] | 228fafeaed0f3195676137732384a14820ae285c | https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/remoting/__init__.py#L403-L471 |
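A minimal sketch of the framing that _read_body parses, assuming only the byte layout visible in the code above: two ushort-length-prefixed UTF-8 strings (target and response), a ulong body length, then the raw AMF0 payload. The helper name, service name, and response id below are made up for illustration.

import struct

def build_body_bytes(target, response, payload):
    # target and response are ushort-length-prefixed UTF-8 strings, then a
    # ulong byte count for the body, then the raw AMF0-encoded payload;
    # exactly the framing _read_body consumes above.
    out = b''
    for s in (target, response):
        encoded = s.encode('utf-8')
        out += struct.pack('>H', len(encoded)) + encoded
    return out + struct.pack('>L', len(payload)) + payload

# '\x0a' followed by a ulong element count marks an AMF0 strict array,
# which is what the nested _read_args helper requires for a request body.
body = build_body_bytes('EchoService.echo', '/1', b'\x0a' + struct.pack('>L', 0))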
248,893 | jmgilman/Neolib | neolib/pyamf/remoting/__init__.py | _write_body | def _write_body(name, message, stream, encoder, strict=False):
"""
Write AMF message body.
@param name: The name of the request.
@param message: The AMF L{Message}
@type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
@param encoder: An AMF0 encoder.
@param strict: Use strict encoding policy. Default is `False`.
"""
def _encode_body(message):
if isinstance(message, Response):
encoder.writeElement(message.body)
return
stream.write('\x0a')
stream.write_ulong(len(message.body))
for x in message.body:
encoder.writeElement(x)
if not isinstance(message, (Request, Response)):
raise TypeError("Unknown message type")
target = None
if isinstance(message, Request):
target = unicode(message.target)
else:
target = u"%s%s" % (name, _get_status(message.status))
target = target.encode('utf8')
stream.write_ushort(len(target))
stream.write_utf8_string(target)
response = 'null'
if isinstance(message, Request):
response = name
stream.write_ushort(len(response))
stream.write_utf8_string(response)
if not strict:
stream.write_ulong(0)
_encode_body(message)
return
write_pos = stream.tell()
stream.write_ulong(0)
old_pos = stream.tell()
_encode_body(message)
new_pos = stream.tell()
stream.seek(write_pos)
stream.write_ulong(new_pos - old_pos)
stream.seek(new_pos) | python | def _write_body(name, message, stream, encoder, strict=False):
"""
Write AMF message body.
@param name: The name of the request.
@param message: The AMF L{Message}
@type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
@param encoder: An AMF0 encoder.
@param strict: Use strict encoding policy. Default is `False`.
"""
def _encode_body(message):
if isinstance(message, Response):
encoder.writeElement(message.body)
return
stream.write('\x0a')
stream.write_ulong(len(message.body))
for x in message.body:
encoder.writeElement(x)
if not isinstance(message, (Request, Response)):
raise TypeError("Unknown message type")
target = None
if isinstance(message, Request):
target = unicode(message.target)
else:
target = u"%s%s" % (name, _get_status(message.status))
target = target.encode('utf8')
stream.write_ushort(len(target))
stream.write_utf8_string(target)
response = 'null'
if isinstance(message, Request):
response = name
stream.write_ushort(len(response))
stream.write_utf8_string(response)
if not strict:
stream.write_ulong(0)
_encode_body(message)
return
write_pos = stream.tell()
stream.write_ulong(0)
old_pos = stream.tell()
_encode_body(message)
new_pos = stream.tell()
stream.seek(write_pos)
stream.write_ulong(new_pos - old_pos)
stream.seek(new_pos) | [
"def",
"_write_body",
"(",
"name",
",",
"message",
",",
"stream",
",",
"encoder",
",",
"strict",
"=",
"False",
")",
":",
"def",
"_encode_body",
"(",
"message",
")",
":",
"if",
"isinstance",
"(",
"message",
",",
"Response",
")",
":",
"encoder",
".",
"writeElement",
"(",
"message",
".",
"body",
")",
"return",
"stream",
".",
"write",
"(",
"'\\x0a'",
")",
"stream",
".",
"write_ulong",
"(",
"len",
"(",
"message",
".",
"body",
")",
")",
"for",
"x",
"in",
"message",
".",
"body",
":",
"encoder",
".",
"writeElement",
"(",
"x",
")",
"if",
"not",
"isinstance",
"(",
"message",
",",
"(",
"Request",
",",
"Response",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"Unknown message type\"",
")",
"target",
"=",
"None",
"if",
"isinstance",
"(",
"message",
",",
"Request",
")",
":",
"target",
"=",
"unicode",
"(",
"message",
".",
"target",
")",
"else",
":",
"target",
"=",
"u\"%s%s\"",
"%",
"(",
"name",
",",
"_get_status",
"(",
"message",
".",
"status",
")",
")",
"target",
"=",
"target",
".",
"encode",
"(",
"'utf8'",
")",
"stream",
".",
"write_ushort",
"(",
"len",
"(",
"target",
")",
")",
"stream",
".",
"write_utf8_string",
"(",
"target",
")",
"response",
"=",
"'null'",
"if",
"isinstance",
"(",
"message",
",",
"Request",
")",
":",
"response",
"=",
"name",
"stream",
".",
"write_ushort",
"(",
"len",
"(",
"response",
")",
")",
"stream",
".",
"write_utf8_string",
"(",
"response",
")",
"if",
"not",
"strict",
":",
"stream",
".",
"write_ulong",
"(",
"0",
")",
"_encode_body",
"(",
"message",
")",
"return",
"write_pos",
"=",
"stream",
".",
"tell",
"(",
")",
"stream",
".",
"write_ulong",
"(",
"0",
")",
"old_pos",
"=",
"stream",
".",
"tell",
"(",
")",
"_encode_body",
"(",
"message",
")",
"new_pos",
"=",
"stream",
".",
"tell",
"(",
")",
"stream",
".",
"seek",
"(",
"write_pos",
")",
"stream",
".",
"write_ulong",
"(",
"new_pos",
"-",
"old_pos",
")",
"stream",
".",
"seek",
"(",
"new_pos",
")"
] | Write AMF message body.
@param name: The name of the request.
@param message: The AMF L{Message}
@type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
@param encoder: An AMF0 encoder.
@param strict: Use strict encoding policy. Default is `False`. | [
"Write",
"AMF",
"message",
"body",
"."
] | 228fafeaed0f3195676137732384a14820ae285c | https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/remoting/__init__.py#L474-L533 |
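In strict mode, _write_body reserves four bytes for the body length, encodes the body, then seeks back and overwrites the placeholder with the measured size. A standalone sketch of that backpatching pattern using io.BytesIO (the example body bytes are arbitrary):

import io
import struct

stream = io.BytesIO()
write_pos = stream.tell()
stream.write(struct.pack('>L', 0))            # placeholder for the body length
old_pos = stream.tell()
stream.write(b'\x0a' + struct.pack('>L', 2))  # stand-in for the encoded body
new_pos = stream.tell()
stream.seek(write_pos)
stream.write(struct.pack('>L', new_pos - old_pos))  # backpatch the real length
stream.seek(new_pos)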
248,894 | jmgilman/Neolib | neolib/pyamf/remoting/__init__.py | decode | def decode(stream, strict=False, logger=None, timezone_offset=None):
"""
Decodes the incoming stream as a remoting message.
@type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
@param strict: Enforce strict decoding. Default is `False`.
@param logger: Used to log interesting events whilst decoding a remoting
message.
@type logger: U{logging.Logger<http://
docs.python.org/library/logging.html#loggers>}
@param timezone_offset: The difference between the current timezone and
UTC. Date/times should always be handled in UTC to avoid confusion but
this is required for legacy systems.
@type timezone_offset: U{datetime.datetime.timedelta<http://
docs.python.org/library/datetime.html#datetime.timedelta>}
@return: Message L{envelope<Envelope>}.
"""
if not isinstance(stream, util.BufferedByteStream):
stream = util.BufferedByteStream(stream)
if logger:
logger.debug('remoting.decode start')
msg = Envelope()
msg.amfVersion = stream.read_ushort()
# see http://osflash.org/documentation/amf/envelopes/remoting#preamble
# why we are doing this...
if msg.amfVersion > 0x09:
raise pyamf.DecodeError("Malformed stream (amfVersion=%d)" %
msg.amfVersion)
decoder = pyamf.get_decoder(pyamf.AMF0, stream, strict=strict,
timezone_offset=timezone_offset)
context = decoder.context
decoder.use_amf3 = msg.amfVersion == pyamf.AMF3
header_count = stream.read_ushort()
for i in xrange(header_count):
name, required, data = _read_header(stream, decoder, strict)
msg.headers[name] = data
if required:
msg.headers.set_required(name)
body_count = stream.read_short()
for i in xrange(body_count):
context.clear()
target, payload = _read_body(stream, decoder, strict, logger)
msg[target] = payload
if strict and stream.remaining() > 0:
raise RuntimeError("Unable to fully consume the buffer")
if logger:
logger.debug('remoting.decode end')
return msg | python | def decode(stream, strict=False, logger=None, timezone_offset=None):
"""
Decodes the incoming stream as a remoting message.
@type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
@param strict: Enforce strict decoding. Default is `False`.
@param logger: Used to log interesting events whilst decoding a remoting
message.
@type logger: U{logging.Logger<http://
docs.python.org/library/logging.html#loggers>}
@param timezone_offset: The difference between the current timezone and
UTC. Date/times should always be handled in UTC to avoid confusion but
this is required for legacy systems.
@type timezone_offset: U{datetime.datetime.timedelta<http://
docs.python.org/library/datetime.html#datetime.timedelta>}
@return: Message L{envelope<Envelope>}.
"""
if not isinstance(stream, util.BufferedByteStream):
stream = util.BufferedByteStream(stream)
if logger:
logger.debug('remoting.decode start')
msg = Envelope()
msg.amfVersion = stream.read_ushort()
# see http://osflash.org/documentation/amf/envelopes/remoting#preamble
# why we are doing this...
if msg.amfVersion > 0x09:
raise pyamf.DecodeError("Malformed stream (amfVersion=%d)" %
msg.amfVersion)
decoder = pyamf.get_decoder(pyamf.AMF0, stream, strict=strict,
timezone_offset=timezone_offset)
context = decoder.context
decoder.use_amf3 = msg.amfVersion == pyamf.AMF3
header_count = stream.read_ushort()
for i in xrange(header_count):
name, required, data = _read_header(stream, decoder, strict)
msg.headers[name] = data
if required:
msg.headers.set_required(name)
body_count = stream.read_short()
for i in xrange(body_count):
context.clear()
target, payload = _read_body(stream, decoder, strict, logger)
msg[target] = payload
if strict and stream.remaining() > 0:
raise RuntimeError("Unable to fully consume the buffer")
if logger:
logger.debug('remoting.decode end')
return msg | [
"def",
"decode",
"(",
"stream",
",",
"strict",
"=",
"False",
",",
"logger",
"=",
"None",
",",
"timezone_offset",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"stream",
",",
"util",
".",
"BufferedByteStream",
")",
":",
"stream",
"=",
"util",
".",
"BufferedByteStream",
"(",
"stream",
")",
"if",
"logger",
":",
"logger",
".",
"debug",
"(",
"'remoting.decode start'",
")",
"msg",
"=",
"Envelope",
"(",
")",
"msg",
".",
"amfVersion",
"=",
"stream",
".",
"read_ushort",
"(",
")",
"# see http://osflash.org/documentation/amf/envelopes/remoting#preamble",
"# why we are doing this...",
"if",
"msg",
".",
"amfVersion",
">",
"0x09",
":",
"raise",
"pyamf",
".",
"DecodeError",
"(",
"\"Malformed stream (amfVersion=%d)\"",
"%",
"msg",
".",
"amfVersion",
")",
"decoder",
"=",
"pyamf",
".",
"get_decoder",
"(",
"pyamf",
".",
"AMF0",
",",
"stream",
",",
"strict",
"=",
"strict",
",",
"timezone_offset",
"=",
"timezone_offset",
")",
"context",
"=",
"decoder",
".",
"context",
"decoder",
".",
"use_amf3",
"=",
"msg",
".",
"amfVersion",
"==",
"pyamf",
".",
"AMF3",
"header_count",
"=",
"stream",
".",
"read_ushort",
"(",
")",
"for",
"i",
"in",
"xrange",
"(",
"header_count",
")",
":",
"name",
",",
"required",
",",
"data",
"=",
"_read_header",
"(",
"stream",
",",
"decoder",
",",
"strict",
")",
"msg",
".",
"headers",
"[",
"name",
"]",
"=",
"data",
"if",
"required",
":",
"msg",
".",
"headers",
".",
"set_required",
"(",
"name",
")",
"body_count",
"=",
"stream",
".",
"read_short",
"(",
")",
"for",
"i",
"in",
"xrange",
"(",
"body_count",
")",
":",
"context",
".",
"clear",
"(",
")",
"target",
",",
"payload",
"=",
"_read_body",
"(",
"stream",
",",
"decoder",
",",
"strict",
",",
"logger",
")",
"msg",
"[",
"target",
"]",
"=",
"payload",
"if",
"strict",
"and",
"stream",
".",
"remaining",
"(",
")",
">",
"0",
":",
"raise",
"RuntimeError",
"(",
"\"Unable to fully consume the buffer\"",
")",
"if",
"logger",
":",
"logger",
".",
"debug",
"(",
"'remoting.decode end'",
")",
"return",
"msg"
] | Decodes the incoming stream as a remoting message.
@type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
@param strict: Enforce strict decoding. Default is `False`.
@param logger: Used to log interesting events whilst decoding a remoting
message.
@type logger: U{logging.Logger<http://
docs.python.org/library/logging.html#loggers>}
@param timezone_offset: The difference between the current timezone and
UTC. Date/times should always be handled in UTC to avoid confusion but
this is required for legacy systems.
@type timezone_offset: U{datetime.datetime.timedelta<http://
docs.python.org/library/datetime.html#datetime.timedelta>}
@return: Message L{envelope<Envelope>}. | [
"Decodes",
"the",
"incoming",
"stream",
"as",
"a",
"remoting",
"message",
"."
] | 228fafeaed0f3195676137732384a14820ae285c | https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/remoting/__init__.py#L574-L635 |
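A hedged usage sketch for decode: raw bytes are wrapped in a BufferedByteStream automatically, and the returned Envelope is keyed by each body's target. The file name and message key below are hypothetical.

from neolib.pyamf import remoting

with open('request.amf', 'rb') as f:   # hypothetical captured AMF POST body
    envelope = remoting.decode(f.read())

print(envelope.amfVersion)             # validated above to be <= 0x09
message = envelope['/1']               # hypothetical key stored by decode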
248,895 | tomnor/channelpack | channelpack/pack.py | txtpack | def txtpack(fn, **kwargs):
"""Return a ChannelPack instance loaded with text data file fn.
Attempt to read out custom channel names from the file and call
instance.set_channel_names(). Then return the pack.
This is a lazy function to get a loaded instance, using the
cleverness provided by the pulltxt module. No delimiter or rows-to-skip
and such need to be provided. However, if necessary, `**kwargs` can
be used to override the clevered items provided to numpy's
loadtxt; usecols might be such an item, for example. Also, the
cleverness is only clever if all data is numerical.
Note that the call signature is the same as numpy's `loadtxt
<http://docs.scipy.org/doc/numpy/reference/generated/numpy.loadtxt.html#numpy-loadtxt>`_, which looks like this::
np.loadtxt(fname, dtype=<type 'float'>, comments='#',
delimiter=None, converters=None, skiprows=0, usecols=None,
unpack=False, ndmin=0)
But, when using this function as a wrapper, the only meaningful
argument to override should be `usecols`.
"""
loadfunc = pulltxt.loadtxt_asdict
cp = ChannelPack(loadfunc)
cp.load(fn, **kwargs)
names = pulltxt.PP.channel_names(kwargs.get('usecols', None))
cp.set_channel_names(names)
cp._patpull = pulltxt.PP # Give a reference to the patternpull.
# cp.set_basefilemtime()
return cp | python | def txtpack(fn, **kwargs):
"""Return a ChannelPack instance loaded with text data file fn.
Attempt to read out custom channel names from the file and call
instance.set_channel_names(). Then return the pack.
This is a lazy function to get a loaded instance, using the
cleverness provided by the pulltxt module. No delimiter or rows-to-skip
and such need to be provided. However, if necessary, `**kwargs` can
be used to override the clevered items provided to numpy's
loadtxt; usecols might be such an item, for example. Also, the
cleverness is only clever if all data is numerical.
Note that the call signature is the same as numpy's `loadtxt
<http://docs.scipy.org/doc/numpy/reference/generated/numpy.loadtxt.html#numpy-loadtxt>`_, which looks like this::
np.loadtxt(fname, dtype=<type 'float'>, comments='#',
delimiter=None, converters=None, skiprows=0, usecols=None,
unpack=False, ndmin=0)
But, when using this function as a wrapper, the only meaningful
argument to override should be `usecols`.
"""
loadfunc = pulltxt.loadtxt_asdict
cp = ChannelPack(loadfunc)
cp.load(fn, **kwargs)
names = pulltxt.PP.channel_names(kwargs.get('usecols', None))
cp.set_channel_names(names)
cp._patpull = pulltxt.PP # Give a reference to the patternpull.
# cp.set_basefilemtime()
return cp | [
"def",
"txtpack",
"(",
"fn",
",",
"*",
"*",
"kwargs",
")",
":",
"loadfunc",
"=",
"pulltxt",
".",
"loadtxt_asdict",
"cp",
"=",
"ChannelPack",
"(",
"loadfunc",
")",
"cp",
".",
"load",
"(",
"fn",
",",
"*",
"*",
"kwargs",
")",
"names",
"=",
"pulltxt",
".",
"PP",
".",
"channel_names",
"(",
"kwargs",
".",
"get",
"(",
"'usecols'",
",",
"None",
")",
")",
"cp",
".",
"set_channel_names",
"(",
"names",
")",
"cp",
".",
"_patpull",
"=",
"pulltxt",
".",
"PP",
"# Give a reference to the patternpull.",
"# cp.set_basefilemtime()",
"return",
"cp"
] | Return a ChannelPack instance loaded with text data file fn.
Attempt to read out custom channel names from the file and call
instance.set_channel_names(). Then return the pack.
This is a lazy function to get a loaded instance, using the
cleverness provided by the pulltxt module. No delimiter or rows-to-skip
and such need to be provided. However, if necessary, `**kwargs` can
be used to override the clevered items provided to numpy's
loadtxt; usecols might be such an item, for example. Also, the
cleverness is only clever if all data is numerical.
Note that the call signature is the same as numpy's `loadtxt
<http://docs.scipy.org/doc/numpy/reference/generated/numpy.loadtxt.html#numpy-loadtxt>`_, which looks like this::
np.loadtxt(fname, dtype=<type 'float'>, comments='#',
delimiter=None, converters=None, skiprows=0, usecols=None,
unpack=False, ndmin=0)
But, when using this function as a wrapper, the only meaningful
argument to override should be `usecols`. | [
"Return",
"a",
"ChannelPack",
"instance",
"loaded",
"with",
"text",
"data",
"file",
"fn",
"."
] | 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae | https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pack.py#L1206-L1237 |
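A hypothetical txtpack call matching the docstring: pulltxt infers the delimiter and rows to skip, and usecols is the one override worth passing through to numpy's loadtxt. The file name is made up, and it is assumed txtpack is importable from the channelpack package as in the project's examples.

import channelpack as cp

pack = cp.txtpack('measurements.txt', usecols=(0, 1, 3))  # hypothetical file
print(pack.chnames)   # custom channel names pulled from the file, if any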
248,896 | tomnor/channelpack | channelpack/pack.py | dbfpack | def dbfpack(fn, usecols=None):
"""Return a ChannelPack instance loaded with dbf data file fn.
This is a lazy function to get a loaded instance, using the pulldbf
module."""
loadfunc = pulldbf.dbf_asdict
cp = ChannelPack(loadfunc)
cp.load(fn, usecols)
names = pulldbf.channel_names(fn, usecols)
cp.set_channel_names(names)
# cp.set_basefilemtime()
return cp | python | def dbfpack(fn, usecols=None):
"""Return a ChannelPack instance loaded with dbf data file fn.
This is a lazy function to get a loaded instance, using the pulldbf
module."""
loadfunc = pulldbf.dbf_asdict
cp = ChannelPack(loadfunc)
cp.load(fn, usecols)
names = pulldbf.channel_names(fn, usecols)
cp.set_channel_names(names)
# cp.set_basefilemtime()
return cp | [
"def",
"dbfpack",
"(",
"fn",
",",
"usecols",
"=",
"None",
")",
":",
"loadfunc",
"=",
"pulldbf",
".",
"dbf_asdict",
"cp",
"=",
"ChannelPack",
"(",
"loadfunc",
")",
"cp",
".",
"load",
"(",
"fn",
",",
"usecols",
")",
"names",
"=",
"pulldbf",
".",
"channel_names",
"(",
"fn",
",",
"usecols",
")",
"cp",
".",
"set_channel_names",
"(",
"names",
")",
"# cp.set_basefilemtime()",
"return",
"cp"
] | Return a ChannelPack instance loaded with dbf data file fn.
This is a lazy function to get a loaded instance, using the pulldbf
module. | [
"Return",
"a",
"ChannelPack",
"instance",
"loaded",
"with",
"dbf",
"data",
"file",
"fn",
"."
] | 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae | https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pack.py#L1240-L1252 |
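dbfpack follows the same lazy pattern for dBase files; a one-line hypothetical usage:

import channelpack as cp

pack = cp.dbfpack('logged.dbf', usecols=(1, 2))  # hypothetical dbf file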
248,897 | tomnor/channelpack | channelpack/pack.py | ChannelPack.load | def load(self, *args, **kwargs):
"""Load data using loadfunc.
args, kwargs:
forward to the loadfunc. args[0] must be the filename, so it
means that loadfunc must take the filename as its first
argument.
Set the filename attribute.
.. note::
Updates the mask if not no_auto.
ChannelPack is assuming a need for loading data from disc. If
there is a desire to load some made-up data, a filename pointing
to some actual file is nevertheless required. Here is a
suggestion::
>>> import channelpack as cp
>>> import tempfile
>>> tf = tempfile.NamedTemporaryFile()
>>> d = {2: np.arange(5), 5: np.arange(10, 15)}
>>> def lf(fn):
... return d
...
>>> pack = cp.ChannelPack(lf)
>>> pack.load(tf.name)
>>> pack.filename is not None
True
>>> pack.chnames_0
{2: 'ch2', 5: 'ch5'}
"""
D = self.loadfunc(*args, **kwargs)
if self.chnames is not None:
if set(D) - set(self.chnames):
raise ValueError('New data set have different keys')
self.D = D
self.keys = sorted(self.D.keys())
# If not all the same, there should have been an error already
self.rec_cnt = len(self.D[self.keys[0]])
fallnames = _fallback_names(self.keys)
self.chnames_0 = dict(zip(self.keys, fallnames))
self._set_filename(args[0])
self.set_basefilemtime()
self.args = args
self.kwargs = kwargs
if not self.no_auto:
# Called here if a reload is done on the current instance I guess.
self.make_mask() | python | def load(self, *args, **kwargs):
"""Load data using loadfunc.
args, kwargs:
forward to the loadfunc. args[0] must be the filename, so it
means that loadfunc must take the filename as its first
argument.
Set the filename attribute.
.. note::
Updates the mask if not no_auto.
ChannelPack is assuming a need for loading data from disc. If
there is a desire to load some made-up data, a filename pointing
to some actual file is nevertheless required. Here is a
suggestion::
>>> import channelpack as cp
>>> import tempfile
>>> tf = tempfile.NamedTemporaryFile()
>>> d = {2: np.arange(5), 5: np.arange(10, 15)}
>>> def lf(fn):
... return d
...
>>> pack = cp.ChannelPack(lf)
>>> pack.load(tf.name)
>>> pack.filename is not None
True
>>> pack.chnames_0
{2: 'ch2', 5: 'ch5'}
"""
D = self.loadfunc(*args, **kwargs)
if self.chnames is not None:
if set(D) - set(self.chnames):
raise ValueError('New data set have different keys')
self.D = D
self.keys = sorted(self.D.keys())
# If not all the same, there should have been an error already
self.rec_cnt = len(self.D[self.keys[0]])
fallnames = _fallback_names(self.keys)
self.chnames_0 = dict(zip(self.keys, fallnames))
self._set_filename(args[0])
self.set_basefilemtime()
self.args = args
self.kwargs = kwargs
if not self.no_auto:
# Called here if a reload is done on the current instance I guess.
self.make_mask() | [
"def",
"load",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"D",
"=",
"self",
".",
"loadfunc",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"self",
".",
"chnames",
"is",
"not",
"None",
":",
"if",
"set",
"(",
"D",
")",
"-",
"set",
"(",
"self",
".",
"chnames",
")",
":",
"raise",
"ValueError",
"(",
"'New data set have different keys'",
")",
"self",
".",
"D",
"=",
"D",
"self",
".",
"keys",
"=",
"sorted",
"(",
"self",
".",
"D",
".",
"keys",
"(",
")",
")",
"# If not all the same, there should have been an error already",
"self",
".",
"rec_cnt",
"=",
"len",
"(",
"self",
".",
"D",
"[",
"self",
".",
"keys",
"[",
"0",
"]",
"]",
")",
"fallnames",
"=",
"_fallback_names",
"(",
"self",
".",
"keys",
")",
"self",
".",
"chnames_0",
"=",
"dict",
"(",
"zip",
"(",
"self",
".",
"keys",
",",
"fallnames",
")",
")",
"self",
".",
"_set_filename",
"(",
"args",
"[",
"0",
"]",
")",
"self",
".",
"set_basefilemtime",
"(",
")",
"self",
".",
"args",
"=",
"args",
"self",
".",
"kwargs",
"=",
"kwargs",
"if",
"not",
"self",
".",
"no_auto",
":",
"# Called here if a reload is done on the current instance I guess.",
"self",
".",
"make_mask",
"(",
")"
] | Load data using loadfunc.
args, kwargs:
forward to the loadfunc. args[0] must be the filename, so it
means that loadfunc must take the filename as its first
argument.
Set the filename attribute.
.. note::
Updates the mask if not no_auto.
ChannelPack is assuming a need for loading data from disc. If
there is a desire to load some made-up data, a filename pointing
to some actual file is nevertheless required. Here is a
suggestion::
>>> import channelpack as cp
>>> import tempfile
>>> tf = tempfile.NamedTemporaryFile()
>>> d = {2: np.arange(5), 5: np.arange(10, 15)}
>>> def lf(fn):
... return d
...
>>> pack = cp.ChannelPack(lf)
>>> pack.load(tf.name)
>>> pack.filename is not None
True
>>> pack.chnames_0
{2: 'ch2', 5: 'ch5'} | [
"Load",
"data",
"using",
"loadfunc",
"."
] | 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae | https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pack.py#L220-L277 |
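Beyond the tempfile trick in the docstring, any callable taking the filename first and returning a dict of equal-length arrays keyed by column index can serve as loadfunc. A hedged sketch wrapping numpy.loadtxt; the function name and file name are hypothetical.

import numpy as np
import channelpack as cp

def csv_loadfunc(fn, **kwargs):
    # Return the {column index: 1-D array} dict shape that load() expects.
    data = np.loadtxt(fn, delimiter=',', ndmin=2, **kwargs)
    return {i: data[:, i] for i in range(data.shape[1])}

pack = cp.ChannelPack(csv_loadfunc)
pack.load('log.csv')   # sets filename, rec_cnt and the fallback chnames_0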
248,898 | tomnor/channelpack | channelpack/pack.py | ChannelPack.append_load | def append_load(self, *args, **kwargs):
"""Append data using loadfunc.
args, kwargs:
forward to the loadfunc. args[0] must be the filename, so it
means that loadfunc must take the filename as its first
argument.
If self is not already a loaded instance, call load and return.
Raise an error if there is a mismatch of channel indexes or
channel count.
Append the data to self's existing data. Set filename to the new
file.
Create a new attribute - a dict with meta-data on all files loaded,
'metamulti'.
.. note::
Updates the mask if not no_auto.
"""
if not self.D:
self.load(*args, **kwargs)
return
newD = self.loadfunc(*args, **kwargs)
s1, s2 = set(self.D.keys()), set(newD.keys())
offenders = s1 ^ s2
if offenders:
mess = ('Those keys (respectively) were in one of the dicts ' +
'but not the other: {}.')
offs = ', '.join([str(n) for n in offenders])
raise KeyError(mess.format(offs))
# Append the data early to fail if fail before other actions.
for k, a in self.D.iteritems():
self.D[k] = np.append(a, newD.pop(k))
if not hasattr(self, 'metamulti'):
self.metamulti = dict(filenames=[], mtimestamps=[], mtimenames=[],
slices=[])
self.metamulti['filenames'].append(self.filename)
self.metamulti['mtimestamps'].append(self.mtimestamp)
self.metamulti['mtimenames'].append(self.mtimefs)
self.metamulti['slices'].append(slice(0, self.rec_cnt))
self.rec_cnt = len(self.D[self.keys[0]])
self._set_filename(args[0])
self.set_basefilemtime()
start = self.metamulti['slices'][-1].stop
stop = self.rec_cnt
self.metamulti['filenames'].append(self.filename)
self.metamulti['mtimestamps'].append(self.mtimestamp)
self.metamulti['mtimenames'].append(self.mtimefs)
self.metamulti['slices'].append(slice(start, stop))
if not self.no_auto:
self.make_mask() | python | def append_load(self, *args, **kwargs):
"""Append data using loadfunc.
args, kwargs:
forward to the loadfunc. args[0] must be the filename, so it
means that loadfunc must take the filename as its first
argument.
If self is not already a loaded instance, call load and return.
Raise an error if there is a mismatch of channel indexes or
channel count.
Append the data to self's existing data. Set filename to the new
file.
Create a new attribute - a dict with meta-data on all files loaded,
'metamulti'.
.. note::
Updates the mask if not no_auto.
"""
if not self.D:
self.load(*args, **kwargs)
return
newD = self.loadfunc(*args, **kwargs)
s1, s2 = set(self.D.keys()), set(newD.keys())
offenders = s1 ^ s2
if offenders:
mess = ('Those keys (respectively) were in one of the dicts ' +
'but not the other: {}.')
offs = ', '.join([str(n) for n in offenders])
raise KeyError(mess.format(offs))
# Append the data early to fail if fail before other actions.
for k, a in self.D.iteritems():
self.D[k] = np.append(a, newD.pop(k))
if not hasattr(self, 'metamulti'):
self.metamulti = dict(filenames=[], mtimestamps=[], mtimenames=[],
slices=[])
self.metamulti['filenames'].append(self.filename)
self.metamulti['mtimestamps'].append(self.mtimestamp)
self.metamulti['mtimenames'].append(self.mtimefs)
self.metamulti['slices'].append(slice(0, self.rec_cnt))
self.rec_cnt = len(self.D[self.keys[0]])
self._set_filename(args[0])
self.set_basefilemtime()
start = self.metamulti['slices'][-1].stop
stop = self.rec_cnt
self.metamulti['filenames'].append(self.filename)
self.metamulti['mtimestamps'].append(self.mtimestamp)
self.metamulti['mtimenames'].append(self.mtimefs)
self.metamulti['slices'].append(slice(start, stop))
if not self.no_auto:
self.make_mask() | [
"def",
"append_load",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"D",
":",
"self",
".",
"load",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"newD",
"=",
"self",
".",
"loadfunc",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"s1",
",",
"s2",
"=",
"set",
"(",
"self",
".",
"D",
".",
"keys",
"(",
")",
")",
",",
"set",
"(",
"newD",
".",
"keys",
"(",
")",
")",
"offenders",
"=",
"s1",
"^",
"s2",
"if",
"offenders",
":",
"mess",
"=",
"(",
"'Those keys (respectively) were in one of the dicts '",
"+",
"'but not the other: {}.'",
")",
"offs",
"=",
"', '",
".",
"join",
"(",
"[",
"str",
"(",
"n",
")",
"for",
"n",
"in",
"offenders",
"]",
")",
"raise",
"KeyError",
"(",
"mess",
".",
"format",
"(",
"offs",
")",
")",
"# Append the data early to fail if fail before other actions.",
"for",
"k",
",",
"a",
"in",
"self",
".",
"D",
".",
"iteritems",
"(",
")",
":",
"self",
".",
"D",
"[",
"k",
"]",
"=",
"np",
".",
"append",
"(",
"a",
",",
"newD",
".",
"pop",
"(",
"k",
")",
")",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'metamulti'",
")",
":",
"self",
".",
"metamulti",
"=",
"dict",
"(",
"filenames",
"=",
"[",
"]",
",",
"mtimestamps",
"=",
"[",
"]",
",",
"mtimenames",
"=",
"[",
"]",
",",
"slices",
"=",
"[",
"]",
")",
"self",
".",
"metamulti",
"[",
"'filenames'",
"]",
".",
"append",
"(",
"self",
".",
"filename",
")",
"self",
".",
"metamulti",
"[",
"'mtimestamps'",
"]",
".",
"append",
"(",
"self",
".",
"mtimestamp",
")",
"self",
".",
"metamulti",
"[",
"'mtimenames'",
"]",
".",
"append",
"(",
"self",
".",
"mtimefs",
")",
"self",
".",
"metamulti",
"[",
"'slices'",
"]",
".",
"append",
"(",
"slice",
"(",
"0",
",",
"self",
".",
"rec_cnt",
")",
")",
"self",
".",
"rec_cnt",
"=",
"len",
"(",
"self",
".",
"D",
"[",
"self",
".",
"keys",
"[",
"0",
"]",
"]",
")",
"self",
".",
"_set_filename",
"(",
"args",
"[",
"0",
"]",
")",
"self",
".",
"set_basefilemtime",
"(",
")",
"start",
"=",
"self",
".",
"metamulti",
"[",
"'slices'",
"]",
"[",
"-",
"1",
"]",
".",
"stop",
"stop",
"=",
"self",
".",
"rec_cnt",
"self",
".",
"metamulti",
"[",
"'filenames'",
"]",
".",
"append",
"(",
"self",
".",
"filename",
")",
"self",
".",
"metamulti",
"[",
"'mtimestamps'",
"]",
".",
"append",
"(",
"self",
".",
"mtimestamp",
")",
"self",
".",
"metamulti",
"[",
"'mtimenames'",
"]",
".",
"append",
"(",
"self",
".",
"mtimefs",
")",
"self",
".",
"metamulti",
"[",
"'slices'",
"]",
".",
"append",
"(",
"slice",
"(",
"start",
",",
"stop",
")",
")",
"if",
"not",
"self",
".",
"no_auto",
":",
"self",
".",
"make_mask",
"(",
")"
] | Append data using loadfunc.
args, kwargs:
forward to the loadfunc. args[0] must be the filename, so it
means that loadfunc must take the filename as its first
argument.
If self is not already a loaded instance, call load and return.
Make error if there is a mismatch of channels indexes or
channels count.
Append the data to selfs existing data. Set filename to the new
file.
Create new attribute - a dict with meta-data on all files loaded,
'metamulti.'
.. note::
Updates the mask if not no_auto. | [
"Append",
"data",
"using",
"loadfunc",
"."
] | 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae | https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pack.py#L279-L342 |
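After one or more append_load calls, metamulti records which slice of the concatenated arrays came from which file. A hypothetical two-file session, continuing with the pack constructed above:

pack.load('run1.csv')           # hypothetical files, loaded back to back
pack.append_load('run2.csv')
for fname, sl in zip(pack.metamulti['filenames'], pack.metamulti['slices']):
    print(fname, sl.start, sl.stop)   # each file's span in the joined data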
248,899 | tomnor/channelpack | channelpack/pack.py | ChannelPack.set_samplerate | def set_samplerate(self, rate):
"""Set sample rate to rate.
rate: int or float
rate is given as samples / timeunit. If sample rate is set, it
will have an impact on the duration rule conditions. If duration
is set to 2.5 and samplerate is 100, a duration of 250 records
is required for the logical conditions to be true.
.. note::
Updates the mask if not no_auto."""
# Test and set value:
float(rate)
self.conconf.set_condition('samplerate', rate)
if not self.no_auto:
self.make_mask() | python | def set_samplerate(self, rate):
"""Set sample rate to rate.
rate: int or float
rate is given as samples / timeunit. If sample rate is set, it
will have an impact on the duration rule conditions. If duration
is set to 2.5 and samplerate is 100, a duration of 250 records
is required for the logical conditions to be true.
.. note::
Updates the mask if not no_auto."""
# Test and set value:
float(rate)
self.conconf.set_condition('samplerate', rate)
if not self.no_auto:
self.make_mask() | [
"def",
"set_samplerate",
"(",
"self",
",",
"rate",
")",
":",
"# Test and set value:",
"float",
"(",
"rate",
")",
"self",
".",
"conconf",
".",
"set_condition",
"(",
"'samplerate'",
",",
"rate",
")",
"if",
"not",
"self",
".",
"no_auto",
":",
"self",
".",
"make_mask",
"(",
")"
] | Set sample rate to rate.
rate: int or float
rate is given as samples / timeunit. If sample rate is set, it
will have an impact on the duration rule conditions. If duration
is set to 2.5 and samplerate is 100, a duration of 250 records
is required for the logical conditions to be true.
.. note::
Updates the mask if not no_auto. | [
"Set",
"sample",
"rate",
"to",
"rate",
"."
] | 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae | https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pack.py#L412-L429 |
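The samplerate-to-duration arithmetic from the docstring, spelled out: the number of records a condition must hold for is duration times samplerate, so a 2.5 time-unit rule at 100 samples per unit means 250 records.

pack.set_samplerate(100)      # 100 samples per time unit (hypothetical)
records_required = 2.5 * 100  # a 2.5-unit duration rule -> 250 records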