id (int32, 0-252k) | repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (1 class: python) | code (string, 75-19.8k chars) | code_tokens (sequence) | docstring (string, 3-17.3k chars) | docstring_tokens (sequence) | sha (string, 40 chars) | url (string, 87-242 chars)
---|---|---|---|---|---|---|---|---|---|---|---
251,500 | EventTeam/beliefs | src/beliefs/cells/dicts.py | DictCell.to_dict | def to_dict(self):
"""
This method converts the DictCell into a python `dict`. This is useful
for JSON serialization.
"""
output = {}
for key, value in self.__dict__['p'].iteritems():
if value is None or isinstance(value, SIMPLE_TYPES):
output[key] = value
elif hasattr(value, 'to_dot'):
output[key] = value.to_dot()
elif hasattr(value, 'to_dict'):
output[key] = value.to_dict()
elif isinstance(value, datetime.date):
# Convert date/datetime to ms-since-epoch ("new Date()").
ms = time.mktime(value.utctimetuple()) * 1000
ms += getattr(value, 'microseconds', 0) / 1000
output[key] = int(ms)
elif isinstance(value, dict):
output[key] = []
else:
raise ValueError('cannot encode ' + repr(key))
return output | python | def to_dict(self):
"""
This method converts the DictCell into a python `dict`. This is useful
for JSON serialization.
"""
output = {}
for key, value in self.__dict__['p'].iteritems():
if value is None or isinstance(value, SIMPLE_TYPES):
output[key] = value
elif hasattr(value, 'to_dot'):
output[key] = value.to_dot()
elif hasattr(value, 'to_dict'):
output[key] = value.to_dict()
elif isinstance(value, datetime.date):
# Convert date/datetime to ms-since-epoch ("new Date()").
ms = time.mktime(value.utctimetuple()) * 1000
ms += getattr(value, 'microseconds', 0) / 1000
output[key] = int(ms)
elif isinstance(value, dict):
output[key] = []
else:
raise ValueError('cannot encode ' + repr(key))
return output | [
"def",
"to_dict",
"(",
"self",
")",
":",
"output",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"self",
".",
"__dict__",
"[",
"'p'",
"]",
".",
"iteritems",
"(",
")",
":",
"if",
"value",
"is",
"None",
"or",
"isinstance",
"(",
"value",
",",
"SIMPLE_TYPES",
")",
":",
"output",
"[",
"key",
"]",
"=",
"value",
"elif",
"hasattr",
"(",
"value",
",",
"'to_dot'",
")",
":",
"output",
"[",
"key",
"]",
"=",
"value",
".",
"to_dot",
"(",
")",
"elif",
"hasattr",
"(",
"value",
",",
"'to_dict'",
")",
":",
"output",
"[",
"key",
"]",
"=",
"value",
".",
"to_dict",
"(",
")",
"elif",
"isinstance",
"(",
"value",
",",
"datetime",
".",
"date",
")",
":",
"# Convert date/datetime to ms-since-epoch (\"new Date()\").",
"ms",
"=",
"time",
".",
"mktime",
"(",
"value",
".",
"utctimetuple",
"(",
")",
")",
"*",
"1000",
"ms",
"+=",
"getattr",
"(",
"value",
",",
"'microseconds'",
",",
"0",
")",
"/",
"1000",
"output",
"[",
"key",
"]",
"=",
"int",
"(",
"ms",
")",
"elif",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"output",
"[",
"key",
"]",
"=",
"[",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'cannot encode '",
"+",
"repr",
"(",
"key",
")",
")",
"return",
"output"
] | This method converts the DictCell into a python `dict`. This is useful
for JSON serialization. | [
"This",
"method",
"converts",
"the",
"DictCell",
"into",
"a",
"python",
"dict",
".",
"This",
"is",
"useful",
"for",
"JSON",
"serialization",
"."
] | c07d22b61bebeede74a72800030dde770bf64208 | https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/cells/dicts.py#L209-L232 |
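The date branch of `to_dict` above deserves a second look. Below is a minimal, standalone sketch of the ms-since-epoch conversion using only the stdlib (the `DictCell`/`SIMPLE_TYPES` context from the row is not reproduced). Note that plain `datetime` objects expose `microsecond` (singular), so the row's `getattr(value, 'microseconds', 0)` always falls back to 0 for stdlib datetimes.

```python
import time
from datetime import datetime

value = datetime(2014, 1, 2, 3, 4, 5, 678000)
# Seconds since epoch for the UTC time tuple, scaled to milliseconds.
ms = time.mktime(value.utctimetuple()) * 1000
# Stdlib datetimes carry `microsecond` (singular); convert it to milliseconds.
ms += value.microsecond / 1000.0
print(int(ms))  # suitable for JavaScript's `new Date(ms)`
```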
251,501 | EventTeam/beliefs | src/beliefs/cells/dicts.py | DictCell.to_latex | def to_latex(self):
""" Returns a LaTeX representation of an attribute-value matrix """
latex = r"[{} "
for attribute, value in self:
if attribute in ['speaker_model', 'is_in_commonground']: continue
value_l = value.to_latex()
if value_l == "": continue
latex += "{attribute:<15} & {value:<20} \\\\ \n".format(attribute=attribute, value=value_l)
latex += "]\n"
return latex | python | def to_latex(self):
""" Returns a LaTeX representation of an attribute-value matrix """
latex = r"[{} "
for attribute, value in self:
if attribute in ['speaker_model', 'is_in_commonground']: continue
value_l = value.to_latex()
if value_l == "": continue
latex += "{attribute:<15} & {value:<20} \\\\ \n".format(attribute=attribute, value=value_l)
latex += "]\n"
return latex | [
"def",
"to_latex",
"(",
"self",
")",
":",
"latex",
"=",
"r\"[{} \"",
"for",
"attribute",
",",
"value",
"in",
"self",
":",
"if",
"attribute",
"in",
"[",
"'speaker_model'",
",",
"'is_in_commonground'",
"]",
":",
"continue",
"value_l",
"=",
"value",
".",
"to_latex",
"(",
")",
"if",
"value_l",
"==",
"\"\"",
":",
"continue",
"latex",
"+=",
"\"{attribute:<15} & {value:<20} \\\\\\\\ \\n\"",
".",
"format",
"(",
"attribute",
"=",
"attribute",
",",
"value",
"=",
"value_l",
")",
"latex",
"+=",
"\"]\\n\"",
"return",
"latex"
] | Returns a LaTeX representation of an attribute-value matrix | [
"Returns",
"a",
"LaTeX",
"representation",
"of",
"an",
"attribute",
"-",
"value",
"matrix"
] | c07d22b61bebeede74a72800030dde770bf64208 | https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/cells/dicts.py#L234-L243 |
251,502 | alexhayes/django-toolkit | django_toolkit/date_util.py | business_days | def business_days(start, stop):
"""
Return business days between two inclusive dates - ignoring public holidays.
Note that start must be less than stop or else 0 is returned.
@param start: Start date
@param stop: Stop date
@return int
"""
dates=rrule.rruleset()
# Get dates between start/stop (which are inclusive)
dates.rrule(rrule.rrule(rrule.DAILY, dtstart=start, until=stop))
# Exclude Sat/Sun
dates.exrule(rrule.rrule(rrule.DAILY, byweekday=(rrule.SA, rrule.SU), dtstart=start))
return dates.count() | python | def business_days(start, stop):
"""
Return business days between two inclusive dates - ignoring public holidays.
Note that start must be less than stop or else 0 is returned.
@param start: Start date
@param stop: Stop date
@return int
"""
dates=rrule.rruleset()
# Get dates between start/stop (which are inclusive)
dates.rrule(rrule.rrule(rrule.DAILY, dtstart=start, until=stop))
# Exclude Sat/Sun
dates.exrule(rrule.rrule(rrule.DAILY, byweekday=(rrule.SA, rrule.SU), dtstart=start))
return dates.count() | [
"def",
"business_days",
"(",
"start",
",",
"stop",
")",
":",
"dates",
"=",
"rrule",
".",
"rruleset",
"(",
")",
"# Get dates between start/stop (which are inclusive)",
"dates",
".",
"rrule",
"(",
"rrule",
".",
"rrule",
"(",
"rrule",
".",
"DAILY",
",",
"dtstart",
"=",
"start",
",",
"until",
"=",
"stop",
")",
")",
"# Exclude Sat/Sun ",
"dates",
".",
"exrule",
"(",
"rrule",
".",
"rrule",
"(",
"rrule",
".",
"DAILY",
",",
"byweekday",
"=",
"(",
"rrule",
".",
"SA",
",",
"rrule",
".",
"SU",
")",
",",
"dtstart",
"=",
"start",
")",
")",
"return",
"dates",
".",
"count",
"(",
")"
] | Return business days between two inclusive dates - ignoring public holidays.
Note that start must be less than stop or else 0 is returned.
@param start: Start date
@param stop: Stop date
@return int | [
"Return",
"business",
"days",
"between",
"two",
"inclusive",
"dates",
"-",
"ignoring",
"public",
"holidays",
".",
"Note",
"that",
"start",
"must",
"be",
"less",
"than",
"stop",
"or",
"else",
"0",
"is",
"returned",
"."
] | b64106392fad596defc915b8235fe6e1d0013b5b | https://github.com/alexhayes/django-toolkit/blob/b64106392fad596defc915b8235fe6e1d0013b5b/django_toolkit/date_util.py#L33-L48 |
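A quick usage sketch of `business_days` above, assuming `django-toolkit` is importable (dateutil's `rruleset` does the heavy lifting):

```python
from datetime import date
from django_toolkit.date_util import business_days

# Mon 2 Jan 2023 through Sun 8 Jan 2023: Sat/Sun are excluded, so 5 days.
print(business_days(date(2023, 1, 2), date(2023, 1, 8)))  # 5
```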
251,503 | alexhayes/django-toolkit | django_toolkit/date_util.py | get_anniversary_periods | def get_anniversary_periods(start, finish, anniversary=1):
"""
Return a list of anniversary periods between start and finish.
"""
import sys
current = start
periods = []
while current <= finish:
(period_start, period_finish) = date_period(DATE_FREQUENCY_MONTHLY, anniversary, current)
current = period_start + relativedelta(months=+1)
period_start = period_start if period_start > start else start
period_finish = period_finish if period_finish < finish else finish
periods.append((period_start, period_finish))
return periods | python | def get_anniversary_periods(start, finish, anniversary=1):
"""
Return a list of anniversary periods between start and finish.
"""
import sys
current = start
periods = []
while current <= finish:
(period_start, period_finish) = date_period(DATE_FREQUENCY_MONTHLY, anniversary, current)
current = period_start + relativedelta(months=+1)
period_start = period_start if period_start > start else start
period_finish = period_finish if period_finish < finish else finish
periods.append((period_start, period_finish))
return periods | [
"def",
"get_anniversary_periods",
"(",
"start",
",",
"finish",
",",
"anniversary",
"=",
"1",
")",
":",
"import",
"sys",
"current",
"=",
"start",
"periods",
"=",
"[",
"]",
"while",
"current",
"<=",
"finish",
":",
"(",
"period_start",
",",
"period_finish",
")",
"=",
"date_period",
"(",
"DATE_FREQUENCY_MONTHLY",
",",
"anniversary",
",",
"current",
")",
"current",
"=",
"period_start",
"+",
"relativedelta",
"(",
"months",
"=",
"+",
"1",
")",
"period_start",
"=",
"period_start",
"if",
"period_start",
">",
"start",
"else",
"start",
"period_finish",
"=",
"period_finish",
"if",
"period_finish",
"<",
"finish",
"else",
"finish",
"periods",
".",
"append",
"(",
"(",
"period_start",
",",
"period_finish",
")",
")",
"return",
"periods"
] | Return a list of anniversary periods between start and finish. | [
"Return",
"a",
"list",
"of",
"anniversaries",
"periods",
"between",
"start",
"and",
"finish",
"."
] | b64106392fad596defc915b8235fe6e1d0013b5b | https://github.com/alexhayes/django-toolkit/blob/b64106392fad596defc915b8235fe6e1d0013b5b/django_toolkit/date_util.py#L68-L81 |
251,504 | alexhayes/django-toolkit | django_toolkit/date_util.py | previous_quarter | def previous_quarter(d):
"""
Retrieve the previous quarter for dt
"""
from django_toolkit.datetime_util import quarter as datetime_quarter
return quarter( (datetime_quarter(datetime(d.year, d.month, d.day))[0] + timedelta(days=-1)).date() ) | python | def previous_quarter(d):
"""
Retrieve the previous quarter for dt
"""
from django_toolkit.datetime_util import quarter as datetime_quarter
return quarter( (datetime_quarter(datetime(d.year, d.month, d.day))[0] + timedelta(days=-1)).date() ) | [
"def",
"previous_quarter",
"(",
"d",
")",
":",
"from",
"django_toolkit",
".",
"datetime_util",
"import",
"quarter",
"as",
"datetime_quarter",
"return",
"quarter",
"(",
"(",
"datetime_quarter",
"(",
"datetime",
"(",
"d",
".",
"year",
",",
"d",
".",
"month",
",",
"d",
".",
"day",
")",
")",
"[",
"0",
"]",
"+",
"timedelta",
"(",
"days",
"=",
"-",
"1",
")",
")",
".",
"date",
"(",
")",
")"
] | Retrieve the previous quarter for dt | [
"Retrieve",
"the",
"previous",
"quarter",
"for",
"dt"
] | b64106392fad596defc915b8235fe6e1d0013b5b | https://github.com/alexhayes/django-toolkit/blob/b64106392fad596defc915b8235fe6e1d0013b5b/django_toolkit/date_util.py#L174-L179 |
251,505 | knagra/farnsworth | workshift/signals.py | _check_field_changed | def _check_field_changed(instance, old_instance, field_name, update_fields=None):
"""
Examines update_fields and an attribute of an instance to determine if
that attribute has changed prior to the instance being saved.
Parameters
----------
field_name : str
instance : object
old_instance : object
update_fields : list of str, optional
"""
if update_fields is not None and field_name not in update_fields:
return False
return getattr(instance, field_name) != getattr(old_instance, field_name) | python | def _check_field_changed(instance, old_instance, field_name, update_fields=None):
"""
Examines update_fields and an attribute of an instance to determine if
that attribute has changed prior to the instance being saved.
Parameters
----------
field_name : str
instance : object
old_instance : object
update_fields : list of str, optional
"""
if update_fields is not None and field_name not in update_fields:
return False
return getattr(instance, field_name) != getattr(old_instance, field_name) | [
"def",
"_check_field_changed",
"(",
"instance",
",",
"old_instance",
",",
"field_name",
",",
"update_fields",
"=",
"None",
")",
":",
"if",
"update_fields",
"is",
"not",
"None",
"and",
"field_name",
"not",
"in",
"update_fields",
":",
"return",
"False",
"return",
"getattr",
"(",
"instance",
",",
"field_name",
")",
"!=",
"getattr",
"(",
"old_instance",
",",
"field_name",
")"
] | Examines update_fields and an attribute of an instance to determine if
that attribute has changed prior to the instance being saved.
Parameters
----------
field_name : str
instance : object
old_instance : object
update_fields : list of str, optional | [
"Examines",
"update_fields",
"and",
"an",
"attribute",
"of",
"an",
"instance",
"to",
"determine",
"if",
"that",
"attribute",
"has",
"changed",
"prior",
"to",
"the",
"instance",
"being",
"saved",
"."
] | 1b6589f0d9fea154f0a1e2231ed906764ed26d26 | https://github.com/knagra/farnsworth/blob/1b6589f0d9fea154f0a1e2231ed906764ed26d26/workshift/signals.py#L116-L131 |
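A small illustration of `_check_field_changed` above, using a throwaway class in place of a Django model instance (the helper itself only relies on `getattr`):

```python
class Profile(object):
    def __init__(self, email):
        self.email = email

old, new = Profile("a@example.com"), Profile("b@example.com")
_check_field_changed(new, old, "email")                          # True
_check_field_changed(new, old, "email", update_fields=["name"])  # False: "email" not slated for update
```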
251,506 | happy5214/competitions-match | competitions/match/default/SimpleMatch.py | SimpleMatch.play | def play(self):
"""Play the match.
This match simulator iterates through two lists of random numbers
25 times, one for each team, comparing the numbers and awarding a point
to the team with the higher number. The team with more points at the
end of the lists wins and is recorded in the winner field. If the result
is a draw, the winner field is set to None.
@return: The winner (or None if the result is a draw)
@rtype: An object that can be converted to a string or NoneType
"""
score1 = 0
score2 = 0
for __ in range(25):
num1 = random.randint(0, 100)
num2 = random.randint(0, 100)
if num1 > num2:
score1 += 1
elif num2 > num1:
score2 += 1
if score1 > score2:
self.winner = self.team1
self.loser = self.team2
self.drawn = False
elif score2 > score1:
self.winner = self.team2
self.loser = self.team1
self.drawn = False
else:
self.winner = None
self.loser = None
self.drawn = True
self.score1 = score1
self.score2 = score2
return self.winner | python | def play(self):
"""Play the match.
This match simulator iterates through two lists of random numbers
25 times, one for each team, comparing the numbers and awarding a point
to the team with the higher number. The team with more points at the
end of the lists wins and is recorded in the winner field. If the result
is a draw, the winner field is set to None.
@return: The winner (or None if the result is a draw)
@rtype: An object that can be converted to a string or NoneType
"""
score1 = 0
score2 = 0
for __ in range(25):
num1 = random.randint(0, 100)
num2 = random.randint(0, 100)
if num1 > num2:
score1 += 1
elif num2 > num1:
score2 += 1
if score1 > score2:
self.winner = self.team1
self.loser = self.team2
self.drawn = False
elif score2 > score1:
self.winner = self.team2
self.loser = self.team1
self.drawn = False
else:
self.winner = None
self.loser = None
self.drawn = True
self.score1 = score1
self.score2 = score2
return self.winner | [
"def",
"play",
"(",
"self",
")",
":",
"score1",
"=",
"0",
"score2",
"=",
"0",
"for",
"__",
"in",
"range",
"(",
"25",
")",
":",
"num1",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"100",
")",
"num2",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"100",
")",
"if",
"num1",
">",
"num2",
":",
"score1",
"+=",
"1",
"elif",
"num2",
">",
"num1",
":",
"score2",
"+=",
"1",
"if",
"score1",
">",
"score2",
":",
"self",
".",
"winner",
"=",
"self",
".",
"team1",
"self",
".",
"loser",
"=",
"self",
".",
"team2",
"self",
".",
"drawn",
"=",
"False",
"elif",
"score2",
">",
"score1",
":",
"self",
".",
"winner",
"=",
"self",
".",
"team2",
"self",
".",
"loser",
"=",
"self",
".",
"team1",
"self",
".",
"drawn",
"=",
"False",
"else",
":",
"self",
".",
"winner",
"=",
"None",
"self",
".",
"loser",
"=",
"None",
"self",
".",
"drawn",
"=",
"True",
"self",
".",
"score1",
"=",
"score1",
"self",
".",
"score2",
"=",
"score2",
"return",
"self",
".",
"winner"
] | Play the match.
This match simulator iterates through two lists of random numbers
25 times, one for each team, comparing the numbers and awarding a point
to the team with the higher number. The team with more points at the
end of the lists wins and is recorded in the winner field. If the result
is a draw, the winner field is set to None.
@return: The winner (or None if the result is a draw)
@rtype: An object that can be converted to a string or NoneType | [
"Play",
"the",
"match",
"."
] | 0eb77af258d9207c9c3b952e49ce70c856e15588 | https://github.com/happy5214/competitions-match/blob/0eb77af258d9207c9c3b952e49ce70c856e15588/competitions/match/default/SimpleMatch.py#L72-L107 |
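A hypothetical driver for the simulator above. The constructor signature is not shown in this row, so the two-team form below is an assumption:

```python
match = SimpleMatch("Reds", "Blues")  # assumed constructor: (team1, team2)
winner = match.play()
print(match.score1, match.score2)
print("draw" if match.drawn else winner)
```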
251,507 | erbriones/shapeshift | shapeshift/generic.py | create_logger | def create_logger(name, formatter=None, handler=None, level=None):
"""
Returns a new logger for the specified name.
"""
logger = logging.getLogger(name)
#: remove existing handlers
logger.handlers = []
#: use a standard out handler
if handler is None:
handler = logging.StreamHandler(sys.stdout)
#: set the formatter when a formatter is given
if formatter is not None:
handler.setFormatter(formatter)
#: set DEBUG level if no level is specified
if level is None:
level = logging.DEBUG
handler.setLevel(level)
logger.setLevel(level)
logger.addHandler(handler)
return logger | python | def create_logger(name, formatter=None, handler=None, level=None):
"""
Returns a new logger for the specified name.
"""
logger = logging.getLogger(name)
#: remove existing handlers
logger.handlers = []
#: use a standard out handler
if handler is None:
handler = logging.StreamHandler(sys.stdout)
#: set the formatter when a formatter is given
if formatter is not None:
handler.setFormatter(formatter)
#: set DEBUG level if no level is specified
if level is None:
level = logging.DEBUG
handler.setLevel(level)
logger.setLevel(level)
logger.addHandler(handler)
return logger | [
"def",
"create_logger",
"(",
"name",
",",
"formatter",
"=",
"None",
",",
"handler",
"=",
"None",
",",
"level",
"=",
"None",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"name",
")",
"#: remove existing handlers",
"logger",
".",
"handlers",
"=",
"[",
"]",
"#: use a standard out handler",
"if",
"handler",
"is",
"None",
":",
"handler",
"=",
"logging",
".",
"StreamHandler",
"(",
"sys",
".",
"stdout",
")",
"#: set the formatter when a formatter is given",
"if",
"formatter",
"is",
"not",
"None",
":",
"handler",
".",
"setFormatter",
"(",
"formatter",
")",
"#: set DEBUG level if no level is specified",
"if",
"level",
"is",
"None",
":",
"level",
"=",
"logging",
".",
"DEBUG",
"handler",
".",
"setLevel",
"(",
"level",
")",
"logger",
".",
"setLevel",
"(",
"level",
")",
"logger",
".",
"addHandler",
"(",
"handler",
")",
"return",
"logger"
] | Returns a new logger for the specified name. | [
"Returns",
"a",
"new",
"logger",
"for",
"the",
"specified",
"name",
"."
] | f930cdc0d520b08238e0fc2c582458f341b87775 | https://github.com/erbriones/shapeshift/blob/f930cdc0d520b08238e0fc2c582458f341b87775/shapeshift/generic.py#L10-L34 |
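Usage of `create_logger` above is straightforward; everything below is stdlib apart from the import itself:

```python
import logging
from shapeshift.generic import create_logger  # assumes the package is importable

log = create_logger("demo", formatter=logging.Formatter("%(levelname)s %(message)s"))
log.debug("visible on stdout at DEBUG level")
```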
251,508 | abe-winter/pg13-py | pg13/diff.py | splitstatus | def splitstatus(a,statusfn):
'split sequence into subsequences based on binary condition statusfn. a is a list, returns list of lists'
groups=[]; mode=None
for elt,status in zip(a,map(statusfn,a)):
assert isinstance(status,bool)
if status!=mode: mode=status; group=[mode]; groups.append(group)
group.append(elt)
return groups | python | def splitstatus(a,statusfn):
'split sequence into subsequences based on binary condition statusfn. a is a list, returns list of lists'
groups=[]; mode=None
for elt,status in zip(a,map(statusfn,a)):
assert isinstance(status,bool)
if status!=mode: mode=status; group=[mode]; groups.append(group)
group.append(elt)
return groups | [
"def",
"splitstatus",
"(",
"a",
",",
"statusfn",
")",
":",
"groups",
"=",
"[",
"]",
"mode",
"=",
"None",
"for",
"elt",
",",
"status",
"in",
"zip",
"(",
"a",
",",
"map",
"(",
"statusfn",
",",
"a",
")",
")",
":",
"assert",
"isinstance",
"(",
"status",
",",
"bool",
")",
"if",
"status",
"!=",
"mode",
":",
"mode",
"=",
"status",
"group",
"=",
"[",
"mode",
"]",
"groups",
".",
"append",
"(",
"group",
")",
"group",
".",
"append",
"(",
"elt",
")",
"return",
"groups"
] | split sequence into subsequences based on binary condition statusfn. a is a list, returns list of lists | [
"split",
"sequence",
"into",
"subsequences",
"based",
"on",
"binary",
"condition",
"statusfn",
".",
"a",
"is",
"a",
"list",
"returns",
"list",
"of",
"lists"
] | c78806f99f35541a8756987e86edca3438aa97f5 | https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/diff.py#L28-L35 |
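A worked example of the grouping above. Note that each group carries its boolean status as the first element:

```python
groups = splitstatus("ab12c", str.isdigit)
# [[False, 'a', 'b'], [True, '1', '2'], [False, 'c']]
```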
251,509 | abe-winter/pg13-py | pg13/diff.py | ungroupslice | def ungroupslice(groups,gslice):
'this is a helper for contigsub.'
'coordinate transform: takes a match from seqingroups() and transforms to ungrouped coordinates'
eltsbefore=0
for i in range(gslice[0]): eltsbefore+=len(groups[i])-1
x=eltsbefore+gslice[1]; return [x-1,x+gslice[2]-1] | python | def ungroupslice(groups,gslice):
'this is a helper for contigsub.'
'coordinate transform: takes a match from seqingroups() and transforms to ungrouped coordinates'
eltsbefore=0
for i in range(gslice[0]): eltsbefore+=len(groups[i])-1
x=eltsbefore+gslice[1]; return [x-1,x+gslice[2]-1] | [
"def",
"ungroupslice",
"(",
"groups",
",",
"gslice",
")",
":",
"'coordinate transform: takes a match from seqingroups() and transforms to ungrouped coordinates'",
"eltsbefore",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"gslice",
"[",
"0",
"]",
")",
":",
"eltsbefore",
"+=",
"len",
"(",
"groups",
"[",
"i",
"]",
")",
"-",
"1",
"x",
"=",
"eltsbefore",
"+",
"gslice",
"[",
"1",
"]",
"return",
"[",
"x",
"-",
"1",
",",
"x",
"+",
"gslice",
"[",
"2",
"]",
"-",
"1",
"]"
] | this is a helper for contigsub. | [
"this",
"is",
"a",
"helper",
"for",
"contigsub",
"."
] | c78806f99f35541a8756987e86edca3438aa97f5 | https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/diff.py#L63-L68 |
251,510 | abe-winter/pg13-py | pg13/diff.py | translate_diff | def translate_diff(origtext,deltas):
'take diff run on separated words and convert the deltas to character offsets'
lens=[0]+cumsum(map(len,splitpreserve(origtext))) # [0] at the head for like 'length before'
return [Delta(lens[a],lens[b],''.join(replace)) for a,b,replace in deltas] | python | def translate_diff(origtext,deltas):
'take diff run on separated words and convert the deltas to character offsets'
lens=[0]+cumsum(map(len,splitpreserve(origtext))) # [0] at the head for like 'length before'
return [Delta(lens[a],lens[b],''.join(replace)) for a,b,replace in deltas] | [
"def",
"translate_diff",
"(",
"origtext",
",",
"deltas",
")",
":",
"lens",
"=",
"[",
"0",
"]",
"+",
"cumsum",
"(",
"map",
"(",
"len",
",",
"splitpreserve",
"(",
"origtext",
")",
")",
")",
"# [0] at the head for like 'length before'\r",
"return",
"[",
"Delta",
"(",
"lens",
"[",
"a",
"]",
",",
"lens",
"[",
"b",
"]",
",",
"''",
".",
"join",
"(",
"replace",
")",
")",
"for",
"a",
",",
"b",
",",
"replace",
"in",
"deltas",
"]"
] | take diff run on separated words and convert the deltas to character offsets | [
"take",
"diff",
"run",
"on",
"separated",
"words",
"and",
"convert",
"the",
"deltas",
"to",
"character",
"offsets"
] | c78806f99f35541a8756987e86edca3438aa97f5 | https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/diff.py#L115-L118 |
251,511 | abe-winter/pg13-py | pg13/diff.py | word_diff | def word_diff(a,b):
'do diff on words but return character offsets'
return translate_diff(a,rediff(splitpreserve(a),splitpreserve(b))) | python | def word_diff(a,b):
'do diff on words but return character offsets'
return translate_diff(a,rediff(splitpreserve(a),splitpreserve(b))) | [
"def",
"word_diff",
"(",
"a",
",",
"b",
")",
":",
"return",
"translate_diff",
"(",
"a",
",",
"rediff",
"(",
"splitpreserve",
"(",
"a",
")",
",",
"splitpreserve",
"(",
"b",
")",
")",
")"
] | do diff on words but return character offsets | [
"do",
"diff",
"on",
"words",
"but",
"return",
"character",
"offsets"
] | c78806f99f35541a8756987e86edca3438aa97f5 | https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/diff.py#L120-L122 |
251,512 | abe-winter/pg13-py | pg13/diff.py | checkdiff | def checkdiff(a,b,sp=True):
'take diff of a to b, apply to a, return the applied diff so external code can check it against b'
if sp: a=splitpreserve(a); b=splitpreserve(b)
res=applydiff(a,rediff(a,b))
if sp: res=''.join(res)
return res | python | def checkdiff(a,b,sp=True):
'take diff of a to b, apply to a, return the applied diff so external code can check it against b'
if sp: a=splitpreserve(a); b=splitpreserve(b)
res=applydiff(a,rediff(a,b))
if sp: res=''.join(res)
return res | [
"def",
"checkdiff",
"(",
"a",
",",
"b",
",",
"sp",
"=",
"True",
")",
":",
"if",
"sp",
":",
"a",
"=",
"splitpreserve",
"(",
"a",
")",
"b",
"=",
"splitpreserve",
"(",
"b",
")",
"res",
"=",
"applydiff",
"(",
"a",
",",
"rediff",
"(",
"a",
",",
"b",
")",
")",
"if",
"sp",
":",
"res",
"=",
"''",
".",
"join",
"(",
"res",
")",
"return",
"res"
] | take diff of a to b, apply to a, return the applied diff so external code can check it against b | [
"take",
"diff",
"of",
"a",
"to",
"b",
"apply",
"to",
"a",
"return",
"the",
"applied",
"diff",
"so",
"external",
"code",
"can",
"check",
"it",
"against",
"b"
] | c78806f99f35541a8756987e86edca3438aa97f5 | https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/diff.py#L124-L129 |
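The docstring of `checkdiff` above describes a round-trip contract; assuming `rediff` and `applydiff` behave as documented elsewhere in this module, it can be exercised like this:

```python
a = "the quick brown fox"
b = "the slow brown dog"
assert checkdiff(a, b) == b  # diff a->b, apply the diff to a, recover b
```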
251,513 | treycucco/bidon | bidon/db/model/foreign_model_wrapper.py | ForeignModelWrapper.create | def create(cls, source, *, transform_args=None):
"""Create an instance of the class from the source. By default cls.transform_args is used, but
can be overridden by passing in transform_args.
"""
if transform_args is None:
transform_args = cls.transform_args
return cls(get_obj(source, *transform_args)) | python | def create(cls, source, *, transform_args=None):
"""Create an instance of the class from the source. By default cls.transform_args is used, but
can be overridden by passing in transform_args.
"""
if transform_args is None:
transform_args = cls.transform_args
return cls(get_obj(source, *transform_args)) | [
"def",
"create",
"(",
"cls",
",",
"source",
",",
"*",
",",
"transform_args",
"=",
"None",
")",
":",
"if",
"transform_args",
"is",
"None",
":",
"transform_args",
"=",
"cls",
".",
"transform_args",
"return",
"cls",
"(",
"get_obj",
"(",
"source",
",",
"*",
"transform_args",
")",
")"
] | Create an instance of the class from the source. By default cls.transform_args is used, but
can be overridden by passing in transform_args. | [
"Create",
"an",
"instance",
"of",
"the",
"class",
"from",
"the",
"source",
".",
"By",
"default",
"cls",
".",
"transform_args",
"is",
"used",
"but",
"can",
"be",
"overridden",
"by",
"passing",
"in",
"transform_args",
"."
] | d9f24596841d0e69e8ac70a1d1a1deecea95e340 | https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/db/model/foreign_model_wrapper.py#L20-L27 |
251,514 | treycucco/bidon | bidon/db/model/foreign_model_wrapper.py | ForeignModelWrapper.map | def map(cls, sources, *, transform_args=None):
"""Generates instances from the sources using either cls.transform_args or transform_args
argument if present.
"""
for idx, source in enumerate(sources):
try:
yield cls.create(source, transform_args=transform_args)
except Exception as ex:
raise Exception("An error occurred with item {0}".format(idx)) from ex | python | def map(cls, sources, *, transform_args=None):
"""Generates instances from the sources using either cls.transform_args or transform_args
argument if present.
"""
for idx, source in enumerate(sources):
try:
yield cls.create(source, transform_args=transform_args)
except Exception as ex:
raise Exception("An error occurred with item {0}".format(idx)) from ex | [
"def",
"map",
"(",
"cls",
",",
"sources",
",",
"*",
",",
"transform_args",
"=",
"None",
")",
":",
"for",
"idx",
",",
"source",
"in",
"enumerate",
"(",
"sources",
")",
":",
"try",
":",
"yield",
"cls",
".",
"create",
"(",
"source",
",",
"transform_args",
"=",
"transform_args",
")",
"except",
"Exception",
"as",
"ex",
":",
"raise",
"Exception",
"(",
"\"An error occurred with item {0}\"",
".",
"format",
"(",
"idx",
")",
")",
"from",
"ex"
] | Generates instances from the sources using either cls.transform_args or transform_args
argument if present. | [
"Generates",
"instances",
"from",
"the",
"sources",
"using",
"either",
"cls",
".",
"transform_args",
"or",
"transform_args",
"argument",
"if",
"present",
"."
] | d9f24596841d0e69e8ac70a1d1a1deecea95e340 | https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/db/model/foreign_model_wrapper.py#L30-L38 |
251,515 | minhhoit/yacms | yacms/generic/models.py | ThreadedComment.save | def save(self, *args, **kwargs):
"""
Set the current site ID, and ``is_public`` based on the setting
``COMMENTS_DEFAULT_APPROVED``.
"""
if not self.id:
self.is_public = settings.COMMENTS_DEFAULT_APPROVED
self.site_id = current_site_id()
super(ThreadedComment, self).save(*args, **kwargs) | python | def save(self, *args, **kwargs):
"""
Set the current site ID, and ``is_public`` based on the setting
``COMMENTS_DEFAULT_APPROVED``.
"""
if not self.id:
self.is_public = settings.COMMENTS_DEFAULT_APPROVED
self.site_id = current_site_id()
super(ThreadedComment, self).save(*args, **kwargs) | [
"def",
"save",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"id",
":",
"self",
".",
"is_public",
"=",
"settings",
".",
"COMMENTS_DEFAULT_APPROVED",
"self",
".",
"site_id",
"=",
"current_site_id",
"(",
")",
"super",
"(",
"ThreadedComment",
",",
"self",
")",
".",
"save",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Set the current site ID, and ``is_public`` based on the setting
``COMMENTS_DEFAULT_APPROVED``. | [
"Set",
"the",
"current",
"site",
"ID",
"and",
"is_public",
"based",
"on",
"the",
"setting",
"COMMENTS_DEFAULT_APPROVED",
"."
] | 2921b706b7107c6e8c5f2bbf790ff11f85a2167f | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/generic/models.py#L50-L58 |
251,516 | minhhoit/yacms | yacms/generic/models.py | Rating.save | def save(self, *args, **kwargs):
"""
Validate that the rating falls between the min and max values.
"""
valid = map(str, settings.RATINGS_RANGE)
if str(self.value) not in valid:
raise ValueError("Invalid rating. %s is not in %s" % (self.value,
", ".join(valid)))
super(Rating, self).save(*args, **kwargs) | python | def save(self, *args, **kwargs):
"""
Validate that the rating falls between the min and max values.
"""
valid = map(str, settings.RATINGS_RANGE)
if str(self.value) not in valid:
raise ValueError("Invalid rating. %s is not in %s" % (self.value,
", ".join(valid)))
super(Rating, self).save(*args, **kwargs) | [
"def",
"save",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"valid",
"=",
"map",
"(",
"str",
",",
"settings",
".",
"RATINGS_RANGE",
")",
"if",
"str",
"(",
"self",
".",
"value",
")",
"not",
"in",
"valid",
":",
"raise",
"ValueError",
"(",
"\"Invalid rating. %s is not in %s\"",
"%",
"(",
"self",
".",
"value",
",",
"\", \"",
".",
"join",
"(",
"valid",
")",
")",
")",
"super",
"(",
"Rating",
",",
"self",
")",
".",
"save",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Validate that the rating falls between the min and max values. | [
"Validate",
"that",
"the",
"rating",
"falls",
"between",
"the",
"min",
"and",
"max",
"values",
"."
] | 2921b706b7107c6e8c5f2bbf790ff11f85a2167f | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/generic/models.py#L140-L148 |
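A hypothetical illustration of the validation in `Rating.save` above; the concrete `RATINGS_RANGE` value is an assumption:

```python
# Assuming settings.RATINGS_RANGE = range(1, 6):
Rating(value=3).save()  # passes validation
Rating(value=9).save()  # ValueError: Invalid rating. 9 is not in 1, 2, 3, 4, 5
```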
251,517 | kaniblu/pyaap | yaap/__init__.py | ArgParser.add_mutex_switch | def add_mutex_switch(parser, dest, arguments=set(), default=None,
single_arg=False, required=False):
"""Adds mutually exclusive switch arguments.
Args:
arguments: a dictionary that maps switch name to helper text. Use
sets to skip help texts.
"""
if default is not None:
assert default in arguments
if isinstance(arguments, set):
arguments = {k: None for k in arguments}
if not single_arg:
mg = parser.add_mutually_exclusive_group(required=required)
for name, help_text in arguments.items():
kwargs = {
"action": "store_const",
"dest": dest,
"const": name,
"help": help_text
}
if default == name:
kwargs["default"] = name
mg.add_argument("--{}".format(name), **kwargs)
return mg
else:
kwargs = {
"dest": dest,
"type": str,
"default": default,
"help": "\n".join("{}: {}".format(k, v)
for k, v in arguments.items()),
"choices": list(arguments.keys())
}
return parser.add_argument("--{}".format(dest), **kwargs) | python | def add_mutex_switch(parser, dest, arguments=set(), default=None,
single_arg=False, required=False):
"""Adds mutually exclusive switch arguments.
Args:
arguments: a dictionary that maps switch name to helper text. Use
sets to skip help texts.
"""
if default is not None:
assert default in arguments
if isinstance(arguments, set):
arguments = {k: None for k in arguments}
if not single_arg:
mg = parser.add_mutually_exclusive_group(required=required)
for name, help_text in arguments.items():
kwargs = {
"action": "store_const",
"dest": dest,
"const": name,
"help": help_text
}
if default == name:
kwargs["default"] = name
mg.add_argument("--{}".format(name), **kwargs)
return mg
else:
kwargs = {
"dest": dest,
"type": str,
"default": default,
"help": "\n".join("{}: {}".format(k, v)
for k, v in arguments.items()),
"choices": list(arguments.keys())
}
return parser.add_argument("--{}".format(dest), **kwargs) | [
"def",
"add_mutex_switch",
"(",
"parser",
",",
"dest",
",",
"arguments",
"=",
"set",
"(",
")",
",",
"default",
"=",
"None",
",",
"single_arg",
"=",
"False",
",",
"required",
"=",
"False",
")",
":",
"if",
"default",
"is",
"not",
"None",
":",
"assert",
"default",
"in",
"arguments",
"if",
"isinstance",
"(",
"arguments",
",",
"set",
")",
":",
"arguments",
"=",
"{",
"k",
":",
"None",
"for",
"k",
"in",
"arguments",
"}",
"if",
"not",
"single_arg",
":",
"mg",
"=",
"parser",
".",
"add_mutually_exclusive_group",
"(",
"required",
"=",
"required",
")",
"for",
"name",
",",
"help_text",
"in",
"arguments",
".",
"items",
"(",
")",
":",
"kwargs",
"=",
"{",
"\"action\"",
":",
"\"store_const\"",
",",
"\"dest\"",
":",
"dest",
",",
"\"const\"",
":",
"name",
",",
"\"help\"",
":",
"help_text",
"}",
"if",
"default",
"==",
"name",
":",
"kwargs",
"[",
"\"default\"",
"]",
"=",
"name",
"mg",
".",
"add_argument",
"(",
"\"--{}\"",
".",
"format",
"(",
"name",
")",
",",
"*",
"*",
"kwargs",
")",
"return",
"mg",
"else",
":",
"kwargs",
"=",
"{",
"\"dest\"",
":",
"dest",
",",
"\"type\"",
":",
"str",
",",
"\"default\"",
":",
"default",
",",
"\"help\"",
":",
"\"\\n\"",
".",
"join",
"(",
"\"{}: {}\"",
".",
"format",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"arguments",
".",
"items",
"(",
")",
")",
",",
"\"choices\"",
":",
"list",
"(",
"arguments",
".",
"keys",
"(",
")",
")",
"}",
"return",
"parser",
".",
"add_argument",
"(",
"\"--{}\"",
".",
"format",
"(",
"dest",
")",
",",
"*",
"*",
"kwargs",
")"
] | Adds mutually exclusive switch arguments.
Args:
arguments: a dictionary that maps switch name to helper text. Use
sets to skip help texts. | [
"Adds",
"mutually",
"exclusive",
"switch",
"arguments",
"."
] | fbf8370a49f86b160009ddf30f30f22bb4aba9b9 | https://github.com/kaniblu/pyaap/blob/fbf8370a49f86b160009ddf30f30f22bb4aba9b9/yaap/__init__.py#L68-L110 |
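A usage sketch for the helper above, with two invented switch names. The qualified name in this row suggests it is exposed as a static method on `ArgParser`, which the sketch assumes:

```python
import argparse
from yaap import ArgParser  # assumed import location

parser = argparse.ArgumentParser()
ArgParser.add_mutex_switch(parser, dest="mode",
                           arguments={"train": "fit the model",
                                      "test": "evaluate it"},
                           default="train")
print(parser.parse_args([]).mode)          # 'train' (the default)
print(parser.parse_args(["--test"]).mode)  # 'test'
```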
251,518 | sci-bots/mpm | mpm/api.py | _save_action | def _save_action(extra_context=None):
'''
Save list of revisions revisions for active Conda environment.
.. versionchanged:: 0.18
Compress action revision files using ``bz2`` to save disk space.
Parameters
----------
extra_context : dict, optional
Extra content to store in stored action revision.
Returns
-------
path_helpers.path, dict
Path to which action was written and action object, including list of
revisions for active Conda environment.
'''
# Get list of revisions to Conda environment since creation.
revisions_js = ch.conda_exec('list', '--revisions', '--json',
verbose=False)
revisions = json.loads(revisions_js)
# Save list of revisions to `/etc/microdrop/plugins/actions/rev<rev>.json`
# See [wheeler-microfluidics/microdrop#200][i200].
#
# [i200]: https://github.com/wheeler-microfluidics/microdrop/issues/200
action = extra_context.copy() if extra_context else {}
action['revisions'] = revisions
action_path = (MICRODROP_CONDA_ACTIONS
.joinpath('rev{}.json.bz2'.format(revisions[-1]['rev'])))
action_path.parent.makedirs_p()
# Compress action file using bz2 to save disk space.
with bz2.BZ2File(action_path, mode='w') as output:
json.dump(action, output, indent=2)
return action_path, action | python | def _save_action(extra_context=None):
'''
Save list of revisions revisions for active Conda environment.
.. versionchanged:: 0.18
Compress action revision files using ``bz2`` to save disk space.
Parameters
----------
extra_context : dict, optional
Extra content to store in stored action revision.
Returns
-------
path_helpers.path, dict
Path to which action was written and action object, including list of
revisions for active Conda environment.
'''
# Get list of revisions to Conda environment since creation.
revisions_js = ch.conda_exec('list', '--revisions', '--json',
verbose=False)
revisions = json.loads(revisions_js)
# Save list of revisions to `/etc/microdrop/plugins/actions/rev<rev>.json`
# See [wheeler-microfluidics/microdrop#200][i200].
#
# [i200]: https://github.com/wheeler-microfluidics/microdrop/issues/200
action = extra_context.copy() if extra_context else {}
action['revisions'] = revisions
action_path = (MICRODROP_CONDA_ACTIONS
.joinpath('rev{}.json.bz2'.format(revisions[-1]['rev'])))
action_path.parent.makedirs_p()
# Compress action file using bz2 to save disk space.
with bz2.BZ2File(action_path, mode='w') as output:
json.dump(action, output, indent=2)
return action_path, action | [
"def",
"_save_action",
"(",
"extra_context",
"=",
"None",
")",
":",
"# Get list of revisions to Conda environment since creation.",
"revisions_js",
"=",
"ch",
".",
"conda_exec",
"(",
"'list'",
",",
"'--revisions'",
",",
"'--json'",
",",
"verbose",
"=",
"False",
")",
"revisions",
"=",
"json",
".",
"loads",
"(",
"revisions_js",
")",
"# Save list of revisions to `/etc/microdrop/plugins/actions/rev<rev>.json`",
"# See [wheeler-microfluidics/microdrop#200][i200].",
"#",
"# [i200]: https://github.com/wheeler-microfluidics/microdrop/issues/200",
"action",
"=",
"extra_context",
".",
"copy",
"(",
")",
"if",
"extra_context",
"else",
"{",
"}",
"action",
"[",
"'revisions'",
"]",
"=",
"revisions",
"action_path",
"=",
"(",
"MICRODROP_CONDA_ACTIONS",
".",
"joinpath",
"(",
"'rev{}.json.bz2'",
".",
"format",
"(",
"revisions",
"[",
"-",
"1",
"]",
"[",
"'rev'",
"]",
")",
")",
")",
"action_path",
".",
"parent",
".",
"makedirs_p",
"(",
")",
"# Compress action file using bz2 to save disk space.",
"with",
"bz2",
".",
"BZ2File",
"(",
"action_path",
",",
"mode",
"=",
"'w'",
")",
"as",
"output",
":",
"json",
".",
"dump",
"(",
"action",
",",
"output",
",",
"indent",
"=",
"2",
")",
"return",
"action_path",
",",
"action"
] | Save list of revisions revisions for active Conda environment.
.. versionchanged:: 0.18
Compress action revision files using ``bz2`` to save disk space.
Parameters
----------
extra_context : dict, optional
Extra content to store in stored action revision.
Returns
-------
path_helpers.path, dict
Path to which action was written and action object, including list of
revisions for active Conda environment. | [
"Save",
"list",
"of",
"revisions",
"revisions",
"for",
"active",
"Conda",
"environment",
"."
] | a69651cda4b37ee6b17df4fe0809249e7f4dc536 | https://github.com/sci-bots/mpm/blob/a69651cda4b37ee6b17df4fe0809249e7f4dc536/mpm/api.py#L57-L91 |
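The persistence step in `_save_action` above reduces to a compressed JSON write. A standalone sketch in Python 2 style, matching the row (on Python 3, `json.dump` needs a text-mode wrapper around the binary `BZ2File`):

```python
import bz2
import json

action = {"revisions": [{"rev": 3}]}
# Compress the action file with bz2 to save disk space, as in the row above.
with bz2.BZ2File("rev3.json.bz2", mode="w") as output:
    json.dump(action, output, indent=2)
```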
251,519 | sci-bots/mpm | mpm/api.py | available_packages | def available_packages(*args, **kwargs):
'''
Query available plugin packages based on specified Conda channels.
Parameters
----------
*args
Extra arguments to pass to Conda ``search`` command.
Returns
-------
dict
.. versionchanged:: 0.24
All Conda packages beginning with ``microdrop.`` prefix from all
configured channels.
Each *key* corresponds to a package name.
Each *value* corresponds to a ``list`` of dictionaries, each
corresponding to an available version of the respective package.
For example:
{
"microdrop.dmf-device-ui-plugin": [
...
{
...
"build_number": 0,
"channel": "microdrop-plugins",
"installed": true,
"license": "BSD",
"name": "microdrop.dmf-device-ui-plugin",
"size": 62973,
"version": "2.1.post2",
...
},
...],
...
}
'''
# Get list of available MicroDrop plugins, i.e., Conda packages that start
# with the prefix `microdrop.`.
try:
plugin_packages_info_json = ch.conda_exec('search', '--json',
'^microdrop\.', verbose=False)
return json.loads(plugin_packages_info_json)
except RuntimeError, exception:
if 'CondaHTTPError' in str(exception):
logger.warning('Could not connect to Conda server.')
else:
logger.warning('Error querying available MicroDrop plugins.',
exc_info=True)
except Exception, exception:
logger.warning('Error querying available MicroDrop plugins.',
exc_info=True)
return {} | python | def available_packages(*args, **kwargs):
'''
Query available plugin packages based on specified Conda channels.
Parameters
----------
*args
Extra arguments to pass to Conda ``search`` command.
Returns
-------
dict
.. versionchanged:: 0.24
All Conda packages beginning with ``microdrop.`` prefix from all
configured channels.
Each *key* corresponds to a package name.
Each *value* corresponds to a ``list`` of dictionaries, each
corresponding to an available version of the respective package.
For example:
{
"microdrop.dmf-device-ui-plugin": [
...
{
...
"build_number": 0,
"channel": "microdrop-plugins",
"installed": true,
"license": "BSD",
"name": "microdrop.dmf-device-ui-plugin",
"size": 62973,
"version": "2.1.post2",
...
},
...],
...
}
'''
# Get list of available MicroDrop plugins, i.e., Conda packages that start
# with the prefix `microdrop.`.
try:
plugin_packages_info_json = ch.conda_exec('search', '--json',
'^microdrop\.', verbose=False)
return json.loads(plugin_packages_info_json)
except RuntimeError, exception:
if 'CondaHTTPError' in str(exception):
logger.warning('Could not connect to Conda server.')
else:
logger.warning('Error querying available MicroDrop plugins.',
exc_info=True)
except Exception, exception:
logger.warning('Error querying available MicroDrop plugins.',
exc_info=True)
return {} | [
"def",
"available_packages",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Get list of available MicroDrop plugins, i.e., Conda packages that start",
"# with the prefix `microdrop.`.",
"try",
":",
"plugin_packages_info_json",
"=",
"ch",
".",
"conda_exec",
"(",
"'search'",
",",
"'--json'",
",",
"'^microdrop\\.'",
",",
"verbose",
"=",
"False",
")",
"return",
"json",
".",
"loads",
"(",
"plugin_packages_info_json",
")",
"except",
"RuntimeError",
",",
"exception",
":",
"if",
"'CondaHTTPError'",
"in",
"str",
"(",
"exception",
")",
":",
"logger",
".",
"warning",
"(",
"'Could not connect to Conda server.'",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"'Error querying available MicroDrop plugins.'",
",",
"exc_info",
"=",
"True",
")",
"except",
"Exception",
",",
"exception",
":",
"logger",
".",
"warning",
"(",
"'Error querying available MicroDrop plugins.'",
",",
"exc_info",
"=",
"True",
")",
"return",
"{",
"}"
] | Query available plugin packages based on specified Conda channels.
Parameters
----------
*args
Extra arguments to pass to Conda ``search`` command.
Returns
-------
dict
.. versionchanged:: 0.24
All Conda packages beginning with ``microdrop.`` prefix from all
configured channels.
Each *key* corresponds to a package name.
Each *value* corresponds to a ``list`` of dictionaries, each
corresponding to an available version of the respective package.
For example:
{
"microdrop.dmf-device-ui-plugin": [
...
{
...
"build_number": 0,
"channel": "microdrop-plugins",
"installed": true,
"license": "BSD",
"name": "microdrop.dmf-device-ui-plugin",
"size": 62973,
"version": "2.1.post2",
...
},
...],
...
} | [
"Query",
"available",
"plugin",
"packages",
"based",
"on",
"specified",
"Conda",
"channels",
"."
] | a69651cda4b37ee6b17df4fe0809249e7f4dc536 | https://github.com/sci-bots/mpm/blob/a69651cda4b37ee6b17df4fe0809249e7f4dc536/mpm/api.py#L136-L192 |
251,520 | sci-bots/mpm | mpm/api.py | install | def install(plugin_name, *args, **kwargs):
'''
Install plugin packages based on specified Conda channels.
.. versionchanged:: 0.19.1
Do not save rollback info on dry-run.
.. versionchanged:: 0.24
Remove channels argument. Use Conda channels as configured in Conda
environment.
Note that channels can still be explicitly set through :data:`*args`.
Parameters
----------
plugin_name : str or list
Plugin package(s) to install.
Version specifiers are also supported, e.g., ``package >=1.0.5``.
*args
Extra arguments to pass to Conda ``install`` command.
Returns
-------
dict
Conda installation log object (from JSON Conda install output).
'''
if isinstance(plugin_name, types.StringTypes):
plugin_name = [plugin_name]
# Perform installation
conda_args = (['install', '-y', '--json'] + list(args) + plugin_name)
install_log_js = ch.conda_exec(*conda_args, verbose=False)
install_log = json.loads(install_log_js.split('\x00')[-1])
if 'actions' in install_log and not install_log.get('dry_run'):
# Install command modified Conda environment.
_save_action({'conda_args': conda_args, 'install_log': install_log})
logger.debug('Installed plugin(s): ```%s```', install_log['actions'])
return install_log | python | def install(plugin_name, *args, **kwargs):
'''
Install plugin packages based on specified Conda channels.
.. versionchanged:: 0.19.1
Do not save rollback info on dry-run.
.. versionchanged:: 0.24
Remove channels argument. Use Conda channels as configured in Conda
environment.
Note that channels can still be explicitly set through :data:`*args`.
Parameters
----------
plugin_name : str or list
Plugin package(s) to install.
Version specifiers are also supported, e.g., ``package >=1.0.5``.
*args
Extra arguments to pass to Conda ``install`` command.
Returns
-------
dict
Conda installation log object (from JSON Conda install output).
'''
if isinstance(plugin_name, types.StringTypes):
plugin_name = [plugin_name]
# Perform installation
conda_args = (['install', '-y', '--json'] + list(args) + plugin_name)
install_log_js = ch.conda_exec(*conda_args, verbose=False)
install_log = json.loads(install_log_js.split('\x00')[-1])
if 'actions' in install_log and not install_log.get('dry_run'):
# Install command modified Conda environment.
_save_action({'conda_args': conda_args, 'install_log': install_log})
logger.debug('Installed plugin(s): ```%s```', install_log['actions'])
return install_log | [
"def",
"install",
"(",
"plugin_name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"plugin_name",
",",
"types",
".",
"StringTypes",
")",
":",
"plugin_name",
"=",
"[",
"plugin_name",
"]",
"# Perform installation",
"conda_args",
"=",
"(",
"[",
"'install'",
",",
"'-y'",
",",
"'--json'",
"]",
"+",
"list",
"(",
"args",
")",
"+",
"plugin_name",
")",
"install_log_js",
"=",
"ch",
".",
"conda_exec",
"(",
"*",
"conda_args",
",",
"verbose",
"=",
"False",
")",
"install_log",
"=",
"json",
".",
"loads",
"(",
"install_log_js",
".",
"split",
"(",
"'\\x00'",
")",
"[",
"-",
"1",
"]",
")",
"if",
"'actions'",
"in",
"install_log",
"and",
"not",
"install_log",
".",
"get",
"(",
"'dry_run'",
")",
":",
"# Install command modified Conda environment.",
"_save_action",
"(",
"{",
"'conda_args'",
":",
"conda_args",
",",
"'install_log'",
":",
"install_log",
"}",
")",
"logger",
".",
"debug",
"(",
"'Installed plugin(s): ```%s```'",
",",
"install_log",
"[",
"'actions'",
"]",
")",
"return",
"install_log"
] | Install plugin packages based on specified Conda channels.
.. versionchanged:: 0.19.1
Do not save rollback info on dry-run.
.. versionchanged:: 0.24
Remove channels argument. Use Conda channels as configured in Conda
environment.
Note that channels can still be explicitly set through :data:`*args`.
Parameters
----------
plugin_name : str or list
Plugin package(s) to install.
Version specifiers are also supported, e.g., ``package >=1.0.5``.
*args
Extra arguments to pass to Conda ``install`` command.
Returns
-------
dict
Conda installation log object (from JSON Conda install output). | [
"Install",
"plugin",
"packages",
"based",
"on",
"specified",
"Conda",
"channels",
"."
] | a69651cda4b37ee6b17df4fe0809249e7f4dc536 | https://github.com/sci-bots/mpm/blob/a69651cda4b37ee6b17df4fe0809249e7f4dc536/mpm/api.py#L196-L234 |
251,521 | sci-bots/mpm | mpm/api.py | uninstall | def uninstall(plugin_name, *args):
'''
Uninstall plugin packages.
Plugin packages must have a directory with the same name as the package in
the following directory:
<conda prefix>/share/microdrop/plugins/available/
Parameters
----------
plugin_name : str or list
Plugin package(s) to uninstall.
*args
Extra arguments to pass to Conda ``uninstall`` command.
Returns
-------
dict
Conda uninstallation log object (from JSON Conda uninstall output).
'''
if isinstance(plugin_name, types.StringTypes):
plugin_name = [plugin_name]
available_path = MICRODROP_CONDA_SHARE.joinpath('plugins', 'available')
for name_i in plugin_name:
plugin_module_i = name_i.split('.')[-1].replace('-', '_')
plugin_path_i = available_path.joinpath(plugin_module_i)
if not _islinklike(plugin_path_i) and not plugin_path_i.isdir():
raise IOError('Plugin `{}` not found in `{}`'
.format(name_i, available_path))
else:
logging.debug('[uninstall] Found plugin `%s`', plugin_path_i)
# Perform uninstall operation.
conda_args = ['uninstall', '--json', '-y'] + list(args) + plugin_name
uninstall_log_js = ch.conda_exec(*conda_args, verbose=False)
# Remove broken links in `<conda prefix>/etc/microdrop/plugins/enabled/`,
# since uninstall may have made one or more packages unavailable.
_remove_broken_links()
logger.debug('Uninstalled plugins: ```%s```', plugin_name)
return json.loads(uninstall_log_js.split('\x00')[-1]) | python | def uninstall(plugin_name, *args):
'''
Uninstall plugin packages.
Plugin packages must have a directory with the same name as the package in
the following directory:
<conda prefix>/share/microdrop/plugins/available/
Parameters
----------
plugin_name : str or list
Plugin package(s) to uninstall.
*args
Extra arguments to pass to Conda ``uninstall`` command.
Returns
-------
dict
Conda uninstallation log object (from JSON Conda uninstall output).
'''
if isinstance(plugin_name, types.StringTypes):
plugin_name = [plugin_name]
available_path = MICRODROP_CONDA_SHARE.joinpath('plugins', 'available')
for name_i in plugin_name:
plugin_module_i = name_i.split('.')[-1].replace('-', '_')
plugin_path_i = available_path.joinpath(plugin_module_i)
if not _islinklike(plugin_path_i) and not plugin_path_i.isdir():
raise IOError('Plugin `{}` not found in `{}`'
.format(name_i, available_path))
else:
logging.debug('[uninstall] Found plugin `%s`', plugin_path_i)
# Perform uninstall operation.
conda_args = ['uninstall', '--json', '-y'] + list(args) + plugin_name
uninstall_log_js = ch.conda_exec(*conda_args, verbose=False)
# Remove broken links in `<conda prefix>/etc/microdrop/plugins/enabled/`,
# since uninstall may have made one or more packages unavailable.
_remove_broken_links()
logger.debug('Uninstalled plugins: ```%s```', plugin_name)
return json.loads(uninstall_log_js.split('\x00')[-1]) | [
"def",
"uninstall",
"(",
"plugin_name",
",",
"*",
"args",
")",
":",
"if",
"isinstance",
"(",
"plugin_name",
",",
"types",
".",
"StringTypes",
")",
":",
"plugin_name",
"=",
"[",
"plugin_name",
"]",
"available_path",
"=",
"MICRODROP_CONDA_SHARE",
".",
"joinpath",
"(",
"'plugins'",
",",
"'available'",
")",
"for",
"name_i",
"in",
"plugin_name",
":",
"plugin_module_i",
"=",
"name_i",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
"plugin_path_i",
"=",
"available_path",
".",
"joinpath",
"(",
"plugin_module_i",
")",
"if",
"not",
"_islinklike",
"(",
"plugin_path_i",
")",
"and",
"not",
"plugin_path_i",
".",
"isdir",
"(",
")",
":",
"raise",
"IOError",
"(",
"'Plugin `{}` not found in `{}`'",
".",
"format",
"(",
"name_i",
",",
"available_path",
")",
")",
"else",
":",
"logging",
".",
"debug",
"(",
"'[uninstall] Found plugin `%s`'",
",",
"plugin_path_i",
")",
"# Perform uninstall operation.",
"conda_args",
"=",
"[",
"'uninstall'",
",",
"'--json'",
",",
"'-y'",
"]",
"+",
"list",
"(",
"args",
")",
"+",
"plugin_name",
"uninstall_log_js",
"=",
"ch",
".",
"conda_exec",
"(",
"*",
"conda_args",
",",
"verbose",
"=",
"False",
")",
"# Remove broken links in `<conda prefix>/etc/microdrop/plugins/enabled/`,",
"# since uninstall may have made one or more packages unavailable.",
"_remove_broken_links",
"(",
")",
"logger",
".",
"debug",
"(",
"'Uninstalled plugins: ```%s```'",
",",
"plugin_name",
")",
"return",
"json",
".",
"loads",
"(",
"uninstall_log_js",
".",
"split",
"(",
"'\\x00'",
")",
"[",
"-",
"1",
"]",
")"
] | Uninstall plugin packages.
Plugin packages must have a directory with the same name as the package in
the following directory:
<conda prefix>/share/microdrop/plugins/available/
Parameters
----------
plugin_name : str or list
Plugin package(s) to uninstall.
*args
Extra arguments to pass to Conda ``uninstall`` command.
Returns
-------
dict
Conda uninstallation log object (from JSON Conda uninstall output). | [
"Uninstall",
"plugin",
"packages",
"."
] | a69651cda4b37ee6b17df4fe0809249e7f4dc536 | https://github.com/sci-bots/mpm/blob/a69651cda4b37ee6b17df4fe0809249e7f4dc536/mpm/api.py#L304-L345 |
251,522 | sci-bots/mpm | mpm/api.py | import_plugin | def import_plugin(package_name, include_available=False):
'''
Import MicroDrop plugin.
Parameters
----------
package_name : str
Name of MicroDrop plugin Conda package.
include_available : bool, optional
If ``True``, import from all available plugins (not just **enabled**
ones).
By default, only the ``<conda>/etc/microdrop/plugins/enabled``
directory is added to the Python import paths (if necessary).
If ``True``, also add the ``<conda>/share/microdrop/plugins/available``
directory to the Python import paths.
Returns
-------
module
Imported plugin module.
'''
available_plugins_dir = MICRODROP_CONDA_SHARE.joinpath('plugins',
'available')
enabled_plugins_dir = MICRODROP_CONDA_ETC.joinpath('plugins', 'enabled')
search_paths = [enabled_plugins_dir]
if include_available:
search_paths += [available_plugins_dir]
for dir_i in search_paths:
if dir_i not in sys.path:
sys.path.insert(0, dir_i)
module_name = package_name.split('.')[-1].replace('-', '_')
return importlib.import_module(module_name) | python | def import_plugin(package_name, include_available=False):
'''
Import MicroDrop plugin.
Parameters
----------
package_name : str
Name of MicroDrop plugin Conda package.
include_available : bool, optional
If ``True``, import from all available plugins (not just **enabled**
ones).
By default, only the ``<conda>/etc/microdrop/plugins/enabled``
directory is added to the Python import paths (if necessary).
If ``True``, also add the ``<conda>/share/microdrop/plugins/available``
directory to the Python import paths.
Returns
-------
module
Imported plugin module.
'''
available_plugins_dir = MICRODROP_CONDA_SHARE.joinpath('plugins',
'available')
enabled_plugins_dir = MICRODROP_CONDA_ETC.joinpath('plugins', 'enabled')
search_paths = [enabled_plugins_dir]
if include_available:
search_paths += [available_plugins_dir]
for dir_i in search_paths:
if dir_i not in sys.path:
sys.path.insert(0, dir_i)
module_name = package_name.split('.')[-1].replace('-', '_')
return importlib.import_module(module_name) | [
"def",
"import_plugin",
"(",
"package_name",
",",
"include_available",
"=",
"False",
")",
":",
"available_plugins_dir",
"=",
"MICRODROP_CONDA_SHARE",
".",
"joinpath",
"(",
"'plugins'",
",",
"'available'",
")",
"enabled_plugins_dir",
"=",
"MICRODROP_CONDA_ETC",
".",
"joinpath",
"(",
"'plugins'",
",",
"'enabled'",
")",
"search_paths",
"=",
"[",
"enabled_plugins_dir",
"]",
"if",
"include_available",
":",
"search_paths",
"+=",
"[",
"available_plugins_dir",
"]",
"for",
"dir_i",
"in",
"search_paths",
":",
"if",
"dir_i",
"not",
"in",
"sys",
".",
"path",
":",
"sys",
".",
"path",
".",
"insert",
"(",
"0",
",",
"dir_i",
")",
"module_name",
"=",
"package_name",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
"return",
"importlib",
".",
"import_module",
"(",
"module_name",
")"
] | Import MicroDrop plugin.
Parameters
----------
package_name : str
Name of MicroDrop plugin Conda package.
include_available : bool, optional
If ``True``, import from all available plugins (not just **enabled**
ones).
By default, only the ``<conda>/etc/microdrop/plugins/enabled``
directory is added to the Python import paths (if necessary).
If ``True``, also add the ``<conda>/share/microdrop/plugins/available``
directory to the Python import paths.
Returns
-------
module
Imported plugin module. | [
"Import",
"MicroDrop",
"plugin",
"."
] | a69651cda4b37ee6b17df4fe0809249e7f4dc536 | https://github.com/sci-bots/mpm/blob/a69651cda4b37ee6b17df4fe0809249e7f4dc536/mpm/api.py#L542-L575 |
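A minimal usage sketch for import_plugin above; the plugin package name is hypothetical, and include_available=True also searches packages that are installed but not yet enabled:
# Hypothetical package name; the import resolves to module 'dropbot_plugin'
plugin = import_plugin('microdrop.dropbot-plugin', include_available=True)
print(plugin.__name__)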
251,523 | alexhayes/django-toolkit | django_toolkit/email.py | EmailMultiRelated.attach_related | def attach_related(self, filename=None, content=None, mimetype=None):
"""
Attaches a file with the given filename and content. The filename can
be omitted and the mimetype is guessed, if not provided.
If the first parameter is a MIMEBase subclass it is inserted directly
into the resulting message attachments.
"""
if isinstance(filename, MIMEBase):
assert content == mimetype == None
self.related_attachments.append(filename)
else:
assert content is not None
self.related_attachments.append((filename, content, mimetype)) | python | def attach_related(self, filename=None, content=None, mimetype=None):
"""
Attaches a file with the given filename and content. The filename can
be omitted and the mimetype is guessed, if not provided.
If the first parameter is a MIMEBase subclass it is inserted directly
into the resulting message attachments.
"""
if isinstance(filename, MIMEBase):
assert content == mimetype == None
self.related_attachments.append(filename)
else:
assert content is not None
self.related_attachments.append((filename, content, mimetype)) | [
"def",
"attach_related",
"(",
"self",
",",
"filename",
"=",
"None",
",",
"content",
"=",
"None",
",",
"mimetype",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"filename",
",",
"MIMEBase",
")",
":",
"assert",
"content",
"==",
"mimetype",
"==",
"None",
"self",
".",
"related_attachments",
".",
"append",
"(",
"filename",
")",
"else",
":",
"assert",
"content",
"is",
"not",
"None",
"self",
".",
"related_attachments",
".",
"append",
"(",
"(",
"filename",
",",
"content",
",",
"mimetype",
")",
")"
] | Attaches a file with the given filename and content. The filename can
be omitted and the mimetype is guessed, if not provided.
If the first parameter is a MIMEBase subclass it is inserted directly
into the resulting message attachments. | [
"Attaches",
"a",
"file",
"with",
"the",
"given",
"filename",
"and",
"content",
".",
"The",
"filename",
"can",
"be",
"omitted",
"and",
"the",
"mimetype",
"is",
"guessed",
"if",
"not",
"provided",
"."
] | b64106392fad596defc915b8235fe6e1d0013b5b | https://github.com/alexhayes/django-toolkit/blob/b64106392fad596defc915b8235fe6e1d0013b5b/django_toolkit/email.py#L24-L37 |
251,524 | alexhayes/django-toolkit | django_toolkit/email.py | EmailMultiRelated.attach_related_file | def attach_related_file(self, path, mimetype=None):
"""Attaches a file from the filesystem."""
filename = os.path.basename(path)
content = open(path, 'rb').read()
self.attach_related(filename, content, mimetype) | python | def attach_related_file(self, path, mimetype=None):
"""Attaches a file from the filesystem."""
filename = os.path.basename(path)
content = open(path, 'rb').read()
self.attach_related(filename, content, mimetype) | [
"def",
"attach_related_file",
"(",
"self",
",",
"path",
",",
"mimetype",
"=",
"None",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"path",
")",
"content",
"=",
"open",
"(",
"path",
",",
"'rb'",
")",
".",
"read",
"(",
")",
"self",
".",
"attach_related",
"(",
"filename",
",",
"content",
",",
"mimetype",
")"
] | Attaches a file from the filesystem. | [
"Attaches",
"a",
"file",
"from",
"the",
"filesystem",
"."
] | b64106392fad596defc915b8235fe6e1d0013b5b | https://github.com/alexhayes/django-toolkit/blob/b64106392fad596defc915b8235fe6e1d0013b5b/django_toolkit/email.py#L39-L43 |
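A hedged sketch combining the two attach helpers above; it assumes EmailMultiRelated accepts the usual Django EmailMessage constructor arguments, and the paths, addresses and logo_bytes are placeholders:
msg = EmailMultiRelated('Report', 'See the inline chart.',
                        'from@example.com', ['to@example.com'])
msg.attach_related_file('/tmp/chart.png')  # reads the file; mimetype left to be guessed
msg.attach_related('logo.png', logo_bytes, 'image/png')  # logo_bytes is a placeholder
msg.send()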
251,525 | calvinku96/labreporthelper | labreporthelper/datafile.py | CustomDataFile.create_dat_file | def create_dat_file(self):
"""
Create and write empty data file in the data directory
"""
output = "## {}\n".format(self.name)
try:
kwargs_items = self.kwargs.iteritems()
except AttributeError:
kwargs_items = self.kwargs.items()
for key, val in kwargs_items:
if val == "l":
output += "#l {}=\n".format(str(key))
else:
# default: any other marker is written as a float field
output += "#f {}=\n".format(str(key))
comment = "## " + "\t".join(["col{" + str(i) + ":d}"
for i in range(self.argnum)])
comment += "\n"
rangeargnum = range(self.argnum)
output += comment.format(*rangeargnum)
if os.path.isfile(self.location_dat):
files = glob.glob(self.location_dat + "*")
count = 2
while (
(self.location_dat + str(count) in files)
) and (count <= 10):
count += 1
os.rename(self.location_dat, self.location_dat + str(count))
dat_file = open(self.location_dat, "wb")
dat_file.write(output)
dat_file.close() | python | def create_dat_file(self):
"""
Create and write empty data file in the data directory
"""
output = "## {}\n".format(self.name)
try:
kwargs_items = self.kwargs.iteritems()
except AttributeError:
kwargs_items = self.kwargs.items()
for key, val in kwargs_items:
if val == "l":
output += "#l {}=\n".format(str(key))
else:
# default: any other marker is written as a float field
output += "#f {}=\n".format(str(key))
comment = "## " + "\t".join(["col{" + str(i) + ":d}"
for i in range(self.argnum)])
comment += "\n"
rangeargnum = range(self.argnum)
output += comment.format(*rangeargnum)
if os.path.isfile(self.location_dat):
files = glob.glob(self.location_dat + "*")
count = 2
while (
(self.location_dat + str(count) in files)
) and (count <= 10):
count += 1
os.rename(self.location_dat, self.location_dat + str(count))
dat_file = open(self.location_dat, "wb")
dat_file.write(output)
dat_file.close() | [
"def",
"create_dat_file",
"(",
"self",
")",
":",
"output",
"=",
"\"## {}\\n\"",
".",
"format",
"(",
"self",
".",
"name",
")",
"try",
":",
"kwargs_items",
"=",
"self",
".",
"kwargs",
".",
"iteritems",
"(",
")",
"except",
"AttributeError",
":",
"kwargs_items",
"=",
"self",
".",
"kwargs",
".",
"items",
"(",
")",
"for",
"key",
",",
"val",
"in",
"kwargs_items",
":",
"if",
"val",
"is",
"\"l\"",
":",
"output",
"+=",
"\"#l {}=\\n\"",
".",
"format",
"(",
"str",
"(",
"key",
")",
")",
"elif",
"val",
"is",
"\"f\"",
"or",
"True",
":",
"output",
"+=",
"\"#f {}=\\n\"",
".",
"format",
"(",
"str",
"(",
"key",
")",
")",
"comment",
"=",
"\"## \"",
"+",
"\"\\t\"",
".",
"join",
"(",
"[",
"\"col{\"",
"+",
"str",
"(",
"i",
")",
"+",
"\":d}\"",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"argnum",
")",
"]",
")",
"comment",
"+=",
"\"\\n\"",
"rangeargnum",
"=",
"range",
"(",
"self",
".",
"argnum",
")",
"output",
"+=",
"comment",
".",
"format",
"(",
"*",
"rangeargnum",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"self",
".",
"location_dat",
")",
":",
"files",
"=",
"glob",
".",
"glob",
"(",
"self",
".",
"location_dat",
"+",
"\"*\"",
")",
"count",
"=",
"2",
"while",
"(",
"(",
"self",
".",
"location_dat",
"+",
"str",
"(",
"count",
")",
"in",
"files",
")",
")",
"and",
"(",
"count",
"<=",
"10",
")",
":",
"count",
"+=",
"1",
"os",
".",
"rename",
"(",
"self",
".",
"location_dat",
",",
"self",
".",
"location_dat",
"+",
"str",
"(",
"count",
")",
")",
"dat_file",
"=",
"open",
"(",
"self",
".",
"location_dat",
",",
"\"wb\"",
")",
"dat_file",
".",
"write",
"(",
"output",
")",
"dat_file",
".",
"close",
"(",
")"
] | Create and write empty data file in the data directory | [
"Create",
"and",
"write",
"empty",
"data",
"file",
"in",
"the",
"data",
"directory"
] | 4d436241f389c02eb188c313190df62ab28c3763 | https://github.com/calvinku96/labreporthelper/blob/4d436241f389c02eb188c313190df62ab28c3763/labreporthelper/datafile.py#L66-L95 |
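A sketch of driving create_dat_file; the constructor call is an assumption (the method itself only touches self.name, self.argnum, self.kwargs and self.location_dat), and the 'l'/'f' markers map to the header prefixes written above:
# Assumed constructor; kwargs map header fields to 'l' or 'f' markers
datafile = CustomDataFile('trial01', argnum=3, voltage='f', label='l')
datafile.create_dat_file()  # writes header lines like '#f voltage=' and '#l label='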
251,526 | calvinku96/labreporthelper | labreporthelper/datafile.py | MCADataFile.parse_data_to_internal | def parse_data_to_internal(self, data=None):
"""parse to internal
"""
if data is None:
f = open(self.location_dat, "rb")
data = {
"PMCA SPECTRUM": {},
"DATA": [],
"DP5 CONFIGURATION": {},
"DPP STATUS": {}
}
delimiter = {
"PMCA SPECTRUM": " - ",
"DP5 CONFIGURATION": "=",
"DPP STATUS": ":"
}
comments = {
"PMCA SPECTRUM": None,
"DP5 CONFIGURATION": ";",
"DPP STATUS": None
}
for e in f:
if "<<" in e:
if "<<END>>" in e:
current = None
elif "<<PMCA SPECTRUM>>" in e:
current = "PMCA SPECTRUM"
elif "<<DATA>>" in e:
current = "DATA"
elif "<<DP5 CONFIGURATION>>" in e:
current = "DP5 CONFIGURATION"
elif "<<DPP STATUS>>" in e:
current = "DPP STATUS"
elif "<<ROI>>" in e:
current = "ROI"
else:
if current == "DATA":
data["DATA"].append(float(e))
elif current == "ROI":
continue
elif current is not None:
e = e.split("\r\n")[0]
if comments[current] is not None:
e = e.split(comments[current], 1)[0]
e_list = e.split(delimiter[current], 1)
data[current][e_list[0]] = e_list[1]
f.close()
self.save_to_internal(data) | python | def parse_data_to_internal(self, data=None):
"""parse to internal
"""
if data is None:
f = open(self.location_dat, "rb")
data = {
"PMCA SPECTRUM": {},
"DATA": [],
"DP5 CONFIGURATION": {},
"DPP STATUS": {}
}
delimiter = {
"PMCA SPECTRUM": " - ",
"DP5 CONFIGURATION": "=",
"DPP STATUS": ":"
}
comments = {
"PMCA SPECTRUM": None,
"DP5 CONFIGURATION": ";",
"DPP STATUS": None
}
for e in f:
if "<<" in e:
if "<<END>>" in e:
current = None
elif "<<PMCA SPECTRUM>>" in e:
current = "PMCA SPECTRUM"
elif "<<DATA>>" in e:
current = "DATA"
elif "<<DP5 CONFIGURATION>>" in e:
current = "DP5 CONFIGURATION"
elif "<<DPP STATUS>>" in e:
current = "DPP STATUS"
elif "<<ROI>>" in e:
current = "ROI"
else:
if current == "DATA":
data["DATA"].append(float(e))
elif current == "ROI":
continue
elif current is not None:
e = e.split("\r\n")[0]
if comments[current] is not None:
e = e.split(comments[current], 1)[0]
e_list = e.split(delimiter[current], 1)
data[current][e_list[0]] = e_list[1]
f.close()
self.save_to_internal(data) | [
"def",
"parse_data_to_internal",
"(",
"self",
",",
"data",
"=",
"None",
")",
":",
"if",
"data",
"is",
"None",
":",
"f",
"=",
"open",
"(",
"self",
".",
"location_dat",
",",
"\"rb\"",
")",
"data",
"=",
"{",
"\"PMCA SPECTRUM\"",
":",
"{",
"}",
",",
"\"DATA\"",
":",
"[",
"]",
",",
"\"DP5 CONFIGURATION\"",
":",
"{",
"}",
",",
"\"DPP STATUS\"",
":",
"{",
"}",
"}",
"delimiter",
"=",
"{",
"\"PMCA SPECTRUM\"",
":",
"\" - \"",
",",
"\"DP5 CONFIGURATION\"",
":",
"\"=\"",
",",
"\"DPP STATUS\"",
":",
"\":\"",
"}",
"comments",
"=",
"{",
"\"PMCA SPECTRUM\"",
":",
"None",
",",
"\"DP5 CONFIGURATION\"",
":",
"\";\"",
",",
"\"DPP STATUS\"",
":",
"None",
"}",
"for",
"e",
"in",
"f",
":",
"if",
"\"<<\"",
"in",
"e",
":",
"if",
"\"<<END>>\"",
"in",
"e",
":",
"current",
"=",
"None",
"elif",
"\"<<PMCA SPECTRUM>>\"",
"in",
"e",
":",
"current",
"=",
"\"PMCA SPECTRUM\"",
"elif",
"\"<<DATA>>\"",
"in",
"e",
":",
"current",
"=",
"\"DATA\"",
"elif",
"\"<<DP5 CONFIGURATION>>\"",
"in",
"e",
":",
"current",
"=",
"\"DP5 CONFIGURATION\"",
"elif",
"\"<<DPP STATUS>>\"",
"in",
"e",
":",
"current",
"=",
"\"DPP STATUS\"",
"elif",
"\"<<ROI>>\"",
"in",
"e",
":",
"current",
"=",
"\"ROI\"",
"else",
":",
"if",
"current",
"==",
"\"DATA\"",
":",
"data",
"[",
"\"DATA\"",
"]",
".",
"append",
"(",
"float",
"(",
"e",
")",
")",
"elif",
"current",
"==",
"\"ROI\"",
":",
"continue",
"elif",
"current",
"is",
"not",
"None",
":",
"e",
"=",
"e",
".",
"split",
"(",
"\"\\r\\n\"",
")",
"[",
"0",
"]",
"if",
"comments",
"[",
"current",
"]",
"is",
"not",
"None",
":",
"e",
"=",
"e",
".",
"split",
"(",
"comments",
"[",
"current",
"]",
",",
"1",
")",
"[",
"0",
"]",
"e_list",
"=",
"e",
".",
"split",
"(",
"delimiter",
"[",
"current",
"]",
",",
"1",
")",
"data",
"[",
"current",
"]",
"[",
"e_list",
"[",
"0",
"]",
"]",
"=",
"e_list",
"[",
"1",
"]",
"f",
".",
"close",
"(",
")",
"self",
".",
"save_to_internal",
"(",
"data",
")"
] | parse to internal | [
"parse",
"to",
"internal"
] | 4d436241f389c02eb188c313190df62ab28c3763 | https://github.com/calvinku96/labreporthelper/blob/4d436241f389c02eb188c313190df62ab28c3763/labreporthelper/datafile.py#L157-L204 |
251,527 | calvinku96/labreporthelper | labreporthelper/datafile.py | DataFile.parse_data_to_internal | def parse_data_to_internal(self, data=None):
"""Use numpy loadtxt
"""
if data is None:
kwargs = self.kwargs
data = np.loadtxt(
open(self.location_dat, "rb"), **kwargs
)
if self.filetype == "pickle":
pickle.dump(data, open(self.location_internal, "wb"))
elif self.filetype == "hickle":
import hickle
hickle.dump(data, open(self.location_internal, "wb"))
else:
raise ValueError(
"Invalid filetype {} (must be {} or {})".format(
self.filetype, "pickle", "hickle"
)
) | python | def parse_data_to_internal(self, data=None):
"""Use numpy loadtxt
"""
if data is None:
kwargs = self.kwargs
data = np.loadtxt(
open(self.location_dat, "rb"), **kwargs
)
if self.filetype == "pickle":
pickle.dump(data, open(self.location_internal, "wb"))
elif self.filetype == "hickle":
import hickle
hickle.dump(data, open(self.location_internal, "wb"))
else:
raise ValueError(
"Invalid filetype {} (must be {} or {})".format(
self.filetype, "pickle", "hickle"
)
) | [
"def",
"parse_data_to_internal",
"(",
"self",
",",
"data",
"=",
"None",
")",
":",
"if",
"data",
"is",
"None",
":",
"kwargs",
"=",
"self",
".",
"kwargs",
"data",
"=",
"np",
".",
"loadtxt",
"(",
"open",
"(",
"self",
".",
"location_dat",
",",
"\"rb\"",
")",
",",
"*",
"*",
"kwargs",
")",
"if",
"self",
".",
"filetype",
"is",
"\"pickle\"",
":",
"pickle",
".",
"dump",
"(",
"data",
",",
"open",
"(",
"self",
".",
"location_internal",
",",
"\"wb\"",
")",
")",
"elif",
"self",
".",
"filetype",
"is",
"\"hickle\"",
":",
"import",
"hickle",
"hickle",
".",
"dump",
"(",
"data",
",",
"open",
"(",
"self",
".",
"location_internal",
",",
"\"wb\"",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid filetype {} (must be {} or {})\"",
".",
"format",
"(",
"self",
".",
"filetype",
",",
"\"pickle\"",
",",
"\"hickle\"",
")",
")"
] | Use numpy loadtxt | [
"Use",
"numpy",
"loadtxt"
] | 4d436241f389c02eb188c313190df62ab28c3763 | https://github.com/calvinku96/labreporthelper/blob/4d436241f389c02eb188c313190df62ab28c3763/labreporthelper/datafile.py#L216-L234 |
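A hedged sketch of handing pre-loaded data straight to the internal store; `datafile` stands in for an already-configured DataFile whose filetype is 'pickle':
import numpy
datafile.parse_data_to_internal(data=numpy.arange(6).reshape(3, 2))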
251,528 | klen/muffin-jade | muffin_jade.py | Plugin.ctx_provider | def ctx_provider(self, func):
""" Decorator for adding a context provider.
::
@jade.ctx_provider
def my_context():
return {...}
"""
func = to_coroutine(func)
self.providers.append(func)
return func | python | def ctx_provider(self, func):
""" Decorator for adding a context provider.
::
@jade.ctx_provider
def my_context():
return {...}
"""
func = to_coroutine(func)
self.providers.append(func)
return func | [
"def",
"ctx_provider",
"(",
"self",
",",
"func",
")",
":",
"func",
"=",
"to_coroutine",
"(",
"func",
")",
"self",
".",
"providers",
".",
"append",
"(",
"func",
")",
"return",
"func"
] | Decorator for adding a context provider.
::
@jade.ctx_provider
def my_context():
return {...} | [
"Decorator",
"for",
"adding",
"a",
"context",
"provider",
"."
] | 3ddd6bf27fac03edc0bef3b0840bcd2e278babb3 | https://github.com/klen/muffin-jade/blob/3ddd6bf27fac03edc0bef3b0840bcd2e278babb3/muffin_jade.py#L59-L69 |
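A sketch mirroring the docstring above; `jade` stands in for a configured Plugin instance, and the returned dict would be merged into every render context:
@jade.ctx_provider
def site_defaults():
    return {'site_name': 'Example'}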
251,529 | klen/muffin-jade | muffin_jade.py | Plugin.register | def register(self, func):
""" Register function to templates. """
if callable(func):
self.functions[func.__name__] = func
return func | python | def register(self, func):
""" Register function to templates. """
if callable(func):
self.functions[func.__name__] = func
return func | [
"def",
"register",
"(",
"self",
",",
"func",
")",
":",
"if",
"callable",
"(",
"func",
")",
":",
"self",
".",
"functions",
"[",
"func",
".",
"__name__",
"]",
"=",
"func",
"return",
"func"
] | Register function to templates. | [
"Register",
"function",
"to",
"templates",
"."
] | 3ddd6bf27fac03edc0bef3b0840bcd2e278babb3 | https://github.com/klen/muffin-jade/blob/3ddd6bf27fac03edc0bef3b0840bcd2e278babb3/muffin_jade.py#L71-L75 |
251,530 | klen/muffin-jade | muffin_jade.py | Plugin.render | def render(self, path, **context):
""" Render a template with context. """
funcs = self.functions
ctx = dict(self.functions, jdebug=lambda: dict(
(k, v) for k, v in ctx.items() if k not in funcs and k != 'jdebug'))
for provider in self.providers:
_ctx = yield from provider()
ctx.update(_ctx)
ctx.update(context)
template = self.env.get_template(path)
return self.env.render(template, **ctx) | python | def render(self, path, **context):
""" Render a template with context. """
funcs = self.functions
ctx = dict(self.functions, jdebug=lambda: dict(
(k, v) for k, v in ctx.items() if k not in funcs and k != 'jdebug'))
for provider in self.providers:
_ctx = yield from provider()
ctx.update(_ctx)
ctx.update(context)
template = self.env.get_template(path)
return self.env.render(template, **ctx) | [
"def",
"render",
"(",
"self",
",",
"path",
",",
"*",
"*",
"context",
")",
":",
"funcs",
"=",
"self",
".",
"functions",
"ctx",
"=",
"dict",
"(",
"self",
".",
"functions",
",",
"jdebug",
"=",
"lambda",
":",
"dict",
"(",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"ctx",
".",
"items",
"(",
")",
"if",
"k",
"not",
"in",
"funcs",
"and",
"k",
"!=",
"'jdebug'",
")",
")",
"for",
"provider",
"in",
"self",
".",
"providers",
":",
"_ctx",
"=",
"yield",
"from",
"provider",
"(",
")",
"ctx",
".",
"update",
"(",
"_ctx",
")",
"ctx",
".",
"update",
"(",
"context",
")",
"template",
"=",
"self",
".",
"env",
".",
"get_template",
"(",
"path",
")",
"return",
"self",
".",
"env",
".",
"render",
"(",
"template",
",",
"*",
"*",
"ctx",
")"
] | Render a template with context. | [
"Render",
"a",
"template",
"with",
"context",
"."
] | 3ddd6bf27fac03edc0bef3b0840bcd2e278babb3 | https://github.com/klen/muffin-jade/blob/3ddd6bf27fac03edc0bef3b0840bcd2e278babb3/muffin_jade.py#L78-L88 |
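Since render is written as a coroutine-style generator (it drives the providers with yield from), a caller would drive it the same way; the template name and context are placeholders:
html = yield from jade.render('pages/home.jade', user='alice')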
251,531 | klen/muffin-jade | muffin_jade.py | Environment.load_template | def load_template(self, path):
""" Load and compile a template. """
if not path.startswith('/'):
for folder in self.options['template_folders']:
fullpath = op.join(folder, path)
if op.exists(fullpath):
path = fullpath
break
else:
raise JadeException('Template does not exist: %s' % path)
with open(path, 'rb') as f:
source = f.read().decode(self.options['encoding'])
return ExtendCompiler(
pyjade.parser.Parser(source).parse(), pretty=self.options['pretty'],
env=self, compileDebug=True
) | python | def load_template(self, path):
""" Load and compile a template. """
if not path.startswith('/'):
for folder in self.options['template_folders']:
fullpath = op.join(folder, path)
if op.exists(fullpath):
path = fullpath
break
else:
raise JadeException('Template does not exist: %s' % path)
with open(path, 'rb') as f:
source = f.read().decode(self.options['encoding'])
return ExtendCompiler(
pyjade.parser.Parser(source).parse(), pretty=self.options['pretty'],
env=self, compileDebug=True
) | [
"def",
"load_template",
"(",
"self",
",",
"path",
")",
":",
"if",
"not",
"path",
".",
"startswith",
"(",
"'/'",
")",
":",
"for",
"folder",
"in",
"self",
".",
"options",
"[",
"'template_folders'",
"]",
":",
"fullpath",
"=",
"op",
".",
"join",
"(",
"folder",
",",
"path",
")",
"if",
"op",
".",
"exists",
"(",
"fullpath",
")",
":",
"path",
"=",
"fullpath",
"break",
"else",
":",
"raise",
"JadeException",
"(",
"'Template doesnt exist: %s'",
"%",
"path",
")",
"with",
"open",
"(",
"path",
",",
"'rb'",
")",
"as",
"f",
":",
"source",
"=",
"f",
".",
"read",
"(",
")",
".",
"decode",
"(",
"self",
".",
"options",
"[",
"'encoding'",
"]",
")",
"return",
"ExtendCompiler",
"(",
"pyjade",
".",
"parser",
".",
"Parser",
"(",
"source",
")",
".",
"parse",
"(",
")",
",",
"pretty",
"=",
"self",
".",
"options",
"[",
"'pretty'",
"]",
",",
"env",
"=",
"self",
",",
"compileDebug",
"=",
"True",
")"
] | Load and compile a template. | [
"Load",
"and",
"compile",
"a",
"template",
"."
] | 3ddd6bf27fac03edc0bef3b0840bcd2e278babb3 | https://github.com/klen/muffin-jade/blob/3ddd6bf27fac03edc0bef3b0840bcd2e278babb3/muffin_jade.py#L142-L159 |
251,532 | pydsigner/taskit | taskit/common.py | FirstBytesProtocol.set_size | def set_size(self, data_size):
"""
Set the data slice size.
"""
if len(str(data_size)) > self.first:
raise ValueError(
'Send size is too large for message size-field width!')
self.data_size = data_size | python | def set_size(self, data_size):
"""
Set the data slice size.
"""
if len(str(data_size)) > self.first:
raise ValueError(
'Send size is too large for message size-field width!')
self.data_size = data_size | [
"def",
"set_size",
"(",
"self",
",",
"data_size",
")",
":",
"if",
"len",
"(",
"str",
"(",
"data_size",
")",
")",
">",
"self",
".",
"first",
":",
"raise",
"ValueError",
"(",
"'Send size is too large for message size-field width!'",
")",
"self",
".",
"data_size",
"=",
"data_size"
] | Set the data slice size. | [
"Set",
"the",
"data",
"slice",
"size",
"."
] | 3b228e2dbac16b3b84b2581f5b46e027d1d8fa7f | https://github.com/pydsigner/taskit/blob/3b228e2dbac16b3b84b2581f5b46e027d1d8fa7f/taskit/common.py#L81-L89 |
251,533 | jldantas/libmft | parallel.py | MFT._is_related | def _is_related(parent_entry, child_entry):
'''This function checks if a child entry is related to the parent entry.
This is done by comparing the reference and sequence numbers.'''
return (parent_entry.header.mft_record == child_entry.header.base_record_ref and
parent_entry.header.seq_number == child_entry.header.base_record_seq) | python | def _is_related(parent_entry, child_entry):
'''This function checks if a child entry is related to the parent entry.
This is done by comparing the reference and sequence numbers.'''
return (parent_entry.header.mft_record == child_entry.header.base_record_ref and
parent_entry.header.seq_number == child_entry.header.base_record_seq) | [
"def",
"_is_related",
"(",
"parent_entry",
",",
"child_entry",
")",
":",
"if",
"parent_entry",
".",
"header",
".",
"mft_record",
"==",
"child_entry",
".",
"header",
".",
"base_record_ref",
"and",
"parent_entry",
".",
"header",
".",
"seq_number",
"==",
"child_entry",
".",
"header",
".",
"base_record_seq",
":",
"return",
"True",
"else",
":",
"return",
"False"
] | This function checks if a child entry is related to the parent entry.
This is done by comparing the reference and sequence numbers. | [
"This",
"function",
"checks",
"if",
"a",
"child",
"entry",
"is",
"related",
"to",
"the",
"parent",
"entry",
".",
"This",
"is",
"done",
"by",
"comparing",
"the",
"reference",
"and",
"sequence",
"numbers",
"."
] | 65a988605fe7663b788bd81dcb52c0a4eaad1549 | https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/parallel.py#L132-L139 |
251,534 | jldantas/libmft | parallel.py | MFT.load_mp | def load_mp(cls, file_pointer, _mft_config=None):
'''The initialization process takes a file like object "file_pointer"
and loads it in the internal structures. "use_cores" can be defined
if multiple cores are to be used. The "size" argument is the size
of the MFT entries. If not provided, the class will try to auto detect
it.
'''
import multiprocessing
import queue
mft_config = _mft_config if _mft_config is not None else MFT.mft_config
mft_entry_size = mft_config["entry_size"]
#self.entries = {}
if not mft_entry_size:
mft_entry_size = MFT._find_mft_size(file_pointer)
file_size = _get_file_size(file_pointer)
if (file_size % mft_entry_size):
#TODO error handling (file size not multiple of mft size)
MOD_LOGGER.error("Unexpected file size. It is not a multiple of the MFT entry size.")
end = int(file_size / mft_entry_size)
#setup the multiprocessing stuff
queue_size = 10
n_processes = 3
manager = multiprocessing.Manager()
buffer_queue_in = manager.Queue(queue_size)
buffer_queue_out = manager.Queue(queue_size)
entries = manager.dict()
temp_entries = manager.list()
processes = [multiprocessing.Process(target=MFT._load_entry, args=(mft_config, buffer_queue_in, buffer_queue_out, entries, temp_entries)) for i in range(n_processes)]
for p in processes:
p.start()
for i in range(queue_size):
buffer_queue_out.put(bytearray(mft_entry_size))
#start the game
for i in range(0, end):
try:
data_buffer = buffer_queue_out.get(timeout=1)
file_pointer.readinto(data_buffer)
buffer_queue_in.put((i, data_buffer))
#print("adding", i)
except queue.Empty as e:
print("DAMN")
raise
for i in range(queue_size):
buffer_queue_in.put((-1, None))
for p in processes:
p.join()
print("LOADING DONE")
#process the temporary list and add it to the "model"
for entry in temp_entries:
base_record_ref = entry.header.base_record_ref
if base_record_ref in entries: #if the parent entry has been loaded
if MFT._is_related(entries[base_record_ref], entry):
entries[base_record_ref].copy_attributes(entry)
else: #can happen when you have an orphan entry
entries[entry.header.mft_record] = entry | python | def load_mp(cls, file_pointer, _mft_config=None):
'''The initialization process takes a file like object "file_pointer"
and loads it in the internal structures. "use_cores" can be defined
if multiple cores are to be used. The "size" argument is the size
of the MFT entries. If not provided, the class will try to auto detect
it.
'''
import multiprocessing
import queue
mft_config = _mft_config if _mft_config is not None else MFT.mft_config
mft_entry_size = mft_config["entry_size"]
#self.entries = {}
if not mft_entry_size:
mft_entry_size = MFT._find_mft_size(file_pointer)
file_size = _get_file_size(file_pointer)
if (file_size % mft_entry_size):
#TODO error handling (file size not multiple of mft size)
MOD_LOGGER.error("Unexpected file size. It is not a multiple of the MFT entry size.")
end = int(file_size / mft_entry_size)
#setup the multiprocessing stuff
queue_size = 10
n_processes = 3
manager = multiprocessing.Manager()
buffer_queue_in = manager.Queue(queue_size)
buffer_queue_out = manager.Queue(queue_size)
entries = manager.dict()
temp_entries = manager.list()
processes = [multiprocessing.Process(target=MFT._load_entry, args=(mft_config, buffer_queue_in, buffer_queue_out, entries, temp_entries)) for i in range(n_processes)]
for p in processes:
p.start()
for i in range(queue_size):
buffer_queue_out.put(bytearray(mft_entry_size))
#start the game
for i in range(0, end):
try:
data_buffer = buffer_queue_out.get(timeout=1)
file_pointer.readinto(data_buffer)
buffer_queue_in.put((i, data_buffer))
#print("adding", i)
except queue.Empty as e:
print("DAMN")
raise
for i in range(queue_size):
buffer_queue_in.put((-1, None))
for p in processes:
p.join()
print("LOADING DONE")
#process the temporary list and add it to the "model"
for entry in temp_entries:
base_record_ref = entry.header.base_record_ref
if base_record_ref in entries: #if the parent entry has been loaded
if MFT._is_related(entries[base_record_ref], entry):
entries[base_record_ref].copy_attributes(entry)
else: #can happen when you have an orphan entry
entries[entry.header.mft_record] = entry | [
"def",
"load_mp",
"(",
"cls",
",",
"file_pointer",
",",
"_mft_config",
"=",
"None",
")",
":",
"import",
"multiprocessing",
"import",
"queue",
"mft_config",
"=",
"_mft_config",
"if",
"_mft_config",
"is",
"not",
"None",
"else",
"MFT",
".",
"mft_config",
"mft_entry_size",
"=",
"mft_config",
"[",
"\"entry_size\"",
"]",
"#self.entries = {}",
"if",
"not",
"mft_entry_size",
":",
"mft_entry_size",
"=",
"MFT",
".",
"_find_mft_size",
"(",
"file_pointer",
")",
"file_size",
"=",
"_get_file_size",
"(",
"file_pointer",
")",
"if",
"(",
"file_size",
"%",
"mft_entry_size",
")",
":",
"#TODO error handling (file size not multiple of mft size)",
"MOD_LOGGER",
".",
"error",
"(",
"\"Unexpected file size. It is not multiple of the MFT entry size.\"",
")",
"end",
"=",
"int",
"(",
"file_size",
"/",
"mft_entry_size",
")",
"#setup the multiprocessing stuff",
"queue_size",
"=",
"10",
"n_processes",
"=",
"3",
"manager",
"=",
"multiprocessing",
".",
"Manager",
"(",
")",
"buffer_queue_in",
"=",
"manager",
".",
"Queue",
"(",
"queue_size",
")",
"buffer_queue_out",
"=",
"manager",
".",
"Queue",
"(",
"queue_size",
")",
"entries",
"=",
"manager",
".",
"dict",
"(",
")",
"temp_entries",
"=",
"manager",
".",
"list",
"(",
")",
"processes",
"=",
"[",
"multiprocessing",
".",
"Process",
"(",
"target",
"=",
"MFT",
".",
"_load_entry",
",",
"args",
"=",
"(",
"mft_config",
",",
"buffer_queue_in",
",",
"buffer_queue_out",
",",
"entries",
",",
"temp_entries",
")",
")",
"for",
"i",
"in",
"range",
"(",
"n_processes",
")",
"]",
"for",
"p",
"in",
"processes",
":",
"p",
".",
"start",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"queue_size",
")",
":",
"buffer_queue_out",
".",
"put",
"(",
"bytearray",
"(",
"mft_entry_size",
")",
")",
"#start the game",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"end",
")",
":",
"try",
":",
"data_buffer",
"=",
"buffer_queue_out",
".",
"get",
"(",
"timeout",
"=",
"1",
")",
"file_pointer",
".",
"readinto",
"(",
"data_buffer",
")",
"buffer_queue_in",
".",
"put",
"(",
"(",
"i",
",",
"data_buffer",
")",
")",
"#print(\"adding\", i)",
"except",
"queue",
".",
"Empty",
"as",
"e",
":",
"print",
"(",
"\"DAMN\"",
")",
"raise",
"for",
"i",
"in",
"range",
"(",
"queue_size",
")",
":",
"buffer_queue_in",
".",
"put",
"(",
"(",
"-",
"1",
",",
"None",
")",
")",
"for",
"p",
"in",
"processes",
":",
"p",
".",
"join",
"(",
")",
"print",
"(",
"\"LOADING DONE\"",
")",
"#process the temporary list and add it to the \"model\"",
"for",
"entry",
"in",
"temp_entries",
":",
"base_record_ref",
"=",
"entry",
".",
"header",
".",
"base_record_ref",
"if",
"base_record_ref",
"in",
"entries",
":",
"#if the parent entry has been loaded",
"if",
"MFT",
".",
"_is_related",
"(",
"entries",
"[",
"base_record_ref",
"]",
",",
"entry",
")",
":",
"entries",
"[",
"base_record_ref",
"]",
".",
"copy_attributes",
"(",
"entry",
")",
"else",
":",
"#can happen when you have an orphan entry",
"entries",
"[",
"i",
"]",
"=",
"entry"
] | The initialization process takes a file like object "file_pointer"
and loads it in the internal structures. "use_cores" can be defined
if multiple cores are to be used. The "size" argument is the size
of the MFT entries. If not provided, the class will try to auto detect
it. | [
"The",
"initialization",
"process",
"takes",
"a",
"file",
"like",
"object",
"file_pointer",
"and",
"loads",
"it",
"in",
"the",
"internal",
"structures",
".",
"use_cores",
"can",
"be",
"definied",
"if",
"multiple",
"cores",
"are",
"to",
"be",
"used",
".",
"The",
"size",
"argument",
"is",
"the",
"size",
"of",
"the",
"MFT",
"entries",
".",
"If",
"not",
"provided",
"the",
"class",
"will",
"try",
"to",
"auto",
"detect",
"it",
"."
] | 65a988605fe7663b788bd81dcb52c0a4eaad1549 | https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/parallel.py#L142-L202 |
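A hedged invocation sketch; the file path and entry size are assumptions, and note that as written the method leaves the populated entries mapping in a local manager dict rather than returning it:
# Hypothetical path; override only the entry size in the class defaults
with open('MFT.raw', 'rb') as fp:
    MFT.load_mp(fp, _mft_config=dict(MFT.mft_config, entry_size=1024))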
251,535 | ryanjdillon/pyotelem | pyotelem/dives.py | finddives2 | def finddives2(depths, min_dive_thresh=10):
'''Find dives in depth data below a minimum dive threshold
Args
----
depths: ndarray
Datalogger depth measurements
min_dive_thresh: float
Minimum depth threshold for which to classify a dive
Returns
-------
dives: pandas.DataFrame
Dive summary information in a pandas DataFrame (one row per dive)
*Columns*:
* dive_id
* start_idx
* stop_idx
* dive_dur
* depth_max
* depth_max_idx
* depth_min
* depth_min_idx
* depth_mean
* comp_mean
dive_mask: ndarray
Boolean mask array over depth data. Cells with `True` are dives and
cells with `False` are not.
'''
import numpy
import pandas
from . import utils
# Get start and stop indices for each dive above `min_dive_thresh`
condition = depths > min_dive_thresh
ind_start, ind_end = utils.contiguous_regions(condition)
n_dives = len(ind_start)
dive_mask = numpy.zeros(len(depths), dtype=bool)
dtypes = numpy.dtype([('dive_id', int),
('start_idx', int),
('stop_idx', int),
('dive_dur', int),
('depth_max', float),
('depth_max_idx', float),
('depth_min', float),
('depth_min_idx', float),
('depth_mean', float),
('comp_mean', float),])
dive_data = numpy.zeros(n_dives, dtype=dtypes)
for i in range(n_dives):
dive_mask[ind_start[i]:ind_end[i]] = True
dive_depths = depths[ind_start[i]:ind_end[i]]
dive_data['dive_id'][i] = i
dive_data['start_idx'][i] = ind_start[i]
dive_data['stop_idx'][i] = ind_end[i]
dive_data['dive_dur'][i] = ind_end[i] - ind_start[i]
dive_data['depth_max'][i] = dive_depths.max()
dive_data['depth_max_idx'][i] = numpy.argmax(dive_depths)
dive_data['depth_min'][i] = dive_depths.min()
dive_data['depth_min_idx'][i] = numpy.argmin(dive_depths)
dive_data['depth_mean'][i] = numpy.mean(dive_depths)
# TODO Supposedly time of deepest dive... doesn't appear to be that
dive_data['comp_mean'][i] = numpy.mean(1 + (1/(0.1*dive_depths)))
# Filter out any dives whose endpoint index falls beyond the array bounds
dive_data = dive_data[dive_data['stop_idx'] < len(depths)]
# Wrap the dive records in a pandas DataFrame
dives = pandas.DataFrame(dive_data)
return dives, dive_mask | python | def finddives2(depths, min_dive_thresh=10):
'''Find dives in depth data below a minimum dive threshold
Args
----
depths: ndarray
Datalogger depth measurements
min_dive_thresh: float
Minimum depth threshold for which to classify a dive
Returns
-------
dives: pandas.DataFrame
Dive summary information in a pandas DataFrame (one row per dive)
*Columns*:
* dive_id
* start_idx
* stop_idx
* dive_dur
* depth_max
* depth_max_idx
* depth_min
* depth_min_idx
* depth_mean
* comp_mean
dive_mask: ndarray
Boolean mask array over depth data. Cells with `True` are dives and
cells with `False` are not.
'''
import numpy
import pandas
from . import utils
# Get start and stop indices for each dive above `min_dive_thresh`
condition = depths > min_dive_thresh
ind_start, ind_end = utils.contiguous_regions(condition)
n_dives = len(ind_start)
dive_mask = numpy.zeros(len(depths), dtype=bool)
dtypes = numpy.dtype([('dive_id', int),
('start_idx', int),
('stop_idx', int),
('dive_dur', int),
('depth_max', float),
('depth_max_idx', float),
('depth_min', float),
('depth_min_idx', float),
('depth_mean', float),
('comp_mean', float),])
dive_data = numpy.zeros(n_dives, dtype=dtypes)
for i in range(n_dives):
dive_mask[ind_start[i]:ind_end[i]] = True
dive_depths = depths[ind_start[i]:ind_end[i]]
dive_data['dive_id'][i] = i
dive_data['start_idx'][i] = ind_start[i]
dive_data['stop_idx'][i] = ind_end[i]
dive_data['dive_dur'][i] = ind_end[i] - ind_start[i]
dive_data['depth_max'][i] = dive_depths.max()
dive_data['depth_max_idx'][i] = numpy.argmax(dive_depths)
dive_data['depth_min'][i] = dive_depths.min()
dive_data['depth_min_idx'][i] = numpy.argmin(dive_depths)
dive_data['depth_mean'][i] = numpy.mean(dive_depths)
# TODO Supposedly time of deepest dive... doesn't appear to be that
dive_data['comp_mean'][i] = numpy.mean(1 + (1/(0.1*dive_depths)))
# Filter out any dives whose endpoint index falls beyond the array bounds
dive_data = dive_data[dive_data['stop_idx'] < len(depths)]
# Wrap the dive records in a pandas DataFrame
dives = pandas.DataFrame(dive_data)
return dives, dive_mask | [
"def",
"finddives2",
"(",
"depths",
",",
"min_dive_thresh",
"=",
"10",
")",
":",
"import",
"numpy",
"import",
"pandas",
"from",
".",
"import",
"utils",
"# Get start and stop indices for each dive above `min_dive_thresh`",
"condition",
"=",
"depths",
">",
"min_dive_thresh",
"ind_start",
",",
"ind_end",
"=",
"utils",
".",
"contiguous_regions",
"(",
"condition",
")",
"n_dives",
"=",
"len",
"(",
"ind_start",
")",
"dive_mask",
"=",
"numpy",
".",
"zeros",
"(",
"len",
"(",
"depths",
")",
",",
"dtype",
"=",
"bool",
")",
"dtypes",
"=",
"numpy",
".",
"dtype",
"(",
"[",
"(",
"'dive_id'",
",",
"int",
")",
",",
"(",
"'start_idx'",
",",
"int",
")",
",",
"(",
"'stop_idx'",
",",
"int",
")",
",",
"(",
"'dive_dur'",
",",
"int",
")",
",",
"(",
"'depth_max'",
",",
"float",
")",
",",
"(",
"'depth_max_idx'",
",",
"float",
")",
",",
"(",
"'depth_min'",
",",
"float",
")",
",",
"(",
"'depth_min_idx'",
",",
"float",
")",
",",
"(",
"'depth_mean'",
",",
"float",
")",
",",
"(",
"'comp_mean'",
",",
"float",
")",
",",
"]",
")",
"dive_data",
"=",
"numpy",
".",
"zeros",
"(",
"n_dives",
",",
"dtype",
"=",
"dtypes",
")",
"for",
"i",
"in",
"range",
"(",
"n_dives",
")",
":",
"dive_mask",
"[",
"ind_start",
"[",
"i",
"]",
":",
"ind_end",
"[",
"i",
"]",
"]",
"=",
"True",
"dive_depths",
"=",
"depths",
"[",
"ind_start",
"[",
"i",
"]",
":",
"ind_end",
"[",
"i",
"]",
"]",
"dive_data",
"[",
"'dive_id'",
"]",
"[",
"i",
"]",
"=",
"i",
"dive_data",
"[",
"'start_idx'",
"]",
"[",
"i",
"]",
"=",
"ind_start",
"[",
"i",
"]",
"dive_data",
"[",
"'stop_idx'",
"]",
"[",
"i",
"]",
"=",
"ind_end",
"[",
"i",
"]",
"dive_data",
"[",
"'dive_dur'",
"]",
"[",
"i",
"]",
"=",
"ind_end",
"[",
"i",
"]",
"-",
"ind_start",
"[",
"i",
"]",
"dive_data",
"[",
"'depth_max'",
"]",
"[",
"i",
"]",
"=",
"dive_depths",
".",
"max",
"(",
")",
"dive_data",
"[",
"'depth_max_idx'",
"]",
"[",
"i",
"]",
"=",
"numpy",
".",
"argmax",
"(",
"dive_depths",
")",
"dive_data",
"[",
"'depth_min'",
"]",
"[",
"i",
"]",
"=",
"dive_depths",
".",
"min",
"(",
")",
"dive_data",
"[",
"'depth_min_idx'",
"]",
"[",
"i",
"]",
"=",
"numpy",
".",
"argmin",
"(",
"dive_depths",
")",
"dive_data",
"[",
"'depth_mean'",
"]",
"[",
"i",
"]",
"=",
"numpy",
".",
"mean",
"(",
"dive_depths",
")",
"# TODO Supposedly time of deepest dive... doesn't appear to be that",
"dive_data",
"[",
"'comp_mean'",
"]",
"[",
"i",
"]",
"=",
"numpy",
".",
"mean",
"(",
"1",
"+",
"(",
"1",
"/",
"(",
"0.1",
"*",
"dive_depths",
")",
")",
")",
"# Filter any dives with an endpoint with an index beyond bounds of array",
"dive_data",
"=",
"dive_data",
"[",
"dive_data",
"[",
"'stop_idx'",
"]",
"<",
"len",
"(",
"depths",
")",
"]",
"# Create pandas data frame with following columns, init'd with nans",
"dives",
"=",
"pandas",
".",
"DataFrame",
"(",
"dive_data",
")",
"return",
"dives",
",",
"dive_mask"
] | Find dives in depth data below a minimum dive threshold
Args
----
depths: ndarray
Datalogger depth measurements
min_dive_thresh: float
Minimum depth threshold for which to classify a dive
Returns
-------
dives: pandas.DataFrame
Dive summary information in a pandas DataFrame (one row per dive)
*Columns*:
* dive_id
* start_idx
* stop_idx
* dive_dur
* depth_max
* depth_max_idx
* depth_min
* depth_min_idx
* depth_mean
* comp_mean
dive_mask: ndarray
Boolean mask array over depth data. Cells with `True` are dives and
cells with `False` are not. | [
"Find",
"dives",
"in",
"depth",
"data",
"below",
"a",
"minimum",
"dive",
"threshold"
] | 816563a9c3feb3fa416f1c2921c6b75db34111ad | https://github.com/ryanjdillon/pyotelem/blob/816563a9c3feb3fa416f1c2921c6b75db34111ad/pyotelem/dives.py#L2-L81 |
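A small self-contained sketch exercising finddives2 on synthetic depth data (a single triangular dive crossing the 10 m threshold):
import numpy
depths = numpy.concatenate([numpy.zeros(10),
                            numpy.linspace(0, 40, 50),   # descent
                            numpy.linspace(40, 0, 50),   # ascent
                            numpy.zeros(10)])
dives, dive_mask = finddives2(depths, min_dive_thresh=10)
print(dives[['dive_id', 'dive_dur', 'depth_max']])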
251,536 | ryanjdillon/pyotelem | pyotelem/dives.py | get_des_asc2 | def get_des_asc2(depths, dive_mask, pitch, cutoff, fs, order=5):
'''Get boolean masks of descents and ascents in the depth data
Args
----
depths: ndarray
Datalogger depth measurements
dive_mask: ndarray
Boolean mask array over depth data. Cells with `True` are dives and
cells with `False` are not.
pitch: ndarray
Pitch angle in radians
cutoff: float
Cutoff frequency at which signal will be filtered
fs: float
Sampling frequency
order: int
Order of butter filter to apply
Returns
-------
des_mask: ndarray
Boolean mask of descents in the depth data
asc_mask: ndarray
Boolean mask of ascents in the depth data
'''
import numpy
from . import dsp
asc_mask = numpy.zeros(len(depths), dtype=bool)
des_mask = numpy.zeros(len(depths), dtype=bool)
b, a = dsp.butter_filter(cutoff, fs, order, 'low')
dfilt = dsp.butter_apply(b, a, depths)
dp = numpy.hstack([numpy.diff(dfilt), 0])
asc_mask[dive_mask] = dp[dive_mask] < 0
des_mask[dive_mask] = dp[dive_mask] > 0
# Remove descents/ascents without a corresponding ascent/descent
des_mask, asc_mask = rm_incomplete_des_asc(des_mask, asc_mask)
return des_mask, asc_mask | python | def get_des_asc2(depths, dive_mask, pitch, cutoff, fs, order=5):
'''Get boolean masks of descents and ascents in the depth data
Args
----
depths: ndarray
Datalogger depth measurements
dive_mask: ndarray
Boolean mask array over depth data. Cells with `True` are dives and
cells with `False` are not.
pitch: ndarray
Pitch angle in radians
cutoff: float
Cutoff frequency at which signal will be filtered
fs: float
Sampling frequency
order: int
Order of butter filter to apply
Returns
-------
des_mask: ndarray
Boolean mask of descents in the depth data
asc_mask: ndarray
Boolean mask of ascents in the depth data
'''
import numpy
from . import dsp
asc_mask = numpy.zeros(len(depths), dtype=bool)
des_mask = numpy.zeros(len(depths), dtype=bool)
b, a = dsp.butter_filter(cutoff, fs, order, 'low')
dfilt = dsp.butter_apply(b, a, depths)
dp = numpy.hstack([numpy.diff(dfilt), 0])
asc_mask[dive_mask] = dp[dive_mask] < 0
des_mask[dive_mask] = dp[dive_mask] > 0
# Remove descents/ascents without a corresponding ascent/descent
des_mask, asc_mask = rm_incomplete_des_asc(des_mask, asc_mask)
return des_mask, asc_mask | [
"def",
"get_des_asc2",
"(",
"depths",
",",
"dive_mask",
",",
"pitch",
",",
"cutoff",
",",
"fs",
",",
"order",
"=",
"5",
")",
":",
"import",
"numpy",
"from",
".",
"import",
"dsp",
"asc_mask",
"=",
"numpy",
".",
"zeros",
"(",
"len",
"(",
"depths",
")",
",",
"dtype",
"=",
"bool",
")",
"des_mask",
"=",
"numpy",
".",
"zeros",
"(",
"len",
"(",
"depths",
")",
",",
"dtype",
"=",
"bool",
")",
"b",
",",
"a",
"=",
"dsp",
".",
"butter_filter",
"(",
"cutoff",
",",
"fs",
",",
"order",
",",
"'low'",
")",
"dfilt",
"=",
"dsp",
".",
"butter_apply",
"(",
"b",
",",
"a",
",",
"depths",
")",
"dp",
"=",
"numpy",
".",
"hstack",
"(",
"[",
"numpy",
".",
"diff",
"(",
"dfilt",
")",
",",
"0",
"]",
")",
"asc_mask",
"[",
"dive_mask",
"]",
"=",
"dp",
"[",
"dive_mask",
"]",
"<",
"0",
"des_mask",
"[",
"dive_mask",
"]",
"=",
"dp",
"[",
"dive_mask",
"]",
">",
"0",
"# Remove descents/ascents withough a corresponding ascent/descent",
"des_mask",
",",
"asc_mask",
"=",
"rm_incomplete_des_asc",
"(",
"des_mask",
",",
"asc_mask",
")",
"return",
"des_mask",
",",
"asc_mask"
] | Get boolean masks of descents and ascents in the depth data
Args
----
depths: ndarray
Datalogger depth measurements
dive_mask: ndarray
Boolean mask array over depth data. Cells with `True` are dives and
cells with `False` are not.
pitch: ndarray
Pitch angle in radians
cutoff: float
Cutoff frequency at which signal will be filtered
fs: float
Sampling frequency
order: int
Order of butter filter to apply
Returns
-------
des_mask: ndarray
Boolean mask of descents in the depth data
asc_mask: ndarray
Boolean mask of ascents in the depth data | [
"Get",
"boolean",
"masks",
"of",
"descents",
"and",
"ascents",
"in",
"the",
"depth",
"data"
] | 816563a9c3feb3fa416f1c2921c6b75db34111ad | https://github.com/ryanjdillon/pyotelem/blob/816563a9c3feb3fa416f1c2921c6b75db34111ad/pyotelem/dives.py#L84-L126 |
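Continuing the synthetic example, a hedged call; cutoff and fs are placeholder values, and since pitch is accepted but never read by this implementation, any placeholder will do:
des_mask, asc_mask = get_des_asc2(depths, dive_mask, pitch=None,
                                  cutoff=0.09, fs=16.0, order=5)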
251,537 | ryanjdillon/pyotelem | pyotelem/dives.py | rm_incomplete_des_asc | def rm_incomplete_des_asc(des_mask, asc_mask):
'''Remove descents-ascents that have no corresponding ascent-descent
Args
----
des_mask: ndarray
Boolean mask of descents in the depth data
asc_mask: ndarray
Boolean mask of ascents in the depth data
Returns
-------
des_mask: ndarray
Boolean mask of descents with erroneous regions removed
asc_mask: ndarray
Boolean mask of ascents with erroneous regions removed
'''
from . import utils
# Get start/stop indices for descents and ascents
des_start, des_stop = utils.contiguous_regions(des_mask)
asc_start, asc_stop = utils.contiguous_regions(asc_mask)
des_mask = utils.rm_regions(des_mask, asc_mask, des_start, des_stop)
asc_mask = utils.rm_regions(asc_mask, des_mask, asc_start, asc_stop)
return des_mask, asc_mask | python | def rm_incomplete_des_asc(des_mask, asc_mask):
'''Remove descents-ascents that have no corresponding ascent-descent
Args
----
des_mask: ndarray
Boolean mask of descents in the depth data
asc_mask: ndarray
Boolean mask of ascents in the depth data
Returns
-------
des_mask: ndarray
Boolean mask of descents with erroneous regions removed
asc_mask: ndarray
Boolean mask of ascents with erroneous regions removed
'''
from . import utils
# Get start/stop indices for descents and ascents
des_start, des_stop = utils.contiguous_regions(des_mask)
asc_start, asc_stop = utils.contiguous_regions(asc_mask)
des_mask = utils.rm_regions(des_mask, asc_mask, des_start, des_stop)
asc_mask = utils.rm_regions(asc_mask, des_mask, asc_start, asc_stop)
return des_mask, asc_mask | [
"def",
"rm_incomplete_des_asc",
"(",
"des_mask",
",",
"asc_mask",
")",
":",
"from",
".",
"import",
"utils",
"# Get start/stop indices for descents and ascents",
"des_start",
",",
"des_stop",
"=",
"utils",
".",
"contiguous_regions",
"(",
"des_mask",
")",
"asc_start",
",",
"asc_stop",
"=",
"utils",
".",
"contiguous_regions",
"(",
"asc_mask",
")",
"des_mask",
"=",
"utils",
".",
"rm_regions",
"(",
"des_mask",
",",
"asc_mask",
",",
"des_start",
",",
"des_stop",
")",
"asc_mask",
"=",
"utils",
".",
"rm_regions",
"(",
"asc_mask",
",",
"des_mask",
",",
"asc_start",
",",
"asc_stop",
")",
"return",
"des_mask",
",",
"asc_mask"
] | Remove descents-ascents that have no corresponding ascent-descent
Args
----
des_mask: ndarray
Boolean mask of descents in the depth data
asc_mask: ndarray
Boolean mask of ascents in the depth data
Returns
-------
des_mask: ndarray
Boolean mask of descents with erroneous regions removed
asc_mask: ndarray
Boolean mask of ascents with erroneous regions removed | [
"Remove",
"descents",
"-",
"ascents",
"that",
"have",
"no",
"corresponding",
"ascent",
"-",
"descent"
] | 816563a9c3feb3fa416f1c2921c6b75db34111ad | https://github.com/ryanjdillon/pyotelem/blob/816563a9c3feb3fa416f1c2921c6b75db34111ad/pyotelem/dives.py#L129-L155 |
251,538 | ryanjdillon/pyotelem | pyotelem/dives.py | get_bottom | def get_bottom(depths, des_mask, asc_mask):
'''Get boolean mask of regions in depths the animal is at the bottom
Args
----
des_mask: ndarray
Boolean mask of descents in the depth data
asc_mask: ndarray
Boolean mask of ascents in the depth data
Returns
-------
BOTTOM: ndarray (n,4)
Indices and depths for when the animal is at the bottom
*Index positions*:
0. start ind
1. depth at start
2. stop ind
3. depth at stop
'''
import numpy
from . import utils
# Get start/stop indices for descents and ascents
des_start, des_stop = utils.contiguous_regions(des_mask)
asc_start, asc_stop = utils.contiguous_regions(asc_mask)
# Bottom time is at stop of descent until start of ascent
bottom_len = min(len(des_stop), len(asc_start))
bottom_start = des_stop[:bottom_len]
bottom_stop = asc_start[:bottom_len]
BOTTOM = numpy.zeros((len(bottom_start),4), dtype=float)
# Time (seconds) at start of bottom phase/end of descent
BOTTOM[:,0] = bottom_start
# Depth (m) at start of bottom phase/end of descent
BOTTOM[:,1] = depths[bottom_start]
# Time (seconds) at end of bottom phase/start of ascent
BOTTOM[:,2] = bottom_stop
# Depth (m) at end of bottom phase/start of ascent
BOTTOM[:,3] = depths[bottom_stop]
return BOTTOM | python | def get_bottom(depths, des_mask, asc_mask):
'''Get boolean mask of regions in depths the animal is at the bottom
Args
----
des_mask: ndarray
Boolean mask of descents in the depth data
asc_mask: ndarray
Boolean mask of ascents in the depth data
Returns
-------
BOTTOM: ndarray (n,4)
Indices and depths for when the animal is at the bottom
*Index positions*:
0. start ind
1. depth at start
2. stop ind
3. depth at stop
'''
import numpy
from . import utils
# Get start/stop indices for descents and ascents
des_start, des_stop = utils.contiguous_regions(des_mask)
asc_start, asc_stop = utils.contiguous_regions(asc_mask)
# Bottom time is at stop of descent until start of ascent
bottom_len = min(len(des_stop), len(asc_start))
bottom_start = des_stop[:bottom_len]
bottom_stop = asc_start[:bottom_len]
BOTTOM = numpy.zeros((len(bottom_start),4), dtype=float)
# Time (seconds) at start of bottom phase/end of descent
BOTTOM[:,0] = bottom_start
# Depth (m) at start of bottom phase/end of descent
BOTTOM[:,1] = depths[bottom_start]
# Time (seconds) at end of bottom phase/start of ascent
BOTTOM[:,2] = bottom_stop
# Depth (m) at end of bottom phase/start of ascent
BOTTOM[:,3] = depths[bottom_stop]
return BOTTOM | [
"def",
"get_bottom",
"(",
"depths",
",",
"des_mask",
",",
"asc_mask",
")",
":",
"import",
"numpy",
"from",
".",
"import",
"utils",
"# Get start/stop indices for descents and ascents",
"des_start",
",",
"des_stop",
"=",
"utils",
".",
"contiguous_regions",
"(",
"des_mask",
")",
"asc_start",
",",
"asc_stop",
"=",
"utils",
".",
"contiguous_regions",
"(",
"asc_mask",
")",
"# Bottom time is at stop of descent until start of ascent",
"bottom_len",
"=",
"min",
"(",
"len",
"(",
"des_stop",
")",
",",
"len",
"(",
"asc_start",
")",
")",
"bottom_start",
"=",
"des_stop",
"[",
":",
"bottom_len",
"]",
"bottom_stop",
"=",
"asc_start",
"[",
":",
"bottom_len",
"]",
"BOTTOM",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"len",
"(",
"bottom_start",
")",
",",
"4",
")",
",",
"dtype",
"=",
"float",
")",
"# Time (seconds) at start of bottom phase/end of descent",
"BOTTOM",
"[",
":",
",",
"0",
"]",
"=",
"bottom_start",
"# Depth (m) at start of bottom phase/end of descent",
"BOTTOM",
"[",
":",
",",
"1",
"]",
"=",
"depths",
"[",
"bottom_start",
"]",
"# Time (seconds) at end of bottom phase/start of asscent",
"BOTTOM",
"[",
":",
",",
"2",
"]",
"=",
"bottom_stop",
"# Depth (m) at end of bottom phase/start of descent",
"BOTTOM",
"[",
":",
",",
"3",
"]",
"=",
"depths",
"[",
"bottom_stop",
"]",
"return",
"BOTTOM"
] | Get boolean mask of regions in depths the animal is at the bottom
Args
----
des_mask: ndarray
Boolean mask of descents in the depth data
asc_mask: ndarray
Boolean mask of ascents in the depth data
Returns
-------
BOTTOM: ndarray (n,4)
Indices and depths for when the animal is at the bottom
*Index positions*:
0. start ind
1. depth at start
2. stop ind
3. depth at stop | [
"Get",
"boolean",
"mask",
"of",
"regions",
"in",
"depths",
"the",
"animal",
"is",
"at",
"the",
"bottom"
] | 816563a9c3feb3fa416f1c2921c6b75db34111ad | https://github.com/ryanjdillon/pyotelem/blob/816563a9c3feb3fa416f1c2921c6b75db34111ad/pyotelem/dives.py#L158-L207 |
251,539 | ryanjdillon/pyotelem | pyotelem/dives.py | get_phase | def get_phase(n_samples, des_mask, asc_mask):
'''Get the directional phase sign for each sample in depths
Args
----
n_samples: int
Length of output phase array
des_mask: numpy.ndarray, shape (n,)
Boolean mask of values where animal is descending
asc_mask: numpy.ndarray, shape(n,)
Boolean mask of values where animal is ascending
Returns
-------
phase: numpy.ndarray, shape (n,)
Signed integer array values representing animal's dive phase
*Phases*:
* 0: neither ascending/descending
* 1: ascending
* -1: descending.
'''
import numpy
phase = numpy.zeros(n_samples, dtype=int)
phase[asc_mask] = 1
phase[des_mask] = -1
return phase | python | def get_phase(n_samples, des_mask, asc_mask):
'''Get the directional phase sign for each sample in depths
Args
----
n_samples: int
Length of output phase array
des_mask: numpy.ndarray, shape (n,)
Boolean mask of values where animal is descending
asc_mask: numpy.ndarray, shape(n,)
Boolean mask of values where animal is ascending
Returns
-------
phase: numpy.ndarray, shape (n,)
Signed integer array values representing animal's dive phase
*Phases*:
* 0: neither ascending/descending
* 1: ascending
* -1: descending.
'''
import numpy
phase = numpy.zeros(n_samples, dtype=int)
phase[asc_mask] = 1
phase[des_mask] = -1
return phase | [
"def",
"get_phase",
"(",
"n_samples",
",",
"des_mask",
",",
"asc_mask",
")",
":",
"import",
"numpy",
"phase",
"=",
"numpy",
".",
"zeros",
"(",
"n_samples",
",",
"dtype",
"=",
"int",
")",
"phase",
"[",
"asc_mask",
"]",
"=",
"1",
"phase",
"[",
"des_mask",
"]",
"=",
"-",
"1",
"return",
"phase"
] | Get the directional phase sign for each sample in depths
Args
----
n_samples: int
Length of output phase array
des_mask: numpy.ndarray, shape (n,)
Boolean mask of values where animal is descending
asc_mask: numpy.ndarray, shape(n,)
Boolean mask of values where animal is ascending
Returns
-------
phase: numpy.ndarray, shape (n,)
Signed integer array values representing animal's dive phase
*Phases*:
* 0: neither ascending/descending
* 1: ascending
* -1: descending. | [
"Get",
"the",
"directional",
"phase",
"sign",
"for",
"each",
"sample",
"in",
"depths"
] | 816563a9c3feb3fa416f1c2921c6b75db34111ad | https://github.com/ryanjdillon/pyotelem/blob/816563a9c3feb3fa416f1c2921c6b75db34111ad/pyotelem/dives.py#L210-L239 |
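The two masks then label every sample in one pass:
phase = get_phase(len(depths), des_mask, asc_mask)  # -1 descending, 1 ascending, 0 otherwise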
251,540 | alexhayes/django-toolkit | django_toolkit/file.py | tempfilename | def tempfilename(**kwargs):
"""
Reserve a temporary file for future use.
This is useful if you want to get a temporary file name, write to it in the
future and ensure that if an exception is thrown the temporary file is removed.
"""
kwargs.update(delete=False)
f = NamedTemporaryFile(**kwargs)
f.close()
try:
yield f.name
except Exception:
if os.path.exists(f.name):
# Ensure we clean up after ourself
os.unlink(f.name)
raise | python | def tempfilename(**kwargs):
"""
Reserve a temporary file for future use.
This is useful if you want to get a temporary file name, write to it in the
future and ensure that if an exception is thrown the temporary file is removed.
"""
kwargs.update(delete=False)
f = NamedTemporaryFile(**kwargs)
f.close()
try:
yield f.name
except Exception:
if os.path.exists(f.name):
# Ensure we clean up after ourself
os.unlink(f.name)
raise | [
"def",
"tempfilename",
"(",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"update",
"(",
"delete",
"=",
"False",
")",
"try",
":",
"f",
"=",
"NamedTemporaryFile",
"(",
"*",
"*",
"kwargs",
")",
"f",
".",
"close",
"(",
")",
"yield",
"f",
".",
"name",
"except",
"Exception",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"f",
".",
"name",
")",
":",
"# Ensure we clean up after ourself",
"os",
".",
"unlink",
"(",
"f",
".",
"name",
")",
"raise"
] | Reserve a temporary file for future use.
This is useful if you want to get a temporary file name, write to it in the
future and ensure that if an exception is thrown the temporary file is removed. | [
"Reserve",
"a",
"temporary",
"file",
"for",
"future",
"use",
"."
] | b64106392fad596defc915b8235fe6e1d0013b5b | https://github.com/alexhayes/django-toolkit/blob/b64106392fad596defc915b8235fe6e1d0013b5b/django_toolkit/file.py#L37-L53 |
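A short usage sketch, assuming the generator is wrapped with contextlib.contextmanager as its yield-based structure suggests; the suffix is arbitrary and the file is left on disk unless the block raises:
with tempfilename(suffix='.csv') as path:
    with open(path, 'w') as out:
        out.write('a,b\n1,2\n')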
251,541 | alexhayes/django-toolkit | django_toolkit/file.py | makedirs | def makedirs(p):
"""
A makedirs that avoids a race condition for multiple processes attempting to create the same directory.
"""
try:
os.makedirs(p, settings.FILE_UPLOAD_PERMISSIONS)
except OSError:
# Perhaps someone beat us to the punch?
if not os.path.isdir(p):
# Nope, must be something else...
raise | python | def makedirs(p):
"""
A makedirs that avoids a race condition for multiple processes attempting to create the same directory.
"""
try:
os.makedirs(p, settings.FILE_UPLOAD_PERMISSIONS)
except OSError:
# Perhaps someone beat us to the punch?
if not os.path.isdir(p):
# Nope, must be something else...
raise | [
"def",
"makedirs",
"(",
"p",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"p",
",",
"settings",
".",
"FILE_UPLOAD_PERMISSIONS",
")",
"except",
"OSError",
":",
"# Perhaps someone beat us to the punch?",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"p",
")",
":",
"# Nope, must be something else...",
"raise"
] | A makedirs that avoids a race condition for multiple processes attempting to create the same directory. | [
"A",
"makedirs",
"that",
"avoids",
"a",
"race",
"conditions",
"for",
"multiple",
"processes",
"attempting",
"to",
"create",
"the",
"same",
"directory",
"."
] | b64106392fad596defc915b8235fe6e1d0013b5b | https://github.com/alexhayes/django-toolkit/blob/b64106392fad596defc915b8235fe6e1d0013b5b/django_toolkit/file.py#L80-L90 |
251,542 | rorr73/LifeSOSpy | lifesospy/device.py | SpecialDevice.control_high_limit | def control_high_limit(self) -> Optional[Union[int, float]]:
"""
Control high limit setting for a special sensor.
For LS-10/LS-20 base units only.
"""
return self._get_field_value(SpecialDevice.PROP_CONTROL_HIGH_LIMIT) | python | def control_high_limit(self) -> Optional[Union[int, float]]:
"""
Control high limit setting for a special sensor.
For LS-10/LS-20 base units only.
"""
return self._get_field_value(SpecialDevice.PROP_CONTROL_HIGH_LIMIT) | [
"def",
"control_high_limit",
"(",
"self",
")",
"->",
"Optional",
"[",
"Union",
"[",
"int",
",",
"float",
"]",
"]",
":",
"return",
"self",
".",
"_get_field_value",
"(",
"SpecialDevice",
".",
"PROP_CONTROL_HIGH_LIMIT",
")"
] | Control high limit setting for a special sensor.
For LS-10/LS-20 base units only. | [
"Control",
"high",
"limit",
"setting",
"for",
"a",
"special",
"sensor",
"."
] | 62360fbab2e90bf04d52b547093bdab2d4e389b4 | https://github.com/rorr73/LifeSOSpy/blob/62360fbab2e90bf04d52b547093bdab2d4e389b4/lifesospy/device.py#L344-L350 |
251,543 | rorr73/LifeSOSpy | lifesospy/device.py | SpecialDevice.control_low_limit | def control_low_limit(self) -> Optional[Union[int, float]]:
"""
Control low limit setting for a special sensor.
For LS-10/LS-20 base units only.
"""
return self._get_field_value(SpecialDevice.PROP_CONTROL_LOW_LIMIT) | python | def control_low_limit(self) -> Optional[Union[int, float]]:
"""
Control low limit setting for a special sensor.
For LS-10/LS-20 base units only.
"""
return self._get_field_value(SpecialDevice.PROP_CONTROL_LOW_LIMIT) | [
"def",
"control_low_limit",
"(",
"self",
")",
"->",
"Optional",
"[",
"Union",
"[",
"int",
",",
"float",
"]",
"]",
":",
"return",
"self",
".",
"_get_field_value",
"(",
"SpecialDevice",
".",
"PROP_CONTROL_LOW_LIMIT",
")"
] | Control low limit setting for a special sensor.
For LS-10/LS-20 base units only. | [
"Control",
"low",
"limit",
"setting",
"for",
"a",
"special",
"sensor",
"."
] | 62360fbab2e90bf04d52b547093bdab2d4e389b4 | https://github.com/rorr73/LifeSOSpy/blob/62360fbab2e90bf04d52b547093bdab2d4e389b4/lifesospy/device.py#L365-L371 |
251,544 | rorr73/LifeSOSpy | lifesospy/device.py | SpecialDevice.current_reading | def current_reading(self) -> Optional[Union[int, float]]:
"""Current reading for a special sensor."""
return self._get_field_value(SpecialDevice.PROP_CURRENT_READING) | python | def current_reading(self) -> Optional[Union[int, float]]:
"""Current reading for a special sensor."""
return self._get_field_value(SpecialDevice.PROP_CURRENT_READING) | [
"def",
"current_reading",
"(",
"self",
")",
"->",
"Optional",
"[",
"Union",
"[",
"int",
",",
"float",
"]",
"]",
":",
"return",
"self",
".",
"_get_field_value",
"(",
"SpecialDevice",
".",
"PROP_CURRENT_READING",
")"
] | Current reading for a special sensor. | [
"Current",
"reading",
"for",
"a",
"special",
"sensor",
"."
] | 62360fbab2e90bf04d52b547093bdab2d4e389b4 | https://github.com/rorr73/LifeSOSpy/blob/62360fbab2e90bf04d52b547093bdab2d4e389b4/lifesospy/device.py#L374-L376 |
251,545 | rorr73/LifeSOSpy | lifesospy/device.py | SpecialDevice.high_limit | def high_limit(self) -> Optional[Union[int, float]]:
"""
High limit setting for a special sensor.
For LS-10/LS-20 base units this is the alarm high limit.
For LS-30 base units, this is either alarm OR control high limit,
as indicated by special_status ControlAlarm bit flag.
"""
return self._get_field_value(SpecialDevice.PROP_HIGH_LIMIT) | python | def high_limit(self) -> Optional[Union[int, float]]:
"""
High limit setting for a special sensor.
For LS-10/LS-20 base units this is the alarm high limit.
For LS-30 base units, this is either alarm OR control high limit,
as indicated by special_status ControlAlarm bit flag.
"""
return self._get_field_value(SpecialDevice.PROP_HIGH_LIMIT) | [
"def",
"high_limit",
"(",
"self",
")",
"->",
"Optional",
"[",
"Union",
"[",
"int",
",",
"float",
"]",
"]",
":",
"return",
"self",
".",
"_get_field_value",
"(",
"SpecialDevice",
".",
"PROP_HIGH_LIMIT",
")"
] | High limit setting for a special sensor.
For LS-10/LS-20 base units this is the alarm high limit.
For LS-30 base units, this is either alarm OR control high limit,
as indicated by special_status ControlAlarm bit flag. | [
"High",
"limit",
"setting",
"for",
"a",
"special",
"sensor",
"."
] | 62360fbab2e90bf04d52b547093bdab2d4e389b4 | https://github.com/rorr73/LifeSOSpy/blob/62360fbab2e90bf04d52b547093bdab2d4e389b4/lifesospy/device.py#L379-L387 |
251,546 | rorr73/LifeSOSpy | lifesospy/device.py | SpecialDevice.low_limit | def low_limit(self) -> Optional[Union[int, float]]:
"""
Low limit setting for a special sensor.
For LS-10/LS-20 base units this is the alarm low limit.
For LS-30 base units, this is either alarm OR control low limit,
as indicated by special_status ControlAlarm bit flag.
"""
return self._get_field_value(SpecialDevice.PROP_LOW_LIMIT) | python | def low_limit(self) -> Optional[Union[int, float]]:
"""
Low limit setting for a special sensor.
For LS-10/LS-20 base units this is the alarm low limit.
For LS-30 base units, this is either alarm OR control low limit,
as indicated by special_status ControlAlarm bit flag.
"""
return self._get_field_value(SpecialDevice.PROP_LOW_LIMIT) | [
"def",
"low_limit",
"(",
"self",
")",
"->",
"Optional",
"[",
"Union",
"[",
"int",
",",
"float",
"]",
"]",
":",
"return",
"self",
".",
"_get_field_value",
"(",
"SpecialDevice",
".",
"PROP_LOW_LIMIT",
")"
] | Low limit setting for a special sensor.
For LS-10/LS-20 base units this is the alarm low limit.
For LS-30 base units, this is either alarm OR control low limit,
as indicated by special_status ControlAlarm bit flag. | [
"Low",
"limit",
"setting",
"for",
"a",
"special",
"sensor",
"."
] | 62360fbab2e90bf04d52b547093bdab2d4e389b4 | https://github.com/rorr73/LifeSOSpy/blob/62360fbab2e90bf04d52b547093bdab2d4e389b4/lifesospy/device.py#L390-L398 |
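For LS-30 base units the high_limit/low_limit pair above carries either the alarm or the control thresholds, so a consumer has to branch on the ControlAlarm bit of special_status. A hedged sketch; the flag's enum name is an assumption, not taken from the library:

    # 'SpecialStatus.ControlAlarm' is an assumed name; consult
    # lifesospy's enums for the real bit-flag member before use.
    # 'device' is a SpecialDevice instance obtained elsewhere.
    if device.special_status & SpecialStatus.ControlAlarm:
        control_range = (device.low_limit, device.high_limit)
    else:
        alarm_range = (device.low_limit, device.high_limit)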
251,547 | rorr73/LifeSOSpy | lifesospy/device.py | DeviceCollection.get | def get(self, device_id: int) -> Optional[Device]:
"""Get device using the specified ID, or None if not found."""
return self._devices.get(device_id) | python | def get(self, device_id: int) -> Optional[Device]:
"""Get device using the specified ID, or None if not found."""
return self._devices.get(device_id) | [
"def",
"get",
"(",
"self",
",",
"device_id",
":",
"int",
")",
"->",
"Optional",
"[",
"Device",
"]",
":",
"return",
"self",
".",
"_devices",
".",
"get",
"(",
"device_id",
")"
] | Get device using the specified ID, or None if not found. | [
"Get",
"device",
"using",
"the",
"specified",
"ID",
"or",
"None",
"if",
"not",
"found",
"."
] | 62360fbab2e90bf04d52b547093bdab2d4e389b4 | https://github.com/rorr73/LifeSOSpy/blob/62360fbab2e90bf04d52b547093bdab2d4e389b4/lifesospy/device.py#L511-L513 |
251,548 | coopie/ttv | ttv.py | make_ttv_yaml | def make_ttv_yaml(corpora, path_to_ttv_file, ttv_ratio=DEFAULT_TTV_RATIO, deterministic=False):
""" Create a test, train, validation from the corpora given and saves it as a YAML filename.
Each set will be subject independent, meaning that no one subject can have data in more than one
set
# Arguments;
corpora: a list of the paths to corpora used (these have to be formatted accoring to notes.md)
path_to_ttv_file: the path to where the YAML file be be saved
ttv_ratio: a tuple (e.g. (1,4,4) of the relative sizoe of each set)
deterministic: whether or not to shuffle the resources around when making the set.
"""
dataset = get_dataset(corpora)
data_sets = make_ttv(dataset, ttv_ratio=ttv_ratio, deterministic=deterministic)
def get_for_ttv(key):
return (
data_sets['test'][key],
data_sets['train'][key],
data_sets['validation'][key]
)
test, train, validation = get_for_ttv('paths')
number_of_files_for_each_set = list(get_for_ttv('number_of_files'))
number_of_subjects_for_each_set = [len(x) for x in get_for_ttv('subjects')]
dict_for_yaml = {
'split': number_of_files_for_each_set,
'subject_split': number_of_subjects_for_each_set,
"test": test,
"train": train,
"validation": validation
}
with open(path_to_ttv_file, 'w') as f:
yaml.dump(dict_for_yaml, f, default_flow_style=False) | python | def make_ttv_yaml(corpora, path_to_ttv_file, ttv_ratio=DEFAULT_TTV_RATIO, deterministic=False):
""" Create a test, train, validation from the corpora given and saves it as a YAML filename.
Each set will be subject independent, meaning that no one subject can have data in more than one
set
# Arguments;
corpora: a list of the paths to corpora used (these have to be formatted accoring to notes.md)
path_to_ttv_file: the path to where the YAML file be be saved
ttv_ratio: a tuple (e.g. (1,4,4) of the relative sizoe of each set)
deterministic: whether or not to shuffle the resources around when making the set.
"""
dataset = get_dataset(corpora)
data_sets = make_ttv(dataset, ttv_ratio=ttv_ratio, deterministic=deterministic)
def get_for_ttv(key):
return (
data_sets['test'][key],
data_sets['train'][key],
data_sets['validation'][key]
)
test, train, validation = get_for_ttv('paths')
number_of_files_for_each_set = list(get_for_ttv('number_of_files'))
number_of_subjects_for_each_set = [len(x) for x in get_for_ttv('subjects')]
dict_for_yaml = {
'split': number_of_files_for_each_set,
'subject_split': number_of_subjects_for_each_set,
"test": test,
"train": train,
"validation": validation
}
with open(path_to_ttv_file, 'w') as f:
yaml.dump(dict_for_yaml, f, default_flow_style=False) | [
"def",
"make_ttv_yaml",
"(",
"corpora",
",",
"path_to_ttv_file",
",",
"ttv_ratio",
"=",
"DEFAULT_TTV_RATIO",
",",
"deterministic",
"=",
"False",
")",
":",
"dataset",
"=",
"get_dataset",
"(",
"corpora",
")",
"data_sets",
"=",
"make_ttv",
"(",
"dataset",
",",
"ttv_ratio",
"=",
"ttv_ratio",
",",
"deterministic",
"=",
"deterministic",
")",
"def",
"get_for_ttv",
"(",
"key",
")",
":",
"return",
"(",
"data_sets",
"[",
"'test'",
"]",
"[",
"key",
"]",
",",
"data_sets",
"[",
"'train'",
"]",
"[",
"key",
"]",
",",
"data_sets",
"[",
"'validation'",
"]",
"[",
"key",
"]",
")",
"test",
",",
"train",
",",
"validation",
"=",
"get_for_ttv",
"(",
"'paths'",
")",
"number_of_files_for_each_set",
"=",
"list",
"(",
"get_for_ttv",
"(",
"'number_of_files'",
")",
")",
"number_of_subjects_for_each_set",
"=",
"[",
"len",
"(",
"x",
")",
"for",
"x",
"in",
"get_for_ttv",
"(",
"'subjects'",
")",
"]",
"dict_for_yaml",
"=",
"{",
"'split'",
":",
"number_of_files_for_each_set",
",",
"'subject_split'",
":",
"number_of_subjects_for_each_set",
",",
"\"test\"",
":",
"test",
",",
"\"train\"",
":",
"train",
",",
"\"validation\"",
":",
"validation",
"}",
"with",
"open",
"(",
"path_to_ttv_file",
",",
"'w'",
")",
"as",
"f",
":",
"yaml",
".",
"dump",
"(",
"dict_for_yaml",
",",
"f",
",",
"default_flow_style",
"=",
"False",
")"
] | Create a test, train, validation split from the corpora given and save it as a YAML file.
Each set will be subject independent, meaning that no one subject can have data in more than one
set.
# Arguments:
corpora: a list of the paths to corpora used (these have to be formatted according to notes.md)
path_to_ttv_file: the path to where the YAML file will be saved
ttv_ratio: a tuple (e.g. (1,4,4)) of the relative size of each set
deterministic: whether or not to shuffle the resources around when making the set. | [
"Create",
"a",
"test",
"train",
"validation",
"from",
"the",
"corpora",
"given",
"and",
"saves",
"it",
"as",
"a",
"YAML",
"filename",
"."
] | 43e2bcddf58945f27665d4db1362473842eb26f3 | https://github.com/coopie/ttv/blob/43e2bcddf58945f27665d4db1362473842eb26f3/ttv.py#L28-L65 |
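A sketch of driving make_ttv_yaml, assuming two corpora laid out as notes.md describes; the paths and ratio here are illustrative only.

    from ttv import make_ttv_yaml

    # Hypothetical corpora directories, formatted per notes.md.
    corpora = ['data/corpus_a', 'data/corpus_b']
    # One part test, four parts train, four parts validation.
    make_ttv_yaml(corpora, 'ttv_split.yaml', ttv_ratio=(1, 4, 4))
    # ttv_split.yaml now lists the subject-independent test/train/
    # validation file sets plus 'split' and 'subject_split' summaries.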
251,549 | naphatkrit/easyci | easyci/user_config.py | load_user_config | def load_user_config(vcs):
"""Load the user config
Args:
vcs (easyci.vcs.base.Vcs) - the vcs object for the current project
Returns:
dict - the config
Raises:
ConfigFormatError
ConfigNotFoundError
"""
config_path = os.path.join(vcs.path, 'eci.yaml')
if not os.path.exists(config_path):
raise ConfigNotFoundError
with open(config_path, 'r') as f:
try:
config = yaml.safe_load(f)
except yaml.YAMLError:
raise ConfigFormatError
if not isinstance(config, dict):
raise ConfigFormatError
for k, v in _default_config.iteritems():
config.setdefault(k, v)
for k, v in _config_types.iteritems():
if not isinstance(config[k], v):
raise ConfigFormatError
return config | python | def load_user_config(vcs):
"""Load the user config
Args:
vcs (easyci.vcs.base.Vcs) - the vcs object for the current project
Returns:
dict - the config
Raises:
ConfigFormatError
ConfigNotFoundError
"""
config_path = os.path.join(vcs.path, 'eci.yaml')
if not os.path.exists(config_path):
raise ConfigNotFoundError
with open(config_path, 'r') as f:
try:
config = yaml.safe_load(f)
except yaml.YAMLError:
raise ConfigFormatError
if not isinstance(config, dict):
raise ConfigFormatError
for k, v in _default_config.iteritems():
config.setdefault(k, v)
for k, v in _config_types.iteritems():
if not isinstance(config[k], v):
raise ConfigFormatError
return config | [
"def",
"load_user_config",
"(",
"vcs",
")",
":",
"config_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"vcs",
".",
"path",
",",
"'eci.yaml'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"config_path",
")",
":",
"raise",
"ConfigNotFoundError",
"with",
"open",
"(",
"config_path",
",",
"'r'",
")",
"as",
"f",
":",
"try",
":",
"config",
"=",
"yaml",
".",
"safe_load",
"(",
"f",
")",
"except",
"yaml",
".",
"YAMLError",
":",
"raise",
"ConfigFormatError",
"if",
"not",
"isinstance",
"(",
"config",
",",
"dict",
")",
":",
"raise",
"ConfigFormatError",
"for",
"k",
",",
"v",
"in",
"_default_config",
".",
"iteritems",
"(",
")",
":",
"config",
".",
"setdefault",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"_config_types",
".",
"iteritems",
"(",
")",
":",
"if",
"not",
"isinstance",
"(",
"config",
"[",
"k",
"]",
",",
"v",
")",
":",
"raise",
"ConfigFormatError",
"return",
"config"
] | Load the user config
Args:
vcs (easyci.vcs.base.Vcs) - the vcs object for the current project
Returns:
dict - the config
Raises:
ConfigFormatError
ConfigNotFoundError | [
"Load",
"the",
"user",
"config"
] | 7aee8d7694fe4e2da42ce35b0f700bc840c8b95f | https://github.com/naphatkrit/easyci/blob/7aee8d7694fe4e2da42ce35b0f700bc840c8b95f/easyci/user_config.py#L27-L55 |
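A minimal calling sketch, assuming a git-backed project; the GitVcs import is an assumption standing in for whichever concrete easyci.vcs.base.Vcs subclass the project provides.

    from easyci.vcs.git import GitVcs  # assumed concrete Vcs subclass

    vcs = GitVcs()
    try:
        config = load_user_config(vcs)
    except ConfigNotFoundError:
        print 'no eci.yaml at the repository root'
    except ConfigFormatError:
        print 'eci.yaml is not valid YAML or has wrongly typed keys'
    else:
        print config  # user values merged over _default_config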
251,550 | jmgilman/Neolib | neolib/pyamf/remoting/gateway/__init__.py | expose_request | def expose_request(func):
"""
A decorator that adds an expose_request flag to the underlying callable.
@raise TypeError: C{func} must be callable.
"""
if not python.callable(func):
raise TypeError("func must be callable")
if isinstance(func, types.UnboundMethodType):
setattr(func.im_func, '_pyamf_expose_request', True)
else:
setattr(func, '_pyamf_expose_request', True)
return func | python | def expose_request(func):
"""
A decorator that adds an expose_request flag to the underlying callable.
@raise TypeError: C{func} must be callable.
"""
if not python.callable(func):
raise TypeError("func must be callable")
if isinstance(func, types.UnboundMethodType):
setattr(func.im_func, '_pyamf_expose_request', True)
else:
setattr(func, '_pyamf_expose_request', True)
return func | [
"def",
"expose_request",
"(",
"func",
")",
":",
"if",
"not",
"python",
".",
"callable",
"(",
"func",
")",
":",
"raise",
"TypeError",
"(",
"\"func must be callable\"",
")",
"if",
"isinstance",
"(",
"func",
",",
"types",
".",
"UnboundMethodType",
")",
":",
"setattr",
"(",
"func",
".",
"im_func",
",",
"'_pyamf_expose_request'",
",",
"True",
")",
"else",
":",
"setattr",
"(",
"func",
",",
"'_pyamf_expose_request'",
",",
"True",
")",
"return",
"func"
] | A decorator that adds an expose_request flag to the underlying callable.
@raise TypeError: C{func} must be callable. | [
"A",
"decorator",
"that",
"adds",
"an",
"expose_request",
"flag",
"to",
"the",
"underlying",
"callable",
"."
] | 228fafeaed0f3195676137732384a14820ae285c | https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/remoting/gateway/__init__.py#L545-L559 |
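A short sketch of the decorator in use; EchoService is hypothetical. Once the flag is set, a gateway that honors it passes the underlying HTTP request as the first argument (see mustExposeRequest and callServiceRequest further down).

    class EchoService(object):
        @expose_request
        def echo(self, http_request, message):
            # http_request is whatever request object the hosting web
            # framework uses; the gateway injects it due to the flag.
            return message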
251,551 | jmgilman/Neolib | neolib/pyamf/remoting/gateway/__init__.py | BaseGateway.addService | def addService(self, service, name=None, description=None,
authenticator=None, expose_request=None, preprocessor=None):
"""
Adds a service to the gateway.
@param service: The service to add to the gateway.
@type service: C{callable}, class instance, or a module
@param name: The name of the service.
@type name: C{str}
@raise pyamf.remoting.RemotingError: Service already exists.
@raise TypeError: C{service} cannot be a scalar value.
@raise TypeError: C{service} must be C{callable} or a module.
"""
if isinstance(service, (int, long, float, basestring)):
raise TypeError("Service cannot be a scalar value")
allowed_types = (types.ModuleType, types.FunctionType, types.DictType,
types.MethodType, types.InstanceType, types.ObjectType)
if not python.callable(service) and not isinstance(service, allowed_types):
raise TypeError("Service must be a callable, module, or an object")
if name is None:
# TODO: include the module in the name
if isinstance(service, (type, types.ClassType)):
name = service.__name__
elif isinstance(service, types.FunctionType):
name = service.func_name
elif isinstance(service, types.ModuleType):
name = service.__name__
else:
name = str(service)
if name in self.services:
raise remoting.RemotingError("Service %s already exists" % name)
self.services[name] = ServiceWrapper(service, description,
authenticator, expose_request, preprocessor) | python | def addService(self, service, name=None, description=None,
authenticator=None, expose_request=None, preprocessor=None):
"""
Adds a service to the gateway.
@param service: The service to add to the gateway.
@type service: C{callable}, class instance, or a module
@param name: The name of the service.
@type name: C{str}
@raise pyamf.remoting.RemotingError: Service already exists.
@raise TypeError: C{service} cannot be a scalar value.
@raise TypeError: C{service} must be C{callable} or a module.
"""
if isinstance(service, (int, long, float, basestring)):
raise TypeError("Service cannot be a scalar value")
allowed_types = (types.ModuleType, types.FunctionType, types.DictType,
types.MethodType, types.InstanceType, types.ObjectType)
if not python.callable(service) and not isinstance(service, allowed_types):
raise TypeError("Service must be a callable, module, or an object")
if name is None:
# TODO: include the module in the name
if isinstance(service, (type, types.ClassType)):
name = service.__name__
elif isinstance(service, types.FunctionType):
name = service.func_name
elif isinstance(service, types.ModuleType):
name = service.__name__
else:
name = str(service)
if name in self.services:
raise remoting.RemotingError("Service %s already exists" % name)
self.services[name] = ServiceWrapper(service, description,
authenticator, expose_request, preprocessor) | [
"def",
"addService",
"(",
"self",
",",
"service",
",",
"name",
"=",
"None",
",",
"description",
"=",
"None",
",",
"authenticator",
"=",
"None",
",",
"expose_request",
"=",
"None",
",",
"preprocessor",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"service",
",",
"(",
"int",
",",
"long",
",",
"float",
",",
"basestring",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"Service cannot be a scalar value\"",
")",
"allowed_types",
"=",
"(",
"types",
".",
"ModuleType",
",",
"types",
".",
"FunctionType",
",",
"types",
".",
"DictType",
",",
"types",
".",
"MethodType",
",",
"types",
".",
"InstanceType",
",",
"types",
".",
"ObjectType",
")",
"if",
"not",
"python",
".",
"callable",
"(",
"service",
")",
"and",
"not",
"isinstance",
"(",
"service",
",",
"allowed_types",
")",
":",
"raise",
"TypeError",
"(",
"\"Service must be a callable, module, or an object\"",
")",
"if",
"name",
"is",
"None",
":",
"# TODO: include the module in the name",
"if",
"isinstance",
"(",
"service",
",",
"(",
"type",
",",
"types",
".",
"ClassType",
")",
")",
":",
"name",
"=",
"service",
".",
"__name__",
"elif",
"isinstance",
"(",
"service",
",",
"types",
".",
"FunctionType",
")",
":",
"name",
"=",
"service",
".",
"func_name",
"elif",
"isinstance",
"(",
"service",
",",
"types",
".",
"ModuleType",
")",
":",
"name",
"=",
"service",
".",
"__name__",
"else",
":",
"name",
"=",
"str",
"(",
"service",
")",
"if",
"name",
"in",
"self",
".",
"services",
":",
"raise",
"remoting",
".",
"RemotingError",
"(",
"\"Service %s already exists\"",
"%",
"name",
")",
"self",
".",
"services",
"[",
"name",
"]",
"=",
"ServiceWrapper",
"(",
"service",
",",
"description",
",",
"authenticator",
",",
"expose_request",
",",
"preprocessor",
")"
] | Adds a service to the gateway.
@param service: The service to add to the gateway.
@type service: C{callable}, class instance, or a module
@param name: The name of the service.
@type name: C{str}
@raise pyamf.remoting.RemotingError: Service already exists.
@raise TypeError: C{service} cannot be a scalar value.
@raise TypeError: C{service} must be C{callable} or a module. | [
"Adds",
"a",
"service",
"to",
"the",
"gateway",
"."
] | 228fafeaed0f3195676137732384a14820ae285c | https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/remoting/gateway/__init__.py#L298-L335 |
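A registration sketch, assuming some concrete BaseGateway subclass (WSGIGateway is used illustratively). Function services default to their func_name, while instances fall back to str(service), so naming instances explicitly is clearer; re-registering a taken name raises RemotingError.

    from pyamf.remoting.gateway.wsgi import WSGIGateway  # assumed available

    def echo(data):
        return data  # exposed as 'echo' via func_name

    class CalcService(object):
        def add(self, a, b):
            return a + b

    gateway = WSGIGateway()  # assumed concrete BaseGateway subclass
    gateway.addService(echo)
    gateway.addService(CalcService(), name='calc', description='arithmetic')
    # gateway.addService(42) would raise TypeError: scalars are rejected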
251,552 | jmgilman/Neolib | neolib/pyamf/remoting/gateway/__init__.py | BaseGateway.removeService | def removeService(self, service):
"""
Removes a service from the gateway.
        @param service: Either the name of the service to remove from the
        gateway, or the service itself.
@type service: C{callable} or a class instance
@raise NameError: Service not found.
"""
for name, wrapper in self.services.iteritems():
if service in (name, wrapper.service):
del self.services[name]
return
raise NameError("Service %r not found" % (service,)) | python | def removeService(self, service):
"""
Removes a service from the gateway.
        @param service: Either the name of the service to remove from the
        gateway, or the service itself.
@type service: C{callable} or a class instance
@raise NameError: Service not found.
"""
for name, wrapper in self.services.iteritems():
if service in (name, wrapper.service):
del self.services[name]
return
raise NameError("Service %r not found" % (service,)) | [
"def",
"removeService",
"(",
"self",
",",
"service",
")",
":",
"for",
"name",
",",
"wrapper",
"in",
"self",
".",
"services",
".",
"iteritems",
"(",
")",
":",
"if",
"service",
"in",
"(",
"name",
",",
"wrapper",
".",
"service",
")",
":",
"del",
"self",
".",
"services",
"[",
"name",
"]",
"return",
"raise",
"NameError",
"(",
"\"Service %r not found\"",
"%",
"(",
"service",
",",
")",
")"
] | Removes a service from the gateway.
@param service: Either the name of the service to remove from the
gateway, or the service itself.
@type service: C{callable} or a class instance
@raise NameError: Service not found. | [
"Removes",
"a",
"service",
"from",
"the",
"gateway",
"."
] | 228fafeaed0f3195676137732384a14820ae285c | https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/remoting/gateway/__init__.py#L346-L360 |
251,553 | jmgilman/Neolib | neolib/pyamf/remoting/gateway/__init__.py | BaseGateway.getServiceRequest | def getServiceRequest(self, request, target):
"""
Returns a service based on the message.
@raise UnknownServiceError: Unknown service.
@param request: The AMF request.
@type request: L{Request<pyamf.remoting.Request>}
@rtype: L{ServiceRequest}
"""
try:
return self._request_class(
request.envelope, self.services[target], None)
except KeyError:
pass
try:
sp = target.split('.')
name, meth = '.'.join(sp[:-1]), sp[-1]
return self._request_class(
request.envelope, self.services[name], meth)
except (ValueError, KeyError):
pass
raise UnknownServiceError("Unknown service %s" % target) | python | def getServiceRequest(self, request, target):
"""
Returns a service based on the message.
@raise UnknownServiceError: Unknown service.
@param request: The AMF request.
@type request: L{Request<pyamf.remoting.Request>}
@rtype: L{ServiceRequest}
"""
try:
return self._request_class(
request.envelope, self.services[target], None)
except KeyError:
pass
try:
sp = target.split('.')
name, meth = '.'.join(sp[:-1]), sp[-1]
return self._request_class(
request.envelope, self.services[name], meth)
except (ValueError, KeyError):
pass
raise UnknownServiceError("Unknown service %s" % target) | [
"def",
"getServiceRequest",
"(",
"self",
",",
"request",
",",
"target",
")",
":",
"try",
":",
"return",
"self",
".",
"_request_class",
"(",
"request",
".",
"envelope",
",",
"self",
".",
"services",
"[",
"target",
"]",
",",
"None",
")",
"except",
"KeyError",
":",
"pass",
"try",
":",
"sp",
"=",
"target",
".",
"split",
"(",
"'.'",
")",
"name",
",",
"meth",
"=",
"'.'",
".",
"join",
"(",
"sp",
"[",
":",
"-",
"1",
"]",
")",
",",
"sp",
"[",
"-",
"1",
"]",
"return",
"self",
".",
"_request_class",
"(",
"request",
".",
"envelope",
",",
"self",
".",
"services",
"[",
"name",
"]",
",",
"meth",
")",
"except",
"(",
"ValueError",
",",
"KeyError",
")",
":",
"pass",
"raise",
"UnknownServiceError",
"(",
"\"Unknown service %s\"",
"%",
"target",
")"
] | Returns a service based on the message.
@raise UnknownServiceError: Unknown service.
@param request: The AMF request.
@type request: L{Request<pyamf.remoting.Request>}
@rtype: L{ServiceRequest} | [
"Returns",
"a",
"service",
"based",
"on",
"the",
"message",
"."
] | 228fafeaed0f3195676137732384a14820ae285c | https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/remoting/gateway/__init__.py#L362-L386 |
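The lookup tries the whole target as a service name first, then retries with the final dotted segment split off as a method name. A hedged continuation of the hypothetical 'calc' registration above:

    # 'request' is a pyamf remoting Request decoded earlier.
    # Direct hit: 'calc' is a registered service name.
    sr = gateway.getServiceRequest(request, 'calc')
    # Fallback: 'calc.add' is not registered as-is, so it resolves
    # to service 'calc' with method 'add'.
    sr = gateway.getServiceRequest(request, 'calc.add')
    # Any other target raises UnknownServiceError.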
251,554 | jmgilman/Neolib | neolib/pyamf/remoting/gateway/__init__.py | BaseGateway.getProcessor | def getProcessor(self, request):
"""
Returns request processor.
@param request: The AMF message.
@type request: L{Request<remoting.Request>}
"""
if request.target == 'null' or not request.target:
from pyamf.remoting import amf3
return amf3.RequestProcessor(self)
else:
from pyamf.remoting import amf0
return amf0.RequestProcessor(self) | python | def getProcessor(self, request):
"""
Returns request processor.
@param request: The AMF message.
@type request: L{Request<remoting.Request>}
"""
if request.target == 'null' or not request.target:
from pyamf.remoting import amf3
return amf3.RequestProcessor(self)
else:
from pyamf.remoting import amf0
return amf0.RequestProcessor(self) | [
"def",
"getProcessor",
"(",
"self",
",",
"request",
")",
":",
"if",
"request",
".",
"target",
"==",
"'null'",
"or",
"not",
"request",
".",
"target",
":",
"from",
"pyamf",
".",
"remoting",
"import",
"amf3",
"return",
"amf3",
".",
"RequestProcessor",
"(",
"self",
")",
"else",
":",
"from",
"pyamf",
".",
"remoting",
"import",
"amf0",
"return",
"amf0",
".",
"RequestProcessor",
"(",
"self",
")"
] | Returns request processor.
@param request: The AMF message.
@type request: L{Request<remoting.Request>} | [
"Returns",
"request",
"processor",
"."
] | 228fafeaed0f3195676137732384a14820ae285c | https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/remoting/gateway/__init__.py#L388-L402 |
251,555 | jmgilman/Neolib | neolib/pyamf/remoting/gateway/__init__.py | BaseGateway.mustExposeRequest | def mustExposeRequest(self, service_request):
"""
Decides whether the underlying http request should be exposed as the
first argument to the method call. This is granular, looking at the
service method first, then at the service level and finally checking
the gateway.
@rtype: C{bool}
"""
expose_request = service_request.service.mustExposeRequest(service_request)
if expose_request is None:
if self.expose_request is None:
return False
return self.expose_request
return expose_request | python | def mustExposeRequest(self, service_request):
"""
Decides whether the underlying http request should be exposed as the
first argument to the method call. This is granular, looking at the
service method first, then at the service level and finally checking
the gateway.
@rtype: C{bool}
"""
expose_request = service_request.service.mustExposeRequest(service_request)
if expose_request is None:
if self.expose_request is None:
return False
return self.expose_request
return expose_request | [
"def",
"mustExposeRequest",
"(",
"self",
",",
"service_request",
")",
":",
"expose_request",
"=",
"service_request",
".",
"service",
".",
"mustExposeRequest",
"(",
"service_request",
")",
"if",
"expose_request",
"is",
"None",
":",
"if",
"self",
".",
"expose_request",
"is",
"None",
":",
"return",
"False",
"return",
"self",
".",
"expose_request",
"return",
"expose_request"
] | Decides whether the underlying http request should be exposed as the
first argument to the method call. This is granular, looking at the
service method first, then at the service level and finally checking
the gateway.
@rtype: C{bool} | [
"Decides",
"whether",
"the",
"underlying",
"http",
"request",
"should",
"be",
"exposed",
"as",
"the",
"first",
"argument",
"to",
"the",
"method",
"call",
".",
"This",
"is",
"granular",
"looking",
"at",
"the",
"service",
"method",
"first",
"then",
"at",
"the",
"service",
"level",
"and",
"finally",
"checking",
"the",
"gateway",
"."
] | 228fafeaed0f3195676137732384a14820ae285c | https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/remoting/gateway/__init__.py#L418-L435 |
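The precedence the docstring describes (method flag, then service flag, then gateway default, with None meaning 'defer') reduces to a first-non-None scan. The helper below is an illustrative restatement of that rule, not library code:

    def resolve_expose(method_flag, service_flag, gateway_flag):
        # The first explicit True/False wins; an unbroken chain of
        # None falls through to the conservative default of False.
        for flag in (method_flag, service_flag, gateway_flag):
            if flag is not None:
                return flag
        return False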
251,556 | jmgilman/Neolib | neolib/pyamf/remoting/gateway/__init__.py | BaseGateway.callServiceRequest | def callServiceRequest(self, service_request, *args, **kwargs):
"""
Executes the service_request call
"""
if self.mustExposeRequest(service_request):
http_request = kwargs.get('http_request', None)
args = (http_request,) + args
return service_request(*args) | python | def callServiceRequest(self, service_request, *args, **kwargs):
"""
Executes the service_request call
"""
if self.mustExposeRequest(service_request):
http_request = kwargs.get('http_request', None)
args = (http_request,) + args
return service_request(*args) | [
"def",
"callServiceRequest",
"(",
"self",
",",
"service_request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"mustExposeRequest",
"(",
"service_request",
")",
":",
"http_request",
"=",
"kwargs",
".",
"get",
"(",
"'http_request'",
",",
"None",
")",
"args",
"=",
"(",
"http_request",
",",
")",
"+",
"args",
"return",
"service_request",
"(",
"*",
"args",
")"
] | Executes the service_request call | [
"Executes",
"the",
"service_request",
"call"
] | 228fafeaed0f3195676137732384a14820ae285c | https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/remoting/gateway/__init__.py#L505-L513 |
251,557 | jkenlooper/chill | src/chill/operate.py | node_input | def node_input():
"""
Get a valid node id from the user.
Return -1 if invalid
"""
try:
node = int(raw_input("Node id: "))
except ValueError:
node = INVALID_NODE
print 'invalid node id: %s' % node
return node | python | def node_input():
"""
Get a valid node id from the user.
Return -1 if invalid
"""
try:
node = int(raw_input("Node id: "))
except ValueError:
node = INVALID_NODE
print 'invalid node id: %s' % node
return node | [
"def",
"node_input",
"(",
")",
":",
"try",
":",
"node",
"=",
"int",
"(",
"raw_input",
"(",
"\"Node id: \"",
")",
")",
"except",
"ValueError",
":",
"node",
"=",
"INVALID_NODE",
"print",
"'invalid node id: %s'",
"%",
"node",
"return",
"node"
] | Get a valid node id from the user.
Return -1 if invalid | [
"Get",
"a",
"valid",
"node",
"id",
"from",
"the",
"user",
"."
] | 35360c17c2a3b769ecb5406c6dabcf4cc70bd76f | https://github.com/jkenlooper/chill/blob/35360c17c2a3b769ecb5406c6dabcf4cc70bd76f/src/chill/operate.py#L38-L49 |
251,558 | jkenlooper/chill | src/chill/operate.py | existing_node_input | def existing_node_input():
"""
Get an existing node id by name or id.
Return -1 if invalid
"""
input_from_user = raw_input("Existing node name or id: ")
node_id = INVALID_NODE
if not input_from_user:
return node_id
# int or str?
try:
parsed_input = int(input_from_user)
except ValueError:
parsed_input = input_from_user
if isinstance(parsed_input, int):
result = db.execute(text(fetch_query_string('select_node_from_id.sql')),
node_id=parsed_input).fetchall()
if result:
node_id = int(result[0]['node_id'])
else:
result = db.execute(text(fetch_query_string('select_node_from_name.sql')),
node_name=parsed_input).fetchall()
if result:
if len(result) == 1:
print 'Node id: {node_id}\nNode name: {name}'.format(**result[0])
print '-------------'
node_id = result[0]['node_id']
else:
print 'Multiple nodes found with the name: {0}'.format(parsed_input)
for item in result:
print '{node_id}: {name} = {value}'.format(**item)
node_selection = raw_input('Enter a node id from this list or enter "?" to render all or "?<node>" for a specific one.')
if node_selection:
node_selection_match = re.match(r"\?(\d)*", node_selection)
if node_selection_match:
if node_selection_match.groups()[0]:
value = render_node(int(node_selection_match.groups()[0]), noderequest={'_no_template':True}, **result[0])
print safe_dump(value, default_flow_style=False)
else:
for item in result:
value = render_node(item['node_id'], noderequest={'_no_template':True}, **item)
print 'Node id: {0}'.format(item['node_id'])
print safe_dump(value, default_flow_style=False)
print '---'
node_id = node_input()
else:
try:
node_id = int(node_selection)
except ValueError:
node_id = INVALID_NODE
                            print 'invalid node id: %s' % node_selection
return node_id | python | def existing_node_input():
"""
Get an existing node id by name or id.
Return -1 if invalid
"""
input_from_user = raw_input("Existing node name or id: ")
node_id = INVALID_NODE
if not input_from_user:
return node_id
# int or str?
try:
parsed_input = int(input_from_user)
except ValueError:
parsed_input = input_from_user
if isinstance(parsed_input, int):
result = db.execute(text(fetch_query_string('select_node_from_id.sql')),
node_id=parsed_input).fetchall()
if result:
node_id = int(result[0]['node_id'])
else:
result = db.execute(text(fetch_query_string('select_node_from_name.sql')),
node_name=parsed_input).fetchall()
if result:
if len(result) == 1:
print 'Node id: {node_id}\nNode name: {name}'.format(**result[0])
print '-------------'
node_id = result[0]['node_id']
else:
print 'Multiple nodes found with the name: {0}'.format(parsed_input)
for item in result:
print '{node_id}: {name} = {value}'.format(**item)
node_selection = raw_input('Enter a node id from this list or enter "?" to render all or "?<node>" for a specific one.')
if node_selection:
node_selection_match = re.match(r"\?(\d)*", node_selection)
if node_selection_match:
if node_selection_match.groups()[0]:
value = render_node(int(node_selection_match.groups()[0]), noderequest={'_no_template':True}, **result[0])
print safe_dump(value, default_flow_style=False)
else:
for item in result:
value = render_node(item['node_id'], noderequest={'_no_template':True}, **item)
print 'Node id: {0}'.format(item['node_id'])
print safe_dump(value, default_flow_style=False)
print '---'
node_id = node_input()
else:
try:
node_id = int(node_selection)
except ValueError:
node_id = INVALID_NODE
                            print 'invalid node id: %s' % node_selection
return node_id | [
"def",
"existing_node_input",
"(",
")",
":",
"input_from_user",
"=",
"raw_input",
"(",
"\"Existing node name or id: \"",
")",
"node_id",
"=",
"INVALID_NODE",
"if",
"not",
"input_from_user",
":",
"return",
"node_id",
"# int or str?",
"try",
":",
"parsed_input",
"=",
"int",
"(",
"input_from_user",
")",
"except",
"ValueError",
":",
"parsed_input",
"=",
"input_from_user",
"if",
"isinstance",
"(",
"parsed_input",
",",
"int",
")",
":",
"result",
"=",
"db",
".",
"execute",
"(",
"text",
"(",
"fetch_query_string",
"(",
"'select_node_from_id.sql'",
")",
")",
",",
"node_id",
"=",
"parsed_input",
")",
".",
"fetchall",
"(",
")",
"if",
"result",
":",
"node_id",
"=",
"int",
"(",
"result",
"[",
"0",
"]",
"[",
"'node_id'",
"]",
")",
"else",
":",
"result",
"=",
"db",
".",
"execute",
"(",
"text",
"(",
"fetch_query_string",
"(",
"'select_node_from_name.sql'",
")",
")",
",",
"node_name",
"=",
"parsed_input",
")",
".",
"fetchall",
"(",
")",
"if",
"result",
":",
"if",
"len",
"(",
"result",
")",
"==",
"1",
":",
"print",
"'Node id: {node_id}\\nNode name: {name}'",
".",
"format",
"(",
"*",
"*",
"result",
"[",
"0",
"]",
")",
"print",
"'-------------'",
"node_id",
"=",
"result",
"[",
"0",
"]",
"[",
"'node_id'",
"]",
"else",
":",
"print",
"'Multiple nodes found with the name: {0}'",
".",
"format",
"(",
"parsed_input",
")",
"for",
"item",
"in",
"result",
":",
"print",
"'{node_id}: {name} = {value}'",
".",
"format",
"(",
"*",
"*",
"item",
")",
"node_selection",
"=",
"raw_input",
"(",
"'Enter a node id from this list or enter \"?\" to render all or \"?<node>\" for a specific one.'",
")",
"if",
"node_selection",
":",
"node_selection_match",
"=",
"re",
".",
"match",
"(",
"r\"\\?(\\d)*\"",
",",
"node_selection",
")",
"if",
"node_selection_match",
":",
"if",
"node_selection_match",
".",
"groups",
"(",
")",
"[",
"0",
"]",
":",
"value",
"=",
"render_node",
"(",
"int",
"(",
"node_selection_match",
".",
"groups",
"(",
")",
"[",
"0",
"]",
")",
",",
"noderequest",
"=",
"{",
"'_no_template'",
":",
"True",
"}",
",",
"*",
"*",
"result",
"[",
"0",
"]",
")",
"print",
"safe_dump",
"(",
"value",
",",
"default_flow_style",
"=",
"False",
")",
"else",
":",
"for",
"item",
"in",
"result",
":",
"value",
"=",
"render_node",
"(",
"item",
"[",
"'node_id'",
"]",
",",
"noderequest",
"=",
"{",
"'_no_template'",
":",
"True",
"}",
",",
"*",
"*",
"item",
")",
"print",
"'Node id: {0}'",
".",
"format",
"(",
"item",
"[",
"'node_id'",
"]",
")",
"print",
"safe_dump",
"(",
"value",
",",
"default_flow_style",
"=",
"False",
")",
"print",
"'---'",
"node_id",
"=",
"node_input",
"(",
")",
"else",
":",
"try",
":",
"node_id",
"=",
"int",
"(",
"node_selection",
")",
"except",
"ValueError",
":",
"node_id",
"=",
"INVALID_NODE",
"print",
"'invalid node id: %s'",
"%",
"node",
"return",
"node_id"
] | Get an existing node id by name or id.
Return -1 if invalid | [
"Get",
"an",
"existing",
"node",
"id",
"by",
"name",
"or",
"id",
"."
] | 35360c17c2a3b769ecb5406c6dabcf4cc70bd76f | https://github.com/jkenlooper/chill/blob/35360c17c2a3b769ecb5406c6dabcf4cc70bd76f/src/chill/operate.py#L51-L107 |
251,559 | jkenlooper/chill | src/chill/operate.py | render_value_for_node | def render_value_for_node(node_id):
"""
Wrap render_node for usage in operate scripts. Returns without template
rendered.
"""
value = None
result = []
try:
result = db.execute(text(fetch_query_string('select_node_from_id.sql')), node_id=node_id).fetchall()
except DatabaseError as err:
current_app.logger.error("DatabaseError: %s", err)
if result:
kw = dict(zip(result[0].keys(), result[0].values()))
value = render_node(node_id, noderequest={'_no_template':True}, **kw)
return value | python | def render_value_for_node(node_id):
"""
Wrap render_node for usage in operate scripts. Returns without template
rendered.
"""
value = None
result = []
try:
result = db.execute(text(fetch_query_string('select_node_from_id.sql')), node_id=node_id).fetchall()
except DatabaseError as err:
current_app.logger.error("DatabaseError: %s", err)
if result:
kw = dict(zip(result[0].keys(), result[0].values()))
value = render_node(node_id, noderequest={'_no_template':True}, **kw)
return value | [
"def",
"render_value_for_node",
"(",
"node_id",
")",
":",
"value",
"=",
"None",
"result",
"=",
"[",
"]",
"try",
":",
"result",
"=",
"db",
".",
"execute",
"(",
"text",
"(",
"fetch_query_string",
"(",
"'select_node_from_id.sql'",
")",
")",
",",
"node_id",
"=",
"node_id",
")",
".",
"fetchall",
"(",
")",
"except",
"DatabaseError",
"as",
"err",
":",
"current_app",
".",
"logger",
".",
"error",
"(",
"\"DatabaseError: %s\"",
",",
"err",
")",
"if",
"result",
":",
"kw",
"=",
"dict",
"(",
"zip",
"(",
"result",
"[",
"0",
"]",
".",
"keys",
"(",
")",
",",
"result",
"[",
"0",
"]",
".",
"values",
"(",
")",
")",
")",
"value",
"=",
"render_node",
"(",
"node_id",
",",
"noderequest",
"=",
"{",
"'_no_template'",
":",
"True",
"}",
",",
"*",
"*",
"kw",
")",
"return",
"value"
] | Wrap render_node for usage in operate scripts. Returns without template
rendered. | [
"Wrap",
"render_node",
"for",
"usage",
"in",
"operate",
"scripts",
".",
"Returns",
"without",
"template",
"rendered",
"."
] | 35360c17c2a3b769ecb5406c6dabcf4cc70bd76f | https://github.com/jkenlooper/chill/blob/35360c17c2a3b769ecb5406c6dabcf4cc70bd76f/src/chill/operate.py#L109-L125 |
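A small sketch of calling this wrapper from an operate script; the node id is made up.

    value = render_value_for_node(42)  # hypothetical node id
    if value is None:
        print 'node 42 not found (or the select query failed)'
    else:
        print safe_dump(value, default_flow_style=False)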
251,560 | jkenlooper/chill | src/chill/operate.py | purge_collection | def purge_collection(keys):
"Recursive purge of nodes with name and id"
for key in keys:
m = re.match(r'(.*) \((\d+)\)', key)
name = m.group(1)
node_id = m.group(2)
value = render_value_for_node(node_id)
print 'remove node with name:{0} and id:{1}'.format(name, node_id)
delete_node(node_id=node_id)
if isinstance(value, dict):
purge_collection(value.keys()) | python | def purge_collection(keys):
"Recursive purge of nodes with name and id"
for key in keys:
m = re.match(r'(.*) \((\d+)\)', key)
name = m.group(1)
node_id = m.group(2)
value = render_value_for_node(node_id)
print 'remove node with name:{0} and id:{1}'.format(name, node_id)
delete_node(node_id=node_id)
if isinstance(value, dict):
purge_collection(value.keys()) | [
"def",
"purge_collection",
"(",
"keys",
")",
":",
"for",
"key",
"in",
"keys",
":",
"m",
"=",
"re",
".",
"match",
"(",
"r'(.*) \\((\\d+)\\)'",
",",
"key",
")",
"name",
"=",
"m",
".",
"group",
"(",
"1",
")",
"node_id",
"=",
"m",
".",
"group",
"(",
"2",
")",
"value",
"=",
"render_value_for_node",
"(",
"node_id",
")",
"print",
"'remove node with name:{0} and id:{1}'",
".",
"format",
"(",
"name",
",",
"node_id",
")",
"delete_node",
"(",
"node_id",
"=",
"node_id",
")",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"purge_collection",
"(",
"value",
".",
"keys",
"(",
")",
")"
] | Recursive purge of nodes with name and id | [
"Recursive",
"purge",
"of",
"nodes",
"with",
"name",
"and",
"id"
] | 35360c17c2a3b769ecb5406c6dabcf4cc70bd76f | https://github.com/jkenlooper/chill/blob/35360c17c2a3b769ecb5406c6dabcf4cc70bd76f/src/chill/operate.py#L144-L155 |
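The keys must follow the 'name (id)' shape that the regex above expects; a hedged example with made-up node ids:

    # Hypothetical rendered-collection keys embedding node ids.
    keys = ['banner (12)', 'body (13)', 'footer (14)']
    purge_collection(keys)
    # Each listed node is deleted; a node whose rendered value is
    # itself a dict has its keys purged recursively the same way.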
251,561 | jkenlooper/chill | src/chill/operate.py | mode_new_collection | def mode_new_collection():
"""
Create a new collection of items with common attributes.
"""
print globals()['mode_new_collection'].__doc__
collection_name = raw_input("Collection name: ")
item_attr_list = []
collection_node_id = None
if collection_name:
collection_node_id = insert_node(name=collection_name, value=None)
insert_query(name='select_link_node_from_node.sql', node_id=collection_node_id)
item_attr = True
while item_attr:
item_attr = raw_input("Add a collection item attribute name: ")
if item_attr:
item_attr_list.append(item_attr)
# if no collection name then exit
selection = collection_name
while selection:
selection = select([
'Add item',
])
if selection == 'Add item':
# create item
add_item_with_attributes_to_collection(
collection_name=collection_name,
collection_node_id=collection_node_id,
item_attr_list=item_attr_list)
if collection_node_id:
print "Added collection name '{0}' with node id: {1}".format(collection_name, collection_node_id) | python | def mode_new_collection():
"""
Create a new collection of items with common attributes.
"""
print globals()['mode_new_collection'].__doc__
collection_name = raw_input("Collection name: ")
item_attr_list = []
collection_node_id = None
if collection_name:
collection_node_id = insert_node(name=collection_name, value=None)
insert_query(name='select_link_node_from_node.sql', node_id=collection_node_id)
item_attr = True
while item_attr:
item_attr = raw_input("Add a collection item attribute name: ")
if item_attr:
item_attr_list.append(item_attr)
# if no collection name then exit
selection = collection_name
while selection:
selection = select([
'Add item',
])
if selection == 'Add item':
# create item
add_item_with_attributes_to_collection(
collection_name=collection_name,
collection_node_id=collection_node_id,
item_attr_list=item_attr_list)
if collection_node_id:
print "Added collection name '{0}' with node id: {1}".format(collection_name, collection_node_id) | [
"def",
"mode_new_collection",
"(",
")",
":",
"print",
"globals",
"(",
")",
"[",
"'mode_new_collection'",
"]",
".",
"__doc__",
"collection_name",
"=",
"raw_input",
"(",
"\"Collection name: \"",
")",
"item_attr_list",
"=",
"[",
"]",
"collection_node_id",
"=",
"None",
"if",
"collection_name",
":",
"collection_node_id",
"=",
"insert_node",
"(",
"name",
"=",
"collection_name",
",",
"value",
"=",
"None",
")",
"insert_query",
"(",
"name",
"=",
"'select_link_node_from_node.sql'",
",",
"node_id",
"=",
"collection_node_id",
")",
"item_attr",
"=",
"True",
"while",
"item_attr",
":",
"item_attr",
"=",
"raw_input",
"(",
"\"Add a collection item attribute name: \"",
")",
"if",
"item_attr",
":",
"item_attr_list",
".",
"append",
"(",
"item_attr",
")",
"# if no collection name then exit",
"selection",
"=",
"collection_name",
"while",
"selection",
":",
"selection",
"=",
"select",
"(",
"[",
"'Add item'",
",",
"]",
")",
"if",
"selection",
"==",
"'Add item'",
":",
"# create item",
"add_item_with_attributes_to_collection",
"(",
"collection_name",
"=",
"collection_name",
",",
"collection_node_id",
"=",
"collection_node_id",
",",
"item_attr_list",
"=",
"item_attr_list",
")",
"if",
"collection_node_id",
":",
"print",
"\"Added collection name '{0}' with node id: {1}\"",
".",
"format",
"(",
"collection_name",
",",
"collection_node_id",
")"
] | Create a new collection of items with common attributes. | [
"Create",
"a",
"new",
"collection",
"of",
"items",
"with",
"common",
"attributes",
"."
] | 35360c17c2a3b769ecb5406c6dabcf4cc70bd76f | https://github.com/jkenlooper/chill/blob/35360c17c2a3b769ecb5406c6dabcf4cc70bd76f/src/chill/operate.py#L259-L292 |
251,562 | jkenlooper/chill | src/chill/operate.py | mode_database_functions | def mode_database_functions():
"Select a function to perform from chill.database"
print globals()['mode_database_functions'].__doc__
selection = True
database_functions = [
'init_db',
'insert_node',
'insert_node_node',
'delete_node',
'select_node',
'insert_route',
'insert_query',
'add_template_for_node',
'fetch_query_string',
]
while selection:
choices = database_functions + [
'help',
]
selection = select(choices)
if selection:
print globals().get(selection).__doc__
if selection == 'init_db':
confirm = raw_input("Initialize new database y/n? [n] ")
if confirm == 'y':
init_db()
elif selection == 'insert_node':
name = raw_input("Node name: ")
value = raw_input("Node value: ")
node = insert_node(name=name, value=value or None)
print "name: %s \nid: %s" % (name, node)
elif selection == 'insert_query':
sqlfile = choose_query_file()
if sqlfile:
node = existing_node_input()
if node >= 0:
insert_query(name=sqlfile, node_id=node)
print "adding %s to node id: %s" % (sqlfile, node)
elif selection == 'insert_node_node':
print "Add parent node id"
node = existing_node_input()
print "Add target node id"
target_node = existing_node_input()
if node >= 0 and target_node >= 0:
insert_node_node(node_id=node, target_node_id=target_node)
elif selection == 'delete_node':
node = existing_node_input()
if node >= 0:
delete_node(node_id=node)
elif selection == 'select_node':
node = existing_node_input()
if node >= 0:
result = select_node(node_id=node)
print safe_dump(dict(zip(result[0].keys(), result[0].values())), default_flow_style=False)
elif selection == 'insert_route':
path = raw_input('path: ')
weight = raw_input('weight: ') or None
method = raw_input('method: ') or 'GET'
node = existing_node_input()
if node >= 0:
insert_route(path=path, node_id=node, weight=weight, method=method)
elif selection == 'add_template_for_node':
folder = current_app.config.get('THEME_TEMPLATE_FOLDER')
choices = map(os.path.basename,
glob(os.path.join(folder, '*'))
)
choices.sort()
templatefile = select(choices)
if templatefile:
node = existing_node_input()
if node >= 0:
add_template_for_node(name=templatefile, node_id=node)
print "adding %s to node id: %s" % (templatefile, node)
elif selection == 'fetch_query_string':
sqlfile = choose_query_file()
if sqlfile:
sql = fetch_query_string(sqlfile)
print sql
elif selection == 'help':
print "------"
for f in database_functions:
print "\n** %s **" % f
print globals().get(f).__doc__
print "------"
else:
pass | python | def mode_database_functions():
"Select a function to perform from chill.database"
print globals()['mode_database_functions'].__doc__
selection = True
database_functions = [
'init_db',
'insert_node',
'insert_node_node',
'delete_node',
'select_node',
'insert_route',
'insert_query',
'add_template_for_node',
'fetch_query_string',
]
while selection:
choices = database_functions + [
'help',
]
selection = select(choices)
if selection:
print globals().get(selection).__doc__
if selection == 'init_db':
confirm = raw_input("Initialize new database y/n? [n] ")
if confirm == 'y':
init_db()
elif selection == 'insert_node':
name = raw_input("Node name: ")
value = raw_input("Node value: ")
node = insert_node(name=name, value=value or None)
print "name: %s \nid: %s" % (name, node)
elif selection == 'insert_query':
sqlfile = choose_query_file()
if sqlfile:
node = existing_node_input()
if node >= 0:
insert_query(name=sqlfile, node_id=node)
print "adding %s to node id: %s" % (sqlfile, node)
elif selection == 'insert_node_node':
print "Add parent node id"
node = existing_node_input()
print "Add target node id"
target_node = existing_node_input()
if node >= 0 and target_node >= 0:
insert_node_node(node_id=node, target_node_id=target_node)
elif selection == 'delete_node':
node = existing_node_input()
if node >= 0:
delete_node(node_id=node)
elif selection == 'select_node':
node = existing_node_input()
if node >= 0:
result = select_node(node_id=node)
print safe_dump(dict(zip(result[0].keys(), result[0].values())), default_flow_style=False)
elif selection == 'insert_route':
path = raw_input('path: ')
weight = raw_input('weight: ') or None
method = raw_input('method: ') or 'GET'
node = existing_node_input()
if node >= 0:
insert_route(path=path, node_id=node, weight=weight, method=method)
elif selection == 'add_template_for_node':
folder = current_app.config.get('THEME_TEMPLATE_FOLDER')
choices = map(os.path.basename,
glob(os.path.join(folder, '*'))
)
choices.sort()
templatefile = select(choices)
if templatefile:
node = existing_node_input()
if node >= 0:
add_template_for_node(name=templatefile, node_id=node)
print "adding %s to node id: %s" % (templatefile, node)
elif selection == 'fetch_query_string':
sqlfile = choose_query_file()
if sqlfile:
sql = fetch_query_string(sqlfile)
print sql
elif selection == 'help':
print "------"
for f in database_functions:
print "\n** %s **" % f
print globals().get(f).__doc__
print "------"
else:
pass | [
"def",
"mode_database_functions",
"(",
")",
":",
"print",
"globals",
"(",
")",
"[",
"'mode_database_functions'",
"]",
".",
"__doc__",
"selection",
"=",
"True",
"database_functions",
"=",
"[",
"'init_db'",
",",
"'insert_node'",
",",
"'insert_node_node'",
",",
"'delete_node'",
",",
"'select_node'",
",",
"'insert_route'",
",",
"'insert_query'",
",",
"'add_template_for_node'",
",",
"'fetch_query_string'",
",",
"]",
"while",
"selection",
":",
"choices",
"=",
"database_functions",
"+",
"[",
"'help'",
",",
"]",
"selection",
"=",
"select",
"(",
"choices",
")",
"if",
"selection",
":",
"print",
"globals",
"(",
")",
".",
"get",
"(",
"selection",
")",
".",
"__doc__",
"if",
"selection",
"==",
"'init_db'",
":",
"confirm",
"=",
"raw_input",
"(",
"\"Initialize new database y/n? [n] \"",
")",
"if",
"confirm",
"==",
"'y'",
":",
"init_db",
"(",
")",
"elif",
"selection",
"==",
"'insert_node'",
":",
"name",
"=",
"raw_input",
"(",
"\"Node name: \"",
")",
"value",
"=",
"raw_input",
"(",
"\"Node value: \"",
")",
"node",
"=",
"insert_node",
"(",
"name",
"=",
"name",
",",
"value",
"=",
"value",
"or",
"None",
")",
"print",
"\"name: %s \\nid: %s\"",
"%",
"(",
"name",
",",
"node",
")",
"elif",
"selection",
"==",
"'insert_query'",
":",
"sqlfile",
"=",
"choose_query_file",
"(",
")",
"if",
"sqlfile",
":",
"node",
"=",
"existing_node_input",
"(",
")",
"if",
"node",
">=",
"0",
":",
"insert_query",
"(",
"name",
"=",
"sqlfile",
",",
"node_id",
"=",
"node",
")",
"print",
"\"adding %s to node id: %s\"",
"%",
"(",
"sqlfile",
",",
"node",
")",
"elif",
"selection",
"==",
"'insert_node_node'",
":",
"print",
"\"Add parent node id\"",
"node",
"=",
"existing_node_input",
"(",
")",
"print",
"\"Add target node id\"",
"target_node",
"=",
"existing_node_input",
"(",
")",
"if",
"node",
">=",
"0",
"and",
"target_node",
">=",
"0",
":",
"insert_node_node",
"(",
"node_id",
"=",
"node",
",",
"target_node_id",
"=",
"target_node",
")",
"elif",
"selection",
"==",
"'delete_node'",
":",
"node",
"=",
"existing_node_input",
"(",
")",
"if",
"node",
">=",
"0",
":",
"delete_node",
"(",
"node_id",
"=",
"node",
")",
"elif",
"selection",
"==",
"'select_node'",
":",
"node",
"=",
"existing_node_input",
"(",
")",
"if",
"node",
">=",
"0",
":",
"result",
"=",
"select_node",
"(",
"node_id",
"=",
"node",
")",
"print",
"safe_dump",
"(",
"dict",
"(",
"zip",
"(",
"result",
"[",
"0",
"]",
".",
"keys",
"(",
")",
",",
"result",
"[",
"0",
"]",
".",
"values",
"(",
")",
")",
")",
",",
"default_flow_style",
"=",
"False",
")",
"elif",
"selection",
"==",
"'insert_route'",
":",
"path",
"=",
"raw_input",
"(",
"'path: '",
")",
"weight",
"=",
"raw_input",
"(",
"'weight: '",
")",
"or",
"None",
"method",
"=",
"raw_input",
"(",
"'method: '",
")",
"or",
"'GET'",
"node",
"=",
"existing_node_input",
"(",
")",
"if",
"node",
">=",
"0",
":",
"insert_route",
"(",
"path",
"=",
"path",
",",
"node_id",
"=",
"node",
",",
"weight",
"=",
"weight",
",",
"method",
"=",
"method",
")",
"elif",
"selection",
"==",
"'add_template_for_node'",
":",
"folder",
"=",
"current_app",
".",
"config",
".",
"get",
"(",
"'THEME_TEMPLATE_FOLDER'",
")",
"choices",
"=",
"map",
"(",
"os",
".",
"path",
".",
"basename",
",",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"folder",
",",
"'*'",
")",
")",
")",
"choices",
".",
"sort",
"(",
")",
"templatefile",
"=",
"select",
"(",
"choices",
")",
"if",
"templatefile",
":",
"node",
"=",
"existing_node_input",
"(",
")",
"if",
"node",
">=",
"0",
":",
"add_template_for_node",
"(",
"name",
"=",
"templatefile",
",",
"node_id",
"=",
"node",
")",
"print",
"\"adding %s to node id: %s\"",
"%",
"(",
"templatefile",
",",
"node",
")",
"elif",
"selection",
"==",
"'fetch_query_string'",
":",
"sqlfile",
"=",
"choose_query_file",
"(",
")",
"if",
"sqlfile",
":",
"sql",
"=",
"fetch_query_string",
"(",
"sqlfile",
")",
"print",
"sql",
"elif",
"selection",
"==",
"'help'",
":",
"print",
"\"------\"",
"for",
"f",
"in",
"database_functions",
":",
"print",
"\"\\n** %s **\"",
"%",
"f",
"print",
"globals",
"(",
")",
".",
"get",
"(",
"f",
")",
".",
"__doc__",
"print",
"\"------\"",
"else",
":",
"pass"
] | Select a function to perform from chill.database | [
"Select",
"a",
"function",
"to",
"perform",
"from",
"chill",
".",
"database"
] | 35360c17c2a3b769ecb5406c6dabcf4cc70bd76f | https://github.com/jkenlooper/chill/blob/35360c17c2a3b769ecb5406c6dabcf4cc70bd76f/src/chill/operate.py#L295-L389 |
251,563 | jkenlooper/chill | src/chill/operate.py | operate_menu | def operate_menu():
"Select between these operations on the database"
selection = True
while selection:
print globals()['operate_menu'].__doc__
selection = select([
'chill.database functions',
'execute sql file',
'render_node',
'New collection',
'Manage collection',
'Add document for node',
'help',
])
if selection == 'chill.database functions':
mode_database_functions()
elif selection == 'execute sql file':
print "View the sql file and show a fill in the blanks interface with raw_input"
sqlfile = choose_query_file()
if not sqlfile:
# return to the menu choices if not file picked
selection = True
else:
sql_named_placeholders_re = re.compile(r":(\w+)")
sql = fetch_query_string(sqlfile)
placeholders = set(sql_named_placeholders_re.findall(sql))
print sql
data = {}
for placeholder in placeholders:
value = raw_input(placeholder + ': ')
data[placeholder] = value
result = []
try:
result = db.execute(text(sql), data)
except DatabaseError as err:
current_app.logger.error("DatabaseError: %s", err)
if result and result.returns_rows:
result = result.fetchall()
print result
if not result:
print 'No results.'
else:
kw = result[0]
if 'node_id' in kw:
print 'render node %s' % kw['node_id']
value = render_node(kw['node_id'], **kw)
print safe_dump(value, default_flow_style=False)
else:
#print safe_dump(rowify(result, [(x, None) for x in result[0].keys()]), default_flow_style=False)
print safe_dump([dict(zip(x.keys(), x.values())) for x in result], default_flow_style=False)
elif selection == 'render_node':
print globals()['render_node'].__doc__
node_id = existing_node_input()
value = render_value_for_node(node_id)
print safe_dump(value, default_flow_style=False)
elif selection == 'New collection':
mode_new_collection()
elif selection == 'Manage collection':
mode_collection()
elif selection == 'Add document for node':
folder = current_app.config.get('DOCUMENT_FOLDER')
if not folder:
print "No DOCUMENT_FOLDER configured for the application."
else:
choices = map(os.path.basename,
glob(os.path.join(folder, '*'))
)
choices.sort()
if len(choices) == 0:
print "No files found in DOCUMENT_FOLDER."
else:
filename = select(choices)
if filename:
defaultname = os.path.splitext(filename)[0]
nodename = raw_input("Enter name for node [{0}]: ".format(defaultname)) or defaultname
node = insert_node(name=nodename, value=filename)
print "Added document '%s' to node '%s' with id: %s" % (filename, nodename, node)
elif selection == 'help':
print "------"
print __doc__
print "------"
else:
print 'Done' | python | def operate_menu():
"Select between these operations on the database"
selection = True
while selection:
print globals()['operate_menu'].__doc__
selection = select([
'chill.database functions',
'execute sql file',
'render_node',
'New collection',
'Manage collection',
'Add document for node',
'help',
])
if selection == 'chill.database functions':
mode_database_functions()
elif selection == 'execute sql file':
print "View the sql file and show a fill in the blanks interface with raw_input"
sqlfile = choose_query_file()
if not sqlfile:
# return to the menu choices if not file picked
selection = True
else:
sql_named_placeholders_re = re.compile(r":(\w+)")
sql = fetch_query_string(sqlfile)
placeholders = set(sql_named_placeholders_re.findall(sql))
print sql
data = {}
for placeholder in placeholders:
value = raw_input(placeholder + ': ')
data[placeholder] = value
result = []
try:
result = db.execute(text(sql), data)
except DatabaseError as err:
current_app.logger.error("DatabaseError: %s", err)
if result and result.returns_rows:
result = result.fetchall()
print result
if not result:
print 'No results.'
else:
kw = result[0]
if 'node_id' in kw:
print 'render node %s' % kw['node_id']
value = render_node(kw['node_id'], **kw)
print safe_dump(value, default_flow_style=False)
else:
#print safe_dump(rowify(result, [(x, None) for x in result[0].keys()]), default_flow_style=False)
print safe_dump([dict(zip(x.keys(), x.values())) for x in result], default_flow_style=False)
elif selection == 'render_node':
print globals()['render_node'].__doc__
node_id = existing_node_input()
value = render_value_for_node(node_id)
print safe_dump(value, default_flow_style=False)
elif selection == 'New collection':
mode_new_collection()
elif selection == 'Manage collection':
mode_collection()
elif selection == 'Add document for node':
folder = current_app.config.get('DOCUMENT_FOLDER')
if not folder:
print "No DOCUMENT_FOLDER configured for the application."
else:
choices = map(os.path.basename,
glob(os.path.join(folder, '*'))
)
choices.sort()
if len(choices) == 0:
print "No files found in DOCUMENT_FOLDER."
else:
filename = select(choices)
if filename:
defaultname = os.path.splitext(filename)[0]
nodename = raw_input("Enter name for node [{0}]: ".format(defaultname)) or defaultname
node = insert_node(name=nodename, value=filename)
print "Added document '%s' to node '%s' with id: %s" % (filename, nodename, node)
elif selection == 'help':
print "------"
print __doc__
print "------"
else:
print 'Done' | [
"def",
"operate_menu",
"(",
")",
":",
"selection",
"=",
"True",
"while",
"selection",
":",
"print",
"globals",
"(",
")",
"[",
"'operate_menu'",
"]",
".",
"__doc__",
"selection",
"=",
"select",
"(",
"[",
"'chill.database functions'",
",",
"'execute sql file'",
",",
"'render_node'",
",",
"'New collection'",
",",
"'Manage collection'",
",",
"'Add document for node'",
",",
"'help'",
",",
"]",
")",
"if",
"selection",
"==",
"'chill.database functions'",
":",
"mode_database_functions",
"(",
")",
"elif",
"selection",
"==",
"'execute sql file'",
":",
"print",
"\"View the sql file and show a fill in the blanks interface with raw_input\"",
"sqlfile",
"=",
"choose_query_file",
"(",
")",
"if",
"not",
"sqlfile",
":",
"# return to the menu choices if not file picked",
"selection",
"=",
"True",
"else",
":",
"sql_named_placeholders_re",
"=",
"re",
".",
"compile",
"(",
"r\":(\\w+)\"",
")",
"sql",
"=",
"fetch_query_string",
"(",
"sqlfile",
")",
"placeholders",
"=",
"set",
"(",
"sql_named_placeholders_re",
".",
"findall",
"(",
"sql",
")",
")",
"print",
"sql",
"data",
"=",
"{",
"}",
"for",
"placeholder",
"in",
"placeholders",
":",
"value",
"=",
"raw_input",
"(",
"placeholder",
"+",
"': '",
")",
"data",
"[",
"placeholder",
"]",
"=",
"value",
"result",
"=",
"[",
"]",
"try",
":",
"result",
"=",
"db",
".",
"execute",
"(",
"text",
"(",
"sql",
")",
",",
"data",
")",
"except",
"DatabaseError",
"as",
"err",
":",
"current_app",
".",
"logger",
".",
"error",
"(",
"\"DatabaseError: %s\"",
",",
"err",
")",
"if",
"result",
"and",
"result",
".",
"returns_rows",
":",
"result",
"=",
"result",
".",
"fetchall",
"(",
")",
"print",
"result",
"if",
"not",
"result",
":",
"print",
"'No results.'",
"else",
":",
"kw",
"=",
"result",
"[",
"0",
"]",
"if",
"'node_id'",
"in",
"kw",
":",
"print",
"'render node %s'",
"%",
"kw",
"[",
"'node_id'",
"]",
"value",
"=",
"render_node",
"(",
"kw",
"[",
"'node_id'",
"]",
",",
"*",
"*",
"kw",
")",
"print",
"safe_dump",
"(",
"value",
",",
"default_flow_style",
"=",
"False",
")",
"else",
":",
"#print safe_dump(rowify(result, [(x, None) for x in result[0].keys()]), default_flow_style=False)",
"print",
"safe_dump",
"(",
"[",
"dict",
"(",
"zip",
"(",
"x",
".",
"keys",
"(",
")",
",",
"x",
".",
"values",
"(",
")",
")",
")",
"for",
"x",
"in",
"result",
"]",
",",
"default_flow_style",
"=",
"False",
")",
"elif",
"selection",
"==",
"'render_node'",
":",
"print",
"globals",
"(",
")",
"[",
"'render_node'",
"]",
".",
"__doc__",
"node_id",
"=",
"existing_node_input",
"(",
")",
"value",
"=",
"render_value_for_node",
"(",
"node_id",
")",
"print",
"safe_dump",
"(",
"value",
",",
"default_flow_style",
"=",
"False",
")",
"elif",
"selection",
"==",
"'New collection'",
":",
"mode_new_collection",
"(",
")",
"elif",
"selection",
"==",
"'Manage collection'",
":",
"mode_collection",
"(",
")",
"elif",
"selection",
"==",
"'Add document for node'",
":",
"folder",
"=",
"current_app",
".",
"config",
".",
"get",
"(",
"'DOCUMENT_FOLDER'",
")",
"if",
"not",
"folder",
":",
"print",
"\"No DOCUMENT_FOLDER configured for the application.\"",
"else",
":",
"choices",
"=",
"map",
"(",
"os",
".",
"path",
".",
"basename",
",",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"folder",
",",
"'*'",
")",
")",
")",
"choices",
".",
"sort",
"(",
")",
"if",
"len",
"(",
"choices",
")",
"==",
"0",
":",
"print",
"\"No files found in DOCUMENT_FOLDER.\"",
"else",
":",
"filename",
"=",
"select",
"(",
"choices",
")",
"if",
"filename",
":",
"defaultname",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"[",
"0",
"]",
"nodename",
"=",
"raw_input",
"(",
"\"Enter name for node [{0}]: \"",
".",
"format",
"(",
"defaultname",
")",
")",
"or",
"defaultname",
"node",
"=",
"insert_node",
"(",
"name",
"=",
"nodename",
",",
"value",
"=",
"filename",
")",
"print",
"\"Added document '%s' to node '%s' with id: %s\"",
"%",
"(",
"filename",
",",
"nodename",
",",
"node",
")",
"elif",
"selection",
"==",
"'help'",
":",
"print",
"\"------\"",
"print",
"__doc__",
"print",
"\"------\"",
"else",
":",
"print",
"'Done'"
] | Select between these operations on the database | [
"Select",
"between",
"these",
"operations",
"on",
"the",
"database"
] | 35360c17c2a3b769ecb5406c6dabcf4cc70bd76f | https://github.com/jkenlooper/chill/blob/35360c17c2a3b769ecb5406c6dabcf4cc70bd76f/src/chill/operate.py#L391-L481 |
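The named-placeholder step in operate_menu's 'execute sql file' branch is the reusable part; a standalone sketch of just that extraction (the SQL string here is invented):

import re

sql = "SELECT * FROM node WHERE id = :node_id AND name = :name"
placeholders = set(re.compile(r":(\w+)").findall(sql))
print(sorted(placeholders))  # ['name', 'node_id'] -- prompt the user once per placeholder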
251,564 | dossier/dossier.web | dossier/web/config.py | thread_local_property | def thread_local_property(name):
'''Creates a thread local ``property``.'''
name = '_thread_local_' + name
def fget(self):
try:
return getattr(self, name).value
except AttributeError:
return None
def fset(self, value):
getattr(self, name).value = value
return property(fget=fget, fset=fset) | python | def thread_local_property(name):
'''Creates a thread local ``property``.'''
name = '_thread_local_' + name
def fget(self):
try:
return getattr(self, name).value
except AttributeError:
return None
def fset(self, value):
getattr(self, name).value = value
return property(fget=fget, fset=fset) | [
"def",
"thread_local_property",
"(",
"name",
")",
":",
"name",
"=",
"'_thread_local_'",
"+",
"name",
"def",
"fget",
"(",
"self",
")",
":",
"try",
":",
"return",
"getattr",
"(",
"self",
",",
"name",
")",
".",
"value",
"except",
"AttributeError",
":",
"return",
"None",
"def",
"fset",
"(",
"self",
",",
"value",
")",
":",
"getattr",
"(",
"self",
",",
"name",
")",
".",
"value",
"=",
"value",
"return",
"property",
"(",
"fget",
"=",
"fget",
",",
"fset",
"=",
"fset",
")"
] | Creates a thread local ``property``. | [
"Creates",
"a",
"thread",
"local",
"property",
"."
] | 1cad1cce3c37d3a4e956abc710a2bc1afe16a092 | https://github.com/dossier/dossier.web/blob/1cad1cce3c37d3a4e956abc710a2bc1afe16a092/dossier/web/config.py#L46-L59 |
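A minimal usage sketch for thread_local_property, assuming the function above is importable; the Holder class is hypothetical, and note that each instance must create the matching threading.local slot itself:

import threading

class Holder(object):
    conn = thread_local_property('conn')
    def __init__(self):
        self._thread_local_conn = threading.local()  # the slot fget/fset expect

h = Holder()
h.conn = 42
print(h.conn)  # 42 in this thread

def worker():
    print(h.conn)  # None -- this thread never set .value, so fget hits AttributeError

t = threading.Thread(target=worker)
t.start()
t.join()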
251,565 | dossier/dossier.web | dossier/web/config.py | Config.kvlclient | def kvlclient(self):
'''Return a thread local ``kvlayer`` client.'''
if self._kvlclient is None:
self._kvlclient = kvlayer.client()
return self._kvlclient | python | def kvlclient(self):
'''Return a thread local ``kvlayer`` client.'''
if self._kvlclient is None:
self._kvlclient = kvlayer.client()
return self._kvlclient | [
"def",
"kvlclient",
"(",
"self",
")",
":",
"if",
"self",
".",
"_kvlclient",
"is",
"None",
":",
"self",
".",
"_kvlclient",
"=",
"kvlayer",
".",
"client",
"(",
")",
"return",
"self",
".",
"_kvlclient"
] | Return a thread local ``kvlayer`` client. | [
"Return",
"a",
"thread",
"local",
"kvlayer",
"client",
"."
] | 1cad1cce3c37d3a4e956abc710a2bc1afe16a092 | https://github.com/dossier/dossier.web/blob/1cad1cce3c37d3a4e956abc710a2bc1afe16a092/dossier/web/config.py#L132-L136 |
251,566 | alexgorin/pymar | pymar/producer.py | Producer.divide | def divide(self, data_source_factory):
"""Divides the task according to the number of workers."""
data_length = data_source_factory.length()
data_interval_length = data_length / self.workers_number() + 1
current_index = 0
self.responses = []
while current_index < data_length:
self.responses.append(0)
offset = current_index
limit = min((data_length - current_index, data_interval_length))
yield data_source_factory.part(limit, offset)
current_index += limit | python | def divide(self, data_source_factory):
"""Divides the task according to the number of workers."""
data_length = data_source_factory.length()
data_interval_length = data_length / self.workers_number() + 1
current_index = 0
self.responses = []
while current_index < data_length:
self.responses.append(0)
offset = current_index
limit = min((data_length - current_index, data_interval_length))
yield data_source_factory.part(limit, offset)
current_index += limit | [
"def",
"divide",
"(",
"self",
",",
"data_source_factory",
")",
":",
"data_length",
"=",
"data_source_factory",
".",
"length",
"(",
")",
"data_interval_length",
"=",
"data_length",
"/",
"self",
".",
"workers_number",
"(",
")",
"+",
"1",
"current_index",
"=",
"0",
"self",
".",
"responses",
"=",
"[",
"]",
"while",
"current_index",
"<",
"data_length",
":",
"self",
".",
"responses",
".",
"append",
"(",
"0",
")",
"offset",
"=",
"current_index",
"limit",
"=",
"min",
"(",
"(",
"data_length",
"-",
"current_index",
",",
"data_interval_length",
")",
")",
"yield",
"data_source_factory",
".",
"part",
"(",
"limit",
",",
"offset",
")",
"current_index",
"+=",
"limit"
] | Divides the task according to the number of workers. | [
"Divides",
"the",
"task",
"according",
"to",
"the",
"number",
"of",
"workers",
"."
] | 39cd029ea6ff135a6400af2114658166dd6f4ae6 | https://github.com/alexgorin/pymar/blob/39cd029ea6ff135a6400af2114658166dd6f4ae6/pymar/producer.py#L89-L101 |
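The chunking arithmetic in divide() can be checked on its own (made-up sizes; // stands in for the Python 2 integer /):

data_length = 10
workers = 3
interval = data_length // workers + 1  # 4

chunks, current = [], 0
while current < data_length:
    limit = min(data_length - current, interval)
    chunks.append((limit, current))  # (limit, offset), as handed to data_source_factory.part()
    current += limit

print(chunks)  # [(4, 0), (4, 4), (2, 8)]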
251,567 | alexgorin/pymar | pymar/producer.py | Producer.map | def map(self, data_source_factory, timeout=0, on_timeout="local_mode"):
"""Sends tasks to workers and awaits the responses.
When all the responses are received, reduces them and returns the result.
If timeout is set greater than 0, the producer will quit waiting for workers once that time has passed.
If on_timeout is set to "local_mode", after the time limit the producer will run tasks locally.
If on_timeout is set to "fail", after the time limit the producer will raise TimeOutException.
"""
def local_launch():
print "Local launch"
return self.reduce_fn(
self.map_fn(data_source_factory.build_data_source())
)
if self.local_mode:
return local_launch()
for index, factory in enumerate(self.divide(data_source_factory)):
self.unprocessed_request_num += 1
self.logging.info("Sending %d-th message with %d elements" % (index + 1, factory.length()))
self.logging.info("len(data) = %d" % len(pickle.dumps(factory)))
self.channel.basic_publish(exchange='',
routing_key=self.routing_key(),
properties=pika.BasicProperties(
reply_to=self.callback_queue,
correlation_id="_".join((self.correlation_id, str(index))),
),
body=pickle.dumps(factory))
self.logging.info("Waiting...")
time_limit_exceeded = [False]
def on_timeout_func():
print "Timeout!!"
self.logging.warning("Timeout!")
time_limit_exceeded[0] = True
if timeout > 0:
self.timer = Timer(timeout, on_timeout_func)
self.timer.start()
while self.unprocessed_request_num:
if time_limit_exceeded[0]:
if on_timeout == "local_mode":
return local_launch()
assert on_timeout == "fail", "Invalid value for on_timeout: %s" % on_timeout
raise TimeOutException()
self.connection.process_data_events()
self.logging.info("Responses: %s" % str(self.responses))
return self.reduce_fn(self.responses) | python | def map(self, data_source_factory, timeout=0, on_timeout="local_mode"):
"""Sends tasks to workers and awaits the responses.
When all the responses are received, reduces them and returns the result.
If timeout is set greater than 0, producer will quit waiting for workers when time has passed.
If on_timeout is set to "local_mode", after the time limit producer will run tasks locally.
If on_timeout is set to "fail", after the time limit producer raise TimeOutException.
"""
def local_launch():
print "Local launch"
return self.reduce_fn(
self.map_fn(data_source_factory.build_data_source())
)
if self.local_mode:
return local_launch()
for index, factory in enumerate(self.divide(data_source_factory)):
self.unprocessed_request_num += 1
self.logging.info("Sending %d-th message with %d elements" % (index + 1, factory.length()))
self.logging.info("len(data) = %d" % len(pickle.dumps(factory)))
self.channel.basic_publish(exchange='',
routing_key=self.routing_key(),
properties=pika.BasicProperties(
reply_to=self.callback_queue,
correlation_id="_".join((self.correlation_id, str(index))),
),
body=pickle.dumps(factory))
self.logging.info("Waiting...")
time_limit_exceeded = [False]
def on_timeout_func():
print "Timeout!!"
self.logging.warning("Timeout!")
time_limit_exceeded[0] = True
if timeout > 0:
self.timer = Timer(timeout, on_timeout_func)
self.timer.start()
while self.unprocessed_request_num:
if time_limit_exceeded[0]:
if on_timeout == "local_mode":
return local_launch()
assert on_timeout == "fail", "Invalid value for on_timeout: %s" % on_timeout
raise TimeOutException()
self.connection.process_data_events()
self.logging.info("Responses: %s" % str(self.responses))
return self.reduce_fn(self.responses) | [
"def",
"map",
"(",
"self",
",",
"data_source_factory",
",",
"timeout",
"=",
"0",
",",
"on_timeout",
"=",
"\"local_mode\"",
")",
":",
"def",
"local_launch",
"(",
")",
":",
"print",
"\"Local launch\"",
"return",
"self",
".",
"reduce_fn",
"(",
"self",
".",
"map_fn",
"(",
"data_source_factory",
".",
"build_data_source",
"(",
")",
")",
")",
"if",
"self",
".",
"local_mode",
":",
"return",
"local_launch",
"(",
")",
"for",
"index",
",",
"factory",
"in",
"enumerate",
"(",
"self",
".",
"divide",
"(",
"data_source_factory",
")",
")",
":",
"self",
".",
"unprocessed_request_num",
"+=",
"1",
"self",
".",
"logging",
".",
"info",
"(",
"\"Sending %d-th message with %d elements\"",
"%",
"(",
"index",
"+",
"1",
",",
"factory",
".",
"length",
"(",
")",
")",
")",
"self",
".",
"logging",
".",
"info",
"(",
"\"len(data) = %d\"",
"%",
"len",
"(",
"pickle",
".",
"dumps",
"(",
"factory",
")",
")",
")",
"self",
".",
"channel",
".",
"basic_publish",
"(",
"exchange",
"=",
"''",
",",
"routing_key",
"=",
"self",
".",
"routing_key",
"(",
")",
",",
"properties",
"=",
"pika",
".",
"BasicProperties",
"(",
"reply_to",
"=",
"self",
".",
"callback_queue",
",",
"correlation_id",
"=",
"\"_\"",
".",
"join",
"(",
"(",
"self",
".",
"correlation_id",
",",
"str",
"(",
"index",
")",
")",
")",
",",
")",
",",
"body",
"=",
"pickle",
".",
"dumps",
"(",
"factory",
")",
")",
"self",
".",
"logging",
".",
"info",
"(",
"\"Waiting...\"",
")",
"time_limit_exceeded",
"=",
"[",
"False",
"]",
"def",
"on_timeout_func",
"(",
")",
":",
"print",
"\"Timeout!!\"",
"self",
".",
"logging",
".",
"warning",
"(",
"\"Timeout!\"",
")",
"time_limit_exceeded",
"[",
"0",
"]",
"=",
"True",
"if",
"timeout",
">",
"0",
":",
"self",
".",
"timer",
"=",
"Timer",
"(",
"timeout",
",",
"on_timeout_func",
")",
"self",
".",
"timer",
".",
"start",
"(",
")",
"while",
"self",
".",
"unprocessed_request_num",
":",
"if",
"time_limit_exceeded",
"[",
"0",
"]",
":",
"if",
"on_timeout",
"==",
"\"local_mode\"",
":",
"return",
"local_launch",
"(",
")",
"assert",
"on_timeout",
"==",
"\"fail\"",
",",
"\"Invalid value for on_timeout: %s\"",
"%",
"on_timeout",
"raise",
"TimeOutException",
"(",
")",
"self",
".",
"connection",
".",
"process_data_events",
"(",
")",
"self",
".",
"logging",
".",
"info",
"(",
"\"Responses: %s\"",
"%",
"str",
"(",
"self",
".",
"responses",
")",
")",
"return",
"self",
".",
"reduce_fn",
"(",
"self",
".",
"responses",
")"
] | Sends tasks to workers and awaits the responses.
When all the responses are received, reduces them and returns the result.
If timeout is set greater than 0, the producer will quit waiting for workers once that time has passed.
If on_timeout is set to "local_mode", after the time limit the producer will run tasks locally.
If on_timeout is set to "fail", after the time limit the producer will raise TimeOutException. | [
"Sends",
"tasks",
"to",
"workers",
"and",
"awaits",
"the",
"responses",
".",
"When",
"all",
"the",
"responses",
"are",
"received",
"reduces",
"them",
"and",
"returns",
"the",
"result",
"."
] | 39cd029ea6ff135a6400af2114658166dd6f4ae6 | https://github.com/alexgorin/pymar/blob/39cd029ea6ff135a6400af2114658166dd6f4ae6/pymar/producer.py#L103-L156 |
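The timeout handling in map() hinges on a one-element list that the Timer callback can mutate; the trick in isolation:

import time
from threading import Timer

expired = [False]  # a list, so the nested callback can rebind the flag

def on_timeout():
    expired[0] = True

Timer(0.1, on_timeout).start()
while not expired[0]:  # stand-in for the connection.process_data_events() loop
    time.sleep(0.01)
print("timed out")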
251,568 | corydodt/Crosscap | crosscap/openapi.py | _orderedCleanDict | def _orderedCleanDict(attrsObj):
"""
-> dict with false-values removed
Also evaluates attr-instances for false-ness by looking at the values of their properties
"""
def _filt(k, v):
if attr.has(v):
return not not any(attr.astuple(v))
return not not v
return attr.asdict(attrsObj,
dict_factory=OrderedDict,
recurse=False,
filter=_filt) | python | def _orderedCleanDict(attrsObj):
"""
-> dict with false-values removed
Also evaluates attr-instances for false-ness by looking at the values of their properties
"""
def _filt(k, v):
if attr.has(v):
return not not any(attr.astuple(v))
return not not v
return attr.asdict(attrsObj,
dict_factory=OrderedDict,
recurse=False,
filter=_filt) | [
"def",
"_orderedCleanDict",
"(",
"attrsObj",
")",
":",
"def",
"_filt",
"(",
"k",
",",
"v",
")",
":",
"if",
"attr",
".",
"has",
"(",
"v",
")",
":",
"return",
"not",
"not",
"any",
"(",
"attr",
".",
"astuple",
"(",
"v",
")",
")",
"return",
"not",
"not",
"v",
"return",
"attr",
".",
"asdict",
"(",
"attrsObj",
",",
"dict_factory",
"=",
"OrderedDict",
",",
"recurse",
"=",
"False",
",",
"filter",
"=",
"_filt",
")"
] | -> dict with false-values removed
Also evaluates attr-instances for false-ness by looking at the values of their properties | [
"-",
">",
"dict",
"with",
"false",
"-",
"values",
"removed"
] | 388a2ec36b8aa85e8f1ed692bb6e43474ba76c8e | https://github.com/corydodt/Crosscap/blob/388a2ec36b8aa85e8f1ed692bb6e43474ba76c8e/crosscap/openapi.py#L124-L138 |
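A quick check of _orderedCleanDict's filtering (hypothetical attrs classes, assuming the function above is importable): a nested attrs value survives only if at least one of its own fields is truthy.

import attr

@attr.s
class Inner(object):
    x = attr.ib(default=0)

@attr.s
class Outer(object):
    name = attr.ib(default='')
    tags = attr.ib(default=attr.Factory(list))
    inner = attr.ib(default=attr.Factory(Inner))

print(_orderedCleanDict(Outer(name='demo')))
# OrderedDict([('name', 'demo')]) -- empty tags and the all-falsy Inner are dropped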
251,569 | corydodt/Crosscap | crosscap/openapi.py | representCleanOpenAPIOperation | def representCleanOpenAPIOperation(dumper, data):
"""
Unpack nonstandard attributes while representing an OpenAPIOperation
"""
dct = _orderedCleanDict(data)
if '_extended' in dct:
for k, ext in list(data._extended.items()):
dct[k] = ext
del dct['_extended']
return dumper.yaml_representers[type(dct)](dumper, dct) | python | def representCleanOpenAPIOperation(dumper, data):
"""
Unpack nonstandard attributes while representing an OpenAPIOperation
"""
dct = _orderedCleanDict(data)
if '_extended' in dct:
for k, ext in list(data._extended.items()):
dct[k] = ext
del dct['_extended']
return dumper.yaml_representers[type(dct)](dumper, dct) | [
"def",
"representCleanOpenAPIOperation",
"(",
"dumper",
",",
"data",
")",
":",
"dct",
"=",
"_orderedCleanDict",
"(",
"data",
")",
"if",
"'_extended'",
"in",
"dct",
":",
"for",
"k",
",",
"ext",
"in",
"list",
"(",
"data",
".",
"_extended",
".",
"items",
"(",
")",
")",
":",
"dct",
"[",
"k",
"]",
"=",
"ext",
"del",
"dct",
"[",
"'_extended'",
"]",
"return",
"dumper",
".",
"yaml_representers",
"[",
"type",
"(",
"dct",
")",
"]",
"(",
"dumper",
",",
"dct",
")"
] | Unpack nonstandard attributes while representing an OpenAPIOperation | [
"Unpack",
"nonstandard",
"attributes",
"while",
"representing",
"an",
"OpenAPIOperation"
] | 388a2ec36b8aa85e8f1ed692bb6e43474ba76c8e | https://github.com/corydodt/Crosscap/blob/388a2ec36b8aa85e8f1ed692bb6e43474ba76c8e/crosscap/openapi.py#L141-L151 |
251,570 | corydodt/Crosscap | crosscap/openapi.py | representCleanOpenAPIParameter | def representCleanOpenAPIParameter(dumper, data):
"""
Rename python reserved keyword fields before representing an OpenAPIParameter
"""
dct = _orderedCleanDict(data)
# We are using "in_" as a key for the "in" parameter, since in is a Python keyword.
# To represent it correctly, we then have to swap "in_" for "in".
# So we do an item-by-item copy of the dct so we don't change the order when
# making this swap.
d2 = OrderedDict()
for k, v in dct.copy().items():
if k == 'in_':
d2['in'] = v
else:
d2[k] = v
return dumper.yaml_representers[type(d2)](dumper, d2) | python | def representCleanOpenAPIParameter(dumper, data):
"""
Rename python reserved keyword fields before representing an OpenAPIParameter
"""
dct = _orderedCleanDict(data)
# We are using "in_" as a key for the "in" parameter, since in is a Python keyword.
# To represent it correctly, we then have to swap "in_" for "in".
# So we do an item-by-item copy of the dct so we don't change the order when
# making this swap.
d2 = OrderedDict()
for k, v in dct.copy().items():
if k == 'in_':
d2['in'] = v
else:
d2[k] = v
return dumper.yaml_representers[type(d2)](dumper, d2) | [
"def",
"representCleanOpenAPIParameter",
"(",
"dumper",
",",
"data",
")",
":",
"dct",
"=",
"_orderedCleanDict",
"(",
"data",
")",
"# We are using \"in_\" as a key for the \"in\" parameter, since in is a Python keyword.",
"# To represent it correctly, we then have to swap \"in_\" for \"in\".",
"# So we do an item-by-item copy of the dct so we don't change the order when",
"# making this swap.",
"d2",
"=",
"OrderedDict",
"(",
")",
"for",
"k",
",",
"v",
"in",
"dct",
".",
"copy",
"(",
")",
".",
"items",
"(",
")",
":",
"if",
"k",
"==",
"'in_'",
":",
"d2",
"[",
"'in'",
"]",
"=",
"v",
"else",
":",
"d2",
"[",
"k",
"]",
"=",
"v",
"return",
"dumper",
".",
"yaml_representers",
"[",
"type",
"(",
"d2",
")",
"]",
"(",
"dumper",
",",
"d2",
")"
] | Rename python reserved keyword fields before representing an OpenAPIParameter | [
"Rename",
"python",
"reserved",
"keyword",
"fields",
"before",
"representing",
"an",
"OpenAPIParameter"
] | 388a2ec36b8aa85e8f1ed692bb6e43474ba76c8e | https://github.com/corydodt/Crosscap/blob/388a2ec36b8aa85e8f1ed692bb6e43474ba76c8e/crosscap/openapi.py#L168-L184 |
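The in_ -> in swap from representCleanOpenAPIParameter, without the yaml machinery (stand-in dict):

from collections import OrderedDict

dct = OrderedDict([('name', 'id'), ('in_', 'query'), ('required', True)])
d2 = OrderedDict()
for k, v in dct.items():
    d2['in' if k == 'in_' else k] = v
print(list(d2))  # ['name', 'in', 'required'] -- order preserved, keyword restored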
251,571 | corydodt/Crosscap | crosscap/openapi.py | representCleanOpenAPIObjects | def representCleanOpenAPIObjects(dumper, data):
"""
Produce a representation of an OpenAPI object, removing empty attributes
"""
dct = _orderedCleanDict(data)
return dumper.yaml_representers[type(dct)](dumper, dct) | python | def representCleanOpenAPIObjects(dumper, data):
"""
Produce a representation of an OpenAPI object, removing empty attributes
"""
dct = _orderedCleanDict(data)
return dumper.yaml_representers[type(dct)](dumper, dct) | [
"def",
"representCleanOpenAPIObjects",
"(",
"dumper",
",",
"data",
")",
":",
"dct",
"=",
"_orderedCleanDict",
"(",
"data",
")",
"return",
"dumper",
".",
"yaml_representers",
"[",
"type",
"(",
"dct",
")",
"]",
"(",
"dumper",
",",
"dct",
")"
] | Produce a representation of an OpenAPI object, removing empty attributes | [
"Produce",
"a",
"representation",
"of",
"an",
"OpenAPI",
"object",
"removing",
"empty",
"attributes"
] | 388a2ec36b8aa85e8f1ed692bb6e43474ba76c8e | https://github.com/corydodt/Crosscap/blob/388a2ec36b8aa85e8f1ed692bb6e43474ba76c8e/crosscap/openapi.py#L187-L193 |
251,572 | corydodt/Crosscap | crosscap/openapi.py | mediaTypeHelper | def mediaTypeHelper(mediaType):
"""
Return a function that creates a Responses object.
"""
def _innerHelper(data=None):
"""
Create a Responses object that contains a MediaType entry of the specified mediaType
Convenience function for the most common cases where you need an instance of Responses
"""
ret = OpenAPIResponses()
if data is None:
data = {}
ret.default.content[mediaType] = data
return ret
return _innerHelper | python | def mediaTypeHelper(mediaType):
"""
Return a function that creates a Responses object.
"""
def _innerHelper(data=None):
"""
Create a Responses object that contains a MediaType entry of the specified mediaType
Convenience function for the most common cases where you need an instance of Responses
"""
ret = OpenAPIResponses()
if data is None:
data = {}
ret.default.content[mediaType] = data
return ret
return _innerHelper | [
"def",
"mediaTypeHelper",
"(",
"mediaType",
")",
":",
"def",
"_innerHelper",
"(",
"data",
"=",
"None",
")",
":",
"\"\"\"\n Create a Responses object that contains a MediaType entry of the specified mediaType\n\n Convenience function for the most common cases where you need an instance of Responses\n \"\"\"",
"ret",
"=",
"OpenAPIResponses",
"(",
")",
"if",
"data",
"is",
"None",
":",
"data",
"=",
"{",
"}",
"ret",
".",
"default",
".",
"content",
"[",
"mediaType",
"]",
"=",
"data",
"return",
"ret",
"return",
"_innerHelper"
] | Return a function that creates a Responses object. | [
"Return",
"a",
"function",
"that",
"creates",
"a",
"Responses",
"object",
";"
] | 388a2ec36b8aa85e8f1ed692bb6e43474ba76c8e | https://github.com/corydodt/Crosscap/blob/388a2ec36b8aa85e8f1ed692bb6e43474ba76c8e/crosscap/openapi.py#L196-L211 |
251,573 | etcher-be/elib_config | elib_config/_file/_config_file.py | _ensure_config_file_exists | def _ensure_config_file_exists():
"""
Makes sure the config file exists.
:raises: :class:`epab.core.new_config.exc.ConfigFileNotFoundError`
"""
config_file = Path(ELIBConfig.config_file_path).absolute()
if not config_file.exists():
raise ConfigFileNotFoundError(ELIBConfig.config_file_path) | python | def _ensure_config_file_exists():
"""
Makes sure the config file exists.
:raises: :class:`epab.core.new_config.exc.ConfigFileNotFoundError`
"""
config_file = Path(ELIBConfig.config_file_path).absolute()
if not config_file.exists():
raise ConfigFileNotFoundError(ELIBConfig.config_file_path) | [
"def",
"_ensure_config_file_exists",
"(",
")",
":",
"config_file",
"=",
"Path",
"(",
"ELIBConfig",
".",
"config_file_path",
")",
".",
"absolute",
"(",
")",
"if",
"not",
"config_file",
".",
"exists",
"(",
")",
":",
"raise",
"ConfigFileNotFoundError",
"(",
"ELIBConfig",
".",
"config_file_path",
")"
] | Makes sure the config file exists.
:raises: :class:`epab.core.new_config.exc.ConfigFileNotFoundError` | [
"Makes",
"sure",
"the",
"config",
"file",
"exists",
"."
] | 5d8c839e84d70126620ab0186dc1f717e5868bd0 | https://github.com/etcher-be/elib_config/blob/5d8c839e84d70126620ab0186dc1f717e5868bd0/elib_config/_file/_config_file.py#L21-L29 |
251,574 | jenanwise/codequality | codequality/main.py | CodeQuality._relevant_checkers | def _relevant_checkers(self, path):
"""
Get set of checkers for the given path.
TODO: currently this is based off the file extension. We would like to
honor magic bits as well, so that python binaries, shell scripts, etc.
can be recognized, but we're not guaranteed that `path` currently exists on the filesystem
-- e.g. when version control for historical revs is used.
"""
_, ext = os.path.splitext(path)
ext = ext.lstrip('.')
return checkers.checkers.get(ext, []) | python | def _relevant_checkers(self, path):
"""
Get set of checkers for the given path.
TODO: currently this is based off the file extension. We would like to
honor magic bits as well, so that python binaries, shell scripts, etc.
can be recognized, but we're not guaranteed that `path` currently exists on the filesystem
-- e.g. when version control for historical revs is used.
"""
_, ext = os.path.splitext(path)
ext = ext.lstrip('.')
return checkers.checkers.get(ext, []) | [
"def",
"_relevant_checkers",
"(",
"self",
",",
"path",
")",
":",
"_",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"path",
")",
"ext",
"=",
"ext",
".",
"lstrip",
"(",
"'.'",
")",
"return",
"checkers",
".",
"checkers",
".",
"get",
"(",
"ext",
",",
"[",
"]",
")"
] | Get set of checkers for the given path.
TODO: currently this is based off the file extension. We would like to
honor magic bits as well, so that python binaries, shell scripts, etc.
can be recognized, but we're not guaranteed that `path` currently exists on the filesystem
-- e.g. when version control for historical revs is used. | [
"Get",
"set",
"of",
"checkers",
"for",
"the",
"given",
"path",
"."
] | 8a2bd767fd73091c49a5318fdbfb2b4fff77533d | https://github.com/jenanwise/codequality/blob/8a2bd767fd73091c49a5318fdbfb2b4fff77533d/codequality/main.py#L139-L150 |
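The extension-keyed lookup of _relevant_checkers in miniature (the registry contents here are invented):

import os

registry = {'py': ['PyflakesChecker'], 'js': ['JsHintChecker']}  # hypothetical

def relevant(path):
    _, ext = os.path.splitext(path)
    return registry.get(ext.lstrip('.'), [])

print(relevant('pkg/module.py'))  # ['PyflakesChecker']
print(relevant('README'))         # [] -- no extension, no checkers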
251,575 | jenanwise/codequality | codequality/main.py | CodeQuality._list_checkers | def _list_checkers(self):
"""
Print information about checkers and their external tools.
Currently only works properly on systems with the `which` tool
available.
"""
classes = set()
for checker_group in checkers.checkers.itervalues():
for checker in checker_group:
classes.add(checker)
max_width = 0
for clazz in classes:
max_width = max(max_width, len(clazz.tool), len(clazz.__name__))
for clazz in sorted(classes):
status, _ = commands.getstatusoutput('which %s' % clazz.tool)
result = 'missing' if status else 'installed'
version = '' if status else clazz.get_version()
print '%s%s%s%s' % (
clazz.__name__.ljust(max_width + 1),
clazz.tool.ljust(max_width + 1),
result.ljust(max_width + 1),
version,
) | python | def _list_checkers(self):
"""
Print information about checkers and their external tools.
Currently only works properly on systems with the `which` tool
available.
"""
classes = set()
for checker_group in checkers.checkers.itervalues():
for checker in checker_group:
classes.add(checker)
max_width = 0
for clazz in classes:
max_width = max(max_width, len(clazz.tool), len(clazz.__name__))
for clazz in sorted(classes):
status, _ = commands.getstatusoutput('which %s' % clazz.tool)
result = 'missing' if status else 'installed'
version = '' if status else clazz.get_version()
print '%s%s%s%s' % (
clazz.__name__.ljust(max_width + 1),
clazz.tool.ljust(max_width + 1),
result.ljust(max_width + 1),
version,
) | [
"def",
"_list_checkers",
"(",
"self",
")",
":",
"classes",
"=",
"set",
"(",
")",
"for",
"checker_group",
"in",
"checkers",
".",
"checkers",
".",
"itervalues",
"(",
")",
":",
"for",
"checker",
"in",
"checker_group",
":",
"classes",
".",
"add",
"(",
"checker",
")",
"max_width",
"=",
"0",
"for",
"clazz",
"in",
"classes",
":",
"max_width",
"=",
"max",
"(",
"max_width",
",",
"len",
"(",
"clazz",
".",
"tool",
")",
",",
"len",
"(",
"clazz",
".",
"__name__",
")",
")",
"for",
"clazz",
"in",
"sorted",
"(",
"classes",
")",
":",
"status",
",",
"_",
"=",
"commands",
".",
"getstatusoutput",
"(",
"'which %s'",
"%",
"clazz",
".",
"tool",
")",
"result",
"=",
"'missing'",
"if",
"status",
"else",
"'installed'",
"version",
"=",
"''",
"if",
"status",
"else",
"clazz",
".",
"get_version",
"(",
")",
"print",
"'%s%s%s%s'",
"%",
"(",
"clazz",
".",
"__name__",
".",
"ljust",
"(",
"max_width",
"+",
"1",
")",
",",
"clazz",
".",
"tool",
".",
"ljust",
"(",
"max_width",
"+",
"1",
")",
",",
"result",
".",
"ljust",
"(",
"max_width",
"+",
"1",
")",
",",
"version",
",",
")"
] | Print information about checkers and their external tools.
Currently only works properly on systems with the `which` tool
available. | [
"Print",
"information",
"about",
"checkers",
"and",
"their",
"external",
"tools",
"."
] | 8a2bd767fd73091c49a5318fdbfb2b4fff77533d | https://github.com/jenanwise/codequality/blob/8a2bd767fd73091c49a5318fdbfb2b4fff77533d/codequality/main.py#L176-L201 |
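The availability probe in _list_checkers leans on the Python 2-only commands module; a rough Python 3 equivalent of just that probe, using shutil.which instead of shelling out to `which` (tool names are examples, not the registry's):

import shutil

for tool in ('pep8', 'jshint'):
    path = shutil.which(tool)
    print(tool, 'installed at %s' % path if path else 'missing')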
251,576 | jenanwise/codequality | codequality/main.py | CodeQuality._should_ignore | def _should_ignore(self, path):
"""
Return True iff path should be ignored.
"""
for ignore in self.options.ignores:
if fnmatch.fnmatch(path, ignore):
return True
return False | python | def _should_ignore(self, path):
"""
Return True iff path should be ignored.
"""
for ignore in self.options.ignores:
if fnmatch.fnmatch(path, ignore):
return True
return False | [
"def",
"_should_ignore",
"(",
"self",
",",
"path",
")",
":",
"for",
"ignore",
"in",
"self",
".",
"options",
".",
"ignores",
":",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"path",
",",
"ignore",
")",
":",
"return",
"True",
"return",
"False"
] | Return True iff path should be ignored. | [
"Return",
"True",
"iff",
"path",
"should",
"be",
"ignored",
"."
] | 8a2bd767fd73091c49a5318fdbfb2b4fff77533d | https://github.com/jenanwise/codequality/blob/8a2bd767fd73091c49a5318fdbfb2b4fff77533d/codequality/main.py#L203-L210 |
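fnmatch semantics checked directly (example patterns); note that * also crosses path separators:

import fnmatch

ignores = ['*.min.js', 'vendor/*']

def should_ignore(path):
    return any(fnmatch.fnmatch(path, pat) for pat in ignores)

print(should_ignore('app.min.js'))     # True
print(should_ignore('vendor/lib.py'))  # True -- 'vendor/*' matches across '/'
print(should_ignore('src/app.js'))     # False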
251,577 | knagra/farnsworth | legacy/views.py | legacy_notes_view | def legacy_notes_view(request):
"""
View to see legacy notes.
"""
notes = TeacherNote.objects.all()
note_count = notes.count()
paginator = Paginator(notes, 100)
page = request.GET.get('page')
try:
notes = paginator.page(page)
except PageNotAnInteger:
notes = paginator.page(1)
except EmptyPage:
notes = paginator.page(paginator.num_pages)
return render_to_response(
'teacher_notes.html',
{'page_name': "Legacy Notes",
'notes': notes,
'note_count': note_count,},
context_instance=RequestContext(request)
) | python | def legacy_notes_view(request):
"""
View to see legacy notes.
"""
notes = TeacherNote.objects.all()
note_count = notes.count()
paginator = Paginator(notes, 100)
page = request.GET.get('page')
try:
notes = paginator.page(page)
except PageNotAnInteger:
notes = paginator.page(1)
except EmptyPage:
notes = paginator.page(paginator.num_pages)
return render_to_response(
'teacher_notes.html',
{'page_name': "Legacy Notes",
'notes': notes,
'note_count': note_count,},
context_instance=RequestContext(request)
) | [
"def",
"legacy_notes_view",
"(",
"request",
")",
":",
"notes",
"=",
"TeacherNote",
".",
"objects",
".",
"all",
"(",
")",
"note_count",
"=",
"notes",
".",
"count",
"(",
")",
"paginator",
"=",
"Paginator",
"(",
"notes",
",",
"100",
")",
"page",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'page'",
")",
"try",
":",
"notes",
"=",
"paginator",
".",
"page",
"(",
"page",
")",
"except",
"PageNotAnInteger",
":",
"notes",
"=",
"paginator",
".",
"page",
"(",
"1",
")",
"except",
"EmptyPage",
":",
"notes",
"=",
"paginator",
".",
"page",
"(",
"paginator",
".",
"num_pages",
")",
"return",
"render_to_response",
"(",
"'teacher_notes.html'",
",",
"{",
"'page_name'",
":",
"\"Legacy Notes\"",
",",
"'notes'",
":",
"notes",
",",
"'note_count'",
":",
"note_count",
",",
"}",
",",
"context_instance",
"=",
"RequestContext",
"(",
"request",
")",
")"
] | View to see legacy notes. | [
"View",
"to",
"see",
"legacy",
"notes",
"."
] | 1b6589f0d9fea154f0a1e2231ed906764ed26d26 | https://github.com/knagra/farnsworth/blob/1b6589f0d9fea154f0a1e2231ed906764ed26d26/legacy/views.py#L69-L90 |
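The try/except fallback idiom shared by all three legacy views, isolated (a sketch assuming Django is installed; a plain list stands in for the queryset):

from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator

def safe_page(objects, page, per_page=100):
    paginator = Paginator(objects, per_page)
    try:
        return paginator.page(page)
    except PageNotAnInteger:
        return paginator.page(1)  # non-numeric ?page= -> first page
    except EmptyPage:
        return paginator.page(paginator.num_pages)  # out of range -> last page

print(safe_page(range(250), 'abc').number)  # 1
print(safe_page(range(250), 99).number)     # 3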
251,578 | knagra/farnsworth | legacy/views.py | legacy_events_view | def legacy_events_view(request):
"""
View to see legacy events.
"""
events = TeacherEvent.objects.all()
event_count = events.count()
paginator = Paginator(events, 100)
page = request.GET.get('page')
try:
events = paginator.page(page)
except PageNotAnInteger:
events = paginator.page(1)
except EmptyPage:
events = paginator.page(paginator.num_pages)
return render_to_response(
'teacher_events.html',
{'page_name': "Legacy Events",
'events': events,
'event_count': event_count,},
context_instance=RequestContext(request)
) | python | def legacy_events_view(request):
"""
View to see legacy events.
"""
events = TeacherEvent.objects.all()
event_count = events.count()
paginator = Paginator(events, 100)
page = request.GET.get('page')
try:
events = paginator.page(page)
except PageNotAnInteger:
events = paginator.page(1)
except EmptyPage:
events = paginator.page(paginator.num_pages)
return render_to_response(
'teacher_events.html',
{'page_name': "Legacy Events",
'events': events,
'event_count': event_count,},
context_instance=RequestContext(request)
) | [
"def",
"legacy_events_view",
"(",
"request",
")",
":",
"events",
"=",
"TeacherEvent",
".",
"objects",
".",
"all",
"(",
")",
"event_count",
"=",
"events",
".",
"count",
"(",
")",
"paginator",
"=",
"Paginator",
"(",
"events",
",",
"100",
")",
"page",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'page'",
")",
"try",
":",
"events",
"=",
"paginator",
".",
"page",
"(",
"page",
")",
"except",
"PageNotAnInteger",
":",
"events",
"=",
"paginator",
".",
"page",
"(",
"1",
")",
"except",
"EmptyPage",
":",
"events",
"=",
"paginator",
".",
"page",
"(",
"paginator",
".",
"num_pages",
")",
"return",
"render_to_response",
"(",
"'teacher_events.html'",
",",
"{",
"'page_name'",
":",
"\"Legacy Events\"",
",",
"'events'",
":",
"events",
",",
"'event_count'",
":",
"event_count",
",",
"}",
",",
"context_instance",
"=",
"RequestContext",
"(",
"request",
")",
")"
] | View to see legacy events. | [
"View",
"to",
"see",
"legacy",
"events",
"."
] | 1b6589f0d9fea154f0a1e2231ed906764ed26d26 | https://github.com/knagra/farnsworth/blob/1b6589f0d9fea154f0a1e2231ed906764ed26d26/legacy/views.py#L93-L114 |
251,579 | knagra/farnsworth | legacy/views.py | legacy_requests_view | def legacy_requests_view(request, rtype):
"""
View to see legacy requests of rtype request type, which should be either
'food' or 'maintenance'.
"""
if not rtype in ['food', 'maintenance']:
raise Http404
requests_dict = [] # [(req, [req_responses]), (req2, [req2_responses]), ...]
requests = TeacherRequest.objects.filter(request_type=rtype)
request_count = requests.count()
paginator = Paginator(requests, 50)
page = request.GET.get('page')
try:
requests = paginator.page(page)
except PageNotAnInteger:
requests = paginator.page(1)
except EmptyPage:
requests = paginator.page(paginator.num_pages)
for req in requests:
requests_dict.append(
(req, TeacherResponse.objects.filter(request=req),)
)
return render_to_response(
'teacher_requests.html',
{'page_name': "Legacy {rtype} Requests".format(rtype=rtype.title()),
'requests_dict': requests_dict,
'requests': requests,
'request_type': rtype.title(),
'request_count': request_count,},
context_instance=RequestContext(request)
) | python | def legacy_requests_view(request, rtype):
"""
View to see legacy requests of rtype request type, which should be either
'food' or 'maintenance'.
"""
if not rtype in ['food', 'maintenance']:
raise Http404
requests_dict = [] # [(req, [req_responses]), (req2, [req2_responses]), ...]
requests = TeacherRequest.objects.filter(request_type=rtype)
request_count = requests.count()
paginator = Paginator(requests, 50)
page = request.GET.get('page')
try:
requests = paginator.page(page)
except PageNotAnInteger:
requests = paginator.page(1)
except EmptyPage:
requests = paginator.page(paginator.num_pages)
for req in requests:
requests_dict.append(
(req, TeacherResponse.objects.filter(request=req),)
)
return render_to_response(
'teacher_requests.html',
{'page_name': "Legacy {rtype} Requests".format(rtype=rtype.title()),
'requests_dict': requests_dict,
'requests': requests,
'request_type': rtype.title(),
'request_count': request_count,},
context_instance=RequestContext(request)
) | [
"def",
"legacy_requests_view",
"(",
"request",
",",
"rtype",
")",
":",
"if",
"not",
"rtype",
"in",
"[",
"'food'",
",",
"'maintenance'",
"]",
":",
"raise",
"Http404",
"requests_dict",
"=",
"[",
"]",
"# [(req, [req_responses]), (req2, [req2_responses]), ...]",
"requests",
"=",
"TeacherRequest",
".",
"objects",
".",
"filter",
"(",
"request_type",
"=",
"rtype",
")",
"request_count",
"=",
"requests",
".",
"count",
"(",
")",
"paginator",
"=",
"Paginator",
"(",
"requests",
",",
"50",
")",
"page",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'page'",
")",
"try",
":",
"requests",
"=",
"paginator",
".",
"page",
"(",
"page",
")",
"except",
"PageNotAnInteger",
":",
"requests",
"=",
"paginator",
".",
"page",
"(",
"1",
")",
"except",
"EmptyPage",
":",
"requests",
"=",
"paginator",
".",
"page",
"(",
"paginator",
".",
"num_pages",
")",
"for",
"req",
"in",
"requests",
":",
"requests_dict",
".",
"append",
"(",
"(",
"req",
",",
"TeacherResponse",
".",
"objects",
".",
"filter",
"(",
"request",
"=",
"req",
")",
",",
")",
")",
"return",
"render_to_response",
"(",
"'teacher_requests.html'",
",",
"{",
"'page_name'",
":",
"\"Legacy {rtype} Requests\"",
".",
"format",
"(",
"rtype",
"=",
"rtype",
".",
"title",
"(",
")",
")",
",",
"'requests_dict'",
":",
"requests_dict",
",",
"'requests'",
":",
"requests",
",",
"'request_type'",
":",
"rtype",
".",
"title",
"(",
")",
",",
"'request_count'",
":",
"request_count",
",",
"}",
",",
"context_instance",
"=",
"RequestContext",
"(",
"request",
")",
")"
] | View to see legacy requests of rtype request type, which should be either
'food' or 'maintenance'. | [
"View",
"to",
"see",
"legacy",
"requests",
"of",
"rtype",
"request",
"type",
"which",
"should",
"be",
"either",
"food",
"or",
"maintenance",
"."
] | 1b6589f0d9fea154f0a1e2231ed906764ed26d26 | https://github.com/knagra/farnsworth/blob/1b6589f0d9fea154f0a1e2231ed906764ed26d26/legacy/views.py#L117-L148 |
251,580 | fstab50/metal | metal/bash_init.py | run_command_orig | def run_command_orig(cmd):
""" No idea how th f to get this to work """
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode == 0:
os.killpg(os.getpgid(process.pid), signal.SIGTERM)
else:
raise BadRCError("Bad rc (%s) for cmd '%s': %s" % (process.returncode, cmd, stdout + stderr))
return stdout | python | def run_command_orig(cmd):
""" No idea how th f to get this to work """
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode == 0:
os.killpg(os.getpgid(process.pid), signal.SIGTERM)
else:
raise BadRCError("Bad rc (%s) for cmd '%s': %s" % (process.returncode, cmd, stdout + stderr))
return stdout | [
"def",
"run_command_orig",
"(",
"cmd",
")",
":",
"process",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"shell",
"=",
"True",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"stdout",
",",
"stderr",
"=",
"process",
".",
"communicate",
"(",
")",
"if",
"process",
".",
"returncode",
"==",
"0",
":",
"os",
".",
"killpg",
"(",
"os",
".",
"getpgid",
"(",
"pro",
".",
"pid",
")",
",",
"signal",
".",
"SIGTERM",
")",
"else",
":",
"raise",
"BadRCError",
"(",
"\"Bad rc (%s) for cmd '%s': %s\"",
"%",
"(",
"process",
".",
"returncode",
",",
"cmd",
",",
"stdout",
"+",
"stderr",
")",
")",
"return",
"stdout"
] | No idea how th f to get this to work | [
"No",
"idea",
"how",
"th",
"f",
"to",
"get",
"this",
"to",
"work"
] | 0488bbdd516a508909267cc44191f632e21156ba | https://github.com/fstab50/metal/blob/0488bbdd516a508909267cc44191f632e21156ba/metal/bash_init.py#L12-L20 |
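Beyond the undefined name fixed above, the docstring concedes run_command_orig is wrong: it kills a process group on success and raises only on failure. A conventional "run, capture, raise on non-zero" shape looks like this (a sketch, not the repo's code; BadRCError here stands in for the repo's exception):

import subprocess

class BadRCError(Exception):
    pass

def run_command(cmd):
    process = subprocess.Popen(cmd, shell=True,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    if process.returncode != 0:
        raise BadRCError("Bad rc (%s) for cmd '%s': %s"
                         % (process.returncode, cmd, stdout + stderr))
    return stdout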
251,581 | tBaxter/django-fretboard | fretboard/models.py | Topic.page_count | def page_count(self):
"""
Get count of total pages
"""
postcount = self.post_set.count()
max_pages = (postcount / get_paginate_by())
if postcount % get_paginate_by() != 0:
max_pages += 1
return max_pages | python | def page_count(self):
"""
Get count of total pages
"""
postcount = self.post_set.count()
max_pages = (postcount / get_paginate_by())
if postcount % get_paginate_by() != 0:
max_pages += 1
return max_pages | [
"def",
"page_count",
"(",
"self",
")",
":",
"postcount",
"=",
"self",
".",
"post_set",
".",
"count",
"(",
")",
"max_pages",
"=",
"(",
"postcount",
"/",
"get_paginate_by",
"(",
")",
")",
"if",
"postcount",
"%",
"get_paginate_by",
"(",
")",
"!=",
"0",
":",
"max_pages",
"+=",
"1",
"return",
"max_pages"
] | Get count of total pages | [
"Get",
"count",
"of",
"total",
"pages"
] | 3c3f9557089821283f315a07f3e5a57a2725ab3b | https://github.com/tBaxter/django-fretboard/blob/3c3f9557089821283f315a07f3e5a57a2725ab3b/fretboard/models.py#L167-L175 |
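page_count's divide-then-bump is ordinary ceiling division; a standalone check (// plays the role of the Python 2 integer /):

def page_count(postcount, per_page=25):
    pages = postcount // per_page
    if postcount % per_page != 0:
        pages += 1
    return pages  # equivalently: -(-postcount // per_page)

assert page_count(0) == 0
assert page_count(25) == 1
assert page_count(26) == 2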
251,582 | tBaxter/django-fretboard | fretboard/models.py | Topic.get_image | def get_image(self):
"""
Gets first image from post set.
"""
posts_with_images = self.post_set.filter(image__gt='')
if posts_with_images:
return posts_with_images[0].image | python | def get_image(self):
"""
Gets first image from post set.
"""
posts_with_images = self.post_set.filter(image__gt='')
if posts_with_images:
return posts_with_images[0].image | [
"def",
"get_image",
"(",
"self",
")",
":",
"posts_with_images",
"=",
"self",
".",
"post_set",
".",
"filter",
"(",
"image__gt",
"=",
"''",
")",
"if",
"posts_with_images",
":",
"return",
"posts_with_images",
"[",
"0",
"]",
".",
"image"
] | Gets first image from post set. | [
"Gets",
"first",
"image",
"from",
"post",
"set",
"."
] | 3c3f9557089821283f315a07f3e5a57a2725ab3b | https://github.com/tBaxter/django-fretboard/blob/3c3f9557089821283f315a07f3e5a57a2725ab3b/fretboard/models.py#L192-L198 |
251,583 | tBaxter/django-fretboard | fretboard/models.py | Post.post_url | def post_url(self):
"""
Determine which page this post lives on within the topic
and return link to anchor within that page
"""
topic = self.topic
topic_page = topic.post_set.filter(id__lt=self.id).count() / get_paginate_by() + 1
return "{0}page{1}/#post-{2}".format(topic.get_short_url(), topic_page, self.id) | python | def post_url(self):
"""
Determine which page this post lives on within the topic
and return link to anchor within that page
"""
topic = self.topic
topic_page = topic.post_set.filter(id__lt=self.id).count() / get_paginate_by() + 1
return "{0}page{1}/#post-{2}".format(topic.get_short_url(), topic_page, self.id) | [
"def",
"post_url",
"(",
"self",
")",
":",
"topic",
"=",
"self",
".",
"topic",
"topic_page",
"=",
"topic",
".",
"post_set",
".",
"filter",
"(",
"id__lt",
"=",
"self",
".",
"id",
")",
".",
"count",
"(",
")",
"/",
"get_paginate_by",
"(",
")",
"+",
"1",
"return",
"\"{0}page{1}/#post-{2}\"",
".",
"format",
"(",
"topic",
".",
"get_short_url",
"(",
")",
",",
"topic_page",
",",
"self",
".",
"id",
")"
] | Determine which page this post lives on within the topic
and return link to anchor within that page | [
"Determine",
"which",
"page",
"this",
"post",
"lives",
"on",
"within",
"the",
"topic",
"and",
"return",
"link",
"to",
"anchor",
"within",
"that",
"page"
] | 3c3f9557089821283f315a07f3e5a57a2725ab3b | https://github.com/tBaxter/django-fretboard/blob/3c3f9557089821283f315a07f3e5a57a2725ab3b/fretboard/models.py#L232-L239 |
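post_url's inverse calculation, which page a post lands on, is a floor division over the count of earlier posts (numbers invented):

per_page = 25
earlier_posts = 60  # posts in the topic with a lower id
page = earlier_posts // per_page + 1
print('%spage%d/#post-%d' % ('topic-url/', page, 123))  # topic-url/page3/#post-123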
251,584 | laysakura/relshell | relshell/record.py | Record.next | def next(self):
"""Return a column one by one
:raises: StopIteration
"""
if self._cur_col >= len(self._rec):
self._cur_col = 0
raise StopIteration
col = self._rec[self._cur_col]
self._cur_col += 1
return col | python | def next(self):
"""Return a column one by one
:raises: StopIteration
"""
if self._cur_col >= len(self._rec):
self._cur_col = 0
raise StopIteration
col = self._rec[self._cur_col]
self._cur_col += 1
return col | [
"def",
"next",
"(",
"self",
")",
":",
"if",
"self",
".",
"_cur_col",
">=",
"len",
"(",
"self",
".",
"_rec",
")",
":",
"self",
".",
"_cur_col",
"=",
"0",
"raise",
"StopIteration",
"col",
"=",
"self",
".",
"_rec",
"[",
"self",
".",
"_cur_col",
"]",
"self",
".",
"_cur_col",
"+=",
"1",
"return",
"col"
] | Return a column one by one
:raises: StopIteration | [
"Return",
"a",
"column",
"one",
"by",
"one"
] | 9ca5c03a34c11cb763a4a75595f18bf4383aa8cc | https://github.com/laysakura/relshell/blob/9ca5c03a34c11cb763a4a75595f18bf4383aa8cc/relshell/record.py#L42-L52 |
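Because Record.next resets the cursor before raising StopIteration, the same object can be iterated more than once; a stripped-down model of that behaviour (Python 2/3 via the __next__ alias):

class Columns(object):
    def __init__(self, cols):
        self._cols, self._i = list(cols), 0
    def __iter__(self):
        return self
    def next(self):
        if self._i >= len(self._cols):
            self._i = 0  # rewind so the next pass starts clean
            raise StopIteration
        col = self._cols[self._i]
        self._i += 1
        return col
    __next__ = next

r = Columns(['a', 1])
print(list(r), list(r))  # ['a', 1] ['a', 1]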
251,585 | edeposit/edeposit.amqp.aleph | src/edeposit/amqp/aleph/datastructures/semanticinfo.py | _parse_summaryRecordSysNumber | def _parse_summaryRecordSysNumber(summaryRecordSysNumber):
"""
Try to parse vague, not likely machine-readable description and return
first token, which contains enough numbers in it.
"""
def number_of_digits(token):
digits = filter(lambda x: x.isdigit(), token)
return len(digits)
tokens = map(
lambda x: remove_hairs(x, r" .,:;<>(){}[]\/"),
summaryRecordSysNumber.split()
)
# pick only tokens that contain more than 3 digits
contains_digits = filter(lambda x: number_of_digits(x) > 3, tokens)
if not contains_digits:
return ""
return contains_digits[0] | python | def _parse_summaryRecordSysNumber(summaryRecordSysNumber):
"""
Try to parse vague, not likely machine-readable description and return
first token, which contains enough numbers in it.
"""
def number_of_digits(token):
digits = filter(lambda x: x.isdigit(), token)
return len(digits)
tokens = map(
lambda x: remove_hairs(x, r" .,:;<>(){}[]\/"),
summaryRecordSysNumber.split()
)
# pick only tokens that contain more than 3 digits
contains_digits = filter(lambda x: number_of_digits(x) > 3, tokens)
if not contains_digits:
return ""
return contains_digits[0] | [
"def",
"_parse_summaryRecordSysNumber",
"(",
"summaryRecordSysNumber",
")",
":",
"def",
"number_of_digits",
"(",
"token",
")",
":",
"digits",
"=",
"filter",
"(",
"lambda",
"x",
":",
"x",
".",
"isdigit",
"(",
")",
",",
"token",
")",
"return",
"len",
"(",
"digits",
")",
"tokens",
"=",
"map",
"(",
"lambda",
"x",
":",
"remove_hairs",
"(",
"x",
",",
"r\" .,:;<>(){}[]\\/\"",
")",
",",
"summaryRecordSysNumber",
".",
"split",
"(",
")",
")",
"# pick only tokens that contains 3 digits",
"contains_digits",
"=",
"filter",
"(",
"lambda",
"x",
":",
"number_of_digits",
"(",
"x",
")",
">",
"3",
",",
"tokens",
")",
"if",
"not",
"contains_digits",
":",
"return",
"\"\"",
"return",
"contains_digits",
"[",
"0",
"]"
] | Try to parse vague, not likely machine-readable description and return
first token, which contains enough numbers in it. | [
"Try",
"to",
"parse",
"vague",
"not",
"likely",
"machine",
"-",
"readable",
"description",
"and",
"return",
"first",
"token",
"which",
"contains",
"enough",
"numbers",
"in",
"it",
"."
] | 360342c0504d5daa2344e864762cdf938d4149c7 | https://github.com/edeposit/edeposit.amqp.aleph/blob/360342c0504d5daa2344e864762cdf938d4149c7/src/edeposit/amqp/aleph/datastructures/semanticinfo.py#L19-L39 |
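A worked pass of _parse_summaryRecordSysNumber over an invented input; str.strip with the same character set stands in for remove_hairs:

def number_of_digits(token):
    return sum(ch.isdigit() for ch in token)

summary = 'viz zaznam c. (cnb001234567), svazek 3'
tokens = [t.strip(' .,:;<>(){}[]\\/') for t in summary.split()]
hits = [t for t in tokens if number_of_digits(t) > 3]
print(hits[0] if hits else '')  # cnb001234567 -- the only token with more than 3 digits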
251,586 | markmuetz/commandify | commandify/commandify.py | commandify | def commandify(use_argcomplete=False, exit=True, *args, **kwargs):
'''Turns decorated functions into command line args
Finds the main_command and all commands and generates command line args
from these.'''
parser = CommandifyArgumentParser(*args, **kwargs)
parser.setup_arguments()
if use_argcomplete:
try:
import argcomplete
except ImportError:
print('argcomplete not installed, please install it.')
parser.exit(status=2)
# Must happen between setup_arguments() and parse_args().
argcomplete.autocomplete(parser)
args = parser.parse_args()
if exit:
parser.dispatch_commands()
parser.exit(0)
else:
return parser.dispatch_commands() | python | def commandify(use_argcomplete=False, exit=True, *args, **kwargs):
'''Turns decorated functions into command line args
Finds the main_command and all commands and generates command line args
from these.'''
parser = CommandifyArgumentParser(*args, **kwargs)
parser.setup_arguments()
if use_argcomplete:
try:
import argcomplete
except ImportError:
print('argcomplete not installed, please install it.')
parser.exit(status=2)
# Must happen between setup_arguments() and parse_args().
argcomplete.autocomplete(parser)
args = parser.parse_args()
if exit:
parser.dispatch_commands()
parser.exit(0)
else:
return parser.dispatch_commands() | [
"def",
"commandify",
"(",
"use_argcomplete",
"=",
"False",
",",
"exit",
"=",
"True",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"parser",
"=",
"CommandifyArgumentParser",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"parser",
".",
"setup_arguments",
"(",
")",
"if",
"use_argcomplete",
":",
"try",
":",
"import",
"argcomplete",
"except",
"ImportError",
":",
"print",
"(",
"'argcomplete not installed, please install it.'",
")",
"parser",
".",
"exit",
"(",
"status",
"=",
"2",
")",
"# Must happen between setup_arguments() and parse_args().",
"argcomplete",
".",
"autocomplete",
"(",
"parser",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"if",
"exit",
":",
"parser",
".",
"dispatch_commands",
"(",
")",
"parser",
".",
"exit",
"(",
"0",
")",
"else",
":",
"return",
"parser",
".",
"dispatch_commands",
"(",
")"
] | Turns decorated functions into command line args
Finds the main_command and all commands and generates command line args
from these. | [
"Turns",
"decorated",
"functions",
"into",
"command",
"line",
"args"
] | 2f836caca06fbee80a43ca1ceee22376f63ec4d1 | https://github.com/markmuetz/commandify/blob/2f836caca06fbee80a43ca1ceee22376f63ec4d1/commandify/commandify.py#L245-L265 |
251,587 | markmuetz/commandify | commandify/commandify.py | CommandifyArgumentParser._get_command_args | def _get_command_args(self, command, args):
'''Work out the command arguments for a given command'''
command_args = {}
command_argument_names =\
command.__code__.co_varnames[:command.__code__.co_argcount]
for varname in command_argument_names:
if varname == 'args':
command_args['args'] = args
elif varname in self.provide_args:
command_args[varname] = self.provide_args[varname]
else:
command_args[varname] = getattr(args, varname)
return command_args | python | def _get_command_args(self, command, args):
'''Work out the command arguments for a given command'''
command_args = {}
command_argument_names =\
command.__code__.co_varnames[:command.__code__.co_argcount]
for varname in command_argument_names:
if varname == 'args':
command_args['args'] = args
elif varname in self.provide_args:
command_args[varname] = self.provide_args[varname]
else:
command_args[varname] = getattr(args, varname)
return command_args | [
"def",
"_get_command_args",
"(",
"self",
",",
"command",
",",
"args",
")",
":",
"command_args",
"=",
"{",
"}",
"command_argument_names",
"=",
"command",
".",
"__code__",
".",
"co_varnames",
"[",
":",
"command",
".",
"__code__",
".",
"co_argcount",
"]",
"for",
"varname",
"in",
"command_argument_names",
":",
"if",
"varname",
"==",
"'args'",
":",
"command_args",
"[",
"'args'",
"]",
"=",
"args",
"elif",
"varname",
"in",
"self",
".",
"provide_args",
":",
"command_args",
"[",
"varname",
"]",
"=",
"self",
".",
"provide_args",
"[",
"varname",
"]",
"else",
":",
"command_args",
"[",
"varname",
"]",
"=",
"getattr",
"(",
"args",
",",
"varname",
")",
"return",
"command_args"
] | Work out the command arguments for a given command | [
"Work",
"out",
"the",
"command",
"arguments",
"for",
"a",
"given",
"command"
] | 2f836caca06fbee80a43ca1ceee22376f63ec4d1 | https://github.com/markmuetz/commandify/blob/2f836caca06fbee80a43ca1ceee22376f63ec4d1/commandify/commandify.py#L229-L242 |
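The introspection _get_command_args relies on, positional parameter names via the code object, is easy to check standalone:

def greet(name, times, args):
    return [name] * times

names = greet.__code__.co_varnames[:greet.__code__.co_argcount]
print(names)  # ('name', 'times', 'args') -- each name is then looked up on args or provide_args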
251,588 | b3j0f/annotation | b3j0f/annotation/call.py | types | def types(*args, **kwargs):
"""Quick alias for the Types Annotation with only args and kwargs
parameters.
:param tuple args: may contain rtype.
:param dict kwargs: may contain ptypes.
"""
rtype = first(args)
return Types(rtype=rtype, ptypes=kwargs) | python | def types(*args, **kwargs):
"""Quick alias for the Types Annotation with only args and kwargs
parameters.
:param tuple args: may contain rtype.
:param dict kwargs: may contain ptypes.
"""
rtype = first(args)
return Types(rtype=rtype, ptypes=kwargs) | [
"def",
"types",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"rtype",
"=",
"first",
"(",
"args",
")",
"return",
"Types",
"(",
"rtype",
"=",
"rtype",
",",
"ptypes",
"=",
"kwargs",
")"
] | Quick alias for the Types Annotation with only args and kwargs
parameters.
:param tuple args: may contain rtype.
:param dict kwargs: may contain ptypes. | [
"Quick",
"alias",
"for",
"the",
"Types",
"Annotation",
"with",
"only",
"args",
"and",
"kwargs",
"parameters",
"."
] | 738035a974e4092696d9dc1bbd149faa21c8c51f | https://github.com/b3j0f/annotation/blob/738035a974e4092696d9dc1bbd149faa21c8c51f/b3j0f/annotation/call.py#L251-L261 |
251,589 | b3j0f/annotation | b3j0f/annotation/call.py | Retries._checkretry | def _checkretry(self, mydelay, condition, tries_remaining, data):
"""Check if input parameters allow to retries function execution.
:param float mydelay: waiting delay between two execution.
:param int condition: condition to check with this condition.
:param int tries_remaining: tries remaining.
:param data: data to hook.
"""
result = mydelay
if self.condition & condition and tries_remaining > 0:
# hook data with tries_remaining and mydelay
if self.hook is not None:
self.hook(data, condition, tries_remaining, mydelay)
# wait mydelay seconds
sleep(mydelay)
result *= self.backoff # increment mydelay with this backoff
elif condition is Retries.ON_ERROR:
raise data # raise data if no retries and on_error
else: # else Nonify mydelay to prevent callee function to stop
result = None
return result | python | def _checkretry(self, mydelay, condition, tries_remaining, data):
"""Check if input parameters allow to retries function execution.
:param float mydelay: waiting delay between two execution.
:param int condition: condition to check with this condition.
:param int tries_remaining: tries remaining.
:param data: data to hook.
"""
result = mydelay
if self.condition & condition and tries_remaining > 0:
# hook data with tries_remaining and mydelay
if self.hook is not None:
self.hook(data, condition, tries_remaining, mydelay)
# wait mydelay seconds
sleep(mydelay)
result *= self.backoff # increment mydelay with this backoff
elif condition is Retries.ON_ERROR:
raise data # raise data if no retries and on_error
else: # else Nonify mydelay to prevent callee function to stop
result = None
return result | [
"def",
"_checkretry",
"(",
"self",
",",
"mydelay",
",",
"condition",
",",
"tries_remaining",
",",
"data",
")",
":",
"result",
"=",
"mydelay",
"if",
"self",
".",
"condition",
"&",
"condition",
"and",
"tries_remaining",
">",
"0",
":",
"# hook data with tries_remaining and mydelay",
"if",
"self",
".",
"hook",
"is",
"not",
"None",
":",
"self",
".",
"hook",
"(",
"data",
",",
"condition",
",",
"tries_remaining",
",",
"mydelay",
")",
"# wait mydelay seconds",
"sleep",
"(",
"mydelay",
")",
"result",
"*=",
"self",
".",
"backoff",
"# increment mydelay with this backoff",
"elif",
"condition",
"is",
"Retries",
".",
"ON_ERROR",
":",
"raise",
"data",
"# raise data if no retries and on_error",
"else",
":",
"# else Nonify mydelay to prevent callee function to stop",
"result",
"=",
"None",
"return",
"result"
] | Check if input parameters allow retrying the function execution.
:param float mydelay: waiting delay between two executions.
:param int condition: condition to check against this annotation's condition.
:param int tries_remaining: tries remaining.
:param data: data to hook. | [
"Check",
"if",
"input",
"parameters",
"allow",
"to",
"retries",
"function",
"execution",
"."
] | 738035a974e4092696d9dc1bbd149faa21c8c51f | https://github.com/b3j0f/annotation/blob/738035a974e4092696d9dc1bbd149faa21c8c51f/b3j0f/annotation/call.py#L465-L491 |
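Because `_checkretry` invokes the hook as `self.hook(data, condition, tries_remaining, mydelay)`, the hook signature is fixed by the code above; how the hook gets attached to a `Retries` instance (constructor argument or attribute assignment) is an assumption not visible in this record.

import logging

logger = logging.getLogger(__name__)

def log_retry(data, condition, tries_remaining, mydelay):
    # Invoked by _checkretry just before it sleeps between two attempts.
    logger.warning(
        'retrying after %r: condition=%s, %d tries left, next delay %.2fs',
        data, condition, tries_remaining, mydelay,
    )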
251,590 | b3j0f/annotation | b3j0f/annotation/call.py | Memoize._getkey | def _getkey(self, args, kwargs):
"""Get hash key from args and kwargs.
args and kwargs must be hashable.
:param tuple args: called vargs.
:param dict kwargs: called keywords.
:return: hash(tuple(args) + tuple((key, kwargs[key]) for key in sorted(kwargs))).
:rtype: int."""
values = list(args)
keys = sorted(list(kwargs))
for key in keys:
values.append((key, kwargs[key]))
result = hash(tuple(values))
return result | python | def _getkey(self, args, kwargs):
"""Get hash key from args and kwargs.
args and kwargs must be hashable.
:param tuple args: called vargs.
:param dict kwargs: called keywords.
:return: hash(tuple(args) + tuple((key, kwargs[key]) for key in sorted(kwargs))).
:rtype: int."""
values = list(args)
keys = sorted(list(kwargs))
for key in keys:
values.append((key, kwargs[key]))
result = hash(tuple(values))
return result | [
"def",
"_getkey",
"(",
"self",
",",
"args",
",",
"kwargs",
")",
":",
"values",
"=",
"list",
"(",
"args",
")",
"keys",
"=",
"sorted",
"(",
"list",
"(",
"kwargs",
")",
")",
"for",
"key",
"in",
"keys",
":",
"values",
".",
"append",
"(",
"(",
"key",
",",
"kwargs",
"[",
"key",
"]",
")",
")",
"result",
"=",
"hash",
"(",
"tuple",
"(",
"values",
")",
")",
"return",
"result"
] | Get hash key from args and kwargs.
args and kwargs must be hashable.
:param tuple args: called vargs.
:param dict kwargs: called keywords.
:return: hash(tuple(args) + tuple((key, kwargs[key]) for key in sorted(kwargs))).
:rtype: int. | [
"Get",
"hash",
"key",
"from",
"args",
"and",
"kwargs",
"."
] | 738035a974e4092696d9dc1bbd149faa21c8c51f | https://github.com/b3j0f/annotation/blob/738035a974e4092696d9dc1bbd149faa21c8c51f/b3j0f/annotation/call.py#L513-L532 |
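A standalone rendering of `_getkey` (same body, minus `self`) shows why sorting the keyword names makes the cache key insensitive to keyword order:

def getkey(args, kwargs):
    values = list(args)
    for key in sorted(kwargs):
        values.append((key, kwargs[key]))  # (name, value) pairs in sorted order
    return hash(tuple(values))

assert getkey((1, 2), {'a': 3, 'b': 4}) == getkey((1, 2), {'b': 4, 'a': 3})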
251,591 | b3j0f/annotation | b3j0f/annotation/call.py | Memoize.getparams | def getparams(self, result):
"""Get result parameters.
:param result: cached result.
:raises: ValueError if result is not cached.
:return: args and kwargs registered with input result.
:rtype: tuple"""
for key in self._cache:
if self._cache[key][2] == result:
args, kwargs, _ = self._cache[key]
return args, kwargs
else:
raise ValueError('Result is not cached') | python | def getparams(self, result):
"""Get result parameters.
:param result: cached result.
:raises: ValueError if result is not cached.
:return: args and kwargs registered with input result.
:rtype: tuple"""
for key in self._cache:
if self._cache[key][2] == result:
args, kwargs, _ = self._cache[key]
return args, kwargs
else:
raise ValueError('Result is not cached') | [
"def",
"getparams",
"(",
"self",
",",
"result",
")",
":",
"for",
"key",
"in",
"self",
".",
"_cache",
":",
"if",
"self",
".",
"_cache",
"[",
"key",
"]",
"[",
"2",
"]",
"==",
"result",
":",
"args",
",",
"kwargs",
",",
"_",
"=",
"self",
".",
"_cache",
"[",
"key",
"]",
"return",
"args",
",",
"kwargs",
"else",
":",
"raise",
"ValueError",
"(",
"'Result is not cached'",
")"
] | Get result parameters.
:param result: cached result.
:raises: ValueError if result is not cached.
:return: args and kwargs registered with input result.
:rtype: tuple | [
"Get",
"result",
"parameters",
"."
] | 738035a974e4092696d9dc1bbd149faa21c8c51f | https://github.com/b3j0f/annotation/blob/738035a974e4092696d9dc1bbd149faa21c8c51f/b3j0f/annotation/call.py#L555-L569 |
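The indexing `self._cache[key][2]` and the unpacking above imply that cache entries are `(args, kwargs, result)` triples; that layout is the only assumption in this standalone sketch of the same for/else lookup.

def getparams(cache, result):
    for key in cache:
        if cache[key][2] == result:
            args, kwargs, _ = cache[key]
            return args, kwargs
    else:
        # The else branch of a for loop runs only when the loop finishes
        # without returning, i.e. when no cached result matched.
        raise ValueError('Result is not cached')

cache = {0: ((1,), {'b': 2}, 3)}
assert getparams(cache, 3) == ((1,), {'b': 2})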
251,592 | EventTeam/beliefs | src/beliefs/cells/bools.py | BoolCell.is_entailed_by | def is_entailed_by(self, other):
""" If the other is as or more specific than self"""
other = BoolCell.coerce(other)
if self.value == U or other.value == self.value:
return True
return False | python | def is_entailed_by(self, other):
""" If the other is as or more specific than self"""
other = BoolCell.coerce(other)
if self.value == U or other.value == self.value:
return True
return False | [
"def",
"is_entailed_by",
"(",
"self",
",",
"other",
")",
":",
"other",
"=",
"BoolCell",
".",
"coerce",
"(",
"other",
")",
"if",
"self",
".",
"value",
"==",
"U",
"or",
"other",
".",
"value",
"==",
"self",
".",
"value",
":",
"return",
"True",
"return",
"False"
] | If the other is as or more specific than self | [
"If",
"the",
"other",
"is",
"as",
"or",
"more",
"specific",
"than",
"self"
] | c07d22b61bebeede74a72800030dde770bf64208 | https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/cells/bools.py#L44-L49 |
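A behavioral sketch, assuming `BoolCell(value)` construction and that the `T`, `F`, `U` constants are importable alongside it (the import path is inferred from this record, not confirmed by it):

from beliefs.cells.bools import BoolCell, T, F, U  # assumed import path

unknown, certain = BoolCell(U), BoolCell(T)
assert unknown.is_entailed_by(certain)           # an unknown cell is entailed by anything
assert certain.is_entailed_by(T)                 # coerce() lifts the raw value first
assert not certain.is_entailed_by(BoolCell(F))   # T is not entailed by F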
251,593 | EventTeam/beliefs | src/beliefs/cells/bools.py | BoolCell.merge | def merge(self, other):
"""
Merges two BoolCells
"""
other = BoolCell.coerce(other)
if self.is_equal(other):
# pick among dependencies
return self
elif other.is_entailed_by(self):
return self
elif self.is_entailed_by(other):
self.value = other.value
elif self.is_contradictory(other):
raise Contradiction("Cannot merge T and F")
else:
raise Exception
return self | python | def merge(self, other):
"""
Merges two BoolCells
"""
other = BoolCell.coerce(other)
if self.is_equal(other):
# pick among dependencies
return self
elif other.is_entailed_by(self):
return self
elif self.is_entailed_by(other):
self.value = other.value
elif self.is_contradictory(other):
raise Contradiction("Cannot merge T and F")
else:
raise Exception
return self | [
"def",
"merge",
"(",
"self",
",",
"other",
")",
":",
"other",
"=",
"BoolCell",
".",
"coerce",
"(",
"other",
")",
"if",
"self",
".",
"is_equal",
"(",
"other",
")",
":",
"# pick among dependencies",
"return",
"self",
"elif",
"other",
".",
"is_entailed_by",
"(",
"self",
")",
":",
"return",
"self",
"elif",
"self",
".",
"is_entailed_by",
"(",
"other",
")",
":",
"self",
".",
"value",
"=",
"other",
".",
"value",
"elif",
"self",
".",
"is_contradictory",
"(",
"other",
")",
":",
"raise",
"Contradiction",
"(",
"\"Cannot merge T and F\"",
")",
"else",
":",
"raise",
"Exception",
"return",
"self"
] | Merges two BoolCells | [
"Merges",
"two",
"BoolCells"
] | c07d22b61bebeede74a72800030dde770bf64208 | https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/cells/bools.py#L82-L98 |
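Continuing the same assumptions, a sketch of the merge semantics: an unknown cell absorbs the more specific value, while T and F refuse to merge (`is_contradictory` and `Contradiction` are defined elsewhere in the package).

cell = BoolCell(U)
cell.merge(T)              # U is entailed by T, so the cell becomes T
try:
    cell.merge(F)          # T against F is contradictory
except Contradiction:
    pass                   # raised as "Cannot merge T and F"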
251,594 | BenjaminSchubert/NitPycker | nitpycker/runner.py | ParallelRunner._makeResult | def _makeResult(self):
""" instantiates the result class reporters """
return [reporter(self.stream, self.descriptions, self.verbosity) for reporter in self.resultclass] | python | def _makeResult(self):
""" instantiates the result class reporters """
return [reporter(self.stream, self.descriptions, self.verbosity) for reporter in self.resultclass] | [
"def",
"_makeResult",
"(",
"self",
")",
":",
"return",
"[",
"reporter",
"(",
"self",
".",
"stream",
",",
"self",
".",
"descriptions",
",",
"self",
".",
"verbosity",
")",
"for",
"reporter",
"in",
"self",
".",
"resultclass",
"]"
] | instantiates the result class reporters | [
"instantiates",
"the",
"result",
"class",
"reporters"
] | 3ac2b3bf06f1d704b4853167a967311b0465a76f | https://github.com/BenjaminSchubert/NitPycker/blob/3ac2b3bf06f1d704b4853167a967311b0465a76f/nitpycker/runner.py#L108-L110 |
251,595 | BenjaminSchubert/NitPycker | nitpycker/runner.py | ParallelRunner.module_can_run_parallel | def module_can_run_parallel(test_module: unittest.TestSuite) -> bool:
"""
Checks if a given module of tests can be run in parallel or not
:param test_module: the module to run
:return: True if the module can be run in parallel, False otherwise
"""
for test_class in test_module:
# if the test is already failed, we just don't filter it
# and let the test runner deal with it later.
if hasattr(unittest.loader, '_FailedTest'): # import failure in python 3.4.5+
# noinspection PyProtectedMember
if isinstance(test_class, unittest.loader._FailedTest):
continue
if not isinstance(test_class, collections.Iterable): # likely an import failure in python 3.4.4-
# before python 3.4.5, test import failures were not serializable.
# We are unable to be sure that this is a module import failure, but it very likely is
# if this is the case, we'll just run this locally and see
raise TestClassNotIterable()
for test_case in test_class:
return not getattr(sys.modules[test_case.__module__], "__no_parallel__", False) | python | def module_can_run_parallel(test_module: unittest.TestSuite) -> bool:
"""
Checks if a given module of tests can be run in parallel or not
:param test_module: the module to run
:return: True if the module can be run in parallel, False otherwise
"""
for test_class in test_module:
# if the test is already failed, we just don't filter it
# and let the test runner deal with it later.
if hasattr(unittest.loader, '_FailedTest'): # import failure in python 3.4.5+
# noinspection PyProtectedMember
if isinstance(test_class, unittest.loader._FailedTest):
continue
if not isinstance(test_class, collections.Iterable): # likely an import failure in python 3.4.4-
# before python 3.4.5, test import failures were not serializable.
# We are unable to be sure that this is a module import failure, but it very likely is
# if this is the case, we'll just run this locally and see
raise TestClassNotIterable()
for test_case in test_class:
return not getattr(sys.modules[test_case.__module__], "__no_parallel__", False) | [
"def",
"module_can_run_parallel",
"(",
"test_module",
":",
"unittest",
".",
"TestSuite",
")",
"->",
"bool",
":",
"for",
"test_class",
"in",
"test_module",
":",
"# if the test is already failed, we just don't filter it",
"# and let the test runner deal with it later.",
"if",
"hasattr",
"(",
"unittest",
".",
"loader",
",",
"'_FailedTest'",
")",
":",
"# import failure in python 3.4.5+",
"# noinspection PyProtectedMember",
"if",
"isinstance",
"(",
"test_class",
",",
"unittest",
".",
"loader",
".",
"_FailedTest",
")",
":",
"continue",
"if",
"not",
"isinstance",
"(",
"test_class",
",",
"collections",
".",
"Iterable",
")",
":",
"# likely an import failure in python 3.4.4-",
"# before python 3.4.5, test import failures were not serializable.",
"# We are unable to be sure that this is a module import failure, but it very likely is",
"# if this is the case, we'll just run this locally and see",
"raise",
"TestClassNotIterable",
"(",
")",
"for",
"test_case",
"in",
"test_class",
":",
"return",
"not",
"getattr",
"(",
"sys",
".",
"modules",
"[",
"test_case",
".",
"__module__",
"]",
",",
"\"__no_parallel__\"",
",",
"False",
")"
] | Checks if a given module of tests can be run in parallel or not
:param test_module: the module to run
:return: True if the module can be run in parallel, False otherwise | [
"Checks",
"if",
"a",
"given",
"module",
"of",
"tests",
"can",
"be",
"run",
"in",
"parallel",
"or",
"not"
] | 3ac2b3bf06f1d704b4853167a967311b0465a76f | https://github.com/BenjaminSchubert/NitPycker/blob/3ac2b3bf06f1d704b4853167a967311b0465a76f/nitpycker/runner.py#L113-L135 |
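The lookup `getattr(sys.modules[test_case.__module__], "__no_parallel__", False)` means a test module opts out of concurrent execution with a single module-level flag:

# tests/test_serial_module.py (hypothetical file name)
__no_parallel__ = True  # keeps every test in this module out of worker processes

import unittest

class SharedStateTest(unittest.TestCase):
    def test_uses_global_fixture(self):
        self.assertTrue(True)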
251,596 | BenjaminSchubert/NitPycker | nitpycker/runner.py | ParallelRunner.class_can_run_parallel | def class_can_run_parallel(test_class: unittest.TestSuite) -> bool:
"""
Checks if a given class of tests can be run in parallel or not
:param test_class: the class to run
:return: True if the class can be run in parallel, False otherwise
"""
for test_case in test_class:
return not getattr(test_case, "__no_parallel__", False) | python | def class_can_run_parallel(test_class: unittest.TestSuite) -> bool:
"""
Checks if a given class of tests can be run in parallel or not
:param test_class: the class to run
:return: True if the class can be run in parallel, False otherwise
"""
for test_case in test_class:
return not getattr(test_case, "__no_parallel__", False) | [
"def",
"class_can_run_parallel",
"(",
"test_class",
":",
"unittest",
".",
"TestSuite",
")",
"->",
"bool",
":",
"for",
"test_case",
"in",
"test_class",
":",
"return",
"not",
"getattr",
"(",
"test_case",
",",
"\"__no_parallel__\"",
",",
"False",
")"
] | Checks if a given class of tests can be run in parallel or not
:param test_class: the class to run
:return: True if the class can be run in parallel, False otherwise | [
"Checks",
"if",
"a",
"given",
"class",
"of",
"tests",
"can",
"be",
"run",
"in",
"parallel",
"or",
"not"
] | 3ac2b3bf06f1d704b4853167a967311b0465a76f | https://github.com/BenjaminSchubert/NitPycker/blob/3ac2b3bf06f1d704b4853167a967311b0465a76f/nitpycker/runner.py#L138-L146 |
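Similarly, `getattr(test_case, "__no_parallel__", False)` reads the flag from the test case itself, so a single class can stay serial while the rest of its module runs in parallel:

import unittest

class DatabaseTest(unittest.TestCase):
    __no_parallel__ = True  # only this class is serialized

    def test_schema_migration(self):
        self.assertTrue(True)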
251,597 | BenjaminSchubert/NitPycker | nitpycker/runner.py | ParallelRunner.print_summary | def print_summary(self, result, time_taken):
"""
Prints the test summary, how many tests failed, how long it took, etc
:param result: result class to use to print summary
:param time_taken: the time all tests took to run
"""
if hasattr(result, "separator2"):
self.stream.writeln(result.separator2)
self.stream.writeln("Ran {number_of_tests} test{s} in {time:.3f}s\n".format(
number_of_tests=result.testsRun, s="s" if result.testsRun != 1 else "", time=time_taken
))
info = []
if not result.wasSuccessful():
self.stream.write("FAILED")
if result.failures:
info.append("failures={}".format(len(result.failures)))
if result.errors:
info.append("errors={}".format(len(result.errors)))
else:
self.stream.write("OK")
if result.skipped:
info.append("skipped={}".format(len(result.skipped)))
if result.expectedFailures:
info.append("expected failures={}".format(len(result.expectedFailures)))
if result.unexpectedSuccesses:
info.append("unexpected successes={}".format(len(result.unexpectedSuccesses)))
if info:
self.stream.writeln(" ({})".format(", ".join(info)))
else:
self.stream.write("\n") | python | def print_summary(self, result, time_taken):
"""
Prints the test summary, how many tests failed, how long it took, etc
:param result: result class to use to print summary
:param time_taken: the time all tests took to run
"""
if hasattr(result, "separator2"):
self.stream.writeln(result.separator2)
self.stream.writeln("Ran {number_of_tests} test{s} in {time:.3f}s\n".format(
number_of_tests=result.testsRun, s="s" if result.testsRun != 1 else "", time=time_taken
))
info = []
if not result.wasSuccessful():
self.stream.write("FAILED")
if result.failures:
info.append("failures={}".format(len(result.failures)))
if result.errors:
info.append("errors={}".format(len(result.errors)))
else:
self.stream.write("OK")
if result.skipped:
info.append("skipped={}".format(len(result.skipped)))
if result.expectedFailures:
info.append("expected failures={}".format(len(result.expectedFailures)))
if result.unexpectedSuccesses:
info.append("unexpected successes={}".format(len(result.unexpectedSuccesses)))
if info:
self.stream.writeln(" ({})".format(", ".join(info)))
else:
self.stream.write("\n") | [
"def",
"print_summary",
"(",
"self",
",",
"result",
",",
"time_taken",
")",
":",
"if",
"hasattr",
"(",
"result",
",",
"\"separator2\"",
")",
":",
"self",
".",
"stream",
".",
"writeln",
"(",
"result",
".",
"separator2",
")",
"self",
".",
"stream",
".",
"writeln",
"(",
"\"Ran {number_of_tests} test{s} in {time:.3f}s\\n\"",
".",
"format",
"(",
"number_of_tests",
"=",
"result",
".",
"testsRun",
",",
"s",
"=",
"\"s\"",
"if",
"result",
".",
"testsRun",
"!=",
"1",
"else",
"\"\"",
",",
"time",
"=",
"time_taken",
")",
")",
"info",
"=",
"[",
"]",
"if",
"not",
"result",
".",
"wasSuccessful",
"(",
")",
":",
"self",
".",
"stream",
".",
"write",
"(",
"\"FAILED\"",
")",
"if",
"result",
".",
"failures",
":",
"info",
".",
"append",
"(",
"\"failures={}\"",
".",
"format",
"(",
"len",
"(",
"result",
".",
"failures",
")",
")",
")",
"if",
"result",
".",
"errors",
":",
"info",
".",
"append",
"(",
"\"errors={}\"",
".",
"format",
"(",
"len",
"(",
"result",
".",
"errors",
")",
")",
")",
"else",
":",
"self",
".",
"stream",
".",
"write",
"(",
"\"OK\"",
")",
"if",
"result",
".",
"skipped",
":",
"info",
".",
"append",
"(",
"\"skipped={}\"",
".",
"format",
"(",
"len",
"(",
"result",
".",
"skipped",
")",
")",
")",
"if",
"result",
".",
"expectedFailures",
":",
"info",
".",
"append",
"(",
"\"expected failures={}\"",
".",
"format",
"(",
"len",
"(",
"result",
".",
"expectedFailures",
")",
")",
")",
"if",
"result",
".",
"unexpectedSuccesses",
":",
"info",
".",
"append",
"(",
"\"unexpected successes={}\"",
".",
"format",
"(",
"len",
"(",
"result",
".",
"unexpectedSuccesses",
")",
")",
")",
"if",
"info",
":",
"self",
".",
"stream",
".",
"writeln",
"(",
"\" ({})\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"info",
")",
")",
")",
"else",
":",
"self",
".",
"stream",
".",
"write",
"(",
"\"\\n\"",
")"
] | Prints the test summary, how many tests failed, how long it took, etc
:param result: result class to use to print summary
:param time_taken: the time all tests took to run | [
"Prints",
"the",
"test",
"summary",
"how",
"many",
"tests",
"failed",
"how",
"long",
"it",
"took",
"etc"
] | 3ac2b3bf06f1d704b4853167a967311b0465a76f | https://github.com/BenjaminSchubert/NitPycker/blob/3ac2b3bf06f1d704b4853167a967311b0465a76f/nitpycker/runner.py#L182-L217 |
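Tracing the format strings above, a successful run of eight tests with one skip would print approximately the block below; the dashed separator only appears when the result class defines `separator2`, as unittest's text results do.

----------------------------------------------------------------------
Ran 8 tests in 0.412s

OK (skipped=1)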
251,598 | BenjaminSchubert/NitPycker | nitpycker/runner.py | ParallelRunner.run | def run(self, test: unittest.TestSuite):
"""
Given a TestSuite, will create one process per test case whenever possible and run them concurrently.
Will then wait for the results and return them
:param test: the TestSuite to run
:return: a summary of the test run
"""
start_time = time.time()
process = []
resource_manager = multiprocessing.Manager()
results_queue = resource_manager.Queue()
tasks_running = resource_manager.BoundedSemaphore(self.process_number)
test_suites, local_test_suites = self.collect_tests(test)
results_collector = ResultCollector(
self.stream, self.descriptions, self.verbosity,
result_queue=results_queue, test_results=self._makeResult(),
tests=test_suites
)
results_collector.start()
for index, suite in enumerate(test_suites):
tasks_running.acquire()
x = self.Process(index, suite, results_queue, tasks_running)
x.start()
process.append(x)
local_test_suites.run(results_collector)
for i in process:
i.join()
results_queue.join()
results_collector.end_collection()
results_collector.join()
results_collector.printErrors()
self.print_summary(results_collector, time.time() - start_time)
return results_collector | python | def run(self, test: unittest.TestSuite):
"""
Given a TestSuite, will create one process per test case whenever possible and run them concurrently.
Will then wait for the results and return them
:param test: the TestSuite to run
:return: a summary of the test run
"""
start_time = time.time()
process = []
resource_manager = multiprocessing.Manager()
results_queue = resource_manager.Queue()
tasks_running = resource_manager.BoundedSemaphore(self.process_number)
test_suites, local_test_suites = self.collect_tests(test)
results_collector = ResultCollector(
self.stream, self.descriptions, self.verbosity,
result_queue=results_queue, test_results=self._makeResult(),
tests=test_suites
)
results_collector.start()
for index, suite in enumerate(test_suites):
tasks_running.acquire()
x = self.Process(index, suite, results_queue, tasks_running)
x.start()
process.append(x)
local_test_suites.run(results_collector)
for i in process:
i.join()
results_queue.join()
results_collector.end_collection()
results_collector.join()
results_collector.printErrors()
self.print_summary(results_collector, time.time() - start_time)
return results_collector | [
"def",
"run",
"(",
"self",
",",
"test",
":",
"unittest",
".",
"TestSuite",
")",
":",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"process",
"=",
"[",
"]",
"resource_manager",
"=",
"multiprocessing",
".",
"Manager",
"(",
")",
"results_queue",
"=",
"resource_manager",
".",
"Queue",
"(",
")",
"tasks_running",
"=",
"resource_manager",
".",
"BoundedSemaphore",
"(",
"self",
".",
"process_number",
")",
"test_suites",
",",
"local_test_suites",
"=",
"self",
".",
"collect_tests",
"(",
"test",
")",
"results_collector",
"=",
"ResultCollector",
"(",
"self",
".",
"stream",
",",
"self",
".",
"descriptions",
",",
"self",
".",
"verbosity",
",",
"result_queue",
"=",
"results_queue",
",",
"test_results",
"=",
"self",
".",
"_makeResult",
"(",
")",
",",
"tests",
"=",
"test_suites",
")",
"results_collector",
".",
"start",
"(",
")",
"for",
"index",
",",
"suite",
"in",
"enumerate",
"(",
"test_suites",
")",
":",
"tasks_running",
".",
"acquire",
"(",
")",
"x",
"=",
"self",
".",
"Process",
"(",
"index",
",",
"suite",
",",
"results_queue",
",",
"tasks_running",
")",
"x",
".",
"start",
"(",
")",
"process",
".",
"append",
"(",
"x",
")",
"local_test_suites",
".",
"run",
"(",
"results_collector",
")",
"for",
"i",
"in",
"process",
":",
"i",
".",
"join",
"(",
")",
"results_queue",
".",
"join",
"(",
")",
"results_collector",
".",
"end_collection",
"(",
")",
"results_collector",
".",
"join",
"(",
")",
"results_collector",
".",
"printErrors",
"(",
")",
"self",
".",
"print_summary",
"(",
"results_collector",
",",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
")",
"return",
"results_collector"
] | Given a TestSuite, will create one process per test case whenever possible and run them concurrently.
Will then wait for the results and return them
:param test: the TestSuite to run
:return: a summary of the test run | [
"Given",
"a",
"TestSuite",
"will",
"create",
"one",
"process",
"per",
"test",
"case",
"whenever",
"possible",
"and",
"run",
"them",
"concurrently",
".",
"Will",
"then",
"wait",
"for",
"the",
"result",
"and",
"return",
"them"
] | 3ac2b3bf06f1d704b4853167a967311b0465a76f | https://github.com/BenjaminSchubert/NitPycker/blob/3ac2b3bf06f1d704b4853167a967311b0465a76f/nitpycker/runner.py#L219-L261 |
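A wiring sketch for the runner; only `process_number`, `resultclass` and the `Process` attribute are visible in these records, so the no-argument construction below is a placeholder, not the real signature.

import unittest
from nitpycker.runner import ParallelRunner

loader = unittest.TestLoader()
suite = loader.discover('tests')
runner = ParallelRunner()  # hypothetical construction; real arguments not shown here
result = runner.run(suite)
raise SystemExit(0 if result.wasSuccessful() else 1)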
251,599 | foliant-docs/foliantcontrib.init | setup.py | get_templates | def get_templates(path: Path) -> List[str]:
'''List all files in ``templates`` directory, including all subdirectories.
The resulting list contains UNIX-like relative paths starting with ``templates``.
'''
result = []
for item in path.glob('**/*'):
if item.is_file() and not item.name.startswith('_'):
result.append(item.relative_to(path.parent).as_posix())
return result | python | def get_templates(path: Path) -> List[str]:
'''List all files in ``templates`` directory, including all subdirectories.
The resulting list contains UNIX-like relative paths starting with ``templates``.
'''
result = []
for item in path.glob('**/*'):
if item.is_file() and not item.name.startswith('_'):
result.append(item.relative_to(path.parent).as_posix())
return result | [
"def",
"get_templates",
"(",
"path",
":",
"Path",
")",
"->",
"List",
"[",
"str",
"]",
":",
"result",
"=",
"[",
"]",
"for",
"item",
"in",
"path",
".",
"glob",
"(",
"'**/*'",
")",
":",
"if",
"item",
".",
"is_file",
"(",
")",
"and",
"not",
"item",
".",
"name",
".",
"startswith",
"(",
"'_'",
")",
":",
"result",
".",
"append",
"(",
"item",
".",
"relative_to",
"(",
"path",
".",
"parent",
")",
".",
"as_posix",
"(",
")",
")",
"return",
"result"
] | List all files in ``templates`` directory, including all subdirectories.
The resulting list contains UNIX-like relative paths starting with ``templates``. | [
"List",
"all",
"files",
"in",
"templates",
"directory",
"including",
"all",
"subdirectories",
"."
] | 39aa38949b6270a750c800b79b4e71dd827f28d8 | https://github.com/foliant-docs/foliantcontrib.init/blob/39aa38949b6270a750c800b79b4e71dd827f28d8/setup.py#L14-L26 |
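A runnable check of the behavior documented above: paths come back relative to the parent of `path`, and any file whose name starts with an underscore is skipped (assuming `get_templates` is importable from this setup.py).

from pathlib import Path
import tempfile

with tempfile.TemporaryDirectory() as root:
    templates = Path(root, 'templates')
    (templates / 'base').mkdir(parents=True)
    (templates / 'base' / 'foliant.yml').write_text('title: Demo')
    (templates / 'base' / '_draft.md').write_text('hidden')  # leading _ -> excluded
    print(get_templates(templates))  # ['templates/base/foliant.yml']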