repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | code_tokens (sequence) | docstring (string, 1-46.9k chars) | docstring_tokens (sequence) | language (1 class) | partition (3 classes) |
---|---|---|---|---|---|---|---|---|
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L472-L482 | async def _deferred_init(self):
"""
Run those things in a sepearate tasks as they are not required for the
bot to work and they take a lot of time to run.
"""
await self._check_subscriptions()
await self._set_whitelist()
await self._set_get_started()
await self._set_greeting_text()
await self._set_persistent_menu() | [
"async",
"def",
"_deferred_init",
"(",
"self",
")",
":",
"await",
"self",
".",
"_check_subscriptions",
"(",
")",
"await",
"self",
".",
"_set_whitelist",
"(",
")",
"await",
"self",
".",
"_set_get_started",
"(",
")",
"await",
"self",
".",
"_set_greeting_text",
"(",
")",
"await",
"self",
".",
"_set_persistent_menu",
"(",
")"
] | Run those things in a sepearate tasks as they are not required for the
bot to work and they take a lot of time to run. | [
"Run",
"those",
"things",
"in",
"a",
"sepearate",
"tasks",
"as",
"they",
"are",
"not",
"required",
"for",
"the",
"bot",
"to",
"work",
"and",
"they",
"take",
"a",
"lot",
"of",
"time",
"to",
"run",
"."
] | python | train |
dhylands/rshell | rshell/main.py | https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L2326-L2374 | def do_ls(self, line):
"""ls [-a] [-l] [FILE|DIRECTORY|PATTERN]...
PATTERN supports * ? [seq] [!seq] Unix filename matching
List directory contents.
"""
args = self.line_to_args(line)
if len(args.filenames) == 0:
args.filenames = ['.']
for idx, fn in enumerate(args.filenames):
if not is_pattern(fn):
filename = resolve_path(fn)
stat = auto(get_stat, filename)
mode = stat_mode(stat)
if not mode_exists(mode):
err = "Cannot access '{}': No such file or directory"
print_err(err.format(filename))
continue
if not mode_isdir(mode):
if args.long:
print_long(fn, stat, self.print)
else:
self.print(fn)
continue
if len(args.filenames) > 1:
if idx > 0:
self.print('')
self.print("%s:" % filename)
pattern = '*'
else: # A pattern was specified
filename, pattern = validate_pattern(fn)
if filename is None: # An error was printed
continue
files = []
ldir_stat = auto(listdir_stat, filename)
if ldir_stat is None:
err = "Cannot access '{}': No such file or directory"
print_err(err.format(filename))
else:
for filename, stat in sorted(ldir_stat,
key=lambda entry: entry[0]):
if is_visible(filename) or args.all:
if fnmatch.fnmatch(filename, pattern):
if args.long:
print_long(filename, stat, self.print)
else:
files.append(decorated_filename(filename, stat))
if len(files) > 0:
print_cols(sorted(files), self.print, self.columns) | [
"def",
"do_ls",
"(",
"self",
",",
"line",
")",
":",
"args",
"=",
"self",
".",
"line_to_args",
"(",
"line",
")",
"if",
"len",
"(",
"args",
".",
"filenames",
")",
"==",
"0",
":",
"args",
".",
"filenames",
"=",
"[",
"'.'",
"]",
"for",
"idx",
",",
"fn",
"in",
"enumerate",
"(",
"args",
".",
"filenames",
")",
":",
"if",
"not",
"is_pattern",
"(",
"fn",
")",
":",
"filename",
"=",
"resolve_path",
"(",
"fn",
")",
"stat",
"=",
"auto",
"(",
"get_stat",
",",
"filename",
")",
"mode",
"=",
"stat_mode",
"(",
"stat",
")",
"if",
"not",
"mode_exists",
"(",
"mode",
")",
":",
"err",
"=",
"\"Cannot access '{}': No such file or directory\"",
"print_err",
"(",
"err",
".",
"format",
"(",
"filename",
")",
")",
"continue",
"if",
"not",
"mode_isdir",
"(",
"mode",
")",
":",
"if",
"args",
".",
"long",
":",
"print_long",
"(",
"fn",
",",
"stat",
",",
"self",
".",
"print",
")",
"else",
":",
"self",
".",
"print",
"(",
"fn",
")",
"continue",
"if",
"len",
"(",
"args",
".",
"filenames",
")",
">",
"1",
":",
"if",
"idx",
">",
"0",
":",
"self",
".",
"print",
"(",
"''",
")",
"self",
".",
"print",
"(",
"\"%s:\"",
"%",
"filename",
")",
"pattern",
"=",
"'*'",
"else",
":",
"# A pattern was specified",
"filename",
",",
"pattern",
"=",
"validate_pattern",
"(",
"fn",
")",
"if",
"filename",
"is",
"None",
":",
"# An error was printed",
"continue",
"files",
"=",
"[",
"]",
"ldir_stat",
"=",
"auto",
"(",
"listdir_stat",
",",
"filename",
")",
"if",
"ldir_stat",
"is",
"None",
":",
"err",
"=",
"\"Cannot access '{}': No such file or directory\"",
"print_err",
"(",
"err",
".",
"format",
"(",
"filename",
")",
")",
"else",
":",
"for",
"filename",
",",
"stat",
"in",
"sorted",
"(",
"ldir_stat",
",",
"key",
"=",
"lambda",
"entry",
":",
"entry",
"[",
"0",
"]",
")",
":",
"if",
"is_visible",
"(",
"filename",
")",
"or",
"args",
".",
"all",
":",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"filename",
",",
"pattern",
")",
":",
"if",
"args",
".",
"long",
":",
"print_long",
"(",
"filename",
",",
"stat",
",",
"self",
".",
"print",
")",
"else",
":",
"files",
".",
"append",
"(",
"decorated_filename",
"(",
"filename",
",",
"stat",
")",
")",
"if",
"len",
"(",
"files",
")",
">",
"0",
":",
"print_cols",
"(",
"sorted",
"(",
"files",
")",
",",
"self",
".",
"print",
",",
"self",
".",
"columns",
")"
] | ls [-a] [-l] [FILE|DIRECTORY|PATTERN]...
PATTERN supports * ? [seq] [!seq] Unix filename matching
List directory contents. | [
"ls",
"[",
"-",
"a",
"]",
"[",
"-",
"l",
"]",
"[",
"FILE|DIRECTORY|PATTERN",
"]",
"...",
"PATTERN",
"supports",
"*",
"?",
"[",
"seq",
"]",
"[",
"!seq",
"]",
"Unix",
"filename",
"matching"
] | python | train |
caseyjlaw/rtpipe | rtpipe/RT.py | https://github.com/caseyjlaw/rtpipe/blob/ac33e4332cf215091a63afbb3137850876d73ec0/rtpipe/RT.py#L1039-L1048 | def correct_dm(d, dm, blrange):
""" Dedisperses data into data_resamp
Drops edges, since it assumes that data is read with overlapping chunks in time.
"""
data = numpyview(data_mem, 'complex64', datashape(d))
data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d))
bl0,bl1 = blrange
data_resamp[:, bl0:bl1] = data[:, bl0:bl1]
rtlib.dedisperse_par(data_resamp, d['freq'], d['inttime'], dm, blrange, verbose=0) | [
"def",
"correct_dm",
"(",
"d",
",",
"dm",
",",
"blrange",
")",
":",
"data",
"=",
"numpyview",
"(",
"data_mem",
",",
"'complex64'",
",",
"datashape",
"(",
"d",
")",
")",
"data_resamp",
"=",
"numpyview",
"(",
"data_resamp_mem",
",",
"'complex64'",
",",
"datashape",
"(",
"d",
")",
")",
"bl0",
",",
"bl1",
"=",
"blrange",
"data_resamp",
"[",
":",
",",
"bl0",
":",
"bl1",
"]",
"=",
"data",
"[",
":",
",",
"bl0",
":",
"bl1",
"]",
"rtlib",
".",
"dedisperse_par",
"(",
"data_resamp",
",",
"d",
"[",
"'freq'",
"]",
",",
"d",
"[",
"'inttime'",
"]",
",",
"dm",
",",
"blrange",
",",
"verbose",
"=",
"0",
")"
] | Dedisperses data into data_resamp
Drops edges, since it assumes that data is read with overlapping chunks in time. | [
"Dedisperses",
"data",
"into",
"data_resamp",
"Drops",
"edges",
"since",
"it",
"assumes",
"that",
"data",
"is",
"read",
"with",
"overlapping",
"chunks",
"in",
"time",
"."
] | python | train |
smdabdoub/phylotoast | phylotoast/biom_calc.py | https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/biom_calc.py#L95-L135 | def raw_abundance(biomf, sampleIDs=None, sample_abd=True):
"""
Calculate the total number of sequences in each OTU or SampleID.
:type biomf: A BIOM file.
:param biomf: OTU table format.
:type sampleIDs: List
:param sampleIDs: A list of column id's from BIOM format OTU table. By default, the
list has been set to None.
:type sample_abd: Boolean
:param sample_abd: A boolean operator to provide output for OTUID's or SampleID's. By
default, the output will be provided for SampleID's.
:rtype: dict
:return: Returns a dictionary keyed on either OTUID's or SampleIDs and their
respective abundance as values.
"""
results = defaultdict(int)
if sampleIDs is None:
sampleIDs = biomf.ids()
else:
try:
for sid in sampleIDs:
assert sid in biomf.ids()
except AssertionError:
raise ValueError(
"\nError while calculating raw total abundances: The sampleIDs provided "
"do not match the sampleIDs in biom file. Please double check the "
"sampleIDs provided.\n")
otuIDs = biomf.ids(axis="observation")
for sampleID in sampleIDs:
for otuID in otuIDs:
abd = biomf.get_value_by_ids(otuID, sampleID)
if sample_abd:
results[sampleID] += abd
else:
results[otuID] += abd
return results | [
"def",
"raw_abundance",
"(",
"biomf",
",",
"sampleIDs",
"=",
"None",
",",
"sample_abd",
"=",
"True",
")",
":",
"results",
"=",
"defaultdict",
"(",
"int",
")",
"if",
"sampleIDs",
"is",
"None",
":",
"sampleIDs",
"=",
"biomf",
".",
"ids",
"(",
")",
"else",
":",
"try",
":",
"for",
"sid",
"in",
"sampleIDs",
":",
"assert",
"sid",
"in",
"biomf",
".",
"ids",
"(",
")",
"except",
"AssertionError",
":",
"raise",
"ValueError",
"(",
"\"\\nError while calculating raw total abundances: The sampleIDs provided \"",
"\"do not match the sampleIDs in biom file. Please double check the \"",
"\"sampleIDs provided.\\n\"",
")",
"otuIDs",
"=",
"biomf",
".",
"ids",
"(",
"axis",
"=",
"\"observation\"",
")",
"for",
"sampleID",
"in",
"sampleIDs",
":",
"for",
"otuID",
"in",
"otuIDs",
":",
"abd",
"=",
"biomf",
".",
"get_value_by_ids",
"(",
"otuID",
",",
"sampleID",
")",
"if",
"sample_abd",
":",
"results",
"[",
"sampleID",
"]",
"+=",
"abd",
"else",
":",
"results",
"[",
"otuID",
"]",
"+=",
"abd",
"return",
"results"
] | Calculate the total number of sequences in each OTU or SampleID.
:type biomf: A BIOM file.
:param biomf: OTU table format.
:type sampleIDs: List
:param sampleIDs: A list of column id's from BIOM format OTU table. By default, the
list has been set to None.
:type sample_abd: Boolean
:param sample_abd: A boolean operator to provide output for OTUID's or SampleID's. By
default, the output will be provided for SampleID's.
:rtype: dict
:return: Returns a dictionary keyed on either OTUID's or SampleIDs and their
respective abundance as values. | [
"Calculate",
"the",
"total",
"number",
"of",
"sequences",
"in",
"each",
"OTU",
"or",
"SampleID",
"."
] | python | train |
brainiak/brainiak | brainiak/funcalign/rsrm.py | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/rsrm.py#L304-L343 | def _init_transforms(self, subjs, voxels, features, random_state):
"""Initialize the mappings (Wi) with random orthogonal matrices.
Parameters
----------
subjs : int
The number of subjects.
voxels : list of int
A list with the number of voxels per subject.
features : int
The number of features in the model.
random_state : `RandomState`
A random state to draw the mappings.
Returns
-------
W : list of array, element i has shape=[voxels_i, features]
The initialized orthogonal transforms (mappings) :math:`W_i` for
each subject.
Note
----
Not thread safe.
"""
# Init the Random seed generator
np.random.seed(self.rand_seed)
# Draw a random W for each subject
W = [random_state.random_sample((voxels[i], features))
for i in range(subjs)]
# Make it orthogonal it with QR decomposition
for i in range(subjs):
W[i], _ = np.linalg.qr(W[i])
return W | [
"def",
"_init_transforms",
"(",
"self",
",",
"subjs",
",",
"voxels",
",",
"features",
",",
"random_state",
")",
":",
"# Init the Random seed generator",
"np",
".",
"random",
".",
"seed",
"(",
"self",
".",
"rand_seed",
")",
"# Draw a random W for each subject",
"W",
"=",
"[",
"random_state",
".",
"random_sample",
"(",
"(",
"voxels",
"[",
"i",
"]",
",",
"features",
")",
")",
"for",
"i",
"in",
"range",
"(",
"subjs",
")",
"]",
"# Make it orthogonal it with QR decomposition",
"for",
"i",
"in",
"range",
"(",
"subjs",
")",
":",
"W",
"[",
"i",
"]",
",",
"_",
"=",
"np",
".",
"linalg",
".",
"qr",
"(",
"W",
"[",
"i",
"]",
")",
"return",
"W"
] | Initialize the mappings (Wi) with random orthogonal matrices.
Parameters
----------
subjs : int
The number of subjects.
voxels : list of int
A list with the number of voxels per subject.
features : int
The number of features in the model.
random_state : `RandomState`
A random state to draw the mappings.
Returns
-------
W : list of array, element i has shape=[voxels_i, features]
The initialized orthogonal transforms (mappings) :math:`W_i` for
each subject.
Note
----
Not thread safe. | [
"Initialize",
"the",
"mappings",
"(",
"Wi",
")",
"with",
"random",
"orthogonal",
"matrices",
".",
"Parameters",
"----------"
] | python | train |
vivangkumar/uberpy | uberpy/api.py | https://github.com/vivangkumar/uberpy/blob/abc62ccb5399424eb5690f12c392ab2dbd9d96e0/uberpy/api.py#L58-L91 | def check_status(content, response):
"""
Check the response that is returned for known exceptions and errors.
:param response: Response that is returned from the call.
:raise:
MalformedRequestException if `response.status` is 400
UnauthorisedException if `response.status` is 401
NotFoundException if `response.status` is 404
UnacceptableContentException if `response.status` is 406
InvalidRequestException if `response.status` is 422
RateLimitException if `response.status` is 429
ServerException if `response.status` > 500
"""
if response.status == 400:
raise MalformedRequestException(content, response)
if response.status == 401:
raise UnauthorisedException(content, response)
if response.status == 404:
raise NotFoundException(content, response)
if response.status == 406:
raise UnacceptableContentException(content, response)
if response.status == 422:
raise InvalidRequestException(content, response)
if response.status == 429:
raise RateLimitException(content, response)
if response.status >= 500:
raise ServerException(content, response) | [
"def",
"check_status",
"(",
"content",
",",
"response",
")",
":",
"if",
"response",
".",
"status",
"==",
"400",
":",
"raise",
"MalformedRequestException",
"(",
"content",
",",
"response",
")",
"if",
"response",
".",
"status",
"==",
"401",
":",
"raise",
"UnauthorisedException",
"(",
"content",
",",
"response",
")",
"if",
"response",
".",
"status",
"==",
"404",
":",
"raise",
"NotFoundException",
"(",
"content",
",",
"response",
")",
"if",
"response",
".",
"status",
"==",
"406",
":",
"raise",
"UnacceptableContentException",
"(",
"content",
",",
"response",
")",
"if",
"response",
".",
"status",
"==",
"422",
":",
"raise",
"InvalidRequestException",
"(",
"content",
",",
"response",
")",
"if",
"response",
".",
"status",
"==",
"429",
":",
"raise",
"RateLimitException",
"(",
"content",
",",
"response",
")",
"if",
"response",
".",
"status",
">=",
"500",
":",
"raise",
"ServerException",
"(",
"content",
",",
"response",
")"
] | Check the response that is returned for known exceptions and errors.
:param response: Response that is returned from the call.
:raise:
MalformedRequestException if `response.status` is 400
UnauthorisedException if `response.status` is 401
NotFoundException if `response.status` is 404
UnacceptableContentException if `response.status` is 406
InvalidRequestException if `response.status` is 422
RateLimitException if `response.status` is 429
ServerException if `response.status` > 500 | [
"Check",
"the",
"response",
"that",
"is",
"returned",
"for",
"known",
"exceptions",
"and",
"errors",
".",
":",
"param",
"response",
":",
"Response",
"that",
"is",
"returned",
"from",
"the",
"call",
".",
":",
"raise",
":",
"MalformedRequestException",
"if",
"response",
".",
"status",
"is",
"400",
"UnauthorisedException",
"if",
"response",
".",
"status",
"is",
"401",
"NotFoundException",
"if",
"response",
".",
"status",
"is",
"404",
"UnacceptableContentException",
"if",
"response",
".",
"status",
"is",
"406",
"InvalidRequestException",
"if",
"response",
".",
"status",
"is",
"422",
"RateLimitException",
"if",
"response",
".",
"status",
"is",
"429",
"ServerException",
"if",
"response",
".",
"status",
">",
"500"
] | python | valid |
urtdevs/yaurtww | yaurtww/manifest.py | https://github.com/urtdevs/yaurtww/blob/842fbd1fb5d32c2be89df471591b70c767aebd14/yaurtww/manifest.py#L14-L23 | def _parse_version(self, line):
"""
There's a magic suffix to the release version, currently it's -03, but
it increments seemingly randomly.
"""
version_string = line.split(' ')[1]
version_list = version_string.split('.')
major_version = ''.join([version_list[0], version_list[1]])
release_num = ''.join([version_list[2].rstrip(), "-03"])
return (major_version, release_num) | [
"def",
"_parse_version",
"(",
"self",
",",
"line",
")",
":",
"version_string",
"=",
"line",
".",
"split",
"(",
"' '",
")",
"[",
"1",
"]",
"version_list",
"=",
"version_string",
".",
"split",
"(",
"'.'",
")",
"major_version",
"=",
"''",
".",
"join",
"(",
"[",
"version_list",
"[",
"0",
"]",
",",
"version_list",
"[",
"1",
"]",
"]",
")",
"release_num",
"=",
"''",
".",
"join",
"(",
"[",
"version_list",
"[",
"2",
"]",
".",
"rstrip",
"(",
")",
",",
"\"-03\"",
"]",
")",
"return",
"(",
"major_version",
",",
"release_num",
")"
] | There's a magic suffix to the release version, currently it's -03, but
it increments seemingly randomly. | [
"There",
"s",
"a",
"magic",
"suffix",
"to",
"the",
"release",
"version",
"currently",
"it",
"s",
"-",
"03",
"but",
"it",
"increments",
"seemingly",
"randomly",
"."
] | python | train |
inveniosoftware/invenio-search | examples/app.py | https://github.com/inveniosoftware/invenio-search/blob/19c073d608d4c811f1c5aecb6622402d39715228/examples/app.py#L106-L119 | def index():
"""Query Elasticsearch using Invenio query syntax."""
page = request.values.get('page', 1, type=int)
size = request.values.get('size', 2, type=int)
search = ExampleSearch()[(page - 1) * size:page * size]
if 'q' in request.values:
search = search.query(QueryString(query=request.values.get('q')))
search = search.sort(
request.values.get('sort', 'title')
)
search = ExampleSearch.faceted_search(search=search)
results = search.execute().to_dict()
return jsonify({'hits': results.get('hits')}) | [
"def",
"index",
"(",
")",
":",
"page",
"=",
"request",
".",
"values",
".",
"get",
"(",
"'page'",
",",
"1",
",",
"type",
"=",
"int",
")",
"size",
"=",
"request",
".",
"values",
".",
"get",
"(",
"'size'",
",",
"2",
",",
"type",
"=",
"int",
")",
"search",
"=",
"ExampleSearch",
"(",
")",
"[",
"(",
"page",
"-",
"1",
")",
"*",
"size",
":",
"page",
"*",
"size",
"]",
"if",
"'q'",
"in",
"request",
".",
"values",
":",
"search",
"=",
"search",
".",
"query",
"(",
"QueryString",
"(",
"query",
"=",
"request",
".",
"values",
".",
"get",
"(",
"'q'",
")",
")",
")",
"search",
"=",
"search",
".",
"sort",
"(",
"request",
".",
"values",
".",
"get",
"(",
"'sort'",
",",
"'title'",
")",
")",
"search",
"=",
"ExampleSearch",
".",
"faceted_search",
"(",
"search",
"=",
"search",
")",
"results",
"=",
"search",
".",
"execute",
"(",
")",
".",
"to_dict",
"(",
")",
"return",
"jsonify",
"(",
"{",
"'hits'",
":",
"results",
".",
"get",
"(",
"'hits'",
")",
"}",
")"
] | Query Elasticsearch using Invenio query syntax. | [
"Query",
"Elasticsearch",
"using",
"Invenio",
"query",
"syntax",
"."
] | python | train |
sdispater/orator | orator/orm/scopes/soft_deleting.py | https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/orm/scopes/soft_deleting.py#L10-L22 | def apply(self, builder, model):
"""
Apply the scope to a given query builder.
:param builder: The query builder
:type builder: orator.orm.builder.Builder
:param model: The model
:type model: orator.orm.Model
"""
builder.where_null(model.get_qualified_deleted_at_column())
self.extend(builder) | [
"def",
"apply",
"(",
"self",
",",
"builder",
",",
"model",
")",
":",
"builder",
".",
"where_null",
"(",
"model",
".",
"get_qualified_deleted_at_column",
"(",
")",
")",
"self",
".",
"extend",
"(",
"builder",
")"
] | Apply the scope to a given query builder.
:param builder: The query builder
:type builder: orator.orm.builder.Builder
:param model: The model
:type model: orator.orm.Model | [
"Apply",
"the",
"scope",
"to",
"a",
"given",
"query",
"builder",
"."
] | python | train |
adafruit/Adafruit_Python_MPR121 | Adafruit_MPR121/MPR121.py | https://github.com/adafruit/Adafruit_Python_MPR121/blob/86360b80186617e0056d5bd5279bda000978d92c/Adafruit_MPR121/MPR121.py#L160-L165 | def filtered_data(self, pin):
"""Return filtered data register value for the provided pin (0-11).
Useful for debugging.
"""
assert pin >= 0 and pin < 12, 'pin must be between 0-11 (inclusive)'
return self._i2c_retry(self._device.readU16LE, MPR121_FILTDATA_0L + pin*2) | [
"def",
"filtered_data",
"(",
"self",
",",
"pin",
")",
":",
"assert",
"pin",
">=",
"0",
"and",
"pin",
"<",
"12",
",",
"'pin must be between 0-11 (inclusive)'",
"return",
"self",
".",
"_i2c_retry",
"(",
"self",
".",
"_device",
".",
"readU16LE",
",",
"MPR121_FILTDATA_0L",
"+",
"pin",
"*",
"2",
")"
] | Return filtered data register value for the provided pin (0-11).
Useful for debugging. | [
"Return",
"filtered",
"data",
"register",
"value",
"for",
"the",
"provided",
"pin",
"(",
"0",
"-",
"11",
")",
".",
"Useful",
"for",
"debugging",
"."
] | python | train |
blockadeio/analyst_toolbench | blockade/cli/aws_serverless.py | https://github.com/blockadeio/analyst_toolbench/blob/159b6f8cf8a91c5ff050f1579636ea90ab269863/blockade/cli/aws_serverless.py#L552-L589 | def remove_s3_bucket():
"""Remove the Blockade bucket."""
logger.debug("[#] Removing S3 bucket")
client = boto3.client("s3", region_name=PRIMARY_REGION)
buckets = client.list_buckets()
matches = [x for x in buckets.get('Buckets', list())
if x['Name'].startswith(S3_BUCKET_NAME)]
if len(matches) == 0:
return
match = matches.pop()['Name']
try:
response = client.list_objects_v2(
Bucket=match,
)
except client.exceptions.NoSuchBucket:
logger.info("[!] S3 bucket already deleted")
return True
while response['KeyCount'] > 0:
logger.debug('[*] Deleting %d objects from bucket %s'
% (len(response['Contents']), match))
response = client.delete_objects(
Bucket=match,
Delete={
'Objects': [{'Key': obj['Key']} for obj in response['Contents']]
}
)
response = client.list_objects_v2(
Bucket=match,
)
logger.debug('[#] Deleting bucket %s' % match)
response = client.delete_bucket(
Bucket=match
)
logger.info("[#] Successfully deleted the S3 bucket")
return response | [
"def",
"remove_s3_bucket",
"(",
")",
":",
"logger",
".",
"debug",
"(",
"\"[#] Removing S3 bucket\"",
")",
"client",
"=",
"boto3",
".",
"client",
"(",
"\"s3\"",
",",
"region_name",
"=",
"PRIMARY_REGION",
")",
"buckets",
"=",
"client",
".",
"list_buckets",
"(",
")",
"matches",
"=",
"[",
"x",
"for",
"x",
"in",
"buckets",
".",
"get",
"(",
"'Buckets'",
",",
"list",
"(",
")",
")",
"if",
"x",
"[",
"'Name'",
"]",
".",
"startswith",
"(",
"S3_BUCKET_NAME",
")",
"]",
"if",
"len",
"(",
"matches",
")",
"==",
"0",
":",
"return",
"match",
"=",
"matches",
".",
"pop",
"(",
")",
"[",
"'Name'",
"]",
"try",
":",
"response",
"=",
"client",
".",
"list_objects_v2",
"(",
"Bucket",
"=",
"match",
",",
")",
"except",
"client",
".",
"exceptions",
".",
"NoSuchBucket",
":",
"logger",
".",
"info",
"(",
"\"[!] S3 bucket already deleted\"",
")",
"return",
"True",
"while",
"response",
"[",
"'KeyCount'",
"]",
">",
"0",
":",
"logger",
".",
"debug",
"(",
"'[*] Deleting %d objects from bucket %s'",
"%",
"(",
"len",
"(",
"response",
"[",
"'Contents'",
"]",
")",
",",
"match",
")",
")",
"response",
"=",
"client",
".",
"delete_objects",
"(",
"Bucket",
"=",
"match",
",",
"Delete",
"=",
"{",
"'Objects'",
":",
"[",
"{",
"'Key'",
":",
"obj",
"[",
"'Key'",
"]",
"}",
"for",
"obj",
"in",
"response",
"[",
"'Contents'",
"]",
"]",
"}",
")",
"response",
"=",
"client",
".",
"list_objects_v2",
"(",
"Bucket",
"=",
"match",
",",
")",
"logger",
".",
"debug",
"(",
"'[#] Deleting bucket %s'",
"%",
"match",
")",
"response",
"=",
"client",
".",
"delete_bucket",
"(",
"Bucket",
"=",
"match",
")",
"logger",
".",
"info",
"(",
"\"[#] Successfully deleted the S3 bucket\"",
")",
"return",
"response"
] | Remove the Blockade bucket. | [
"Remove",
"the",
"Blockade",
"bucket",
"."
] | python | train |
michaelpb/omnic | omnic/cli/commandparser.py | https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/cli/commandparser.py#L16-L27 | def gen_subcommand_help(self):
'''
Generates s
'''
commands = sorted(self.subcommands.items(), key=lambda i: i[0])
return '\n'.join(
'%s %s' % (
subcommand.ljust(15),
textwrap.shorten(description, width=61),
)
for subcommand, (description, action, opts) in commands
) | [
"def",
"gen_subcommand_help",
"(",
"self",
")",
":",
"commands",
"=",
"sorted",
"(",
"self",
".",
"subcommands",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"i",
":",
"i",
"[",
"0",
"]",
")",
"return",
"'\\n'",
".",
"join",
"(",
"'%s %s'",
"%",
"(",
"subcommand",
".",
"ljust",
"(",
"15",
")",
",",
"textwrap",
".",
"shorten",
"(",
"description",
",",
"width",
"=",
"61",
")",
",",
")",
"for",
"subcommand",
",",
"(",
"description",
",",
"action",
",",
"opts",
")",
"in",
"commands",
")"
] | Generates s | [
"Generates",
"s"
] | python | train |
pypa/pipenv | pipenv/patched/notpip/_vendor/html5lib/_inputstream.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_vendor/html5lib/_inputstream.py#L569-L581 | def detectEncodingMeta(self):
"""Report the encoding declared by the meta element
"""
buffer = self.rawStream.read(self.numBytesMeta)
assert isinstance(buffer, bytes)
parser = EncodingParser(buffer)
self.rawStream.seek(0)
encoding = parser.getEncoding()
if encoding is not None and encoding.name in ("utf-16be", "utf-16le"):
encoding = lookupEncoding("utf-8")
return encoding | [
"def",
"detectEncodingMeta",
"(",
"self",
")",
":",
"buffer",
"=",
"self",
".",
"rawStream",
".",
"read",
"(",
"self",
".",
"numBytesMeta",
")",
"assert",
"isinstance",
"(",
"buffer",
",",
"bytes",
")",
"parser",
"=",
"EncodingParser",
"(",
"buffer",
")",
"self",
".",
"rawStream",
".",
"seek",
"(",
"0",
")",
"encoding",
"=",
"parser",
".",
"getEncoding",
"(",
")",
"if",
"encoding",
"is",
"not",
"None",
"and",
"encoding",
".",
"name",
"in",
"(",
"\"utf-16be\"",
",",
"\"utf-16le\"",
")",
":",
"encoding",
"=",
"lookupEncoding",
"(",
"\"utf-8\"",
")",
"return",
"encoding"
] | Report the encoding declared by the meta element | [
"Report",
"the",
"encoding",
"declared",
"by",
"the",
"meta",
"element"
] | python | train |
OCA/openupgradelib | openupgradelib/openupgrade.py | https://github.com/OCA/openupgradelib/blob/b220b6498075d62c1b64073cc934513a465cfd85/openupgradelib/openupgrade.py#L653-L670 | def rename_xmlids(cr, xmlids_spec):
"""
Rename XML IDs. Typically called in the pre script.
One usage example is when an ID changes module. In OpenERP 6 for example,
a number of res_groups IDs moved to module base from other modules (
although they were still being defined in their respective module).
:param xmlids_spec: a list of tuples (old module.xmlid, new module.xmlid).
"""
for (old, new) in xmlids_spec:
if '.' not in old or '.' not in new:
logger.error(
'Cannot rename XMLID %s to %s: need the module '
'reference to be specified in the IDs' % (old, new))
else:
query = ("UPDATE ir_model_data SET module = %s, name = %s "
"WHERE module = %s and name = %s")
logged_query(cr, query, tuple(new.split('.') + old.split('.'))) | [
"def",
"rename_xmlids",
"(",
"cr",
",",
"xmlids_spec",
")",
":",
"for",
"(",
"old",
",",
"new",
")",
"in",
"xmlids_spec",
":",
"if",
"'.'",
"not",
"in",
"old",
"or",
"'.'",
"not",
"in",
"new",
":",
"logger",
".",
"error",
"(",
"'Cannot rename XMLID %s to %s: need the module '",
"'reference to be specified in the IDs'",
"%",
"(",
"old",
",",
"new",
")",
")",
"else",
":",
"query",
"=",
"(",
"\"UPDATE ir_model_data SET module = %s, name = %s \"",
"\"WHERE module = %s and name = %s\"",
")",
"logged_query",
"(",
"cr",
",",
"query",
",",
"tuple",
"(",
"new",
".",
"split",
"(",
"'.'",
")",
"+",
"old",
".",
"split",
"(",
"'.'",
")",
")",
")"
] | Rename XML IDs. Typically called in the pre script.
One usage example is when an ID changes module. In OpenERP 6 for example,
a number of res_groups IDs moved to module base from other modules (
although they were still being defined in their respective module).
:param xmlids_spec: a list of tuples (old module.xmlid, new module.xmlid). | [
"Rename",
"XML",
"IDs",
".",
"Typically",
"called",
"in",
"the",
"pre",
"script",
".",
"One",
"usage",
"example",
"is",
"when",
"an",
"ID",
"changes",
"module",
".",
"In",
"OpenERP",
"6",
"for",
"example",
"a",
"number",
"of",
"res_groups",
"IDs",
"moved",
"to",
"module",
"base",
"from",
"other",
"modules",
"(",
"although",
"they",
"were",
"still",
"being",
"defined",
"in",
"their",
"respective",
"module",
")",
"."
] | python | train |
ozak/georasters | georasters/georasters.py | https://github.com/ozak/georasters/blob/0612bd91bb2a2cb2f1d59ba89c1ff131dae27d70/georasters/georasters.py#L1110-L1190 | def distance(self, sources, destinations, x='x', y='y', isolation=True,
export_raster=False, export_shape=False, routes=False, path='./'):
"""
Compute cost distance measured from each start point to all end points.
The function returns the distances between the start point and the end
points as a Pandas dataframe. Additionally, for each start point it computes
the level of isolation, i.e. its average travel distance to all other locations
"""
start_points = sources.copy()
end_points = destinations.copy()
if (not isinstance(start_points, pd.core.frame.DataFrame) and
not isinstance(start_points, gp.geodataframe.GeoDataFrame)):
raise TypeError('Sources has to be a (Geo)Pandas Data Frame Object.')
if (not isinstance(end_points, pd.core.frame.DataFrame) and
not isinstance(end_points, gp.geodataframe.GeoDataFrame)):
raise TypeError('Destinations has to be a (Geo)Pandas Data Frame Object.')
if not self.mcp_cost:
self.mcp()
count = 0
start_points['row'], start_points['col'] = self.map_pixel_location(start_points[x],
start_points[y])
end_points['row'], end_points['col'] = self.map_pixel_location(end_points[x], end_points[y])
start_points['ID'] = start_points.index.values
end_points['ID'] = end_points.index.values+start_points['ID'].max()+1
for i in start_points.iterrows():
cumulative_costs, traceback = self.mcp_cost.find_costs([[i[1].row, i[1].col]])
dist = cumulative_costs[end_points.row.values, end_points.col.values].transpose()/(7*24)
df2 = pd.DataFrame(np.array([(i[1]['ID']*np.ones_like(dist)).flatten(),
end_points['ID'], dist.flatten()]).transpose(),
columns=['ID1', 'ID2', 'dist'])
# Keep only locations that are accessible
df2 = df2.loc[df2['dist'] < np.inf]
if isolation:
grisolation = np.ma.masked_array(cumulative_costs,
mask=np.logical_or(self.raster.mask, cumulative_costs == np.inf)
, fill_value=np.nan).mean()/(7*24)
start_points.loc[i[0], 'Iso'] = grisolation
if export_raster:
cumulative_costs = GeoRaster(np.ma.masked_array(cumulative_costs,
mask=np.logical_or(self.raster.mask,
cumulative_costs == np.inf),
fill_value=np.nan), self.geot, self.nodata_value,
projection=self.projection, datatype=self.datatype)
cumulative_costs.raster.data[cumulative_costs.raster.mask] = cumulative_costs.nodata_value
cumulative_costs.to_tiff(path+str(i[1]['ID']))
if df2.size > 0:
if export_shape:
routes = True
if routes:
df2['geometry'] = df2['ID2'].apply(lambda x:
self.mcp_cost.traceback(end_points.loc[end_points['ID'] == x][['row', 'col']].values[0]))
df2['geometry'] = df2.geometry.apply(lambda x: [map_pixel_inv(y[0], y[1], self.geot[1],
self.geot[-1], self.geot[0], self.geot[-3]) for y in x])
df2['geometry'] = df2.geometry.apply(lambda x: LineString(x) if int(len(x) > 1)
else LineString([x[0], x[0]]))
df2 = gp.GeoDataFrame(df2, crs=cea)
if isolation:
df2['Iso'] = grisolation
if count == 0:
self.grdist = df2.copy()
else:
self.grdist = self.grdist.append(df2)
count += 1
if routes:
self.grdist = gp.GeoDataFrame(self.grdist, crs=cea)
if export_shape:
start_pointscols = sources.columns.values
end_pointscols = destinations.columns.values
if 'geometry' in end_pointscols:
self.grdist = pd.merge(self.grdist, end_points[['ID'] + end_pointscols.tolist()].drop('geometry', axis=1), left_on='ID2', right_on='ID', how='left')
else:
self.grdist = pd.merge(self.grdist, end_points[['ID']+end_pointscols.tolist()], left_on='ID2', right_on='ID', how='left')
if 'geometry' in self.start_pointscols:
self.grdist = pd.merge(self.grdist, start_points[['ID']+start_pointscols.tolist()].drop('geometry', axis=1), left_on='ID1', right_on='ID', how='left',
suffixes=['_2', '_1'])
else:
self.grdist = pd.merge(self.grdist, start_points[['ID']+start_pointscols.tolist()], left_on='ID1', right_on='ID', how='left',
suffixes=['_2', '_1'])
self.grdist = gp.GeoDataFrame(self.grdist, crs=cea)
self.grdist.to_file(path+'routes.shp') | [
"def",
"distance",
"(",
"self",
",",
"sources",
",",
"destinations",
",",
"x",
"=",
"'x'",
",",
"y",
"=",
"'y'",
",",
"isolation",
"=",
"True",
",",
"export_raster",
"=",
"False",
",",
"export_shape",
"=",
"False",
",",
"routes",
"=",
"False",
",",
"path",
"=",
"'./'",
")",
":",
"start_points",
"=",
"sources",
".",
"copy",
"(",
")",
"end_points",
"=",
"destinations",
".",
"copy",
"(",
")",
"if",
"(",
"not",
"isinstance",
"(",
"start_points",
",",
"pd",
".",
"core",
".",
"frame",
".",
"DataFrame",
")",
"and",
"not",
"isinstance",
"(",
"start_points",
",",
"gp",
".",
"geodataframe",
".",
"GeoDataFrame",
")",
")",
":",
"raise",
"TypeError",
"(",
"'Sources has to be a (Geo)Pandas Data Frame Object.'",
")",
"if",
"(",
"not",
"isinstance",
"(",
"end_points",
",",
"pd",
".",
"core",
".",
"frame",
".",
"DataFrame",
")",
"and",
"not",
"isinstance",
"(",
"end_points",
",",
"gp",
".",
"geodataframe",
".",
"GeoDataFrame",
")",
")",
":",
"raise",
"TypeError",
"(",
"'Destinations has to be a (Geo)Pandas Data Frame Object.'",
")",
"if",
"not",
"self",
".",
"mcp_cost",
":",
"self",
".",
"mcp",
"(",
")",
"count",
"=",
"0",
"start_points",
"[",
"'row'",
"]",
",",
"start_points",
"[",
"'col'",
"]",
"=",
"self",
".",
"map_pixel_location",
"(",
"start_points",
"[",
"x",
"]",
",",
"start_points",
"[",
"y",
"]",
")",
"end_points",
"[",
"'row'",
"]",
",",
"end_points",
"[",
"'col'",
"]",
"=",
"self",
".",
"map_pixel_location",
"(",
"end_points",
"[",
"x",
"]",
",",
"end_points",
"[",
"y",
"]",
")",
"start_points",
"[",
"'ID'",
"]",
"=",
"start_points",
".",
"index",
".",
"values",
"end_points",
"[",
"'ID'",
"]",
"=",
"end_points",
".",
"index",
".",
"values",
"+",
"start_points",
"[",
"'ID'",
"]",
".",
"max",
"(",
")",
"+",
"1",
"for",
"i",
"in",
"start_points",
".",
"iterrows",
"(",
")",
":",
"cumulative_costs",
",",
"traceback",
"=",
"self",
".",
"mcp_cost",
".",
"find_costs",
"(",
"[",
"[",
"i",
"[",
"1",
"]",
".",
"row",
",",
"i",
"[",
"1",
"]",
".",
"col",
"]",
"]",
")",
"dist",
"=",
"cumulative_costs",
"[",
"end_points",
".",
"row",
".",
"values",
",",
"end_points",
".",
"col",
".",
"values",
"]",
".",
"transpose",
"(",
")",
"/",
"(",
"7",
"*",
"24",
")",
"df2",
"=",
"pd",
".",
"DataFrame",
"(",
"np",
".",
"array",
"(",
"[",
"(",
"i",
"[",
"1",
"]",
"[",
"'ID'",
"]",
"*",
"np",
".",
"ones_like",
"(",
"dist",
")",
")",
".",
"flatten",
"(",
")",
",",
"end_points",
"[",
"'ID'",
"]",
",",
"dist",
".",
"flatten",
"(",
")",
"]",
")",
".",
"transpose",
"(",
")",
",",
"columns",
"=",
"[",
"'ID1'",
",",
"'ID2'",
",",
"'dist'",
"]",
")",
"# Keep only locations that are accessible",
"df2",
"=",
"df2",
".",
"loc",
"[",
"df2",
"[",
"'dist'",
"]",
"<",
"np",
".",
"inf",
"]",
"if",
"isolation",
":",
"grisolation",
"=",
"np",
".",
"ma",
".",
"masked_array",
"(",
"cumulative_costs",
",",
"mask",
"=",
"np",
".",
"logical_or",
"(",
"self",
".",
"raster",
".",
"mask",
",",
"cumulative_costs",
"==",
"np",
".",
"inf",
")",
",",
"fill_value",
"=",
"np",
".",
"nan",
")",
".",
"mean",
"(",
")",
"/",
"(",
"7",
"*",
"24",
")",
"start_points",
".",
"loc",
"[",
"i",
"[",
"0",
"]",
",",
"'Iso'",
"]",
"=",
"grisolation",
"if",
"export_raster",
":",
"cumulative_costs",
"=",
"GeoRaster",
"(",
"np",
".",
"ma",
".",
"masked_array",
"(",
"cumulative_costs",
",",
"mask",
"=",
"np",
".",
"logical_or",
"(",
"self",
".",
"raster",
".",
"mask",
",",
"cumulative_costs",
"==",
"np",
".",
"inf",
")",
",",
"fill_value",
"=",
"np",
".",
"nan",
")",
",",
"self",
".",
"geot",
",",
"self",
".",
"nodata_value",
",",
"projection",
"=",
"self",
".",
"projection",
",",
"datatype",
"=",
"self",
".",
"datatype",
")",
"cumulative_costs",
".",
"raster",
".",
"data",
"[",
"cumulative_costs",
".",
"raster",
".",
"mask",
"]",
"=",
"cumulative_costs",
".",
"nodata_value",
"cumulative_costs",
".",
"to_tiff",
"(",
"path",
"+",
"str",
"(",
"i",
"[",
"1",
"]",
"[",
"'ID'",
"]",
")",
")",
"if",
"df2",
".",
"size",
">",
"0",
":",
"if",
"export_shape",
":",
"routes",
"=",
"True",
"if",
"routes",
":",
"df2",
"[",
"'geometry'",
"]",
"=",
"df2",
"[",
"'ID2'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"self",
".",
"mcp_cost",
".",
"traceback",
"(",
"end_points",
".",
"loc",
"[",
"end_points",
"[",
"'ID'",
"]",
"==",
"x",
"]",
"[",
"[",
"'row'",
",",
"'col'",
"]",
"]",
".",
"values",
"[",
"0",
"]",
")",
")",
"df2",
"[",
"'geometry'",
"]",
"=",
"df2",
".",
"geometry",
".",
"apply",
"(",
"lambda",
"x",
":",
"[",
"map_pixel_inv",
"(",
"y",
"[",
"0",
"]",
",",
"y",
"[",
"1",
"]",
",",
"self",
".",
"geot",
"[",
"1",
"]",
",",
"self",
".",
"geot",
"[",
"-",
"1",
"]",
",",
"self",
".",
"geot",
"[",
"0",
"]",
",",
"self",
".",
"geot",
"[",
"-",
"3",
"]",
")",
"for",
"y",
"in",
"x",
"]",
")",
"df2",
"[",
"'geometry'",
"]",
"=",
"df2",
".",
"geometry",
".",
"apply",
"(",
"lambda",
"x",
":",
"LineString",
"(",
"x",
")",
"if",
"int",
"(",
"len",
"(",
"x",
")",
">",
"1",
")",
"else",
"LineString",
"(",
"[",
"x",
"[",
"0",
"]",
",",
"x",
"[",
"0",
"]",
"]",
")",
")",
"df2",
"=",
"gp",
".",
"GeoDataFrame",
"(",
"df2",
",",
"crs",
"=",
"cea",
")",
"if",
"isolation",
":",
"df2",
"[",
"'Iso'",
"]",
"=",
"grisolation",
"if",
"count",
"==",
"0",
":",
"self",
".",
"grdist",
"=",
"df2",
".",
"copy",
"(",
")",
"else",
":",
"self",
".",
"grdist",
"=",
"self",
".",
"grdist",
".",
"append",
"(",
"df2",
")",
"count",
"+=",
"1",
"if",
"routes",
":",
"self",
".",
"grdist",
"=",
"gp",
".",
"GeoDataFrame",
"(",
"self",
".",
"grdist",
",",
"crs",
"=",
"cea",
")",
"if",
"export_shape",
":",
"start_pointscols",
"=",
"sources",
".",
"columns",
".",
"values",
"end_pointscols",
"=",
"destinations",
".",
"columns",
".",
"values",
"if",
"'geometry'",
"in",
"end_pointscols",
":",
"self",
".",
"grdist",
"=",
"pd",
".",
"merge",
"(",
"self",
".",
"grdist",
",",
"end_points",
"[",
"[",
"'ID'",
"]",
"+",
"end_pointscols",
".",
"tolist",
"(",
")",
"]",
".",
"drop",
"(",
"'geometry'",
",",
"axis",
"=",
"1",
")",
",",
"left_on",
"=",
"'ID2'",
",",
"right_on",
"=",
"'ID'",
",",
"how",
"=",
"'left'",
")",
"else",
":",
"self",
".",
"grdist",
"=",
"pd",
".",
"merge",
"(",
"self",
".",
"grdist",
",",
"end_points",
"[",
"[",
"'ID'",
"]",
"+",
"end_pointscols",
".",
"tolist",
"(",
")",
"]",
",",
"left_on",
"=",
"'ID2'",
",",
"right_on",
"=",
"'ID'",
",",
"how",
"=",
"'left'",
")",
"if",
"'geometry'",
"in",
"self",
".",
"start_pointscols",
":",
"self",
".",
"grdist",
"=",
"pd",
".",
"merge",
"(",
"self",
".",
"grdist",
",",
"start_points",
"[",
"[",
"'ID'",
"]",
"+",
"start_pointscols",
".",
"tolist",
"(",
")",
"]",
".",
"drop",
"(",
"'geometry'",
",",
"axis",
"=",
"1",
")",
",",
"left_on",
"=",
"'ID1'",
",",
"right_on",
"=",
"'ID'",
",",
"how",
"=",
"'left'",
",",
"suffixes",
"=",
"[",
"'_2'",
",",
"'_1'",
"]",
")",
"else",
":",
"self",
".",
"grdist",
"=",
"pd",
".",
"merge",
"(",
"self",
".",
"grdist",
",",
"start_points",
"[",
"[",
"'ID'",
"]",
"+",
"start_pointscols",
".",
"tolist",
"(",
")",
"]",
",",
"left_on",
"=",
"'ID1'",
",",
"right_on",
"=",
"'ID'",
",",
"how",
"=",
"'left'",
",",
"suffixes",
"=",
"[",
"'_2'",
",",
"'_1'",
"]",
")",
"self",
".",
"grdist",
"=",
"gp",
".",
"GeoDataFrame",
"(",
"self",
".",
"grdist",
",",
"crs",
"=",
"cea",
")",
"self",
".",
"grdist",
".",
"to_file",
"(",
"path",
"+",
"'routes.shp'",
")"
] | Compute cost distance measured from each start point to all end points.
The function returns the distances between the start point and the end
points as a Pandas dataframe. Additionally, for each start point it computes
the level of isolation, i.e. its average travel distance to all other locations | [
"Compute",
"cost",
"distance",
"measured",
"from",
"each",
"start",
"point",
"to",
"all",
"end",
"points",
".",
"The",
"function",
"returns",
"the",
"distances",
"between",
"the",
"start",
"point",
"and",
"the",
"end",
"points",
"as",
"a",
"Pandas",
"dataframe",
".",
"Additionally",
"for",
"each",
"start",
"point",
"it",
"computes",
"the",
"level",
"of",
"isolation",
"i",
".",
"e",
".",
"its",
"average",
"travel",
"distance",
"to",
"all",
"other",
"locations"
] | python | train |
mitsei/dlkit | dlkit/records/assessment/basic/drag_and_drop_records.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/assessment/basic/drag_and_drop_records.py#L303-L310 | def _init_map(self):
"""stub"""
self.my_osid_object_form._my_map['zoneConditions'] = \
self._zone_conditions_metadata['default_object_values'][0]
self.my_osid_object_form._my_map['coordinateConditions'] = \
self._coordinate_conditions_metadata['default_object_values'][0]
self.my_osid_object_form._my_map['spatialUnitConditions'] = \
self._spatial_unit_conditions_metadata['default_object_values'][0] | [
"def",
"_init_map",
"(",
"self",
")",
":",
"self",
".",
"my_osid_object_form",
".",
"_my_map",
"[",
"'zoneConditions'",
"]",
"=",
"self",
".",
"_zone_conditions_metadata",
"[",
"'default_object_values'",
"]",
"[",
"0",
"]",
"self",
".",
"my_osid_object_form",
".",
"_my_map",
"[",
"'coordinateConditions'",
"]",
"=",
"self",
".",
"_coordinate_conditions_metadata",
"[",
"'default_object_values'",
"]",
"[",
"0",
"]",
"self",
".",
"my_osid_object_form",
".",
"_my_map",
"[",
"'spatialUnitConditions'",
"]",
"=",
"self",
".",
"_spatial_unit_conditions_metadata",
"[",
"'default_object_values'",
"]",
"[",
"0",
"]"
] | stub | [
"stub"
] | python | train |
fracpete/python-weka-wrapper3 | python/weka/flow/base.py | https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/flow/base.py#L294-L305 | def storagehandler(self):
"""
Returns the storage handler available to thise actor.
:return: the storage handler, None if not available
"""
if isinstance(self, StorageHandler):
return self
elif self.parent is not None:
return self.parent.storagehandler
else:
return None | [
"def",
"storagehandler",
"(",
"self",
")",
":",
"if",
"isinstance",
"(",
"self",
",",
"StorageHandler",
")",
":",
"return",
"self",
"elif",
"self",
".",
"parent",
"is",
"not",
"None",
":",
"return",
"self",
".",
"parent",
".",
"storagehandler",
"else",
":",
"return",
"None"
] | Returns the storage handler available to thise actor.
:return: the storage handler, None if not available | [
"Returns",
"the",
"storage",
"handler",
"available",
"to",
"thise",
"actor",
"."
] | python | train |
PmagPy/PmagPy | dialogs/grid_frame2.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/grid_frame2.py#L364-L399 | def remove_col_label(self, event):#, include_pmag=True):
"""
check to see if column is required
if it is not, delete it from grid
"""
er_possible_headers = self.grid_headers[self.grid_type]['er'][2]
pmag_possible_headers = self.grid_headers[self.grid_type]['pmag'][2]
er_actual_headers = self.grid_headers[self.grid_type]['er'][0]
pmag_actual_headers = self.grid_headers[self.grid_type]['pmag'][0]
col = event.GetCol()
label = self.grid.GetColLabelValue(col)
if '**' in label:
label = label.strip('**')
if label in self.grid_headers[self.grid_type]['er'][1]:
pw.simple_warning("That header is required, and cannot be removed")
return False
#elif include_pmag and label in self.grid_headers[self.grid_type]['pmag'][1]:
# pw.simple_warning("That header is required, and cannot be removed")
# return False
else:
print('That header is not required:', label)
self.grid.remove_col(col)
#if label in er_possible_headers:
try:
print('removing {} from er_actual_headers'.format(label))
er_actual_headers.remove(label)
except ValueError:
pass
#if label in pmag_possible_headers:
try:
pmag_actual_headers.remove(label)
except ValueError:
pass
# causes resize on each column header delete
# can leave this out if we want.....
self.main_sizer.Fit(self) | [
"def",
"remove_col_label",
"(",
"self",
",",
"event",
")",
":",
"#, include_pmag=True):",
"er_possible_headers",
"=",
"self",
".",
"grid_headers",
"[",
"self",
".",
"grid_type",
"]",
"[",
"'er'",
"]",
"[",
"2",
"]",
"pmag_possible_headers",
"=",
"self",
".",
"grid_headers",
"[",
"self",
".",
"grid_type",
"]",
"[",
"'pmag'",
"]",
"[",
"2",
"]",
"er_actual_headers",
"=",
"self",
".",
"grid_headers",
"[",
"self",
".",
"grid_type",
"]",
"[",
"'er'",
"]",
"[",
"0",
"]",
"pmag_actual_headers",
"=",
"self",
".",
"grid_headers",
"[",
"self",
".",
"grid_type",
"]",
"[",
"'pmag'",
"]",
"[",
"0",
"]",
"col",
"=",
"event",
".",
"GetCol",
"(",
")",
"label",
"=",
"self",
".",
"grid",
".",
"GetColLabelValue",
"(",
"col",
")",
"if",
"'**'",
"in",
"label",
":",
"label",
"=",
"label",
".",
"strip",
"(",
"'**'",
")",
"if",
"label",
"in",
"self",
".",
"grid_headers",
"[",
"self",
".",
"grid_type",
"]",
"[",
"'er'",
"]",
"[",
"1",
"]",
":",
"pw",
".",
"simple_warning",
"(",
"\"That header is required, and cannot be removed\"",
")",
"return",
"False",
"#elif include_pmag and label in self.grid_headers[self.grid_type]['pmag'][1]:",
"# pw.simple_warning(\"That header is required, and cannot be removed\")",
"# return False",
"else",
":",
"print",
"(",
"'That header is not required:'",
",",
"label",
")",
"self",
".",
"grid",
".",
"remove_col",
"(",
"col",
")",
"#if label in er_possible_headers:",
"try",
":",
"print",
"(",
"'removing {} from er_actual_headers'",
".",
"format",
"(",
"label",
")",
")",
"er_actual_headers",
".",
"remove",
"(",
"label",
")",
"except",
"ValueError",
":",
"pass",
"#if label in pmag_possible_headers:",
"try",
":",
"pmag_actual_headers",
".",
"remove",
"(",
"label",
")",
"except",
"ValueError",
":",
"pass",
"# causes resize on each column header delete",
"# can leave this out if we want.....",
"self",
".",
"main_sizer",
".",
"Fit",
"(",
"self",
")"
] | check to see if column is required
if it is not, delete it from grid | [
"check",
"to",
"see",
"if",
"column",
"is",
"required",
"if",
"it",
"is",
"not",
"delete",
"it",
"from",
"grid"
] | python | train |
nerdvegas/rez | src/rez/utils/filesystem.py | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/filesystem.py#L76-L112 | def make_path_writable(path):
"""Temporarily make `path` writable, if possible.
Does nothing if:
- config setting 'make_package_temporarily_writable' is False;
- this can't be done (eg we don't own `path`).
Args:
path (str): Path to make temporarily writable
"""
from rez.config import config
try:
orig_mode = os.stat(path).st_mode
new_mode = orig_mode
if config.make_package_temporarily_writable and \
not os.access(path, os.W_OK):
new_mode = orig_mode | stat.S_IWUSR
# make writable
if new_mode != orig_mode:
os.chmod(path, new_mode)
except OSError:
# ignore access errors here, and just do nothing. It will be more
# intuitive for the calling code to fail on access instead.
#
orig_mode = None
new_mode = None
# yield, then reset mode back to original
try:
yield
finally:
if new_mode != orig_mode:
os.chmod(path, orig_mode) | [
"def",
"make_path_writable",
"(",
"path",
")",
":",
"from",
"rez",
".",
"config",
"import",
"config",
"try",
":",
"orig_mode",
"=",
"os",
".",
"stat",
"(",
"path",
")",
".",
"st_mode",
"new_mode",
"=",
"orig_mode",
"if",
"config",
".",
"make_package_temporarily_writable",
"and",
"not",
"os",
".",
"access",
"(",
"path",
",",
"os",
".",
"W_OK",
")",
":",
"new_mode",
"=",
"orig_mode",
"|",
"stat",
".",
"S_IWUSR",
"# make writable",
"if",
"new_mode",
"!=",
"orig_mode",
":",
"os",
".",
"chmod",
"(",
"path",
",",
"new_mode",
")",
"except",
"OSError",
":",
"# ignore access errors here, and just do nothing. It will be more",
"# intuitive for the calling code to fail on access instead.",
"#",
"orig_mode",
"=",
"None",
"new_mode",
"=",
"None",
"# yield, then reset mode back to original",
"try",
":",
"yield",
"finally",
":",
"if",
"new_mode",
"!=",
"orig_mode",
":",
"os",
".",
"chmod",
"(",
"path",
",",
"orig_mode",
")"
] | Temporarily make `path` writable, if possible.
Does nothing if:
- config setting 'make_package_temporarily_writable' is False;
- this can't be done (eg we don't own `path`).
Args:
path (str): Path to make temporarily writable | [
"Temporarily",
"make",
"path",
"writable",
"if",
"possible",
"."
] | python | train |
ethereum/py-evm | eth/db/journal.py | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/db/journal.py#L379-L408 | def commit(self, changeset_id: uuid.UUID) -> None:
"""
Commits a given changeset. This merges the given changeset and all
subsequent changesets into the previous changeset giving precidence
to later changesets in case of any conflicting keys.
If this is the base changeset then all changes will be written to
the underlying database and the Journal starts a new recording.
Typically, callers won't have access to the base changeset, because
it is dropped during .reset() which is called in JournalDB().
"""
self._validate_changeset(changeset_id)
journal_data = self.journal.commit_changeset(changeset_id)
if self.journal.is_empty():
# Ensure the journal automatically restarts recording after
# it has been persisted to the underlying db
self.reset()
for key, value in journal_data.items():
try:
if value is DELETED_ENTRY:
del self.wrapped_db[key]
elif value is ERASE_CREATED_ENTRY:
pass
else:
self.wrapped_db[key] = cast(bytes, value)
except Exception:
self._reapply_changeset_to_journal(changeset_id, journal_data)
raise | [
"def",
"commit",
"(",
"self",
",",
"changeset_id",
":",
"uuid",
".",
"UUID",
")",
"->",
"None",
":",
"self",
".",
"_validate_changeset",
"(",
"changeset_id",
")",
"journal_data",
"=",
"self",
".",
"journal",
".",
"commit_changeset",
"(",
"changeset_id",
")",
"if",
"self",
".",
"journal",
".",
"is_empty",
"(",
")",
":",
"# Ensure the journal automatically restarts recording after",
"# it has been persisted to the underlying db",
"self",
".",
"reset",
"(",
")",
"for",
"key",
",",
"value",
"in",
"journal_data",
".",
"items",
"(",
")",
":",
"try",
":",
"if",
"value",
"is",
"DELETED_ENTRY",
":",
"del",
"self",
".",
"wrapped_db",
"[",
"key",
"]",
"elif",
"value",
"is",
"ERASE_CREATED_ENTRY",
":",
"pass",
"else",
":",
"self",
".",
"wrapped_db",
"[",
"key",
"]",
"=",
"cast",
"(",
"bytes",
",",
"value",
")",
"except",
"Exception",
":",
"self",
".",
"_reapply_changeset_to_journal",
"(",
"changeset_id",
",",
"journal_data",
")",
"raise"
] | Commits a given changeset. This merges the given changeset and all
subsequent changesets into the previous changeset giving precidence
to later changesets in case of any conflicting keys.
If this is the base changeset then all changes will be written to
the underlying database and the Journal starts a new recording.
Typically, callers won't have access to the base changeset, because
it is dropped during .reset() which is called in JournalDB(). | [
"Commits",
"a",
"given",
"changeset",
".",
"This",
"merges",
"the",
"given",
"changeset",
"and",
"all",
"subsequent",
"changesets",
"into",
"the",
"previous",
"changeset",
"giving",
"precidence",
"to",
"later",
"changesets",
"in",
"case",
"of",
"any",
"conflicting",
"keys",
"."
] | python | train |
google/grumpy | third_party/pypy/_sre.py | https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pypy/_sre.py#L1068-L1077 | def check_charset(self, ctx, char):
"""Checks whether a character matches set of arbitrary length. Assumes
the code pointer is at the first member of the set."""
self.set_dispatcher.reset(char)
save_position = ctx.code_position
result = None
while result is None:
result = self.set_dispatcher.dispatch(ctx.peek_code(), ctx)
ctx.code_position = save_position
return result | [
"def",
"check_charset",
"(",
"self",
",",
"ctx",
",",
"char",
")",
":",
"self",
".",
"set_dispatcher",
".",
"reset",
"(",
"char",
")",
"save_position",
"=",
"ctx",
".",
"code_position",
"result",
"=",
"None",
"while",
"result",
"is",
"None",
":",
"result",
"=",
"self",
".",
"set_dispatcher",
".",
"dispatch",
"(",
"ctx",
".",
"peek_code",
"(",
")",
",",
"ctx",
")",
"ctx",
".",
"code_position",
"=",
"save_position",
"return",
"result"
] | Checks whether a character matches set of arbitrary length. Assumes
the code pointer is at the first member of the set. | [
"Checks",
"whether",
"a",
"character",
"matches",
"set",
"of",
"arbitrary",
"length",
".",
"Assumes",
"the",
"code",
"pointer",
"is",
"at",
"the",
"first",
"member",
"of",
"the",
"set",
"."
] | python | valid |
stbraun/fuzzing | features/steps/ft_fuzzer.py | https://github.com/stbraun/fuzzing/blob/974a64472732d4e40db919d242149bf0856fe199/features/steps/ft_fuzzer.py#L110-L116 | def step_impl09(context):
"""Create application list.
:param context: test context.
"""
assert context.table, "ENSURE: table is provided."
context.app_list = [row['application'] for row in context.table.rows] | [
"def",
"step_impl09",
"(",
"context",
")",
":",
"assert",
"context",
".",
"table",
",",
"\"ENSURE: table is provided.\"",
"context",
".",
"app_list",
"=",
"[",
"row",
"[",
"'application'",
"]",
"for",
"row",
"in",
"context",
".",
"table",
".",
"rows",
"]"
] | Create application list.
:param context: test context. | [
"Create",
"application",
"list",
"."
] | python | train |
Holzhaus/python-cmuclmtk | cmuclmtk/__init__.py | https://github.com/Holzhaus/python-cmuclmtk/blob/67a5c6713c497ca644ea1c697a70e8d930c9d4b4/cmuclmtk/__init__.py#L394-L446 | def idngram2lm(idngram_file, vocab_file, output_file, context_file=None, vocab_type=1, oov_fraction=0.5, four_byte_counts=False, min_unicount=0, zeroton_fraction=False, n=3, verbosity=2, arpa_output=True, ascii_input=False):
"""
Takes an idngram-file (in either binary (by default) or ASCII (if specified) format), a vocabulary file, and (optionally) a context cues file. Additional command line parameters will specify the cutoffs, the discounting strategy and parameters, etc. It outputs a language model, in either binary format (to be read by evallm), or in ARPA format.
"""
# TODO: Args still missing
# [ -calc_mem | -buffer 100 | -spec_num y ... z ]
# [ -two_byte_bo_weights
# [ -min_bo_weight nnnnn] [ -max_bo_weight nnnnn] [ -out_of_range_bo_weights] ]
# [ -linear | -absolute | -good_turing | -witten_bell ]
# [ -disc_ranges 1 7 7 ]
# [ -cutoffs 0 ... 0 ]
cmd = ['idngram2lm', '-idngram', os.path.abspath(idngram_file),
'-vocab', os.path.abspath(vocab_file),
'-vocab_type', vocab_type,
'-oov_fraction', oov_fraction,
'-min_unicount',min_unicount,
'-verbosity',verbosity,
'-n',n]
if arpa_output:
cmd.extend(['-arpa',output_file])
else:
cmd.extend(['-binary',output_file])
if four_byte_counts:
cmd.append('-four_byte_counts')
if zeroton_fraction:
cmd.append('-zeroton_fraction')
if ascii_input:
cmd.append('-ascii_input')
else:
cmd.append('-bin_input')
# Ensure that every parameter is of type 'str'
cmd = [str(x) for x in cmd]
with tempfile.SpooledTemporaryFile() as output_f:
with output_to_debuglogger() as err_f:
exitcode = subprocess.call(cmd, stdout=output_f, stderr=err_f)
output = output_f.read()
logger = logging.getLogger(__name__)
logger.debug("Command '%s' returned with exit code '%d'." % (' '.join(cmd), exitcode))
if exitcode != 0:
raise ConversionError("'%s' returned with non-zero exit status '%s'" % (cmd[0], exitcode))
if sys.version_info >= (3,) and type(output) is bytes:
output = output.decode('utf-8')
return output.strip() | [
"def",
"idngram2lm",
"(",
"idngram_file",
",",
"vocab_file",
",",
"output_file",
",",
"context_file",
"=",
"None",
",",
"vocab_type",
"=",
"1",
",",
"oov_fraction",
"=",
"0.5",
",",
"four_byte_counts",
"=",
"False",
",",
"min_unicount",
"=",
"0",
",",
"zeroton_fraction",
"=",
"False",
",",
"n",
"=",
"3",
",",
"verbosity",
"=",
"2",
",",
"arpa_output",
"=",
"True",
",",
"ascii_input",
"=",
"False",
")",
":",
"# TODO: Args still missing",
"# [ -calc_mem | -buffer 100 | -spec_num y ... z ]",
"# [ -two_byte_bo_weights ",
"# [ -min_bo_weight nnnnn] [ -max_bo_weight nnnnn] [ -out_of_range_bo_weights] ]",
"# [ -linear | -absolute | -good_turing | -witten_bell ]",
"# [ -disc_ranges 1 7 7 ]",
"# [ -cutoffs 0 ... 0 ]",
"cmd",
"=",
"[",
"'idngram2lm'",
",",
"'-idngram'",
",",
"os",
".",
"path",
".",
"abspath",
"(",
"idngram_file",
")",
",",
"'-vocab'",
",",
"os",
".",
"path",
".",
"abspath",
"(",
"vocab_file",
")",
",",
"'-vocab_type'",
",",
"vocab_type",
",",
"'-oov_fraction'",
",",
"oov_fraction",
",",
"'-min_unicount'",
",",
"min_unicount",
",",
"'-verbosity'",
",",
"verbosity",
",",
"'-n'",
",",
"n",
"]",
"if",
"arpa_output",
":",
"cmd",
".",
"extend",
"(",
"[",
"'-arpa'",
",",
"output_file",
"]",
")",
"else",
":",
"cmd",
".",
"extend",
"(",
"[",
"'-binary'",
",",
"output_file",
"]",
")",
"if",
"four_byte_counts",
":",
"cmd",
".",
"append",
"(",
"'-four_byte_counts'",
")",
"if",
"zeroton_fraction",
":",
"cmd",
".",
"append",
"(",
"'-zeroton_fraction'",
")",
"if",
"ascii_input",
":",
"cmd",
".",
"append",
"(",
"'-ascii_input'",
")",
"else",
":",
"cmd",
".",
"append",
"(",
"'-bin_input'",
")",
"# Ensure that every parameter is of type 'str'",
"cmd",
"=",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"cmd",
"]",
"with",
"tempfile",
".",
"SpooledTemporaryFile",
"(",
")",
"as",
"output_f",
":",
"with",
"output_to_debuglogger",
"(",
")",
"as",
"err_f",
":",
"exitcode",
"=",
"subprocess",
".",
"call",
"(",
"cmd",
",",
"stdout",
"=",
"output_f",
",",
"stderr",
"=",
"err_f",
")",
"output",
"=",
"output_f",
".",
"read",
"(",
")",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"debug",
"(",
"\"Command '%s' returned with exit code '%d'.\"",
"%",
"(",
"' '",
".",
"join",
"(",
"cmd",
")",
",",
"exitcode",
")",
")",
"if",
"exitcode",
"!=",
"0",
":",
"raise",
"ConversionError",
"(",
"\"'%s' returned with non-zero exit status '%s'\"",
"%",
"(",
"cmd",
"[",
"0",
"]",
",",
"exitcode",
")",
")",
"if",
"sys",
".",
"version_info",
">=",
"(",
"3",
",",
")",
"and",
"type",
"(",
"output",
")",
"is",
"bytes",
":",
"output",
"=",
"output",
".",
"decode",
"(",
"'utf-8'",
")",
"return",
"output",
".",
"strip",
"(",
")"
] | Takes an idngram-file (in either binary (by default) or ASCII (if specified) format), a vocabulary file, and (optionally) a context cues file. Additional command line parameters will specify the cutoffs, the discounting strategy and parameters, etc. It outputs a language model, in either binary format (to be read by evallm), or in ARPA format. | [
"Takes",
"an",
"idngram",
"-",
"file",
"(",
"in",
"either",
"binary",
"(",
"by",
"default",
")",
"or",
"ASCII",
"(",
"if",
"specified",
")",
"format",
")",
"a",
"vocabulary",
"file",
"and",
"(",
"optionally",
")",
"a",
"context",
"cues",
"file",
".",
"Additional",
"command",
"line",
"parameters",
"will",
"specify",
"the",
"cutoffs",
"the",
"discounting",
"strategy",
"and",
"parameters",
"etc",
".",
"It",
"outputs",
"a",
"language",
"model",
"in",
"either",
"binary",
"format",
"(",
"to",
"be",
"read",
"by",
"evallm",
")",
"or",
"in",
"ARPA",
"format",
"."
] | python | train |
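
A brief usage sketch for the wrapper above. The file names are placeholders and it is assumed that the CMU-Cambridge SLM toolkit binaries (including idngram2lm) are installed and on PATH; none of this is part of the record itself.

    from cmuclmtk import idngram2lm

    # Hypothetical files produced by earlier toolkit steps (text2idngram, wfreq2vocab).
    log = idngram2lm('corpus.idngram', 'corpus.vocab', 'corpus.arpa',
                     n=3, arpa_output=True)
    print(log)  # captured, stripped stdout of the external tool
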
mozilla-releng/scriptworker | scriptworker/ed25519.py | https://github.com/mozilla-releng/scriptworker/blob/8e97bbd83b9b578565ec57904c966dd6ae4ef0ae/scriptworker/ed25519.py#L86-L100 | def ed25519_private_key_to_string(key):
"""Convert an ed25519 private key to a base64-encoded string.
Args:
key (Ed25519PrivateKey): the key to serialize.
Returns:
str: the key representation as a str
"""
return base64.b64encode(key.private_bytes(
encoding=serialization.Encoding.Raw,
format=serialization.PrivateFormat.Raw,
encryption_algorithm=serialization.NoEncryption()
), None).decode('utf-8') | [
"def",
"ed25519_private_key_to_string",
"(",
"key",
")",
":",
"return",
"base64",
".",
"b64encode",
"(",
"key",
".",
"private_bytes",
"(",
"encoding",
"=",
"serialization",
".",
"Encoding",
".",
"Raw",
",",
"format",
"=",
"serialization",
".",
"PrivateFormat",
".",
"Raw",
",",
"encryption_algorithm",
"=",
"serialization",
".",
"NoEncryption",
"(",
")",
")",
",",
"None",
")",
".",
"decode",
"(",
"'utf-8'",
")"
] | Convert an ed25519 private key to a base64-encoded string.
Args:
key (Ed25519PrivateKey): the key to serialize.
Returns:
str: the key representation as a str | [
"Convert",
"an",
"ed25519",
"private",
"key",
"to",
"a",
"base64",
"-",
"encoded",
"string",
"."
] | python | train |
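
A minimal round-trip sketch, assuming the `cryptography` package is available; the key is generated ad hoc here rather than loaded from scriptworker configuration.

    from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey
    from scriptworker.ed25519 import ed25519_private_key_to_string

    key = Ed25519PrivateKey.generate()             # fresh random private key
    key_str = ed25519_private_key_to_string(key)   # base64 of the 32 raw key bytes
    print(key_str)
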
StackStorm/pybind | pybind/nos/v6_0_2f/overlay_gateway/site/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/overlay_gateway/site/__init__.py#L144-L169 | def _set_tunnel_dst(self, v, load=False):
"""
Setter method for tunnel_dst, mapped from YANG variable /overlay_gateway/site/tunnel_dst (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_tunnel_dst is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tunnel_dst() directly.
YANG Description: Site IP address configuration represents
destination IP of tunnel to the site. Tunnel will
not be setup without the IP address configuration.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("address",tunnel_dst.tunnel_dst, yang_name="tunnel-dst", rest_name="ip", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='address', extensions={u'tailf-common': {u'info': u'IP configuration for site.', u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'alt-name': u'ip', u'callpoint': u'overlay-site-ip-cp'}}), is_container='list', yang_name="tunnel-dst", rest_name="ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IP configuration for site.', u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'alt-name': u'ip', u'callpoint': u'overlay-site-ip-cp'}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """tunnel_dst must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("address",tunnel_dst.tunnel_dst, yang_name="tunnel-dst", rest_name="ip", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='address', extensions={u'tailf-common': {u'info': u'IP configuration for site.', u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'alt-name': u'ip', u'callpoint': u'overlay-site-ip-cp'}}), is_container='list', yang_name="tunnel-dst", rest_name="ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IP configuration for site.', u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'alt-name': u'ip', u'callpoint': u'overlay-site-ip-cp'}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='list', is_config=True)""",
})
self.__tunnel_dst = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_tunnel_dst",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"YANGListType",
"(",
"\"address\"",
",",
"tunnel_dst",
".",
"tunnel_dst",
",",
"yang_name",
"=",
"\"tunnel-dst\"",
",",
"rest_name",
"=",
"\"ip\"",
",",
"parent",
"=",
"self",
",",
"is_container",
"=",
"'list'",
",",
"user_ordered",
"=",
"False",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"yang_keys",
"=",
"'address'",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'IP configuration for site.'",
",",
"u'cli-suppress-mode'",
":",
"None",
",",
"u'cli-compact-syntax'",
":",
"None",
",",
"u'alt-name'",
":",
"u'ip'",
",",
"u'callpoint'",
":",
"u'overlay-site-ip-cp'",
"}",
"}",
")",
",",
"is_container",
"=",
"'list'",
",",
"yang_name",
"=",
"\"tunnel-dst\"",
",",
"rest_name",
"=",
"\"ip\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u'IP configuration for site.'",
",",
"u'cli-suppress-mode'",
":",
"None",
",",
"u'cli-compact-syntax'",
":",
"None",
",",
"u'alt-name'",
":",
"u'ip'",
",",
"u'callpoint'",
":",
"u'overlay-site-ip-cp'",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-tunnels'",
",",
"defining_module",
"=",
"'brocade-tunnels'",
",",
"yang_type",
"=",
"'list'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"tunnel_dst must be of a type compatible with list\"\"\"",
",",
"'defined-type'",
":",
"\"list\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=YANGListType(\"address\",tunnel_dst.tunnel_dst, yang_name=\"tunnel-dst\", rest_name=\"ip\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='address', extensions={u'tailf-common': {u'info': u'IP configuration for site.', u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'alt-name': u'ip', u'callpoint': u'overlay-site-ip-cp'}}), is_container='list', yang_name=\"tunnel-dst\", rest_name=\"ip\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IP configuration for site.', u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'alt-name': u'ip', u'callpoint': u'overlay-site-ip-cp'}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='list', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__tunnel_dst",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | Setter method for tunnel_dst, mapped from YANG variable /overlay_gateway/site/tunnel_dst (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_tunnel_dst is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tunnel_dst() directly.
YANG Description: Site IP address configuration represents
destination IP of tunnel to the site. Tunnel will
not be setup without the IP address configuration. | [
"Setter",
"method",
"for",
"tunnel_dst",
"mapped",
"from",
"YANG",
"variable",
"/",
"overlay_gateway",
"/",
"site",
"/",
"tunnel_dst",
"(",
"list",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_tunnel_dst",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_tunnel_dst",
"()",
"directly",
"."
] | python | train |
junzis/pyModeS | pyModeS/decoder/adsb.py | https://github.com/junzis/pyModeS/blob/8cd5655a04b08171a9ad5f1ffd232b7e0178ea53/pyModeS/decoder/adsb.py#L88-L114 | def position_with_ref(msg, lat_ref, lon_ref):
"""Decode position with only one message,
knowing reference nearby location, such as previously
calculated location, ground station, or airport location, etc.
Works with both airborne and surface position messages.
The reference position shall be with in 180NM (airborne) or 45NM (surface)
of the true position.
Args:
msg (string): even message (28 bytes hexadecimal string)
lat_ref: previous known latitude
lon_ref: previous known longitude
Returns:
(float, float): (latitude, longitude) of the aircraft
"""
tc = typecode(msg)
if 5<=tc<=8:
return surface_position_with_ref(msg, lat_ref, lon_ref)
elif 9<=tc<=18 or 20<=tc<=22:
return airborne_position_with_ref(msg, lat_ref, lon_ref)
else:
raise RuntimeError("incorrect or inconsistant message types") | [
"def",
"position_with_ref",
"(",
"msg",
",",
"lat_ref",
",",
"lon_ref",
")",
":",
"tc",
"=",
"typecode",
"(",
"msg",
")",
"if",
"5",
"<=",
"tc",
"<=",
"8",
":",
"return",
"surface_position_with_ref",
"(",
"msg",
",",
"lat_ref",
",",
"lon_ref",
")",
"elif",
"9",
"<=",
"tc",
"<=",
"18",
"or",
"20",
"<=",
"tc",
"<=",
"22",
":",
"return",
"airborne_position_with_ref",
"(",
"msg",
",",
"lat_ref",
",",
"lon_ref",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"incorrect or inconsistant message types\"",
")"
] | Decode position with only one message,
knowing a nearby reference location, such as a previously
calculated location, ground station, or airport location, etc.
Works with both airborne and surface position messages.
The reference position shall be within 180NM (airborne) or 45NM (surface)
of the true position.
Args:
msg (string): even message (28 bytes hexadecimal string)
lat_ref: previous known latitude
lon_ref: previous known longitude
Returns:
(float, float): (latitude, longitude) of the aircraft | [
"Decode",
"position",
"with",
"only",
"one",
"message",
"knowing",
"reference",
"nearby",
"location",
"such",
"as",
"previously",
"calculated",
"location",
"ground",
"station",
"or",
"airport",
"location",
"etc",
".",
"Works",
"with",
"both",
"airborne",
"and",
"surface",
"position",
"messages",
".",
"The",
"reference",
"position",
"shall",
"be",
"with",
"in",
"180NM",
"(",
"airborne",
")",
"or",
"45NM",
"(",
"surface",
")",
"of",
"the",
"true",
"position",
"."
] | python | train |
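
A hedged usage sketch. The hex message and the reference coordinates below are illustrative assumptions only; a real call needs an actual airborne or surface position message and a reference within the stated range.

    import pyModeS as pms

    msg = "8D40621D58C382D690C8AC2863A7"   # illustrative 28-hex-char airborne position message
    lat, lon = pms.adsb.position_with_ref(msg, 49.0, 6.0)
    print(lat, lon)
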
numenta/htmresearch | htmresearch/frameworks/layers/physical_objects.py | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/physical_objects.py#L447-L455 | def _sampleFromFaces(self):
"""
We start by sampling a dimension to "max out", then sample the sign and
the other dimensions' values.
"""
coordinates = [random.uniform(-1, 1) * dim / 2. for dim in self.dimensions]
dim = random.choice(range(self.dimension))
coordinates[dim] = self.dimensions[dim] / 2. * random.choice([-1, 1])
return coordinates | [
"def",
"_sampleFromFaces",
"(",
"self",
")",
":",
"coordinates",
"=",
"[",
"random",
".",
"uniform",
"(",
"-",
"1",
",",
"1",
")",
"*",
"dim",
"/",
"2.",
"for",
"dim",
"in",
"self",
".",
"dimensions",
"]",
"dim",
"=",
"random",
".",
"choice",
"(",
"range",
"(",
"self",
".",
"dimension",
")",
")",
"coordinates",
"[",
"dim",
"]",
"=",
"self",
".",
"dimensions",
"[",
"dim",
"]",
"/",
"2.",
"*",
"random",
".",
"choice",
"(",
"[",
"-",
"1",
",",
"1",
"]",
")",
"return",
"coordinates"
] | We start by sampling a dimension to "max out", then sample the sign and
the other dimensions' values. | [
"We",
"start",
"by",
"sampling",
"a",
"dimension",
"to",
"max",
"out",
"then",
"sample",
"the",
"sign",
"and",
"the",
"other",
"dimensions",
"values",
"."
] | python | train |
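
A self-contained sketch of the same face-sampling idea, decoupled from the htmresearch class so it runs on its own; the box is assumed to be axis-aligned and centred at the origin, as in the record.

    import random

    def sample_point_on_box_face(dimensions):
        # Draw every coordinate uniformly inside the box, then pin one randomly
        # chosen axis to +/- half its extent so the point lies on a face.
        point = [random.uniform(-1, 1) * d / 2.0 for d in dimensions]
        axis = random.randrange(len(dimensions))
        point[axis] = dimensions[axis] / 2.0 * random.choice([-1, 1])
        return point

    print(sample_point_on_box_face([2.0, 4.0, 6.0]))
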
saltstack/salt | salt/cloud/clouds/vmware.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/vmware.py#L4553-L4620 | def shutdown_host(kwargs=None, call=None):
'''
Shut down the specified host system in this VMware environment
.. note::
If the host system is not in maintenance mode, it will not be shut down. If you
want to shut down the host system regardless of whether it is in maintenance mode,
set ``force=True``. Default is ``force=False``.
CLI Example:
.. code-block:: bash
salt-cloud -f shutdown_host my-vmware-config host="myHostSystemName" [force=True]
'''
if call != 'function':
raise SaltCloudSystemExit(
'The shutdown_host function must be called with '
'-f or --function.'
)
host_name = kwargs.get('host') if kwargs and 'host' in kwargs else None
force = _str_to_bool(kwargs.get('force')) if kwargs and 'force' in kwargs else False
if not host_name:
raise SaltCloudSystemExit(
'You must specify name of the host system.'
)
# Get the service instance
si = _get_si()
host_ref = salt.utils.vmware.get_mor_by_property(si, vim.HostSystem, host_name)
if not host_ref:
raise SaltCloudSystemExit(
'Specified host system does not exist.'
)
if host_ref.runtime.connectionState == 'notResponding':
raise SaltCloudSystemExit(
'Specified host system cannot be shut down in its current state (not responding).'
)
if not host_ref.capability.rebootSupported:
raise SaltCloudSystemExit(
'Specified host system does not support shutdown.'
)
if not host_ref.runtime.inMaintenanceMode and not force:
raise SaltCloudSystemExit(
'Specified host system is not in maintenance mode. Specify force=True to '
'force reboot even if there are virtual machines running or other operations '
'in progress.'
)
try:
host_ref.ShutdownHost_Task(force)
except Exception as exc:
log.error(
'Error while shutting down host %s: %s',
host_name, exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return {host_name: 'failed to shut down host'}
return {host_name: 'shut down host'} | [
"def",
"shutdown_host",
"(",
"kwargs",
"=",
"None",
",",
"call",
"=",
"None",
")",
":",
"if",
"call",
"!=",
"'function'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The shutdown_host function must be called with '",
"'-f or --function.'",
")",
"host_name",
"=",
"kwargs",
".",
"get",
"(",
"'host'",
")",
"if",
"kwargs",
"and",
"'host'",
"in",
"kwargs",
"else",
"None",
"force",
"=",
"_str_to_bool",
"(",
"kwargs",
".",
"get",
"(",
"'force'",
")",
")",
"if",
"kwargs",
"and",
"'force'",
"in",
"kwargs",
"else",
"False",
"if",
"not",
"host_name",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'You must specify name of the host system.'",
")",
"# Get the service instance",
"si",
"=",
"_get_si",
"(",
")",
"host_ref",
"=",
"salt",
".",
"utils",
".",
"vmware",
".",
"get_mor_by_property",
"(",
"si",
",",
"vim",
".",
"HostSystem",
",",
"host_name",
")",
"if",
"not",
"host_ref",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'Specified host system does not exist.'",
")",
"if",
"host_ref",
".",
"runtime",
".",
"connectionState",
"==",
"'notResponding'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'Specified host system cannot be shut down in it\\'s current state (not responding).'",
")",
"if",
"not",
"host_ref",
".",
"capability",
".",
"rebootSupported",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'Specified host system does not support shutdown.'",
")",
"if",
"not",
"host_ref",
".",
"runtime",
".",
"inMaintenanceMode",
"and",
"not",
"force",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'Specified host system is not in maintenance mode. Specify force=True to '",
"'force reboot even if there are virtual machines running or other operations '",
"'in progress.'",
")",
"try",
":",
"host_ref",
".",
"ShutdownHost_Task",
"(",
"force",
")",
"except",
"Exception",
"as",
"exc",
":",
"log",
".",
"error",
"(",
"'Error while shutting down host %s: %s'",
",",
"host_name",
",",
"exc",
",",
"# Show the traceback if the debug logging level is enabled",
"exc_info_on_loglevel",
"=",
"logging",
".",
"DEBUG",
")",
"return",
"{",
"host_name",
":",
"'failed to shut down host'",
"}",
"return",
"{",
"host_name",
":",
"'shut down host'",
"}"
] | Shut down the specified host system in this VMware environment
.. note::
If the host system is not in maintenance mode, it will not be shut down. If you
want to shut down the host system regardless of whether it is in maintenance mode,
set ``force=True``. Default is ``force=False``.
CLI Example:
.. code-block:: bash
salt-cloud -f shutdown_host my-vmware-config host="myHostSystemName" [force=True] | [
"Shut",
"down",
"the",
"specified",
"host",
"system",
"in",
"this",
"VMware",
"environment"
] | python | train |
pyviz/holoviews | holoviews/plotting/bokeh/renderer.py | https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/bokeh/renderer.py#L123-L137 | def get_plot(self_or_cls, obj, doc=None, renderer=None, **kwargs):
"""
Given a HoloViews Viewable return a corresponding plot instance.
Allows supplying a document to attach the plot to, useful when
combining the bokeh model with another plot.
"""
if doc is None:
doc = Document() if self_or_cls.notebook_context else curdoc()
if self_or_cls.notebook_context:
curdoc().theme = self_or_cls.theme
doc.theme = self_or_cls.theme
plot = super(BokehRenderer, self_or_cls).get_plot(obj, renderer, **kwargs)
plot.document = doc
return plot | [
"def",
"get_plot",
"(",
"self_or_cls",
",",
"obj",
",",
"doc",
"=",
"None",
",",
"renderer",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"doc",
"is",
"None",
":",
"doc",
"=",
"Document",
"(",
")",
"if",
"self_or_cls",
".",
"notebook_context",
"else",
"curdoc",
"(",
")",
"if",
"self_or_cls",
".",
"notebook_context",
":",
"curdoc",
"(",
")",
".",
"theme",
"=",
"self_or_cls",
".",
"theme",
"doc",
".",
"theme",
"=",
"self_or_cls",
".",
"theme",
"plot",
"=",
"super",
"(",
"BokehRenderer",
",",
"self_or_cls",
")",
".",
"get_plot",
"(",
"obj",
",",
"renderer",
",",
"*",
"*",
"kwargs",
")",
"plot",
".",
"document",
"=",
"doc",
"return",
"plot"
] | Given a HoloViews Viewable return a corresponding plot instance.
Allows supplying a document to attach the plot to, useful when
combining the bokeh model with another plot. | [
"Given",
"a",
"HoloViews",
"Viewable",
"return",
"a",
"corresponding",
"plot",
"instance",
".",
"Allows",
"supplying",
"a",
"document",
"attach",
"the",
"plot",
"to",
"useful",
"when",
"combining",
"the",
"bokeh",
"model",
"with",
"another",
"plot",
"."
] | python | train |
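
A short usage sketch, assuming HoloViews with the bokeh backend is installed; it fetches the plot for a small element and inspects the bokeh Document it was attached to.

    import holoviews as hv

    hv.extension('bokeh')                          # activate the bokeh plotting backend
    renderer = hv.renderer('bokeh')                # a BokehRenderer instance
    plot = renderer.get_plot(hv.Curve([(0, 0), (1, 1), (2, 4)]))
    print(plot.document)                           # the Document assigned by get_plot
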
springload/wagtaildraftail | wagtaildraftail/widgets.py | https://github.com/springload/wagtaildraftail/blob/87f1ae3ade493c00daff021394051aa656136c10/wagtaildraftail/widgets.py#L75-L85 | def intercept_image_formats(self, options):
"""
Load all image formats if needed.
"""
if 'entityTypes' in options:
for entity in options['entityTypes']:
if entity['type'] == ENTITY_TYPES.IMAGE and 'imageFormats' in entity:
if entity['imageFormats'] == '__all__':
entity['imageFormats'] = get_all_image_formats()
return options | [
"def",
"intercept_image_formats",
"(",
"self",
",",
"options",
")",
":",
"if",
"'entityTypes'",
"in",
"options",
":",
"for",
"entity",
"in",
"options",
"[",
"'entityTypes'",
"]",
":",
"if",
"entity",
"[",
"'type'",
"]",
"==",
"ENTITY_TYPES",
".",
"IMAGE",
"and",
"'imageFormats'",
"in",
"entity",
":",
"if",
"entity",
"[",
"'imageFormats'",
"]",
"==",
"'__all__'",
":",
"entity",
"[",
"'imageFormats'",
"]",
"=",
"get_all_image_formats",
"(",
")",
"return",
"options"
] | Load all image formats if needed. | [
"Load",
"all",
"image",
"formats",
"if",
"needed",
"."
] | python | train |
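
A standalone illustration of the substitution performed above, with the Wagtail format lookup stubbed out; the stub's name and return shape are assumptions, not Wagtail's actual API.

    # Stub standing in for wagtaildraftail's get_all_image_formats().
    def get_all_image_formats():
        return [('fullwidth', 'Full width'), ('left', 'Left-aligned')]

    def expand_image_formats(options):
        # Replace the '__all__' sentinel on IMAGE entity types with the full list.
        for entity in options.get('entityTypes', []):
            if entity.get('type') == 'IMAGE' and entity.get('imageFormats') == '__all__':
                entity['imageFormats'] = get_all_image_formats()
        return options

    opts = {'entityTypes': [{'type': 'IMAGE', 'imageFormats': '__all__'}]}
    print(expand_image_formats(opts))
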
devoperate/chronos | chronos/helpers.py | https://github.com/devoperate/chronos/blob/5ae6047c4f13db9f5e85a0c72a3dc47f05a8d7bd/chronos/helpers.py#L65-L79 | def git_tag_to_semver(git_tag: str) -> SemVer:
"""
:git_tag: A string representation of a Git tag.
Searches a Git tag's string representation for a SemVer, and returns that
as a SemVer object.
"""
pattern = re.compile(r'[0-9]+\.[0-9]+\.[0-9]+$')
match = pattern.search(git_tag)
if match:
version = match.group(0)
else:
raise InvalidTagFormatException('Tag passed contains no SemVer.')
return SemVer.from_str(version) | [
"def",
"git_tag_to_semver",
"(",
"git_tag",
":",
"str",
")",
"->",
"SemVer",
":",
"pattern",
"=",
"re",
".",
"compile",
"(",
"r'[0-9]+\\.[0-9]+\\.[0-9]+$'",
")",
"match",
"=",
"pattern",
".",
"search",
"(",
"git_tag",
")",
"if",
"match",
":",
"version",
"=",
"match",
".",
"group",
"(",
"0",
")",
"else",
":",
"raise",
"InvalidTagFormatException",
"(",
"'Tag passed contains no SemVer.'",
")",
"return",
"SemVer",
".",
"from_str",
"(",
"version",
")"
] | :git_tag: A string representation of a Git tag.
Searches a Git tag's string representation for a SemVer, and returns that
as a SemVer object. | [
":",
"git_tag",
":",
"A",
"string",
"representation",
"of",
"a",
"Git",
"tag",
"."
] | python | train |
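
The regex behaviour is easy to verify in isolation; this sketch reuses the same pattern without the package's SemVer class, which is assumed to live elsewhere in chronos.

    import re

    pattern = re.compile(r'[0-9]+\.[0-9]+\.[0-9]+$')
    for tag in ('v1.2.3', 'release-0.10.0', 'nightly'):
        match = pattern.search(tag)
        print(tag, '->', match.group(0) if match else 'no SemVer found')
    # v1.2.3 -> 1.2.3; release-0.10.0 -> 0.10.0; nightly -> no SemVer found
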
PonteIneptique/collatinus-python | pycollatinus/lemmatiseur.py | https://github.com/PonteIneptique/collatinus-python/blob/fca37b0b77bc60f47d3c24ab42f6d0bdca6ba0f5/pycollatinus/lemmatiseur.py#L211-L225 | def _lemmatise_contractions(self, f, *args, **kwargs):
""" Lemmatise un mot f avec sa contraction
:param f: Mot à lemmatiser
:yield: Match formated like in _lemmatise()
"""
fd = f
for contraction, decontraction in self._contractions.items():
if fd.endswith(contraction):
fd = f[:-len(contraction)]
if "v" in fd or "V" in fd:
fd += decontraction
else:
fd += deramise(decontraction)
yield from self._lemmatise(fd, *args, **kwargs) | [
"def",
"_lemmatise_contractions",
"(",
"self",
",",
"f",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"fd",
"=",
"f",
"for",
"contraction",
",",
"decontraction",
"in",
"self",
".",
"_contractions",
".",
"items",
"(",
")",
":",
"if",
"fd",
".",
"endswith",
"(",
"contraction",
")",
":",
"fd",
"=",
"f",
"[",
":",
"-",
"len",
"(",
"contraction",
")",
"]",
"if",
"\"v\"",
"in",
"fd",
"or",
"\"V\"",
"in",
"fd",
":",
"fd",
"+=",
"decontraction",
"else",
":",
"fd",
"+=",
"deramise",
"(",
"decontraction",
")",
"yield",
"from",
"self",
".",
"_lemmatise",
"(",
"fd",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Lemmatise a word f with its contraction
:param f: Word to lemmatise
:yield: Match formatted like in _lemmatise() | [
"Lemmatise",
"un",
"mot",
"f",
"avec",
"sa",
"contraction"
] | python | train |
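
A schematic, self-contained sketch of the suffix-expansion step above; the contraction table is an invented placeholder rather than Collatinus data, and the ramisation/deramisation handling is omitted.

    def expand_contraction(word, contractions):
        # Mirror the suffix swap of _lemmatise_contractions: strip the contracted
        # ending and append its full form, yielding each candidate expansion.
        for contraction, expansion in contractions.items():
            if word.endswith(contraction):
                yield word[:-len(contraction)] + expansion

    fake_contractions = {"um": "orum"}          # placeholder mapping
    print(list(expand_contraction("deum", fake_contractions)))   # ['deorum']
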
arve0/leicascanningtemplate | leicascanningtemplate/template.py | https://github.com/arve0/leicascanningtemplate/blob/053e075d3bed11e335b61ce048c47067b8e9e921/leicascanningtemplate/template.py#L370-L415 | def write(self, filename=None):
"""Save template to xml. Before saving template will update
date, start position, well positions, and counts.
Parameters
----------
filename : str
If not set, XML will be written to self.filename.
"""
if not filename:
filename = self.filename
# update time
self.properties.CurrentDate = _current_time()
# set rubber band to true
self.properties.EnableRubberBand = 'true'
# update start position
self.update_start_position()
# update well positions
self.update_well_positions()
# update counts
self.update_counts()
# remove py:pytype attributes
objectify.deannotate(self.root)
# remove namespaces added by lxml
for child in self.root.iterchildren():
etree.cleanup_namespaces(child)
xml = etree.tostring(self.root, encoding='utf8',
xml_declaration=True, pretty_print=True)
# fix format quirks
# add carriage return character
xml = u'\r\n'.join(l.decode(encoding='utf8') for l in xml.splitlines())
# add space at "end/>" --> "end />"
xml = re.sub(r'(["a-z])/>', r'\1 />', xml)
xml = xml.replace("version='1.0' encoding='utf8'", 'version="1.0"')
with open(filename, 'wb') as f:
f.write(xml.encode('utf8')) | [
"def",
"write",
"(",
"self",
",",
"filename",
"=",
"None",
")",
":",
"if",
"not",
"filename",
":",
"filename",
"=",
"self",
".",
"filename",
"# update time",
"self",
".",
"properties",
".",
"CurrentDate",
"=",
"_current_time",
"(",
")",
"# set rubber band to true",
"self",
".",
"properties",
".",
"EnableRubberBand",
"=",
"'true'",
"# update start position",
"self",
".",
"update_start_position",
"(",
")",
"# update well postions",
"self",
".",
"update_well_positions",
"(",
")",
"# update counts",
"self",
".",
"update_counts",
"(",
")",
"# remove py:pytype attributes",
"objectify",
".",
"deannotate",
"(",
"self",
".",
"root",
")",
"# remove namespaces added by lxml",
"for",
"child",
"in",
"self",
".",
"root",
".",
"iterchildren",
"(",
")",
":",
"etree",
".",
"cleanup_namespaces",
"(",
"child",
")",
"xml",
"=",
"etree",
".",
"tostring",
"(",
"self",
".",
"root",
",",
"encoding",
"=",
"'utf8'",
",",
"xml_declaration",
"=",
"True",
",",
"pretty_print",
"=",
"True",
")",
"# fix format quirks",
"# add carriage return character",
"xml",
"=",
"u'\\r\\n'",
".",
"join",
"(",
"l",
".",
"decode",
"(",
"encoding",
"=",
"'utf8'",
")",
"for",
"l",
"in",
"xml",
".",
"splitlines",
"(",
")",
")",
"# add space at \"end/>\" --> \"end />\"",
"xml",
"=",
"re",
".",
"sub",
"(",
"r'([\"a-z])/>'",
",",
"r'\\1 />'",
",",
"xml",
")",
"xml",
"=",
"xml",
".",
"replace",
"(",
"\"version='1.0' encoding='utf8'\"",
",",
"'version=\"1.0\"'",
")",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"xml",
".",
"encode",
"(",
"'utf8'",
")",
")"
] | Save template to xml. Before saving template will update
date, start position, well positions, and counts.
Parameters
----------
filename : str
If not set, XML will be written to self.filename. | [
"Save",
"template",
"to",
"xml",
".",
"Before",
"saving",
"template",
"will",
"update",
"date",
"start",
"position",
"well",
"positions",
"and",
"counts",
"."
] | python | train |
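
A usage sketch; the `ScanningTemplate` class name and the file names are assumptions based on the package layout rather than part of this record.

    from leicascanningtemplate import ScanningTemplate

    tmpl = ScanningTemplate('template.xml')     # load an existing Leica template
    # write() refreshes date, start position, well positions and counts, then saves.
    tmpl.write('template_updated.xml')
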
MillionIntegrals/vel | vel/util/intepolate.py | https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/util/intepolate.py#L53-L55 | def interpolate_single(start, end, coefficient, how='linear'):
""" Interpolate single value between start and end in given number of steps """
return INTERP_SINGLE_DICT[how](start, end, coefficient) | [
"def",
"interpolate_single",
"(",
"start",
",",
"end",
",",
"coefficient",
",",
"how",
"=",
"'linear'",
")",
":",
"return",
"INTERP_SINGLE_DICT",
"[",
"how",
"]",
"(",
"start",
",",
"end",
",",
"coefficient",
")"
] | Interpolate single value between start and end in given number of steps | [
"Interpolate",
"single",
"value",
"between",
"start",
"and",
"end",
"in",
"given",
"number",
"of",
"steps"
] | python | train |
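
A self-contained sketch of the linear case, to make the coefficient semantics concrete; the interpolation function body is an assumption since the record only shows the dispatch.

    def interp_linear(start, end, coefficient):
        # coefficient 0.0 returns start, 1.0 returns end, values in between blend linearly.
        return start + (end - start) * coefficient

    INTERP_SINGLE_DICT = {'linear': interp_linear}

    def interpolate_single(start, end, coefficient, how='linear'):
        return INTERP_SINGLE_DICT[how](start, end, coefficient)

    print(interpolate_single(0.0, 10.0, 0.25))   # 2.5
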
tensorflow/tensor2tensor | tensor2tensor/trax/inputs.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/inputs.py#L262-L316 | def batch_fun(dataset, training, shapes, target_names, num_devices,
batch_size_per_device=32, batch_size=None, eval_batch_size=32,
bucket_length=32, buckets=None,
batch_shuffle_size=128, max_eval_length=None):
"""Batching function."""
del target_names
# Batch size is batch_size_per_device * num_devices unless given directly.
batch_size = batch_size or batch_size_per_device * num_devices
# If bucketing is not specified, check if target shapes are variable.
cur_batch_size = batch_size if training else eval_batch_size
# Make cur_batch_size divisible by num_devices.
cur_batch_size = max(cur_batch_size // num_devices, 1) * num_devices
# Create heuristic buckets is none are specified.
if buckets is None:
variable_target_shapes = False
target_shape = shapes[1]
for dim in target_shape:
if dim is None:
variable_target_shapes = True
tf.logging.info("Heuristically setting bucketing to %s based on shapes "
"of target tensors." % variable_target_shapes)
if variable_target_shapes:
bucket_boundaries = [bucket_length // 4, bucket_length // 2,
bucket_length, bucket_length * 2,
bucket_length * 4, bucket_length * 8,
bucket_length * 16]
# We will pad to boundaries which pads to bucket_boundary - 1: add 1 here.
bucket_boundaries = [b + 1 for b in bucket_boundaries]
if not training:
max_eval_length = max_eval_length or bucket_length * 32
bucket_boundaries[-1] = max_eval_length
bucket_batch_sizes = [cur_batch_size * 4, cur_batch_size * 2,
cur_batch_size, cur_batch_size // 2,
cur_batch_size // 4, cur_batch_size // 8,
cur_batch_size // 16, 1]
if not training:
bucket_batch_sizes[-2] = cur_batch_size // max_eval_length
# Make batch sizes divisible by num_devices.
bucket_batch_sizes = [max(b // num_devices, 1) * num_devices
for b in bucket_batch_sizes]
buckets = (bucket_boundaries, bucket_batch_sizes)
if buckets:
tf.logging.info("Bucketing with buckets %s." % str(buckets))
def example_length(_, target):
return tf.shape(target)[0]
boundaries, batch_sizes = buckets
dataset = dataset.apply(tf.data.experimental.bucket_by_sequence_length(
example_length, boundaries, batch_sizes,
pad_to_bucket_boundary=True))
else:
dataset = dataset.padded_batch(cur_batch_size, shapes)
if training:
return dataset.shuffle(batch_shuffle_size)
return dataset | [
"def",
"batch_fun",
"(",
"dataset",
",",
"training",
",",
"shapes",
",",
"target_names",
",",
"num_devices",
",",
"batch_size_per_device",
"=",
"32",
",",
"batch_size",
"=",
"None",
",",
"eval_batch_size",
"=",
"32",
",",
"bucket_length",
"=",
"32",
",",
"buckets",
"=",
"None",
",",
"batch_shuffle_size",
"=",
"128",
",",
"max_eval_length",
"=",
"None",
")",
":",
"del",
"target_names",
"# Batch size is batch_size_per_device * num_devices unless given directly.",
"batch_size",
"=",
"batch_size",
"or",
"batch_size_per_device",
"*",
"num_devices",
"# If bucketing is not specified, check if target shapes are variable.",
"cur_batch_size",
"=",
"batch_size",
"if",
"training",
"else",
"eval_batch_size",
"# Make cur_batch_size divisible by num_devices.",
"cur_batch_size",
"=",
"max",
"(",
"cur_batch_size",
"//",
"num_devices",
",",
"1",
")",
"*",
"num_devices",
"# Create heuristic buckets is none are specified.",
"if",
"buckets",
"is",
"None",
":",
"variable_target_shapes",
"=",
"False",
"target_shape",
"=",
"shapes",
"[",
"1",
"]",
"for",
"dim",
"in",
"target_shape",
":",
"if",
"dim",
"is",
"None",
":",
"variable_target_shapes",
"=",
"True",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Heuristically setting bucketing to %s based on shapes \"",
"\"of target tensors.\"",
"%",
"variable_target_shapes",
")",
"if",
"variable_target_shapes",
":",
"bucket_boundaries",
"=",
"[",
"bucket_length",
"//",
"4",
",",
"bucket_length",
"//",
"2",
",",
"bucket_length",
",",
"bucket_length",
"*",
"2",
",",
"bucket_length",
"*",
"4",
",",
"bucket_length",
"*",
"8",
",",
"bucket_length",
"*",
"16",
"]",
"# We will pad to boundaries which pads to bucket_boundary - 1: add 1 here.",
"bucket_boundaries",
"=",
"[",
"b",
"+",
"1",
"for",
"b",
"in",
"bucket_boundaries",
"]",
"if",
"not",
"training",
":",
"max_eval_length",
"=",
"max_eval_length",
"or",
"bucket_length",
"*",
"32",
"bucket_boundaries",
"[",
"-",
"1",
"]",
"=",
"max_eval_length",
"bucket_batch_sizes",
"=",
"[",
"cur_batch_size",
"*",
"4",
",",
"cur_batch_size",
"*",
"2",
",",
"cur_batch_size",
",",
"cur_batch_size",
"//",
"2",
",",
"cur_batch_size",
"//",
"4",
",",
"cur_batch_size",
"//",
"8",
",",
"cur_batch_size",
"//",
"16",
",",
"1",
"]",
"if",
"not",
"training",
":",
"bucket_batch_sizes",
"[",
"-",
"2",
"]",
"=",
"cur_batch_size",
"//",
"max_eval_length",
"# Make batch sizes divisible by num_devices.",
"bucket_batch_sizes",
"=",
"[",
"max",
"(",
"b",
"//",
"num_devices",
",",
"1",
")",
"*",
"num_devices",
"for",
"b",
"in",
"bucket_batch_sizes",
"]",
"buckets",
"=",
"(",
"bucket_boundaries",
",",
"bucket_batch_sizes",
")",
"if",
"buckets",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Bucketing with buckets %s.\"",
"%",
"str",
"(",
"buckets",
")",
")",
"def",
"example_length",
"(",
"_",
",",
"target",
")",
":",
"return",
"tf",
".",
"shape",
"(",
"target",
")",
"[",
"0",
"]",
"boundaries",
",",
"batch_sizes",
"=",
"buckets",
"dataset",
"=",
"dataset",
".",
"apply",
"(",
"tf",
".",
"data",
".",
"experimental",
".",
"bucket_by_sequence_length",
"(",
"example_length",
",",
"boundaries",
",",
"batch_sizes",
",",
"pad_to_bucket_boundary",
"=",
"True",
")",
")",
"else",
":",
"dataset",
"=",
"dataset",
".",
"padded_batch",
"(",
"cur_batch_size",
",",
"shapes",
")",
"if",
"training",
":",
"return",
"dataset",
".",
"shuffle",
"(",
"batch_shuffle_size",
")",
"return",
"dataset"
] | Batching function. | [
"Batching",
"function",
"."
] | python | train |
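
The bucketing heuristic can be previewed without TensorFlow; this sketch recomputes the boundaries and per-bucket batch sizes for given settings, including the rounding to a multiple of the device count, mirroring the code above.

    def bucket_plan(bucket_length=32, batch_size=256, num_devices=8):
        boundaries = [bucket_length // 4, bucket_length // 2, bucket_length,
                      bucket_length * 2, bucket_length * 4, bucket_length * 8,
                      bucket_length * 16]
        boundaries = [b + 1 for b in boundaries]     # padding goes to boundary - 1
        sizes = [batch_size * 4, batch_size * 2, batch_size, batch_size // 2,
                 batch_size // 4, batch_size // 8, batch_size // 16, 1]
        sizes = [max(s // num_devices, 1) * num_devices for s in sizes]
        return boundaries, sizes

    print(bucket_plan())
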
cbclab/MOT | mot/mcmc_diagnostics.py | https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/mcmc_diagnostics.py#L153-L194 | def get_auto_correlation_time(chain, max_lag=None):
r"""Compute the auto correlation time up to the given lag for the given chain (1d vector).
This will halt when the maximum lag :math:`m` is reached or when the sum of two consecutive lags for any
odd lag is lower or equal to zero.
The auto correlation sum is estimated as:
.. math::
\tau = 1 + 2 * \sum_{k=1}^{m}{\rho_{k}}
Where :math:`\rho_{k}` is estimated as:
.. math::
\hat{\rho}_{k} = \frac{E[(X_{t} - \mu)(X_{t + k} - \mu)]}{\sigma^{2}}
Args:
chain (ndarray): the vector with the samples
max_lag (int): the maximum lag to use in the autocorrelation computation. If not given we use:
:math:`min(n/3, 1000)`.
"""
max_lag = max_lag or min(len(chain) // 3, 1000)
normalized_chain = chain - np.mean(chain, dtype=np.float64)
previous_accoeff = 0
auto_corr_sum = 0
for lag in range(1, max_lag):
auto_correlation_coeff = np.mean(normalized_chain[:len(chain) - lag] * normalized_chain[lag:], dtype=np.float64)
if lag % 2 == 0:
if previous_accoeff + auto_correlation_coeff <= 0:
break
auto_corr_sum += auto_correlation_coeff
previous_accoeff = auto_correlation_coeff
return auto_corr_sum / np.var(chain, dtype=np.float64) | [
"def",
"get_auto_correlation_time",
"(",
"chain",
",",
"max_lag",
"=",
"None",
")",
":",
"max_lag",
"=",
"max_lag",
"or",
"min",
"(",
"len",
"(",
"chain",
")",
"//",
"3",
",",
"1000",
")",
"normalized_chain",
"=",
"chain",
"-",
"np",
".",
"mean",
"(",
"chain",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"previous_accoeff",
"=",
"0",
"auto_corr_sum",
"=",
"0",
"for",
"lag",
"in",
"range",
"(",
"1",
",",
"max_lag",
")",
":",
"auto_correlation_coeff",
"=",
"np",
".",
"mean",
"(",
"normalized_chain",
"[",
":",
"len",
"(",
"chain",
")",
"-",
"lag",
"]",
"*",
"normalized_chain",
"[",
"lag",
":",
"]",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"if",
"lag",
"%",
"2",
"==",
"0",
":",
"if",
"previous_accoeff",
"+",
"auto_correlation_coeff",
"<=",
"0",
":",
"break",
"auto_corr_sum",
"+=",
"auto_correlation_coeff",
"previous_accoeff",
"=",
"auto_correlation_coeff",
"return",
"auto_corr_sum",
"/",
"np",
".",
"var",
"(",
"chain",
",",
"dtype",
"=",
"np",
".",
"float64",
")"
] | r"""Compute the auto correlation time up to the given lag for the given chain (1d vector).
This will halt when the maximum lag :math:`m` is reached or when the sum of two consecutive lags for any
odd lag is less than or equal to zero.
The auto correlation sum is estimated as:
.. math::
\tau = 1 + 2 * \sum_{k=1}^{m}{\rho_{k}}
Where :math:`\rho_{k}` is estimated as:
.. math::
\hat{\rho}_{k} = \frac{E[(X_{t} - \mu)(X_{t + k} - \mu)]}{\sigma^{2}}
Args:
chain (ndarray): the vector with the samples
max_lag (int): the maximum lag to use in the autocorrelation computation. If not given we use:
:math:`min(n/3, 1000)`. | [
"r",
"Compute",
"the",
"auto",
"correlation",
"time",
"up",
"to",
"the",
"given",
"lag",
"for",
"the",
"given",
"chain",
"(",
"1d",
"vector",
")",
"."
] | python | train |
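
A quick numerical check on a synthetic AR(1) chain, assuming the import path matches the record; a strongly correlated chain should yield an autocorrelation time well above 1, while white noise stays near 1.

    import numpy as np
    from mot.mcmc_diagnostics import get_auto_correlation_time

    rng = np.random.RandomState(0)
    chain = np.zeros(5000)
    for t in range(1, chain.size):
        chain[t] = 0.9 * chain[t - 1] + rng.randn()   # AR(1), strong lag-1 correlation

    print(get_auto_correlation_time(chain))
    print(get_auto_correlation_time(rng.randn(5000)))  # white noise for comparison
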
Valuehorizon/valuehorizon-companies | companies/models.py | https://github.com/Valuehorizon/valuehorizon-companies/blob/5366e230da69ee30fcdc1bf4beddc99310f6b767/companies/models.py#L386-L398 | def save(self, *args, **kwargs):
"""
Generate a name, and ensure amount is less than or equal to 100
"""
self.name = str(self.parent.name) + " - " + str(self.child.name) + " - " + str(self.ownership_type)
if self.amount > 100:
raise ValueError("Ownership amount cannot be more than 100%")
elif self.amount < 0:
raise ValueError("Ownership amount cannot be less than 0%")
else:
super(Ownership, self).save(*args, **kwargs) | [
"def",
"save",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"name",
"=",
"str",
"(",
"self",
".",
"parent",
".",
"name",
")",
"+",
"\" - \"",
"+",
"str",
"(",
"self",
".",
"child",
".",
"name",
")",
"+",
"\" - \"",
"+",
"str",
"(",
"self",
".",
"ownership_type",
")",
"if",
"self",
".",
"amount",
">",
"100",
":",
"raise",
"ValueError",
"(",
"\"Ownership amount cannot be more than 100%\"",
")",
"elif",
"self",
".",
"amount",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Ownership amount cannot be less than 0%\"",
")",
"else",
":",
"super",
"(",
"Ownership",
",",
"self",
")",
".",
"save",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Generate a name, and ensure amount is less than or equal to 100 | [
"Generate",
"a",
"name",
"and",
"ensure",
"amount",
"is",
"less",
"than",
"or",
"equal",
"to",
"100"
] | python | train |
emory-libraries/eulxml | eulxml/forms/xmlobject.py | https://github.com/emory-libraries/eulxml/blob/17d71c7d98c0cebda9932b7f13e72093805e1fe2/eulxml/forms/xmlobject.py#L595-L609 | def _update_subinstance(self, name, subform):
"""Save bound data for a single subform into the XmlObject model
instance."""
old_subinstance = getattr(self.instance, name)
new_subinstance = subform.update_instance()
# if our instance previously had no node for the subform AND the
# updated one has data, then attach the new node.
if old_subinstance is None and not new_subinstance.is_empty():
setattr(self.instance, name, new_subinstance)
# on the other hand, if the instance previously had a node for the
# subform AND the updated one is empty, then remove the node.
if old_subinstance is not None and new_subinstance.is_empty():
delattr(self.instance, name) | [
"def",
"_update_subinstance",
"(",
"self",
",",
"name",
",",
"subform",
")",
":",
"old_subinstance",
"=",
"getattr",
"(",
"self",
".",
"instance",
",",
"name",
")",
"new_subinstance",
"=",
"subform",
".",
"update_instance",
"(",
")",
"# if our instance previously had no node for the subform AND the",
"# updated one has data, then attach the new node.",
"if",
"old_subinstance",
"is",
"None",
"and",
"not",
"new_subinstance",
".",
"is_empty",
"(",
")",
":",
"setattr",
"(",
"self",
".",
"instance",
",",
"name",
",",
"new_subinstance",
")",
"# on the other hand, if the instance previously had a node for the",
"# subform AND the updated one is empty, then remove the node.",
"if",
"old_subinstance",
"is",
"not",
"None",
"and",
"new_subinstance",
".",
"is_empty",
"(",
")",
":",
"delattr",
"(",
"self",
".",
"instance",
",",
"name",
")"
] | Save bound data for a single subform into the XmlObject model
instance. | [
"Save",
"bound",
"data",
"for",
"a",
"single",
"subform",
"into",
"the",
"XmlObject",
"model",
"instance",
"."
] | python | train |
cogniteev/docido-python-sdk | docido_sdk/toolbox/decorators.py | https://github.com/cogniteev/docido-python-sdk/blob/58ecb6c6f5757fd40c0601657ab18368da7ddf33/docido_sdk/toolbox/decorators.py#L66-L83 | def reraise(clazz):
""" Decorator catching every exception that might be raised by wrapped
function and raise another exception instead.
Exception initially raised is passed in first argument of the raised
exception.
:param: Exception class: clazz:
Python exception class to raise
"""
def _decorator(f):
@functools.wraps(f)
def _wrap(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
raise clazz(e), None, sys.exc_info()[2]
return _wrap
return _decorator | [
"def",
"reraise",
"(",
"clazz",
")",
":",
"def",
"_decorator",
"(",
"f",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"def",
"_wrap",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"clazz",
"(",
"e",
")",
",",
"None",
",",
"sys",
".",
"exc_info",
"(",
")",
"[",
"2",
"]",
"return",
"_wrap",
"return",
"_decorator"
] | Decorator catching every exception that might be raised by wrapped
function and raise another exception instead.
Exception initially raised is passed in first argument of the raised
exception.
:param clazz: Python exception class to raise | [
"Decorator",
"catching",
"every",
"exception",
"that",
"might",
"be",
"raised",
"by",
"wrapped",
"function",
"and",
"raise",
"another",
"exception",
"instead",
".",
"Exception",
"initially",
"raised",
"is",
"passed",
"in",
"first",
"argument",
"of",
"the",
"raised",
"exception",
"."
] | python | train |
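
The re-raise above uses Python 2 syntax (`raise cls(e), None, traceback`); a Python 3 equivalent of the same decorator, sketched here as an assumption rather than the package's actual code, would chain the exceptions with `raise ... from ...`.

    import functools

    def reraise_py3(clazz):
        # Catch anything raised by the wrapped callable and re-raise it as `clazz`,
        # keeping the original exception (and its traceback) as the cause.
        def _decorator(f):
            @functools.wraps(f)
            def _wrap(*args, **kwargs):
                try:
                    return f(*args, **kwargs)
                except Exception as e:
                    raise clazz(e) from e
            return _wrap
        return _decorator

    @reraise_py3(RuntimeError)
    def boom():
        raise ValueError("original problem")

    try:
        boom()
    except RuntimeError as err:
        print(err, "| caused by:", repr(err.__cause__))
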
project-rig/rig | rig/machine_control/machine_controller.py | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L1127-L1170 | def sdram_alloc_as_filelike(self, size, tag=0, x=Required, y=Required,
app_id=Required, clear=False):
"""Like :py:meth:`.sdram_alloc` but returns a :py:class:`file-like
object <.MemoryIO>` which allows safe reading and writing to the block
that is allocated.
Returns
-------
:py:class:`.MemoryIO`
File-like object which allows accessing the newly allocated region
of memory. For example::
>>> # Read, write and seek through the allocated memory just
>>> # like a file
>>> mem = mc.sdram_alloc_as_filelike(12) # doctest: +SKIP
>>> mem.write(b"Hello, world") # doctest: +SKIP
12
>>> mem.seek(0) # doctest: +SKIP
>>> mem.read(5) # doctest: +SKIP
b"Hello"
>>> mem.read(7) # doctest: +SKIP
b", world"
>>> # Reads and writes are truncated to the allocated region,
>>> # preventing accidental clobbering/access of memory.
>>> mem.seek(0) # doctest: +SKIP
>>> mem.write(b"How are you today?") # doctest: +SKIP
12
>>> mem.seek(0) # doctest: +SKIP
>>> mem.read(100) # doctest: +SKIP
b"How are you "
See the :py:class:`.MemoryIO` class for details of other features
of these file-like views of SpiNNaker's memory.
Raises
------
rig.machine_control.machine_controller.SpiNNakerMemoryError
If the memory cannot be allocated, or the tag is already taken or
invalid.
"""
# Perform the malloc
start_address = self.sdram_alloc(size, tag, x, y, app_id, clear)
return MemoryIO(self, x, y, start_address, start_address + size) | [
"def",
"sdram_alloc_as_filelike",
"(",
"self",
",",
"size",
",",
"tag",
"=",
"0",
",",
"x",
"=",
"Required",
",",
"y",
"=",
"Required",
",",
"app_id",
"=",
"Required",
",",
"clear",
"=",
"False",
")",
":",
"# Perform the malloc",
"start_address",
"=",
"self",
".",
"sdram_alloc",
"(",
"size",
",",
"tag",
",",
"x",
",",
"y",
",",
"app_id",
",",
"clear",
")",
"return",
"MemoryIO",
"(",
"self",
",",
"x",
",",
"y",
",",
"start_address",
",",
"start_address",
"+",
"size",
")"
] | Like :py:meth:`.sdram_alloc` but returns a :py:class:`file-like
object <.MemoryIO>` which allows safe reading and writing to the block
that is allocated.
Returns
-------
:py:class:`.MemoryIO`
File-like object which allows accessing the newly allocated region
of memory. For example::
>>> # Read, write and seek through the allocated memory just
>>> # like a file
>>> mem = mc.sdram_alloc_as_filelike(12) # doctest: +SKIP
>>> mem.write(b"Hello, world") # doctest: +SKIP
12
>>> mem.seek(0) # doctest: +SKIP
>>> mem.read(5) # doctest: +SKIP
b"Hello"
>>> mem.read(7) # doctest: +SKIP
b", world"
>>> # Reads and writes are truncated to the allocated region,
>>> # preventing accidental clobbering/access of memory.
>>> mem.seek(0) # doctest: +SKIP
>>> mem.write(b"How are you today?") # doctest: +SKIP
12
>>> mem.seek(0) # doctest: +SKIP
>>> mem.read(100) # doctest: +SKIP
b"How are you "
See the :py:class:`.MemoryIO` class for details of other features
of these file-like views of SpiNNaker's memory.
Raises
------
rig.machine_control.machine_controller.SpiNNakerMemoryError
If the memory cannot be allocated, or the tag is already taken or
invalid. | [
"Like",
":",
"py",
":",
"meth",
":",
".",
"sdram_alloc",
"but",
"returns",
"a",
":",
"py",
":",
"class",
":",
"file",
"-",
"like",
"object",
"<",
".",
"MemoryIO",
">",
"which",
"allows",
"safe",
"reading",
"and",
"writing",
"to",
"the",
"block",
"that",
"is",
"allocated",
"."
] | python | train |
belbio/bel | bel/lang/belobj.py | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/belobj.py#L339-L372 | def compute_edges(
self, rules: List[str] = None, ast_result=False, fmt="medium"
) -> List[Mapping[str, Any]]:
"""Computed edges from primary BEL statement
Takes an AST and generates all computed edges based on BEL Specification YAML computed signatures.
Will run only the list of computed edge rules if given.
Args:
rules (list): a list of rules to filter; only the rules in this list will be applied to computed
fmt (str): short, medium or long version of BEL Edge (function and relation names)
Returns:
List[Mapping[str, Any]]: BEL Edges in medium format
"""
if not self.ast:
return self
edges_asts = bel.edge.computed.compute_edges(self.ast, self.spec)
if ast_result:
return edges_asts
edges = []
for ast in edges_asts:
edges.append(
{
"subject": ast.bel_subject.to_string(),
"relation": ast.bel_relation,
"object": ast.bel_object.to_string(),
}
)
return edges | [
"def",
"compute_edges",
"(",
"self",
",",
"rules",
":",
"List",
"[",
"str",
"]",
"=",
"None",
",",
"ast_result",
"=",
"False",
",",
"fmt",
"=",
"\"medium\"",
")",
"->",
"List",
"[",
"Mapping",
"[",
"str",
",",
"Any",
"]",
"]",
":",
"if",
"not",
"self",
".",
"ast",
":",
"return",
"self",
"edges_asts",
"=",
"bel",
".",
"edge",
".",
"computed",
".",
"compute_edges",
"(",
"self",
".",
"ast",
",",
"self",
".",
"spec",
")",
"if",
"ast_result",
":",
"return",
"edges_asts",
"edges",
"=",
"[",
"]",
"for",
"ast",
"in",
"edges_asts",
":",
"edges",
".",
"append",
"(",
"{",
"\"subject\"",
":",
"ast",
".",
"bel_subject",
".",
"to_string",
"(",
")",
",",
"\"relation\"",
":",
"ast",
".",
"bel_relation",
",",
"\"object\"",
":",
"ast",
".",
"bel_object",
".",
"to_string",
"(",
")",
",",
"}",
")",
"return",
"edges"
] | Computed edges from primary BEL statement
Takes an AST and generates all computed edges based on BEL Specification YAML computed signatures.
Will run only the list of computed edge rules if given.
Args:
rules (list): a list of rules to filter; only the rules in this list will be applied to computed
fmt (str): short, medium or long version of BEL Edge (function and relation names)
Returns:
List[Mapping[str, Any]]: BEL Edges in medium format | [
"Computed",
"edges",
"from",
"primary",
"BEL",
"statement"
] | python | train |
johnhw/pyspacenavigator | spacenavigator.py | https://github.com/johnhw/pyspacenavigator/blob/518bd89f94b83156f12dae261f95c517fb78ebe4/spacenavigator.py#L257-L270 | def list_devices():
"""Return a list of the supported devices connected
Returns:
A list of string names of the devices supported which were found. Empty if no supported devices found
"""
devices = []
all_hids = hid.find_all_hid_devices()
if all_hids:
for index, device in enumerate(all_hids):
for device_name,spec in device_specs.items():
if device.vendor_id == spec.hid_id[0] and device.product_id == spec.hid_id[1]:
devices.append(device_name)
return devices | [
"def",
"list_devices",
"(",
")",
":",
"devices",
"=",
"[",
"]",
"all_hids",
"=",
"hid",
".",
"find_all_hid_devices",
"(",
")",
"if",
"all_hids",
":",
"for",
"index",
",",
"device",
"in",
"enumerate",
"(",
"all_hids",
")",
":",
"for",
"device_name",
",",
"spec",
"in",
"device_specs",
".",
"items",
"(",
")",
":",
"if",
"device",
".",
"vendor_id",
"==",
"spec",
".",
"hid_id",
"[",
"0",
"]",
"and",
"device",
".",
"product_id",
"==",
"spec",
".",
"hid_id",
"[",
"1",
"]",
":",
"devices",
".",
"append",
"(",
"device_name",
")",
"return",
"devices"
] | Return a list of the supported devices connected
Returns:
A list of string names of the devices supported which were found. Empty if no supported devices found | [
"Return",
"a",
"list",
"of",
"the",
"supported",
"devices",
"connected",
"Returns",
":",
"A",
"list",
"of",
"string",
"names",
"of",
"the",
"devices",
"supported",
"which",
"were",
"found",
".",
"Empty",
"if",
"no",
"supported",
"devices",
"found"
] | python | train |
markovmodel/PyEMMA | pyemma/util/discrete_trajectories.py | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/util/discrete_trajectories.py#L144-L177 | def count_states(dtrajs, ignore_negative=False):
r"""returns a histogram count
Parameters
----------
dtrajs : array_like or list of array_like
Discretized trajectory or list of discretized trajectories
ignore_negative, bool, default=False
Ignore negative elements. By default, a negative element will cause an
exception
Returns
-------
count : ndarray((n), dtype=int)
the number of occurrences of each state. n=max+1 where max is the largest state index found.
"""
# format input
dtrajs = _ensure_dtraj_list(dtrajs)
# make bincounts for each input trajectory
nmax = 0
bcs = []
for dtraj in dtrajs:
if ignore_negative:
dtraj = dtraj[np.where(dtraj >= 0)]
bc = np.bincount(dtraj)
nmax = max(nmax, bc.shape[0])
bcs.append(bc)
# construct total bincount
res = np.zeros(nmax, dtype=int)
# add up individual bincounts
for i, bc in enumerate(bcs):
res[:bc.shape[0]] += bc
return res | [
"def",
"count_states",
"(",
"dtrajs",
",",
"ignore_negative",
"=",
"False",
")",
":",
"# format input",
"dtrajs",
"=",
"_ensure_dtraj_list",
"(",
"dtrajs",
")",
"# make bincounts for each input trajectory",
"nmax",
"=",
"0",
"bcs",
"=",
"[",
"]",
"for",
"dtraj",
"in",
"dtrajs",
":",
"if",
"ignore_negative",
":",
"dtraj",
"=",
"dtraj",
"[",
"np",
".",
"where",
"(",
"dtraj",
">=",
"0",
")",
"]",
"bc",
"=",
"np",
".",
"bincount",
"(",
"dtraj",
")",
"nmax",
"=",
"max",
"(",
"nmax",
",",
"bc",
".",
"shape",
"[",
"0",
"]",
")",
"bcs",
".",
"append",
"(",
"bc",
")",
"# construct total bincount",
"res",
"=",
"np",
".",
"zeros",
"(",
"nmax",
",",
"dtype",
"=",
"int",
")",
"# add up individual bincounts",
"for",
"i",
",",
"bc",
"in",
"enumerate",
"(",
"bcs",
")",
":",
"res",
"[",
":",
"bc",
".",
"shape",
"[",
"0",
"]",
"]",
"+=",
"bc",
"return",
"res"
] | r"""returns a histogram count
Parameters
----------
dtrajs : array_like or list of array_like
Discretized trajectory or list of discretized trajectories
ignore_negative, bool, default=False
Ignore negative elements. By default, a negative element will cause an
exception
Returns
-------
count : ndarray((n), dtype=int)
the number of occurrences of each state. n=max+1 where max is the largest state index found. | [
"r",
"returns",
"a",
"histogram",
"count"
] | python | train |
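
A small usage sketch with toy discrete trajectories, assuming the import path matches the record.

    import numpy as np
    from pyemma.util.discrete_trajectories import count_states

    dtrajs = [np.array([0, 0, 1, 2]), np.array([2, 2, 3])]
    print(count_states(dtrajs))                                        # [2 1 3 1]
    print(count_states([np.array([0, -1, 1])], ignore_negative=True))  # [1 1]
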
valentinalexeev/pwaqi | pwaqi/__init__.py | https://github.com/valentinalexeev/pwaqi/blob/81a1fa1ad87be7ba015c1cb07c52c7760ca99d8c/pwaqi/__init__.py#L31-L41 | def get_location_observation(lat, lng, token):
"""Lookup observations by geo coordinates."""
req = requests.get(
API_ENDPOINT_GEO % (lat, lng),
params={
'token': token
})
if req.status_code == 200 and req.json()["status"] == "ok":
return parse_observation_response(req.json()["data"])
return {} | [
"def",
"get_location_observation",
"(",
"lat",
",",
"lng",
",",
"token",
")",
":",
"req",
"=",
"requests",
".",
"get",
"(",
"API_ENDPOINT_GEO",
"%",
"(",
"lat",
",",
"lng",
")",
",",
"params",
"=",
"{",
"'token'",
":",
"token",
"}",
")",
"if",
"req",
".",
"status_code",
"==",
"200",
"and",
"req",
".",
"json",
"(",
")",
"[",
"\"status\"",
"]",
"==",
"\"ok\"",
":",
"return",
"parse_observation_response",
"(",
"req",
".",
"json",
"(",
")",
"[",
"\"data\"",
"]",
")",
"return",
"{",
"}"
] | Lookup observations by geo coordinates. | [
"Lookup",
"observations",
"by",
"geo",
"coordinates",
"."
] | python | test |
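
A usage sketch; the coordinates are arbitrary and the token must be replaced with a real WAQI API token, so both values are assumptions.

    from pwaqi import get_location_observation

    obs = get_location_observation(48.8566, 2.3522, token='YOUR_WAQI_TOKEN')
    print(obs)   # parsed observation dict, or {} on error / non-ok status
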
ladybug-tools/ladybug | ladybug/sunpath.py | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/sunpath.py#L127-L131 | def is_daylight_saving_hour(self, datetime):
"""Check if a datetime is a daylight saving time."""
if not self.daylight_saving_period:
return False
return self.daylight_saving_period.isTimeIncluded(datetime.hoy) | [
"def",
"is_daylight_saving_hour",
"(",
"self",
",",
"datetime",
")",
":",
"if",
"not",
"self",
".",
"daylight_saving_period",
":",
"return",
"False",
"return",
"self",
".",
"daylight_saving_period",
".",
"isTimeIncluded",
"(",
"datetime",
".",
"hoy",
")"
] | Check if a datetime is a daylight saving time. | [
"Check",
"if",
"a",
"datetime",
"is",
"a",
"daylight",
"saving",
"time",
"."
] | python | train |
chrisrink10/basilisp | src/basilisp/lang/compiler/parser.py | https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/compiler/parser.py#L1788-L1876 | def __resolve_namespaced_symbol( # pylint: disable=too-many-branches
ctx: ParserContext, form: sym.Symbol
) -> Union[MaybeClass, MaybeHostForm, VarRef]:
"""Resolve a namespaced symbol into a Python name or Basilisp Var."""
assert form.ns is not None
if form.ns == ctx.current_ns.name:
v = ctx.current_ns.find(sym.symbol(form.name))
if v is not None:
return VarRef(form=form, var=v, env=ctx.get_node_env())
elif form.ns == _BUILTINS_NS:
class_ = munge(form.name, allow_builtins=True)
target = getattr(builtins, class_, None)
if target is None:
raise ParserException(
f"cannot resolve builtin function '{class_}'", form=form
)
return MaybeClass(
form=form, class_=class_, target=target, env=ctx.get_node_env()
)
if "." in form.name:
raise ParserException(
"symbol names may not contain the '.' operator", form=form
)
ns_sym = sym.symbol(form.ns)
if ns_sym in ctx.current_ns.imports or ns_sym in ctx.current_ns.import_aliases:
# We still import Basilisp code, so we'll want to make sure
# that the symbol isn't referring to a Basilisp Var first
v = Var.find(form)
if v is not None:
return VarRef(form=form, var=v, env=ctx.get_node_env())
# Fetch the full namespace name for the aliased namespace/module.
# We don't need this for actually generating the link later, but
# we _do_ need it for fetching a reference to the module to check
# for membership.
if ns_sym in ctx.current_ns.import_aliases:
ns = ctx.current_ns.import_aliases[ns_sym]
assert ns is not None
ns_name = ns.name
else:
ns_name = ns_sym.name
safe_module_name = munge(ns_name)
assert (
safe_module_name in sys.modules
), f"Module '{safe_module_name}' is not imported"
ns_module = sys.modules[safe_module_name]
safe_name = munge(form.name)
# Try without allowing builtins first
if safe_name in vars(ns_module):
return MaybeHostForm(
form=form,
class_=munge(ns_sym.name),
field=safe_name,
target=vars(ns_module)[safe_name],
env=ctx.get_node_env(),
)
# Then allow builtins
safe_name = munge(form.name, allow_builtins=True)
if safe_name not in vars(ns_module):
raise ParserException("can't identify aliased form", form=form)
# Aliased imports generate code which uses the import alias, so we
# don't need to care if this is an import or an alias.
return MaybeHostForm(
form=form,
class_=munge(ns_sym.name),
field=safe_name,
target=vars(ns_module)[safe_name],
env=ctx.get_node_env(),
)
elif ns_sym in ctx.current_ns.aliases:
aliased_ns: runtime.Namespace = ctx.current_ns.aliases[ns_sym]
v = Var.find(sym.symbol(form.name, ns=aliased_ns.name))
if v is None:
raise ParserException(
f"unable to resolve symbol '{sym.symbol(form.name, ns_sym.name)}' in this context",
form=form,
)
return VarRef(form=form, var=v, env=ctx.get_node_env())
else:
raise ParserException(
f"unable to resolve symbol '{form}' in this context", form=form
) | [
"def",
"__resolve_namespaced_symbol",
"(",
"# pylint: disable=too-many-branches",
"ctx",
":",
"ParserContext",
",",
"form",
":",
"sym",
".",
"Symbol",
")",
"->",
"Union",
"[",
"MaybeClass",
",",
"MaybeHostForm",
",",
"VarRef",
"]",
":",
"assert",
"form",
".",
"ns",
"is",
"not",
"None",
"if",
"form",
".",
"ns",
"==",
"ctx",
".",
"current_ns",
".",
"name",
":",
"v",
"=",
"ctx",
".",
"current_ns",
".",
"find",
"(",
"sym",
".",
"symbol",
"(",
"form",
".",
"name",
")",
")",
"if",
"v",
"is",
"not",
"None",
":",
"return",
"VarRef",
"(",
"form",
"=",
"form",
",",
"var",
"=",
"v",
",",
"env",
"=",
"ctx",
".",
"get_node_env",
"(",
")",
")",
"elif",
"form",
".",
"ns",
"==",
"_BUILTINS_NS",
":",
"class_",
"=",
"munge",
"(",
"form",
".",
"name",
",",
"allow_builtins",
"=",
"True",
")",
"target",
"=",
"getattr",
"(",
"builtins",
",",
"class_",
",",
"None",
")",
"if",
"target",
"is",
"None",
":",
"raise",
"ParserException",
"(",
"f\"cannot resolve builtin function '{class_}'\"",
",",
"form",
"=",
"form",
")",
"return",
"MaybeClass",
"(",
"form",
"=",
"form",
",",
"class_",
"=",
"class_",
",",
"target",
"=",
"target",
",",
"env",
"=",
"ctx",
".",
"get_node_env",
"(",
")",
")",
"if",
"\".\"",
"in",
"form",
".",
"name",
":",
"raise",
"ParserException",
"(",
"\"symbol names may not contain the '.' operator\"",
",",
"form",
"=",
"form",
")",
"ns_sym",
"=",
"sym",
".",
"symbol",
"(",
"form",
".",
"ns",
")",
"if",
"ns_sym",
"in",
"ctx",
".",
"current_ns",
".",
"imports",
"or",
"ns_sym",
"in",
"ctx",
".",
"current_ns",
".",
"import_aliases",
":",
"# We still import Basilisp code, so we'll want to make sure",
"# that the symbol isn't referring to a Basilisp Var first",
"v",
"=",
"Var",
".",
"find",
"(",
"form",
")",
"if",
"v",
"is",
"not",
"None",
":",
"return",
"VarRef",
"(",
"form",
"=",
"form",
",",
"var",
"=",
"v",
",",
"env",
"=",
"ctx",
".",
"get_node_env",
"(",
")",
")",
"# Fetch the full namespace name for the aliased namespace/module.",
"# We don't need this for actually generating the link later, but",
"# we _do_ need it for fetching a reference to the module to check",
"# for membership.",
"if",
"ns_sym",
"in",
"ctx",
".",
"current_ns",
".",
"import_aliases",
":",
"ns",
"=",
"ctx",
".",
"current_ns",
".",
"import_aliases",
"[",
"ns_sym",
"]",
"assert",
"ns",
"is",
"not",
"None",
"ns_name",
"=",
"ns",
".",
"name",
"else",
":",
"ns_name",
"=",
"ns_sym",
".",
"name",
"safe_module_name",
"=",
"munge",
"(",
"ns_name",
")",
"assert",
"(",
"safe_module_name",
"in",
"sys",
".",
"modules",
")",
",",
"f\"Module '{safe_module_name}' is not imported\"",
"ns_module",
"=",
"sys",
".",
"modules",
"[",
"safe_module_name",
"]",
"safe_name",
"=",
"munge",
"(",
"form",
".",
"name",
")",
"# Try without allowing builtins first",
"if",
"safe_name",
"in",
"vars",
"(",
"ns_module",
")",
":",
"return",
"MaybeHostForm",
"(",
"form",
"=",
"form",
",",
"class_",
"=",
"munge",
"(",
"ns_sym",
".",
"name",
")",
",",
"field",
"=",
"safe_name",
",",
"target",
"=",
"vars",
"(",
"ns_module",
")",
"[",
"safe_name",
"]",
",",
"env",
"=",
"ctx",
".",
"get_node_env",
"(",
")",
",",
")",
"# Then allow builtins",
"safe_name",
"=",
"munge",
"(",
"form",
".",
"name",
",",
"allow_builtins",
"=",
"True",
")",
"if",
"safe_name",
"not",
"in",
"vars",
"(",
"ns_module",
")",
":",
"raise",
"ParserException",
"(",
"\"can't identify aliased form\"",
",",
"form",
"=",
"form",
")",
"# Aliased imports generate code which uses the import alias, so we",
"# don't need to care if this is an import or an alias.",
"return",
"MaybeHostForm",
"(",
"form",
"=",
"form",
",",
"class_",
"=",
"munge",
"(",
"ns_sym",
".",
"name",
")",
",",
"field",
"=",
"safe_name",
",",
"target",
"=",
"vars",
"(",
"ns_module",
")",
"[",
"safe_name",
"]",
",",
"env",
"=",
"ctx",
".",
"get_node_env",
"(",
")",
",",
")",
"elif",
"ns_sym",
"in",
"ctx",
".",
"current_ns",
".",
"aliases",
":",
"aliased_ns",
":",
"runtime",
".",
"Namespace",
"=",
"ctx",
".",
"current_ns",
".",
"aliases",
"[",
"ns_sym",
"]",
"v",
"=",
"Var",
".",
"find",
"(",
"sym",
".",
"symbol",
"(",
"form",
".",
"name",
",",
"ns",
"=",
"aliased_ns",
".",
"name",
")",
")",
"if",
"v",
"is",
"None",
":",
"raise",
"ParserException",
"(",
"f\"unable to resolve symbol '{sym.symbol(form.name, ns_sym.name)}' in this context\"",
",",
"form",
"=",
"form",
",",
")",
"return",
"VarRef",
"(",
"form",
"=",
"form",
",",
"var",
"=",
"v",
",",
"env",
"=",
"ctx",
".",
"get_node_env",
"(",
")",
")",
"else",
":",
"raise",
"ParserException",
"(",
"f\"unable to resolve symbol '{form}' in this context\"",
",",
"form",
"=",
"form",
")"
] | Resolve a namespaced symbol into a Python name or Basilisp Var. | [
"Resolve",
"a",
"namespaced",
"symbol",
"into",
"a",
"Python",
"name",
"or",
"Basilisp",
"Var",
"."
] | python | test |
Azure/azure-sdk-for-python | azure-servicemanagement-legacy/azure/servicemanagement/servicemanagementservice.py | https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicemanagement-legacy/azure/servicemanagement/servicemanagementservice.py#L1837-L1855 | def delete_dns_server(self, service_name, deployment_name, dns_server_name):
'''
Deletes a DNS server from a deployment.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
dns_server_name:
Name of the DNS server that you want to delete.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('dns_server_name', dns_server_name)
return self._perform_delete(
self._get_dns_server_path(service_name,
deployment_name,
dns_server_name),
as_async=True) | [
"def",
"delete_dns_server",
"(",
"self",
",",
"service_name",
",",
"deployment_name",
",",
"dns_server_name",
")",
":",
"_validate_not_none",
"(",
"'service_name'",
",",
"service_name",
")",
"_validate_not_none",
"(",
"'deployment_name'",
",",
"deployment_name",
")",
"_validate_not_none",
"(",
"'dns_server_name'",
",",
"dns_server_name",
")",
"return",
"self",
".",
"_perform_delete",
"(",
"self",
".",
"_get_dns_server_path",
"(",
"service_name",
",",
"deployment_name",
",",
"dns_server_name",
")",
",",
"as_async",
"=",
"True",
")"
] | Deletes a DNS server from a deployment.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
dns_server_name:
Name of the DNS server that you want to delete. | [
"Deletes",
"a",
"DNS",
"server",
"from",
"a",
"deployment",
"."
] | python | test |
woolfson-group/isambard | isambard/external_programs/dssp.py | https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/external_programs/dssp.py#L180-L220 | def extract_helices_dssp(in_pdb):
"""Uses DSSP to find alpha-helices and extracts helices from a pdb file.
Returns a length 3 list with a helix id, the chain id and a dict
containing the coordinates of each residues CA.
Parameters
----------
in_pdb : string
Path to a PDB file.
"""
from ampal.pdb_parser import split_pdb_lines
dssp_out = subprocess.check_output(
[global_settings['dssp']['path'], in_pdb])
helix = 0
helices = []
h_on = False
for line in dssp_out.splitlines():
dssp_line = line.split()
try:
if dssp_line[4] == 'H':
if helix not in [x[0] for x in helices]:
helices.append(
[helix, dssp_line[2], {int(dssp_line[1]): None}])
else:
helices[helix][2][int(dssp_line[1])] = None
h_on = True
else:
if h_on:
helix += 1
h_on = False
except IndexError:
pass
with open(in_pdb, 'r') as pdb:
pdb_atoms = split_pdb_lines(pdb.read())
for atom in pdb_atoms:
for helix in helices:
if (atom[2] == "CA") and (atom[5] == helix[1]) and (atom[6] in helix[2].keys()):
helix[2][atom[6]] = tuple(atom[8:11])
return helices | [
"def",
"extract_helices_dssp",
"(",
"in_pdb",
")",
":",
"from",
"ampal",
".",
"pdb_parser",
"import",
"split_pdb_lines",
"dssp_out",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"global_settings",
"[",
"'dssp'",
"]",
"[",
"'path'",
"]",
",",
"in_pdb",
"]",
")",
"helix",
"=",
"0",
"helices",
"=",
"[",
"]",
"h_on",
"=",
"False",
"for",
"line",
"in",
"dssp_out",
".",
"splitlines",
"(",
")",
":",
"dssp_line",
"=",
"line",
".",
"split",
"(",
")",
"try",
":",
"if",
"dssp_line",
"[",
"4",
"]",
"==",
"'H'",
":",
"if",
"helix",
"not",
"in",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"helices",
"]",
":",
"helices",
".",
"append",
"(",
"[",
"helix",
",",
"dssp_line",
"[",
"2",
"]",
",",
"{",
"int",
"(",
"dssp_line",
"[",
"1",
"]",
")",
":",
"None",
"}",
"]",
")",
"else",
":",
"helices",
"[",
"helix",
"]",
"[",
"2",
"]",
"[",
"int",
"(",
"dssp_line",
"[",
"1",
"]",
")",
"]",
"=",
"None",
"h_on",
"=",
"True",
"else",
":",
"if",
"h_on",
":",
"helix",
"+=",
"1",
"h_on",
"=",
"False",
"except",
"IndexError",
":",
"pass",
"with",
"open",
"(",
"in_pdb",
",",
"'r'",
")",
"as",
"pdb",
":",
"pdb_atoms",
"=",
"split_pdb_lines",
"(",
"pdb",
".",
"read",
"(",
")",
")",
"for",
"atom",
"in",
"pdb_atoms",
":",
"for",
"helix",
"in",
"helices",
":",
"if",
"(",
"atom",
"[",
"2",
"]",
"==",
"\"CA\"",
")",
"and",
"(",
"atom",
"[",
"5",
"]",
"==",
"helix",
"[",
"1",
"]",
")",
"and",
"(",
"atom",
"[",
"6",
"]",
"in",
"helix",
"[",
"2",
"]",
".",
"keys",
"(",
")",
")",
":",
"helix",
"[",
"2",
"]",
"[",
"atom",
"[",
"6",
"]",
"]",
"=",
"tuple",
"(",
"atom",
"[",
"8",
":",
"11",
"]",
")",
"return",
"helices"
] | Uses DSSP to find alpha-helices and extracts helices from a pdb file.
Returns a length 3 list with a helix id, the chain id and a dict
containing the coordinates of each residues CA.
Parameters
----------
in_pdb : string
Path to a PDB file. | [
"Uses",
"DSSP",
"to",
"find",
"alpha",
"-",
"helices",
"and",
"extracts",
"helices",
"from",
"a",
"pdb",
"file",
"."
] | python | train |
aouyar/PyMunin | pysysinfo/system.py | https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/system.py#L98-L118 | def getProcessStats(self):
"""Return stats for running and blocked processes, forks,
context switches and interrupts.
@return: Dictionary of stats.
"""
info_dict = {}
try:
fp = open(cpustatFile, 'r')
data = fp.read()
fp.close()
except:
raise IOError('Failed reading stats from file: %s' % cpustatFile)
for line in data.splitlines():
arr = line.split()
if len(arr) > 1 and arr[0] in ('ctxt', 'intr', 'softirq',
'processes', 'procs_running',
'procs_blocked'):
info_dict[arr[0]] = arr[1]
return info_dict | [
"def",
"getProcessStats",
"(",
"self",
")",
":",
"info_dict",
"=",
"{",
"}",
"try",
":",
"fp",
"=",
"open",
"(",
"cpustatFile",
",",
"'r'",
")",
"data",
"=",
"fp",
".",
"read",
"(",
")",
"fp",
".",
"close",
"(",
")",
"except",
":",
"raise",
"IOError",
"(",
"'Failed reading stats from file: %s'",
"%",
"cpustatFile",
")",
"for",
"line",
"in",
"data",
".",
"splitlines",
"(",
")",
":",
"arr",
"=",
"line",
".",
"split",
"(",
")",
"if",
"len",
"(",
"arr",
")",
">",
"1",
"and",
"arr",
"[",
"0",
"]",
"in",
"(",
"'ctxt'",
",",
"'intr'",
",",
"'softirq'",
",",
"'processes'",
",",
"'procs_running'",
",",
"'procs_blocked'",
")",
":",
"info_dict",
"[",
"arr",
"[",
"0",
"]",
"]",
"=",
"arr",
"[",
"1",
"]",
"return",
"info_dict"
] | Return stats for running and blocked processes, forks,
context switches and interrupts.
@return: Dictionary of stats. | [
"Return",
"stats",
"for",
"running",
"and",
"blocked",
"processes",
"forks",
"context",
"switches",
"and",
"interrupts",
"."
] | python | train |
siznax/wptools | wptools/utils.py | https://github.com/siznax/wptools/blob/100eaea585c34aa9ad87a9eda8982bb4898f6ec9/wptools/utils.py#L110-L115 | def stderr(msg, silent=False):
"""
write msg to stderr if not silent
"""
if not silent:
print(msg, file=sys.stderr) | [
"def",
"stderr",
"(",
"msg",
",",
"silent",
"=",
"False",
")",
":",
"if",
"not",
"silent",
":",
"print",
"(",
"msg",
",",
"file",
"=",
"sys",
".",
"stderr",
")"
] | write msg to stderr if not silent | [
"write",
"msg",
"to",
"stderr",
"if",
"not",
"silent"
] | python | train |
freakboy3742/pyxero | xero/auth.py | https://github.com/freakboy3742/pyxero/blob/5566f17fa06ed1f2fb9426c112951a72276b0f9a/xero/auth.py#L165-L177 | def _init_oauth(self, oauth_token, oauth_token_secret):
"Store and initialize a verified set of OAuth credentials"
self.oauth_token = oauth_token
self.oauth_token_secret = oauth_token_secret
self._oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.oauth_token,
resource_owner_secret=self.oauth_token_secret,
rsa_key=self.rsa_key,
signature_method=self._signature_method
) | [
"def",
"_init_oauth",
"(",
"self",
",",
"oauth_token",
",",
"oauth_token_secret",
")",
":",
"self",
".",
"oauth_token",
"=",
"oauth_token",
"self",
".",
"oauth_token_secret",
"=",
"oauth_token_secret",
"self",
".",
"_oauth",
"=",
"OAuth1",
"(",
"self",
".",
"consumer_key",
",",
"client_secret",
"=",
"self",
".",
"consumer_secret",
",",
"resource_owner_key",
"=",
"self",
".",
"oauth_token",
",",
"resource_owner_secret",
"=",
"self",
".",
"oauth_token_secret",
",",
"rsa_key",
"=",
"self",
".",
"rsa_key",
",",
"signature_method",
"=",
"self",
".",
"_signature_method",
")"
] | Store and initialize a verified set of OAuth credentials | [
"Store",
"and",
"initialize",
"a",
"verified",
"set",
"of",
"OAuth",
"credentials"
] | python | train |
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/util/regex.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/util/regex.py#L11-L27 | def transform (list, pattern, indices = [1]):
""" Matches all elements of 'list' agains the 'pattern'
and returns a list of the elements indicated by indices of
all successfull matches. If 'indices' is omitted returns
a list of first paranthethised groups of all successfull
matches.
"""
result = []
for e in list:
m = re.match (pattern, e)
if m:
for i in indices:
result.append (m.group (i))
return result | [
"def",
"transform",
"(",
"list",
",",
"pattern",
",",
"indices",
"=",
"[",
"1",
"]",
")",
":",
"result",
"=",
"[",
"]",
"for",
"e",
"in",
"list",
":",
"m",
"=",
"re",
".",
"match",
"(",
"pattern",
",",
"e",
")",
"if",
"m",
":",
"for",
"i",
"in",
"indices",
":",
"result",
".",
"append",
"(",
"m",
".",
"group",
"(",
"i",
")",
")",
"return",
"result"
] | Matches all elements of 'list' agains the 'pattern'
and returns a list of the elements indicated by indices of
all successfull matches. If 'indices' is omitted returns
a list of first paranthethised groups of all successfull
matches. | [
"Matches",
"all",
"elements",
"of",
"list",
"agains",
"the",
"pattern",
"and",
"returns",
"a",
"list",
"of",
"the",
"elements",
"indicated",
"by",
"indices",
"of",
"all",
"successfull",
"matches",
".",
"If",
"indices",
"is",
"omitted",
"returns",
"a",
"list",
"of",
"first",
"paranthethised",
"groups",
"of",
"all",
"successfull",
"matches",
"."
] | python | train |
pytroll/satpy | satpy/readers/grib.py | https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/grib.py#L207-L217 | def get_area_def(self, dsid):
"""Get area definition for message.
If latlong grid then convert to valid eqc grid.
"""
msg = self._get_message(self._msg_datasets[dsid])
try:
return self._area_def_from_msg(msg)
except (RuntimeError, KeyError):
raise RuntimeError("Unknown GRIB projection information") | [
"def",
"get_area_def",
"(",
"self",
",",
"dsid",
")",
":",
"msg",
"=",
"self",
".",
"_get_message",
"(",
"self",
".",
"_msg_datasets",
"[",
"dsid",
"]",
")",
"try",
":",
"return",
"self",
".",
"_area_def_from_msg",
"(",
"msg",
")",
"except",
"(",
"RuntimeError",
",",
"KeyError",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Unknown GRIB projection information\"",
")"
] | Get area definition for message.
If latlong grid then convert to valid eqc grid. | [
"Get",
"area",
"definition",
"for",
"message",
"."
] | python | train |
moonso/interval_tree | interval_tree/interval_tree.py | https://github.com/moonso/interval_tree/blob/c588177f5bd90bd9e2f1447216c78b024353f7a1/interval_tree/interval_tree.py#L64-L97 | def recursive_build_tree(self, intervals):
"""
recursively builds a BST based on the elementary intervals.
each node is an array: [interval value, left descendent nodes, right descendent nodes, [ids]].
nodes with no descendents have a -1 value in left/right descendent positions.
for example, a node with two empty descendents:
[500, interval value
[-1,-1,-1,['id5','id6']], left descendent
[-1,-1,-1,['id4']], right descendent
['id1',id2',id3']] data values
"""
center = int(round(len(intervals) / 2))
left = intervals[:center]
right = intervals[center + 1:]
node = intervals[center]
if len(left) > 1:
left = self.recursive_build_tree(left)
elif len(left) == 1:
left = [left[0],[-1,-1,-1,[]],[-1,-1,-1,[]],[]]
else:
left = [-1,-1,-1,[]]
if len(right) > 1:
right = self.recursive_build_tree(right)
elif len(right) == 1:
right = [right[0],[-1,-1,-1,[]],[-1,-1,-1,[]],[]]
else:
right = [-1,-1,-1,[]]
return [node, left, right, []] | [
"def",
"recursive_build_tree",
"(",
"self",
",",
"intervals",
")",
":",
"center",
"=",
"int",
"(",
"round",
"(",
"len",
"(",
"intervals",
")",
"/",
"2",
")",
")",
"left",
"=",
"intervals",
"[",
":",
"center",
"]",
"right",
"=",
"intervals",
"[",
"center",
"+",
"1",
":",
"]",
"node",
"=",
"intervals",
"[",
"center",
"]",
"if",
"len",
"(",
"left",
")",
">",
"1",
":",
"left",
"=",
"self",
".",
"recursive_build_tree",
"(",
"left",
")",
"elif",
"len",
"(",
"left",
")",
"==",
"1",
":",
"left",
"=",
"[",
"left",
"[",
"0",
"]",
",",
"[",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
",",
"[",
"]",
"]",
",",
"[",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
",",
"[",
"]",
"]",
",",
"[",
"]",
"]",
"else",
":",
"left",
"=",
"[",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
",",
"[",
"]",
"]",
"if",
"len",
"(",
"right",
")",
">",
"1",
":",
"right",
"=",
"self",
".",
"recursive_build_tree",
"(",
"right",
")",
"elif",
"len",
"(",
"right",
")",
"==",
"1",
":",
"right",
"=",
"[",
"right",
"[",
"0",
"]",
",",
"[",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
",",
"[",
"]",
"]",
",",
"[",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
",",
"[",
"]",
"]",
",",
"[",
"]",
"]",
"else",
":",
"right",
"=",
"[",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
",",
"[",
"]",
"]",
"return",
"[",
"node",
",",
"left",
",",
"right",
",",
"[",
"]",
"]"
] | recursively builds a BST based on the elementary intervals.
each node is an array: [interval value, left descendent nodes, right descendent nodes, [ids]].
nodes with no descendents have a -1 value in left/right descendent positions.
for example, a node with two empty descendents:
[500, interval value
[-1,-1,-1,['id5','id6']], left descendent
[-1,-1,-1,['id4']], right descendent
['id1',id2',id3']] data values | [
"recursively",
"builds",
"a",
"BST",
"based",
"on",
"the",
"elementary",
"intervals",
".",
"each",
"node",
"is",
"an",
"array",
":",
"[",
"interval",
"value",
"left",
"descendent",
"nodes",
"right",
"descendent",
"nodes",
"[",
"ids",
"]]",
".",
"nodes",
"with",
"no",
"descendents",
"have",
"a",
"-",
"1",
"value",
"in",
"left",
"/",
"right",
"descendent",
"positions",
"."
] | python | train |
postlund/pyatv | pyatv/conf.py | https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/conf.py#L25-L35 | def add_service(self, service):
"""Add a new service.
If the service already exists, it will be replaced.
"""
if service.protocol in self._services:
existing = self._services[service.protocol]
if not existing.superseeded_by(service):
return
self._services[service.protocol] = service | [
"def",
"add_service",
"(",
"self",
",",
"service",
")",
":",
"if",
"service",
".",
"protocol",
"in",
"self",
".",
"_services",
":",
"existing",
"=",
"self",
".",
"_services",
"[",
"service",
".",
"protocol",
"]",
"if",
"not",
"existing",
".",
"superseeded_by",
"(",
"service",
")",
":",
"return",
"self",
".",
"_services",
"[",
"service",
".",
"protocol",
"]",
"=",
"service"
] | Add a new service.
If the service already exists, it will be replaced. | [
"Add",
"a",
"new",
"service",
"."
] | python | train |
mlperf/training | reinforcement/tensorflow/minigo/oneoffs/validate_misc.py | https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/reinforcement/tensorflow/minigo/oneoffs/validate_misc.py#L22-L43 | def validate_examples(example_file):
"""Validate that examples are well formed.
Pi should sum to 1.0
value should be {-1,1}
Usage:
validate_examples("../data/300.tfrecord.zz")
"""
def test_example(raw):
example = tf.train.Example()
example.ParseFromString(raw)
pi = np.frombuffer(example.features.feature['pi'].bytes_list.value[0], np.float32)
value = example.features.feature['outcome'].float_list.value[0]
assert abs(pi.sum() - 1) < 1e-4, pi.sum()
assert value in (-1, 1), value
opts = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB)
for record in tqdm(tf.python_io.tf_record_iterator(example_file, opts)):
test_example(record) | [
"def",
"validate_examples",
"(",
"example_file",
")",
":",
"def",
"test_example",
"(",
"raw",
")",
":",
"example",
"=",
"tf",
".",
"train",
".",
"Example",
"(",
")",
"example",
".",
"ParseFromString",
"(",
"raw",
")",
"pi",
"=",
"np",
".",
"frombuffer",
"(",
"example",
".",
"features",
".",
"feature",
"[",
"'pi'",
"]",
".",
"bytes_list",
".",
"value",
"[",
"0",
"]",
",",
"np",
".",
"float32",
")",
"value",
"=",
"example",
".",
"features",
".",
"feature",
"[",
"'outcome'",
"]",
".",
"float_list",
".",
"value",
"[",
"0",
"]",
"assert",
"abs",
"(",
"pi",
".",
"sum",
"(",
")",
"-",
"1",
")",
"<",
"1e-4",
",",
"pi",
".",
"sum",
"(",
")",
"assert",
"value",
"in",
"(",
"-",
"1",
",",
"1",
")",
",",
"value",
"opts",
"=",
"tf",
".",
"python_io",
".",
"TFRecordOptions",
"(",
"tf",
".",
"python_io",
".",
"TFRecordCompressionType",
".",
"ZLIB",
")",
"for",
"record",
"in",
"tqdm",
"(",
"tf",
".",
"python_io",
".",
"tf_record_iterator",
"(",
"example_file",
",",
"opts",
")",
")",
":",
"test_example",
"(",
"record",
")"
] | Validate that examples are well formed.
Pi should sum to 1.0
value should be {-1,1}
Usage:
validate_examples("../data/300.tfrecord.zz") | [
"Validate",
"that",
"examples",
"are",
"well",
"formed",
"."
] | python | train |
jab/bidict | bidict/_base.py | https://github.com/jab/bidict/blob/1a1ba9758651aed9c4f58384eff006d2e2ad6835/bidict/_base.py#L241-L293 | def _dedup_item(self, key, val, on_dup):
"""
Check *key* and *val* for any duplication in self.
Handle any duplication as per the duplication policies given in *on_dup*.
(key, val) already present is construed as a no-op, not a duplication.
If duplication is found and the corresponding duplication policy is
:attr:`~bidict.RAISE`, raise the appropriate error.
If duplication is found and the corresponding duplication policy is
:attr:`~bidict.IGNORE`, return *None*.
If duplication is found and the corresponding duplication policy is
:attr:`~bidict.OVERWRITE`,
or if no duplication is found,
return the _DedupResult *(isdupkey, isdupval, oldkey, oldval)*.
"""
fwdm = self._fwdm
invm = self._invm
oldval = fwdm.get(key, _MISS)
oldkey = invm.get(val, _MISS)
isdupkey = oldval is not _MISS
isdupval = oldkey is not _MISS
dedup_result = _DedupResult(isdupkey, isdupval, oldkey, oldval)
if isdupkey and isdupval:
if self._isdupitem(key, val, dedup_result):
# (key, val) duplicates an existing item -> no-op.
return _NOOP
# key and val each duplicate a different existing item.
if on_dup.kv is RAISE:
raise KeyAndValueDuplicationError(key, val)
elif on_dup.kv is IGNORE:
return _NOOP
assert on_dup.kv is OVERWRITE, 'invalid on_dup_kv: %r' % on_dup.kv
# Fall through to the return statement on the last line.
elif isdupkey:
if on_dup.key is RAISE:
raise KeyDuplicationError(key)
elif on_dup.key is IGNORE:
return _NOOP
assert on_dup.key is OVERWRITE, 'invalid on_dup.key: %r' % on_dup.key
# Fall through to the return statement on the last line.
elif isdupval:
if on_dup.val is RAISE:
raise ValueDuplicationError(val)
elif on_dup.val is IGNORE:
return _NOOP
assert on_dup.val is OVERWRITE, 'invalid on_dup.val: %r' % on_dup.val
# Fall through to the return statement on the last line.
# else neither isdupkey nor isdupval.
return dedup_result | [
"def",
"_dedup_item",
"(",
"self",
",",
"key",
",",
"val",
",",
"on_dup",
")",
":",
"fwdm",
"=",
"self",
".",
"_fwdm",
"invm",
"=",
"self",
".",
"_invm",
"oldval",
"=",
"fwdm",
".",
"get",
"(",
"key",
",",
"_MISS",
")",
"oldkey",
"=",
"invm",
".",
"get",
"(",
"val",
",",
"_MISS",
")",
"isdupkey",
"=",
"oldval",
"is",
"not",
"_MISS",
"isdupval",
"=",
"oldkey",
"is",
"not",
"_MISS",
"dedup_result",
"=",
"_DedupResult",
"(",
"isdupkey",
",",
"isdupval",
",",
"oldkey",
",",
"oldval",
")",
"if",
"isdupkey",
"and",
"isdupval",
":",
"if",
"self",
".",
"_isdupitem",
"(",
"key",
",",
"val",
",",
"dedup_result",
")",
":",
"# (key, val) duplicates an existing item -> no-op.",
"return",
"_NOOP",
"# key and val each duplicate a different existing item.",
"if",
"on_dup",
".",
"kv",
"is",
"RAISE",
":",
"raise",
"KeyAndValueDuplicationError",
"(",
"key",
",",
"val",
")",
"elif",
"on_dup",
".",
"kv",
"is",
"IGNORE",
":",
"return",
"_NOOP",
"assert",
"on_dup",
".",
"kv",
"is",
"OVERWRITE",
",",
"'invalid on_dup_kv: %r'",
"%",
"on_dup",
".",
"kv",
"# Fall through to the return statement on the last line.",
"elif",
"isdupkey",
":",
"if",
"on_dup",
".",
"key",
"is",
"RAISE",
":",
"raise",
"KeyDuplicationError",
"(",
"key",
")",
"elif",
"on_dup",
".",
"key",
"is",
"IGNORE",
":",
"return",
"_NOOP",
"assert",
"on_dup",
".",
"key",
"is",
"OVERWRITE",
",",
"'invalid on_dup.key: %r'",
"%",
"on_dup",
".",
"key",
"# Fall through to the return statement on the last line.",
"elif",
"isdupval",
":",
"if",
"on_dup",
".",
"val",
"is",
"RAISE",
":",
"raise",
"ValueDuplicationError",
"(",
"val",
")",
"elif",
"on_dup",
".",
"val",
"is",
"IGNORE",
":",
"return",
"_NOOP",
"assert",
"on_dup",
".",
"val",
"is",
"OVERWRITE",
",",
"'invalid on_dup.val: %r'",
"%",
"on_dup",
".",
"val",
"# Fall through to the return statement on the last line.",
"# else neither isdupkey nor isdupval.",
"return",
"dedup_result"
] | Check *key* and *val* for any duplication in self.
Handle any duplication as per the duplication policies given in *on_dup*.
(key, val) already present is construed as a no-op, not a duplication.
If duplication is found and the corresponding duplication policy is
:attr:`~bidict.RAISE`, raise the appropriate error.
If duplication is found and the corresponding duplication policy is
:attr:`~bidict.IGNORE`, return *None*.
If duplication is found and the corresponding duplication policy is
:attr:`~bidict.OVERWRITE`,
or if no duplication is found,
return the _DedupResult *(isdupkey, isdupval, oldkey, oldval)*. | [
"Check",
"*",
"key",
"*",
"and",
"*",
"val",
"*",
"for",
"any",
"duplication",
"in",
"self",
"."
] | python | test |
ralphje/imagemounter | imagemounter/parser.py | https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/parser.py#L109-L119 | def mount_disks(self):
"""Mounts all disks in the parser, i.e. calling :func:`Disk.mount` on all underlying disks. You probably want to
use :func:`init` instead.
:return: whether all mounts have succeeded
:rtype: bool"""
result = True
for disk in self.disks:
result = disk.mount() and result
return result | [
"def",
"mount_disks",
"(",
"self",
")",
":",
"result",
"=",
"True",
"for",
"disk",
"in",
"self",
".",
"disks",
":",
"result",
"=",
"disk",
".",
"mount",
"(",
")",
"and",
"result",
"return",
"result"
] | Mounts all disks in the parser, i.e. calling :func:`Disk.mount` on all underlying disks. You probably want to
use :func:`init` instead.
:return: whether all mounts have succeeded
:rtype: bool | [
"Mounts",
"all",
"disks",
"in",
"the",
"parser",
"i",
".",
"e",
".",
"calling",
":",
"func",
":",
"Disk",
".",
"mount",
"on",
"all",
"underlying",
"disks",
".",
"You",
"probably",
"want",
"to",
"use",
":",
"func",
":",
"init",
"instead",
"."
] | python | train |
LogicalDash/LiSE | allegedb/allegedb/cache.py | https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/allegedb/allegedb/cache.py#L765-L780 | def retrieve(self, *args):
"""Get a value previously .store(...)'d.
Needs at least five arguments. The -1th is the tick
within the turn you want,
the -2th is that turn, the -3th is the branch,
and the -4th is the key. All other arguments identify
the entity that the key is in.
"""
ret = self._base_retrieve(args)
if ret is None:
raise HistoryError("Set, then deleted", deleted=True)
elif ret is KeyError:
raise ret
return ret | [
"def",
"retrieve",
"(",
"self",
",",
"*",
"args",
")",
":",
"ret",
"=",
"self",
".",
"_base_retrieve",
"(",
"args",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"HistoryError",
"(",
"\"Set, then deleted\"",
",",
"deleted",
"=",
"True",
")",
"elif",
"ret",
"is",
"KeyError",
":",
"raise",
"ret",
"return",
"ret"
] | Get a value previously .store(...)'d.
Needs at least five arguments. The -1th is the tick
within the turn you want,
the -2th is that turn, the -3th is the branch,
and the -4th is the key. All other arguments identify
the entity that the key is in. | [
"Get",
"a",
"value",
"previously",
".",
"store",
"(",
"...",
")",
"d",
"."
] | python | train |
boakley/robotframework-hub | rfhub/kwdb.py | https://github.com/boakley/robotframework-hub/blob/f3dc7562fe6218a7b8d7aac7b9ef234e1a573f7c/rfhub/kwdb.py#L86-L111 | def on_change(self, path, event_type):
"""Respond to changes in the file system
This method will be given the path to a file that
has changed on disk. We need to reload the keywords
from that file
"""
# I can do all this work in a sql statement, but
# for debugging it's easier to do it in stages.
sql = """SELECT collection_id
FROM collection_table
WHERE path == ?
"""
cursor = self._execute(sql, (path,))
results = cursor.fetchall()
# there should always be exactly one result, but
# there's no harm in using a loop to process the
# single result
for result in results:
collection_id = result[0]
# remove all keywords in this collection
sql = """DELETE from keyword_table
WHERE collection_id == ?
"""
cursor = self._execute(sql, (collection_id,))
self._load_keywords(collection_id, path=path) | [
"def",
"on_change",
"(",
"self",
",",
"path",
",",
"event_type",
")",
":",
"# I can do all this work in a sql statement, but",
"# for debugging it's easier to do it in stages.",
"sql",
"=",
"\"\"\"SELECT collection_id\n FROM collection_table\n WHERE path == ?\n \"\"\"",
"cursor",
"=",
"self",
".",
"_execute",
"(",
"sql",
",",
"(",
"path",
",",
")",
")",
"results",
"=",
"cursor",
".",
"fetchall",
"(",
")",
"# there should always be exactly one result, but",
"# there's no harm in using a loop to process the",
"# single result",
"for",
"result",
"in",
"results",
":",
"collection_id",
"=",
"result",
"[",
"0",
"]",
"# remove all keywords in this collection",
"sql",
"=",
"\"\"\"DELETE from keyword_table\n WHERE collection_id == ?\n \"\"\"",
"cursor",
"=",
"self",
".",
"_execute",
"(",
"sql",
",",
"(",
"collection_id",
",",
")",
")",
"self",
".",
"_load_keywords",
"(",
"collection_id",
",",
"path",
"=",
"path",
")"
] | Respond to changes in the file system
This method will be given the path to a file that
has changed on disk. We need to reload the keywords
from that file | [
"Respond",
"to",
"changes",
"in",
"the",
"file",
"system"
] | python | train |
xingjiepan/cylinder_fitting | cylinder_fitting/geometry.py | https://github.com/xingjiepan/cylinder_fitting/blob/f96d79732bc49cbc0cb4b39f008af7ce42aeb213/cylinder_fitting/geometry.py#L23-L29 | def point_line_distance(p, l_p, l_v):
'''Calculate the distance between a point and a line defined
by a point and a direction vector.
'''
l_v = normalize(l_v)
u = p - l_p
return np.linalg.norm(u - np.dot(u, l_v) * l_v) | [
"def",
"point_line_distance",
"(",
"p",
",",
"l_p",
",",
"l_v",
")",
":",
"l_v",
"=",
"normalize",
"(",
"l_v",
")",
"u",
"=",
"p",
"-",
"l_p",
"return",
"np",
".",
"linalg",
".",
"norm",
"(",
"u",
"-",
"np",
".",
"dot",
"(",
"u",
",",
"l_v",
")",
"*",
"l_v",
")"
] | Calculate the distance between a point and a line defined
by a point and a direction vector. | [
"Calculate",
"the",
"distance",
"between",
"a",
"point",
"and",
"a",
"line",
"defined",
"by",
"a",
"point",
"and",
"a",
"direction",
"vector",
"."
] | python | train |
cggh/scikit-allel | allel/stats/sf.py | https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/sf.py#L638-L675 | def plot_sfs_folded_scaled(*args, **kwargs):
"""Plot a folded scaled site frequency spectrum.
Parameters
----------
s : array_like, int, shape (n_chromosomes/2,)
Site frequency spectrum.
yscale : string, optional
Y axis scale.
bins : int or array_like, int, optional
Allele count bins.
n : int, optional
Number of chromosomes sampled. If provided, X axis will be plotted
as allele frequency, otherwise as allele count.
clip_endpoints : bool, optional
If True, do not plot first and last values from frequency spectrum.
label : string, optional
Label for data series in plot.
plot_kwargs : dict-like
Additional keyword arguments, passed through to ax.plot().
ax : axes, optional
Axes on which to draw. If not provided, a new figure will be created.
Returns
-------
ax : axes
The axes on which the plot was drawn.
"""
kwargs.setdefault('yscale', 'linear')
ax = plot_sfs_folded(*args, **kwargs)
ax.set_ylabel('scaled site frequency')
n = kwargs.get('n', None)
if n:
ax.set_xlabel('minor allele frequency')
else:
ax.set_xlabel('minor allele count')
return ax | [
"def",
"plot_sfs_folded_scaled",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"setdefault",
"(",
"'yscale'",
",",
"'linear'",
")",
"ax",
"=",
"plot_sfs_folded",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"ax",
".",
"set_ylabel",
"(",
"'scaled site frequency'",
")",
"n",
"=",
"kwargs",
".",
"get",
"(",
"'n'",
",",
"None",
")",
"if",
"n",
":",
"ax",
".",
"set_xlabel",
"(",
"'minor allele frequency'",
")",
"else",
":",
"ax",
".",
"set_xlabel",
"(",
"'minor allele count'",
")",
"return",
"ax"
] | Plot a folded scaled site frequency spectrum.
Parameters
----------
s : array_like, int, shape (n_chromosomes/2,)
Site frequency spectrum.
yscale : string, optional
Y axis scale.
bins : int or array_like, int, optional
Allele count bins.
n : int, optional
Number of chromosomes sampled. If provided, X axis will be plotted
as allele frequency, otherwise as allele count.
clip_endpoints : bool, optional
If True, do not plot first and last values from frequency spectrum.
label : string, optional
Label for data series in plot.
plot_kwargs : dict-like
Additional keyword arguments, passed through to ax.plot().
ax : axes, optional
Axes on which to draw. If not provided, a new figure will be created.
Returns
-------
ax : axes
The axes on which the plot was drawn. | [
"Plot",
"a",
"folded",
"scaled",
"site",
"frequency",
"spectrum",
"."
] | python | train |
fpoirotte/sphinxcontrib-varlinks | sphinxcontrib/varlinks.py | https://github.com/fpoirotte/sphinxcontrib-varlinks/blob/836899486e841fee4bac32a9d57da2786b2045c6/sphinxcontrib/varlinks.py#L92-L123 | def apply(self):
"""Replace substitutions in hyperlinks with their contents"""
# In this phase, we replace the substitutions in hyperlinks
# with the contents of the sub-nodes introduced during phase 1.
# We also remove those temporary nodes from the tree.
subst_re = re.compile(self.subst_pattern)
# Apply the substitutions to hyperlink references.
for link in self.document.traverse(nodes.reference):
substitutions = link.get('varlinks')
if not substitutions:
continue
replacer = self._replace(substitutions, link.children, 1)
link['refuri'] = subst_re.sub(replacer, link['refuri'])
content = subst_re.sub(replacer, link[0])
# Cleanup the temporary nodes and recreate the node's content.
link.clear()
del link['varlinks']
link.append(nodes.Text(content))
# Do the same with hyperlink targets.
for link in self.document.traverse(nodes.target):
substitutions = link.get('varlinks')
if not substitutions:
continue
replacer = self._replace(substitutions, link.children, 0)
link['refuri'] = subst_re.sub(replacer, link['refuri'])
# Cleanup the temporary nodes.
link.clear()
del link['varlinks'] | [
"def",
"apply",
"(",
"self",
")",
":",
"# In this phase, we replace the substitutions in hyperlinks",
"# with the contents of the sub-nodes introduced during phase 1.",
"# We also remove those temporary nodes from the tree.",
"subst_re",
"=",
"re",
".",
"compile",
"(",
"self",
".",
"subst_pattern",
")",
"# Apply the substitutions to hyperlink references.",
"for",
"link",
"in",
"self",
".",
"document",
".",
"traverse",
"(",
"nodes",
".",
"reference",
")",
":",
"substitutions",
"=",
"link",
".",
"get",
"(",
"'varlinks'",
")",
"if",
"not",
"substitutions",
":",
"continue",
"replacer",
"=",
"self",
".",
"_replace",
"(",
"substitutions",
",",
"link",
".",
"children",
",",
"1",
")",
"link",
"[",
"'refuri'",
"]",
"=",
"subst_re",
".",
"sub",
"(",
"replacer",
",",
"link",
"[",
"'refuri'",
"]",
")",
"content",
"=",
"subst_re",
".",
"sub",
"(",
"replacer",
",",
"link",
"[",
"0",
"]",
")",
"# Cleanup the temporary nodes and recreate the node's content.",
"link",
".",
"clear",
"(",
")",
"del",
"link",
"[",
"'varlinks'",
"]",
"link",
".",
"append",
"(",
"nodes",
".",
"Text",
"(",
"content",
")",
")",
"# Do the same with hyperlink targets.",
"for",
"link",
"in",
"self",
".",
"document",
".",
"traverse",
"(",
"nodes",
".",
"target",
")",
":",
"substitutions",
"=",
"link",
".",
"get",
"(",
"'varlinks'",
")",
"if",
"not",
"substitutions",
":",
"continue",
"replacer",
"=",
"self",
".",
"_replace",
"(",
"substitutions",
",",
"link",
".",
"children",
",",
"0",
")",
"link",
"[",
"'refuri'",
"]",
"=",
"subst_re",
".",
"sub",
"(",
"replacer",
",",
"link",
"[",
"'refuri'",
"]",
")",
"# Cleanup the temporary nodes.",
"link",
".",
"clear",
"(",
")",
"del",
"link",
"[",
"'varlinks'",
"]"
] | Replace substitutions in hyperlinks with their contents | [
"Replace",
"substitutions",
"in",
"hyperlinks",
"with",
"their",
"contents"
] | python | train |
kratsg/ironman | ironman/utilities.py | https://github.com/kratsg/ironman/blob/7d67b79970870e7e5520181d2afa3f423e46eb3a/ironman/utilities.py#L7-L10 | def byteswap(data, word_size=4):
""" Swap the byte-ordering in a packet with N=4 bytes per word
"""
return reduce(lambda x,y: x+''.join(reversed(y)), chunks(data, word_size), '') | [
"def",
"byteswap",
"(",
"data",
",",
"word_size",
"=",
"4",
")",
":",
"return",
"reduce",
"(",
"lambda",
"x",
",",
"y",
":",
"x",
"+",
"''",
".",
"join",
"(",
"reversed",
"(",
"y",
")",
")",
",",
"chunks",
"(",
"data",
",",
"word_size",
")",
",",
"''",
")"
] | Swap the byte-ordering in a packet with N=4 bytes per word | [
"Swap",
"the",
"byte",
"-",
"ordering",
"in",
"a",
"packet",
"with",
"N",
"=",
"4",
"bytes",
"per",
"word"
] | python | train |
theduke/django-baseline | django_baseline/template.py | https://github.com/theduke/django-baseline/blob/7be8b956e53c70b35f34e1783a8fe8f716955afb/django_baseline/template.py#L5-L24 | def render_template(tpl, context):
'''
A shortcut function to render a partial template with context and return
the output.
'''
templates = [tpl] if type(tpl) != list else tpl
tpl_instance = None
for tpl in templates:
try:
tpl_instance = template.loader.get_template(tpl)
break
except template.TemplateDoesNotExist:
pass
if not tpl_instance:
raise Exception('Template does not exist: ' + templates[-1])
return tpl_instance.render(template.Context(context)) | [
"def",
"render_template",
"(",
"tpl",
",",
"context",
")",
":",
"templates",
"=",
"[",
"tpl",
"]",
"if",
"type",
"(",
"tpl",
")",
"!=",
"list",
"else",
"tpl",
"tpl_instance",
"=",
"None",
"for",
"tpl",
"in",
"templates",
":",
"try",
":",
"tpl_instance",
"=",
"template",
".",
"loader",
".",
"get_template",
"(",
"tpl",
")",
"break",
"except",
"template",
".",
"TemplateDoesNotExist",
":",
"pass",
"if",
"not",
"tpl_instance",
":",
"raise",
"Exception",
"(",
"'Template does not exist: '",
"+",
"templates",
"[",
"-",
"1",
"]",
")",
"return",
"tpl_instance",
".",
"render",
"(",
"template",
".",
"Context",
"(",
"context",
")",
")"
] | A shortcut function to render a partial template with context and return
the output. | [
"A",
"shortcut",
"function",
"to",
"render",
"a",
"partial",
"template",
"with",
"context",
"and",
"return",
"the",
"output",
"."
] | python | test |
secdev/scapy | scapy/contrib/http2.py | https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/contrib/http2.py#L1111-L1137 | def huffman_conv2str(cls, bit_str, bit_len):
# type: (int, int) -> str
""" huffman_conv2str converts a bitstring of bit_len bitlength into a
binary string. It DOES NOT compress/decompress the bitstring!
@param int bit_str: the bitstring to convert.
@param int bit_len: the bitlength of bit_str.
@return str: the converted bitstring as a bytestring.
@raise AssertionError
"""
assert(bit_str >= 0)
assert(bit_len >= 0)
byte_len = bit_len // 8
rem_bit = bit_len % 8
if rem_bit != 0:
bit_str <<= 8 - rem_bit
byte_len += 1
# As usual the list/join tricks is a performance trick to build
# efficiently a Python string
s = [] # type: List[str]
i = 0
while i < byte_len:
s.insert(0, chb((bit_str >> (i * 8)) & 0xFF))
i += 1
return b''.join(s) | [
"def",
"huffman_conv2str",
"(",
"cls",
",",
"bit_str",
",",
"bit_len",
")",
":",
"# type: (int, int) -> str",
"assert",
"(",
"bit_str",
">=",
"0",
")",
"assert",
"(",
"bit_len",
">=",
"0",
")",
"byte_len",
"=",
"bit_len",
"//",
"8",
"rem_bit",
"=",
"bit_len",
"%",
"8",
"if",
"rem_bit",
"!=",
"0",
":",
"bit_str",
"<<=",
"8",
"-",
"rem_bit",
"byte_len",
"+=",
"1",
"# As usual the list/join tricks is a performance trick to build",
"# efficiently a Python string",
"s",
"=",
"[",
"]",
"# type: List[str]",
"i",
"=",
"0",
"while",
"i",
"<",
"byte_len",
":",
"s",
".",
"insert",
"(",
"0",
",",
"chb",
"(",
"(",
"bit_str",
">>",
"(",
"i",
"*",
"8",
")",
")",
"&",
"0xFF",
")",
")",
"i",
"+=",
"1",
"return",
"b''",
".",
"join",
"(",
"s",
")"
] | huffman_conv2str converts a bitstring of bit_len bitlength into a
binary string. It DOES NOT compress/decompress the bitstring!
@param int bit_str: the bitstring to convert.
@param int bit_len: the bitlength of bit_str.
@return str: the converted bitstring as a bytestring.
@raise AssertionError | [
"huffman_conv2str",
"converts",
"a",
"bitstring",
"of",
"bit_len",
"bitlength",
"into",
"a",
"binary",
"string",
".",
"It",
"DOES",
"NOT",
"compress",
"/",
"decompress",
"the",
"bitstring!"
] | python | train |
inspirehep/inspire-utils | inspire_utils/urls.py | https://github.com/inspirehep/inspire-utils/blob/b0b5983c58700735dfde75e4c8bd32834f2473d4/inspire_utils/urls.py#L31-L51 | def ensure_scheme(url, default_scheme='http'):
"""Adds a scheme to a url if not present.
Args:
url (string): a url, assumed to start with netloc
default_scheme (string): a scheme to be added
Returns:
string: URL with a scheme
"""
parsed = urlsplit(url, scheme=default_scheme)
if not parsed.netloc:
parsed = SplitResult(
scheme=parsed.scheme,
netloc=parsed.path,
path='',
query=parsed.query,
fragment=parsed.fragment
)
return urlunsplit(parsed) | [
"def",
"ensure_scheme",
"(",
"url",
",",
"default_scheme",
"=",
"'http'",
")",
":",
"parsed",
"=",
"urlsplit",
"(",
"url",
",",
"scheme",
"=",
"default_scheme",
")",
"if",
"not",
"parsed",
".",
"netloc",
":",
"parsed",
"=",
"SplitResult",
"(",
"scheme",
"=",
"parsed",
".",
"scheme",
",",
"netloc",
"=",
"parsed",
".",
"path",
",",
"path",
"=",
"''",
",",
"query",
"=",
"parsed",
".",
"query",
",",
"fragment",
"=",
"parsed",
".",
"fragment",
")",
"return",
"urlunsplit",
"(",
"parsed",
")"
] | Adds a scheme to a url if not present.
Args:
url (string): a url, assumed to start with netloc
default_scheme (string): a scheme to be added
Returns:
string: URL with a scheme | [
"Adds",
"a",
"scheme",
"to",
"a",
"url",
"if",
"not",
"present",
"."
] | python | train |
klmitch/turnstile | turnstile/config.py | https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/config.py#L228-L254 | def to_bool(value, do_raise=True):
"""Convert a string to a boolean value.
If the string consists of digits, the integer value of the string
is coerced to a boolean value. Otherwise, any of the strings "t",
"true", "on", "y", and "yes" are considered True and any of the
strings "f", "false", "off", "n", and "no" are considered False.
A ValueError will be raised for any other value.
"""
value = value.lower()
# Try it as an integer
if value.isdigit():
return bool(int(value))
# OK, check it against the true/false values...
if value in _str_true:
return True
elif value in _str_false:
return False
# Not recognized
if do_raise:
raise ValueError("invalid literal for to_bool(): %r" % value)
return False | [
"def",
"to_bool",
"(",
"value",
",",
"do_raise",
"=",
"True",
")",
":",
"value",
"=",
"value",
".",
"lower",
"(",
")",
"# Try it as an integer",
"if",
"value",
".",
"isdigit",
"(",
")",
":",
"return",
"bool",
"(",
"int",
"(",
"value",
")",
")",
"# OK, check it against the true/false values...",
"if",
"value",
"in",
"_str_true",
":",
"return",
"True",
"elif",
"value",
"in",
"_str_false",
":",
"return",
"False",
"# Not recognized",
"if",
"do_raise",
":",
"raise",
"ValueError",
"(",
"\"invalid literal for to_bool(): %r\"",
"%",
"value",
")",
"return",
"False"
] | Convert a string to a boolean value.
If the string consists of digits, the integer value of the string
is coerced to a boolean value. Otherwise, any of the strings "t",
"true", "on", "y", and "yes" are considered True and any of the
strings "f", "false", "off", "n", and "no" are considered False.
A ValueError will be raised for any other value. | [
"Convert",
"a",
"string",
"to",
"a",
"boolean",
"value",
"."
] | python | train |
scopus-api/scopus | scopus/deprecated_/scopus_api.py | https://github.com/scopus-api/scopus/blob/27ce02dd3095bfdab9d3e8475543d7c17767d1ab/scopus/deprecated_/scopus_api.py#L164-L174 | def references(self):
"""Return EIDs of references of an article.
Note: Requires the FULL view of the article.
"""
refs = self.items.find('bibrecord/tail/bibliography', ns)
if refs is not None:
eids = [r.find("ref-info/refd-itemidlist/itemid", ns).text for r
in refs.findall("reference", ns)]
return ["2-s2.0-" + eid for eid in eids]
else:
return None | [
"def",
"references",
"(",
"self",
")",
":",
"refs",
"=",
"self",
".",
"items",
".",
"find",
"(",
"'bibrecord/tail/bibliography'",
",",
"ns",
")",
"if",
"refs",
"is",
"not",
"None",
":",
"eids",
"=",
"[",
"r",
".",
"find",
"(",
"\"ref-info/refd-itemidlist/itemid\"",
",",
"ns",
")",
".",
"text",
"for",
"r",
"in",
"refs",
".",
"findall",
"(",
"\"reference\"",
",",
"ns",
")",
"]",
"return",
"[",
"\"2-s2.0-\"",
"+",
"eid",
"for",
"eid",
"in",
"eids",
"]",
"else",
":",
"return",
"None"
] | Return EIDs of references of an article.
Note: Requires the FULL view of the article. | [
"Return",
"EIDs",
"of",
"references",
"of",
"an",
"article",
".",
"Note",
":",
"Requires",
"the",
"FULL",
"view",
"of",
"the",
"article",
"."
] | python | train |
openeemeter/eemeter | eemeter/caltrack/usage_per_day.py | https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/caltrack/usage_per_day.py#L1303-L1344 | def get_hdd_only_candidate_models(
data, minimum_non_zero_hdd, minimum_total_hdd, beta_hdd_maximum_p_value, weights_col
):
"""
Parameters
----------
data : :any:`pandas.DataFrame`
A DataFrame containing at least the column ``meter_value`` and 1 to n
columns with names of the form ``hdd_<balance_point>``. All columns
with names of this form will be used to fit a candidate model.
DataFrames of this form can be made using the
:any:`eemeter.create_caltrack_daily_design_matrix` or
:any:`eemeter.create_caltrack_billing_design_matrix` methods.
minimum_non_zero_hdd : :any:`int`
Minimum allowable number of non-zero heating degree day values.
minimum_total_hdd : :any:`float`
Minimum allowable total sum of heating degree day values.
beta_hdd_maximum_p_value : :any:`float`
The maximum allowable p-value of the beta hdd parameter.
weights_col : :any:`str` or None
The name of the column (if any) in ``data`` to use as weights.
Returns
-------
candidate_models : :any:`list` of :any:`CalTRACKUsagePerDayCandidateModel`
A list of hdd-only candidate models, with any associated warnings.
"""
balance_points = [int(col[4:]) for col in data.columns if col.startswith("hdd")]
candidate_models = [
get_single_hdd_only_candidate_model(
data,
minimum_non_zero_hdd,
minimum_total_hdd,
beta_hdd_maximum_p_value,
weights_col,
balance_point,
)
for balance_point in balance_points
]
return candidate_models | [
"def",
"get_hdd_only_candidate_models",
"(",
"data",
",",
"minimum_non_zero_hdd",
",",
"minimum_total_hdd",
",",
"beta_hdd_maximum_p_value",
",",
"weights_col",
")",
":",
"balance_points",
"=",
"[",
"int",
"(",
"col",
"[",
"4",
":",
"]",
")",
"for",
"col",
"in",
"data",
".",
"columns",
"if",
"col",
".",
"startswith",
"(",
"\"hdd\"",
")",
"]",
"candidate_models",
"=",
"[",
"get_single_hdd_only_candidate_model",
"(",
"data",
",",
"minimum_non_zero_hdd",
",",
"minimum_total_hdd",
",",
"beta_hdd_maximum_p_value",
",",
"weights_col",
",",
"balance_point",
",",
")",
"for",
"balance_point",
"in",
"balance_points",
"]",
"return",
"candidate_models"
] | Parameters
----------
data : :any:`pandas.DataFrame`
A DataFrame containing at least the column ``meter_value`` and 1 to n
columns with names of the form ``hdd_<balance_point>``. All columns
with names of this form will be used to fit a candidate model.
DataFrames of this form can be made using the
:any:`eemeter.create_caltrack_daily_design_matrix` or
:any:`eemeter.create_caltrack_billing_design_matrix` methods.
minimum_non_zero_hdd : :any:`int`
Minimum allowable number of non-zero heating degree day values.
minimum_total_hdd : :any:`float`
Minimum allowable total sum of heating degree day values.
beta_hdd_maximum_p_value : :any:`float`
The maximum allowable p-value of the beta hdd parameter.
weights_col : :any:`str` or None
The name of the column (if any) in ``data`` to use as weights.
Returns
-------
candidate_models : :any:`list` of :any:`CalTRACKUsagePerDayCandidateModel`
A list of hdd-only candidate models, with any associated warnings. | [
"Parameters",
"----------",
"data",
":",
":",
"any",
":",
"pandas",
".",
"DataFrame",
"A",
"DataFrame",
"containing",
"at",
"least",
"the",
"column",
"meter_value",
"and",
"1",
"to",
"n",
"columns",
"with",
"names",
"of",
"the",
"form",
"hdd_<balance_point",
">",
".",
"All",
"columns",
"with",
"names",
"of",
"this",
"form",
"will",
"be",
"used",
"to",
"fit",
"a",
"candidate",
"model",
".",
"DataFrames",
"of",
"this",
"form",
"can",
"be",
"made",
"using",
"the",
":",
"any",
":",
"eemeter",
".",
"create_caltrack_daily_design_matrix",
"or",
":",
"any",
":",
"eemeter",
".",
"create_caltrack_billing_design_matrix",
"methods",
".",
"minimum_non_zero_hdd",
":",
":",
"any",
":",
"int",
"Minimum",
"allowable",
"number",
"of",
"non",
"-",
"zero",
"heating",
"degree",
"day",
"values",
".",
"minimum_total_hdd",
":",
":",
"any",
":",
"float",
"Minimum",
"allowable",
"total",
"sum",
"of",
"heating",
"degree",
"day",
"values",
".",
"beta_hdd_maximum_p_value",
":",
":",
"any",
":",
"float",
"The",
"maximum",
"allowable",
"p",
"-",
"value",
"of",
"the",
"beta",
"hdd",
"parameter",
".",
"weights_col",
":",
":",
"any",
":",
"str",
"or",
"None",
"The",
"name",
"of",
"the",
"column",
"(",
"if",
"any",
")",
"in",
"data",
"to",
"use",
"as",
"weights",
"."
] | python | train |
pescadores/pescador | examples/mux/mux_files_example.py | https://github.com/pescadores/pescador/blob/786e2b5f882d13ea563769fbc7ad0a0a10c3553d/examples/mux/mux_files_example.py#L60-L71 | def npz_generator(npz_path):
"""Generate data from an npz file."""
npz_data = np.load(npz_path)
X = npz_data['X']
# Y is a binary maxtrix with shape=(n, k), each y will have shape=(k,)
y = npz_data['Y']
n = X.shape[0]
while True:
i = np.random.randint(0, n)
yield {'X': X[i], 'Y': y[i]} | [
"def",
"npz_generator",
"(",
"npz_path",
")",
":",
"npz_data",
"=",
"np",
".",
"load",
"(",
"npz_path",
")",
"X",
"=",
"npz_data",
"[",
"'X'",
"]",
"# Y is a binary maxtrix with shape=(n, k), each y will have shape=(k,)",
"y",
"=",
"npz_data",
"[",
"'Y'",
"]",
"n",
"=",
"X",
".",
"shape",
"[",
"0",
"]",
"while",
"True",
":",
"i",
"=",
"np",
".",
"random",
".",
"randint",
"(",
"0",
",",
"n",
")",
"yield",
"{",
"'X'",
":",
"X",
"[",
"i",
"]",
",",
"'Y'",
":",
"y",
"[",
"i",
"]",
"}"
] | Generate data from an npz file. | [
"Generate",
"data",
"from",
"an",
"npz",
"file",
"."
] | python | train |
tjcsl/ion | intranet/apps/users/templatetags/users.py | https://github.com/tjcsl/ion/blob/5d722b0725d572039bb0929fd5715a4070c82c72/intranet/apps/users/templatetags/users.py#L21-L26 | def argument_request_user(obj, func_name):
"""Pass request.user as an argument to the given function call."""
func = getattr(obj, func_name)
request = threadlocals.request()
if request:
return func(request.user) | [
"def",
"argument_request_user",
"(",
"obj",
",",
"func_name",
")",
":",
"func",
"=",
"getattr",
"(",
"obj",
",",
"func_name",
")",
"request",
"=",
"threadlocals",
".",
"request",
"(",
")",
"if",
"request",
":",
"return",
"func",
"(",
"request",
".",
"user",
")"
] | Pass request.user as an argument to the given function call. | [
"Pass",
"request",
".",
"user",
"as",
"an",
"argument",
"to",
"the",
"given",
"function",
"call",
"."
] | python | train |
PierreRust/apigpio | apigpio/apigpio.py | https://github.com/PierreRust/apigpio/blob/2b969f40e06219b43a43498d8baf87f5935ceab2/apigpio/apigpio.py#L1033-L1054 | def set_servo_pulsewidth(self, user_gpio, pulsewidth):
"""
Starts (500-2500) or stops (0) servo pulses on the GPIO.
user_gpio:= 0-31.
pulsewidth:= 0 (off),
500 (most anti-clockwise) - 2500 (most clockwise).
The selected pulsewidth will continue to be transmitted until
changed by a subsequent call to set_servo_pulsewidth.
The pulsewidths supported by servos varies and should probably
be determined by experiment. A value of 1500 should always be
safe and represents the mid-point of rotation.
You can DAMAGE a servo if you command it to move beyond its
limits.
...
yield from pi.set_servo_pulsewidth(17, 0) # off
yield from pi.set_servo_pulsewidth(17, 1000) # safe anti-clockwise
yield from pi.set_servo_pulsewidth(17, 1500) # centre
yield from pi.set_servo_pulsewidth(17, 2000) # safe clockwise
...
"""
res = yield from self._pigpio_aio_command(_PI_CMD_SERVO, user_gpio, int(pulsewidth))
return _u2i(res) | [
"def",
"set_servo_pulsewidth",
"(",
"self",
",",
"user_gpio",
",",
"pulsewidth",
")",
":",
"res",
"=",
"yield",
"from",
"self",
".",
"_pigpio_aio_command",
"(",
"_PI_CMD_SERVO",
",",
"user_gpio",
",",
"int",
"(",
"pulsewidth",
")",
")",
"return",
"_u2i",
"(",
"res",
")"
] | Starts (500-2500) or stops (0) servo pulses on the GPIO.
user_gpio:= 0-31.
pulsewidth:= 0 (off),
500 (most anti-clockwise) - 2500 (most clockwise).
The selected pulsewidth will continue to be transmitted until
changed by a subsequent call to set_servo_pulsewidth.
The pulsewidths supported by servos varies and should probably
be determined by experiment. A value of 1500 should always be
safe and represents the mid-point of rotation.
You can DAMAGE a servo if you command it to move beyond its
limits.
...
yield from pi.set_servo_pulsewidth(17, 0) # off
yield from pi.set_servo_pulsewidth(17, 1000) # safe anti-clockwise
yield from pi.set_servo_pulsewidth(17, 1500) # centre
yield from pi.set_servo_pulsewidth(17, 2000) # safe clockwise
... | [
"Starts",
"(",
"500",
"-",
"2500",
")",
"or",
"stops",
"(",
"0",
")",
"servo",
"pulses",
"on",
"the",
"GPIO",
".",
"user_gpio",
":",
"=",
"0",
"-",
"31",
".",
"pulsewidth",
":",
"=",
"0",
"(",
"off",
")",
"500",
"(",
"most",
"anti",
"-",
"clockwise",
")",
"-",
"2500",
"(",
"most",
"clockwise",
")",
".",
"The",
"selected",
"pulsewidth",
"will",
"continue",
"to",
"be",
"transmitted",
"until",
"changed",
"by",
"a",
"subsequent",
"call",
"to",
"set_servo_pulsewidth",
".",
"The",
"pulsewidths",
"supported",
"by",
"servos",
"varies",
"and",
"should",
"probably",
"be",
"determined",
"by",
"experiment",
".",
"A",
"value",
"of",
"1500",
"should",
"always",
"be",
"safe",
"and",
"represents",
"the",
"mid",
"-",
"point",
"of",
"rotation",
".",
"You",
"can",
"DAMAGE",
"a",
"servo",
"if",
"you",
"command",
"it",
"to",
"move",
"beyond",
"its",
"limits",
".",
"...",
"yield",
"from",
"pi",
".",
"set_servo_pulsewidth",
"(",
"17",
"0",
")",
"#",
"off",
"yield",
"from",
"pi",
".",
"set_servo_pulsewidth",
"(",
"17",
"1000",
")",
"#",
"safe",
"anti",
"-",
"clockwise",
"yield",
"from",
"pi",
".",
"set_servo_pulsewidth",
"(",
"17",
"1500",
")",
"#",
"centre",
"yield",
"from",
"pi",
".",
"set_servo_pulsewidth",
"(",
"17",
"2000",
")",
"#",
"safe",
"clockwise",
"..."
] | python | train |
Brightmd/TxPx | txpx/process.py | https://github.com/Brightmd/TxPx/blob/403c18b3006fc68842ec05b259e8611fe80763aa/txpx/process.py#L86-L96 | def processEnded(self, reason):
"""
Connected process shut down
"""
log_debug("{name} process exited", name=self.name)
if self.deferred:
if reason.type == ProcessDone:
self.deferred.callback(reason.value.exitCode)
elif reason.type == ProcessTerminated:
self.deferred.errback(reason)
return self.deferred | [
"def",
"processEnded",
"(",
"self",
",",
"reason",
")",
":",
"log_debug",
"(",
"\"{name} process exited\"",
",",
"name",
"=",
"self",
".",
"name",
")",
"if",
"self",
".",
"deferred",
":",
"if",
"reason",
".",
"type",
"==",
"ProcessDone",
":",
"self",
".",
"deferred",
".",
"callback",
"(",
"reason",
".",
"value",
".",
"exitCode",
")",
"elif",
"reason",
".",
"type",
"==",
"ProcessTerminated",
":",
"self",
".",
"deferred",
".",
"errback",
"(",
"reason",
")",
"return",
"self",
".",
"deferred"
] | Connected process shut down | [
"Connected",
"process",
"shut",
"down"
] | python | train |
theislab/scanpy | scanpy/preprocessing/_simple.py | https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/preprocessing/_simple.py#L826-L878 | def scale(data, zero_center=True, max_value=None, copy=False) -> Optional[AnnData]:
"""Scale data to unit variance and zero mean.
.. note::
Variables (genes) that do not display any variation (are constant across
all observations) are retained and set to 0 during this operation. In
the future, they might be set to NaNs.
Parameters
----------
data : :class:`~anndata.AnnData`, `np.ndarray`, `sp.sparse`
The (annotated) data matrix of shape `n_obs` × `n_vars`. Rows correspond
to cells and columns to genes.
zero_center : `bool`, optional (default: `True`)
If `False`, omit zero-centering variables, which allows to handle sparse
input efficiently.
max_value : `float` or `None`, optional (default: `None`)
Clip (truncate) to this value after scaling. If `None`, do not clip.
copy : `bool`, optional (default: `False`)
If an :class:`~anndata.AnnData` is passed, determines whether a copy
is returned.
Returns
-------
Depending on `copy` returns or updates `adata` with a scaled `adata.X`.
"""
if isinstance(data, AnnData):
adata = data.copy() if copy else data
# need to add the following here to make inplace logic work
if zero_center and issparse(adata.X):
logg.msg(
'... scale_data: as `zero_center=True`, sparse input is '
'densified and may lead to large memory consumption')
adata.X = adata.X.toarray()
scale(adata.X, zero_center=zero_center, max_value=max_value, copy=False)
return adata if copy else None
X = data.copy() if copy else data # proceed with the data matrix
zero_center = zero_center if zero_center is not None else False if issparse(X) else True
if not zero_center and max_value is not None:
logg.msg(
'... scale_data: be careful when using `max_value` without `zero_center`',
v=4)
if max_value is not None:
logg.msg('... clipping at max_value', max_value)
if zero_center and issparse(X):
logg.msg('... scale_data: as `zero_center=True`, sparse input is '
'densified and may lead to large memory consumption, returning copy',
v=4)
X = X.toarray()
copy = True
_scale(X, zero_center)
if max_value is not None: X[X > max_value] = max_value
return X if copy else None | [
"def",
"scale",
"(",
"data",
",",
"zero_center",
"=",
"True",
",",
"max_value",
"=",
"None",
",",
"copy",
"=",
"False",
")",
"->",
"Optional",
"[",
"AnnData",
"]",
":",
"if",
"isinstance",
"(",
"data",
",",
"AnnData",
")",
":",
"adata",
"=",
"data",
".",
"copy",
"(",
")",
"if",
"copy",
"else",
"data",
"# need to add the following here to make inplace logic work",
"if",
"zero_center",
"and",
"issparse",
"(",
"adata",
".",
"X",
")",
":",
"logg",
".",
"msg",
"(",
"'... scale_data: as `zero_center=True`, sparse input is '",
"'densified and may lead to large memory consumption'",
")",
"adata",
".",
"X",
"=",
"adata",
".",
"X",
".",
"toarray",
"(",
")",
"scale",
"(",
"adata",
".",
"X",
",",
"zero_center",
"=",
"zero_center",
",",
"max_value",
"=",
"max_value",
",",
"copy",
"=",
"False",
")",
"return",
"adata",
"if",
"copy",
"else",
"None",
"X",
"=",
"data",
".",
"copy",
"(",
")",
"if",
"copy",
"else",
"data",
"# proceed with the data matrix",
"zero_center",
"=",
"zero_center",
"if",
"zero_center",
"is",
"not",
"None",
"else",
"False",
"if",
"issparse",
"(",
"X",
")",
"else",
"True",
"if",
"not",
"zero_center",
"and",
"max_value",
"is",
"not",
"None",
":",
"logg",
".",
"msg",
"(",
"'... scale_data: be careful when using `max_value` without `zero_center`'",
",",
"v",
"=",
"4",
")",
"if",
"max_value",
"is",
"not",
"None",
":",
"logg",
".",
"msg",
"(",
"'... clipping at max_value'",
",",
"max_value",
")",
"if",
"zero_center",
"and",
"issparse",
"(",
"X",
")",
":",
"logg",
".",
"msg",
"(",
"'... scale_data: as `zero_center=True`, sparse input is '",
"'densified and may lead to large memory consumption, returning copy'",
",",
"v",
"=",
"4",
")",
"X",
"=",
"X",
".",
"toarray",
"(",
")",
"copy",
"=",
"True",
"_scale",
"(",
"X",
",",
"zero_center",
")",
"if",
"max_value",
"is",
"not",
"None",
":",
"X",
"[",
"X",
">",
"max_value",
"]",
"=",
"max_value",
"return",
"X",
"if",
"copy",
"else",
"None"
] | Scale data to unit variance and zero mean.
.. note::
Variables (genes) that do not display any variation (are constant across
all observations) are retained and set to 0 during this operation. In
the future, they might be set to NaNs.
Parameters
----------
data : :class:`~anndata.AnnData`, `np.ndarray`, `sp.sparse`
The (annotated) data matrix of shape `n_obs` × `n_vars`. Rows correspond
to cells and columns to genes.
zero_center : `bool`, optional (default: `True`)
If `False`, omit zero-centering variables, which allows to handle sparse
input efficiently.
max_value : `float` or `None`, optional (default: `None`)
Clip (truncate) to this value after scaling. If `None`, do not clip.
copy : `bool`, optional (default: `False`)
If an :class:`~anndata.AnnData` is passed, determines whether a copy
is returned.
Returns
-------
Depending on `copy` returns or updates `adata` with a scaled `adata.X`. | [
"Scale",
"data",
"to",
"unit",
"variance",
"and",
"zero",
"mean",
"."
] | python | train |
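A minimal usage sketch for the `scale` helper above. It assumes a scanpy installation that exposes the function as `sc.pp.scale` (the public path in released scanpy versions) and builds a small synthetic AnnData object; the data values are illustrative only.
import numpy as np
import scanpy as sc
from anndata import AnnData

# Synthetic 50-cell x 20-gene matrix; scale() modifies adata.X in place.
adata = AnnData(np.random.poisson(1.0, size=(50, 20)).astype(np.float32))
sc.pp.scale(adata, max_value=10)                 # unit variance, zero mean, clipped at 10

# Passing a bare matrix with copy=True returns the scaled array instead.
scaled = sc.pp.scale(adata.X.copy(), zero_center=False, copy=True)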
androguard/androguard | androguard/core/bytecodes/apk.py | https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/bytecodes/apk.py#L1011-L1036 | def is_tag_matched(self, tag, **attribute_filter):
r"""
Return true if the attributes match the attribute filter.
An attribute filter is a dictionary containing: {attribute_name: value}.
This function will return True if and only if all attributes have the same value.
This function allows to set the dictionary via kwargs, thus you can filter like this:
example::
a.is_tag_matched(tag, name="foobar", other="barfoo")
This function uses a fallback for attribute searching. It will by default use
the namespace variant but fall back to the non-namespace variant.
Thus specifying :code:`{"name": "foobar"}` will match on :code:`<bla name="foobar" \>`
as well as on :code:`<bla android:name="foobar" \>`.
:param lxml.etree.Element tag: specify the tag element
:param attribute_filter: specify the attribute filter as dictionary
"""
if len(attribute_filter) <= 0:
return True
for attr, value in attribute_filter.items():
_value = self.get_value_from_tag(tag, attr)
if _value != value:
return False
return True | [
"def",
"is_tag_matched",
"(",
"self",
",",
"tag",
",",
"*",
"*",
"attribute_filter",
")",
":",
"if",
"len",
"(",
"attribute_filter",
")",
"<=",
"0",
":",
"return",
"True",
"for",
"attr",
",",
"value",
"in",
"attribute_filter",
".",
"items",
"(",
")",
":",
"_value",
"=",
"self",
".",
"get_value_from_tag",
"(",
"tag",
",",
"attr",
")",
"if",
"_value",
"!=",
"value",
":",
"return",
"False",
"return",
"True"
] | r"""
Return true if the attributes match the attribute filter.
An attribute filter is a dictionary containing: {attribute_name: value}.
This function will return True if and only if all attributes have the same value.
This function allows to set the dictionary via kwargs, thus you can filter like this:
example::
a.is_tag_matched(tag, name="foobar", other="barfoo")
This function uses a fallback for attribute searching. It will by default use
the namespace variant but fall back to the non-namespace variant.
Thus specifying :code:`{"name": "foobar"}` will match on :code:`<bla name="foobar" \>`
as well as on :code:`<bla android:name="foobar" \>`.
:param lxml.etree.Element tag: specify the tag element
:param attribute_filter: specify the attribute filter as dictionary | [
"r",
"Return",
"true",
"if",
"the",
"attributes",
"matches",
"in",
"attribute",
"filter",
"."
] | python | train |
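The namespace-fallback lookup that `is_tag_matched` relies on can be illustrated standalone with lxml; `get_value_from_tag` below is a simplified stand-in for the androguard helper of the same name, not the library code itself.
from lxml import etree

ANDROID_NS = 'http://schemas.android.com/apk/res/android'

def get_value_from_tag(tag, attr):
    # Try the android: namespaced attribute first, then fall back to the plain one.
    value = tag.get('{{{}}}{}'.format(ANDROID_NS, attr))
    return value if value is not None else tag.get(attr)

tag = etree.fromstring(
    '<activity xmlns:android="http://schemas.android.com/apk/res/android" '
    'android:name="com.example.Main"/>')
print(get_value_from_tag(tag, 'name'))  # com.example.Main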
awslabs/sockeye | sockeye/utils.py | https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/utils.py#L503-L533 | def get_gpu_memory_usage(ctx: List[mx.context.Context]) -> Dict[int, Tuple[int, int]]:
"""
Returns used and total memory for GPUs identified by the given context list.
:param ctx: List of MXNet context devices.
:return: Dictionary of device id mapping to a tuple of (memory used, memory total).
"""
if isinstance(ctx, mx.context.Context):
ctx = [ctx]
ctx = [c for c in ctx if c.device_type == 'gpu']
if not ctx:
return {}
if shutil.which("nvidia-smi") is None:
logger.warning("Couldn't find nvidia-smi, therefore we assume no GPUs are available.")
return {}
device_ids = [c.device_id for c in ctx]
# Run from clean forkserver process to not leak any CUDA resources
mp_context = mp_utils.get_context()
result_queue = mp_context.Queue()
nvidia_smi_process = mp_context.Process(target=query_nvidia_smi, args=(device_ids, result_queue,))
nvidia_smi_process.start()
nvidia_smi_process.join()
memory_data = result_queue.get()
log_gpu_memory_usage(memory_data)
return memory_data | [
"def",
"get_gpu_memory_usage",
"(",
"ctx",
":",
"List",
"[",
"mx",
".",
"context",
".",
"Context",
"]",
")",
"->",
"Dict",
"[",
"int",
",",
"Tuple",
"[",
"int",
",",
"int",
"]",
"]",
":",
"if",
"isinstance",
"(",
"ctx",
",",
"mx",
".",
"context",
".",
"Context",
")",
":",
"ctx",
"=",
"[",
"ctx",
"]",
"ctx",
"=",
"[",
"c",
"for",
"c",
"in",
"ctx",
"if",
"c",
".",
"device_type",
"==",
"'gpu'",
"]",
"if",
"not",
"ctx",
":",
"return",
"{",
"}",
"if",
"shutil",
".",
"which",
"(",
"\"nvidia-smi\"",
")",
"is",
"None",
":",
"logger",
".",
"warning",
"(",
"\"Couldn't find nvidia-smi, therefore we assume no GPUs are available.\"",
")",
"return",
"{",
"}",
"device_ids",
"=",
"[",
"c",
".",
"device_id",
"for",
"c",
"in",
"ctx",
"]",
"# Run from clean forkserver process to not leak any CUDA resources",
"mp_context",
"=",
"mp_utils",
".",
"get_context",
"(",
")",
"result_queue",
"=",
"mp_context",
".",
"Queue",
"(",
")",
"nvidia_smi_process",
"=",
"mp_context",
".",
"Process",
"(",
"target",
"=",
"query_nvidia_smi",
",",
"args",
"=",
"(",
"device_ids",
",",
"result_queue",
",",
")",
")",
"nvidia_smi_process",
".",
"start",
"(",
")",
"nvidia_smi_process",
".",
"join",
"(",
")",
"memory_data",
"=",
"result_queue",
".",
"get",
"(",
")",
"log_gpu_memory_usage",
"(",
"memory_data",
")",
"return",
"memory_data"
] | Returns used and total memory for GPUs identified by the given context list.
:param ctx: List of MXNet context devices.
:return: Dictionary of device id mapping to a tuple of (memory used, memory total). | [
"Returns",
"used",
"and",
"total",
"memory",
"for",
"GPUs",
"identified",
"by",
"the",
"given",
"context",
"list",
"."
] | python | train |
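A usage sketch for `get_gpu_memory_usage`, assuming an MXNet installation, at least one visible GPU, and `nvidia-smi` on the PATH; the import path mirrors the module shown above (`sockeye.utils`).
import mxnet as mx
from sockeye.utils import get_gpu_memory_usage

# CPU contexts are filtered out; only GPU device ids are queried via nvidia-smi.
usage = get_gpu_memory_usage([mx.gpu(0), mx.cpu()])
for device_id, (used, total) in usage.items():
    print('gpu {}: {} / {} (units as reported by nvidia-smi)'.format(device_id, used, total))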
taskcluster/taskcluster-client.py | taskcluster/aio/queue.py | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/queue.py#L767-L785 | async def declareWorkerType(self, *args, **kwargs):
"""
Update a worker-type
Declare a workerType, supplying some details about it.
`declareWorkerType` allows updating one or more properties of a worker-type as long as the required scopes are
possessed. For example, a request to update the `gecko-b-1-w2008` worker-type within the `aws-provisioner-v1`
provisioner with a body `{description: 'This worker type is great'}` would require you to have the scope
`queue:declare-worker-type:aws-provisioner-v1/gecko-b-1-w2008#description`.
This method takes input: ``v1/update-workertype-request.json#``
This method gives output: ``v1/workertype-response.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["declareWorkerType"], *args, **kwargs) | [
"async",
"def",
"declareWorkerType",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"await",
"self",
".",
"_makeApiCall",
"(",
"self",
".",
"funcinfo",
"[",
"\"declareWorkerType\"",
"]",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Update a worker-type
Declare a workerType, supplying some details about it.
`declareWorkerType` allows updating one or more properties of a worker-type as long as the required scopes are
possessed. For example, a request to update the `gecko-b-1-w2008` worker-type within the `aws-provisioner-v1`
provisioner with a body `{description: 'This worker type is great'}` would require you to have the scope
`queue:declare-worker-type:aws-provisioner-v1/gecko-b-1-w2008#description`.
This method takes input: ``v1/update-workertype-request.json#``
This method gives output: ``v1/workertype-response.json#``
This method is ``experimental`` | [
"Update",
"a",
"worker",
"-",
"type"
] | python | train |
woolfson-group/isambard | isambard/ampal/pdb_parser.py | https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/ampal/pdb_parser.py#L337-L374 | def proc_monomer(self, monomer_info, parent, mon_cls=False):
"""Processes a records into a `Monomer`.
Parameters
----------
monomer_info : (set, OrderedDict)
Labels and data for a monomer.
parent : ampal.Polymer
`Polymer` used to assign `ampal_parent` on created
`Monomer`.
mon_cls : `Monomer class or subclass`, optional
A `Monomer` class can be defined explicitly.
"""
monomer_labels, monomer_data = monomer_info
if len(monomer_labels) > 1:
raise ValueError(
'Malformed PDB, single monomer id with '
'multiple labels. {}'.format(monomer_labels))
else:
monomer_label = list(monomer_labels)[0]
if mon_cls:
monomer_class = mon_cls
het = True
elif monomer_label[0] == 'ATOM':
if monomer_label[2] in standard_amino_acids.values():
monomer_class = Residue
else:
monomer_class = Nucleotide
het = False
else:
raise ValueError('Unknown Monomer type.')
monomer = monomer_class(
atoms=None, mol_code=monomer_label[2], monomer_id=monomer_label[1],
insertion_code=monomer_label[3], is_hetero=het, ampal_parent=parent
)
monomer.states = self.gen_states(monomer_data.values(), monomer)
monomer._active_state = sorted(monomer.states.keys())[0]
return monomer | [
"def",
"proc_monomer",
"(",
"self",
",",
"monomer_info",
",",
"parent",
",",
"mon_cls",
"=",
"False",
")",
":",
"monomer_labels",
",",
"monomer_data",
"=",
"monomer_info",
"if",
"len",
"(",
"monomer_labels",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"'Malformed PDB, single monomer id with '",
"'multiple labels. {}'",
".",
"format",
"(",
"monomer_labels",
")",
")",
"else",
":",
"monomer_label",
"=",
"list",
"(",
"monomer_labels",
")",
"[",
"0",
"]",
"if",
"mon_cls",
":",
"monomer_class",
"=",
"mon_cls",
"het",
"=",
"True",
"elif",
"monomer_label",
"[",
"0",
"]",
"==",
"'ATOM'",
":",
"if",
"monomer_label",
"[",
"2",
"]",
"in",
"standard_amino_acids",
".",
"values",
"(",
")",
":",
"monomer_class",
"=",
"Residue",
"else",
":",
"monomer_class",
"=",
"Nucleotide",
"het",
"=",
"False",
"else",
":",
"raise",
"ValueError",
"(",
"'Unknown Monomer type.'",
")",
"monomer",
"=",
"monomer_class",
"(",
"atoms",
"=",
"None",
",",
"mol_code",
"=",
"monomer_label",
"[",
"2",
"]",
",",
"monomer_id",
"=",
"monomer_label",
"[",
"1",
"]",
",",
"insertion_code",
"=",
"monomer_label",
"[",
"3",
"]",
",",
"is_hetero",
"=",
"het",
",",
"ampal_parent",
"=",
"parent",
")",
"monomer",
".",
"states",
"=",
"self",
".",
"gen_states",
"(",
"monomer_data",
".",
"values",
"(",
")",
",",
"monomer",
")",
"monomer",
".",
"_active_state",
"=",
"sorted",
"(",
"monomer",
".",
"states",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
"return",
"monomer"
] | Processes a record into a `Monomer`.
Parameters
----------
monomer_info : (set, OrderedDict)
Labels and data for a monomer.
parent : ampal.Polymer
`Polymer` used to assign `ampal_parent` on created
`Monomer`.
mon_cls : `Monomer class or subclass`, optional
A `Monomer` class can be defined explicitly. | [
"Processes",
"a",
"records",
"into",
"a",
"Monomer",
"."
] | python | train |
COLORFULBOARD/revision | revision/client.py | https://github.com/COLORFULBOARD/revision/blob/2f22e72cce5b60032a80c002ac45c2ecef0ed987/revision/client.py#L235-L243 | def save(self, revision):
"""
:param revision:
:type revision: :class:`revision.data.Revision`
"""
if not isinstance(revision, Revision):
raise InvalidArgType()
self.state.update(revision) | [
"def",
"save",
"(",
"self",
",",
"revision",
")",
":",
"if",
"not",
"isinstance",
"(",
"revision",
",",
"Revision",
")",
":",
"raise",
"InvalidArgType",
"(",
")",
"self",
".",
"state",
".",
"update",
"(",
"revision",
")"
] | :param revision:
:type revision: :class:`revision.data.Revision` | [
":",
"param",
"revision",
":",
":",
"type",
"revision",
":",
":",
"class",
":",
"revision",
".",
"data",
".",
"Revision"
] | python | train |
ewels/MultiQC | multiqc/utils/report.py | https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/utils/report.py#L48-L187 | def get_filelist(run_module_names):
"""
Go through all supplied search directories and assemble a master
list of files to search. Then fire search functions for each file.
"""
# Prep search patterns
spatterns = [{},{},{},{},{},{},{}]
epatterns = [{}, {}]
ignored_patterns = []
for key, sps in config.sp.items():
mod_name = key.split('/', 1)[0]
if mod_name.lower() not in [m.lower() for m in run_module_names]:
ignored_patterns.append(key)
continue
files[key] = list()
if not isinstance(sps, list):
sps = [sps]
# Warn if we have any unrecognised search pattern keys
expected_sp_keys = [
'fn',
'fn_re',
'contents',
'contents_re',
'num_lines',
'shared',
'max_filesize',
'exclude_fn',
'exclude_fn_re',
'exclude_contents',
'exclude_contents_re'
]
unrecognised_keys = [y for x in sps for y in x.keys() if y not in expected_sp_keys]
if len(unrecognised_keys) > 0:
logger.warn("Unrecognised search pattern keys for '{}': {}".format(key, ', '.join(unrecognised_keys)))
# Split search patterns according to speed of execution.
if any([x for x in sps if 'contents_re' in x]):
if any([x for x in sps if 'num_lines' in x]):
spatterns[4][key] = sps
elif any([x for x in sps if 'max_filesize' in x]):
spatterns[5][key] = sps
else:
spatterns[6][key] = sps
elif any([x for x in sps if 'contents' in x]):
if any([x for x in sps if 'num_lines' in x]):
spatterns[1][key] = sps
elif any([x for x in sps if 'max_filesize' in x]):
spatterns[2][key] = sps
else:
spatterns[3][key] = sps
else:
spatterns[0][key] = sps
if len(ignored_patterns) > 0:
logger.debug("Ignored {} search patterns as didn't match running modules.".format(len(ignored_patterns)))
def add_file(fn, root):
"""
Function applied to each file found when walking the analysis
directories. Runs through all search patterns and returns True
if a match is found.
"""
f = {'fn': fn, 'root': root}
# Check that this is a file and not a pipe or anything weird
if not os.path.isfile(os.path.join(root, fn)):
return None
# Check that we don't want to ignore this file
i_matches = [n for n in config.fn_ignore_files if fnmatch.fnmatch(fn, n)]
if len(i_matches) > 0:
logger.debug("Ignoring file as matched an ignore pattern: {}".format(fn))
return None
# Limit search to small files, to avoid 30GB FastQ files etc.
try:
f['filesize'] = os.path.getsize(os.path.join(root,fn))
except (IOError, OSError, ValueError, UnicodeDecodeError):
logger.debug("Couldn't read file when checking filesize: {}".format(fn))
else:
if f['filesize'] > config.log_filesize_limit:
return False
# Test file for each search pattern
for patterns in spatterns:
for key, sps in patterns.items():
for sp in sps:
if search_file (sp, f):
# Check that we shouldn't exclude this file
if not exclude_file(sp, f):
# Looks good! Remember this file
files[key].append(f)
# Don't keep searching this file for other modules
if not sp.get('shared', False):
return
# Don't look at other patterns for this module
else:
break
# Go through the analysis directories and get file list
for path in config.analysis_dir:
if os.path.islink(path) and config.ignore_symlinks:
continue
elif os.path.isfile(path):
searchfiles.append([os.path.basename(path), os.path.dirname(path)])
elif os.path.isdir(path):
for root, dirnames, filenames in os.walk(path, followlinks=(not config.ignore_symlinks), topdown=True):
bname = os.path.basename(root)
# Skip any sub-directories matching ignore params
orig_dirnames = dirnames[:]
for n in config.fn_ignore_dirs:
dirnames[:] = [d for d in dirnames if not fnmatch.fnmatch(d, n.rstrip(os.sep))]
if len(orig_dirnames) != len(dirnames):
removed_dirs = [os.path.join(root, d) for d in set(orig_dirnames).symmetric_difference(set(dirnames))]
logger.debug("Ignoring directory as matched fn_ignore_dirs: {}".format(", ".join(removed_dirs)))
orig_dirnames = dirnames[:]
for n in config.fn_ignore_paths:
dirnames[:] = [d for d in dirnames if not fnmatch.fnmatch(os.path.join(root, d), n.rstrip(os.sep))]
if len(orig_dirnames) != len(dirnames):
removed_dirs = [os.path.join(root, d) for d in set(orig_dirnames).symmetric_difference(set(dirnames))]
logger.debug("Ignoring directory as matched fn_ignore_paths: {}".format(", ".join(removed_dirs)))
# Skip *this* directory if matches ignore params
d_matches = [n for n in config.fn_ignore_dirs if fnmatch.fnmatch(bname, n.rstrip(os.sep))]
if len(d_matches) > 0:
logger.debug("Ignoring directory as matched fn_ignore_dirs: {}".format(bname))
continue
p_matches = [n for n in config.fn_ignore_paths if fnmatch.fnmatch(root, n.rstrip(os.sep))]
if len(p_matches) > 0:
logger.debug("Ignoring directory as matched fn_ignore_paths: {}".format(root))
continue
# Search filenames in this directory
for fn in filenames:
searchfiles.append([fn, root])
# Search through collected files
with click.progressbar(searchfiles, label="Searching {} files..".format(len(searchfiles))) as sfiles:
for sf in sfiles:
add_file(sf[0], sf[1]) | [
"def",
"get_filelist",
"(",
"run_module_names",
")",
":",
"# Prep search patterns",
"spatterns",
"=",
"[",
"{",
"}",
",",
"{",
"}",
",",
"{",
"}",
",",
"{",
"}",
",",
"{",
"}",
",",
"{",
"}",
",",
"{",
"}",
"]",
"epatterns",
"=",
"[",
"{",
"}",
",",
"{",
"}",
"]",
"ignored_patterns",
"=",
"[",
"]",
"for",
"key",
",",
"sps",
"in",
"config",
".",
"sp",
".",
"items",
"(",
")",
":",
"mod_name",
"=",
"key",
".",
"split",
"(",
"'/'",
",",
"1",
")",
"[",
"0",
"]",
"if",
"mod_name",
".",
"lower",
"(",
")",
"not",
"in",
"[",
"m",
".",
"lower",
"(",
")",
"for",
"m",
"in",
"run_module_names",
"]",
":",
"ignored_patterns",
".",
"append",
"(",
"key",
")",
"continue",
"files",
"[",
"key",
"]",
"=",
"list",
"(",
")",
"if",
"not",
"isinstance",
"(",
"sps",
",",
"list",
")",
":",
"sps",
"=",
"[",
"sps",
"]",
"# Warn if we have any unrecognised search pattern keys",
"expected_sp_keys",
"=",
"[",
"'fn'",
",",
"'fn_re'",
",",
"'contents'",
",",
"'contents_re'",
",",
"'num_lines'",
",",
"'shared'",
",",
"'max_filesize'",
",",
"'exclude_fn'",
",",
"'exclude_fn_re'",
",",
"'exclude_contents'",
",",
"'exclude_contents_re'",
"]",
"unrecognised_keys",
"=",
"[",
"y",
"for",
"x",
"in",
"sps",
"for",
"y",
"in",
"x",
".",
"keys",
"(",
")",
"if",
"y",
"not",
"in",
"expected_sp_keys",
"]",
"if",
"len",
"(",
"unrecognised_keys",
")",
">",
"0",
":",
"logger",
".",
"warn",
"(",
"\"Unrecognised search pattern keys for '{}': {}\"",
".",
"format",
"(",
"key",
",",
"', '",
".",
"join",
"(",
"unrecognised_keys",
")",
")",
")",
"# Split search patterns according to speed of execution.",
"if",
"any",
"(",
"[",
"x",
"for",
"x",
"in",
"sps",
"if",
"'contents_re'",
"in",
"x",
"]",
")",
":",
"if",
"any",
"(",
"[",
"x",
"for",
"x",
"in",
"sps",
"if",
"'num_lines'",
"in",
"x",
"]",
")",
":",
"spatterns",
"[",
"4",
"]",
"[",
"key",
"]",
"=",
"sps",
"elif",
"any",
"(",
"[",
"x",
"for",
"x",
"in",
"sps",
"if",
"'max_filesize'",
"in",
"x",
"]",
")",
":",
"spatterns",
"[",
"5",
"]",
"[",
"key",
"]",
"=",
"sps",
"else",
":",
"spatterns",
"[",
"6",
"]",
"[",
"key",
"]",
"=",
"sps",
"elif",
"any",
"(",
"[",
"x",
"for",
"x",
"in",
"sps",
"if",
"'contents'",
"in",
"x",
"]",
")",
":",
"if",
"any",
"(",
"[",
"x",
"for",
"x",
"in",
"sps",
"if",
"'num_lines'",
"in",
"x",
"]",
")",
":",
"spatterns",
"[",
"1",
"]",
"[",
"key",
"]",
"=",
"sps",
"elif",
"any",
"(",
"[",
"x",
"for",
"x",
"in",
"sps",
"if",
"'max_filesize'",
"in",
"x",
"]",
")",
":",
"spatterns",
"[",
"2",
"]",
"[",
"key",
"]",
"=",
"sps",
"else",
":",
"spatterns",
"[",
"3",
"]",
"[",
"key",
"]",
"=",
"sps",
"else",
":",
"spatterns",
"[",
"0",
"]",
"[",
"key",
"]",
"=",
"sps",
"if",
"len",
"(",
"ignored_patterns",
")",
">",
"0",
":",
"logger",
".",
"debug",
"(",
"\"Ignored {} search patterns as didn't match running modules.\"",
".",
"format",
"(",
"len",
"(",
"ignored_patterns",
")",
")",
")",
"def",
"add_file",
"(",
"fn",
",",
"root",
")",
":",
"\"\"\"\n Function applied to each file found when walking the analysis\n directories. Runs through all search patterns and returns True\n if a match is found.\n \"\"\"",
"f",
"=",
"{",
"'fn'",
":",
"fn",
",",
"'root'",
":",
"root",
"}",
"# Check that this is a file and not a pipe or anything weird",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"fn",
")",
")",
":",
"return",
"None",
"# Check that we don't want to ignore this file",
"i_matches",
"=",
"[",
"n",
"for",
"n",
"in",
"config",
".",
"fn_ignore_files",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"fn",
",",
"n",
")",
"]",
"if",
"len",
"(",
"i_matches",
")",
">",
"0",
":",
"logger",
".",
"debug",
"(",
"\"Ignoring file as matched an ignore pattern: {}\"",
".",
"format",
"(",
"fn",
")",
")",
"return",
"None",
"# Limit search to small files, to avoid 30GB FastQ files etc.",
"try",
":",
"f",
"[",
"'filesize'",
"]",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"fn",
")",
")",
"except",
"(",
"IOError",
",",
"OSError",
",",
"ValueError",
",",
"UnicodeDecodeError",
")",
":",
"logger",
".",
"debug",
"(",
"\"Couldn't read file when checking filesize: {}\"",
".",
"format",
"(",
"fn",
")",
")",
"else",
":",
"if",
"f",
"[",
"'filesize'",
"]",
">",
"config",
".",
"log_filesize_limit",
":",
"return",
"False",
"# Test file for each search pattern",
"for",
"patterns",
"in",
"spatterns",
":",
"for",
"key",
",",
"sps",
"in",
"patterns",
".",
"items",
"(",
")",
":",
"for",
"sp",
"in",
"sps",
":",
"if",
"search_file",
"(",
"sp",
",",
"f",
")",
":",
"# Check that we shouldn't exclude this file",
"if",
"not",
"exclude_file",
"(",
"sp",
",",
"f",
")",
":",
"# Looks good! Remember this file",
"files",
"[",
"key",
"]",
".",
"append",
"(",
"f",
")",
"# Don't keep searching this file for other modules",
"if",
"not",
"sp",
".",
"get",
"(",
"'shared'",
",",
"False",
")",
":",
"return",
"# Don't look at other patterns for this module",
"else",
":",
"break",
"# Go through the analysis directories and get file list",
"for",
"path",
"in",
"config",
".",
"analysis_dir",
":",
"if",
"os",
".",
"path",
".",
"islink",
"(",
"path",
")",
"and",
"config",
".",
"ignore_symlinks",
":",
"continue",
"elif",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"searchfiles",
".",
"append",
"(",
"[",
"os",
".",
"path",
".",
"basename",
"(",
"path",
")",
",",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
"]",
")",
"elif",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"for",
"root",
",",
"dirnames",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"path",
",",
"followlinks",
"=",
"(",
"not",
"config",
".",
"ignore_symlinks",
")",
",",
"topdown",
"=",
"True",
")",
":",
"bname",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"root",
")",
"# Skip any sub-directories matching ignore params",
"orig_dirnames",
"=",
"dirnames",
"[",
":",
"]",
"for",
"n",
"in",
"config",
".",
"fn_ignore_dirs",
":",
"dirnames",
"[",
":",
"]",
"=",
"[",
"d",
"for",
"d",
"in",
"dirnames",
"if",
"not",
"fnmatch",
".",
"fnmatch",
"(",
"d",
",",
"n",
".",
"rstrip",
"(",
"os",
".",
"sep",
")",
")",
"]",
"if",
"len",
"(",
"orig_dirnames",
")",
"!=",
"len",
"(",
"dirnames",
")",
":",
"removed_dirs",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"d",
")",
"for",
"d",
"in",
"set",
"(",
"orig_dirnames",
")",
".",
"symmetric_difference",
"(",
"set",
"(",
"dirnames",
")",
")",
"]",
"logger",
".",
"debug",
"(",
"\"Ignoring directory as matched fn_ignore_dirs: {}\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"removed_dirs",
")",
")",
")",
"orig_dirnames",
"=",
"dirnames",
"[",
":",
"]",
"for",
"n",
"in",
"config",
".",
"fn_ignore_paths",
":",
"dirnames",
"[",
":",
"]",
"=",
"[",
"d",
"for",
"d",
"in",
"dirnames",
"if",
"not",
"fnmatch",
".",
"fnmatch",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"d",
")",
",",
"n",
".",
"rstrip",
"(",
"os",
".",
"sep",
")",
")",
"]",
"if",
"len",
"(",
"orig_dirnames",
")",
"!=",
"len",
"(",
"dirnames",
")",
":",
"removed_dirs",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"d",
")",
"for",
"d",
"in",
"set",
"(",
"orig_dirnames",
")",
".",
"symmetric_difference",
"(",
"set",
"(",
"dirnames",
")",
")",
"]",
"logger",
".",
"debug",
"(",
"\"Ignoring directory as matched fn_ignore_paths: {}\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"removed_dirs",
")",
")",
")",
"# Skip *this* directory if matches ignore params",
"d_matches",
"=",
"[",
"n",
"for",
"n",
"in",
"config",
".",
"fn_ignore_dirs",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"bname",
",",
"n",
".",
"rstrip",
"(",
"os",
".",
"sep",
")",
")",
"]",
"if",
"len",
"(",
"d_matches",
")",
">",
"0",
":",
"logger",
".",
"debug",
"(",
"\"Ignoring directory as matched fn_ignore_dirs: {}\"",
".",
"format",
"(",
"bname",
")",
")",
"continue",
"p_matches",
"=",
"[",
"n",
"for",
"n",
"in",
"config",
".",
"fn_ignore_paths",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"root",
",",
"n",
".",
"rstrip",
"(",
"os",
".",
"sep",
")",
")",
"]",
"if",
"len",
"(",
"p_matches",
")",
">",
"0",
":",
"logger",
".",
"debug",
"(",
"\"Ignoring directory as matched fn_ignore_paths: {}\"",
".",
"format",
"(",
"root",
")",
")",
"continue",
"# Search filenames in this directory",
"for",
"fn",
"in",
"filenames",
":",
"searchfiles",
".",
"append",
"(",
"[",
"fn",
",",
"root",
"]",
")",
"# Search through collected files",
"with",
"click",
".",
"progressbar",
"(",
"searchfiles",
",",
"label",
"=",
"\"Searching {} files..\"",
".",
"format",
"(",
"len",
"(",
"searchfiles",
")",
")",
")",
"as",
"sfiles",
":",
"for",
"sf",
"in",
"sfiles",
":",
"add_file",
"(",
"sf",
"[",
"0",
"]",
",",
"sf",
"[",
"1",
"]",
")"
] | Go through all supplied search directories and assemble a master
list of files to search. Then fire search functions for each file. | [
"Go",
"through",
"all",
"supplied",
"search",
"directories",
"and",
"assembly",
"a",
"master",
"list",
"of",
"files",
"to",
"search",
".",
"Then",
"fire",
"search",
"functions",
"for",
"each",
"file",
"."
] | python | train |
rh-marketingops/dwm | dwm/wrappers.py | https://github.com/rh-marketingops/dwm/blob/66c7d18db857afbe5d574478ceaaad6159ae7469/dwm/wrappers.py#L6-L37 | def lookupAll(data, configFields, lookupType, db, histObj={}):
"""
Return a record after having cleaning rules of specified type applied to all fields in the config
:param dict data: single record (dictionary) to which cleaning rules should be applied
:param dict configFields: "fields" object from DWM config (see DataDictionary)
:param string lookupType: Type of lookup to perform/MongoDB collection name. One of 'genericLookup', 'fieldSpecificLookup', 'normLookup', 'genericRegex', 'fieldSpecificRegex', 'normRegex', 'normIncludes'
:param MongoClient db: MongoClient instance connected to MongoDB
:param dict histObj: History object to which changes should be appended
"""
for field in data.keys():
if field in configFields.keys() and data[field]!='':
if lookupType in configFields[field]["lookup"]:
if lookupType in ['genericLookup', 'fieldSpecificLookup', 'normLookup']:
fieldValNew, histObj = DataLookup(fieldVal=data[field], db=db, lookupType=lookupType, fieldName=field, histObj=histObj)
elif lookupType in ['genericRegex', 'fieldSpecificRegex', 'normRegex']:
fieldValNew, histObj = RegexLookup(fieldVal=data[field], db=db, fieldName=field, lookupType=lookupType, histObj=histObj)
elif lookupType=='normIncludes':
fieldValNew, histObj, checkMatch = IncludesLookup(fieldVal=data[field], lookupType='normIncludes', db=db, fieldName=field, histObj=histObj)
data[field] = fieldValNew
return data, histObj | [
"def",
"lookupAll",
"(",
"data",
",",
"configFields",
",",
"lookupType",
",",
"db",
",",
"histObj",
"=",
"{",
"}",
")",
":",
"for",
"field",
"in",
"data",
".",
"keys",
"(",
")",
":",
"if",
"field",
"in",
"configFields",
".",
"keys",
"(",
")",
"and",
"data",
"[",
"field",
"]",
"!=",
"''",
":",
"if",
"lookupType",
"in",
"configFields",
"[",
"field",
"]",
"[",
"\"lookup\"",
"]",
":",
"if",
"lookupType",
"in",
"[",
"'genericLookup'",
",",
"'fieldSpecificLookup'",
",",
"'normLookup'",
"]",
":",
"fieldValNew",
",",
"histObj",
"=",
"DataLookup",
"(",
"fieldVal",
"=",
"data",
"[",
"field",
"]",
",",
"db",
"=",
"db",
",",
"lookupType",
"=",
"lookupType",
",",
"fieldName",
"=",
"field",
",",
"histObj",
"=",
"histObj",
")",
"elif",
"lookupType",
"in",
"[",
"'genericRegex'",
",",
"'fieldSpecificRegex'",
",",
"'normRegex'",
"]",
":",
"fieldValNew",
",",
"histObj",
"=",
"RegexLookup",
"(",
"fieldVal",
"=",
"data",
"[",
"field",
"]",
",",
"db",
"=",
"db",
",",
"fieldName",
"=",
"field",
",",
"lookupType",
"=",
"lookupType",
",",
"histObj",
"=",
"histObj",
")",
"elif",
"lookupType",
"==",
"'normIncludes'",
":",
"fieldValNew",
",",
"histObj",
",",
"checkMatch",
"=",
"IncludesLookup",
"(",
"fieldVal",
"=",
"data",
"[",
"field",
"]",
",",
"lookupType",
"=",
"'normIncludes'",
",",
"db",
"=",
"db",
",",
"fieldName",
"=",
"field",
",",
"histObj",
"=",
"histObj",
")",
"data",
"[",
"field",
"]",
"=",
"fieldValNew",
"return",
"data",
",",
"histObj"
] | Return a record after having cleaning rules of specified type applied to all fields in the config
:param dict data: single record (dictionary) to which cleaning rules should be applied
:param dict configFields: "fields" object from DWM config (see DataDictionary)
:param string lookupType: Type of lookup to perform/MongoDB collection name. One of 'genericLookup', 'fieldSpecificLookup', 'normLookup', 'genericRegex', 'fieldSpecificRegex', 'normRegex', 'normIncludes'
:param MongoClient db: MongoClient instance connected to MongoDB
:param dict histObj: History object to which changes should be appended | [
"Return",
"a",
"record",
"after",
"having",
"cleaning",
"rules",
"of",
"specified",
"type",
"applied",
"to",
"all",
"fields",
"in",
"the",
"config"
] | python | train |
walkr/oi | setup.py | https://github.com/walkr/oi/blob/d9d8491d0bc920e493d8f716d6078762b8b2c6d3/setup.py#L17-L31 | def read_long_description(readme_file):
""" Read package long description from README file """
try:
import pypandoc
except (ImportError, OSError) as e:
print('No pypandoc or pandoc: %s' % (e,))
if is_py3:
fh = open(readme_file, encoding='utf-8')
else:
fh = open(readme_file)
long_description = fh.read()
fh.close()
return long_description
else:
return pypandoc.convert(readme_file, 'rst') | [
"def",
"read_long_description",
"(",
"readme_file",
")",
":",
"try",
":",
"import",
"pypandoc",
"except",
"(",
"ImportError",
",",
"OSError",
")",
"as",
"e",
":",
"print",
"(",
"'No pypandoc or pandoc: %s'",
"%",
"(",
"e",
",",
")",
")",
"if",
"is_py3",
":",
"fh",
"=",
"open",
"(",
"readme_file",
",",
"encoding",
"=",
"'utf-8'",
")",
"else",
":",
"fh",
"=",
"open",
"(",
"readme_file",
")",
"long_description",
"=",
"fh",
".",
"read",
"(",
")",
"fh",
".",
"close",
"(",
")",
"return",
"long_description",
"else",
":",
"return",
"pypandoc",
".",
"convert",
"(",
"readme_file",
",",
"'rst'",
")"
] | Read package long description from README file | [
"Read",
"package",
"long",
"description",
"from",
"README",
"file"
] | python | train |
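A sketch of how `read_long_description` is typically consumed from a `setup.py` (the function is defined earlier in the same file), assuming a `README.md` sits next to it; the package metadata below is placeholder.
from setuptools import setup

setup(
    name='example-pkg',            # placeholder metadata
    version='0.1.0',
    long_description=read_long_description('README.md'),
)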
raymondEhlers/pachyderm | pachyderm/histogram.py | https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/histogram.py#L602-L620 | def get_bin_edges_from_axis(axis) -> np.ndarray:
""" Get bin edges from a ROOT hist axis.
Note:
Doesn't include over- or underflow bins!
Args:
axis (ROOT.TAxis): Axis from which the bin edges should be extracted.
Returns:
Array containing the bin edges.
"""
# Don't include over- or underflow bins
bins = range(1, axis.GetNbins() + 1)
# Bin edges
bin_edges = np.empty(len(bins) + 1)
bin_edges[:-1] = [axis.GetBinLowEdge(i) for i in bins]
bin_edges[-1] = axis.GetBinUpEdge(axis.GetNbins())
return bin_edges | [
"def",
"get_bin_edges_from_axis",
"(",
"axis",
")",
"->",
"np",
".",
"ndarray",
":",
"# Don't include over- or underflow bins",
"bins",
"=",
"range",
"(",
"1",
",",
"axis",
".",
"GetNbins",
"(",
")",
"+",
"1",
")",
"# Bin edges",
"bin_edges",
"=",
"np",
".",
"empty",
"(",
"len",
"(",
"bins",
")",
"+",
"1",
")",
"bin_edges",
"[",
":",
"-",
"1",
"]",
"=",
"[",
"axis",
".",
"GetBinLowEdge",
"(",
"i",
")",
"for",
"i",
"in",
"bins",
"]",
"bin_edges",
"[",
"-",
"1",
"]",
"=",
"axis",
".",
"GetBinUpEdge",
"(",
"axis",
".",
"GetNbins",
"(",
")",
")",
"return",
"bin_edges"
] | Get bin edges from a ROOT hist axis.
Note:
Doesn't include over- or underflow bins!
Args:
axis (ROOT.TAxis): Axis from which the bin edges should be extracted.
Returns:
Array containing the bin edges. | [
"Get",
"bin",
"edges",
"from",
"a",
"ROOT",
"hist",
"axis",
"."
] | python | train |
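A usage sketch assuming PyROOT is installed; the import path follows the module shown above (`pachyderm.histogram`).
import ROOT
from pachyderm.histogram import get_bin_edges_from_axis

hist = ROOT.TH1F('h', 'demo', 10, 0.0, 5.0)
edges = get_bin_edges_from_axis(hist.GetXaxis())
print(edges)   # 11 edges for 10 bins: [0. , 0.5, 1. , ..., 5. ]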
DataBiosphere/dsub | dsub/lib/param_util.py | https://github.com/DataBiosphere/dsub/blob/443ce31daa6023dc2fd65ef2051796e19d18d5a7/dsub/lib/param_util.py#L836-L877 | def age_to_create_time(age, from_time=None):
"""Compute the create time (UTC) for the list filter.
If the age is an integer value it is treated as seconds since the epoch (UTC).
Otherwise the value must be of the form "<integer><unit>" where supported
units are s, m, h, d, w (seconds, minutes, hours, days, weeks).
Args:
age: A "<integer><unit>" string or integer value.
from_time: A timezone-aware datetime the age is measured back from; defaults to the current local time.
Returns:
A timezone-aware datetime or None if age parameter is empty.
"""
if not age:
return None
if not from_time:
from_time = dsub_util.replace_timezone(datetime.datetime.now(), tzlocal())
try:
last_char = age[-1]
if last_char == 's':
return from_time - datetime.timedelta(seconds=int(age[:-1]))
elif last_char == 'm':
return from_time - datetime.timedelta(minutes=int(age[:-1]))
elif last_char == 'h':
return from_time - datetime.timedelta(hours=int(age[:-1]))
elif last_char == 'd':
return from_time - datetime.timedelta(days=int(age[:-1]))
elif last_char == 'w':
return from_time - datetime.timedelta(weeks=int(age[:-1]))
else:
# If no unit is given treat the age as seconds from epoch, otherwise apply
# the correct time unit.
return dsub_util.replace_timezone(
datetime.datetime.utcfromtimestamp(int(age)), pytz.utc)
except (ValueError, OverflowError) as e:
raise ValueError('Unable to parse age string %s: %s' % (age, e)) | [
"def",
"age_to_create_time",
"(",
"age",
",",
"from_time",
"=",
"None",
")",
":",
"if",
"not",
"age",
":",
"return",
"None",
"if",
"not",
"from_time",
":",
"from_time",
"=",
"dsub_util",
".",
"replace_timezone",
"(",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
",",
"tzlocal",
"(",
")",
")",
"try",
":",
"last_char",
"=",
"age",
"[",
"-",
"1",
"]",
"if",
"last_char",
"==",
"'s'",
":",
"return",
"from_time",
"-",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"int",
"(",
"age",
"[",
":",
"-",
"1",
"]",
")",
")",
"elif",
"last_char",
"==",
"'m'",
":",
"return",
"from_time",
"-",
"datetime",
".",
"timedelta",
"(",
"minutes",
"=",
"int",
"(",
"age",
"[",
":",
"-",
"1",
"]",
")",
")",
"elif",
"last_char",
"==",
"'h'",
":",
"return",
"from_time",
"-",
"datetime",
".",
"timedelta",
"(",
"hours",
"=",
"int",
"(",
"age",
"[",
":",
"-",
"1",
"]",
")",
")",
"elif",
"last_char",
"==",
"'d'",
":",
"return",
"from_time",
"-",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"int",
"(",
"age",
"[",
":",
"-",
"1",
"]",
")",
")",
"elif",
"last_char",
"==",
"'w'",
":",
"return",
"from_time",
"-",
"datetime",
".",
"timedelta",
"(",
"weeks",
"=",
"int",
"(",
"age",
"[",
":",
"-",
"1",
"]",
")",
")",
"else",
":",
"# If no unit is given treat the age as seconds from epoch, otherwise apply",
"# the correct time unit.",
"return",
"dsub_util",
".",
"replace_timezone",
"(",
"datetime",
".",
"datetime",
".",
"utcfromtimestamp",
"(",
"int",
"(",
"age",
")",
")",
",",
"pytz",
".",
"utc",
")",
"except",
"(",
"ValueError",
",",
"OverflowError",
")",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"'Unable to parse age string %s: %s'",
"%",
"(",
"age",
",",
"e",
")",
")"
] | Compute the create time (UTC) for the list filter.
If the age is an integer value it is treated as seconds since the epoch (UTC).
Otherwise the value must be of the form "<integer><unit>" where supported
units are s, m, h, d, w (seconds, minutes, hours, days, weeks).
Args:
age: A "<integer><unit>" string or integer value.
from_time: A timezone-aware datetime the age is measured back from; defaults to the current local time.
Returns:
A timezone-aware datetime or None if age parameter is empty. | [
"Compute",
"the",
"create",
"time",
"(",
"UTC",
")",
"for",
"the",
"list",
"filter",
"."
] | python | valid |
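A quick demonstration of the accepted age formats, assuming the function is importable from `dsub.lib.param_util` as in the path above; a fixed `from_time` keeps the output deterministic.
import datetime
import pytz
from dsub.lib.param_util import age_to_create_time

now = datetime.datetime(2020, 1, 15, 12, 0, 0, tzinfo=pytz.utc)
print(age_to_create_time('3d', from_time=now))    # 2020-01-12 12:00:00+00:00
print(age_to_create_time('90m', from_time=now))   # 2020-01-15 10:30:00+00:00
print(age_to_create_time('1577836800'))           # 2020-01-01 00:00:00+00:00 (epoch seconds)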
aptivate/django-sortable-listview | sortable_listview/views.py | https://github.com/aptivate/django-sortable-listview/blob/9d5fa5847f0c3e80893780c6540e5098635ace9f/sortable_listview/views.py#L94-L106 | def get_querystring(self):
"""
Clean existing query string (GET parameters) by removing
arguments that we don't want to preserve (sort parameter, 'page')
"""
to_remove = self.get_querystring_parameter_to_remove()
query_string = urlparse(self.request.get_full_path()).query
query_dict = parse_qs(query_string.encode('utf-8'))
for arg in to_remove:
if arg in query_dict:
del query_dict[arg]
clean_query_string = urlencode(query_dict, doseq=True)
return clean_query_string | [
"def",
"get_querystring",
"(",
"self",
")",
":",
"to_remove",
"=",
"self",
".",
"get_querystring_parameter_to_remove",
"(",
")",
"query_string",
"=",
"urlparse",
"(",
"self",
".",
"request",
".",
"get_full_path",
"(",
")",
")",
".",
"query",
"query_dict",
"=",
"parse_qs",
"(",
"query_string",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"for",
"arg",
"in",
"to_remove",
":",
"if",
"arg",
"in",
"query_dict",
":",
"del",
"query_dict",
"[",
"arg",
"]",
"clean_query_string",
"=",
"urlencode",
"(",
"query_dict",
",",
"doseq",
"=",
"True",
")",
"return",
"clean_query_string"
] | Clean existing query string (GET parameters) by removing
arguments that we don't want to preserve (sort parameter, 'page') | [
"Clean",
"existing",
"query",
"string",
"(",
"GET",
"parameters",
")",
"by",
"removing",
"arguments",
"that",
"we",
"don",
"t",
"want",
"to",
"preserve",
"(",
"sort",
"parameter",
"page",
")"
] | python | train |
ethereum/py-evm | eth/vm/stack.py | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/stack.py#L99-L107 | def dup(self, position: int) -> None:
"""
Perform a DUP operation on the stack.
"""
idx = -1 * position
try:
self.push(self.values[idx])
except IndexError:
raise InsufficientStack("Insufficient stack items for DUP{0}".format(position)) | [
"def",
"dup",
"(",
"self",
",",
"position",
":",
"int",
")",
"->",
"None",
":",
"idx",
"=",
"-",
"1",
"*",
"position",
"try",
":",
"self",
".",
"push",
"(",
"self",
".",
"values",
"[",
"idx",
"]",
")",
"except",
"IndexError",
":",
"raise",
"InsufficientStack",
"(",
"\"Insufficient stack items for DUP{0}\"",
".",
"format",
"(",
"position",
")",
")"
] | Perform a DUP operation on the stack. | [
"Perform",
"a",
"DUP",
"operation",
"on",
"the",
"stack",
"."
] | python | train |
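The DUP semantics are easiest to see on a plain Python list; this is an illustration of the operation, not the py-evm `Stack` class itself.
# Top of the stack is the end of the list, mirroring the class above.
values = [0x01, 0x02, 0x03]
position = 2                      # DUP2: duplicate the 2nd item from the top
values.append(values[-position])
print(values)                     # [1, 2, 3, 2]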
googleapis/google-cloud-python | firestore/google/cloud/firestore_v1beta1/collection.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/collection.py#L325-L343 | def start_after(self, document_fields):
"""Start query after a cursor with this collection as parent.
See
:meth:`~.firestore_v1beta1.query.Query.start_after` for
more information on this method.
Args:
document_fields (Union[~.firestore_v1beta1.\
document.DocumentSnapshot, dict, list, tuple]): a document
snapshot or a dictionary/list/tuple of fields representing a
query results cursor. A cursor is a collection of values that
represent a position in a query result set.
Returns:
~.firestore_v1beta1.query.Query: A query with cursor.
"""
query = query_mod.Query(self)
return query.start_after(document_fields) | [
"def",
"start_after",
"(",
"self",
",",
"document_fields",
")",
":",
"query",
"=",
"query_mod",
".",
"Query",
"(",
"self",
")",
"return",
"query",
".",
"start_after",
"(",
"document_fields",
")"
] | Start query after a cursor with this collection as parent.
See
:meth:`~.firestore_v1beta1.query.Query.start_after` for
more information on this method.
Args:
document_fields (Union[~.firestore_v1beta1.\
document.DocumentSnapshot, dict, list, tuple]): a document
snapshot or a dictionary/list/tuple of fields representing a
query results cursor. A cursor is a collection of values that
represent a position in a query result set.
Returns:
~.firestore_v1beta1.query.Query: A query with cursor. | [
"Start",
"query",
"after",
"a",
"cursor",
"with",
"this",
"collection",
"as",
"parent",
"."
] | python | train |
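A pagination sketch using `start_after`, assuming Google Cloud credentials are configured and a hypothetical `cities` collection with a `population` field exists; whether results are consumed with `.stream()` or `.get()` depends on the client version.
from google.cloud import firestore

db = firestore.Client()
cities = db.collection('cities')

# Resume the ordered query after a cursor (a snapshot or a dict of field values).
query = cities.order_by('population').start_after({'population': 1000000}).limit(10)
for snapshot in query.stream():
    print(snapshot.id, snapshot.to_dict())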
DataBiosphere/toil | src/toil/worker.py | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/worker.py#L46-L100 | def nextChainableJobGraph(jobGraph, jobStore):
"""Returns the next chainable jobGraph after this jobGraph if one
exists, or None if the chain must terminate.
"""
#If no more jobs to run or services not finished, quit
if len(jobGraph.stack) == 0 or len(jobGraph.services) > 0 or jobGraph.checkpoint != None:
logger.debug("Stopping running chain of jobs: length of stack: %s, services: %s, checkpoint: %s",
len(jobGraph.stack), len(jobGraph.services), jobGraph.checkpoint != None)
return None
#Get the next set of jobs to run
jobs = jobGraph.stack[-1]
assert len(jobs) > 0
#If there are 2 or more jobs to run in parallel we quit
if len(jobs) >= 2:
logger.debug("No more jobs can run in series by this worker,"
" it's got %i children", len(jobs)-1)
return None
#We check the requirements of the jobGraph to see if we can run it
#within the current worker
successorJobNode = jobs[0]
if successorJobNode.memory > jobGraph.memory:
logger.debug("We need more memory for the next job, so finishing")
return None
if successorJobNode.cores > jobGraph.cores:
logger.debug("We need more cores for the next job, so finishing")
return None
if successorJobNode.disk > jobGraph.disk:
logger.debug("We need more disk for the next job, so finishing")
return None
if successorJobNode.preemptable != jobGraph.preemptable:
logger.debug("Preemptability is different for the next job, returning to the leader")
return None
if successorJobNode.predecessorNumber > 1:
logger.debug("The jobGraph has multiple predecessors, we must return to the leader.")
return None
# Load the successor jobGraph
successorJobGraph = jobStore.load(successorJobNode.jobStoreID)
# Somewhat ugly, but check if job is a checkpoint job and quit if
# so
if successorJobGraph.command.startswith("_toil "):
#Load the job
successorJob = Job._loadJob(successorJobGraph.command, jobStore)
# Check it is not a checkpoint
if successorJob.checkpoint:
logger.debug("Next job is checkpoint, so finishing")
return None
# Made it through! This job is chainable.
return successorJobGraph | [
"def",
"nextChainableJobGraph",
"(",
"jobGraph",
",",
"jobStore",
")",
":",
"#If no more jobs to run or services not finished, quit",
"if",
"len",
"(",
"jobGraph",
".",
"stack",
")",
"==",
"0",
"or",
"len",
"(",
"jobGraph",
".",
"services",
")",
">",
"0",
"or",
"jobGraph",
".",
"checkpoint",
"!=",
"None",
":",
"logger",
".",
"debug",
"(",
"\"Stopping running chain of jobs: length of stack: %s, services: %s, checkpoint: %s\"",
",",
"len",
"(",
"jobGraph",
".",
"stack",
")",
",",
"len",
"(",
"jobGraph",
".",
"services",
")",
",",
"jobGraph",
".",
"checkpoint",
"!=",
"None",
")",
"return",
"None",
"#Get the next set of jobs to run",
"jobs",
"=",
"jobGraph",
".",
"stack",
"[",
"-",
"1",
"]",
"assert",
"len",
"(",
"jobs",
")",
">",
"0",
"#If there are 2 or more jobs to run in parallel we quit",
"if",
"len",
"(",
"jobs",
")",
">=",
"2",
":",
"logger",
".",
"debug",
"(",
"\"No more jobs can run in series by this worker,\"",
"\" it's got %i children\"",
",",
"len",
"(",
"jobs",
")",
"-",
"1",
")",
"return",
"None",
"#We check the requirements of the jobGraph to see if we can run it",
"#within the current worker",
"successorJobNode",
"=",
"jobs",
"[",
"0",
"]",
"if",
"successorJobNode",
".",
"memory",
">",
"jobGraph",
".",
"memory",
":",
"logger",
".",
"debug",
"(",
"\"We need more memory for the next job, so finishing\"",
")",
"return",
"None",
"if",
"successorJobNode",
".",
"cores",
">",
"jobGraph",
".",
"cores",
":",
"logger",
".",
"debug",
"(",
"\"We need more cores for the next job, so finishing\"",
")",
"return",
"None",
"if",
"successorJobNode",
".",
"disk",
">",
"jobGraph",
".",
"disk",
":",
"logger",
".",
"debug",
"(",
"\"We need more disk for the next job, so finishing\"",
")",
"return",
"None",
"if",
"successorJobNode",
".",
"preemptable",
"!=",
"jobGraph",
".",
"preemptable",
":",
"logger",
".",
"debug",
"(",
"\"Preemptability is different for the next job, returning to the leader\"",
")",
"return",
"None",
"if",
"successorJobNode",
".",
"predecessorNumber",
">",
"1",
":",
"logger",
".",
"debug",
"(",
"\"The jobGraph has multiple predecessors, we must return to the leader.\"",
")",
"return",
"None",
"# Load the successor jobGraph",
"successorJobGraph",
"=",
"jobStore",
".",
"load",
"(",
"successorJobNode",
".",
"jobStoreID",
")",
"# Somewhat ugly, but check if job is a checkpoint job and quit if",
"# so",
"if",
"successorJobGraph",
".",
"command",
".",
"startswith",
"(",
"\"_toil \"",
")",
":",
"#Load the job",
"successorJob",
"=",
"Job",
".",
"_loadJob",
"(",
"successorJobGraph",
".",
"command",
",",
"jobStore",
")",
"# Check it is not a checkpoint",
"if",
"successorJob",
".",
"checkpoint",
":",
"logger",
".",
"debug",
"(",
"\"Next job is checkpoint, so finishing\"",
")",
"return",
"None",
"# Made it through! This job is chainable.",
"return",
"successorJobGraph"
] | Returns the next chainable jobGraph after this jobGraph if one
exists, or None if the chain must terminate. | [
"Returns",
"the",
"next",
"chainable",
"jobGraph",
"after",
"this",
"jobGraph",
"if",
"one",
"exists",
"or",
"None",
"if",
"the",
"chain",
"must",
"terminate",
"."
] | python | train |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/nose/plugins/logcapture.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/nose/plugins/logcapture.py#L225-L233 | def formatError(self, test, err):
"""Add captured log messages to error output.
"""
# logic flow copied from Capture.formatError
test.capturedLogging = records = self.formatLogRecords()
if not records:
return err
ec, ev, tb = err
return (ec, self.addCaptureToErr(ev, records), tb) | [
"def",
"formatError",
"(",
"self",
",",
"test",
",",
"err",
")",
":",
"# logic flow copied from Capture.formatError",
"test",
".",
"capturedLogging",
"=",
"records",
"=",
"self",
".",
"formatLogRecords",
"(",
")",
"if",
"not",
"records",
":",
"return",
"err",
"ec",
",",
"ev",
",",
"tb",
"=",
"err",
"return",
"(",
"ec",
",",
"self",
".",
"addCaptureToErr",
"(",
"ev",
",",
"records",
")",
",",
"tb",
")"
] | Add captured log messages to error output. | [
"Add",
"captured",
"log",
"messages",
"to",
"error",
"output",
"."
] | python | test |
materialsproject/pymatgen | pymatgen/__init__.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/__init__.py#L82-L104 | def loadfn(fname):
"""
Convenience method to perform quick loading of data from a filename. The
type of object returned depends on the file type.
Args:
fname (string): A filename.
Returns:
Note that fname is matched using unix-style, i.e., fnmatch.
(Structure) if *POSCAR*/*CONTCAR*/*.cif
(Vasprun) *vasprun*
(obj) if *json* (passthrough to monty.serialization.loadfn)
"""
if (fnmatch(fname, "*POSCAR*") or fnmatch(fname, "*CONTCAR*") or
".cif" in fname.lower()) or fnmatch(fname, "*.vasp"):
return Structure.from_file(fname)
elif fnmatch(fname, "*vasprun*"):
from pymatgen.io.vasp import Vasprun
return Vasprun(fname)
elif fnmatch(fname, "*.json*"):
from monty.serialization import loadfn
return loadfn(fname) | [
"def",
"loadfn",
"(",
"fname",
")",
":",
"if",
"(",
"fnmatch",
"(",
"fname",
",",
"\"*POSCAR*\"",
")",
"or",
"fnmatch",
"(",
"fname",
",",
"\"*CONTCAR*\"",
")",
"or",
"\".cif\"",
"in",
"fname",
".",
"lower",
"(",
")",
")",
"or",
"fnmatch",
"(",
"fname",
",",
"\"*.vasp\"",
")",
":",
"return",
"Structure",
".",
"from_file",
"(",
"fname",
")",
"elif",
"fnmatch",
"(",
"fname",
",",
"\"*vasprun*\"",
")",
":",
"from",
"pymatgen",
".",
"io",
".",
"vasp",
"import",
"Vasprun",
"return",
"Vasprun",
"(",
"fname",
")",
"elif",
"fnmatch",
"(",
"fname",
",",
"\"*.json*\"",
")",
":",
"from",
"monty",
".",
"serialization",
"import",
"loadfn",
"return",
"loadfn",
"(",
"fname",
")"
] | Convenience method to perform quick loading of data from a filename. The
type of object returned depends on the file type.
Args:
fname (string): A filename.
Returns:
Note that fname is matched using unix-style, i.e., fnmatch.
(Structure) if *POSCAR*/*CONTCAR*/*.cif
(Vasprun) *vasprun*
(obj) if *json* (passthrough to monty.serialization.loadfn) | [
"Convenience",
"method",
"to",
"perform",
"quick",
"loading",
"of",
"data",
"from",
"a",
"filename",
".",
"The",
"type",
"of",
"object",
"returned",
"depends",
"the",
"file",
"type",
"."
] | python | train |
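A usage sketch for `loadfn`, assuming the named files exist in the working directory; in the version shown above the helper lives in the top-level `pymatgen` package.
from pymatgen import loadfn

structure = loadfn('POSCAR')       # filename matches *POSCAR*  -> Structure
run = loadfn('vasprun.xml')        # matches *vasprun*           -> Vasprun
data = loadfn('results.json')      # *.json falls through to monty's loadfn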
fprimex/zdesk | zdesk/zdesk_api.py | https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L3382-L3386 | def ticket_comment_attachment_redact(self, ticket_id, comment_id, attachment_id, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/attachments#redact-comment-attachment"
api_path = "/api/v2/tickets/{ticket_id}/comments/{comment_id}/attachments/{attachment_id}/redact.json"
api_path = api_path.format(ticket_id=ticket_id, comment_id=comment_id, attachment_id=attachment_id)
return self.call(api_path, method="PUT", data=data, **kwargs) | [
"def",
"ticket_comment_attachment_redact",
"(",
"self",
",",
"ticket_id",
",",
"comment_id",
",",
"attachment_id",
",",
"data",
",",
"*",
"*",
"kwargs",
")",
":",
"api_path",
"=",
"\"/api/v2/tickets/{ticket_id}/comments/{comment_id}/attachments/{attachment_id}/redact.json\"",
"api_path",
"=",
"api_path",
".",
"format",
"(",
"ticket_id",
"=",
"ticket_id",
",",
"comment_id",
"=",
"comment_id",
",",
"attachment_id",
"=",
"attachment_id",
")",
"return",
"self",
".",
"call",
"(",
"api_path",
",",
"method",
"=",
"\"PUT\"",
",",
"data",
"=",
"data",
",",
"*",
"*",
"kwargs",
")"
] | https://developer.zendesk.com/rest_api/docs/core/attachments#redact-comment-attachment | [
"https",
":",
"//",
"developer",
".",
"zendesk",
".",
"com",
"/",
"rest_api",
"/",
"docs",
"/",
"core",
"/",
"attachments#redact",
"-",
"comment",
"-",
"attachment"
] | python | train |
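A call sketch for the redaction endpoint, assuming a zdesk `Zendesk` client; the URL, credentials and ids are placeholders, and the exact constructor arguments should be checked against the zdesk README.
from zdesk import Zendesk

zd = Zendesk('https://example.zendesk.com', 'agent@example.com', 'API_TOKEN',
             True)   # True -> treat the third argument as an API token
zd.ticket_comment_attachment_redact(ticket_id=123, comment_id=456,
                                    attachment_id=789, data={})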
etcher-be/elib_miz | elib_miz/mission.py | https://github.com/etcher-be/elib_miz/blob/f28db58fadb2cd9341e0ae4d65101c0cc7d8f3d7/elib_miz/mission.py#L1773-L1777 | def radio_presets(self) -> typing.Iterator['FlyingUnit.RadioPresets']:
"""
Returns: generator over unit radio presets
"""
raise TypeError('unit #{}: {}'.format(self.unit_id, self.unit_name)) | [
"def",
"radio_presets",
"(",
"self",
")",
"->",
"typing",
".",
"Iterator",
"[",
"'FlyingUnit.RadioPresets'",
"]",
":",
"raise",
"TypeError",
"(",
"'unit #{}: {}'",
".",
"format",
"(",
"self",
".",
"unit_id",
",",
"self",
".",
"unit_name",
")",
")"
] | Returns: generator over unit radio presets | [
"Returns",
":",
"generator",
"over",
"unit",
"radio",
"presets"
] | python | train |
wummel/dosage | dosagelib/output.py | https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/dosagelib/output.py#L69-L88 | def write(self, s, level=0, color=None):
"""Write message with indentation, context and optional timestamp."""
if level > self.level:
return
if self.timestamps:
timestamp = time.strftime(u'%H:%M:%S ')
else:
timestamp = u''
with lock:
if self.context:
self.stream.write(u'%s%s> ' % (timestamp, self.context))
elif self.context is None:
self.stream.write(u'%s%s> ' % (timestamp, get_threadname()))
self.stream.write(u'%s' % s, color=color)
try:
text_type = unicode
except NameError:
text_type = str
self.stream.write(text_type(os.linesep))
self.stream.flush() | [
"def",
"write",
"(",
"self",
",",
"s",
",",
"level",
"=",
"0",
",",
"color",
"=",
"None",
")",
":",
"if",
"level",
">",
"self",
".",
"level",
":",
"return",
"if",
"self",
".",
"timestamps",
":",
"timestamp",
"=",
"time",
".",
"strftime",
"(",
"u'%H:%M:%S '",
")",
"else",
":",
"timestamp",
"=",
"u''",
"with",
"lock",
":",
"if",
"self",
".",
"context",
":",
"self",
".",
"stream",
".",
"write",
"(",
"u'%s%s> '",
"%",
"(",
"timestamp",
",",
"self",
".",
"context",
")",
")",
"elif",
"self",
".",
"context",
"is",
"None",
":",
"self",
".",
"stream",
".",
"write",
"(",
"u'%s%s> '",
"%",
"(",
"timestamp",
",",
"get_threadname",
"(",
")",
")",
")",
"self",
".",
"stream",
".",
"write",
"(",
"u'%s'",
"%",
"s",
",",
"color",
"=",
"color",
")",
"try",
":",
"text_type",
"=",
"unicode",
"except",
"NameError",
":",
"text_type",
"=",
"str",
"self",
".",
"stream",
".",
"write",
"(",
"text_type",
"(",
"os",
".",
"linesep",
")",
")",
"self",
".",
"stream",
".",
"flush",
"(",
")"
] | Write message with indentation, context and optional timestamp. | [
"Write",
"message",
"with",
"indentation",
"context",
"and",
"optional",
"timestamp",
"."
] | python | train |
watson-developer-cloud/python-sdk | ibm_watson/assistant_v2.py | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/assistant_v2.py#L1009-L1014 | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'system') and self.system is not None:
_dict['system'] = self.system._to_dict()
return _dict | [
"def",
"_to_dict",
"(",
"self",
")",
":",
"_dict",
"=",
"{",
"}",
"if",
"hasattr",
"(",
"self",
",",
"'system'",
")",
"and",
"self",
".",
"system",
"is",
"not",
"None",
":",
"_dict",
"[",
"'system'",
"]",
"=",
"self",
".",
"system",
".",
"_to_dict",
"(",
")",
"return",
"_dict"
] | Return a json dictionary representing this model. | [
"Return",
"a",
"json",
"dictionary",
"representing",
"this",
"model",
"."
] | python | train |
trivago/Protector | protector/parser/query_parser.py | https://github.com/trivago/Protector/blob/7ebe7bde965e27737b961a0cb5740724d174fdc7/protector/parser/query_parser.py#L139-L160 | def create_select_query(self, tokens):
"""
Parse tokens of select query
:param tokens: A list of InfluxDB query tokens
"""
if not tokens[Keyword.SELECT]:
return None
if not tokens[Keyword.FROM]:
return None
return SelectQuery(
self.parse_keyword(Keyword.SELECT, tokens),
self.parse_keyword(Keyword.FROM, tokens),
where_stmt=self.parse_keyword(Keyword.WHERE, tokens),
limit_stmt=self.parse_keyword(Keyword.LIMIT, tokens),
group_by_stmt=self.parse_group(tokens),
duration=self.parsed_time_overlap.timespan_seconds(),
resolution=self.parsed_resolution,
time_ranges=self.parsed_time,
time_overlap=self.parsed_time_overlap,
datapoints=self.parsed_datapoints
) | [
"def",
"create_select_query",
"(",
"self",
",",
"tokens",
")",
":",
"if",
"not",
"tokens",
"[",
"Keyword",
".",
"SELECT",
"]",
":",
"return",
"None",
"if",
"not",
"tokens",
"[",
"Keyword",
".",
"FROM",
"]",
":",
"return",
"None",
"return",
"SelectQuery",
"(",
"self",
".",
"parse_keyword",
"(",
"Keyword",
".",
"SELECT",
",",
"tokens",
")",
",",
"self",
".",
"parse_keyword",
"(",
"Keyword",
".",
"FROM",
",",
"tokens",
")",
",",
"where_stmt",
"=",
"self",
".",
"parse_keyword",
"(",
"Keyword",
".",
"WHERE",
",",
"tokens",
")",
",",
"limit_stmt",
"=",
"self",
".",
"parse_keyword",
"(",
"Keyword",
".",
"LIMIT",
",",
"tokens",
")",
",",
"group_by_stmt",
"=",
"self",
".",
"parse_group",
"(",
"tokens",
")",
",",
"duration",
"=",
"self",
".",
"parsed_time_overlap",
".",
"timespan_seconds",
"(",
")",
",",
"resolution",
"=",
"self",
".",
"parsed_resolution",
",",
"time_ranges",
"=",
"self",
".",
"parsed_time",
",",
"time_overlap",
"=",
"self",
".",
"parsed_time_overlap",
",",
"datapoints",
"=",
"self",
".",
"parsed_datapoints",
")"
] | Parse tokens of select query
:param tokens: A list of InfluxDB query tokens | [
"Parse",
"tokens",
"of",
"select",
"query",
":",
"param",
"tokens",
":",
"A",
"list",
"of",
"InfluxDB",
"query",
"tokens"
] | python | valid |
xhtml2pdf/xhtml2pdf | xhtml2pdf/reportlab_paragraph.py | https://github.com/xhtml2pdf/xhtml2pdf/blob/230357a392f48816532d3c2fa082a680b80ece48/xhtml2pdf/reportlab_paragraph.py#L1461-L1655 | def drawPara(self, debug=0):
"""Draws a paragraph according to the given style.
Returns the final y position at the bottom. Not safe for
paragraphs without spaces e.g. Japanese; wrapping
algorithm will go infinite."""
if self.debug:
print (id(self), "drawPara", self.blPara.kind)
#stash the key facts locally for speed
canvas = self.canv
style = self.style
blPara = self.blPara
lines = blPara.lines
leading = style.leading
autoLeading = getattr(self, 'autoLeading', getattr(style, 'autoLeading', ''))
#work out the origin for line 1
leftIndent = style.leftIndent
cur_x = leftIndent
if debug:
bw = 0.5
bc = Color(1, 1, 0)
bg = Color(0.9, 0.9, 0.9)
else:
bw = getattr(style, 'borderWidth', None)
bc = getattr(style, 'borderColor', None)
bg = style.backColor
#if has a background or border, draw it
if bg or (bc and bw):
canvas.saveState()
op = canvas.rect
kwds = dict(fill=0, stroke=0)
if bc and bw:
canvas.setStrokeColor(bc)
canvas.setLineWidth(bw)
kwds['stroke'] = 1
br = getattr(style, 'borderRadius', 0)
if br and not debug:
op = canvas.roundRect
kwds['radius'] = br
if bg:
canvas.setFillColor(bg)
kwds['fill'] = 1
bp = getattr(style, 'borderPadding', 0)
op(leftIndent - bp,
-bp,
self.width - (leftIndent + style.rightIndent) + 2 * bp,
self.height + 2 * bp,
**kwds)
canvas.restoreState()
nLines = len(lines)
bulletText = self.bulletText
if nLines > 0:
_offsets = getattr(self, '_offsets', [0])
_offsets += (nLines - len(_offsets)) * [_offsets[-1]]
canvas.saveState()
alignment = style.alignment
offset = style.firstLineIndent + _offsets[0]
lim = nLines - 1
noJustifyLast = not (hasattr(self, '_JustifyLast') and self._JustifyLast)
if blPara.kind == 0:
if alignment == TA_LEFT:
dpl = _leftDrawParaLine
elif alignment == TA_CENTER:
dpl = _centerDrawParaLine
elif self.style.alignment == TA_RIGHT:
dpl = _rightDrawParaLine
elif self.style.alignment == TA_JUSTIFY:
dpl = _justifyDrawParaLine
f = blPara
cur_y = self.height - getattr(f, 'ascent', f.fontSize) # TODO fix XPreformatted to remove this hack
if bulletText:
offset = _drawBullet(canvas, offset, cur_y, bulletText, style)
#set up the font etc.
canvas.setFillColor(f.textColor)
tx = self.beginText(cur_x, cur_y)
if autoLeading == 'max':
leading = max(leading, LEADING_FACTOR * f.fontSize)
elif autoLeading == 'min':
leading = LEADING_FACTOR * f.fontSize
#now the font for the rest of the paragraph
tx.setFont(f.fontName, f.fontSize, leading)
ws = getattr(tx, '_wordSpace', 0)
t_off = dpl(tx, offset, ws, lines[0][1], noJustifyLast and nLines == 1)
if f.underline or f.link or f.strike:
xs = tx.XtraState = ABag()
xs.cur_y = cur_y
xs.f = f
xs.style = style
xs.lines = lines
xs.underlines = []
xs.underlineColor = None
# XXX Modified for XHTML2PDF
xs.backgrounds = []
xs.backgroundColor = None
xs.backgroundFontSize = None
xs.strikes = []
xs.strikeColor = None
# XXX Modified for XHTML2PDF
xs.strikeFontSize = None
xs.links = []
xs.link = f.link
canvas.setStrokeColor(f.textColor)
dx = t_off + leftIndent
if dpl != _justifyDrawParaLine: ws = 0
# XXX Never underline!
underline = f.underline
strike = f.strike
link = f.link
if underline:
_do_under_line(0, dx, ws, tx)
if strike:
_do_under_line(0, dx, ws, tx, lm=0.125)
if link: _do_link_line(0, dx, ws, tx)
#now the middle of the paragraph, aligned with the left margin which is our origin.
for i in six.moves.range(1, nLines):
ws = lines[i][0]
t_off = dpl(tx, _offsets[i], ws, lines[i][1], noJustifyLast and i == lim)
if dpl != _justifyDrawParaLine: ws = 0
if underline: _do_under_line(i, t_off + leftIndent, ws, tx)
if strike: _do_under_line(i, t_off + leftIndent, ws, tx, lm=0.125)
if link: _do_link_line(i, t_off + leftIndent, ws, tx)
else:
for i in six.moves.range(1, nLines):
dpl(tx, _offsets[i], lines[i][0], lines[i][1], noJustifyLast and i == lim)
else:
f = lines[0]
cur_y = self.height - getattr(f, 'ascent', f.fontSize) # TODO fix XPreformatted to remove this hack
# default?
dpl = _leftDrawParaLineX
if bulletText:
offset = _drawBullet(canvas, offset, cur_y, bulletText, style)
if alignment == TA_LEFT:
dpl = _leftDrawParaLineX
elif alignment == TA_CENTER:
dpl = _centerDrawParaLineX
elif self.style.alignment == TA_RIGHT:
dpl = _rightDrawParaLineX
elif self.style.alignment == TA_JUSTIFY:
dpl = _justifyDrawParaLineX
else:
raise ValueError("bad align %s" % repr(alignment))
#set up the font etc.
tx = self.beginText(cur_x, cur_y)
xs = tx.XtraState = ABag()
xs.textColor = None
# XXX Modified for XHTML2PDF
xs.backColor = None
xs.rise = 0
xs.underline = 0
xs.underlines = []
xs.underlineColor = None
# XXX Modified for XHTML2PDF
xs.background = 0
xs.backgrounds = []
xs.backgroundColor = None
xs.backgroundFontSize = None
xs.strike = 0
xs.strikes = []
xs.strikeColor = None
# XXX Modified for XHTML2PDF
xs.strikeFontSize = None
xs.links = []
xs.link = None
xs.leading = style.leading
xs.leftIndent = leftIndent
tx._leading = None
tx._olb = None
xs.cur_y = cur_y
xs.f = f
xs.style = style
xs.autoLeading = autoLeading
tx._fontname, tx._fontsize = None, None
dpl(tx, offset, lines[0], noJustifyLast and nLines == 1)
_do_post_text(tx)
#now the middle of the paragraph, aligned with the left margin which is our origin.
for i in six.moves.range(1, nLines):
f = lines[i]
dpl(tx, _offsets[i], f, noJustifyLast and i == lim)
_do_post_text(tx)
canvas.drawText(tx)
canvas.restoreState() | [
"def",
"drawPara",
"(",
"self",
",",
"debug",
"=",
"0",
")",
":",
"if",
"self",
".",
"debug",
":",
"print",
"(",
"id",
"(",
"self",
")",
",",
"\"drawPara\"",
",",
"self",
".",
"blPara",
".",
"kind",
")",
"#stash the key facts locally for speed",
"canvas",
"=",
"self",
".",
"canv",
"style",
"=",
"self",
".",
"style",
"blPara",
"=",
"self",
".",
"blPara",
"lines",
"=",
"blPara",
".",
"lines",
"leading",
"=",
"style",
".",
"leading",
"autoLeading",
"=",
"getattr",
"(",
"self",
",",
"'autoLeading'",
",",
"getattr",
"(",
"style",
",",
"'autoLeading'",
",",
"''",
")",
")",
"#work out the origin for line 1",
"leftIndent",
"=",
"style",
".",
"leftIndent",
"cur_x",
"=",
"leftIndent",
"if",
"debug",
":",
"bw",
"=",
"0.5",
"bc",
"=",
"Color",
"(",
"1",
",",
"1",
",",
"0",
")",
"bg",
"=",
"Color",
"(",
"0.9",
",",
"0.9",
",",
"0.9",
")",
"else",
":",
"bw",
"=",
"getattr",
"(",
"style",
",",
"'borderWidth'",
",",
"None",
")",
"bc",
"=",
"getattr",
"(",
"style",
",",
"'borderColor'",
",",
"None",
")",
"bg",
"=",
"style",
".",
"backColor",
"#if has a background or border, draw it",
"if",
"bg",
"or",
"(",
"bc",
"and",
"bw",
")",
":",
"canvas",
".",
"saveState",
"(",
")",
"op",
"=",
"canvas",
".",
"rect",
"kwds",
"=",
"dict",
"(",
"fill",
"=",
"0",
",",
"stroke",
"=",
"0",
")",
"if",
"bc",
"and",
"bw",
":",
"canvas",
".",
"setStrokeColor",
"(",
"bc",
")",
"canvas",
".",
"setLineWidth",
"(",
"bw",
")",
"kwds",
"[",
"'stroke'",
"]",
"=",
"1",
"br",
"=",
"getattr",
"(",
"style",
",",
"'borderRadius'",
",",
"0",
")",
"if",
"br",
"and",
"not",
"debug",
":",
"op",
"=",
"canvas",
".",
"roundRect",
"kwds",
"[",
"'radius'",
"]",
"=",
"br",
"if",
"bg",
":",
"canvas",
".",
"setFillColor",
"(",
"bg",
")",
"kwds",
"[",
"'fill'",
"]",
"=",
"1",
"bp",
"=",
"getattr",
"(",
"style",
",",
"'borderPadding'",
",",
"0",
")",
"op",
"(",
"leftIndent",
"-",
"bp",
",",
"-",
"bp",
",",
"self",
".",
"width",
"-",
"(",
"leftIndent",
"+",
"style",
".",
"rightIndent",
")",
"+",
"2",
"*",
"bp",
",",
"self",
".",
"height",
"+",
"2",
"*",
"bp",
",",
"*",
"*",
"kwds",
")",
"canvas",
".",
"restoreState",
"(",
")",
"nLines",
"=",
"len",
"(",
"lines",
")",
"bulletText",
"=",
"self",
".",
"bulletText",
"if",
"nLines",
">",
"0",
":",
"_offsets",
"=",
"getattr",
"(",
"self",
",",
"'_offsets'",
",",
"[",
"0",
"]",
")",
"_offsets",
"+=",
"(",
"nLines",
"-",
"len",
"(",
"_offsets",
")",
")",
"*",
"[",
"_offsets",
"[",
"-",
"1",
"]",
"]",
"canvas",
".",
"saveState",
"(",
")",
"alignment",
"=",
"style",
".",
"alignment",
"offset",
"=",
"style",
".",
"firstLineIndent",
"+",
"_offsets",
"[",
"0",
"]",
"lim",
"=",
"nLines",
"-",
"1",
"noJustifyLast",
"=",
"not",
"(",
"hasattr",
"(",
"self",
",",
"'_JustifyLast'",
")",
"and",
"self",
".",
"_JustifyLast",
")",
"if",
"blPara",
".",
"kind",
"==",
"0",
":",
"if",
"alignment",
"==",
"TA_LEFT",
":",
"dpl",
"=",
"_leftDrawParaLine",
"elif",
"alignment",
"==",
"TA_CENTER",
":",
"dpl",
"=",
"_centerDrawParaLine",
"elif",
"self",
".",
"style",
".",
"alignment",
"==",
"TA_RIGHT",
":",
"dpl",
"=",
"_rightDrawParaLine",
"elif",
"self",
".",
"style",
".",
"alignment",
"==",
"TA_JUSTIFY",
":",
"dpl",
"=",
"_justifyDrawParaLine",
"f",
"=",
"blPara",
"cur_y",
"=",
"self",
".",
"height",
"-",
"getattr",
"(",
"f",
",",
"'ascent'",
",",
"f",
".",
"fontSize",
")",
"# TODO fix XPreformatted to remove this hack",
"if",
"bulletText",
":",
"offset",
"=",
"_drawBullet",
"(",
"canvas",
",",
"offset",
",",
"cur_y",
",",
"bulletText",
",",
"style",
")",
"#set up the font etc.",
"canvas",
".",
"setFillColor",
"(",
"f",
".",
"textColor",
")",
"tx",
"=",
"self",
".",
"beginText",
"(",
"cur_x",
",",
"cur_y",
")",
"if",
"autoLeading",
"==",
"'max'",
":",
"leading",
"=",
"max",
"(",
"leading",
",",
"LEADING_FACTOR",
"*",
"f",
".",
"fontSize",
")",
"elif",
"autoLeading",
"==",
"'min'",
":",
"leading",
"=",
"LEADING_FACTOR",
"*",
"f",
".",
"fontSize",
"#now the font for the rest of the paragraph",
"tx",
".",
"setFont",
"(",
"f",
".",
"fontName",
",",
"f",
".",
"fontSize",
",",
"leading",
")",
"ws",
"=",
"getattr",
"(",
"tx",
",",
"'_wordSpace'",
",",
"0",
")",
"t_off",
"=",
"dpl",
"(",
"tx",
",",
"offset",
",",
"ws",
",",
"lines",
"[",
"0",
"]",
"[",
"1",
"]",
",",
"noJustifyLast",
"and",
"nLines",
"==",
"1",
")",
"if",
"f",
".",
"underline",
"or",
"f",
".",
"link",
"or",
"f",
".",
"strike",
":",
"xs",
"=",
"tx",
".",
"XtraState",
"=",
"ABag",
"(",
")",
"xs",
".",
"cur_y",
"=",
"cur_y",
"xs",
".",
"f",
"=",
"f",
"xs",
".",
"style",
"=",
"style",
"xs",
".",
"lines",
"=",
"lines",
"xs",
".",
"underlines",
"=",
"[",
"]",
"xs",
".",
"underlineColor",
"=",
"None",
"# XXX Modified for XHTML2PDF",
"xs",
".",
"backgrounds",
"=",
"[",
"]",
"xs",
".",
"backgroundColor",
"=",
"None",
"xs",
".",
"backgroundFontSize",
"=",
"None",
"xs",
".",
"strikes",
"=",
"[",
"]",
"xs",
".",
"strikeColor",
"=",
"None",
"# XXX Modified for XHTML2PDF",
"xs",
".",
"strikeFontSize",
"=",
"None",
"xs",
".",
"links",
"=",
"[",
"]",
"xs",
".",
"link",
"=",
"f",
".",
"link",
"canvas",
".",
"setStrokeColor",
"(",
"f",
".",
"textColor",
")",
"dx",
"=",
"t_off",
"+",
"leftIndent",
"if",
"dpl",
"!=",
"_justifyDrawParaLine",
":",
"ws",
"=",
"0",
"# XXX Never underline!",
"underline",
"=",
"f",
".",
"underline",
"strike",
"=",
"f",
".",
"strike",
"link",
"=",
"f",
".",
"link",
"if",
"underline",
":",
"_do_under_line",
"(",
"0",
",",
"dx",
",",
"ws",
",",
"tx",
")",
"if",
"strike",
":",
"_do_under_line",
"(",
"0",
",",
"dx",
",",
"ws",
",",
"tx",
",",
"lm",
"=",
"0.125",
")",
"if",
"link",
":",
"_do_link_line",
"(",
"0",
",",
"dx",
",",
"ws",
",",
"tx",
")",
"#now the middle of the paragraph, aligned with the left margin which is our origin.",
"for",
"i",
"in",
"six",
".",
"moves",
".",
"range",
"(",
"1",
",",
"nLines",
")",
":",
"ws",
"=",
"lines",
"[",
"i",
"]",
"[",
"0",
"]",
"t_off",
"=",
"dpl",
"(",
"tx",
",",
"_offsets",
"[",
"i",
"]",
",",
"ws",
",",
"lines",
"[",
"i",
"]",
"[",
"1",
"]",
",",
"noJustifyLast",
"and",
"i",
"==",
"lim",
")",
"if",
"dpl",
"!=",
"_justifyDrawParaLine",
":",
"ws",
"=",
"0",
"if",
"underline",
":",
"_do_under_line",
"(",
"i",
",",
"t_off",
"+",
"leftIndent",
",",
"ws",
",",
"tx",
")",
"if",
"strike",
":",
"_do_under_line",
"(",
"i",
",",
"t_off",
"+",
"leftIndent",
",",
"ws",
",",
"tx",
",",
"lm",
"=",
"0.125",
")",
"if",
"link",
":",
"_do_link_line",
"(",
"i",
",",
"t_off",
"+",
"leftIndent",
",",
"ws",
",",
"tx",
")",
"else",
":",
"for",
"i",
"in",
"six",
".",
"moves",
".",
"range",
"(",
"1",
",",
"nLines",
")",
":",
"dpl",
"(",
"tx",
",",
"_offsets",
"[",
"i",
"]",
",",
"lines",
"[",
"i",
"]",
"[",
"0",
"]",
",",
"lines",
"[",
"i",
"]",
"[",
"1",
"]",
",",
"noJustifyLast",
"and",
"i",
"==",
"lim",
")",
"else",
":",
"f",
"=",
"lines",
"[",
"0",
"]",
"cur_y",
"=",
"self",
".",
"height",
"-",
"getattr",
"(",
"f",
",",
"'ascent'",
",",
"f",
".",
"fontSize",
")",
"# TODO fix XPreformatted to remove this hack",
"# default?",
"dpl",
"=",
"_leftDrawParaLineX",
"if",
"bulletText",
":",
"offset",
"=",
"_drawBullet",
"(",
"canvas",
",",
"offset",
",",
"cur_y",
",",
"bulletText",
",",
"style",
")",
"if",
"alignment",
"==",
"TA_LEFT",
":",
"dpl",
"=",
"_leftDrawParaLineX",
"elif",
"alignment",
"==",
"TA_CENTER",
":",
"dpl",
"=",
"_centerDrawParaLineX",
"elif",
"self",
".",
"style",
".",
"alignment",
"==",
"TA_RIGHT",
":",
"dpl",
"=",
"_rightDrawParaLineX",
"elif",
"self",
".",
"style",
".",
"alignment",
"==",
"TA_JUSTIFY",
":",
"dpl",
"=",
"_justifyDrawParaLineX",
"else",
":",
"raise",
"ValueError",
"(",
"\"bad align %s\"",
"%",
"repr",
"(",
"alignment",
")",
")",
"#set up the font etc.",
"tx",
"=",
"self",
".",
"beginText",
"(",
"cur_x",
",",
"cur_y",
")",
"xs",
"=",
"tx",
".",
"XtraState",
"=",
"ABag",
"(",
")",
"xs",
".",
"textColor",
"=",
"None",
"# XXX Modified for XHTML2PDF",
"xs",
".",
"backColor",
"=",
"None",
"xs",
".",
"rise",
"=",
"0",
"xs",
".",
"underline",
"=",
"0",
"xs",
".",
"underlines",
"=",
"[",
"]",
"xs",
".",
"underlineColor",
"=",
"None",
"# XXX Modified for XHTML2PDF",
"xs",
".",
"background",
"=",
"0",
"xs",
".",
"backgrounds",
"=",
"[",
"]",
"xs",
".",
"backgroundColor",
"=",
"None",
"xs",
".",
"backgroundFontSize",
"=",
"None",
"xs",
".",
"strike",
"=",
"0",
"xs",
".",
"strikes",
"=",
"[",
"]",
"xs",
".",
"strikeColor",
"=",
"None",
"# XXX Modified for XHTML2PDF",
"xs",
".",
"strikeFontSize",
"=",
"None",
"xs",
".",
"links",
"=",
"[",
"]",
"xs",
".",
"link",
"=",
"None",
"xs",
".",
"leading",
"=",
"style",
".",
"leading",
"xs",
".",
"leftIndent",
"=",
"leftIndent",
"tx",
".",
"_leading",
"=",
"None",
"tx",
".",
"_olb",
"=",
"None",
"xs",
".",
"cur_y",
"=",
"cur_y",
"xs",
".",
"f",
"=",
"f",
"xs",
".",
"style",
"=",
"style",
"xs",
".",
"autoLeading",
"=",
"autoLeading",
"tx",
".",
"_fontname",
",",
"tx",
".",
"_fontsize",
"=",
"None",
",",
"None",
"dpl",
"(",
"tx",
",",
"offset",
",",
"lines",
"[",
"0",
"]",
",",
"noJustifyLast",
"and",
"nLines",
"==",
"1",
")",
"_do_post_text",
"(",
"tx",
")",
"#now the middle of the paragraph, aligned with the left margin which is our origin.",
"for",
"i",
"in",
"six",
".",
"moves",
".",
"range",
"(",
"1",
",",
"nLines",
")",
":",
"f",
"=",
"lines",
"[",
"i",
"]",
"dpl",
"(",
"tx",
",",
"_offsets",
"[",
"i",
"]",
",",
"f",
",",
"noJustifyLast",
"and",
"i",
"==",
"lim",
")",
"_do_post_text",
"(",
"tx",
")",
"canvas",
".",
"drawText",
"(",
"tx",
")",
"canvas",
".",
"restoreState",
"(",
")"
] | Draws a paragraph according to the given style.
Returns the final y position at the bottom. Not safe for
paragraphs without spaces e.g. Japanese; wrapping
algorithm will go infinite. | [
"Draws",
"a",
"paragraph",
"according",
"to",
"the",
"given",
"style",
".",
"Returns",
"the",
"final",
"y",
"position",
"at",
"the",
"bottom",
".",
"Not",
"safe",
"for",
"paragraphs",
"without",
"spaces",
"e",
".",
"g",
".",
"Japanese",
";",
"wrapping",
"algorithm",
"will",
"go",
"infinite",
"."
] | python | train |
apple/turicreate | src/external/xgboost/python-package/xgboost/libpath.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/libpath.py#L13-L45 | def find_lib_path():
"""Load find the path to xgboost dynamic library files.
Returns
-------
lib_path: list(string)
List of all found library path to xgboost
"""
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
# make pythonpack hack: copy this directory one level upper for setup.py
dll_path = [curr_path, os.path.join(curr_path, '../../wrapper/'),
os.path.join(curr_path, './wrapper/')]
if os.name == 'nt':
if platform.architecture()[0] == '64bit':
dll_path.append(os.path.join(curr_path, '../../windows/x64/Release/'))
# hack for pip installation when copy all parent source directory here
dll_path.append(os.path.join(curr_path, './windows/x64/Release/'))
else:
dll_path.append(os.path.join(curr_path, '../../windows/Release/'))
# hack for pip installation when copy all parent source directory here
dll_path.append(os.path.join(curr_path, './windows/Release/'))
if os.name == 'nt':
dll_path = [os.path.join(p, 'xgboost_wrapper.dll') for p in dll_path]
else:
dll_path = [os.path.join(p, 'libxgboostwrapper.so') for p in dll_path]
lib_path = [p for p in dll_path if os.path.exists(p) and os.path.isfile(p)]
#From github issues, most of installation errors come from machines w/o compilers
if len(lib_path) == 0 and not os.environ.get('XGBOOST_BUILD_DOC', False):
raise XGBoostLibraryNotFound(
'Cannot find XGBoost Libarary in the candicate path, ' +
'did you install compilers and run build.sh in root path?\n'
'List of candidates:\n' + ('\n'.join(dll_path)))
return lib_path | [
"def",
"find_lib_path",
"(",
")",
":",
"curr_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"__file__",
")",
")",
")",
"# make pythonpack hack: copy this directory one level upper for setup.py",
"dll_path",
"=",
"[",
"curr_path",
",",
"os",
".",
"path",
".",
"join",
"(",
"curr_path",
",",
"'../../wrapper/'",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"curr_path",
",",
"'./wrapper/'",
")",
"]",
"if",
"os",
".",
"name",
"==",
"'nt'",
":",
"if",
"platform",
".",
"architecture",
"(",
")",
"[",
"0",
"]",
"==",
"'64bit'",
":",
"dll_path",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"curr_path",
",",
"'../../windows/x64/Release/'",
")",
")",
"# hack for pip installation when copy all parent source directory here",
"dll_path",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"curr_path",
",",
"'./windows/x64/Release/'",
")",
")",
"else",
":",
"dll_path",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"curr_path",
",",
"'../../windows/Release/'",
")",
")",
"# hack for pip installation when copy all parent source directory here",
"dll_path",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"curr_path",
",",
"'./windows/Release/'",
")",
")",
"if",
"os",
".",
"name",
"==",
"'nt'",
":",
"dll_path",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"p",
",",
"'xgboost_wrapper.dll'",
")",
"for",
"p",
"in",
"dll_path",
"]",
"else",
":",
"dll_path",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"p",
",",
"'libxgboostwrapper.so'",
")",
"for",
"p",
"in",
"dll_path",
"]",
"lib_path",
"=",
"[",
"p",
"for",
"p",
"in",
"dll_path",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"p",
")",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"p",
")",
"]",
"#From github issues, most of installation errors come from machines w/o compilers",
"if",
"len",
"(",
"lib_path",
")",
"==",
"0",
"and",
"not",
"os",
".",
"environ",
".",
"get",
"(",
"'XGBOOST_BUILD_DOC'",
",",
"False",
")",
":",
"raise",
"XGBoostLibraryNotFound",
"(",
"'Cannot find XGBoost Libarary in the candicate path, '",
"+",
"'did you install compilers and run build.sh in root path?\\n'",
"'List of candidates:\\n'",
"+",
"(",
"'\\n'",
".",
"join",
"(",
"dll_path",
")",
")",
")",
"return",
"lib_path"
] | Load find the path to xgboost dynamic library files.
Returns
-------
lib_path: list(string)
List of all found library path to xgboost | [
"Load",
"find",
"the",
"path",
"to",
"xgboost",
"dynamic",
"library",
"files",
"."
] | python | train |
theno/fabsetup | fabsetup/fabfile/setup/service/__init__.py | https://github.com/theno/fabsetup/blob/ced728abff93551ba5677e63bc1bdc0ef5ca5777/fabsetup/fabfile/setup/service/__init__.py#L191-L233 | def vnc_raspi_osmc():
'''Install and configure dispmanx_vnc server on osmc (raspberry pi).
More Infos:
* https://github.com/patrikolausson/dispmanx_vnc
* https://discourse.osmc.tv/t/howto-install-a-vnc-server-on-the-raspberry-pi/1517
* tightvnc:
* http://raspberry.tips/raspberrypi-einsteiger/raspberry-pi-einsteiger-guide-vnc-einrichten-teil-4/
* http://jankarres.de/2012/08/raspberry-pi-vnc-server-installieren/
'''
print(blue('Install dependencies'))
install_packages([
'git',
'build-essential',
'rbp-userland-dev-osmc',
'libvncserver-dev',
'libconfig++-dev',
])
print(blue('Build vnc server for raspberry pi using dispmanx '
'(dispmanx_vnc)'))
checkup_git_repo_legacy(
url='https://github.com/patrikolausson/dispmanx_vnc.git')
run('mkdir -p ~/repos')
run('cd ~/repos/dispmanx_vnc && make')
print(blue('set up dispmanx_vnc as a service'))
with warn_only():
run('sudo systemctl stop dispmanx_vncserver.service')
username = env.user
builddir = flo('/home/{username}/repos/dispmanx_vnc')
run(flo('sudo cp {builddir}/dispmanx_vncserver /usr/bin'))
run('sudo chmod +x /usr/bin/dispmanx_vncserver')
fabfile_data_dir = FABFILE_DATA_DIR
put('{fabfile_data_dir}/files/etc/dispmanx_vncserver.conf', '/tmp/')
run('sudo mv /tmp/dispmanx_vncserver.conf /etc/dispmanx_vncserver.conf')
put('{fabfile_data_dir}/files/etc/systemd/system/dispmanx_vncserver.service',
'/tmp/')
run('sudo mv /tmp/dispmanx_vncserver.service '
'/etc/systemd/system/dispmanx_vncserver.service')
run('sudo systemctl start dispmanx_vncserver.service')
run('sudo systemctl enable dispmanx_vncserver.service')
run('sudo systemctl daemon-reload') | [
"def",
"vnc_raspi_osmc",
"(",
")",
":",
"print",
"(",
"blue",
"(",
"'Install dependencies'",
")",
")",
"install_packages",
"(",
"[",
"'git'",
",",
"'build-essential'",
",",
"'rbp-userland-dev-osmc'",
",",
"'libvncserver-dev'",
",",
"'libconfig++-dev'",
",",
"]",
")",
"print",
"(",
"blue",
"(",
"'Build vnc server for raspberry pi using dispmanx '",
"'(dispmanx_vnc)'",
")",
")",
"checkup_git_repo_legacy",
"(",
"url",
"=",
"'https://github.com/patrikolausson/dispmanx_vnc.git'",
")",
"run",
"(",
"'mkdir -p ~/repos'",
")",
"run",
"(",
"'cd ~/repos/dispmanx_vnc && make'",
")",
"print",
"(",
"blue",
"(",
"'set up dispmanx_vnc as a service'",
")",
")",
"with",
"warn_only",
"(",
")",
":",
"run",
"(",
"'sudo systemctl stop dispmanx_vncserver.service'",
")",
"username",
"=",
"env",
".",
"user",
"builddir",
"=",
"flo",
"(",
"'/home/{username}/repos/dispmanx_vnc'",
")",
"run",
"(",
"flo",
"(",
"'sudo cp {builddir}/dispmanx_vncserver /usr/bin'",
")",
")",
"run",
"(",
"'sudo chmod +x /usr/bin/dispmanx_vncserver'",
")",
"fabfile_data_dir",
"=",
"FABFILE_DATA_DIR",
"put",
"(",
"'{fabfile_data_dir}/files/etc/dispmanx_vncserver.conf'",
",",
"'/tmp/'",
")",
"run",
"(",
"'sudo mv /tmp/dispmanx_vncserver.conf /etc/dispmanx_vncserver.conf'",
")",
"put",
"(",
"'{fabfile_data_dir}/files/etc/systemd/system/dispmanx_vncserver.service'",
",",
"'/tmp/'",
")",
"run",
"(",
"'sudo mv /tmp/dispmanx_vncserver.service '",
"'/etc/systemd/system/dispmanx_vncserver.service'",
")",
"run",
"(",
"'sudo systemctl start dispmanx_vncserver.service'",
")",
"run",
"(",
"'sudo systemctl enable dispmanx_vncserver.service'",
")",
"run",
"(",
"'sudo systemctl daemon-reload'",
")"
] | Install and configure dispmanx_vnc server on osmc (raspberry pi).
More Infos:
* https://github.com/patrikolausson/dispmanx_vnc
* https://discourse.osmc.tv/t/howto-install-a-vnc-server-on-the-raspberry-pi/1517
* tightvnc:
* http://raspberry.tips/raspberrypi-einsteiger/raspberry-pi-einsteiger-guide-vnc-einrichten-teil-4/
* http://jankarres.de/2012/08/raspberry-pi-vnc-server-installieren/ | [
"Install",
"and",
"configure",
"dispmanx_vnc",
"server",
"on",
"osmc",
"(",
"raspberry",
"pi",
")",
"."
] | python | train |
roamanalytics/mittens | mittens/np_mittens.py | https://github.com/roamanalytics/mittens/blob/dbf0c3f8d18651475cf7e21ab1ceb824c5f89150/mittens/np_mittens.py#L176-L200 | def get_step(self, grad):
"""Computes the 'step' to take for the next gradient descent update.
Returns the step rather than performing the update so that
parameters can be updated in place rather than overwritten.
Examples
--------
>>> gradient = # ...
>>> optimizer = AdaGradOptimizer(0.01)
>>> params -= optimizer.get_step(gradient)
Parameters
----------
grad
Returns
-------
np.array
Size matches `grad`.
"""
if self._momentum is None:
self._momentum = self.initial_accumulator_value * np.ones_like(grad)
self._momentum += grad ** 2
return self.learning_rate * grad / np.sqrt(self._momentum) | [
"def",
"get_step",
"(",
"self",
",",
"grad",
")",
":",
"if",
"self",
".",
"_momentum",
"is",
"None",
":",
"self",
".",
"_momentum",
"=",
"self",
".",
"initial_accumulator_value",
"*",
"np",
".",
"ones_like",
"(",
"grad",
")",
"self",
".",
"_momentum",
"+=",
"grad",
"**",
"2",
"return",
"self",
".",
"learning_rate",
"*",
"grad",
"/",
"np",
".",
"sqrt",
"(",
"self",
".",
"_momentum",
")"
] | Computes the 'step' to take for the next gradient descent update.
Returns the step rather than performing the update so that
parameters can be updated in place rather than overwritten.
Examples
--------
>>> gradient = # ...
>>> optimizer = AdaGradOptimizer(0.01)
>>> params -= optimizer.get_step(gradient)
Parameters
----------
grad
Returns
-------
np.array
Size matches `grad`. | [
"Computes",
"the",
"step",
"to",
"take",
"for",
"the",
"next",
"gradient",
"descent",
"update",
"."
] | python | train |
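The last entry documents an AdaGrad-style optimizer step whose docstring already shows the intended usage (params -= optimizer.get_step(gradient)). The following self-contained NumPy sketch restates that logic; the class name and hyperparameter values are illustrative rather than taken from the source.

# Minimal NumPy sketch of the AdaGrad-style step described above: squared
# gradients are accumulated, and the step is the learning rate times the
# gradient scaled by the square root of that accumulator.
import numpy as np

class AdaGradSketch(object):
    def __init__(self, learning_rate=0.01, initial_accumulator_value=0.1):
        self.learning_rate = learning_rate
        self.initial_accumulator_value = initial_accumulator_value
        self._momentum = None

    def get_step(self, grad):
        # Lazily create the accumulator with the same shape as the gradient.
        if self._momentum is None:
            self._momentum = self.initial_accumulator_value * np.ones_like(grad)
        self._momentum += grad ** 2
        return self.learning_rate * grad / np.sqrt(self._momentum)

if __name__ == "__main__":
    params = np.array([1.0, -2.0])
    grad = np.array([0.5, 0.25])
    optimizer = AdaGradSketch(learning_rate=0.01)
    params -= optimizer.get_step(grad)  # mirrors the docstring usage
    print(params)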