| column | type | details |
|---|---|---|
| id | int32 | 0 to 252k |
| repo | string | lengths 7 to 55 |
| path | string | lengths 4 to 127 |
| func_name | string | lengths 1 to 88 |
| original_string | string | lengths 75 to 19.8k |
| language | string | 1 class (python) |
| code | string | lengths 75 to 19.8k; identical to original_string in the rows shown |
| code_tokens | sequence | token split of code |
| docstring | string | lengths 3 to 17.3k |
| docstring_tokens | sequence | token split of docstring |
| sha | string | length 40 (commit hash) |
| url | string | lengths 87 to 242 (GitHub blob link with line range) |

**id:** 249,500 · **repo:** pavelsof/ipatok · **path:** ipatok/ipa.py · **func_name:** is_suprasegmental · **language:** python

```python
def is_suprasegmental(char, strict=True):
    """
    Check whether the character is a suprasegmental according to the IPA spec.
    This includes tones, word accents, and length markers.

    In strict mode return True only if the diacritic is part of the IPA spec.
    """
    if (char in chart.suprasegmentals) or (char in chart.lengths):
        return True

    return is_tone(char, strict)
```

**sha:** fde3c334b8573315fd1073f14341b71f50f7f006 · **url:** https://github.com/pavelsof/ipatok/blob/fde3c334b8573315fd1073f14341b71f50f7f006/ipatok/ipa.py#L152-L162
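
A usage sketch for this record (illustrative, not part of the dataset); it assumes `ipatok` is installed and that its bundled chart lists the IPA length mark, which matches the released package as far as I know:

```python
# Sketch: assumes `pip install ipatok`; module path taken from the record's path field.
from ipatok.ipa import is_suprasegmental

print(is_suprasegmental('ː'))  # length mark: expected True
print(is_suprasegmental('p'))  # plain consonant: expected False
```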

**id:** 249,501 · **repo:** pavelsof/ipatok · **path:** ipatok/ipa.py · **func_name:** replace_substitutes · **language:** python

```python
def replace_substitutes(string):
    """
    Return the given string with all known common substitutes replaced with
    their IPA-compliant counterparts.
    """
    for non_ipa, ipa in chart.replacements.items():
        string = string.replace(non_ipa, ipa)

    return string
```

**sha:** fde3c334b8573315fd1073f14341b71f50f7f006 · **url:** https://github.com/pavelsof/ipatok/blob/fde3c334b8573315fd1073f14341b71f50f7f006/ipatok/ipa.py#L203-L211
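
A sketch of the replacement behavior; that the bundled `chart.replacements` maps the Latin `g` to the IPA `ɡ` is an assumption about ipatok's data files, not stated in the record:

```python
from ipatok.ipa import replace_substitutes

# Assumes 'g' -> 'ɡ' is among chart.replacements; other pairs depend on the data files.
print(replace_substitutes('gamma'))  # expected: 'ɡamma'
```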

**id:** 249,502 · **repo:** pavelsof/ipatok · **path:** ipatok/ipa.py · **func_name:** Chart.load_ipa · **language:** python

```python
def load_ipa(self, file_path):
    """
    Populate the instance's set properties using the specified file.
    """
    sections = {
        '# consonants (pulmonic)': self.consonants,
        '# consonants (non-pulmonic)': self.consonants,
        '# other symbols': self.consonants,
        '# tie bars': self.tie_bars,
        '# vowels': self.vowels,
        '# diacritics': self.diacritics,
        '# suprasegmentals': self.suprasegmentals,
        '# lengths': self.lengths,
        '# tones and word accents': self.tones}

    curr_section = None

    with open(file_path, encoding='utf-8') as f:
        for line in map(lambda x: x.strip(), f):
            if line.startswith('#'):
                if line in sections:
                    curr_section = sections[line]
                else:
                    curr_section = None
            elif line:
                if curr_section is not None:
                    curr_section.add(line.split('\t')[0])
```

**sha:** fde3c334b8573315fd1073f14341b71f50f7f006 · **url:** https://github.com/pavelsof/ipatok/blob/fde3c334b8573315fd1073f14341b71f50f7f006/ipatok/ipa.py#L39-L65

**id:** 249,503 · **repo:** pavelsof/ipatok · **path:** ipatok/ipa.py · **func_name:** Chart.load_replacements · **language:** python

```python
def load_replacements(self, file_path):
    """
    Populate self.replacements using the specified file.
    """
    with open(file_path, encoding='utf-8') as f:
        for line in map(lambda x: x.strip(), f):
            if line:
                line = line.split('\t')
                self.replacements[line[0]] = line[1]
```

**sha:** fde3c334b8573315fd1073f14341b71f50f7f006 · **url:** https://github.com/pavelsof/ipatok/blob/fde3c334b8573315fd1073f14341b71f50f7f006/ipatok/ipa.py#L67-L75

**id:** 249,504 · **repo:** pmichali/whodunit · **path:** whodunit/__init__.py · **func_name:** sort_by_name · **language:** python

```python
def sort_by_name(names):
    """Sort by last name, uniquely."""
    def last_name_key(full_name):
        parts = full_name.split(' ')
        if len(parts) == 1:
            return full_name.upper()
        last_first = parts[-1] + ' ' + ' '.join(parts[:-1])
        return last_first.upper()

    return sorted(set(names), key=last_name_key)
```

**sha:** eed9107533766d716469e35fbb647a39dfa07035 · **url:** https://github.com/pmichali/whodunit/blob/eed9107533766d716469e35fbb647a39dfa07035/whodunit/__init__.py#L467-L477
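
`sort_by_name` lives in `whodunit/__init__.py`, so it should import from the package top level; a minimal sketch with made-up names:

```python
from whodunit import sort_by_name

names = ['John Smith', 'Ada Lovelace', 'Ada Lovelace', 'Plato']
# set() drops the duplicate; the key sorts on upper-cased "Lastname Firstname",
# so LOVELACE ADA < PLATO < SMITH JOHN.
print(sort_by_name(names))  # ['Ada Lovelace', 'Plato', 'John Smith']
```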

**id:** 249,505 · **repo:** pmichali/whodunit · **path:** whodunit/__init__.py · **func_name:** BlameRecord.store_attribute · **language:** python

```python
def store_attribute(self, key, value):
    """Store blame info we are interested in."""
    if key == 'summary' or key == 'filename' or key == 'previous':
        return
    attr = key.replace('-', '_')
    if key.endswith('-time'):
        value = int(value)
    setattr(self, attr, value)
```

**sha:** eed9107533766d716469e35fbb647a39dfa07035 · **url:** https://github.com/pmichali/whodunit/blob/eed9107533766d716469e35fbb647a39dfa07035/whodunit/__init__.py#L111-L118

**id:** 249,506 · **repo:** pmichali/whodunit · **path:** whodunit/__init__.py · **func_name:** Owners.is_git_file · **language:** python

```python
def is_git_file(cls, path, name):
    """Determine if file is known by git."""
    os.chdir(path)
    p = subprocess.Popen(['git', 'ls-files', '--error-unmatch', name],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    p.wait()
    return p.returncode == 0
```

**sha:** eed9107533766d716469e35fbb647a39dfa07035 · **url:** https://github.com/pmichali/whodunit/blob/eed9107533766d716469e35fbb647a39dfa07035/whodunit/__init__.py#L175-L181

**id:** 249,507 · **repo:** pmichali/whodunit · **path:** whodunit/__init__.py · **func_name:** Owners.collect_modules · **language:** python

```python
def collect_modules(self):
    """Generator to look for git files in tree. Will handle all lines."""
    for path, dirlist, filelist in os.walk(self.root):
        for name in fnmatch.filter(filelist, self.filter):
            if self.is_git_file(path, name):
                yield (os.path.join(path, name), [])
```

**sha:** eed9107533766d716469e35fbb647a39dfa07035 · **url:** https://github.com/pmichali/whodunit/blob/eed9107533766d716469e35fbb647a39dfa07035/whodunit/__init__.py#L183-L188

**id:** 249,508 · **repo:** pmichali/whodunit · **path:** whodunit/__init__.py · **func_name:** Owners.collect_blame_info · **language:** python

```python
def collect_blame_info(cls, matches):
    """Runs git blame on files, for the specified sets of line ranges.

    If no line range tuples are provided, it will do all lines.
    """
    old_area = None
    for filename, ranges in matches:
        area, name = os.path.split(filename)
        if not area:
            area = '.'
        if area != old_area:
            print("\n\n%s/\n" % area)
            old_area = area
        print("%s " % name, end="")
        filter = cls.build_line_range_filter(ranges)
        command = ['git', 'blame', '--line-porcelain'] + filter + [name]
        os.chdir(area)
        p = subprocess.Popen(command, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        out, err = p.communicate()
        if err:
            print(" <<<<<<<<<< Unable to collect 'git blame' info:", err)
        else:
            yield out
```

**sha:** eed9107533766d716469e35fbb647a39dfa07035 · **url:** https://github.com/pmichali/whodunit/blob/eed9107533766d716469e35fbb647a39dfa07035/whodunit/__init__.py#L195-L218

**id:** 249,509 · **repo:** pmichali/whodunit · **path:** whodunit/__init__.py · **func_name:** Owners.unique_authors · **language:** python

```python
def unique_authors(self, limit):
    """Unique list of authors, but preserving order."""
    seen = set()
    if limit == 0:
        limit = None
    seen_add = seen.add  # Assign to variable, so not resolved each time
    return [x.author for x in self.sorted_commits[:limit]
            if not (x.author in seen or seen_add(x.author))]
```

**sha:** eed9107533766d716469e35fbb647a39dfa07035 · **url:** https://github.com/pmichali/whodunit/blob/eed9107533766d716469e35fbb647a39dfa07035/whodunit/__init__.py#L247-L254
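
The comprehension above relies on a common order-preserving dedup idiom: `set.add` returns `None`, so `x in seen or seen_add(x)` is falsy exactly the first time `x` appears. A standalone demonstration:

```python
items = ['alice', 'bob', 'alice', 'carol', 'bob']
seen = set()
seen_add = seen.add  # bind the method once so it is not re-resolved per element
unique = [x for x in items if not (x in seen or seen_add(x))]
print(unique)  # ['alice', 'bob', 'carol']
```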

**id:** 249,510 · **repo:** pmichali/whodunit · **path:** whodunit/__init__.py · **func_name:** Owners.show · **language:** python

```python
def show(self, commit):
    """Display one commit line.

    The output will be:
        <uuid> <#lines> <author> <short-commit-date>
    If verbose flag set, the output will be:
        <uuid> <#lines> <author+email> <long-date> <committer+email>
    """
    author = commit.author
    author_width = 25
    committer = ''
    commit_date = date_to_str(commit.committer_time, commit.committer_tz,
                              self.verbose)
    if self.verbose:
        author += " %s" % commit.author_mail
        author_width = 50
        committer = " %s %s" % (commit.committer, commit.committer_mail)
    return " {} {:>5d} {:{}s} {}{}".format(
        commit.uuid[:8], commit.line_count, author, author_width,
        commit_date, committer)
```

**sha:** eed9107533766d716469e35fbb647a39dfa07035 · **url:** https://github.com/pmichali/whodunit/blob/eed9107533766d716469e35fbb647a39dfa07035/whodunit/__init__.py#L262-L282

**id:** 249,511 · **repo:** pmichali/whodunit · **path:** whodunit/__init__.py · **func_name:** SizeOwners.merge_user_commits · **language:** python

```python
def merge_user_commits(cls, commits):
    """Merge all the commits for the user.

    Aggregate line counts, and use the most recent commit (by date/time)
    as the representative commit for the user.
    """
    user = None
    for commit in commits:
        if not user:
            user = commit
        else:
            if commit.committer_time > user.committer_time:
                commit.line_count += user.line_count
                user = commit
            else:
                user.line_count += commit.line_count
    return user
```

**sha:** eed9107533766d716469e35fbb647a39dfa07035 · **url:** https://github.com/pmichali/whodunit/blob/eed9107533766d716469e35fbb647a39dfa07035/whodunit/__init__.py#L288-L304

**id:** 249,512 · **repo:** pmichali/whodunit · **path:** whodunit/__init__.py · **func_name:** SizeOwners.sort · **language:** python

```python
def sort(self):
    """Sort by commit size, per author."""
    # First sort commits by author email
    users = []
    # Group commits by author email, so they can be merged
    for _, group in itertools.groupby(sorted(self.commits),
                                      operator.attrgetter('author_mail')):
        if group:
            users.append(self.merge_user_commits(group))
    # Finally sort by the (aggregated) commits' line counts
    self.sorted_commits = sorted(users,
                                 key=operator.attrgetter('line_count'),
                                 reverse=True)
    return self.sorted_commits
```

**sha:** eed9107533766d716469e35fbb647a39dfa07035 · **url:** https://github.com/pmichali/whodunit/blob/eed9107533766d716469e35fbb647a39dfa07035/whodunit/__init__.py#L306-L319

**id:** 249,513 · **repo:** pmichali/whodunit · **path:** whodunit/__init__.py · **func_name:** CoverageOwners.make_ranges · **language:** python

```python
def make_ranges(cls, lines):
    """Convert list of lines into list of line range tuples.

    Only will be called if there is one or more entries in the list. Single
    lines will be converted into a tuple with the same line.
    """
    start_line = last_line = lines.pop(0)
    ranges = []
    for line in lines:
        if line == (last_line + 1):
            last_line = line
        else:
            ranges.append((start_line, last_line))
            start_line = line
            last_line = line
    ranges.append((start_line, last_line))
    return ranges
```

**sha:** eed9107533766d716469e35fbb647a39dfa07035 · **url:** https://github.com/pmichali/whodunit/blob/eed9107533766d716469e35fbb647a39dfa07035/whodunit/__init__.py#L340-L356
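
A behavior sketch for `make_ranges`, assuming the `cls` parameter reflects a `@classmethod` decorator (note the method pops from, and so mutates, the list it receives):

```python
from whodunit import CoverageOwners  # class defined in whodunit/__init__.py

lines = [3, 4, 5, 10, 12, 13]
print(CoverageOwners.make_ranges(lines))  # [(3, 5), (10, 10), (12, 13)]
```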

**id:** 249,514 · **repo:** pmichali/whodunit · **path:** whodunit/__init__.py · **func_name:** CoverageOwners.determine_coverage · **language:** python

```python
def determine_coverage(cls, coverage_file):
    """Scan the summary section of report looking for coverage data.

    Will see CSS class with "stm mis" (missing coverage), or "stm par"
    (partial coverage), and can extract line number. Will get file name
    from title tag.
    """
    lines = []
    source_file = 'ERROR'
    for line in coverage_file:
        m = title_re.match(line)
        if m:
            if m.group(2) == '100':
                return ('', [])
            source_file = m.group(1)
            continue
        m = source_re.match(line)
        if m:
            lines.append(int(m.group(1)))
            continue
        if end_re.match(line):
            break
    line_ranges = cls.make_ranges(lines)
    return (source_file, line_ranges)
```

**sha:** eed9107533766d716469e35fbb647a39dfa07035 · **url:** https://github.com/pmichali/whodunit/blob/eed9107533766d716469e35fbb647a39dfa07035/whodunit/__init__.py#L359-L382

**id:** 249,515 · **repo:** pmichali/whodunit · **path:** whodunit/__init__.py · **func_name:** CoverageOwners.collect_modules · **language:** python

```python
def collect_modules(self):
    """Generator to obtain lines of interest from coverage report files.

    Will verify that the source file is within the project tree, relative
    to the coverage directory.
    """
    coverage_dir = os.path.join(self.root, 'cover')
    for name in fnmatch.filter(os.listdir(coverage_dir), "*.html"):
        if name == 'index.html':
            continue
        with open(os.path.join(coverage_dir, name)) as cover_file:
            src_file, line_ranges = self.determine_coverage(cover_file)
            if not src_file:
                continue
            src_file = os.path.abspath(os.path.join(self.root, src_file))
            if os.path.isfile(src_file):
                yield (src_file, line_ranges)
            else:
                raise SourceNotFound(
                    "Source file %(file)s not found at %(area)s" %
                    {'file': os.path.basename(src_file),
                     'area': os.path.dirname(src_file)})
```

**sha:** eed9107533766d716469e35fbb647a39dfa07035 · **url:** https://github.com/pmichali/whodunit/blob/eed9107533766d716469e35fbb647a39dfa07035/whodunit/__init__.py#L384-L405

**id:** 249,516 · **repo:** pmichali/whodunit · **path:** whodunit/__init__.py · **func_name:** CoverageOwners.sort · **language:** python

```python
def sort(self):
    """Consolidate adjacent lines, if same commit ID.

    Will modify line number to be a range, when two or more lines with the
    same commit ID.
    """
    self.sorted_commits = []
    if not self.commits:
        return self.sorted_commits
    prev_commit = self.commits.pop(0)
    prev_line = prev_commit.line_number
    prev_uuid = prev_commit.uuid
    for commit in self.commits:
        if (commit.uuid != prev_uuid or
                commit.line_number != (prev_line + 1)):
            prev_commit.lines = self.line_range(prev_commit.line_number,
                                                prev_line)
            self.sorted_commits.append(prev_commit)
            prev_commit = commit
        prev_line = commit.line_number
        prev_uuid = commit.uuid
    # Take care of last commit
    prev_commit.lines = self.line_range(prev_commit.line_number, prev_line)
    self.sorted_commits.append(prev_commit)
    return self.sorted_commits
```

**sha:** eed9107533766d716469e35fbb647a39dfa07035 · **url:** https://github.com/pmichali/whodunit/blob/eed9107533766d716469e35fbb647a39dfa07035/whodunit/__init__.py#L414-L438

**id:** 249,517 · **repo:** monkeython/scriba · **path:** scriba/schemes/scriba_ftps.py · **func_name:** write · **language:** python

```python
def write(url, content, **args):
    """Put an object into a ftps URL."""
    with FTPSResource(url, **args) as resource:
        resource.write(content)
```

**sha:** fb8e7636ed07c3d035433fdd153599ac8b24dfc4 · **url:** https://github.com/monkeython/scriba/blob/fb8e7636ed07c3d035433fdd153599ac8b24dfc4/scriba/schemes/scriba_ftps.py#L27-L30
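
A hedged call sketch; the host, credentials, and any extra keyword arguments that `FTPSResource` accepts are hypothetical:

```python
from scriba.schemes.scriba_ftps import write

# Hypothetical URL; extra keyword arguments, if any, are passed through to FTPSResource.
write('ftps://user:secret@example.com/backups/data.json', b'{"k": 1}')
```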

**id:** 249,518 · **repo:** spookey/photon · **path:** photon/util/structures.py · **func_name:** yaml_str_join · **language:** python

```python
def yaml_str_join(l, n):
    '''
    YAML loader to join strings

    The keywords are as following:

    * `hostname`: Your hostname (from :func:`util.system.get_hostname`)
    * `timestamp`: Current timestamp (from :func:`util.system.get_timestamp`)

    :returns:
        A `non character` joined string |yaml_loader_returns|

    .. note::
        Be careful with timestamps when using a `config` in :ref:`settings`.

    .. seealso:: |yaml_loader_seealso|
    '''

    from photon.util.system import get_hostname, get_timestamp

    s = l.construct_sequence(n)
    for num, seq in enumerate(s):
        if seq == 'hostname':
            s[num] = '%s' % (get_hostname())
        elif seq == 'timestamp':
            s[num] = '%s' % (get_timestamp())
    return ''.join([str(i) for i in s])
```

**sha:** 57212a26ce713ab7723910ee49e3d0ba1697799f · **url:** https://github.com/spookey/photon/blob/57212a26ce713ab7723910ee49e3d0ba1697799f/photon/util/structures.py#L11-L39
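
A sketch of wiring this loader into PyYAML; the `!str_join` tag name is a hypothetical choice for illustration, and photon's own registration may use a different tag:

```python
import yaml  # requires PyYAML; photon must be installed for the loader's own imports

from photon.util.structures import yaml_str_join

yaml.SafeLoader.add_constructor('!str_join', yaml_str_join)  # hypothetical tag name

doc = "logfile: !str_join [hostname, '-', timestamp, '.log']"
print(yaml.safe_load(doc))  # e.g. {'logfile': '<your-host>-<timestamp>.log'}
```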

**id:** 249,519 · **repo:** spookey/photon · **path:** photon/util/structures.py · **func_name:** yaml_loc_join · **language:** python

```python
def yaml_loc_join(l, n):
    '''
    YAML loader to join paths

    The keywords come directly from :func:`util.locations.get_locations`.
    See there!

    :returns:
        A `path separator` (``/``) joined string |yaml_loader_returns|

    .. seealso:: |yaml_loader_seealso|
    '''

    from photon.util.locations import get_locations

    locations = get_locations()
    s = l.construct_sequence(n)
    for num, seq in enumerate(s):
        if seq in locations:
            s[num] = '%s' % (locations[seq])
    return _path.join(*s)
```

**sha:** 57212a26ce713ab7723910ee49e3d0ba1697799f · **url:** https://github.com/spookey/photon/blob/57212a26ce713ab7723910ee49e3d0ba1697799f/photon/util/structures.py#L42-L63

**id:** 249,520 · **repo:** spookey/photon · **path:** photon/util/structures.py · **func_name:** dict_merge · **language:** python

```python
def dict_merge(o, v):
    '''
    Recursively climbs through dictionaries and merges them together.

    :param o:
        The first dictionary
    :param v:
        The second dictionary
    :returns:
        A dictionary (who would have guessed?)

    .. note::
        Make sure `o` & `v` are indeed dictionaries,
        bad things will happen otherwise!
    '''

    if not isinstance(v, dict):
        return v
    res = _deepcopy(o)
    for key in v.keys():
        if res.get(key) and isinstance(res[key], dict):
            res[key] = dict_merge(res[key], v[key])
        else:
            res[key] = _deepcopy(v[key])
    return res
```

**sha:** 57212a26ce713ab7723910ee49e3d0ba1697799f · **url:** https://github.com/spookey/photon/blob/57212a26ce713ab7723910ee49e3d0ba1697799f/photon/util/structures.py#L66-L90
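
A self-contained merge sketch; the deepcopy on both branches means neither input dictionary is mutated:

```python
from photon.util.structures import dict_merge

a = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
b = {'db': {'port': 6432}, 'debug': True}
print(dict_merge(a, b))  # {'db': {'host': 'localhost', 'port': 6432}, 'debug': True}
print(a)                 # unchanged: {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
```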

**id:** 249,521 · **repo:** spookey/photon · **path:** photon/util/structures.py · **func_name:** to_list · **language:** python

```python
def to_list(i, use_keys=False):
    '''
    Converts items to a list.

    :param i: Item to convert

        * If `i` is ``None``, the result is an empty list
        * If `i` is 'string', the result won't be \
          ``['s', 't', 'r',...]`` rather more like ``['string']``
        * If `i` is a nested dictionary, the result will be a flattened list.

    :param use_keys:
        If i is a dictionary, use the keys instead of values
    :returns:
        All items in i as list
    '''

    from photon.util.system import shell_notify

    if not i:
        return []
    if isinstance(i, str):
        return [i]
    if isinstance(i, list):
        return i
    if isinstance(i, dict):
        res = list()
        for e in i.keys() if use_keys else i.values():
            res.append(to_list(e)) if isinstance(e, dict) else res.append(e)
        return res
    shell_notify('type for %s uncovered' % (i), state=True, more=type(i))
```

**sha:** 57212a26ce713ab7723910ee49e3d0ba1697799f · **url:** https://github.com/spookey/photon/blob/57212a26ce713ab7723910ee49e3d0ba1697799f/photon/util/structures.py#L93-L125
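
A quick behavior sketch; note that a nested dictionary becomes a sub-list (appended, not extended), despite the docstring's "flattened" wording:

```python
from photon.util.structures import to_list

print(to_list(None))                             # []
print(to_list('string'))                         # ['string']
print(to_list({'a': 1, 'b': {'c': 2}}))          # [1, [2]]  (nested dict -> sub-list)
print(to_list({'a': 1, 'b': 2}, use_keys=True))  # ['a', 'b']
```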

**id:** 249,522 · **repo:** Ffisegydd/whatis · **path:** whatis/_core.py · **func_name:** this · **language:** python

```python
def this(obj, **kwargs):
    """Prints series of debugging steps to user.

    Runs through pipeline of functions and print results of each.
    """
    verbose = kwargs.get("verbose", True)

    if verbose:
        print('{:=^30}'.format(" whatis.this? "))

    for func in pipeline:
        s = func(obj, **kwargs)
        if s is not None:
            print(s)

    if verbose:
        print('{:=^30}\n'.format(" whatis.this? "))
```

**sha:** eef780ced61aae6d001aeeef7574e5e27e613583 · **url:** https://github.com/Ffisegydd/whatis/blob/eef780ced61aae6d001aeeef7574e5e27e613583/whatis/_core.py#L29-L45
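
A call sketch; importing from the underscored module matches the record's path field, though the package presumably re-exports it as `whatis.this`:

```python
from whatis._core import this  # the record places this() in whatis/_core.py

data = {'a': [1, 2, 3]}
this(data)                 # runs each pipeline step and prints its report between banners
this(data, verbose=False)  # same reports, without the '=== whatis.this? ===' banners
```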

**id:** 249,523 · **repo:** pjuren/pyokit · **path:** src/pyokit/io/repeatmaskerAlignments.py · **func_name:** _get_repeat_masker_header · **language:** python

```python
def _get_repeat_masker_header(pairwise_alignment):
    """generate header string of repeatmasker formatted repr of self."""
    res = ""
    res += str(pairwise_alignment.meta[ALIG_SCORE_KEY]) + " "
    res += "{:.2f}".format(pairwise_alignment.meta[PCENT_SUBS_KEY]) + " "
    res += "{:.2f}".format(pairwise_alignment.meta[PCENT_S1_INDELS_KEY]) + " "
    res += "{:.2f}".format(pairwise_alignment.meta[PCENT_S2_INDELS_KEY]) + " "
    res += (pairwise_alignment.s1.name
            if (pairwise_alignment.s1.name != "" and
                pairwise_alignment.s1.name is not None)
            else UNKNOWN_SEQ_NAME) + " "
    res += str(pairwise_alignment.s1.start) + " "
    res += str(pairwise_alignment.s1.end - 1) + " "
    res += "(" + str(pairwise_alignment.s1.remaining) + ") "
    res += ("C " if not pairwise_alignment.s2.is_positive_strand() else "")
    res += (pairwise_alignment.s2.name
            if (pairwise_alignment.s2.name != "" and
                pairwise_alignment.s2.name is not None)
            else UNKNOWN_SEQ_NAME) + " "
    res += ("(" + str(pairwise_alignment.s2.remaining) + ")"
            if not pairwise_alignment.s2.is_positive_strand()
            else str(pairwise_alignment.s2.start))
    res += " "
    # Note here that we need to convert between our internal representation
    # for coordinates and the repeat-masker one; internally, we always store
    # coordinates as exclusive of the final value with start < end;
    # repeatmasker gives the larger coordinate as the 'start' when the match
    # is to the reverse complement, so we have to swap start/end, and its
    # coordinates are inclusive of end, so we have to subtract 1 from end.
    res += str(pairwise_alignment.s2.end - 1) + " "
    res += (str(pairwise_alignment.s2.start)
            if not pairwise_alignment.s2.is_positive_strand()
            else "(" + str(pairwise_alignment.s2.remaining) + ")") + " "
    res += pairwise_alignment.meta[UNKNOWN_RM_HEADER_FIELD_KEY] + " "
    res += str(pairwise_alignment.meta[RM_ID_KEY])
    return res
```

**sha:** fddae123b5d817daa39496183f19c000d9c3791f · **url:** https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/io/repeatmaskerAlignments.py#L102-L137

**id:** 249,524 · **repo:** pjuren/pyokit · **path:** src/pyokit/io/repeatmaskerAlignments.py · **func_name:** _rm_is_alignment_line · **language:** python

```python
def _rm_is_alignment_line(parts, s1_name, s2_name):
    """
    return true if the tokenized line is a repeatmasker alignment line.

    :param parts:   the line, already split into tokens around whitespace
    :param s1_name: the name of the first sequence, as extracted from the
                    header of the element this line is in
    :param s2_name: the name of the second sequence, as extracted from the
                    header of the element this line is in
    """
    if len(parts) < 2:
        return False
    if _rm_name_match(parts[0], s1_name):
        return True
    if (_rm_name_match(parts[0], s2_name) or
            (parts[0] == "C" and _rm_name_match(parts[1], s2_name))):
        return True
    return False
```

**sha:** fddae123b5d817daa39496183f19c000d9c3791f · **url:** https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/io/repeatmaskerAlignments.py#L252-L269
249,525 | pjuren/pyokit | src/pyokit/io/repeatmaskerAlignments.py | _rm_is_header_line | def _rm_is_header_line(parts, n):
"""
determine whether a pre-split string is a repeat-masker alignment header.
headers have no special structure or symbol to mark them, so this is based
only on the number of elements, and what data type they are.
"""
if (n == 15 and parts[8] == "C"):
return True
if (n == 14 and parts[0].isdigit()):
return True | python | def _rm_is_header_line(parts, n):
"""
determine whether a pre-split string is a repeat-masker alignment header.
headers have no special structure or symbol to mark them, so this is based
only on the number of elements, and what data type they are.
"""
if (n == 15 and parts[8] == "C"):
return True
if (n == 14 and parts[0].isdigit()):
return True | [
"def",
"_rm_is_header_line",
"(",
"parts",
",",
"n",
")",
":",
"if",
"(",
"n",
"==",
"15",
"and",
"parts",
"[",
"8",
"]",
"==",
"\"C\"",
")",
":",
"return",
"True",
"if",
"(",
"n",
"==",
"14",
"and",
"parts",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
")",
":",
"return",
"True"
] | determine whether a pre-split string is a repeat-masker alignment header.
headers have no special structure or symbol to mark them, so this is based
only on the number of elements, and what data type they are. | [
"determine",
"whether",
"a",
"pre",
"-",
"split",
"string",
"is",
"a",
"repeat",
"-",
"masker",
"alignment",
"header",
"."
] | fddae123b5d817daa39496183f19c000d9c3791f | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/io/repeatmaskerAlignments.py#L272-L282 |
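A hedged sketch exercising the header check on the two layouts described in this file; the import is an assumption as above.

```python
# Sketch: header detection on pre-split lines (n is the token count).
# Assumes the private helper is importable from an installed pyokit.
from pyokit.io.repeatmaskerAlignments import _rm_is_header_line

fwd = "239 29.42 1.92 0.97 chr1 11 17 (41) XX#YY 1 104 (74) m_b1s502i1 4".split()
rev = "239 29.42 1.92 0.97 chr1 11 17 (41) C XX#YY (74) 104 1 m_b1s502i1 4".split()
print(_rm_is_header_line(fwd, len(fwd)))      # True  (14 fields)
print(_rm_is_header_line(rev, len(rev)))      # True  (15 fields, parts[8] == "C")
print(_rm_is_header_line(["chr1", "11"], 2))  # falsy -> not a header
```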
249,526 | pjuren/pyokit | src/pyokit/io/repeatmaskerAlignments.py | _rm_get_names_from_header | def _rm_get_names_from_header(parts):
"""
get repeat and seq. name from repeatmasker alignment header line.
An example header line is::
239 29.42 1.92 0.97 chr1 11 17 (41) C XX#YY (74) 104 1 m_b1s502i1 4
the genomic sequence name is always at position 4 (zero-based index); the
name of the repeat is at position 9 if matching the reverse complement of
the consensus sequence for the repeat and position 8 otherwise
:param parts: the header line, as a tokenized list.
:return: tuple of (name of genomic sequence, name of repeat sequence)
"""
assert((parts[8] == "C" and len(parts) == 15) or (len(parts) == 14))
return (parts[4], parts[8]) if len(parts) == 14 else (parts[4], parts[9]) | python | def _rm_get_names_from_header(parts):
"""
get repeat and seq. name from repeatmasker alignment header line.
An example header line is::
239 29.42 1.92 0.97 chr1 11 17 (41) C XX#YY (74) 104 1 m_b1s502i1 4
the genomic sequence name is always at position 4 (zero-based index); the
name of the repeat is at position 9 if matching the reverse complement of
the consensus sequence for the repeat and position 8 otherwise
:param parts: the header line, as a tokenized list.
:return: tuple of (name of genomic sequence, name of repeat sequence)
"""
assert((parts[8] == "C" and len(parts) == 15) or (len(parts) == 14))
return (parts[4], parts[8]) if len(parts) == 14 else (parts[4], parts[9]) | [
"def",
"_rm_get_names_from_header",
"(",
"parts",
")",
":",
"assert",
"(",
"(",
"parts",
"[",
"8",
"]",
"==",
"\"C\"",
"and",
"len",
"(",
"parts",
")",
"==",
"15",
")",
"or",
"(",
"len",
"(",
"parts",
")",
"==",
"14",
")",
")",
"return",
"(",
"parts",
"[",
"4",
"]",
",",
"parts",
"[",
"8",
"]",
")",
"if",
"len",
"(",
"parts",
")",
"==",
"14",
"else",
"(",
"parts",
"[",
"4",
"]",
",",
"parts",
"[",
"9",
"]",
")"
] | get repeat and seq. name from repeatmasker alignment header line.
An example header line is::
239 29.42 1.92 0.97 chr1 11 17 (41) C XX#YY (74) 104 1 m_b1s502i1 4
the genomic sequence name is always at position 4 (zero-based index); the
name of the repeat is at position 9 if matching the reverse complement of
the consensus sequence for the repeat and position 8 otherwise
:param parts: the header line, as a tokenized list.
:return: tuple of (name of genomic sequence, name of repeat sequence) | [
"get",
"repeat",
"and",
"seq",
".",
"name",
"from",
"repeatmasker",
"alignment",
"header",
"line",
"."
] | fddae123b5d817daa39496183f19c000d9c3791f | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/io/repeatmaskerAlignments.py#L333-L349 |
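A hedged sketch for the name-extraction record above, reusing the docstring's example header in both layouts; the import is an assumption.

```python
# Sketch: pulling (genomic name, repeat name) out of both header layouts.
# Assumes the private helper is importable from an installed pyokit.
from pyokit.io.repeatmaskerAlignments import _rm_get_names_from_header

fwd = "239 29.42 1.92 0.97 chr1 11 17 (41) XX#YY 1 104 (74) m_b1s502i1 4".split()
rev = "239 29.42 1.92 0.97 chr1 11 17 (41) C XX#YY (74) 104 1 m_b1s502i1 4".split()
print(_rm_get_names_from_header(fwd))  # ('chr1', 'XX#YY')
print(_rm_get_names_from_header(rev))  # ('chr1', 'XX#YY')
```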
249,527 | pjuren/pyokit | src/pyokit/io/repeatmaskerAlignments.py | _rm_get_repeat_coords_from_header | def _rm_get_repeat_coords_from_header(parts):
"""
extract the repeat coordinates of a repeat masker match from a header line.
An example header line is::
239 29.42 1.92 0.97 chr1 11 17 (41) C XX#YY (74) 104 1 m_b1s502i1 4
239 29.42 1.92 0.97 chr1 11 17 (41) XX#YY 1 104 (74) m_b1s502i1 4
if the match is to the reverse complement, the start and end coordinates are
at positions 11 and 12 (zero-based indexes), otherwise they're at positions
9 and 10. In the latter case, the 'start' is the earlier number and the end
is the larger one. In reverse complement matches, RM lists the 'start' as the
larger number and the end as the smaller one. We swap these around to match
the Pyokit convention of start < end always and also adjust the end so it is
not inclusive of the last position
:param parts: the header line, as a tokenized list.
:return: tuple of (start, end)
"""
assert((parts[8] == "C" and len(parts) == 15) or (len(parts) == 14))
if len(parts) == 14:
s = int(parts[9])
e = int(parts[10]) + 1
else:
s = int(parts[12])
e = int(parts[11]) + 1
if (s >= e):
raise AlignmentIteratorError("invalid repeatmakser header: " +
" ".join(parts))
return (s, e) | python | def _rm_get_repeat_coords_from_header(parts):
"""
extract the repeat coordinates of a repeat masker match from a header line.
An example header line is::
239 29.42 1.92 0.97 chr1 11 17 (41) C XX#YY (74) 104 1 m_b1s502i1 4
239 29.42 1.92 0.97 chr1 11 17 (41) XX#YY 1 104 (74) m_b1s502i1 4
if the match is to the reverse complement, the start and end coordinates are
at positions 11 and 12 (zero-based indexes), otherwise they're at positions
9 and 10. In the latter case, the 'start' is the earlier number and the end
is the larger one. In reverse complement matches, RM lists the 'start' as the
larger number and the end as the smaller one. We swap these around to match
the Pyokit convention of start < end always and also adjust the end so it is
not inclusive of the last position
:param parts: the header line, as a tokenized list.
:return: tuple of (start, end)
"""
assert((parts[8] == "C" and len(parts) == 15) or (len(parts) == 14))
if len(parts) == 14:
s = int(parts[9])
e = int(parts[10]) + 1
else:
s = int(parts[12])
e = int(parts[11]) + 1
if (s >= e):
raise AlignmentIteratorError("invalid repeatmakser header: " +
" ".join(parts))
return (s, e) | [
"def",
"_rm_get_repeat_coords_from_header",
"(",
"parts",
")",
":",
"assert",
"(",
"(",
"parts",
"[",
"8",
"]",
"==",
"\"C\"",
"and",
"len",
"(",
"parts",
")",
"==",
"15",
")",
"or",
"(",
"len",
"(",
"parts",
")",
"==",
"14",
")",
")",
"if",
"len",
"(",
"parts",
")",
"==",
"14",
":",
"s",
"=",
"int",
"(",
"parts",
"[",
"9",
"]",
")",
"e",
"=",
"int",
"(",
"parts",
"[",
"10",
"]",
")",
"+",
"1",
"else",
":",
"s",
"=",
"int",
"(",
"parts",
"[",
"12",
"]",
")",
"e",
"=",
"int",
"(",
"parts",
"[",
"11",
"]",
")",
"+",
"1",
"if",
"(",
"s",
">=",
"e",
")",
":",
"raise",
"AlignmentIteratorError",
"(",
"\"invalid repeatmakser header: \"",
"+",
"\" \"",
".",
"join",
"(",
"parts",
")",
")",
"return",
"(",
"s",
",",
"e",
")"
] | extract the repeat coordinates of a repeat masker match from a header line.
An example header line is::
239 29.42 1.92 0.97 chr1 11 17 (41) C XX#YY (74) 104 1 m_b1s502i1 4
239 29.42 1.92 0.97 chr1 11 17 (41) XX#YY 1 104 (74) m_b1s502i1 4
if the match is to the reverse complement, the start and end coordinates are
at positions 11 and 12 (zero-based indexes), otherwise they're at positions
9 and 10. In the latter case, the 'start' is the earlier number and the end
is the larger one. In reverse complement matches, RM lists the 'start' as the
larger number and the end as the smaller one. We swap these around to match
the Pyokit convention of start < end always and also adjust the end so it is
not inclusive of the last position
:param parts: the header line, as a tokenized list.
:return: tuple of (start, end) | [
"extract",
"the",
"repeat",
"coordinates",
"of",
"a",
"repeat",
"masker",
"match",
"from",
"a",
"header",
"line",
"."
] | fddae123b5d817daa39496183f19c000d9c3791f | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/io/repeatmaskerAlignments.py#L374-L404 |
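A hedged sketch of the coordinate extraction above, showing the reverse-complement swap and the exclusive end; the import is an assumption.

```python
# Sketch: repeat coordinates from both header layouts. Note the
# reverse-complement swap and the +1 that makes the end exclusive.
# Assumes the private helper is importable from an installed pyokit.
from pyokit.io.repeatmaskerAlignments import _rm_get_repeat_coords_from_header

fwd = "239 29.42 1.92 0.97 chr1 11 17 (41) XX#YY 1 104 (74) m_b1s502i1 4".split()
rev = "239 29.42 1.92 0.97 chr1 11 17 (41) C XX#YY (74) 104 1 m_b1s502i1 4".split()
print(_rm_get_repeat_coords_from_header(fwd))  # (1, 105)
print(_rm_get_repeat_coords_from_header(rev))  # (1, 105)
```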
249,528 | pjuren/pyokit | src/pyokit/io/repeatmaskerAlignments.py | _rm_name_match | def _rm_name_match(s1, s2):
"""
determine whether two sequence names from a repeatmasker alignment match.
:return: True if they are the same string, or if one forms a substring of the
other, else False
"""
m_len = min(len(s1), len(s2))
return s1[:m_len] == s2[:m_len] | python | def _rm_name_match(s1, s2):
"""
determine whether two sequence names from a repeatmasker alignment match.
:return: True if they are the same string, or if one forms a substring of the
other, else False
"""
m_len = min(len(s1), len(s2))
return s1[:m_len] == s2[:m_len] | [
"def",
"_rm_name_match",
"(",
"s1",
",",
"s2",
")",
":",
"m_len",
"=",
"min",
"(",
"len",
"(",
"s1",
")",
",",
"len",
"(",
"s2",
")",
")",
"return",
"s1",
"[",
":",
"m_len",
"]",
"==",
"s2",
"[",
":",
"m_len",
"]"
] | determine whether two sequence names from a repeatmasker alignment match.
:return: True if they are the same string, or if one forms a substring of the
other, else False | [
"determine",
"whether",
"two",
"sequence",
"names",
"from",
"a",
"repeatmasker",
"alignment",
"match",
"."
] | fddae123b5d817daa39496183f19c000d9c3791f | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/io/repeatmaskerAlignments.py#L531-L539 |
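A hedged sketch of the prefix matching above, which tolerates names truncated in the alignment body; the import is an assumption.

```python
# Sketch: prefix-based name matching on a shared-length prefix.
# Assumes the private helper is importable from an installed pyokit.
from pyokit.io.repeatmaskerAlignments import _rm_name_match

print(_rm_name_match("chr1", "chr1"))            # True  (identical)
print(_rm_name_match("L1PA7#LINE/L1", "L1PA7"))  # True  (prefix match)
print(_rm_name_match("chr1", "chr2"))            # False
```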
249,529 | pjuren/pyokit | src/pyokit/io/repeatmaskerAlignments.py | _rm_extract_sequence_and_name | def _rm_extract_sequence_and_name(alig_str_parts, s1_name, s2_name):
"""
parse an alignment line from a repeatmasker alignment and return the name
of the sequence it is from and the sequence portion contained in the line.
:param alig_str_parts: the alignment string, split around whitespace as list
:param s1_name: the name of the first sequence in the alignment this line is
from
:param s2_name: the name of the second sequence in the alignment this line is
from
:return: a tuple of name and sequence string; name will always be either
s1_name or s2_name
:raise AlignmentIteratorError: if the line doesn't have the expected number
of elements, or the name does not match
either of s1_name or s2_name
"""
# first, based on the number of parts we have we'll guess whether it's a
# reverse complement or not
if len(alig_str_parts) == 4:
# expect the first element to match something...
nm = alig_str_parts[0]
seq = alig_str_parts[2]
elif len(alig_str_parts) == 5:
# expect the second element to match something...
nm = alig_str_parts[1]
seq = alig_str_parts[3]
else:
raise AlignmentIteratorError("failed parsing alignment line '" +
" ".join(alig_str_parts) + "'; reason: " +
"expected this line to have 4 or 5 " +
"elements, but it has " +
str(len(alig_str_parts)))
if _rm_name_match(nm, s1_name):
return s1_name, seq
elif _rm_name_match(nm, s2_name):
return s2_name, seq
else:
raise AlignmentIteratorError("failed parsing alignment line '" +
" ".join(alig_str_parts) + "'; reason: " +
"extracted alignment name (" + nm + ") " +
"did not match either sequence name from " +
"header line (" + s1_name + " or " +
s2_name + ")") | python | def _rm_extract_sequence_and_name(alig_str_parts, s1_name, s2_name):
"""
parse an alignment line from a repeatmasker alignment and return the name
of the sequence it is from and the sequence portion contained in the line.
:param alig_str_parts: the alignment string, split around whitespace as list
:param s1_name: the name of the first sequence in the alignment this line is
from
:param s2_name: the name of the second sequence in the alignment this line is
from
:return: a tuple of name and sequence string; name will always be either
s1_name or s2_name
:raise AlignmentIteratorError: if the line doesn't have the expected number
of elements, or the name does not match
either of s1_name or s2_name
"""
# first, based on the number of parts we have we'll guess whether it's a
# reverse complement or not
if len(alig_str_parts) == 4:
# expect the first element to match something...
nm = alig_str_parts[0]
seq = alig_str_parts[2]
elif len(alig_str_parts) == 5:
# expect the second element to match something...
nm = alig_str_parts[1]
seq = alig_str_parts[3]
else:
raise AlignmentIteratorError("failed parsing alignment line '" +
" ".join(alig_str_parts) + "'; reason: " +
"expected this line to have 4 or 5 " +
"elements, but it has " +
str(len(alig_str_parts)))
if _rm_name_match(nm, s1_name):
return s1_name, seq
elif _rm_name_match(nm, s2_name):
return s2_name, seq
else:
raise AlignmentIteratorError("failed parsing alignment line '" +
" ".join(alig_str_parts) + "'; reason: " +
"extracted alignment name (" + nm + ") " +
"did not match either sequence name from " +
"header line (" + s1_name + " or " +
s2_name + ")") | [
"def",
"_rm_extract_sequence_and_name",
"(",
"alig_str_parts",
",",
"s1_name",
",",
"s2_name",
")",
":",
"# first, based on the number of parts we have we'll guess whether its a",
"# reverse complement or not",
"if",
"len",
"(",
"alig_str_parts",
")",
"==",
"4",
":",
"# expect the first element to amtch something..",
"nm",
"=",
"alig_str_parts",
"[",
"0",
"]",
"seq",
"=",
"alig_str_parts",
"[",
"2",
"]",
"elif",
"len",
"(",
"alig_str_parts",
")",
"==",
"5",
":",
"# expect the second element to match something...",
"nm",
"=",
"alig_str_parts",
"[",
"1",
"]",
"seq",
"=",
"alig_str_parts",
"[",
"3",
"]",
"else",
":",
"raise",
"AlignmentIteratorError",
"(",
"\"failed parsing alignment line '\"",
"+",
"\" \"",
".",
"join",
"(",
"alig_str_parts",
")",
"+",
"\"'; reason: \"",
"+",
"\"expected this line to have 4 or 5 \"",
"+",
"\"elements, but it has \"",
"+",
"str",
"(",
"len",
"(",
"alig_str_parts",
")",
")",
")",
"if",
"_rm_name_match",
"(",
"nm",
",",
"s1_name",
")",
":",
"return",
"s1_name",
",",
"seq",
"elif",
"_rm_name_match",
"(",
"nm",
",",
"s2_name",
")",
":",
"return",
"s2_name",
",",
"seq",
"else",
":",
"raise",
"AlignmentIteratorError",
"(",
"\"failed parsing alignment line '\"",
"+",
"\" \"",
".",
"join",
"(",
"alig_str_parts",
")",
"+",
"\"'; reason: \"",
"+",
"\"extracted alignment name (\"",
"+",
"nm",
"+",
"\") \"",
"+",
"\"did not match either sequence name from \"",
"+",
"\"header line (\"",
"+",
"s1_name",
"+",
"\" or \"",
"+",
"s2_name",
"+",
"\")\"",
")"
] | parse an alignment line from a repeatmasker alignment and return the name
of the sequence it is from and the sequence portion contained in the line.
:param alig_str_parts: the alignment string, split around whitespace as list
:param s1_name: the name of the first sequence in the alignment this line is
from
:param s2_name: the name of the second sequence in the alignment this line is
from
:return: a tuple of name and sequence string; name will always be either
s1_name or s2_name
:raise AlignmentIteratorError: if the line doesn't have the expected number
of elements, or the name does not match
either of s1_name or s2_name | [
"parse",
"an",
"alignment",
"line",
"from",
"a",
"repeatmasker",
"alignment",
"and",
"return",
"the",
"name",
"of",
"the",
"sequence",
"it",
"si",
"from",
"and",
"the",
"sequence",
"portion",
"contained",
"in",
"the",
"line",
"."
] | fddae123b5d817daa39496183f19c000d9c3791f | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/io/repeatmaskerAlignments.py#L555-L597 |
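A hedged sketch of the extraction helper above on a 4-token and a 5-token alignment line; the import is an assumption.

```python
# Sketch: name/sequence extraction from 4- and 5-token alignment lines.
# Assumes the private helper is importable from an installed pyokit.
from pyokit.io.repeatmaskerAlignments import _rm_extract_sequence_and_name

plain = "chr1 11 CACTG-AGGA 20".split()       # 4 tokens
rcomp = "C XX#YY 74 CACTGCAGGA 65".split()    # 5 tokens, leading "C"
print(_rm_extract_sequence_and_name(plain, "chr1", "XX#YY"))
# ('chr1', 'CACTG-AGGA')
print(_rm_extract_sequence_and_name(rcomp, "chr1", "XX#YY"))
# ('XX#YY', 'CACTGCAGGA')
```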
249,530 | edeposit/edeposit.amqp.antivirus | src/edeposit/amqp/antivirus/wrappers/clamd.py | scan_file | def scan_file(path):
"""
Scan `path` for viruses using ``clamd`` antivirus daemon.
Args:
path (str): Relative or absolute path of file/directory you need to
scan.
Returns:
dict: ``{filename: ("FOUND", "virus type")}`` or blank dict.
Raises:
ValueError: When the server is not running.
AssertionError: When the internal file doesn't exist.
"""
path = os.path.abspath(path)
assert os.path.exists(path), "Unreachable file '%s'." % path
try:
cd = pyclamd.ClamdUnixSocket()
cd.ping()
except pyclamd.ConnectionError:
cd = pyclamd.ClamdNetworkSocket()
try:
cd.ping()
except pyclamd.ConnectionError:
raise ValueError(
"Couldn't connect to clamd server using unix/network socket."
)
assert cd.ping(), "clamd server is not reachable!"
result = cd.scan_file(path)
return result if result else {} | python | def scan_file(path):
"""
Scan `path` for viruses using ``clamd`` antivirus daemon.
Args:
path (str): Relative or absolute path of file/directory you need to
scan.
Returns:
dict: ``{filename: ("FOUND", "virus type")}`` or blank dict.
Raises:
ValueError: When the server is not running.
AssertionError: When the internal file doesn't exist.
"""
path = os.path.abspath(path)
assert os.path.exists(path), "Unreachable file '%s'." % path
try:
cd = pyclamd.ClamdUnixSocket()
cd.ping()
except pyclamd.ConnectionError:
cd = pyclamd.ClamdNetworkSocket()
try:
cd.ping()
except pyclamd.ConnectionError:
raise ValueError(
"Couldn't connect to clamd server using unix/network socket."
)
assert cd.ping(), "clamd server is not reachable!"
result = cd.scan_file(path)
return result if result else {} | [
"def",
"scan_file",
"(",
"path",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
",",
"\"Unreachable file '%s'.\"",
"%",
"path",
"try",
":",
"cd",
"=",
"pyclamd",
".",
"ClamdUnixSocket",
"(",
")",
"cd",
".",
"ping",
"(",
")",
"except",
"pyclamd",
".",
"ConnectionError",
":",
"cd",
"=",
"pyclamd",
".",
"ClamdNetworkSocket",
"(",
")",
"try",
":",
"cd",
".",
"ping",
"(",
")",
"except",
"pyclamd",
".",
"ConnectionError",
":",
"raise",
"ValueError",
"(",
"\"Couldn't connect to clamd server using unix/network socket.\"",
")",
"cd",
"=",
"pyclamd",
".",
"ClamdUnixSocket",
"(",
")",
"assert",
"cd",
".",
"ping",
"(",
")",
",",
"\"clamd server is not reachable!\"",
"result",
"=",
"cd",
".",
"scan_file",
"(",
"path",
")",
"return",
"result",
"if",
"result",
"else",
"{",
"}"
] | Scan `path` for viruses using ``clamd`` antivirus daemon.
Args:
path (str): Relative or absolute path of file/directory you need to
scan.
Returns:
dict: ``{filename: ("FOUND", "virus type")}`` or blank dict.
Raises:
ValueError: When the server is not running.
AssertionError: When the internal file doesn't exists. | [
"Scan",
"path",
"for",
"viruses",
"using",
"clamd",
"antivirus",
"daemon",
"."
] | 011b38bbe920819fab99a5891b1e70732321a598 | https://github.com/edeposit/edeposit.amqp.antivirus/blob/011b38bbe920819fab99a5891b1e70732321a598/src/edeposit/amqp/antivirus/wrappers/clamd.py#L16-L51 |
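A hedged usage sketch for the record above; it needs a running clamd daemon plus the pyclamd package, and the file path is hypothetical (the EICAR test signature is a safe way to produce a positive hit).

```python
# Sketch: scanning a file through the wrapper above. The module path
# follows the record's `path` field; /tmp/eicar.com is a hypothetical
# file containing the EICAR test signature.
from edeposit.amqp.antivirus.wrappers.clamd import scan_file

result = scan_file("/tmp/eicar.com")
if result:
    for filename, (status, virus) in result.items():
        print(filename, status, virus)  # ... FOUND Eicar-Test-Signature
else:
    print("clean")
```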
249,531 | minhhoit/yacms | yacms/utils/timezone.py | get_best_local_timezone | def get_best_local_timezone():
"""
Compares local timezone offset to pytz's timezone db, to determine
a matching timezone name to use when TIME_ZONE is not set.
"""
zone_name = tzlocal.get_localzone().zone
if zone_name in pytz.all_timezones:
return zone_name
if time.daylight:
local_offset = time.altzone
localtz = time.tzname[1]
else:
local_offset = time.timezone
localtz = time.tzname[0]
local_offset = datetime.timedelta(seconds=-local_offset)
for zone_name in pytz.all_timezones:
timezone = pytz.timezone(zone_name)
if not hasattr(timezone, '_tzinfos'):
continue
for utcoffset, daylight, tzname in timezone._tzinfos:
if utcoffset == local_offset and tzname == localtz:
return zone_name | python | def get_best_local_timezone():
"""
Compares local timezone offset to pytz's timezone db, to determine
a matching timezone name to use when TIME_ZONE is not set.
"""
zone_name = tzlocal.get_localzone().zone
if zone_name in pytz.all_timezones:
return zone_name
if time.daylight:
local_offset = time.altzone
localtz = time.tzname[1]
else:
local_offset = time.timezone
localtz = time.tzname[0]
local_offset = datetime.timedelta(seconds=-local_offset)
for zone_name in pytz.all_timezones:
timezone = pytz.timezone(zone_name)
if not hasattr(timezone, '_tzinfos'):
continue
for utcoffset, daylight, tzname in timezone._tzinfos:
if utcoffset == local_offset and tzname == localtz:
return zone_name | [
"def",
"get_best_local_timezone",
"(",
")",
":",
"zone_name",
"=",
"tzlocal",
".",
"get_localzone",
"(",
")",
".",
"zone",
"if",
"zone_name",
"in",
"pytz",
".",
"all_timezones",
":",
"return",
"zone_name",
"if",
"time",
".",
"daylight",
":",
"local_offset",
"=",
"time",
".",
"altzone",
"localtz",
"=",
"time",
".",
"tzname",
"[",
"1",
"]",
"else",
":",
"local_offset",
"=",
"time",
".",
"timezone",
"localtz",
"=",
"time",
".",
"tzname",
"[",
"0",
"]",
"local_offset",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"-",
"local_offset",
")",
"for",
"zone_name",
"in",
"pytz",
".",
"all_timezones",
":",
"timezone",
"=",
"pytz",
".",
"timezone",
"(",
"zone_name",
")",
"if",
"not",
"hasattr",
"(",
"timezone",
",",
"'_tzinfos'",
")",
":",
"continue",
"for",
"utcoffset",
",",
"daylight",
",",
"tzname",
"in",
"timezone",
".",
"_tzinfos",
":",
"if",
"utcoffset",
"==",
"local_offset",
"and",
"tzname",
"==",
"localtz",
":",
"return",
"zone_name"
] | Compares local timezone offset to pytz's timezone db, to determine
a matching timezone name to use when TIME_ZONE is not set. | [
"Compares",
"local",
"timezone",
"offset",
"to",
"pytz",
"s",
"timezone",
"db",
"to",
"determine",
"a",
"matching",
"timezone",
"name",
"to",
"use",
"when",
"TIME_ZONE",
"is",
"not",
"set",
"."
] | 2921b706b7107c6e8c5f2bbf790ff11f85a2167f | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/utils/timezone.py#L9-L30 |
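A hedged usage sketch for the record above; the import path mirrors the record's `path` field, and pytz/tzlocal must be installed.

```python
# Sketch: the function takes no arguments; it prefers tzlocal's zone
# name and falls back to offset matching against pytz's database.
# Import path follows the record's `path` field (an assumption).
from yacms.utils.timezone import get_best_local_timezone

print(get_best_local_timezone())  # e.g. 'Europe/Berlin' on a CET host
```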
249,532 | tBaxter/django-fretboard | fretboard/templatetags/fretboard_tags.py | topic_quick_links | def topic_quick_links(context, topic, latest, last_seen_time):
"""
Creates topic listing page links for the given topic, with the given
number of posts per page.
Topics with two or three pages will have page links displayed for
each page.
Topics with four or more pages will have page links displayed for the
first page and the last two pages.
"""
output_text = u''
pages = topic.page_count
if not pages or pages == 0:
hits = topic.post_count - 1
if hits < 1:
hits = 1
pages = hits // PAGINATE_BY + 1
# determine if we need to show new link.
if latest and latest.post_date_int > last_seen_time:
output_text += '<a href="{0}#first-new-post" class="new">new</a>'.format(topic.last_url)
# If only one post (or none) only return new link, if anything.
if topic.post_count < 2:
output_text += '1 post'
else:
# We have more than one post. Create last link
if latest:
last_link = '<a href="{0}#post-{1}" title="latest post">'.format(topic.last_url, latest.id)
else:
last_link = ''
# if only one page, just show the number of posts wrapped in a link
if pages < 2:
output_text += '{0}{1} posts</a>'.format(last_link, topic.post_count)
else:
page_link = u'<a href="%spage%%s/">%%s</a>' % topic.get_short_url()
if pages < 4:
page_links = u' '.join([page_link % (page, page) for page in range(1, pages + 1)])
else:
page_links = u' '.join(
[page_link % (1, 1), u'<small>…</small>'] +
[page_link % (page, page) for page in range(pages - 1, pages + 1)]
)
output_text += '{0}{1}►</a>'.format(page_links, last_link)
return {
'output_text': output_text,
'topic': topic,
'forum_slug': context['forum_slug'],
'user': context['user'],
'perms': context['perms']
} | python | def topic_quick_links(context, topic, latest, last_seen_time):
"""
Creates topic listing page links for the given topic, with the given
number of posts per page.
Topics with two or three pages will have page links displayed for
each page.
Topics with four or more pages will have page links displayed for the
first page and the last two pages.
"""
output_text = u''
pages = topic.page_count
if not pages or pages == 0:
hits = topic.post_count - 1
if hits < 1:
hits = 1
pages = hits // PAGINATE_BY + 1
# determine if we need to show new link.
if latest and latest.post_date_int > last_seen_time:
output_text += '<a href="{0}#first-new-post" class="new">new</a>'.format(topic.last_url)
# If only one post (or none) only return new link, if anything.
if topic.post_count < 2:
output_text += '1 post'
else:
# We have more than one post. Create last link
if latest:
last_link = '<a href="{0}#post-{1}" title="latest post">'.format(topic.last_url, latest.id)
else:
last_link = ''
# if only one page, just show the number of posts wrapped in a link
if pages < 2:
output_text += '{0}{1} posts</a>'.format(last_link, topic.post_count)
else:
page_link = u'<a href="%spage%%s/">%%s</a>' % topic.get_short_url()
if pages < 4:
page_links = u' '.join([page_link % (page, page) for page in range(1, pages + 1)])
else:
page_links = u' '.join(
[page_link % (1, 1), u'<small>…</small>'] +
[page_link % (page, page) for page in range(pages - 1, pages + 1)]
)
output_text += '{0}{1}►</a>'.format(page_links, last_link)
return {
'output_text': output_text,
'topic': topic,
'forum_slug': context['forum_slug'],
'user': context['user'],
'perms': context['perms']
} | [
"def",
"topic_quick_links",
"(",
"context",
",",
"topic",
",",
"latest",
",",
"last_seen_time",
")",
":",
"output_text",
"=",
"u''",
"pages",
"=",
"topic",
".",
"page_count",
"if",
"not",
"pages",
"or",
"pages",
"==",
"0",
":",
"hits",
"=",
"topic",
".",
"post_count",
"-",
"1",
"if",
"hits",
"<",
"1",
":",
"hits",
"=",
"1",
"pages",
"=",
"hits",
"//",
"PAGINATE_BY",
"+",
"1",
"# determine if we need to show new link.",
"if",
"latest",
"and",
"latest",
".",
"post_date_int",
">",
"last_seen_time",
":",
"output_text",
"+=",
"'<a href=\"{0}#first-new-post\" class=\"new\">new</a>'",
".",
"format",
"(",
"topic",
".",
"last_url",
")",
"# If only one post (or none) only return new link, if anything.",
"if",
"topic",
".",
"post_count",
"<",
"2",
":",
"output_text",
"+=",
"'1 post'",
"else",
":",
"# We have more than one post. Create last link",
"if",
"latest",
":",
"last_link",
"=",
"'<a href=\"{0}#post-{1}\" title=\"latest post\">'",
".",
"format",
"(",
"topic",
".",
"last_url",
",",
"latest",
".",
"id",
")",
"else",
":",
"last_link",
"=",
"''",
"# if only one page, just show the number of posts wrapped in a link",
"if",
"pages",
"<",
"2",
":",
"output_text",
"+=",
"'{0}{1} posts</a>'",
".",
"format",
"(",
"last_link",
",",
"topic",
".",
"post_count",
")",
"else",
":",
"page_link",
"=",
"u'<a href=\"%spage%%s/\">%%s</a>'",
"%",
"topic",
".",
"get_short_url",
"(",
")",
"if",
"pages",
"<",
"4",
":",
"page_links",
"=",
"u' '",
".",
"join",
"(",
"[",
"page_link",
"%",
"(",
"page",
",",
"page",
")",
"for",
"page",
"in",
"range",
"(",
"1",
",",
"pages",
"+",
"1",
")",
"]",
")",
"else",
":",
"page_links",
"=",
"u' '",
".",
"join",
"(",
"[",
"page_link",
"%",
"(",
"1",
",",
"1",
")",
",",
"u'<small>…</small>'",
"]",
"+",
"[",
"page_link",
"%",
"(",
"page",
",",
"page",
")",
"for",
"page",
"in",
"range",
"(",
"pages",
"-",
"1",
",",
"pages",
"+",
"1",
")",
"]",
")",
"output_text",
"+=",
"'{0}{1}►</a>'",
".",
"format",
"(",
"page_links",
",",
"last_link",
")",
"return",
"{",
"'output_text'",
":",
"output_text",
",",
"'topic'",
":",
"topic",
",",
"'forum_slug'",
":",
"context",
"[",
"'forum_slug'",
"]",
",",
"'user'",
":",
"context",
"[",
"'user'",
"]",
",",
"'perms'",
":",
"context",
"[",
"'perms'",
"]",
"}"
] | Creates topic listing page links for the given topic, with the given
number of posts per page.
Topics with two or three pages will have page links displayed for
each page.
Topics with four or more pages will have page links displayed for the
first page and the last two pages.
"Creates",
"topic",
"listing",
"page",
"links",
"for",
"the",
"given",
"topic",
"with",
"the",
"given",
"number",
"of",
"posts",
"per",
"page",
"."
] | 3c3f9557089821283f315a07f3e5a57a2725ab3b | https://github.com/tBaxter/django-fretboard/blob/3c3f9557089821283f315a07f3e5a57a2725ab3b/fretboard/templatetags/fretboard_tags.py#L46-L101 |
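A hedged sketch of the fallback page-count arithmetic the record above uses when `topic.page_count` is unset, isolated as plain Python.

```python
# Sketch of the fallback page-count arithmetic used above, isolated.
# PAGINATE_BY is a module-level setting in the original; 25 is an
# assumed value for illustration.
PAGINATE_BY = 25

def fallback_page_count(post_count):
    hits = max(post_count - 1, 1)
    return hits // PAGINATE_BY + 1

print(fallback_page_count(2))   # 1
print(fallback_page_count(26))  # 2 -> a last-page link gets rendered
```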
249,533 | django-xxx/django-mobi2 | mobi2/middleware.py | ignore_user_agent | def ignore_user_agent(user_agent):
""" compare the useragent from the broswer to the ignore list
This is popular if you want a mobile device to not trigger
as mobile. For example iPad."""
if user_agent:
for ua in MOBI_USER_AGENT_IGNORE_LIST:
if ua and ua.lower() in user_agent.lower():
return True
return False | python | def ignore_user_agent(user_agent):
""" compare the useragent from the broswer to the ignore list
This is popular if you want a mobile device to not trigger
as mobile. For example iPad."""
if user_agent:
for ua in MOBI_USER_AGENT_IGNORE_LIST:
if ua and ua.lower() in user_agent.lower():
return True
return False | [
"def",
"ignore_user_agent",
"(",
"user_agent",
")",
":",
"if",
"user_agent",
":",
"for",
"ua",
"in",
"MOBI_USER_AGENT_IGNORE_LIST",
":",
"if",
"ua",
"and",
"ua",
".",
"lower",
"(",
")",
"in",
"user_agent",
".",
"lower",
"(",
")",
":",
"return",
"True",
"return",
"False"
compare the useragent from the browser to the ignore list
This is popular if you want a mobile device to not trigger
as mobile. For example iPad. | [
"compare",
"the",
"useragent",
"from",
"the",
"broswer",
"to",
"the",
"ignore",
"list",
"This",
"is",
"popular",
"if",
"you",
"want",
"a",
"mobile",
"device",
"to",
"not",
"trigger",
"as",
"mobile",
".",
"For",
"example",
"iPad",
"."
] | 7ac323faa1a9599f3cd39acd3c49626819ce0538 | https://github.com/django-xxx/django-mobi2/blob/7ac323faa1a9599f3cd39acd3c49626819ce0538/mobi2/middleware.py#L13-L21 |
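A self-contained, hedged sketch of the ignore check above; the ignore list comes from settings in the original module, so the value below is assumed.

```python
# Sketch: the ignore check, isolated. MOBI_USER_AGENT_IGNORE_LIST comes
# from settings in the original module; the value below is assumed.
MOBI_USER_AGENT_IGNORE_LIST = ["ipad"]

def ignore_user_agent(user_agent):
    if user_agent:
        for ua in MOBI_USER_AGENT_IGNORE_LIST:
            if ua and ua.lower() in user_agent.lower():
                return True
    return False

print(ignore_user_agent("Mozilla/5.0 (iPad; CPU OS 9_0)"))    # True
print(ignore_user_agent("Mozilla/5.0 (iPhone; CPU OS 9_0)"))  # False
```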
249,534 | django-xxx/django-mobi2 | mobi2/middleware.py | MobileDetectionMiddleware.process_request | def process_request(request):
"""Adds a "mobile" attribute to the request which is True or False
depending on whether the request should be considered to come from a
small-screen device such as a phone or a PDA"""
if 'HTTP_X_OPERAMINI_FEATURES' in request.META:
# Then it's running opera mini. 'Nuff said.
# Reference from:
# http://dev.opera.com/articles/view/opera-mini-request-headers/
request.mobile = True
return None
if 'HTTP_ACCEPT' in request.META:
s = request.META['HTTP_ACCEPT'].lower()
if 'application/vnd.wap.xhtml+xml' in s:
# Then it's a wap browser
request.mobile = True
return None
if 'HTTP_USER_AGENT' in request.META:
# This takes the most processing. Surprisingly enough, when I
# experimented on my own machine, this was the most efficient
# algorithm. Certainly more so than regexes.
# Also, Caching didn't help much, with real-world caches.
s = request.META['HTTP_USER_AGENT'].lower()
for ua in search_strings:
if ua in s:
# check if we are ignoring this user agent: (iPad)
if not ignore_user_agent(s):
request.mobile = True
if MOBI_DETECT_TABLET:
request.tablet = _is_tablet(s)
return None
# Otherwise it's not a mobile
request.mobile = False
request.tablet = False
return None | python | def process_request(request):
"""Adds a "mobile" attribute to the request which is True or False
depending on whether the request should be considered to come from a
small-screen device such as a phone or a PDA"""
if 'HTTP_X_OPERAMINI_FEATURES' in request.META:
# Then it's running opera mini. 'Nuff said.
# Reference from:
# http://dev.opera.com/articles/view/opera-mini-request-headers/
request.mobile = True
return None
if 'HTTP_ACCEPT' in request.META:
s = request.META['HTTP_ACCEPT'].lower()
if 'application/vnd.wap.xhtml+xml' in s:
# Then it's a wap browser
request.mobile = True
return None
if 'HTTP_USER_AGENT' in request.META:
# This takes the most processing. Surprisingly enough, when I
# experimented on my own machine, this was the most efficient
# algorithm. Certainly more so than regexes.
# Also, Caching didn't help much, with real-world caches.
s = request.META['HTTP_USER_AGENT'].lower()
for ua in search_strings:
if ua in s:
# check if we are ignoring this user agent: (iPad)
if not ignore_user_agent(s):
request.mobile = True
if MOBI_DETECT_TABLET:
request.tablet = _is_tablet(s)
return None
# Otherwise it's not a mobile
request.mobile = False
request.tablet = False
return None | [
"def",
"process_request",
"(",
"request",
")",
":",
"if",
"'HTTP_X_OPERAMINI_FEATURES'",
"in",
"request",
".",
"META",
":",
"# Then it's running opera mini. 'Nuff said.",
"# Reference from:",
"# http://dev.opera.com/articles/view/opera-mini-request-headers/",
"request",
".",
"mobile",
"=",
"True",
"return",
"None",
"if",
"'HTTP_ACCEPT'",
"in",
"request",
".",
"META",
":",
"s",
"=",
"request",
".",
"META",
"[",
"'HTTP_ACCEPT'",
"]",
".",
"lower",
"(",
")",
"if",
"'application/vnd.wap.xhtml+xml'",
"in",
"s",
":",
"# Then it's a wap browser",
"request",
".",
"mobile",
"=",
"True",
"return",
"None",
"if",
"'HTTP_USER_AGENT'",
"in",
"request",
".",
"META",
":",
"# This takes the most processing. Surprisingly enough, when I",
"# Experimented on my own machine, this was the most efficient",
"# algorithm. Certainly more so than regexes.",
"# Also, Caching didn't help much, with real-world caches.",
"s",
"=",
"request",
".",
"META",
"[",
"'HTTP_USER_AGENT'",
"]",
".",
"lower",
"(",
")",
"for",
"ua",
"in",
"search_strings",
":",
"if",
"ua",
"in",
"s",
":",
"# check if we are ignoring this user agent: (IPad)",
"if",
"not",
"ignore_user_agent",
"(",
"s",
")",
":",
"request",
".",
"mobile",
"=",
"True",
"if",
"MOBI_DETECT_TABLET",
":",
"request",
".",
"tablet",
"=",
"_is_tablet",
"(",
"s",
")",
"return",
"None",
"# Otherwise it's not a mobile",
"request",
".",
"mobile",
"=",
"False",
"request",
".",
"tablet",
"=",
"False",
"return",
"None"
] | Adds a "mobile" attribute to the request which is True or False
depending on whether the request should be considered to come from a
small-screen device such as a phone or a PDA | [
"Adds",
"a",
"mobile",
"attribute",
"to",
"the",
"request",
"which",
"is",
"True",
"or",
"False",
"depending",
"on",
"whether",
"the",
"request",
"should",
"be",
"considered",
"to",
"come",
"from",
"a",
"small",
"-",
"screen",
"device",
"such",
"as",
"a",
"phone",
"or",
"a",
"PDA"
] | 7ac323faa1a9599f3cd39acd3c49626819ce0538 | https://github.com/django-xxx/django-mobi2/blob/7ac323faa1a9599f3cd39acd3c49626819ce0538/mobi2/middleware.py#L26-L64 |
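A simplified, hedged sketch of the detection order above (Opera Mini header, WAP accept header, then user-agent markers). The real middleware also honours the ignore list and tablet detection; `search_strings` below is an assumed subset of the module's list.

```python
# Stub request carrying only the META dict the detector reads.
class StubRequest(object):
    def __init__(self, meta):
        self.META = meta

search_strings = ["iphone", "android", "blackberry"]  # assumed subset

def is_mobile(request):
    if "HTTP_X_OPERAMINI_FEATURES" in request.META:
        return True
    if "application/vnd.wap.xhtml+xml" in request.META.get("HTTP_ACCEPT", "").lower():
        return True
    ua = request.META.get("HTTP_USER_AGENT", "").lower()
    return any(s in ua for s in search_strings)

print(is_mobile(StubRequest({"HTTP_USER_AGENT": "Mozilla (iPhone)"})))      # True
print(is_mobile(StubRequest({"HTTP_USER_AGENT": "Mozilla (X11; Linux)"})))  # False
```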
249,535 | veltzer/pydmt | pydmt/core/pydmt.py | PyDMT.build_by_builder | def build_by_builder(self, builder: Builder, stats: BuildProcessStats):
""" run one builder, return statistics about the run """
logger = logging.getLogger(__name__)
target_signature = builder.get_signature()
assert target_signature is not None, "builder signature is None"
if self.cache.list_sig_ok(target_signature):
logger.info("verifying [{}]".format(builder.get_name()))
file_bad = 0
file_correct = 0
file_missing = 0
file_total = 0
list_filename = self.cache.get_list_filename(target_signature)
for object_name, signature in Cache.iterate_objects(list_filename):
filename = self.cache.get_object_filename(signature)
if os.path.isfile(object_name):
object_name_signature = sha1_file(object_name)
if object_name_signature != signature:
# logger.info("file [{}] is incorrect. Getting from cache.".format(object_name))
copy_mkdir(filename, object_name)
stats.add_copy_sha1(filename, object_name)
file_bad += 1
else:
# logger.info("file [{}] is up to date".format(object_name))
stats.add_nop(filename, object_name)
file_correct += 1
else:
# logger.info("file [{}] is missing. Getting from cache.".format(object_name))
copy_mkdir(filename, object_name)
stats.add_copy_missing(filename, object_name)
file_missing += 1
file_total += 1
if file_bad > 0 or file_missing > 0:
logger.info("Retrieved {} files from cache (bad/correct/missing = {}/{}/{}".format(
file_total, file_bad, file_correct, file_missing))
else:
logger.info("ok [{}]".format(builder.get_name()))
else:
# this is one of the rare cases in which we really want to catch all exceptions.
# noinspection PyBroadException
try:
logger.info("running [{}]".format(builder.get_name()))
builder.build()
logger.info("ok [{}]".format(builder.get_name()))
stats.add_builder_ok(builder)
# first let's build a list of what was constructed
targets = builder.get_targets()
targets.extend(builder.get_targets_post_build())
content = ""
for target in targets:
signature = sha1_file(target)
content += target + " " + signature + "\n"
self.cache.save_object_by_signature(signature, target)
self.cache.save_list_by_signature(target_signature, content)
except Exception as e:
logger.info("failed [{}]".format(builder.get_name()))
logger.info("exception [{}]".format(e))
stats.add_builder_fail(builder, e) | python | def build_by_builder(self, builder: Builder, stats: BuildProcessStats):
""" run one builder, return statistics about the run """
logger = logging.getLogger(__name__)
target_signature = builder.get_signature()
assert target_signature is not None, "builder signature is None"
if self.cache.list_sig_ok(target_signature):
logger.info("verifying [{}]".format(builder.get_name()))
file_bad = 0
file_correct = 0
file_missing = 0
file_total = 0
list_filename = self.cache.get_list_filename(target_signature)
for object_name, signature in Cache.iterate_objects(list_filename):
filename = self.cache.get_object_filename(signature)
if os.path.isfile(object_name):
object_name_signature = sha1_file(object_name)
if object_name_signature != signature:
# logger.info("file [{}] is incorrect. Getting from cache.".format(object_name))
copy_mkdir(filename, object_name)
stats.add_copy_sha1(filename, object_name)
file_bad += 1
else:
# logger.info("file [{}] is up to date".format(object_name))
stats.add_nop(filename, object_name)
file_correct += 1
else:
# logger.info("file [{}] is missing. Getting from cache.".format(object_name))
copy_mkdir(filename, object_name)
stats.add_copy_missing(filename, object_name)
file_missing += 1
file_total += 1
if file_bad > 0 or file_missing > 0:
logger.info("Retrieved {} files from cache (bad/correct/missing = {}/{}/{}".format(
file_total, file_bad, file_correct, file_missing))
else:
logger.info("ok [{}]".format(builder.get_name()))
else:
# this is one of the rare cases in which we really want to catch all exceptions.
# noinspection PyBroadException
try:
logger.info("running [{}]".format(builder.get_name()))
builder.build()
logger.info("ok [{}]".format(builder.get_name()))
stats.add_builder_ok(builder)
# first let's build a list of what was constructed
targets = builder.get_targets()
targets.extend(builder.get_targets_post_build())
content = ""
for target in targets:
signature = sha1_file(target)
content += target + " " + signature + "\n"
self.cache.save_object_by_signature(signature, target)
self.cache.save_list_by_signature(target_signature, content)
except Exception as e:
logger.info("failed [{}]".format(builder.get_name()))
logger.info("exception [{}]".format(e))
stats.add_builder_fail(builder, e) | [
"def",
"build_by_builder",
"(",
"self",
",",
"builder",
":",
"Builder",
",",
"stats",
":",
"BuildProcessStats",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"target_signature",
"=",
"builder",
".",
"get_signature",
"(",
")",
"assert",
"target_signature",
"is",
"not",
"None",
",",
"\"builder signature is None\"",
"if",
"self",
".",
"cache",
".",
"list_sig_ok",
"(",
"target_signature",
")",
":",
"logger",
".",
"info",
"(",
"\"verifying [{}]\"",
".",
"format",
"(",
"builder",
".",
"get_name",
"(",
")",
")",
")",
"file_bad",
"=",
"0",
"file_correct",
"=",
"0",
"file_missing",
"=",
"0",
"file_total",
"=",
"0",
"list_filename",
"=",
"self",
".",
"cache",
".",
"get_list_filename",
"(",
"target_signature",
")",
"for",
"object_name",
",",
"signature",
"in",
"Cache",
".",
"iterate_objects",
"(",
"list_filename",
")",
":",
"filename",
"=",
"self",
".",
"cache",
".",
"get_object_filename",
"(",
"signature",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"object_name",
")",
":",
"object_name_signature",
"=",
"sha1_file",
"(",
"object_name",
")",
"if",
"object_name_signature",
"!=",
"signature",
":",
"# logger.info(\"file [{}] is incorrect. Getting from cache.\".format(object_name))",
"copy_mkdir",
"(",
"filename",
",",
"object_name",
")",
"stats",
".",
"add_copy_sha1",
"(",
"filename",
",",
"object_name",
")",
"file_bad",
"+=",
"1",
"else",
":",
"# logger.info(\"file [{}] is up to date\".format(object_name))",
"stats",
".",
"add_nop",
"(",
"filename",
",",
"object_name",
")",
"file_correct",
"+=",
"1",
"else",
":",
"# logger.info(\"file [{}] is missing. Getting from cache.\".format(object_name))",
"copy_mkdir",
"(",
"filename",
",",
"object_name",
")",
"stats",
".",
"add_copy_missing",
"(",
"filename",
",",
"object_name",
")",
"file_missing",
"+=",
"1",
"file_total",
"+=",
"1",
"if",
"file_bad",
">",
"0",
"or",
"file_missing",
">",
"0",
":",
"logger",
".",
"info",
"(",
"\"Retrieved {} files from cache (bad/correct/missing = {}/{}/{}\"",
".",
"format",
"(",
"file_total",
",",
"file_bad",
",",
"file_correct",
",",
"file_missing",
")",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"ok [{}]\"",
".",
"format",
"(",
"builder",
".",
"get_name",
"(",
")",
")",
")",
"else",
":",
"# this is one of the rare cases in which really want to catch all exceptions.",
"# noinspection PyBroadException",
"try",
":",
"logger",
".",
"info",
"(",
"\"running [{}]\"",
".",
"format",
"(",
"builder",
".",
"get_name",
"(",
")",
")",
")",
"builder",
".",
"build",
"(",
")",
"logger",
".",
"info",
"(",
"\"ok [{}]\"",
".",
"format",
"(",
"builder",
".",
"get_name",
"(",
")",
")",
")",
"stats",
".",
"add_builder_ok",
"(",
"builder",
")",
"# first lets build a list of what was constructed",
"targets",
"=",
"builder",
".",
"get_targets",
"(",
")",
"targets",
".",
"extend",
"(",
"builder",
".",
"get_targets_post_build",
"(",
")",
")",
"content",
"=",
"\"\"",
"for",
"target",
"in",
"targets",
":",
"signature",
"=",
"sha1_file",
"(",
"target",
")",
"content",
"+=",
"target",
"+",
"\" \"",
"+",
"signature",
"+",
"\"\\n\"",
"self",
".",
"cache",
".",
"save_object_by_signature",
"(",
"signature",
",",
"target",
")",
"self",
".",
"cache",
".",
"save_list_by_signature",
"(",
"target_signature",
",",
"content",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"info",
"(",
"\"failed [{}]\"",
".",
"format",
"(",
"builder",
".",
"get_name",
"(",
")",
")",
")",
"logger",
".",
"info",
"(",
"\"exception [{}]\"",
".",
"format",
"(",
"e",
")",
")",
"stats",
".",
"add_builder_fail",
"(",
"builder",
",",
"e",
")"
] | run one builder, return statistics about the run | [
"run",
"one",
"builder",
"return",
"statistics",
"about",
"the",
"run"
] | 11d3db7ea079756c1e4137d3dd8a2cabbcc98bf7 | https://github.com/veltzer/pydmt/blob/11d3db7ea079756c1e4137d3dd8a2cabbcc98bf7/pydmt/core/pydmt.py#L53-L109 |
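A hedged, self-contained sketch of the verify-and-restore idea above: given a list file of "target sha1" pairs and a content-addressed cache directory, copy back any target that is missing or has the wrong digest. File layout and helper names are assumptions, not pydmt's actual cache format.

```python
import hashlib
import os
import shutil

def sha1_file(path):
    # Stream the file so large targets don't load into memory at once.
    h = hashlib.sha1()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(65536), b""):
            h.update(chunk)
    return h.hexdigest()

def restore_targets(list_filename, cache_dir):
    with open(list_filename) as f:
        for line in f:
            if not line.strip():
                continue
            target, sig = line.split()
            # Restore when the target is absent or its digest mismatches.
            if not os.path.isfile(target) or sha1_file(target) != sig:
                os.makedirs(os.path.dirname(target) or ".", exist_ok=True)
                shutil.copy(os.path.join(cache_dir, sig), target)
```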
249,536 | rgmining/ria | ria/bipartite.py | Reviewer.anomalous_score | def anomalous_score(self):
"""Anomalous score of this reviewer.
Initial anomalous score is :math:`1 / |R|`
where :math:`R` is a set of reviewers.
"""
return self._anomalous if self._anomalous else 1. / len(self._graph.reviewers) | python | def anomalous_score(self):
"""Anomalous score of this reviewer.
Initial anomalous score is :math:`1 / |R|`
where :math:`R` is a set of reviewers.
"""
return self._anomalous if self._anomalous else 1. / len(self._graph.reviewers) | [
"def",
"anomalous_score",
"(",
"self",
")",
":",
"return",
"self",
".",
"_anomalous",
"if",
"self",
".",
"_anomalous",
"else",
"1.",
"/",
"len",
"(",
"self",
".",
"_graph",
".",
"reviewers",
")"
] | Anomalous score of this reviewer.
Initial anomalous score is :math:`1 / |R|`
where :math:`R` is a set of reviewers. | [
"Anomalous",
"score",
"of",
"this",
"reviewer",
"."
] | 39223c67b7e59e10bd8e3a9062fb13f8bf893a5d | https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite.py#L151-L157 |
249,537 | rgmining/ria | ria/bipartite.py | Product.summary | def summary(self):
"""Summary of reviews for this product.
Initial summary is computed by
.. math::
\\frac{1}{|R|} \\sum_{r \\in R} \\mbox{review}(r),
where :math:`\\mbox{review}(r)` means review from reviewer :math:`r`.
"""
if self._summary:
return self._summary
reviewers = self._graph.retrieve_reviewers(self)
return self._summary_cls(
[self._graph.retrieve_review(r, self) for r in reviewers]) | python | def summary(self):
"""Summary of reviews for this product.
Initial summary is computed by
.. math::
\\frac{1}{|R|} \\sum_{r \\in R} \\mbox{review}(r),
where :math:`\\mbox{review}(r)` means review from reviewer :math:`r`.
"""
if self._summary:
return self._summary
reviewers = self._graph.retrieve_reviewers(self)
return self._summary_cls(
[self._graph.retrieve_review(r, self) for r in reviewers]) | [
"def",
"summary",
"(",
"self",
")",
":",
"if",
"self",
".",
"_summary",
":",
"return",
"self",
".",
"_summary",
"reviewers",
"=",
"self",
".",
"_graph",
".",
"retrieve_reviewers",
"(",
"self",
")",
"return",
"self",
".",
"_summary_cls",
"(",
"[",
"self",
".",
"_graph",
".",
"retrieve_review",
"(",
"r",
",",
"self",
")",
"for",
"r",
"in",
"reviewers",
"]",
")"
] | Summary of reviews for this product.
Initial summary is computed by
.. math::
\\frac{1}{|R|} \\sum_{r \\in R} \\mbox{review}(r),
where :math:`\\mbox{review}(r)` means review from reviewer :math:`r`. | [
"Summary",
"of",
"reviews",
"for",
"this",
"product",
"."
] | 39223c67b7e59e10bd8e3a9062fb13f8bf893a5d | https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite.py#L226-L242 |
249,538 | rgmining/ria | ria/bipartite.py | Product.summary | def summary(self, v):
"""Set summary.
Args:
v: A new summary. It could be a single number or lists.
"""
if hasattr(v, "__iter__"):
self._summary = self._summary_cls(v)
else:
self._summary = self._summary_cls(float(v)) | python | def summary(self, v):
"""Set summary.
Args:
v: A new summary. It could be a single number or lists.
"""
if hasattr(v, "__iter__"):
self._summary = self._summary_cls(v)
else:
self._summary = self._summary_cls(float(v)) | [
"def",
"summary",
"(",
"self",
",",
"v",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"__iter__\"",
")",
":",
"self",
".",
"_summary",
"=",
"self",
".",
"_summary_cls",
"(",
"v",
")",
"else",
":",
"self",
".",
"_summary",
"=",
"self",
".",
"_summary_cls",
"(",
"float",
"(",
"v",
")",
")"
] | Set summary.
Args:
v: A new summary. It could be a single number or lists. | [
"Set",
"summary",
"."
] | 39223c67b7e59e10bd8e3a9062fb13f8bf893a5d | https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite.py#L245-L254 |
249,539 | rgmining/ria | ria/bipartite.py | Product.update_summary | def update_summary(self, w):
"""Update summary.
The new summary is a weighted average of reviews i.e.
.. math::
\\frac{\\sum_{r \\in R} \\mbox{weight}(r) \\times \\mbox{review}(r)}
{\\sum_{r \\in R} \\mbox{weight}(r)},
where :math:`R` is a set of reviewers reviewing this product,
:math:`\\mbox{review}(r)` and :math:`\\mbox{weight}(r)` are
the review and weight of the reviewer :math:`r`, respectively.
Args:
w: A weight function.
Returns:
absolute difference between old summary and updated one.
"""
old = self.summary.v # pylint: disable=no-member
reviewers = self._graph.retrieve_reviewers(self)
reviews = [self._graph.retrieve_review(
r, self).score for r in reviewers]
weights = [w(r.anomalous_score) for r in reviewers]
if sum(weights) == 0:
self.summary = np.mean(reviews)
else:
self.summary = np.average(reviews, weights=weights)
return abs(self.summary.v - old) | python | def update_summary(self, w):
"""Update summary.
The new summary is a weighted average of reviews i.e.
.. math::
\\frac{\\sum_{r \\in R} \\mbox{weight}(r) \\times \\mbox{review}(r)}
{\\sum_{r \\in R} \\mbox{weight}(r)},
where :math:`R` is a set of reviewers reviewing this product,
:math:`\\mbox{review}(r)` and :math:`\\mbox{weight}(r)` are
the review and weight of the reviewer :math:`r`, respectively.
Args:
w: A weight function.
Returns:
absolute difference between old summary and updated one.
"""
old = self.summary.v # pylint: disable=no-member
reviewers = self._graph.retrieve_reviewers(self)
reviews = [self._graph.retrieve_review(
r, self).score for r in reviewers]
weights = [w(r.anomalous_score) for r in reviewers]
if sum(weights) == 0:
self.summary = np.mean(reviews)
else:
self.summary = np.average(reviews, weights=weights)
return abs(self.summary.v - old) | [
"def",
"update_summary",
"(",
"self",
",",
"w",
")",
":",
"old",
"=",
"self",
".",
"summary",
".",
"v",
"# pylint: disable=no-member",
"reviewers",
"=",
"self",
".",
"_graph",
".",
"retrieve_reviewers",
"(",
"self",
")",
"reviews",
"=",
"[",
"self",
".",
"_graph",
".",
"retrieve_review",
"(",
"r",
",",
"self",
")",
".",
"score",
"for",
"r",
"in",
"reviewers",
"]",
"weights",
"=",
"[",
"w",
"(",
"r",
".",
"anomalous_score",
")",
"for",
"r",
"in",
"reviewers",
"]",
"if",
"sum",
"(",
"weights",
")",
"==",
"0",
":",
"self",
".",
"summary",
"=",
"np",
".",
"mean",
"(",
"reviews",
")",
"else",
":",
"self",
".",
"summary",
"=",
"np",
".",
"average",
"(",
"reviews",
",",
"weights",
"=",
"weights",
")",
"return",
"abs",
"(",
"self",
".",
"summary",
".",
"v",
"-",
"old",
")"
] | Update summary.
The new summary is a weighted average of reviews i.e.
.. math::
\\frac{\\sum_{r \\in R} \\mbox{weight}(r) \\times \\mbox{review}(r)}
{\\sum_{r \\in R} \\mbox{weight}(r)},
where :math:`R` is a set of reviewers reviewing this product,
:math:`\\mbox{review}(r)` and :math:`\\mbox{weight}(r)` are
the review and weight of the reviewer :math:`r`, respectively.
Args:
w: A weight function.
Returns:
absolute difference between old summary and updated one. | [
"Update",
"summary",
"."
] | 39223c67b7e59e10bd8e3a9062fb13f8bf893a5d | https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite.py#L256-L286 |
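A hedged sketch of the weighted-average update above with plain numpy. `w` maps a reviewer's anomalous score to a weight; the linear `w(a) = 1 - a` used here is an assumed choice for illustration.

```python
import numpy as np

reviews = [0.8, 0.9, 0.1]      # scores given to one product
anomalous = [0.1, 0.2, 0.9]    # anomalous score of each reviewer
weights = [1 - a for a in anomalous]
summary = np.mean(reviews) if sum(weights) == 0 else np.average(reviews, weights=weights)
print(round(float(summary), 3))  # ~0.806: the anomalous 0.1 review is damped
```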
249,540 | rgmining/ria | ria/bipartite.py | BipartiteGraph.new_reviewer | def new_reviewer(self, name, anomalous=None):
"""Create a new reviewer.
Args:
name: name of the new reviewer.
anomalous: initial anomalous score. (default: None)
Returns:
A new reviewer instance.
"""
n = self._reviewer_cls(
self, name=name, credibility=self.credibility, anomalous=anomalous)
self.graph.add_node(n)
self.reviewers.append(n)
return n | python | def new_reviewer(self, name, anomalous=None):
"""Create a new reviewer.
Args:
name: name of the new reviewer.
anomalous: initial anomalous score. (default: None)
Returns:
A new reviewer instance.
"""
n = self._reviewer_cls(
self, name=name, credibility=self.credibility, anomalous=anomalous)
self.graph.add_node(n)
self.reviewers.append(n)
return n | [
"def",
"new_reviewer",
"(",
"self",
",",
"name",
",",
"anomalous",
"=",
"None",
")",
":",
"n",
"=",
"self",
".",
"_reviewer_cls",
"(",
"self",
",",
"name",
"=",
"name",
",",
"credibility",
"=",
"self",
".",
"credibility",
",",
"anomalous",
"=",
"anomalous",
")",
"self",
".",
"graph",
".",
"add_node",
"(",
"n",
")",
"self",
".",
"reviewers",
".",
"append",
"(",
"n",
")",
"return",
"n"
] | Create a new reviewer.
Args:
name: name of the new reviewer.
anomalous: initial anomalous score. (default: None)
Returns:
A new reviewer instance. | [
"Create",
"a",
"new",
"reviewer",
"."
] | 39223c67b7e59e10bd8e3a9062fb13f8bf893a5d | https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite.py#L333-L347 |
249,541 | rgmining/ria | ria/bipartite.py | BipartiteGraph.new_product | def new_product(self, name):
"""Create a new product.
Args:
name: name of the new product.
Returns:
A new product instance.
"""
n = self._product_cls(self, name, summary_cls=self._summary_cls)
self.graph.add_node(n)
self.products.append(n)
return n | python | def new_product(self, name):
"""Create a new product.
Args:
name: name of the new product.
Returns:
A new product instance.
"""
n = self._product_cls(self, name, summary_cls=self._summary_cls)
self.graph.add_node(n)
self.products.append(n)
return n | [
"def",
"new_product",
"(",
"self",
",",
"name",
")",
":",
"n",
"=",
"self",
".",
"_product_cls",
"(",
"self",
",",
"name",
",",
"summary_cls",
"=",
"self",
".",
"_summary_cls",
")",
"self",
".",
"graph",
".",
"add_node",
"(",
"n",
")",
"self",
".",
"products",
".",
"append",
"(",
"n",
")",
"return",
"n"
] | Create a new product.
Args:
name: name of the new product.
Returns:
A new product instance. | [
"Create",
"a",
"new",
"product",
"."
] | 39223c67b7e59e10bd8e3a9062fb13f8bf893a5d | https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite.py#L349-L361 |
249,542 | rgmining/ria | ria/bipartite.py | BipartiteGraph.add_review | def add_review(self, reviewer, product, review, date=None):
"""Add a new review from a given reviewer to a given product.
Args:
reviewer: an instance of Reviewer.
product: an instance of Product.
review: a float value.
date: date the review issued.
Returns:
the added new review object.
Raises:
TypeError: when given reviewer and product aren't instances of the
specified reviewer and product class when this graph is constructed.
"""
if not isinstance(reviewer, self._reviewer_cls):
raise TypeError(
"Type of given reviewer isn't acceptable:", reviewer,
", expected:", self._reviewer_cls)
elif not isinstance(product, self._product_cls):
raise TypeError(
"Type of given product isn't acceptable:", product,
", expected:", self._product_cls)
r = self._review_cls(review, date=date)
self.graph.add_edge(reviewer, product, review=r)
return r | python | def add_review(self, reviewer, product, review, date=None):
"""Add a new review from a given reviewer to a given product.
Args:
reviewer: an instance of Reviewer.
product: an instance of Product.
review: a float value.
date: date the review issued.
Returns:
the added new review object.
Raises:
TypeError: when given reviewer and product aren't instances of the
specified reviewer and product class when this graph is constructed.
"""
if not isinstance(reviewer, self._reviewer_cls):
raise TypeError(
"Type of given reviewer isn't acceptable:", reviewer,
", expected:", self._reviewer_cls)
elif not isinstance(product, self._product_cls):
raise TypeError(
"Type of given product isn't acceptable:", product,
", expected:", self._product_cls)
r = self._review_cls(review, date=date)
self.graph.add_edge(reviewer, product, review=r)
return r | [
"def",
"add_review",
"(",
"self",
",",
"reviewer",
",",
"product",
",",
"review",
",",
"date",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"reviewer",
",",
"self",
".",
"_reviewer_cls",
")",
":",
"raise",
"TypeError",
"(",
"\"Type of given reviewer isn't acceptable:\"",
",",
"reviewer",
",",
"\", expected:\"",
",",
"self",
".",
"_reviewer_cls",
")",
"elif",
"not",
"isinstance",
"(",
"product",
",",
"self",
".",
"_product_cls",
")",
":",
"raise",
"TypeError",
"(",
"\"Type of given product isn't acceptable:\"",
",",
"product",
",",
"\", expected:\"",
",",
"self",
".",
"_product_cls",
")",
"r",
"=",
"self",
".",
"_review_cls",
"(",
"review",
",",
"date",
"=",
"date",
")",
"self",
".",
"graph",
".",
"add_edge",
"(",
"reviewer",
",",
"product",
",",
"review",
"=",
"r",
")",
"return",
"r"
] | Add a new review from a given reviewer to a given product.
Args:
reviewer: an instance of Reviewer.
product: an instance of Product.
review: a float value.
date: date the review issued.
Returns:
the added new review object.
Raises:
TypeError: when given reviewer and product aren't instances of the
specified reviewer and product class when this graph is constructed. | [
"Add",
"a",
"new",
"review",
"from",
"a",
"given",
"reviewer",
"to",
"a",
"given",
"product",
"."
] | 39223c67b7e59e10bd8e3a9062fb13f8bf893a5d | https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite.py#L363-L389 |
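A hedged sketch wiring reviewers, products, and reviews together through the methods in these records. That `BipartiteGraph` is constructible with no arguments is an assumption; the records only show its methods. The `.score` access matches the usage shown in `update_summary`.

```python
from ria.bipartite import BipartiteGraph  # module path from the record

g = BipartiteGraph()                      # no-arg construction assumed
alice = g.new_reviewer("alice")
camera = g.new_product("camera")
g.add_review(alice, camera, 0.9)
print(g.retrieve_review(alice, camera).score)  # 0.9
print(len(g.retrieve_products(alice)))         # 1
```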
249,543 | rgmining/ria | ria/bipartite.py | BipartiteGraph.retrieve_products | def retrieve_products(self, reviewer):
"""Retrieve products reviewed by a given reviewer.
Args:
reviewer: A reviewer.
Returns:
A list of products which the reviewer reviews.
Raises:
TypeError: when given reviewer isn't an instance of the specified reviewer
class when this graph is constructed.
"""
if not isinstance(reviewer, self._reviewer_cls):
raise TypeError(
"Type of given reviewer isn't acceptable:", reviewer,
", expected:", self._reviewer_cls)
return list(self.graph.successors(reviewer)) | python | def retrieve_products(self, reviewer):
"""Retrieve products reviewed by a given reviewer.
Args:
reviewer: A reviewer.
Returns:
A list of products which the reviewer reviews.
Raises:
TypeError: when given reviewer isn't an instance of the specified reviewer
class when this graph is constructed.
"""
if not isinstance(reviewer, self._reviewer_cls):
raise TypeError(
"Type of given reviewer isn't acceptable:", reviewer,
", expected:", self._reviewer_cls)
return list(self.graph.successors(reviewer)) | [
"def",
"retrieve_products",
"(",
"self",
",",
"reviewer",
")",
":",
"if",
"not",
"isinstance",
"(",
"reviewer",
",",
"self",
".",
"_reviewer_cls",
")",
":",
"raise",
"TypeError",
"(",
"\"Type of given reviewer isn't acceptable:\"",
",",
"reviewer",
",",
"\", expected:\"",
",",
"self",
".",
"_reviewer_cls",
")",
"return",
"list",
"(",
"self",
".",
"graph",
".",
"successors",
"(",
"reviewer",
")",
")"
] | Retrieve products reviewed by a given reviewer.
Args:
reviewer: A reviewer.
Returns:
A list of products which the reviewer reviews.
Raises:
TypeError: when given reviewer isn't an instance of the specified reviewer
class when this graph is constructed. | [
"Retrieve",
"products",
"reviewed",
"by",
"a",
"given",
"reviewer",
"."
] | 39223c67b7e59e10bd8e3a9062fb13f8bf893a5d | https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite.py#L392-L409 |
249,544 | rgmining/ria | ria/bipartite.py | BipartiteGraph.retrieve_reviewers | def retrieve_reviewers(self, product):
"""Retrieve reviewers who reviewed a given product.
Args:
product: A product specifying reviewers.
Returns:
A list of reviewers who review the product.
Raises:
TypeError: when the given product isn't an instance of the product
class specified when this graph was constructed.
"""
if not isinstance(product, self._product_cls):
raise TypeError(
"Type of given product isn't acceptable:", product,
", expected:", self._product_cls)
return list(self.graph.predecessors(product)) | python | def retrieve_reviewers(self, product):
"""Retrieve reviewers who reviewed a given product.
Args:
product: A product specifying reviewers.
Returns:
A list of reviewers who review the product.
Raises:
TypeError: when the given product isn't an instance of the product
class specified when this graph was constructed.
"""
if not isinstance(product, self._product_cls):
raise TypeError(
"Type of given product isn't acceptable:", product,
", expected:", self._product_cls)
return list(self.graph.predecessors(product)) | [
"def",
"retrieve_reviewers",
"(",
"self",
",",
"product",
")",
":",
"if",
"not",
"isinstance",
"(",
"product",
",",
"self",
".",
"_product_cls",
")",
":",
"raise",
"TypeError",
"(",
"\"Type of given product isn't acceptable:\"",
",",
"product",
",",
"\", expected:\"",
",",
"self",
".",
"_product_cls",
")",
"return",
"list",
"(",
"self",
".",
"graph",
".",
"predecessors",
"(",
"product",
")",
")"
] | Retrieve reviewers who reviewed a given product.
Args:
product: A product specifying reviewers.
Returns:
A list of reviewers who review the product.
Raises:
TypeError: when the given product isn't an instance of the product
class specified when this graph was constructed. | [
"Retrieve",
"reviewers",
"who",
"reviewed",
"a",
"given",
"product",
"."
] | 39223c67b7e59e10bd8e3a9062fb13f8bf893a5d | https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite.py#L412-L429 |
249,545 | rgmining/ria | ria/bipartite.py | BipartiteGraph.retrieve_review | def retrieve_review(self, reviewer, product):
"""Retrieve review that the given reviewer put the given product.
Args:
reviewer: An instance of Reviewer.
product: An instance of Product.
Returns:
A review object.
Raises:
TypeError: when the given reviewer and product aren't instances of the
reviewer and product classes specified when this graph was constructed.
KeyError: When the reviewer does not review the product.
"""
if not isinstance(reviewer, self._reviewer_cls):
raise TypeError(
"Type of given reviewer isn't acceptable:", reviewer,
", expected:", self._reviewer_cls)
elif not isinstance(product, self._product_cls):
raise TypeError(
"Type of given product isn't acceptable:", product,
", expected:", self._product_cls)
try:
return self.graph[reviewer][product]["review"]
except TypeError:
raise KeyError(
"{0} does not review {1}.".format(reviewer, product)) | python | def retrieve_review(self, reviewer, product):
"""Retrieve review that the given reviewer put the given product.
Args:
reviewer: An instance of Reviewer.
product: An instance of Product.
Returns:
A review object.
Raises:
TypeError: when the given reviewer and product aren't instances of the
reviewer and product classes specified when this graph was constructed.
KeyError: When the reviewer does not review the product.
"""
if not isinstance(reviewer, self._reviewer_cls):
raise TypeError(
"Type of given reviewer isn't acceptable:", reviewer,
", expected:", self._reviewer_cls)
elif not isinstance(product, self._product_cls):
raise TypeError(
"Type of given product isn't acceptable:", product,
", expected:", self._product_cls)
try:
return self.graph[reviewer][product]["review"]
except TypeError:
raise KeyError(
"{0} does not review {1}.".format(reviewer, product)) | [
"def",
"retrieve_review",
"(",
"self",
",",
"reviewer",
",",
"product",
")",
":",
"if",
"not",
"isinstance",
"(",
"reviewer",
",",
"self",
".",
"_reviewer_cls",
")",
":",
"raise",
"TypeError",
"(",
"\"Type of given reviewer isn't acceptable:\"",
",",
"reviewer",
",",
"\", expected:\"",
",",
"self",
".",
"_reviewer_cls",
")",
"elif",
"not",
"isinstance",
"(",
"product",
",",
"self",
".",
"_product_cls",
")",
":",
"raise",
"TypeError",
"(",
"\"Type of given product isn't acceptable:\"",
",",
"product",
",",
"\", expected:\"",
",",
"self",
".",
"_product_cls",
")",
"try",
":",
"return",
"self",
".",
"graph",
"[",
"reviewer",
"]",
"[",
"product",
"]",
"[",
"\"review\"",
"]",
"except",
"TypeError",
":",
"raise",
"KeyError",
"(",
"\"{0} does not review {1}.\"",
".",
"format",
"(",
"reviewer",
",",
"product",
")",
")"
] | Retrieve the review that the given reviewer put on the given product.
Args:
reviewer: An instance of Reviewer.
product: An instance of Product.
Returns:
A review object.
Raises:
TypeError: when the given reviewer and product aren't instances of the
reviewer and product classes specified when this graph was constructed.
KeyError: When the reviewer does not review the product. | [
"Retrieve",
"review",
"that",
"the",
"given",
"reviewer",
"put",
"the",
"given",
"product",
"."
] | 39223c67b7e59e10bd8e3a9062fb13f8bf893a5d | https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite.py#L432-L460 |
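A minimal usage sketch tying together the four BipartiteGraph methods in the records above. The constructor call and the new_reviewer/new_product factory methods are assumptions made purely for illustration; they do not appear in these records.

from ria import bipartite

graph = bipartite.BipartiteGraph()         # hypothetical default construction
alice = graph.new_reviewer('alice')        # assumed factory method
book = graph.new_product('book')           # assumed factory method

graph.add_review(alice, book, 0.8)         # stores a review object wrapping 0.8
print(graph.retrieve_products(alice))      # [book]
print(graph.retrieve_reviewers(book))      # [alice]
print(graph.retrieve_review(alice, book))  # the review object for this pair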
249,546 | rgmining/ria | ria/bipartite.py | BipartiteGraph._weight_generator | def _weight_generator(self, reviewers):
"""Compute a weight function for the given reviewers.
Args:
reviewers: a set of reviewers to compute weight function.
Returns:
a function computing a weight for a reviewer.
"""
scores = [r.anomalous_score for r in reviewers]
mu = np.average(scores)
sigma = np.std(scores)
if sigma:
def w(v):
"""Compute a weight for the given reviewer.
Args:
v: anomalous score of a reviewer.
Returns:
weight of the given anomalous score.
"""
try:
exp = math.exp(self.alpha * (v - mu) / sigma)
return 1. / (1. + exp)
except OverflowError:
return 0.
return w
else:
# Sigma = 0 means all reviews have same anomalous scores.
# In this case, all reviews should be treated as same.
return lambda v: 1. | python | def _weight_generator(self, reviewers):
"""Compute a weight function for the given reviewers.
Args:
reviewers: a set of reviewers to compute weight function.
Returns:
a function computing a weight for a reviewer.
"""
scores = [r.anomalous_score for r in reviewers]
mu = np.average(scores)
sigma = np.std(scores)
if sigma:
def w(v):
"""Compute a weight for the given reviewer.
Args:
v: anomalous score of a reviewer.
Returns:
weight of the given anomalous score.
"""
try:
exp = math.exp(self.alpha * (v - mu) / sigma)
return 1. / (1. + exp)
except OverflowError:
return 0.
return w
else:
# Sigma = 0 means all reviews have same anomalous scores.
# In this case, all reviews should be treated as same.
return lambda v: 1. | [
"def",
"_weight_generator",
"(",
"self",
",",
"reviewers",
")",
":",
"scores",
"=",
"[",
"r",
".",
"anomalous_score",
"for",
"r",
"in",
"reviewers",
"]",
"mu",
"=",
"np",
".",
"average",
"(",
"scores",
")",
"sigma",
"=",
"np",
".",
"std",
"(",
"scores",
")",
"if",
"sigma",
":",
"def",
"w",
"(",
"v",
")",
":",
"\"\"\"Compute a weight for the given reviewer.\n\n Args:\n v: anomalous score of a reviewer.\n Returns:\n weight of the given anomalous score.\n \"\"\"",
"try",
":",
"exp",
"=",
"math",
".",
"exp",
"(",
"self",
".",
"alpha",
"*",
"(",
"v",
"-",
"mu",
")",
"/",
"sigma",
")",
"return",
"1.",
"/",
"(",
"1.",
"+",
"exp",
")",
"except",
"OverflowError",
":",
"return",
"0.",
"return",
"w",
"else",
":",
"# Sigma = 0 means all reviews have same anomalous scores.",
"# In this case, all reviews should be treated as same.",
"return",
"lambda",
"v",
":",
"1."
] | Compute a weight function for the given reviewers.
Args:
reviewers: a set of reviewers to compute weight function.
Returns:
a function computing a weight for a reviewer. | [
"Compute",
"a",
"weight",
"function",
"for",
"the",
"given",
"reviewers",
"."
] | 39223c67b7e59e10bd8e3a9062fb13f8bf893a5d | https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite.py#L474-L507 |
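The closure above is a logistic curve centred on the mean anomalous score: reviewers scoring above the mean get weights approaching 0, those below approach 1. A self-contained sketch of the same computation over raw scores, with alpha fixed to 1.0 purely for illustration:

import math
import numpy as np

def weight_generator(scores, alpha=1.0):
    # Same arithmetic as _weight_generator above, over plain floats.
    mu = np.average(scores)
    sigma = np.std(scores)
    if not sigma:
        return lambda v: 1.  # identical scores: weight everyone equally
    def w(v):
        try:
            return 1. / (1. + math.exp(alpha * (v - mu) / sigma))
        except OverflowError:
            return 0.
    return w

w = weight_generator([0.1, 0.2, 0.9])
print(round(w(0.1), 3), round(w(0.9), 3))  # lower score -> larger weight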
249,547 | rgmining/ria | ria/bipartite.py | BipartiteGraph.dump_credibilities | def dump_credibilities(self, output):
"""Dump credibilities of all products.
Args:
output: a writable object.
"""
for p in self.products:
json.dump({
"product_id": p.name,
"credibility": self.credibility(p)
}, output)
output.write("\n") | python | def dump_credibilities(self, output):
"""Dump credibilities of all products.
Args:
output: a writable object.
"""
for p in self.products:
json.dump({
"product_id": p.name,
"credibility": self.credibility(p)
}, output)
output.write("\n") | [
"def",
"dump_credibilities",
"(",
"self",
",",
"output",
")",
":",
"for",
"p",
"in",
"self",
".",
"products",
":",
"json",
".",
"dump",
"(",
"{",
"\"product_id\"",
":",
"p",
".",
"name",
",",
"\"credibility\"",
":",
"self",
".",
"credibility",
"(",
"p",
")",
"}",
",",
"output",
")",
"output",
".",
"write",
"(",
"\"\\n\"",
")"
] | Dump credibilities of all products.
Args:
output: a writable object. | [
"Dump",
"credibilities",
"of",
"all",
"products",
"."
] | 39223c67b7e59e10bd8e3a9062fb13f8bf893a5d | https://github.com/rgmining/ria/blob/39223c67b7e59e10bd8e3a9062fb13f8bf893a5d/ria/bipartite.py#L509-L520 |
249,548 | pjuren/pyokit | src/pyokit/io/maf.py | merge_dictionaries | def merge_dictionaries(a, b):
"""Merge two dictionaries; duplicate keys get value from b."""
res = {}
for k in a:
res[k] = a[k]
for k in b:
res[k] = b[k]
return res | python | def merge_dictionaries(a, b):
"""Merge two dictionaries; duplicate keys get value from b."""
res = {}
for k in a:
res[k] = a[k]
for k in b:
res[k] = b[k]
return res | [
"def",
"merge_dictionaries",
"(",
"a",
",",
"b",
")",
":",
"res",
"=",
"{",
"}",
"for",
"k",
"in",
"a",
":",
"res",
"[",
"k",
"]",
"=",
"a",
"[",
"k",
"]",
"for",
"k",
"in",
"b",
":",
"res",
"[",
"k",
"]",
"=",
"b",
"[",
"k",
"]",
"return",
"res"
] | Merge two dictionaries; duplicate keys get value from b. | [
"Merge",
"two",
"dictionaries",
";",
"duplicate",
"keys",
"get",
"value",
"from",
"b",
"."
] | fddae123b5d817daa39496183f19c000d9c3791f | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/io/maf.py#L73-L80 |
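For reference, on Python 3.5+ the same merge, with b winning on duplicate keys, can be written as a dict literal; a quick check against the function above (assuming merge_dictionaries is in scope):

a = {'x': 1, 'y': 2}
b = {'y': 3, 'z': 4}
assert merge_dictionaries(a, b) == {**a, **b} == {'x': 1, 'y': 3, 'z': 4}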
249,549 | pjuren/pyokit | src/pyokit/io/maf.py | __build_sequence | def __build_sequence(parts):
"""Build a sequence object using the pre-tokenized parts from a MAF line.
s -- a sequence line; has 6 fields in addition to 's':
* source sequence,
* start coord. of seq., zero-based. If -'ve strand, rel to start of
rev. comp.
* ungapped length of the sequence
* strand
* src size -- the full length of the source sequence
* the sequence itself
"""
strand = parts[4]
seq_length = int(parts[3])
total_seq_len = int(parts[5])
start = (int(parts[2]) if strand == "+"
else total_seq_len - int(parts[2]) - seq_length)
end = start + seq_length
remain = total_seq_len - end
return Sequence(parts[1], parts[6], start, end, strand, remain) | python | def __build_sequence(parts):
"""Build a sequence object using the pre-tokenized parts from a MAF line.
s -- a sequence line; has 6 fields in addition to 's':
* source sequence,
* start coord. of seq., zero-based. If -'ve strand, rel to start of
rev. comp.
* ungapped length of the sequence
* strand
* src size -- the full length of the source sequence
* the sequence itself
"""
strand = parts[4]
seq_length = int(parts[3])
total_seq_len = int(parts[5])
start = (int(parts[2]) if strand == "+"
else total_seq_len - int(parts[2]) - seq_length)
end = start + seq_length
remain = total_seq_len - end
return Sequence(parts[1], parts[6], start, end, strand, remain) | [
"def",
"__build_sequence",
"(",
"parts",
")",
":",
"strand",
"=",
"parts",
"[",
"4",
"]",
"seq_length",
"=",
"int",
"(",
"parts",
"[",
"3",
"]",
")",
"total_seq_len",
"=",
"int",
"(",
"parts",
"[",
"5",
"]",
")",
"start",
"=",
"(",
"int",
"(",
"parts",
"[",
"2",
"]",
")",
"if",
"strand",
"==",
"\"+\"",
"else",
"total_seq_len",
"-",
"int",
"(",
"parts",
"[",
"2",
"]",
")",
"-",
"seq_length",
")",
"end",
"=",
"start",
"+",
"seq_length",
"remain",
"=",
"total_seq_len",
"-",
"end",
"return",
"Sequence",
"(",
"parts",
"[",
"1",
"]",
",",
"parts",
"[",
"6",
"]",
",",
"start",
",",
"end",
",",
"strand",
",",
"remain",
")"
] | Build a sequence object using the pre-tokenized parts from a MAF line.
s -- a sequence line; has 6 fields in addition to 's':
* source sequence,
* start coord. of seq., zero-based. If -'ve strand, rel to start of
rev. comp.
* ungapped length of the sequence
* strand
* src size -- the full length of the source sequence
* the sequence itself | [
"Build",
"a",
"sequence",
"object",
"using",
"the",
"pre",
"-",
"tokenized",
"parts",
"from",
"a",
"MAF",
"line",
"."
] | fddae123b5d817daa39496183f19c000d9c3791f | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/io/maf.py#L83-L102 |
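A worked example of the strand arithmetic above, with illustrative numbers rather than a real MAF record: for a 100-base source, an s-line with start 10 and length 20 on the '-' strand maps to forward-strand coordinates 70..90.

total_seq_len = 100  # the src-size field
maf_start = 10       # the start field, relative to the reverse complement
seq_length = 20      # the ungapped length field

start = total_seq_len - maf_start - seq_length  # 70, forward-strand start
end = start + seq_length                        # 90
remain = total_seq_len - end                    # 10 bases after the block
print(start, end, remain)                       # 70 90 10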
249,550 | pjuren/pyokit | src/pyokit/io/maf.py | __annotate_sequence_with_context | def __annotate_sequence_with_context(seq, i_line_parts):
"""Extract meta data from pre-tokenized maf i-line and populate sequence.
i -- always come after s lines, and contain information about the context of
the sequence. Five fields are given, not counting the 'i'
* source sequence (must match s line before this)
* left status (see below)
* left count; num of bases in source sequence between start of the
block and end of previous block (0 if this is the first)
* right status (see below)
* right count; num of bases in source after end of this block before
start of next
status (left/right) is a single char and can be:
* C -- the sequence before or after is contiguous with this block.
* I -- there are bases between the bases in this block and the one
before or after it.
* N -- this is the first sequence from this src chrom or scaffold.
* n -- this is the first sequence from this src chrom or scaffold but
it is bridged by another alignment from a different chrom or
scaffold.
* M -- there is missing data before or after this block (Ns in the
sequence).
* T -- the sequence in this block has been used before in a previous
block (likely a tandem duplication)
"""
if i_line_parts[1] != seq.name:
raise MAFError("Trying to populate meta data for sequence " + seq.name +
" with i-line information for " +
str(i_line_parts[1]) + "; malformed MAF file?")
if len(i_line_parts) != 6:
raise MAFError("i-line with " + str(len(i_line_parts)) + "; expected 6.")
seq.meta_data[LEFT_STATUS_KEY] = i_line_parts[2]
seq.meta_data[LEFT_COUNT_KEY] = int(i_line_parts[3])
seq.meta_data[RIGHT_STATUS_KEY] = i_line_parts[4]
seq.meta_data[RIGHT_COUNT_KEY] = int(i_line_parts[5]) | python | def __annotate_sequence_with_context(seq, i_line_parts):
"""Extract meta data from pre-tokenized maf i-line and populate sequence.
i -- always come after s lines, and contain information about the context of
the sequence. Five fields are given, not counting the 'i'
* source sequence (must match s line before this)
* left status (see below)
* left count; num of bases in source sequence between start of the
block and end of previous block (0 if this is the first)
* right status (see below)
* right count; num of bases in source after end of this block before
start of next
status (left/right) is a single char and can be:
* C -- the sequence before or after is contiguous with this block.
* I -- there are bases between the bases in this block and the one
before or after it.
* N -- this is the first sequence from this src chrom or scaffold.
* n -- this is the first sequence from this src chrom or scaffold but
it is bridged by another alignment from a different chrom or
scaffold.
* M -- there is missing data before or after this block (Ns in the
sequence).
* T -- the sequence in this block has been used before in a previous
block (likely a tandem duplication)
"""
if i_line_parts[1] != seq.name:
raise MAFError("Trying to populate meta data for sequence " + seq.name +
" with i-line information for " +
str(i_line_parts[1]) + "; malformed MAF file?")
if len(i_line_parts) != 6:
raise MAFError("i-line with " + str(len(i_line_parts)) + "; expected 6.")
seq.meta_data[LEFT_STATUS_KEY] = i_line_parts[2]
seq.meta_data[LEFT_COUNT_KEY] = int(i_line_parts[3])
seq.meta_data[RIGHT_STATUS_KEY] = i_line_parts[4]
seq.meta_data[RIGHT_COUNT_KEY] = int(i_line_parts[5]) | [
"def",
"__annotate_sequence_with_context",
"(",
"seq",
",",
"i_line_parts",
")",
":",
"if",
"i_line_parts",
"[",
"1",
"]",
"!=",
"seq",
".",
"name",
":",
"raise",
"MAFError",
"(",
"\"Trying to populate meta data for sequence \"",
"+",
"seq",
".",
"name",
"+",
"\" with i-line information for \"",
"+",
"str",
"(",
"i_line_parts",
"[",
"1",
"]",
")",
"+",
"\"; maflormed MAF file?\"",
")",
"if",
"len",
"(",
"i_line_parts",
")",
"!=",
"6",
":",
"raise",
"MAFError",
"(",
"\"i-line with \"",
"+",
"str",
"(",
"len",
"(",
"i_line_parts",
")",
")",
"+",
"\"; expected 6.\"",
")",
"seq",
".",
"meta_data",
"[",
"LEFT_STATUS_KEY",
"]",
"=",
"i_line_parts",
"[",
"2",
"]",
"seq",
".",
"meta_data",
"[",
"LEFT_COUNT_KEY",
"]",
"=",
"int",
"(",
"i_line_parts",
"[",
"3",
"]",
")",
"seq",
".",
"meta_data",
"[",
"RIGHT_STATUS_KEY",
"]",
"=",
"i_line_parts",
"[",
"4",
"]",
"seq",
".",
"meta_data",
"[",
"RIGHT_COUNT_KEY",
"]",
"=",
"int",
"(",
"i_line_parts",
"[",
"5",
"]",
")"
] | Extract meta data from pre-tokenized maf i-line and populate sequence.
i -- always come after s lines, and contain information about the context of
the sequence. Five fields are given, not counting the 'i'
* source sequence (must match s line before this)
* left status (see below)
* left count; num of bases in source sequence between start of the
block and end of previous block (0 if this is the first)
* right status (see below)
* right count; num of bases in source after end of this block before
start of next
status (left/right) is a single char and can be:
* C -- the sequence before or after is contiguous with this block.
* I -- there are bases between the bases in this block and the one
before or after it.
* N -- this is the first sequence from this src chrom or scaffold.
* n -- this is the first sequence from this src chrom or scaffold but
it is bridged by another alignment from a different chrom or
scaffold.
* M -- there is missing data before or after this block (Ns in the
sequence).
* T -- the sequence in this block has been used before in a previous
block (likely a tandem duplication) | [
"Extract",
"meta",
"data",
"from",
"pre",
"-",
"tokenized",
"maf",
"i",
"-",
"line",
"and",
"populate",
"sequence",
"."
] | fddae123b5d817daa39496183f19c000d9c3791f | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/io/maf.py#L135-L169 |
249,551 | pjuren/pyokit | src/pyokit/io/maf.py | __annotate_sequence_with_quality | def __annotate_sequence_with_quality(seq, q_line_parts):
"""Extract meta data from pre-tokenized maf q-line and populate sequence.
q -- quality information about an aligned base in a species. Two fields after
the 'q': the source name and a single digit for each nucleotide in its
sequence (0-9 or F, or - to indicate a gap).
"""
if q_line_parts[1] != seq.name:
raise MAFError("trying to populate meta data for sequence " + seq.name +
" with q-line information for " +
str(q_line_parts[1]) + "; malformed MAF file?")
if len(q_line_parts[2]) != len(seq):
raise MAFError("trying to populate quality meta data for sequence with " +
"length " + str(len(seq)) + " using quality line with " +
"length " + str(len(q_line_parts[2])) + "; malformed " +
"MAF file?")
seq.meta_data[QUALITY_META_KEY] = q_line_parts[2] | python | def __annotate_sequence_with_quality(seq, q_line_parts):
"""Extract meta data from pre-tokenized maf q-line and populate sequence.
q -- quality information about an aligned base in a species. Two fields after
the 'q': the source name and a single digit for each nucleotide in its
sequence (0-9 or F, or - to indicate a gap).
"""
if q_line_parts[1] != seq.name:
raise MAFError("trying to populate meta data for sequence " + seq.name +
" with q-line information for " +
str(q_line_parts[1]) + "; malformed MAF file?")
if len(q_line_parts[2]) != len(seq):
raise MAFError("trying to populate quality meta data for sequence with " +
"length " + str(len(seq)) + " using quality line with " +
"length " + str(len(q_line_parts[2])) + "; malformed " +
"MAF file?")
seq.meta_data[QUALITY_META_KEY] = q_line_parts[2] | [
"def",
"__annotate_sequence_with_quality",
"(",
"seq",
",",
"q_line_parts",
")",
":",
"if",
"q_line_parts",
"[",
"1",
"]",
"!=",
"seq",
".",
"name",
":",
"raise",
"MAFError",
"(",
"\"trying to populate meta data for sequence \"",
"+",
"seq",
".",
"name",
"+",
"\" with q-line information for \"",
"+",
"str",
"(",
"q_line_parts",
"[",
"1",
"]",
")",
"+",
"\"; maflormed MAF file?\"",
")",
"if",
"len",
"(",
"q_line_parts",
"[",
"2",
"]",
")",
"!=",
"len",
"(",
"seq",
")",
":",
"raise",
"MAFError",
"(",
"\"trying to populate quality meta data for sequence with \"",
"+",
"\"length \"",
"+",
"str",
"(",
"len",
"(",
"seq",
")",
")",
"+",
"\" using quality line with \"",
"+",
"\"length \"",
"+",
"str",
"(",
"len",
"(",
"q_line_parts",
"[",
"2",
"]",
")",
")",
"+",
"\"; malformed \"",
"+",
"\"MAF file?\"",
")",
"seq",
".",
"meta_data",
"[",
"QUALITY_META_KEY",
"]",
"=",
"q_line_parts",
"[",
"2",
"]"
] | Extract meta data from pre-tokenized maf q-line and populate sequence.
q -- quality information about an aligned base in a species. Two fields after
the 'q': the source name and a single digit for each nucleotide in its
sequence (0-9 or F, or - to indicate a gap). | [
"Extract",
"meta",
"data",
"from",
"pre",
"-",
"tokenized",
"maf",
"q",
"-",
"line",
"and",
"populate",
"sequence",
"."
] | fddae123b5d817daa39496183f19c000d9c3791f | https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/io/maf.py#L172-L188 |
249,552 | AguaClara/aide_document-DEPRECATED | aide_document/jekyll.py | add_frontmatter | def add_frontmatter(file_name, title, makenew=False):
"""
Adds basic frontmatter to a MarkDown file that will be used in a Jekyll project.
Parameters
==========
file_name : String
Relative file path from where this method is called to the location of the file that will have frontmatter added.
title : String
Title of the page that will go into the Jekyll project.
makenew : Boolean (OPTIONAL)
If set to True, will create a new file with the frontmatter next to the original file with "_added_frontmatter" appended to its name. Otherwise, the method simply edits the original file.
Examples
========
Suppose we have the following directory:
data/
doc.md
To write to a new file doc_added_frontmatter.md and add frontmatter:
>>> from aide_document import jekyll
>>> jekyll.add_frontmatter('doc.md', 'Document', True)
The last parameter can be omitted if you want to just overwrite doc.md.
"""
with open(file_name, "r+") as oldfile:
# Creates new file and writes to it if specified
if makenew:
with open(file_name[:-3] + '_added_frontmatter.md', 'w') as newfile:
newfile.write('---\n' + 'title: ' + title + '\n' + '---\n')
newfile.write(oldfile.read())
# Writes to old file if unspecified
else:
content = oldfile.read()
oldfile.seek(0)
oldfile.write('---\n' + 'title: ' + title + '\n' + '---\n' + content) | python | def add_frontmatter(file_name, title, makenew=False):
"""
Adds basic frontmatter to a MarkDown file that will be used in a Jekyll project.
Parameters
==========
file_name : String
Relative file path from where this method is called to the location of the file that will have frontmatter added.
title : String
Title of the page that will go into the Jekyll project.
makenew : Boolean (OPTIONAL)
If set to True, will create a new file with the frontmatter next to the original file with "_added_frontmatter" appended to its name. Otherwise, the method simply edits the original file.
Examples
========
Suppose we have the following directory:
data/
doc.md
To write to a new file doc_added_frontmatter.md and add frontmatter:
>>> from aide_document import jekyll
>>> jekyll.add_frontmatter('doc.md', 'Document', True)
The last parameter can be omitted if you want to just overwrite doc.md.
"""
with open(file_name, "r+") as oldfile:
# Creates new file and writes to it if specified
if makenew:
with open(file_name[:-3] + '_added_frontmatter.md', 'w') as newfile:
newfile.write('---\n' + 'title: ' + title + '\n' + '---\n')
newfile.write(oldfile.read())
# Writes to old file if unspecified
else:
content = oldfile.read()
oldfile.seek(0)
oldfile.write('---\n' + 'title: ' + title + '\n' + '---\n' + content) | [
"def",
"add_frontmatter",
"(",
"file_name",
",",
"title",
",",
"makenew",
"=",
"False",
")",
":",
"with",
"open",
"(",
"file_name",
",",
"\"r+\"",
")",
"as",
"oldfile",
":",
"# Creates new file and writes to it if specified",
"if",
"makenew",
":",
"with",
"open",
"(",
"file_name",
"[",
":",
"-",
"3",
"]",
"+",
"'_added_frontmatter.md'",
",",
"'w'",
")",
"as",
"newfile",
":",
"newfile",
".",
"write",
"(",
"'---\\n'",
"+",
"'title: '",
"+",
"title",
"+",
"'\\n'",
"+",
"'---\\n'",
")",
"newfile",
".",
"write",
"(",
"oldfile",
".",
"read",
"(",
")",
")",
"# Writes to old file if unspecified",
"else",
":",
"content",
"=",
"oldfile",
".",
"read",
"(",
")",
"oldfile",
".",
"seek",
"(",
"0",
")",
"oldfile",
".",
"write",
"(",
"'---\\n'",
"+",
"'title: '",
"+",
"title",
"+",
"'\\n'",
"+",
"'---\\n'",
"+",
"content",
")"
] | Adds basic frontmatter to a MarkDown file that will be used in a Jekyll project.
Parameters
==========
file_name : String
Relative file path from where this method is called to the location of the file that will have frontmatter added.
title : String
Title of the page that will go into the Jekyll project.
makenew : Boolean (OPTIONAL)
If set to True, will create a new file with the frontmatter next to the original file with "_added_frontmatter" appended to its name. Otherwise, the method simply edits the original file.
Examples
========
Suppose we have the following directory:
data/
doc.md
To write to a new file doc_added_frontmatter.md and add frontmatter:
>>> from aide_document import jekyll
>>> jekyll.add_frontmatter('doc.md', 'Document', True)
The last parameter can be omitted if you want to just overwrite doc.md. | [
"Adds",
"basic",
"frontmatter",
"to",
"a",
"MarkDown",
"file",
"that",
"will",
"be",
"used",
"in",
"a",
"Jekyll",
"project",
"."
] | 3f3b5c9f321264e0e4d8ed68dfbc080762579815 | https://github.com/AguaClara/aide_document-DEPRECATED/blob/3f3b5c9f321264e0e4d8ed68dfbc080762579815/aide_document/jekyll.py#L12-L50 |
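Continuing the docstring's example, a quick check of the generated file; this assumes a doc.md exists in the working directory:

from aide_document import jekyll

jekyll.add_frontmatter('doc.md', 'Document', makenew=True)
with open('doc_added_frontmatter.md') as f:
    print(f.readline(), end='')  # ---
    print(f.readline(), end='')  # title: Document
    print(f.readline(), end='')  # ---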
249,553 | TheSighing/climber | climber/summary.py | FrequencySummarizer.summarize | def summarize(self, text, n):
"""
Return a list of n sentences
which represent the summary of text.
"""
sents = sent_tokenize(text)
assert n <= len(sents)
word_sent = [word_tokenize(s.lower()) for s in sents]
self._freq = self._compute_frequencies(word_sent)
ranking = defaultdict(int)
for i,sent in enumerate(word_sent):
for w in sent:
if w in self._freq:
ranking[i] += self._freq[w]
sents_idx = self._rank(ranking, n)
return [sents[j] for j in sents_idx] | python | def summarize(self, text, n):
"""
Return a list of n sentences
which represent the summary of text.
"""
sents = sent_tokenize(text)
assert n <= len(sents)
word_sent = [word_tokenize(s.lower()) for s in sents]
self._freq = self._compute_frequencies(word_sent)
ranking = defaultdict(int)
for i,sent in enumerate(word_sent):
for w in sent:
if w in self._freq:
ranking[i] += self._freq[w]
sents_idx = self._rank(ranking, n)
return [sents[j] for j in sents_idx] | [
"def",
"summarize",
"(",
"self",
",",
"text",
",",
"n",
")",
":",
"sents",
"=",
"sent_tokenize",
"(",
"text",
")",
"assert",
"n",
"<=",
"len",
"(",
"sents",
")",
"word_sent",
"=",
"[",
"word_tokenize",
"(",
"s",
".",
"lower",
"(",
")",
")",
"for",
"s",
"in",
"sents",
"]",
"self",
".",
"_freq",
"=",
"self",
".",
"_compute_frequencies",
"(",
"word_sent",
")",
"ranking",
"=",
"defaultdict",
"(",
"int",
")",
"for",
"i",
",",
"sent",
"in",
"enumerate",
"(",
"word_sent",
")",
":",
"for",
"w",
"in",
"sent",
":",
"if",
"w",
"in",
"self",
".",
"_freq",
":",
"ranking",
"[",
"i",
"]",
"+=",
"self",
".",
"_freq",
"[",
"w",
"]",
"sents_idx",
"=",
"self",
".",
"_rank",
"(",
"ranking",
",",
"n",
")",
"return",
"[",
"sents",
"[",
"j",
"]",
"for",
"j",
"in",
"sents_idx",
"]"
] | Return a list of n sentences
which represent the summary of text. | [
"Return",
"a",
"list",
"of",
"n",
"sentences",
"which",
"represent",
"the",
"summary",
"of",
"text",
"."
] | 39e4e70c9a768c82a995d8704679d1c046910666 | https://github.com/TheSighing/climber/blob/39e4e70c9a768c82a995d8704679d1c046910666/climber/summary.py#L39-L54 |
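summarize relies on self._compute_frequencies, which is not part of this record. A plausible standalone sketch following the usual normalized term-frequency scheme; the stopword set and the max-normalization are assumptions, not necessarily the library's exact behaviour:

from collections import defaultdict

def compute_frequencies(word_sent, stopwords=frozenset({'the', 'a', 'an', 'of'})):
    # Count each non-stopword across the tokenized sentences, then
    # normalize so the most frequent term scores 1.0.
    freq = defaultdict(int)
    for sentence in word_sent:
        for word in sentence:
            if word not in stopwords:
                freq[word] += 1
    top = float(max(freq.values()))
    return {word: count / top for word, count in freq.items()}

print(compute_frequencies([['the', 'cat'], ['cat', 'naps']]))
# {'cat': 1.0, 'naps': 0.5}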
249,554 | TheSighing/climber | climber/summary.py | FrequencySummarizer._rank | def _rank(self, ranking, n):
""" return the first n sentences with highest ranking """
return nlargest(n, ranking, key=ranking.get) | python | def _rank(self, ranking, n):
""" return the first n sentences with highest ranking """
return nlargest(n, ranking, key=ranking.get) | [
"def",
"_rank",
"(",
"self",
",",
"ranking",
",",
"n",
")",
":",
"return",
"nlargest",
"(",
"n",
",",
"ranking",
",",
"key",
"=",
"ranking",
".",
"get",
")"
] | return the first n sentences with highest ranking | [
"return",
"the",
"first",
"n",
"sentences",
"with",
"highest",
"ranking"
] | 39e4e70c9a768c82a995d8704679d1c046910666 | https://github.com/TheSighing/climber/blob/39e4e70c9a768c82a995d8704679d1c046910666/climber/summary.py#L56-L58 |
249,555 | sys-git/certifiable | certifiable/utils.py | make_certifier | def make_certifier():
"""
Decorator that can wrap raw functions to create a certifier function.
Certifier functions support partial application. If a function wrapped by
`make_certifier` is called with a value as its first argument it will be
certified immediately. If no value is passed, then it will return a
function that can be called at a later time.
Assuming that `certify_something` has been decorated by `make_certifier`:
>>> certify_something(value, foo=1, bar=2)
Is equivalent to:
>>> certifier = certify_something(foo=1, bar=2)
>>> certifier(value)
"""
def decorator(func):
@six.wraps(func)
def wrapper(value=_undefined, **kwargs):
def certify(val):
if is_enabled():
exec_func(func, val, **kwargs)
return val
if value is not _undefined:
return certify(value)
else:
return certify
return wrapper
return decorator | python | def make_certifier():
"""
Decorator that can wrap raw functions to create a certifier function.
Certifier functions support partial application. If a function wrapped by
`make_certifier` is called with a value as its first argument it will be
certified immediately. If no value is passed, then it will return a
function that can be called at a later time.
Assuming that `certify_something` has been decorated by `make_certifier`:
>>> certify_something(value, foo=1, bar=2)
Is equivalent to:
>>> certifier = certify_something(foo=1, bar=2)
>>> certifier(value)
"""
def decorator(func):
@six.wraps(func)
def wrapper(value=_undefined, **kwargs):
def certify(val):
if is_enabled():
exec_func(func, val, **kwargs)
return val
if value is not _undefined:
return certify(value)
else:
return certify
return wrapper
return decorator | [
"def",
"make_certifier",
"(",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"@",
"six",
".",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"value",
"=",
"_undefined",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"certify",
"(",
"val",
")",
":",
"if",
"is_enabled",
"(",
")",
":",
"exec_func",
"(",
"func",
",",
"val",
",",
"*",
"*",
"kwargs",
")",
"return",
"val",
"if",
"value",
"is",
"not",
"_undefined",
":",
"return",
"certify",
"(",
"value",
")",
"else",
":",
"return",
"certify",
"return",
"wrapper",
"return",
"decorator"
] | Decorator that can wrap raw functions to create a certifier function.
Certifier functions support partial application. If a function wrapped by
`make_certifier` is called with a value as its first argument it will be
certified immediately. If no value is passed, then it will return a
function that can be called at a later time.
Assuming that `certify_something` has been decorated by `make_certifier`:
>>> certify_something(value, foo=1, bar=2)
Is equivalent to:
>>> certifier = certify_something(foo=1, bar=2)
>>> certifier(value) | [
"Decorator",
"that",
"can",
"wrap",
"raw",
"functions",
"to",
"create",
"a",
"certifier",
"function",
"."
] | a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8 | https://github.com/sys-git/certifiable/blob/a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8/certifiable/utils.py#L33-L65 |
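A minimal certifier built with the decorator above, demonstrating both call styles from its docstring. certify_positive is a made-up example, and it assumes exec_func simply invokes the wrapped function with the value and kwargs, as the wrapper suggests:

@make_certifier()
def certify_positive(value, strict=True):
    # Raising signals failure; returning normally means the value passed.
    if strict and value <= 0:
        raise ValueError('value is not positive')

certify_positive(3)                      # certified immediately, returns 3
checker = certify_positive(strict=True)  # no value: returns a partial certifier
checker(5)                               # certified later, returns 5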
249,556 | sys-git/certifiable | certifiable/utils.py | certify_required | def certify_required(value, required=False):
"""
Certify that a value is present if required.
:param object value:
The value that is to be certified.
:param bool required:
Is the value required?
:raises CertifierValueError:
Required value is `None`.
"""
# Certify our kwargs:
if not isinstance(required, bool):
raise CertifierParamError(
'required',
required,
)
if value is None:
if required:
raise CertifierValueError(
message="required value is None",
)
return True | python | def certify_required(value, required=False):
"""
Certify that a value is present if required.
:param object value:
The value that is to be certified.
:param bool required:
Is the value required?
:raises CertifierValueError:
Required value is `None`.
"""
# Certify our kwargs:
if not isinstance(required, bool):
raise CertifierParamError(
'required',
required,
)
if value is None:
if required:
raise CertifierValueError(
message="required value is None",
)
return True | [
"def",
"certify_required",
"(",
"value",
",",
"required",
"=",
"False",
")",
":",
"# Certify our kwargs:",
"if",
"not",
"isinstance",
"(",
"required",
",",
"bool",
")",
":",
"raise",
"CertifierParamError",
"(",
"'required'",
",",
"required",
",",
")",
"if",
"value",
"is",
"None",
":",
"if",
"required",
":",
"raise",
"CertifierValueError",
"(",
"message",
"=",
"\"required value is None\"",
",",
")",
"return",
"True"
] | Certify that a value is present if required.
:param object value:
The value that is to be certified.
:param bool required:
Is the value required?
:raises CertifierValueError:
Required value is `None`. | [
"Certify",
"that",
"a",
"value",
"is",
"present",
"if",
"required",
"."
] | a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8 | https://github.com/sys-git/certifiable/blob/a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8/certifiable/utils.py#L68-L91 |
249,557 | sys-git/certifiable | certifiable/utils.py | certify_parameter | def certify_parameter(certifier, name, value, kwargs=None):
"""
Internal certifier for kwargs passed to Certifiable public methods.
:param callable certifier:
The certifier to use
:param str name:
The name of the kwarg.
:param object value:
The value of the kwarg.
:param dict kwargs:
Optional keyword arguments to pass through to the certifier.
:raises CertifierParamError:
A parameter failed internal certification.
"""
try:
certifier(value, **kwargs or {})
except CertifierError as err:
six.raise_from(
CertifierParamError(
name,
value,
),
err) | python | def certify_parameter(certifier, name, value, kwargs=None):
"""
Internal certifier for kwargs passed to Certifiable public methods.
:param callable certifier:
The certifier to use
:param str name:
The name of the kwarg.
:param object value:
The value of the kwarg.
:param dict kwargs:
Optional keyword arguments to pass through to the certifier.
:raises CertifierParamError:
A parameter failed internal certification.
"""
try:
certifier(value, **kwargs or {})
except CertifierError as err:
six.raise_from(
CertifierParamError(
name,
value,
),
err) | [
"def",
"certify_parameter",
"(",
"certifier",
",",
"name",
",",
"value",
",",
"kwargs",
"=",
"None",
")",
":",
"try",
":",
"certifier",
"(",
"value",
",",
"*",
"*",
"kwargs",
"or",
"{",
"}",
")",
"except",
"CertifierError",
"as",
"err",
":",
"six",
".",
"raise_from",
"(",
"CertifierParamError",
"(",
"name",
",",
"value",
",",
")",
",",
"err",
")"
] | Internal certifier for kwargs passed to Certifiable public methods.
:param callable certifier:
The certifier to use
:param str name:
The name of the kwarg.
:param object value:
The value of the kwarg.
:param dict kwargs:
Optional keyword arguments to pass through to the certifier.
:raises CertifierParamError:
A parameter failed internal certification. | [
"Internal",
"certifier",
"for",
"kwargs",
"passed",
"to",
"Certifiable",
"public",
"methods",
"."
] | a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8 | https://github.com/sys-git/certifiable/blob/a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8/certifiable/utils.py#L123-L146 |
249,558 | sys-git/certifiable | certifiable/utils.py | enable_from_env | def enable_from_env(state=None):
"""
Enable certification for this thread based on the environment variable `CERTIFIABLE_STATE`.
:param bool state:
Default status to use.
:return:
The new state.
:rtype:
bool
"""
try:
x = os.environ.get(
ENVVAR,
state,
)
value = bool(int(x))
except Exception: # pylint: disable=broad-except
value = bool(state)
return enable(value) | python | def enable_from_env(state=None):
"""
Enable certification for this thread based on the environment variable `CERTIFIABLE_STATE`.
:param bool state:
Default status to use.
:return:
The new state.
:rtype:
bool
"""
try:
x = os.environ.get(
ENVVAR,
state,
)
value = bool(int(x))
except Exception: # pylint: disable=broad-except
value = bool(state)
return enable(value) | [
"def",
"enable_from_env",
"(",
"state",
"=",
"None",
")",
":",
"try",
":",
"x",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"ENVVAR",
",",
"state",
",",
")",
"value",
"=",
"bool",
"(",
"int",
"(",
"x",
")",
")",
"except",
"Exception",
":",
"# pylint: disable=broad-except",
"value",
"=",
"bool",
"(",
"state",
")",
"return",
"enable",
"(",
"value",
")"
] | Enable certification for this thread based on the environment variable `CERTIFIABLE_STATE`.
:param bool state:
Default status to use.
:return:
The new state.
:rtype:
bool | [
"Enable",
"certification",
"for",
"this",
"thread",
"based",
"on",
"the",
"environment",
"variable",
"CERTIFIABLE_STATE",
"."
] | a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8 | https://github.com/sys-git/certifiable/blob/a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8/certifiable/utils.py#L196-L216 |
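The environment variable takes precedence over the state default and is parsed as an integer; a short demonstration, assuming enable flips the per-thread flag as its name suggests:

import os

os.environ['CERTIFIABLE_STATE'] = '0'
enable_from_env(state=True)   # env var wins: certification disabled
del os.environ['CERTIFIABLE_STATE']
enable_from_env(state=True)   # no env var: the default applies, enabled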
249,559 | motiejus/tictactoelib | tictactoelib/noninteractive.py | compete | def compete(source_x, source_o, timeout=None, memlimit=None, cgroup='tictactoe',
cgroup_path='/sys/fs/cgroup'):
"""Fights two source files.
Returns either:
* ('ok', 'x' | 'draw' | 'o', GAMEPLAY)
* ('error', GUILTY, REASON, GAMEPLAY)
REASON := utf8-encoded error string (can be up to 65k chars)
GAMEPLAY := [ NUM ]
GUILTY := 'x' | 'o' (during whose turn the error occurred)
NUM := 1..81 | 0
NUM=0 means the move resulted in error (then REASON is non-empty)
GAMEPLAY is never more than 255 characters long:
len(",".join(map(str, range(1, 81)))) == 230
"""
gameplay = []
for xo, moveresult, log in run_interactive(source_x, source_o, timeout,
memlimit, cgroup, cgroup_path):
if moveresult[0] == 'error':
return 'error', xo, moveresult[1], gameplay + [0]
elif moveresult[0] == 'state_coords':
gameplay.append(coords_to_num(moveresult[1][1]))
state = moveresult[1][0]
if state == 'draw' or state == 'x' or state == 'o':
return 'ok', state, gameplay | python | def compete(source_x, source_o, timeout=None, memlimit=None, cgroup='tictactoe',
cgroup_path='/sys/fs/cgroup'):
"""Fights two source files.
Returns either:
* ('ok', 'x' | 'draw' | 'o', GAMEPLAY)
* ('error', GUILTY, REASON, GAMEPLAY)
REASON := utf8-encoded error string (can be up to 65k chars)
GAMEPLAY := [ NUM ]
GUILTY := 'x' | 'o' (during whose turn the error occurred)
NUM := 1..81 | 0
NUM=0 means the move resulted in error (then REASON is non-empty)
GAMEPLAY is never more than 255 characters long:
len(",".join(map(str, range(1, 81)))) == 230
"""
gameplay = []
for xo, moveresult, log in run_interactive(source_x, source_o, timeout,
memlimit, cgroup, cgroup_path):
if moveresult[0] == 'error':
return 'error', xo, moveresult[1], gameplay + [0]
elif moveresult[0] == 'state_coords':
gameplay.append(coords_to_num(moveresult[1][1]))
state = moveresult[1][0]
if state == 'draw' or state == 'x' or state == 'o':
return 'ok', state, gameplay | [
"def",
"compete",
"(",
"source_x",
",",
"source_o",
",",
"timeout",
"=",
"None",
",",
"memlimit",
"=",
"None",
",",
"cgroup",
"=",
"'tictactoe'",
",",
"cgroup_path",
"=",
"'/sys/fs/cgroup'",
")",
":",
"gameplay",
"=",
"[",
"]",
"for",
"xo",
",",
"moveresult",
",",
"log",
"in",
"run_interactive",
"(",
"source_x",
",",
"source_o",
",",
"timeout",
",",
"memlimit",
",",
"cgroup",
",",
"cgroup_path",
")",
":",
"if",
"moveresult",
"[",
"0",
"]",
"==",
"'error'",
":",
"return",
"'error'",
",",
"xo",
",",
"moveresult",
"[",
"1",
"]",
",",
"gameplay",
"+",
"[",
"0",
"]",
"elif",
"moveresult",
"[",
"0",
"]",
"==",
"'state_coords'",
":",
"gameplay",
".",
"append",
"(",
"coords_to_num",
"(",
"moveresult",
"[",
"1",
"]",
"[",
"1",
"]",
")",
")",
"state",
"=",
"moveresult",
"[",
"1",
"]",
"[",
"0",
"]",
"if",
"state",
"==",
"'draw'",
"or",
"state",
"==",
"'x'",
"or",
"state",
"==",
"'o'",
":",
"return",
"'ok'",
",",
"state",
",",
"gameplay"
] | Fights two source files.
Returns either:
* ('ok', 'x' | 'draw' | 'o', GAMEPLAY)
* ('error', GUILTY, REASON, GAMEPLAY)
REASON := utf8-encoded error string (can be up to 65k chars)
GAMEPLAY := [ NUM ]
GUILTY := 'x' | 'o' (during whose turn the error occurred)
NUM := 1..81 | 0
NUM=0 means the move resulted in error (then REASON is non-empty)
GAMEPLAY is never more than 255 characters long:
len(",".join(map(str, range(1, 81)))) == 230 | [
"Fights",
"two",
"source",
"files",
"."
] | c884e206f11d9472ce0b7d08e06f894b24a20989 | https://github.com/motiejus/tictactoelib/blob/c884e206f11d9472ce0b7d08e06f894b24a20989/tictactoelib/noninteractive.py#L3-L30 |
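coords_to_num is referenced but not shown in this record; it flattens each move into the 1..81 range described by the docstring. A plausible sketch assuming 1-based (x, y) pairs on the 9x9 board (the exact convention is a guess):

def coords_to_num(coords):
    # Map a 1-based (x, y) pair on a 9x9 board to a number in 1..81.
    x, y = coords
    return (y - 1) * 9 + x

assert coords_to_num((1, 1)) == 1
assert coords_to_num((9, 9)) == 81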
249,560 | dossier/dossier.fc | python/dossier/fc/string_counter.py | StringCounter._fix_key | def _fix_key(key):
'''Normalize keys to Unicode strings.'''
if isinstance(key, unicode):
return key
if isinstance(key, str):
# On my system, the default encoding is `ascii`, so let's
# explicitly say UTF-8?
return unicode(key, 'utf-8')
raise TypeError(key) | python | def _fix_key(key):
'''Normalize keys to Unicode strings.'''
if isinstance(key, unicode):
return key
if isinstance(key, str):
# On my system, the default encoding is `ascii`, so let's
# explicitly say UTF-8?
return unicode(key, 'utf-8')
raise TypeError(key) | [
"def",
"_fix_key",
"(",
"key",
")",
":",
"if",
"isinstance",
"(",
"key",
",",
"unicode",
")",
":",
"return",
"key",
"if",
"isinstance",
"(",
"key",
",",
"str",
")",
":",
"# On my system, the default encoding is `ascii`, so let's",
"# explicitly say UTF-8?",
"return",
"unicode",
"(",
"key",
",",
"'utf-8'",
")",
"raise",
"TypeError",
"(",
"key",
")"
] | Normalize keys to Unicode strings. | [
"Normalize",
"keys",
"to",
"Unicode",
"strings",
"."
] | 3e969d0cb2592fc06afc1c849d2b22283450b5e2 | https://github.com/dossier/dossier.fc/blob/3e969d0cb2592fc06afc1c849d2b22283450b5e2/python/dossier/fc/string_counter.py#L137-L145 |
249,561 | dossier/dossier.fc | python/dossier/fc/string_counter.py | StringCounter.truncate_most_common | def truncate_most_common(self, truncation_length):
'''
Sorts the counter and keeps only the most common items up to
``truncation_length`` in place.
:type truncation_length: int
'''
keep_keys = set(v[0] for v in self.most_common(truncation_length))
for key in self.keys():
if key not in keep_keys:
self.pop(key) | python | def truncate_most_common(self, truncation_length):
'''
Sorts the counter and keeps only the most common items up to
``truncation_length`` in place.
:type truncation_length: int
'''
keep_keys = set(v[0] for v in self.most_common(truncation_length))
for key in self.keys():
if key not in keep_keys:
self.pop(key) | [
"def",
"truncate_most_common",
"(",
"self",
",",
"truncation_length",
")",
":",
"keep_keys",
"=",
"set",
"(",
"v",
"[",
"0",
"]",
"for",
"v",
"in",
"self",
".",
"most_common",
"(",
"truncation_length",
")",
")",
"for",
"key",
"in",
"self",
".",
"keys",
"(",
")",
":",
"if",
"key",
"not",
"in",
"keep_keys",
":",
"self",
".",
"pop",
"(",
"key",
")"
] | Sorts the counter and keeps only the most common items up to
``truncation_length`` in place.
:type truncation_length: int | [
"Sorts",
"the",
"counter",
"and",
"keeps",
"only",
"the",
"most",
"common",
"items",
"up",
"to",
"truncation_length",
"in",
"place",
"."
] | 3e969d0cb2592fc06afc1c849d2b22283450b5e2 | https://github.com/dossier/dossier.fc/blob/3e969d0cb2592fc06afc1c849d2b22283450b5e2/python/dossier/fc/string_counter.py#L194-L204 |
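A short example of the in-place truncation; it assumes StringCounter can be initialised from a mapping, as collections.Counter can:

sc = StringCounter({u'a': 5, u'b': 3, u'c': 1})
sc.truncate_most_common(2)
assert dict(sc) == {u'a': 5, u'b': 3}  # only the two most common keys survive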
249,562 | MakerReduxCorp/MARDS | MARDS/standard_types.py | rst | def rst(value_rule):
'''Given the data and type information, generate a list of strings for
insertion into a RST document.
'''
lines = []
if value_rule.has('type'):
value_type = value_rule['type'].value
else:
value_type = 'string'
if value_type=='ignore':
pass
else:
lines.append('A *'+value_type+'* value is expected.')
lines.append('')
if value_type=="string":
pass
elif value_type=="label":
pass
elif value_type=="price":
pass
elif value_type=="qty":
pass
elif value_type=="percent":
pass
elif value_type=="check_list":
pass
elif value_type=="radio_select":
pass
elif value_type=="ignore":
pass
elif value_type=="unit":
pass
elif value_type=="angle":
pass
elif value_type=="file":
pass
elif value_type=="length":
pass
elif value_type=="distance":
pass
elif value_type=="duration":
pass
elif value_type=="mass":
pass
elif value_type=="temperature":
pass
elif value_type=="luminous_intensity":
pass
elif value_type=="current":
pass
elif value_type=="voltage":
pass
elif value_type=="frequency":
pass
elif value_type=="boolean":
pass
elif value_type=="integer":
pass
elif value_type=="float":
pass
elif value_type=="hexadecimal":
pass
return lines | python | def rst(value_rule):
'''Given the data and type information, generate a list of strings for
insertion into a RST document.
'''
lines = []
if value_rule.has('type'):
value_type = value_rule['type'].value
else:
value_type = 'string'
if value_type=='ignore':
pass
else:
lines.append('A *'+value_type+'* value is expected.')
lines.append('')
if value_type=="string":
pass
elif value_type=="label":
pass
elif value_type=="price":
pass
elif value_type=="qty":
pass
elif value_type=="percent":
pass
elif value_type=="check_list":
pass
elif value_type=="radio_select":
pass
elif value_type=="ignore":
pass
elif value_type=="unit":
pass
elif value_type=="angle":
pass
elif value_type=="file":
pass
elif value_type=="length":
pass
elif value_type=="distance":
pass
elif value_type=="duration":
pass
elif value_type=="mass":
pass
elif value_type=="temperature":
pass
elif value_type=="luminous_intensity":
pass
elif value_type=="current":
pass
elif value_type=="voltage":
pass
elif value_type=="frequency":
pass
elif value_type=="boolean":
pass
elif value_type=="integer":
pass
elif value_type=="float":
pass
elif value_type=="hexadecimal":
pass
return lines | [
"def",
"rst",
"(",
"value_rule",
")",
":",
"lines",
"=",
"[",
"]",
"if",
"value_rule",
".",
"has",
"(",
"'type'",
")",
":",
"value_type",
"=",
"value_rule",
"[",
"'type'",
"]",
".",
"value",
"else",
":",
"value_type",
"=",
"'string'",
"if",
"value_type",
"==",
"'ignore'",
":",
"pass",
"else",
":",
"lines",
".",
"append",
"(",
"'A *'",
"+",
"value_type",
"+",
"'* value is expected.'",
")",
"lines",
".",
"append",
"(",
"''",
")",
"if",
"value_type",
"==",
"\"string\"",
":",
"pass",
"elif",
"value_type",
"==",
"\"label\"",
":",
"pass",
"elif",
"value_type",
"==",
"\"price\"",
":",
"pass",
"elif",
"value_type",
"==",
"\"qty\"",
":",
"pass",
"elif",
"value_type",
"==",
"\"percent\"",
":",
"pass",
"elif",
"value_type",
"==",
"\"check_list\"",
":",
"pass",
"elif",
"value_type",
"==",
"\"radio_select\"",
":",
"pass",
"elif",
"value_type",
"==",
"\"ignore\"",
":",
"pass",
"elif",
"value_type",
"==",
"\"unit\"",
":",
"pass",
"elif",
"value_type",
"==",
"\"angle\"",
":",
"pass",
"elif",
"value_type",
"==",
"\"file\"",
":",
"pass",
"elif",
"value_type",
"==",
"\"length\"",
":",
"pass",
"elif",
"value_type",
"==",
"\"distance\"",
":",
"pass",
"elif",
"value_type",
"==",
"\"duration\"",
":",
"pass",
"elif",
"value_type",
"==",
"\"mass\"",
":",
"pass",
"elif",
"value_type",
"==",
"\"temperature\"",
":",
"pass",
"elif",
"value_type",
"==",
"\"luminous_intensity\"",
":",
"pass",
"elif",
"value_type",
"==",
"\"current\"",
":",
"pass",
"elif",
"value_type",
"==",
"\"voltage\"",
":",
"pass",
"elif",
"value_type",
"==",
"\"frequency\"",
":",
"pass",
"elif",
"value_type",
"==",
"\"boolean\"",
":",
"pass",
"elif",
"value_type",
"==",
"\"integer\"",
":",
"pass",
"elif",
"value_type",
"==",
"\"float\"",
":",
"pass",
"elif",
"value_type",
"==",
"\"hexadecimal\"",
":",
"pass",
"return",
"lines"
] | Given the data and type information, generate a list of strings for
insertion into a RST document. | [
"Given",
"the",
"data",
"and",
"type",
"information",
"generate",
"a",
"list",
"of",
"strings",
"for",
"insertion",
"into",
"a",
"RST",
"document",
"."
] | f8ddecc70f2ce1703984cb403c9d5417895170d6 | https://github.com/MakerReduxCorp/MARDS/blob/f8ddecc70f2ce1703984cb403c9d5417895170d6/MARDS/standard_types.py#L114-L176 |
249,563 | kxgames/vecrec | vecrec/collisions.py | circle_touching_line | def circle_touching_line(center, radius, start, end):
""" Return true if the given circle intersects the given segment. Note
that this checks for intersection with a line segment, and not an actual
line.
:param center: Center of the circle.
:type center: Vector
:param radius: Radius of the circle.
:type radius: float
:param start: The first end of the line segment.
:type start: Vector
:param end: The second end of the line segment.
:type end: Vector
"""
C, R = center, radius
A, B = start, end
a = (B.x - A.x)**2 + (B.y - A.y)**2
b = 2 * (B.x - A.x) * (A.x - C.x) \
+ 2 * (B.y - A.y) * (A.y - C.y)
c = C.x**2 + C.y**2 + A.x**2 + A.y**2 \
- 2 * (C.x * A.x + C.y * A.y) - R**2
discriminant = b**2 - 4 * a * c
if discriminant < 0:
return False
elif discriminant == 0:
u = v = -b / float(2 * a)
else:
u = (-b + math.sqrt(discriminant)) / float(2 * a)
v = (-b - math.sqrt(discriminant)) / float(2 * a)
if u < 0 and v < 0: return False
if u > 1 and v > 1: return False
return True | python | def circle_touching_line(center, radius, start, end):
""" Return true if the given circle intersects the given segment. Note
that this checks for intersection with a line segment, and not an actual
line.
:param center: Center of the circle.
:type center: Vector
:param radius: Radius of the circle.
:type radius: float
:param start: The first end of the line segment.
:type start: Vector
:param end: The second end of the line segment.
:type end: Vector
"""
C, R = center, radius
A, B = start, end
a = (B.x - A.x)**2 + (B.y - A.y)**2
b = 2 * (B.x - A.x) * (A.x - C.x) \
+ 2 * (B.y - A.y) * (A.y - C.y)
c = C.x**2 + C.y**2 + A.x**2 + A.y**2 \
- 2 * (C.x * A.x + C.y * A.y) - R**2
discriminant = b**2 - 4 * a * c
if discriminant < 0:
return False
elif discriminant == 0:
u = v = -b / float(2 * a)
else:
u = (-b + math.sqrt(discriminant)) / float(2 * a)
v = (-b - math.sqrt(discriminant)) / float(2 * a)
if u < 0 and v < 0: return False
if u > 1 and v > 1: return False
return True | [
"def",
"circle_touching_line",
"(",
"center",
",",
"radius",
",",
"start",
",",
"end",
")",
":",
"C",
",",
"R",
"=",
"center",
",",
"radius",
"A",
",",
"B",
"=",
"start",
",",
"end",
"a",
"=",
"(",
"B",
".",
"x",
"-",
"A",
".",
"x",
")",
"**",
"2",
"+",
"(",
"B",
".",
"y",
"-",
"A",
".",
"y",
")",
"**",
"2",
"b",
"=",
"2",
"*",
"(",
"B",
".",
"x",
"-",
"A",
".",
"x",
")",
"*",
"(",
"A",
".",
"x",
"-",
"C",
".",
"x",
")",
"+",
"2",
"*",
"(",
"B",
".",
"y",
"-",
"A",
".",
"y",
")",
"*",
"(",
"A",
".",
"y",
"-",
"C",
".",
"y",
")",
"c",
"=",
"C",
".",
"x",
"**",
"2",
"+",
"C",
".",
"y",
"**",
"2",
"+",
"A",
".",
"x",
"**",
"2",
"+",
"A",
".",
"y",
"**",
"2",
"-",
"2",
"*",
"(",
"C",
".",
"x",
"*",
"A",
".",
"x",
"+",
"C",
".",
"y",
"*",
"A",
".",
"y",
")",
"-",
"R",
"**",
"2",
"discriminant",
"=",
"b",
"**",
"2",
"-",
"4",
"*",
"a",
"*",
"c",
"if",
"discriminant",
"<",
"0",
":",
"return",
"False",
"elif",
"discriminant",
"==",
"0",
":",
"u",
"=",
"v",
"=",
"-",
"b",
"/",
"float",
"(",
"2",
"*",
"a",
")",
"else",
":",
"u",
"=",
"(",
"-",
"b",
"+",
"math",
".",
"sqrt",
"(",
"discriminant",
")",
")",
"/",
"float",
"(",
"2",
"*",
"a",
")",
"v",
"=",
"(",
"-",
"b",
"-",
"math",
".",
"sqrt",
"(",
"discriminant",
")",
")",
"/",
"float",
"(",
"2",
"*",
"a",
")",
"if",
"u",
"<",
"0",
"and",
"v",
"<",
"0",
":",
"return",
"False",
"if",
"u",
">",
"1",
"and",
"v",
">",
"1",
":",
"return",
"False",
"return",
"True"
] | Return true if the given circle intersects the given segment. Note
that this checks for intersection with a line segment, and not an actual
line.
:param center: Center of the circle.
:type center: Vector
:param radius: Radius of the circle.
:type radius: float
:param start: The first end of the line segment.
:type start: Vector
:param end: The second end of the line segment.
:type end: Vector | [
"Return",
"true",
"if",
"the",
"given",
"circle",
"intersects",
"the",
"given",
"segment",
".",
"Note",
"that",
"this",
"checks",
"for",
"intersection",
"with",
"a",
"line",
"segment",
"and",
"not",
"an",
"actual",
"line",
"."
] | 18b0841419de21a644b4511e2229af853ed09529 | https://github.com/kxgames/vecrec/blob/18b0841419de21a644b4511e2229af853ed09529/vecrec/collisions.py#L3-L40 |
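A quick sanity check of the discriminant test above, using a namedtuple as a minimal stand-in for the library's Vector type (only the .x and .y attributes are needed; circle_touching_line is assumed to be in scope):

from collections import namedtuple

Vector = namedtuple('Vector', 'x y')  # minimal stand-in for vecrec's Vector

# A horizontal segment through the unit circle intersects it;
# the same segment shifted far above the circle does not.
assert circle_touching_line(Vector(0, 0), 1, Vector(-2, 0), Vector(2, 0))
assert not circle_touching_line(Vector(0, 0), 1, Vector(-2, 5), Vector(2, 5))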
249,564 | nir0s/serv | serv/init/upstart.py | Upstart.generate | def generate(self, overwrite=False):
"""Generate a config file for an upstart service.
"""
super(Upstart, self).generate(overwrite=overwrite)
svc_file_template = self.template_prefix + '.conf'
self.svc_file_path = self.generate_into_prefix + '.conf'
self.generate_file_from_template(svc_file_template, self.svc_file_path)
return self.files | python | def generate(self, overwrite=False):
"""Generate a config file for an upstart service.
"""
super(Upstart, self).generate(overwrite=overwrite)
svc_file_template = self.template_prefix + '.conf'
self.svc_file_path = self.generate_into_prefix + '.conf'
self.generate_file_from_template(svc_file_template, self.svc_file_path)
return self.files | [
"def",
"generate",
"(",
"self",
",",
"overwrite",
"=",
"False",
")",
":",
"super",
"(",
"Upstart",
",",
"self",
")",
".",
"generate",
"(",
"overwrite",
"=",
"overwrite",
")",
"svc_file_template",
"=",
"self",
".",
"template_prefix",
"+",
"'.conf'",
"self",
".",
"svc_file_path",
"=",
"self",
".",
"generate_into_prefix",
"+",
"'.conf'",
"self",
".",
"generate_file_from_template",
"(",
"svc_file_template",
",",
"self",
".",
"svc_file_path",
")",
"return",
"self",
".",
"files"
] | Generate a config file for an upstart service. | [
"Generate",
"a",
"config",
"file",
"for",
"an",
"upstart",
"service",
"."
] | 7af724ed49c0eb766c37c4b5287b043a8cf99e9c | https://github.com/nir0s/serv/blob/7af724ed49c0eb766c37c4b5287b043a8cf99e9c/serv/init/upstart.py#L21-L30 |
249,565 | totokaka/pySpaceGDN | pyspacegdn/requests/request.py | Request._fetch | def _fetch(self, default_path):
""" Internal method for fetching.
This differs from :meth:`.fetch` in that it accepts a default path as
an argument.
"""
if not self._path:
path = default_path
else:
path = self._path
req_type = 'GET' if len(self._post_params) == 0 else 'POST'
url = '/'.join(['http:/', self.spacegdn.endpoint, path])
resp = requests.request(req_type, url, params=self._get_params,
data=self._post_params, headers=self._headers)
response = Response()
data = None
if resp.ok:
data = resp.json()
response.add(data, resp.status_code, resp.reason)
return response | python | def _fetch(self, default_path):
""" Internal method for fetching.
This differs from :meth:`.fetch` in that it accepts a default path as
an argument.
"""
if not self._path:
path = default_path
else:
path = self._path
req_type = 'GET' if len(self._post_params) == 0 else 'POST'
url = '/'.join(['http:/', self.spacegdn.endpoint, path])
resp = requests.request(req_type, url, params=self._get_params,
data=self._post_params, headers=self._headers)
response = Response()
data = None
if resp.ok:
data = resp.json()
response.add(data, resp.status_code, resp.reason)
return response | [
"def",
"_fetch",
"(",
"self",
",",
"default_path",
")",
":",
"if",
"not",
"self",
".",
"_path",
":",
"path",
"=",
"default_path",
"else",
":",
"path",
"=",
"self",
".",
"_path",
"req_type",
"=",
"'GET'",
"if",
"len",
"(",
"self",
".",
"_post_params",
")",
"==",
"0",
"else",
"'POST'",
"url",
"=",
"'/'",
".",
"join",
"(",
"[",
"'http:/'",
",",
"self",
".",
"spacegdn",
".",
"endpoint",
",",
"path",
"]",
")",
"resp",
"=",
"requests",
".",
"request",
"(",
"req_type",
",",
"url",
",",
"params",
"=",
"self",
".",
"_get_params",
",",
"data",
"=",
"self",
".",
"_post_params",
",",
"headers",
"=",
"self",
".",
"_headers",
")",
"response",
"=",
"Response",
"(",
")",
"data",
"=",
"None",
"if",
"resp",
".",
"ok",
":",
"data",
"=",
"resp",
".",
"json",
"(",
")",
"response",
".",
"add",
"(",
"data",
",",
"resp",
".",
"status_code",
",",
"resp",
".",
"reason",
")",
"return",
"response"
] | Internal method for fetching.
This differs from :meth:`.fetch` in that it accepts a default path as
an argument. | [
"Internal",
"method",
"for",
"fetching",
"."
] | 55c8be8d751e24873e0a7f7e99d2b715442ec878 | https://github.com/totokaka/pySpaceGDN/blob/55c8be8d751e24873e0a7f7e99d2b715442ec878/pyspacegdn/requests/request.py#L98-L120 |
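A hedged sketch of the same GET-versus-POST dispatch outside the class, assuming only the third-party requests package; the endpoint and path below are hypothetical, and the live call is left commented out:

import requests

def fetch(endpoint, path, get_params=None, post_params=None, headers=None):
    # POST only when there is a body to send, mirroring _fetch above
    method = 'GET' if not post_params else 'POST'
    url = '/'.join(['http:/', endpoint, path])  # yields http://<endpoint>/<path>
    return requests.request(method, url, params=get_params,
                            data=post_params, headers=headers)

# resp = fetch('api.example.org', 'v1/jars')  # hypothetical endpoint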
249,566 | dossier/dossier.fc | python/dossier/fc/feature_tokens.py | FeatureTokens.tokens | def tokens(self, si, k):
'''`si` is a stream item and `k` is a key in this feature. The purpose
of this method is to dereference the token pointers with
respect to the given stream item. That is, it translates each
sequence of token pointers to a sequence of `Token`.
'''
for tokens in self[k]:
yield [si.body.sentences[tagid][sid].tokens[tid]
for tagid, sid, tid in tokens] | python | def tokens(self, si, k):
'''`si` is a stream item and `k` is a key in this feature. The purpose
of this method is to dereference the token pointers with
respect to the given stream item. That is, it translates each
sequence of token pointers to a sequence of `Token`.
'''
for tokens in self[k]:
yield [si.body.sentences[tagid][sid].tokens[tid]
for tagid, sid, tid in tokens] | [
"def",
"tokens",
"(",
"self",
",",
"si",
",",
"k",
")",
":",
"for",
"tokens",
"in",
"self",
"[",
"k",
"]",
":",
"yield",
"[",
"si",
".",
"body",
".",
"sentences",
"[",
"tagid",
"]",
"[",
"sid",
"]",
".",
"tokens",
"[",
"tid",
"]",
"for",
"tagid",
",",
"sid",
",",
"tid",
"in",
"tokens",
"]"
] | `si` is a stream item and `k` is a key in this feature. The purpose
of this method is to dereference the token pointers with
respect to the given stream item. That is, it translates each
sequence of token pointers to a sequence of `Token`. | [
"si",
"is",
"a",
"stream",
"item",
"and",
"k",
"is",
"a",
"key",
"in",
"this",
"feature",
".",
"The",
"purpose",
"of",
"this",
"method",
"is",
"to",
"dereference",
"the",
"token",
"pointers",
"with",
"respect",
"to",
"the",
"given",
"stream",
"item",
".",
"That",
"is",
"it",
"translates",
"each",
"sequence",
"of",
"token",
"pointers",
"to",
"a",
"sequence",
"of",
"Token",
"."
] | 3e969d0cb2592fc06afc1c849d2b22283450b5e2 | https://github.com/dossier/dossier.fc/blob/3e969d0cb2592fc06afc1c849d2b22283450b5e2/python/dossier/fc/feature_tokens.py#L45-L55 |
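The token pointers are (tagger id, sentence index, token index) triples, so dereferencing them is plain nested indexing. A self-contained illustration with plain lists standing in for si.body.sentences (the data is hypothetical, and the .tokens attribute is flattened away):

sentences = {'lingpipe': [['The', 'cat'], ['sat']]}  # tagger id -> sentences -> tokens
pointers = [[('lingpipe', 0, 1), ('lingpipe', 1, 0)]]
for tokens in pointers:
    print([sentences[tagid][sid][tid] for tagid, sid, tid in tokens])
# ['cat', 'sat']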
249,567 | kyleam/wcut | wcut/io.py | get_lines | def get_lines(fname):
"""Return generator with line number and line for file `fname`."""
for line in fileinput.input(fname):
yield fileinput.filelineno(), line.strip() | python | def get_lines(fname):
"""Return generator with line number and line for file `fname`."""
for line in fileinput.input(fname):
yield fileinput.filelineno(), line.strip() | [
"def",
"get_lines",
"(",
"fname",
")",
":",
"for",
"line",
"in",
"fileinput",
".",
"input",
"(",
"fname",
")",
":",
"yield",
"fileinput",
".",
"filelineno",
"(",
")",
",",
"line",
".",
"strip",
"(",
")"
] | Return generator with line number and line for file `fname`. | [
"Return",
"generator",
"with",
"line",
"number",
"and",
"line",
"for",
"file",
"fname",
"."
] | 36f6e10a4c3b4dae274a55010463c6acce83bc71 | https://github.com/kyleam/wcut/blob/36f6e10a4c3b4dae274a55010463c6acce83bc71/wcut/io.py#L21-L24 |
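Usage is straightforward; fileinput also reads stdin when given '-'. A sketch, assuming a file named example.txt exists:

import fileinput

def get_lines(fname):
    """Return generator with line number and line for file `fname`."""
    for line in fileinput.input(fname):
        yield fileinput.filelineno(), line.strip()

for lineno, line in get_lines(['example.txt']):
    print(lineno, line)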
249,568 | mkouhei/tonicdnscli | src/tonicdnscli/connect.py | get_token | def get_token(username, password, server):
"""Retrieve token of TonicDNS API.
Arguments:
username: TonicDNS API username
password: TonicDNS API password
server: TonicDNS API server
"""
method = 'PUT'
uri = 'https://' + server + '/authenticate'
token = ''
authinfo = {
"username": username,
"password": password,
"local_user": username}
token = tonicdns_client(uri, method, token, data=authinfo)
return token | python | def get_token(username, password, server):
"""Retrieve token of TonicDNS API.
Arguments:
username: TonicDNS API username
password: TonicDNS API password
server: TonicDNS API server
"""
method = 'PUT'
uri = 'https://' + server + '/authenticate'
token = ''
authinfo = {
"username": username,
"password": password,
"local_user": username}
token = tonicdns_client(uri, method, token, data=authinfo)
return token | [
"def",
"get_token",
"(",
"username",
",",
"password",
",",
"server",
")",
":",
"method",
"=",
"'PUT'",
"uri",
"=",
"'https://'",
"+",
"server",
"+",
"'/authenticate'",
"token",
"=",
"''",
"authinfo",
"=",
"{",
"\"username\"",
":",
"username",
",",
"\"password\"",
":",
"password",
",",
"\"local_user\"",
":",
"username",
"}",
"token",
"=",
"tonicdns_client",
"(",
"uri",
",",
"method",
",",
"token",
",",
"data",
"=",
"authinfo",
")",
"return",
"token"
] | Retrieve token of TonicDNS API.
Arguments:
username: TonicDNS API username
password: TonicDNS API password
server: TonicDNS API server | [
"Retrieve",
"token",
"of",
"TonicDNS",
"API",
"."
] | df2d6fb2104ae4d49fa89d1bba2f3ccd2fed388c | https://github.com/mkouhei/tonicdnscli/blob/df2d6fb2104ae4d49fa89d1bba2f3ccd2fed388c/src/tonicdnscli/connect.py#L30-L50 |
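Calling it is a one-liner; the credentials and server below are hypothetical, and a reachable TonicDNS instance is required for the request to succeed:

token = get_token('admin', 's3cret', 'dns.example.org')
# the returned value is the 'hash' field of the authentication response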
249,569 | mkouhei/tonicdnscli | src/tonicdnscli/connect.py | tonicdns_client | def tonicdns_client(uri, method, token='', data='', keyword='',
content='', raw_flag=False):
"""TonicDNS API client
Arguments:
uri: TonicDNS API URI
method: TonicDNS API request method
token: TonicDNS API authentication token
data: Post data to TonicDNS API
keyword: Processing keyword of response
content: flag indicating whether data exists
raw_flag: True returns the response data, False pretty-prints it
"""
res = request(uri, method, data, token)
if token:
if keyword == 'serial':
args = {"token": token, "keyword": keyword, "content": content}
cur_soa, new_soa = response(uri, method, res, **args)
return cur_soa, new_soa
else:
if content is None:
args = {"token": token, "keyword": keyword,
"content": content.get('domain')}
response(uri, method, res, **args)
else:
# get sub command
args = {"token": token, "keyword": keyword,
"raw_flag": raw_flag}
data = response(uri, method, res, **args)
return data
else:
args = {"token": token, "keyword": keyword}
token = response(uri, method, res, **args)
return token | python | def tonicdns_client(uri, method, token='', data='', keyword='',
content='', raw_flag=False):
"""TonicDNS API client
Arguments:
uri: TonicDNS API URI
method: TonicDNS API request method
token: TonicDNS API authentication token
data: Post data to TonicDNS API
keyword: Processing keyword of response
content: flag indicating whether data exists
raw_flag: True returns the response data, False pretty-prints it
"""
res = request(uri, method, data, token)
if token:
if keyword == 'serial':
args = {"token": token, "keyword": keyword, "content": content}
cur_soa, new_soa = response(uri, method, res, **args)
return cur_soa, new_soa
else:
if content is None:
args = {"token": token, "keyword": keyword,
"content": content.get('domain')}
response(uri, method, res, **args)
else:
# get sub command
args = {"token": token, "keyword": keyword,
"raw_flag": raw_flag}
data = response(uri, method, res, **args)
return data
else:
args = {"token": token, "keyword": keyword}
token = response(uri, method, res, **args)
return token | [
"def",
"tonicdns_client",
"(",
"uri",
",",
"method",
",",
"token",
"=",
"''",
",",
"data",
"=",
"''",
",",
"keyword",
"=",
"''",
",",
"content",
"=",
"''",
",",
"raw_flag",
"=",
"False",
")",
":",
"res",
"=",
"request",
"(",
"uri",
",",
"method",
",",
"data",
",",
"token",
")",
"if",
"token",
":",
"if",
"keyword",
"==",
"'serial'",
":",
"args",
"=",
"{",
"\"token\"",
":",
"token",
",",
"\"keyword\"",
":",
"keyword",
",",
"\"content\"",
":",
"content",
"}",
"cur_soa",
",",
"new_soa",
"=",
"response",
"(",
"uri",
",",
"method",
",",
"res",
",",
"*",
"*",
"args",
")",
"return",
"cur_soa",
",",
"new_soa",
"else",
":",
"if",
"content",
"is",
"None",
":",
"args",
"=",
"{",
"\"token\"",
":",
"token",
",",
"\"keyword\"",
":",
"keyword",
",",
"\"content\"",
":",
"content",
".",
"get",
"(",
"'domain'",
")",
"}",
"response",
"(",
"uri",
",",
"method",
",",
"res",
",",
"*",
"*",
"args",
")",
"else",
":",
"# get sub command",
"args",
"=",
"{",
"\"token\"",
":",
"token",
",",
"\"keyword\"",
":",
"keyword",
",",
"\"raw_flag\"",
":",
"raw_flag",
"}",
"data",
"=",
"response",
"(",
"uri",
",",
"method",
",",
"res",
",",
"*",
"*",
"args",
")",
"return",
"data",
"else",
":",
"args",
"=",
"{",
"\"token\"",
":",
"token",
",",
"\"keyword\"",
":",
"keyword",
"}",
"token",
"=",
"response",
"(",
"uri",
",",
"method",
",",
"res",
",",
"*",
"*",
"args",
")",
"return",
"token"
] | TonicDNS API client
Arguments:
uri: TonicDNS API URI
method: TonicDNS API request method
token: TonicDNS API authentication token
data: Post data to TonicDNS API
keyword: Processing keyword of response
content: flag indicating whether data exists
raw_flag: True returns the response data, False pretty-prints it | [
"TonicDNS",
"API",
"client"
] | df2d6fb2104ae4d49fa89d1bba2f3ccd2fed388c | https://github.com/mkouhei/tonicdnscli/blob/df2d6fb2104ae4d49fa89d1bba2f3ccd2fed388c/src/tonicdnscli/connect.py#L53-L89 |
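A hedged usage sketch for the client above: fetching a zone's A records. The server, zone, and token are hypothetical; with a keyword set, raw_flag=True makes the 'get' path return the filtered records instead of pretty-printing them:

uri = 'https://dns.example.org/zone/example.org'  # hypothetical server and zone
records = tonicdns_client(uri, 'GET', token=token, keyword='A', raw_flag=True)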
249,570 | mkouhei/tonicdnscli | src/tonicdnscli/connect.py | request | def request(uri, method, data, token=''):
"""Request to TonicDNS API.
Arguments:
uri: TonicDNS API URI
method: TonicDNS API request method
data: Post data to TonicDNS API
token: TonicDNS API authentication token
"""
socket.setdefaulttimeout(__timeout__)
obj = urllib.build_opener(urllib.HTTPHandler)
# encoding json
encoded = json.JSONEncoder(object).encode(data)
# encoding utf8
data_utf8 = encoded.encode('utf-8')
req = urllib.Request(uri, data=data_utf8)
# When encoded(=data) is False, retrieve data as GET method.
if encoded:
req.add_header('Content-Type', 'application/json')
if token:
req.add_header('x-authentication-token', token)
req.get_method = lambda: method
try:
res = obj.open(req)
return res
except urllib.URLError as e:
sys.stderr.write("ERROR: %s\n" % e)
exit(1)
except urllib.HTTPError as e:
sys.stderr.write("ERROR: %s\n" % e)
exit(1) | python | def request(uri, method, data, token=''):
"""Request to TonicDNS API.
Arguments:
uri: TonicDNS API URI
method: TonicDNS API request method
data: Post data to TonicDNS API
token: TonicDNS API authentication token
"""
socket.setdefaulttimeout(__timeout__)
obj = urllib.build_opener(urllib.HTTPHandler)
# encoding json
encoded = json.JSONEncoder(object).encode(data)
# encoding utf8
data_utf8 = encoded.encode('utf-8')
req = urllib.Request(uri, data=data_utf8)
# When encoded(=data) is False, retrieve data as GET method.
if encoded:
req.add_header('Content-Type', 'application/json')
if token:
req.add_header('x-authentication-token', token)
req.get_method = lambda: method
try:
res = obj.open(req)
return res
except urllib.URLError as e:
sys.stderr.write("ERROR: %s\n" % e)
exit(1)
except urllib.HTTPError as e:
sys.stderr.write("ERROR: %s\n" % e)
exit(1) | [
"def",
"request",
"(",
"uri",
",",
"method",
",",
"data",
",",
"token",
"=",
"''",
")",
":",
"socket",
".",
"setdefaulttimeout",
"(",
"__timeout__",
")",
"obj",
"=",
"urllib",
".",
"build_opener",
"(",
"urllib",
".",
"HTTPHandler",
")",
"# encoding json",
"encoded",
"=",
"json",
".",
"JSONEncoder",
"(",
"object",
")",
".",
"encode",
"(",
"data",
")",
"# encoding utf8",
"data_utf8",
"=",
"encoded",
".",
"encode",
"(",
"'utf-8'",
")",
"req",
"=",
"urllib",
".",
"Request",
"(",
"uri",
",",
"data",
"=",
"data_utf8",
")",
"# When encoded(=data) is False, retrieve data as GET method.",
"if",
"encoded",
":",
"req",
".",
"add_header",
"(",
"'Content-Type'",
",",
"'application/json'",
")",
"if",
"token",
":",
"req",
".",
"add_header",
"(",
"'x-authentication-token'",
",",
"token",
")",
"req",
".",
"get_method",
"=",
"lambda",
":",
"method",
"try",
":",
"res",
"=",
"obj",
".",
"open",
"(",
"req",
")",
"return",
"res",
"except",
"urllib",
".",
"URLError",
"as",
"e",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"ERROR: %s\\n\"",
"%",
"e",
")",
"exit",
"(",
"1",
")",
"except",
"urllib",
".",
"HTTPError",
"as",
"e",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"ERROR: %s\\n\"",
"%",
"e",
")",
"exit",
"(",
"1",
")"
] | Request to TonicDNS API.
Arguments:
uri: TonicDNS API URI
method: TonicDNS API request method
data: Post data to TonicDNS API
token: TonicDNS API authentication token | [
"Request",
"to",
"TonicDNS",
"API",
"."
] | df2d6fb2104ae4d49fa89d1bba2f3ccd2fed388c | https://github.com/mkouhei/tonicdnscli/blob/df2d6fb2104ae4d49fa89d1bba2f3ccd2fed388c/src/tonicdnscli/connect.py#L92-L132 |
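The notable trick in request above is forcing an arbitrary HTTP verb through urllib by overriding get_method. A standalone sketch of just that mechanism; the URL is hypothetical and no request is actually sent:

import json
try:
    import urllib.request as urllib  # Python 3
except ImportError:
    import urllib2 as urllib         # Python 2

opener = urllib.build_opener(urllib.HTTPHandler)
payload = json.dumps({"username": "admin"}).encode('utf-8')
req = urllib.Request('https://dns.example.org/authenticate', data=payload)
req.add_header('Content-Type', 'application/json')
req.get_method = lambda: 'PUT'  # urllib would otherwise infer POST from the body
# opener.open(req) would now issue a PUT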
249,571 | mkouhei/tonicdnscli | src/tonicdnscli/connect.py | response | def response(uri, method, res, token='', keyword='',
content='', raw_flag=False):
"""Response of tonicdns_client request
Arguments:
uri: TonicDNS API URI
method: TonicDNS API request method
res: Response from the request to TonicDNS API
token: TonicDNS API token
keyword: Processing keyword
content: JSON data
raw_flag: True returns the raw response data, False pretty-prints it
"""
if method == 'GET' or (method == 'PUT' and not token):
# response body
data = res.read()
data_utf8 = data.decode('utf-8')
if token:
datas = json.loads(data_utf8)
else:
token = json.loads(data_utf8)['hash']
return token
if keyword == 'serial':
# filtering with keyword
record = search_record(datas, 'SOA')[0]
# if SOA record, remove priority unnecessary
del record['priority']
# override ttl
record['ttl'] = int(record['ttl'])
c = JSONConverter(content['domain'])
new_record = c.get_soa(record, content)
return record, new_record
elif keyword:
# '--search' option of 'get' subcommand
records = search_record(datas, keyword)
datas.update({"records": records})
if uri.split('/')[3] == 'template':
# 'tmpl_get' subcommand
if len(uri.split('/')) == 5:
# when a template identifier is specified
#print_formatted(datas)
utils.pretty_print(datas)
else:
# when get all templates
for data in datas:
#print_formatted(data)
utils.pretty_print(datas)
else:
# 'get' subcommand
if raw_flag:
return datas
else:
#print_formatted(datas)
if len(uri.split('zone/')) > 1:
domain = uri.split('zone/')[1]
else:
domain = ''
utils.pretty_print(datas, keyword, domain)
else:
# response non JSON data
data = res.read()
print(data) | python | def response(uri, method, res, token='', keyword='',
content='', raw_flag=False):
"""Response of tonicdns_client request
Arguments:
uri: TonicDNS API URI
method: TonicDNS API request method
res: Response from the request to TonicDNS API
token: TonicDNS API token
keyword: Processing keyword
content: JSON data
raw_flag: True returns the raw response data, False pretty-prints it
"""
if method == 'GET' or (method == 'PUT' and not token):
# response body
data = res.read()
data_utf8 = data.decode('utf-8')
if token:
datas = json.loads(data_utf8)
else:
token = json.loads(data_utf8)['hash']
return token
if keyword == 'serial':
# filtering with keyword
record = search_record(datas, 'SOA')[0]
# if SOA record, remove priority unnecessary
del record['priority']
# override ttl
record['ttl'] = int(record['ttl'])
c = JSONConverter(content['domain'])
new_record = c.get_soa(record, content)
return record, new_record
elif keyword:
# '--search' option of 'get' subcommand
records = search_record(datas, keyword)
datas.update({"records": records})
if uri.split('/')[3] == 'template':
# 'tmpl_get' subcommand
if len(uri.split('/')) == 5:
# when a template identifier is specified
#print_formatted(datas)
utils.pretty_print(datas)
else:
# when get all templates
for data in datas:
#print_formatted(data)
utils.pretty_print(datas)
else:
# 'get' subcommand
if raw_flag:
return datas
else:
#print_formatted(datas)
if len(uri.split('zone/')) > 1:
domain = uri.split('zone/')[1]
else:
domain = ''
utils.pretty_print(datas, keyword, domain)
else:
# response non JSON data
data = res.read()
print(data) | [
"def",
"response",
"(",
"uri",
",",
"method",
",",
"res",
",",
"token",
"=",
"''",
",",
"keyword",
"=",
"''",
",",
"content",
"=",
"''",
",",
"raw_flag",
"=",
"False",
")",
":",
"if",
"method",
"==",
"'GET'",
"or",
"(",
"method",
"==",
"'PUT'",
"and",
"not",
"token",
")",
":",
"# response body",
"data",
"=",
"res",
".",
"read",
"(",
")",
"data_utf8",
"=",
"data",
".",
"decode",
"(",
"'utf-8'",
")",
"if",
"token",
":",
"datas",
"=",
"json",
".",
"loads",
"(",
"data_utf8",
")",
"else",
":",
"token",
"=",
"json",
".",
"loads",
"(",
"data_utf8",
")",
"[",
"'hash'",
"]",
"return",
"token",
"if",
"keyword",
"==",
"'serial'",
":",
"# filtering with keyword",
"record",
"=",
"search_record",
"(",
"datas",
",",
"'SOA'",
")",
"[",
"0",
"]",
"# if SOA record, remove priority unnecessary",
"del",
"record",
"[",
"'priority'",
"]",
"# override ttl",
"record",
"[",
"'ttl'",
"]",
"=",
"int",
"(",
"record",
"[",
"'ttl'",
"]",
")",
"c",
"=",
"JSONConverter",
"(",
"content",
"[",
"'domain'",
"]",
")",
"new_record",
"=",
"c",
".",
"get_soa",
"(",
"record",
",",
"content",
")",
"return",
"record",
",",
"new_record",
"elif",
"keyword",
":",
"# '--search' option of 'get' subcommand",
"records",
"=",
"search_record",
"(",
"datas",
",",
"keyword",
")",
"datas",
".",
"update",
"(",
"{",
"\"records\"",
":",
"records",
"}",
")",
"if",
"uri",
".",
"split",
"(",
"'/'",
")",
"[",
"3",
"]",
"==",
"'template'",
":",
"# 'tmpl_get' subcommand",
"if",
"len",
"(",
"uri",
".",
"split",
"(",
"'/'",
")",
")",
"==",
"5",
":",
"# when specify template identfier",
"#print_formatted(datas)",
"utils",
".",
"pretty_print",
"(",
"datas",
")",
"else",
":",
"# when get all templates",
"for",
"data",
"in",
"datas",
":",
"#print_formatted(data)",
"utils",
".",
"pretty_print",
"(",
"datas",
")",
"else",
":",
"# 'get' subcommand",
"if",
"raw_flag",
":",
"return",
"datas",
"else",
":",
"#print_formatted(datas)",
"if",
"len",
"(",
"uri",
".",
"split",
"(",
"'zone/'",
")",
")",
">",
"1",
":",
"domain",
"=",
"uri",
".",
"split",
"(",
"'zone/'",
")",
"[",
"1",
"]",
"else",
":",
"domain",
"=",
"''",
"utils",
".",
"pretty_print",
"(",
"datas",
",",
"keyword",
",",
"domain",
")",
"else",
":",
"# response non JSON data",
"data",
"=",
"res",
".",
"read",
"(",
")",
"print",
"(",
"data",
")"
] | Response of tonicdns_client request
Arguments:
uri: TonicDNS API URI
method: TonicDNS API request method
res: Response from the request to TonicDNS API
token: TonicDNS API token
keyword: Processing keyword
content: JSON data
raw_flag: True returns the raw response data, False pretty-prints it | [
"Response",
"of",
"tonicdns_client",
"request"
] | df2d6fb2104ae4d49fa89d1bba2f3ccd2fed388c | https://github.com/mkouhei/tonicdnscli/blob/df2d6fb2104ae4d49fa89d1bba2f3ccd2fed388c/src/tonicdnscli/connect.py#L135-L207 |
249,572 | mkouhei/tonicdnscli | src/tonicdnscli/connect.py | search_record | def search_record(datas, keyword):
"""Search target JSON -> dictionary
Arguments:
datas: dictionary of record datas
keyword: search keyword (default is null)
Key target is "name" or "content" or "type". default null.
Either key and type, or on the other hand.
When keyword has include camma ",",
Separate keyword to name, type, content.
"""
key_name, key_type, key_content = False, False, False
if keyword.find(',') > -1:
if len(keyword.split(',')) == 3:
key_content = keyword.split(',')[2]
key_name = keyword.split(',')[0]
key_type = keyword.split(',')[1]
result = []
for record in datas['records']:
if key_name and key_type:
if key_content:
if ((record['name'].find(key_name) > -1 and
record['type'] == key_type and
record['content'].find(key_content) > -1)):
result.append(record)
else:
if ((record['name'].find(key_name) > -1 and
record['type'] == key_type)):
result.append(record)
elif ((record['name'].find(keyword) >= 0 or
record['content'].find(keyword) >= 0 or
record['type'] == keyword)):
result.append(record)
return result | python | def search_record(datas, keyword):
"""Search target JSON -> dictionary
Arguments:
datas: dictionary of record datas
keyword: search keyword (default is null)
Key target is "name" or "content" or "type". default null.
Either key and type, or on the other hand.
When keyword has include camma ",",
Separate keyword to name, type, content.
"""
key_name, key_type, key_content = False, False, False
if keyword.find(',') > -1:
if len(keyword.split(',')) == 3:
key_content = keyword.split(',')[2]
key_name = keyword.split(',')[0]
key_type = keyword.split(',')[1]
result = []
for record in datas['records']:
if key_name and key_type:
if key_content:
if ((record['name'].find(key_name) > -1 and
record['type'] == key_type and
record['content'].find(key_content) > -1)):
result.append(record)
else:
if ((record['name'].find(key_name) > -1 and
record['type'] == key_type)):
result.append(record)
elif ((record['name'].find(keyword) >= 0 or
record['content'].find(keyword) >= 0 or
record['type'] == keyword)):
result.append(record)
return result | [
"def",
"search_record",
"(",
"datas",
",",
"keyword",
")",
":",
"key_name",
",",
"key_type",
",",
"key_content",
"=",
"False",
",",
"False",
",",
"False",
"if",
"keyword",
".",
"find",
"(",
"','",
")",
">",
"-",
"1",
":",
"if",
"len",
"(",
"keyword",
".",
"split",
"(",
"','",
")",
")",
"==",
"3",
":",
"key_content",
"=",
"keyword",
".",
"split",
"(",
"','",
")",
"[",
"2",
"]",
"key_name",
"=",
"keyword",
".",
"split",
"(",
"','",
")",
"[",
"0",
"]",
"key_type",
"=",
"keyword",
".",
"split",
"(",
"','",
")",
"[",
"1",
"]",
"result",
"=",
"[",
"]",
"for",
"record",
"in",
"datas",
"[",
"'records'",
"]",
":",
"if",
"key_name",
"and",
"key_type",
":",
"if",
"key_content",
":",
"if",
"(",
"(",
"record",
"[",
"'name'",
"]",
".",
"find",
"(",
"key_name",
")",
">",
"-",
"1",
"and",
"record",
"[",
"'type'",
"]",
"==",
"key_type",
"and",
"record",
"[",
"'content'",
"]",
".",
"find",
"(",
"key_content",
")",
">",
"-",
"1",
")",
")",
":",
"result",
".",
"append",
"(",
"record",
")",
"else",
":",
"if",
"(",
"(",
"record",
"[",
"'name'",
"]",
".",
"find",
"(",
"key_name",
")",
">",
"-",
"1",
"and",
"record",
"[",
"'type'",
"]",
"==",
"key_type",
")",
")",
":",
"result",
".",
"append",
"(",
"record",
")",
"elif",
"(",
"(",
"record",
"[",
"'name'",
"]",
".",
"find",
"(",
"keyword",
")",
">=",
"0",
"or",
"record",
"[",
"'content'",
"]",
".",
"find",
"(",
"keyword",
")",
">=",
"0",
"or",
"record",
"[",
"'type'",
"]",
"==",
"keyword",
")",
")",
":",
"result",
".",
"append",
"(",
"record",
")",
"return",
"result"
] | Search target JSON -> dictionary
Arguments:
datas: dictionary of record datas
keyword: search keyword (default is null)
Key target is "name" or "content" or "type". default null.
Either key and type, or on the other hand.
When keyword has include camma ",",
Separate keyword to name, type, content. | [
"Search",
"target",
"JSON",
"-",
">",
"dictionary"
] | df2d6fb2104ae4d49fa89d1bba2f3ccd2fed388c | https://github.com/mkouhei/tonicdnscli/blob/df2d6fb2104ae4d49fa89d1bba2f3ccd2fed388c/src/tonicdnscli/connect.py#L210-L253 |
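Because search_record only needs a dict with a 'records' list, it is easy to exercise standalone, assuming the function above is in scope; the records below are hypothetical:

datas = {'records': [
    {'name': 'www.example.org', 'type': 'A',  'content': '192.0.2.10'},
    {'name': 'example.org',     'type': 'MX', 'content': 'mail.example.org'},
]}
print(search_record(datas, 'A'))              # single keyword: matches the A record
print(search_record(datas, 'www,A,192.0.2'))  # comma form: name, type, content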
249,573 | mkouhei/tonicdnscli | src/tonicdnscli/connect.py | print_formatted | def print_formatted(datas):
"""Pretty print JSON DATA
Argument:
datas: dictionary of data
"""
if not datas:
print("No data")
exit(1)
if isinstance(datas, list):
# get all zones
# API /zone without :identifier
hr()
print('%-20s %-8s %-12s'
% ('name', 'type', 'notified_serial'))
hr()
for record in datas:
# print 'NAME'
utils.print_inline("%(name)-20s" % record)
# print 'TYPE' of SOA record
utils.print_inline("%(type)-8s" % record)
if record.get('notified_serial'):
print("%(notified_serial)s" % record)
else:
print('')
exit(0)
elif datas.get('records'):
print("domain: %(name)s" % datas)
if datas.get('type') == 'MASTER' and datas.get('notified_serial'):
print("serial: %(notified_serial)s" % datas)
print("DNS : %(type)s" % datas)
# print header
hr()
print('%-33s %-5s %-25s %-5s %-3s'
% ('name', 'type', 'content', 'ttl', 'prio'))
hr()
for record in datas.get('records'):
# print 'NAME'
utils.print_inline("%(name)-33s" % record)
# print 'TYPE' of SOA record
if record.get('type') == 'SOA':
print("%(type)-5s" % record)
# print 'TYPE' of non SOA record
else:
utils.print_inline("%(type)-5s" % record)
# print 'CONTENT' of non SOA
if record.get('type') == 'SOA':
utils.print_inline(">\t\t%(content)-25s " % record)
# print 'CONTENT' of SOA record
else:
utils.print_inline("%(content)-25s" % record)
# print TTL, and PRIORITY for MX, SRV record
if record.get('priority'):
utils.print_inline("%(ttl)5s" % record)
print("%(priority)2s" % record)
# print ttl for non SOA record
else:
print("%(ttl)5s " % record)
hr()
elif datas.get('identifier'):
# for template
print("identifier : %(identifier)s" % datas)
print("description: %(description)s" % datas)
hr()
print('%-33s %-5s %-25s %-5s %-3s'
% ('name', 'type', 'content', 'ttl', 'prio'))
for record in datas.get('entries'):
# print 'NAME'
utils.print_inline("%(name)-33s" % record)
# print 'TYPE' for SOA
if record.get('type') == 'SOA':
print("%(type)-5s" % record)
# print 'TYPE' for non SOA
else:
utils.print_inline("%(type)-5s" % record)
# print 'CONTENT' for SOA
if record.get('type') == 'SOA':
utils.print_inline("> %(content)-25s " % record)
# print 'CONTENT' for non SOA
else:
utils.print_inline("%(content)-24s" % record)
# print 'TTL', and 'PRIORITY'
if record.get('priority') is not None:
utils.print_inline("%(ttl)5s" % record)
print("%(priority)2s" % record)
# print
else:
print("%(ttl)5s " % record)
hr()
else:
print("No match records") | python | def print_formatted(datas):
"""Pretty print JSON DATA
Argument:
datas: dictionary of data
"""
if not datas:
print("No data")
exit(1)
if isinstance(datas, list):
# get all zones
# API /zone without :identifier
hr()
print('%-20s %-8s %-12s'
% ('name', 'type', 'notified_serial'))
hr()
for record in datas:
# print 'NAME'
utils.print_inline("%(name)-20s" % record)
# print 'TYPE' of SOA record
utils.print_inline("%(type)-8s" % record)
if record.get('notified_serial'):
print("%(notified_serial)s" % record)
else:
print('')
exit(0)
elif datas.get('records'):
print("domain: %(name)s" % datas)
if datas.get('type') == 'MASTER' and datas.get('notified_serial'):
print("serial: %(notified_serial)s" % datas)
print("DNS : %(type)s" % datas)
# print header
hr()
print('%-33s %-5s %-25s %-5s %-3s'
% ('name', 'type', 'content', 'ttl', 'prio'))
hr()
for record in datas.get('records'):
# print 'NAME'
utils.print_inline("%(name)-33s" % record)
# print 'TYPE' of SOA record
if record.get('type') == 'SOA':
print("%(type)-5s" % record)
# print 'TYPE' of non SOA record
else:
utils.print_inline("%(type)-5s" % record)
# print 'CONTENT' of non SOA
if record.get('type') == 'SOA':
utils.print_inline(">\t\t%(content)-25s " % record)
# print 'CONTENT' of SOA record
else:
utils.print_inline("%(content)-25s" % record)
# print TTL, and PRIORITY for MX, SRV record
if record.get('priority'):
utils.print_inline("%(ttl)5s" % record)
print("%(priority)2s" % record)
# print ttl for non SOA record
else:
print("%(ttl)5s " % record)
hr()
elif datas.get('identifier'):
# for template
print("identifier : %(identifier)s" % datas)
print("description: %(description)s" % datas)
hr()
print('%-33s %-5s %-25s %-5s %-3s'
% ('name', 'type', 'content', 'ttl', 'prio'))
for record in datas.get('entries'):
# print 'NAME'
utils.print_inline("%(name)-33s" % record)
# print 'TYPE' for SOA
if record.get('type') == 'SOA':
print("%(type)-5s" % record)
# print 'TYPE' for non SOA
else:
utils.print_inline("%(type)-5s" % record)
# print 'CONTENT' for SOA
if record.get('type') == 'SOA':
utils.print_inline("> %(content)-25s " % record)
# print 'CONTENT' for non SOA
else:
utils.print_inline("%(content)-24s" % record)
# print 'TTL', and 'PRIORITY'
if record.get('priority') is not None:
utils.print_inline("%(ttl)5s" % record)
print("%(priority)2s" % record)
# print
else:
print("%(ttl)5s " % record)
hr()
else:
print("No match records") | [
"def",
"print_formatted",
"(",
"datas",
")",
":",
"if",
"not",
"datas",
":",
"print",
"(",
"\"No data\"",
")",
"exit",
"(",
"1",
")",
"if",
"isinstance",
"(",
"datas",
",",
"list",
")",
":",
"# get all zones",
"# API /zone without :identifier",
"hr",
"(",
")",
"print",
"(",
"'%-20s %-8s %-12s'",
"%",
"(",
"'name'",
",",
"'type'",
",",
"'notified_serial'",
")",
")",
"hr",
"(",
")",
"for",
"record",
"in",
"datas",
":",
"# print 'NAME'",
"utils",
".",
"print_inline",
"(",
"\"%(name)-20s\"",
"%",
"record",
")",
"# print 'TYPE' of SOA record",
"utils",
".",
"print_inline",
"(",
"\"%(type)-8s\"",
"%",
"record",
")",
"if",
"record",
".",
"get",
"(",
"'notified_serial'",
")",
":",
"print",
"(",
"\"%(notified_serial)s\"",
"%",
"record",
")",
"else",
":",
"print",
"(",
"''",
")",
"exit",
"(",
"0",
")",
"elif",
"datas",
".",
"get",
"(",
"'records'",
")",
":",
"print",
"(",
"\"domain: %(name)s\"",
"%",
"datas",
")",
"if",
"datas",
".",
"get",
"(",
"'type'",
")",
"==",
"'MASTER'",
"and",
"datas",
".",
"get",
"(",
"'notified_serial'",
")",
":",
"print",
"(",
"\"serial: %(notified_serial)s\"",
"%",
"datas",
")",
"print",
"(",
"\"DNS : %(type)s\"",
"%",
"datas",
")",
"# print header",
"hr",
"(",
")",
"print",
"(",
"'%-33s %-5s %-25s %-5s %-3s'",
"%",
"(",
"'name'",
",",
"'type'",
",",
"'content'",
",",
"'ttl'",
",",
"'prio'",
")",
")",
"hr",
"(",
")",
"for",
"record",
"in",
"datas",
".",
"get",
"(",
"'records'",
")",
":",
"# print 'NAME'",
"utils",
".",
"print_inline",
"(",
"\"%(name)-33s\"",
"%",
"record",
")",
"# print 'TYPE' of SOA record",
"if",
"record",
".",
"get",
"(",
"'type'",
")",
"==",
"'SOA'",
":",
"print",
"(",
"\"%(type)-5s\"",
"%",
"record",
")",
"# print 'TYPE' of non SOA record",
"else",
":",
"utils",
".",
"print_inline",
"(",
"\"%(type)-5s\"",
"%",
"record",
")",
"# print 'CONTENT' of non SOA",
"if",
"record",
".",
"get",
"(",
"'type'",
")",
"==",
"'SOA'",
":",
"utils",
".",
"print_inline",
"(",
"\">\\t\\t%(content)-25s \"",
"%",
"record",
")",
"# print 'CONTENT' of SOA record",
"else",
":",
"utils",
".",
"print_inline",
"(",
"\"%(content)-25s\"",
"%",
"record",
")",
"# print TTL, and PRIORITY for MX, SRV record",
"if",
"record",
".",
"get",
"(",
"'priority'",
")",
":",
"utils",
".",
"print_inline",
"(",
"\"%(ttl)5s\"",
"%",
"record",
")",
"print",
"(",
"\"%(priority)2s\"",
"%",
"record",
")",
"# print ttl for non SOA record",
"else",
":",
"print",
"(",
"\"%(ttl)5s \"",
"%",
"record",
")",
"hr",
"(",
")",
"elif",
"datas",
".",
"get",
"(",
"'identifier'",
")",
":",
"# for template",
"print",
"(",
"\"identifier : %(identifier)s\"",
"%",
"datas",
")",
"print",
"(",
"\"description: %(description)s\"",
"%",
"datas",
")",
"hr",
"(",
")",
"print",
"(",
"'%-33s %-5s %-25s %-5s %-3s'",
"%",
"(",
"'name'",
",",
"'type'",
",",
"'content'",
",",
"'ttl'",
",",
"'prio'",
")",
")",
"for",
"record",
"in",
"datas",
".",
"get",
"(",
"'entries'",
")",
":",
"# print 'NAME'",
"utils",
".",
"print_inline",
"(",
"\"%(name)-33s\"",
"%",
"record",
")",
"# print 'TYPE' for SOA",
"if",
"record",
".",
"get",
"(",
"'type'",
")",
"==",
"'SOA'",
":",
"print",
"(",
"\"%(type)-5s\"",
"%",
"record",
")",
"# print 'TYPE' for non SOA",
"else",
":",
"utils",
".",
"print_inline",
"(",
"\"%(type)-5s\"",
"%",
"record",
")",
"# print 'CONTENT' for SOA",
"if",
"record",
".",
"get",
"(",
"'type'",
")",
"==",
"'SOA'",
":",
"utils",
".",
"print_inline",
"(",
"\"> %(content)-25s \"",
"%",
"record",
")",
"# print 'CONTENT' for non SOA",
"else",
":",
"utils",
".",
"print_inline",
"(",
"\"%(content)-24s\"",
"%",
"record",
")",
"# print 'TTL', and 'PRIORITY'",
"if",
"record",
".",
"get",
"(",
"'priority'",
")",
"is",
"not",
"None",
":",
"utils",
".",
"print_inline",
"(",
"\"%(ttl)5s\"",
"%",
"record",
")",
"print",
"(",
"\"%(priority)2s\"",
"%",
"record",
")",
"# print",
"else",
":",
"print",
"(",
"\"%(ttl)5s \"",
"%",
"record",
")",
"hr",
"(",
")",
"else",
":",
"print",
"(",
"\"No match records\"",
")"
] | Pretty print JSON DATA
Argument:
datas: dictionary of data | [
"Pretty",
"print",
"JSON",
"DATA"
] | df2d6fb2104ae4d49fa89d1bba2f3ccd2fed388c | https://github.com/mkouhei/tonicdnscli/blob/df2d6fb2104ae4d49fa89d1bba2f3ccd2fed388c/src/tonicdnscli/connect.py#L256-L374 |
249,574 | EventTeam/beliefs | src/beliefs/beliefstate.py | BeliefState.execute_deferred_effects | def execute_deferred_effects(self, pos):
""" Evaluates deferred effects that are triggered by the prefix of the
pos on the current beliefstate. For instance, if the effect is triggered
by the 'NN' pos, then the effect will be triggered by 'NN' or 'NNS'."""
costs = 0
to_delete = []
for entry in self.__dict__['deferred_effects']:
effect_pos, effect = entry
if pos.startswith(effect_pos):
costs += effect(self)
to_delete.append(entry)
# we delete afterwards, because Python cannot delete from a list that
# is being iterated over without screwing up the iteration.
for entry in to_delete:
self.__dict__['deferred_effects'].remove(entry)
return costs | python | def execute_deferred_effects(self, pos):
""" Evaluates deferred effects that are triggered by the prefix of the
pos on the current beliefstate. For instance, if the effect is triggered
by the 'NN' pos, then the effect will be triggered by 'NN' or 'NNS'."""
costs = 0
to_delete = []
for entry in self.__dict__['deferred_effects']:
effect_pos, effect = entry
if pos.startswith(effect_pos):
costs += effect(self)
to_delete.append(entry)
# we delete afterwards, because Python cannot delete from a list that
# is being iterated over without screwing up the iteration.
for entry in to_delete:
self.__dict__['deferred_effects'].remove(entry)
return costs | [
"def",
"execute_deferred_effects",
"(",
"self",
",",
"pos",
")",
":",
"costs",
"=",
"0",
"to_delete",
"=",
"[",
"]",
"for",
"entry",
"in",
"self",
".",
"__dict__",
"[",
"'deferred_effects'",
"]",
":",
"effect_pos",
",",
"effect",
"=",
"entry",
"if",
"pos",
".",
"startswith",
"(",
"effect_pos",
")",
":",
"costs",
"+=",
"effect",
"(",
"self",
")",
"to_delete",
".",
"append",
"(",
"entry",
")",
"# we delete afterwards, because Python cannot delete from a list that",
"# is being iterated over without screwing up the iteration.",
"for",
"entry",
"in",
"to_delete",
":",
"self",
".",
"__dict__",
"[",
"'deferred_effects'",
"]",
".",
"remove",
"(",
"entry",
")",
"return",
"costs"
] | Evaluates deferred effects that are triggered by the prefix of the
pos on the current beliefstate. For instance, if the effect is triggered
by the 'NN' pos, then the effect will be triggered by 'NN' or 'NNS'. | [
"Evaluates",
"deferred",
"effects",
"that",
"are",
"triggered",
"by",
"the",
"prefix",
"of",
"the",
"pos",
"on",
"the",
"current",
"beliefstate",
".",
"For",
"instance",
"if",
"the",
"effect",
"is",
"triggered",
"by",
"the",
"NN",
"pos",
"then",
"the",
"effect",
"will",
"be",
"triggered",
"by",
"NN",
"or",
"NNS",
"."
] | c07d22b61bebeede74a72800030dde770bf64208 | https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/beliefstate.py#L79-L94 |
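The prefix trigger means an effect registered for 'NN' also fires for 'NNS', and entries are removed only after iteration so the loop never skips. A self-contained sketch of that pattern with hypothetical effects:

effects = [('NN', lambda state: 1), ('VB', lambda state: 2)]

def execute(effects, pos, state=None):
    costs, fired = 0, []
    for prefix, effect in effects:
        if pos.startswith(prefix):
            costs += effect(state)
            fired.append((prefix, effect))
    for entry in fired:  # delete after the loop, as the comment above explains
        effects.remove(entry)
    return costs

print(execute(effects, 'NNS'))  # 1 -- 'NNS' starts with 'NN'
print(effects)                  # only the 'VB' entry remains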
249,575 | EventTeam/beliefs | src/beliefs/beliefstate.py | BeliefState.set_environment_variable | def set_environment_variable(self, key, val):
""" Sets a variable if that variable is not already set """
if self.get_environment_variable(key) in [None, val]:
self.__dict__['environment_variables'][key] = val
else:
raise Contradiction("Could not set environment variable %s" % (key)) | python | def set_environment_variable(self, key, val):
""" Sets a variable if that variable is not already set """
if self.get_environment_variable(key) in [None, val]:
self.__dict__['environment_variables'][key] = val
else:
raise Contradiction("Could not set environment variable %s" % (key)) | [
"def",
"set_environment_variable",
"(",
"self",
",",
"key",
",",
"val",
")",
":",
"if",
"self",
".",
"get_environment_variable",
"(",
"key",
")",
"in",
"[",
"None",
",",
"val",
"]",
":",
"self",
".",
"__dict__",
"[",
"'environment_variables'",
"]",
"[",
"key",
"]",
"=",
"val",
"else",
":",
"raise",
"Contradiction",
"(",
"\"Could not set environment variable %s\"",
"%",
"(",
"key",
")",
")"
] | Sets a variable if that variable is not already set | [
"Sets",
"a",
"variable",
"if",
"that",
"variable",
"is",
"not",
"already",
"set"
] | c07d22b61bebeede74a72800030dde770bf64208 | https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/beliefstate.py#L96-L101 |
249,576 | EventTeam/beliefs | src/beliefs/beliefstate.py | BeliefState.iter_breadth_first | def iter_breadth_first(self, root=None):
""" Traverses the belief state's structure breadth-first """
if root == None:
root = self
yield root
last = root
for node in self.iter_breadth_first(root):
if isinstance(node, DictCell):
# recurse
for subpart in node:
yield subpart
last = subpart
if last == node:
return | python | def iter_breadth_first(self, root=None):
""" Traverses the belief state's structure breadth-first """
if root == None:
root = self
yield root
last = root
for node in self.iter_breadth_first(root):
if isinstance(node, DictCell):
# recurse
for subpart in node:
yield subpart
last = subpart
if last == node:
return | [
"def",
"iter_breadth_first",
"(",
"self",
",",
"root",
"=",
"None",
")",
":",
"if",
"root",
"==",
"None",
":",
"root",
"=",
"self",
"yield",
"root",
"last",
"=",
"root",
"for",
"node",
"in",
"self",
".",
"iter_breadth_first",
"(",
"root",
")",
":",
"if",
"isinstance",
"(",
"node",
",",
"DictCell",
")",
":",
"# recurse",
"for",
"subpart",
"in",
"node",
":",
"yield",
"subpart",
"last",
"=",
"subpart",
"if",
"last",
"==",
"node",
":",
"return"
] | Traverses the belief state's structure breadth-first | [
"Traverses",
"the",
"belief",
"state",
"s",
"structure",
"breadth",
"-",
"first"
] | c07d22b61bebeede74a72800030dde770bf64208 | https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/beliefstate.py#L119-L132 |
249,577 | EventTeam/beliefs | src/beliefs/beliefstate.py | BeliefState.find_path | def find_path(self, test_function=None, on_targets=False):
"""
General helper method that iterates breadth-first over the referential_domain's
cells and returns a path where the test_function is True
"""
assert self.has_referential_domain(), "need context set"
if not test_function:
test_function = lambda x, y: True
def find_path_inner(part, prefix):
name, structure = part
if test_function(name, structure):
yield prefix + [name]
if isinstance(structure, DictCell):
for sub_structure in structure:
for prefix2 in find_path_inner(sub_structure,\
prefix[:] + [name]):
yield prefix2
prefix = []
if on_targets:
# apply search to the first target
results = []
for _, instance in self.iter_singleton_referents():
for part in instance:
for entry in find_path_inner(part, prefix[:]):
results.append(['target'] + entry)
while results:
yield results.pop()
break # only use first instance
else:
# apply search to self
for part in self:
for entry in find_path_inner(part, prefix[:]):
yield entry | python | def find_path(self, test_function=None, on_targets=False):
"""
General helper method that iterates breadth-first over the referential_domain's
cells and returns a path where the test_function is True
"""
assert self.has_referential_domain(), "need context set"
if not test_function:
test_function = lambda x, y: True
def find_path_inner(part, prefix):
name, structure = part
if test_function(name, structure):
yield prefix + [name]
if isinstance(structure, DictCell):
for sub_structure in structure:
for prefix2 in find_path_inner(sub_structure,\
prefix[:] + [name]):
yield prefix2
prefix = []
if on_targets:
# apply search to the first target
results = []
for _, instance in self.iter_singleton_referents():
for part in instance:
for entry in find_path_inner(part, prefix[:]):
results.append(['target'] + entry)
while results:
yield results.pop()
break # only use first instance
else:
# apply search to self
for part in self:
for entry in find_path_inner(part, prefix[:]):
yield entry | [
"def",
"find_path",
"(",
"self",
",",
"test_function",
"=",
"None",
",",
"on_targets",
"=",
"False",
")",
":",
"assert",
"self",
".",
"has_referential_domain",
"(",
")",
",",
"\"need context set\"",
"if",
"not",
"test_function",
":",
"test_function",
"=",
"lambda",
"x",
",",
"y",
":",
"True",
"def",
"find_path_inner",
"(",
"part",
",",
"prefix",
")",
":",
"name",
",",
"structure",
"=",
"part",
"if",
"test_function",
"(",
"name",
",",
"structure",
")",
":",
"yield",
"prefix",
"+",
"[",
"name",
"]",
"if",
"isinstance",
"(",
"structure",
",",
"DictCell",
")",
":",
"for",
"sub_structure",
"in",
"structure",
":",
"for",
"prefix2",
"in",
"find_path_inner",
"(",
"sub_structure",
",",
"prefix",
"[",
":",
"]",
"+",
"[",
"name",
"]",
")",
":",
"yield",
"prefix2",
"prefix",
"=",
"[",
"]",
"if",
"on_targets",
":",
"# apply search to the first target",
"results",
"=",
"[",
"]",
"for",
"_",
",",
"instance",
"in",
"self",
".",
"iter_singleton_referents",
"(",
")",
":",
"for",
"part",
"in",
"instance",
":",
"for",
"entry",
"in",
"find_path_inner",
"(",
"part",
",",
"prefix",
"[",
":",
"]",
")",
":",
"results",
".",
"append",
"(",
"[",
"'target'",
"]",
"+",
"entry",
")",
"while",
"results",
":",
"yield",
"results",
".",
"pop",
"(",
")",
"break",
"# only use first instance",
"else",
":",
"# apply search to self",
"for",
"part",
"in",
"self",
":",
"for",
"entry",
"in",
"find_path_inner",
"(",
"part",
",",
"prefix",
"[",
":",
"]",
")",
":",
"yield",
"entry"
] | General helper method that iterates breadth-first over the referential_domain's
cells and returns a path where the test_function is True | [
"General",
"helper",
"method",
"that",
"iterates",
"breadth",
"-",
"first",
"over",
"the",
"referential_domain",
"s",
"cells",
"and",
"returns",
"a",
"path",
"where",
"the",
"test_function",
"is",
"True"
] | c07d22b61bebeede74a72800030dde770bf64208 | https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/beliefstate.py#L134-L169 |
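Stripped of the DictCell machinery, the inner search in find_path is a recursive generator over a nested mapping. A minimal stand-alone version using plain dicts (the tree below is hypothetical):

def find_paths(structure, test, prefix=()):
    for name, sub in structure.items():
        if test(name, sub):
            yield prefix + (name,)
        if isinstance(sub, dict):
            for path in find_paths(sub, test, prefix + (name,)):
                yield path

tree = {'target': {'color': 'red', 'size': {'width': 3}}}
print(list(find_paths(tree, lambda name, value: name == 'width')))
# [('target', 'size', 'width')]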
249,578 | EventTeam/beliefs | src/beliefs/beliefstate.py | BeliefState.get_nth_unique_value | def get_nth_unique_value(self, keypath, n, distance_from, open_interval=True):
"""
Returns the `n`th (zero-indexed) unique value, or raises
a contradiction if that is out of bounds
"""
unique_values = self.get_ordered_values(keypath, distance_from, open_interval)
if 0 <= n < len(unique_values):
#logging.error("%i th unique value is %s" % (n, str(unique_values[n])))
return unique_values[n]
else:
raise Contradiction("n-th Unique value out of range: " + str(n)) | python | def get_nth_unique_value(self, keypath, n, distance_from, open_interval=True):
"""
Returns the `n`th (zero-indexed) unique value, or raises
a contradiction if that is out of bounds
"""
unique_values = self.get_ordered_values(keypath, distance_from, open_interval)
if 0 <= n < len(unique_values):
#logging.error("%i th unique value is %s" % (n, str(unique_values[n])))
return unique_values[n]
else:
raise Contradiction("n-th Unique value out of range: " + str(n)) | [
"def",
"get_nth_unique_value",
"(",
"self",
",",
"keypath",
",",
"n",
",",
"distance_from",
",",
"open_interval",
"=",
"True",
")",
":",
"unique_values",
"=",
"self",
".",
"get_ordered_values",
"(",
"keypath",
",",
"distance_from",
",",
"open_interval",
")",
"if",
"0",
"<=",
"n",
"<",
"len",
"(",
"unique_values",
")",
":",
"#logging.error(\"%i th unique value is %s\" % (n, str(unique_values[n])))",
"return",
"unique_values",
"[",
"n",
"]",
"else",
":",
"raise",
"Contradiction",
"(",
"\"n-th Unique value out of range: \"",
"+",
"str",
"(",
"n",
")",
")"
] | Returns the `n`th (zero-indexed) unique value, or raises
a contradiction if that is out of bounds | [
"Returns",
"the",
"n",
"-",
"1",
"th",
"unique",
"value",
"or",
"raises",
"a",
"contradiction",
"if",
"that",
"is",
"out",
"of",
"bounds"
] | c07d22b61bebeede74a72800030dde770bf64208 | https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/beliefstate.py#L172-L182 |
249,579 | EventTeam/beliefs | src/beliefs/beliefstate.py | BeliefState.get_ordered_values | def get_ordered_values(self, keypath, distance_from, open_interval=True):
"""
Retrieves the referents' values sorted by their distance from the
min, max, or mean value.
"""
values = []
if keypath[0] == 'target':
# instances start with 'target' prefix, but
# don't contain it, so we remove it here.
keypath = keypath[1:]
for _, instance in self.iter_singleton_referents():
value = instance.get_value_from_path(keypath)
if hasattr(value, 'low') and value.low != value.high:
return []
values.append(float(value))
if len(values) == 0:
return []
values = np.array(values)
anchor = values.min()
diffs = values - anchor
if distance_from == 'max':
anchor = values.max()
diffs = anchor - values
if distance_from == 'mean':
anchor = values.mean()
diffs = abs(anchor - values)
sdiffs = np.unique(diffs)
sdiffs.sort()
results = []
for ix, el in enumerate(sdiffs):
mask = diffs <= el
vals = values[mask]
if False:
# when vagueness has been made precise through an ordinal
results.append(IntervalCell(vals.min(), vals.max()))
elif distance_from == 'max':
if open_interval:
results.append(IntervalCell(vals.min(), np.inf))
else:
results.append(IntervalCell(vals.min(), vals.min()))
elif distance_from == 'min':
if open_interval:
results.append(IntervalCell(-np.inf, vals.max()))
else:
results.append(IntervalCell(vals.max(), vals.max()))
elif distance_from == 'mean':
if ix+1 == len(sdiffs): continue # skip last
results.append(IntervalCell(vals.min(), vals.max()))
return results | python | def get_ordered_values(self, keypath, distance_from, open_interval=True):
"""
Retrieves the referents' values sorted by their distance from the
min, max, or mean value.
"""
values = []
if keypath[0] == 'target':
# instances start with 'target' prefix, but
# don't contain it, so we remove it here.
keypath = keypath[1:]
for _, instance in self.iter_singleton_referents():
value = instance.get_value_from_path(keypath)
if hasattr(value, 'low') and value.low != value.high:
return []
values.append(float(value))
if len(values) == 0:
return []
values = np.array(values)
anchor = values.min()
diffs = values - anchor
if distance_from == 'max':
anchor = values.max()
diffs = anchor - values
if distance_from == 'mean':
anchor = values.mean()
diffs = abs(anchor - values)
sdiffs = np.unique(diffs)
sdiffs.sort()
results = []
for ix, el in enumerate(sdiffs):
mask = diffs <= el
vals = values[mask]
if False:
# when vagueness has been made precise through an ordinal
results.append(IntervalCell(vals.min(), vals.max()))
elif distance_from == 'max':
if open_interval:
results.append(IntervalCell(vals.min(), np.inf))
else:
results.append(IntervalCell(vals.min(), vals.min()))
elif distance_from == 'min':
if open_interval:
results.append(IntervalCell(-np.inf, vals.max()))
else:
results.append(IntervalCell(vals.max(), vals.max()))
elif distance_from == 'mean':
if ix+1 == len(sdiffs): continue # skip last
results.append(IntervalCell(vals.min(), vals.max()))
return results | [
"def",
"get_ordered_values",
"(",
"self",
",",
"keypath",
",",
"distance_from",
",",
"open_interval",
"=",
"True",
")",
":",
"values",
"=",
"[",
"]",
"if",
"keypath",
"[",
"0",
"]",
"==",
"'target'",
":",
"# instances start with 'target' prefix, but ",
"# don't contain it, so we remove it here.",
"keypath",
"=",
"keypath",
"[",
"1",
":",
"]",
"for",
"_",
",",
"instance",
"in",
"self",
".",
"iter_singleton_referents",
"(",
")",
":",
"value",
"=",
"instance",
".",
"get_value_from_path",
"(",
"keypath",
")",
"if",
"hasattr",
"(",
"value",
",",
"'low'",
")",
"and",
"value",
".",
"low",
"!=",
"value",
".",
"high",
":",
"return",
"[",
"]",
"values",
".",
"append",
"(",
"float",
"(",
"value",
")",
")",
"if",
"len",
"(",
"values",
")",
"==",
"0",
":",
"return",
"[",
"]",
"values",
"=",
"np",
".",
"array",
"(",
"values",
")",
"anchor",
"=",
"values",
".",
"min",
"(",
")",
"diffs",
"=",
"values",
"-",
"anchor",
"if",
"distance_from",
"==",
"'max'",
":",
"anchor",
"=",
"values",
".",
"max",
"(",
")",
"diffs",
"=",
"anchor",
"-",
"values",
"if",
"distance_from",
"==",
"'mean'",
":",
"anchor",
"=",
"values",
".",
"mean",
"(",
")",
"diffs",
"=",
"abs",
"(",
"anchor",
"-",
"values",
")",
"sdiffs",
"=",
"np",
".",
"unique",
"(",
"diffs",
")",
"sdiffs",
".",
"sort",
"(",
")",
"results",
"=",
"[",
"]",
"for",
"ix",
",",
"el",
"in",
"enumerate",
"(",
"sdiffs",
")",
":",
"mask",
"=",
"diffs",
"<=",
"el",
"vals",
"=",
"values",
"[",
"mask",
"]",
"if",
"False",
":",
"# when vagueness has been made precise through an ordinal",
"results",
".",
"append",
"(",
"IntervalCell",
"(",
"vals",
".",
"min",
"(",
")",
",",
"vals",
".",
"max",
"(",
")",
")",
")",
"elif",
"distance_from",
"==",
"'max'",
":",
"if",
"open_interval",
":",
"results",
".",
"append",
"(",
"IntervalCell",
"(",
"vals",
".",
"min",
"(",
")",
",",
"np",
".",
"inf",
")",
")",
"else",
":",
"results",
".",
"append",
"(",
"IntervalCell",
"(",
"vals",
".",
"min",
"(",
")",
",",
"vals",
".",
"min",
"(",
")",
")",
")",
"elif",
"distance_from",
"==",
"'min'",
":",
"if",
"open_interval",
":",
"results",
".",
"append",
"(",
"IntervalCell",
"(",
"-",
"np",
".",
"inf",
",",
"vals",
".",
"max",
"(",
")",
")",
")",
"else",
":",
"results",
".",
"append",
"(",
"IntervalCell",
"(",
"vals",
".",
"max",
"(",
")",
",",
"vals",
".",
"max",
"(",
")",
")",
")",
"elif",
"distance_from",
"==",
"'mean'",
":",
"if",
"ix",
"+",
"1",
"==",
"len",
"(",
"sdiffs",
")",
":",
"continue",
"# skip last",
"results",
".",
"append",
"(",
"IntervalCell",
"(",
"vals",
".",
"min",
"(",
")",
",",
"vals",
".",
"max",
"(",
")",
")",
")",
"return",
"results"
] | Retrieves the referents' values sorted by their distance from the
min, max, or mean value. | [
"Retrieves",
"the",
"referents",
"s",
"values",
"sorted",
"by",
"their",
"distance",
"from",
"the",
"min",
"max",
"or",
"mid",
"value",
"."
] | c07d22b61bebeede74a72800030dde770bf64208 | https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/beliefstate.py#L184-L236 |
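The core idea above is grouping values by their distance from an anchor (min, max, or mean) and widening the interval one unique distance at a time. A numpy sketch of that grouping for the 'max' case, with hypothetical values:

import numpy as np

values = np.array([1.0, 2.0, 2.0, 5.0])
anchor = values.max()             # distance_from == 'max'
diffs = anchor - values
for d in np.unique(diffs):        # np.unique already returns sorted thresholds
    print(d, values[diffs <= d])  # the interval widens away from the anchor
# prints: 0.0 [5.]  then  3.0 [2. 2. 5.]  then  4.0 [1. 2. 2. 5.]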
249,580 | EventTeam/beliefs | src/beliefs/beliefstate.py | BeliefState.get_paths_for_attribute | def get_paths_for_attribute(self, attribute_name):
"""
Returns a path list to all attributes that have with a particular name.
"""
has_name = lambda name, structure: name == attribute_name
return self.find_path(has_name, on_targets=True) | python | def get_paths_for_attribute(self, attribute_name):
"""
Returns a path list to all attributes that have with a particular name.
"""
has_name = lambda name, structure: name == attribute_name
return self.find_path(has_name, on_targets=True) | [
"def",
"get_paths_for_attribute",
"(",
"self",
",",
"attribute_name",
")",
":",
"has_name",
"=",
"lambda",
"name",
",",
"structure",
":",
"name",
"==",
"attribute_name",
"return",
"self",
".",
"find_path",
"(",
"has_name",
",",
"on_targets",
"=",
"True",
")"
] | Returns a path list to all attributes that have with a particular name. | [
"Returns",
"a",
"path",
"list",
"to",
"all",
"attributes",
"that",
"have",
"with",
"a",
"particular",
"name",
"."
] | c07d22b61bebeede74a72800030dde770bf64208 | https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/beliefstate.py#L263-L268 |
249,581 | EventTeam/beliefs | src/beliefs/beliefstate.py | BeliefState.add_cell | def add_cell(self, keypath, cell):
""" Adds a new cell to the end of `keypath` of type `cell`"""
keypath = keypath[:] # copy
inner = self # the most inner dict where cell is added
cellname = keypath # the name of the cell
assert keypath not in self, "Already exists: %s " % (str(keypath))
if isinstance(keypath, list):
while len(keypath) > 1:
cellname = keypath.pop(0)
if cellname not in inner:
inner.__dict__['p'][cellname] = DictCell()
inner = inner[cellname] # move in one
cellname = keypath[0]
# now we can add 'cellname'->(Cell) to inner (DictCell)
inner.__dict__['p'][cellname] = cell
return inner[cellname] | python | def add_cell(self, keypath, cell):
""" Adds a new cell to the end of `keypath` of type `cell`"""
keypath = keypath[:] # copy
inner = self # the most inner dict where cell is added
cellname = keypath # the name of the cell
assert keypath not in self, "Already exists: %s " % (str(keypath))
if isinstance(keypath, list):
while len(keypath) > 1:
cellname = keypath.pop(0)
if cellname not in inner:
inner.__dict__['p'][cellname] = DictCell()
inner = inner[cellname] # move in one
cellname = keypath[0]
# now we can add 'cellname'->(Cell) to inner (DictCell)
inner.__dict__['p'][cellname] = cell
return inner[cellname] | [
"def",
"add_cell",
"(",
"self",
",",
"keypath",
",",
"cell",
")",
":",
"keypath",
"=",
"keypath",
"[",
":",
"]",
"# copy",
"inner",
"=",
"self",
"# the most inner dict where cell is added",
"cellname",
"=",
"keypath",
"# the name of the cell",
"assert",
"keypath",
"not",
"in",
"self",
",",
"\"Already exists: %s \"",
"%",
"(",
"str",
"(",
"keypath",
")",
")",
"if",
"isinstance",
"(",
"keypath",
",",
"list",
")",
":",
"while",
"len",
"(",
"keypath",
")",
">",
"1",
":",
"cellname",
"=",
"keypath",
".",
"pop",
"(",
"0",
")",
"if",
"cellname",
"not",
"in",
"inner",
":",
"inner",
".",
"__dict__",
"[",
"'p'",
"]",
"[",
"cellname",
"]",
"=",
"DictCell",
"(",
")",
"inner",
"=",
"inner",
"[",
"cellname",
"]",
"# move in one",
"cellname",
"=",
"keypath",
"[",
"0",
"]",
"# now we can add 'cellname'->(Cell) to inner (DictCell)",
"inner",
".",
"__dict__",
"[",
"'p'",
"]",
"[",
"cellname",
"]",
"=",
"cell",
"return",
"inner",
"[",
"cellname",
"]"
] | Adds a new cell to the end of `keypath` of type `cell` | [
"Adds",
"a",
"new",
"cell",
"to",
"the",
"end",
"of",
"keypath",
"of",
"type",
"cell"
] | c07d22b61bebeede74a72800030dde770bf64208 | https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/beliefstate.py#L323-L338 |
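The walk-and-create behavior of add_cell is the same as chained setdefault on nested dicts. A plain-dict sketch (the names below are hypothetical):

def add_cell(root, keypath, value):
    keypath = keypath[:]  # copy, as the method above does
    inner = root
    while len(keypath) > 1:
        inner = inner.setdefault(keypath.pop(0), {})  # create intermediate dicts
    inner[keypath[0]] = value
    return value

state = {}
add_cell(state, ['speaker', 'mood'], 'happy')
print(state)  # {'speaker': {'mood': 'happy'}}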
249,582 | EventTeam/beliefs | src/beliefs/beliefstate.py | BeliefState.size | def size(self):
""" Returns the size of the belief state.
Initially if there are $n$ consistent members, (the result of `self.number_of_singleton_referents()`)
then there are generally $2^{n}-1$ valid belief states.
"""
n = self.number_of_singleton_referents()
targets = list(self.iter_referents_tuples())
n_targets = len(targets)
if n == 0 or n_targets == 0:
return 0
#if len(self.__dict__['deferred_effects']) != 0:
# return -1
size1 = len(list(self.iter_referents_tuples()))
tlow, thigh = self['targetset_arity'].get_tuple()
clow, chigh = self['contrast_arity'].get_tuple()
return size1 | python | def size(self):
""" Returns the size of the belief state.
Initially if there are $n$ consistent members, (the result of `self.number_of_singleton_referents()`)
then there are generally $2^{n}-1$ valid belief states.
"""
n = self.number_of_singleton_referents()
targets = list(self.iter_referents_tuples())
n_targets = len(targets)
if n == 0 or n_targets == 0:
return 0
#if len(self.__dict__['deferred_effects']) != 0:
# return -1
size1 = len(list(self.iter_referents_tuples()))
tlow, thigh = self['targetset_arity'].get_tuple()
clow, chigh = self['contrast_arity'].get_tuple()
return size1 | [
"def",
"size",
"(",
"self",
")",
":",
"n",
"=",
"self",
".",
"number_of_singleton_referents",
"(",
")",
"targets",
"=",
"list",
"(",
"self",
".",
"iter_referents_tuples",
"(",
")",
")",
"n_targets",
"=",
"len",
"(",
"targets",
")",
"if",
"n",
"==",
"0",
"or",
"n_targets",
"==",
"0",
":",
"return",
"0",
"#if len(self.__dict__['deferred_effects']) != 0:",
"# return -1 ",
"size1",
"=",
"len",
"(",
"list",
"(",
"self",
".",
"iter_referents_tuples",
"(",
")",
")",
")",
"tlow",
",",
"thigh",
"=",
"self",
"[",
"'targetset_arity'",
"]",
".",
"get_tuple",
"(",
")",
"clow",
",",
"chigh",
"=",
"self",
"[",
"'contrast_arity'",
"]",
".",
"get_tuple",
"(",
")",
"return",
"size1"
] | Returns the size of the belief state.
Initially if there are $n$ consistent members, (the result of `self.number_of_singleton_referents()`)
then there are generally $2^{n}-1$ valid belief states. | [
"Returns",
"the",
"size",
"of",
"the",
"belief",
"state",
"."
] | c07d22b61bebeede74a72800030dde770bf64208 | https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/beliefstate.py#L402-L419 |
249,583 | EventTeam/beliefs | src/beliefs/beliefstate.py | BeliefState.iter_referents | def iter_referents(self):
""" Generates target sets that are compatible with the current beliefstate. """
tlow, thigh = self['targetset_arity'].get_tuple()
clow, chigh = self['contrast_arity'].get_tuple()
referents = list(self.iter_singleton_referents())
t = len(referents)
low = max(1, tlow)
high = min([t, thigh])
for targets in itertools.chain.from_iterable(itertools.combinations(referents, r) \
for r in reversed(xrange(low, high+1))):
if clow <= t-len(targets) <= chigh:
yield targets | python | def iter_referents(self):
""" Generates target sets that are compatible with the current beliefstate. """
tlow, thigh = self['targetset_arity'].get_tuple()
clow, chigh = self['contrast_arity'].get_tuple()
referents = list(self.iter_singleton_referents())
t = len(referents)
low = max(1, tlow)
high = min([t, thigh])
for targets in itertools.chain.from_iterable(itertools.combinations(referents, r) \
for r in reversed(xrange(low, high+1))):
if clow <= t-len(targets) <= chigh:
yield targets | [
"def",
"iter_referents",
"(",
"self",
")",
":",
"tlow",
",",
"thigh",
"=",
"self",
"[",
"'targetset_arity'",
"]",
".",
"get_tuple",
"(",
")",
"clow",
",",
"chigh",
"=",
"self",
"[",
"'contrast_arity'",
"]",
".",
"get_tuple",
"(",
")",
"referents",
"=",
"list",
"(",
"self",
".",
"iter_singleton_referents",
"(",
")",
")",
"t",
"=",
"len",
"(",
"referents",
")",
"low",
"=",
"max",
"(",
"1",
",",
"tlow",
")",
"high",
"=",
"min",
"(",
"[",
"t",
",",
"thigh",
"]",
")",
"for",
"targets",
"in",
"itertools",
".",
"chain",
".",
"from_iterable",
"(",
"itertools",
".",
"combinations",
"(",
"referents",
",",
"r",
")",
"for",
"r",
"in",
"reversed",
"(",
"xrange",
"(",
"low",
",",
"high",
"+",
"1",
")",
")",
")",
":",
"if",
"clow",
"<=",
"t",
"-",
"len",
"(",
"targets",
")",
"<=",
"chigh",
":",
"yield",
"targets"
] | Generates target sets that are compatible with the current beliefstate. | [
"Generates",
"target",
"sets",
"that",
"are",
"compatible",
"with",
"the",
"current",
"beliefstate",
"."
] | c07d22b61bebeede74a72800030dde770bf64208 | https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/beliefstate.py#L431-L444 |
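The enumeration above reduces to itertools.combinations filtered by arity bounds. A standalone sketch, with hypothetical bounds standing in for targetset_arity and contrast_arity:

import itertools

def iter_target_sets(referents, tlow, thigh, clow, chigh):
    t = len(referents)
    low, high = max(1, tlow), min(t, thigh)
    # Largest target sets first, mirroring reversed(range(low, high + 1)).
    for r in reversed(range(low, high + 1)):
        for targets in itertools.combinations(referents, r):
            # Keep only splits whose contrast-set size fits the bounds.
            if clow <= t - len(targets) <= chigh:
                yield targets

print(list(iter_target_sets(['r1', 'r2', 'r3'], 1, 2, 0, 2)))  # 6 tuples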
249,584 | EventTeam/beliefs | src/beliefs/beliefstate.py | BeliefState.number_of_singleton_referents | def number_of_singleton_referents(self):
"""
Returns the number of singleton elements of the referential domain that are
compatible with the current belief state.
This is the size of the union of all referent sets.
"""
if self.__dict__['referential_domain']:
ct = 0
for i in self.iter_singleton_referents():
ct += 1
return ct
else:
raise Exception("self.referential_domain must be defined") | python | def number_of_singleton_referents(self):
"""
Returns the number of singleton elements of the referential domain that are
compatible with the current belief state.
This is the size of the union of all referent sets.
"""
if self.__dict__['referential_domain']:
ct = 0
for i in self.iter_singleton_referents():
ct += 1
return ct
else:
raise Exception("self.referential_domain must be defined") | [
"def",
"number_of_singleton_referents",
"(",
"self",
")",
":",
"if",
"self",
".",
"__dict__",
"[",
"'referential_domain'",
"]",
":",
"ct",
"=",
"0",
"for",
"i",
"in",
"self",
".",
"iter_singleton_referents",
"(",
")",
":",
"ct",
"+=",
"1",
"return",
"ct",
"else",
":",
"raise",
"Exception",
"(",
"\"self.referential_domain must be defined\"",
")"
] | Returns the number of singleton elements of the referential domain that are
compatible with the current belief state.
This is the size of the union of all referent sets. | [
"Returns",
"the",
"number",
"of",
"singleton",
"elements",
"of",
"the",
"referential",
"domain",
"that",
"are",
"compatible",
"with",
"the",
"current",
"belief",
"state",
"."
] | c07d22b61bebeede74a72800030dde770bf64208 | https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/beliefstate.py#L460-L473 |
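The explicit counting loop above can also be written as a one-line idiom that consumes the iterator without materializing it; a generic sketch:

def count_items(iterable):
    # Count while consuming, without building an intermediate list.
    return sum(1 for _ in iterable)

assert count_items(x for x in range(5) if x % 2) == 2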
249,585 | EventTeam/beliefs | src/beliefs/beliefstate.py | BeliefState.iter_singleton_referents | def iter_singleton_referents(self):
"""
Iterator of all of the singleton members of the context set.
NOTE: this evaluates entities one-at-a-time, and does not handle relational constraints.
"""
try:
for member in self.__dict__['referential_domain'].iter_entities():
if self['target'].is_entailed_by(member) and (self['distractor'].empty() or not self['distractor'].is_entailed_by(member)):
yield member['num'], member
except KeyError:
raise Exception("No referential_domain defined") | python | def iter_singleton_referents(self):
"""
Iterator of all of the singleton members of the context set.
NOTE: this evaluates entities one-at-a-time, and does not handle relational constraints.
"""
try:
for member in self.__dict__['referential_domain'].iter_entities():
if self['target'].is_entailed_by(member) and (self['distractor'].empty() or not self['distractor'].is_entailed_by(member)):
yield member['num'], member
except KeyError:
raise Exception("No referential_domain defined") | [
"def",
"iter_singleton_referents",
"(",
"self",
")",
":",
"try",
":",
"for",
"member",
"in",
"self",
".",
"__dict__",
"[",
"'referential_domain'",
"]",
".",
"iter_entities",
"(",
")",
":",
"if",
"self",
"[",
"'target'",
"]",
".",
"is_entailed_by",
"(",
"member",
")",
"and",
"(",
"self",
"[",
"'distractor'",
"]",
".",
"empty",
"(",
")",
"or",
"not",
"self",
"[",
"'distractor'",
"]",
".",
"is_entailed_by",
"(",
"member",
")",
")",
":",
"yield",
"member",
"[",
"'num'",
"]",
",",
"member",
"except",
"KeyError",
":",
"raise",
"Exception",
"(",
"\"No referential_domain defined\"",
")"
] | Iterator of all of the singleton members of the context set.
NOTE: this evaluates entities one-at-a-time, and does not handle relational constraints. | [
"Iterator",
"of",
"all",
"of",
"the",
"singleton",
"members",
"of",
"the",
"context",
"set",
"."
] | c07d22b61bebeede74a72800030dde770bf64208 | https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/beliefstate.py#L475-L486 |
249,586 | EventTeam/beliefs | src/beliefs/beliefstate.py | BeliefState.iter_singleton_referents_tuples | def iter_singleton_referents_tuples(self):
"""
Iterator of the id numbers of all of the singleton members of the context set.
NOTE: this evaluates entities one-at-a-time, and does not handle relational constraints.
"""
try:
for member in self.__dict__['referential_domain'].iter_entities():
if self['target'].is_entailed_by(member) and (self['distractor'].empty() or not self['distractor'].is_entailed_by(member)):
yield member['num'].low
except KeyError:
raise Exception("No referential_domain defined") | python | def iter_singleton_referents_tuples(self):
"""
Iterator of the id numbers of all of the singleton members of the context set.
NOTE: this evaluates entities one-at-a-time, and does not handle relational constraints.
"""
try:
for member in self.__dict__['referential_domain'].iter_entities():
if self['target'].is_entailed_by(member) and (self['distractor'].empty() or not self['distractor'].is_entailed_by(member)):
yield member['num'].low
except KeyError:
raise Exception("No referential_domain defined") | [
"def",
"iter_singleton_referents_tuples",
"(",
"self",
")",
":",
"try",
":",
"for",
"member",
"in",
"self",
".",
"__dict__",
"[",
"'referential_domain'",
"]",
".",
"iter_entities",
"(",
")",
":",
"if",
"self",
"[",
"'target'",
"]",
".",
"is_entailed_by",
"(",
"member",
")",
"and",
"(",
"self",
"[",
"'distractor'",
"]",
".",
"empty",
"(",
")",
"or",
"not",
"self",
"[",
"'distractor'",
"]",
".",
"is_entailed_by",
"(",
"member",
")",
")",
":",
"yield",
"member",
"[",
"'num'",
"]",
".",
"low",
"except",
"KeyError",
":",
"raise",
"Exception",
"(",
"\"No referential_domain defined\"",
")"
] | Iterator of the id numbers of all of the singleton members of the context set.
NOTE: this evaluates entities one-at-a-time, and does not handle relational constraints. | [
"Iterator",
"of",
"all",
"of",
"the",
"singleton",
"members",
"s",
"id",
"number",
"of",
"the",
"context",
"set",
"."
] | c07d22b61bebeede74a72800030dde770bf64208 | https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/beliefstate.py#L488-L499 |
249,587 | EventTeam/beliefs | src/beliefs/beliefstate.py | BeliefState.copy | def copy(self):
"""
Copies the BeliefState by recursively deep-copying all of
its parts. Domains are not copied, as they do not change
during the interpretation or generation.
"""
copied = BeliefState(self.__dict__['referential_domain'])
for key in ['environment_variables', 'deferred_effects', 'pos', 'p']:
copied.__dict__[key] = copy.deepcopy(self.__dict__[key])
return copied | python | def copy(self):
"""
Copies the BeliefState by recursively deep-copying all of
its parts. Domains are not copied, as they do not change
during the interpretation or generation.
"""
copied = BeliefState(self.__dict__['referential_domain'])
for key in ['environment_variables', 'deferred_effects', 'pos', 'p']:
copied.__dict__[key] = copy.deepcopy(self.__dict__[key])
return copied | [
"def",
"copy",
"(",
"self",
")",
":",
"copied",
"=",
"BeliefState",
"(",
"self",
".",
"__dict__",
"[",
"'referential_domain'",
"]",
")",
"for",
"key",
"in",
"[",
"'environment_variables'",
",",
"'deferred_effects'",
",",
"'pos'",
",",
"'p'",
"]",
":",
"copied",
".",
"__dict__",
"[",
"key",
"]",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"__dict__",
"[",
"key",
"]",
")",
"return",
"copied"
] | Copies the BeliefState by recursively deep-copying all of
its parts. Domains are not copied, as they do not change
during the interpretation or generation. | [
"Copies",
"the",
"BeliefState",
"by",
"recursively",
"deep",
"-",
"copying",
"all",
"of",
"its",
"parts",
".",
"Domains",
"are",
"not",
"copied",
"as",
"they",
"do",
"not",
"change",
"during",
"the",
"interpretation",
"or",
"generation",
"."
] | c07d22b61bebeede74a72800030dde770bf64208 | https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/beliefstate.py#L515-L524 |
249,588 | tschaume/ccsgp_get_started | ccsgp_get_started/examples/gp_datdir.py | gp_datdir | def gp_datdir(initial, topN):
"""example for plotting from a text file via numpy.loadtxt
1. prepare input/output directories
2. load the data into an OrderedDict() [adjust axes units]
3. sort countries from highest to lowest population
4. select the <topN> most populated countries
5. call ccsgp.make_plot with data from 4
Below is an output image for country initial T and the 4 most populated
countries for this initial (click to enlarge). Also see::
$ python -m ccsgp_get_started.examples.gp_datdir -h
for help on the command line options.
.. image:: pics/T.png
:width: 450 px
.. image:: pics/U.png
:width: 450 px
:param initial: country initial
:type initial: str
:param topN: number of most populated countries to plot
:type topN: int
:ivar inDir: input directory according to package structure and initial
:ivar outDir: output directory according to package structure
:ivar data: OrderedDict with datasets to plot as separate keys
:ivar file: data input file for specific country, format: [x y] OR [x y dx dy]
:ivar country: country, filename stem of input file
:ivar file_url: absolute url to input file
:ivar nSets: number of datasets
"""
# prepare input/output directories
inDir, outDir = getWorkDirs()
initial = initial.capitalize()
inDir = os.path.join(inDir, initial)
if not os.path.exists(inDir): # catch missing initial
return "initial %s doesn't exist" % initial
# prepare data
data = OrderedDict()
for file in os.listdir(inDir):
country = os.path.splitext(file)[0]
file_url = os.path.join(inDir, file)
data[country] = np.loadtxt(open(file_url, 'rb')) # load data
# set y-axis unit to 1M
data[country][:, 1] /= 1e6
if data[country].shape[1] > 2: data[country][:, 3:] /= 1e6
logging.debug(data) # shown if --log flag given on command line
# sort countries according to mean population (highest -> lowest)
sorted_data = OrderedDict(sorted(
data.items(), key = lambda t: np.mean(t[1][:,1]), reverse = True
))
# "pop" (select) N most populated countries
top_data = OrderedDict(
sorted_data.popitem(last = False) for i in xrange(topN)
if sorted_data
)
# generate plot using ccsgp.make_plot
nSets = len(top_data)
make_plot(
data = top_data.values(),
properties = [ getOpts(i) for i in xrange(nSets) ],
titles = top_data.keys(), # use data keys as legend titles
name = os.path.join(outDir, initial),
key = [ 'at graph 1., 1.2', 'maxrows 2' ],
ylabel = 'total population ({/Symbol \664} 10^{6})',
xlabel = 'year', rmargin = 0.99, tmargin = 0.85, size='8.5in,8in'
)
return 'done' | python | def gp_datdir(initial, topN):
"""example for plotting from a text file via numpy.loadtxt
1. prepare input/output directories
2. load the data into an OrderedDict() [adjust axes units]
3. sort countries from highest to lowest population
4. select the <topN> most populated countries
5. call ccsgp.make_plot with data from 4
Below is an output image for country initial T and the 4 most populated
countries for this initial (click to enlarge). Also see::
$ python -m ccsgp_get_started.examples.gp_datdir -h
for help on the command line options.
.. image:: pics/T.png
:width: 450 px
.. image:: pics/U.png
:width: 450 px
:param initial: country initial
:type initial: str
:param topN: number of most populated countries to plot
:type topN: int
:ivar inDir: input directory according to package structure and initial
:ivar outDir: output directory according to package structure
:ivar data: OrderedDict with datasets to plot as separate keys
:ivar file: data input file for specific country, format: [x y] OR [x y dx dy]
:ivar country: country, filename stem of input file
:ivar file_url: absolute url to input file
:ivar nSets: number of datasets
"""
# prepare input/output directories
inDir, outDir = getWorkDirs()
initial = initial.capitalize()
inDir = os.path.join(inDir, initial)
if not os.path.exists(inDir): # catch missing initial
return "initial %s doesn't exist" % initial
# prepare data
data = OrderedDict()
for file in os.listdir(inDir):
country = os.path.splitext(file)[0]
file_url = os.path.join(inDir, file)
data[country] = np.loadtxt(open(file_url, 'rb')) # load data
# set y-axis unit to 1M
data[country][:, 1] /= 1e6
if data[country].shape[1] > 2: data[country][:, 3:] /= 1e6
logging.debug(data) # shown if --log flag given on command line
# sort countries according to mean population (highest -> lowest)
sorted_data = OrderedDict(sorted(
data.items(), key = lambda t: np.mean(t[1][:,1]), reverse = True
))
# "pop" (select) N most populated countries
top_data = OrderedDict(
sorted_data.popitem(last = False) for i in xrange(topN)
if sorted_data
)
# generate plot using ccsgp.make_plot
nSets = len(top_data)
make_plot(
data = top_data.values(),
properties = [ getOpts(i) for i in xrange(nSets) ],
titles = top_data.keys(), # use data keys as legend titles
name = os.path.join(outDir, initial),
key = [ 'at graph 1., 1.2', 'maxrows 2' ],
ylabel = 'total population ({/Symbol \664} 10^{6})',
xlabel = 'year', rmargin = 0.99, tmargin = 0.85, size='8.5in,8in'
)
return 'done' | [
"def",
"gp_datdir",
"(",
"initial",
",",
"topN",
")",
":",
"# prepare input/output directories",
"inDir",
",",
"outDir",
"=",
"getWorkDirs",
"(",
")",
"initial",
"=",
"initial",
".",
"capitalize",
"(",
")",
"inDir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"inDir",
",",
"initial",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"inDir",
")",
":",
"# catch missing initial",
"return",
"\"initial %s doesn't exist\"",
"%",
"initial",
"# prepare data",
"data",
"=",
"OrderedDict",
"(",
")",
"for",
"file",
"in",
"os",
".",
"listdir",
"(",
"inDir",
")",
":",
"country",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"file",
")",
"[",
"0",
"]",
"file_url",
"=",
"os",
".",
"path",
".",
"join",
"(",
"inDir",
",",
"file",
")",
"data",
"[",
"country",
"]",
"=",
"np",
".",
"loadtxt",
"(",
"open",
"(",
"file_url",
",",
"'rb'",
")",
")",
"# load data",
"# set y-axis unit to 1M",
"data",
"[",
"country",
"]",
"[",
":",
",",
"1",
"]",
"/=",
"1e6",
"if",
"data",
"[",
"country",
"]",
".",
"shape",
"[",
"1",
"]",
">",
"2",
":",
"data",
"[",
"country",
"]",
"[",
":",
",",
"3",
":",
"]",
"/=",
"1e6",
"logging",
".",
"debug",
"(",
"data",
")",
"# shown if --log flag given on command line",
"# sort countries according to mean population (highest -> lowest)",
"sorted_data",
"=",
"OrderedDict",
"(",
"sorted",
"(",
"data",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"t",
":",
"np",
".",
"mean",
"(",
"t",
"[",
"1",
"]",
"[",
":",
",",
"1",
"]",
")",
",",
"reverse",
"=",
"True",
")",
")",
"# \"pop\" (select) N most populated countries",
"top_data",
"=",
"OrderedDict",
"(",
"sorted_data",
".",
"popitem",
"(",
"last",
"=",
"False",
")",
"for",
"i",
"in",
"xrange",
"(",
"topN",
")",
"if",
"sorted_data",
")",
"# generate plot using ccsgp.make_plot",
"nSets",
"=",
"len",
"(",
"top_data",
")",
"make_plot",
"(",
"data",
"=",
"top_data",
".",
"values",
"(",
")",
",",
"properties",
"=",
"[",
"getOpts",
"(",
"i",
")",
"for",
"i",
"in",
"xrange",
"(",
"nSets",
")",
"]",
",",
"titles",
"=",
"top_data",
".",
"keys",
"(",
")",
",",
"# use data keys as legend titles",
"name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"outDir",
",",
"initial",
")",
",",
"key",
"=",
"[",
"'at graph 1., 1.2'",
",",
"'maxrows 2'",
"]",
",",
"ylabel",
"=",
"'total population ({/Symbol \\664} 10^{6})'",
",",
"xlabel",
"=",
"'year'",
",",
"rmargin",
"=",
"0.99",
",",
"tmargin",
"=",
"0.85",
",",
"size",
"=",
"'8.5in,8in'",
")",
"return",
"'done'"
] | example for plotting from a text file via numpy.loadtxt
1. prepare input/output directories
2. load the data into an OrderedDict() [adjust axes units]
3. sort countries from highest to lowest population
4. select the <topN> most populated countries
5. call ccsgp.make_plot with data from 4
Below is an output image for country initial T and the 4 most populated
countries for this initial (click to enlarge). Also see::
$ python -m ccsgp_get_started.examples.gp_datdir -h
for help on the command line options.
.. image:: pics/T.png
:width: 450 px
.. image:: pics/U.png
:width: 450 px
:param initial: country initial
:type initial: str
:param topN: number of most populated countries to plot
:type topN: int
:ivar inDir: input directory according to package structure and initial
:ivar outDir: output directory according to package structure
:ivar data: OrderedDict with datasets to plot as separate keys
:ivar file: data input file for specific country, format: [x y] OR [x y dx dy]
:ivar country: country, filename stem of input file
:ivar file_url: absolute url to input file
:ivar nSets: number of datasets | [
"example",
"for",
"plotting",
"from",
"a",
"text",
"file",
"via",
"numpy",
".",
"loadtxt"
] | e4e29844a3e6fc7574e9b4b8cd84131f28ddc3f2 | https://github.com/tschaume/ccsgp_get_started/blob/e4e29844a3e6fc7574e9b4b8cd84131f28ddc3f2/ccsgp_get_started/examples/gp_datdir.py#L8-L78 |
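The sort-and-select step of the plotting example is reproducible in isolation; the toy population figures below are invented:

from collections import OrderedDict
import numpy as np

data = {
    'Tonga':    np.array([[2000, 0.10], [2010, 0.11]]),
    'Turkey':   np.array([[2000, 63.2], [2010, 72.3]]),
    'Thailand': np.array([[2000, 62.9], [2010, 66.4]]),
}
# Sort by the mean of the population column, highest first.
ranked = OrderedDict(sorted(data.items(),
                            key=lambda t: np.mean(t[1][:, 1]),
                            reverse=True))
# popitem(last=False) pops from the front, i.e. the most populated entry.
top2 = OrderedDict(ranked.popitem(last=False) for _ in range(2))
print(list(top2))  # ['Turkey', 'Thailand']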
249,589 | ulf1/oxyba | oxyba/linreg_ols_pinv.py | linreg_ols_pinv | def linreg_ols_pinv(y, X, rcond=1e-15):
"""Linear Regression, OLS, by multiplying with Pseudoinverse"""
import numpy as np
try: # multiply with inverse to compute coefficients
return np.dot(np.linalg.pinv(
np.dot(X.T, X), rcond=rcond), np.dot(X.T, y))
except np.linalg.LinAlgError:
print("LinAlgError: SVD does not converge")
return None | python | def linreg_ols_pinv(y, X, rcond=1e-15):
"""Linear Regression, OLS, by multiplying with Pseudoinverse"""
import numpy as np
try: # multiply with inverse to compute coefficients
return np.dot(np.linalg.pinv(
np.dot(X.T, X), rcond=rcond), np.dot(X.T, y))
except np.linalg.LinAlgError:
print("LinAlgError: SVD does not converge")
return None | [
"def",
"linreg_ols_pinv",
"(",
"y",
",",
"X",
",",
"rcond",
"=",
"1e-15",
")",
":",
"import",
"numpy",
"as",
"np",
"try",
":",
"# multiply with inverse to compute coefficients",
"return",
"np",
".",
"dot",
"(",
"np",
".",
"linalg",
".",
"pinv",
"(",
"np",
".",
"dot",
"(",
"X",
".",
"T",
",",
"X",
")",
",",
"rcond",
"=",
"rcond",
")",
",",
"np",
".",
"dot",
"(",
"X",
".",
"T",
",",
"y",
")",
")",
"except",
"np",
".",
"linalg",
".",
"LinAlgError",
":",
"print",
"(",
"\"LinAlgError: SVD does not converge\"",
")",
"return",
"None"
] | Linear Regression, OLS, by multiplying with Pseudoinverse | [
"Linear",
"Regression",
"OLS",
"by",
"multiplying",
"with",
"Pseudoinverse"
] | b3043116050de275124365cb11e7df91fb40169d | https://github.com/ulf1/oxyba/blob/b3043116050de275124365cb11e7df91fb40169d/oxyba/linreg_ols_pinv.py#L2-L10 |
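A quick numerical check of the pseudoinverse route against numpy's least-squares solver, on synthetic data:

import numpy as np

rng = np.random.default_rng(0)
X = np.column_stack([np.ones(50), rng.normal(size=50)])
y = X @ np.array([2.0, -1.5]) + 0.01 * rng.normal(size=50)

# beta = (X'X)^+ X'y, exactly as in linreg_ols_pinv above
beta_pinv = np.linalg.pinv(X.T @ X) @ (X.T @ y)
beta_lstsq, *_ = np.linalg.lstsq(X, y, rcond=None)
assert np.allclose(beta_pinv, beta_lstsq)
print(beta_pinv)  # close to [2.0, -1.5]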
249,590 | xethorn/oto | oto/response.py | create_error_response | def create_error_response(code, message, status=status.BAD_REQUEST):
"""Create a fail response.
Args:
code (str): the code of the error. The code should be lowercase and
underscore separated.
message (dict, list, str): the message of the error.
This can be a list, dictionary or simple string.
status (int): the status code. Defaults to 400.
Returns:
Response: the response with the error. The format of the error is the
following: code and message. The code could be `user_error` or
`internal_error`. The message contains either a string, or a list
or a dictionary. If not specified, the status defaults to 400.
"""
errors = dict(code=code, message=message)
return Response(errors=errors, status=status) | python | def create_error_response(code, message, status=status.BAD_REQUEST):
"""Create a fail response.
Args:
code (str): the code of the error. The code should be lowercase and
underscore separated.
message (dict, list, str): the message of the error.
This can be a list, dictionary or simple string.
status (int): the status code. Defaults to 400.
Returns:
Response: the response with the error. The format of the error is the
following: code and message. The code could be `user_error` or
`internal_error`. The message contains either a string, or a list
or a dictionary. If not specified, the status defaults to 400.
"""
errors = dict(code=code, message=message)
return Response(errors=errors, status=status) | [
"def",
"create_error_response",
"(",
"code",
",",
"message",
",",
"status",
"=",
"status",
".",
"BAD_REQUEST",
")",
":",
"errors",
"=",
"dict",
"(",
"code",
"=",
"code",
",",
"message",
"=",
"message",
")",
"return",
"Response",
"(",
"errors",
"=",
"errors",
",",
"status",
"=",
"status",
")"
] | Create a fail response.
Args:
code (str): the code of the error. The code should be lowercase and
underscore separated.
message (dict, list, str): the message of the error.
This can be a list, dictionary or simple string.
status (int): the status code. Defaults to 400.
Returns:
Response: the response with the error. The format of the error is the
following: code and message. The code could be `user_error` or
`internal_error`. The message contains either a string, or a list
or a dictionary. If not specified, the status defaults to 400. | [
"Create",
"a",
"fail",
"response",
"."
] | 2a76d374ccc4c85fdf81ae1c43698a94c0594d7b | https://github.com/xethorn/oto/blob/2a76d374ccc4c85fdf81ae1c43698a94c0594d7b/oto/response.py#L71-L89 |
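A hedged usage sketch of the helper above; the attributes of the returned Response object are assumptions based on the docstring:

response = create_error_response(
    code='user_error',
    message={'email': 'already registered'})
# response.errors is expected to hold:
# {'code': 'user_error', 'message': {'email': 'already registered'}}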
249,591 | kervi/kervi-core | kervi/values/__init__.py | NumberValue.display_unit | def display_unit(self):
"""
Display unit of value.
:type: ``str``
"""
if self._display_unit:
return self._display_unit
elif self._Q:
config = Configuration.display.unit_systems
default_system = Configuration.unit_system
units = config.systems[default_system]
self._display_unit = units.get(self._type, self._unit)
if self._type == "temperature":
from_unit = "deg" + self._unit.upper()
to_unit = "deg" + self._display_unit.upper()
else:
from_unit = self._unit
to_unit = self._display_unit
#print("dv", from_unit, to_unit)
self._q_unit = self._Q("1 " + from_unit)
self._q_display = self._Q("1 " + to_unit)
return self._display_unit | python | def display_unit(self):
"""
Display unit of value.
:type: ``str``
"""
if self._display_unit:
return self._display_unit
elif self._Q:
config = Configuration.display.unit_systems
default_system = Configuration.unit_system
units = config.systems[default_system]
self._display_unit = units.get(self._type, self._unit)
if self._type == "temperature":
from_unit = "deg" + self._unit.upper()
to_unit = "deg" + self._display_unit.upper()
else:
from_unit = self._unit
to_unit = self._display_unit
#print("dv", from_unit, to_unit)
self._q_unit = self._Q("1 " + from_unit)
self._q_display = self._Q("1 " + to_unit)
return self._display_unit | [
"def",
"display_unit",
"(",
"self",
")",
":",
"if",
"self",
".",
"_display_unit",
":",
"return",
"self",
".",
"_display_unit",
"elif",
"self",
".",
"_Q",
":",
"config",
"=",
"Configuration",
".",
"display",
".",
"unit_systems",
"default_system",
"=",
"Configuration",
".",
"unit_system",
"units",
"=",
"config",
".",
"systems",
"[",
"default_system",
"]",
"self",
".",
"_display_unit",
"=",
"units",
".",
"get",
"(",
"self",
".",
"_type",
",",
"self",
".",
"_unit",
")",
"if",
"self",
".",
"_type",
"==",
"\"temperature\"",
":",
"from_unit",
"=",
"\"deg\"",
"+",
"self",
".",
"_unit",
".",
"upper",
"(",
")",
"to_unit",
"=",
"\"deg\"",
"+",
"self",
".",
"_display_unit",
".",
"upper",
"(",
")",
"else",
":",
"from_unit",
"=",
"self",
".",
"_unit",
"to_unit",
"=",
"self",
".",
"_display_unit",
"#print(\"dv\", from_unit, to_unit)",
"self",
".",
"_q_unit",
"=",
"self",
".",
"_Q",
"(",
"\"1 \"",
"+",
"from_unit",
")",
"self",
".",
"_q_display",
"=",
"self",
".",
"_Q",
"(",
"\"1 \"",
"+",
"to_unit",
")",
"return",
"self",
".",
"_display_unit"
] | Display unit of value.
:type: ``str`` | [
"Display",
"unit",
"of",
"value",
"."
] | 3c1e3c8a17a7b4d085d8a28b99180ff2a96b0e23 | https://github.com/kervi/kervi-core/blob/3c1e3c8a17a7b4d085d8a28b99180ff2a96b0e23/kervi/values/__init__.py#L129-L156 |
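The _Q attribute suggests a pint Quantity factory; assuming pint, the unit lookup and conversion reduce to something like the sketch below (the per-system unit table is invented):

import pint

ureg = pint.UnitRegistry()
Q = ureg.Quantity

display_units = {'temperature': 'f'}  # hypothetical unit-system table

def to_display(value, kind, unit):
    # Temperatures need the deg* prefix, exactly as in the code above.
    if kind == 'temperature':
        src, dst = 'deg' + unit.upper(), 'deg' + display_units[kind].upper()
    else:
        src, dst = unit, display_units.get(kind, unit)
    return Q(value, src).to(dst).magnitude

print(to_display(100, 'temperature', 'c'))  # 212.0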
249,592 | kervi/kervi-core | kervi/values/__init__.py | DateTimeValue.value | def value(self, new_value):
"""
Updates the value.
If the change exceeds the change delta, observers and linked values are notified.
"""
datetime_value = None
if new_value:
datetime_value = new_value.strftime("%Y-%m-%dT%H:%M:%SZ")
self._set_value(datetime_value) | python | def value(self, new_value):
"""
Updates the value.
If the change exceeds the change delta, observers and linked values are notified.
"""
datetime_value = None
if new_value:
datetime_value = new_value.strftime("%Y-%m-%dT%H:%M:%SZ")
self._set_value(datetime_value) | [
"def",
"value",
"(",
"self",
",",
"new_value",
")",
":",
"datetime_value",
"=",
"None",
"if",
"new_value",
":",
"datetime_value",
"=",
"new_value",
".",
"strftime",
"(",
"\"%Y-%M-%dT%H:%M:%SZ\"",
")",
"self",
".",
"_set_value",
"(",
"datetime_value",
")"
] | Updates the value.
If the change exceeds the change delta, observers and linked values are notified. | [
"Updates",
"the",
"value",
".",
"If",
"the",
"change",
"exceeds",
"the",
"change",
"delta",
"observers",
"and",
"linked",
"values",
"are",
"notified",
"."
] | 3c1e3c8a17a7b4d085d8a28b99180ff2a96b0e23 | https://github.com/kervi/kervi-core/blob/3c1e3c8a17a7b4d085d8a28b99180ff2a96b0e23/kervi/values/__init__.py#L401-L409 |
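A minimal check of the ISO-like serialization used above; note that %m is the month directive while %M is minutes:

from datetime import datetime

dt = datetime(2024, 3, 5, 12, 30, 45)
print(dt.strftime('%Y-%m-%dT%H:%M:%SZ'))  # 2024-03-05T12:30:45Z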
249,593 | kervi/kervi-core | kervi/values/__init__.py | EnumValue.add_option | def add_option(self, value, text, selected=False):
"""
Add option to select
:param value: The value that the option represents.
:param text: The text that should be displayed in the dropdown.
:param selected: True if the option should be the default value.
"""
option = {"value": value, "text": text, "selected":selected}
self.options += [option]
if selected:
self.selected_options += [option]
self._value.append(value)
if self._persist_value:
self.settings.store_value("selected_options", self.selected_options) | python | def add_option(self, value, text, selected=False):
"""
Add option to select
:param value: The value that the option represents.
:param text: The text that should be displayed in the dropdown.
:param selected: True if the option should be the default value.
"""
option = {"value": value, "text": text, "selected":selected}
self.options += [option]
if selected:
self.selected_options += [option]
self._value.append(value)
if self._persist_value:
self.settings.store_value("selected_options", self.selected_options) | [
"def",
"add_option",
"(",
"self",
",",
"value",
",",
"text",
",",
"selected",
"=",
"False",
")",
":",
"option",
"=",
"{",
"\"value\"",
":",
"value",
",",
"\"text\"",
":",
"text",
",",
"\"selected\"",
":",
"selected",
"}",
"self",
".",
"options",
"+=",
"[",
"option",
"]",
"if",
"selected",
":",
"self",
".",
"selected_options",
"+=",
"[",
"option",
"]",
"self",
".",
"_value",
".",
"append",
"(",
"value",
")",
"if",
"self",
".",
"_persist_value",
":",
"self",
".",
"settings",
".",
"store_value",
"(",
"\"selected_options\"",
",",
"self",
".",
"selected_options",
")"
] | Add option to select
:param value: The value that the option represents.
:param text: The text that should be displayed in the dropdown.
:param selected: True if the option should be the default value. | [
"Add",
"option",
"to",
"select"
] | 3c1e3c8a17a7b4d085d8a28b99180ff2a96b0e23 | https://github.com/kervi/kervi-core/blob/3c1e3c8a17a7b4d085d8a28b99180ff2a96b0e23/kervi/values/__init__.py#L503-L519 |
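A standalone sketch of the option bookkeeping without the settings persistence; the class below is illustrative only:

class Select:
    def __init__(self):
        self.options, self.selected, self.value = [], [], []

    def add_option(self, value, text, selected=False):
        opt = {'value': value, 'text': text, 'selected': selected}
        self.options.append(opt)
        if selected:
            # Selected options double as the current value.
            self.selected.append(opt)
            self.value.append(value)

s = Select()
s.add_option('c', 'Celsius', selected=True)
s.add_option('f', 'Fahrenheit')
assert s.value == ['c'] and len(s.options) == 2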
249,594 | StanfordBioinformatics/scgpm_seqresults_dnanexus | scgpm_seqresults_dnanexus/log.py | get_logfile_name | def get_logfile_name(tags):
"""Formulates a log file name that incorporates the provided tags.
The log file will be located in ``scgpm_seqresults_dnanexus.LOG_DIR``.
Args:
tags: `list` of tags to append to the log file name. Each tag will be '_' delimited. Each tag
will be added in the same order as provided.
"""
if not os.path.exists(sd.LOG_DIR):
os.mkdir(sd.LOG_DIR)
filename = "log"
for tag in tags:
filename += "_{}".format(tag)
filename += ".txt"
filename = os.path.join(sd.LOG_DIR,filename)
return filename | python | def get_logfile_name(tags):
"""Formulates a log file name that incorporates the provided tags.
The log file will be located in ``scgpm_seqresults_dnanexus.LOG_DIR``.
Args:
tags: `list` of tags to append to the log file name. Each tag will be '_' delimited. Each tag
will be added in the same order as provided.
"""
if not os.path.exists(sd.LOG_DIR):
os.mkdir(sd.LOG_DIR)
filename = "log"
for tag in tags:
filename += "_{}".format(tag)
filename += ".txt"
filename = os.path.join(sd.LOG_DIR,filename)
return filename | [
"def",
"get_logfile_name",
"(",
"tags",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"sd",
".",
"LOG_DIR",
")",
":",
"os",
".",
"mkdir",
"(",
"sd",
".",
"LOG_DIR",
")",
"filename",
"=",
"\"log\"",
"for",
"tag",
"in",
"tags",
":",
"filename",
"+=",
"\"_{}\"",
".",
"format",
"(",
"tag",
")",
"filename",
"+=",
"\".txt\"",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"sd",
".",
"LOG_DIR",
",",
"filename",
")",
"return",
"filename"
] | Formulates a log file name that incorporates the provided tags.
The log file will be located in ``scgpm_seqresults_dnanexus.LOG_DIR``.
Args:
tags: `list` of tags to append to the log file name. Each tag will be '_' delimited. Each tag
will be added in the same order as provided. | [
"Formulates",
"a",
"log",
"file",
"name",
"that",
"incorporates",
"the",
"provided",
"tags",
"."
] | 2bdaae5ec5d38a07fec99e0c5379074a591d77b6 | https://github.com/StanfordBioinformatics/scgpm_seqresults_dnanexus/blob/2bdaae5ec5d38a07fec99e0c5379074a591d77b6/scgpm_seqresults_dnanexus/log.py#L14-L30 |
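A quick usage sketch with a local stand-in for sd.LOG_DIR:

import os

LOG_DIR = 'logs'  # stand-in for sd.LOG_DIR

def get_logfile_name(tags):
    os.makedirs(LOG_DIR, exist_ok=True)
    filename = 'log' + ''.join('_{}'.format(t) for t in tags)
    return os.path.join(LOG_DIR, filename + '.txt')

print(get_logfile_name(['dx', 'project-7']))  # logs/log_dx_project-7.txt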
249,595 | Othernet-Project/conz | conz/utils.py | rewrap | def rewrap(s, width=COLS):
""" Join all lines from input string and wrap it at specified width """
s = ' '.join([l.strip() for l in s.strip().split('\n')])
return '\n'.join(textwrap.wrap(s, width)) | python | def rewrap(s, width=COLS):
""" Join all lines from input string and wrap it at specified width """
s = ' '.join([l.strip() for l in s.strip().split('\n')])
return '\n'.join(textwrap.wrap(s, width)) | [
"def",
"rewrap",
"(",
"s",
",",
"width",
"=",
"COLS",
")",
":",
"s",
"=",
"' '",
".",
"join",
"(",
"[",
"l",
".",
"strip",
"(",
")",
"for",
"l",
"in",
"s",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
"]",
")",
"return",
"'\\n'",
".",
"join",
"(",
"textwrap",
".",
"wrap",
"(",
"s",
",",
"width",
")",
")"
] | Join all lines from input string and wrap it at specified width | [
"Join",
"all",
"lines",
"from",
"input",
"string",
"and",
"wrap",
"it",
"at",
"specified",
"width"
] | 051214fa95a837c21595b03426a2c54c522d07a0 | https://github.com/Othernet-Project/conz/blob/051214fa95a837c21595b03426a2c54c522d07a0/conz/utils.py#L17-L20 |
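The join-then-wrap behavior is easy to verify; COLS is presumably the terminal width, so a small width is used here just to force wrapping:

import textwrap

def rewrap(s, width=20):
    s = ' '.join(line.strip() for line in s.strip().split('\n'))
    return '\n'.join(textwrap.wrap(s, width))

text = """first line
    second line that is long"""
print(rewrap(text))
# first line second
# line that is long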
249,596 | jut-io/jut-python-tools | jut/commands/configs.py | add_configuration | def add_configuration(options):
"""
interactively add a new configuration
"""
if options.username != None:
username = options.username
else:
username = prompt('Username: ')
if options.password != None:
password = options.password
else:
password = prompt('Password: ', hide_input=not options.show_password)
if options.app_url != None:
app_url = options.app_url
else:
app_url = prompt('App URL (default: https://app.jut.io just hit enter): ')
if app_url.strip() == '':
app_url = 'https://app.jut.io'
section = '%s@%s' % (username, app_url)
if config.exists(section):
raise JutException('Configuration for "%s" already exists' % section)
token_manager = auth.TokenManager(username=username,
password=password,
app_url=app_url)
authorization = authorizations.get_authorization(token_manager,
app_url=app_url)
client_id = authorization['client_id']
client_secret = authorization['client_secret']
deployment_name = default_deployment(app_url,
client_id,
client_secret)
config.add(section, **{
'app_url': app_url,
'deployment_name': deployment_name,
'username': username,
'client_id': client_id,
'client_secret': client_secret
})
if options.default:
config.set_default(name=section)
else:
default_configuration(interactive=False) | python | def add_configuration(options):
"""
interactively add a new configuration
"""
if options.username != None:
username = options.username
else:
username = prompt('Username: ')
if options.password != None:
password = options.password
else:
password = prompt('Password: ', hide_input=not options.show_password)
if options.app_url != None:
app_url = options.app_url
else:
app_url = prompt('App URL (default: https://app.jut.io just hit enter): ')
if app_url.strip() == '':
app_url = 'https://app.jut.io'
section = '%s@%s' % (username, app_url)
if config.exists(section):
raise JutException('Configuration for "%s" already exists' % section)
token_manager = auth.TokenManager(username=username,
password=password,
app_url=app_url)
authorization = authorizations.get_authorization(token_manager,
app_url=app_url)
client_id = authorization['client_id']
client_secret = authorization['client_secret']
deployment_name = default_deployment(app_url,
client_id,
client_secret)
config.add(section, **{
'app_url': app_url,
'deployment_name': deployment_name,
'username': username,
'client_id': client_id,
'client_secret': client_secret
})
if options.default:
config.set_default(name=section)
else:
default_configuration(interactive=False) | [
"def",
"add_configuration",
"(",
"options",
")",
":",
"if",
"options",
".",
"username",
"!=",
"None",
":",
"username",
"=",
"options",
".",
"username",
"else",
":",
"username",
"=",
"prompt",
"(",
"'Username: '",
")",
"if",
"options",
".",
"password",
"!=",
"None",
":",
"password",
"=",
"options",
".",
"password",
"else",
":",
"password",
"=",
"prompt",
"(",
"'Password: '",
",",
"hide_input",
"=",
"not",
"options",
".",
"show_password",
")",
"if",
"options",
".",
"app_url",
"!=",
"None",
":",
"app_url",
"=",
"options",
".",
"app_url",
"else",
":",
"app_url",
"=",
"prompt",
"(",
"'App URL (default: https://app.jut.io just hit enter): '",
")",
"if",
"app_url",
".",
"strip",
"(",
")",
"==",
"''",
":",
"app_url",
"=",
"'https://app.jut.io'",
"section",
"=",
"'%s@%s'",
"%",
"(",
"username",
",",
"app_url",
")",
"if",
"config",
".",
"exists",
"(",
"section",
")",
":",
"raise",
"JutException",
"(",
"'Configuration for \"%s\" already exists'",
"%",
"section",
")",
"token_manager",
"=",
"auth",
".",
"TokenManager",
"(",
"username",
"=",
"username",
",",
"password",
"=",
"password",
",",
"app_url",
"=",
"app_url",
")",
"authorization",
"=",
"authorizations",
".",
"get_authorization",
"(",
"token_manager",
",",
"app_url",
"=",
"app_url",
")",
"client_id",
"=",
"authorization",
"[",
"'client_id'",
"]",
"client_secret",
"=",
"authorization",
"[",
"'client_secret'",
"]",
"deployment_name",
"=",
"default_deployment",
"(",
"app_url",
",",
"client_id",
",",
"client_secret",
")",
"config",
".",
"add",
"(",
"section",
",",
"*",
"*",
"{",
"'app_url'",
":",
"app_url",
",",
"'deployment_name'",
":",
"deployment_name",
",",
"'username'",
":",
"username",
",",
"'client_id'",
":",
"client_id",
",",
"'client_secret'",
":",
"client_secret",
"}",
")",
"if",
"options",
".",
"default",
":",
"config",
".",
"set_default",
"(",
"name",
"=",
"section",
")",
"else",
":",
"default_configuration",
"(",
"interactive",
"=",
"False",
")"
] | interactively add a new configuration | [
"interactively",
"add",
"a",
"new",
"configuration"
] | 65574d23f51a7bbced9bb25010d02da5ca5d906f | https://github.com/jut-io/jut-python-tools/blob/65574d23f51a7bbced9bb25010d02da5ca5d906f/jut/commands/configs.py#L58-L110 |
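The prompt-with-default pattern used for the app URL can be isolated into a tiny helper; this is a plain-input sketch, not the prompt function the package actually imports:

def prompt_with_default(label, default):
    # An empty answer falls back to the default, as with the app URL above.
    answer = input('%s (default: %s, just hit enter): ' % (label, default))
    return answer.strip() or default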
249,597 | insilicolife/micti | build/lib/MICTI/Kmeans.py | Kmeans.get_initial_centroids | def get_initial_centroids(self):
'''Randomly choose k data points as initial centroids'''
if self.seed is not None: # useful for obtaining consistent results
np.random.seed(self.seed)
n = self.data.shape[0] # number of data points
# Pick K indices from range [0, N).
rand_indices = np.random.randint(0, n, self.k)
# Keep centroids as dense format, as many entries will be nonzero due to averaging.
# As long as at least one document in a cluster contains a word,
# it will carry a nonzero weight in the TF-IDF vector of the centroid.
centroids = self.data[rand_indices,:].toarray()
self.centroids=centroids
return centroids | python | def get_initial_centroids(self):
'''Randomly choose k data points as initial centroids'''
if self.seed is not None: # useful for obtaining consistent results
np.random.seed(self.seed)
n = self.data.shape[0] # number of data points
# Pick K indices from range [0, N).
rand_indices = np.random.randint(0, n, self.k)
# Keep centroids as dense format, as many entries will be nonzero due to averaging.
# As long as at least one document in a cluster contains a word,
# it will carry a nonzero weight in the TF-IDF vector of the centroid.
centroids = self.data[rand_indices,:].toarray()
self.centroids=centroids
return centroids | [
"def",
"get_initial_centroids",
"(",
"self",
")",
":",
"if",
"self",
".",
"seed",
"is",
"not",
"None",
":",
"# useful for obtaining consistent results",
"np",
".",
"random",
".",
"seed",
"(",
"self",
".",
"seed",
")",
"n",
"=",
"self",
".",
"data",
".",
"shape",
"[",
"0",
"]",
"# number of data points",
"# Pick K indices from range [0, N).",
"rand_indices",
"=",
"np",
".",
"random",
".",
"randint",
"(",
"0",
",",
"n",
",",
"self",
".",
"k",
")",
"# Keep centroids as dense format, as many entries will be nonzero due to averaging.",
"# As long as at least one document in a cluster contains a word,",
"# it will carry a nonzero weight in the TF-IDF vector of the centroid.",
"centroids",
"=",
"self",
".",
"data",
"[",
"rand_indices",
",",
":",
"]",
".",
"toarray",
"(",
")",
"self",
".",
"centroids",
"=",
"centroids",
"return",
"centroids"
] | Randomly choose k data points as initial centroids | [
"Randomly",
"choose",
"k",
"data",
"points",
"as",
"initial",
"centroids"
] | f12f46724295b57c4859e6acf7eab580fc355eb1 | https://github.com/insilicolife/micti/blob/f12f46724295b57c4859e6acf7eab580fc355eb1/build/lib/MICTI/Kmeans.py#L34-L48 |
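The random initialization reduces to a seeded index draw; note that np.random.randint can repeat indices, so distinct centroids are not guaranteed. A dense toy version:

import numpy as np

def random_centroids(data, k, seed=None):
    if seed is not None:
        np.random.seed(seed)
    idx = np.random.randint(0, data.shape[0], k)  # may contain repeats
    return data[idx, :]

data = np.arange(20, dtype=float).reshape(10, 2)
print(random_centroids(data, 3, seed=42))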
249,598 | insilicolife/micti | build/lib/MICTI/Kmeans.py | Kmeans.smart_initialize | def smart_initialize(self):
'''Use k-means++ to initialize a good set of centroids'''
if self.seed is not None: # useful for obtaining consistent results
np.random.seed(self.seed)
centroids = np.zeros((self.k, self.data.shape[1]))
# Randomly choose the first centroid.
# Since we have no prior knowledge, choose uniformly at random
idx = np.random.randint(self.data.shape[0])
centroids[0] = self.data[idx,:].toarray()
# Compute distances from the first centroid chosen to all the other data points
squared_distances = pairwise_distances(self.data, centroids[0:1], metric='euclidean').flatten()**2
for i in range(1, self.k):
# Choose the next centroid randomly, so that the probability for each data point to be chosen
# is directly proportional to its squared distance from the nearest centroid.
# Roughly speaking, a new centroid should be as far from other centroids as possible.
idx = np.random.choice(self.data.shape[0], 1, p=squared_distances/sum(squared_distances))
centroids[i] = self.data[idx,:].toarray()
# Now compute distances from the centroids to all data points
squared_distances = np.min(pairwise_distances(self.data, centroids[0:i+1], metric='euclidean')**2,axis=1)
self.centroids=centroids
return centroids | python | def smart_initialize(self):
'''Use k-means++ to initialize a good set of centroids'''
if self.seed is not None: # useful for obtaining consistent results
np.random.seed(self.seed)
centroids = np.zeros((self.k, self.data.shape[1]))
# Randomly choose the first centroid.
# Since we have no prior knowledge, choose uniformly at random
idx = np.random.randint(self.data.shape[0])
centroids[0] = self.data[idx,:].toarray()
# Compute distances from the first centroid chosen to all the other data points
squared_distances = pairwise_distances(self.data, centroids[0:1], metric='euclidean').flatten()**2
for i in range(1, self.k):
# Choose the next centroid randomly, so that the probability for each data point to be chosen
# is directly proportional to its squared distance from the nearest centroid.
# Roughly speaking, a new centroid should be as far from other centroids as possible.
idx = np.random.choice(self.data.shape[0], 1, p=squared_distances/sum(squared_distances))
centroids[i] = self.data[idx,:].toarray()
# Now compute distances from the centroids to all data points
squared_distances = np.min(pairwise_distances(self.data, centroids[0:i+1], metric='euclidean')**2,axis=1)
self.centroids=centroids
return centroids | [
"def",
"smart_initialize",
"(",
"self",
")",
":",
"if",
"self",
".",
"seed",
"is",
"not",
"None",
":",
"# useful for obtaining consistent results",
"np",
".",
"random",
".",
"seed",
"(",
"self",
".",
"seed",
")",
"centroids",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"k",
",",
"self",
".",
"data",
".",
"shape",
"[",
"1",
"]",
")",
")",
"# Randomly choose the first centroid.",
"# Since we have no prior knowledge, choose uniformly at random",
"idx",
"=",
"np",
".",
"random",
".",
"randint",
"(",
"self",
".",
"data",
".",
"shape",
"[",
"0",
"]",
")",
"centroids",
"[",
"0",
"]",
"=",
"self",
".",
"data",
"[",
"idx",
",",
":",
"]",
".",
"toarray",
"(",
")",
"# Compute distances from the first centroid chosen to all the other data points",
"squared_distances",
"=",
"pairwise_distances",
"(",
"self",
".",
"data",
",",
"centroids",
"[",
"0",
":",
"1",
"]",
",",
"metric",
"=",
"'euclidean'",
")",
".",
"flatten",
"(",
")",
"**",
"2",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"self",
".",
"k",
")",
":",
"# Choose the next centroid randomly, so that the probability for each data point to be chosen",
"# is directly proportional to its squared distance from the nearest centroid.",
"# Roughtly speaking, a new centroid should be as far as from ohter centroids as possible.",
"idx",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"self",
".",
"data",
".",
"shape",
"[",
"0",
"]",
",",
"1",
",",
"p",
"=",
"squared_distances",
"/",
"sum",
"(",
"squared_distances",
")",
")",
"centroids",
"[",
"i",
"]",
"=",
"self",
".",
"data",
"[",
"idx",
",",
":",
"]",
".",
"toarray",
"(",
")",
"# Now compute distances from the centroids to all data points",
"squared_distances",
"=",
"np",
".",
"min",
"(",
"pairwise_distances",
"(",
"self",
".",
"data",
",",
"centroids",
"[",
"0",
":",
"i",
"+",
"1",
"]",
",",
"metric",
"=",
"'euclidean'",
")",
"**",
"2",
",",
"axis",
"=",
"1",
")",
"self",
".",
"centroids",
"=",
"centroids",
"return",
"centroids"
] | Use k-means++ to initialize a good set of centroids | [
"Use",
"k",
"-",
"means",
"++",
"to",
"initialize",
"a",
"good",
"set",
"of",
"centroids"
] | f12f46724295b57c4859e6acf7eab580fc355eb1 | https://github.com/insilicolife/micti/blob/f12f46724295b57c4859e6acf7eab580fc355eb1/build/lib/MICTI/Kmeans.py#L50-L70 |
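A dense, self-contained k-means++ sketch following the same steps, with sklearn's pairwise_distances replaced by plain numpy so the snippet has no extra dependency:

import numpy as np

def kmeanspp(data, k, seed=0):
    rng = np.random.default_rng(seed)
    centroids = np.empty((k, data.shape[1]))
    centroids[0] = data[rng.integers(data.shape[0])]
    # Squared distance of every point to its nearest chosen centroid.
    d2 = ((data - centroids[0]) ** 2).sum(axis=1)
    for i in range(1, k):
        # Sample proportionally to squared distance from the nearest centroid.
        idx = rng.choice(data.shape[0], p=d2 / d2.sum())
        centroids[i] = data[idx]
        d2 = np.minimum(d2, ((data - centroids[i]) ** 2).sum(axis=1))
    return centroids

pts = np.vstack([np.zeros((5, 2)), np.ones((5, 2)) * 10])
print(kmeanspp(pts, 2))  # typically one centroid near each cluster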
249,599 | etcher-be/elib_run | elib_run/_run/_capture_output.py | filter_line | def filter_line(line: str, context: RunContext) -> typing.Optional[str]:
"""
Filters out lines that match a given regex
:param line: line to filter
:type line: str
:param context: run context
:type context: RunContext
:return: line if it doesn't match the filter
:rtype: optional str
"""
if context.filters is not None:
for filter_ in context.filters:
if re.match(filter_, line):
return None
return line | python | def filter_line(line: str, context: RunContext) -> typing.Optional[str]:
"""
Filters out lines that match a given regex
:param line: line to filter
:type line: str
:param context: run context
:type context: RunContext
:return: line if it doesn't match the filter
:rtype: optional str
"""
if context.filters is not None:
for filter_ in context.filters:
if re.match(filter_, line):
return None
return line | [
"def",
"filter_line",
"(",
"line",
":",
"str",
",",
"context",
":",
"RunContext",
")",
"->",
"typing",
".",
"Optional",
"[",
"str",
"]",
":",
"if",
"context",
".",
"filters",
"is",
"not",
"None",
":",
"for",
"filter_",
"in",
"context",
".",
"filters",
":",
"if",
"re",
".",
"match",
"(",
"filter_",
",",
"line",
")",
":",
"return",
"None",
"return",
"line"
] | Filters out lines that match a given regex
:param line: line to filter
:type line: str
:param context: run context
:type context: RunContext
:return: line if it doesn't match the filter
:rtype: optional str | [
"Filters",
"out",
"lines",
"that",
"match",
"a",
"given",
"regex"
] | c9d8ba9f067ab90c5baa27375a92b23f1b97cdde | https://github.com/etcher-be/elib_run/blob/c9d8ba9f067ab90c5baa27375a92b23f1b97cdde/elib_run/_run/_capture_output.py#L15-L30 |
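The filtering itself is just re.match over a list of patterns; a standalone check with invented patterns:

import re

def filter_line(line, patterns):
    # Drop the line if any pattern matches at its start.
    for pat in patterns:
        if re.match(pat, line):
            return None
    return line

patterns = [r'^DEBUG', r'^\s*$']
assert filter_line('DEBUG: noisy', patterns) is None
assert filter_line('INFO: keep me', patterns) == 'INFO: keep me'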