Dataset columns: id (int32, 0–252k) | repo (string, 7–55 chars) | path (string, 4–127 chars) | func_name (string, 1–88 chars) | original_string (string, 75–19.8k chars) | language (string, 1 class: python) | code (string, 51–19.8k chars) | code_tokens (sequence) | docstring (string, 3–17.3k chars) | docstring_tokens (sequence) | sha (string, 40 chars) | url (string, 87–242 chars). The `code`, `code_tokens`, and `docstring_tokens` columns are token-level duplicates of `original_string` and `docstring`.
1,900 | torchbox/wagtail-markdown | wagtailmarkdown/mdx/tables/__init__.py | TableProcessor._build_row | python

```python
def _build_row(self, row, parent, align, border):
    """ Given a row of text, build table cells. """
    tr = etree.SubElement(parent, 'tr')
    tag = 'td'
    if parent.tag == 'thead':
        tag = 'th'
    cells = self._split_row(row, border)
    # We use align here rather than cells to ensure every row
    # contains the same number of columns.
    for i, a in enumerate(align):
        c = etree.SubElement(tr, tag)
        try:
            c.text = cells[i].strip()
        except IndexError:
            c.text = ""
        if a:
            c.set('align', a)
```
"def",
"_build_row",
"(",
"self",
",",
"row",
",",
"parent",
",",
"align",
",",
"border",
")",
":",
"tr",
"=",
"etree",
".",
"SubElement",
"(",
"parent",
",",
"'tr'",
")",
"tag",
"=",
"'td'",
"if",
"parent",
".",
"tag",
"==",
"'thead'",
":",
"tag",
"=",
"'th'",
"cells",
"=",
"self",
".",
"_split_row",
"(",
"row",
",",
"border",
")",
"# We use align here rather than cells to ensure every row\r",
"# contains the same number of columns.\r",
"for",
"i",
",",
"a",
"in",
"enumerate",
"(",
"align",
")",
":",
"c",
"=",
"etree",
".",
"SubElement",
"(",
"tr",
",",
"tag",
")",
"try",
":",
"c",
".",
"text",
"=",
"cells",
"[",
"i",
"]",
".",
"strip",
"(",
")",
"except",
"IndexError",
":",
"c",
".",
"text",
"=",
"\"\"",
"if",
"a",
":",
"c",
".",
"set",
"(",
"'align'",
",",
"a",
")"
] | Given a row of text, build table cells. | [
"Given",
"a",
"row",
"of",
"text",
"build",
"table",
"cells",
"."
sha: 6e1c4457049b68e8bc7eb5a3b19830bff58dc6a6 | url: https://github.com/torchbox/wagtail-markdown/blob/6e1c4457049b68e8bc7eb5a3b19830bff58dc6a6/wagtailmarkdown/mdx/tables/__init__.py#L63-L79
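The comment above is the key detail: iterating over `align` rather than `cells` pads short rows out to the full column count. A minimal standalone sketch of that behaviour, using the stdlib `xml.etree.ElementTree` in place of Markdown's bundled etree (the names here are illustrative, not the library's API):

```python
import xml.etree.ElementTree as etree

def build_row(cells, parent, align):
    # One <td> per alignment spec, not per cell, so every row gets
    # the same number of columns; missing cells become empty ones.
    tr = etree.SubElement(parent, 'tr')
    for i, a in enumerate(align):
        c = etree.SubElement(tr, 'td')
        c.text = cells[i].strip() if i < len(cells) else ""
        if a:
            c.set('align', a)

table = etree.Element('table')
build_row([' a ', 'b'], table, ['left', None, 'right'])
print(etree.tostring(table).decode())
# <table><tr><td align="left">a</td><td>b</td><td align="right"></td></tr></table>
```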
1,901 | torchbox/wagtail-markdown | wagtailmarkdown/mdx/tables/__init__.py | TableProcessor._split_row | python

```python
def _split_row(self, row, border):
    """ Split a row of text into a list of cells. """
    if border:
        if row.startswith('|'):
            row = row[1:]
        if row.endswith('|'):
            row = row[:-1]
    return row.split('|')
```
"def",
"_split_row",
"(",
"self",
",",
"row",
",",
"border",
")",
":",
"if",
"border",
":",
"if",
"row",
".",
"startswith",
"(",
"'|'",
")",
":",
"row",
"=",
"row",
"[",
"1",
":",
"]",
"if",
"row",
".",
"endswith",
"(",
"'|'",
")",
":",
"row",
"=",
"row",
"[",
":",
"-",
"1",
"]",
"return",
"row",
".",
"split",
"(",
"'|'",
")"
] | split a row of text into list of cells. | [
"split",
"a",
"row",
"of",
"text",
"into",
"list",
"of",
"cells",
"."
sha: 6e1c4457049b68e8bc7eb5a3b19830bff58dc6a6 | url: https://github.com/torchbox/wagtail-markdown/blob/6e1c4457049b68e8bc7eb5a3b19830bff58dc6a6/wagtailmarkdown/mdx/tables/__init__.py#L81-L88
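The `border` flag strips only a single leading and trailing pipe before splitting, leaving interior whitespace for the caller to `.strip()`. A runnable illustration with the method body lifted out of the class:

```python
def split_row(row, border):
    # Same logic as TableProcessor._split_row, free of the class.
    if border:
        if row.startswith('|'):
            row = row[1:]
        if row.endswith('|'):
            row = row[:-1]
    return row.split('|')

print(split_row('| a | b | c |', border=True))   # [' a ', ' b ', ' c ']
print(split_row('a | b | c', border=False))      # ['a ', ' b ', ' c']
```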
1,902 | torchbox/wagtail-markdown | wagtailmarkdown/mdx/tables/__init__.py | TableExtension.extendMarkdown | python

```python
def extendMarkdown(self, md, md_globals):
    """ Add an instance of TableProcessor to BlockParser. """
    md.parser.blockprocessors.add('table',
                                  TableProcessor(md.parser),
                                  '<hashheader')
```
"def",
"extendMarkdown",
"(",
"self",
",",
"md",
",",
"md_globals",
")",
":",
"md",
".",
"parser",
".",
"blockprocessors",
".",
"add",
"(",
"'table'",
",",
"TableProcessor",
"(",
"md",
".",
"parser",
")",
",",
"'<hashheader'",
")"
] | Add an instance of TableProcessor to BlockParser. | [
"Add",
"an",
"instance",
"of",
"TableProcessor",
"to",
"BlockParser",
"."
sha: 6e1c4457049b68e8bc7eb5a3b19830bff58dc6a6 | url: https://github.com/torchbox/wagtail-markdown/blob/6e1c4457049b68e8bc7eb5a3b19830bff58dc6a6/wagtailmarkdown/mdx/tables/__init__.py#L94-L98
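`blockprocessors.add` with a `'<hashheader'` priority string is the Markdown 2.x extension API, so using the extension would look roughly like this (a sketch assuming that API version and this import path):

```python
import markdown
from wagtailmarkdown.mdx.tables import TableExtension

md = markdown.Markdown(extensions=[TableExtension()])
html = md.convert(
    "First Header | Second Header\n"
    "------------ | -------------\n"
    "Cell 1       | Cell 2"
)
# html now contains a <table> built by TableProcessor.
```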
1,903 | kottenator/django-compressor-toolkit | compressor_toolkit/precompilers.py | get_all_static | python

```python
def get_all_static():
    """
    Get all the static files directories found by ``STATICFILES_FINDERS``
    :return: set of paths (top-level folders only)
    """
    static_dirs = set()
    for finder in settings.STATICFILES_FINDERS:
        finder = finders.get_finder(finder)
        if hasattr(finder, 'storages'):
            for storage in finder.storages.values():
                static_dirs.add(storage.location)
        if hasattr(finder, 'storage'):
            static_dirs.add(finder.storage.location)
    return static_dirs
```
"def",
"get_all_static",
"(",
")",
":",
"static_dirs",
"=",
"set",
"(",
")",
"for",
"finder",
"in",
"settings",
".",
"STATICFILES_FINDERS",
":",
"finder",
"=",
"finders",
".",
"get_finder",
"(",
"finder",
")",
"if",
"hasattr",
"(",
"finder",
",",
"'storages'",
")",
":",
"for",
"storage",
"in",
"finder",
".",
"storages",
".",
"values",
"(",
")",
":",
"static_dirs",
".",
"add",
"(",
"storage",
".",
"location",
")",
"if",
"hasattr",
"(",
"finder",
",",
"'storage'",
")",
":",
"static_dirs",
".",
"add",
"(",
"finder",
".",
"storage",
".",
"location",
")",
"return",
"static_dirs"
] | Get all the static files directories found by ``STATICFILES_FINDERS``
:return: set of paths (top-level folders only) | [
"Get",
"all",
"the",
"static",
"files",
"directories",
"found",
"by",
"STATICFILES_FINDERS"
sha: e7bfdaa354e9c9189db0e4ba4fa049045adad91b | url: https://github.com/kottenator/django-compressor-toolkit/blob/e7bfdaa354e9c9189db0e4ba4fa049045adad91b/compressor_toolkit/precompilers.py#L13-L31
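Because the finders read Django settings, this helper only works inside a configured project. A sketch of one plausible use, building include-path flags for an external compiler command line (the `--include-path` flag is illustrative, borrowed from node-sass, not something this module mandates):

```python
from compressor_toolkit.precompilers import get_all_static

# Build compiler flags from every top-level static directory.
include_flags = ' '.join(
    '--include-path={}'.format(path) for path in sorted(get_all_static())
)
```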
1,904 | kottenator/django-compressor-toolkit | compressor_toolkit/precompilers.py | BaseCompiler.input | python

```python
def input(self, **kwargs):
    """
    Specify temporary input file extension.
    Browserify requires explicit file extension (".js" or ".json" by default).
    https://github.com/substack/node-browserify/issues/1469
    """
    if self.infile is None and "{infile}" in self.command:
        if self.filename is None:
            self.infile = NamedTemporaryFile(mode='wb', suffix=self.infile_ext)
            self.infile.write(self.content.encode(self.default_encoding))
            self.infile.flush()
        self.options += (
            ('infile', self.infile.name),
        )
    return super(BaseCompiler, self).input(**kwargs)
```
"def",
"input",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"infile",
"is",
"None",
"and",
"\"{infile}\"",
"in",
"self",
".",
"command",
":",
"if",
"self",
".",
"filename",
"is",
"None",
":",
"self",
".",
"infile",
"=",
"NamedTemporaryFile",
"(",
"mode",
"=",
"'wb'",
",",
"suffix",
"=",
"self",
".",
"infile_ext",
")",
"self",
".",
"infile",
".",
"write",
"(",
"self",
".",
"content",
".",
"encode",
"(",
"self",
".",
"default_encoding",
")",
")",
"self",
".",
"infile",
".",
"flush",
"(",
")",
"self",
".",
"options",
"+=",
"(",
"(",
"'infile'",
",",
"self",
".",
"infile",
".",
"name",
")",
",",
")",
"return",
"super",
"(",
"BaseCompiler",
",",
"self",
")",
".",
"input",
"(",
"*",
"*",
"kwargs",
")"
] | Specify temporary input file extension.
Browserify requires explicit file extension (".js" or ".json" by default).
https://github.com/substack/node-browserify/issues/1469 | [
"Specify",
"temporary",
"input",
"file",
"extension",
"."
sha: e7bfdaa354e9c9189db0e4ba4fa049045adad91b | url: https://github.com/kottenator/django-compressor-toolkit/blob/e7bfdaa354e9c9189db0e4ba4fa049045adad91b/compressor_toolkit/precompilers.py#L38-L53
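The point of `suffix=self.infile_ext` is that tools like Browserify dispatch on the file extension, and `NamedTemporaryFile` produces extension-less names by default. A standalone demonstration:

```python
from tempfile import NamedTemporaryFile

with NamedTemporaryFile(mode='wb') as f:
    print(f.name)             # e.g. /tmp/tmpx1y2z3 -- no extension

with NamedTemporaryFile(mode='wb', suffix='.js') as f:
    f.write(b'console.log("hi");')
    f.flush()                 # flush so another process can read it
    print(f.name)             # e.g. /tmp/tmpa4b5c6.js -- sniffable
```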
1,905 | CodyKochmann/graphdb | graphdb/RamGraphDB.py | graph_hash | python

```python
def graph_hash(obj):
    '''this hashes all types to a hash without collisions. python's hashing algorithms are not cross-type compatible, but hashing tuples with the type as the first element seems to do the trick'''
    obj_type = type(obj)
    try:
        # this works for hashables
        return hash((obj_type, obj))
    except TypeError:
        # this works for object containers since graphdb
        # wants to identify different containers
        # instead of the sum of their current internals
        return hash((obj_type, id(obj)))
```
"def",
"graph_hash",
"(",
"obj",
")",
":",
"obj_type",
"=",
"type",
"(",
"obj",
")",
"try",
":",
"# this works for hashables",
"return",
"hash",
"(",
"(",
"obj_type",
",",
"obj",
")",
")",
"except",
":",
"# this works for object containers since graphdb",
"# wants to identify different containers",
"# instead of the sum of their current internals",
"return",
"hash",
"(",
"(",
"obj_type",
",",
"id",
"(",
"obj",
")",
")",
")"
] | this hashes all types to a hash without colissions. python's hashing algorithms are not cross type compatable but hashing tuples with the type as the first element seems to do the trick | [
"this",
"hashes",
"all",
"types",
"to",
"a",
"hash",
"without",
"colissions",
".",
"python",
"s",
"hashing",
"algorithms",
"are",
"not",
"cross",
"type",
"compatable",
"but",
"hashing",
"tuples",
"with",
"the",
"type",
"as",
"the",
"first",
"element",
"seems",
"to",
"do",
"the",
"trick"
sha: 8c18830db4beda30204f5fd4450bc96eb39b0feb | url: https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/RamGraphDB.py#L13-L23
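The effect is that hashable values are identified by (type, value) while unhashable containers are identified by (type, identity). A few illustrative checks, assuming `graph_hash` from above is in scope (the inequality asserts rely on hash collisions being vanishingly unlikely):

```python
assert graph_hash(1) != graph_hash(1.0)          # equal values, different types
assert graph_hash((1, 2)) == graph_hash((1, 2))  # hashable: identified by value

a, b = [1, 2], [1, 2]
assert graph_hash(a) != graph_hash(b)  # equal contents, distinct objects
assert graph_hash(a) == graph_hash(a)  # same object, stable across calls
```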
1,906 | CodyKochmann/graphdb | graphdb/RamGraphDB.py | VList.where | python

```python
def where(self, relation, filter_fn):
    ''' use this to filter VLists: simply provide a filter function and the relation to apply it to '''
    assert type(relation).__name__ in {'str', 'unicode'}, 'where needs the first arg to be a string'
    assert callable(filter_fn), 'filter_fn needs to be callable'
    return VList(i for i in self if relation in i._relations() and any(filter_fn(_()) for _ in i[relation]))
```
"def",
"where",
"(",
"self",
",",
"relation",
",",
"filter_fn",
")",
":",
"assert",
"type",
"(",
"relation",
")",
".",
"__name__",
"in",
"{",
"'str'",
",",
"'unicode'",
"}",
",",
"'where needs the first arg to be a string'",
"assert",
"callable",
"(",
"filter_fn",
")",
",",
"'filter_fn needs to be callable'",
"return",
"VList",
"(",
"i",
"for",
"i",
"in",
"self",
"if",
"relation",
"in",
"i",
".",
"_relations",
"(",
")",
"and",
"any",
"(",
"filter_fn",
"(",
"_",
"(",
")",
")",
"for",
"_",
"in",
"i",
"[",
"relation",
"]",
")",
")"
] | use this to filter VLists, simply provide a filter function and what relation to apply it to | [
"use",
"this",
"to",
"filter",
"VLists",
"simply",
"provide",
"a",
"filter",
"function",
"and",
"what",
"relation",
"to",
"apply",
"it",
"to"
sha: 8c18830db4beda30204f5fd4450bc96eb39b0feb | url: https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/RamGraphDB.py#L349-L353
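Since `where` returns a new VList, filters chain. The shape of the pattern, reduced to plain dictionaries with a hypothetical stand-in class so it runs without a graph database:

```python
class FilterList(list):
    # Hypothetical stand-in for VList: each where() narrows the list.
    def where(self, key, filter_fn):
        return FilterList(i for i in self if key in i and filter_fn(i[key]))

people = FilterList([{'age': 3}, {'age': 7, 'name': 'sam'}, {'name': 'kim'}])
result = people.where('age', lambda v: v > 5).where('name', lambda v: v == 'sam')
print(result)  # [{'age': 7, 'name': 'sam'}]
```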
1,907 | CodyKochmann/graphdb | graphdb/RamGraphDB.py | VList._where | python

```python
def _where(self, filter_fn):
    ''' use this to filter VLists: simply provide a filter function to filter the currently found objects '''
    assert callable(filter_fn), 'filter_fn needs to be callable'
    return VList(i for i in self if filter_fn(i()))
```
"def",
"_where",
"(",
"self",
",",
"filter_fn",
")",
":",
"assert",
"callable",
"(",
"filter_fn",
")",
",",
"'filter_fn needs to be callable'",
"return",
"VList",
"(",
"i",
"for",
"i",
"in",
"self",
"if",
"filter_fn",
"(",
"i",
"(",
")",
")",
")"
] | use this to filter VLists, simply provide a filter function to filter the current found objects | [
"use",
"this",
"to",
"filter",
"VLists",
"simply",
"provide",
"a",
"filter",
"function",
"to",
"filter",
"the",
"current",
"found",
"objects"
sha: 8c18830db4beda30204f5fd4450bc96eb39b0feb | url: https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/RamGraphDB.py#L355-L358
1,908 | CodyKochmann/graphdb | graphdb/RamGraphDB.py | VList._where | python

```python
def _where(self, **kwargs):
    '''use this to filter VLists with kv pairs'''
    out = self
    for k, v in kwargs.items():
        out = out.where(k, lambda i: i == v)
    return out
```
"def",
"_where",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"out",
"=",
"self",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"out",
"=",
"out",
".",
"where",
"(",
"k",
",",
"lambda",
"i",
":",
"i",
"==",
"v",
")",
"return",
"out"
] | use this to filter VLists with kv pairs | [
"use",
"this",
"to",
"filter",
"VLists",
"with",
"kv",
"pairs"
sha: 8c18830db4beda30204f5fd4450bc96eb39b0feb | url: https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/RamGraphDB.py#L362-L367
1,909 | CodyKochmann/graphdb | graphdb/SQLiteGraphDB.py | SQLiteGraphDB.find | python

```python
def find(self, target, relation):
    ''' returns all elements the target has a relation to '''
    query = 'select ob1.code from objects as ob1, objects as ob2, relations where relations.dst=ob1.id and relations.name=? and relations.src=ob2.id and ob2.code=?'  # src is id, not source :/
    for i in self._execute(query, (relation, self.serialize(target))):
        yield self.deserialize(i[0])
```
"def",
"find",
"(",
"self",
",",
"target",
",",
"relation",
")",
":",
"query",
"=",
"'select ob1.code from objects as ob1, objects as ob2, relations where relations.dst=ob1.id and relations.name=? and relations.src=ob2.id and ob2.code=?'",
"# src is id not source :/",
"for",
"i",
"in",
"self",
".",
"_execute",
"(",
"query",
",",
"(",
"relation",
",",
"self",
".",
"serialize",
"(",
"target",
")",
")",
")",
":",
"yield",
"self",
".",
"deserialize",
"(",
"i",
"[",
"0",
"]",
")"
] | returns back all elements the target has a relation to | [
"returns",
"back",
"all",
"elements",
"the",
"target",
"has",
"a",
"relation",
"to"
sha: 8c18830db4beda30204f5fd4450bc96eb39b0feb | url: https://github.com/CodyKochmann/graphdb/blob/8c18830db4beda30204f5fd4450bc96eb39b0feb/graphdb/SQLiteGraphDB.py#L309-L313
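The three-way join reads: find the row for `target`, follow every `relation` edge leaving it, and return the objects those edges point at. A self-contained sketch against a schema inferred from the query itself (the real GraphDB schema may differ):

```python
import sqlite3

db = sqlite3.connect(':memory:')
db.executescript('''
    create table objects (id integer primary key, code text);
    create table relations (src integer, name text, dst integer);
''')
db.execute("insert into objects (id, code) values (1, 'alice'), (2, 'bob')")
db.execute("insert into relations (src, name, dst) values (1, 'knows', 2)")

rows = db.execute(
    'select ob1.code from objects as ob1, objects as ob2, relations '
    'where relations.dst=ob1.id and relations.name=? '
    'and relations.src=ob2.id and ob2.code=?',
    ('knows', 'alice'),
).fetchall()
print(rows)  # [('bob',)]
```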
1,910 | StorjOld/pyp2p | pyp2p/sock.py | Sock.parse_buf | python

```python
def parse_buf(self, encoding="unicode"):
    """
    Since TCP is a stream-orientated protocol, responses aren't guaranteed
    to be complete when they arrive. The buffer stores all the data and
    this function splits the data into replies based on the new line
    delimiter.
    """
    replies = []
    reply = b""
    chop = 0
    skip = 0
    i = 0
    buf_len = len(self.buf)
    for i in range(0, buf_len):
        ch = self.buf[i:i + 1]
        if skip:
            skip -= 1
            i += 1
            continue

        nxt = i + 1
        if nxt < buf_len:
            if ch == b"\r" and self.buf[nxt:nxt + 1] == b"\n":
                # Append new reply.
                if reply != b"":
                    if encoding == "unicode":
                        replies.append(encode_str(reply, encoding))
                    else:
                        replies.append(reply)
                    reply = b""

                # Truncate the whole buf if chop is out of bounds.
                chop = nxt + 1
                skip = 1
                i += 1
                continue

        reply += ch
        i += 1

    # Truncate buf.
    if chop:
        self.buf = self.buf[chop:]

    return replies
```
"def",
"parse_buf",
"(",
"self",
",",
"encoding",
"=",
"\"unicode\"",
")",
":",
"buf_len",
"=",
"len",
"(",
"self",
".",
"buf",
")",
"replies",
"=",
"[",
"]",
"reply",
"=",
"b\"\"",
"chop",
"=",
"0",
"skip",
"=",
"0",
"i",
"=",
"0",
"buf_len",
"=",
"len",
"(",
"self",
".",
"buf",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"buf_len",
")",
":",
"ch",
"=",
"self",
".",
"buf",
"[",
"i",
":",
"i",
"+",
"1",
"]",
"if",
"skip",
":",
"skip",
"-=",
"1",
"i",
"+=",
"1",
"continue",
"nxt",
"=",
"i",
"+",
"1",
"if",
"nxt",
"<",
"buf_len",
":",
"if",
"ch",
"==",
"b\"\\r\"",
"and",
"self",
".",
"buf",
"[",
"nxt",
":",
"nxt",
"+",
"1",
"]",
"==",
"b\"\\n\"",
":",
"# Append new reply.",
"if",
"reply",
"!=",
"b\"\"",
":",
"if",
"encoding",
"==",
"\"unicode\"",
":",
"replies",
".",
"append",
"(",
"encode_str",
"(",
"reply",
",",
"encoding",
")",
")",
"else",
":",
"replies",
".",
"append",
"(",
"reply",
")",
"reply",
"=",
"b\"\"",
"# Truncate the whole buf if chop is out of bounds.",
"chop",
"=",
"nxt",
"+",
"1",
"skip",
"=",
"1",
"i",
"+=",
"1",
"continue",
"reply",
"+=",
"ch",
"i",
"+=",
"1",
"# Truncate buf.",
"if",
"chop",
":",
"self",
".",
"buf",
"=",
"self",
".",
"buf",
"[",
"chop",
":",
"]",
"return",
"replies"
] | Since TCP is a stream-orientated protocol, responses aren't guaranteed
to be complete when they arrive. The buffer stores all the data and
this function splits the data into replies based on the new line
delimiter. | [
"Since",
"TCP",
"is",
"a",
"stream",
"-",
"orientated",
"protocol",
"responses",
"aren",
"t",
"guaranteed",
"to",
"be",
"complete",
"when",
"they",
"arrive",
".",
"The",
"buffer",
"stores",
"all",
"the",
"data",
"and",
"this",
"function",
"splits",
"the",
"data",
"into",
"replies",
"based",
"on",
"the",
"new",
"line",
"delimiter",
"."
sha: 7024208c3af20511496a652ff212f54c420e0464 | url: https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/sock.py#L221-L267
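The essential behaviour: complete `\r\n`-terminated replies come out, while a trailing partial reply stays buffered for the next read. A deliberately simplified standalone version (the real method also honours encodings and mutates `self.buf` in place):

```python
def parse_buf(buf):
    # Everything before the last CRLF is complete; the tail is kept.
    *replies, rest = buf.split(b"\r\n")
    return [r for r in replies if r], rest

print(parse_buf(b"PING\r\nPON"))  # ([b'PING'], b'PON') -- partial reply waits
print(parse_buf(b"PONG\r\n"))     # ([b'PONG'], b'')
```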
1,911 | StorjOld/pyp2p | pyp2p/sock.py | Sock.get_chunks | python

```python
def get_chunks(self, fixed_limit=None, encoding="unicode"):
    """
    This is the function which handles retrieving new data chunks. Its
    main logic is avoiding a recv call blocking forever and halting
    the program flow. To do this, it manages errors and keeps an eye
    on the buffer to avoid overflows and DoS attacks.
    http://stackoverflow.com/questions/16745409/what-does-pythons-socket-recv-return-for-non-blocking-sockets-if-no-data-is-r
    http://stackoverflow.com/questions/3187565/select-and-ssl-in-python
    """
    # Socket is disconnected.
    if not self.connected:
        return

    # Recv chunks until network buffer is empty.
    repeat = 1
    wait = 0.2
    chunk_no = 0
    max_buf = self.max_buf
    max_chunks = self.max_chunks
    if fixed_limit is not None:
        max_buf = fixed_limit
        max_chunks = fixed_limit

    while repeat:
        chunk_size = self.chunk_size
        while True:
            # Don't exceed buffer size.
            buf_len = len(self.buf)
            if buf_len >= max_buf:
                break
            remaining = max_buf - buf_len
            if remaining < chunk_size:
                chunk_size = remaining

            # Don't allow non-blocking sockets to be
            # DoSed by multiple small replies.
            if chunk_no >= max_chunks and not self.blocking:
                break

            try:
                chunk = self.s.recv(chunk_size)
            except socket.timeout as e:
                self.debug_print("Get chunks timed out.")
                self.debug_print(e)

                # Timeout on blocking sockets.
                err = e.args[0]
                self.debug_print(err)
                if err == "timed out":
                    repeat = 0
                    break
            except ssl.SSLError as e:
                # Will block on non-blocking SSL sockets.
                if e.errno == ssl.SSL_ERROR_WANT_READ:
                    self.debug_print("SSL_ERROR_WANT_READ")
                    break
                else:
                    self.debug_print("Get chunks ssl error")
                    self.close()
                    return
            except socket.error as e:
                # Will block on non-blocking non-SSL sockets.
                err = e.args[0]
                if err == errno.EAGAIN or err == errno.EWOULDBLOCK:
                    break
                else:
                    # Connection closed or other problem.
                    self.debug_print("get chunks other closing")
                    self.close()
                    return
            else:
                if chunk == b"":
                    self.close()
                    return

                # Avoid decoding errors.
                self.buf += chunk

                # Otherwise the loop will be endless.
                if self.blocking:
                    break

                # Used to avoid DoS of small packets.
                chunk_no += 1

        # Repeat is already set -- manual skip.
        if not repeat:
            break
        else:
            repeat = 0

        # Block until there's a full reply or there's a timeout.
        if self.blocking:
            if fixed_limit is None:
                # Partial response.
                if self.delimiter not in self.buf:
                    repeat = 1
                    time.sleep(wait)
```
"def",
"get_chunks",
"(",
"self",
",",
"fixed_limit",
"=",
"None",
",",
"encoding",
"=",
"\"unicode\"",
")",
":",
"# Socket is disconnected.",
"if",
"not",
"self",
".",
"connected",
":",
"return",
"# Recv chunks until network buffer is empty.",
"repeat",
"=",
"1",
"wait",
"=",
"0.2",
"chunk_no",
"=",
"0",
"max_buf",
"=",
"self",
".",
"max_buf",
"max_chunks",
"=",
"self",
".",
"max_chunks",
"if",
"fixed_limit",
"is",
"not",
"None",
":",
"max_buf",
"=",
"fixed_limit",
"max_chunks",
"=",
"fixed_limit",
"while",
"repeat",
":",
"chunk_size",
"=",
"self",
".",
"chunk_size",
"while",
"True",
":",
"# Don't exceed buffer size.",
"buf_len",
"=",
"len",
"(",
"self",
".",
"buf",
")",
"if",
"buf_len",
">=",
"max_buf",
":",
"break",
"remaining",
"=",
"max_buf",
"-",
"buf_len",
"if",
"remaining",
"<",
"chunk_size",
":",
"chunk_size",
"=",
"remaining",
"# Don't allow non-blocking sockets to be",
"# DoSed by multiple small replies.",
"if",
"chunk_no",
">=",
"max_chunks",
"and",
"not",
"self",
".",
"blocking",
":",
"break",
"try",
":",
"chunk",
"=",
"self",
".",
"s",
".",
"recv",
"(",
"chunk_size",
")",
"except",
"socket",
".",
"timeout",
"as",
"e",
":",
"self",
".",
"debug_print",
"(",
"\"Get chunks timed out.\"",
")",
"self",
".",
"debug_print",
"(",
"e",
")",
"# Timeout on blocking sockets.",
"err",
"=",
"e",
".",
"args",
"[",
"0",
"]",
"self",
".",
"debug_print",
"(",
"err",
")",
"if",
"err",
"==",
"\"timed out\"",
":",
"repeat",
"=",
"0",
"break",
"except",
"ssl",
".",
"SSLError",
"as",
"e",
":",
"# Will block on non-blocking SSL sockets.",
"if",
"e",
".",
"errno",
"==",
"ssl",
".",
"SSL_ERROR_WANT_READ",
":",
"self",
".",
"debug_print",
"(",
"\"SSL_ERROR_WANT_READ\"",
")",
"break",
"else",
":",
"self",
".",
"debug_print",
"(",
"\"Get chunks ssl error\"",
")",
"self",
".",
"close",
"(",
")",
"return",
"except",
"socket",
".",
"error",
"as",
"e",
":",
"# Will block on nonblocking non-SSL sockets.",
"err",
"=",
"e",
".",
"args",
"[",
"0",
"]",
"if",
"err",
"==",
"errno",
".",
"EAGAIN",
"or",
"err",
"==",
"errno",
".",
"EWOULDBLOCK",
":",
"break",
"else",
":",
"# Connection closed or other problem.",
"self",
".",
"debug_print",
"(",
"\"get chunks other closing\"",
")",
"self",
".",
"close",
"(",
")",
"return",
"else",
":",
"if",
"chunk",
"==",
"b\"\"",
":",
"self",
".",
"close",
"(",
")",
"return",
"# Avoid decoding errors.",
"self",
".",
"buf",
"+=",
"chunk",
"# Otherwise the loop will be endless.",
"if",
"self",
".",
"blocking",
":",
"break",
"# Used to avoid DoS of small packets.",
"chunk_no",
"+=",
"1",
"# Repeat is already set -- manual skip.",
"if",
"not",
"repeat",
":",
"break",
"else",
":",
"repeat",
"=",
"0",
"# Block until there's a full reply or there's a timeout.",
"if",
"self",
".",
"blocking",
":",
"if",
"fixed_limit",
"is",
"None",
":",
"# Partial response.",
"if",
"self",
".",
"delimiter",
"not",
"in",
"self",
".",
"buf",
":",
"repeat",
"=",
"1",
"time",
".",
"sleep",
"(",
"wait",
")"
] | This is the function which handles retrieving new data chunks. It's
main logic is avoiding a recv call blocking forever and halting
the program flow. To do this, it manages errors and keeps an eye
on the buffer to avoid overflows and DoS attacks.
http://stackoverflow.com/questions/16745409/what-does-pythons-socket-recv-return-for-non-blocking-sockets-if-no-data-is-r
http://stackoverflow.com/questions/3187565/select-and-ssl-in-python | [
"This",
"is",
"the",
"function",
"which",
"handles",
"retrieving",
"new",
"data",
"chunks",
".",
"It",
"s",
"main",
"logic",
"is",
"avoiding",
"a",
"recv",
"call",
"blocking",
"forever",
"and",
"halting",
"the",
"program",
"flow",
".",
"To",
"do",
"this",
"it",
"manages",
"errors",
"and",
"keeps",
"an",
"eye",
"on",
"the",
"buffer",
"to",
"avoid",
"overflows",
"and",
"DoS",
"attacks",
"."
sha: 7024208c3af20511496a652ff212f54c420e0464 | url: https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/sock.py#L270-L369
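The three recv outcomes this method distinguishes can be seen on a bare socket pair: data, an EAGAIN/EWOULDBLOCK error meaning "nothing yet", and `b""` meaning the peer closed. A small sketch:

```python
import errno
import select
import socket

a, b = socket.socketpair()
b.setblocking(False)

try:
    b.recv(1024)                   # nothing sent yet
except socket.error as e:
    assert e.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK)

a.send(b"hello")
select.select([b], [], [], 1.0)    # wait until readable
print(b.recv(1024))                # b'hello'

a.close()
select.select([b], [], [], 1.0)
print(b.recv(1024))                # b'' -- the peer has disconnected
```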
1,912 | StorjOld/pyp2p | pyp2p/net.py | Net.validate_node | python

```python
def validate_node(self, node_ip, node_port=None, same_nodes=1):
    self.debug_print("Validating: " + node_ip)

    # Is this a valid IP?
    if not is_ip_valid(node_ip) or node_ip == "0.0.0.0":
        self.debug_print("Invalid node ip in validate node")
        return 0

    # Is this a valid port?
    if node_port != 0 and node_port is not None:
        if not is_valid_port(node_port):
            self.debug_print("Invalid node port in validate port")
            return 0

    # Don't accept connections from self to the passive server
    # or connections to already connected nodes.
    if not self.enable_duplicate_ip_cons:
        # Don't connect to ourself.
        if (node_ip == "127.0.0.1" or
                node_ip == get_lan_ip(self.interface) or
                node_ip == self.wan_ip):
            self.debug_print("Cannot connect to ourself.")
            return 0

        # No, really: don't connect to ourself.
        if node_ip == self.passive_bind and node_port == self.passive_port:
            self.debug_print("Error connecting to same listen server.")
            return 0

        # Don't connect to same nodes.
        if same_nodes:
            for node in self.outbound + self.inbound:
                try:
                    addr, port = node["con"].s.getpeername()
                    if node_ip == addr:
                        self.debug_print("Already connected to this node.")
                        return 0
                except Exception as e:
                    print(e)
                    return 0

    return 1
```
"def",
"validate_node",
"(",
"self",
",",
"node_ip",
",",
"node_port",
"=",
"None",
",",
"same_nodes",
"=",
"1",
")",
":",
"self",
".",
"debug_print",
"(",
"\"Validating: \"",
"+",
"node_ip",
")",
"# Is this a valid IP?\r",
"if",
"not",
"is_ip_valid",
"(",
"node_ip",
")",
"or",
"node_ip",
"==",
"\"0.0.0.0\"",
":",
"self",
".",
"debug_print",
"(",
"\"Invalid node ip in validate node\"",
")",
"return",
"0",
"# Is this a valid port?\r",
"if",
"node_port",
"!=",
"0",
"and",
"node_port",
"is",
"not",
"None",
":",
"if",
"not",
"is_valid_port",
"(",
"node_port",
")",
":",
"self",
".",
"debug_print",
"(",
"\"Invalid node port in validate port\"",
")",
"return",
"0",
"if",
"not",
"self",
".",
"enable_duplicate_ip_cons",
":",
"# Don't connect to ourself.\r",
"if",
"(",
"node_ip",
"==",
"\"127.0.0.1\"",
"or",
"node_ip",
"==",
"get_lan_ip",
"(",
"self",
".",
"interface",
")",
"or",
"node_ip",
"==",
"self",
".",
"wan_ip",
")",
":",
"self",
".",
"debug_print",
"(",
"\"Cannot connect to ourself.\"",
")",
"return",
"0",
"# No, really: don't connect to ourself.\r",
"if",
"node_ip",
"==",
"self",
".",
"passive_bind",
"and",
"node_port",
"==",
"self",
".",
"passive_port",
":",
"self",
".",
"debug_print",
"(",
"\"Error connecting to same listen server.\"",
")",
"return",
"0",
"# Don't connect to same nodes.\r",
"if",
"same_nodes",
":",
"for",
"node",
"in",
"self",
".",
"outbound",
"+",
"self",
".",
"inbound",
":",
"try",
":",
"addr",
",",
"port",
"=",
"node",
"[",
"\"con\"",
"]",
".",
"s",
".",
"getpeername",
"(",
")",
"if",
"node_ip",
"==",
"addr",
":",
"self",
".",
"debug_print",
"(",
"\"Already connected to this node.\"",
")",
"return",
"0",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"e",
")",
"return",
"0",
"return",
"1"
] | Don't accept connections from self to passive server
or connections to already connected nodes. | [
"Don",
"t",
"accept",
"connections",
"from",
"self",
"to",
"passive",
"server",
"or",
"connections",
"to",
"already",
"connected",
"nodes",
"."
sha: 7024208c3af20511496a652ff212f54c420e0464 | url: https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/net.py#L330-L373
1,913 | StorjOld/pyp2p | pyp2p/net.py | Net.bootstrap | python

```python
def bootstrap(self):
    """
    When the software is first started, it needs to retrieve
    a list of nodes to connect to the network with. This function
    asks the server for N nodes, which consist of at least N
    passive nodes and N simultaneous nodes. The simultaneous
    nodes are prioritized if the node_type for the machine
    running this software is simultaneous, with passive nodes
    being used as a fallback. Otherwise, the node exclusively
    uses passive nodes to bootstrap.

    This algorithm is designed to preserve passive nodes'
    inbound connection slots.
    """
    # Disable bootstrap.
    if not self.enable_bootstrap:
        return None

    # Avoid hammering the rendezvous server.
    t = time.time()
    if self.last_bootstrap is not None:
        if t - self.last_bootstrap <= rendezvous_interval:
            self.debug_print("Bootstrapped recently")
            return None
    self.last_bootstrap = t
    self.debug_print("Searching for nodes to connect to.")

    try:
        connection_slots = self.max_outbound - (len(self.outbound))
        if connection_slots > 0:
            # Connect to rendezvous server.
            rendezvous_con = self.rendezvous.server_connect()

            # Retrieve random nodes to bootstrap with.
            rendezvous_con.send_line("BOOTSTRAP " +
                                     str(self.max_outbound * 2))
            choices = rendezvous_con.recv_line(timeout=2)
            if choices == "NODES EMPTY":
                rendezvous_con.close()
                self.debug_print("Node list is empty.")
                return self
            else:
                self.debug_print("Found node list.")

            # Parse node list.
            choices = re.findall(r"(?:(p|s)[:]([0-9]+[.][0-9]+[.][0-9]+[.][0-9]+)[:]([0-9]+))+\s?", choices)
            rendezvous_con.s.close()

            # Attempt to make active simultaneous connections.
            passive_nodes = []
            for node in choices:
                # Out of connection slots.
                if not connection_slots:
                    break

                # Add to list of passive nodes.
                node_type, node_ip, node_port = node
                self.debug_print(str(node))
                if node_type == "p":
                    passive_nodes.append(node)

            # Use passive to make up the remaining cons.
            i = 0
            while i < len(passive_nodes) and connection_slots > 0:
                node_type, node_ip, node_port = passive_nodes[i]
                con = self.add_node(node_ip, node_port, "passive")
                if con is not None:
                    connection_slots -= 1
                    self.debug_print("Con successful.")
                else:
                    self.debug_print("Con failed.")
                i += 1
    except Exception as e:
        self.debug_print("Unknown error in bootstrap()")
        error = parse_exception(e)
        log_exception(self.error_log_path, error)

    return self
```
"def",
"bootstrap",
"(",
"self",
")",
":",
"# Disable bootstrap.\r",
"if",
"not",
"self",
".",
"enable_bootstrap",
":",
"return",
"None",
"# Avoid raping the rendezvous server.\r",
"t",
"=",
"time",
".",
"time",
"(",
")",
"if",
"self",
".",
"last_bootstrap",
"is",
"not",
"None",
":",
"if",
"t",
"-",
"self",
".",
"last_bootstrap",
"<=",
"rendezvous_interval",
":",
"self",
".",
"debug_print",
"(",
"\"Bootstrapped recently\"",
")",
"return",
"None",
"self",
".",
"last_bootstrap",
"=",
"t",
"self",
".",
"debug_print",
"(",
"\"Searching for nodes to connect to.\"",
")",
"try",
":",
"connection_slots",
"=",
"self",
".",
"max_outbound",
"-",
"(",
"len",
"(",
"self",
".",
"outbound",
")",
")",
"if",
"connection_slots",
">",
"0",
":",
"# Connect to rendezvous server.\r",
"rendezvous_con",
"=",
"self",
".",
"rendezvous",
".",
"server_connect",
"(",
")",
"# Retrieve random nodes to bootstrap with.\r",
"rendezvous_con",
".",
"send_line",
"(",
"\"BOOTSTRAP \"",
"+",
"str",
"(",
"self",
".",
"max_outbound",
"*",
"2",
")",
")",
"choices",
"=",
"rendezvous_con",
".",
"recv_line",
"(",
"timeout",
"=",
"2",
")",
"if",
"choices",
"==",
"\"NODES EMPTY\"",
":",
"rendezvous_con",
".",
"close",
"(",
")",
"self",
".",
"debug_print",
"(",
"\"Node list is empty.\"",
")",
"return",
"self",
"else",
":",
"self",
".",
"debug_print",
"(",
"\"Found node list.\"",
")",
"# Parse node list.\r",
"choices",
"=",
"re",
".",
"findall",
"(",
"\"(?:(p|s)[:]([0-9]+[.][0-9]+[.][0-9]+[.][0-9]+)[:]([0-9]+))+\\s?\"",
",",
"choices",
")",
"rendezvous_con",
".",
"s",
".",
"close",
"(",
")",
"# Attempt to make active simultaneous connections.\r",
"passive_nodes",
"=",
"[",
"]",
"for",
"node",
"in",
"choices",
":",
"# Out of connection slots.\r",
"if",
"not",
"connection_slots",
":",
"break",
"# Add to list of passive nodes.\r",
"node_type",
",",
"node_ip",
",",
"node_port",
"=",
"node",
"self",
".",
"debug_print",
"(",
"str",
"(",
"node",
")",
")",
"if",
"node_type",
"==",
"\"p\"",
":",
"passive_nodes",
".",
"append",
"(",
"node",
")",
"# Use passive to make up the remaining cons.\r",
"i",
"=",
"0",
"while",
"i",
"<",
"len",
"(",
"passive_nodes",
")",
"and",
"connection_slots",
">",
"0",
":",
"node_type",
",",
"node_ip",
",",
"node_port",
"=",
"passive_nodes",
"[",
"i",
"]",
"con",
"=",
"self",
".",
"add_node",
"(",
"node_ip",
",",
"node_port",
",",
"\"passive\"",
")",
"if",
"con",
"is",
"not",
"None",
":",
"connection_slots",
"-=",
"1",
"self",
".",
"debug_print",
"(",
"\"Con successful.\"",
")",
"else",
":",
"self",
".",
"debug_print",
"(",
"\"Con failed.\"",
")",
"i",
"+=",
"1",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"debug_print",
"(",
"\"Unknown error in bootstrap()\"",
")",
"error",
"=",
"parse_exception",
"(",
"e",
")",
"log_exception",
"(",
"self",
".",
"error_log_path",
",",
"error",
")",
"return",
"self"
] | When the software is first started, it needs to retrieve
a list of nodes to connect to the network to. This function
asks the server for N nodes which consists of at least N
passive nodes and N simultaneous nodes. The simultaneous
nodes are prioritized if the node_type for the machine
running this software is simultaneous, with passive nodes
being used as a fallback. Otherwise, the node exclusively
uses passive nodes to bootstrap.
This algorithm is designed to preserve passive node's
inbound connection slots. | [
"When",
"the",
"software",
"is",
"first",
"started",
"it",
"needs",
"to",
"retrieve",
"a",
"list",
"of",
"nodes",
"to",
"connect",
"to",
"the",
"network",
"to",
".",
"This",
"function",
"asks",
"the",
"server",
"for",
"N",
"nodes",
"which",
"consists",
"of",
"at",
"least",
"N",
"passive",
"nodes",
"and",
"N",
"simultaneous",
"nodes",
".",
"The",
"simultaneous",
"nodes",
"are",
"prioritized",
"if",
"the",
"node_type",
"for",
"the",
"machine",
"running",
"this",
"software",
"is",
"simultaneous",
"with",
"passive",
"nodes",
"being",
"used",
"as",
"a",
"fallback",
".",
"Otherwise",
"the",
"node",
"exclusively",
"uses",
"passive",
"nodes",
"to",
"bootstrap",
".",
"This",
"algorithm",
"is",
"designed",
"to",
"preserve",
"passive",
"node",
"s",
"inbound",
"connection",
"slots",
"."
sha: 7024208c3af20511496a652ff212f54c420e0464 | url: https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/net.py#L481-L560
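What the node-list regex extracts from a server reply, run against sample data (the addresses are made up):

```python
import re

reply = "p:203.0.113.5:44444 s:203.0.113.9:44445 "
nodes = re.findall(
    r"(?:(p|s)[:]([0-9]+[.][0-9]+[.][0-9]+[.][0-9]+)[:]([0-9]+))+\s?",
    reply,
)
print(nodes)  # [('p', '203.0.113.5', '44444'), ('s', '203.0.113.9', '44445')]
```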
1,914 | StorjOld/pyp2p | pyp2p/net.py | Net.advertise | python

```python
def advertise(self):
    """
    This function tells the rendezvous server that our node is ready to
    accept connections from other nodes on the P2P network that run the
    bootstrap function. It's only used when net_type == p2p
    """
    # Advertise is disabled.
    if not self.enable_advertise:
        self.debug_print("Advertise is disabled!")
        return None

    # Direct net server is reserved for direct connections only.
    if self.net_type == "direct" and self.node_type == "passive":
        return None

    # Net isn't started!
    if not self.is_net_started:
        raise Exception("Please call start() before you call advertise()")

    # Avoid hammering the rendezvous server with excessive requests.
    t = time.time()
    if self.last_advertise is not None:
        if t - self.last_advertise <= advertise_interval:
            return None

        if len(self.inbound) >= self.min_connected:
            return None
    self.last_advertise = t

    # Tell rendezvous server to list us.
    try:
        # We're a passive node.
        if self.node_type == "passive" and\
                self.passive_port is not None and\
                self.enable_advertise:
            self.rendezvous.passive_listen(self.passive_port,
                                           self.max_inbound)

        # Simultaneous open is only used as a fail-safe for connections to
        # nodes on the direct_net, and only direct_net can list itself as
        # simultaneous, so it's safe to leave this enabled.
        if self.node_type == "simultaneous":
            self.rendezvous.simultaneous_listen()
    except Exception as e:
        error = parse_exception(e)
        log_exception(self.error_log_path, error)

    return self
```
"def",
"advertise",
"(",
"self",
")",
":",
"# Advertise is disabled.\r",
"if",
"not",
"self",
".",
"enable_advertise",
":",
"self",
".",
"debug_print",
"(",
"\"Advertise is disbled!\"",
")",
"return",
"None",
"# Direct net server is reserved for direct connections only.\r",
"if",
"self",
".",
"net_type",
"==",
"\"direct\"",
"and",
"self",
".",
"node_type",
"==",
"\"passive\"",
":",
"return",
"None",
"# Net isn't started!.\r",
"if",
"not",
"self",
".",
"is_net_started",
":",
"raise",
"Exception",
"(",
"\"Please call start() before you call advertise()\"",
")",
"# Avoid raping the rendezvous server with excessive requests.\r",
"t",
"=",
"time",
".",
"time",
"(",
")",
"if",
"self",
".",
"last_advertise",
"is",
"not",
"None",
":",
"if",
"t",
"-",
"self",
".",
"last_advertise",
"<=",
"advertise_interval",
":",
"return",
"None",
"if",
"len",
"(",
"self",
".",
"inbound",
")",
">=",
"self",
".",
"min_connected",
":",
"return",
"None",
"self",
".",
"last_advertise",
"=",
"t",
"# Tell rendezvous server to list us.\r",
"try",
":",
"# We're a passive node.\r",
"if",
"self",
".",
"node_type",
"==",
"\"passive\"",
"and",
"self",
".",
"passive_port",
"is",
"not",
"None",
"and",
"self",
".",
"enable_advertise",
":",
"self",
".",
"rendezvous",
".",
"passive_listen",
"(",
"self",
".",
"passive_port",
",",
"self",
".",
"max_inbound",
")",
"\"\"\"\r\n Simultaneous open is only used as a fail-safe for connections to\r\n nodes on the direct_net and only direct_net can list itself as\r\n simultaneous so its safe to leave this enabled.\r\n \"\"\"",
"if",
"self",
".",
"node_type",
"==",
"\"simultaneous\"",
":",
"self",
".",
"rendezvous",
".",
"simultaneous_listen",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"error",
"=",
"parse_exception",
"(",
"e",
")",
"log_exception",
"(",
"self",
".",
"error_log_path",
",",
"error",
")",
"return",
"self"
] | This function tells the rendezvous server that our node is ready to
accept connections from other nodes on the P2P network that run the
bootstrap function. It's only used when net_type == p2p | [
"This",
"function",
"tells",
"the",
"rendezvous",
"server",
"that",
"our",
"node",
"is",
"ready",
"to",
"accept",
"connections",
"from",
"other",
"nodes",
"on",
"the",
"P2P",
"network",
"that",
"run",
"the",
"bootstrap",
"function",
".",
"It",
"s",
"only",
"used",
"when",
"net_type",
"==",
"p2p"
sha: 7024208c3af20511496a652ff212f54c420e0464 | url: https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/net.py#L562-L613
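Both `bootstrap` and `advertise` guard themselves with the same timestamp throttle. Factored out as a hypothetical helper, the pattern is just:

```python
import time

def due(last, interval):
    """True when `interval` seconds have passed since `last` (or it never ran)."""
    return last is None or time.time() - last > interval

last_advertise = None
if due(last_advertise, 60):
    last_advertise = time.time()
    # ... contact the rendezvous server here ...
```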
1,915 | StorjOld/pyp2p | pyp2p/net.py | Net.determine_node | python

```python
def determine_node(self):
    """
    Determines the type of node based on a combination of forwarding
    reachability and NAT type.
    """
    # Manually set node_type as simultaneous.
    if self.node_type == "simultaneous":
        if self.nat_type != "unknown":
            return "simultaneous"

    # Get IP of binding interface.
    unspecific_bind = ["0.0.0.0", "127.0.0.1", "localhost"]
    if self.passive_bind in unspecific_bind:
        lan_ip = get_lan_ip(self.interface)
    else:
        lan_ip = self.passive_bind

    # Passive node checks.
    if lan_ip is not None \
            and self.passive_port is not None and self.enable_forwarding:
        self.debug_print("Checking if port is forwarded.")

        # Check port isn't already forwarded.
        if is_port_forwarded(lan_ip, self.passive_port, "TCP",
                             self.forwarding_servers):
            msg = "Port already forwarded. Skipping NAT traversal."
            self.debug_print(msg)
            self.forwarding_type = "forwarded"
            return "passive"
        else:
            self.debug_print("Port is not already forwarded.")

        # Most routers.
        try:
            self.debug_print("Trying UPnP")
            UPnP(self.interface).forward_port("TCP", self.passive_port,
                                              lan_ip)
            if is_port_forwarded(lan_ip, self.passive_port, "TCP",
                                 self.forwarding_servers):
                self.forwarding_type = "UPnP"
                self.debug_print("Forwarded port with UPnP.")
            else:
                self.debug_print("UPnP failed to forward port.")
        except Exception as e:
            # Log exception.
            error = parse_exception(e)
            log_exception(self.error_log_path, error)
            self.debug_print("UPnP failed to forward port.")

        # Apple devices.
        try:
            self.debug_print("Trying NATPMP.")
            NatPMP(self.interface).forward_port("TCP",
                                                self.passive_port,
                                                lan_ip)
            if is_port_forwarded(lan_ip, self.passive_port, "TCP",
                                 self.forwarding_servers):
                self.forwarding_type = "NATPMP"
                self.debug_print("Port forwarded with NATPMP.")
            else:
                self.debug_print("Failed to forward port with NATPMP.")
                self.debug_print("Falling back on TCP hole punching or"
                                 " proxying.")
        except Exception as e:
            # Log exception.
            error = parse_exception(e)
            log_exception(self.error_log_path, error)
            self.debug_print("Failed to forward port with NATPMP.")

        # Check it worked.
        if self.forwarding_type != "manual":
            return "passive"

    # Fail-safe node types.
    if self.nat_type != "unknown":
        return "simultaneous"
    else:
        return "active"
```
"def",
"determine_node",
"(",
"self",
")",
":",
"# Manually set node_type as simultaneous.\r",
"if",
"self",
".",
"node_type",
"==",
"\"simultaneous\"",
":",
"if",
"self",
".",
"nat_type",
"!=",
"\"unknown\"",
":",
"return",
"\"simultaneous\"",
"# Get IP of binding interface.\r",
"unspecific_bind",
"=",
"[",
"\"0.0.0.0\"",
",",
"\"127.0.0.1\"",
",",
"\"localhost\"",
"]",
"if",
"self",
".",
"passive_bind",
"in",
"unspecific_bind",
":",
"lan_ip",
"=",
"get_lan_ip",
"(",
"self",
".",
"interface",
")",
"else",
":",
"lan_ip",
"=",
"self",
".",
"passive_bind",
"# Passive node checks.\r",
"if",
"lan_ip",
"is",
"not",
"None",
"and",
"self",
".",
"passive_port",
"is",
"not",
"None",
"and",
"self",
".",
"enable_forwarding",
":",
"self",
".",
"debug_print",
"(",
"\"Checking if port is forwarded.\"",
")",
"# Check port isn't already forwarded.\r",
"if",
"is_port_forwarded",
"(",
"lan_ip",
",",
"self",
".",
"passive_port",
",",
"\"TCP\"",
",",
"self",
".",
"forwarding_servers",
")",
":",
"msg",
"=",
"\"Port already forwarded. Skipping NAT traversal.\"",
"self",
".",
"debug_print",
"(",
"msg",
")",
"self",
".",
"forwarding_type",
"=",
"\"forwarded\"",
"return",
"\"passive\"",
"else",
":",
"self",
".",
"debug_print",
"(",
"\"Port is not already forwarded.\"",
")",
"# Most routers.\r",
"try",
":",
"self",
".",
"debug_print",
"(",
"\"Trying UPnP\"",
")",
"UPnP",
"(",
"self",
".",
"interface",
")",
".",
"forward_port",
"(",
"\"TCP\"",
",",
"self",
".",
"passive_port",
",",
"lan_ip",
")",
"if",
"is_port_forwarded",
"(",
"lan_ip",
",",
"self",
".",
"passive_port",
",",
"\"TCP\"",
",",
"self",
".",
"forwarding_servers",
")",
":",
"self",
".",
"forwarding_type",
"=",
"\"UPnP\"",
"self",
".",
"debug_print",
"(",
"\"Forwarded port with UPnP.\"",
")",
"else",
":",
"self",
".",
"debug_print",
"(",
"\"UPnP failed to forward port.\"",
")",
"except",
"Exception",
"as",
"e",
":",
"# Log exception.\r",
"error",
"=",
"parse_exception",
"(",
"e",
")",
"log_exception",
"(",
"self",
".",
"error_log_path",
",",
"error",
")",
"self",
".",
"debug_print",
"(",
"\"UPnP failed to forward port.\"",
")",
"# Apple devices.\r",
"try",
":",
"self",
".",
"debug_print",
"(",
"\"Trying NATPMP.\"",
")",
"NatPMP",
"(",
"self",
".",
"interface",
")",
".",
"forward_port",
"(",
"\"TCP\"",
",",
"self",
".",
"passive_port",
",",
"lan_ip",
")",
"if",
"is_port_forwarded",
"(",
"lan_ip",
",",
"self",
".",
"passive_port",
",",
"\"TCP\"",
",",
"self",
".",
"forwarding_servers",
")",
":",
"self",
".",
"forwarding_type",
"=",
"\"NATPMP\"",
"self",
".",
"debug_print",
"(",
"\"Port forwarded with NATPMP.\"",
")",
"else",
":",
"self",
".",
"debug_print",
"(",
"\"Failed to forward port with NATPMP.\"",
")",
"self",
".",
"debug_print",
"(",
"\"Falling back on TCP hole punching or\"",
"\" proxying.\"",
")",
"except",
"Exception",
"as",
"e",
":",
"# Log exception\r",
"error",
"=",
"parse_exception",
"(",
"e",
")",
"log_exception",
"(",
"self",
".",
"error_log_path",
",",
"error",
")",
"self",
".",
"debug_print",
"(",
"\"Failed to forward port with NATPMP.\"",
")",
"# Check it worked.\r",
"if",
"self",
".",
"forwarding_type",
"!=",
"\"manual\"",
":",
"return",
"\"passive\"",
"# Fail-safe node types.\r",
"if",
"self",
".",
"nat_type",
"!=",
"\"unknown\"",
":",
"return",
"\"simultaneous\"",
"else",
":",
"return",
"\"active\""
] | Determines the type of node based on a combination of forwarding
reachability and NAT type. | [
"Determines",
"the",
"type",
"of",
"node",
"based",
"on",
"a",
"combination",
"of",
"forwarding",
"reachability",
"and",
"NAT",
"type",
"."
sha: 7024208c3af20511496a652ff212f54c420e0464 | url: https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/net.py#L615-L697
1,916 | StorjOld/pyp2p | pyp2p/net.py | Net.start | def start(self):
"""
This function determines node and NAT type, saves connectivity details,
and starts any needed servers to be a part of the network. This is
usually the first function called after initialising the Net class.
"""
self.debug_print("Starting networking.")
self.debug_print("Make sure to iterate over replies if you need"
" connection alive management!")
# Register a cnt + c handler
signal.signal(signal.SIGINT, self.stop)
# Save WAN IP.
self.debug_print("WAN IP = " + str(self.wan_ip))
# Check rendezvous server is up.
try:
rendezvous_con = self.rendezvous.server_connect()
rendezvous_con.close()
except:
raise Exception("Unable to connect to rendezvous server.")
# Started no matter what
# since LAN connections are always possible.
self.start_passive_server()
# Determine NAT type.
if self.nat_type == "unknown":
self.debug_print("Determining NAT type.")
nat_type = self.rendezvous.determine_nat()
if nat_type is not None and nat_type != "unknown":
self.nat_type = nat_type
self.rendezvous.nat_type = nat_type
self.debug_print("NAT type = " + nat_type)
else:
self.debug_print("Unable to determine NAT type.")
# Check NAT type if node is simultaneous
# is manually specified.
if self.node_type == "simultaneous":
if self.nat_type not in self.rendezvous.predictable_nats:
self.debug_print("Manual setting of simultanous specified but"
" ignored since NAT does not support it.")
self.node_type = "active"
else:
# Determine node type.
self.debug_print("Determining node type.")
# No checks for manually specifying passive
# (there probably should be.)
if self.node_type == "unknown":
self.node_type = self.determine_node()
# Prevent P2P nodes from running as simultaneous.
if self.net_type == "p2p":
"""
TCP hole punching is reserved specifically for direct networks
(a net object reserved for receiving direct connections
-- p2p is for connecting to the main network. The reason for this
is you can't do multiple TCP hole punches at the same time so
reserved for direct network where it's most needed.
"""
if self.node_type == "simultaneous":
self.debug_print("Simultaneous is not allowed for P2P")
self.node_type = "active"
self.disable_simultaneous()
self.debug_print("Node type = " + self.node_type)
# Close stray cons from determine_node() tests.
self.close_cons()
# Set net started status.
self.is_net_started = 1
# Initialise our UNL details.
self.unl = UNL(
net=self,
dht_node=self.dht_node,
wan_ip=self.wan_ip
)
# Nestled calls.
return self | python | def start(self):
self.debug_print("Starting networking.")
self.debug_print("Make sure to iterate over replies if you need"
" connection alive management!")
# Register a cnt + c handler
signal.signal(signal.SIGINT, self.stop)
# Save WAN IP.
self.debug_print("WAN IP = " + str(self.wan_ip))
# Check rendezvous server is up.
try:
rendezvous_con = self.rendezvous.server_connect()
rendezvous_con.close()
except:
raise Exception("Unable to connect to rendezvous server.")
# Started no matter what
# since LAN connections are always possible.
self.start_passive_server()
# Determine NAT type.
if self.nat_type == "unknown":
self.debug_print("Determining NAT type.")
nat_type = self.rendezvous.determine_nat()
if nat_type is not None and nat_type != "unknown":
self.nat_type = nat_type
self.rendezvous.nat_type = nat_type
self.debug_print("NAT type = " + nat_type)
else:
self.debug_print("Unable to determine NAT type.")
# Check NAT type if simultaneous node type
# is manually specified.
if self.node_type == "simultaneous":
if self.nat_type not in self.rendezvous.predictable_nats:
self.debug_print("Manual setting of simultanous specified but"
" ignored since NAT does not support it.")
self.node_type = "active"
else:
# Determine node type.
self.debug_print("Determining node type.")
# No checks for manually specifying passive
# (there probably should be.)
if self.node_type == "unknown":
self.node_type = self.determine_node()
# Prevent P2P nodes from running as simultaneous.
if self.net_type == "p2p":
"""
TCP hole punching is reserved specifically for direct networks
        (a net object reserved for receiving direct connections)
        -- p2p is for connecting to the main network. The reason for this
        is you can't do multiple TCP hole punches at the same time, so
        it's reserved for the direct network where it's most needed.
"""
if self.node_type == "simultaneous":
self.debug_print("Simultaneous is not allowed for P2P")
self.node_type = "active"
self.disable_simultaneous()
self.debug_print("Node type = " + self.node_type)
# Close stray cons from determine_node() tests.
self.close_cons()
# Set net started status.
self.is_net_started = 1
# Initialise our UNL details.
self.unl = UNL(
net=self,
dht_node=self.dht_node,
wan_ip=self.wan_ip
)
# Nested calls.
return self | [
"def",
"start",
"(",
"self",
")",
":",
"self",
".",
"debug_print",
"(",
"\"Starting networking.\"",
")",
"self",
".",
"debug_print",
"(",
"\"Make sure to iterate over replies if you need\"",
"\" connection alive management!\"",
")",
"# Register a cnt + c handler\r",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGINT",
",",
"self",
".",
"stop",
")",
"# Save WAN IP.\r",
"self",
".",
"debug_print",
"(",
"\"WAN IP = \"",
"+",
"str",
"(",
"self",
".",
"wan_ip",
")",
")",
"# Check rendezvous server is up.\r",
"try",
":",
"rendezvous_con",
"=",
"self",
".",
"rendezvous",
".",
"server_connect",
"(",
")",
"rendezvous_con",
".",
"close",
"(",
")",
"except",
":",
"raise",
"Exception",
"(",
"\"Unable to connect to rendezvous server.\"",
")",
"# Started no matter what\r",
"# since LAN connections are always possible.\r",
"self",
".",
"start_passive_server",
"(",
")",
"# Determine NAT type.\r",
"if",
"self",
".",
"nat_type",
"==",
"\"unknown\"",
":",
"self",
".",
"debug_print",
"(",
"\"Determining NAT type.\"",
")",
"nat_type",
"=",
"self",
".",
"rendezvous",
".",
"determine_nat",
"(",
")",
"if",
"nat_type",
"is",
"not",
"None",
"and",
"nat_type",
"!=",
"\"unknown\"",
":",
"self",
".",
"nat_type",
"=",
"nat_type",
"self",
".",
"rendezvous",
".",
"nat_type",
"=",
"nat_type",
"self",
".",
"debug_print",
"(",
"\"NAT type = \"",
"+",
"nat_type",
")",
"else",
":",
"self",
".",
"debug_print",
"(",
"\"Unable to determine NAT type.\"",
")",
"# Check NAT type if node is simultaneous\r",
"# is manually specified.\r",
"if",
"self",
".",
"node_type",
"==",
"\"simultaneous\"",
":",
"if",
"self",
".",
"nat_type",
"not",
"in",
"self",
".",
"rendezvous",
".",
"predictable_nats",
":",
"self",
".",
"debug_print",
"(",
"\"Manual setting of simultanous specified but\"",
"\" ignored since NAT does not support it.\"",
")",
"self",
".",
"node_type",
"=",
"\"active\"",
"else",
":",
"# Determine node type.\r",
"self",
".",
"debug_print",
"(",
"\"Determining node type.\"",
")",
"# No checks for manually specifying passive\r",
"# (there probably should be.)\r",
"if",
"self",
".",
"node_type",
"==",
"\"unknown\"",
":",
"self",
".",
"node_type",
"=",
"self",
".",
"determine_node",
"(",
")",
"# Prevent P2P nodes from running as simultaneous.\r",
"if",
"self",
".",
"net_type",
"==",
"\"p2p\"",
":",
"\"\"\"\r\n TCP hole punching is reserved specifically for direct networks\r\n (a net object reserved for receiving direct connections\r\n -- p2p is for connecting to the main network. The reason for this\r\n is you can't do multiple TCP hole punches at the same time so\r\n reserved for direct network where it's most needed.\r\n \"\"\"",
"if",
"self",
".",
"node_type",
"==",
"\"simultaneous\"",
":",
"self",
".",
"debug_print",
"(",
"\"Simultaneous is not allowed for P2P\"",
")",
"self",
".",
"node_type",
"=",
"\"active\"",
"self",
".",
"disable_simultaneous",
"(",
")",
"self",
".",
"debug_print",
"(",
"\"Node type = \"",
"+",
"self",
".",
"node_type",
")",
"# Close stray cons from determine_node() tests.\r",
"self",
".",
"close_cons",
"(",
")",
"# Set net started status.\r",
"self",
".",
"is_net_started",
"=",
"1",
"# Initialise our UNL details.\r",
"self",
".",
"unl",
"=",
"UNL",
"(",
"net",
"=",
"self",
",",
"dht_node",
"=",
"self",
".",
"dht_node",
",",
"wan_ip",
"=",
"self",
".",
"wan_ip",
")",
"# Nestled calls.\r",
"return",
"self"
] | This function determines node and NAT type, saves connectivity details,
and starts any needed servers to be a part of the network. This is
usually the first function called after initialising the Net class. | [
"This",
"function",
"determines",
"node",
"and",
"NAT",
"type",
"saves",
"connectivity",
"details",
"and",
"starts",
"any",
"needed",
"servers",
"to",
"be",
"a",
"part",
"of",
"the",
"network",
".",
"This",
"is",
"usually",
"the",
"first",
"function",
"called",
"after",
"initialising",
"the",
"Net",
"class",
"."
] | 7024208c3af20511496a652ff212f54c420e0464 | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/net.py#L710-L795 |
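A hedged usage sketch for the Net.start() entry above: the constructor keyword arguments (passive_port, debug) follow the pyp2p README and are assumptions, not guarantees; the reply-iteration loop is the "connection alive management" the debug message warns about.

import time
from pyp2p.net import Net

# Assumed constructor arguments; check the pyp2p README for the full signature.
node = Net(passive_port=44444, debug=1)
node.start()  # NAT/node type detection plus passive server, as documented above
try:
    while True:
        for con in node:       # each active connection
            for reply in con:  # iterating replies keeps the connection alive
                print(reply)
        time.sleep(1)
finally:
    node.stop()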
1,917 | StorjOld/pyp2p | pyp2p/rendezvous_server.py | RendezvousProtocol.send_remote_port | def send_remote_port(self):
"""
Sends the remote port mapped for the connection.
This port is surprisingly often the same as the locally
bound port for an endpoint because a lot of NAT types
preserve the port.
"""
msg = "REMOTE TCP %s" % (str(self.transport.getPeer().port))
self.send_line(msg) | python | def send_remote_port(self):
msg = "REMOTE TCP %s" % (str(self.transport.getPeer().port))
self.send_line(msg) | [
"def",
"send_remote_port",
"(",
"self",
")",
":",
"msg",
"=",
"\"REMOTE TCP %s\"",
"%",
"(",
"str",
"(",
"self",
".",
"transport",
".",
"getPeer",
"(",
")",
".",
"port",
")",
")",
"self",
".",
"send_line",
"(",
"msg",
")"
] | Sends the remote port mapped for the connection.
This port is surprisingly often the same as the locally
bound port for an endpoint because a lot of NAT types
preserve the port. | [
"Sends",
"the",
"remote",
"port",
"mapped",
"for",
"the",
"connection",
".",
"This",
"port",
"is",
"surprisingly",
"often",
"the",
"same",
"as",
"the",
"locally",
"bound",
"port",
"for",
"an",
"endpoint",
"because",
"a",
"lot",
"of",
"NAT",
"types",
"preserve",
"the",
"port",
"."
] | 7024208c3af20511496a652ff212f54c420e0464 | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/rendezvous_server.py#L76-L84 |
1,918 | StorjOld/pyp2p | pyp2p/rendezvous_server.py | RendezvousProtocol.cleanup_candidates | def cleanup_candidates(self, node_ip):
"""
Removes old TCP hole punching candidates for a
designated node if a certain amount of time has passed
since they last connected.
"""
if node_ip in self.factory.candidates:
old_candidates = []
for candidate in self.factory.candidates[node_ip]:
elapsed = int(time.time() - candidate["time"])
if elapsed > self.challege_timeout:
old_candidates.append(candidate)
for candidate in old_candidates:
self.factory.candidates[node_ip].remove(candidate) | python | def cleanup_candidates(self, node_ip):
if node_ip in self.factory.candidates:
old_candidates = []
for candidate in self.factory.candidates[node_ip]:
elapsed = int(time.time() - candidate["time"])
if elapsed > self.challege_timeout:
old_candidates.append(candidate)
for candidate in old_candidates:
self.factory.candidates[node_ip].remove(candidate) | [
"def",
"cleanup_candidates",
"(",
"self",
",",
"node_ip",
")",
":",
"if",
"node_ip",
"in",
"self",
".",
"factory",
".",
"candidates",
":",
"old_candidates",
"=",
"[",
"]",
"for",
"candidate",
"in",
"self",
".",
"factory",
".",
"candidates",
"[",
"node_ip",
"]",
":",
"elapsed",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"-",
"candidate",
"[",
"\"time\"",
"]",
")",
"if",
"elapsed",
">",
"self",
".",
"challege_timeout",
":",
"old_candidates",
".",
"append",
"(",
"candidate",
")",
"for",
"candidate",
"in",
"old_candidates",
":",
"self",
".",
"factory",
".",
"candidates",
"[",
"node_ip",
"]",
".",
"remove",
"(",
"candidate",
")"
] | Removes old TCP hole punching candidates for a
designated node if a certain amount of time has passed
since they last connected. | [
"Removes",
"old",
"TCP",
"hole",
"punching",
"candidates",
"for",
"a",
"designated",
"node",
"if",
"a",
"certain",
"amount",
"of",
"time",
"has",
"passed",
"since",
"they",
"last",
"connected",
"."
] | 7024208c3af20511496a652ff212f54c420e0464 | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/rendezvous_server.py#L108-L122 |
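cleanup_candidates illustrates a common two-pass prune: collect expired entries first, then remove them, so the list is never mutated mid-iteration. A generic, library-free restatement:

import time

def prune_expired(entries, timeout):
    # Pass 1: collect stale entries; pass 2: remove them. Removing while
    # iterating the same list would skip elements.
    now = time.time()
    stale = [e for e in entries if now - e["time"] > timeout]
    for e in stale:
        entries.remove(e)
    return len(stale)

candidates = [{"time": time.time() - 120}, {"time": time.time()}]
assert prune_expired(candidates, 60) == 1 and len(candidates) == 1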
1,919 | StorjOld/pyp2p | pyp2p/rendezvous_server.py | RendezvousProtocol.propogate_candidates | def propogate_candidates(self, node_ip):
"""
Used to propagate new candidates to passive simultaneous
nodes.
"""
if node_ip in self.factory.candidates:
old_candidates = []
for candidate in self.factory.candidates[node_ip]:
# Not connected.
if not candidate["con"].connected:
continue
# Already sent -- updated when they accept this challenge.
if candidate["propogated"]:
continue
# Notify node of challenge from client.
msg = "CHALLENGE %s %s %s" % (
candidate["ip_addr"],
" ".join(map(str, candidate["predictions"])),
candidate["proto"])
self.factory.nodes["simultaneous"][node_ip]["con"].\
send_line(msg)
old_candidates.append(candidate) | python | def propogate_candidates(self, node_ip):
if node_ip in self.factory.candidates:
old_candidates = []
for candidate in self.factory.candidates[node_ip]:
# Not connected.
if not candidate["con"].connected:
continue
# Already sent -- updated when they accept this challenge.
if candidate["propogated"]:
continue
# Notify node of challenge from client.
msg = "CHALLENGE %s %s %s" % (
candidate["ip_addr"],
" ".join(map(str, candidate["predictions"])),
candidate["proto"])
self.factory.nodes["simultaneous"][node_ip]["con"].\
send_line(msg)
old_candidates.append(candidate) | [
"def",
"propogate_candidates",
"(",
"self",
",",
"node_ip",
")",
":",
"if",
"node_ip",
"in",
"self",
".",
"factory",
".",
"candidates",
":",
"old_candidates",
"=",
"[",
"]",
"for",
"candidate",
"in",
"self",
".",
"factory",
".",
"candidates",
"[",
"node_ip",
"]",
":",
"# Not connected.\r",
"if",
"not",
"candidate",
"[",
"\"con\"",
"]",
".",
"connected",
":",
"continue",
"# Already sent -- updated when they accept this challenge.\r",
"if",
"candidate",
"[",
"\"propogated\"",
"]",
":",
"continue",
"# Notify node of challege from client.\r",
"msg",
"=",
"\"CHALLENGE %s %s %s\"",
"%",
"(",
"candidate",
"[",
"\"ip_addr\"",
"]",
",",
"\" \"",
".",
"join",
"(",
"map",
"(",
"str",
",",
"candidate",
"[",
"\"predictions\"",
"]",
")",
")",
",",
"candidate",
"[",
"\"proto\"",
"]",
")",
"self",
".",
"factory",
".",
"nodes",
"[",
"\"simultaneous\"",
"]",
"[",
"node_ip",
"]",
"[",
"\"con\"",
"]",
".",
"send_line",
"(",
"msg",
")",
"old_candidates",
".",
"append",
"(",
"candidate",
")"
] | Used to propagate new candidates to passive simultaneous
nodes. | [
"Used",
"to",
"progate",
"new",
"candidates",
"to",
"passive",
"simultaneous",
"nodes",
"."
] | 7024208c3af20511496a652ff212f54c420e0464 | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/rendezvous_server.py#L124-L149 |
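The CHALLENGE line built above is plain space-separated text; this round trip (all values made up) shows both the formatting and the receiver-side parse:

predictions = [35000, 35001, 35002]
msg = "CHALLENGE %s %s %s" % (
    "203.0.113.7",                      # candidate's IP
    " ".join(map(str, predictions)),    # predicted external ports
    "TCP")                              # transport
cmd, ip, *ports, proto = msg.split()    # receiver's side
assert (cmd, ip, proto) == ("CHALLENGE", "203.0.113.7", "TCP")
assert [int(p) for p in ports] == predictions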
1,920 | StorjOld/pyp2p | pyp2p/rendezvous_server.py | RendezvousProtocol.synchronize_simultaneous | def synchronize_simultaneous(self, node_ip):
"""
Because adjacent mappings for certain NAT types can
be stolen by other connections, the purpose of this
function is to ensure the last connection by a passive
simultaneous node is recent compared to the time for
a candidate to increase the chance that the precited
mappings remain active for the TCP hole punching
attempt.
"""
for candidate in self.factory.candidates[node_ip]:
# Only if candidate is connected.
if not candidate["con"].connected:
continue
# Synchronise simultaneous node.
if candidate["time"] -\
self.factory.nodes["simultaneous"][node_ip]["time"] >\
self.challege_timeout:
msg = "RECONNECT"
self.factory.nodes["simultaneous"][node_ip]["con"].\
send_line(msg)
return
self.cleanup_candidates(node_ip)
self.propogate_candidates(node_ip) | python | def synchronize_simultaneous(self, node_ip):
for candidate in self.factory.candidates[node_ip]:
# Only if candidate is connected.
if not candidate["con"].connected:
continue
# Synchronise simultaneous node.
if candidate["time"] -\
self.factory.nodes["simultaneous"][node_ip]["time"] >\
self.challege_timeout:
msg = "RECONNECT"
self.factory.nodes["simultaneous"][node_ip]["con"].\
send_line(msg)
return
self.cleanup_candidates(node_ip)
self.propogate_candidates(node_ip) | [
"def",
"synchronize_simultaneous",
"(",
"self",
",",
"node_ip",
")",
":",
"for",
"candidate",
"in",
"self",
".",
"factory",
".",
"candidates",
"[",
"node_ip",
"]",
":",
"# Only if candidate is connected.\r",
"if",
"not",
"candidate",
"[",
"\"con\"",
"]",
".",
"connected",
":",
"continue",
"# Synchronise simultaneous node.\r",
"if",
"candidate",
"[",
"\"time\"",
"]",
"-",
"self",
".",
"factory",
".",
"nodes",
"[",
"\"simultaneous\"",
"]",
"[",
"node_ip",
"]",
"[",
"\"time\"",
"]",
">",
"self",
".",
"challege_timeout",
":",
"msg",
"=",
"\"RECONNECT\"",
"self",
".",
"factory",
".",
"nodes",
"[",
"\"simultaneous\"",
"]",
"[",
"node_ip",
"]",
"[",
"\"con\"",
"]",
".",
"send_line",
"(",
"msg",
")",
"return",
"self",
".",
"cleanup_candidates",
"(",
"node_ip",
")",
"self",
".",
"propogate_candidates",
"(",
"node_ip",
")"
] | Because adjacent mappings for certain NAT types can
be stolen by other connections, the purpose of this
function is to ensure the last connection by a passive
simultaneous node is recent compared to the time for
a candidate to increase the chance that the predicted
mappings remain active for the TCP hole punching
attempt. | [
"Because",
"adjacent",
"mappings",
"for",
"certain",
"NAT",
"types",
"can",
"be",
"stolen",
"by",
"other",
"connections",
"the",
"purpose",
"of",
"this",
"function",
"is",
"to",
"ensure",
"the",
"last",
"connection",
"by",
"a",
"passive",
"simultaneous",
"node",
"is",
"recent",
"compared",
"to",
"the",
"time",
"for",
"a",
"candidate",
"to",
"increase",
"the",
"chance",
"that",
"the",
"precited",
"mappings",
"remain",
"active",
"for",
"the",
"TCP",
"hole",
"punching",
"attempt",
"."
] | 7024208c3af20511496a652ff212f54c420e0464 | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/rendezvous_server.py#L151-L177 |
1,921 | StorjOld/pyp2p | pyp2p/rendezvous_server.py | RendezvousProtocol.connectionLost | def connectionLost(self, reason):
"""
Mostly handles clean-up of node + candidate structures.
Avoids memory exhaustion for a large number of connections.
"""
try:
self.connected = False
if debug:
print(self.log_entry("CLOSED =", "none"))
# Every five minutes: cleanup
t = time.time()
if time.time() - self.factory.last_cleanup >= self.cleanup:
self.factory.last_cleanup = t
# Delete old passive nodes.
old_node_ips = []
for node_ip in list(self.factory.nodes["passive"]):
passive_node = self.factory.nodes["passive"][node_ip]
# Gives enough time for passive nodes to receive clients.
if t - passive_node["time"] >= self.node_lifetime:
old_node_ips.append(node_ip)
for node_ip in old_node_ips:
del self.factory.nodes["passive"][node_ip]
# Delete old simultaneous nodes.
old_node_ips = []
for node_ip in list(self.factory.nodes["simultaneous"]):
simultaneous_node =\
self.factory.nodes["simultaneous"][node_ip]
# Gives enough time for passive nodes to receive clients.
if t - simultaneous_node["time"] >= self.node_lifetime:
old_node_ips.append(node_ip)
for node_ip in old_node_ips:
del self.factory.nodes["simultaneous"][node_ip]
# Delete old candidates and candidate structs.
old_node_ips = []
for node_ip in list(self.factory.candidates):
# Record old candidates.
old_candidates = []
for candidate in self.factory.candidates[node_ip]:
# Hole punching is ms time sensitive.
# Candidates older than this are safe to assume
# they're not needed.
if node_ip not in self.factory.nodes["simultaneous"] \
and t - candidate["time"] >= self.challenge_timeout * 5:
old_candidates.append(candidate)
# Remove old candidates.
for candidate in old_candidates:
self.factory.candidates[node_ip].remove(candidate)
# Record old node IPs.
if not len(self.factory.candidates[node_ip]) and \
node_ip not in self.factory.nodes["simultaneous"]:
old_node_ips.append(node_ip)
# Remove old node IPs.
for node_ip in old_node_ips:
del self.factory.candidates[node_ip]
except Exception as e:
error = parse_exception(e)
log_exception(error_log_path, error)
print(self.log_entry("ERROR =", error)) | python | def connectionLost(self, reason):
try:
self.connected = False
if debug:
print(self.log_entry("CLOSED =", "none"))
# Every five minutes: cleanup
t = time.time()
if time.time() - self.factory.last_cleanup >= self.cleanup:
self.factory.last_cleanup = t
# Delete old passive nodes.
old_node_ips = []
for node_ip in list(self.factory.nodes["passive"]):
passive_node = self.factory.nodes["passive"][node_ip]
# Gives enough time for passive nodes to receive clients.
if t - passive_node["time"] >= self.node_lifetime:
old_node_ips.append(node_ip)
for node_ip in old_node_ips:
del self.factory.nodes["passive"][node_ip]
# Delete old simultaneous nodes.
old_node_ips = []
for node_ip in list(self.factory.nodes["simultaneous"]):
simultaneous_node =\
self.factory.nodes["simultaneous"][node_ip]
# Gives enough time for passive nodes to receive clients.
if t - simultaneous_node["time"] >= self.node_lifetime:
old_node_ips.append(node_ip)
for node_ip in old_node_ips:
del self.factory.nodes["simultaneous"][node_ip]
# Delete old candidates and candidate structs.
old_node_ips = []
for node_ip in list(self.factory.candidates):
# Record old candidates.
old_candidates = []
for candidate in self.factory.candidates[node_ip]:
# Hole punching is ms time sensitive.
# Candidates older than this are safe to assume
# they're not needed.
if node_ip not in self.factory.nodes["simultaneous"] \
and t - candidate["time"] >= self.challenge_timeout * 5:
old_candidates.append(candidate)
# Remove old candidates.
for candidate in old_candidates:
self.factory.candidates[node_ip].remove(candidate)
# Record old node IPs.
if not len(self.factory.candidates[node_ip]) and \
node_ip not in self.factory.nodes["simultaneous"]:
old_node_ips.append(node_ip)
# Remove old node IPs.
for node_ip in old_node_ips:
del self.factory.candidates[node_ip]
except Exception as e:
error = parse_exception(e)
log_exception(error_log_path, error)
print(self.log_entry("ERROR =", error)) | [
"def",
"connectionLost",
"(",
"self",
",",
"reason",
")",
":",
"try",
":",
"self",
".",
"connected",
"=",
"False",
"if",
"debug",
":",
"print",
"(",
"self",
".",
"log_entry",
"(",
"\"CLOSED =\"",
",",
"\"none\"",
")",
")",
"# Every five minutes: cleanup\r",
"t",
"=",
"time",
".",
"time",
"(",
")",
"if",
"time",
".",
"time",
"(",
")",
"-",
"self",
".",
"factory",
".",
"last_cleanup",
">=",
"self",
".",
"cleanup",
":",
"self",
".",
"factory",
".",
"last_cleanup",
"=",
"t",
"# Delete old passive nodes.\r",
"old_node_ips",
"=",
"[",
"]",
"for",
"node_ip",
"in",
"list",
"(",
"self",
".",
"factory",
".",
"nodes",
"[",
"\"passive\"",
"]",
")",
":",
"passive_node",
"=",
"self",
".",
"factory",
".",
"nodes",
"[",
"\"passive\"",
"]",
"[",
"node_ip",
"]",
"# Gives enough time for passive nodes to receive clients.\r",
"if",
"t",
"-",
"passive_node",
"[",
"\"time\"",
"]",
">=",
"self",
".",
"node_lifetime",
":",
"old_node_ips",
".",
"append",
"(",
"node_ip",
")",
"for",
"node_ip",
"in",
"old_node_ips",
":",
"del",
"self",
".",
"factory",
".",
"nodes",
"[",
"\"passive\"",
"]",
"[",
"node_ip",
"]",
"# Delete old simultaneous nodes.\r",
"old_node_ips",
"=",
"[",
"]",
"for",
"node_ip",
"in",
"list",
"(",
"self",
".",
"factory",
".",
"nodes",
"[",
"\"simultaneous\"",
"]",
")",
":",
"simultaneous_node",
"=",
"self",
".",
"factory",
".",
"nodes",
"[",
"\"simultaneous\"",
"]",
"[",
"node_ip",
"]",
"# Gives enough time for passive nodes to receive clients.\r",
"if",
"t",
"-",
"simultaneous_node",
"[",
"\"time\"",
"]",
">=",
"self",
".",
"node_lifetime",
":",
"old_node_ips",
".",
"append",
"(",
"node_ip",
")",
"for",
"node_ip",
"in",
"old_node_ips",
":",
"del",
"self",
".",
"factory",
".",
"nodes",
"[",
"\"simultaneous\"",
"]",
"[",
"node_ip",
"]",
"# Delete old candidates and candidate structs.\r",
"old_node_ips",
"=",
"[",
"]",
"for",
"node_ip",
"in",
"list",
"(",
"self",
".",
"factory",
".",
"candidates",
")",
":",
"# Record old candidates.\r",
"old_candidates",
"=",
"[",
"]",
"for",
"candidate",
"in",
"self",
".",
"factory",
".",
"candidates",
"[",
"node_ip",
"]",
":",
"# Hole punching is ms time sensitive.\r",
"# Candidates older than this is safe to assume\r",
"# they're not needed.\r",
"if",
"node_ip",
"not",
"in",
"self",
".",
"factory",
".",
"nodes",
"[",
"\"simultaneous\"",
"]",
"and",
"t",
"-",
"candidate",
"[",
"\"time\"",
"]",
">=",
"self",
".",
"challenge_timeout",
"*",
"5",
":",
"old_candidates",
".",
"append",
"(",
"candidate",
")",
"# Remove old candidates.\r",
"for",
"candidate",
"in",
"old_candidates",
":",
"self",
".",
"factory",
".",
"candidates",
"[",
"node_ip",
"]",
".",
"remove",
"(",
"candidate",
")",
"# Record old node IPs.\r",
"if",
"not",
"len",
"(",
"self",
".",
"factory",
".",
"candidates",
"[",
"node_ip",
"]",
")",
"and",
"node_ip",
"not",
"in",
"self",
".",
"factory",
".",
"nodes",
"[",
"\"simultaneous\"",
"]",
":",
"old_node_ips",
".",
"append",
"(",
"node_ip",
")",
"# Remove old node IPs.\r",
"for",
"node_ip",
"in",
"old_node_ips",
":",
"del",
"self",
".",
"factory",
".",
"candidates",
"[",
"node_ip",
"]",
"except",
"Exception",
"as",
"e",
":",
"error",
"=",
"parse_exception",
"(",
"e",
")",
"log_exception",
"(",
"error_log_path",
",",
"error",
")",
"print",
"(",
"self",
".",
"log_entry",
"(",
"\"ERROR =\"",
",",
"error",
")",
")"
] | Mostly handles clean-up of node + candidate structures.
Avoids memory exhaustion for a large number of connections. | [
"Mostly",
"handles",
"clean",
"-",
"up",
"of",
"node",
"+",
"candidate",
"structures",
".",
"Avoids",
"memory",
"exhaustion",
"for",
"a",
"large",
"number",
"of",
"connections",
"."
] | 7024208c3af20511496a652ff212f54c420e0464 | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/rendezvous_server.py#L197-L261 |
1,922 | StorjOld/pyp2p | pyp2p/ipgetter.py | IPgetter.get_external_ip | def get_external_ip(self):
"""
This function gets your IP from a random server
"""
random.shuffle(self.server_list)
myip = ''
for server in self.server_list[:3]:
myip = self.fetch(server)
if myip != '':
return myip
else:
continue
return '' | python | def get_external_ip(self):
random.shuffle(self.server_list)
myip = ''
for server in self.server_list[:3]:
myip = self.fetch(server)
if myip != '':
return myip
else:
continue
return '' | [
"def",
"get_external_ip",
"(",
"self",
")",
":",
"random",
".",
"shuffle",
"(",
"self",
".",
"server_list",
")",
"myip",
"=",
"''",
"for",
"server",
"in",
"self",
".",
"server_list",
"[",
":",
"3",
"]",
":",
"myip",
"=",
"self",
".",
"fetch",
"(",
"server",
")",
"if",
"myip",
"!=",
"''",
":",
"return",
"myip",
"else",
":",
"continue",
"return",
"''"
] | This function gets your IP from a random server | [
"This",
"function",
"gets",
"your",
"IP",
"from",
"a",
"random",
"server"
] | 7024208c3af20511496a652ff212f54c420e0464 | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/ipgetter.py#L109-L122 |
1,923 | StorjOld/pyp2p | pyp2p/ipgetter.py | IPgetter.fetch | def fetch(self, server):
"""
This function gets your IP from a specific server
"""
t = None
socket_default_timeout = socket.getdefaulttimeout()
opener = urllib.build_opener()
opener.addheaders = [('User-agent',
"Mozilla/5.0 (X11; Linux x86_64; rv:24.0)"
" Gecko/20100101 Firefox/24.0")]
try:
# Close url resource if fetching not finished within timeout.
t = Timer(self.timeout, self.handle_timeout, [self.url])
t.start()
# Open URL.
if version_info[0:2] == (2, 5):
# Support for Python 2.5.* using socket hack
# (Changes global socket timeout.)
socket.setdefaulttimeout(self.timeout)
self.url = opener.open(server)
else:
self.url = opener.open(server, timeout=self.timeout)
# Read response.
content = self.url.read()
# Didn't want to import chardet. Preferred to stick to stdlib
if PY3K:
try:
content = content.decode('UTF-8')
except UnicodeDecodeError:
content = content.decode('ISO-8859-1')
p = '(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.('
p += '25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|['
p += '01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)'
m = re.search(
p,
content)
myip = m.group(0)
if len(myip) > 0:
return myip
else:
return ''
except Exception as e:
print(e)
return ''
finally:
if self.url is not None:
self.url.close()
self.url = None
if t is not None:
t.cancel()
# Reset default socket timeout.
if socket.getdefaulttimeout() != socket_default_timeout:
socket.setdefaulttimeout(socket_default_timeout) | python | def fetch(self, server):
t = None
socket_default_timeout = socket.getdefaulttimeout()
opener = urllib.build_opener()
opener.addheaders = [('User-agent',
"Mozilla/5.0 (X11; Linux x86_64; rv:24.0)"
" Gecko/20100101 Firefox/24.0")]
try:
# Close url resource if fetching not finished within timeout.
t = Timer(self.timeout, self.handle_timeout, [self.url])
t.start()
# Open URL.
if version_info[0:2] == (2, 5):
# Support for Python 2.5.* using socket hack
# (Changes global socket timeout.)
socket.setdefaulttimeout(self.timeout)
self.url = opener.open(server)
else:
self.url = opener.open(server, timeout=self.timeout)
# Read response.
content = self.url.read()
# Didn't want to import chardet. Preferred to stick to stdlib
if PY3K:
try:
content = content.decode('UTF-8')
except UnicodeDecodeError:
content = content.decode('ISO-8859-1')
p = '(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.('
p += '25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|['
p += '01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)'
m = re.search(
p,
content)
myip = m.group(0)
if len(myip) > 0:
return myip
else:
return ''
except Exception as e:
print(e)
return ''
finally:
if self.url is not None:
self.url.close()
self.url = None
if t is not None:
t.cancel()
# Reset default socket timeout.
if socket.getdefaulttimeout() != socket_default_timeout:
socket.setdefaulttimeout(socket_default_timeout) | [
"def",
"fetch",
"(",
"self",
",",
"server",
")",
":",
"t",
"=",
"None",
"socket_default_timeout",
"=",
"socket",
".",
"getdefaulttimeout",
"(",
")",
"opener",
"=",
"urllib",
".",
"build_opener",
"(",
")",
"opener",
".",
"addheaders",
"=",
"[",
"(",
"'User-agent'",
",",
"\"Mozilla/5.0 (X11; Linux x86_64; rv:24.0)\"",
"\" Gecko/20100101 Firefox/24.0\"",
")",
"]",
"try",
":",
"# Close url resource if fetching not finished within timeout.\r",
"t",
"=",
"Timer",
"(",
"self",
".",
"timeout",
",",
"self",
".",
"handle_timeout",
",",
"[",
"self",
".",
"url",
"]",
")",
"t",
".",
"start",
"(",
")",
"# Open URL.\r",
"if",
"version_info",
"[",
"0",
":",
"2",
"]",
"==",
"(",
"2",
",",
"5",
")",
":",
"# Support for Python 2.5.* using socket hack\r",
"# (Changes global socket timeout.)\r",
"socket",
".",
"setdefaulttimeout",
"(",
"self",
".",
"timeout",
")",
"self",
".",
"url",
"=",
"opener",
".",
"open",
"(",
"server",
")",
"else",
":",
"self",
".",
"url",
"=",
"opener",
".",
"open",
"(",
"server",
",",
"timeout",
"=",
"self",
".",
"timeout",
")",
"# Read response.\r",
"content",
"=",
"self",
".",
"url",
".",
"read",
"(",
")",
"# Didn't want to import chardet. Prefered to stick to stdlib\r",
"if",
"PY3K",
":",
"try",
":",
"content",
"=",
"content",
".",
"decode",
"(",
"'UTF-8'",
")",
"except",
"UnicodeDecodeError",
":",
"content",
"=",
"content",
".",
"decode",
"(",
"'ISO-8859-1'",
")",
"p",
"=",
"'(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.('",
"p",
"+=",
"'25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|['",
"p",
"+=",
"'01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)'",
"m",
"=",
"re",
".",
"search",
"(",
"p",
",",
"content",
")",
"myip",
"=",
"m",
".",
"group",
"(",
"0",
")",
"if",
"len",
"(",
"myip",
")",
">",
"0",
":",
"return",
"myip",
"else",
":",
"return",
"''",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"e",
")",
"return",
"''",
"finally",
":",
"if",
"self",
".",
"url",
"is",
"not",
"None",
":",
"self",
".",
"url",
".",
"close",
"(",
")",
"self",
".",
"url",
"=",
"None",
"if",
"t",
"is",
"not",
"None",
":",
"t",
".",
"cancel",
"(",
")",
"# Reset default socket timeout.\r",
"if",
"socket",
".",
"getdefaulttimeout",
"(",
")",
"!=",
"socket_default_timeout",
":",
"socket",
".",
"setdefaulttimeout",
"(",
"socket_default_timeout",
")"
] | This function gets your IP from a specific server | [
"This",
"function",
"gets",
"your",
"IP",
"from",
"a",
"specific",
"server"
] | 7024208c3af20511496a652ff212f54c420e0464 | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/ipgetter.py#L129-L187 |
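The octet-by-octet regex assembled in fetch() is hard to audit; an equivalent approach (a sketch, not pyp2p code) is to match any dotted quad and let the stdlib validate the 0-255 ranges:

import ipaddress
import re

def extract_ipv4(text):
    # Grab dotted-quad candidates, then let IPv4Address reject bad octets.
    for match in re.findall(r"\d{1,3}(?:\.\d{1,3}){3}", text):
        try:
            return str(ipaddress.IPv4Address(match))
        except ipaddress.AddressValueError:
            continue
    return ""

assert extract_ipv4("Your IP is 203.0.113.7, thanks!") == "203.0.113.7"
assert extract_ipv4("bogus 999.1.1.1 only") == ""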
1,924 | StorjOld/pyp2p | pyp2p/unl.py | UNL.connect | def connect(self, their_unl, events, force_master=1, hairpin=1,
nonce="0" * 64):
"""
A new thread is spawned because many of the connection techniques
rely on sleep to determine connection outcome or to synchronise hole
punching techniques. If the sleep is in its own thread it won't
block main execution.
"""
parms = (their_unl, events, force_master, hairpin, nonce)
t = Thread(target=self.connect_handler, args=parms)
t.start()
self.unl_threads.append(t) | python | def connect(self, their_unl, events, force_master=1, hairpin=1,
nonce="0" * 64):
parms = (their_unl, events, force_master, hairpin, nonce)
t = Thread(target=self.connect_handler, args=parms)
t.start()
self.unl_threads.append(t) | [
"def",
"connect",
"(",
"self",
",",
"their_unl",
",",
"events",
",",
"force_master",
"=",
"1",
",",
"hairpin",
"=",
"1",
",",
"nonce",
"=",
"\"0\"",
"*",
"64",
")",
":",
"parms",
"=",
"(",
"their_unl",
",",
"events",
",",
"force_master",
",",
"hairpin",
",",
"nonce",
")",
"t",
"=",
"Thread",
"(",
"target",
"=",
"self",
".",
"connect_handler",
",",
"args",
"=",
"parms",
")",
"t",
".",
"start",
"(",
")",
"self",
".",
"unl_threads",
".",
"append",
"(",
"t",
")"
] | A new thread is spawned because many of the connection techniques
rely on sleep to determine connection outcome or to synchronise hole
punching techniques. If the sleep is in its own thread it won't
block main execution. | [
"A",
"new",
"thread",
"is",
"spawned",
"because",
"many",
"of",
"the",
"connection",
"techniques",
"rely",
"on",
"sleep",
"to",
"determine",
"connection",
"outcome",
"or",
"to",
"synchronise",
"hole",
"punching",
"techniques",
".",
"If",
"the",
"sleep",
"is",
"in",
"its",
"own",
"thread",
"it",
"won",
"t",
"block",
"main",
"execution",
"."
] | 7024208c3af20511496a652ff212f54c420e0464 | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/unl.py#L375-L386 |
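A minimal standalone sketch of the pattern UNL.connect uses -- push the sleep-heavy handler into its own thread and keep a handle to it. Names and the events dict shape are illustrative:

import threading
import time

def connect_handler(their_unl, events):
    time.sleep(0.1)              # stand-in for hole-punch timing sleeps
    events["success"](their_unl)

unl_threads = []
events = {"success": lambda unl: print("connected:", unl)}
t = threading.Thread(target=connect_handler, args=("their_unl_blob", events))
t.start()
unl_threads.append(t)            # mirrors the self.unl_threads bookkeeping
t.join()                         # demo only; the real caller doesn't block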
1,925 | StorjOld/pyp2p | pyp2p/sys_clock.py | SysClock.calculate_clock_skew | def calculate_clock_skew(self):
"""
Compute average and standard deviation
using all the data points.
"""
n = self.statx_n(self.data_points)
"""
Required to be able to compute the standard
deviation.
"""
if n < 1:
return Decimal("0")
avg = self.statx_avg(self.data_points)
sdev = self.statx_sdev(self.data_points)
"""
Incrementally remove aberration points.
"""
for k in range(0, self.clean_steps):
"""
Remove aberration points: keep only
the sigma range around the average.
"""
min_val = avg - sdev
max_val = avg + sdev
cleaned_data_points = []
for i in range(0, n):
v = self.data_points[i]
if v < min_val or v > max_val:
continue
cleaned_data_points.append(v)
self.data_points = cleaned_data_points[:]
"""
Recompute the new average using the
"sound" points we kept.
"""
n = self.statx_n(self.data_points)
"""
Not enough data to compute standard
deviation.
"""
if n < 2:
break
avg = self.statx_avg(self.data_points)
sdev = self.statx_sdev(self.data_points)
if sdev <= self.max_sdev or n < self.min_data:
break
"""
If standard deviation is too large still, we
cannot update our clock. Collect more points.
If we don't have a minimum amount of data,
don't attempt the update yet, continue collecting.
"""
if sdev > self.max_sdev or n < self.min_data:
return Decimal("0")
return avg | python | def calculate_clock_skew(self):
n = self.statx_n(self.data_points)
"""
Required to be able to compute the standard
deviation.
"""
if n < 1:
return Decimal("0")
avg = self.statx_avg(self.data_points)
sdev = self.statx_sdev(self.data_points)
"""
Incrementally remove aberration points.
"""
for k in range(0, self.clean_steps):
"""
Remove aberration points: keep only
the sigma range around the average.
"""
min_val = avg - sdev
max_val = avg + sdev
cleaned_data_points = []
for i in range(0, n):
v = self.data_points[i]
if v < min_val or v > max_val:
continue
cleaned_data_points.append(v)
self.data_points = cleaned_data_points[:]
"""
Recompute the new average using the
"sound" points we kept.
"""
n = self.statx_n(self.data_points)
"""
Not enough data to compute standard
deviation.
"""
if n < 2:
break
avg = self.statx_avg(self.data_points)
sdev = self.statx_sdev(self.data_points)
if sdev <= self.max_sdev or n < self.min_data:
break
"""
If standard deviation is too large still, we
cannot update our clock. Collect more points.
If we don't have a minimum amount of data,
don't attempt the update yet, continue collecting.
"""
if sdev > self.max_sdev or n < self.min_data:
return Decimal("0")
return avg | [
"def",
"calculate_clock_skew",
"(",
"self",
")",
":",
"n",
"=",
"self",
".",
"statx_n",
"(",
"self",
".",
"data_points",
")",
"\"\"\"\n Required to be able to compute the standard\n deviation.\n \"\"\"",
"if",
"n",
"<",
"1",
":",
"return",
"Decimal",
"(",
"\"0\"",
")",
"avg",
"=",
"self",
".",
"statx_avg",
"(",
"self",
".",
"data_points",
")",
"sdev",
"=",
"self",
".",
"statx_sdev",
"(",
"self",
".",
"data_points",
")",
"\"\"\"\n Incrementally remove aberration points.\n \"\"\"",
"for",
"k",
"in",
"range",
"(",
"0",
",",
"self",
".",
"clean_steps",
")",
":",
"\"\"\"\n Remove aberration points: keep only\n the sigma range around the average.\n \"\"\"",
"min_val",
"=",
"avg",
"-",
"sdev",
"max_val",
"=",
"avg",
"+",
"sdev",
"cleaned_data_points",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"n",
")",
":",
"v",
"=",
"self",
".",
"data_points",
"[",
"i",
"]",
"if",
"v",
"<",
"min_val",
"or",
"v",
">",
"max_val",
":",
"continue",
"cleaned_data_points",
".",
"append",
"(",
"v",
")",
"self",
".",
"data_points",
"=",
"cleaned_data_points",
"[",
":",
"]",
"\"\"\"\n Recompute the new average using the\n \"sound\" points we kept.\n \"\"\"",
"n",
"=",
"self",
".",
"statx_n",
"(",
"self",
".",
"data_points",
")",
"\"\"\"\n Not enough data to compute standard\n deviation.\n \"\"\"",
"if",
"n",
"<",
"2",
":",
"break",
"avg",
"=",
"self",
".",
"statx_avg",
"(",
"self",
".",
"data_points",
")",
"sdev",
"=",
"self",
".",
"statx_sdev",
"(",
"self",
".",
"data_points",
")",
"if",
"sdev",
"<=",
"self",
".",
"max_sdev",
"or",
"n",
"<",
"self",
".",
"min_data",
":",
"break",
"\"\"\"\n If standard deviation is too large still, we\n cannot update our clock. Collect more points.\n\n If we don't have a minimum amount of data,\n don't attempt the update yet, continue collecting.\n \"\"\"",
"if",
"sdev",
">",
"self",
".",
"max_sdev",
"or",
"n",
"<",
"self",
".",
"min_data",
":",
"return",
"Decimal",
"(",
"\"0\"",
")",
"return",
"avg"
] | Compute average and standard deviation
using all the data points. | [
"Computer",
"average",
"and",
"standard",
"deviation",
"using",
"all",
"the",
"data",
"points",
"."
] | 7024208c3af20511496a652ff212f54c420e0464 | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/sys_clock.py#L66-L132 |
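calculate_clock_skew is sigma clipping: repeatedly discard samples outside one standard deviation of the mean, then trust the mean only when the spread is small and there is enough data. A compact restatement using stdlib statistics (the thresholds below are illustrative, not pyp2p's):

from statistics import mean, stdev

def clock_skew(points, max_sdev=0.2, min_data=8, clean_steps=3):
    for _ in range(clean_steps):
        if len(points) < 2:
            break
        avg, sd = mean(points), stdev(points)
        # Keep only the sigma range around the average.
        points = [p for p in points if avg - sd <= p <= avg + sd]
        if len(points) >= 2 and stdev(points) <= max_sdev:
            break
    if len(points) < max(min_data, 2) or stdev(points) > max_sdev:
        return 0.0  # still too noisy, or not enough data: keep collecting
    return mean(points)

samples = [0.11, 0.12, 0.10, 0.13, 0.12, 0.11, 0.12, 0.13, 5.0]
print(clock_skew(samples))  # the 5.0 outlier is clipped; mean is ~0.12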
1,926 | StorjOld/pyp2p | pyp2p/rendezvous_client.py | RendezvousClient.simultaneous_listen | def simultaneous_listen(self):
"""
This function is called by passive simultaneous nodes who
wish to establish themselves as such. It sets up a connection
to the Rendezvous Server to monitor for new hole punching requests.
"""
# Close socket.
if self.server_con is not None:
self.server_con.s.close()
self.server_con = None
# Reset predictions + mappings.
self.mappings = None
self.predictions = None
# Connect to rendezvous server.
parts = self.sequential_connect()
if parts is None:
return 0
con, mappings, predictions = parts
con.blocking = 0
con.timeout = 0
con.s.settimeout(0)
self.server_con = con
self.mappings = mappings
self.predictions = predictions
# Register simultaneous node with server.
msg = "SIMULTANEOUS READY 0 0"
ret = self.server_con.send_line(msg)
if not ret:
return 0
return 1 | python | def simultaneous_listen(self):
# Close socket.
if self.server_con is not None:
self.server_con.s.close()
self.server_con = None
# Reset predictions + mappings.
self.mappings = None
self.predictions = None
# Connect to rendezvous server.
parts = self.sequential_connect()
if parts is None:
return 0
con, mappings, predictions = parts
con.blocking = 0
con.timeout = 0
con.s.settimeout(0)
self.server_con = con
self.mappings = mappings
self.predictions = predictions
# Register simultaneous node with server.
msg = "SIMULTANEOUS READY 0 0"
ret = self.server_con.send_line(msg)
if not ret:
return 0
return 1 | [
"def",
"simultaneous_listen",
"(",
"self",
")",
":",
"# Close socket.\r",
"if",
"self",
".",
"server_con",
"is",
"not",
"None",
":",
"self",
".",
"server_con",
".",
"s",
".",
"close",
"(",
")",
"self",
".",
"server_con",
"=",
"None",
"# Reset predictions + mappings.\r",
"self",
".",
"mappings",
"=",
"None",
"self",
".",
"predictions",
"=",
"None",
"# Connect to rendezvous server.\r",
"parts",
"=",
"self",
".",
"sequential_connect",
"(",
")",
"if",
"parts",
"is",
"None",
":",
"return",
"0",
"con",
",",
"mappings",
",",
"predictions",
"=",
"parts",
"con",
".",
"blocking",
"=",
"0",
"con",
".",
"timeout",
"=",
"0",
"con",
".",
"s",
".",
"settimeout",
"(",
"0",
")",
"self",
".",
"server_con",
"=",
"con",
"self",
".",
"mappings",
"=",
"mappings",
"self",
".",
"predictions",
"=",
"predictions",
"# Register simultaneous node with server.\r",
"msg",
"=",
"\"SIMULTANEOUS READY 0 0\"",
"ret",
"=",
"self",
".",
"server_con",
".",
"send_line",
"(",
"msg",
")",
"if",
"not",
"ret",
":",
"return",
"0",
"return",
"1"
] | This function is called by passive simultaneous nodes who
wish to establish themself as such. It sets up a connection
to the Rendezvous Server to monitor for new hole punching requests. | [
"This",
"function",
"is",
"called",
"by",
"passive",
"simultaneous",
"nodes",
"who",
"wish",
"to",
"establish",
"themself",
"as",
"such",
".",
"It",
"sets",
"up",
"a",
"connection",
"to",
"the",
"Rendezvous",
"Server",
"to",
"monitor",
"for",
"new",
"hole",
"punching",
"requests",
"."
] | 7024208c3af20511496a652ff212f54c420e0464 | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/rendezvous_client.py#L253-L286 |
1,927 | StorjOld/pyp2p | pyp2p/rendezvous_client.py | RendezvousClient.predict_mappings | def predict_mappings(self, mappings):
"""
This function is used to predict the remote ports that a NAT
will map a local connection to. It requires the NAT type to
be determined before use. Current support for preserving and
delta type mapping behaviour.
"""
if self.nat_type not in self.predictable_nats:
msg = "Can't predict mappings for non-predictable NAT type."
raise Exception(msg)
for mapping in mappings:
mapping["bound"] = mapping["sock"].getsockname()[1]
if self.nat_type == "preserving":
mapping["remote"] = mapping["source"]
if self.nat_type == "delta":
max_port = 65535
mapping["remote"] = int(mapping["source"]) + self.delta
# Overflow or underflow = wrap port around.
if mapping["remote"] > max_port:
mapping["remote"] -= max_port
if mapping["remote"] < 0:
mapping["remote"] = max_port - -mapping["remote"]
# Unknown error.
if mapping["remote"] < 1 or mapping["remote"] > max_port:
mapping["remote"] = 1
mapping["remote"] = str(mapping["remote"])
return mappings | python | def predict_mappings(self, mappings):
if self.nat_type not in self.predictable_nats:
msg = "Can't predict mappings for non-predictable NAT type."
raise Exception(msg)
for mapping in mappings:
mapping["bound"] = mapping["sock"].getsockname()[1]
if self.nat_type == "preserving":
mapping["remote"] = mapping["source"]
if self.nat_type == "delta":
max_port = 65535
mapping["remote"] = int(mapping["source"]) + self.delta
# Overflow or underflow = wrap port around.
if mapping["remote"] > max_port:
mapping["remote"] -= max_port
if mapping["remote"] < 0:
mapping["remote"] = max_port - -mapping["remote"]
# Unknown error.
if mapping["remote"] < 1 or mapping["remote"] > max_port:
mapping["remote"] = 1
mapping["remote"] = str(mapping["remote"])
return mappings | [
"def",
"predict_mappings",
"(",
"self",
",",
"mappings",
")",
":",
"if",
"self",
".",
"nat_type",
"not",
"in",
"self",
".",
"predictable_nats",
":",
"msg",
"=",
"\"Can't predict mappings for non-predictable NAT type.\"",
"raise",
"Exception",
"(",
"msg",
")",
"for",
"mapping",
"in",
"mappings",
":",
"mapping",
"[",
"\"bound\"",
"]",
"=",
"mapping",
"[",
"\"sock\"",
"]",
".",
"getsockname",
"(",
")",
"[",
"1",
"]",
"if",
"self",
".",
"nat_type",
"==",
"\"preserving\"",
":",
"mapping",
"[",
"\"remote\"",
"]",
"=",
"mapping",
"[",
"\"source\"",
"]",
"if",
"self",
".",
"nat_type",
"==",
"\"delta\"",
":",
"max_port",
"=",
"65535",
"mapping",
"[",
"\"remote\"",
"]",
"=",
"int",
"(",
"mapping",
"[",
"\"source\"",
"]",
")",
"+",
"self",
".",
"delta",
"# Overflow or underflow = wrap port around.\r",
"if",
"mapping",
"[",
"\"remote\"",
"]",
">",
"max_port",
":",
"mapping",
"[",
"\"remote\"",
"]",
"-=",
"max_port",
"if",
"mapping",
"[",
"\"remote\"",
"]",
"<",
"0",
":",
"mapping",
"[",
"\"remote\"",
"]",
"=",
"max_port",
"-",
"-",
"mapping",
"[",
"\"remote\"",
"]",
"# Unknown error.\r",
"if",
"mapping",
"[",
"\"remote\"",
"]",
"<",
"1",
"or",
"mapping",
"[",
"\"remote\"",
"]",
">",
"max_port",
":",
"mapping",
"[",
"\"remote\"",
"]",
"=",
"1",
"mapping",
"[",
"\"remote\"",
"]",
"=",
"str",
"(",
"mapping",
"[",
"\"remote\"",
"]",
")",
"return",
"mappings"
] | This function is used to predict the remote ports that a NAT
will map a local connection to. It requires the NAT type to
be determined before use. Current support for preserving and
delta type mapping behaviour. | [
"This",
"function",
"is",
"used",
"to",
"predict",
"the",
"remote",
"ports",
"that",
"a",
"NAT",
"will",
"map",
"a",
"local",
"connection",
"to",
".",
"It",
"requires",
"the",
"NAT",
"type",
"to",
"be",
"determined",
"before",
"use",
".",
"Current",
"support",
"for",
"preserving",
"and",
"delta",
"type",
"mapping",
"behaviour",
"."
] | 7024208c3af20511496a652ff212f54c420e0464 | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/rendezvous_client.py#L298-L330 |
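The wrap-around arithmetic in predict_mappings deserves a worked example; this mirrors the entry's delta-NAT logic exactly, outside the class:

MAX_PORT = 65535

def predict_remote(source, delta):
    remote = source + delta
    if remote > MAX_PORT:          # overflow: wrap past the top
        remote -= MAX_PORT
    if remote < 0:                 # underflow: wrap below zero
        remote = MAX_PORT - -remote
    if remote < 1 or remote > MAX_PORT:
        remote = 1                 # unknown error fallback, as above
    return remote

assert predict_remote(50000, 2) == 50002   # plain delta NAT
assert predict_remote(65534, 3) == 2       # 65537 wraps to 2
assert predict_remote(4, -10) == 65529     # -6 wraps to 65529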
1,928 | StorjOld/pyp2p | pyp2p/rendezvous_client.py | RendezvousClient.parse_remote_port | def parse_remote_port(self, reply):
"""
Parses a remote port from a Rendezvous Server's
response.
"""
remote_port = re.findall("^REMOTE (TCP|UDP) ([0-9]+)$", reply)
if not len(remote_port):
remote_port = 0
else:
remote_port = int(remote_port[0][1])
if remote_port < 1 or remote_port > 65535:
remote_port = 0
return remote_port | python | def parse_remote_port(self, reply):
remote_port = re.findall("^REMOTE (TCP|UDP) ([0-9]+)$", reply)
if not len(remote_port):
remote_port = 0
else:
remote_port = int(remote_port[0][1])
if remote_port < 1 or remote_port > 65535:
remote_port = 0
return remote_port | [
"def",
"parse_remote_port",
"(",
"self",
",",
"reply",
")",
":",
"remote_port",
"=",
"re",
".",
"findall",
"(",
"\"^REMOTE (TCP|UDP) ([0-9]+)$\"",
",",
"reply",
")",
"if",
"not",
"len",
"(",
"remote_port",
")",
":",
"remote_port",
"=",
"0",
"else",
":",
"remote_port",
"=",
"int",
"(",
"remote_port",
"[",
"0",
"]",
"[",
"1",
"]",
")",
"if",
"remote_port",
"<",
"1",
"or",
"remote_port",
">",
"65535",
":",
"remote_port",
"=",
"0",
"return",
"remote_port"
] | Parses a remote port from a Rendezvous Server's
response. | [
"Parses",
"a",
"remote",
"port",
"from",
"a",
"Rendezvous",
"Server",
"s",
"response",
"."
] | 7024208c3af20511496a652ff212f54c420e0464 | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/rendezvous_client.py#L544-L557 |
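The same parse, lifted out as a free function (a sketch, not the library's public API) so the clamping behaviour can be unit-tested:

import re

def parse_remote_port(reply):
    # Pull the port out of the server's "REMOTE TCP <port>" reply and
    # clamp anything missing or out of range to 0.
    m = re.findall(r"^REMOTE (TCP|UDP) ([0-9]+)$", reply)
    port = int(m[0][1]) if m else 0
    return port if 1 <= port <= 65535 else 0

assert parse_remote_port("REMOTE TCP 35210") == 35210
assert parse_remote_port("REMOTE UDP 70000") == 0   # out of range
assert parse_remote_port("garbage") == 0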
1,929 | StorjOld/pyp2p | pyp2p/lib.py | get_unused_port | def get_unused_port(port=None):
"""Checks if port is already in use."""
if port is None or port < 1024 or port > 65535:
port = random.randint(1024, 65535)
assert(1024 <= port <= 65535)
while True:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(('', port)) # Try to open port
except socket.error as e:
if e.errno in (98, 10048): # 98, 10048 means address already bound
return get_unused_port(None)
raise e
s.close()
return port | python | def get_unused_port(port=None):
if port is None or port < 1024 or port > 65535:
port = random.randint(1024, 65535)
assert(1024 <= port <= 65535)
while True:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(('', port)) # Try to open port
except socket.error as e:
if e.errno in (98, 10048): # 98, 10048 means address already bound
return get_unused_port(None)
raise e
s.close()
return port | [
"def",
"get_unused_port",
"(",
"port",
"=",
"None",
")",
":",
"if",
"port",
"is",
"None",
"or",
"port",
"<",
"1024",
"or",
"port",
">",
"65535",
":",
"port",
"=",
"random",
".",
"randint",
"(",
"1024",
",",
"65535",
")",
"assert",
"(",
"1024",
"<=",
"port",
"<=",
"65535",
")",
"while",
"True",
":",
"s",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"try",
":",
"s",
".",
"bind",
"(",
"(",
"''",
",",
"port",
")",
")",
"# Try to open port",
"except",
"socket",
".",
"error",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"in",
"(",
"98",
",",
"10048",
")",
":",
"# 98, 10048 means address already bound",
"return",
"get_unused_port",
"(",
"None",
")",
"raise",
"e",
"s",
".",
"close",
"(",
")",
"return",
"port"
] | Returns an unused port, checking that the given port isn't already bound. | [
"Checks",
"if",
"port",
"is",
"already",
"in",
"use",
"."
] | 7024208c3af20511496a652ff212f54c420e0464 | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/lib.py#L27-L41 |
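A quick check of the contract above: the returned port is bindable at the moment of return (subject to the usual check-then-bind race), and out-of-range hints are replaced with a random port:

import socket
from pyp2p.lib import get_unused_port

port = get_unused_port(8080)   # returns 8080 if free, else a random port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", port))             # expected to succeed; see race caveat above
s.close()
assert 1024 <= get_unused_port(80) <= 65535  # 80 < 1024 -> randomised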
1,930 | StorjOld/pyp2p | pyp2p/lib.py | get_lan_ip | def get_lan_ip(interface="default"):
if sys.version_info < (3, 0, 0):
if type(interface) == str:
interface = unicode(interface)
else:
if type(interface) == bytes:
interface = interface.decode("utf-8")
# Get ID of interface that handles WAN stuff.
default_gateway = get_default_gateway(interface)
gateways = netifaces.gateways()
wan_id = None
if netifaces.AF_INET in gateways:
gw_list = gateways[netifaces.AF_INET]
for gw_info in gw_list:
if gw_info[0] == default_gateway:
wan_id = gw_info[1]
break
# Find LAN IP of interface for WAN stuff.
interfaces = netifaces.interfaces()
if wan_id in interfaces:
families = netifaces.ifaddresses(wan_id)
if netifaces.AF_INET in families:
if_info_list = families[netifaces.AF_INET]
for if_info in if_info_list:
if "addr" in if_info:
return if_info["addr"]
"""
Execution may reach here if the host is using
virtual interfaces on Linux and there are no gateways
    which suggests the host is a VPS or server. In this
    case, fall back to the routing table for the preferred source address.
"""
if platform.system() == "Linux":
if ip is not None:
return ip.routes["8.8.8.8"]["prefsrc"]
return None | python | def get_lan_ip(interface="default"):
if sys.version_info < (3, 0, 0):
if type(interface) == str:
interface = unicode(interface)
else:
if type(interface) == bytes:
interface = interface.decode("utf-8")
# Get ID of interface that handles WAN stuff.
default_gateway = get_default_gateway(interface)
gateways = netifaces.gateways()
wan_id = None
if netifaces.AF_INET in gateways:
gw_list = gateways[netifaces.AF_INET]
for gw_info in gw_list:
if gw_info[0] == default_gateway:
wan_id = gw_info[1]
break
# Find LAN IP of interface for WAN stuff.
interfaces = netifaces.interfaces()
if wan_id in interfaces:
families = netifaces.ifaddresses(wan_id)
if netifaces.AF_INET in families:
if_info_list = families[netifaces.AF_INET]
for if_info in if_info_list:
if "addr" in if_info:
return if_info["addr"]
if platform.system() == "Linux":
if ip is not None:
return ip.routes["8.8.8.8"]["prefsrc"]
return None | [
"def",
"get_lan_ip",
"(",
"interface",
"=",
"\"default\"",
")",
":",
"if",
"sys",
".",
"version_info",
"<",
"(",
"3",
",",
"0",
",",
"0",
")",
":",
"if",
"type",
"(",
"interface",
")",
"==",
"str",
":",
"interface",
"=",
"unicode",
"(",
"interface",
")",
"else",
":",
"if",
"type",
"(",
"interface",
")",
"==",
"bytes",
":",
"interface",
"=",
"interface",
".",
"decode",
"(",
"\"utf-8\"",
")",
"# Get ID of interface that handles WAN stuff.",
"default_gateway",
"=",
"get_default_gateway",
"(",
"interface",
")",
"gateways",
"=",
"netifaces",
".",
"gateways",
"(",
")",
"wan_id",
"=",
"None",
"if",
"netifaces",
".",
"AF_INET",
"in",
"gateways",
":",
"gw_list",
"=",
"gateways",
"[",
"netifaces",
".",
"AF_INET",
"]",
"for",
"gw_info",
"in",
"gw_list",
":",
"if",
"gw_info",
"[",
"0",
"]",
"==",
"default_gateway",
":",
"wan_id",
"=",
"gw_info",
"[",
"1",
"]",
"break",
"# Find LAN IP of interface for WAN stuff.",
"interfaces",
"=",
"netifaces",
".",
"interfaces",
"(",
")",
"if",
"wan_id",
"in",
"interfaces",
":",
"families",
"=",
"netifaces",
".",
"ifaddresses",
"(",
"wan_id",
")",
"if",
"netifaces",
".",
"AF_INET",
"in",
"families",
":",
"if_info_list",
"=",
"families",
"[",
"netifaces",
".",
"AF_INET",
"]",
"for",
"if_info",
"in",
"if_info_list",
":",
"if",
"\"addr\"",
"in",
"if_info",
":",
"return",
"if_info",
"[",
"\"addr\"",
"]",
"if",
"platform",
".",
"system",
"(",
")",
"==",
"\"Linux\"",
":",
"if",
"ip",
"is",
"not",
"None",
":",
"return",
"ip",
".",
"routes",
"[",
"\"8.8.8.8\"",
"]",
"[",
"\"prefsrc\"",
"]",
"return",
"None"
] | Execution may reach here if the host is using
virtual interfaces on Linux and there are no gateways
which suggests the host is a VPS or server. In this
case, fall back to the routing table for the preferred source address. | [
"Execution",
"may",
"reach",
"here",
"if",
"the",
"host",
"is",
"using",
"virtual",
"interfaces",
"on",
"Linux",
"and",
"there",
"are",
"no",
"gateways",
"which",
"suggests",
"the",
"host",
"is",
"a",
"VPS",
"or",
"server",
".",
"In",
"this",
"case"
] | 7024208c3af20511496a652ff212f54c420e0464 | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/lib.py#L179-L218 |
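The gateway-to-interface-to-address chain in get_lan_ip can be condensed with netifaces' "default" shortcut. A sketch assuming the third-party netifaces package and at least one IPv4 default route:

import netifaces

# ("gateway_ip", "iface") for the default IPv4 route; KeyError if none exists.
default_gw, wan_id = netifaces.gateways()["default"][netifaces.AF_INET]
addrs = netifaces.ifaddresses(wan_id).get(netifaces.AF_INET, [])
lan_ip = addrs[0]["addr"] if addrs else None
print(wan_id, lan_ip)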
1,931 | StorjOld/pyp2p | pyp2p/lib.py | get_wan_ip | def get_wan_ip(n=0):
"""
That IP module sucks. Occasionally it returns an IP address behind
cloudflare which probably happens when cloudflare tries to proxy your web
request because it thinks you're trying to DoS. It's better if we just run
our own infrastructure.
"""
if n == 2:
try:
ip = myip()
ip = extract_ip(ip)
if is_ip_valid(ip):
return ip
except Exception as e:
print(str(e))
return None
# Fail-safe: use centralized server for IP lookup.
from pyp2p.net import forwarding_servers
for forwarding_server in forwarding_servers:
url = "http://" + forwarding_server["addr"] + ":"
url += str(forwarding_server["port"])
url += forwarding_server["url"]
url += "?action=get_wan_ip"
try:
r = urlopen(url, timeout=5)
response = r.read().decode("utf-8")
response = extract_ip(response)
if is_ip_valid(response):
return response
except Exception as e:
print(str(e))
continue
time.sleep(1)
return get_wan_ip(n + 1) | python | def get_wan_ip(n=0):
if n == 2:
try:
ip = myip()
ip = extract_ip(ip)
if is_ip_valid(ip):
return ip
except Exception as e:
print(str(e))
return None
# Fail-safe: use centralized server for IP lookup.
from pyp2p.net import forwarding_servers
for forwarding_server in forwarding_servers:
url = "http://" + forwarding_server["addr"] + ":"
url += str(forwarding_server["port"])
url += forwarding_server["url"]
url += "?action=get_wan_ip"
try:
r = urlopen(url, timeout=5)
response = r.read().decode("utf-8")
response = extract_ip(response)
if is_ip_valid(response):
return response
except Exception as e:
print(str(e))
continue
time.sleep(1)
return get_wan_ip(n + 1) | [
"def",
"get_wan_ip",
"(",
"n",
"=",
"0",
")",
":",
"if",
"n",
"==",
"2",
":",
"try",
":",
"ip",
"=",
"myip",
"(",
")",
"ip",
"=",
"extract_ip",
"(",
"ip",
")",
"if",
"is_ip_valid",
"(",
"ip",
")",
":",
"return",
"ip",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"str",
"(",
"e",
")",
")",
"return",
"None",
"# Fail-safe: use centralized server for IP lookup.",
"from",
"pyp2p",
".",
"net",
"import",
"forwarding_servers",
"for",
"forwarding_server",
"in",
"forwarding_servers",
":",
"url",
"=",
"\"http://\"",
"+",
"forwarding_server",
"[",
"\"addr\"",
"]",
"+",
"\":\"",
"url",
"+=",
"str",
"(",
"forwarding_server",
"[",
"\"port\"",
"]",
")",
"url",
"+=",
"forwarding_server",
"[",
"\"url\"",
"]",
"url",
"+=",
"\"?action=get_wan_ip\"",
"try",
":",
"r",
"=",
"urlopen",
"(",
"url",
",",
"timeout",
"=",
"5",
")",
"response",
"=",
"r",
".",
"read",
"(",
")",
".",
"decode",
"(",
"\"utf-8\"",
")",
"response",
"=",
"extract_ip",
"(",
"response",
")",
"if",
"is_ip_valid",
"(",
"response",
")",
":",
"return",
"response",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"str",
"(",
"e",
")",
")",
"continue",
"time",
".",
"sleep",
"(",
"1",
")",
"return",
"get_wan_ip",
"(",
"n",
"+",
"1",
")"
] | That IP module sucks. Occasionally it returns an IP address behind
cloudflare which probably happens when cloudflare tries to proxy your web
request because it thinks you're trying to DoS. It's better if we just run
our own infrastructure. | [
"That",
"IP",
"module",
"sucks",
".",
"Occasionally",
"it",
"returns",
"an",
"IP",
"address",
"behind",
"cloudflare",
"which",
"probably",
"happens",
"when",
"cloudflare",
"tries",
"to",
"proxy",
"your",
"web",
"request",
"because",
"it",
"thinks",
"you",
"re",
"trying",
"to",
"DoS",
".",
"It",
"s",
"better",
"if",
"we",
"just",
"run",
"our",
"own",
"infrastructure",
"."
] | 7024208c3af20511496a652ff212f54c420e0464 | https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/lib.py#L378-L414 |
1,932 | gawel/aiocron | aiocron/__init__.py | Cron.initialize | def initialize(self):
"""Initialize croniter and related times"""
if self.croniter is None:
self.time = time.time()
self.datetime = datetime.now(self.tz)
self.loop_time = self.loop.time()
self.croniter = croniter(self.spec, start_time=self.datetime) | python | def initialize(self):
if self.croniter is None:
self.time = time.time()
self.datetime = datetime.now(self.tz)
self.loop_time = self.loop.time()
self.croniter = croniter(self.spec, start_time=self.datetime) | [
"def",
"initialize",
"(",
"self",
")",
":",
"if",
"self",
".",
"croniter",
"is",
"None",
":",
"self",
".",
"time",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"datetime",
"=",
"datetime",
".",
"now",
"(",
"self",
".",
"tz",
")",
"self",
".",
"loop_time",
"=",
"self",
".",
"loop",
".",
"time",
"(",
")",
"self",
".",
"croniter",
"=",
"croniter",
"(",
"self",
".",
"spec",
",",
"start_time",
"=",
"self",
".",
"datetime",
")"
] | Initialize croniter and related times | [
"Initialize",
"croniter",
"and",
"related",
"times"
] | 949870b2f7fe1e10e4220f3243c9d4237255d203 | https://github.com/gawel/aiocron/blob/949870b2f7fe1e10e4220f3243c9d4237255d203/aiocron/__init__.py#L63-L69 |
1,933 | gawel/aiocron | aiocron/__init__.py | Cron.get_next | def get_next(self):
"""Return next iteration time related to loop time"""
return self.loop_time + (self.croniter.get_next(float) - self.time) | python | def get_next(self):
return self.loop_time + (self.croniter.get_next(float) - self.time) | [
"def",
"get_next",
"(",
"self",
")",
":",
"return",
"self",
".",
"loop_time",
"+",
"(",
"self",
".",
"croniter",
".",
"get_next",
"(",
"float",
")",
"-",
"self",
".",
"time",
")"
] | Return next iteration time related to loop time | [
"Return",
"next",
"iteration",
"time",
"related",
"to",
"loop",
"time"
] | 949870b2f7fe1e10e4220f3243c9d4237255d203 | https://github.com/gawel/aiocron/blob/949870b2f7fe1e10e4220f3243c9d4237255d203/aiocron/__init__.py#L71-L73 |
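get_next's one-liner converts between clocks: croniter yields a wall-clock epoch, and the offset from the wall-clock anchor is added to the loop-clock anchor so the result suits loop.call_at(). Spelled out as a runnable sketch:

import asyncio
import time
from croniter import croniter

loop = asyncio.new_event_loop()
epoch_anchor = time.time()   # wall clock, as Cron.initialize records
loop_anchor = loop.time()    # monotonic event-loop clock, same instant
it = croniter("* * * * *", start_time=epoch_anchor)
next_loop_time = loop_anchor + (it.get_next(float) - epoch_anchor)
print(next_loop_time - loop_anchor)  # seconds until the next minute boundary
loop.close()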
1,934 | gawel/aiocron | aiocron/__init__.py | Cron.call_next | def call_next(self):
"""Set next hop in the loop. Call task"""
if self.handle is not None:
self.handle.cancel()
next_time = self.get_next()
self.handle = self.loop.call_at(next_time, self.call_next)
self.call_func() | python | def call_next(self):
if self.handle is not None:
self.handle.cancel()
next_time = self.get_next()
self.handle = self.loop.call_at(next_time, self.call_next)
self.call_func() | [
"def",
"call_next",
"(",
"self",
")",
":",
"if",
"self",
".",
"handle",
"is",
"not",
"None",
":",
"self",
".",
"handle",
".",
"cancel",
"(",
")",
"next_time",
"=",
"self",
".",
"get_next",
"(",
")",
"self",
".",
"handle",
"=",
"self",
".",
"loop",
".",
"call_at",
"(",
"next_time",
",",
"self",
".",
"call_next",
")",
"self",
".",
"call_func",
"(",
")"
] | Set next hop in the loop. Call task | [
"Set",
"next",
"hop",
"in",
"the",
"loop",
".",
"Call",
"task"
] | 949870b2f7fe1e10e4220f3243c9d4237255d203 | https://github.com/gawel/aiocron/blob/949870b2f7fe1e10e4220f3243c9d4237255d203/aiocron/__init__.py#L75-L81 |
1,935 | gawel/aiocron | aiocron/__init__.py | Cron.call_func | def call_func(self, *args, **kwargs):
"""Called. Take care of exceptions using gather"""
asyncio.gather(
self.cron(*args, **kwargs),
loop=self.loop, return_exceptions=True
).add_done_callback(self.set_result) | python | def call_func(self, *args, **kwargs):
asyncio.gather(
self.cron(*args, **kwargs),
loop=self.loop, return_exceptions=True
).add_done_callback(self.set_result) | [
"def",
"call_func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"asyncio",
".",
"gather",
"(",
"self",
".",
"cron",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
",",
"loop",
"=",
"self",
".",
"loop",
",",
"return_exceptions",
"=",
"True",
")",
".",
"add_done_callback",
"(",
"self",
".",
"set_result",
")"
] | Called. Take care of exceptions using gather | [
"Called",
".",
"Take",
"care",
"of",
"exceptions",
"using",
"gather"
] | 949870b2f7fe1e10e4220f3243c9d4237255d203 | https://github.com/gawel/aiocron/blob/949870b2f7fe1e10e4220f3243c9d4237255d203/aiocron/__init__.py#L83-L88 |
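Taken together, `initialize`, `get_next`, and `call_next` translate croniter's wall-clock schedule onto the event loop's monotonic clock and reschedule after every firing. A minimal sketch of that mechanism using `croniter` and `asyncio` directly (the `fire` callback and the 130-second observation window are illustrative, not part of aiocron):

```python
import asyncio
import time
from datetime import datetime
from croniter import croniter

async def main():
    loop = asyncio.get_running_loop()
    it = croniter("* * * * *", start_time=datetime.now())
    t0, l0 = time.time(), loop.time()  # pair the wall clock with the loop clock

    def fire():
        print("tick at", datetime.now())
        loop.call_at(l0 + (it.get_next(float) - t0), fire)  # set the next hop

    loop.call_at(l0 + (it.get_next(float) - t0), fire)
    await asyncio.sleep(130)  # long enough to observe two ticks

asyncio.run(main())
```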
1,936 | ppannuto/python-titlecase | titlecase/__init__.py | cmd | def cmd():
'''Handler for command line invocation'''
# Try to handle any reasonable thing thrown at this.
# Consume '-f' and '-o' as input/output, allow '-' for stdin/stdout
# and treat any subsequent arguments as a space separated string to
# be titlecased (so it still works if people forget quotes)
parser = argparse.ArgumentParser()
in_group = parser.add_mutually_exclusive_group()
in_group.add_argument('string', nargs='*', default=[],
help='String to titlecase')
in_group.add_argument('-f', '--input-file',
help='File to read from to titlecase')
parser.add_argument('-o', '--output-file',
help='File to write titlecased output to')
args = parser.parse_args()
if args.input_file is not None:
if args.input_file == '-':
ifile = sys.stdin
else:
ifile = open(args.input_file)
else:
ifile = sys.stdin
if args.output_file is not None:
if args.output_file == '-':
ofile = sys.stdout
else:
ofile = open(args.output_file, 'w')
else:
ofile = sys.stdout
if len(args.string) > 0:
in_string = ' '.join(args.string)
else:
with ifile:
in_string = ifile.read()
with ofile:
ofile.write(titlecase(in_string)) | python | def cmd():
'''Handler for command line invocation'''
# Try to handle any reasonable thing thrown at this.
# Consume '-f' and '-o' as input/output, allow '-' for stdin/stdout
# and treat any subsequent arguments as a space separated string to
# be titlecased (so it still works if people forget quotes)
parser = argparse.ArgumentParser()
in_group = parser.add_mutually_exclusive_group()
in_group.add_argument('string', nargs='*', default=[],
help='String to titlecase')
in_group.add_argument('-f', '--input-file',
help='File to read from to titlecase')
parser.add_argument('-o', '--output-file',
help='File to write titlecased output to')
args = parser.parse_args()
if args.input_file is not None:
if args.input_file == '-':
ifile = sys.stdin
else:
ifile = open(args.input_file)
else:
ifile = sys.stdin
if args.output_file is not None:
if args.output_file == '-':
ofile = sys.stdout
else:
ofile = open(args.output_file, 'w')
else:
ofile = sys.stdout
if len(args.string) > 0:
in_string = ' '.join(args.string)
else:
with ifile:
in_string = ifile.read()
with ofile:
ofile.write(titlecase(in_string)) | [
"def",
"cmd",
"(",
")",
":",
"# Try to handle any reasonable thing thrown at this.",
"# Consume '-f' and '-o' as input/output, allow '-' for stdin/stdout",
"# and treat any subsequent arguments as a space separated string to",
"# be titlecased (so it still works if people forget quotes)",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"in_group",
"=",
"parser",
".",
"add_mutually_exclusive_group",
"(",
")",
"in_group",
".",
"add_argument",
"(",
"'string'",
",",
"nargs",
"=",
"'*'",
",",
"default",
"=",
"[",
"]",
",",
"help",
"=",
"'String to titlecase'",
")",
"in_group",
".",
"add_argument",
"(",
"'-f'",
",",
"'--input-file'",
",",
"help",
"=",
"'File to read from to titlecase'",
")",
"parser",
".",
"add_argument",
"(",
"'-o'",
",",
"'--output-file'",
",",
"help",
"=",
"'File to write titlecased output to)'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"if",
"args",
".",
"input_file",
"is",
"not",
"None",
":",
"if",
"args",
".",
"input_file",
"==",
"'-'",
":",
"ifile",
"=",
"sys",
".",
"stdin",
"else",
":",
"ifile",
"=",
"open",
"(",
"args",
".",
"input_file",
")",
"else",
":",
"ifile",
"=",
"sys",
".",
"stdin",
"if",
"args",
".",
"output_file",
"is",
"not",
"None",
":",
"if",
"args",
".",
"output_file",
"==",
"'-'",
":",
"ofile",
"=",
"sys",
".",
"stdout",
"else",
":",
"ofile",
"=",
"open",
"(",
"args",
".",
"output_file",
",",
"'w'",
")",
"else",
":",
"ofile",
"=",
"sys",
".",
"stdout",
"if",
"len",
"(",
"args",
".",
"string",
")",
">",
"0",
":",
"in_string",
"=",
"' '",
".",
"join",
"(",
"args",
".",
"string",
")",
"else",
":",
"with",
"ifile",
":",
"in_string",
"=",
"ifile",
".",
"read",
"(",
")",
"with",
"ofile",
":",
"ofile",
".",
"write",
"(",
"titlecase",
"(",
"in_string",
")",
")"
] | Handler for command line invocation | [
"Handler",
"for",
"command",
"line",
"invocation"
] | 9000878d917f88030807b1bcdc04a0c37f7001ee | https://github.com/ppannuto/python-titlecase/blob/9000878d917f88030807b1bcdc04a0c37f7001ee/titlecase/__init__.py#L165-L206 |
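The handler is a thin CLI wrapper: everything it reads is passed through the package's `titlecase` function, whose library form is a one-liner:

```python
from titlecase import titlecase

print(titlecase("a guide to the titlecase module"))
# A Guide to the Titlecase Module
```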
1,937 | sobolevn/flake8-eradicate | flake8_eradicate.py | Checker.add_options | def add_options(cls, parser: OptionManager) -> None:
"""
``flake8`` api method to register new plugin options.
See :class:`.Configuration` docs for detailed options reference.
Arguments:
parser: ``flake8`` option parser instance.
"""
parser.add_option(
'--eradicate-aggressive',
default=False,
help=(
'Enables aggressive mode for eradicate; '
'this may result in false positives'
),
action='store_true',
type=None,
) | python | def add_options(cls, parser: OptionManager) -> None:
parser.add_option(
'--eradicate-aggressive',
default=False,
help=(
'Enables aggressive mode for eradicate; '
'this may result in false positives'
),
action='store_true',
type=None,
) | [
"def",
"add_options",
"(",
"cls",
",",
"parser",
":",
"OptionManager",
")",
"->",
"None",
":",
"parser",
".",
"add_option",
"(",
"'--eradicate-aggressive'",
",",
"default",
"=",
"False",
",",
"help",
"=",
"(",
"'Enables aggressive mode for eradicate; '",
"'this may result in false positives'",
")",
",",
"action",
"=",
"'store_true'",
",",
"type",
"=",
"None",
",",
")"
] | ``flake8`` api method to register new plugin options.
See :class:`.Configuration` docs for detailed options reference.
Arguments:
parser: ``flake8`` option parser instance. | [
"flake8",
"api",
"method",
"to",
"register",
"new",
"plugin",
"options",
"."
] | 0d992fae5dd3bd9014d79291a4f08b6da17d3031 | https://github.com/sobolevn/flake8-eradicate/blob/0d992fae5dd3bd9014d79291a4f08b6da17d3031/flake8_eradicate.py#L52-L71 |
1,938 | sobolevn/flake8-eradicate | flake8_eradicate.py | Checker.run | def run(self) -> Generator[Tuple[int, int, str, type], None, None]:
"""
Runs the checker.
``fix_file()`` only mutates the buffer object.
It is the only way to find out if some error happened.
"""
if self.filename != STDIN:
buffer = StringIO()
options = _Options(aggressive=self.options.eradicate_aggressive)
fix_file(self.filename, options, buffer)
traceback = buffer.getvalue()
if traceback:
yield 1, 0, self._error(traceback), type(self) | python | def run(self) -> Generator[Tuple[int, int, str, type], None, None]:
if self.filename != STDIN:
buffer = StringIO()
options = _Options(aggressive=self.options.eradicate_aggressive)
fix_file(self.filename, options, buffer)
traceback = buffer.getvalue()
if traceback:
yield 1, 0, self._error(traceback), type(self) | [
"def",
"run",
"(",
"self",
")",
"->",
"Generator",
"[",
"Tuple",
"[",
"int",
",",
"int",
",",
"str",
",",
"type",
"]",
",",
"None",
",",
"None",
"]",
":",
"if",
"self",
".",
"filename",
"!=",
"STDIN",
":",
"buffer",
"=",
"StringIO",
"(",
")",
"options",
"=",
"_Options",
"(",
"aggressive",
"=",
"self",
".",
"options",
".",
"eradicate_aggressive",
")",
"fix_file",
"(",
"self",
".",
"filename",
",",
"options",
",",
"buffer",
")",
"traceback",
"=",
"buffer",
".",
"getvalue",
"(",
")",
"if",
"traceback",
":",
"yield",
"1",
",",
"0",
",",
"self",
".",
"_error",
"(",
"traceback",
")",
",",
"type",
"(",
"self",
")"
] | Runs the checker.
``fix_file()`` only mutates the buffer object.
It is the only way to find out if some error happened. | [
"Runs",
"the",
"checker",
"."
] | 0d992fae5dd3bd9014d79291a4f08b6da17d3031 | https://github.com/sobolevn/flake8-eradicate/blob/0d992fae5dd3bd9014d79291a4f08b6da17d3031/flake8_eradicate.py#L78-L92 |
1,939 | ssanderson/interface | interface/utils.py | unique | def unique(g):
"""
Yield values yielded by ``g``, removing any duplicates.
Example
-------
>>> list(unique(iter([1, 3, 1, 2, 3])))
[1, 3, 2]
"""
yielded = set()
for value in g:
if value not in yielded:
yield value
yielded.add(value) | python | def unique(g):
yielded = set()
for value in g:
if value not in yielded:
yield value
yielded.add(value) | [
"def",
"unique",
"(",
"g",
")",
":",
"yielded",
"=",
"set",
"(",
")",
"for",
"value",
"in",
"g",
":",
"if",
"value",
"not",
"in",
"yielded",
":",
"yield",
"value",
"yielded",
".",
"add",
"(",
"value",
")"
] | Yield values yielded by ``g``, removing any duplicates.
Example
-------
>>> list(unique(iter([1, 3, 1, 2, 3])))
[1, 3, 2] | [
"Yield",
"values",
"yielded",
"by",
"g",
"removing",
"any",
"duplicates",
"."
] | b1dabab8556848fd473e388e28399886321b6127 | https://github.com/ssanderson/interface/blob/b1dabab8556848fd473e388e28399886321b6127/interface/utils.py#L6-L19 |
1,940 | ssanderson/interface | interface/interface.py | static_get_type_attr | def static_get_type_attr(t, name):
"""
Get a type attribute statically, circumventing the descriptor protocol.
"""
for type_ in t.mro():
try:
return vars(type_)[name]
except KeyError:
pass
raise AttributeError(name) | python | def static_get_type_attr(t, name):
for type_ in t.mro():
try:
return vars(type_)[name]
except KeyError:
pass
raise AttributeError(name) | [
"def",
"static_get_type_attr",
"(",
"t",
",",
"name",
")",
":",
"for",
"type_",
"in",
"t",
".",
"mro",
"(",
")",
":",
"try",
":",
"return",
"vars",
"(",
"type_",
")",
"[",
"name",
"]",
"except",
"KeyError",
":",
"pass",
"raise",
"AttributeError",
"(",
"name",
")"
] | Get a type attribute statically, circumventing the descriptor protocol. | [
"Get",
"a",
"type",
"attribute",
"statically",
"circumventing",
"the",
"descriptor",
"protocol",
"."
] | b1dabab8556848fd473e388e28399886321b6127 | https://github.com/ssanderson/interface/blob/b1dabab8556848fd473e388e28399886321b6127/interface/interface.py#L37-L46 |
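The reason for circumventing the descriptor protocol: `getattr` on a class invokes descriptors, so a `staticmethod` comes back as a bare function, whereas the raw class `__dict__` — what the MRO walk above reads — keeps the wrapper. A small illustration:

```python
class A:
    @staticmethod
    def f():
        pass

print(type(getattr(A, "f")))  # <class 'function'> -- descriptor already invoked
print(type(vars(A)["f"]))     # <class 'staticmethod'> -- raw class-dict entry
```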
1,941 | ssanderson/interface | interface/interface.py | _conflicting_defaults | def _conflicting_defaults(typename, conflicts):
"""Format an error message for conflicting default implementations.
Parameters
----------
typename : str
Name of the type for which we're producing an error.
conflicts : dict[str -> list[Interface]]
Map from strings to interfaces providing a default with that name.
Returns
-------
message : str
User-facing error message.
"""
message = "\nclass {C} received conflicting default implementations:".format(
C=typename,
)
for attrname, interfaces in conflicts.items():
message += dedent(
"""
The following interfaces provided default implementations for {attr!r}:
{interfaces}"""
).format(
attr=attrname,
interfaces=bulleted_list(sorted(map(getname, interfaces))),
)
return InvalidImplementation(message) | python | def _conflicting_defaults(typename, conflicts):
message = "\nclass {C} received conflicting default implementations:".format(
C=typename,
)
for attrname, interfaces in conflicts.items():
message += dedent(
"""
The following interfaces provided default implementations for {attr!r}:
{interfaces}"""
).format(
attr=attrname,
interfaces=bulleted_list(sorted(map(getname, interfaces))),
)
return InvalidImplementation(message) | [
"def",
"_conflicting_defaults",
"(",
"typename",
",",
"conflicts",
")",
":",
"message",
"=",
"\"\\nclass {C} received conflicting default implementations:\"",
".",
"format",
"(",
"C",
"=",
"typename",
",",
")",
"for",
"attrname",
",",
"interfaces",
"in",
"conflicts",
".",
"items",
"(",
")",
":",
"message",
"+=",
"dedent",
"(",
"\"\"\"\n\n The following interfaces provided default implementations for {attr!r}:\n {interfaces}\"\"\"",
")",
".",
"format",
"(",
"attr",
"=",
"attrname",
",",
"interfaces",
"=",
"bulleted_list",
"(",
"sorted",
"(",
"map",
"(",
"getname",
",",
"interfaces",
")",
")",
")",
",",
")",
"return",
"InvalidImplementation",
"(",
"message",
")"
] | Format an error message for conflicting default implementations.
Parameters
----------
typename : str
Name of the type for which we're producing an error.
conflicts : dict[str -> list[Interface]]
Map from strings to interfaces providing a default with that name.
Returns
-------
message : str
User-facing error message. | [
"Format",
"an",
"error",
"message",
"for",
"conflicting",
"default",
"implementations",
"."
] | b1dabab8556848fd473e388e28399886321b6127 | https://github.com/ssanderson/interface/blob/b1dabab8556848fd473e388e28399886321b6127/interface/interface.py#L49-L77 |
1,942 | ssanderson/interface | interface/interface.py | InterfaceMeta._diff_signatures | def _diff_signatures(self, type_):
"""
Diff our method signatures against the methods provided by type_.
Parameters
----------
type_ : type
The type to check.
Returns
-------
missing, mistyped, mismatched : list[str], dict[str -> type], dict[str -> signature] # noqa
``missing`` is a list of missing interface names.
``mistyped`` is a dict mapping names to incorrect types.
``mismatched`` is a dict mapping names to incorrect signatures.
"""
missing = []
mistyped = {}
mismatched = {}
for name, iface_sig in self._signatures.items():
try:
# Don't invoke the descriptor protocol here so that we get
# staticmethod/classmethod/property objects instead of the
# functions they wrap.
f = static_get_type_attr(type_, name)
except AttributeError:
missing.append(name)
continue
impl_sig = TypedSignature(f)
if not issubclass(impl_sig.type, iface_sig.type):
mistyped[name] = impl_sig.type
if not compatible(impl_sig.signature, iface_sig.signature):
mismatched[name] = impl_sig
return missing, mistyped, mismatched | python | def _diff_signatures(self, type_):
missing = []
mistyped = {}
mismatched = {}
for name, iface_sig in self._signatures.items():
try:
# Don't invoke the descriptor protocol here so that we get
# staticmethod/classmethod/property objects instead of the
# functions they wrap.
f = static_get_type_attr(type_, name)
except AttributeError:
missing.append(name)
continue
impl_sig = TypedSignature(f)
if not issubclass(impl_sig.type, iface_sig.type):
mistyped[name] = impl_sig.type
if not compatible(impl_sig.signature, iface_sig.signature):
mismatched[name] = impl_sig
return missing, mistyped, mismatched | [
"def",
"_diff_signatures",
"(",
"self",
",",
"type_",
")",
":",
"missing",
"=",
"[",
"]",
"mistyped",
"=",
"{",
"}",
"mismatched",
"=",
"{",
"}",
"for",
"name",
",",
"iface_sig",
"in",
"self",
".",
"_signatures",
".",
"items",
"(",
")",
":",
"try",
":",
"# Don't invoke the descriptor protocol here so that we get",
"# staticmethod/classmethod/property objects instead of the",
"# functions they wrap.",
"f",
"=",
"static_get_type_attr",
"(",
"type_",
",",
"name",
")",
"except",
"AttributeError",
":",
"missing",
".",
"append",
"(",
"name",
")",
"continue",
"impl_sig",
"=",
"TypedSignature",
"(",
"f",
")",
"if",
"not",
"issubclass",
"(",
"impl_sig",
".",
"type",
",",
"iface_sig",
".",
"type",
")",
":",
"mistyped",
"[",
"name",
"]",
"=",
"impl_sig",
".",
"type",
"if",
"not",
"compatible",
"(",
"impl_sig",
".",
"signature",
",",
"iface_sig",
".",
"signature",
")",
":",
"mismatched",
"[",
"name",
"]",
"=",
"impl_sig",
"return",
"missing",
",",
"mistyped",
",",
"mismatched"
] | Diff our method signatures against the methods provided by type_.
Parameters
----------
type_ : type
The type to check.
Returns
-------
missing, mistyped, mismatched : list[str], dict[str -> type], dict[str -> signature] # noqa
``missing`` is a list of missing interface names.
``mistyped`` is a dict mapping names to incorrect types.
``mismatched`` is a dict mapping names to incorrect signatures. | [
"Diff",
"our",
"method",
"signatures",
"against",
"the",
"methods",
"provided",
"by",
"type_",
"."
] | b1dabab8556848fd473e388e28399886321b6127 | https://github.com/ssanderson/interface/blob/b1dabab8556848fd473e388e28399886321b6127/interface/interface.py#L116-L153 |
1,943 | ssanderson/interface | interface/interface.py | InterfaceMeta.verify | def verify(self, type_):
"""
Check whether a type implements ``self``.
Parameters
----------
type_ : type
The type to check.
Raises
------
TypeError
If ``type_`` doesn't conform to our interface.
Returns
-------
defaults : dict
Default implementations to use for any methods missing from ``type_``.
"""
raw_missing, mistyped, mismatched = self._diff_signatures(type_)
# See if we have defaults for missing methods.
missing = []
defaults_to_use = {}
for name in raw_missing:
try:
defaults_to_use[name] = self._defaults[name].implementation
except KeyError:
missing.append(name)
if not any((missing, mistyped, mismatched)):
return defaults_to_use
raise self._invalid_implementation(type_, missing, mistyped, mismatched) | python | def verify(self, type_):
raw_missing, mistyped, mismatched = self._diff_signatures(type_)
# See if we have defaults for missing methods.
missing = []
defaults_to_use = {}
for name in raw_missing:
try:
defaults_to_use[name] = self._defaults[name].implementation
except KeyError:
missing.append(name)
if not any((missing, mistyped, mismatched)):
return defaults_to_use
raise self._invalid_implementation(type_, missing, mistyped, mismatched) | [
"def",
"verify",
"(",
"self",
",",
"type_",
")",
":",
"raw_missing",
",",
"mistyped",
",",
"mismatched",
"=",
"self",
".",
"_diff_signatures",
"(",
"type_",
")",
"# See if we have defaults for missing methods.",
"missing",
"=",
"[",
"]",
"defaults_to_use",
"=",
"{",
"}",
"for",
"name",
"in",
"raw_missing",
":",
"try",
":",
"defaults_to_use",
"[",
"name",
"]",
"=",
"self",
".",
"_defaults",
"[",
"name",
"]",
".",
"implementation",
"except",
"KeyError",
":",
"missing",
".",
"append",
"(",
"name",
")",
"if",
"not",
"any",
"(",
"(",
"missing",
",",
"mistyped",
",",
"mismatched",
")",
")",
":",
"return",
"defaults_to_use",
"raise",
"self",
".",
"_invalid_implementation",
"(",
"type_",
",",
"missing",
",",
"mistyped",
",",
"mismatched",
")"
] | Check whether a type implements ``self``.
Parameters
----------
type_ : type
The type to check.
Raises
------
TypeError
If ``type_`` doesn't conform to our interface.
Returns
-------
defaults : dict
Default implementations to use for any methods missing from ``type_``. | [
"Check",
"whether",
"a",
"type",
"implements",
"self",
"."
] | b1dabab8556848fd473e388e28399886321b6127 | https://github.com/ssanderson/interface/blob/b1dabab8556848fd473e388e28399886321b6127/interface/interface.py#L155-L187 |
1,944 | ssanderson/interface | interface/interface.py | InterfaceMeta._invalid_implementation | def _invalid_implementation(self, t, missing, mistyped, mismatched):
"""
Make a TypeError explaining why ``t`` doesn't implement our interface.
"""
assert missing or mistyped or mismatched, "Implementation wasn't invalid."
message = "\nclass {C} failed to implement interface {I}:".format(
C=getname(t),
I=getname(self),
)
if missing:
message += dedent(
"""
The following methods of {I} were not implemented:
{missing_methods}"""
).format(
I=getname(self),
missing_methods=self._format_missing_methods(missing)
)
if mistyped:
message += dedent(
"""
The following methods of {I} were implemented with incorrect types:
{mismatched_types}"""
).format(
I=getname(self),
mismatched_types=self._format_mismatched_types(mistyped),
)
if mismatched:
message += dedent(
"""
The following methods of {I} were implemented with invalid signatures:
{mismatched_methods}"""
).format(
I=getname(self),
mismatched_methods=self._format_mismatched_methods(mismatched),
)
return InvalidImplementation(message) | python | def _invalid_implementation(self, t, missing, mistyped, mismatched):
assert missing or mistyped or mismatched, "Implementation wasn't invalid."
message = "\nclass {C} failed to implement interface {I}:".format(
C=getname(t),
I=getname(self),
)
if missing:
message += dedent(
"""
The following methods of {I} were not implemented:
{missing_methods}"""
).format(
I=getname(self),
missing_methods=self._format_missing_methods(missing)
)
if mistyped:
message += dedent(
"""
The following methods of {I} were implemented with incorrect types:
{mismatched_types}"""
).format(
I=getname(self),
mismatched_types=self._format_mismatched_types(mistyped),
)
if mismatched:
message += dedent(
"""
The following methods of {I} were implemented with invalid signatures:
{mismatched_methods}"""
).format(
I=getname(self),
mismatched_methods=self._format_mismatched_methods(mismatched),
)
return InvalidImplementation(message) | [
"def",
"_invalid_implementation",
"(",
"self",
",",
"t",
",",
"missing",
",",
"mistyped",
",",
"mismatched",
")",
":",
"assert",
"missing",
"or",
"mistyped",
"or",
"mismatched",
",",
"\"Implementation wasn't invalid.\"",
"message",
"=",
"\"\\nclass {C} failed to implement interface {I}:\"",
".",
"format",
"(",
"C",
"=",
"getname",
"(",
"t",
")",
",",
"I",
"=",
"getname",
"(",
"self",
")",
",",
")",
"if",
"missing",
":",
"message",
"+=",
"dedent",
"(",
"\"\"\"\n\n The following methods of {I} were not implemented:\n {missing_methods}\"\"\"",
")",
".",
"format",
"(",
"I",
"=",
"getname",
"(",
"self",
")",
",",
"missing_methods",
"=",
"self",
".",
"_format_missing_methods",
"(",
"missing",
")",
")",
"if",
"mistyped",
":",
"message",
"+=",
"dedent",
"(",
"\"\"\"\n\n The following methods of {I} were implemented with incorrect types:\n {mismatched_types}\"\"\"",
")",
".",
"format",
"(",
"I",
"=",
"getname",
"(",
"self",
")",
",",
"mismatched_types",
"=",
"self",
".",
"_format_mismatched_types",
"(",
"mistyped",
")",
",",
")",
"if",
"mismatched",
":",
"message",
"+=",
"dedent",
"(",
"\"\"\"\n\n The following methods of {I} were implemented with invalid signatures:\n {mismatched_methods}\"\"\"",
")",
".",
"format",
"(",
"I",
"=",
"getname",
"(",
"self",
")",
",",
"mismatched_methods",
"=",
"self",
".",
"_format_mismatched_methods",
"(",
"mismatched",
")",
",",
")",
"return",
"InvalidImplementation",
"(",
"message",
")"
] | Make a TypeError explaining why ``t`` doesn't implement our interface. | [
"Make",
"a",
"TypeError",
"explaining",
"why",
"t",
"doesn",
"t",
"implement",
"our",
"interface",
"."
] | b1dabab8556848fd473e388e28399886321b6127 | https://github.com/ssanderson/interface/blob/b1dabab8556848fd473e388e28399886321b6127/interface/interface.py#L189-L231 |
1,945 | ssanderson/interface | interface/interface.py | Interface.from_class | def from_class(cls, existing_class, subset=None, name=None):
"""Create an interface from an existing class.
Parameters
----------
existing_class : type
The type from which to extract an interface.
subset : list[str], optional
List of methods that should be included in the interface.
Default is to use all attributes not defined in an empty class.
name : str, optional
Name of the generated interface.
Default is ``existing_class.__name__ + 'Interface'``.
Returns
-------
interface : type
A new interface class with stubs generated from ``existing_class``.
"""
if name is None:
name = existing_class.__name__ + 'Interface'
if subset is None:
subset = set(dir(existing_class)) - TRIVIAL_CLASS_ATTRIBUTES
return InterfaceMeta(
name,
(Interface,),
{name: static_get_type_attr(existing_class, name) for name in subset},
) | python | def from_class(cls, existing_class, subset=None, name=None):
if name is None:
name = existing_class.__name__ + 'Interface'
if subset is None:
subset = set(dir(existing_class)) - TRIVIAL_CLASS_ATTRIBUTES
return InterfaceMeta(
name,
(Interface,),
{name: static_get_type_attr(existing_class, name) for name in subset},
) | [
"def",
"from_class",
"(",
"cls",
",",
"existing_class",
",",
"subset",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"existing_class",
".",
"__name__",
"+",
"'Interface'",
"if",
"subset",
"is",
"None",
":",
"subset",
"=",
"set",
"(",
"dir",
"(",
"existing_class",
")",
")",
"-",
"TRIVIAL_CLASS_ATTRIBUTES",
"return",
"InterfaceMeta",
"(",
"name",
",",
"(",
"Interface",
",",
")",
",",
"{",
"name",
":",
"static_get_type_attr",
"(",
"existing_class",
",",
"name",
")",
"for",
"name",
"in",
"subset",
"}",
",",
")"
] | Create an interface from an existing class.
Parameters
----------
existing_class : type
The type from which to extract an interface.
subset : list[str], optional
List of methods that should be included in the interface.
Default is to use all attributes not defined in an empty class.
name : str, optional
Name of the generated interface.
Default is ``existing_class.__name__ + 'Interface'``.
Returns
-------
interface : type
A new interface class with stubs generated from ``existing_class``. | [
"Create",
"an",
"interface",
"from",
"an",
"existing",
"class",
"."
] | b1dabab8556848fd473e388e28399886321b6127 | https://github.com/ssanderson/interface/blob/b1dabab8556848fd473e388e28399886321b6127/interface/interface.py#L319-L348 |
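A sketch of the documented usage, assuming `Interface` is importable from the package's top level (the `FileLike` class and subset names are illustrative):

```python
from interface import Interface

class FileLike:
    def read(self, size=-1):
        ...
    def close(self):
        ...

# Default name becomes existing_class.__name__ + 'Interface'.
FileLikeInterface = Interface.from_class(FileLike, subset=["read", "close"])
print(FileLikeInterface.__name__)  # FileLikeInterface
```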
1,946 | ssanderson/interface | interface/typecheck.py | compatible | def compatible(impl_sig, iface_sig):
"""
Check whether ``impl_sig`` is compatible with ``iface_sig``.
Parameters
----------
impl_sig : inspect.Signature
The signature of the implementation function.
iface_sig : inspect.Signature
The signature of the interface function.
In general, an implementation is compatible with an interface if any valid
way of passing parameters to the interface method is also valid for the
implementation.
Consequently, the following differences are allowed between the signature
of an implementation method and the signature of its interface definition:
1. An implementation may add new arguments to an interface iff:
a. All new arguments have default values.
b. All new arguments accepted positionally (i.e. all non-keyword-only
arguments) occur after any arguments declared by the interface.
c. Keyword-only arguments may be reordered by the implementation.
2. For type-annotated interfaces, type annotations may differ as follows:
a. Arguments to implementations of an interface may be annotated with
a **superclass** of the type specified by the interface.
b. The return type of an implementation may be annotated with a
**subclass** of the type specified by the interface.
"""
return all([
positionals_compatible(
takewhile(is_positional, impl_sig.parameters.values()),
takewhile(is_positional, iface_sig.parameters.values()),
),
keywords_compatible(
valfilter(complement(is_positional), impl_sig.parameters),
valfilter(complement(is_positional), iface_sig.parameters),
),
]) | python | def compatible(impl_sig, iface_sig):
return all([
positionals_compatible(
takewhile(is_positional, impl_sig.parameters.values()),
takewhile(is_positional, iface_sig.parameters.values()),
),
keywords_compatible(
valfilter(complement(is_positional), impl_sig.parameters),
valfilter(complement(is_positional), iface_sig.parameters),
),
]) | [
"def",
"compatible",
"(",
"impl_sig",
",",
"iface_sig",
")",
":",
"return",
"all",
"(",
"[",
"positionals_compatible",
"(",
"takewhile",
"(",
"is_positional",
",",
"impl_sig",
".",
"parameters",
".",
"values",
"(",
")",
")",
",",
"takewhile",
"(",
"is_positional",
",",
"iface_sig",
".",
"parameters",
".",
"values",
"(",
")",
")",
",",
")",
",",
"keywords_compatible",
"(",
"valfilter",
"(",
"complement",
"(",
"is_positional",
")",
",",
"impl_sig",
".",
"parameters",
")",
",",
"valfilter",
"(",
"complement",
"(",
"is_positional",
")",
",",
"iface_sig",
".",
"parameters",
")",
",",
")",
",",
"]",
")"
] | Check whether ``impl_sig`` is compatible with ``iface_sig``.
Parameters
----------
impl_sig : inspect.Signature
The signature of the implementation function.
iface_sig : inspect.Signature
The signature of the interface function.
In general, an implementation is compatible with an interface if any valid
way of passing parameters to the interface method is also valid for the
implementation.
Consequently, the following differences are allowed between the signature
of an implementation method and the signature of its interface definition:
1. An implementation may add new arguments to an interface iff:
a. All new arguments have default values.
b. All new arguments accepted positionally (i.e. all non-keyword-only
arguments) occur after any arguments declared by the interface.
c. Keyword-only arguments may be reordered by the implementation.
2. For type-annotated interfaces, type annotations may differ as follows:
a. Arguments to implementations of an interface may be annotated with
a **superclass** of the type specified by the interface.
b. The return type of an implementation may be annotated with a
**subclass** of the type specified by the interface. | [
"Check",
"whether",
"impl_sig",
"is",
"compatible",
"with",
"iface_sig",
"."
] | b1dabab8556848fd473e388e28399886321b6127 | https://github.com/ssanderson/interface/blob/b1dabab8556848fd473e388e28399886321b6127/interface/typecheck.py#L10-L49 |
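A concrete instance of rule 1: an implementation may append defaulted parameters after those the interface declares, because every valid call to the interface method is then still valid for the implementation. Sketched with `inspect` alone (the function names are illustrative):

```python
from inspect import signature

def iface_method(self, a): ...
def impl_method(self, a, extra=None): ...  # rules 1a/1b: defaulted arg, at the end

print(signature(iface_method))  # (self, a)
print(signature(impl_method))   # (self, a, extra=None)
# compatible(signature(impl_method), signature(iface_method)) would accept this
# pairing; dropping the default from `extra` would violate rule 1a.
```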
1,947 | ml31415/numpy-groupies | numpy_groupies/aggregate_numba.py | step_count | def step_count(group_idx):
"""Return the amount of index changes within group_idx."""
cmp_pos = 0
steps = 1
if len(group_idx) < 1:
return 0
for i in range(len(group_idx)):
if group_idx[cmp_pos] != group_idx[i]:
cmp_pos = i
steps += 1
return steps | python | def step_count(group_idx):
cmp_pos = 0
steps = 1
if len(group_idx) < 1:
return 0
for i in range(len(group_idx)):
if group_idx[cmp_pos] != group_idx[i]:
cmp_pos = i
steps += 1
return steps | [
"def",
"step_count",
"(",
"group_idx",
")",
":",
"cmp_pos",
"=",
"0",
"steps",
"=",
"1",
"if",
"len",
"(",
"group_idx",
")",
"<",
"1",
":",
"return",
"0",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"group_idx",
")",
")",
":",
"if",
"group_idx",
"[",
"cmp_pos",
"]",
"!=",
"group_idx",
"[",
"i",
"]",
":",
"cmp_pos",
"=",
"i",
"steps",
"+=",
"1",
"return",
"steps"
] | Return the number of index changes within group_idx. | [
"Return",
"the",
"amount",
"of",
"index",
"changes",
"within",
"group_idx",
"."
] | 0911e9c59b14e11319e82d0876056ad2a17e6568 | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_numba.py#L445-L455 |
1,948 | ml31415/numpy-groupies | numpy_groupies/aggregate_numba.py | step_indices | def step_indices(group_idx):
"""Return the edges of areas within group_idx, which are filled with the same value."""
ilen = step_count(group_idx) + 1
indices = np.empty(ilen, np.int64)
indices[0] = 0
indices[-1] = group_idx.size
cmp_pos = 0
ri = 1
for i in range(len(group_idx)):
if group_idx[cmp_pos] != group_idx[i]:
cmp_pos = i
indices[ri] = i
ri += 1
return indices | python | def step_indices(group_idx):
ilen = step_count(group_idx) + 1
indices = np.empty(ilen, np.int64)
indices[0] = 0
indices[-1] = group_idx.size
cmp_pos = 0
ri = 1
for i in range(len(group_idx)):
if group_idx[cmp_pos] != group_idx[i]:
cmp_pos = i
indices[ri] = i
ri += 1
return indices | [
"def",
"step_indices",
"(",
"group_idx",
")",
":",
"ilen",
"=",
"step_count",
"(",
"group_idx",
")",
"+",
"1",
"indices",
"=",
"np",
".",
"empty",
"(",
"ilen",
",",
"np",
".",
"int64",
")",
"indices",
"[",
"0",
"]",
"=",
"0",
"indices",
"[",
"-",
"1",
"]",
"=",
"group_idx",
".",
"size",
"cmp_pos",
"=",
"0",
"ri",
"=",
"1",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"group_idx",
")",
")",
":",
"if",
"group_idx",
"[",
"cmp_pos",
"]",
"!=",
"group_idx",
"[",
"i",
"]",
":",
"cmp_pos",
"=",
"i",
"indices",
"[",
"ri",
"]",
"=",
"i",
"ri",
"+=",
"1",
"return",
"indices"
] | Return the edges of areas within group_idx, which are filled with the same value. | [
"Return",
"the",
"edges",
"of",
"areas",
"within",
"group_idx",
"which",
"are",
"filled",
"with",
"the",
"same",
"value",
"."
] | 0911e9c59b14e11319e82d0876056ad2a17e6568 | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_numba.py#L459-L472 |
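The same edges can be computed without numba, which makes the contract easy to check; a pure-NumPy sketch:

```python
import numpy as np

group_idx = np.array([4, 4, 3, 3, 3, 1, 1])
change = np.nonzero(np.diff(group_idx))[0] + 1       # positions where the value switches
indices = np.concatenate(([0], change, [group_idx.size]))
print(indices)  # [0 2 5 7] -- one slice boundary per constant run
```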
1,949 | ml31415/numpy-groupies | numpy_groupies/aggregate_numba.py | AggregateOp.callable | def callable(cls, nans=False, reverse=False, scalar=False):
""" Compile a jitted function doing the hard part of the job """
_valgetter = cls._valgetter_scalar if scalar else cls._valgetter
valgetter = nb.njit(_valgetter)
outersetter = nb.njit(cls._outersetter)
_cls_inner = nb.njit(cls._inner)
if nans:
def _inner(ri, val, ret, counter, mean):
if not np.isnan(val):
_cls_inner(ri, val, ret, counter, mean)
inner = nb.njit(_inner)
else:
inner = _cls_inner
def _loop(group_idx, a, ret, counter, mean, outer, fill_value, ddof):
# fill_value and ddof need to be present for being exchangeable with loop_2pass
size = len(ret)
rng = range(len(group_idx) - 1, -1 , -1) if reverse else range(len(group_idx))
for i in rng:
ri = group_idx[i]
if ri < 0:
raise ValueError("negative indices not supported")
if ri >= size:
raise ValueError("one or more indices in group_idx are too large")
val = valgetter(a, i)
inner(ri, val, ret, counter, mean)
outersetter(outer, i, ret[ri])
return nb.njit(_loop, nogil=True) | python | def callable(cls, nans=False, reverse=False, scalar=False):
_valgetter = cls._valgetter_scalar if scalar else cls._valgetter
valgetter = nb.njit(_valgetter)
outersetter = nb.njit(cls._outersetter)
_cls_inner = nb.njit(cls._inner)
if nans:
def _inner(ri, val, ret, counter, mean):
if not np.isnan(val):
_cls_inner(ri, val, ret, counter, mean)
inner = nb.njit(_inner)
else:
inner = _cls_inner
def _loop(group_idx, a, ret, counter, mean, outer, fill_value, ddof):
# fill_value and ddof need to be present for being exchangeable with loop_2pass
size = len(ret)
rng = range(len(group_idx) - 1, -1 , -1) if reverse else range(len(group_idx))
for i in rng:
ri = group_idx[i]
if ri < 0:
raise ValueError("negative indices not supported")
if ri >= size:
raise ValueError("one or more indices in group_idx are too large")
val = valgetter(a, i)
inner(ri, val, ret, counter, mean)
outersetter(outer, i, ret[ri])
return nb.njit(_loop, nogil=True) | [
"def",
"callable",
"(",
"cls",
",",
"nans",
"=",
"False",
",",
"reverse",
"=",
"False",
",",
"scalar",
"=",
"False",
")",
":",
"_valgetter",
"=",
"cls",
".",
"_valgetter_scalar",
"if",
"scalar",
"else",
"cls",
".",
"_valgetter",
"valgetter",
"=",
"nb",
".",
"njit",
"(",
"_valgetter",
")",
"outersetter",
"=",
"nb",
".",
"njit",
"(",
"cls",
".",
"_outersetter",
")",
"_cls_inner",
"=",
"nb",
".",
"njit",
"(",
"cls",
".",
"_inner",
")",
"if",
"nans",
":",
"def",
"_inner",
"(",
"ri",
",",
"val",
",",
"ret",
",",
"counter",
",",
"mean",
")",
":",
"if",
"not",
"np",
".",
"isnan",
"(",
"val",
")",
":",
"_cls_inner",
"(",
"ri",
",",
"val",
",",
"ret",
",",
"counter",
",",
"mean",
")",
"inner",
"=",
"nb",
".",
"njit",
"(",
"_inner",
")",
"else",
":",
"inner",
"=",
"_cls_inner",
"def",
"_loop",
"(",
"group_idx",
",",
"a",
",",
"ret",
",",
"counter",
",",
"mean",
",",
"outer",
",",
"fill_value",
",",
"ddof",
")",
":",
"# fill_value and ddof need to be present for being exchangeable with loop_2pass",
"size",
"=",
"len",
"(",
"ret",
")",
"rng",
"=",
"range",
"(",
"len",
"(",
"group_idx",
")",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
"if",
"reverse",
"else",
"range",
"(",
"len",
"(",
"group_idx",
")",
")",
"for",
"i",
"in",
"rng",
":",
"ri",
"=",
"group_idx",
"[",
"i",
"]",
"if",
"ri",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"negative indices not supported\"",
")",
"if",
"ri",
">=",
"size",
":",
"raise",
"ValueError",
"(",
"\"one or more indices in group_idx are too large\"",
")",
"val",
"=",
"valgetter",
"(",
"a",
",",
"i",
")",
"inner",
"(",
"ri",
",",
"val",
",",
"ret",
",",
"counter",
",",
"mean",
")",
"outersetter",
"(",
"outer",
",",
"i",
",",
"ret",
"[",
"ri",
"]",
")",
"return",
"nb",
".",
"njit",
"(",
"_loop",
",",
"nogil",
"=",
"True",
")"
] | Compile a jitted function doing the hard part of the job | [
"Compile",
"a",
"jitted",
"function",
"doing",
"the",
"hard",
"part",
"of",
"the",
"job"
] | 0911e9c59b14e11319e82d0876056ad2a17e6568 | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_numba.py#L91-L119 |
1,950 | ml31415/numpy-groupies | numpy_groupies/aggregate_numba.py | AggregateGeneric.callable | def callable(self, nans=False):
"""Compile a jitted function and loop it over the sorted data."""
jitfunc = nb.njit(self.func, nogil=True)
def _loop(sortidx, group_idx, a, ret):
size = len(ret)
group_idx_srt = group_idx[sortidx]
a_srt = a[sortidx]
indices = step_indices(group_idx_srt)
for i in range(len(indices) - 1):
start_idx, stop_idx = indices[i], indices[i + 1]
ri = group_idx_srt[start_idx]
if ri < 0:
raise ValueError("negative indices not supported")
if ri >= size:
raise ValueError("one or more indices in group_idx are too large")
ret[ri] = jitfunc(a_srt[start_idx:stop_idx])
return nb.njit(_loop, nogil=True) | python | def callable(self, nans=False):
jitfunc = nb.njit(self.func, nogil=True)
def _loop(sortidx, group_idx, a, ret):
size = len(ret)
group_idx_srt = group_idx[sortidx]
a_srt = a[sortidx]
indices = step_indices(group_idx_srt)
for i in range(len(indices) - 1):
start_idx, stop_idx = indices[i], indices[i + 1]
ri = group_idx_srt[start_idx]
if ri < 0:
raise ValueError("negative indices not supported")
if ri >= size:
raise ValueError("one or more indices in group_idx are too large")
ret[ri] = jitfunc(a_srt[start_idx:stop_idx])
return nb.njit(_loop, nogil=True) | [
"def",
"callable",
"(",
"self",
",",
"nans",
"=",
"False",
")",
":",
"jitfunc",
"=",
"nb",
".",
"njit",
"(",
"self",
".",
"func",
",",
"nogil",
"=",
"True",
")",
"def",
"_loop",
"(",
"sortidx",
",",
"group_idx",
",",
"a",
",",
"ret",
")",
":",
"size",
"=",
"len",
"(",
"ret",
")",
"group_idx_srt",
"=",
"group_idx",
"[",
"sortidx",
"]",
"a_srt",
"=",
"a",
"[",
"sortidx",
"]",
"indices",
"=",
"step_indices",
"(",
"group_idx_srt",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"indices",
")",
"-",
"1",
")",
":",
"start_idx",
",",
"stop_idx",
"=",
"indices",
"[",
"i",
"]",
",",
"indices",
"[",
"i",
"+",
"1",
"]",
"ri",
"=",
"group_idx_srt",
"[",
"start_idx",
"]",
"if",
"ri",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"negative indices not supported\"",
")",
"if",
"ri",
">=",
"size",
":",
"raise",
"ValueError",
"(",
"\"one or more indices in group_idx are too large\"",
")",
"ret",
"[",
"ri",
"]",
"=",
"jitfunc",
"(",
"a_srt",
"[",
"start_idx",
":",
"stop_idx",
"]",
")",
"return",
"nb",
".",
"njit",
"(",
"_loop",
",",
"nogil",
"=",
"True",
")"
] | Compile a jitted function and loop it over the sorted data. | [
"Compile",
"a",
"jitted",
"function",
"and",
"loop",
"it",
"over",
"the",
"sorted",
"data",
"."
] | 0911e9c59b14e11319e82d0876056ad2a17e6568 | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_numba.py#L208-L226 |
1,951 | ml31415/numpy-groupies | numpy_groupies/utils.py | get_func | def get_func(func, aliasing, implementations):
""" Return the key of a found implementation or the func itself """
try:
func_str = aliasing[func]
except KeyError:
if callable(func):
return func
else:
if func_str in implementations:
return func_str
if func_str.startswith('nan') and \
func_str[3:] in funcs_no_separate_nan:
raise ValueError("%s does not have a nan-version".format(func_str[3:]))
else:
raise NotImplementedError("No such function available")
raise ValueError("func %s is neither a valid function string nor a "
"callable object".format(func)) | python | def get_func(func, aliasing, implementations):
try:
func_str = aliasing[func]
except KeyError:
if callable(func):
return func
else:
if func_str in implementations:
return func_str
if func_str.startswith('nan') and \
func_str[3:] in funcs_no_separate_nan:
raise ValueError("%s does not have a nan-version".format(func_str[3:]))
else:
raise NotImplementedError("No such function available")
raise ValueError("func %s is neither a valid function string nor a "
"callable object".format(func)) | [
"def",
"get_func",
"(",
"func",
",",
"aliasing",
",",
"implementations",
")",
":",
"try",
":",
"func_str",
"=",
"aliasing",
"[",
"func",
"]",
"except",
"KeyError",
":",
"if",
"callable",
"(",
"func",
")",
":",
"return",
"func",
"else",
":",
"if",
"func_str",
"in",
"implementations",
":",
"return",
"func_str",
"if",
"func_str",
".",
"startswith",
"(",
"'nan'",
")",
"and",
"func_str",
"[",
"3",
":",
"]",
"in",
"funcs_no_separate_nan",
":",
"raise",
"ValueError",
"(",
"\"%s does not have a nan-version\"",
".",
"format",
"(",
"func_str",
"[",
"3",
":",
"]",
")",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"\"No such function available\"",
")",
"raise",
"ValueError",
"(",
"\"func %s is neither a valid function string nor a \"",
"\"callable object\"",
".",
"format",
"(",
"func",
")",
")"
] | Return the key of a found implementation or the func itself | [
"Return",
"the",
"key",
"of",
"a",
"found",
"implementation",
"or",
"the",
"func",
"itself"
] | 0911e9c59b14e11319e82d0876056ad2a17e6568 | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/utils.py#L118-L134 |
1,952 | ml31415/numpy-groupies | numpy_groupies/utils_numpy.py | minimum_dtype | def minimum_dtype(x, dtype=np.bool_):
"""returns the "most basic" dtype which represents `x` properly, which
provides at least the same value range as the specified dtype."""
def check_type(x, dtype):
try:
converted = dtype.type(x)
except (ValueError, OverflowError):
return False
# False if some overflow has happened
return converted == x or np.isnan(x)
def type_loop(x, dtype, dtype_dict, default=None):
while True:
try:
dtype = np.dtype(dtype_dict[dtype.name])
if check_type(x, dtype):
return np.dtype(dtype)
except KeyError:
if default is not None:
return np.dtype(default)
raise ValueError("Can not determine dtype of %r" % x)
dtype = np.dtype(dtype)
if check_type(x, dtype):
return dtype
if np.issubdtype(dtype, np.inexact):
return type_loop(x, dtype, _next_float_dtype)
else:
return type_loop(x, dtype, _next_int_dtype, default=np.float32) | python | def minimum_dtype(x, dtype=np.bool_):
def check_type(x, dtype):
try:
converted = dtype.type(x)
except (ValueError, OverflowError):
return False
# False if some overflow has happened
return converted == x or np.isnan(x)
def type_loop(x, dtype, dtype_dict, default=None):
while True:
try:
dtype = np.dtype(dtype_dict[dtype.name])
if check_type(x, dtype):
return np.dtype(dtype)
except KeyError:
if default is not None:
return np.dtype(default)
raise ValueError("Can not determine dtype of %r" % x)
dtype = np.dtype(dtype)
if check_type(x, dtype):
return dtype
if np.issubdtype(dtype, np.inexact):
return type_loop(x, dtype, _next_float_dtype)
else:
return type_loop(x, dtype, _next_int_dtype, default=np.float32) | [
"def",
"minimum_dtype",
"(",
"x",
",",
"dtype",
"=",
"np",
".",
"bool_",
")",
":",
"def",
"check_type",
"(",
"x",
",",
"dtype",
")",
":",
"try",
":",
"converted",
"=",
"dtype",
".",
"type",
"(",
"x",
")",
"except",
"(",
"ValueError",
",",
"OverflowError",
")",
":",
"return",
"False",
"# False if some overflow has happened",
"return",
"converted",
"==",
"x",
"or",
"np",
".",
"isnan",
"(",
"x",
")",
"def",
"type_loop",
"(",
"x",
",",
"dtype",
",",
"dtype_dict",
",",
"default",
"=",
"None",
")",
":",
"while",
"True",
":",
"try",
":",
"dtype",
"=",
"np",
".",
"dtype",
"(",
"dtype_dict",
"[",
"dtype",
".",
"name",
"]",
")",
"if",
"check_type",
"(",
"x",
",",
"dtype",
")",
":",
"return",
"np",
".",
"dtype",
"(",
"dtype",
")",
"except",
"KeyError",
":",
"if",
"default",
"is",
"not",
"None",
":",
"return",
"np",
".",
"dtype",
"(",
"default",
")",
"raise",
"ValueError",
"(",
"\"Can not determine dtype of %r\"",
"%",
"x",
")",
"dtype",
"=",
"np",
".",
"dtype",
"(",
"dtype",
")",
"if",
"check_type",
"(",
"x",
",",
"dtype",
")",
":",
"return",
"dtype",
"if",
"np",
".",
"issubdtype",
"(",
"dtype",
",",
"np",
".",
"inexact",
")",
":",
"return",
"type_loop",
"(",
"x",
",",
"dtype",
",",
"_next_float_dtype",
")",
"else",
":",
"return",
"type_loop",
"(",
"x",
",",
"dtype",
",",
"_next_int_dtype",
",",
"default",
"=",
"np",
".",
"float32",
")"
] | returns the "most basic" dtype which represents `x` properly, which
provides at least the same value range as the specified dtype. | [
"returns",
"the",
"most",
"basic",
"dtype",
"which",
"represents",
"x",
"properly",
"which",
"provides",
"at",
"least",
"the",
"same",
"value",
"range",
"as",
"the",
"specified",
"dtype",
"."
] | 0911e9c59b14e11319e82d0876056ad2a17e6568 | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/utils_numpy.py#L60-L90 |
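NumPy ships a related helper, `np.min_scalar_type`, which walks the same ladder toward the smallest dtype able to hold a given scalar (it does not do the NaN/fill-value bookkeeping the function above adds):

```python
import numpy as np

print(np.min_scalar_type(255))    # uint8
print(np.min_scalar_type(-129))   # int16
print(np.min_scalar_type(1e50))   # float64 -- overflows float16/float32
```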
1,953 | ml31415/numpy-groupies | numpy_groupies/aggregate_numpy.py | _array | def _array(group_idx, a, size, fill_value, dtype=None):
"""groups a into separate arrays, keeping the order intact."""
if fill_value is not None and not (np.isscalar(fill_value) or
len(fill_value) == 0):
raise ValueError("fill_value must be None, a scalar or an empty "
"sequence")
order_group_idx = np.argsort(group_idx, kind='mergesort')
counts = np.bincount(group_idx, minlength=size)
ret = np.split(a[order_group_idx], np.cumsum(counts)[:-1])
ret = np.asanyarray(ret)
if fill_value is None or np.isscalar(fill_value):
_fill_untouched(group_idx, ret, fill_value)
return ret | python | def _array(group_idx, a, size, fill_value, dtype=None):
if fill_value is not None and not (np.isscalar(fill_value) or
len(fill_value) == 0):
raise ValueError("fill_value must be None, a scalar or an empty "
"sequence")
order_group_idx = np.argsort(group_idx, kind='mergesort')
counts = np.bincount(group_idx, minlength=size)
ret = np.split(a[order_group_idx], np.cumsum(counts)[:-1])
ret = np.asanyarray(ret)
if fill_value is None or np.isscalar(fill_value):
_fill_untouched(group_idx, ret, fill_value)
return ret | [
"def",
"_array",
"(",
"group_idx",
",",
"a",
",",
"size",
",",
"fill_value",
",",
"dtype",
"=",
"None",
")",
":",
"if",
"fill_value",
"is",
"not",
"None",
"and",
"not",
"(",
"np",
".",
"isscalar",
"(",
"fill_value",
")",
"or",
"len",
"(",
"fill_value",
")",
"==",
"0",
")",
":",
"raise",
"ValueError",
"(",
"\"fill_value must be None, a scalar or an empty \"",
"\"sequence\"",
")",
"order_group_idx",
"=",
"np",
".",
"argsort",
"(",
"group_idx",
",",
"kind",
"=",
"'mergesort'",
")",
"counts",
"=",
"np",
".",
"bincount",
"(",
"group_idx",
",",
"minlength",
"=",
"size",
")",
"ret",
"=",
"np",
".",
"split",
"(",
"a",
"[",
"order_group_idx",
"]",
",",
"np",
".",
"cumsum",
"(",
"counts",
")",
"[",
":",
"-",
"1",
"]",
")",
"ret",
"=",
"np",
".",
"asanyarray",
"(",
"ret",
")",
"if",
"fill_value",
"is",
"None",
"or",
"np",
".",
"isscalar",
"(",
"fill_value",
")",
":",
"_fill_untouched",
"(",
"group_idx",
",",
"ret",
",",
"fill_value",
")",
"return",
"ret"
] | groups a into separate arrays, keeping the order intact. | [
"groups",
"a",
"into",
"separate",
"arrays",
"keeping",
"the",
"order",
"intact",
"."
] | 0911e9c59b14e11319e82d0876056ad2a17e6568 | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_numpy.py#L188-L200 |
1,954 | ml31415/numpy-groupies | numpy_groupies/aggregate_numpy.py | _generic_callable | def _generic_callable(group_idx, a, size, fill_value, dtype=None,
func=lambda g: g, **kwargs):
"""groups a by inds, and then applies foo to each group in turn, placing
the results in an array."""
groups = _array(group_idx, a, size, ())
ret = np.full(size, fill_value, dtype=dtype or np.float64)
for i, grp in enumerate(groups):
if np.ndim(grp) == 1 and len(grp) > 0:
ret[i] = func(grp)
return ret | python | def _generic_callable(group_idx, a, size, fill_value, dtype=None,
func=lambda g: g, **kwargs):
groups = _array(group_idx, a, size, ())
ret = np.full(size, fill_value, dtype=dtype or np.float64)
for i, grp in enumerate(groups):
if np.ndim(grp) == 1 and len(grp) > 0:
ret[i] = func(grp)
return ret | [
"def",
"_generic_callable",
"(",
"group_idx",
",",
"a",
",",
"size",
",",
"fill_value",
",",
"dtype",
"=",
"None",
",",
"func",
"=",
"lambda",
"g",
":",
"g",
",",
"*",
"*",
"kwargs",
")",
":",
"groups",
"=",
"_array",
"(",
"group_idx",
",",
"a",
",",
"size",
",",
"(",
")",
")",
"ret",
"=",
"np",
".",
"full",
"(",
"size",
",",
"fill_value",
",",
"dtype",
"=",
"dtype",
"or",
"np",
".",
"float64",
")",
"for",
"i",
",",
"grp",
"in",
"enumerate",
"(",
"groups",
")",
":",
"if",
"np",
".",
"ndim",
"(",
"grp",
")",
"==",
"1",
"and",
"len",
"(",
"grp",
")",
">",
"0",
":",
"ret",
"[",
"i",
"]",
"=",
"func",
"(",
"grp",
")",
"return",
"ret"
] | groups ``a`` by ``group_idx``, and then applies ``func`` to each group in
turn, placing the results in an array. | [
"groups",
"a",
"by",
"inds",
"and",
"then",
"applies",
"foo",
"to",
"each",
"group",
"in",
"turn",
"placing",
"the",
"results",
"in",
"an",
"array",
"."
] | 0911e9c59b14e11319e82d0876056ad2a17e6568 | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_numpy.py#L203-L213 |
1,955 | ml31415/numpy-groupies | numpy_groupies/aggregate_numpy.py | _cumsum | def _cumsum(group_idx, a, size, fill_value=None, dtype=None):
"""
N to N aggregate operation of cumsum. Perform cumulative sum for each group.
group_idx = np.array([4, 3, 3, 4, 4, 1, 1, 1, 7, 8, 7, 4, 3, 3, 1, 1])
a = np.array([3, 4, 1, 3, 9, 9, 6, 7, 7, 0, 8, 2, 1, 8, 9, 8])
_cumsum(group_idx, a, np.max(group_idx) + 1)
>>> array([ 3, 4, 5, 6, 15, 9, 15, 22, 7, 0, 15, 17, 6, 14, 31, 39])
"""
sortidx = np.argsort(group_idx, kind='mergesort')
invsortidx = np.argsort(sortidx, kind='mergesort')
group_idx_srt = group_idx[sortidx]
a_srt = a[sortidx]
a_srt_cumsum = np.cumsum(a_srt, dtype=dtype)
increasing = np.arange(len(a), dtype=int)
group_starts = _min(group_idx_srt, increasing, size, fill_value=0)[group_idx_srt]
a_srt_cumsum += -a_srt_cumsum[group_starts] + a_srt[group_starts]
return a_srt_cumsum[invsortidx] | python | def _cumsum(group_idx, a, size, fill_value=None, dtype=None):
sortidx = np.argsort(group_idx, kind='mergesort')
invsortidx = np.argsort(sortidx, kind='mergesort')
group_idx_srt = group_idx[sortidx]
a_srt = a[sortidx]
a_srt_cumsum = np.cumsum(a_srt, dtype=dtype)
increasing = np.arange(len(a), dtype=int)
group_starts = _min(group_idx_srt, increasing, size, fill_value=0)[group_idx_srt]
a_srt_cumsum += -a_srt_cumsum[group_starts] + a_srt[group_starts]
return a_srt_cumsum[invsortidx] | [
"def",
"_cumsum",
"(",
"group_idx",
",",
"a",
",",
"size",
",",
"fill_value",
"=",
"None",
",",
"dtype",
"=",
"None",
")",
":",
"sortidx",
"=",
"np",
".",
"argsort",
"(",
"group_idx",
",",
"kind",
"=",
"'mergesort'",
")",
"invsortidx",
"=",
"np",
".",
"argsort",
"(",
"sortidx",
",",
"kind",
"=",
"'mergesort'",
")",
"group_idx_srt",
"=",
"group_idx",
"[",
"sortidx",
"]",
"a_srt",
"=",
"a",
"[",
"sortidx",
"]",
"a_srt_cumsum",
"=",
"np",
".",
"cumsum",
"(",
"a_srt",
",",
"dtype",
"=",
"dtype",
")",
"increasing",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"a",
")",
",",
"dtype",
"=",
"int",
")",
"group_starts",
"=",
"_min",
"(",
"group_idx_srt",
",",
"increasing",
",",
"size",
",",
"fill_value",
"=",
"0",
")",
"[",
"group_idx_srt",
"]",
"a_srt_cumsum",
"+=",
"-",
"a_srt_cumsum",
"[",
"group_starts",
"]",
"+",
"a_srt",
"[",
"group_starts",
"]",
"return",
"a_srt_cumsum",
"[",
"invsortidx",
"]"
] | N to N aggregate operation of cumsum. Perform cumulative sum for each group.
group_idx = np.array([4, 3, 3, 4, 4, 1, 1, 1, 7, 8, 7, 4, 3, 3, 1, 1])
a = np.array([3, 4, 1, 3, 9, 9, 6, 7, 7, 0, 8, 2, 1, 8, 9, 8])
_cumsum(group_idx, a, np.max(group_idx) + 1)
>>> array([ 3, 4, 5, 6, 15, 9, 15, 22, 7, 0, 15, 17, 6, 14, 31, 39]) | [
"N",
"to",
"N",
"aggregate",
"operation",
"of",
"cumsum",
".",
"Perform",
"cumulative",
"sum",
"for",
"each",
"group",
"."
] | 0911e9c59b14e11319e82d0876056ad2a17e6568 | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_numpy.py#L216-L235 |
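The docstring's example can be cross-checked against pandas, whose `groupby(...).cumsum()` computes the same order-preserving per-group cumulative sum:

```python
import numpy as np
import pandas as pd

group_idx = np.array([4, 3, 3, 4, 4, 1, 1, 1, 7, 8, 7, 4, 3, 3, 1, 1])
a = np.array([3, 4, 1, 3, 9, 9, 6, 7, 7, 0, 8, 2, 1, 8, 9, 8])
print(pd.Series(a).groupby(group_idx).cumsum().to_numpy())
# [ 3  4  5  6 15  9 15 22  7  0 15 17  6 14 31 39]
```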
1,956 | ml31415/numpy-groupies | numpy_groupies/aggregate_numpy.py | _fill_untouched | def _fill_untouched(idx, ret, fill_value):
"""any elements of ret not indexed by idx are set to fill_value."""
untouched = np.ones_like(ret, dtype=bool)
untouched[idx] = False
ret[untouched] = fill_value | python | def _fill_untouched(idx, ret, fill_value):
untouched = np.ones_like(ret, dtype=bool)
untouched[idx] = False
ret[untouched] = fill_value | [
"def",
"_fill_untouched",
"(",
"idx",
",",
"ret",
",",
"fill_value",
")",
":",
"untouched",
"=",
"np",
".",
"ones_like",
"(",
"ret",
",",
"dtype",
"=",
"bool",
")",
"untouched",
"[",
"idx",
"]",
"=",
"False",
"ret",
"[",
"untouched",
"]",
"=",
"fill_value"
] | any elements of ret not indexed by idx are set to fill_value. | [
"any",
"elements",
"of",
"ret",
"not",
"indexed",
"by",
"idx",
"are",
"set",
"to",
"fill_value",
"."
] | 0911e9c59b14e11319e82d0876056ad2a17e6568 | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_numpy.py#L296-L300 |
1,957 | ml31415/numpy-groupies | numpy_groupies/aggregate_numpy_ufunc.py | _prod | def _prod(group_idx, a, size, fill_value, dtype=None):
"""Same as aggregate_numpy.py"""
dtype = minimum_dtype_scalar(fill_value, dtype, a)
ret = np.full(size, fill_value, dtype=dtype)
if fill_value != 1:
ret[group_idx] = 1 # product should start from 1
np.multiply.at(ret, group_idx, a)
return ret | python | def _prod(group_idx, a, size, fill_value, dtype=None):
dtype = minimum_dtype_scalar(fill_value, dtype, a)
ret = np.full(size, fill_value, dtype=dtype)
if fill_value != 1:
ret[group_idx] = 1 # product should start from 1
np.multiply.at(ret, group_idx, a)
return ret | [
"def",
"_prod",
"(",
"group_idx",
",",
"a",
",",
"size",
",",
"fill_value",
",",
"dtype",
"=",
"None",
")",
":",
"dtype",
"=",
"minimum_dtype_scalar",
"(",
"fill_value",
",",
"dtype",
",",
"a",
")",
"ret",
"=",
"np",
".",
"full",
"(",
"size",
",",
"fill_value",
",",
"dtype",
"=",
"dtype",
")",
"if",
"fill_value",
"!=",
"1",
":",
"ret",
"[",
"group_idx",
"]",
"=",
"1",
"# product should start from 1",
"np",
".",
"multiply",
".",
"at",
"(",
"ret",
",",
"group_idx",
",",
"a",
")",
"return",
"ret"
] | Same as aggregate_numpy.py | [
"Same",
"as",
"aggregate_numpy",
".",
"py"
] | 0911e9c59b14e11319e82d0876056ad2a17e6568 | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_numpy_ufunc.py#L50-L57 |
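The core trick is `np.ufunc.at`, which applies the ufunc unbuffered so that repeated indices accumulate instead of being overwritten; a standalone sketch:

```python
import numpy as np

group_idx = np.array([0, 0, 1, 2, 2, 2])
a = np.array([2.0, 3.0, 5.0, 7.0, 11.0, 13.0])
ret = np.ones(3)                  # products start from 1, as in the code above
np.multiply.at(ret, group_idx, a)
print(ret)  # [   6.    5. 1001.]
```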
1,958 | ml31415/numpy-groupies | numpy_groupies/aggregate_weave.py | c_func | def c_func(funcname, reverse=False, nans=False, scalar=False):
""" Fill c_funcs with constructed code from the templates """
varnames = ['group_idx', 'a', 'ret', 'counter']
codebase = c_base_reverse if reverse else c_base
iteration = c_iter_scalar[funcname] if scalar else c_iter[funcname]
if scalar:
varnames.remove('a')
return codebase % dict(init=c_init(varnames), iter=iteration,
finish=c_finish.get(funcname, ''),
ri_redir=(c_ri_redir if nans else c_ri)) | python | def c_func(funcname, reverse=False, nans=False, scalar=False):
varnames = ['group_idx', 'a', 'ret', 'counter']
codebase = c_base_reverse if reverse else c_base
iteration = c_iter_scalar[funcname] if scalar else c_iter[funcname]
if scalar:
varnames.remove('a')
return codebase % dict(init=c_init(varnames), iter=iteration,
finish=c_finish.get(funcname, ''),
ri_redir=(c_ri_redir if nans else c_ri)) | [
"def",
"c_func",
"(",
"funcname",
",",
"reverse",
"=",
"False",
",",
"nans",
"=",
"False",
",",
"scalar",
"=",
"False",
")",
":",
"varnames",
"=",
"[",
"'group_idx'",
",",
"'a'",
",",
"'ret'",
",",
"'counter'",
"]",
"codebase",
"=",
"c_base_reverse",
"if",
"reverse",
"else",
"c_base",
"iteration",
"=",
"c_iter_scalar",
"[",
"funcname",
"]",
"if",
"scalar",
"else",
"c_iter",
"[",
"funcname",
"]",
"if",
"scalar",
":",
"varnames",
".",
"remove",
"(",
"'a'",
")",
"return",
"codebase",
"%",
"dict",
"(",
"init",
"=",
"c_init",
"(",
"varnames",
")",
",",
"iter",
"=",
"iteration",
",",
"finish",
"=",
"c_finish",
".",
"get",
"(",
"funcname",
",",
"''",
")",
",",
"ri_redir",
"=",
"(",
"c_ri_redir",
"if",
"nans",
"else",
"c_ri",
")",
")"
] | Fill c_funcs with constructed code from the templates | [
"Fill",
"c_funcs",
"with",
"constructed",
"code",
"from",
"the",
"templates"
] | 0911e9c59b14e11319e82d0876056ad2a17e6568 | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_weave.py#L154-L163 |
1,959 | ml31415/numpy-groupies | numpy_groupies/aggregate_weave.py | step_indices | def step_indices(group_idx):
""" Get the edges of areas within group_idx, which are filled
with the same value
"""
ilen = step_count(group_idx) + 1
indices = np.empty(ilen, int)
indices[0] = 0
indices[-1] = group_idx.size
inline(c_step_indices, ['group_idx', 'indices'], define_macros=c_macros, extra_compile_args=c_args)
return indices | python | def step_indices(group_idx):
ilen = step_count(group_idx) + 1
indices = np.empty(ilen, int)
indices[0] = 0
indices[-1] = group_idx.size
inline(c_step_indices, ['group_idx', 'indices'], define_macros=c_macros, extra_compile_args=c_args)
return indices | [
"def",
"step_indices",
"(",
"group_idx",
")",
":",
"ilen",
"=",
"step_count",
"(",
"group_idx",
")",
"+",
"1",
"indices",
"=",
"np",
".",
"empty",
"(",
"ilen",
",",
"int",
")",
"indices",
"[",
"0",
"]",
"=",
"0",
"indices",
"[",
"-",
"1",
"]",
"=",
"group_idx",
".",
"size",
"inline",
"(",
"c_step_indices",
",",
"[",
"'group_idx'",
",",
"'indices'",
"]",
",",
"define_macros",
"=",
"c_macros",
",",
"extra_compile_args",
"=",
"c_args",
")",
"return",
"indices"
] | Get the edges of areas within group_idx, which are filled
with the same value | [
"Get",
"the",
"edges",
"of",
"areas",
"within",
"group_idx",
"which",
"are",
"filled",
"with",
"the",
"same",
"value"
] | 0911e9c59b14e11319e82d0876056ad2a17e6568 | https://github.com/ml31415/numpy-groupies/blob/0911e9c59b14e11319e82d0876056ad2a17e6568/numpy_groupies/aggregate_weave.py#L212-L221 |
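The entry above fills indices with inlined C via weave; a pure-NumPy equivalent (a sketch, not part of the package -- the helper name is made up) finds the same run boundaries by comparing neighbouring elements:

import numpy as np

def step_indices_np(group_idx):
    # Boundaries of runs of equal values, plus the outer edges 0 and len(group_idx).
    group_idx = np.asarray(group_idx)
    inner = np.nonzero(group_idx[1:] != group_idx[:-1])[0] + 1
    return np.concatenate(([0], inner, [group_idx.size]))

print(step_indices_np([1, 1, 1, 2, 2, 7, 7, 1]))  # [0 3 5 7 8]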
1,960 | takuti/flurs | flurs/utils/projection.py | RandomProjection.__create_proj_mat | def __create_proj_mat(self, size):
"""Create a random projection matrix
[1] D. Achlioptas. Database-friendly random projections: Johnson-Lindenstrauss with binary coins.
[2] P. Li, et al. Very sparse random projections.
http://scikit-learn.org/stable/modules/random_projection.html#sparse-random-projection
"""
# [1]
# return np.random.choice([-np.sqrt(3), 0, np.sqrt(3)], size=size, p=[1 / 6, 2 / 3, 1 / 6])
# [2]
s = 1 / self.density
return np.random.choice([-np.sqrt(s / self.k), 0, np.sqrt(s / self.k)],
size=size,
p=[1 / (2 * s), 1 - 1 / s, 1 / (2 * s)]) | python | def __create_proj_mat(self, size):
# [1]
# return np.random.choice([-np.sqrt(3), 0, np.sqrt(3)], size=size, p=[1 / 6, 2 / 3, 1 / 6])
# [2]
s = 1 / self.density
return np.random.choice([-np.sqrt(s / self.k), 0, np.sqrt(s / self.k)],
size=size,
p=[1 / (2 * s), 1 - 1 / s, 1 / (2 * s)]) | [
"def",
"__create_proj_mat",
"(",
"self",
",",
"size",
")",
":",
"# [1]",
"# return np.random.choice([-np.sqrt(3), 0, np.sqrt(3)], size=size, p=[1 / 6, 2 / 3, 1 / 6])",
"# [2]",
"s",
"=",
"1",
"/",
"self",
".",
"density",
"return",
"np",
".",
"random",
".",
"choice",
"(",
"[",
"-",
"np",
".",
"sqrt",
"(",
"s",
"/",
"self",
".",
"k",
")",
",",
"0",
",",
"np",
".",
"sqrt",
"(",
"s",
"/",
"self",
".",
"k",
")",
"]",
",",
"size",
"=",
"size",
",",
"p",
"=",
"[",
"1",
"/",
"(",
"2",
"*",
"s",
")",
",",
"1",
"-",
"1",
"/",
"s",
",",
"1",
"/",
"(",
"2",
"*",
"s",
")",
"]",
")"
] | Create a random projection matrix
[1] D. Achlioptas. Database-friendly random projections: Johnson-Lindenstrauss with binary coins.
[2] P. Li, et al. Very sparse random projections.
http://scikit-learn.org/stable/modules/random_projection.html#sparse-random-projection | [
"Create",
"a",
"random",
"projection",
"matrix"
] | a998fc180b45db7eaf38dbbbf8125a93100b8a8c | https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/utils/projection.py#L72-L88 |
1,961 | takuti/flurs | flurs/datasets/movielens.py | load_ratings | def load_ratings(data_home, size):
"""Load all samples in the dataset.
"""
if size == '100k':
with open(os.path.join(data_home, 'u.data'), encoding='ISO-8859-1') as f:
lines = list(map(lambda l: list(map(int, l.rstrip().split('\t'))), f.readlines()))
elif size == '1m':
with open(os.path.join(data_home, 'ratings.dat'), encoding='ISO-8859-1') as f:
lines = list(map(lambda l: list(map(int, l.rstrip().split('::'))), f.readlines()))
ratings = []
for l in lines:
# Since we consider a positive-only feedback setting, ratings < 5 will be excluded.
if l[2] == 5:
ratings.append(l)
ratings = np.asarray(ratings)
# sorted by timestamp
return ratings[np.argsort(ratings[:, 3])] | python | def load_ratings(data_home, size):
if size == '100k':
with open(os.path.join(data_home, 'u.data'), encoding='ISO-8859-1') as f:
lines = list(map(lambda l: list(map(int, l.rstrip().split('\t'))), f.readlines()))
elif size == '1m':
with open(os.path.join(data_home, 'ratings.dat'), encoding='ISO-8859-1') as f:
lines = list(map(lambda l: list(map(int, l.rstrip().split('::'))), f.readlines()))
ratings = []
for l in lines:
# Since we consider a positive-only feedback setting, ratings < 5 will be excluded.
if l[2] == 5:
ratings.append(l)
ratings = np.asarray(ratings)
# sorted by timestamp
return ratings[np.argsort(ratings[:, 3])] | [
"def",
"load_ratings",
"(",
"data_home",
",",
"size",
")",
":",
"if",
"size",
"==",
"'100k'",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"data_home",
",",
"'u.data'",
")",
",",
"encoding",
"=",
"'ISO-8859-1'",
")",
"as",
"f",
":",
"lines",
"=",
"list",
"(",
"map",
"(",
"lambda",
"l",
":",
"list",
"(",
"map",
"(",
"int",
",",
"l",
".",
"rstrip",
"(",
")",
".",
"split",
"(",
"'\\t'",
")",
")",
")",
",",
"f",
".",
"readlines",
"(",
")",
")",
")",
"elif",
"size",
"==",
"'1m'",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"data_home",
",",
"'ratings.dat'",
")",
",",
"encoding",
"=",
"'ISO-8859-1'",
")",
"as",
"f",
":",
"lines",
"=",
"list",
"(",
"map",
"(",
"lambda",
"l",
":",
"list",
"(",
"map",
"(",
"int",
",",
"l",
".",
"rstrip",
"(",
")",
".",
"split",
"(",
"'::'",
")",
")",
")",
",",
"f",
".",
"readlines",
"(",
")",
")",
")",
"ratings",
"=",
"[",
"]",
"for",
"l",
"in",
"lines",
":",
"# Since we consider positive-only feedback setting, ratings < 5 will be excluded.",
"if",
"l",
"[",
"2",
"]",
"==",
"5",
":",
"ratings",
".",
"append",
"(",
"l",
")",
"ratings",
"=",
"np",
".",
"asarray",
"(",
"ratings",
")",
"# sorted by timestamp",
"return",
"ratings",
"[",
"np",
".",
"argsort",
"(",
"ratings",
"[",
":",
",",
"3",
"]",
")",
"]"
] | Load all samples in the dataset. | [
"Load",
"all",
"samples",
"in",
"the",
"dataset",
"."
] | a998fc180b45db7eaf38dbbbf8125a93100b8a8c | https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/datasets/movielens.py#L127-L148 |
1,962 | takuti/flurs | flurs/utils/feature_hash.py | n_feature_hash | def n_feature_hash(feature, dims, seeds):
"""N-hot-encoded feature hashing.
Args:
feature (str): Target feature represented as string.
dims (list of int): Number of dimensions for each hash value.
seeds (list of float): Seed of each hash function (mmh3).
Returns:
numpy 1d array: n-hot-encoded feature vector for `feature`.
"""
vec = np.zeros(sum(dims))
offset = 0
for seed, dim in zip(seeds, dims):
vec[offset:(offset + dim)] = feature_hash(feature, dim, seed)
offset += dim
return vec | python | def n_feature_hash(feature, dims, seeds):
vec = np.zeros(sum(dims))
offset = 0
for seed, dim in zip(seeds, dims):
vec[offset:(offset + dim)] = feature_hash(feature, dim, seed)
offset += dim
return vec | [
"def",
"n_feature_hash",
"(",
"feature",
",",
"dims",
",",
"seeds",
")",
":",
"vec",
"=",
"np",
".",
"zeros",
"(",
"sum",
"(",
"dims",
")",
")",
"offset",
"=",
"0",
"for",
"seed",
",",
"dim",
"in",
"zip",
"(",
"seeds",
",",
"dims",
")",
":",
"vec",
"[",
"offset",
":",
"(",
"offset",
"+",
"dim",
")",
"]",
"=",
"feature_hash",
"(",
"feature",
",",
"dim",
",",
"seed",
")",
"offset",
"+=",
"dim",
"return",
"vec"
] | N-hot-encoded feature hashing.
Args:
feature (str): Target feature represented as string.
dims (list of int): Number of dimensions for each hash value.
seeds (list of float): Seed of each hash function (mmh3).
Returns:
numpy 1d array: n-hot-encoded feature vector for `feature`. | [
"N",
"-",
"hot",
"-",
"encoded",
"feature",
"hashing",
"."
] | a998fc180b45db7eaf38dbbbf8125a93100b8a8c | https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/utils/feature_hash.py#L5-L24 |
1,963 | takuti/flurs | flurs/utils/feature_hash.py | feature_hash | def feature_hash(feature, dim, seed=123):
"""Feature hashing.
Args:
feature (str): Target feature represented as string.
dim (int): Number of dimensions for a hash value.
seed (float): Seed of a MurmurHash3 hash function.
Returns:
numpy 1d array: one-hot-encoded feature vector for `feature`.
"""
vec = np.zeros(dim)
i = mmh3.hash(feature, seed) % dim
vec[i] = 1
return vec | python | def feature_hash(feature, dim, seed=123):
vec = np.zeros(dim)
i = mmh3.hash(feature, seed) % dim
vec[i] = 1
return vec | [
"def",
"feature_hash",
"(",
"feature",
",",
"dim",
",",
"seed",
"=",
"123",
")",
":",
"vec",
"=",
"np",
".",
"zeros",
"(",
"dim",
")",
"i",
"=",
"mmh3",
".",
"hash",
"(",
"feature",
",",
"seed",
")",
"%",
"dim",
"vec",
"[",
"i",
"]",
"=",
"1",
"return",
"vec"
] | Feature hashing.
Args:
feature (str): Target feature represented as string.
dim (int): Number of dimensions for a hash value.
seed (float): Seed of a MurmurHash3 hash function.
Returns:
numpy 1d array: one-hot-encoded feature vector for `feature`. | [
"Feature",
"hashing",
"."
] | a998fc180b45db7eaf38dbbbf8125a93100b8a8c | https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/utils/feature_hash.py#L27-L42 |
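A usage sketch for the two hashing helpers above; mmh3.hash returns a signed 32-bit integer and Python's % keeps the bucket index non-negative. The feature string and dimensions are illustrative:

import numpy as np
import mmh3

dim = 8
i = mmh3.hash('user=alice', 123) % dim  # deterministic bucket for this feature
vec = np.zeros(dim)
vec[i] = 1
print(i, vec)

# n_feature_hash(feature, dims=[8, 4], seeds=[123, 456]) would concatenate two
# such one-hot blocks into a single 12-dimensional n-hot vector.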
1,964 | takuti/flurs | flurs/utils/metric.py | count_true_positive | def count_true_positive(truth, recommend):
"""Count number of true positives from given sets of samples.
Args:
truth (numpy 1d array): Set of truth samples.
recommend (numpy 1d array): Ordered set of recommended samples.
Returns:
int: Number of true positives.
"""
tp = 0
for r in recommend:
if r in truth:
tp += 1
return tp | python | def count_true_positive(truth, recommend):
tp = 0
for r in recommend:
if r in truth:
tp += 1
return tp | [
"def",
"count_true_positive",
"(",
"truth",
",",
"recommend",
")",
":",
"tp",
"=",
"0",
"for",
"r",
"in",
"recommend",
":",
"if",
"r",
"in",
"truth",
":",
"tp",
"+=",
"1",
"return",
"tp"
] | Count number of true positives from given sets of samples.
Args:
truth (numpy 1d array): Set of truth samples.
recommend (numpy 1d array): Ordered set of recommended samples.
Returns:
int: Number of true positives. | [
"Count",
"number",
"of",
"true",
"positives",
"from",
"given",
"sets",
"of",
"samples",
"."
] | a998fc180b45db7eaf38dbbbf8125a93100b8a8c | https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/utils/metric.py#L4-L19 |
1,965 | takuti/flurs | flurs/base.py | RecommenderMixin.initialize | def initialize(self, *args):
"""Initialize a recommender by resetting stored users and items.
"""
# number of observed users
self.n_user = 0
# store user data
self.users = {}
# number of observed items
self.n_item = 0
# store item data
self.items = {} | python | def initialize(self, *args):
# number of observed users
self.n_user = 0
# store user data
self.users = {}
# number of observed items
self.n_item = 0
# store item data
self.items = {} | [
"def",
"initialize",
"(",
"self",
",",
"*",
"args",
")",
":",
"# number of observed users",
"self",
".",
"n_user",
"=",
"0",
"# store user data",
"self",
".",
"users",
"=",
"{",
"}",
"# number of observed items",
"self",
".",
"n_item",
"=",
"0",
"# store item data",
"self",
".",
"items",
"=",
"{",
"}"
] | Initialize a recommender by resetting stored users and items. | [
"Initialize",
"a",
"recommender",
"by",
"resetting",
"stored",
"users",
"and",
"items",
"."
] | a998fc180b45db7eaf38dbbbf8125a93100b8a8c | https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/base.py#L11-L24 |
1,966 | takuti/flurs | flurs/base.py | RecommenderMixin.register_user | def register_user(self, user):
"""For new users, append their information into the dictionaries.
Args:
user (User): User.
"""
self.users[user.index] = {'known_items': set()}
self.n_user += 1 | python | def register_user(self, user):
self.users[user.index] = {'known_items': set()}
self.n_user += 1 | [
"def",
"register_user",
"(",
"self",
",",
"user",
")",
":",
"self",
".",
"users",
"[",
"user",
".",
"index",
"]",
"=",
"{",
"'known_items'",
":",
"set",
"(",
")",
"}",
"self",
".",
"n_user",
"+=",
"1"
] | For new users, add their information to the dictionaries.
Args:
user (User): User. | [
"For",
"new",
"users",
"append",
"their",
"information",
"into",
"the",
"dictionaries",
"."
] | a998fc180b45db7eaf38dbbbf8125a93100b8a8c | https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/base.py#L45-L53 |
1,967 | takuti/flurs | flurs/base.py | RecommenderMixin.scores2recos | def scores2recos(self, scores, candidates, rev=False):
"""Get recommendation list for a user u_index based on scores.
Args:
scores (numpy array; (n_target_items,)):
Scores for the target items. A smaller score indicates a more promising item.
candidates (numpy array; (# target items, )): Target items' indices. Only these items are considered as the recommendation candidates.
rev (bool): If true, return items in descending order. Ascending order (i.e., smaller scores are more promising) is the default.
Returns:
(numpy array, numpy array) : (Sorted list of items, Sorted scores).
"""
sorted_indices = np.argsort(scores)
if rev:
sorted_indices = sorted_indices[::-1]
return candidates[sorted_indices], scores[sorted_indices] | python | def scores2recos(self, scores, candidates, rev=False):
sorted_indices = np.argsort(scores)
if rev:
sorted_indices = sorted_indices[::-1]
return candidates[sorted_indices], scores[sorted_indices] | [
"def",
"scores2recos",
"(",
"self",
",",
"scores",
",",
"candidates",
",",
"rev",
"=",
"False",
")",
":",
"sorted_indices",
"=",
"np",
".",
"argsort",
"(",
"scores",
")",
"if",
"rev",
":",
"sorted_indices",
"=",
"sorted_indices",
"[",
":",
":",
"-",
"1",
"]",
"return",
"candidates",
"[",
"sorted_indices",
"]",
",",
"scores",
"[",
"sorted_indices",
"]"
] | Get recommendation list for a user u_index based on scores.
Args:
scores (numpy array; (n_target_items,)):
Scores for the target items. A smaller score indicates a more promising item.
candidates (numpy array; (# target items, )): Target items' indices. Only these items are considered as the recommendation candidates.
rev (bool): If true, return items in descending order. Ascending order (i.e., smaller scores are more promising) is the default.
Returns:
(numpy array, numpy array) : (Sorted list of items, Sorted scores). | [
"Get",
"recommendation",
"list",
"for",
"a",
"user",
"u_index",
"based",
"on",
"scores",
"."
] | a998fc180b45db7eaf38dbbbf8125a93100b8a8c | https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/base.py#L115-L133 |
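A worked example of the argsort-based ranking in the entry above, with made-up scores; ascending order is the default because a smaller score is treated as more promising:

import numpy as np

scores = np.array([0.9, 0.1, 0.5])
candidates = np.array([10, 20, 30])

order = np.argsort(scores)      # [1 2 0]
print(candidates[order])        # [20 30 10] -- default ascending ranking
print(candidates[order[::-1]])  # [10 30 20] -- rev=True flips to descending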
1,968 | takuti/flurs | flurs/evaluator.py | Evaluator.fit | def fit(self, train_events, test_events, n_epoch=1):
"""Train a model using the first 30% positive events to avoid cold-start.
Evaluation of this batch training is done by using the next 20% positive events.
After the batch SGD training, the models are incrementally updated by using the 20% test events.
Args:
train_events (list of Event): Positive training events (0-30%).
test_events (list of Event): Test events (30-50%).
n_epoch (int): Number of epochs for the batch training.
"""
# make initial status for batch training
for e in train_events:
self.__validate(e)
self.rec.users[e.user.index]['known_items'].add(e.item.index)
self.item_buffer.append(e.item.index)
# for batch evaluation, temporarily save new users info
for e in test_events:
self.__validate(e)
self.item_buffer.append(e.item.index)
self.__batch_update(train_events, test_events, n_epoch)
# batch test events are considered as new observations;
# the model is incrementally updated based on them before the incremental evaluation step
for e in test_events:
self.rec.users[e.user.index]['known_items'].add(e.item.index)
self.rec.update(e) | python | def fit(self, train_events, test_events, n_epoch=1):
# make initial status for batch training
for e in train_events:
self.__validate(e)
self.rec.users[e.user.index]['known_items'].add(e.item.index)
self.item_buffer.append(e.item.index)
# for batch evaluation, temporarily save new users info
for e in test_events:
self.__validate(e)
self.item_buffer.append(e.item.index)
self.__batch_update(train_events, test_events, n_epoch)
# batch test events are considered as new observations;
# the model is incrementally updated based on them before the incremental evaluation step
for e in test_events:
self.rec.users[e.user.index]['known_items'].add(e.item.index)
self.rec.update(e) | [
"def",
"fit",
"(",
"self",
",",
"train_events",
",",
"test_events",
",",
"n_epoch",
"=",
"1",
")",
":",
"# make initial status for batch training",
"for",
"e",
"in",
"train_events",
":",
"self",
".",
"__validate",
"(",
"e",
")",
"self",
".",
"rec",
".",
"users",
"[",
"e",
".",
"user",
".",
"index",
"]",
"[",
"'known_items'",
"]",
".",
"add",
"(",
"e",
".",
"item",
".",
"index",
")",
"self",
".",
"item_buffer",
".",
"append",
"(",
"e",
".",
"item",
".",
"index",
")",
"# for batch evaluation, temporarily save new users info",
"for",
"e",
"in",
"test_events",
":",
"self",
".",
"__validate",
"(",
"e",
")",
"self",
".",
"item_buffer",
".",
"append",
"(",
"e",
".",
"item",
".",
"index",
")",
"self",
".",
"__batch_update",
"(",
"train_events",
",",
"test_events",
",",
"n_epoch",
")",
"# batch test events are considered as a new observations;",
"# the model is incrementally updated based on them before the incremental evaluation step",
"for",
"e",
"in",
"test_events",
":",
"self",
".",
"rec",
".",
"users",
"[",
"e",
".",
"user",
".",
"index",
"]",
"[",
"'known_items'",
"]",
".",
"add",
"(",
"e",
".",
"item",
".",
"index",
")",
"self",
".",
"rec",
".",
"update",
"(",
"e",
")"
] | Train a model using the first 30% of positive events to avoid cold-start.
Evaluation of this batch training is done by using the next 20% of positive events.
After the batch SGD training, the models are incrementally updated by using the same 20% of test events.
Args:
train_events (list of Event): Positive training events (0-30%).
test_events (list of Event): Test events (30-50%).
n_epoch (int): Number of epochs for the batch training. | [
"Train",
"a",
"model",
"using",
"the",
"first",
"30%",
"positive",
"events",
"to",
"avoid",
"cold",
"-",
"start",
"."
] | a998fc180b45db7eaf38dbbbf8125a93100b8a8c | https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/evaluator.py#L35-L64 |
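A sketch of the 30%/20% split the fit protocol above assumes, using a stand-in list of time-ordered positive events; the remaining half is what the subsequent incremental evaluation phase would consume:

events = list(range(100))  # stand-in for time-ordered positive events

n = len(events)
train_events = events[:int(n * 0.3)]             # 0-30%: batch training
test_events = events[int(n * 0.3):int(n * 0.5)]  # 30-50%: batch evaluation
incremental = events[int(n * 0.5):]              # 50-100%: incremental phase
print(len(train_events), len(test_events), len(incremental))  # 30 20 50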
1,969 | takuti/flurs | flurs/evaluator.py | Evaluator.__batch_update | def __batch_update(self, train_events, test_events, n_epoch):
"""Batch update called by the fitting method.
Args:
train_events (list of Event): Positive training events.
test_events (list of Event): Test events.
n_epoch (int): Number of epochs for the batch training.
"""
for epoch in range(n_epoch):
# SGD requires us to shuffle events in each iteration
# * if n_epoch == 1
# => shuffle is not required because it is a deterministic training (i.e. matrix sketching)
if n_epoch != 1:
np.random.shuffle(train_events)
# train
for e in train_events:
self.rec.update(e, batch_train=True)
# test
MPR = self.__batch_evaluate(test_events)
if self.debug:
logger.debug('epoch %2d: MPR = %f' % (epoch + 1, MPR)) | python | def __batch_update(self, train_events, test_events, n_epoch):
for epoch in range(n_epoch):
# SGD requires us to shuffle events in each iteration
# * if n_epoch == 1
# => shuffle is not required because it is a deterministic training (i.e. matrix sketching)
if n_epoch != 1:
np.random.shuffle(train_events)
# train
for e in train_events:
self.rec.update(e, batch_train=True)
# test
MPR = self.__batch_evaluate(test_events)
if self.debug:
logger.debug('epoch %2d: MPR = %f' % (epoch + 1, MPR)) | [
"def",
"__batch_update",
"(",
"self",
",",
"train_events",
",",
"test_events",
",",
"n_epoch",
")",
":",
"for",
"epoch",
"in",
"range",
"(",
"n_epoch",
")",
":",
"# SGD requires us to shuffle events in each iteration",
"# * if n_epoch == 1",
"# => shuffle is not required because it is a deterministic training (i.e. matrix sketching)",
"if",
"n_epoch",
"!=",
"1",
":",
"np",
".",
"random",
".",
"shuffle",
"(",
"train_events",
")",
"# train",
"for",
"e",
"in",
"train_events",
":",
"self",
".",
"rec",
".",
"update",
"(",
"e",
",",
"batch_train",
"=",
"True",
")",
"# test",
"MPR",
"=",
"self",
".",
"__batch_evaluate",
"(",
"test_events",
")",
"if",
"self",
".",
"debug",
":",
"logger",
".",
"debug",
"(",
"'epoch %2d: MPR = %f'",
"%",
"(",
"epoch",
"+",
"1",
",",
"MPR",
")",
")"
] | Batch update called by the fitting method.
Args:
train_events (list of Event): Positive training events.
test_events (list of Event): Test events.
n_epoch (int): Number of epochs for the batch training. | [
"Batch",
"update",
"called",
"by",
"the",
"fitting",
"method",
"."
] | a998fc180b45db7eaf38dbbbf8125a93100b8a8c | https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/evaluator.py#L126-L149 |
1,970 | takuti/flurs | flurs/evaluator.py | Evaluator.__batch_evaluate | def __batch_evaluate(self, test_events):
"""Evaluate the current model by using the given test events.
Args:
test_events (list of Event): Current model is evaluated by these events.
Returns:
float: Mean Percentile Rank for the test set.
"""
percentiles = np.zeros(len(test_events))
all_items = set(self.item_buffer)
for i, e in enumerate(test_events):
# check if the data allows users to interact with the same items repeatedly
unobserved = all_items
if not self.repeat:
# make recommendation for all unobserved items
unobserved -= self.rec.users[e.user.index]['known_items']
# true item itself must be in the recommendation candidates
unobserved.add(e.item.index)
candidates = np.asarray(list(unobserved))
recos, scores = self.__recommend(e, candidates)
pos = np.where(recos == e.item.index)[0][0]
percentiles[i] = pos / (len(recos) - 1) * 100
return np.mean(percentiles) | python | def __batch_evaluate(self, test_events):
percentiles = np.zeros(len(test_events))
all_items = set(self.item_buffer)
for i, e in enumerate(test_events):
# check if the data allows users to interact with the same items repeatedly
unobserved = all_items
if not self.repeat:
# make recommendation for all unobserved items
unobserved -= self.rec.users[e.user.index]['known_items']
# true item itself must be in the recommendation candidates
unobserved.add(e.item.index)
candidates = np.asarray(list(unobserved))
recos, scores = self.__recommend(e, candidates)
pos = np.where(recos == e.item.index)[0][0]
percentiles[i] = pos / (len(recos) - 1) * 100
return np.mean(percentiles) | [
"def",
"__batch_evaluate",
"(",
"self",
",",
"test_events",
")",
":",
"percentiles",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"test_events",
")",
")",
"all_items",
"=",
"set",
"(",
"self",
".",
"item_buffer",
")",
"for",
"i",
",",
"e",
"in",
"enumerate",
"(",
"test_events",
")",
":",
"# check if the data allows users to interact the same items repeatedly",
"unobserved",
"=",
"all_items",
"if",
"not",
"self",
".",
"repeat",
":",
"# make recommendation for all unobserved items",
"unobserved",
"-=",
"self",
".",
"rec",
".",
"users",
"[",
"e",
".",
"user",
".",
"index",
"]",
"[",
"'known_items'",
"]",
"# true item itself must be in the recommendation candidates",
"unobserved",
".",
"add",
"(",
"e",
".",
"item",
".",
"index",
")",
"candidates",
"=",
"np",
".",
"asarray",
"(",
"list",
"(",
"unobserved",
")",
")",
"recos",
",",
"scores",
"=",
"self",
".",
"__recommend",
"(",
"e",
",",
"candidates",
")",
"pos",
"=",
"np",
".",
"where",
"(",
"recos",
"==",
"e",
".",
"item",
".",
"index",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"percentiles",
"[",
"i",
"]",
"=",
"pos",
"/",
"(",
"len",
"(",
"recos",
")",
"-",
"1",
")",
"*",
"100",
"return",
"np",
".",
"mean",
"(",
"percentiles",
")"
] | Evaluate the current model by using the given test events.
Args:
test_events (list of Event): Current model is evaluated by these events.
Returns:
float: Mean Percentile Rank for the test set. | [
"Evaluate",
"the",
"current",
"model",
"by",
"using",
"the",
"given",
"test",
"events",
"."
] | a998fc180b45db7eaf38dbbbf8125a93100b8a8c | https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/evaluator.py#L151-L180 |
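A worked example of the percentile-rank computation in the entry above, with a made-up ranked list; position 0 (a perfect hit) maps to 0.0, the last position to 100.0, and the Mean Percentile Rank averages this over all test events:

import numpy as np

recos = np.array([42, 7, 13, 99, 5])  # ranked recommendations, best first
true_item = 13

pos = np.where(recos == true_item)[0][0]  # 2
percentile = pos / (len(recos) - 1) * 100
print(percentile)                         # 50.0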
1,971 | linkedin/asciietch | asciietch/graph.py | Grapher._scale_x_values | def _scale_x_values(self, values, max_width):
'''Scale X values to new width'''
if type(values) == dict:
values = self._scale_x_values_timestamps(values=values, max_width=max_width)
adjusted_values = list(values)
if len(adjusted_values) > max_width:
def get_position(current_pos):
return len(adjusted_values) * current_pos // max_width
adjusted_values = [statistics.mean(adjusted_values[get_position(i):get_position(i + 1)]) for i in range(max_width)]
return adjusted_values | python | def _scale_x_values(self, values, max_width):
'''Scale X values to new width'''
if type(values) == dict:
values = self._scale_x_values_timestamps(values=values, max_width=max_width)
adjusted_values = list(values)
if len(adjusted_values) > max_width:
def get_position(current_pos):
return len(adjusted_values) * current_pos // max_width
adjusted_values = [statistics.mean(adjusted_values[get_position(i):get_position(i + 1)]) for i in range(max_width)]
return adjusted_values | [
"def",
"_scale_x_values",
"(",
"self",
",",
"values",
",",
"max_width",
")",
":",
"if",
"type",
"(",
"values",
")",
"==",
"dict",
":",
"values",
"=",
"self",
".",
"_scale_x_values_timestamps",
"(",
"values",
"=",
"values",
",",
"max_width",
"=",
"max_width",
")",
"adjusted_values",
"=",
"list",
"(",
"values",
")",
"if",
"len",
"(",
"adjusted_values",
")",
">",
"max_width",
":",
"def",
"get_position",
"(",
"current_pos",
")",
":",
"return",
"len",
"(",
"adjusted_values",
")",
"*",
"current_pos",
"//",
"max_width",
"adjusted_values",
"=",
"[",
"statistics",
".",
"mean",
"(",
"adjusted_values",
"[",
"get_position",
"(",
"i",
")",
":",
"get_position",
"(",
"i",
"+",
"1",
")",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"max_width",
")",
"]",
"return",
"adjusted_values"
] | Scale X values to new width | [
"Scale",
"X",
"values",
"to",
"new",
"width"
] | 33499e9b1c5226c04078d08a210ef657c630291c | https://github.com/linkedin/asciietch/blob/33499e9b1c5226c04078d08a210ef657c630291c/asciietch/graph.py#L11-L25 |
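A worked example of the bucket-mean downsampling in the entry above: twelve points squeezed into four columns by averaging each integer-division bucket:

import statistics

values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
max_width = 4

def get_position(i):
    return len(values) * i // max_width

scaled = [statistics.mean(values[get_position(i):get_position(i + 1)])
          for i in range(max_width)]
print(scaled)  # [2, 5, 8, 11]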
1,972 | linkedin/asciietch | asciietch/graph.py | Grapher._scale_x_values_timestamps | def _scale_x_values_timestamps(self, values, max_width):
'''Scale X values to new width based on timestamps'''
first_timestamp = float(values[0][0])
last_timestamp = float(values[-1][0])
step_size = (last_timestamp - first_timestamp) / max_width
values_by_column = [[] for i in range(max_width)]
for timestamp, value in values:
if value is None:
continue
timestamp = float(timestamp)
column = (timestamp - first_timestamp) // step_size
column = int(min(column, max_width - 1)) # Don't go beyond the last column
values_by_column[column].append(value)
adjusted_values = [statistics.mean(values) if values else 0 for values in values_by_column] # Average each column, 0 if no values
return adjusted_values | python | def _scale_x_values_timestamps(self, values, max_width):
'''Scale X values to new width based on timestamps'''
first_timestamp = float(values[0][0])
last_timestamp = float(values[-1][0])
step_size = (last_timestamp - first_timestamp) / max_width
values_by_column = [[] for i in range(max_width)]
for timestamp, value in values:
if value is None:
continue
timestamp = float(timestamp)
column = (timestamp - first_timestamp) // step_size
column = int(min(column, max_width - 1)) # Don't go beyond the last column
values_by_column[column].append(value)
adjusted_values = [statistics.mean(values) if values else 0 for values in values_by_column] # Average each column, 0 if no values
return adjusted_values | [
"def",
"_scale_x_values_timestamps",
"(",
"self",
",",
"values",
",",
"max_width",
")",
":",
"first_timestamp",
"=",
"float",
"(",
"values",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"last_timestamp",
"=",
"float",
"(",
"values",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
")",
"step_size",
"=",
"(",
"last_timestamp",
"-",
"first_timestamp",
")",
"/",
"max_width",
"values_by_column",
"=",
"[",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"max_width",
")",
"]",
"for",
"timestamp",
",",
"value",
"in",
"values",
":",
"if",
"value",
"is",
"None",
":",
"continue",
"timestamp",
"=",
"float",
"(",
"timestamp",
")",
"column",
"=",
"(",
"timestamp",
"-",
"first_timestamp",
")",
"//",
"step_size",
"column",
"=",
"int",
"(",
"min",
"(",
"column",
",",
"max_width",
"-",
"1",
")",
")",
"# Don't go beyond the last column",
"values_by_column",
"[",
"column",
"]",
".",
"append",
"(",
"value",
")",
"adjusted_values",
"=",
"[",
"statistics",
".",
"mean",
"(",
"values",
")",
"if",
"values",
"else",
"0",
"for",
"values",
"in",
"values_by_column",
"]",
"# Average each column, 0 if no values",
"return",
"adjusted_values"
] | Scale X values to new width based on timestamps | [
"Scale",
"X",
"values",
"to",
"new",
"width",
"based",
"on",
"timestamps"
] | 33499e9b1c5226c04078d08a210ef657c630291c | https://github.com/linkedin/asciietch/blob/33499e9b1c5226c04078d08a210ef657c630291c/asciietch/graph.py#L27-L44 |
1,973 | linkedin/asciietch | asciietch/graph.py | Grapher._scale_y_values | def _scale_y_values(self, values, new_min, new_max, scale_old_from_zero=True):
'''
Take values and transmute them into a new range
'''
# Scale Y values - Create a scaled list of values to use for the visual graph
scaled_values = []
y_min_value = min(values)
if scale_old_from_zero:
y_min_value = 0
y_max_value = max(values)
new_min = 0
OldRange = (y_max_value - y_min_value) or 1 # Prevents division by zero if all values are the same
NewRange = (new_max - new_min) # max_height is new_max
for old_value in values:
new_value = (((old_value - y_min_value) * NewRange) / OldRange) + new_min
scaled_values.append(new_value)
return scaled_values | python | def _scale_y_values(self, values, new_min, new_max, scale_old_from_zero=True):
'''
Take values and transmute them into a new range
'''
# Scale Y values - Create a scaled list of values to use for the visual graph
scaled_values = []
y_min_value = min(values)
if scale_old_from_zero:
y_min_value = 0
y_max_value = max(values)
new_min = 0
OldRange = (y_max_value - y_min_value) or 1 # Prevents division by zero if all values are the same
NewRange = (new_max - new_min) # max_height is new_max
for old_value in values:
new_value = (((old_value - y_min_value) * NewRange) / OldRange) + new_min
scaled_values.append(new_value)
return scaled_values | [
"def",
"_scale_y_values",
"(",
"self",
",",
"values",
",",
"new_min",
",",
"new_max",
",",
"scale_old_from_zero",
"=",
"True",
")",
":",
"# Scale Y values - Create a scaled list of values to use for the visual graph",
"scaled_values",
"=",
"[",
"]",
"y_min_value",
"=",
"min",
"(",
"values",
")",
"if",
"scale_old_from_zero",
":",
"y_min_value",
"=",
"0",
"y_max_value",
"=",
"max",
"(",
"values",
")",
"new_min",
"=",
"0",
"OldRange",
"=",
"(",
"y_max_value",
"-",
"y_min_value",
")",
"or",
"1",
"# Prevents division by zero if all values are the same",
"NewRange",
"=",
"(",
"new_max",
"-",
"new_min",
")",
"# max_height is new_max",
"for",
"old_value",
"in",
"values",
":",
"new_value",
"=",
"(",
"(",
"(",
"old_value",
"-",
"y_min_value",
")",
"*",
"NewRange",
")",
"/",
"OldRange",
")",
"+",
"new_min",
"scaled_values",
".",
"append",
"(",
"new_value",
")",
"return",
"scaled_values"
] | Take values and transmute them into a new range | [
"Take",
"values",
"and",
"transmute",
"them",
"into",
"a",
"new",
"range"
] | 33499e9b1c5226c04078d08a210ef657c630291c | https://github.com/linkedin/asciietch/blob/33499e9b1c5226c04078d08a210ef657c630291c/asciietch/graph.py#L46-L62 |
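A worked example of the rescaling formula in the entry above (the scale_old_from_zero=False branch), with illustrative numbers; the `or 1` guards against dividing by zero on a flat series:

values = [10.0, 15.0, 20.0]
old_min, old_max = min(values), max(values)
new_min, new_max = 0, 4

old_range = (old_max - old_min) or 1
new_range = new_max - new_min
scaled = [(v - old_min) * new_range / old_range + new_min for v in values]
print(scaled)  # [0.0, 2.0, 4.0]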
1,974 | linkedin/asciietch | asciietch/graph.py | Grapher._assign_ascii_character | def _assign_ascii_character(self, y_prev, y, y_next): # noqa for complexity
'''Assign the character to be placed into the graph'''
char = '?'
if y_next > y and y_prev > y:
char = '-'
elif y_next < y and y_prev < y:
char = '-'
elif y_prev < y and y == y_next:
char = '-'
elif y_prev == y and y_next < y:
char = '-'
elif y_next > y:
char = '/'
elif y_next < y:
char = '\\'
elif y_prev < y:
char = '/'
elif y_prev > y:
char = '\\'
elif y_next == y:
char = '-'
elif y == y_prev:
char = '-'
return char | python | def _assign_ascii_character(self, y_prev, y, y_next): # noqa for complexity
'''Assign the character to be placed into the graph'''
char = '?'
if y_next > y and y_prev > y:
char = '-'
elif y_next < y and y_prev < y:
char = '-'
elif y_prev < y and y == y_next:
char = '-'
elif y_prev == y and y_next < y:
char = '-'
elif y_next > y:
char = '/'
elif y_next < y:
char = '\\'
elif y_prev < y:
char = '/'
elif y_prev > y:
char = '\\'
elif y_next == y:
char = '-'
elif y == y_prev:
char = '-'
return char | [
"def",
"_assign_ascii_character",
"(",
"self",
",",
"y_prev",
",",
"y",
",",
"y_next",
")",
":",
"# noqa for complexity",
"char",
"=",
"'?'",
"if",
"y_next",
">",
"y",
"and",
"y_prev",
">",
"y",
":",
"char",
"=",
"'-'",
"elif",
"y_next",
"<",
"y",
"and",
"y_prev",
"<",
"y",
":",
"char",
"=",
"'-'",
"elif",
"y_prev",
"<",
"y",
"and",
"y",
"==",
"y_next",
":",
"char",
"=",
"'-'",
"elif",
"y_prev",
"==",
"y",
"and",
"y_next",
"<",
"y",
":",
"char",
"=",
"'-'",
"elif",
"y_next",
">",
"y",
":",
"char",
"=",
"'/'",
"elif",
"y_next",
"<",
"y",
":",
"char",
"=",
"'\\\\'",
"elif",
"y_prev",
"<",
"y",
":",
"char",
"=",
"'/'",
"elif",
"y_prev",
">",
"y",
":",
"char",
"=",
"'\\\\'",
"elif",
"y_next",
"==",
"y",
":",
"char",
"=",
"'-'",
"elif",
"y",
"==",
"y_prev",
":",
"char",
"=",
"'-'",
"return",
"char"
] | Assign the character to be placed into the graph | [
"Assign",
"the",
"character",
"to",
"be",
"placed",
"into",
"the",
"graph"
] | 33499e9b1c5226c04078d08a210ef657c630291c | https://github.com/linkedin/asciietch/blob/33499e9b1c5226c04078d08a210ef657c630291c/asciietch/graph.py#L97-L120 |
1,975 | linkedin/asciietch | asciietch/graph.py | Grapher.asciigraph | def asciigraph(self, values=None, max_height=None, max_width=None, label=False):
'''
Accepts a list of y values and returns an ascii graph
Optionally, values can also be a dictionary mapping timestamps to values; InGraphs returns data in this format, for example.
'''
result = ''
border_fill_char = '*'
start_ctime = None
end_ctime = None
if not max_width:
max_width = 180
# If this is a dict of timestamp -> value, sort the data, store the start/end time, and convert values to a list of values
if isinstance(values, dict):
time_series_sorted = sorted(list(values.items()), key=lambda x: x[0]) # Sort timestamp/value dict by the timestamps
start_timestamp = time_series_sorted[0][0]
end_timestamp = time_series_sorted[-1][0]
start_ctime = datetime.fromtimestamp(float(start_timestamp)).ctime()
end_ctime = datetime.fromtimestamp(float(end_timestamp)).ctime()
values = self._scale_x_values_timestamps(values=time_series_sorted, max_width=max_width)
values = [value for value in values if value is not None]
if not max_height:
max_height = min(20, max(values))
stdev = statistics.stdev(values)
mean = statistics.mean(values)
# Do value adjustments
adjusted_values = list(values)
adjusted_values = self._scale_x_values(values=values, max_width=max_width)
upper_value = max(adjusted_values) # Getting upper/lower after scaling x values so we don't label a spike we can't see
lower_value = min(adjusted_values)
adjusted_values = self._scale_y_values(values=adjusted_values, new_min=0, new_max=max_height, scale_old_from_zero=False)
adjusted_values = self._round_floats_to_ints(values=adjusted_values)
# Obtain Ascii Graph String
field = self._get_ascii_field(adjusted_values)
graph_string = self._draw_ascii_graph(field=field)
# Label the graph
if label:
top_label = 'Upper value: {upper_value:.2f} '.format(upper_value=upper_value).ljust(max_width, border_fill_char)
result += top_label + '\n'
result += '{graph_string}\n'.format(graph_string=graph_string)
if label:
lower = f'Lower value: {lower_value:.2f} '
stats = f' Mean: {mean:.2f} *** Std Dev: {stdev:.2f} ******'
fill_length = max_width - len(lower) - len(stats)
stat_label = f'{lower}{"*" * fill_length}{stats}\n'
result += stat_label
if start_ctime and end_ctime:
fill_length = max_width - len(start_ctime) - len(end_ctime)
result += f'{start_ctime}{" " * fill_length}{end_ctime}\n'
return result | python | def asciigraph(self, values=None, max_height=None, max_width=None, label=False):
'''
Accepts a list of y values and returns an ascii graph
Optionally, values can also be a dictionary mapping timestamps to values; InGraphs returns data in this format, for example.
'''
result = ''
border_fill_char = '*'
start_ctime = None
end_ctime = None
if not max_width:
max_width = 180
# If this is a dict of timestamp -> value, sort the data, store the start/end time, and convert values to a list of values
if isinstance(values, dict):
time_series_sorted = sorted(list(values.items()), key=lambda x: x[0]) # Sort timestamp/value dict by the timestamps
start_timestamp = time_series_sorted[0][0]
end_timestamp = time_series_sorted[-1][0]
start_ctime = datetime.fromtimestamp(float(start_timestamp)).ctime()
end_ctime = datetime.fromtimestamp(float(end_timestamp)).ctime()
values = self._scale_x_values_timestamps(values=time_series_sorted, max_width=max_width)
values = [value for value in values if value is not None]
if not max_height:
max_height = min(20, max(values))
stdev = statistics.stdev(values)
mean = statistics.mean(values)
# Do value adjustments
adjusted_values = list(values)
adjusted_values = self._scale_x_values(values=values, max_width=max_width)
upper_value = max(adjusted_values) # Getting upper/lower after scaling x values so we don't label a spike we can't see
lower_value = min(adjusted_values)
adjusted_values = self._scale_y_values(values=adjusted_values, new_min=0, new_max=max_height, scale_old_from_zero=False)
adjusted_values = self._round_floats_to_ints(values=adjusted_values)
# Obtain Ascii Graph String
field = self._get_ascii_field(adjusted_values)
graph_string = self._draw_ascii_graph(field=field)
# Label the graph
if label:
top_label = 'Upper value: {upper_value:.2f} '.format(upper_value=upper_value).ljust(max_width, border_fill_char)
result += top_label + '\n'
result += '{graph_string}\n'.format(graph_string=graph_string)
if label:
lower = f'Lower value: {lower_value:.2f} '
stats = f' Mean: {mean:.2f} *** Std Dev: {stdev:.2f} ******'
fill_length = max_width - len(lower) - len(stats)
stat_label = f'{lower}{"*" * fill_length}{stats}\n'
result += stat_label
if start_ctime and end_ctime:
fill_length = max_width - len(start_ctime) - len(end_ctime)
result += f'{start_ctime}{" " * fill_length}{end_ctime}\n'
return result | [
"def",
"asciigraph",
"(",
"self",
",",
"values",
"=",
"None",
",",
"max_height",
"=",
"None",
",",
"max_width",
"=",
"None",
",",
"label",
"=",
"False",
")",
":",
"result",
"=",
"''",
"border_fill_char",
"=",
"'*'",
"start_ctime",
"=",
"None",
"end_ctime",
"=",
"None",
"if",
"not",
"max_width",
":",
"max_width",
"=",
"180",
"# If this is a dict of timestamp -> value, sort the data, store the start/end time, and convert values to a list of values",
"if",
"isinstance",
"(",
"values",
",",
"dict",
")",
":",
"time_series_sorted",
"=",
"sorted",
"(",
"list",
"(",
"values",
".",
"items",
"(",
")",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")",
"# Sort timestamp/value dict by the timestamps",
"start_timestamp",
"=",
"time_series_sorted",
"[",
"0",
"]",
"[",
"0",
"]",
"end_timestamp",
"=",
"time_series_sorted",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
"start_ctime",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"float",
"(",
"start_timestamp",
")",
")",
".",
"ctime",
"(",
")",
"end_ctime",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"float",
"(",
"end_timestamp",
")",
")",
".",
"ctime",
"(",
")",
"values",
"=",
"self",
".",
"_scale_x_values_timestamps",
"(",
"values",
"=",
"time_series_sorted",
",",
"max_width",
"=",
"max_width",
")",
"values",
"=",
"[",
"value",
"for",
"value",
"in",
"values",
"if",
"value",
"is",
"not",
"None",
"]",
"if",
"not",
"max_height",
":",
"max_height",
"=",
"min",
"(",
"20",
",",
"max",
"(",
"values",
")",
")",
"stdev",
"=",
"statistics",
".",
"stdev",
"(",
"values",
")",
"mean",
"=",
"statistics",
".",
"mean",
"(",
"values",
")",
"# Do value adjustments",
"adjusted_values",
"=",
"list",
"(",
"values",
")",
"adjusted_values",
"=",
"self",
".",
"_scale_x_values",
"(",
"values",
"=",
"values",
",",
"max_width",
"=",
"max_width",
")",
"upper_value",
"=",
"max",
"(",
"adjusted_values",
")",
"# Getting upper/lower after scaling x values so we don't label a spike we can't see",
"lower_value",
"=",
"min",
"(",
"adjusted_values",
")",
"adjusted_values",
"=",
"self",
".",
"_scale_y_values",
"(",
"values",
"=",
"adjusted_values",
",",
"new_min",
"=",
"0",
",",
"new_max",
"=",
"max_height",
",",
"scale_old_from_zero",
"=",
"False",
")",
"adjusted_values",
"=",
"self",
".",
"_round_floats_to_ints",
"(",
"values",
"=",
"adjusted_values",
")",
"# Obtain Ascii Graph String",
"field",
"=",
"self",
".",
"_get_ascii_field",
"(",
"adjusted_values",
")",
"graph_string",
"=",
"self",
".",
"_draw_ascii_graph",
"(",
"field",
"=",
"field",
")",
"# Label the graph",
"if",
"label",
":",
"top_label",
"=",
"'Upper value: {upper_value:.2f} '",
".",
"format",
"(",
"upper_value",
"=",
"upper_value",
")",
".",
"ljust",
"(",
"max_width",
",",
"border_fill_char",
")",
"result",
"+=",
"top_label",
"+",
"'\\n'",
"result",
"+=",
"'{graph_string}\\n'",
".",
"format",
"(",
"graph_string",
"=",
"graph_string",
")",
"if",
"label",
":",
"lower",
"=",
"f'Lower value: {lower_value:.2f} '",
"stats",
"=",
"f' Mean: {mean:.2f} *** Std Dev: {stdev:.2f} ******'",
"fill_length",
"=",
"max_width",
"-",
"len",
"(",
"lower",
")",
"-",
"len",
"(",
"stats",
")",
"stat_label",
"=",
"f'{lower}{\"*\" * fill_length}{stats}\\n'",
"result",
"+=",
"stat_label",
"if",
"start_ctime",
"and",
"end_ctime",
":",
"fill_length",
"=",
"max_width",
"-",
"len",
"(",
"start_ctime",
")",
"-",
"len",
"(",
"end_ctime",
")",
"result",
"+=",
"f'{start_ctime}{\" \" * fill_length}{end_ctime}\\n'",
"return",
"result"
] | Accepts a list of y values and returns an ascii graph
Optionally, values can also be a dictionary mapping timestamps to values; InGraphs returns data in this format, for example. | [
"Accepts",
"a",
"list",
"of",
"y",
"values",
"and",
"returns",
"an",
"ascii",
"graph",
"Optionally",
"values",
"can",
"also",
"be",
"a",
"dictionary",
"with",
"a",
"key",
"of",
"timestamp",
"and",
"a",
"value",
"of",
"value",
".",
"InGraphs",
"returns",
"data",
"in",
"this",
"format",
"for",
"example",
"."
] | 33499e9b1c5226c04078d08a210ef657c630291c | https://github.com/linkedin/asciietch/blob/33499e9b1c5226c04078d08a210ef657c630291c/asciietch/graph.py#L133-L192 |
1,976 | HPAC/matchpy | matchpy/functions.py | replace | def replace(expression: Expression, position: Sequence[int], replacement: Replacement) -> Replacement:
r"""Replaces the subexpression of `expression` at the given `position` with the given `replacement`.
The original `expression` itself is not modified, but a modified copy is returned. If the replacement
is a list of expressions, it will be expanded into the list of operands of the respective operation:
>>> print(replace(f(a), (0, ), [b, c]))
f(b, c)
Parameters:
expression:
An :class:`Expression` where a (sub)expression is to be replaced.
position:
A tuple of indices, e.g. the empty tuple refers to the `expression` itself,
`(0, )` refers to the first child (operand) of the `expression`, `(0, 0)` to the first
child of the first child etc.
replacement:
Either an :class:`Expression` or a list of :class:`Expression`\s to be
inserted into the `expression` instead of the original expression at that `position`.
Returns:
The resulting expression from the replacement.
Raises:
IndexError: If the position is invalid or out of range.
"""
if len(position) == 0:
return replacement
if not isinstance(expression, Operation):
raise IndexError("Invalid position {!r} for expression {!s}".format(position, expression))
if position[0] >= op_len(expression):
raise IndexError("Position {!r} out of range for expression {!s}".format(position, expression))
pos = position[0]
operands = list(op_iter(expression))
subexpr = replace(operands[pos], position[1:], replacement)
if isinstance(subexpr, Sequence):
new_operands = tuple(operands[:pos]) + tuple(subexpr) + tuple(operands[pos + 1:])
return create_operation_expression(expression, new_operands)
operands[pos] = subexpr
return create_operation_expression(expression, operands) | python | def replace(expression: Expression, position: Sequence[int], replacement: Replacement) -> Replacement:
r"""Replaces the subexpression of `expression` at the given `position` with the given `replacement`.
The original `expression` itself is not modified, but a modified copy is returned. If the replacement
is a list of expressions, it will be expanded into the list of operands of the respective operation:
>>> print(replace(f(a), (0, ), [b, c]))
f(b, c)
Parameters:
expression:
An :class:`Expression` where a (sub)expression is to be replaced.
position:
A tuple of indices, e.g. the empty tuple refers to the `expression` itself,
`(0, )` refers to the first child (operand) of the `expression`, `(0, 0)` to the first
child of the first child etc.
replacement:
Either an :class:`Expression` or a list of :class:`Expression`\s to be
inserted into the `expression` instead of the original expression at that `position`.
Returns:
The resulting expression from the replacement.
Raises:
IndexError: If the position is invalid or out of range.
"""
if len(position) == 0:
return replacement
if not isinstance(expression, Operation):
raise IndexError("Invalid position {!r} for expression {!s}".format(position, expression))
if position[0] >= op_len(expression):
raise IndexError("Position {!r} out of range for expression {!s}".format(position, expression))
pos = position[0]
operands = list(op_iter(expression))
subexpr = replace(operands[pos], position[1:], replacement)
if isinstance(subexpr, Sequence):
new_operands = tuple(operands[:pos]) + tuple(subexpr) + tuple(operands[pos + 1:])
return create_operation_expression(expression, new_operands)
operands[pos] = subexpr
return create_operation_expression(expression, operands) | [
"def",
"replace",
"(",
"expression",
":",
"Expression",
",",
"position",
":",
"Sequence",
"[",
"int",
"]",
",",
"replacement",
":",
"Replacement",
")",
"->",
"Replacement",
":",
"if",
"len",
"(",
"position",
")",
"==",
"0",
":",
"return",
"replacement",
"if",
"not",
"isinstance",
"(",
"expression",
",",
"Operation",
")",
":",
"raise",
"IndexError",
"(",
"\"Invalid position {!r} for expression {!s}\"",
".",
"format",
"(",
"position",
",",
"expression",
")",
")",
"if",
"position",
"[",
"0",
"]",
">=",
"op_len",
"(",
"expression",
")",
":",
"raise",
"IndexError",
"(",
"\"Position {!r} out of range for expression {!s}\"",
".",
"format",
"(",
"position",
",",
"expression",
")",
")",
"pos",
"=",
"position",
"[",
"0",
"]",
"operands",
"=",
"list",
"(",
"op_iter",
"(",
"expression",
")",
")",
"subexpr",
"=",
"replace",
"(",
"operands",
"[",
"pos",
"]",
",",
"position",
"[",
"1",
":",
"]",
",",
"replacement",
")",
"if",
"isinstance",
"(",
"subexpr",
",",
"Sequence",
")",
":",
"new_operands",
"=",
"tuple",
"(",
"operands",
"[",
":",
"pos",
"]",
")",
"+",
"tuple",
"(",
"subexpr",
")",
"+",
"tuple",
"(",
"operands",
"[",
"pos",
"+",
"1",
":",
"]",
")",
"return",
"create_operation_expression",
"(",
"expression",
",",
"new_operands",
")",
"operands",
"[",
"pos",
"]",
"=",
"subexpr",
"return",
"create_operation_expression",
"(",
"expression",
",",
"operands",
")"
] | r"""Replaces the subexpression of `expression` at the given `position` with the given `replacement`.
The original `expression` itself is not modified, but a modified copy is returned. If the replacement
is a list of expressions, it will be expanded into the list of operands of the respective operation:
>>> print(replace(f(a), (0, ), [b, c]))
f(b, c)
Parameters:
expression:
An :class:`Expression` where a (sub)expression is to be replaced.
position:
A tuple of indices, e.g. the empty tuple refers to the `expression` itself,
`(0, )` refers to the first child (operand) of the `expression`, `(0, 0)` to the first
child of the first child etc.
replacement:
Either an :class:`Expression` or a list of :class:`Expression`\s to be
inserted into the `expression` instead of the original expression at that `position`.
Returns:
The resulting expression from the replacement.
Raises:
IndexError: If the position is invalid or out of range. | [
"r",
"Replaces",
"the",
"subexpression",
"of",
"expression",
"at",
"the",
"given",
"position",
"with",
"the",
"given",
"replacement",
"."
] | 06b2ec50ee0efdf3dd183768c0ffdb51b7efc393 | https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/functions.py#L96-L135 |
1,977 | HPAC/matchpy | matchpy/matching/bipartite.py | BipartiteGraph.find_matching | def find_matching(self) -> Dict[TLeft, TRight]:
"""Finds a matching in the bipartite graph.
This is done using the Hopcroft-Karp algorithm with an implementation from the
`hopcroftkarp` package.
Returns:
A dictionary where each edge of the matching is represented by a key-value pair
with the key being from the left part of the graph and the value from the right part.
"""
# The directed graph is represented as a dictionary of edges
# The key is the tail of all edges which are represented by the value
# The value is a set of heads for all the edges originating from the tail (key)
# In addition, the graph stores which part of the bipartite graph a node originated from
# to avoid problems when a value exists in both halves.
# Only one direction of the undirected edge is needed for the HopcroftKarp class
directed_graph = {} # type: Dict[Tuple[int, TLeft], Set[Tuple[int, TRight]]]
for (left, right) in self._edges:
tail = (LEFT, left)
head = (RIGHT, right)
if tail not in directed_graph:
directed_graph[tail] = {head}
else:
directed_graph[tail].add(head)
matching = HopcroftKarp(directed_graph).maximum_matching()
# Filter out the partitions (LEFT and RIGHT) and only return the matching edges
# that go from LEFT to RIGHT
return dict((tail[1], head[1]) for tail, head in matching.items() if tail[0] == LEFT) | python | def find_matching(self) -> Dict[TLeft, TRight]:
# The directed graph is represented as a dictionary of edges
# The key is the tail of all edges which are represented by the value
# The value is a set of heads for all the edges originating from the tail (key)
# In addition, the graph stores which part of the bipartite graph a node originated from
# to avoid problems when a value exists in both halves.
# Only one direction of the undirected edge is needed for the HopcroftKarp class
directed_graph = {} # type: Dict[Tuple[int, TLeft], Set[Tuple[int, TRight]]]
for (left, right) in self._edges:
tail = (LEFT, left)
head = (RIGHT, right)
if tail not in directed_graph:
directed_graph[tail] = {head}
else:
directed_graph[tail].add(head)
matching = HopcroftKarp(directed_graph).maximum_matching()
# Filter out the partitions (LEFT and RIGHT) and only return the matching edges
# that go from LEFT to RIGHT
return dict((tail[1], head[1]) for tail, head in matching.items() if tail[0] == LEFT) | [
"def",
"find_matching",
"(",
"self",
")",
"->",
"Dict",
"[",
"TLeft",
",",
"TRight",
"]",
":",
"# The directed graph is represented as a dictionary of edges",
"# The key is the tail of all edges which are represented by the value",
"# The value is a set of heads for the all edges originating from the tail (key)",
"# In addition, the graph stores which part of the bipartite graph a node originated from",
"# to avoid problems when a value exists in both halfs.",
"# Only one direction of the undirected edge is needed for the HopcroftKarp class",
"directed_graph",
"=",
"{",
"}",
"# type: Dict[Tuple[int, TLeft], Set[Tuple[int, TRight]]]",
"for",
"(",
"left",
",",
"right",
")",
"in",
"self",
".",
"_edges",
":",
"tail",
"=",
"(",
"LEFT",
",",
"left",
")",
"head",
"=",
"(",
"RIGHT",
",",
"right",
")",
"if",
"tail",
"not",
"in",
"directed_graph",
":",
"directed_graph",
"[",
"tail",
"]",
"=",
"{",
"head",
"}",
"else",
":",
"directed_graph",
"[",
"tail",
"]",
".",
"add",
"(",
"head",
")",
"matching",
"=",
"HopcroftKarp",
"(",
"directed_graph",
")",
".",
"maximum_matching",
"(",
")",
"# Filter out the partitions (LEFT and RIGHT) and only return the matching edges",
"# that go from LEFT to RIGHT",
"return",
"dict",
"(",
"(",
"tail",
"[",
"1",
"]",
",",
"head",
"[",
"1",
"]",
")",
"for",
"tail",
",",
"head",
"in",
"matching",
".",
"items",
"(",
")",
"if",
"tail",
"[",
"0",
"]",
"==",
"LEFT",
")"
] | Finds a matching in the bipartite graph.
This is done using the Hopcroft-Karp algorithm with an implementation from the
`hopcroftkarp` package.
Returns:
A dictionary where each edge of the matching is represented by a key-value pair
with the key being from the left part of the graph and the value from the right part. | [
"Finds",
"a",
"matching",
"in",
"the",
"bipartite",
"graph",
"."
] | 06b2ec50ee0efdf3dd183768c0ffdb51b7efc393 | https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/bipartite.py#L144-L174 |
1,978 | HPAC/matchpy | matchpy/matching/bipartite.py | BipartiteGraph.without_nodes | def without_nodes(self, edge: Edge) -> 'BipartiteGraph[TLeft, TRight, TEdgeValue]':
"""Returns a copy of this bipartite graph with the given edge and its adjacent nodes removed."""
return BipartiteGraph(((n1, n2), v) for (n1, n2), v in self._edges.items() if n1 != edge[0] and n2 != edge[1]) | python | def without_nodes(self, edge: Edge) -> 'BipartiteGraph[TLeft, TRight, TEdgeValue]':
return BipartiteGraph(((n1, n2), v) for (n1, n2), v in self._edges.items() if n1 != edge[0] and n2 != edge[1]) | [
"def",
"without_nodes",
"(",
"self",
",",
"edge",
":",
"Edge",
")",
"->",
"'BipartiteGraph[TLeft, TRight, TEdgeValue]'",
":",
"return",
"BipartiteGraph",
"(",
"(",
"(",
"n1",
",",
"n2",
")",
",",
"v",
")",
"for",
"(",
"n1",
",",
"n2",
")",
",",
"v",
"in",
"self",
".",
"_edges",
".",
"items",
"(",
")",
"if",
"n1",
"!=",
"edge",
"[",
"0",
"]",
"and",
"n2",
"!=",
"edge",
"[",
"1",
"]",
")"
] | Returns a copy of this bipartite graph with the given edge and its adjacent nodes removed. | [
"Returns",
"a",
"copy",
"of",
"this",
"bipartite",
"graph",
"with",
"the",
"given",
"edge",
"and",
"its",
"adjacent",
"nodes",
"removed",
"."
] | 06b2ec50ee0efdf3dd183768c0ffdb51b7efc393 | https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/bipartite.py#L176-L178 |
1,979 | HPAC/matchpy | matchpy/matching/bipartite.py | BipartiteGraph.without_edge | def without_edge(self, edge: Edge) -> 'BipartiteGraph[TLeft, TRight, TEdgeValue]':
"""Returns a copy of this bipartite graph with the given edge removed."""
return BipartiteGraph((e2, v) for e2, v in self._edges.items() if edge != e2) | python | def without_edge(self, edge: Edge) -> 'BipartiteGraph[TLeft, TRight, TEdgeValue]':
return BipartiteGraph((e2, v) for e2, v in self._edges.items() if edge != e2) | [
"def",
"without_edge",
"(",
"self",
",",
"edge",
":",
"Edge",
")",
"->",
"'BipartiteGraph[TLeft, TRight, TEdgeValue]'",
":",
"return",
"BipartiteGraph",
"(",
"(",
"e2",
",",
"v",
")",
"for",
"e2",
",",
"v",
"in",
"self",
".",
"_edges",
".",
"items",
"(",
")",
"if",
"edge",
"!=",
"e2",
")"
] | Returns a copy of this bipartite graph with the given edge removed. | [
"Returns",
"a",
"copy",
"of",
"this",
"bipartite",
"graph",
"with",
"the",
"given",
"edge",
"removed",
"."
] | 06b2ec50ee0efdf3dd183768c0ffdb51b7efc393 | https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/bipartite.py#L180-L182 |
1,980 | HPAC/matchpy | matchpy/matching/bipartite.py | BipartiteGraph.limited_to | def limited_to(self, left: Set[TLeft], right: Set[TRight]) -> 'BipartiteGraph[TLeft, TRight, TEdgeValue]':
"""Returns the induced subgraph where only the nodes from the given sets are included."""
return BipartiteGraph(((n1, n2), v) for (n1, n2), v in self._edges.items() if n1 in left and n2 in right) | python | def limited_to(self, left: Set[TLeft], right: Set[TRight]) -> 'BipartiteGraph[TLeft, TRight, TEdgeValue]':
return BipartiteGraph(((n1, n2), v) for (n1, n2), v in self._edges.items() if n1 in left and n2 in right) | [
"def",
"limited_to",
"(",
"self",
",",
"left",
":",
"Set",
"[",
"TLeft",
"]",
",",
"right",
":",
"Set",
"[",
"TRight",
"]",
")",
"->",
"'BipartiteGraph[TLeft, TRight, TEdgeValue]'",
":",
"return",
"BipartiteGraph",
"(",
"(",
"(",
"n1",
",",
"n2",
")",
",",
"v",
")",
"for",
"(",
"n1",
",",
"n2",
")",
",",
"v",
"in",
"self",
".",
"_edges",
".",
"items",
"(",
")",
"if",
"n1",
"in",
"left",
"and",
"n2",
"in",
"right",
")"
] | Returns the induced subgraph where only the nodes from the given sets are included. | [
"Returns",
"the",
"induced",
"subgraph",
"where",
"only",
"the",
"nodes",
"from",
"the",
"given",
"sets",
"are",
"included",
"."
] | 06b2ec50ee0efdf3dd183768c0ffdb51b7efc393 | https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/bipartite.py#L184-L186 |
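The three entries above (`without_nodes`, `without_edge`, `limited_to`) are variations of a single filtering comprehension over the internal `_edges` dict. Stripped of the class wrapper (a plain-dict sketch, not matchpy's API), the pattern is:

```python
# Edge-filtering patterns from without_nodes / without_edge / limited_to,
# applied to a bare {(left, right): value} dict instead of a BipartiteGraph.
edges = {('a', 1): 'v1', ('a', 2): 'v2', ('b', 1): 'v3', ('b', 2): 'v4'}

# without_nodes(('a', 1)): drop every edge incident to node 'a' or node 1
print({(l, r): v for (l, r), v in edges.items() if l != 'a' and r != 1})
# {('b', 2): 'v4'}

# without_edge(('a', 1)): drop exactly that one edge
print({e: v for e, v in edges.items() if e != ('a', 1)})

# limited_to({'b'}, {1, 2}): induced subgraph on the given node sets
print({(l, r): v for (l, r), v in edges.items() if l in {'b'} and r in {1, 2}})
# {('b', 1): 'v3', ('b', 2): 'v4'}
```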
1,981 | HPAC/matchpy | matchpy/expressions/functions.py | is_constant | def is_constant(expression):
"""Check if the given expression is constant, i.e. it does not contain Wildcards."""
if isinstance(expression, Wildcard):
return False
if isinstance(expression, Expression):
return expression.is_constant
if isinstance(expression, Operation):
return all(is_constant(o) for o in op_iter(expression))
return True | python | def is_constant(expression):
if isinstance(expression, Wildcard):
return False
if isinstance(expression, Expression):
return expression.is_constant
if isinstance(expression, Operation):
return all(is_constant(o) for o in op_iter(expression))
return True | [
"def",
"is_constant",
"(",
"expression",
")",
":",
"if",
"isinstance",
"(",
"expression",
",",
"Wildcard",
")",
":",
"return",
"False",
"if",
"isinstance",
"(",
"expression",
",",
"Expression",
")",
":",
"return",
"expression",
".",
"is_constant",
"if",
"isinstance",
"(",
"expression",
",",
"Operation",
")",
":",
"return",
"all",
"(",
"is_constant",
"(",
"o",
")",
"for",
"o",
"in",
"op_iter",
"(",
"expression",
")",
")",
"return",
"True"
] | Check if the given expression is constant, i.e. it does not contain Wildcards. | [
"Check",
"if",
"the",
"given",
"expression",
"is",
"constant",
"i",
".",
"e",
".",
"it",
"does",
"not",
"contain",
"Wildcards",
"."
] | 06b2ec50ee0efdf3dd183768c0ffdb51b7efc393 | https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/functions.py#L15-L23 |
1,982 | HPAC/matchpy | matchpy/expressions/functions.py | get_head | def get_head(expression):
"""Returns the given expression's head."""
if isinstance(expression, Wildcard):
if isinstance(expression, SymbolWildcard):
return expression.symbol_type
return None
return type(expression) | python | def get_head(expression):
if isinstance(expression, Wildcard):
if isinstance(expression, SymbolWildcard):
return expression.symbol_type
return None
return type(expression) | [
"def",
"get_head",
"(",
"expression",
")",
":",
"if",
"isinstance",
"(",
"expression",
",",
"Wildcard",
")",
":",
"if",
"isinstance",
"(",
"expression",
",",
"SymbolWildcard",
")",
":",
"return",
"expression",
".",
"symbol_type",
"return",
"None",
"return",
"type",
"(",
"expression",
")"
] | Returns the given expression's head. | [
"Returns",
"the",
"given",
"expression",
"s",
"head",
"."
] | 06b2ec50ee0efdf3dd183768c0ffdb51b7efc393 | https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/functions.py#L42-L48 |
1,983 | HPAC/matchpy | matchpy/expressions/functions.py | match_head | def match_head(subject, pattern):
"""Checks if the head of subject matches the pattern's head."""
if isinstance(pattern, Pattern):
pattern = pattern.expression
pattern_head = get_head(pattern)
if pattern_head is None:
return True
if issubclass(pattern_head, OneIdentityOperation):
return True
subject_head = get_head(subject)
assert subject_head is not None
return issubclass(subject_head, pattern_head) | python | def match_head(subject, pattern):
if isinstance(pattern, Pattern):
pattern = pattern.expression
pattern_head = get_head(pattern)
if pattern_head is None:
return True
if issubclass(pattern_head, OneIdentityOperation):
return True
subject_head = get_head(subject)
assert subject_head is not None
return issubclass(subject_head, pattern_head) | [
"def",
"match_head",
"(",
"subject",
",",
"pattern",
")",
":",
"if",
"isinstance",
"(",
"pattern",
",",
"Pattern",
")",
":",
"pattern",
"=",
"pattern",
".",
"expression",
"pattern_head",
"=",
"get_head",
"(",
"pattern",
")",
"if",
"pattern_head",
"is",
"None",
":",
"return",
"True",
"if",
"issubclass",
"(",
"pattern_head",
",",
"OneIdentityOperation",
")",
":",
"return",
"True",
"subject_head",
"=",
"get_head",
"(",
"subject",
")",
"assert",
"subject_head",
"is",
"not",
"None",
"return",
"issubclass",
"(",
"subject_head",
",",
"pattern_head",
")"
] | Checks if the head of subject matches the pattern's head. | [
"Checks",
"if",
"the",
"head",
"of",
"subject",
"matches",
"the",
"pattern",
"s",
"head",
"."
] | 06b2ec50ee0efdf3dd183768c0ffdb51b7efc393 | https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/functions.py#L51-L62 |
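`match_head` above reduces head compatibility to `issubclass` checks, with `None` standing in for an unconstrained wildcard head. A compact standalone illustration (the `Base`/`Special` classes are hypothetical stand-ins for symbol types):

```python
# Head compatibility as issubclass, mirroring the logic of match_head above.
class Base: pass
class Special(Base): pass

def heads_compatible(subject_head, pattern_head):
    if pattern_head is None:               # plain wildcard: matches any head
        return True
    return issubclass(subject_head, pattern_head)

print(heads_compatible(Special, Base))     # True: Special is a kind of Base
print(heads_compatible(Base, Special))     # False
print(heads_compatible(Base, None))        # True
```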
1,984 | HPAC/matchpy | matchpy/expressions/functions.py | is_anonymous | def is_anonymous(expression):
"""Returns True iff the expression does not contain any variables."""
if hasattr(expression, 'variable_name') and expression.variable_name:
return False
if isinstance(expression, Operation):
return all(is_anonymous(o) for o in op_iter(expression))
return True | python | def is_anonymous(expression):
if hasattr(expression, 'variable_name') and expression.variable_name:
return False
if isinstance(expression, Operation):
return all(is_anonymous(o) for o in op_iter(expression))
return True | [
"def",
"is_anonymous",
"(",
"expression",
")",
":",
"if",
"hasattr",
"(",
"expression",
",",
"'variable_name'",
")",
"and",
"expression",
".",
"variable_name",
":",
"return",
"False",
"if",
"isinstance",
"(",
"expression",
",",
"Operation",
")",
":",
"return",
"all",
"(",
"is_anonymous",
"(",
"o",
")",
"for",
"o",
"in",
"op_iter",
"(",
"expression",
")",
")",
"return",
"True"
] | Returns True iff the expression does not contain any variables. | [
"Returns",
"True",
"iff",
"the",
"expression",
"does",
"not",
"contain",
"any",
"variables",
"."
] | 06b2ec50ee0efdf3dd183768c0ffdb51b7efc393 | https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/functions.py#L85-L91 |
1,985 | HPAC/matchpy | matchpy/expressions/functions.py | contains_variables_from_set | def contains_variables_from_set(expression, variables):
"""Returns True iff the expression contains any of the variables from the given set."""
if hasattr(expression, 'variable_name') and expression.variable_name in variables:
return True
if isinstance(expression, Operation):
return any(contains_variables_from_set(o, variables) for o in op_iter(expression))
return False | python | def contains_variables_from_set(expression, variables):
if hasattr(expression, 'variable_name') and expression.variable_name in variables:
return True
if isinstance(expression, Operation):
return any(contains_variables_from_set(o, variables) for o in op_iter(expression))
return False | [
"def",
"contains_variables_from_set",
"(",
"expression",
",",
"variables",
")",
":",
"if",
"hasattr",
"(",
"expression",
",",
"'variable_name'",
")",
"and",
"expression",
".",
"variable_name",
"in",
"variables",
":",
"return",
"True",
"if",
"isinstance",
"(",
"expression",
",",
"Operation",
")",
":",
"return",
"any",
"(",
"contains_variables_from_set",
"(",
"o",
",",
"variables",
")",
"for",
"o",
"in",
"op_iter",
"(",
"expression",
")",
")",
"return",
"False"
] | Returns True iff the expression contains any of the variables from the given set. | [
"Returns",
"True",
"iff",
"the",
"expression",
"contains",
"any",
"of",
"the",
"variables",
"from",
"the",
"given",
"set",
"."
] | 06b2ec50ee0efdf3dd183768c0ffdb51b7efc393 | https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/functions.py#L94-L100 |
1,986 | HPAC/matchpy | matchpy/expressions/functions.py | get_variables | def get_variables(expression, variables=None):
"""Returns the set of variable names in the given expression."""
if variables is None:
variables = set()
if hasattr(expression, 'variable_name') and expression.variable_name is not None:
variables.add(expression.variable_name)
if isinstance(expression, Operation):
for operand in op_iter(expression):
get_variables(operand, variables)
return variables | python | def get_variables(expression, variables=None):
if variables is None:
variables = set()
if hasattr(expression, 'variable_name') and expression.variable_name is not None:
variables.add(expression.variable_name)
if isinstance(expression, Operation):
for operand in op_iter(expression):
get_variables(operand, variables)
return variables | [
"def",
"get_variables",
"(",
"expression",
",",
"variables",
"=",
"None",
")",
":",
"if",
"variables",
"is",
"None",
":",
"variables",
"=",
"set",
"(",
")",
"if",
"hasattr",
"(",
"expression",
",",
"'variable_name'",
")",
"and",
"expression",
".",
"variable_name",
"is",
"not",
"None",
":",
"variables",
".",
"add",
"(",
"expression",
".",
"variable_name",
")",
"if",
"isinstance",
"(",
"expression",
",",
"Operation",
")",
":",
"for",
"operand",
"in",
"op_iter",
"(",
"expression",
")",
":",
"get_variables",
"(",
"operand",
",",
"variables",
")",
"return",
"variables"
] | Returns the set of variable names in the given expression. | [
"Returns",
"the",
"set",
"of",
"variable",
"names",
"in",
"the",
"given",
"expression",
"."
] | 06b2ec50ee0efdf3dd183768c0ffdb51b7efc393 | https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/functions.py#L103-L112 |
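The entries for `is_constant`, `is_anonymous`, `contains_variables_from_set`, and `get_variables` above all instantiate one recursive traversal pattern over the expression tree. A self-contained sketch of that pattern, using hypothetical stand-in classes rather than matchpy's real `Operation`/`Symbol`/`Wildcard` types:

```python
# Recursive expression traversal, as shared by is_constant / is_anonymous /
# get_variables above. Sym/Wild/Op are hypothetical stand-ins.
class Sym:
    def __init__(self, name, variable_name=None):
        self.name, self.variable_name = name, variable_name

class Wild:
    def __init__(self, variable_name=None):
        self.variable_name = variable_name

class Op:
    def __init__(self, *operands, variable_name=None):
        self.operands, self.variable_name = operands, variable_name

def get_variables(expr, variables=None):
    if variables is None:
        variables = set()
    if getattr(expr, 'variable_name', None) is not None:
        variables.add(expr.variable_name)
    if isinstance(expr, Op):               # recurse into the operands
        for operand in expr.operands:
            get_variables(operand, variables)
    return variables

expr = Op(Sym('a'), Wild('x'), Op(Wild('y')))
print(sorted(get_variables(expr)))         # ['x', 'y']
```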
1,987 | HPAC/matchpy | matchpy/expressions/functions.py | rename_variables | def rename_variables(expression: Expression, renaming: Dict[str, str]) -> Expression:
"""Rename the variables in the expression according to the given dictionary.
Args:
expression:
The expression in which the variables are renamed.
renaming:
The renaming dictionary. Maps old variable names to new ones.
Variable names not occurring in the dictionary are left unchanged.
Returns:
The expression with renamed variables.
"""
if isinstance(expression, Operation):
if hasattr(expression, 'variable_name'):
variable_name = renaming.get(expression.variable_name, expression.variable_name)
return create_operation_expression(
expression, [rename_variables(o, renaming) for o in op_iter(expression)], variable_name=variable_name
)
operands = [rename_variables(o, renaming) for o in op_iter(expression)]
return create_operation_expression(expression, operands)
elif isinstance(expression, Expression):
expression = expression.__copy__()
expression.variable_name = renaming.get(expression.variable_name, expression.variable_name)
return expression | python | def rename_variables(expression: Expression, renaming: Dict[str, str]) -> Expression:
if isinstance(expression, Operation):
if hasattr(expression, 'variable_name'):
variable_name = renaming.get(expression.variable_name, expression.variable_name)
return create_operation_expression(
expression, [rename_variables(o, renaming) for o in op_iter(expression)], variable_name=variable_name
)
operands = [rename_variables(o, renaming) for o in op_iter(expression)]
return create_operation_expression(expression, operands)
elif isinstance(expression, Expression):
expression = expression.__copy__()
expression.variable_name = renaming.get(expression.variable_name, expression.variable_name)
return expression | [
"def",
"rename_variables",
"(",
"expression",
":",
"Expression",
",",
"renaming",
":",
"Dict",
"[",
"str",
",",
"str",
"]",
")",
"->",
"Expression",
":",
"if",
"isinstance",
"(",
"expression",
",",
"Operation",
")",
":",
"if",
"hasattr",
"(",
"expression",
",",
"'variable_name'",
")",
":",
"variable_name",
"=",
"renaming",
".",
"get",
"(",
"expression",
".",
"variable_name",
",",
"expression",
".",
"variable_name",
")",
"return",
"create_operation_expression",
"(",
"expression",
",",
"[",
"rename_variables",
"(",
"o",
",",
"renaming",
")",
"for",
"o",
"in",
"op_iter",
"(",
"expression",
")",
"]",
",",
"variable_name",
"=",
"variable_name",
")",
"operands",
"=",
"[",
"rename_variables",
"(",
"o",
",",
"renaming",
")",
"for",
"o",
"in",
"op_iter",
"(",
"expression",
")",
"]",
"return",
"create_operation_expression",
"(",
"expression",
",",
"operands",
")",
"elif",
"isinstance",
"(",
"expression",
",",
"Expression",
")",
":",
"expression",
"=",
"expression",
".",
"__copy__",
"(",
")",
"expression",
".",
"variable_name",
"=",
"renaming",
".",
"get",
"(",
"expression",
".",
"variable_name",
",",
"expression",
".",
"variable_name",
")",
"return",
"expression"
] | Rename the variables in the expression according to the given dictionary.
Args:
expression:
The expression in which the variables are renamed.
renaming:
The renaming dictionary. Maps old variable names to new ones.
Variable names not occurring in the dictionary are left unchanged.
Returns:
The expression with renamed variables. | [
"Rename",
"the",
"variables",
"in",
"the",
"expression",
"according",
"to",
"the",
"given",
"dictionary",
"."
] | 06b2ec50ee0efdf3dd183768c0ffdb51b7efc393 | https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/functions.py#L115-L139 |
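The heart of `rename_variables` above is the `renaming.get(name, name)` idiom: names missing from the renaming dictionary fall back to themselves and are left unchanged. In isolation:

```python
# The dict-lookup-with-fallback idiom used by rename_variables above.
renaming = {'x': 'x0', 'y': 'y0'}
names = ['x', 'y', 'z']
print([renaming.get(n, n) for n in names])  # ['x0', 'y0', 'z']; 'z' is kept
```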
1,988 | HPAC/matchpy | matchpy/utils.py | fixed_integer_vector_iter | def fixed_integer_vector_iter(max_vector: Tuple[int, ...], vector_sum: int) -> Iterator[Tuple[int, ...]]:
"""
Return an iterator over the integer vectors which
- are componentwise less than or equal to *max_vector*, and
- are non-negative, and where
- the sum of their components is exactly *vector_sum*.
The iterator yields the vectors in lexicographical order.
Examples:
List all vectors that are between ``(0, 0)`` and ``(2, 2)`` componentwise, where the sum of components is 2:
>>> vectors = list(fixed_integer_vector_iter([2, 2], 2))
>>> vectors
[(0, 2), (1, 1), (2, 0)]
>>> list(map(sum, vectors))
[2, 2, 2]
Args:
max_vector:
Maximum vector for the iteration. Every yielded result will be less than or equal to this componentwise.
vector_sum:
Every iterated vector will have a component sum equal to this value.
Yields:
All non-negative vectors that have the given sum and are not larger than the given maximum.
Raises:
ValueError:
If *vector_sum* is negative.
"""
if vector_sum < 0:
raise ValueError("Vector sum must not be negative")
if len(max_vector) == 0:
if vector_sum == 0:
yield tuple()
return
total = sum(max_vector)
if vector_sum <= total:
start = max(max_vector[0] + vector_sum - total, 0)
end = min(max_vector[0], vector_sum)
for j in range(start, end + 1):
for vec in fixed_integer_vector_iter(max_vector[1:], vector_sum - j):
yield (j, ) + vec | python | def fixed_integer_vector_iter(max_vector: Tuple[int, ...], vector_sum: int) -> Iterator[Tuple[int, ...]]:
if vector_sum < 0:
raise ValueError("Vector sum must not be negative")
if len(max_vector) == 0:
if vector_sum == 0:
yield tuple()
return
total = sum(max_vector)
if vector_sum <= total:
start = max(max_vector[0] + vector_sum - total, 0)
end = min(max_vector[0], vector_sum)
for j in range(start, end + 1):
for vec in fixed_integer_vector_iter(max_vector[1:], vector_sum - j):
yield (j, ) + vec | [
"def",
"fixed_integer_vector_iter",
"(",
"max_vector",
":",
"Tuple",
"[",
"int",
",",
"...",
"]",
",",
"vector_sum",
":",
"int",
")",
"->",
"Iterator",
"[",
"Tuple",
"[",
"int",
",",
"...",
"]",
"]",
":",
"if",
"vector_sum",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Vector sum must not be negative\"",
")",
"if",
"len",
"(",
"max_vector",
")",
"==",
"0",
":",
"if",
"vector_sum",
"==",
"0",
":",
"yield",
"tuple",
"(",
")",
"return",
"total",
"=",
"sum",
"(",
"max_vector",
")",
"if",
"vector_sum",
"<=",
"total",
":",
"start",
"=",
"max",
"(",
"max_vector",
"[",
"0",
"]",
"+",
"vector_sum",
"-",
"total",
",",
"0",
")",
"end",
"=",
"min",
"(",
"max_vector",
"[",
"0",
"]",
",",
"vector_sum",
")",
"for",
"j",
"in",
"range",
"(",
"start",
",",
"end",
"+",
"1",
")",
":",
"for",
"vec",
"in",
"fixed_integer_vector_iter",
"(",
"max_vector",
"[",
"1",
":",
"]",
",",
"vector_sum",
"-",
"j",
")",
":",
"yield",
"(",
"j",
",",
")",
"+",
"vec"
] | Return an iterator over the integer vectors which
- are componentwise less than or equal to *max_vector*, and
- are non-negative, and where
- the sum of their components is exactly *vector_sum*.
The iterator yields the vectors in lexicographical order.
Examples:
List all vectors that are between ``(0, 0)`` and ``(2, 2)`` componentwise, where the sum of components is 2:
>>> vectors = list(fixed_integer_vector_iter([2, 2], 2))
>>> vectors
[(0, 2), (1, 1), (2, 0)]
>>> list(map(sum, vectors))
[2, 2, 2]
Args:
max_vector:
Maximum vector for the iteration. Every yielded result will be less than or equal to this componentwise.
vector_sum:
Every iterated vector will have a component sum equal to this value.
Yields:
All non-negative vectors that have the given sum and are not larger than the given maximum.
Raises:
ValueError:
If *vector_sum* is negative. | [
"Return",
"an",
"iterator",
"over",
"the",
"integer",
"vectors",
"which"
] | 06b2ec50ee0efdf3dd183768c0ffdb51b7efc393 | https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/utils.py#L30-L75 |
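The bounds in the recursion above do the pruning: for the first component j, `start = max(max_vector[0] + vector_sum - total, 0)` ensures the remaining components can still reach the remaining sum, while `end = min(max_vector[0], vector_sum)` keeps j itself feasible. A quick check of those bounds (assuming matchpy is installed; the import path is inferred from the path column above, matchpy/utils.py):

```python
# Usage check for fixed_integer_vector_iter.
from matchpy.utils import fixed_integer_vector_iter

# For max_vector=(1, 2, 1) and vector_sum=3 (total=4), the first component
# ranges over max(1 + 3 - 4, 0) = 0 .. min(1, 3) = 1.
print(list(fixed_integer_vector_iter((1, 2, 1), 3)))
# [(0, 2, 1), (1, 1, 1), (1, 2, 0)]
```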
1,989 | HPAC/matchpy | matchpy/utils.py | commutative_sequence_variable_partition_iter | def commutative_sequence_variable_partition_iter(values: Multiset, variables: List[VariableWithCount]
) -> Iterator[Dict[str, Multiset]]:
"""Yield all possible variable substitutions for given values and variables.
.. note::
The results are not yielded in any particular order because the algorithm uses dictionaries. Dictionaries until
Python 3.6 do not keep track of the insertion order.
Example:
For a subject like ``fc(a, a, a, b, b, c)`` and a pattern like ``f(x__, y___, y___)`` one can define the
following input parameters for the partitioning:
>>> x = VariableWithCount(name='x', count=1, minimum=1, default=None)
>>> y = VariableWithCount(name='y', count=2, minimum=0, default=None)
>>> values = Multiset('aaabbc')
Then the solutions are found (and sorted to get a unique output):
>>> substitutions = commutative_sequence_variable_partition_iter(values, [x, y])
>>> as_strings = list(str(Substitution(substitution)) for substitution in substitutions)
>>> for substitution in sorted(as_strings):
... print(substitution)
{x ↦ {a, a, a, b, b, c}, y ↦ {}}
{x ↦ {a, a, a, c}, y ↦ {b}}
{x ↦ {a, b, b, c}, y ↦ {a}}
{x ↦ {a, c}, y ↦ {a, b}}
Args:
values:
The multiset of values which are partitioned and distributed among the variables.
variables:
A list of the variables to distribute the values among. Each variable has a name, a count of how many times
it occurs and a minimum number of values it needs.
Yields:
Each possible substitutions that is a valid partitioning of the values among the variables.
"""
if len(variables) == 1:
yield from _commutative_single_variable_partiton_iter(values, variables[0])
return
generators = []
for value, count in values.items():
generators.append(_make_variable_generator_factory(value, count, variables))
initial = dict((var.name, Multiset()) for var in variables) # type: Dict[str, 'Multiset[T]']
for subst in generator_chain(initial, *generators):
valid = True
for var in variables:
if var.default is not None and len(subst[var.name]) == 0:
subst[var.name] = var.default
elif len(subst[var.name]) < var.minimum:
valid = False
break
if valid:
if None in subst:
del subst[None]
yield subst | python | def commutative_sequence_variable_partition_iter(values: Multiset, variables: List[VariableWithCount]
) -> Iterator[Dict[str, Multiset]]:
if len(variables) == 1:
yield from _commutative_single_variable_partiton_iter(values, variables[0])
return
generators = []
for value, count in values.items():
generators.append(_make_variable_generator_factory(value, count, variables))
initial = dict((var.name, Multiset()) for var in variables) # type: Dict[str, 'Multiset[T]']
for subst in generator_chain(initial, *generators):
valid = True
for var in variables:
if var.default is not None and len(subst[var.name]) == 0:
subst[var.name] = var.default
elif len(subst[var.name]) < var.minimum:
valid = False
break
if valid:
if None in subst:
del subst[None]
yield subst | [
"def",
"commutative_sequence_variable_partition_iter",
"(",
"values",
":",
"Multiset",
",",
"variables",
":",
"List",
"[",
"VariableWithCount",
"]",
")",
"->",
"Iterator",
"[",
"Dict",
"[",
"str",
",",
"Multiset",
"]",
"]",
":",
"if",
"len",
"(",
"variables",
")",
"==",
"1",
":",
"yield",
"from",
"_commutative_single_variable_partiton_iter",
"(",
"values",
",",
"variables",
"[",
"0",
"]",
")",
"return",
"generators",
"=",
"[",
"]",
"for",
"value",
",",
"count",
"in",
"values",
".",
"items",
"(",
")",
":",
"generators",
".",
"append",
"(",
"_make_variable_generator_factory",
"(",
"value",
",",
"count",
",",
"variables",
")",
")",
"initial",
"=",
"dict",
"(",
"(",
"var",
".",
"name",
",",
"Multiset",
"(",
")",
")",
"for",
"var",
"in",
"variables",
")",
"# type: Dict[str, 'Multiset[T]']",
"for",
"subst",
"in",
"generator_chain",
"(",
"initial",
",",
"*",
"generators",
")",
":",
"valid",
"=",
"True",
"for",
"var",
"in",
"variables",
":",
"if",
"var",
".",
"default",
"is",
"not",
"None",
"and",
"len",
"(",
"subst",
"[",
"var",
".",
"name",
"]",
")",
"==",
"0",
":",
"subst",
"[",
"var",
".",
"name",
"]",
"=",
"var",
".",
"default",
"elif",
"len",
"(",
"subst",
"[",
"var",
".",
"name",
"]",
")",
"<",
"var",
".",
"minimum",
":",
"valid",
"=",
"False",
"break",
"if",
"valid",
":",
"if",
"None",
"in",
"subst",
":",
"del",
"subst",
"[",
"None",
"]",
"yield",
"subst"
] | Yield all possible variable substitutions for given values and variables.
.. note::
The results are not yielded in any particular order because the algorithm uses dictionaries. Dictionaries until
Python 3.6 do not keep track of the insertion order.
Example:
For a subject like ``fc(a, a, a, b, b, c)`` and a pattern like ``f(x__, y___, y___)`` one can define the
following input parameters for the partitioning:
>>> x = VariableWithCount(name='x', count=1, minimum=1, default=None)
>>> y = VariableWithCount(name='y', count=2, minimum=0, default=None)
>>> values = Multiset('aaabbc')
Then the solutions are found (and sorted to get a unique output):
>>> substitutions = commutative_sequence_variable_partition_iter(values, [x, y])
>>> as_strings = list(str(Substitution(substitution)) for substitution in substitutions)
>>> for substitution in sorted(as_strings):
... print(substitution)
{x ↦ {a, a, a, b, b, c}, y ↦ {}}
{x ↦ {a, a, a, c}, y ↦ {b}}
{x ↦ {a, b, b, c}, y ↦ {a}}
{x ↦ {a, c}, y ↦ {a, b}}
Args:
values:
The multiset of values which are partitioned and distributed among the variables.
variables:
A list of the variables to distribute the values among. Each variable has a name, a count of how many times
it occurs and a minimum number of values it needs.
Yields:
Each possible substitutions that is a valid partitioning of the values among the variables. | [
"Yield",
"all",
"possible",
"variable",
"substitutions",
"for",
"given",
"values",
"and",
"variables",
"."
] | 06b2ec50ee0efdf3dd183768c0ffdb51b7efc393 | https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/utils.py#L173-L232 |
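Since the docstring above already carries a complete doctest, it can be turned into a runnable script directly (assuming matchpy and its multiset dependency are installed; import paths are inferred from the path columns of these entries):

```python
# Runnable version of the doctest above.
from multiset import Multiset
from matchpy.utils import (VariableWithCount,
                           commutative_sequence_variable_partition_iter)
from matchpy.expressions.substitution import Substitution

x = VariableWithCount(name='x', count=1, minimum=1, default=None)
y = VariableWithCount(name='y', count=2, minimum=0, default=None)

for subst in commutative_sequence_variable_partition_iter(Multiset('aaabbc'), [x, y]):
    print(Substitution(subst))  # four partitions, in no particular order
```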
1,990 | HPAC/matchpy | matchpy/utils.py | generator_chain | def generator_chain(initial_data: T, *factories: Callable[[T], Iterator[T]]) -> Iterator[T]:
"""Chain multiple generators together by passing results from one to the next.
This helper function allows creating a chain of generators where each generator is constructed by a factory that
gets the data yielded by the previous generator. So each generator can generate new data dependent on the data
yielded by the previous one. For each data item yielded by a generator, a new generator is constructed by the
next factory.
Example:
Let's say for every number from 1 to 4, we want to count up to that number. Then we can do
something like this using list comprehensions:
>>> [i for n in range(1, 5) for i in range(1, n + 1)]
[1, 1, 2, 1, 2, 3, 1, 2, 3, 4]
You can use this function to achieve the same thing:
>>> list(generator_chain(5, lambda n: iter(range(1, n)), lambda i: iter(range(1, i + 1))))
[1, 1, 2, 1, 2, 3, 1, 2, 3, 4]
The advantage is that this is independent of the number of dependent generators you have.
Also, this function does not use recursion, so it is safe to use even with large generator counts.
Args:
initial_data:
The initial data that is passed to the first generator factory.
*factories:
The generator factories. Each of them gets passed its predecessor's data and has to return an iterable.
The data from this iterable is passed to the next factory.
Yields:
Every data item yielded by the generators of the final factory.
"""
generator_count = len(factories)
if generator_count == 0:
yield initial_data
return
generators = [None] * generator_count # type: List[Optional[Iterator[T]]]
next_data = initial_data
generator_index = 0
while True:
try:
while generator_index < generator_count:
if generators[generator_index] is None:
generators[generator_index] = factories[generator_index](next_data)
next_data = next(generators[generator_index])
generator_index += 1
yield next_data
generator_index -= 1
except StopIteration:
generators[generator_index] = None
generator_index -= 1
if generator_index < 0:
break | python | def generator_chain(initial_data: T, *factories: Callable[[T], Iterator[T]]) -> Iterator[T]:
generator_count = len(factories)
if generator_count == 0:
yield initial_data
return
generators = [None] * generator_count # type: List[Optional[Iterator[T]]]
next_data = initial_data
generator_index = 0
while True:
try:
while generator_index < generator_count:
if generators[generator_index] is None:
generators[generator_index] = factories[generator_index](next_data)
next_data = next(generators[generator_index])
generator_index += 1
yield next_data
generator_index -= 1
except StopIteration:
generators[generator_index] = None
generator_index -= 1
if generator_index < 0:
break | [
"def",
"generator_chain",
"(",
"initial_data",
":",
"T",
",",
"*",
"factories",
":",
"Callable",
"[",
"[",
"T",
"]",
",",
"Iterator",
"[",
"T",
"]",
"]",
")",
"->",
"Iterator",
"[",
"T",
"]",
":",
"generator_count",
"=",
"len",
"(",
"factories",
")",
"if",
"generator_count",
"==",
"0",
":",
"yield",
"initial_data",
"return",
"generators",
"=",
"[",
"None",
"]",
"*",
"generator_count",
"# type: List[Optional[Iterator[T]]]",
"next_data",
"=",
"initial_data",
"generator_index",
"=",
"0",
"while",
"True",
":",
"try",
":",
"while",
"generator_index",
"<",
"generator_count",
":",
"if",
"generators",
"[",
"generator_index",
"]",
"is",
"None",
":",
"generators",
"[",
"generator_index",
"]",
"=",
"factories",
"[",
"generator_index",
"]",
"(",
"next_data",
")",
"next_data",
"=",
"next",
"(",
"generators",
"[",
"generator_index",
"]",
")",
"generator_index",
"+=",
"1",
"yield",
"next_data",
"generator_index",
"-=",
"1",
"except",
"StopIteration",
":",
"generators",
"[",
"generator_index",
"]",
"=",
"None",
"generator_index",
"-=",
"1",
"if",
"generator_index",
"<",
"0",
":",
"break"
] | Chain multiple generators together by passing results from one to the next.
This helper function allows creating a chain of generators where each generator is constructed by a factory that
gets the data yielded by the previous generator. So each generator can generate new data dependent on the data
yielded by the previous one. For each data item yielded by a generator, a new generator is constructed by the
next factory.
Example:
Let's say for every number from 1 to 4, we want to count up to that number. Then we can do
something like this using list comprehensions:
>>> [i for n in range(1, 5) for i in range(1, n + 1)]
[1, 1, 2, 1, 2, 3, 1, 2, 3, 4]
You can use this function to achieve the same thing:
>>> list(generator_chain(5, lambda n: iter(range(1, n)), lambda i: iter(range(1, i + 1))))
[1, 1, 2, 1, 2, 3, 1, 2, 3, 4]
The advantage is that this is independent of the number of dependent generators you have.
Also, this function does not use recursion, so it is safe to use even with large generator counts.
Args:
initial_data:
The initial data that is passed to the first generator factory.
*factories:
The generator factories. Each of them gets passed its predecessor's data and has to return an iterable.
The data from this iterable is passed to the next factory.
Yields:
Every data item yielded by the generators of the final factory. | [
"Chain",
"multiple",
"generators",
"together",
"by",
"passing",
"results",
"from",
"one",
"to",
"the",
"next",
"."
] | 06b2ec50ee0efdf3dd183768c0ffdb51b7efc393 | https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/utils.py#L477-L532 |
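The driver loop above behaves like nested for-loops of arbitrary depth: `generator_index` walks forward while the factories keep yielding and backtracks one level on `StopIteration`. The way matchpy itself uses it, each factory receives a partial substitution and yields extended copies of it. A small sketch of that usage (assuming matchpy is installed; the `assign` factories here are illustrative, not matchpy API):

```python
# generator_chain with dict-extending factories, mirroring how the
# commutative partition iterator above threads substitutions through it.
from matchpy.utils import generator_chain

def assign(key, choices):
    def factory(subst):
        for value in choices:
            yield {**subst, key: value}   # extend a copy, leave input intact
    return factory

for subst in generator_chain({}, assign('x', 'ab'), assign('y', '12')):
    print(subst)
# {'x': 'a', 'y': '1'}, {'x': 'a', 'y': '2'}, {'x': 'b', 'y': '1'}, ...
```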
1,991 | HPAC/matchpy | matchpy/expressions/substitution.py | Substitution.try_add_variable | def try_add_variable(self, variable_name: str, replacement: VariableReplacement) -> None:
"""Try to add the variable with its replacement to the substitution.
This considers an existing replacement and will only succeed if the new replacement
can be merged with the old replacement. Merging can occur if either the two replacements
are equivalent. Replacements can also be merged if the old replacement for the variable_name was
unordered (i.e. a :class:`~.Multiset`) and the new one is an equivalent ordered version of it:
>>> subst = Substitution({'x': Multiset(['a', 'b'])})
>>> subst.try_add_variable('x', ('a', 'b'))
>>> print(subst)
{x ↦ (a, b)}
Args:
variable:
The name of the variable to add.
replacement:
The replacement for the variable.
Raises:
ValueError:
if the variable cannot be merged because it conflicts with the existing
substitution for the variable_name.
"""
if variable_name not in self:
self[variable_name] = replacement.copy() if isinstance(replacement, Multiset) else replacement
else:
existing_value = self[variable_name]
if isinstance(existing_value, tuple):
if isinstance(replacement, Multiset):
if Multiset(existing_value) != replacement:
raise ValueError
elif replacement != existing_value:
raise ValueError
elif isinstance(existing_value, Multiset):
if not isinstance(replacement, (tuple, list, Multiset)):
raise ValueError
compare_value = Multiset(replacement)
if existing_value == compare_value:
if not isinstance(replacement, Multiset):
self[variable_name] = replacement
else:
raise ValueError
elif replacement != existing_value:
raise ValueError | python | def try_add_variable(self, variable_name: str, replacement: VariableReplacement) -> None:
if variable_name not in self:
self[variable_name] = replacement.copy() if isinstance(replacement, Multiset) else replacement
else:
existing_value = self[variable_name]
if isinstance(existing_value, tuple):
if isinstance(replacement, Multiset):
if Multiset(existing_value) != replacement:
raise ValueError
elif replacement != existing_value:
raise ValueError
elif isinstance(existing_value, Multiset):
if not isinstance(replacement, (tuple, list, Multiset)):
raise ValueError
compare_value = Multiset(replacement)
if existing_value == compare_value:
if not isinstance(replacement, Multiset):
self[variable_name] = replacement
else:
raise ValueError
elif replacement != existing_value:
raise ValueError | [
"def",
"try_add_variable",
"(",
"self",
",",
"variable_name",
":",
"str",
",",
"replacement",
":",
"VariableReplacement",
")",
"->",
"None",
":",
"if",
"variable_name",
"not",
"in",
"self",
":",
"self",
"[",
"variable_name",
"]",
"=",
"replacement",
".",
"copy",
"(",
")",
"if",
"isinstance",
"(",
"replacement",
",",
"Multiset",
")",
"else",
"replacement",
"else",
":",
"existing_value",
"=",
"self",
"[",
"variable_name",
"]",
"if",
"isinstance",
"(",
"existing_value",
",",
"tuple",
")",
":",
"if",
"isinstance",
"(",
"replacement",
",",
"Multiset",
")",
":",
"if",
"Multiset",
"(",
"existing_value",
")",
"!=",
"replacement",
":",
"raise",
"ValueError",
"elif",
"replacement",
"!=",
"existing_value",
":",
"raise",
"ValueError",
"elif",
"isinstance",
"(",
"existing_value",
",",
"Multiset",
")",
":",
"if",
"not",
"isinstance",
"(",
"replacement",
",",
"(",
"tuple",
",",
"list",
",",
"Multiset",
")",
")",
":",
"raise",
"ValueError",
"compare_value",
"=",
"Multiset",
"(",
"replacement",
")",
"if",
"existing_value",
"==",
"compare_value",
":",
"if",
"not",
"isinstance",
"(",
"replacement",
",",
"Multiset",
")",
":",
"self",
"[",
"variable_name",
"]",
"=",
"replacement",
"else",
":",
"raise",
"ValueError",
"elif",
"replacement",
"!=",
"existing_value",
":",
"raise",
"ValueError"
] | Try to add the variable with its replacement to the substitution.
This considers an existing replacement and will only succeed if the new replacement
can be merged with the old replacement. Merging can occur if either the two replacements
are equivalent. Replacements can also be merged if the old replacement for the variable_name was
unordered (i.e. a :class:`~.Multiset`) and the new one is an equivalent ordered version of it:
>>> subst = Substitution({'x': Multiset(['a', 'b'])})
>>> subst.try_add_variable('x', ('a', 'b'))
>>> print(subst)
{x ↦ (a, b)}
Args:
variable:
The name of the variable to add.
replacement:
The replacement for the variable.
Raises:
ValueError:
if the variable cannot be merged because it conflicts with the existing
substitution for the variable_name. | [
"Try",
"to",
"add",
"the",
"variable",
"with",
"its",
"replacement",
"to",
"the",
"substitution",
"."
] | 06b2ec50ee0efdf3dd183768c0ffdb51b7efc393 | https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/substitution.py#L32-L77 |
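The branches above encode a small merge lattice: a tuple only merges with an equal tuple or with a Multiset holding the same elements, while an existing Multiset is upgraded to an ordered tuple when an equivalent ordered replacement arrives. Assuming matchpy is installed (import path taken from the path column above), the upgrade and conflict cases look like this:

```python
# Merge semantics of Substitution.try_add_variable.
from multiset import Multiset
from matchpy.expressions.substitution import Substitution

subst = Substitution({'x': Multiset(['a', 'b'])})
subst.try_add_variable('x', ('b', 'a'))   # same elements: upgraded to a tuple
print(subst['x'])                         # ('b', 'a')

try:
    subst.try_add_variable('x', ('a',))   # conflicting replacement
except ValueError:
    print('conflict rejected')
```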
1,992 | HPAC/matchpy | matchpy/expressions/substitution.py | Substitution.union_with_variable | def union_with_variable(self, variable: str, replacement: VariableReplacement) -> 'Substitution':
"""Try to create a new substitution with the given variable added.
See :meth:`try_add_variable` for a version of this method that modifies the substitution
in place.
Args:
variable_name:
The name of the variable to add.
replacement:
The substitution for the variable.
Returns:
The new substitution with the variable_name added or merged.
Raises:
ValueError:
if the variable cannot be merged because it conflicts with the existing
substitution for the variable.
"""
new_subst = Substitution(self)
new_subst.try_add_variable(variable, replacement)
return new_subst | python | def union_with_variable(self, variable: str, replacement: VariableReplacement) -> 'Substitution':
new_subst = Substitution(self)
new_subst.try_add_variable(variable, replacement)
return new_subst | [
"def",
"union_with_variable",
"(",
"self",
",",
"variable",
":",
"str",
",",
"replacement",
":",
"VariableReplacement",
")",
"->",
"'Substitution'",
":",
"new_subst",
"=",
"Substitution",
"(",
"self",
")",
"new_subst",
".",
"try_add_variable",
"(",
"variable",
",",
"replacement",
")",
"return",
"new_subst"
] | Try to create a new substitution with the given variable added.
See :meth:`try_add_variable` for a version of this method that modifies the substitution
in place.
Args:
variable_name:
The name of the variable to add.
replacement:
The substitution for the variable.
Returns:
The new substitution with the variable_name added or merged.
Raises:
ValueError:
if the variable cannot be merged because it conflicts with the existing
substitution for the variable. | [
"Try",
"to",
"create",
"a",
"new",
"substitution",
"with",
"the",
"given",
"variable",
"added",
"."
] | 06b2ec50ee0efdf3dd183768c0ffdb51b7efc393 | https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/substitution.py#L79-L101 |
1,993 | HPAC/matchpy | matchpy/expressions/substitution.py | Substitution.extract_substitution | def extract_substitution(self, subject: 'expressions.Expression', pattern: 'expressions.Expression') -> bool:
"""Extract the variable substitution for the given pattern and subject.
This assumes that subject and pattern already match when being considered as linear.
Also, they both must be :term:`syntactic`, as sequence variables cannot be handled here.
All this method does is check whether all the substitutions for the variables can be unified.
So, in case it returns ``False``, the substitution is invalid for the match.
.. warning::
This method mutates the substitution and will even do so in case the extraction fails.
Create a copy before using this method if you need to preserve the original substitution.
Example:
With an empty initial substitution and a linear pattern, the extraction will always succeed:
>>> subst = Substitution()
>>> subst.extract_substitution(f(a, b), f(x_, y_))
True
>>> print(subst)
{x ↦ a, y ↦ b}
Clashing values for existing variables will fail:
>>> subst.extract_substitution(b, x_)
False
For non-linear patterns, the extraction can also fail with an empty substitution:
>>> subst = Substitution()
>>> subst.extract_substitution(f(a, b), f(x_, x_))
False
>>> print(subst)
{x ↦ a}
Note that the initial substitution got mutated even though the extraction failed!
Args:
subject:
A :term:`syntactic` subject that matches the pattern.
pattern:
A :term:`syntactic` pattern that matches the subject.
Returns:
``True`` iff the substitution could be extracted successfully.
"""
if getattr(pattern, 'variable_name', False):
try:
self.try_add_variable(pattern.variable_name, subject)
except ValueError:
return False
return True
elif isinstance(pattern, expressions.Operation):
assert isinstance(subject, type(pattern))
assert op_len(subject) == op_len(pattern)
op_expression = cast(expressions.Operation, subject)
for subj, patt in zip(op_iter(op_expression), op_iter(pattern)):
if not self.extract_substitution(subj, patt):
return False
return True | python | def extract_substitution(self, subject: 'expressions.Expression', pattern: 'expressions.Expression') -> bool:
if getattr(pattern, 'variable_name', False):
try:
self.try_add_variable(pattern.variable_name, subject)
except ValueError:
return False
return True
elif isinstance(pattern, expressions.Operation):
assert isinstance(subject, type(pattern))
assert op_len(subject) == op_len(pattern)
op_expression = cast(expressions.Operation, subject)
for subj, patt in zip(op_iter(op_expression), op_iter(pattern)):
if not self.extract_substitution(subj, patt):
return False
return True | [
"def",
"extract_substitution",
"(",
"self",
",",
"subject",
":",
"'expressions.Expression'",
",",
"pattern",
":",
"'expressions.Expression'",
")",
"->",
"bool",
":",
"if",
"getattr",
"(",
"pattern",
",",
"'variable_name'",
",",
"False",
")",
":",
"try",
":",
"self",
".",
"try_add_variable",
"(",
"pattern",
".",
"variable_name",
",",
"subject",
")",
"except",
"ValueError",
":",
"return",
"False",
"return",
"True",
"elif",
"isinstance",
"(",
"pattern",
",",
"expressions",
".",
"Operation",
")",
":",
"assert",
"isinstance",
"(",
"subject",
",",
"type",
"(",
"pattern",
")",
")",
"assert",
"op_len",
"(",
"subject",
")",
"==",
"op_len",
"(",
"pattern",
")",
"op_expression",
"=",
"cast",
"(",
"expressions",
".",
"Operation",
",",
"subject",
")",
"for",
"subj",
",",
"patt",
"in",
"zip",
"(",
"op_iter",
"(",
"op_expression",
")",
",",
"op_iter",
"(",
"pattern",
")",
")",
":",
"if",
"not",
"self",
".",
"extract_substitution",
"(",
"subj",
",",
"patt",
")",
":",
"return",
"False",
"return",
"True"
] | Extract the variable substitution for the given pattern and subject.
This assumes that subject and pattern already match when being considered as linear.
Also, they both must be :term:`syntactic`, as sequence variables cannot be handled here.
All this method does is check whether all the substitutions for the variables can be unified.
So, in case it returns ``False``, the substitution is invalid for the match.
.. warning::
This method mutates the substitution and will even do so in case the extraction fails.
Create a copy before using this method if you need to preserve the original substitution.
Example:
With an empty initial substitution and a linear pattern, the extraction will always succeed:
>>> subst = Substitution()
>>> subst.extract_substitution(f(a, b), f(x_, y_))
True
>>> print(subst)
{x ↦ a, y ↦ b}
Clashing values for existing variables will fail:
>>> subst.extract_substitution(b, x_)
False
For non-linear patterns, the extraction can also fail with an empty substitution:
>>> subst = Substitution()
>>> subst.extract_substitution(f(a, b), f(x_, x_))
False
>>> print(subst)
{x ↦ a}
Note that the initial substitution got mutated even though the extraction failed!
Args:
subject:
A :term:`syntactic` subject that matches the pattern.
pattern:
A :term:`syntactic` pattern that matches the subject.
Returns:
``True`` iff the substitution could be extracted successfully. | [
"Extract",
"the",
"variable",
"substitution",
"for",
"the",
"given",
"pattern",
"and",
"subject",
"."
] | 06b2ec50ee0efdf3dd183768c0ffdb51b7efc393 | https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/substitution.py#L103-L164 |
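The doctests above omit their setup (`f`, `a`, `b`, `x_` are predefined in matchpy's documentation environment). A runnable reconstruction, under the assumption that the standard matchpy constructors behave as documented:

```python
# Reconstruction of the non-linear extract_substitution doctest above.
# Assumes matchpy is installed; f, a, b, x_ mirror matchpy's doc setup.
from matchpy import Arity, Operation, Symbol, Wildcard
from matchpy.expressions.substitution import Substitution

f = Operation.new('f', Arity.binary)
a, b = Symbol('a'), Symbol('b')
x_ = Wildcard.dot('x')

subst = Substitution()
print(subst.extract_substitution(f(a, b), f(x_, x_)))  # False: a vs. b clash
print(subst)  # {x ↦ a}  (the substitution was mutated despite the failure)
```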
1,994 | HPAC/matchpy | matchpy/expressions/substitution.py | Substitution.union | def union(self, *others: 'Substitution') -> 'Substitution':
"""Try to merge the substitutions.
If a variable occurs in multiple substitutions, try to merge the replacements.
See :meth:`union_with_variable` to see how replacements are merged.
Does not modify any of the original substitutions.
Example:
>>> subst1 = Substitution({'x': Multiset(['a', 'b']), 'z': a})
>>> subst2 = Substitution({'x': ('a', 'b'), 'y': ('c', )})
>>> print(subst1.union(subst2))
{x ↦ (a, b), y ↦ (c), z ↦ a}
Args:
others:
The other substitutions to merge with this one.
Returns:
The new substitution with the other substitutions merged.
Raises:
ValueError:
if a variable occurs in multiple substitutions but cannot be merged because the
substitutions conflict.
"""
new_subst = Substitution(self)
for other in others:
for variable_name, replacement in other.items():
new_subst.try_add_variable(variable_name, replacement)
return new_subst | python | def union(self, *others: 'Substitution') -> 'Substitution':
new_subst = Substitution(self)
for other in others:
for variable_name, replacement in other.items():
new_subst.try_add_variable(variable_name, replacement)
return new_subst | [
"def",
"union",
"(",
"self",
",",
"*",
"others",
":",
"'Substitution'",
")",
"->",
"'Substitution'",
":",
"new_subst",
"=",
"Substitution",
"(",
"self",
")",
"for",
"other",
"in",
"others",
":",
"for",
"variable_name",
",",
"replacement",
"in",
"other",
".",
"items",
"(",
")",
":",
"new_subst",
".",
"try_add_variable",
"(",
"variable_name",
",",
"replacement",
")",
"return",
"new_subst"
] | Try to merge the substitutions.
If a variable occurs in multiple substitutions, try to merge the replacements.
See :meth:`union_with_variable` to see how replacements are merged.
Does not modify any of the original substitutions.
Example:
>>> subst1 = Substitution({'x': Multiset(['a', 'b']), 'z': a})
>>> subst2 = Substitution({'x': ('a', 'b'), 'y': ('c', )})
>>> print(subst1.union(subst2))
{x ↦ (a, b), y ↦ (c), z ↦ a}
Args:
others:
The other substitutions to merge with this one.
Returns:
The new substitution with the other substitutions merged.
Raises:
ValueError:
if a variable occurs in multiple substitutions but cannot be merged because the
substitutions conflict. | [
"Try",
"to",
"merge",
"the",
"substitutions",
"."
] | 06b2ec50ee0efdf3dd183768c0ffdb51b7efc393 | https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/substitution.py#L166-L197 |
1,995 | HPAC/matchpy | matchpy/expressions/substitution.py | Substitution.rename | def rename(self, renaming: Dict[str, str]) -> 'Substitution':
"""Return a copy of the substitution with renamed variables.
Example:
Rename the variable *x* to *y*:
>>> subst = Substitution({'x': a})
>>> subst.rename({'x': 'y'})
{'y': Symbol('a')}
Args:
renaming:
A dictionary mapping old variable names to new ones.
Returns:
A copy of the substitution where variable names have been replaced according to the given renaming
dictionary. Names that are not contained in the dictionary are left unchanged.
"""
return Substitution((renaming.get(name, name), value) for name, value in self.items()) | python | def rename(self, renaming: Dict[str, str]) -> 'Substitution':
return Substitution((renaming.get(name, name), value) for name, value in self.items()) | [
"def",
"rename",
"(",
"self",
",",
"renaming",
":",
"Dict",
"[",
"str",
",",
"str",
"]",
")",
"->",
"'Substitution'",
":",
"return",
"Substitution",
"(",
"(",
"renaming",
".",
"get",
"(",
"name",
",",
"name",
")",
",",
"value",
")",
"for",
"name",
",",
"value",
"in",
"self",
".",
"items",
"(",
")",
")"
] | Return a copy of the substitution with renamed variables.
Example:
Rename the variable *x* to *y*:
>>> subst = Substitution({'x': a})
>>> subst.rename({'x': 'y'})
{'y': Symbol('a')}
Args:
renaming:
A dictionary mapping old variable names to new ones.
Returns:
A copy of the substitution where variable names have been replaced according to the given renaming
dictionary. Names that are not contained in the dictionary are left unchanged. | [
"Return",
"a",
"copy",
"of",
"the",
"substitution",
"with",
"renamed",
"variables",
"."
] | 06b2ec50ee0efdf3dd183768c0ffdb51b7efc393 | https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/substitution.py#L199-L218 |
1,996 | HPAC/matchpy | matchpy/matching/syntactic.py | _get_symbol_wildcard_label | def _get_symbol_wildcard_label(state: '_State', symbol: Symbol) -> Type[Symbol]:
"""Return the transition target for the given symbol type from the the given state or None if it does not exist."""
return next((t for t in state.keys() if is_symbol_wildcard(t) and isinstance(symbol, t)), None) | python | def _get_symbol_wildcard_label(state: '_State', symbol: Symbol) -> Type[Symbol]:
return next((t for t in state.keys() if is_symbol_wildcard(t) and isinstance(symbol, t)), None) | [
"def",
"_get_symbol_wildcard_label",
"(",
"state",
":",
"'_State'",
",",
"symbol",
":",
"Symbol",
")",
"->",
"Type",
"[",
"Symbol",
"]",
":",
"return",
"next",
"(",
"(",
"t",
"for",
"t",
"in",
"state",
".",
"keys",
"(",
")",
"if",
"is_symbol_wildcard",
"(",
"t",
")",
"and",
"isinstance",
"(",
"symbol",
",",
"t",
")",
")",
",",
"None",
")"
] | Return the transition target for the given symbol type from the given state or None if it does not exist. | [
"Return",
"the",
"transition",
"target",
"for",
"the",
"given",
"symbol",
"type",
"from",
"the",
"the",
"given",
"state",
"or",
"None",
"if",
"it",
"does",
"not",
"exist",
"."
] | 06b2ec50ee0efdf3dd183768c0ffdb51b7efc393 | https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/syntactic.py#L50-L52 |
1,997 | HPAC/matchpy | matchpy/matching/syntactic.py | _term_str | def _term_str(term: TermAtom) -> str: # pragma: no cover
"""Return a string representation of a term atom."""
if is_operation(term):
return term.name + '('
elif is_symbol_wildcard(term):
return '*{!s}'.format(term.__name__)
elif isinstance(term, Wildcard):
return '*{!s}{!s}'.format(term.min_count, (not term.fixed_size) and '+' or '')
elif term == Wildcard:
return '*'
else:
return str(term) | python | def _term_str(term: TermAtom) -> str: # pragma: no cover
if is_operation(term):
return term.name + '('
elif is_symbol_wildcard(term):
return '*{!s}'.format(term.__name__)
elif isinstance(term, Wildcard):
return '*{!s}{!s}'.format(term.min_count, (not term.fixed_size) and '+' or '')
elif term == Wildcard:
return '*'
else:
return str(term) | [
"def",
"_term_str",
"(",
"term",
":",
"TermAtom",
")",
"->",
"str",
":",
"# pragma: no cover",
"if",
"is_operation",
"(",
"term",
")",
":",
"return",
"term",
".",
"name",
"+",
"'('",
"elif",
"is_symbol_wildcard",
"(",
"term",
")",
":",
"return",
"'*{!s}'",
".",
"format",
"(",
"term",
".",
"__name__",
")",
"elif",
"isinstance",
"(",
"term",
",",
"Wildcard",
")",
":",
"return",
"'*{!s}{!s}'",
".",
"format",
"(",
"term",
".",
"min_count",
",",
"(",
"not",
"term",
".",
"fixed_size",
")",
"and",
"'+'",
"or",
"''",
")",
"elif",
"term",
"==",
"Wildcard",
":",
"return",
"'*'",
"else",
":",
"return",
"str",
"(",
"term",
")"
] | Return a string representation of a term atom. | [
"Return",
"a",
"string",
"representation",
"of",
"a",
"term",
"atom",
"."
] | 06b2ec50ee0efdf3dd183768c0ffdb51b7efc393 | https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/syntactic.py#L200-L211 |
1,998 | HPAC/matchpy | matchpy/matching/syntactic.py | FlatTerm.merged | def merged(cls, *flatterms: 'FlatTerm') -> 'FlatTerm':
"""Concatenate the given flatterms to a single flatterm.
Args:
*flatterms:
The flatterms which are concatenated.
Returns:
The concatenated flatterms.
"""
return cls(cls._combined_wildcards_iter(sum(flatterms, cls.empty()))) | python | def merged(cls, *flatterms: 'FlatTerm') -> 'FlatTerm':
return cls(cls._combined_wildcards_iter(sum(flatterms, cls.empty()))) | [
"def",
"merged",
"(",
"cls",
",",
"*",
"flatterms",
":",
"'FlatTerm'",
")",
"->",
"'FlatTerm'",
":",
"return",
"cls",
"(",
"cls",
".",
"_combined_wildcards_iter",
"(",
"sum",
"(",
"flatterms",
",",
"cls",
".",
"empty",
"(",
")",
")",
")",
")"
] | Concatenate the given flatterms to a single flatterm.
Args:
*flatterms:
The flatterms which are concatenated.
Returns:
The concatenated flatterms. | [
"Concatenate",
"the",
"given",
"flatterms",
"to",
"a",
"single",
"flatterm",
"."
] | 06b2ec50ee0efdf3dd183768c0ffdb51b7efc393 | https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/syntactic.py#L146-L156 |
1,999 | HPAC/matchpy | matchpy/matching/syntactic.py | FlatTerm._flatterm_iter | def _flatterm_iter(cls, expression: Expression) -> Iterator[TermAtom]:
"""Generator that yields the atoms of the expressions in prefix notation with operation end markers."""
if isinstance(expression, Operation):
yield type(expression)
for operand in op_iter(expression):
yield from cls._flatterm_iter(operand)
yield OPERATION_END
elif isinstance(expression, SymbolWildcard):
yield expression.symbol_type
elif isinstance(expression, (Symbol, Wildcard)):
yield expression
else:
assert False, "Unreachable unless a new unsupported expression type is added." | python | def _flatterm_iter(cls, expression: Expression) -> Iterator[TermAtom]:
if isinstance(expression, Operation):
yield type(expression)
for operand in op_iter(expression):
yield from cls._flatterm_iter(operand)
yield OPERATION_END
elif isinstance(expression, SymbolWildcard):
yield expression.symbol_type
elif isinstance(expression, (Symbol, Wildcard)):
yield expression
else:
assert False, "Unreachable unless a new unsupported expression type is added." | [
"def",
"_flatterm_iter",
"(",
"cls",
",",
"expression",
":",
"Expression",
")",
"->",
"Iterator",
"[",
"TermAtom",
"]",
":",
"if",
"isinstance",
"(",
"expression",
",",
"Operation",
")",
":",
"yield",
"type",
"(",
"expression",
")",
"for",
"operand",
"in",
"op_iter",
"(",
"expression",
")",
":",
"yield",
"from",
"cls",
".",
"_flatterm_iter",
"(",
"operand",
")",
"yield",
"OPERATION_END",
"elif",
"isinstance",
"(",
"expression",
",",
"SymbolWildcard",
")",
":",
"yield",
"expression",
".",
"symbol_type",
"elif",
"isinstance",
"(",
"expression",
",",
"(",
"Symbol",
",",
"Wildcard",
")",
")",
":",
"yield",
"expression",
"else",
":",
"assert",
"False",
",",
"\"Unreachable unless a new unsupported expression type is added.\""
] | Generator that yields the atoms of the expressions in prefix notation with operation end markers. | [
"Generator",
"that",
"yields",
"the",
"atoms",
"of",
"the",
"expressions",
"in",
"prefix",
"notation",
"with",
"operation",
"end",
"markers",
"."
] | 06b2ec50ee0efdf3dd183768c0ffdb51b7efc393 | https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/matching/syntactic.py#L159-L171 |
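The generator above linearizes an expression tree into prefix order, emitting an explicit marker after each operation's operands so the tree can be reconstructed from the flat stream. A toy standalone version (`Op` is a hypothetical stand-in for matchpy's `Operation`):

```python
# Toy flatterm linearization: prefix order plus end-of-operation markers.
OPERATION_END = ')'

class Op:
    def __init__(self, name, *operands):
        self.name, self.operands = name, operands

def flatterm_iter(expr):
    if isinstance(expr, Op):
        yield expr.name + '('             # operation head opens a scope
        for operand in expr.operands:
            yield from flatterm_iter(operand)
        yield OPERATION_END               # close the operation's scope
    else:
        yield expr                        # symbols pass through as-is

expr = Op('f', 'a', Op('g', 'b'))
print(list(flatterm_iter(expr)))          # ['f(', 'a', 'g(', 'b', ')', ')']
```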