Dataset schema (one record per function; all records come from the train split):

| column | type | size range |
|---|---|---|
| repository_name | string | 5 to 67 chars |
| func_path_in_repository | string | 4 to 234 chars |
| func_name | string | 0 to 314 chars |
| whole_func_string | string | 52 to 3.87M chars |
| language | string | 6 classes |
| func_code_string | string | 39 to 1.84M chars |
| func_code_tokens | list | 15 to 672k items |
| func_documentation_string | string | 1 to 47.2k chars |
| func_documentation_tokens | list | 1 to 3.92k items |
| split_name | string | 1 class |
| func_code_url | string | 85 to 339 chars |

Each record below lists the repository, file path, function name, and language, followed by the full function source and a link to its location in the repository.
repository: crate/crash
path: src/crate/crash/command.py
function: _parse_statements
language: python

```python
def _parse_statements(lines):
    """Return a generator of statements

    Args: A list of strings that can contain one or more statements.
          Statements are separated using ';' at the end of a line
          Everything after the last ';' will be treated as the last statement.

    >>> list(_parse_statements(['select * from ', 't1;', 'select name']))
    ['select * from\\nt1', 'select name']

    >>> list(_parse_statements(['select * from t1;', ' ']))
    ['select * from t1']
    """
    lines = (l.strip() for l in lines if l)
    lines = (l for l in lines if l and not l.startswith('--'))
    parts = []
    for line in lines:
        parts.append(line.rstrip(';'))
        if line.endswith(';'):
            yield '\n'.join(parts)
            parts[:] = []
    if parts:
        yield '\n'.join(parts)
```
source: https://github.com/crate/crash/blob/32d3ddc78fd2f7848ed2b99d9cd8889e322528d9/src/crate/crash/command.py#L191-L213
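A minimal usage sketch; the direct import is an assumption, since `_parse_statements` is a private helper normally reached through the shell:

```python
# Hypothetical direct import of the private helper shown above.
from crate.crash.command import _parse_statements

lines = [
    "-- SQL line comments are dropped before splitting",
    "select * from t1;",
    "select name from t2",
]
# A trailing statement without ';' is still yielded as the last statement.
print(list(_parse_statements(lines)))
# ['select * from t1', 'select name from t2']
```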
repository: crate/crash
path: src/crate/crash/command.py
function: host_and_port
language: python

```python
def host_and_port(host_or_port):
    """
    Return full hostname/IP + port, possible input formats are:

    * host:port -> host:port
    * :         -> localhost:4200
    * :port     -> localhost:port
    * host      -> host:4200
    """
    if ':' in host_or_port:
        if len(host_or_port) == 1:
            return 'localhost:4200'
        elif host_or_port.startswith(':'):
            return 'localhost' + host_or_port
        return host_or_port
    return host_or_port + ':4200'
```
source: https://github.com/crate/crash/blob/32d3ddc78fd2f7848ed2b99d9cd8889e322528d9/src/crate/crash/command.py#L519-L533
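A small sketch of the four documented input forms (same hypothetical import path as above):

```python
from crate.crash.command import host_and_port  # hypothetical direct import

assert host_and_port('db1:4300') == 'db1:4300'    # host:port passes through
assert host_and_port(':') == 'localhost:4200'     # bare colon -> both defaults
assert host_and_port(':4300') == 'localhost:4300'
assert host_and_port('db1') == 'db1:4200'         # 4200 is CrateDB's default port
```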
repository: crate/crash
path: src/crate/crash/command.py
function: CrateShell._show_tables
language: python

```python
def _show_tables(self, *args):
    """ print the existing tables within the 'doc' schema """
    v = self.connection.lowest_server_version
    schema_name = \
        "table_schema" if v >= TABLE_SCHEMA_MIN_VERSION else "schema_name"
    table_filter = \
        " AND table_type = 'BASE TABLE'" if v >= TABLE_TYPE_MIN_VERSION else ""
    self._exec("SELECT format('%s.%s', {schema}, table_name) AS name "
               "FROM information_schema.tables "
               "WHERE {schema} NOT IN ('sys','information_schema', 'pg_catalog')"
               "{table_filter}"
               .format(schema=schema_name, table_filter=table_filter))
```
source: https://github.com/crate/crash/blob/32d3ddc78fd2f7848ed2b99d9cd8889e322528d9/src/crate/crash/command.py#L321-L333
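A sketch of the two SQL strings this method can build; the version constants live in `command.py`, so only the rendered queries are shown here:

```python
query = ("SELECT format('%s.%s', {schema}, table_name) AS name "
         "FROM information_schema.tables "
         "WHERE {schema} NOT IN ('sys','information_schema', 'pg_catalog')"
         "{table_filter}")

# Servers at or above the version thresholds:
print(query.format(schema="table_schema",
                   table_filter=" AND table_type = 'BASE TABLE'"))
# Older servers fall back to the legacy column name and no type filter:
print(query.format(schema="schema_name", table_filter=""))
```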
repository: crate/crash
path: src/crate/crash/command.py
function: CrateShell._quit
language: python

```python
def _quit(self, *args):
    """ quit crash """
    self.logger.warn('Bye!')
    sys.exit(self.exit())
```
source: https://github.com/crate/crash/blob/32d3ddc78fd2f7848ed2b99d9cd8889e322528d9/src/crate/crash/command.py#L336-L339
repository: crate/crash
path: src/crate/crash/command.py
function: CrateShell._connect
language: python

```python
def _connect(self, servers):
    """ connect to the given server, e.g.: \\connect localhost:4200 """
    self._do_connect(servers.split(' '))
    self._verify_connection(verbose=True)
```
source: https://github.com/crate/crash/blob/32d3ddc78fd2f7848ed2b99d9cd8889e322528d9/src/crate/crash/command.py#L362-L365
repository: crate/crash
path: src/crate/crash/sysinfo.py
function: SysInfoCommand.execute
language: python

```python
def execute(self):
    """ print system and cluster info """
    if not self.cmd.is_conn_available():
        return
    if self.cmd.connection.lowest_server_version >= SYSINFO_MIN_VERSION:
        success, rows = self._sys_info()
        self.cmd.exit_code = self.cmd.exit_code or int(not success)
        if success:
            for result in rows:
                self.cmd.pprint(result.rows, result.cols)
            self.cmd.logger.info(
                "For debugging purposes you can send above listed information to [email protected]")
    else:
        tmpl = 'Crate {version} does not support the cluster "sysinfo" command'
        self.cmd.logger.warn(tmpl
                             .format(version=self.cmd.connection.lowest_server_version))
```
source: https://github.com/crate/crash/blob/32d3ddc78fd2f7848ed2b99d9cd8889e322528d9/src/crate/crash/sysinfo.py#L72-L87
repository: crate/crash
path: src/crate/crash/config.py
function: Configuration.bwc_bool_transform_from
language: python

```python
def bwc_bool_transform_from(cls, x):
    """
    Read boolean values from old config files correctly
    and interpret 'True' and 'False' as correct booleans.
    """
    if x.lower() == 'true':
        return True
    elif x.lower() == 'false':
        return False
    return bool(int(x))
```
source: https://github.com/crate/crash/blob/32d3ddc78fd2f7848ed2b99d9cd8889e322528d9/src/crate/crash/config.py#L44-L53
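A standalone sketch of the accepted spellings, re-declared as a plain function since the original is a method on `Configuration`:

```python
def bwc_bool(x):
    # Mirrors Configuration.bwc_bool_transform_from without the class.
    if x.lower() == 'true':
        return True
    elif x.lower() == 'false':
        return False
    return bool(int(x))

assert bwc_bool('True') is True
assert bwc_bool('FALSE') is False
assert bwc_bool('1') is True
assert bwc_bool('0') is False
# Anything else must parse as an integer; bwc_bool('yes') raises ValueError.
```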
repository: crate/crash
path: src/crate/crash/outputs.py
function: _transform_field
language: python

```python
def _transform_field(field):
    """transform field for displaying"""
    if isinstance(field, bool):
        return TRUE if field else FALSE
    elif isinstance(field, (list, dict)):
        return json.dumps(field, sort_keys=True, ensure_ascii=False)
    else:
        return field
```
source: https://github.com/crate/crash/blob/32d3ddc78fd2f7848ed2b99d9cd8889e322528d9/src/crate/crash/outputs.py#L42-L49
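A self-contained sketch of the three display cases; the real `TRUE`/`FALSE` constants are defined elsewhere in `outputs.py`, so placeholder values are assumed here:

```python
import json

TRUE, FALSE = 'TRUE', 'FALSE'  # assumed stand-ins for the module constants

def transform_field(field):
    if isinstance(field, bool):
        return TRUE if field else FALSE
    elif isinstance(field, (list, dict)):
        return json.dumps(field, sort_keys=True, ensure_ascii=False)
    return field

print(transform_field(True))              # TRUE
print(transform_field({'b': 1, 'a': 2}))  # {"a": 2, "b": 1} -- keys sorted
print(transform_field('plain'))           # plain (unchanged)
```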
repository: supermihi/pytaglib
path: src/pyprinttags.py
function: script
language: python

```python
def script():
    """Run the command-line script."""
    parser = argparse.ArgumentParser(description="Print all textual tags of one or more audio files.")
    parser.add_argument("-b", "--batch", help="disable user interaction", action="store_true")
    parser.add_argument("file", nargs="+", help="file(s) to print tags of")
    args = parser.parse_args()
    for filename in args.file:
        if isinstance(filename, bytes):
            filename = filename.decode(sys.getfilesystemencoding())
        line = "TAGS OF '{0}'".format(os.path.basename(filename))
        print("*" * len(line))
        print(line)
        print("*" * len(line))
        audioFile = taglib.File(filename)
        tags = audioFile.tags
        if len(tags) > 0:
            maxKeyLen = max(len(key) for key in tags.keys())
            for key, values in tags.items():
                for value in values:
                    print(('{0:' + str(maxKeyLen) + '} = {1}').format(key, value))
        if len(audioFile.unsupported) > 0:
            print('Unsupported tag elements: ' + "; ".join(audioFile.unsupported))
            if sys.version_info[0] == 2:
                inputFunction = raw_input
            else:
                inputFunction = input
            if not args.batch and inputFunction("remove unsupported properties? [yN] ").lower() in ["y", "yes"]:
                audioFile.removeUnsupportedProperties(audioFile.unsupported)
                audioFile.save()
```
source: https://github.com/supermihi/pytaglib/blob/719224d4fdfee09925b865335f2af510ccdcad58/src/pyprinttags.py#L23-L51
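The key/value alignment relies on building a format spec from the longest key; a minimal sketch:

```python
# Pad each key to the longest key so the '=' signs line up.
tags = {'ARTIST': ['Queen'], 'TITLE': ["Don't Stop Me Now"]}
max_key_len = max(len(key) for key in tags)
for key, values in tags.items():
    for value in values:
        print(('{0:' + str(max_key_len) + '} = {1}').format(key, value))
# ARTIST = Queen
# TITLE  = Don't Stop Me Now
```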
repository: adafruit/Adafruit_CircuitPython_seesaw
path: adafruit_seesaw/seesaw.py
function: Seesaw.sw_reset
language: python

```python
def sw_reset(self):
    """Trigger a software reset of the SeeSaw chip"""
    self.write8(_STATUS_BASE, _STATUS_SWRST, 0xFF)
    time.sleep(.500)

    chip_id = self.read8(_STATUS_BASE, _STATUS_HW_ID)
    if chip_id != _HW_ID_CODE:
        raise RuntimeError("Seesaw hardware ID returned (0x{:x}) is not "
                           "correct! Expected 0x{:x}. Please check your wiring."
                           .format(chip_id, _HW_ID_CODE))

    pid = self.get_version() >> 16
    if pid == _CRICKIT_PID:
        from adafruit_seesaw.crickit import Crickit_Pinmap
        self.pin_mapping = Crickit_Pinmap
    elif pid == _ROBOHATMM1_PID:
        from adafruit_seesaw.robohat import MM1_Pinmap
        self.pin_mapping = MM1_Pinmap
    else:
        from adafruit_seesaw.samd09 import SAMD09_Pinmap
        self.pin_mapping = SAMD09_Pinmap
```
source: https://github.com/adafruit/Adafruit_CircuitPython_seesaw/blob/3f55058dbdfcfde8cb5ce8708c0a37aacde9b313/adafruit_seesaw/seesaw.py#L144-L165
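The board dispatch works because the 32-bit value from `get_version()` packs the product ID into its upper 16 bits; a sketch with a made-up PID:

```python
_EXAMPLE_PID = 0x270D                     # hypothetical PID, not the real constant
version = (_EXAMPLE_PID << 16) | 0x0A21   # lower 16 bits: firmware date code
assert (version >> 16) == _EXAMPLE_PID    # the shift sw_reset() performs
```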
repository: lipoja/URLExtract
path: urlextract/cachefile.py
function: CacheFile._get_default_cache_dir
language: python

```python
def _get_default_cache_dir(self):
    """
    Returns default cache directory (data directory)

    :raises: CacheFileError when the default cache file does not exist
    :return: path to default cache directory
    :rtype: str
    """
    return os.path.join(os.path.dirname(__file__), self._DATA_DIR)
```
source: https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/cachefile.py#L66-L75
repository: lipoja/URLExtract
path: urlextract/cachefile.py
function: CacheFile._get_default_cache_file_path
language: python

```python
def _get_default_cache_file_path(self):
    """
    Returns default cache file path

    :return: default cache file path (to data directory)
    :rtype: str
    """
    default_list_path = os.path.join(
        self._get_default_cache_dir(), self._CACHE_FILE_NAME)

    if not os.access(default_list_path, os.F_OK):
        raise CacheFileError(
            "Default cache file does not exist "
            "'{}'!".format(default_list_path)
        )

    return default_list_path
```
source: https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/cachefile.py#L77-L94
repository: lipoja/URLExtract
path: urlextract/cachefile.py
function: CacheFile._get_writable_cache_dir
language: python

```python
def _get_writable_cache_dir(self):
    """
    Get writable cache directory with fallback to user's cache directory
    and global temp directory

    :raises: CacheFileError when cached directory is not writable for user
    :return: path to cache directory
    :rtype: str
    """
    dir_path_data = self._get_default_cache_dir()
    if os.access(dir_path_data, os.W_OK):
        self._default_cache_file = True
        return dir_path_data

    dir_path_user = user_cache_dir(self._URLEXTRACT_NAME)
    if not os.path.exists(dir_path_user):
        os.makedirs(dir_path_user, exist_ok=True)
    if os.access(dir_path_user, os.W_OK):
        return dir_path_user

    dir_path_temp = tempfile.gettempdir()
    if os.access(dir_path_temp, os.W_OK):
        return dir_path_temp

    raise CacheFileError("Cache directories are not writable.")
```
source: https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/cachefile.py#L96-L122
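The selection logic reduces to "first writable directory wins"; a sketch of the same chain with stand-in paths (`user_cache_dir` in the original comes from an appdirs-style helper):

```python
import os
import tempfile

candidates = [
    '/usr/lib/python3/dist-packages/urlextract/data',  # stand-in package data dir
    os.path.expanduser('~/.cache/urlextract'),         # stand-in user cache dir
    tempfile.gettempdir(),                             # last resort
]
writable = next((p for p in candidates if os.access(p, os.W_OK)), None)
print(writable)
```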
repository: lipoja/URLExtract
path: urlextract/cachefile.py
function: CacheFile._get_cache_file_path
language: python

```python
def _get_cache_file_path(self, cache_dir=None):
    """
    Get path for cache file

    :param str cache_dir: base path for TLD cache, defaults to data dir
    :raises: CacheFileError when cached directory is not writable for user
    :return: Full path to cached file with TLDs
    :rtype: str
    """
    if cache_dir is None:
        # Tries to get writable cache dir with fallback to users data dir
        # and temp directory
        cache_dir = self._get_writable_cache_dir()
    else:
        if not os.access(cache_dir, os.W_OK):
            raise CacheFileError("None of cache directories is writable.")

    # get directory for cached file
    return os.path.join(cache_dir, self._CACHE_FILE_NAME)
```
source: https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/cachefile.py#L124-L142
repository: lipoja/URLExtract
path: urlextract/cachefile.py
function: CacheFile._download_tlds_list
language: python

```python
def _download_tlds_list(self):
    """
    Function downloads list of TLDs from IANA.
    LINK: https://data.iana.org/TLD/tlds-alpha-by-domain.txt

    :return: True if list was downloaded, False in case of an error
    :rtype: bool
    """
    url_list = 'https://data.iana.org/TLD/tlds-alpha-by-domain.txt'

    # The default cache file exists (set by _default_cache_file)
    # but we need write permission
    if self._default_cache_file and \
            not os.access(self._tld_list_path, os.W_OK):
        self._logger.info("Default cache file is not writable.")
        self._tld_list_path = self._get_cache_file_path()
        self._logger.info(
            "Changed path of cache file to: %s",
            self._tld_list_path
        )

    if os.access(self._tld_list_path, os.F_OK) and \
            not os.access(self._tld_list_path, os.W_OK):
        self._logger.error("ERROR: Cache file is not writable for current "
                           "user. ({})".format(self._tld_list_path))
        return False

    req = urllib.request.Request(url_list)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.0; '
                                 'WOW64; rv:24.0) Gecko/20100101 '
                                 'Firefox/24.0')
    with open(self._tld_list_path, 'w') as ftld:
        try:
            with urllib.request.urlopen(req) as f:
                page = f.read().decode('utf-8')
                ftld.write(page)
        except HTTPError as e:
            self._logger.error("ERROR: Cannot download list of TLDs. "
                               "(HTTPError: {})".format(e.reason))
            return False
        except URLError as e:
            self._logger.error("ERROR: Cannot download list of TLDs. "
                               "(URLError: {})".format(e.reason))
            return False
    return True
```
source: https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/cachefile.py#L144-L188
repository: lipoja/URLExtract
path: urlextract/cachefile.py
function: CacheFile._load_cached_tlds
language: python

```python
def _load_cached_tlds(self):
    """
    Loads TLDs from cached file to set.

    :return: Set of current TLDs
    :rtype: set
    """
    # check if cached file is readable
    if not os.access(self._tld_list_path, os.R_OK):
        self._logger.error("Cached file is not readable for current "
                           "user. ({})".format(self._tld_list_path))
        raise CacheFileError(
            "Cached file is not readable for current user."
        )

    set_of_tlds = set()
    with open(self._tld_list_path, 'r') as f_cache_tld:
        for line in f_cache_tld:
            tld = line.strip().lower()
            # skip empty lines
            if not tld:
                continue
            # skip comments
            if tld[0] == '#':
                continue

            set_of_tlds.add("." + tld)
            set_of_tlds.add("." + idna.decode(tld))

    return set_of_tlds
```
source: https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/cachefile.py#L190-L220
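A parsing sketch over an in-memory sample of the IANA file format (comments start with `#`; internationalized TLDs are stored as punycode and decoded with the third-party `idna` package):

```python
import idna

sample = "# Version 2019010900, Last Updated ...\nCOM\nXN--P1AI\n"
tlds = set()
for line in sample.splitlines():
    tld = line.strip().lower()
    if not tld or tld[0] == '#':
        continue
    tlds.add('.' + tld)
    tlds.add('.' + idna.decode(tld))  # adds '.рф' for 'xn--p1ai'
print(sorted(tlds))  # ['.com', '.xn--p1ai', '.рф']
```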
repository: lipoja/URLExtract
path: urlextract/cachefile.py
function: CacheFile._get_last_cachefile_modification
language: python

```python
def _get_last_cachefile_modification(self):
    """
    Get last modification of cache file with TLDs.

    :return: Date and time of last modification or
             None when file does not exist
    :rtype: datetime|None
    """
    try:
        mtime = os.path.getmtime(self._tld_list_path)
    except OSError:
        return None
    return datetime.fromtimestamp(mtime)
```
source: https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/cachefile.py#L222-L236
repository: lipoja/URLExtract
path: urlextract/urlextract_core.py
function: _urlextract_cli
language: python

```python
def _urlextract_cli():
    """
    urlextract - command line program that will print all URLs to stdout

    Usage: urlextract [input_file] [-u] [-v]

    input_file - text file with URLs to extract
    """
    import argparse

    def get_args():
        """
        Parse the program's arguments
        """
        parser = argparse.ArgumentParser(
            description='urlextract - prints out all URLs that were '
                        'found in input file or stdin based on locating '
                        'their TLDs')

        ver = URLExtract.get_version()
        parser.add_argument("-v", "--version", action="version",
                            version='%(prog)s - version {}'.format(ver))
        parser.add_argument(
            "-u", "--unique", dest='unique', action='store_true',
            help='print out only unique URLs found in file.')
        parser.add_argument(
            'input_file', nargs='?', metavar='<input_file>',
            type=argparse.FileType(encoding="UTF-8"), default=sys.stdin,
            help='input text file with URLs to extract. [UTF-8]')

        parsed_args = parser.parse_args()
        return parsed_args

    args = get_args()
    logging.basicConfig(
        level=logging.INFO, stream=sys.stderr,
        format='%(asctime)s - %(levelname)s (%(name)s): %(message)s')
    logger = logging.getLogger('urlextract')

    try:
        urlextract = URLExtract()
        urlextract.update_when_older(30)
        content = args.input_file.read()
        for url in urlextract.find_urls(content, args.unique):
            print(url)
    except CacheFileError as e:
        logger.error(str(e))
        sys.exit(-1)
    finally:
        args.input_file.close()
```
source: https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L590-L640
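The library-level equivalent of the CLI, for use from Python code (a sketch; the first run needs network access or a pre-seeded TLD cache):

```python
from urlextract import URLExtract

extractor = URLExtract()
extractor.update_when_older(30)  # refresh the TLD cache if older than 30 days
text = "Docs live at https://example.com and at docs.python.org today"
for url in extractor.find_urls(text, True):  # second arg: unique URLs only
    print(url)
# prints https://example.com and docs.python.org
```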
repository: lipoja/URLExtract
path: urlextract/urlextract_core.py
function: URLExtract._get_after_tld_chars
language: python

```python
def _get_after_tld_chars(self):
    """
    Initialize after tld characters
    """
    after_tld_chars = set(string.whitespace)
    after_tld_chars |= {'/', '\"', '\'', '<', '>', '?', ':', '.', ','}
    # get right enclosure characters
    _, right_enclosure = zip(*self._enclosure)
    # add right enclosure characters to be valid after TLD
    # for correct parsing of URL e.g. (example.com)
    after_tld_chars |= set(right_enclosure)
    return after_tld_chars
```
source: https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L91-L103
repository: lipoja/URLExtract
path: urlextract/urlextract_core.py
function: URLExtract._reload_tlds_from_file
language: python

```python
def _reload_tlds_from_file(self):
    """
    Reloads TLDs from file and compiles the regexp.

    :raises: CacheFileError when cached file is not readable for user
    """
    tlds = sorted(self._load_cached_tlds(), key=len, reverse=True)
    re_escaped = [re.escape(str(tld)) for tld in tlds]
    self._tlds_re = re.compile('|'.join(re_escaped))
```
source: https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L105-L113
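The longest-first sort matters because Python's regex alternation tries branches in order; without it a short TLD would shadow a longer one that shares its prefix:

```python
import re

tlds = sorted({'.co', '.com'}, key=len, reverse=True)   # ['.com', '.co']
pattern = re.compile('|'.join(re.escape(t) for t in tlds))
assert pattern.search('visit example.com').group() == '.com'  # not '.co'
```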
repository: lipoja/URLExtract
path: urlextract/urlextract_core.py
function: URLExtract.update_when_older
language: python

```python
def update_when_older(self, days):
    """
    Update TLD list cache file if the list is older than
    the number of days given in parameter `days` or if it does not exist.

    :param int days: number of days from last change
    :return: True if update was successful, False otherwise
    :rtype: bool
    """
    last_cache = self._get_last_cachefile_modification()
    if last_cache is None:
        return self.update()

    time_to_update = last_cache + timedelta(days=days)
    if datetime.now() >= time_to_update:
        return self.update()

    return True
```
source: https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L129-L148
repository: lipoja/URLExtract
path: urlextract/urlextract_core.py
function: URLExtract.set_stop_chars
language: python

```python
def set_stop_chars(self, stop_chars):
    """
    Set stop characters used when determining end of URL.

    .. deprecated:: 0.7
        Use :func:`set_stop_chars_left` or :func:`set_stop_chars_right`
        instead.

    :param list stop_chars: list of characters
    """
    warnings.warn("Method set_stop_chars is deprecated, "
                  "use `set_stop_chars_left` or "
                  "`set_stop_chars_right` instead", DeprecationWarning)
    self._stop_chars = set(stop_chars)
    self._stop_chars_left = self._stop_chars
    self._stop_chars_right = self._stop_chars
```
source: https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L196-L212
repository: lipoja/URLExtract
path: urlextract/urlextract_core.py
function: URLExtract.set_stop_chars_left
language: python

```python
def set_stop_chars_left(self, stop_chars):
    """
    Set stop characters for text on left from TLD.
    Stop characters are used when determining end of URL.

    :param set stop_chars: set of characters
    :raises: TypeError
    """
    if not isinstance(stop_chars, set):
        raise TypeError("stop_chars should be type set "
                        "but {} was given".format(type(stop_chars)))

    self._stop_chars_left = stop_chars
    self._stop_chars = self._stop_chars_left | self._stop_chars_right
```
source: https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L223-L236
repository: lipoja/URLExtract
path: urlextract/urlextract_core.py
function: URLExtract.add_enclosure
language: python

```python
def add_enclosure(self, left_char, right_char):
    """
    Add a new enclosure pair of characters. These should be removed
    when their presence is detected at the beginning and end of a found URL

    :param str left_char: left character of enclosure pair - e.g. "("
    :param str right_char: right character of enclosure pair - e.g. ")"
    """
    assert len(left_char) == 1, \
        "Parameter left_char must be character not string"
    assert len(right_char) == 1, \
        "Parameter right_char must be character not string"

    self._enclosure.add((left_char, right_char))
    self._after_tld_chars = self._get_after_tld_chars()
```
source: https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L272-L286
repository: lipoja/URLExtract
path: urlextract/urlextract_core.py
function: URLExtract.remove_enclosure
language: python

```python
def remove_enclosure(self, left_char, right_char):
    """
    Remove enclosure pair from set of enclosures.

    :param str left_char: left character of enclosure pair - e.g. "("
    :param str right_char: right character of enclosure pair - e.g. ")"
    """
    assert len(left_char) == 1, \
        "Parameter left_char must be character not string"
    assert len(right_char) == 1, \
        "Parameter right_char must be character not string"

    rm_enclosure = (left_char, right_char)
    if rm_enclosure in self._enclosure:
        self._enclosure.remove(rm_enclosure)
    self._after_tld_chars = self._get_after_tld_chars()
```
source: https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L288-L303
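Typical usage of the enclosure pair, assuming a constructed `URLExtract` instance as in the earlier CLI example:

```python
from urlextract import URLExtract

extractor = URLExtract()
extractor.add_enclosure('{', '}')     # treat {example.com} like (example.com)
print(extractor.find_urls('see {example.com}'))  # e.g. ['example.com']
extractor.remove_enclosure('{', '}')  # back to the default enclosure set
```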
repository: lipoja/URLExtract
path: urlextract/urlextract_core.py
function: URLExtract._complete_url
language: python

```python
def _complete_url(self, text, tld_pos, tld):
    """
    Expand string on both sides to match whole URL.

    :param str text: text where we want to find URL
    :param int tld_pos: position of TLD
    :param str tld: matched TLD which should be in text
    :return: returns URL
    :rtype: str
    """
    left_ok = True
    right_ok = True
    max_len = len(text) - 1
    end_pos = tld_pos
    start_pos = tld_pos
    while left_ok or right_ok:
        if left_ok:
            if start_pos <= 0:
                left_ok = False
            else:
                if text[start_pos - 1] not in self._stop_chars_left:
                    start_pos -= 1
                else:
                    left_ok = False
        if right_ok:
            if end_pos >= max_len:
                right_ok = False
            else:
                if text[end_pos + 1] not in self._stop_chars_right:
                    end_pos += 1
                else:
                    right_ok = False

    complete_url = text[start_pos:end_pos + 1].lstrip('/')
    # remove last character from url
    # when it is allowed character right after TLD (e.g. dot, comma)
    temp_tlds = {tld + c for c in self._after_tld_chars}
    # get only dot+tld+one_char and compare
    if complete_url[len(complete_url)-len(tld)-1:] in temp_tlds:
        complete_url = complete_url[:-1]

    complete_url = self._split_markdown(complete_url, tld_pos-start_pos)
    complete_url = self._remove_enclosure_from_url(
        complete_url, tld_pos-start_pos, tld)
    if not self._is_domain_valid(complete_url, tld):
        return ""

    return complete_url
```
source: https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L305-L354
lipoja/URLExtract | urlextract/urlextract_core.py | URLExtract._validate_tld_match | def _validate_tld_match(self, text, matched_tld, tld_pos):
"""
Validate TLD match - tells if at found position is really TLD.
:param str text: text where we want to find URLs
:param str matched_tld: matched TLD
:param int tld_pos: position of matched TLD
:return: True if match is valid, False otherwise
:rtype: bool
"""
if tld_pos > len(text):
return False
right_tld_pos = tld_pos + len(matched_tld)
if len(text) > right_tld_pos:
if text[right_tld_pos] in self._after_tld_chars:
if tld_pos > 0 and text[tld_pos - 1] \
not in self._stop_chars_left:
return True
else:
if tld_pos > 0 and text[tld_pos - 1] not in self._stop_chars_left:
return True
return False | python | def _validate_tld_match(self, text, matched_tld, tld_pos):
if tld_pos > len(text):
return False
right_tld_pos = tld_pos + len(matched_tld)
if len(text) > right_tld_pos:
if text[right_tld_pos] in self._after_tld_chars:
if tld_pos > 0 and text[tld_pos - 1] \
not in self._stop_chars_left:
return True
else:
if tld_pos > 0 and text[tld_pos - 1] not in self._stop_chars_left:
return True
return False | [
"def",
"_validate_tld_match",
"(",
"self",
",",
"text",
",",
"matched_tld",
",",
"tld_pos",
")",
":",
"if",
"tld_pos",
">",
"len",
"(",
"text",
")",
":",
"return",
"False",
"right_tld_pos",
"=",
"tld_pos",
"+",
"len",
"(",
"matched_tld",
")",
"if",
"len",
"(",
"text",
")",
">",
"right_tld_pos",
":",
"if",
"text",
"[",
"right_tld_pos",
"]",
"in",
"self",
".",
"_after_tld_chars",
":",
"if",
"tld_pos",
">",
"0",
"and",
"text",
"[",
"tld_pos",
"-",
"1",
"]",
"not",
"in",
"self",
".",
"_stop_chars_left",
":",
"return",
"True",
"else",
":",
"if",
"tld_pos",
">",
"0",
"and",
"text",
"[",
"tld_pos",
"-",
"1",
"]",
"not",
"in",
"self",
".",
"_stop_chars_left",
":",
"return",
"True",
"return",
"False"
]
| Validate TLD match - tells if at found position is really TLD.
:param str text: text where we want to find URLs
:param str matched_tld: matched TLD
:param int tld_pos: position of matched TLD
:return: True if match is valid, False otherwise
:rtype: bool | [
"Validate",
"TLD",
"match",
"-",
"tells",
"if",
"at",
"found",
"position",
"is",
"really",
"TLD",
"."
]
| train | https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L356-L379 |
lipoja/URLExtract | urlextract/urlextract_core.py | URLExtract._is_domain_valid | def _is_domain_valid(self, url, tld):
"""
Checks if given URL has valid domain name (ignores subdomains)
:param str url: complete URL that we want to check
:param str tld: TLD that should be found at the end of URL (hostname)
:return: True if URL is valid, False otherwise
:rtype: bool
>>> extractor = URLExtract()
>>> extractor._is_domain_valid("janlipovsky.cz", ".cz")
True
>>> extractor._is_domain_valid("https://janlipovsky.cz", ".cz")
True
>>> extractor._is_domain_valid("invalid.cz.", ".cz")
False
>>> extractor._is_domain_valid("invalid.cz,", ".cz")
False
>>> extractor._is_domain_valid("in.v_alid.cz", ".cz")
False
>>> extractor._is_domain_valid("-is.valid.cz", ".cz")
True
>>> extractor._is_domain_valid("not.valid-.cz", ".cz")
False
>>> extractor._is_domain_valid("http://blog/media/path.io.jpg", ".cz")
False
"""
if not url:
return False
scheme_pos = url.find('://')
if scheme_pos == -1:
url = 'http://' + url
url_parts = uritools.urisplit(url)
# <scheme>://<authority>/<path>?<query>#<fragment>
try:
host = url_parts.gethost()
except ValueError:
self._logger.info(
"Invalid host '%s'. "
"If the host is valid report a bug.", url
)
return False
if not host:
return False
host_parts = host.split('.')
if len(host_parts) <= 1:
return False
host_tld = '.'+host_parts[-1]
if host_tld != tld:
return False
top = host_parts[-2]
if self._hostname_re.match(top) is None:
return False
return True | python | def _is_domain_valid(self, url, tld):
if not url:
return False
scheme_pos = url.find('://')
if scheme_pos == -1:
url = 'http://' + url
url_parts = uritools.urisplit(url)
try:
host = url_parts.gethost()
except ValueError:
self._logger.info(
"Invalid host '%s'. "
"If the host is valid report a bug.", url
)
return False
if not host:
return False
host_parts = host.split('.')
if len(host_parts) <= 1:
return False
host_tld = '.'+host_parts[-1]
if host_tld != tld:
return False
top = host_parts[-2]
if self._hostname_re.match(top) is None:
return False
return True | [
"def",
"_is_domain_valid",
"(",
"self",
",",
"url",
",",
"tld",
")",
":",
"if",
"not",
"url",
":",
"return",
"False",
"scheme_pos",
"=",
"url",
".",
"find",
"(",
"'://'",
")",
"if",
"scheme_pos",
"==",
"-",
"1",
":",
"url",
"=",
"'http://'",
"+",
"url",
"url_parts",
"=",
"uritools",
".",
"urisplit",
"(",
"url",
")",
"# <scheme>://<authority>/<path>?<query>#<fragment>",
"try",
":",
"host",
"=",
"url_parts",
".",
"gethost",
"(",
")",
"except",
"ValueError",
":",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Invalid host '%s'. \"",
"\"If the host is valid report a bug.\"",
",",
"url",
")",
"return",
"False",
"if",
"not",
"host",
":",
"return",
"False",
"host_parts",
"=",
"host",
".",
"split",
"(",
"'.'",
")",
"if",
"len",
"(",
"host_parts",
")",
"<=",
"1",
":",
"return",
"False",
"host_tld",
"=",
"'.'",
"+",
"host_parts",
"[",
"-",
"1",
"]",
"if",
"host_tld",
"!=",
"tld",
":",
"return",
"False",
"top",
"=",
"host_parts",
"[",
"-",
"2",
"]",
"if",
"self",
".",
"_hostname_re",
".",
"match",
"(",
"top",
")",
"is",
"None",
":",
"return",
"False",
"return",
"True"
]
| Checks if given URL has valid domain name (ignores subdomains)
:param str url: complete URL that we want to check
:param str tld: TLD that should be found at the end of URL (hostname)
:return: True if URL is valid, False otherwise
:rtype: bool
>>> extractor = URLExtract()
>>> extractor._is_domain_valid("janlipovsky.cz", ".cz")
True
>>> extractor._is_domain_valid("https://janlipovsky.cz", ".cz")
True
>>> extractor._is_domain_valid("invalid.cz.", ".cz")
False
>>> extractor._is_domain_valid("invalid.cz,", ".cz")
False
>>> extractor._is_domain_valid("in.v_alid.cz", ".cz")
False
>>> extractor._is_domain_valid("-is.valid.cz", ".cz")
True
>>> extractor._is_domain_valid("not.valid-.cz", ".cz")
False
>>> extractor._is_domain_valid("http://blog/media/path.io.jpg", ".cz")
False | [
"Checks",
"if",
"given",
"URL",
"has",
"valid",
"domain",
"name",
"(",
"ignores",
"subdomains",
")"
]
| train | https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L381-L451 |
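The host check above leans entirely on uritools; a small standalone sketch of the same splitting, reusing the calls the method itself makes (the sample URL is illustrative):

    import uritools

    url_parts = uritools.urisplit('http://sub.janlipovsky.cz/page')
    host = str(url_parts.gethost())   # 'sub.janlipovsky.cz'
    host_parts = host.split('.')
    print('.' + host_parts[-1])       # '.cz' -- compared against the matched TLD
    print(host_parts[-2])             # 'janlipovsky' -- validated against _hostname_re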
lipoja/URLExtract | urlextract/urlextract_core.py | URLExtract._remove_enclosure_from_url | def _remove_enclosure_from_url(self, text_url, tld_pos, tld):
"""
Removes enclosure characters from URL given in text_url.
For example: (example.com) -> example.com
:param str text_url: text with URL that we want to extract from
enclosure of two characters
:param int tld_pos: position of TLD in text_url
:param str tld: matched TLD which should be in text
:return: URL that has removed enclosure
:rtype: str
"""
enclosure_map = {
left_char: right_char
for left_char, right_char in self._enclosure
}
# get position of most right left_char of enclosure pairs
left_pos = max([
text_url.rfind(left_char, 0, tld_pos)
for left_char in enclosure_map.keys()
])
left_char = text_url[left_pos] if left_pos >= 0 else ''
right_char = enclosure_map.get(left_char, '')
right_pos = text_url.rfind(right_char) if right_char else len(text_url)
if right_pos < 0 or right_pos < tld_pos:
right_pos = len(text_url)
new_url = text_url[left_pos + 1:right_pos]
tld_pos -= left_pos + 1
# Get valid domain when we have input as: example.com)/path
# we assume that if there is an enclosure character after the TLD it is
# the end of the URL itself, therefore we remove the rest
after_tld_pos = tld_pos + len(tld)
if after_tld_pos < len(new_url):
if new_url[after_tld_pos] in enclosure_map.values():
new_url_tmp = new_url[:after_tld_pos]
return self._remove_enclosure_from_url(
new_url_tmp, tld_pos, tld)
return new_url | python | def _remove_enclosure_from_url(self, text_url, tld_pos, tld):
enclosure_map = {
left_char: right_char
for left_char, right_char in self._enclosure
}
left_pos = max([
text_url.rfind(left_char, 0, tld_pos)
for left_char in enclosure_map.keys()
])
left_char = text_url[left_pos] if left_pos >= 0 else ''
right_char = enclosure_map.get(left_char, '')
right_pos = text_url.rfind(right_char) if right_char else len(text_url)
if right_pos < 0 or right_pos < tld_pos:
right_pos = len(text_url)
new_url = text_url[left_pos + 1:right_pos]
tld_pos -= left_pos + 1
after_tld_pos = tld_pos + len(tld)
if after_tld_pos < len(new_url):
if new_url[after_tld_pos] in enclosure_map.values():
new_url_tmp = new_url[:after_tld_pos]
return self._remove_enclosure_from_url(
new_url_tmp, tld_pos, tld)
return new_url | [
"def",
"_remove_enclosure_from_url",
"(",
"self",
",",
"text_url",
",",
"tld_pos",
",",
"tld",
")",
":",
"enclosure_map",
"=",
"{",
"left_char",
":",
"right_char",
"for",
"left_char",
",",
"right_char",
"in",
"self",
".",
"_enclosure",
"}",
"# get position of most right left_char of enclosure pairs",
"left_pos",
"=",
"max",
"(",
"[",
"text_url",
".",
"rfind",
"(",
"left_char",
",",
"0",
",",
"tld_pos",
")",
"for",
"left_char",
"in",
"enclosure_map",
".",
"keys",
"(",
")",
"]",
")",
"left_char",
"=",
"text_url",
"[",
"left_pos",
"]",
"if",
"left_pos",
">=",
"0",
"else",
"''",
"right_char",
"=",
"enclosure_map",
".",
"get",
"(",
"left_char",
",",
"''",
")",
"right_pos",
"=",
"text_url",
".",
"rfind",
"(",
"right_char",
")",
"if",
"right_char",
"else",
"len",
"(",
"text_url",
")",
"if",
"right_pos",
"<",
"0",
"or",
"right_pos",
"<",
"tld_pos",
":",
"right_pos",
"=",
"len",
"(",
"text_url",
")",
"new_url",
"=",
"text_url",
"[",
"left_pos",
"+",
"1",
":",
"right_pos",
"]",
"tld_pos",
"-=",
"left_pos",
"+",
"1",
"# Get valid domain when we have input as: example.com)/path",
"# we assume that if there is enclosure character after TLD it is",
"# the end URL it self therefore we remove the rest",
"after_tld_pos",
"=",
"tld_pos",
"+",
"len",
"(",
"tld",
")",
"if",
"after_tld_pos",
"<",
"len",
"(",
"new_url",
")",
":",
"if",
"new_url",
"[",
"after_tld_pos",
"]",
"in",
"enclosure_map",
".",
"values",
"(",
")",
":",
"new_url_tmp",
"=",
"new_url",
"[",
":",
"after_tld_pos",
"]",
"return",
"self",
".",
"_remove_enclosure_from_url",
"(",
"new_url_tmp",
",",
"tld_pos",
",",
"tld",
")",
"return",
"new_url"
]
| Removes enclosure characters from URL given in text_url.
For example: (example.com) -> example.com
:param str text_url: text with URL that we want to extract from
enclosure of two characters
:param int tld_pos: position of TLD in text_url
:param str tld: matched TLD which should be in text
:return: URL that has removed enclosure
:rtype: str | [
"Removes",
"enclosure",
"characters",
"from",
"URL",
"given",
"in",
"text_url",
".",
"For",
"example",
":",
"(",
"example",
".",
"com",
")",
"-",
">",
"example",
".",
"com"
]
| train | https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L453-L494 |
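A hedged usage sketch of this internal helper, assuming "(" / ")" is among the default enclosure pairs (the enclosure docstrings above suggest it is); 8 is the index where ".com" starts:

    from urlextract import URLExtract  # top-level export assumed

    extractor = URLExtract()  # assumed to load a cached TLD list on construction
    print(extractor._remove_enclosure_from_url("(example.com)", 8, ".com"))
    # -> 'example.com'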
lipoja/URLExtract | urlextract/urlextract_core.py | URLExtract._split_markdown | def _split_markdown(text_url, tld_pos):
"""
Split markdown URL. There is an issue when Markdown URL is found.
Parsing of the URL does not stop on right place so wrongly found URL
has to be split.
:param str text_url: URL that we want to extract from enclosure
:param int tld_pos: position of TLD
:return: URL that has removed enclosure
:rtype: str
"""
# Markdown URL can look like:
# [http://example.com/](http://example.com/status/210)
left_bracket_pos = text_url.find('[')
# subtract 3 because URL is never shorter than 3 characters
if left_bracket_pos > tld_pos-3:
return text_url
right_bracket_pos = text_url.find(')')
if right_bracket_pos < tld_pos:
return text_url
middle_pos = text_url.rfind("](")
if middle_pos > tld_pos:
return text_url[left_bracket_pos+1:middle_pos]
return text_url | python | def _split_markdown(text_url, tld_pos):
left_bracket_pos = text_url.find('[')
if left_bracket_pos > tld_pos-3:
return text_url
right_bracket_pos = text_url.find(')')
if right_bracket_pos < tld_pos:
return text_url
middle_pos = text_url.rfind("](")
if middle_pos > tld_pos:
return text_url[left_bracket_pos+1:middle_pos]
return text_url | [
"def",
"_split_markdown",
"(",
"text_url",
",",
"tld_pos",
")",
":",
"# Markdown url can looks like:",
"# [http://example.com/](http://example.com/status/210)",
"left_bracket_pos",
"=",
"text_url",
".",
"find",
"(",
"'['",
")",
"# subtract 3 because URL is never shorter than 3 characters",
"if",
"left_bracket_pos",
">",
"tld_pos",
"-",
"3",
":",
"return",
"text_url",
"right_bracket_pos",
"=",
"text_url",
".",
"find",
"(",
"')'",
")",
"if",
"right_bracket_pos",
"<",
"tld_pos",
":",
"return",
"text_url",
"middle_pos",
"=",
"text_url",
".",
"rfind",
"(",
"\"](\"",
")",
"if",
"middle_pos",
">",
"tld_pos",
":",
"return",
"text_url",
"[",
"left_bracket_pos",
"+",
"1",
":",
"middle_pos",
"]",
"return",
"text_url"
]
| Split markdown URL. There is an issue when Markdown URL is found.
Parsing of the URL does not stop on right place so wrongly found URL
has to be split.
:param str text_url: URL that we want to extract from enclosure
:param int tld_pos: position of TLD
:return: URL that has removed enclosure
:rtype: str | [
"Split",
"markdown",
"URL",
".",
"There",
"is",
"an",
"issue",
"wen",
"Markdown",
"URL",
"is",
"found",
".",
"Parsing",
"of",
"the",
"URL",
"does",
"not",
"stop",
"on",
"right",
"place",
"so",
"wrongly",
"found",
"URL",
"has",
"to",
"be",
"split",
"."
]
| train | https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L497-L523 |
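The comment's own example, run through the method directly (URLExtract is assumed to be the package's top-level export; 15 is the index where the first ".com" starts):

    from urlextract import URLExtract  # top-level export assumed

    md = "[http://example.com/](http://example.com/status/210)"
    print(URLExtract._split_markdown(md, 15))  # -> 'http://example.com/'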
lipoja/URLExtract | urlextract/urlextract_core.py | URLExtract.gen_urls | def gen_urls(self, text):
"""
Creates generator over found URLs in given text.
:param str text: text where we want to find URLs
:yields: URL found in text or empty string if none is found
:rtype: str
"""
tld_pos = 0
matched_tlds = self._tlds_re.findall(text)
for tld in matched_tlds:
tmp_text = text[tld_pos:]
offset = tld_pos
tld_pos = tmp_text.find(tld)
validated = self._validate_tld_match(text, tld, offset + tld_pos)
if tld_pos != -1 and validated:
tmp_url = self._complete_url(text, offset + tld_pos, tld)
if tmp_url:
yield tmp_url
# do not search for TLD in already extracted URL
tld_pos_url = tmp_url.find(tld)
# move cursor right after found TLD
tld_pos += len(tld) + offset
# move cursor after end of found URL
tld_pos += len(tmp_url[tld_pos_url+len(tld):])
continue
# move cursor right after found TLD
tld_pos += len(tld) + offset | python | def gen_urls(self, text):
tld_pos = 0
matched_tlds = self._tlds_re.findall(text)
for tld in matched_tlds:
tmp_text = text[tld_pos:]
offset = tld_pos
tld_pos = tmp_text.find(tld)
validated = self._validate_tld_match(text, tld, offset + tld_pos)
if tld_pos != -1 and validated:
tmp_url = self._complete_url(text, offset + tld_pos, tld)
if tmp_url:
yield tmp_url
tld_pos_url = tmp_url.find(tld)
tld_pos += len(tld) + offset
tld_pos += len(tmp_url[tld_pos_url+len(tld):])
continue
tld_pos += len(tld) + offset | [
"def",
"gen_urls",
"(",
"self",
",",
"text",
")",
":",
"tld_pos",
"=",
"0",
"matched_tlds",
"=",
"self",
".",
"_tlds_re",
".",
"findall",
"(",
"text",
")",
"for",
"tld",
"in",
"matched_tlds",
":",
"tmp_text",
"=",
"text",
"[",
"tld_pos",
":",
"]",
"offset",
"=",
"tld_pos",
"tld_pos",
"=",
"tmp_text",
".",
"find",
"(",
"tld",
")",
"validated",
"=",
"self",
".",
"_validate_tld_match",
"(",
"text",
",",
"tld",
",",
"offset",
"+",
"tld_pos",
")",
"if",
"tld_pos",
"!=",
"-",
"1",
"and",
"validated",
":",
"tmp_url",
"=",
"self",
".",
"_complete_url",
"(",
"text",
",",
"offset",
"+",
"tld_pos",
",",
"tld",
")",
"if",
"tmp_url",
":",
"yield",
"tmp_url",
"# do not search for TLD in already extracted URL",
"tld_pos_url",
"=",
"tmp_url",
".",
"find",
"(",
"tld",
")",
"# move cursor right after found TLD",
"tld_pos",
"+=",
"len",
"(",
"tld",
")",
"+",
"offset",
"# move cursor after end of found URL",
"tld_pos",
"+=",
"len",
"(",
"tmp_url",
"[",
"tld_pos_url",
"+",
"len",
"(",
"tld",
")",
":",
"]",
")",
"continue",
"# move cursor right after found TLD",
"tld_pos",
"+=",
"len",
"(",
"tld",
")",
"+",
"offset"
]
| Creates generator over found URLs in given text.
:param str text: text where we want to find URLs
:yields: URL found in text or empty string if none is found
:rtype: str | [
"Creates",
"generator",
"over",
"found",
"URLs",
"in",
"given",
"text",
"."
]
| train | https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L525-L555 |
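A minimal consumption sketch; because gen_urls is a generator, extraction can stop early on large inputs (the sample text is illustrative):

    from urlextract import URLExtract  # top-level export assumed

    extractor = URLExtract()
    for url in extractor.gen_urls("Docs live at janlipovsky.cz and example.com"):
        print(url)  # 'janlipovsky.cz', then 'example.com'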
lipoja/URLExtract | urlextract/urlextract_core.py | URLExtract.find_urls | def find_urls(self, text, only_unique=False):
"""
Find all URLs in given text.
:param str text: text where we want to find URLs
:param bool only_unique: return only unique URLs
:return: list of URLs found in text
:rtype: list
"""
urls = self.gen_urls(text)
urls = OrderedDict.fromkeys(urls) if only_unique else urls
return list(urls) | python | def find_urls(self, text, only_unique=False):
urls = self.gen_urls(text)
urls = OrderedDict.fromkeys(urls) if only_unique else urls
return list(urls) | [
"def",
"find_urls",
"(",
"self",
",",
"text",
",",
"only_unique",
"=",
"False",
")",
":",
"urls",
"=",
"self",
".",
"gen_urls",
"(",
"text",
")",
"urls",
"=",
"OrderedDict",
".",
"fromkeys",
"(",
"urls",
")",
"if",
"only_unique",
"else",
"urls",
"return",
"list",
"(",
"urls",
")"
]
| Find all URLs in given text.
:param str text: text where we want to find URLs
:param bool only_unique: return only unique URLs
:return: list of URLs found in text
:rtype: list | [
"Find",
"all",
"URLs",
"in",
"given",
"text",
"."
]
| train | https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L557-L568 |
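The deduplicating variant preserves first-seen order because the generator is funneled through OrderedDict.fromkeys; a quick sketch (the sample text is illustrative):

    from urlextract import URLExtract  # top-level export assumed

    text = "See example.com, again example.com, and janlipovsky.cz"
    print(URLExtract().find_urls(text, only_unique=True))
    # -> ['example.com', 'janlipovsky.cz']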
pinax/pinax-badges | pinax/badges/base.py | Badge.possibly_award | def possibly_award(self, **state):
"""
Will see if the user should be awarded a badge. If this badge is
asynchronous it just queues up the badge awarding.
"""
assert "user" in state
if self.async:
from .tasks import AsyncBadgeAward
state = self.freeze(**state)
AsyncBadgeAward.delay(self, state)
return
self.actually_possibly_award(**state) | python | def possibly_award(self, **state):
assert "user" in state
if self.async:
from .tasks import AsyncBadgeAward
state = self.freeze(**state)
AsyncBadgeAward.delay(self, state)
return
self.actually_possibly_award(**state) | [
"def",
"possibly_award",
"(",
"self",
",",
"*",
"*",
"state",
")",
":",
"assert",
"\"user\"",
"in",
"state",
"if",
"self",
".",
"async",
":",
"from",
".",
"tasks",
"import",
"AsyncBadgeAward",
"state",
"=",
"self",
".",
"freeze",
"(",
"*",
"*",
"state",
")",
"AsyncBadgeAward",
".",
"delay",
"(",
"self",
",",
"state",
")",
"return",
"self",
".",
"actually_possibly_award",
"(",
"*",
"*",
"state",
")"
]
| Will see if the user should be awarded a badge. If this badge is
asynchronous it just queues up the badge awarding. | [
"Will",
"see",
"if",
"the",
"user",
"should",
"be",
"awarded",
"a",
"badge",
".",
"If",
"this",
"badge",
"is",
"asynchronous",
"it",
"just",
"queues",
"up",
"the",
"badge",
"awarding",
"."
]
| train | https://github.com/pinax/pinax-badges/blob/0921c388088e7c7a77098dc7d0eea393b4707ce5/pinax/badges/base.py#L26-L37 |
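A hedged sketch of the calling convention this method expects: a Badge subclass implements award(**state) and call sites pass user= plus whatever extra state the badge needs. PointsBadge and its points logic are hypothetical; BadgeAwarded is assumed to be the small result helper pinax-badges pairs with this base class:

    from pinax.badges.base import Badge, BadgeAwarded  # BadgeAwarded assumed

    class PointsBadge(Badge):  # hypothetical badge
        slug = "points"
        levels = ["Bronze", "Silver", "Gold"]
        events = ["points_awarded"]
        multiple = False

        def award(self, **state):
            user = state["user"]
            if user.profile.points > 100:    # hypothetical profile field
                return BadgeAwarded(level=1)

    # at a call site, e.g. inside a signal handler:
    # PointsBadge().possibly_award(user=some_user)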
pinax/pinax-badges | pinax/badges/base.py | Badge.actually_possibly_award | def actually_possibly_award(self, **state):
"""
Does the actual work of possibly awarding a badge.
"""
user = state["user"]
force_timestamp = state.pop("force_timestamp", None)
awarded = self.award(**state)
if awarded is None:
return
if awarded.level is None:
assert len(self.levels) == 1
awarded.level = 1
# awarded levels are 1 indexed, for conveineince
awarded = awarded.level - 1
assert awarded < len(self.levels)
if (
not self.multiple and
BadgeAward.objects.filter(user=user, slug=self.slug, level=awarded)
):
return
extra_kwargs = {}
if force_timestamp is not None:
extra_kwargs["awarded_at"] = force_timestamp
badge = BadgeAward.objects.create(
user=user,
slug=self.slug,
level=awarded,
**extra_kwargs
)
self.send_badge_messages(badge)
badge_awarded.send(sender=self, badge_award=badge) | python | def actually_possibly_award(self, **state):
user = state["user"]
force_timestamp = state.pop("force_timestamp", None)
awarded = self.award(**state)
if awarded is None:
return
if awarded.level is None:
assert len(self.levels) == 1
awarded.level = 1
awarded = awarded.level - 1
assert awarded < len(self.levels)
if (
not self.multiple and
BadgeAward.objects.filter(user=user, slug=self.slug, level=awarded)
):
return
extra_kwargs = {}
if force_timestamp is not None:
extra_kwargs["awarded_at"] = force_timestamp
badge = BadgeAward.objects.create(
user=user,
slug=self.slug,
level=awarded,
**extra_kwargs
)
self.send_badge_messages(badge)
badge_awarded.send(sender=self, badge_award=badge) | [
"def",
"actually_possibly_award",
"(",
"self",
",",
"*",
"*",
"state",
")",
":",
"user",
"=",
"state",
"[",
"\"user\"",
"]",
"force_timestamp",
"=",
"state",
".",
"pop",
"(",
"\"force_timestamp\"",
",",
"None",
")",
"awarded",
"=",
"self",
".",
"award",
"(",
"*",
"*",
"state",
")",
"if",
"awarded",
"is",
"None",
":",
"return",
"if",
"awarded",
".",
"level",
"is",
"None",
":",
"assert",
"len",
"(",
"self",
".",
"levels",
")",
"==",
"1",
"awarded",
".",
"level",
"=",
"1",
"# awarded levels are 1 indexed, for conveineince",
"awarded",
"=",
"awarded",
".",
"level",
"-",
"1",
"assert",
"awarded",
"<",
"len",
"(",
"self",
".",
"levels",
")",
"if",
"(",
"not",
"self",
".",
"multiple",
"and",
"BadgeAward",
".",
"objects",
".",
"filter",
"(",
"user",
"=",
"user",
",",
"slug",
"=",
"self",
".",
"slug",
",",
"level",
"=",
"awarded",
")",
")",
":",
"return",
"extra_kwargs",
"=",
"{",
"}",
"if",
"force_timestamp",
"is",
"not",
"None",
":",
"extra_kwargs",
"[",
"\"awarded_at\"",
"]",
"=",
"force_timestamp",
"badge",
"=",
"BadgeAward",
".",
"objects",
".",
"create",
"(",
"user",
"=",
"user",
",",
"slug",
"=",
"self",
".",
"slug",
",",
"level",
"=",
"awarded",
",",
"*",
"*",
"extra_kwargs",
")",
"self",
".",
"send_badge_messages",
"(",
"badge",
")",
"badge_awarded",
".",
"send",
"(",
"sender",
"=",
"self",
",",
"badge_award",
"=",
"badge",
")"
]
| Does the actual work of possibly awarding a badge. | [
"Does",
"the",
"actual",
"work",
"of",
"possibly",
"awarding",
"a",
"badge",
"."
]
| train | https://github.com/pinax/pinax-badges/blob/0921c388088e7c7a77098dc7d0eea393b4707ce5/pinax/badges/base.py#L39-L69 |
pinax/pinax-badges | pinax/badges/base.py | Badge.send_badge_messages | def send_badge_messages(self, badge_award):
"""
If the Badge class defines a message, send it to the user who was just
awarded the badge.
"""
user_message = getattr(badge_award.badge, "user_message", None)
if callable(user_message):
message = user_message(badge_award)
else:
message = user_message
if message is not None:
badge_award.user.message_set.create(message=message) | python | def send_badge_messages(self, badge_award):
user_message = getattr(badge_award.badge, "user_message", None)
if callable(user_message):
message = user_message(badge_award)
else:
message = user_message
if message is not None:
badge_award.user.message_set.create(message=message) | [
"def",
"send_badge_messages",
"(",
"self",
",",
"badge_award",
")",
":",
"user_message",
"=",
"getattr",
"(",
"badge_award",
".",
"badge",
",",
"\"user_message\"",
",",
"None",
")",
"if",
"callable",
"(",
"user_message",
")",
":",
"message",
"=",
"user_message",
"(",
"badge_award",
")",
"else",
":",
"message",
"=",
"user_message",
"if",
"message",
"is",
"not",
"None",
":",
"badge_award",
".",
"user",
".",
"message_set",
".",
"create",
"(",
"message",
"=",
"message",
")"
]
| If the Badge class defines a message, send it to the user who was just
awarded the badge. | [
"If",
"the",
"Badge",
"class",
"defines",
"a",
"message",
"send",
"it",
"to",
"the",
"user",
"who",
"was",
"just",
"awarded",
"the",
"badge",
"."
]
| train | https://github.com/pinax/pinax-badges/blob/0921c388088e7c7a77098dc7d0eea393b4707ce5/pinax/badges/base.py#L71-L82 |
JBKahn/flake8-print | flake8_print.py | PrintFinder.visit_Print | def visit_Print(self, node):
"""Only exists in python 2."""
self.prints_used[(node.lineno, node.col_offset)] = VIOLATIONS["found"][PRINT_FUNCTION_NAME] | python | def visit_Print(self, node):
self.prints_used[(node.lineno, node.col_offset)] = VIOLATIONS["found"][PRINT_FUNCTION_NAME] | [
"def",
"visit_Print",
"(",
"self",
",",
"node",
")",
":",
"self",
".",
"prints_used",
"[",
"(",
"node",
".",
"lineno",
",",
"node",
".",
"col_offset",
")",
"]",
"=",
"VIOLATIONS",
"[",
"\"found\"",
"]",
"[",
"PRINT_FUNCTION_NAME",
"]"
]
| Only exists in python 2. | [
"Only",
"exists",
"in",
"python",
"2",
"."
]
| train | https://github.com/JBKahn/flake8-print/blob/e5d3812c4c93628ed804e9ecf74c2d31780627e5/flake8_print.py#L30-L32 |
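A hedged sketch of driving the visitor directly, assuming PrintFinder subclasses ast.NodeVisitor and takes no constructor arguments; ast.Print nodes only exist under a Python 2 parser, so on Python 3 this particular hook never fires:

    import ast

    from flake8_print import PrintFinder

    tree = ast.parse("print 'x'")  # parses only under Python 2
    finder = PrintFinder()
    finder.visit(tree)
    print(finder.prints_used)      # {(lineno, col_offset): violation, ...}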
peterdemin/pip-compile-multi | pipcompilemulti/environment.py | Environment.create_lockfile | def create_lockfile(self):
"""
Write recursive dependencies list to outfile
with hard-pinned versions.
Then fix it.
"""
process = subprocess.Popen(
self.pin_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = process.communicate()
if process.returncode == 0:
self.fix_lockfile()
else:
logger.critical("ERROR executing %s", ' '.join(self.pin_command))
logger.critical("Exit code: %s", process.returncode)
logger.critical(stdout.decode('utf-8'))
logger.critical(stderr.decode('utf-8'))
raise RuntimeError("Failed to pip-compile {0}".format(self.infile)) | python | def create_lockfile(self):
process = subprocess.Popen(
self.pin_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = process.communicate()
if process.returncode == 0:
self.fix_lockfile()
else:
logger.critical("ERROR executing %s", ' '.join(self.pin_command))
logger.critical("Exit code: %s", process.returncode)
logger.critical(stdout.decode('utf-8'))
logger.critical(stderr.decode('utf-8'))
raise RuntimeError("Failed to pip-compile {0}".format(self.infile)) | [
"def",
"create_lockfile",
"(",
"self",
")",
":",
"process",
"=",
"subprocess",
".",
"Popen",
"(",
"self",
".",
"pin_command",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
",",
")",
"stdout",
",",
"stderr",
"=",
"process",
".",
"communicate",
"(",
")",
"if",
"process",
".",
"returncode",
"==",
"0",
":",
"self",
".",
"fix_lockfile",
"(",
")",
"else",
":",
"logger",
".",
"critical",
"(",
"\"ERROR executing %s\"",
",",
"' '",
".",
"join",
"(",
"self",
".",
"pin_command",
")",
")",
"logger",
".",
"critical",
"(",
"\"Exit code: %s\"",
",",
"process",
".",
"returncode",
")",
"logger",
".",
"critical",
"(",
"stdout",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"logger",
".",
"critical",
"(",
"stderr",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"raise",
"RuntimeError",
"(",
"\"Failed to pip-compile {0}\"",
".",
"format",
"(",
"self",
".",
"infile",
")",
")"
]
| Write recursive dependencies list to outfile
with hard-pinned versions.
Then fix it. | [
"Write",
"recursive",
"dependencies",
"list",
"to",
"outfile",
"with",
"hard",
"-",
"pinned",
"versions",
".",
"Then",
"fix",
"it",
"."
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/environment.py#L31-L50 |
peterdemin/pip-compile-multi | pipcompilemulti/environment.py | Environment.parse_references | def parse_references(cls, filename):
"""
Read filename line by line searching for pattern:
-r file.in
or
--requirement file.in
return set of matched file names without extension.
E.g. ['file']
"""
references = set()
for line in open(filename):
matched = cls.RE_REF.match(line)
if matched:
reference = matched.group('path')
reference_base = os.path.splitext(reference)[0]
references.add(reference_base)
return references | python | def parse_references(cls, filename):
references = set()
for line in open(filename):
matched = cls.RE_REF.match(line)
if matched:
reference = matched.group('path')
reference_base = os.path.splitext(reference)[0]
references.add(reference_base)
return references | [
"def",
"parse_references",
"(",
"cls",
",",
"filename",
")",
":",
"references",
"=",
"set",
"(",
")",
"for",
"line",
"in",
"open",
"(",
"filename",
")",
":",
"matched",
"=",
"cls",
".",
"RE_REF",
".",
"match",
"(",
"line",
")",
"if",
"matched",
":",
"reference",
"=",
"matched",
".",
"group",
"(",
"'path'",
")",
"reference_base",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"reference",
")",
"[",
"0",
"]",
"references",
".",
"add",
"(",
"reference_base",
")",
"return",
"references"
]
| Read filename line by line searching for pattern:
-r file.in
or
--requirement file.in
return set of matched file names without extension.
E.g. ['file'] | [
"Read",
"filename",
"line",
"by",
"line",
"searching",
"for",
"pattern",
":"
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/environment.py#L53-L71 |
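A self-contained sketch of the classmethod, matching the pattern the docstring describes (the temp file stands in for a requirements/*.in file):

    import os
    import tempfile

    from pipcompilemulti.environment import Environment

    with tempfile.NamedTemporaryFile('w', suffix='.in', delete=False) as fp:
        fp.write('-r base.in\n--requirement test.in\nsix\n')
    try:
        print(Environment.parse_references(fp.name))  # -> {'base', 'test'}
    finally:
        os.unlink(fp.name)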
peterdemin/pip-compile-multi | pipcompilemulti/environment.py | Environment.infile | def infile(self):
"""Path of the input file"""
return os.path.join(OPTIONS['base_dir'],
'{0}.{1}'.format(self.name, OPTIONS['in_ext'])) | python | def infile(self):
return os.path.join(OPTIONS['base_dir'],
'{0}.{1}'.format(self.name, OPTIONS['in_ext'])) | [
"def",
"infile",
"(",
"self",
")",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"OPTIONS",
"[",
"'base_dir'",
"]",
",",
"'{0}.{1}'",
".",
"format",
"(",
"self",
".",
"name",
",",
"OPTIONS",
"[",
"'in_ext'",
"]",
")",
")"
]
| Path of the input file | [
"Path",
"of",
"the",
"input",
"file"
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/environment.py#L74-L77 |
peterdemin/pip-compile-multi | pipcompilemulti/environment.py | Environment.outfile | def outfile(self):
"""Path of the output file"""
return os.path.join(OPTIONS['base_dir'],
'{0}.{1}'.format(self.name, OPTIONS['out_ext'])) | python | def outfile(self):
return os.path.join(OPTIONS['base_dir'],
'{0}.{1}'.format(self.name, OPTIONS['out_ext'])) | [
"def",
"outfile",
"(",
"self",
")",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"OPTIONS",
"[",
"'base_dir'",
"]",
",",
"'{0}.{1}'",
".",
"format",
"(",
"self",
".",
"name",
",",
"OPTIONS",
"[",
"'out_ext'",
"]",
")",
")"
]
| Path of the output file | [
"Path",
"of",
"the",
"output",
"file"
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/environment.py#L80-L83 |
peterdemin/pip-compile-multi | pipcompilemulti/environment.py | Environment.pin_command | def pin_command(self):
"""Compose pip-compile shell command"""
parts = [
'pip-compile',
'--no-header',
'--verbose',
'--rebuild',
'--no-index',
'--output-file', self.outfile,
self.infile,
]
if OPTIONS['upgrade']:
parts.insert(3, '--upgrade')
if self.add_hashes:
parts.insert(1, '--generate-hashes')
return parts | python | def pin_command(self):
parts = [
'pip-compile',
'--no-header',
'--verbose',
'--rebuild',
'--no-index',
'--output-file', self.outfile,
self.infile,
]
if OPTIONS['upgrade']:
parts.insert(3, '--upgrade')
if self.add_hashes:
parts.insert(1, '--generate-hashes')
return parts | [
"def",
"pin_command",
"(",
"self",
")",
":",
"parts",
"=",
"[",
"'pip-compile'",
",",
"'--no-header'",
",",
"'--verbose'",
",",
"'--rebuild'",
",",
"'--no-index'",
",",
"'--output-file'",
",",
"self",
".",
"outfile",
",",
"self",
".",
"infile",
",",
"]",
"if",
"OPTIONS",
"[",
"'upgrade'",
"]",
":",
"parts",
".",
"insert",
"(",
"3",
",",
"'--upgrade'",
")",
"if",
"self",
".",
"add_hashes",
":",
"parts",
".",
"insert",
"(",
"1",
",",
"'--generate-hashes'",
")",
"return",
"parts"
]
| Compose pip-compile shell command | [
"Compose",
"pip",
"-",
"compile",
"shell",
"command"
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/environment.py#L86-L101 |
peterdemin/pip-compile-multi | pipcompilemulti/environment.py | Environment.fix_lockfile | def fix_lockfile(self):
"""Run each line of outfile through fix_pin"""
with open(self.outfile, 'rt') as fp:
lines = [
self.fix_pin(line)
for line in self.concatenated(fp)
]
with open(self.outfile, 'wt') as fp:
fp.writelines([
line + '\n'
for line in lines
if line is not None
]) | python | def fix_lockfile(self):
with open(self.outfile, 'rt') as fp:
lines = [
self.fix_pin(line)
for line in self.concatenated(fp)
]
with open(self.outfile, 'wt') as fp:
fp.writelines([
line + '\n'
for line in lines
if line is not None
]) | [
"def",
"fix_lockfile",
"(",
"self",
")",
":",
"with",
"open",
"(",
"self",
".",
"outfile",
",",
"'rt'",
")",
"as",
"fp",
":",
"lines",
"=",
"[",
"self",
".",
"fix_pin",
"(",
"line",
")",
"for",
"line",
"in",
"self",
".",
"concatenated",
"(",
"fp",
")",
"]",
"with",
"open",
"(",
"self",
".",
"outfile",
",",
"'wt'",
")",
"as",
"fp",
":",
"fp",
".",
"writelines",
"(",
"[",
"line",
"+",
"'\\n'",
"for",
"line",
"in",
"lines",
"if",
"line",
"is",
"not",
"None",
"]",
")"
]
| Run each line of outfile through fix_pin | [
"Run",
"each",
"line",
"of",
"outfile",
"through",
"fix_pin"
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/environment.py#L103-L115 |
peterdemin/pip-compile-multi | pipcompilemulti/environment.py | Environment.concatenated | def concatenated(fp):
"""Read lines from fp concatenating on backslash (\\)"""
line_parts = []
for line in fp:
line = line.strip()
if line.endswith('\\'):
line_parts.append(line[:-1].rstrip())
else:
line_parts.append(line)
yield ' '.join(line_parts)
line_parts[:] = []
if line_parts:
# Impossible:
raise RuntimeError("Compiled file ends with backslash \\") | python | def concatenated(fp):
line_parts = []
for line in fp:
line = line.strip()
if line.endswith('\\'):
line_parts.append(line[:-1].rstrip())
else:
line_parts.append(line)
yield ' '.join(line_parts)
line_parts[:] = []
if line_parts:
raise RuntimeError("Compiled file ends with backslash \\") | [
"def",
"concatenated",
"(",
"fp",
")",
":",
"line_parts",
"=",
"[",
"]",
"for",
"line",
"in",
"fp",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line",
".",
"endswith",
"(",
"'\\\\'",
")",
":",
"line_parts",
".",
"append",
"(",
"line",
"[",
":",
"-",
"1",
"]",
".",
"rstrip",
"(",
")",
")",
"else",
":",
"line_parts",
".",
"append",
"(",
"line",
")",
"yield",
"' '",
".",
"join",
"(",
"line_parts",
")",
"line_parts",
"[",
":",
"]",
"=",
"[",
"]",
"if",
"line_parts",
":",
"# Impossible:",
"raise",
"RuntimeError",
"(",
"\"Compiled file ends with backslash \\\\\"",
")"
]
| Read lines from fp concatenating on backslash (\\) | [
"Read",
"lines",
"from",
"fp",
"concatenating",
"on",
"backslash",
"(",
"\\\\",
")"
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/environment.py#L118-L131 |
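A quick check of the backslash joining with an in-memory file (io.StringIO stands in for the opened lockfile):

    import io

    from pipcompilemulti.environment import Environment

    fp = io.StringIO("six==1.11.0 \\\n    --hash=sha256:abc\nclick==6.7\n")
    print(list(Environment.concatenated(fp)))
    # -> ['six==1.11.0 --hash=sha256:abc', 'click==6.7']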
peterdemin/pip-compile-multi | pipcompilemulti/environment.py | Environment.fix_pin | def fix_pin(self, line):
"""
Fix dependency by removing post-releases from versions
and loosening constraints on internal packages.
Drop packages from ignore set
Also populate packages set
"""
dep = Dependency(line)
if dep.valid:
if dep.package in self.ignore:
ignored_version = self.ignore[dep.package]
if ignored_version is not None:
# ignored_version can be None to disable conflict detection
if dep.version and dep.version != ignored_version:
logger.error(
"Package %s was resolved to different "
"versions in different environments: %s and %s",
dep.package, dep.version, ignored_version,
)
raise RuntimeError(
"Please add constraints for the package "
"version listed above"
)
return None
self.packages[dep.package] = dep.version
if self.forbid_post or dep.is_compatible:
# Always drop post for internal packages
dep.drop_post()
return dep.serialize()
return line.strip() | python | def fix_pin(self, line):
dep = Dependency(line)
if dep.valid:
if dep.package in self.ignore:
ignored_version = self.ignore[dep.package]
if ignored_version is not None:
if dep.version and dep.version != ignored_version:
logger.error(
"Package %s was resolved to different "
"versions in different environments: %s and %s",
dep.package, dep.version, ignored_version,
)
raise RuntimeError(
"Please add constraints for the package "
"version listed above"
)
return None
self.packages[dep.package] = dep.version
if self.forbid_post or dep.is_compatible:
dep.drop_post()
return dep.serialize()
return line.strip() | [
"def",
"fix_pin",
"(",
"self",
",",
"line",
")",
":",
"dep",
"=",
"Dependency",
"(",
"line",
")",
"if",
"dep",
".",
"valid",
":",
"if",
"dep",
".",
"package",
"in",
"self",
".",
"ignore",
":",
"ignored_version",
"=",
"self",
".",
"ignore",
"[",
"dep",
".",
"package",
"]",
"if",
"ignored_version",
"is",
"not",
"None",
":",
"# ignored_version can be None to disable conflict detection",
"if",
"dep",
".",
"version",
"and",
"dep",
".",
"version",
"!=",
"ignored_version",
":",
"logger",
".",
"error",
"(",
"\"Package %s was resolved to different \"",
"\"versions in different environments: %s and %s\"",
",",
"dep",
".",
"package",
",",
"dep",
".",
"version",
",",
"ignored_version",
",",
")",
"raise",
"RuntimeError",
"(",
"\"Please add constraints for the package \"",
"\"version listed above\"",
")",
"return",
"None",
"self",
".",
"packages",
"[",
"dep",
".",
"package",
"]",
"=",
"dep",
".",
"version",
"if",
"self",
".",
"forbid_post",
"or",
"dep",
".",
"is_compatible",
":",
"# Always drop post for internal packages",
"dep",
".",
"drop_post",
"(",
")",
"return",
"dep",
".",
"serialize",
"(",
")",
"return",
"line",
".",
"strip",
"(",
")"
]
| Fix dependency by removing post-releases from versions
and loosening constraints on internal packages.
Drop packages from ignore set
Also populate packages set | [
"Fix",
"dependency",
"by",
"removing",
"post",
"-",
"releases",
"from",
"versions",
"and",
"loosing",
"constraints",
"on",
"internal",
"packages",
".",
"Drop",
"packages",
"from",
"ignore",
"set"
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/environment.py#L133-L163 |
peterdemin/pip-compile-multi | pipcompilemulti/environment.py | Environment.add_references | def add_references(self, other_names):
"""Add references to other_names in outfile"""
if not other_names:
# Skip on empty list
return
with open(self.outfile, 'rt') as fp:
header, body = self.split_header(fp)
with open(self.outfile, 'wt') as fp:
fp.writelines(header)
fp.writelines(
'-r {0}.{1}\n'.format(other_name, OPTIONS['out_ext'])
for other_name in sorted(other_names)
)
fp.writelines(body) | python | def add_references(self, other_names):
if not other_names:
return
with open(self.outfile, 'rt') as fp:
header, body = self.split_header(fp)
with open(self.outfile, 'wt') as fp:
fp.writelines(header)
fp.writelines(
'-r {0}.{1}\n'.format(other_name, OPTIONS['out_ext'])
for other_name in sorted(other_names)
)
fp.writelines(body) | [
"def",
"add_references",
"(",
"self",
",",
"other_names",
")",
":",
"if",
"not",
"other_names",
":",
"# Skip on empty list",
"return",
"with",
"open",
"(",
"self",
".",
"outfile",
",",
"'rt'",
")",
"as",
"fp",
":",
"header",
",",
"body",
"=",
"self",
".",
"split_header",
"(",
"fp",
")",
"with",
"open",
"(",
"self",
".",
"outfile",
",",
"'wt'",
")",
"as",
"fp",
":",
"fp",
".",
"writelines",
"(",
"header",
")",
"fp",
".",
"writelines",
"(",
"'-r {0}.{1}\\n'",
".",
"format",
"(",
"other_name",
",",
"OPTIONS",
"[",
"'out_ext'",
"]",
")",
"for",
"other_name",
"in",
"sorted",
"(",
"other_names",
")",
")",
"fp",
".",
"writelines",
"(",
"body",
")"
]
| Add references to other_names in outfile | [
"Add",
"references",
"to",
"other_names",
"in",
"outfile"
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/environment.py#L165-L178 |
peterdemin/pip-compile-multi | pipcompilemulti/environment.py | Environment.split_header | def split_header(fp):
"""
Read file pointer and return pair of lines lists:
first - header, second - the rest.
"""
body_start, header_ended = 0, False
lines = []
for line in fp:
if line.startswith('#') and not header_ended:
# Header text
body_start += 1
else:
header_ended = True
lines.append(line)
return lines[:body_start], lines[body_start:] | python | def split_header(fp):
body_start, header_ended = 0, False
lines = []
for line in fp:
if line.startswith('
body_start += 1
else:
header_ended = True
lines.append(line)
return lines[:body_start], lines[body_start:] | [
"def",
"split_header",
"(",
"fp",
")",
":",
"body_start",
",",
"header_ended",
"=",
"0",
",",
"False",
"lines",
"=",
"[",
"]",
"for",
"line",
"in",
"fp",
":",
"if",
"line",
".",
"startswith",
"(",
"'#'",
")",
"and",
"not",
"header_ended",
":",
"# Header text",
"body_start",
"+=",
"1",
"else",
":",
"header_ended",
"=",
"True",
"lines",
".",
"append",
"(",
"line",
")",
"return",
"lines",
"[",
":",
"body_start",
"]",
",",
"lines",
"[",
"body_start",
":",
"]"
]
| Read file pointer and return pair of lines lists:
first - header, second - the rest. | [
"Read",
"file",
"pointer",
"and",
"return",
"pair",
"of",
"lines",
"lists",
":",
"first",
"-",
"header",
"second",
"-",
"the",
"rest",
"."
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/environment.py#L181-L195 |
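A quick check with an in-memory file; only the leading run of comment lines counts as the header:

    import io

    from pipcompilemulti.environment import Environment

    fp = io.StringIO("# via pip-compile\nsix==1.11.0\n# trailing comment\n")
    header, body = Environment.split_header(fp)
    print(header)  # ['# via pip-compile\n']
    print(body)    # ['six==1.11.0\n', '# trailing comment\n']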
peterdemin/pip-compile-multi | pipcompilemulti/environment.py | Environment.replace_header | def replace_header(self, header_text):
"""Replace pip-compile header with custom text"""
with open(self.outfile, 'rt') as fp:
_, body = self.split_header(fp)
with open(self.outfile, 'wt') as fp:
fp.write(header_text)
fp.writelines(body) | python | def replace_header(self, header_text):
with open(self.outfile, 'rt') as fp:
_, body = self.split_header(fp)
with open(self.outfile, 'wt') as fp:
fp.write(header_text)
fp.writelines(body) | [
"def",
"replace_header",
"(",
"self",
",",
"header_text",
")",
":",
"with",
"open",
"(",
"self",
".",
"outfile",
",",
"'rt'",
")",
"as",
"fp",
":",
"_",
",",
"body",
"=",
"self",
".",
"split_header",
"(",
"fp",
")",
"with",
"open",
"(",
"self",
".",
"outfile",
",",
"'wt'",
")",
"as",
"fp",
":",
"fp",
".",
"write",
"(",
"header_text",
")",
"fp",
".",
"writelines",
"(",
"body",
")"
]
| Replace pip-compile header with custom text | [
"Replace",
"pip",
"-",
"compile",
"header",
"with",
"custom",
"text"
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/environment.py#L197-L203 |
peterdemin/pip-compile-multi | pipcompilemulti/discover.py | discover | def discover(glob_pattern):
"""
Find all files matching given glob_pattern,
parse them, and return list of environments:
>>> envs = discover("requirements/*.in")
>>> # print(envs)
>>> envs == [
... {'name': 'base', 'refs': set()},
... {'name': 'py27', 'refs': set()},
... {'name': 'test', 'refs': {'base'}},
... {'name': 'local', 'refs': {'test'}},
... {'name': 'local27', 'refs': {'test', 'py27'}},
... {'name': 'testwin', 'refs': {'test'}},
... ]
True
"""
in_paths = glob.glob(glob_pattern)
names = {
extract_env_name(path): path
for path in in_paths
}
return order_by_refs([
{'name': name, 'refs': Environment.parse_references(in_path)}
for name, in_path in names.items()
]) | python | def discover(glob_pattern):
in_paths = glob.glob(glob_pattern)
names = {
extract_env_name(path): path
for path in in_paths
}
return order_by_refs([
{'name': name, 'refs': Environment.parse_references(in_path)}
for name, in_path in names.items()
]) | [
"def",
"discover",
"(",
"glob_pattern",
")",
":",
"in_paths",
"=",
"glob",
".",
"glob",
"(",
"glob_pattern",
")",
"names",
"=",
"{",
"extract_env_name",
"(",
"path",
")",
":",
"path",
"for",
"path",
"in",
"in_paths",
"}",
"return",
"order_by_refs",
"(",
"[",
"{",
"'name'",
":",
"name",
",",
"'refs'",
":",
"Environment",
".",
"parse_references",
"(",
"in_path",
")",
"}",
"for",
"name",
",",
"in_path",
"in",
"names",
".",
"items",
"(",
")",
"]",
")"
]
| Find all files matching given glob_pattern,
parse them, and return list of environments:
>>> envs = discover("requirements/*.in")
>>> # print(envs)
>>> envs == [
... {'name': 'base', 'refs': set()},
... {'name': 'py27', 'refs': set()},
... {'name': 'test', 'refs': {'base'}},
... {'name': 'local', 'refs': {'test'}},
... {'name': 'local27', 'refs': {'test', 'py27'}},
... {'name': 'testwin', 'refs': {'test'}},
... ]
True | [
"Find",
"all",
"files",
"matching",
"given",
"glob_pattern",
"parse",
"them",
"and",
"return",
"list",
"of",
"environments",
":"
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/discover.py#L13-L38 |
peterdemin/pip-compile-multi | pipcompilemulti/discover.py | order_by_refs | def order_by_refs(envs):
"""
Return topologically sorted list of environments.
I.e. all referenced environments are placed before their references.
"""
topology = {
env['name']: set(env['refs'])
for env in envs
}
by_name = {
env['name']: env
for env in envs
}
return [
by_name[name]
for name in toposort_flatten(topology)
] | python | def order_by_refs(envs):
topology = {
env['name']: set(env['refs'])
for env in envs
}
by_name = {
env['name']: env
for env in envs
}
return [
by_name[name]
for name in toposort_flatten(topology)
] | [
"def",
"order_by_refs",
"(",
"envs",
")",
":",
"topology",
"=",
"{",
"env",
"[",
"'name'",
"]",
":",
"set",
"(",
"env",
"[",
"'refs'",
"]",
")",
"for",
"env",
"in",
"envs",
"}",
"by_name",
"=",
"{",
"env",
"[",
"'name'",
"]",
":",
"env",
"for",
"env",
"in",
"envs",
"}",
"return",
"[",
"by_name",
"[",
"name",
"]",
"for",
"name",
"in",
"toposort_flatten",
"(",
"topology",
")",
"]"
]
| Return topologically sorted list of environments.
I.e. all referenced environments are placed before their references. | [
"Return",
"topologicaly",
"sorted",
"list",
"of",
"environments",
".",
"I",
".",
"e",
".",
"all",
"referenced",
"environments",
"are",
"placed",
"before",
"their",
"references",
"."
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/discover.py#L46-L62 |
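A standalone sketch matching the discover() doctest above: referenced environments always come out before their referrers:

    from pipcompilemulti.discover import order_by_refs

    envs = [
        {'name': 'local', 'refs': {'test'}},
        {'name': 'test', 'refs': {'base'}},
        {'name': 'base', 'refs': set()},
    ]
    print([env['name'] for env in order_by_refs(envs)])
    # -> ['base', 'test', 'local']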
peterdemin/pip-compile-multi | pipcompilemulti/cli_v1.py | cli | def cli(ctx, compatible, forbid_post, generate_hashes, directory,
in_ext, out_ext, header, only_name, upgrade):
"""Recompile"""
logging.basicConfig(level=logging.DEBUG, format="%(message)s")
OPTIONS.update({
'compatible_patterns': compatible,
'forbid_post': set(forbid_post),
'add_hashes': set(generate_hashes),
'base_dir': directory,
'in_ext': in_ext,
'out_ext': out_ext,
'header_file': header or None,
'include_names': only_name,
'upgrade': upgrade,
})
if ctx.invoked_subcommand is None:
recompile() | python | def cli(ctx, compatible, forbid_post, generate_hashes, directory,
in_ext, out_ext, header, only_name, upgrade):
logging.basicConfig(level=logging.DEBUG, format="%(message)s")
OPTIONS.update({
'compatible_patterns': compatible,
'forbid_post': set(forbid_post),
'add_hashes': set(generate_hashes),
'base_dir': directory,
'in_ext': in_ext,
'out_ext': out_ext,
'header_file': header or None,
'include_names': only_name,
'upgrade': upgrade,
})
if ctx.invoked_subcommand is None:
recompile() | [
"def",
"cli",
"(",
"ctx",
",",
"compatible",
",",
"forbid_post",
",",
"generate_hashes",
",",
"directory",
",",
"in_ext",
",",
"out_ext",
",",
"header",
",",
"only_name",
",",
"upgrade",
")",
":",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"DEBUG",
",",
"format",
"=",
"\"%(message)s\"",
")",
"OPTIONS",
".",
"update",
"(",
"{",
"'compatible_patterns'",
":",
"compatible",
",",
"'forbid_post'",
":",
"set",
"(",
"forbid_post",
")",
",",
"'add_hashes'",
":",
"set",
"(",
"generate_hashes",
")",
",",
"'base_dir'",
":",
"directory",
",",
"'in_ext'",
":",
"in_ext",
",",
"'out_ext'",
":",
"out_ext",
",",
"'header_file'",
":",
"header",
"or",
"None",
",",
"'include_names'",
":",
"only_name",
",",
"'upgrade'",
":",
"upgrade",
",",
"}",
")",
"if",
"ctx",
".",
"invoked_subcommand",
"is",
"None",
":",
"recompile",
"(",
")"
]
| Recompile | [
"Recompile"
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/cli_v1.py#L38-L54 |
peterdemin/pip-compile-multi | pipcompilemulti/dependency.py | Dependency.serialize | def serialize(self):
"""
Render dependency back in string using:
~= if package is internal
== otherwise
"""
if self.is_vcs:
return self.without_editable(self.line).strip()
equal = '~=' if self.is_compatible else '=='
package_version = '{package}{equal}{version} '.format(
package=self.without_editable(self.package),
version=self.version,
equal=equal,
)
if self.hashes:
hashes = self.hashes.split()
lines = [package_version.strip()]
lines.extend(hashes)
if self.comment:
lines.append(self.comment)
return ' \\\n '.join(lines)
else:
return '{0}{1}'.format(
package_version.ljust(self.COMMENT_JUSTIFICATION),
self.comment,
).rstrip() | python | def serialize(self):
if self.is_vcs:
return self.without_editable(self.line).strip()
equal = '~=' if self.is_compatible else '=='
package_version = '{package}{equal}{version} '.format(
package=self.without_editable(self.package),
version=self.version,
equal=equal,
)
if self.hashes:
hashes = self.hashes.split()
lines = [package_version.strip()]
lines.extend(hashes)
if self.comment:
lines.append(self.comment)
return ' \\\n '.join(lines)
else:
return '{0}{1}'.format(
package_version.ljust(self.COMMENT_JUSTIFICATION),
self.comment,
).rstrip() | [
"def",
"serialize",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_vcs",
":",
"return",
"self",
".",
"without_editable",
"(",
"self",
".",
"line",
")",
".",
"strip",
"(",
")",
"equal",
"=",
"'~='",
"if",
"self",
".",
"is_compatible",
"else",
"'=='",
"package_version",
"=",
"'{package}{equal}{version} '",
".",
"format",
"(",
"package",
"=",
"self",
".",
"without_editable",
"(",
"self",
".",
"package",
")",
",",
"version",
"=",
"self",
".",
"version",
",",
"equal",
"=",
"equal",
",",
")",
"if",
"self",
".",
"hashes",
":",
"hashes",
"=",
"self",
".",
"hashes",
".",
"split",
"(",
")",
"lines",
"=",
"[",
"package_version",
".",
"strip",
"(",
")",
"]",
"lines",
".",
"extend",
"(",
"hashes",
")",
"if",
"self",
".",
"comment",
":",
"lines",
".",
"append",
"(",
"self",
".",
"comment",
")",
"return",
"' \\\\\\n '",
".",
"join",
"(",
"lines",
")",
"else",
":",
"return",
"'{0}{1}'",
".",
"format",
"(",
"package_version",
".",
"ljust",
"(",
"self",
".",
"COMMENT_JUSTIFICATION",
")",
",",
"self",
".",
"comment",
",",
")",
".",
"rstrip",
"(",
")"
]
| Render dependency back in string using:
~= if package is internal
== otherwise | [
"Render",
"dependency",
"back",
"in",
"string",
"using",
":",
"~",
"=",
"if",
"package",
"is",
"internal",
"==",
"otherwise"
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/dependency.py#L63-L88 |
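A hedged round-trip sketch, assuming Dependency's parser accepts a standard pip-compile pin line (the valid/package/version/comment attributes used by serialize imply as much):

    from pipcompilemulti.dependency import Dependency

    dep = Dependency('six==1.11.0                  # via astroid\n')
    if dep.valid:
        print(dep.serialize())
        # -> 'six==1.11.0' padded to the comment column, then '# via astroid'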
peterdemin/pip-compile-multi | pipcompilemulti/dependency.py | Dependency.is_compatible | def is_compatible(self):
"""Check if package name is matched by compatible_patterns"""
for pattern in OPTIONS['compatible_patterns']:
if fnmatch(self.package.lower(), pattern):
return True
return False | python | def is_compatible(self):
for pattern in OPTIONS['compatible_patterns']:
if fnmatch(self.package.lower(), pattern):
return True
return False | [
"def",
"is_compatible",
"(",
"self",
")",
":",
"for",
"pattern",
"in",
"OPTIONS",
"[",
"'compatible_patterns'",
"]",
":",
"if",
"fnmatch",
"(",
"self",
".",
"package",
".",
"lower",
"(",
")",
",",
"pattern",
")",
":",
"return",
"True",
"return",
"False"
]
| Check if package name is matched by compatible_patterns | [
"Check",
"if",
"package",
"name",
"is",
"matched",
"by",
"compatible_patterns"
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/dependency.py#L104-L109 |
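The match is plain fnmatch globbing against the lowercased package name; the same check standalone (the patterns are illustrative):

    from fnmatch import fnmatch

    compatible_patterns = ['django-*', 'six']
    print(any(fnmatch('Django-Haystack'.lower(), p) for p in compatible_patterns))  # True
    print(any(fnmatch('requests', p) for p in compatible_patterns))                 # False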
peterdemin/pip-compile-multi | pipcompilemulti/dependency.py | Dependency.drop_post | def drop_post(self):
"""Remove .postXXXX postfix from version"""
post_index = self.version.find('.post')
if post_index >= 0:
self.version = self.version[:post_index] | python | def drop_post(self):
post_index = self.version.find('.post')
if post_index >= 0:
self.version = self.version[:post_index] | [
"def",
"drop_post",
"(",
"self",
")",
":",
"post_index",
"=",
"self",
".",
"version",
".",
"find",
"(",
"'.post'",
")",
"if",
"post_index",
">=",
"0",
":",
"self",
".",
"version",
"=",
"self",
".",
"version",
"[",
":",
"post_index",
"]"
]
| Remove .postXXXX postfix from version | [
"Remove",
".",
"postXXXX",
"postfix",
"from",
"version"
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/dependency.py#L111-L115 |
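The trim is a plain slice at the first '.post'; the same logic standalone:

    version = '2.0.1.post3'
    post_index = version.find('.post')
    if post_index >= 0:
        version = version[:post_index]
    print(version)  # -> '2.0.1'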
peterdemin/pip-compile-multi | pipcompilemulti/verify.py | verify_environments | def verify_environments():
"""
For each environment verify hash comments and report failures.
If any failure occurred, exit with code 1.
"""
env_confs = discover(
os.path.join(
OPTIONS['base_dir'],
'*.' + OPTIONS['in_ext'],
)
)
success = True
for conf in env_confs:
env = Environment(name=conf['name'])
current_comment = generate_hash_comment(env.infile)
existing_comment = parse_hash_comment(env.outfile)
if current_comment == existing_comment:
logger.info("OK - %s was generated from %s.",
env.outfile, env.infile)
else:
logger.error("ERROR! %s was not regenerated after changes in %s.",
env.outfile, env.infile)
logger.error("Expecting: %s", current_comment.strip())
logger.error("Found: %s", existing_comment.strip())
success = False
return success | python | def verify_environments():
env_confs = discover(
os.path.join(
OPTIONS['base_dir'],
'*.' + OPTIONS['in_ext'],
)
)
success = True
for conf in env_confs:
env = Environment(name=conf['name'])
current_comment = generate_hash_comment(env.infile)
existing_comment = parse_hash_comment(env.outfile)
if current_comment == existing_comment:
logger.info("OK - %s was generated from %s.",
env.outfile, env.infile)
else:
logger.error("ERROR! %s was not regenerated after changes in %s.",
env.outfile, env.infile)
logger.error("Expecting: %s", current_comment.strip())
logger.error("Found: %s", existing_comment.strip())
success = False
return success | [
"def",
"verify_environments",
"(",
")",
":",
"env_confs",
"=",
"discover",
"(",
"os",
".",
"path",
".",
"join",
"(",
"OPTIONS",
"[",
"'base_dir'",
"]",
",",
"'*.'",
"+",
"OPTIONS",
"[",
"'in_ext'",
"]",
",",
")",
")",
"success",
"=",
"True",
"for",
"conf",
"in",
"env_confs",
":",
"env",
"=",
"Environment",
"(",
"name",
"=",
"conf",
"[",
"'name'",
"]",
")",
"current_comment",
"=",
"generate_hash_comment",
"(",
"env",
".",
"infile",
")",
"existing_comment",
"=",
"parse_hash_comment",
"(",
"env",
".",
"outfile",
")",
"if",
"current_comment",
"==",
"existing_comment",
":",
"logger",
".",
"info",
"(",
"\"OK - %s was generated from %s.\"",
",",
"env",
".",
"outfile",
",",
"env",
".",
"infile",
")",
"else",
":",
"logger",
".",
"error",
"(",
"\"ERROR! %s was not regenerated after changes in %s.\"",
",",
"env",
".",
"outfile",
",",
"env",
".",
"infile",
")",
"logger",
".",
"error",
"(",
"\"Expecting: %s\"",
",",
"current_comment",
".",
"strip",
"(",
")",
")",
"logger",
".",
"error",
"(",
"\"Found: %s\"",
",",
"existing_comment",
".",
"strip",
"(",
")",
")",
"success",
"=",
"False",
"return",
"success"
]
| For each environment verify hash comments and report failures.
If any failure occurred, exit with code 1. | [
"For",
"each",
"environment",
"verify",
"hash",
"comments",
"and",
"report",
"failures",
".",
"If",
"any",
"failure",
"occured",
"exit",
"with",
"code",
"1",
"."
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/verify.py#L15-L40 |
peterdemin/pip-compile-multi | pipcompilemulti/verify.py | generate_hash_comment | def generate_hash_comment(file_path):
"""
Read file with given file_path and return string of format
# SHA1:da39a3ee5e6b4b0d3255bfef95601890afd80709
which is hex representation of SHA1 file content hash
"""
with open(file_path, 'rb') as fp:
hexdigest = hashlib.sha1(fp.read().strip()).hexdigest()
return "# SHA1:{0}\n".format(hexdigest) | python | def generate_hash_comment(file_path):
with open(file_path, 'rb') as fp:
hexdigest = hashlib.sha1(fp.read().strip()).hexdigest()
return " | [
"def",
"generate_hash_comment",
"(",
"file_path",
")",
":",
"with",
"open",
"(",
"file_path",
",",
"'rb'",
")",
"as",
"fp",
":",
"hexdigest",
"=",
"hashlib",
".",
"sha1",
"(",
"fp",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
")",
".",
"hexdigest",
"(",
")",
"return",
"\"# SHA1:{0}\\n\"",
".",
"format",
"(",
"hexdigest",
")"
]
| Read file with given file_path and return string of format
# SHA1:da39a3ee5e6b4b0d3255bfef95601890afd80709
which is hex representation of SHA1 file content hash | [
"Read",
"file",
"with",
"given",
"file_path",
"and",
"return",
"string",
"of",
"format"
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/verify.py#L43-L53 |
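Hedged sketch: the same "# SHA1:..." comment format applied to in-memory bytes instead of a file path; the helper name is invented.
import hashlib
def hash_comment_for_bytes(payload):
    # Mirrors generate_hash_comment: SHA1 over stripped content, rendered as a comment line.
    return "# SHA1:{0}\n".format(hashlib.sha1(payload.strip()).hexdigest())
# The empty payload yields the well-known SHA1 of the empty string, matching
# the example hash quoted in the docstring above.
assert hash_comment_for_bytes(b'') == '# SHA1:da39a3ee5e6b4b0d3255bfef95601890afd80709\n'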
peterdemin/pip-compile-multi | pipcompilemulti/verify.py | parse_hash_comment | def parse_hash_comment(file_path):
"""
Read file with given file_path line by line,
return the first line that starts with "# SHA1:", like this:
# SHA1:da39a3ee5e6b4b0d3255bfef95601890afd80709
"""
with open(file_path) as fp:
for line in fp:
if line.startswith("# SHA1:"):
return line
return None | python | def parse_hash_comment(file_path):
with open(file_path) as fp:
for line in fp:
if line.startswith("
return line
return None | [
"def",
"parse_hash_comment",
"(",
"file_path",
")",
":",
"with",
"open",
"(",
"file_path",
")",
"as",
"fp",
":",
"for",
"line",
"in",
"fp",
":",
"if",
"line",
".",
"startswith",
"(",
"\"# SHA1:\"",
")",
":",
"return",
"line",
"return",
"None"
]
| Read file with given file_path line by line,
return the first line that starts with "# SHA1:", like this:
# SHA1:da39a3ee5e6b4b0d3255bfef95601890afd80709 | [
"Read",
"file",
"with",
"given",
"file_path",
"line",
"by",
"line",
"return",
"the",
"first",
"line",
"that",
"starts",
"with",
"#",
"SHA1",
":",
"like",
"this",
":"
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/verify.py#L56-L67 |
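How the two helpers above combine inside verify_environments, as a short sketch; the file names are illustrative stand-ins for Environment.infile/outfile.
current = generate_hash_comment('requirements/base.in')   # hash of the source .in file
recorded = parse_hash_comment('requirements/base.txt')    # hash comment stored in the compiled file
if current == recorded:
    print('base.txt is up to date with base.in')
else:
    print('base.txt must be recompiled')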
peterdemin/pip-compile-multi | pipcompilemulti/config.py | filter_sections | def filter_sections(sections):
"""Filter through pairs (name, options)
leaving only those that match runtime.
If no requirements sections found, return None.
If some sections found, but none matches current runtime, return empty list.
"""
if not sections:
return None
jobs = []
matchers = python_version_matchers()
for name, options in sections:
target_version = options.pop('python', None)
if target_version in matchers:
jobs.append((name, options))
return jobs | python | def filter_sections(sections):
if not sections:
return None
jobs = []
matchers = python_version_matchers()
for name, options in sections:
target_version = options.pop('python', None)
if target_version in matchers:
jobs.append((name, options))
return jobs | [
"def",
"filter_sections",
"(",
"sections",
")",
":",
"if",
"not",
"sections",
":",
"return",
"None",
"jobs",
"=",
"[",
"]",
"matchers",
"=",
"python_version_matchers",
"(",
")",
"for",
"name",
",",
"options",
"in",
"sections",
":",
"target_version",
"=",
"options",
".",
"pop",
"(",
"'python'",
",",
"None",
")",
"if",
"target_version",
"in",
"matchers",
":",
"jobs",
".",
"append",
"(",
"(",
"name",
",",
"options",
")",
")",
"return",
"jobs"
]
| Filter through pairs (name, options)
leaving only those that match runtime.
If no requirements sections found, return None.
If some sections found, but none matches current runtime, return empty list. | [
"Filter",
"through",
"pairs",
"(",
"name",
"options",
")",
"leaving",
"only",
"those",
"that",
"match",
"runtime",
"."
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/config.py#L18-L33 |
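An illustrative run of the filter on a Python 3.8 interpreter; the section names and options are invented.
sections = [
    ('requirements', {'python': '3.8', 'base_dir': 'requirements'}),
    ('requirements:py27', {'python': '2.7'}),
    ('requirements:any', {}),  # no 'python' option -> matches every runtime
]
# python_version_matchers() on 3.8 contains {'3', '38', '3.8', None}, so the
# 2.7 section is dropped and matched sections lose their popped 'python' key:
filter_sections(sections)
# -> [('requirements', {'base_dir': 'requirements'}), ('requirements:any', {})]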
peterdemin/pip-compile-multi | pipcompilemulti/config.py | read_sections | def read_sections():
"""Read ini files and return list of pairs (name, options)"""
config = configparser.ConfigParser()
config.read(('requirements.ini', 'setup.cfg', 'tox.ini'))
return [
(
name,
{
key: parse_value(key, config[name][key])
for key in config[name]
}
)
for name in config.sections()
if 'requirements' in name
] | python | def read_sections():
config = configparser.ConfigParser()
config.read(('requirements.ini', 'setup.cfg', 'tox.ini'))
return [
(
name,
{
key: parse_value(key, config[name][key])
for key in config[name]
}
)
for name in config.sections()
if 'requirements' in name
] | [
"def",
"read_sections",
"(",
")",
":",
"config",
"=",
"configparser",
".",
"ConfigParser",
"(",
")",
"config",
".",
"read",
"(",
"(",
"'requirements.ini'",
",",
"'setup.cfg'",
",",
"'tox.ini'",
")",
")",
"return",
"[",
"(",
"name",
",",
"{",
"key",
":",
"parse_value",
"(",
"key",
",",
"config",
"[",
"name",
"]",
"[",
"key",
"]",
")",
"for",
"key",
"in",
"config",
"[",
"name",
"]",
"}",
")",
"for",
"name",
"in",
"config",
".",
"sections",
"(",
")",
"if",
"'requirements'",
"in",
"name",
"]"
]
| Read ini files and return list of pairs (name, options) | [
"Read",
"ini",
"files",
"and",
"return",
"list",
"of",
"pairs",
"(",
"name",
"options",
")"
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/config.py#L36-L50 |
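A hedged, self-contained illustration of what read_sections picks up -- only section names containing 'requirements' survive; the config text is invented.
import configparser
config = configparser.ConfigParser()
config.read_string('''
[requirements]
base_dir = requirements

[requirements:py3]
python = 3
add_hashes = base, test

[flake8]
max-line-length = 99
''')
# [flake8] is skipped because its name lacks 'requirements':
print([name for name in config.sections() if 'requirements' in name])
# -> ['requirements', 'requirements:py3']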
peterdemin/pip-compile-multi | pipcompilemulti/config.py | parse_value | def parse_value(key, value):
"""Parse value as comma-delimited list if default value for it is list"""
default = OPTIONS.get(key)
if isinstance(default, collections.abc.Iterable):
if not isinstance(default, six.string_types):
return [item.strip()
for item in value.split(',')]
return value | python | def parse_value(key, value):
default = OPTIONS.get(key)
if isinstance(default, collections.abc.Iterable):
if not isinstance(default, six.string_types):
return [item.strip()
for item in value.split(',')]
return value | [
"def",
"parse_value",
"(",
"key",
",",
"value",
")",
":",
"default",
"=",
"OPTIONS",
".",
"get",
"(",
"key",
")",
"if",
"isinstance",
"(",
"default",
",",
"collections",
".",
"Iterable",
")",
":",
"if",
"not",
"isinstance",
"(",
"default",
",",
"six",
".",
"string_types",
")",
":",
"return",
"[",
"item",
".",
"strip",
"(",
")",
"for",
"item",
"in",
"value",
".",
"split",
"(",
"','",
")",
"]",
"return",
"value"
]
| Parse value as comma-delimited list if default value for it is list | [
"Parse",
"value",
"as",
"comma",
"-",
"delimited",
"list",
"if",
"default",
"value",
"for",
"it",
"is",
"list"
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/config.py#L53-L60 |
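Hedged examples of the list-vs-scalar split, assuming OPTIONS defaults 'add_hashes' to a list and 'base_dir' to a string (as the surrounding records suggest).
parse_value('add_hashes', 'base, test')  # -> ['base', 'test']; the default is a list
parse_value('base_dir', 'reqs')          # -> 'reqs'; the default is a string
parse_value('no_such_key', 'x, y')       # -> 'x, y'; no default, value passed through unchanged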
peterdemin/pip-compile-multi | pipcompilemulti/config.py | python_version_matchers | def python_version_matchers():
"""Return set of string representations of current python version"""
version = sys.version_info
patterns = [
"{0}",
"{0}{1}",
"{0}.{1}",
]
matchers = [
pattern.format(*version)
for pattern in patterns
] + [None]
return set(matchers) | python | def python_version_matchers():
version = sys.version_info
patterns = [
"{0}",
"{0}{1}",
"{0}.{1}",
]
matchers = [
pattern.format(*version)
for pattern in patterns
] + [None]
return set(matchers) | [
"def",
"python_version_matchers",
"(",
")",
":",
"version",
"=",
"sys",
".",
"version_info",
"patterns",
"=",
"[",
"\"{0}\"",
",",
"\"{0}{1}\"",
",",
"\"{0}.{1}\"",
",",
"]",
"matchers",
"=",
"[",
"pattern",
".",
"format",
"(",
"*",
"version",
")",
"for",
"pattern",
"in",
"patterns",
"]",
"+",
"[",
"None",
"]",
"return",
"set",
"(",
"matchers",
")"
]
| Return set of string representations of current python version | [
"Return",
"set",
"of",
"string",
"representations",
"of",
"current",
"python",
"version"
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/config.py#L63-L75 |
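A quick check of the matcher set on the running interpreter; only the None member is version-independent.
import sys
matchers = python_version_matchers()
assert None in matchers  # sections without a 'python' option always match
assert '{0}.{1}'.format(*sys.version_info) in matchers  # e.g. '3.8'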
peterdemin/pip-compile-multi | pipcompilemulti/cli_v2.py | verify | def verify(ctx):
"""Upgrade locked dependency versions"""
oks = run_configurations(
skipper(verify_environments),
read_sections,
)
ctx.exit(0
if False not in oks
else 1) | python | def verify(ctx):
oks = run_configurations(
skipper(verify_environments),
read_sections,
)
ctx.exit(0
if False not in oks
else 1) | [
"def",
"verify",
"(",
"ctx",
")",
":",
"oks",
"=",
"run_configurations",
"(",
"skipper",
"(",
"verify_environments",
")",
",",
"read_sections",
",",
")",
"ctx",
".",
"exit",
"(",
"0",
"if",
"False",
"not",
"in",
"oks",
"else",
"1",
")"
]
| Upgrade locked dependency versions | [
"Upgrade",
"locked",
"dependency",
"versions"
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/cli_v2.py#L38-L46 |
peterdemin/pip-compile-multi | pipcompilemulti/cli_v2.py | skipper | def skipper(func):
"""Decorator that memorizes base_dir, in_ext and out_ext from OPTIONS
and skips execution for duplicates."""
@functools.wraps(func)
def wrapped():
"""Dummy docstring to make pylint happy."""
key = (OPTIONS['base_dir'], OPTIONS['in_ext'], OPTIONS['out_ext'])
if key not in seen:
seen[key] = func()
return seen[key]
seen = {}
return wrapped | python | def skipper(func):
@functools.wraps(func)
def wrapped():
key = (OPTIONS['base_dir'], OPTIONS['in_ext'], OPTIONS['out_ext'])
if key not in seen:
seen[key] = func()
return seen[key]
seen = {}
return wrapped | [
"def",
"skipper",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"wrapped",
"(",
")",
":",
"\"\"\"Dummy docstring to make pylint happy.\"\"\"",
"key",
"=",
"(",
"OPTIONS",
"[",
"'base_dir'",
"]",
",",
"OPTIONS",
"[",
"'in_ext'",
"]",
",",
"OPTIONS",
"[",
"'out_ext'",
"]",
")",
"if",
"key",
"not",
"in",
"seen",
":",
"seen",
"[",
"key",
"]",
"=",
"func",
"(",
")",
"return",
"seen",
"[",
"key",
"]",
"seen",
"=",
"{",
"}",
"return",
"wrapped"
]
| Decorator that memorizes base_dir, in_ext and out_ext from OPTIONS
and skips execution for duplicates. | [
"Decorator",
"that",
"memorizes",
"base_dir",
"in_ext",
"and",
"out_ext",
"from",
"OPTIONS",
"and",
"skips",
"execution",
"for",
"duplicates",
"."
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/cli_v2.py#L49-L60 |
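A hedged demonstration of the memoization key; OPTIONS is assumed to be the module-level options dict used throughout these records.
calls = []

@skipper
def expensive():
    calls.append(1)
    return True

OPTIONS.update(base_dir='requirements', in_ext='in', out_ext='txt')
expensive(); expensive()       # the second call reuses the cached result
assert len(calls) == 1
OPTIONS['out_ext'] = 'lock'    # a different key -> the function runs again
expensive()
assert len(calls) == 2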
peterdemin/pip-compile-multi | pipcompilemulti/cli_v2.py | run_configurations | def run_configurations(callback, sections_reader):
"""Parse configurations and execute callback for matching."""
base = dict(OPTIONS)
sections = sections_reader()
if sections is None:
logger.info("Configuration not found in .ini files. "
"Running with default settings")
recompile()
elif sections == []:
logger.info("Configuration does not match current runtime. "
"Exiting")
results = []
for section, options in sections:
OPTIONS.clear()
OPTIONS.update(base)
OPTIONS.update(options)
logger.debug("Running configuration from section \"%s\". OPTIONS: %r",
section, OPTIONS)
results.append(callback())
return results | python | def run_configurations(callback, sections_reader):
base = dict(OPTIONS)
sections = sections_reader()
if sections is None:
logger.info("Configuration not found in .ini files. "
"Running with default settings")
recompile()
elif sections == []:
logger.info("Configuration does not match current runtime. "
"Exiting")
results = []
for section, options in sections:
OPTIONS.clear()
OPTIONS.update(base)
OPTIONS.update(options)
logger.debug("Running configuration from section \"%s\". OPTIONS: %r",
section, OPTIONS)
results.append(callback())
return results | [
"def",
"run_configurations",
"(",
"callback",
",",
"sections_reader",
")",
":",
"base",
"=",
"dict",
"(",
"OPTIONS",
")",
"sections",
"=",
"sections_reader",
"(",
")",
"if",
"sections",
"is",
"None",
":",
"logger",
".",
"info",
"(",
"\"Configuration not found in .ini files. \"",
"\"Running with default settings\"",
")",
"recompile",
"(",
")",
"elif",
"sections",
"==",
"[",
"]",
":",
"logger",
".",
"info",
"(",
"\"Configuration does not match current runtime. \"",
"\"Exiting\"",
")",
"results",
"=",
"[",
"]",
"for",
"section",
",",
"options",
"in",
"sections",
":",
"OPTIONS",
".",
"clear",
"(",
")",
"OPTIONS",
".",
"update",
"(",
"base",
")",
"OPTIONS",
".",
"update",
"(",
"options",
")",
"logger",
".",
"debug",
"(",
"\"Running configuration from section \\\"%s\\\". OPTIONS: %r\"",
",",
"section",
",",
"OPTIONS",
")",
"results",
".",
"append",
"(",
"callback",
"(",
")",
")",
"return",
"results"
]
| Parse configurations and execute callback for matching. | [
"Parse",
"configurations",
"and",
"execute",
"callback",
"for",
"matching",
"."
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/cli_v2.py#L63-L82 |
peterdemin/pip-compile-multi | pipcompilemulti/actions.py | recompile | def recompile():
"""
Compile requirements files for all environments.
"""
pinned_packages = {}
env_confs = discover(
os.path.join(
OPTIONS['base_dir'],
'*.' + OPTIONS['in_ext'],
),
)
if OPTIONS['header_file']:
with open(OPTIONS['header_file']) as fp:
base_header_text = fp.read()
else:
base_header_text = DEFAULT_HEADER
hashed_by_reference = set()
for name in OPTIONS['add_hashes']:
hashed_by_reference.update(
reference_cluster(env_confs, name)
)
included_and_refs = set(OPTIONS['include_names'])
for name in set(included_and_refs):
included_and_refs.update(
recursive_refs(env_confs, name)
)
for conf in env_confs:
if included_and_refs:
if conf['name'] not in included_and_refs:
# Skip envs that are not included or referenced by included:
continue
rrefs = recursive_refs(env_confs, conf['name'])
add_hashes = conf['name'] in hashed_by_reference
env = Environment(
name=conf['name'],
ignore=merged_packages(pinned_packages, rrefs),
forbid_post=conf['name'] in OPTIONS['forbid_post'],
add_hashes=add_hashes,
)
logger.info("Locking %s to %s. References: %r",
env.infile, env.outfile, sorted(rrefs))
env.create_lockfile()
header_text = generate_hash_comment(env.infile) + base_header_text
env.replace_header(header_text)
env.add_references(conf['refs'])
pinned_packages[conf['name']] = env.packages | python | def recompile():
pinned_packages = {}
env_confs = discover(
os.path.join(
OPTIONS['base_dir'],
'*.' + OPTIONS['in_ext'],
),
)
if OPTIONS['header_file']:
with open(OPTIONS['header_file']) as fp:
base_header_text = fp.read()
else:
base_header_text = DEFAULT_HEADER
hashed_by_reference = set()
for name in OPTIONS['add_hashes']:
hashed_by_reference.update(
reference_cluster(env_confs, name)
)
included_and_refs = set(OPTIONS['include_names'])
for name in set(included_and_refs):
included_and_refs.update(
recursive_refs(env_confs, name)
)
for conf in env_confs:
if included_and_refs:
if conf['name'] not in included_and_refs:
continue
rrefs = recursive_refs(env_confs, conf['name'])
add_hashes = conf['name'] in hashed_by_reference
env = Environment(
name=conf['name'],
ignore=merged_packages(pinned_packages, rrefs),
forbid_post=conf['name'] in OPTIONS['forbid_post'],
add_hashes=add_hashes,
)
logger.info("Locking %s to %s. References: %r",
env.infile, env.outfile, sorted(rrefs))
env.create_lockfile()
header_text = generate_hash_comment(env.infile) + base_header_text
env.replace_header(header_text)
env.add_references(conf['refs'])
pinned_packages[conf['name']] = env.packages | [
"def",
"recompile",
"(",
")",
":",
"pinned_packages",
"=",
"{",
"}",
"env_confs",
"=",
"discover",
"(",
"os",
".",
"path",
".",
"join",
"(",
"OPTIONS",
"[",
"'base_dir'",
"]",
",",
"'*.'",
"+",
"OPTIONS",
"[",
"'in_ext'",
"]",
",",
")",
",",
")",
"if",
"OPTIONS",
"[",
"'header_file'",
"]",
":",
"with",
"open",
"(",
"OPTIONS",
"[",
"'header_file'",
"]",
")",
"as",
"fp",
":",
"base_header_text",
"=",
"fp",
".",
"read",
"(",
")",
"else",
":",
"base_header_text",
"=",
"DEFAULT_HEADER",
"hashed_by_reference",
"=",
"set",
"(",
")",
"for",
"name",
"in",
"OPTIONS",
"[",
"'add_hashes'",
"]",
":",
"hashed_by_reference",
".",
"update",
"(",
"reference_cluster",
"(",
"env_confs",
",",
"name",
")",
")",
"included_and_refs",
"=",
"set",
"(",
"OPTIONS",
"[",
"'include_names'",
"]",
")",
"for",
"name",
"in",
"set",
"(",
"included_and_refs",
")",
":",
"included_and_refs",
".",
"update",
"(",
"recursive_refs",
"(",
"env_confs",
",",
"name",
")",
")",
"for",
"conf",
"in",
"env_confs",
":",
"if",
"included_and_refs",
":",
"if",
"conf",
"[",
"'name'",
"]",
"not",
"in",
"included_and_refs",
":",
"# Skip envs that are not included or referenced by included:",
"continue",
"rrefs",
"=",
"recursive_refs",
"(",
"env_confs",
",",
"conf",
"[",
"'name'",
"]",
")",
"add_hashes",
"=",
"conf",
"[",
"'name'",
"]",
"in",
"hashed_by_reference",
"env",
"=",
"Environment",
"(",
"name",
"=",
"conf",
"[",
"'name'",
"]",
",",
"ignore",
"=",
"merged_packages",
"(",
"pinned_packages",
",",
"rrefs",
")",
",",
"forbid_post",
"=",
"conf",
"[",
"'name'",
"]",
"in",
"OPTIONS",
"[",
"'forbid_post'",
"]",
",",
"add_hashes",
"=",
"add_hashes",
",",
")",
"logger",
".",
"info",
"(",
"\"Locking %s to %s. References: %r\"",
",",
"env",
".",
"infile",
",",
"env",
".",
"outfile",
",",
"sorted",
"(",
"rrefs",
")",
")",
"env",
".",
"create_lockfile",
"(",
")",
"header_text",
"=",
"generate_hash_comment",
"(",
"env",
".",
"infile",
")",
"+",
"base_header_text",
"env",
".",
"replace_header",
"(",
"header_text",
")",
"env",
".",
"add_references",
"(",
"conf",
"[",
"'refs'",
"]",
")",
"pinned_packages",
"[",
"conf",
"[",
"'name'",
"]",
"]",
"=",
"env",
".",
"packages"
]
| Compile requirements files for all environments. | [
"Compile",
"requirements",
"files",
"for",
"all",
"environments",
"."
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/actions.py#L17-L62 |
peterdemin/pip-compile-multi | pipcompilemulti/actions.py | merged_packages | def merged_packages(env_packages, names):
"""
Return union set of environment packages with given names
>>> sorted(merged_packages(
... {
... 'a': {'x': 1, 'y': 2},
... 'b': {'y': 2, 'z': 3},
... 'c': {'z': 3, 'w': 4}
... },
... ['a', 'b']
... ).items())
[('x', 1), ('y', 2), ('z', 3)]
"""
combined_packages = sorted(itertools.chain.from_iterable(
env_packages[name].items()
for name in names
))
result = {}
errors = set()
for name, version in combined_packages:
if name in result:
if result[name] != version:
errors.add((name, version, result[name]))
else:
result[name] = version
if errors:
for error in sorted(errors):
logger.error(
"Package %s was resolved to different "
"versions in different environments: %s and %s",
error[0], error[1], error[2],
)
raise RuntimeError(
"Please add constraints for the package version listed above"
)
return result | python | def merged_packages(env_packages, names):
combined_packages = sorted(itertools.chain.from_iterable(
env_packages[name].items()
for name in names
))
result = {}
errors = set()
for name, version in combined_packages:
if name in result:
if result[name] != version:
errors.add((name, version, result[name]))
else:
result[name] = version
if errors:
for error in sorted(errors):
logger.error(
"Package %s was resolved to different "
"versions in different environments: %s and %s",
error[0], error[1], error[2],
)
raise RuntimeError(
"Please add constraints for the package version listed above"
)
return result | [
"def",
"merged_packages",
"(",
"env_packages",
",",
"names",
")",
":",
"combined_packages",
"=",
"sorted",
"(",
"itertools",
".",
"chain",
".",
"from_iterable",
"(",
"env_packages",
"[",
"name",
"]",
".",
"items",
"(",
")",
"for",
"name",
"in",
"names",
")",
")",
"result",
"=",
"{",
"}",
"errors",
"=",
"set",
"(",
")",
"for",
"name",
",",
"version",
"in",
"combined_packages",
":",
"if",
"name",
"in",
"result",
":",
"if",
"result",
"[",
"name",
"]",
"!=",
"version",
":",
"errors",
".",
"add",
"(",
"(",
"name",
",",
"version",
",",
"result",
"[",
"name",
"]",
")",
")",
"else",
":",
"result",
"[",
"name",
"]",
"=",
"version",
"if",
"errors",
":",
"for",
"error",
"in",
"sorted",
"(",
"errors",
")",
":",
"logger",
".",
"error",
"(",
"\"Package %s was resolved to different \"",
"\"versions in different environments: %s and %s\"",
",",
"error",
"[",
"0",
"]",
",",
"error",
"[",
"1",
"]",
",",
"error",
"[",
"2",
"]",
",",
")",
"raise",
"RuntimeError",
"(",
"\"Please add constraints for the package version listed above\"",
")",
"return",
"result"
]
| Return union set of environment packages with given names
>>> sorted(merged_packages(
... {
... 'a': {'x': 1, 'y': 2},
... 'b': {'y': 2, 'z': 3},
... 'c': {'z': 3, 'w': 4}
... },
... ['a', 'b']
... ).items())
[('x', 1), ('y', 2), ('z', 3)] | [
"Return",
"union",
"set",
"of",
"environment",
"packages",
"with",
"given",
"names"
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/actions.py#L65-L101 |
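The conflict path that the doctest above does not show, with invented package versions.
try:
    merged_packages(
        {'dev': {'six': '1.11.0'}, 'test': {'six': '1.12.0'}},
        ['dev', 'test'],
    )
except RuntimeError as exc:
    # 'six' resolved to different versions in different environments, so each
    # conflict is logged and a RuntimeError is raised:
    print(exc)  # Please add constraints for the package version listed above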
peterdemin/pip-compile-multi | pipcompilemulti/actions.py | recursive_refs | def recursive_refs(envs, name):
"""
Return set of recursive refs for given env name
>>> local_refs = sorted(recursive_refs([
... {'name': 'base', 'refs': []},
... {'name': 'test', 'refs': ['base']},
... {'name': 'local', 'refs': ['test']},
... ], 'local'))
>>> local_refs == ['base', 'test']
True
"""
refs_by_name = {
env['name']: set(env['refs'])
for env in envs
}
refs = refs_by_name[name]
if refs:
indirect_refs = set(itertools.chain.from_iterable([
recursive_refs(envs, ref)
for ref in refs
]))
else:
indirect_refs = set()
return set.union(refs, indirect_refs) | python | def recursive_refs(envs, name):
refs_by_name = {
env['name']: set(env['refs'])
for env in envs
}
refs = refs_by_name[name]
if refs:
indirect_refs = set(itertools.chain.from_iterable([
recursive_refs(envs, ref)
for ref in refs
]))
else:
indirect_refs = set()
return set.union(refs, indirect_refs) | [
"def",
"recursive_refs",
"(",
"envs",
",",
"name",
")",
":",
"refs_by_name",
"=",
"{",
"env",
"[",
"'name'",
"]",
":",
"set",
"(",
"env",
"[",
"'refs'",
"]",
")",
"for",
"env",
"in",
"envs",
"}",
"refs",
"=",
"refs_by_name",
"[",
"name",
"]",
"if",
"refs",
":",
"indirect_refs",
"=",
"set",
"(",
"itertools",
".",
"chain",
".",
"from_iterable",
"(",
"[",
"recursive_refs",
"(",
"envs",
",",
"ref",
")",
"for",
"ref",
"in",
"refs",
"]",
")",
")",
"else",
":",
"indirect_refs",
"=",
"set",
"(",
")",
"return",
"set",
".",
"union",
"(",
"refs",
",",
"indirect_refs",
")"
]
| Return set of recursive refs for given env name
>>> local_refs = sorted(recursive_refs([
... {'name': 'base', 'refs': []},
... {'name': 'test', 'refs': ['base']},
... {'name': 'local', 'refs': ['test']},
... ], 'local'))
>>> local_refs == ['base', 'test']
True | [
"Return",
"set",
"of",
"recursive",
"refs",
"for",
"given",
"env",
"name"
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/actions.py#L104-L128 |
peterdemin/pip-compile-multi | pipcompilemulti/actions.py | reference_cluster | def reference_cluster(envs, name):
"""
Return set of all env names referencing or
referenced by given name.
>>> cluster = sorted(reference_cluster([
... {'name': 'base', 'refs': []},
... {'name': 'test', 'refs': ['base']},
... {'name': 'local', 'refs': ['test']},
... ], 'test'))
>>> cluster == ['base', 'local', 'test']
True
"""
edges = [
set([env['name'], ref])
for env in envs
for ref in env['refs']
]
prev, cluster = set(), set([name])
while prev != cluster:
# While cluster grows
prev = set(cluster)
to_visit = []
for edge in edges:
if cluster & edge:
# Add adjacent nodes:
cluster |= edge
else:
# Leave only edges that are out
# of cluster for the next round:
to_visit.append(edge)
edges = to_visit
return cluster | python | def reference_cluster(envs, name):
edges = [
set([env['name'], ref])
for env in envs
for ref in env['refs']
]
prev, cluster = set(), set([name])
while prev != cluster:
prev = set(cluster)
to_visit = []
for edge in edges:
if cluster & edge:
cluster |= edge
else:
to_visit.append(edge)
edges = to_visit
return cluster | [
"def",
"reference_cluster",
"(",
"envs",
",",
"name",
")",
":",
"edges",
"=",
"[",
"set",
"(",
"[",
"env",
"[",
"'name'",
"]",
",",
"ref",
"]",
")",
"for",
"env",
"in",
"envs",
"for",
"ref",
"in",
"env",
"[",
"'refs'",
"]",
"]",
"prev",
",",
"cluster",
"=",
"set",
"(",
")",
",",
"set",
"(",
"[",
"name",
"]",
")",
"while",
"prev",
"!=",
"cluster",
":",
"# While cluster grows",
"prev",
"=",
"set",
"(",
"cluster",
")",
"to_visit",
"=",
"[",
"]",
"for",
"edge",
"in",
"edges",
":",
"if",
"cluster",
"&",
"edge",
":",
"# Add adjacent nodes:",
"cluster",
"|=",
"edge",
"else",
":",
"# Leave only edges that are out",
"# of cluster for the next round:",
"to_visit",
".",
"append",
"(",
"edge",
")",
"edges",
"=",
"to_visit",
"return",
"cluster"
]
| Return set of all env names referencing or
referenced by given name.
>>> cluster = sorted(reference_cluster([
... {'name': 'base', 'refs': []},
... {'name': 'test', 'refs': ['base']},
... {'name': 'local', 'refs': ['test']},
... ], 'test'))
>>> cluster == ['base', 'local', 'test']
True | [
"Return",
"set",
"of",
"all",
"env",
"names",
"referencing",
"or",
"referenced",
"by",
"given",
"name",
"."
]
| train | https://github.com/peterdemin/pip-compile-multi/blob/7bd1968c424dd7ce3236885b4b3e4e28523e6915/pipcompilemulti/actions.py#L131-L163 |
oceanprotocol/squid-py | squid_py/http_requests/requests_session.py | get_requests_session | def get_requests_session():
"""
Set connection pool maxsize and block value to avoid `connection pool full` warnings.
:return: requests session
"""
session = requests.sessions.Session()
session.mount('http://', HTTPAdapter(pool_connections=25, pool_maxsize=25, pool_block=True))
session.mount('https://', HTTPAdapter(pool_connections=25, pool_maxsize=25, pool_block=True))
return session | python | def get_requests_session():
session = requests.sessions.Session()
session.mount('http://', HTTPAdapter(pool_connections=25, pool_maxsize=25, pool_block=True))
session.mount('https://', HTTPAdapter(pool_connections=25, pool_maxsize=25, pool_block=True))
return session | [
"def",
"get_requests_session",
"(",
")",
":",
"session",
"=",
"requests",
".",
"sessions",
".",
"Session",
"(",
")",
"session",
".",
"mount",
"(",
"'http://'",
",",
"HTTPAdapter",
"(",
"pool_connections",
"=",
"25",
",",
"pool_maxsize",
"=",
"25",
",",
"pool_block",
"=",
"True",
")",
")",
"session",
".",
"mount",
"(",
"'https://'",
",",
"HTTPAdapter",
"(",
"pool_connections",
"=",
"25",
",",
"pool_maxsize",
"=",
"25",
",",
"pool_block",
"=",
"True",
")",
")",
"return",
"session"
]
| Set connection pool maxsize and block value to avoid `connection pool full` warnings.
:return: requests session | [
"Set",
"connection",
"pool",
"maxsize",
"and",
"block",
"value",
"to",
"avoid",
"connection",
"pool",
"full",
"warnings",
"."
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/http_requests/requests_session.py#L5-L14 |
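Typical usage sketch; the URL is illustrative.
session = get_requests_session()
# Requests made through this session reuse the mounted adapters: up to 25
# pooled connections per host are shared, and pool_block=True makes callers
# wait for a free connection instead of opening extra ones.
response = session.get('https://example.com/resource')
response.raise_for_status()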
oceanprotocol/squid-py | squid_py/keeper/dispenser.py | Dispenser.request_tokens | def request_tokens(self, amount, account):
"""
Request an amount of tokens for a particular address.
This transaction has a gas cost
:param amount: Amount of tokens, int
:param account: Account instance
:raise OceanInvalidTransaction: Transaction failed
:return: bool
"""
address = account.address
try:
tx_hash = self.send_transaction(
'requestTokens',
(amount,),
transact={'from': address,
'passphrase': account.password}
)
logging.debug(f'{address} requests {amount} tokens, returning receipt')
try:
receipt = Web3Provider.get_web3().eth.waitForTransactionReceipt(
tx_hash, timeout=20)
logging.debug(f'requestTokens receipt: {receipt}')
except Timeout:
receipt = None
if not receipt:
return False
if receipt.status == 0:
logging.warning(f'request tokens failed: Tx-receipt={receipt}')
logging.warning(f'request tokens failed: account {address}')
return False
# check for emitted events:
rfe = EventFilter(
'RequestFrequencyExceeded',
self.events.RequestFrequencyExceeded,
argument_filters={'requester': Web3Provider.get_web3().toBytes(hexstr=address)},
from_block='latest',
to_block='latest',
)
logs = rfe.get_all_entries(max_tries=5)
if logs:
logging.warning(f'request tokens failed RequestFrequencyExceeded')
logging.info(f'RequestFrequencyExceeded event logs: {logs}')
return False
rle = EventFilter(
'RequestLimitExceeded',
self.events.RequestLimitExceeded,
argument_filters={'requester': Web3Provider.get_web3().toBytes(hexstr=address)},
from_block='latest',
to_block='latest',
)
logs = rle.get_all_entries(max_tries=5)
if logs:
logging.warning(f'request tokens failed RequestLimitExceeded')
logging.info(f'RequestLimitExceeded event logs: {logs}')
return False
return True
except ValueError as err:
raise OceanInvalidTransaction(
f'Requesting {amount} tokens'
f' to {address} failed with error: {err}'
) | python | def request_tokens(self, amount, account):
address = account.address
try:
tx_hash = self.send_transaction(
'requestTokens',
(amount,),
transact={'from': address,
'passphrase': account.password}
)
logging.debug(f'{address} requests {amount} tokens, returning receipt')
try:
receipt = Web3Provider.get_web3().eth.waitForTransactionReceipt(
tx_hash, timeout=20)
logging.debug(f'requestTokens receipt: {receipt}')
except Timeout:
receipt = None
if not receipt:
return False
if receipt.status == 0:
logging.warning(f'request tokens failed: Tx-receipt={receipt}')
logging.warning(f'request tokens failed: account {address}')
return False
rfe = EventFilter(
'RequestFrequencyExceeded',
self.events.RequestFrequencyExceeded,
argument_filters={'requester': Web3Provider.get_web3().toBytes(hexstr=address)},
from_block='latest',
to_block='latest',
)
logs = rfe.get_all_entries(max_tries=5)
if logs:
logging.warning(f'request tokens failed RequestFrequencyExceeded')
logging.info(f'RequestFrequencyExceeded event logs: {logs}')
return False
rle = EventFilter(
'RequestLimitExceeded',
self.events.RequestLimitExceeded,
argument_filters={'requester': Web3Provider.get_web3().toBytes(hexstr=address)},
from_block='latest',
to_block='latest',
)
logs = rle.get_all_entries(max_tries=5)
if logs:
logging.warning(f'request tokens failed RequestLimitExceeded')
logging.info(f'RequestLimitExceeded event logs: {logs}')
return False
return True
except ValueError as err:
raise OceanInvalidTransaction(
f'Requesting {amount} tokens'
f' to {address} failed with error: {err}'
) | [
"def",
"request_tokens",
"(",
"self",
",",
"amount",
",",
"account",
")",
":",
"address",
"=",
"account",
".",
"address",
"try",
":",
"tx_hash",
"=",
"self",
".",
"send_transaction",
"(",
"'requestTokens'",
",",
"(",
"amount",
",",
")",
",",
"transact",
"=",
"{",
"'from'",
":",
"address",
",",
"'passphrase'",
":",
"account",
".",
"password",
"}",
")",
"logging",
".",
"debug",
"(",
"f'{address} requests {amount} tokens, returning receipt'",
")",
"try",
":",
"receipt",
"=",
"Web3Provider",
".",
"get_web3",
"(",
")",
".",
"eth",
".",
"waitForTransactionReceipt",
"(",
"tx_hash",
",",
"timeout",
"=",
"20",
")",
"logging",
".",
"debug",
"(",
"f'requestTokens receipt: {receipt}'",
")",
"except",
"Timeout",
":",
"receipt",
"=",
"None",
"if",
"not",
"receipt",
":",
"return",
"False",
"if",
"receipt",
".",
"status",
"==",
"0",
":",
"logging",
".",
"warning",
"(",
"f'request tokens failed: Tx-receipt={receipt}'",
")",
"logging",
".",
"warning",
"(",
"f'request tokens failed: account {address}'",
")",
"return",
"False",
"# check for emitted events:",
"rfe",
"=",
"EventFilter",
"(",
"'RequestFrequencyExceeded'",
",",
"self",
".",
"events",
".",
"RequestFrequencyExceeded",
",",
"argument_filters",
"=",
"{",
"'requester'",
":",
"Web3Provider",
".",
"get_web3",
"(",
")",
".",
"toBytes",
"(",
"hexstr",
"=",
"address",
")",
"}",
",",
"from_block",
"=",
"'latest'",
",",
"to_block",
"=",
"'latest'",
",",
")",
"logs",
"=",
"rfe",
".",
"get_all_entries",
"(",
"max_tries",
"=",
"5",
")",
"if",
"logs",
":",
"logging",
".",
"warning",
"(",
"f'request tokens failed RequestFrequencyExceeded'",
")",
"logging",
".",
"info",
"(",
"f'RequestFrequencyExceeded event logs: {logs}'",
")",
"return",
"False",
"rle",
"=",
"EventFilter",
"(",
"'RequestLimitExceeded'",
",",
"self",
".",
"events",
".",
"RequestLimitExceeded",
",",
"argument_filters",
"=",
"{",
"'requester'",
":",
"Web3Provider",
".",
"get_web3",
"(",
")",
".",
"toBytes",
"(",
"hexstr",
"=",
"address",
")",
"}",
",",
"from_block",
"=",
"'latest'",
",",
"to_block",
"=",
"'latest'",
",",
")",
"logs",
"=",
"rle",
".",
"get_all_entries",
"(",
"max_tries",
"=",
"5",
")",
"if",
"logs",
":",
"logging",
".",
"warning",
"(",
"f'request tokens failed RequestLimitExceeded'",
")",
"logging",
".",
"info",
"(",
"f'RequestLimitExceeded event logs: {logs}'",
")",
"return",
"False",
"return",
"True",
"except",
"ValueError",
"as",
"err",
":",
"raise",
"OceanInvalidTransaction",
"(",
"f'Requesting {amount} tokens'",
"f' to {address} failed with error: {err}'",
")"
]
| Request an amount of tokens for a particular address.
This transaction has a gas cost
:param amount: Amount of tokens, int
:param account: Account instance
:raise OceanInvalidTransaction: Transaction failed
:return: bool | [
"Request",
"an",
"amount",
"of",
"tokens",
"for",
"a",
"particular",
"address",
".",
"This",
"transaction",
"has",
"gas",
"cost"
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/keeper/dispenser.py#L20-L87 |
oceanprotocol/squid-py | squid_py/keeper/keeper.py | Keeper.get_network_name | def get_network_name(network_id):
"""
Return the keeper network name based on the current ethereum network id.
Return `development` for every network id that is not mapped.
:param network_id: Network id, int
:return: Network name, str
"""
if os.environ.get('KEEPER_NETWORK_NAME'):
logging.debug('keeper network name overridden by an environment variable: {}'.format(
os.environ.get('KEEPER_NETWORK_NAME')))
return os.environ.get('KEEPER_NETWORK_NAME')
return Keeper._network_name_map.get(network_id, Keeper.DEFAULT_NETWORK_NAME) | python | def get_network_name(network_id):
if os.environ.get('KEEPER_NETWORK_NAME'):
logging.debug('keeper network name overridden by an environment variable: {}'.format(
os.environ.get('KEEPER_NETWORK_NAME')))
return os.environ.get('KEEPER_NETWORK_NAME')
return Keeper._network_name_map.get(network_id, Keeper.DEFAULT_NETWORK_NAME) | [
"def",
"get_network_name",
"(",
"network_id",
")",
":",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"'KEEPER_NETWORK_NAME'",
")",
":",
"logging",
".",
"debug",
"(",
"'keeper network name overridden by an environment variable: {}'",
".",
"format",
"(",
"os",
".",
"environ",
".",
"get",
"(",
"'KEEPER_NETWORK_NAME'",
")",
")",
")",
"return",
"os",
".",
"environ",
".",
"get",
"(",
"'KEEPER_NETWORK_NAME'",
")",
"return",
"Keeper",
".",
"_network_name_map",
".",
"get",
"(",
"network_id",
",",
"Keeper",
".",
"DEFAULT_NETWORK_NAME",
")"
]
| Return the keeper network name based on the current ethereum network id.
Return `development` for every network id that is not mapped.
:param network_id: Network id, int
:return: Network name, str | [
"Return",
"the",
"keeper",
"network",
"name",
"based",
"on",
"the",
"current",
"ethereum",
"network",
"id",
".",
"Return",
"development",
"for",
"every",
"network",
"id",
"that",
"is",
"not",
"mapped",
"."
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/keeper/keeper.py#L68-L81 |
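The environment-variable override in action; the network name is invented and the id arbitrary.
import os
os.environ['KEEPER_NETWORK_NAME'] = 'nile'
assert Keeper.get_network_name(8995) == 'nile'  # the override beats any id mapping
del os.environ['KEEPER_NETWORK_NAME']
# Without the variable, ids missing from _network_name_map fall back to
# Keeper.DEFAULT_NETWORK_NAME.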
oceanprotocol/squid-py | squid_py/keeper/keeper.py | Keeper.unlock_account | def unlock_account(account):
"""
Unlock the account.
:param account: Account
:return:
"""
return Web3Provider.get_web3().personal.unlockAccount(account.address, account.password) | python | def unlock_account(account):
return Web3Provider.get_web3().personal.unlockAccount(account.address, account.password) | [
"def",
"unlock_account",
"(",
"account",
")",
":",
"return",
"Web3Provider",
".",
"get_web3",
"(",
")",
".",
"personal",
".",
"unlockAccount",
"(",
"account",
".",
"address",
",",
"account",
".",
"password",
")"
]
| Unlock the account.
:param account: Account
:return: | [
"Unlock",
"the",
"account",
"."
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/keeper/keeper.py#L114-L121 |
oceanprotocol/squid-py | squid_py/keeper/keeper.py | Keeper.get_condition_name_by_address | def get_condition_name_by_address(self, address):
"""Return the condition name for a given address."""
if self.lock_reward_condition.address == address:
return 'lockReward'
elif self.access_secret_store_condition.address == address:
return 'accessSecretStore'
elif self.escrow_reward_condition.address == address:
return 'escrowReward'
else:
logging.error(f'The current address {address} is not a condition address') | python | def get_condition_name_by_address(self, address):
if self.lock_reward_condition.address == address:
return 'lockReward'
elif self.access_secret_store_condition.address == address:
return 'accessSecretStore'
elif self.escrow_reward_condition.address == address:
return 'escrowReward'
else:
logging.error(f'The current address {address} is not a condition address') | [
"def",
"get_condition_name_by_address",
"(",
"self",
",",
"address",
")",
":",
"if",
"self",
".",
"lock_reward_condition",
".",
"address",
"==",
"address",
":",
"return",
"'lockReward'",
"elif",
"self",
".",
"access_secret_store_condition",
".",
"address",
"==",
"address",
":",
"return",
"'accessSecretStore'",
"elif",
"self",
".",
"escrow_reward_condition",
".",
"address",
"==",
"address",
":",
"return",
"'escrowReward'",
"else",
":",
"logging",
".",
"error",
"(",
"f'The current address {address} is not a condition address'",
")"
]
| Return the condition name for a given address. | [
"Return",
"the",
"condition",
"name",
"for",
"a",
"given",
"address",
"."
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/keeper/keeper.py#L133-L142 |
oceanprotocol/squid-py | squid_py/brizo/brizo.py | Brizo.initialize_service_agreement | def initialize_service_agreement(did, agreement_id, service_definition_id, signature,
account_address,
consume_endpoint):
"""
Send a request to the service provider (consume_endpoint) to initialize the service
agreement for the asset identified by `did`.
:param did: id of the asset includes the `did:op:` prefix, str
:param agreement_id: id of the agreement, hex str
:param service_definition_id: identifier of the service inside the asset DDO, str
:param signature: signed agreement hash, hex str
:param account_address: ethereum address of the consumer signing this agreement, hex str
:param consume_endpoint: url of the service provider, str
:return: bool
"""
payload = Brizo._prepare_consume_payload(
did, agreement_id, service_definition_id, signature, account_address
)
response = Brizo._http_client.post(
consume_endpoint, data=payload,
headers={'content-type': 'application/json'}
)
if response and hasattr(response, 'status_code'):
if response.status_code != 201:
msg = (f'Initialize service agreement failed at the consumeEndpoint '
f'{consume_endpoint}, reason {response.text}, status {response.status_code}'
)
logger.error(msg)
raise OceanInitializeServiceAgreementError(msg)
logger.info(
f'Service agreement initialized successfully, service agreement id {agreement_id},'
f' consumeEndpoint {consume_endpoint}')
return True | python | def initialize_service_agreement(did, agreement_id, service_definition_id, signature,
account_address,
consume_endpoint):
payload = Brizo._prepare_consume_payload(
did, agreement_id, service_definition_id, signature, account_address
)
response = Brizo._http_client.post(
consume_endpoint, data=payload,
headers={'content-type': 'application/json'}
)
if response and hasattr(response, 'status_code'):
if response.status_code != 201:
msg = (f'Initialize service agreement failed at the consumeEndpoint '
f'{consume_endpoint}, reason {response.text}, status {response.status_code}'
)
logger.error(msg)
raise OceanInitializeServiceAgreementError(msg)
logger.info(
f'Service agreement initialized successfully, service agreement id {agreement_id},'
f' consumeEndpoint {consume_endpoint}')
return True | [
"def",
"initialize_service_agreement",
"(",
"did",
",",
"agreement_id",
",",
"service_definition_id",
",",
"signature",
",",
"account_address",
",",
"consume_endpoint",
")",
":",
"payload",
"=",
"Brizo",
".",
"_prepare_consume_payload",
"(",
"did",
",",
"agreement_id",
",",
"service_definition_id",
",",
"signature",
",",
"account_address",
")",
"response",
"=",
"Brizo",
".",
"_http_client",
".",
"post",
"(",
"consume_endpoint",
",",
"data",
"=",
"payload",
",",
"headers",
"=",
"{",
"'content-type'",
":",
"'application/json'",
"}",
")",
"if",
"response",
"and",
"hasattr",
"(",
"response",
",",
"'status_code'",
")",
":",
"if",
"response",
".",
"status_code",
"!=",
"201",
":",
"msg",
"=",
"(",
"f'Initialize service agreement failed at the consumeEndpoint '",
"f'{consume_endpoint}, reason {response.text}, status {response.status_code}'",
")",
"logger",
".",
"error",
"(",
"msg",
")",
"raise",
"OceanInitializeServiceAgreementError",
"(",
"msg",
")",
"logger",
".",
"info",
"(",
"f'Service agreement initialized successfully, service agreement id {agreement_id},'",
"f' consumeEndpoint {consume_endpoint}'",
")",
"return",
"True"
]
| Send a request to the service provider (consume_endpoint) to initialize the service
agreement for the asset identified by `did`.
:param did: id of the asset includes the `did:op:` prefix, str
:param agreement_id: id of the agreement, hex str
:param service_definition_id: identifier of the service inside the asset DDO, str
:param signature: signed agreement hash, hex str
:param account_address: ethereum address of the consumer signing this agreement, hex str
:param consume_endpoint: url of the service provider, str
:return: bool | [
"Send",
"a",
"request",
"to",
"the",
"service",
"provider",
"(",
"consume_endpoint",
")",
"to",
"initialize",
"the",
"service",
"agreement",
"for",
"the",
"asset",
"identified",
"by",
"did",
"."
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/brizo/brizo.py#L63-L96 |
oceanprotocol/squid-py | squid_py/brizo/brizo.py | Brizo.consume_service | def consume_service(service_agreement_id, service_endpoint, account, files,
destination_folder, index=None):
"""
Call the brizo endpoint to get access to the different files that form the asset.
:param service_agreement_id: Service Agreement Id, str
:param service_endpoint: Url to consume, str
:param account: Account instance of the consumer signing this agreement, hex-str
:param files: List containing the files to be consumed, list
:param index: Index of the document that is going to be downloaded, int
:param destination_folder: Path, str
:return: True if was downloaded, bool
"""
signature = Keeper.get_instance().sign_hash(service_agreement_id, account)
if index is not None:
assert isinstance(index, int), logger.error('index has to be an integer.')
assert index >= 0, logger.error('index has to be 0 or a positive integer.')
assert index < len(files), logger.error(
'index can not be bigger than the number of files')
consume_url = Brizo._create_consume_url(service_endpoint, service_agreement_id, account,
None, signature, index)
logger.info(f'invoke consume endpoint with this url: {consume_url}')
response = Brizo._http_client.get(consume_url, stream=True)
file_name = Brizo._get_file_name(response)
Brizo.write_file(response, destination_folder, file_name)
else:
for i, _file in enumerate(files):
consume_url = Brizo._create_consume_url(service_endpoint, service_agreement_id,
account, _file,
signature, i)
logger.info(f'invoke consume endpoint with this url: {consume_url}')
response = Brizo._http_client.get(consume_url, stream=True)
file_name = Brizo._get_file_name(response)
Brizo.write_file(response, destination_folder, file_name) | python | def consume_service(service_agreement_id, service_endpoint, account, files,
destination_folder, index=None):
signature = Keeper.get_instance().sign_hash(service_agreement_id, account)
if index is not None:
assert isinstance(index, int), logger.error('index has to be an integer.')
assert index >= 0, logger.error('index has to be 0 or a positive integer.')
assert index < len(files), logger.error(
'index can not be bigger than the number of files')
consume_url = Brizo._create_consume_url(service_endpoint, service_agreement_id, account,
None, signature, index)
logger.info(f'invoke consume endpoint with this url: {consume_url}')
response = Brizo._http_client.get(consume_url, stream=True)
file_name = Brizo._get_file_name(response)
Brizo.write_file(response, destination_folder, file_name)
else:
for i, _file in enumerate(files):
consume_url = Brizo._create_consume_url(service_endpoint, service_agreement_id,
account, _file,
signature, i)
logger.info(f'invoke consume endpoint with this url: {consume_url}')
response = Brizo._http_client.get(consume_url, stream=True)
file_name = Brizo._get_file_name(response)
Brizo.write_file(response, destination_folder, file_name) | [
"def",
"consume_service",
"(",
"service_agreement_id",
",",
"service_endpoint",
",",
"account",
",",
"files",
",",
"destination_folder",
",",
"index",
"=",
"None",
")",
":",
"signature",
"=",
"Keeper",
".",
"get_instance",
"(",
")",
".",
"sign_hash",
"(",
"service_agreement_id",
",",
"account",
")",
"if",
"index",
"is",
"not",
"None",
":",
"assert",
"isinstance",
"(",
"index",
",",
"int",
")",
",",
"logger",
".",
"error",
"(",
"'index has to be an integer.'",
")",
"assert",
"index",
">=",
"0",
",",
"logger",
".",
"error",
"(",
"'index has to be 0 or a positive integer.'",
")",
"assert",
"index",
"<",
"len",
"(",
"files",
")",
",",
"logger",
".",
"error",
"(",
"'index can not be bigger than the number of files'",
")",
"consume_url",
"=",
"Brizo",
".",
"_create_consume_url",
"(",
"service_endpoint",
",",
"service_agreement_id",
",",
"account",
",",
"None",
",",
"signature",
",",
"index",
")",
"logger",
".",
"info",
"(",
"f'invoke consume endpoint with this url: {consume_url}'",
")",
"response",
"=",
"Brizo",
".",
"_http_client",
".",
"get",
"(",
"consume_url",
",",
"stream",
"=",
"True",
")",
"file_name",
"=",
"Brizo",
".",
"_get_file_name",
"(",
"response",
")",
"Brizo",
".",
"write_file",
"(",
"response",
",",
"destination_folder",
",",
"file_name",
")",
"else",
":",
"for",
"i",
",",
"_file",
"in",
"enumerate",
"(",
"files",
")",
":",
"consume_url",
"=",
"Brizo",
".",
"_create_consume_url",
"(",
"service_endpoint",
",",
"service_agreement_id",
",",
"account",
",",
"_file",
",",
"signature",
",",
"i",
")",
"logger",
".",
"info",
"(",
"f'invoke consume endpoint with this url: {consume_url}'",
")",
"response",
"=",
"Brizo",
".",
"_http_client",
".",
"get",
"(",
"consume_url",
",",
"stream",
"=",
"True",
")",
"file_name",
"=",
"Brizo",
".",
"_get_file_name",
"(",
"response",
")",
"Brizo",
".",
"write_file",
"(",
"response",
",",
"destination_folder",
",",
"file_name",
")"
]
| Call the brizo endpoint to get access to the different files that form the asset.
:param service_agreement_id: Service Agreement Id, str
:param service_endpoint: Url to consume, str
:param account: Account instance of the consumer signing this agreement, hex-str
:param files: List containing the files to be consumed, list
:param index: Index of the document that is going to be downloaded, int
:param destination_folder: Path, str
:return: True if was downloaded, bool | [
"Call",
"the",
"brizo",
"endpoint",
"to",
"get",
"access",
"to",
"the",
"different",
"files",
"that",
"form",
"the",
"asset",
"."
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/brizo/brizo.py#L99-L132 |
oceanprotocol/squid-py | squid_py/brizo/brizo.py | Brizo._prepare_consume_payload | def _prepare_consume_payload(did, service_agreement_id, service_definition_id, signature,
consumer_address):
"""Prepare a payload to send to `Brizo`.
:param did: DID, str
:param service_agreement_id: Service Agreement Id, str
:param service_definition_id: identifier of the service inside the asset DDO, str
service in the DDO (DID document)
:param signature: the signed agreement message hash which includes
conditions and their parameter values and other details of the agreement, str
:param consumer_address: ethereum address of the consumer signing this agreement, hex-str
:return: dict
"""
return json.dumps({
'did': did,
'serviceAgreementId': service_agreement_id,
ServiceAgreement.SERVICE_DEFINITION_ID: service_definition_id,
'signature': signature,
'consumerAddress': consumer_address
}) | python | def _prepare_consume_payload(did, service_agreement_id, service_definition_id, signature,
consumer_address):
return json.dumps({
'did': did,
'serviceAgreementId': service_agreement_id,
ServiceAgreement.SERVICE_DEFINITION_ID: service_definition_id,
'signature': signature,
'consumerAddress': consumer_address
}) | [
"def",
"_prepare_consume_payload",
"(",
"did",
",",
"service_agreement_id",
",",
"service_definition_id",
",",
"signature",
",",
"consumer_address",
")",
":",
"return",
"json",
".",
"dumps",
"(",
"{",
"'did'",
":",
"did",
",",
"'serviceAgreementId'",
":",
"service_agreement_id",
",",
"ServiceAgreement",
".",
"SERVICE_DEFINITION_ID",
":",
"service_definition_id",
",",
"'signature'",
":",
"signature",
",",
"'consumerAddress'",
":",
"consumer_address",
"}",
")"
]
| Prepare a payload to send to `Brizo`.
:param did: DID, str
:param service_agreement_id: Service Agreement Id, str
:param service_definition_id: identifier of the service inside the asset DDO, str
service in the DDO (DID document)
:param signature: the signed agreement message hash which includes
conditions and their parameter values and other details of the agreement, str
:param consumer_address: ethereum address of the consumer signing this agreement, hex-str
:return: dict | [
"Prepare",
"a",
"payload",
"to",
"send",
"to",
"Brizo",
"."
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/brizo/brizo.py#L135-L154 |
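What the serialized payload looks like, with invented values; the exact JSON key that ServiceAgreement.SERVICE_DEFINITION_ID expands to is assumed, not confirmed here.
import json
payload = Brizo._prepare_consume_payload(
    'did:op:0c18...', '0xagreementid', 'access', '0xsignature', '0xconsumeraddress',
)
body = json.loads(payload)
assert body['did'].startswith('did:op:')
assert body['consumerAddress'] == '0xconsumeraddress'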
oceanprotocol/squid-py | squid_py/brizo/brizo.py | Brizo.get_brizo_url | def get_brizo_url(config):
"""
Return the Brizo component url.
:param config: Config
:return: Url, str
"""
brizo_url = 'http://localhost:8030'
if config.has_option('resources', 'brizo.url'):
brizo_url = config.get('resources', 'brizo.url') or brizo_url
brizo_path = '/api/v1/brizo'
return f'{brizo_url}{brizo_path}' | python | def get_brizo_url(config):
brizo_url = 'http://localhost:8030'
if config.has_option('resources', 'brizo.url'):
brizo_url = config.get('resources', 'brizo.url') or brizo_url
brizo_path = '/api/v1/brizo'
return f'{brizo_url}{brizo_path}' | [
"def",
"get_brizo_url",
"(",
"config",
")",
":",
"brizo_url",
"=",
"'http://localhost:8030'",
"if",
"config",
".",
"has_option",
"(",
"'resources'",
",",
"'brizo.url'",
")",
":",
"brizo_url",
"=",
"config",
".",
"get",
"(",
"'resources'",
",",
"'brizo.url'",
")",
"or",
"brizo_url",
"brizo_path",
"=",
"'/api/v1/brizo'",
"return",
"f'{brizo_url}{brizo_path}'"
]
| Return the Brizo component url.
:param config: Config
:return: Url, str | [
"Return",
"the",
"Brizo",
"component",
"url",
"."
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/brizo/brizo.py#L157-L169 |
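A minimal stand-in Config showing the default path; FakeConfig is invented for illustration only.
class FakeConfig:
    # Pretends that no [resources] brizo.url option is configured.
    def has_option(self, section, option):
        return False
    def get(self, section, option):
        raise KeyError(option)

assert Brizo.get_brizo_url(FakeConfig()) == 'http://localhost:8030/api/v1/brizo'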
oceanprotocol/squid-py | squid_py/brizo/brizo.py | Brizo.write_file | def write_file(response, destination_folder, file_name):
"""
Write the response content in a file in the destination folder.
:param response: Response
:param destination_folder: Destination folder, string
:param file_name: File name, string
:return: bool
"""
if response.status_code == 200:
with open(os.path.join(destination_folder, file_name), 'wb') as f:
for chunk in response.iter_content(chunk_size=None):
f.write(chunk)
logger.info(f'Saved downloaded file in {f.name}')
else:
logger.warning(f'consume failed: {response.reason}') | python | def write_file(response, destination_folder, file_name):
if response.status_code == 200:
with open(os.path.join(destination_folder, file_name), 'wb') as f:
for chunk in response.iter_content(chunk_size=None):
f.write(chunk)
logger.info(f'Saved downloaded file in {f.name}')
else:
logger.warning(f'consume failed: {response.reason}') | [
"def",
"write_file",
"(",
"response",
",",
"destination_folder",
",",
"file_name",
")",
":",
"if",
"response",
".",
"status_code",
"==",
"200",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"destination_folder",
",",
"file_name",
")",
",",
"'wb'",
")",
"as",
"f",
":",
"for",
"chunk",
"in",
"response",
".",
"iter_content",
"(",
"chunk_size",
"=",
"None",
")",
":",
"f",
".",
"write",
"(",
"chunk",
")",
"logger",
".",
"info",
"(",
"f'Saved downloaded file in {f.name}'",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"f'consume failed: {response.reason}'",
")"
]
| Write the response content in a file in the destination folder.
:param response: Response
:param destination_folder: Destination folder, string
:param file_name: File name, string
:return: bool | [
"Write",
"the",
"response",
"content",
"in",
"a",
"file",
"in",
"the",
"destination",
"folder",
".",
":",
"param",
"response",
":",
"Response",
":",
"param",
"destination_folder",
":",
"Destination",
"folder",
"string",
":",
"param",
"file_name",
":",
"File",
"name",
"string",
":",
"return",
":",
"bool"
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/brizo/brizo.py#L204-L218 |
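A streaming-download sketch built on write_file; the URL is illustrative, and error handling stays with the caller since non-200 responses are only logged.
import requests
response = requests.get('https://example.com/data.csv', stream=True)
Brizo.write_file(response, destination_folder='/tmp', file_name='data.csv')
# Callers that need a hard failure should still check response.status_code.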
oceanprotocol/squid-py | squid_py/ddo/metadata.py | Metadata.validate | def validate(metadata):
"""Validator of the metadata composition
:param metadata: conforming to the Metadata accepted by Ocean Protocol, dict
:return: bool
"""
# validate required sections and their sub items
for section_key in Metadata.REQUIRED_SECTIONS:
if section_key not in metadata or not metadata[section_key] or not isinstance(
metadata[section_key], dict):
return False
section = Metadata.MAIN_SECTIONS[section_key]
section_metadata = metadata[section_key]
for subkey in section.REQUIRED_VALUES_KEYS:
if subkey not in section_metadata or section_metadata[subkey] is None:
return False
return True | python | def validate(metadata):
for section_key in Metadata.REQUIRED_SECTIONS:
if section_key not in metadata or not metadata[section_key] or not isinstance(
metadata[section_key], dict):
return False
section = Metadata.MAIN_SECTIONS[section_key]
section_metadata = metadata[section_key]
for subkey in section.REQUIRED_VALUES_KEYS:
if subkey not in section_metadata or section_metadata[subkey] is None:
return False
return True | [
"def",
"validate",
"(",
"metadata",
")",
":",
"# validate required sections and their sub items",
"for",
"section_key",
"in",
"Metadata",
".",
"REQUIRED_SECTIONS",
":",
"if",
"section_key",
"not",
"in",
"metadata",
"or",
"not",
"metadata",
"[",
"section_key",
"]",
"or",
"not",
"isinstance",
"(",
"metadata",
"[",
"section_key",
"]",
",",
"dict",
")",
":",
"return",
"False",
"section",
"=",
"Metadata",
".",
"MAIN_SECTIONS",
"[",
"section_key",
"]",
"section_metadata",
"=",
"metadata",
"[",
"section_key",
"]",
"for",
"subkey",
"in",
"section",
".",
"REQUIRED_VALUES_KEYS",
":",
"if",
"subkey",
"not",
"in",
"section_metadata",
"or",
"section_metadata",
"[",
"subkey",
"]",
"is",
"None",
":",
"return",
"False",
"return",
"True"
]
| Validator of the metadata composition
:param metadata: conforming to the Metadata accepted by Ocean Protocol, dict
:return: bool | [
"Validator",
"of",
"the",
"metadata",
"composition"
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/ddo/metadata.py#L127-L145 |
oceanprotocol/squid-py | squid_py/ddo/metadata.py | Metadata.get_example | def get_example():
"""Retrieve an example of the metadata"""
example = dict()
for section_key, section in Metadata.MAIN_SECTIONS.items():
example[section_key] = section.EXAMPLE.copy()
return example | python | def get_example():
example = dict()
for section_key, section in Metadata.MAIN_SECTIONS.items():
example[section_key] = section.EXAMPLE.copy()
return example | [
"def",
"get_example",
"(",
")",
":",
"example",
"=",
"dict",
"(",
")",
"for",
"section_key",
",",
"section",
"in",
"Metadata",
".",
"MAIN_SECTIONS",
".",
"items",
"(",
")",
":",
"example",
"[",
"section_key",
"]",
"=",
"section",
".",
"EXAMPLE",
".",
"copy",
"(",
")",
"return",
"example"
]
| Retrieve an example of the metadata | [
"Retrieve",
"an",
"example",
"of",
"the",
"metadata"
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/ddo/metadata.py#L148-L154 |
oceanprotocol/squid-py | squid_py/secret_store/secret_store.py | SecretStore.encrypt_document | def encrypt_document(self, document_id, content, threshold=0):
"""
encrypt string data using the DID as a secret store id,
if secret store is enabled then return the result from secret store encryption
None for no encryption performed
:param document_id: hex str id of document to use for encryption session
:param content: str to be encrypted
:param threshold: int
:return:
None -- if encryption failed
hex str -- the encrypted document
"""
return self._secret_store_client(self._account).publish_document(
remove_0x_prefix(document_id), content, threshold
) | python | def encrypt_document(self, document_id, content, threshold=0):
return self._secret_store_client(self._account).publish_document(
remove_0x_prefix(document_id), content, threshold
) | [
"def",
"encrypt_document",
"(",
"self",
",",
"document_id",
",",
"content",
",",
"threshold",
"=",
"0",
")",
":",
"return",
"self",
".",
"_secret_store_client",
"(",
"self",
".",
"_account",
")",
".",
"publish_document",
"(",
"remove_0x_prefix",
"(",
"document_id",
")",
",",
"content",
",",
"threshold",
")"
]
| encrypt string data using the DID as a secret store id,
if secret store is enabled then return the result from secret store encryption
None for no encryption performed
:param document_id: hex str id of document to use for encryption session
:param content: str to be encrypted
:param threshold: int
:return:
None -- if encryption failed
hex str -- the encrypted document | [
"encrypt",
"string",
"data",
"using",
"the",
"DID",
"as",
"an",
"secret",
"store",
"id",
"if",
"secret",
"store",
"is",
"enabled",
"then",
"return",
"the",
"result",
"from",
"secret",
"store",
"encryption"
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/secret_store/secret_store.py#L50-L66 |
oceanprotocol/squid-py | squid_py/secret_store/secret_store.py | SecretStore.decrypt_document | def decrypt_document(self, document_id, encrypted_content):
"""
Decrypt a previously encrypted content using the secret store keys identified
by document_id.
Note that decryption requires permission already granted to the consumer account.
:param document_id: hex str id of document to use for encryption session
:param encrypted_content: hex str -- the encrypted content from a previous
`encrypt_document` operation
:return:
None -- if decryption failed
str -- the original content that was encrypted previously
"""
return self._secret_store_client(self._account).decrypt_document(
remove_0x_prefix(document_id),
encrypted_content
) | python | def decrypt_document(self, document_id, encrypted_content):
return self._secret_store_client(self._account).decrypt_document(
remove_0x_prefix(document_id),
encrypted_content
) | [
"def",
"decrypt_document",
"(",
"self",
",",
"document_id",
",",
"encrypted_content",
")",
":",
"return",
"self",
".",
"_secret_store_client",
"(",
"self",
".",
"_account",
")",
".",
"decrypt_document",
"(",
"remove_0x_prefix",
"(",
"document_id",
")",
",",
"encrypted_content",
")"
]
| Decrypt a previously encrypted content using the secret store keys identified
by document_id.
Note that decryption requires permission already granted to the consumer account.
:param document_id: hex str id of document to use for encryption session
:param encrypted_content: hex str -- the encrypted content from a previous
`encrypt_document` operation
:return:
None -- if decryption failed
str -- the original content that was encrypted previously | [
"Decrypt",
"a",
"previously",
"encrypted",
"content",
"using",
"the",
"secret",
"store",
"keys",
"identified",
"by",
"document_id",
"."
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/secret_store/secret_store.py#L68-L85 |
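A hedged round-trip sketch for the two SecretStore methods above; `secret_store` is assumed to be a configured SecretStore instance and `document_id` a hex document id the caller has permission for:

content = '{"url": "http://example.com/data.csv"}'
encrypted = secret_store.encrypt_document(document_id, content)   # hex str, or None on failure
if encrypted is not None:
    assert secret_store.decrypt_document(document_id, encrypted) == content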
oceanprotocol/squid-py | squid_py/assets/asset_consumer.py | AssetConsumer.download | def download(service_agreement_id, service_definition_id, ddo, consumer_account, destination,
brizo, secret_store, index=None):
"""
Download asset data files or result files from a compute job.
:param service_agreement_id: Service agreement id, str
:param service_definition_id: identifier of the service inside the asset DDO, str
:param ddo: DDO
:param consumer_account: Account instance of the consumer
:param destination: Path, str
:param brizo: Brizo instance
:param secret_store: SecretStore instance
:param index: Index of the document that is going to be downloaded, int
:return: Asset folder path, str
"""
did = ddo.did
encrypted_files = ddo.metadata['base']['encryptedFiles']
encrypted_files = (
encrypted_files if isinstance(encrypted_files, str)
else encrypted_files[0]
)
sa = ServiceAgreement.from_ddo(service_definition_id, ddo)
consume_url = sa.consume_endpoint
if not consume_url:
logger.error(
'Consume asset failed, service definition is missing the "serviceEndpoint".')
raise AssertionError(
'Consume asset failed, service definition is missing the "serviceEndpoint".')
if ddo.get_service('Authorization'):
secret_store_service = ddo.get_service(service_type=ServiceTypes.AUTHORIZATION)
secret_store_url = secret_store_service.endpoints.service
secret_store.set_secret_store_url(secret_store_url)
# decrypt the contentUrls
decrypted_content_urls = json.loads(
secret_store.decrypt_document(did_to_id(did), encrypted_files)
)
if isinstance(decrypted_content_urls, str):
decrypted_content_urls = [decrypted_content_urls]
logger.debug(f'got decrypted contentUrls: {decrypted_content_urls}')
if not os.path.isabs(destination):
destination = os.path.abspath(destination)
if not os.path.exists(destination):
os.mkdir(destination)
asset_folder = os.path.join(destination,
f'datafile.{did_to_id(did)}.{sa.service_definition_id}')
if not os.path.exists(asset_folder):
os.mkdir(asset_folder)
if index is not None:
assert isinstance(index, int), logger.error('index has to be an integer.')
assert index >= 0, logger.error('index has to be 0 or a positive integer.')
assert index < len(decrypted_content_urls), logger.error(
'index can not be bigger than the number of files')
brizo.consume_service(
service_agreement_id,
consume_url,
consumer_account,
decrypted_content_urls,
asset_folder,
index
)
return asset_folder | python | def download(service_agreement_id, service_definition_id, ddo, consumer_account, destination,
brizo, secret_store, index=None):
did = ddo.did
encrypted_files = ddo.metadata['base']['encryptedFiles']
encrypted_files = (
encrypted_files if isinstance(encrypted_files, str)
else encrypted_files[0]
)
sa = ServiceAgreement.from_ddo(service_definition_id, ddo)
consume_url = sa.consume_endpoint
if not consume_url:
logger.error(
'Consume asset failed, service definition is missing the "serviceEndpoint".')
raise AssertionError(
'Consume asset failed, service definition is missing the "serviceEndpoint".')
if ddo.get_service('Authorization'):
secret_store_service = ddo.get_service(service_type=ServiceTypes.AUTHORIZATION)
secret_store_url = secret_store_service.endpoints.service
secret_store.set_secret_store_url(secret_store_url)
decrypted_content_urls = json.loads(
secret_store.decrypt_document(did_to_id(did), encrypted_files)
)
if isinstance(decrypted_content_urls, str):
decrypted_content_urls = [decrypted_content_urls]
logger.debug(f'got decrypted contentUrls: {decrypted_content_urls}')
if not os.path.isabs(destination):
destination = os.path.abspath(destination)
if not os.path.exists(destination):
os.mkdir(destination)
asset_folder = os.path.join(destination,
f'datafile.{did_to_id(did)}.{sa.service_definition_id}')
if not os.path.exists(asset_folder):
os.mkdir(asset_folder)
if index is not None:
assert isinstance(index, int), logger.error('index has to be an integer.')
assert index >= 0, logger.error('index has to be 0 or a positive integer.')
assert index < len(decrypted_content_urls), logger.error(
'index can not be bigger than the number of files')
brizo.consume_service(
service_agreement_id,
consume_url,
consumer_account,
decrypted_content_urls,
asset_folder,
index
)
return asset_folder | [
"def",
"download",
"(",
"service_agreement_id",
",",
"service_definition_id",
",",
"ddo",
",",
"consumer_account",
",",
"destination",
",",
"brizo",
",",
"secret_store",
",",
"index",
"=",
"None",
")",
":",
"did",
"=",
"ddo",
".",
"did",
"encrypted_files",
"=",
"ddo",
".",
"metadata",
"[",
"'base'",
"]",
"[",
"'encryptedFiles'",
"]",
"encrypted_files",
"=",
"(",
"encrypted_files",
"if",
"isinstance",
"(",
"encrypted_files",
",",
"str",
")",
"else",
"encrypted_files",
"[",
"0",
"]",
")",
"sa",
"=",
"ServiceAgreement",
".",
"from_ddo",
"(",
"service_definition_id",
",",
"ddo",
")",
"consume_url",
"=",
"sa",
".",
"consume_endpoint",
"if",
"not",
"consume_url",
":",
"logger",
".",
"error",
"(",
"'Consume asset failed, service definition is missing the \"serviceEndpoint\".'",
")",
"raise",
"AssertionError",
"(",
"'Consume asset failed, service definition is missing the \"serviceEndpoint\".'",
")",
"if",
"ddo",
".",
"get_service",
"(",
"'Authorization'",
")",
":",
"secret_store_service",
"=",
"ddo",
".",
"get_service",
"(",
"service_type",
"=",
"ServiceTypes",
".",
"AUTHORIZATION",
")",
"secret_store_url",
"=",
"secret_store_service",
".",
"endpoints",
".",
"service",
"secret_store",
".",
"set_secret_store_url",
"(",
"secret_store_url",
")",
"# decrypt the contentUrls",
"decrypted_content_urls",
"=",
"json",
".",
"loads",
"(",
"secret_store",
".",
"decrypt_document",
"(",
"did_to_id",
"(",
"did",
")",
",",
"encrypted_files",
")",
")",
"if",
"isinstance",
"(",
"decrypted_content_urls",
",",
"str",
")",
":",
"decrypted_content_urls",
"=",
"[",
"decrypted_content_urls",
"]",
"logger",
".",
"debug",
"(",
"f'got decrypted contentUrls: {decrypted_content_urls}'",
")",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"destination",
")",
":",
"destination",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"destination",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"destination",
")",
":",
"os",
".",
"mkdir",
"(",
"destination",
")",
"asset_folder",
"=",
"os",
".",
"path",
".",
"join",
"(",
"destination",
",",
"f'datafile.{did_to_id(did)}.{sa.service_definition_id}'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"asset_folder",
")",
":",
"os",
".",
"mkdir",
"(",
"asset_folder",
")",
"if",
"index",
"is",
"not",
"None",
":",
"assert",
"isinstance",
"(",
"index",
",",
"int",
")",
",",
"logger",
".",
"error",
"(",
"'index has to be an integer.'",
")",
"assert",
"index",
">=",
"0",
",",
"logger",
".",
"error",
"(",
"'index has to be 0 or a positive integer.'",
")",
"assert",
"index",
"<",
"len",
"(",
"decrypted_content_urls",
")",
",",
"logger",
".",
"error",
"(",
"'index can not be bigger than the number of files'",
")",
"brizo",
".",
"consume_service",
"(",
"service_agreement_id",
",",
"consume_url",
",",
"consumer_account",
",",
"decrypted_content_urls",
",",
"asset_folder",
",",
"index",
")",
"return",
"asset_folder"
]
| Download asset data files or result files from a compute job.
:param service_agreement_id: Service agreement id, str
:param service_definition_id: identifier of the service inside the asset DDO, str
:param ddo: DDO
:param consumer_account: Account instance of the consumer
:param destination: Path, str
:param brizo: Brizo instance
:param secret_store: SecretStore instance
:param index: Index of the document that is going to be downloaded, int
:return: Asset folder path, str | [
"Download",
"asset",
"data",
"files",
"or",
"result",
"files",
"from",
"a",
"compute",
"job",
"."
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/assets/asset_consumer.py#L20-L85 |
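A hedged call sketch for the download above; agreement_id, sa, ddo, consumer_account, brizo and secret_store are assumed to be already-initialised squid-py objects from an earlier flow:

folder = AssetConsumer.download(
    service_agreement_id=agreement_id,
    service_definition_id=sa.service_definition_id,
    ddo=ddo,
    consumer_account=consumer_account,
    destination='downloads',   # relative paths are made absolute internally
    brizo=brizo,
    secret_store=secret_store,
    index=0,                   # optional; must satisfy 0 <= index < number of files
)
print(folder)                  # downloads/datafile.<asset-id>.<service-definition-id>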
oceanprotocol/squid-py | squid_py/ddo/public_key_base.py | PublicKeyBase.assign_did | def assign_did(self, did):
"""
assign the DID as the key id, if the DID does not have a '#value'
at the end, then automatically add a new key value
"""
if re.match('^#.*', self._id):
self._id = did + self._id
if re.match('^#.*', self._owner):
self._owner = did + self._owner | python | def assign_did(self, did):
if re.match('^#.*', self._id):
    self._id = did + self._id
if re.match('^#.*', self._owner):
    self._owner = did + self._owner
"def",
"assign_did",
"(",
"self",
",",
"did",
")",
":",
"if",
"re",
".",
"match",
"(",
"'^#.*'",
",",
"self",
".",
"_id",
")",
":",
"self",
".",
"_id",
"=",
"did",
"+",
"self",
".",
"_id",
"if",
"re",
".",
"match",
"(",
"'^#.*'",
",",
"self",
".",
"_owner",
")",
":",
"self",
".",
"_owner",
"=",
"did",
"+",
"self",
".",
"_owner"
]
| assign the DID as the key id, if the DID does not have a '#value'
at the end, then automatically add a new key value | [
"assign",
"the",
"DID",
"as",
"the",
"key",
"id",
"if",
"the",
"DID",
"does",
"not",
"have",
"a",
"#value",
"at",
"the",
"end",
"then",
"automatically",
"add",
"a",
"new",
"key",
"value"
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/ddo/public_key_base.py#L36-L44 |
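A self-contained illustration of the '#fragment' expansion rule used by assign_did; the DID values are placeholders:

import re

def expand(identifier, did):
    # Mirrors the rule above: prepend the DID only to bare '#fragment' ids.
    return did + identifier if re.match('^#.*', identifier) else identifier

assert expand('#keys-1', 'did:op:0123') == 'did:op:0123#keys-1'
assert expand('did:op:9999#keys-1', 'did:op:0123') == 'did:op:9999#keys-1'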
oceanprotocol/squid-py | squid_py/ddo/public_key_base.py | PublicKeyBase.set_key_value | def set_key_value(self, value, store_type=PUBLIC_KEY_STORE_TYPE_BASE64):
"""Set the key value based on it's storage type."""
if isinstance(value, dict):
if PUBLIC_KEY_STORE_TYPE_HEX in value:
self.set_key_value(value[PUBLIC_KEY_STORE_TYPE_HEX], PUBLIC_KEY_STORE_TYPE_HEX)
elif PUBLIC_KEY_STORE_TYPE_BASE64 in value:
self.set_key_value(value[PUBLIC_KEY_STORE_TYPE_BASE64],
PUBLIC_KEY_STORE_TYPE_BASE64)
elif PUBLIC_KEY_STORE_TYPE_BASE85 in value:
self.set_key_value(value[PUBLIC_KEY_STORE_TYPE_BASE85],
PUBLIC_KEY_STORE_TYPE_BASE85)
elif PUBLIC_KEY_STORE_TYPE_JWK in value:
self.set_key_value(value[PUBLIC_KEY_STORE_TYPE_JWK], PUBLIC_KEY_STORE_TYPE_JWK)
elif PUBLIC_KEY_STORE_TYPE_PEM in value:
self.set_key_value(value[PUBLIC_KEY_STORE_TYPE_PEM], PUBLIC_KEY_STORE_TYPE_PEM)
else:
self._value = value
self._store_type = store_type | python | def set_key_value(self, value, store_type=PUBLIC_KEY_STORE_TYPE_BASE64):
if isinstance(value, dict):
if PUBLIC_KEY_STORE_TYPE_HEX in value:
self.set_key_value(value[PUBLIC_KEY_STORE_TYPE_HEX], PUBLIC_KEY_STORE_TYPE_HEX)
elif PUBLIC_KEY_STORE_TYPE_BASE64 in value:
self.set_key_value(value[PUBLIC_KEY_STORE_TYPE_BASE64],
PUBLIC_KEY_STORE_TYPE_BASE64)
elif PUBLIC_KEY_STORE_TYPE_BASE85 in value:
self.set_key_value(value[PUBLIC_KEY_STORE_TYPE_BASE85],
PUBLIC_KEY_STORE_TYPE_BASE85)
elif PUBLIC_KEY_STORE_TYPE_JWK in value:
self.set_key_value(value[PUBLIC_KEY_STORE_TYPE_JWK], PUBLIC_KEY_STORE_TYPE_JWK)
elif PUBLIC_KEY_STORE_TYPE_PEM in value:
self.set_key_value(value[PUBLIC_KEY_STORE_TYPE_PEM], PUBLIC_KEY_STORE_TYPE_PEM)
else:
self._value = value
self._store_type = store_type | [
"def",
"set_key_value",
"(",
"self",
",",
"value",
",",
"store_type",
"=",
"PUBLIC_KEY_STORE_TYPE_BASE64",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"if",
"PUBLIC_KEY_STORE_TYPE_HEX",
"in",
"value",
":",
"self",
".",
"set_key_value",
"(",
"value",
"[",
"PUBLIC_KEY_STORE_TYPE_HEX",
"]",
",",
"PUBLIC_KEY_STORE_TYPE_HEX",
")",
"elif",
"PUBLIC_KEY_STORE_TYPE_BASE64",
"in",
"value",
":",
"self",
".",
"set_key_value",
"(",
"value",
"[",
"PUBLIC_KEY_STORE_TYPE_BASE64",
"]",
",",
"PUBLIC_KEY_STORE_TYPE_BASE64",
")",
"elif",
"PUBLIC_KEY_STORE_TYPE_BASE85",
"in",
"value",
":",
"self",
".",
"set_key_value",
"(",
"value",
"[",
"PUBLIC_KEY_STORE_TYPE_BASE85",
"]",
",",
"PUBLIC_KEY_STORE_TYPE_BASE85",
")",
"elif",
"PUBLIC_KEY_STORE_TYPE_JWK",
"in",
"value",
":",
"self",
".",
"set_key_value",
"(",
"value",
"[",
"PUBLIC_KEY_STORE_TYPE_JWK",
"]",
",",
"PUBLIC_KEY_STORE_TYPE_JWK",
")",
"elif",
"PUBLIC_KEY_STORE_TYPE_PEM",
"in",
"value",
":",
"self",
".",
"set_key_value",
"(",
"value",
"[",
"PUBLIC_KEY_STORE_TYPE_PEM",
"]",
",",
"PUBLIC_KEY_STORE_TYPE_PEM",
")",
"else",
":",
"self",
".",
"_value",
"=",
"value",
"self",
".",
"_store_type",
"=",
"store_type"
]
| Set the key value based on its storage type. | [
"Set",
"the",
"key",
"value",
"based",
"on",
"it",
"s",
"storage",
"type",
"."
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/ddo/public_key_base.py#L61-L78 |
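A hedged usage sketch for the dispatch above; `public_key` is assumed to be an instance of a PublicKeyBase subclass, and the store-type constants are the module-level names referenced in the method (their string values are not shown in this record):

public_key.set_key_value({PUBLIC_KEY_STORE_TYPE_HEX: 'deadbeef'})  # routed to the hex branch
public_key.set_key_value('some-value')                             # plain value, default base64 store type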
oceanprotocol/squid-py | squid_py/ddo/public_key_base.py | PublicKeyBase.set_encode_key_value | def set_encode_key_value(self, value, store_type):
"""Save the key value base on it's storage type."""
self._store_type = store_type
if store_type == PUBLIC_KEY_STORE_TYPE_HEX:
self._value = value.hex()
elif store_type == PUBLIC_KEY_STORE_TYPE_BASE64:
self._value = b64encode(value).decode()
elif store_type == PUBLIC_KEY_STORE_TYPE_BASE85:
self._value = b85encode(value).decode()
elif store_type == PUBLIC_KEY_STORE_TYPE_JWK:
# TODO: need to decide on which jwk library to import?
raise NotImplementedError
else:
self._value = value
return value | python | def set_encode_key_value(self, value, store_type):
self._store_type = store_type
if store_type == PUBLIC_KEY_STORE_TYPE_HEX:
self._value = value.hex()
elif store_type == PUBLIC_KEY_STORE_TYPE_BASE64:
self._value = b64encode(value).decode()
elif store_type == PUBLIC_KEY_STORE_TYPE_BASE85:
self._value = b85encode(value).decode()
elif store_type == PUBLIC_KEY_STORE_TYPE_JWK:
raise NotImplementedError
else:
self._value = value
return value | [
"def",
"set_encode_key_value",
"(",
"self",
",",
"value",
",",
"store_type",
")",
":",
"self",
".",
"_store_type",
"=",
"store_type",
"if",
"store_type",
"==",
"PUBLIC_KEY_STORE_TYPE_HEX",
":",
"self",
".",
"_value",
"=",
"value",
".",
"hex",
"(",
")",
"elif",
"store_type",
"==",
"PUBLIC_KEY_STORE_TYPE_BASE64",
":",
"self",
".",
"_value",
"=",
"b64encode",
"(",
"value",
")",
".",
"decode",
"(",
")",
"elif",
"store_type",
"==",
"PUBLIC_KEY_STORE_TYPE_BASE85",
":",
"self",
".",
"_value",
"=",
"b85encode",
"(",
"value",
")",
".",
"decode",
"(",
")",
"elif",
"store_type",
"==",
"PUBLIC_KEY_STORE_TYPE_JWK",
":",
"# TODO: need to decide on which jwk library to import?",
"raise",
"NotImplementedError",
"else",
":",
"self",
".",
"_value",
"=",
"value",
"return",
"value"
]
| Save the key value based on its storage type. | [
"Save",
"the",
"key",
"value",
"base",
"on",
"it",
"s",
"storage",
"type",
"."
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/ddo/public_key_base.py#L80-L94 |
oceanprotocol/squid-py | squid_py/ddo/public_key_base.py | PublicKeyBase.get_decode_value | def get_decode_value(self):
"""Return the key value based on it's storage type."""
if self._store_type == PUBLIC_KEY_STORE_TYPE_HEX:
value = bytes.fromhex(self._value)
elif self._store_type == PUBLIC_KEY_STORE_TYPE_BASE64:
value = b64decode(self._value)
elif self._store_type == PUBLIC_KEY_STORE_TYPE_BASE85:
value = b85decode(self._value)
elif self._store_type == PUBLIC_KEY_STORE_TYPE_JWK:
# TODO: need to decide on which jwk library to import?
raise NotImplementedError
else:
value = self._value
return value | python | def get_decode_value(self):
if self._store_type == PUBLIC_KEY_STORE_TYPE_HEX:
value = bytes.fromhex(self._value)
elif self._store_type == PUBLIC_KEY_STORE_TYPE_BASE64:
value = b64decode(self._value)
elif self._store_type == PUBLIC_KEY_STORE_TYPE_BASE85:
value = b85decode(self._value)
elif self._store_type == PUBLIC_KEY_STORE_TYPE_JWK:
raise NotImplementedError
else:
value = self._value
return value | [
"def",
"get_decode_value",
"(",
"self",
")",
":",
"if",
"self",
".",
"_store_type",
"==",
"PUBLIC_KEY_STORE_TYPE_HEX",
":",
"value",
"=",
"bytes",
".",
"fromhex",
"(",
"self",
".",
"_value",
")",
"elif",
"self",
".",
"_store_type",
"==",
"PUBLIC_KEY_STORE_TYPE_BASE64",
":",
"value",
"=",
"b64decode",
"(",
"self",
".",
"_value",
")",
"elif",
"self",
".",
"_store_type",
"==",
"PUBLIC_KEY_STORE_TYPE_BASE85",
":",
"value",
"=",
"b85decode",
"(",
"self",
".",
"_value",
")",
"elif",
"self",
".",
"_store_type",
"==",
"PUBLIC_KEY_STORE_TYPE_JWK",
":",
"# TODO: need to decide on which jwk library to import?",
"raise",
"NotImplementedError",
"else",
":",
"value",
"=",
"self",
".",
"_value",
"return",
"value"
]
| Return the key value based on its storage type. | [
"Return",
"the",
"key",
"value",
"based",
"on",
"it",
"s",
"storage",
"type",
"."
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/ddo/public_key_base.py#L96-L109 |
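A hedged round-trip sketch combining set_encode_key_value and get_decode_value; `public_key` is again an assumed PublicKeyBase subclass instance:

raw = b'\x01\x02\x03'
public_key.set_encode_key_value(raw, PUBLIC_KEY_STORE_TYPE_BASE64)  # stored as the text 'AQID'
assert public_key.get_decode_value() == raw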
oceanprotocol/squid-py | squid_py/ddo/public_key_base.py | PublicKeyBase.as_text | def as_text(self, is_pretty=False):
"""Return the key as JSON text."""
values = {'id': self._id, 'type': self._type}
if self._owner:
values['owner'] = self._owner
if is_pretty:
return json.dumps(values, indent=4, separators=(',', ': '))
return json.dumps(values) | python | def as_text(self, is_pretty=False):
values = {'id': self._id, 'type': self._type}
if self._owner:
values['owner'] = self._owner
if is_pretty:
return json.dumps(values, indent=4, separators=(',', ': '))
return json.dumps(values) | [
"def",
"as_text",
"(",
"self",
",",
"is_pretty",
"=",
"False",
")",
":",
"values",
"=",
"{",
"'id'",
":",
"self",
".",
"_id",
",",
"'type'",
":",
"self",
".",
"_type",
"}",
"if",
"self",
".",
"_owner",
":",
"values",
"[",
"'owner'",
"]",
"=",
"self",
".",
"_owner",
"if",
"is_pretty",
":",
"return",
"json",
".",
"dumps",
"(",
"values",
",",
"indent",
"=",
"4",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")",
"return",
"json",
".",
"dumps",
"(",
"values",
")"
]
| Return the key as JSON text. | [
"Return",
"the",
"key",
"as",
"JSON",
"text",
"."
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/ddo/public_key_base.py#L115-L124 |
oceanprotocol/squid-py | squid_py/ddo/public_key_base.py | PublicKeyBase.as_dictionary | def as_dictionary(self):
"""Return the key as a python dictionary."""
values = {
'id': self._id,
'type': self._type
}
if self._owner:
values['owner'] = self._owner
return values | python | def as_dictionary(self):
values = {
'id': self._id,
'type': self._type
}
if self._owner:
values['owner'] = self._owner
return values | [
"def",
"as_dictionary",
"(",
"self",
")",
":",
"values",
"=",
"{",
"'id'",
":",
"self",
".",
"_id",
",",
"'type'",
":",
"self",
".",
"_type",
"}",
"if",
"self",
".",
"_owner",
":",
"values",
"[",
"'owner'",
"]",
"=",
"self",
".",
"_owner",
"return",
"values"
]
| Return the key as a python dictionary. | [
"Return",
"the",
"key",
"as",
"a",
"python",
"dictionary",
"."
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/ddo/public_key_base.py#L126-L135 |
oceanprotocol/squid-py | squid_py/keeper/agreements/agreement_manager.py | AgreementStoreManager.create_agreement | def create_agreement(self, agreement_id, did, condition_types, condition_ids, time_locks,
time_outs):
"""
Create a new agreement.
The agreement will create conditions of conditionType with conditionId.
Only "approved" templates can access this function.
:param agreement_id: id of the agreement, hex str
:param did: DID of the asset. The DID must be registered beforehand, bytes32
:param condition_types: is a list of addresses that point to Condition contracts,
list(address)
:param condition_ids: is a list of bytes32 content-addressed Condition IDs, bytes32
:param time_locks: is a list of uint time lock values associated to each Condition, int
:param time_outs: is a list of uint time out values associated to each Condition, int
:return: bool
"""
tx_hash = self.contract_concise.createAgreement(
agreement_id,
did,
condition_types,
condition_ids,
time_locks,
time_outs,
)
receipt = self.get_tx_receipt(tx_hash)
return receipt and receipt.status == 1 | python | def create_agreement(self, agreement_id, did, condition_types, condition_ids, time_locks,
time_outs):
tx_hash = self.contract_concise.createAgreement(
agreement_id,
did,
condition_types,
condition_ids,
time_locks,
time_outs,
)
receipt = self.get_tx_receipt(tx_hash)
return receipt and receipt.status == 1 | [
"def",
"create_agreement",
"(",
"self",
",",
"agreement_id",
",",
"did",
",",
"condition_types",
",",
"condition_ids",
",",
"time_locks",
",",
"time_outs",
")",
":",
"tx_hash",
"=",
"self",
".",
"contract_concise",
".",
"createAgreement",
"(",
"agreement_id",
",",
"did",
",",
"condition_types",
",",
"condition_ids",
",",
"time_locks",
",",
"time_outs",
",",
")",
"receipt",
"=",
"self",
".",
"get_tx_receipt",
"(",
"tx_hash",
")",
"return",
"receipt",
"and",
"receipt",
".",
"status",
"==",
"1"
]
| Create a new agreement.
The agreement will create conditions of conditionType with conditionId.
Only "approved" templates can access this function.
:param agreement_id: id of the agreement, hex str
:param did: DID of the asset. The DID must be registered beforehand, bytes32
:param condition_types: is a list of addresses that point to Condition contracts,
list(address)
:param condition_ids: is a list of bytes32 content-addressed Condition IDs, bytes32
:param time_locks: is a list of uint time lock values associated to each Condition, int
:param time_outs: is a list of uint time out values associated to each Condition, int
:return: bool | [
"Create",
"a",
"new",
"agreement",
".",
"The",
"agreement",
"will",
"create",
"conditions",
"of",
"conditionType",
"with",
"conditionId",
".",
"Only",
"approved",
"templates",
"can",
"access",
"this",
"function",
".",
":",
"param",
"agreement_id",
":",
"id",
"of",
"the",
"agreement",
"hex",
"str",
":",
"param",
"did",
":",
"DID",
"of",
"the",
"asset",
".",
"The",
"DID",
"must",
"be",
"registered",
"beforehand",
"bytes32",
":",
"param",
"condition_types",
":",
"is",
"a",
"list",
"of",
"addresses",
"that",
"point",
"to",
"Condition",
"contracts",
"list",
"(",
"address",
")",
":",
"param",
"condition_ids",
":",
"is",
"a",
"list",
"of",
"bytes32",
"content",
"-",
"addressed",
"Condition",
"IDs",
"bytes32",
":",
"param",
"time_locks",
":",
"is",
"a",
"list",
"of",
"uint",
"time",
"lock",
"values",
"associated",
"to",
"each",
"Condition",
"int",
":",
"param",
"time_outs",
":",
"is",
"a",
"list",
"of",
"uint",
"time",
"out",
"values",
"associated",
"to",
"each",
"Condition",
"int",
":",
"return",
":",
"bool"
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/keeper/agreements/agreement_manager.py#L21-L46 |
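A hedged call sketch for create_agreement; the four per-condition lists must stay index-aligned, and every identifier below is an illustrative placeholder rather than a real on-chain value:

ok = agreement_store_manager.create_agreement(
    agreement_id=agreement_id,         # hex str
    did=asset_id_bytes32,              # bytes32 of a DID registered beforehand
    condition_types=[lock_addr, access_addr, escrow_addr],
    condition_ids=[lock_id, access_id, escrow_id],
    time_locks=[0, 0, 0],
    time_outs=[0, 0, 0],
)
assert ok                              # True only when the receipt status is 1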
oceanprotocol/squid-py | squid_py/keeper/agreements/agreement_manager.py | AgreementStoreManager.get_agreement | def get_agreement(self, agreement_id):
"""
Retrieve the agreement for a agreement_id.
:param agreement_id: id of the agreement, hex str
:return: the agreement attributes.
"""
agreement = self.contract_concise.getAgreement(agreement_id)
if agreement and len(agreement) == 6:
agreement = AgreementValues(*agreement)
did = add_0x_prefix(agreement.did.hex())
cond_ids = [add_0x_prefix(_id.hex()) for _id in agreement.condition_ids]
return AgreementValues(
did,
agreement.owner,
agreement.template_id,
cond_ids,
agreement.updated_by,
agreement.block_number_updated
)
return None | python | def get_agreement(self, agreement_id):
agreement = self.contract_concise.getAgreement(agreement_id)
if agreement and len(agreement) == 6:
agreement = AgreementValues(*agreement)
did = add_0x_prefix(agreement.did.hex())
cond_ids = [add_0x_prefix(_id.hex()) for _id in agreement.condition_ids]
return AgreementValues(
did,
agreement.owner,
agreement.template_id,
cond_ids,
agreement.updated_by,
agreement.block_number_updated
)
return None | [
"def",
"get_agreement",
"(",
"self",
",",
"agreement_id",
")",
":",
"agreement",
"=",
"self",
".",
"contract_concise",
".",
"getAgreement",
"(",
"agreement_id",
")",
"if",
"agreement",
"and",
"len",
"(",
"agreement",
")",
"==",
"6",
":",
"agreement",
"=",
"AgreementValues",
"(",
"*",
"agreement",
")",
"did",
"=",
"add_0x_prefix",
"(",
"agreement",
".",
"did",
".",
"hex",
"(",
")",
")",
"cond_ids",
"=",
"[",
"add_0x_prefix",
"(",
"_id",
".",
"hex",
"(",
")",
")",
"for",
"_id",
"in",
"agreement",
".",
"condition_ids",
"]",
"return",
"AgreementValues",
"(",
"did",
",",
"agreement",
".",
"owner",
",",
"agreement",
".",
"template_id",
",",
"cond_ids",
",",
"agreement",
".",
"updated_by",
",",
"agreement",
".",
"block_number_updated",
")",
"return",
"None"
]
| Retrieve the agreement for a agreement_id.
:param agreement_id: id of the agreement, hex str
:return: the agreement attributes. | [
"Retrieve",
"the",
"agreement",
"for",
"a",
"agreement_id",
"."
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/keeper/agreements/agreement_manager.py#L48-L70 |
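A hedged read sketch for get_agreement; the field names follow the AgreementValues construction in the method above:

values = agreement_store_manager.get_agreement(agreement_id)
if values is not None:
    print(values.did, values.owner, values.template_id)
    print(values.condition_ids)   # 0x-prefixed hex condition ids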
oceanprotocol/squid-py | squid_py/keeper/contract_handler.py | ContractHandler._load | def _load(contract_name):
"""Retrieve the contract instance for `contract_name` that represent the smart
contract in the keeper network.
:param contract_name: str name of the solidity keeper contract without the network name.
:return: web3.eth.Contract instance
"""
contract_definition = ContractHandler.get_contract_dict_by_name(contract_name)
address = Web3Provider.get_web3().toChecksumAddress(contract_definition['address'])
abi = contract_definition['abi']
contract = Web3Provider.get_web3().eth.contract(address=address, abi=abi)
ContractHandler._contracts[contract_name] = (contract, ConciseContract(contract))
return ContractHandler._contracts[contract_name] | python | def _load(contract_name):
contract_definition = ContractHandler.get_contract_dict_by_name(contract_name)
address = Web3Provider.get_web3().toChecksumAddress(contract_definition['address'])
abi = contract_definition['abi']
contract = Web3Provider.get_web3().eth.contract(address=address, abi=abi)
ContractHandler._contracts[contract_name] = (contract, ConciseContract(contract))
return ContractHandler._contracts[contract_name] | [
"def",
"_load",
"(",
"contract_name",
")",
":",
"contract_definition",
"=",
"ContractHandler",
".",
"get_contract_dict_by_name",
"(",
"contract_name",
")",
"address",
"=",
"Web3Provider",
".",
"get_web3",
"(",
")",
".",
"toChecksumAddress",
"(",
"contract_definition",
"[",
"'address'",
"]",
")",
"abi",
"=",
"contract_definition",
"[",
"'abi'",
"]",
"contract",
"=",
"Web3Provider",
".",
"get_web3",
"(",
")",
".",
"eth",
".",
"contract",
"(",
"address",
"=",
"address",
",",
"abi",
"=",
"abi",
")",
"ContractHandler",
".",
"_contracts",
"[",
"contract_name",
"]",
"=",
"(",
"contract",
",",
"ConciseContract",
"(",
"contract",
")",
")",
"return",
"ContractHandler",
".",
"_contracts",
"[",
"contract_name",
"]"
]
| Retrieve the contract instance for `contract_name` that represent the smart
contract in the keeper network.
:param contract_name: str name of the solidity keeper contract without the network name.
:return: web3.eth.Contract instance | [
"Retrieve",
"the",
"contract",
"instance",
"for",
"contract_name",
"that",
"represent",
"the",
"smart",
"contract",
"in",
"the",
"keeper",
"network",
"."
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/keeper/contract_handler.py#L70-L82 |
oceanprotocol/squid-py | squid_py/keeper/contract_handler.py | ContractHandler.get_contract_dict_by_name | def get_contract_dict_by_name(contract_name):
"""
Retrieve the Contract instance for a given contract name.
:param contract_name: str
:return: the smart contract's definition from the json abi file, dict
"""
network_name = Keeper.get_network_name(Keeper.get_network_id()).lower()
artifacts_path = ConfigProvider.get_config().keeper_path
# file_name = '{}.{}.json'.format(contract_name, network_name)
# path = os.path.join(keeper.artifacts_path, file_name)
path = ContractHandler._get_contract_file_path(
artifacts_path, contract_name, network_name)
if not (path and os.path.exists(path)):
path = ContractHandler._get_contract_file_path(
artifacts_path, contract_name, network_name.lower())
if not (path and os.path.exists(path)):
path = ContractHandler._get_contract_file_path(
artifacts_path, contract_name, Keeper.DEFAULT_NETWORK_NAME)
if not (path and os.path.exists(path)):
raise FileNotFoundError(
f'Keeper contract {contract_name} file '
f'not found in {artifacts_path} '
f'using network name {network_name}'
)
with open(path) as f:
contract_dict = json.loads(f.read())
return contract_dict | python | def get_contract_dict_by_name(contract_name):
network_name = Keeper.get_network_name(Keeper.get_network_id()).lower()
artifacts_path = ConfigProvider.get_config().keeper_path
path = ContractHandler._get_contract_file_path(
artifacts_path, contract_name, network_name)
if not (path and os.path.exists(path)):
path = ContractHandler._get_contract_file_path(
artifacts_path, contract_name, network_name.lower())
if not (path and os.path.exists(path)):
path = ContractHandler._get_contract_file_path(
artifacts_path, contract_name, Keeper.DEFAULT_NETWORK_NAME)
if not (path and os.path.exists(path)):
raise FileNotFoundError(
f'Keeper contract {contract_name} file '
f'not found in {artifacts_path} '
f'using network name {network_name}'
)
with open(path) as f:
contract_dict = json.loads(f.read())
return contract_dict | [
"def",
"get_contract_dict_by_name",
"(",
"contract_name",
")",
":",
"network_name",
"=",
"Keeper",
".",
"get_network_name",
"(",
"Keeper",
".",
"get_network_id",
"(",
")",
")",
".",
"lower",
"(",
")",
"artifacts_path",
"=",
"ConfigProvider",
".",
"get_config",
"(",
")",
".",
"keeper_path",
"# file_name = '{}.{}.json'.format(contract_name, network_name)",
"# path = os.path.join(keeper.artifacts_path, file_name)",
"path",
"=",
"ContractHandler",
".",
"_get_contract_file_path",
"(",
"artifacts_path",
",",
"contract_name",
",",
"network_name",
")",
"if",
"not",
"(",
"path",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
")",
":",
"path",
"=",
"ContractHandler",
".",
"_get_contract_file_path",
"(",
"artifacts_path",
",",
"contract_name",
",",
"network_name",
".",
"lower",
"(",
")",
")",
"if",
"not",
"(",
"path",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
")",
":",
"path",
"=",
"ContractHandler",
".",
"_get_contract_file_path",
"(",
"artifacts_path",
",",
"contract_name",
",",
"Keeper",
".",
"DEFAULT_NETWORK_NAME",
")",
"if",
"not",
"(",
"path",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
")",
":",
"raise",
"FileNotFoundError",
"(",
"f'Keeper contract {contract_name} file '",
"f'not found in {artifacts_path} '",
"f'using network name {network_name}'",
")",
"with",
"open",
"(",
"path",
")",
"as",
"f",
":",
"contract_dict",
"=",
"json",
".",
"loads",
"(",
"f",
".",
"read",
"(",
")",
")",
"return",
"contract_dict"
]
| Retrieve the Contract instance for a given contract name.
:param contract_name: str
:return: the smart contract's definition from the json abi file, dict | [
"Retrieve",
"the",
"Contract",
"instance",
"for",
"a",
"given",
"contract",
"name",
"."
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/keeper/contract_handler.py#L94-L126 |
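A self-contained illustration of the three-step artifact lookup above (exact network name, lower-cased name, then the keeper default); the file-name pattern is assumed from the commented-out line in the record:

import os

def resolve_artifact(artifacts_path, contract_name, network_name, default_name):
    # Try the candidate names in the same order as get_contract_dict_by_name.
    for name in (network_name, network_name.lower(), default_name):
        path = os.path.join(artifacts_path, f'{contract_name}.{name}.json')
        if os.path.exists(path):
            return path
    raise FileNotFoundError(f'{contract_name} not found in {artifacts_path}')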
oceanprotocol/squid-py | examples/buy_asset.py | buy_asset | def buy_asset():
"""
Requires all ocean services running.
"""
ConfigProvider.set_config(ExampleConfig.get_config())
config = ConfigProvider.get_config()
# make ocean instance
ocn = Ocean()
acc = get_publisher_account(config)
if not acc:
acc = ([acc for acc in ocn.accounts.list() if acc.password] or ocn.accounts.list())[0]
# Register ddo
ddo = ocn.assets.create(Metadata.get_example(), acc, providers=[acc.address], use_secret_store=False)
logging.info(f'registered ddo: {ddo.did}')
# ocn here will be used only to publish the asset. Handling the asset by the publisher
# will be performed by the Brizo server running locally
keeper = Keeper.get_instance()
if 'TEST_LOCAL_NILE' in os.environ and os.environ['TEST_LOCAL_NILE'] == '1':
provider = keeper.did_registry.to_checksum_address(
'0x413c9ba0a05b8a600899b41b0c62dd661e689354'
)
keeper.did_registry.add_provider(ddo.asset_id, provider, acc)
logging.debug(f'is did provider: '
f'{keeper.did_registry.is_did_provider(ddo.asset_id, provider)}')
cons_ocn = Ocean()
consumer_account = get_account_from_config(config, 'parity.address1', 'parity.password1')
# sign agreement using the registered asset did above
service = ddo.get_service(service_type=ServiceTypes.ASSET_ACCESS)
# This will send the order request to Brizo which in turn will execute the agreement on-chain
cons_ocn.accounts.request_tokens(consumer_account, 100)
sa = ServiceAgreement.from_service_dict(service.as_dictionary())
agreement_id = cons_ocn.assets.order(
ddo.did, sa.service_definition_id, consumer_account)
logging.info('placed order: %s, %s', ddo.did, agreement_id)
i = 0
while ocn.agreements.is_access_granted(
agreement_id, ddo.did, consumer_account.address) is not True and i < 30:
time.sleep(1)
i += 1
assert ocn.agreements.is_access_granted(agreement_id, ddo.did, consumer_account.address)
ocn.assets.consume(
agreement_id,
ddo.did,
sa.service_definition_id,
consumer_account,
config.downloads_path)
logging.info('Success buying asset.') | python | def buy_asset():
ConfigProvider.set_config(ExampleConfig.get_config())
config = ConfigProvider.get_config()
ocn = Ocean()
acc = get_publisher_account(config)
if not acc:
acc = ([acc for acc in ocn.accounts.list() if acc.password] or ocn.accounts.list())[0]
ddo = ocn.assets.create(Metadata.get_example(), acc, providers=[acc.address], use_secret_store=False)
logging.info(f'registered ddo: {ddo.did}')
keeper = Keeper.get_instance()
if 'TEST_LOCAL_NILE' in os.environ and os.environ['TEST_LOCAL_NILE'] == '1':
provider = keeper.did_registry.to_checksum_address(
'0x413c9ba0a05b8a600899b41b0c62dd661e689354'
)
keeper.did_registry.add_provider(ddo.asset_id, provider, acc)
logging.debug(f'is did provider: '
f'{keeper.did_registry.is_did_provider(ddo.asset_id, provider)}')
cons_ocn = Ocean()
consumer_account = get_account_from_config(config, 'parity.address1', 'parity.password1')
service = ddo.get_service(service_type=ServiceTypes.ASSET_ACCESS)
cons_ocn.accounts.request_tokens(consumer_account, 100)
sa = ServiceAgreement.from_service_dict(service.as_dictionary())
agreement_id = cons_ocn.assets.order(
ddo.did, sa.service_definition_id, consumer_account)
logging.info('placed order: %s, %s', ddo.did, agreement_id)
i = 0
while ocn.agreements.is_access_granted(
agreement_id, ddo.did, consumer_account.address) is not True and i < 30:
time.sleep(1)
i += 1
assert ocn.agreements.is_access_granted(agreement_id, ddo.did, consumer_account.address)
ocn.assets.consume(
agreement_id,
ddo.did,
sa.service_definition_id,
consumer_account,
config.downloads_path)
logging.info('Success buying asset.') | [
"def",
"buy_asset",
"(",
")",
":",
"ConfigProvider",
".",
"set_config",
"(",
"ExampleConfig",
".",
"get_config",
"(",
")",
")",
"config",
"=",
"ConfigProvider",
".",
"get_config",
"(",
")",
"# make ocean instance",
"ocn",
"=",
"Ocean",
"(",
")",
"acc",
"=",
"get_publisher_account",
"(",
"config",
")",
"if",
"not",
"acc",
":",
"acc",
"=",
"(",
"[",
"acc",
"for",
"acc",
"in",
"ocn",
".",
"accounts",
".",
"list",
"(",
")",
"if",
"acc",
".",
"password",
"]",
"or",
"ocn",
".",
"accounts",
".",
"list",
"(",
")",
")",
"[",
"0",
"]",
"# Register ddo",
"ddo",
"=",
"ocn",
".",
"assets",
".",
"create",
"(",
"Metadata",
".",
"get_example",
"(",
")",
",",
"acc",
",",
"providers",
"=",
"[",
"acc",
".",
"address",
"]",
",",
"use_secret_store",
"=",
"False",
")",
"logging",
".",
"info",
"(",
"f'registered ddo: {ddo.did}'",
")",
"# ocn here will be used only to publish the asset. Handling the asset by the publisher",
"# will be performed by the Brizo server running locally",
"keeper",
"=",
"Keeper",
".",
"get_instance",
"(",
")",
"if",
"'TEST_LOCAL_NILE'",
"in",
"os",
".",
"environ",
"and",
"os",
".",
"environ",
"[",
"'TEST_LOCAL_NILE'",
"]",
"==",
"'1'",
":",
"provider",
"=",
"keeper",
".",
"did_registry",
".",
"to_checksum_address",
"(",
"'0x413c9ba0a05b8a600899b41b0c62dd661e689354'",
")",
"keeper",
".",
"did_registry",
".",
"add_provider",
"(",
"ddo",
".",
"asset_id",
",",
"provider",
",",
"acc",
")",
"logging",
".",
"debug",
"(",
"f'is did provider: '",
"f'{keeper.did_registry.is_did_provider(ddo.asset_id, provider)}'",
")",
"cons_ocn",
"=",
"Ocean",
"(",
")",
"consumer_account",
"=",
"get_account_from_config",
"(",
"config",
",",
"'parity.address1'",
",",
"'parity.password1'",
")",
"# sign agreement using the registered asset did above",
"service",
"=",
"ddo",
".",
"get_service",
"(",
"service_type",
"=",
"ServiceTypes",
".",
"ASSET_ACCESS",
")",
"# This will send the order request to Brizo which in turn will execute the agreement on-chain",
"cons_ocn",
".",
"accounts",
".",
"request_tokens",
"(",
"consumer_account",
",",
"100",
")",
"sa",
"=",
"ServiceAgreement",
".",
"from_service_dict",
"(",
"service",
".",
"as_dictionary",
"(",
")",
")",
"agreement_id",
"=",
"cons_ocn",
".",
"assets",
".",
"order",
"(",
"ddo",
".",
"did",
",",
"sa",
".",
"service_definition_id",
",",
"consumer_account",
")",
"logging",
".",
"info",
"(",
"'placed order: %s, %s'",
",",
"ddo",
".",
"did",
",",
"agreement_id",
")",
"i",
"=",
"0",
"while",
"ocn",
".",
"agreements",
".",
"is_access_granted",
"(",
"agreement_id",
",",
"ddo",
".",
"did",
",",
"consumer_account",
".",
"address",
")",
"is",
"not",
"True",
"and",
"i",
"<",
"30",
":",
"time",
".",
"sleep",
"(",
"1",
")",
"i",
"+=",
"1",
"assert",
"ocn",
".",
"agreements",
".",
"is_access_granted",
"(",
"agreement_id",
",",
"ddo",
".",
"did",
",",
"consumer_account",
".",
"address",
")",
"ocn",
".",
"assets",
".",
"consume",
"(",
"agreement_id",
",",
"ddo",
".",
"did",
",",
"sa",
".",
"service_definition_id",
",",
"consumer_account",
",",
"config",
".",
"downloads_path",
")",
"logging",
".",
"info",
"(",
"'Success buying asset.'",
")"
]
| Requires all ocean services running. | [
"Requires",
"all",
"ocean",
"services",
"running",
"."
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/examples/buy_asset.py#L29-L83 |
oceanprotocol/squid-py | squid_py/keeper/token.py | Token.token_approve | def token_approve(self, spender_address, price, from_account):
"""
Approve the passed address to spend the specified amount of tokens.
:param spender_address: Account address, str
:param price: Asset price, int
:param from_account: Sender account, Account
:return: bool
"""
if not Web3Provider.get_web3().isChecksumAddress(spender_address):
spender_address = Web3Provider.get_web3().toChecksumAddress(spender_address)
tx_hash = self.send_transaction(
'approve',
(spender_address,
price),
transact={'from': from_account.address,
'passphrase': from_account.password}
)
return self.get_tx_receipt(tx_hash).status == 1 | python | def token_approve(self, spender_address, price, from_account):
if not Web3Provider.get_web3().isChecksumAddress(spender_address):
spender_address = Web3Provider.get_web3().toChecksumAddress(spender_address)
tx_hash = self.send_transaction(
'approve',
(spender_address,
price),
transact={'from': from_account.address,
'passphrase': from_account.password}
)
return self.get_tx_receipt(tx_hash).status == 1 | [
"def",
"token_approve",
"(",
"self",
",",
"spender_address",
",",
"price",
",",
"from_account",
")",
":",
"if",
"not",
"Web3Provider",
".",
"get_web3",
"(",
")",
".",
"isChecksumAddress",
"(",
"spender_address",
")",
":",
"spender_address",
"=",
"Web3Provider",
".",
"get_web3",
"(",
")",
".",
"toChecksumAddress",
"(",
"spender_address",
")",
"tx_hash",
"=",
"self",
".",
"send_transaction",
"(",
"'approve'",
",",
"(",
"spender_address",
",",
"price",
")",
",",
"transact",
"=",
"{",
"'from'",
":",
"from_account",
".",
"address",
",",
"'passphrase'",
":",
"from_account",
".",
"password",
"}",
")",
"return",
"self",
".",
"get_tx_receipt",
"(",
"tx_hash",
")",
".",
"status",
"==",
"1"
]
| Approve the passed address to spend the specified amount of tokens.
:param spender_address: Account address, str
:param price: Asset price, int
:param from_account: Sender account, Account
:return: bool | [
"Approve",
"the",
"passed",
"address",
"to",
"spend",
"the",
"specified",
"amount",
"of",
"tokens",
"."
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/keeper/token.py#L31-L50 |
oceanprotocol/squid-py | squid_py/keeper/token.py | Token.transfer | def transfer(self, receiver_address, amount, from_account):
"""
Transfer tokens from one account to the receiver address.
:param receiver_address: Address of the transfer receiver, str
:param amount: Amount of tokens, int
:param from_account: Sender account, Account
:return: bool
"""
tx_hash = self.send_transaction(
'transfer',
(receiver_address,
amount),
transact={'from': from_account.address,
'passphrase': from_account.password}
)
return self.get_tx_receipt(tx_hash).status == 1 | python | def transfer(self, receiver_address, amount, from_account):
tx_hash = self.send_transaction(
'transfer',
(receiver_address,
amount),
transact={'from': from_account.address,
'passphrase': from_account.password}
)
return self.get_tx_receipt(tx_hash).status == 1 | [
"def",
"transfer",
"(",
"self",
",",
"receiver_address",
",",
"amount",
",",
"from_account",
")",
":",
"tx_hash",
"=",
"self",
".",
"send_transaction",
"(",
"'transfer'",
",",
"(",
"receiver_address",
",",
"amount",
")",
",",
"transact",
"=",
"{",
"'from'",
":",
"from_account",
".",
"address",
",",
"'passphrase'",
":",
"from_account",
".",
"password",
"}",
")",
"return",
"self",
".",
"get_tx_receipt",
"(",
"tx_hash",
")",
".",
"status",
"==",
"1"
]
| Transfer tokens from one account to the receiver address.
:param receiver_address: Address of the transfer receiver, str
:param amount: Amount of tokens, int
:param from_account: Sender account, Account
:return: bool | [
"Transfer",
"tokens",
"from",
"one",
"account",
"to",
"the",
"receiver",
"address",
"."
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/keeper/token.py#L52-L68 |
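A hedged usage sketch covering the two Token methods above; `token` is assumed to be a constructed Token keeper instance and `account` a squid-py Account carrying both address and password:

token.token_approve(spender_address, 10, account)   # let spender_address move up to 10 tokens
token.transfer(receiver_address, 5, account)        # move 5 tokens directly; both return bool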
oceanprotocol/squid-py | squid_py/keeper/multi_event_listener.py | MultiEventListener.make_event_filter | def make_event_filter(self, filter_key, filter_value):
"""Create a new event filter."""
event_filter = EventFilter(
self.event_name,
self.event,
{filter_key: filter_value},
from_block=self.from_block,
to_block=self.to_block
)
event_filter.set_poll_interval(0.5)
return event_filter | python | def make_event_filter(self, filter_key, filter_value):
event_filter = EventFilter(
self.event_name,
self.event,
{filter_key: filter_value},
from_block=self.from_block,
to_block=self.to_block
)
event_filter.set_poll_interval(0.5)
return event_filter | [
"def",
"make_event_filter",
"(",
"self",
",",
"filter_key",
",",
"filter_value",
")",
":",
"event_filter",
"=",
"EventFilter",
"(",
"self",
".",
"event_name",
",",
"self",
".",
"event",
",",
"{",
"filter_key",
":",
"filter_value",
"}",
",",
"from_block",
"=",
"self",
".",
"from_block",
",",
"to_block",
"=",
"self",
".",
"to_block",
")",
"event_filter",
".",
"set_poll_interval",
"(",
"0.5",
")",
"return",
"event_filter"
]
| Create a new event filter. | [
"Create",
"a",
"new",
"event",
"filter",
"."
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/keeper/multi_event_listener.py#L33-L43 |
oceanprotocol/squid-py | squid_py/keeper/multi_event_listener.py | MultiEventListener.process_event | def process_event(event_filter, callback, timeout_callback, timeout, args,
start_time=None):
"""
Start to watch one event.
:param event_filter:
:param callback:
:param timeout_callback:
:param timeout:
:param args:
:param start_time:
:return:
"""
try:
events = event_filter.get_all_entries()
if events:
callback(events[0], *args)
return True
except (ValueError, Exception) as err:
# ignore error, but log it
logger.debug(f'Got error grabbing keeper events: {str(err)}')
if timeout:
elapsed = int(datetime.now().timestamp()) - start_time
if elapsed > timeout:
if timeout_callback:
timeout_callback(*args)
else:
callback(None, *args)
return True
return False | python | def process_event(event_filter, callback, timeout_callback, timeout, args,
start_time=None):
try:
events = event_filter.get_all_entries()
if events:
callback(events[0], *args)
return True
except (ValueError, Exception) as err:
logger.debug(f'Got error grabbing keeper events: {str(err)}')
if timeout:
elapsed = int(datetime.now().timestamp()) - start_time
if elapsed > timeout:
if timeout_callback:
timeout_callback(*args)
else:
callback(None, *args)
return True
return False | [
"def",
"process_event",
"(",
"event_filter",
",",
"callback",
",",
"timeout_callback",
",",
"timeout",
",",
"args",
",",
"start_time",
"=",
"None",
")",
":",
"try",
":",
"events",
"=",
"event_filter",
".",
"get_all_entries",
"(",
")",
"if",
"events",
":",
"callback",
"(",
"events",
"[",
"0",
"]",
",",
"*",
"args",
")",
"return",
"True",
"except",
"(",
"ValueError",
",",
"Exception",
")",
"as",
"err",
":",
"# ignore error, but log it",
"logger",
".",
"debug",
"(",
"f'Got error grabbing keeper events: {str(err)}'",
")",
"if",
"timeout",
":",
"elapsed",
"=",
"int",
"(",
"datetime",
".",
"now",
"(",
")",
".",
"timestamp",
"(",
")",
")",
"-",
"start_time",
"if",
"elapsed",
">",
"timeout",
":",
"if",
"timeout_callback",
":",
"timeout_callback",
"(",
"*",
"args",
")",
"else",
":",
"callback",
"(",
"None",
",",
"*",
"args",
")",
"return",
"True",
"return",
"False"
]
| Start to watch one event.
:param event_filter:
:param callback:
:param timeout_callback:
:param timeout:
:param args:
:param start_time:
:return: | [
"Start",
"to",
"watch",
"one",
"event",
"."
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/keeper/multi_event_listener.py#L122-L155 |
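A hedged driver sketch for process_event; the method is non-blocking and reports True once the callback (or the timeout path) has fired, so a caller is assumed to poll it, passing the same start_time on every call:

import time
from datetime import datetime

start = int(datetime.now().timestamp())
while not MultiEventListener.process_event(
        event_filter, on_event, on_timeout, timeout=30, args=(), start_time=start):
    time.sleep(0.5)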
oceanprotocol/squid-py | squid_py/agreements/events/access_secret_store_condition.py | fulfill_access_secret_store_condition | def fulfill_access_secret_store_condition(event, agreement_id, did, service_agreement,
consumer_address, publisher_account):
"""
Fulfill the access condition.
:param event: AttributeDict with the event data.
:param agreement_id: id of the agreement, hex str
:param did: DID, str
:param service_agreement: ServiceAgreement instance
:param consumer_address: ethereum account address of consumer, hex str
:param publisher_account: Account instance of the publisher
"""
logger.debug(f"release reward after event {event}.")
name_to_parameter = {param.name: param for param in
service_agreement.condition_by_name['accessSecretStore'].parameters}
document_id = add_0x_prefix(name_to_parameter['_documentId'].value)
asset_id = add_0x_prefix(did_to_id(did))
assert document_id == asset_id, f'document_id {document_id} <=> asset_id {asset_id} mismatch.'
try:
tx_hash = Keeper.get_instance().access_secret_store_condition.fulfill(
agreement_id, document_id, consumer_address, publisher_account
)
process_tx_receipt(
tx_hash,
Keeper.get_instance().access_secret_store_condition.FULFILLED_EVENT,
'AccessSecretStoreCondition.Fulfilled'
)
except Exception as e:
# logger.error(f'Error when calling grantAccess condition function: {e}')
raise e | python | def fulfill_access_secret_store_condition(event, agreement_id, did, service_agreement,
consumer_address, publisher_account):
logger.debug(f"release reward after event {event}.")
name_to_parameter = {param.name: param for param in
service_agreement.condition_by_name['accessSecretStore'].parameters}
document_id = add_0x_prefix(name_to_parameter['_documentId'].value)
asset_id = add_0x_prefix(did_to_id(did))
assert document_id == asset_id, f'document_id {document_id} <=> asset_id {asset_id} mismatch.'
try:
tx_hash = Keeper.get_instance().access_secret_store_condition.fulfill(
agreement_id, document_id, consumer_address, publisher_account
)
process_tx_receipt(
tx_hash,
Keeper.get_instance().access_secret_store_condition.FULFILLED_EVENT,
'AccessSecretStoreCondition.Fulfilled'
)
except Exception as e:
raise e | [
"def",
"fulfill_access_secret_store_condition",
"(",
"event",
",",
"agreement_id",
",",
"did",
",",
"service_agreement",
",",
"consumer_address",
",",
"publisher_account",
")",
":",
"logger",
".",
"debug",
"(",
"f\"release reward after event {event}.\"",
")",
"name_to_parameter",
"=",
"{",
"param",
".",
"name",
":",
"param",
"for",
"param",
"in",
"service_agreement",
".",
"condition_by_name",
"[",
"'accessSecretStore'",
"]",
".",
"parameters",
"}",
"document_id",
"=",
"add_0x_prefix",
"(",
"name_to_parameter",
"[",
"'_documentId'",
"]",
".",
"value",
")",
"asset_id",
"=",
"add_0x_prefix",
"(",
"did_to_id",
"(",
"did",
")",
")",
"assert",
"document_id",
"==",
"asset_id",
",",
"f'document_id {document_id} <=> asset_id {asset_id} mismatch.'",
"try",
":",
"tx_hash",
"=",
"Keeper",
".",
"get_instance",
"(",
")",
".",
"access_secret_store_condition",
".",
"fulfill",
"(",
"agreement_id",
",",
"document_id",
",",
"consumer_address",
",",
"publisher_account",
")",
"process_tx_receipt",
"(",
"tx_hash",
",",
"Keeper",
".",
"get_instance",
"(",
")",
".",
"access_secret_store_condition",
".",
"FULFILLED_EVENT",
",",
"'AccessSecretStoreCondition.Fulfilled'",
")",
"except",
"Exception",
"as",
"e",
":",
"# logger.error(f'Error when calling grantAccess condition function: {e}')",
"raise",
"e"
]
| Fulfill the access condition.
:param event: AttributeDict with the event data.
:param agreement_id: id of the agreement, hex str
:param did: DID, str
:param service_agreement: ServiceAgreement instance
:param consumer_address: ethereum account address of consumer, hex str
:param publisher_account: Account instance of the publisher | [
"Fulfill",
"the",
"access",
"condition",
"."
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/agreements/events/access_secret_store_condition.py#L16-L45 |
oceanprotocol/squid-py | squid_py/keeper/event_listener.py | EventListener.make_event_filter | def make_event_filter(self):
"""Create a new event filter."""
event_filter = EventFilter(
self.event_name,
self.event,
self.filters,
from_block=self.from_block,
to_block=self.to_block
)
event_filter.set_poll_interval(0.5)
return event_filter | python | def make_event_filter(self):
event_filter = EventFilter(
self.event_name,
self.event,
self.filters,
from_block=self.from_block,
to_block=self.to_block
)
event_filter.set_poll_interval(0.5)
return event_filter | [
"def",
"make_event_filter",
"(",
"self",
")",
":",
"event_filter",
"=",
"EventFilter",
"(",
"self",
".",
"event_name",
",",
"self",
".",
"event",
",",
"self",
".",
"filters",
",",
"from_block",
"=",
"self",
".",
"from_block",
",",
"to_block",
"=",
"self",
".",
"to_block",
")",
"event_filter",
".",
"set_poll_interval",
"(",
"0.5",
")",
"return",
"event_filter"
]
| Create a new event filter. | [
"Create",
"a",
"new",
"event",
"filter",
"."
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/keeper/event_listener.py#L31-L41 |
oceanprotocol/squid-py | squid_py/keeper/event_listener.py | EventListener.watch_one_event | def watch_one_event(event_filter, callback, timeout_callback, timeout, args,
start_time=None):
"""
Start to watch one event.
:param event_filter:
:param callback:
:param timeout_callback:
:param timeout:
:param args:
:param start_time:
:return:
"""
if timeout and not start_time:
start_time = int(datetime.now().timestamp())
if not args:
args = []
while True:
try:
events = event_filter.get_all_entries()
if events:
callback(events[0], *args)
return
except (ValueError, Exception) as err:
# ignore error, but log it
logger.debug(f'Got error grabbing keeper events: {str(err)}')
time.sleep(0.1)
if timeout:
elapsed = int(datetime.now().timestamp()) - start_time
if elapsed > timeout:
if timeout_callback:
timeout_callback(*args)
else:
callback(None, *args)
break | python | def watch_one_event(event_filter, callback, timeout_callback, timeout, args,
start_time=None):
if timeout and not start_time:
start_time = int(datetime.now().timestamp())
if not args:
args = []
while True:
try:
events = event_filter.get_all_entries()
if events:
callback(events[0], *args)
return
except (ValueError, Exception) as err:
logger.debug(f'Got error grabbing keeper events: {str(err)}')
time.sleep(0.1)
if timeout:
elapsed = int(datetime.now().timestamp()) - start_time
if elapsed > timeout:
if timeout_callback:
timeout_callback(*args)
else:
callback(None, *args)
break | [
"def",
"watch_one_event",
"(",
"event_filter",
",",
"callback",
",",
"timeout_callback",
",",
"timeout",
",",
"args",
",",
"start_time",
"=",
"None",
")",
":",
"if",
"timeout",
"and",
"not",
"start_time",
":",
"start_time",
"=",
"int",
"(",
"datetime",
".",
"now",
"(",
")",
".",
"timestamp",
"(",
")",
")",
"if",
"not",
"args",
":",
"args",
"=",
"[",
"]",
"while",
"True",
":",
"try",
":",
"events",
"=",
"event_filter",
".",
"get_all_entries",
"(",
")",
"if",
"events",
":",
"callback",
"(",
"events",
"[",
"0",
"]",
",",
"*",
"args",
")",
"return",
"except",
"(",
"ValueError",
",",
"Exception",
")",
"as",
"err",
":",
"# ignore error, but log it",
"logger",
".",
"debug",
"(",
"f'Got error grabbing keeper events: {str(err)}'",
")",
"time",
".",
"sleep",
"(",
"0.1",
")",
"if",
"timeout",
":",
"elapsed",
"=",
"int",
"(",
"datetime",
".",
"now",
"(",
")",
".",
"timestamp",
"(",
")",
")",
"-",
"start_time",
"if",
"elapsed",
">",
"timeout",
":",
"if",
"timeout_callback",
":",
"timeout_callback",
"(",
"*",
"args",
")",
"else",
":",
"callback",
"(",
"None",
",",
"*",
"args",
")",
"break"
]
| Start to watch one event.
:param event_filter:
:param callback:
:param timeout_callback:
:param timeout:
:param args:
:param start_time:
:return: | [
"Start",
"to",
"watch",
"one",
"event",
"."
]
| train | https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/keeper/event_listener.py#L84-L122 |
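A hedged usage sketch tying together the two EventListener records above; `listener` is an assumed EventListener instance, and the callback must accept None, which is what it receives when the timeout fires without a timeout_callback:

def on_event(event, *args):
    if event is None:
        print('timed out without seeing the event')
    else:
        print('got event:', event)

event_filter = listener.make_event_filter()
EventListener.watch_one_event(event_filter, on_event, None, timeout=30, args=[agreement_id])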