id (int32, 0-252k) | repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (1 class: "python") | code (string, 51-19.8k chars) | code_tokens (sequence) | docstring (string, 3-17.3k chars) | docstring_tokens (sequence) | sha (string, 40 chars) | url (string, 87-242 chars)
---|---|---|---|---|---|---|---|---|---|---|---|
3,100 | nirum/tableprint | tableprint/printer.py | header | def header(headers, width=WIDTH, align=ALIGN, style=STYLE, add_hr=True):
"""Returns a formatted row of column header strings
Parameters
----------
headers : list of strings
A list of n strings, the column headers
width : int
The width of each column (Default: 11)
style : string or tuple, optional
A formatting style (see STYLES)
Returns
-------
headerstr : string
A string consisting of the full header row to print
"""
tablestyle = STYLES[style]
widths = parse_width(width, len(headers))
alignment = ALIGNMENTS[align]
# string formatter
data = map(lambda x: ('{:%s%d}' % (alignment, x[0] + ansi_len(x[1]))).format(x[1]), zip(widths, headers))
# build the formatted str
headerstr = format_line(data, tablestyle.row)
if add_hr:
upper = hrule(len(headers), widths, tablestyle.top)
lower = hrule(len(headers), widths, tablestyle.below_header)
headerstr = '\n'.join([upper, headerstr, lower])
return headerstr | python | def header(headers, width=WIDTH, align=ALIGN, style=STYLE, add_hr=True):
tablestyle = STYLES[style]
widths = parse_width(width, len(headers))
alignment = ALIGNMENTS[align]
# string formatter
data = map(lambda x: ('{:%s%d}' % (alignment, x[0] + ansi_len(x[1]))).format(x[1]), zip(widths, headers))
# build the formatted str
headerstr = format_line(data, tablestyle.row)
if add_hr:
upper = hrule(len(headers), widths, tablestyle.top)
lower = hrule(len(headers), widths, tablestyle.below_header)
headerstr = '\n'.join([upper, headerstr, lower])
return headerstr | [
"def",
"header",
"(",
"headers",
",",
"width",
"=",
"WIDTH",
",",
"align",
"=",
"ALIGN",
",",
"style",
"=",
"STYLE",
",",
"add_hr",
"=",
"True",
")",
":",
"tablestyle",
"=",
"STYLES",
"[",
"style",
"]",
"widths",
"=",
"parse_width",
"(",
"width",
",",
"len",
"(",
"headers",
")",
")",
"alignment",
"=",
"ALIGNMENTS",
"[",
"align",
"]",
"# string formatter",
"data",
"=",
"map",
"(",
"lambda",
"x",
":",
"(",
"'{:%s%d}'",
"%",
"(",
"alignment",
",",
"x",
"[",
"0",
"]",
"+",
"ansi_len",
"(",
"x",
"[",
"1",
"]",
")",
")",
")",
".",
"format",
"(",
"x",
"[",
"1",
"]",
")",
",",
"zip",
"(",
"widths",
",",
"headers",
")",
")",
"# build the formatted str",
"headerstr",
"=",
"format_line",
"(",
"data",
",",
"tablestyle",
".",
"row",
")",
"if",
"add_hr",
":",
"upper",
"=",
"hrule",
"(",
"len",
"(",
"headers",
")",
",",
"widths",
",",
"tablestyle",
".",
"top",
")",
"lower",
"=",
"hrule",
"(",
"len",
"(",
"headers",
")",
",",
"widths",
",",
"tablestyle",
".",
"below_header",
")",
"headerstr",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"upper",
",",
"headerstr",
",",
"lower",
"]",
")",
"return",
"headerstr"
] | Returns a formatted row of column header strings
Parameters
----------
headers : list of strings
A list of n strings, the column headers
width : int
The width of each column (Default: 11)
style : string or tuple, optional
A formatting style (see STYLES)
Returns
-------
headerstr : string
A string consisting of the full header row to print | [
"Returns",
"a",
"formatted",
"row",
"of",
"column",
"header",
"strings"
] | 50ab4b96706fce8ee035a4d48cb456e3271eab3d | https://github.com/nirum/tableprint/blob/50ab4b96706fce8ee035a4d48cb456e3271eab3d/tableprint/printer.py#L126-L160 |
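A minimal usage sketch for the `header` row above, assuming the function is re-exported at package level as `tableprint.header` (as the repo's public API suggests); the defaults come straight from the docstring:

```python
import tableprint  # assumed available via `pip install tableprint`

# Three columns at the default 11-character width, with top and below-header rules.
print(tableprint.header(['epoch', 'loss', 'accuracy']))

# Wider columns, and no horizontal rules.
print(tableprint.header(['epoch', 'loss', 'accuracy'], width=15, add_hr=False))
```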
3,101 | nirum/tableprint | tableprint/printer.py | row | def row(values, width=WIDTH, format_spec=FMT, align=ALIGN, style=STYLE):
"""Returns a formatted row of data
Parameters
----------
values : array_like
An iterable array of data (numbers or strings), each value is printed in a separate column
width : int
The width of each column (Default: 11)
format_spec : string
The precision format string used to format numbers in the values array (Default: '5g')
align : string
The alignment to use ('left', 'center', or 'right'). (Default: 'right')
style : namedtuple, optional
A line formatting style
Returns
-------
rowstr : string
A string consisting of the full row of data to print
"""
tablestyle = STYLES[style]
widths = parse_width(width, len(values))
assert isinstance(format_spec, string_types) | isinstance(format_spec, list), \
"format_spec must be a string or list of strings"
if isinstance(format_spec, string_types):
format_spec = [format_spec] * len(list(values))
# mapping function for string formatting
def mapdata(val):
# unpack
width, datum, prec = val
if isinstance(datum, string_types):
return ('{:%s%i}' % (ALIGNMENTS[align], width + ansi_len(datum))).format(datum)
elif isinstance(datum, Number):
return ('{:%s%i.%s}' % (ALIGNMENTS[align], width, prec)).format(datum)
else:
raise ValueError('Elements in the values array must be strings, ints, or floats')
# string formatter
data = map(mapdata, zip(widths, values, format_spec))
# build the row string
return format_line(data, tablestyle.row) | python | def row(values, width=WIDTH, format_spec=FMT, align=ALIGN, style=STYLE):
tablestyle = STYLES[style]
widths = parse_width(width, len(values))
assert isinstance(format_spec, string_types) | isinstance(format_spec, list), \
"format_spec must be a string or list of strings"
if isinstance(format_spec, string_types):
format_spec = [format_spec] * len(list(values))
# mapping function for string formatting
def mapdata(val):
# unpack
width, datum, prec = val
if isinstance(datum, string_types):
return ('{:%s%i}' % (ALIGNMENTS[align], width + ansi_len(datum))).format(datum)
elif isinstance(datum, Number):
return ('{:%s%i.%s}' % (ALIGNMENTS[align], width, prec)).format(datum)
else:
raise ValueError('Elements in the values array must be strings, ints, or floats')
# string formatter
data = map(mapdata, zip(widths, values, format_spec))
# build the row string
return format_line(data, tablestyle.row) | [
"def",
"row",
"(",
"values",
",",
"width",
"=",
"WIDTH",
",",
"format_spec",
"=",
"FMT",
",",
"align",
"=",
"ALIGN",
",",
"style",
"=",
"STYLE",
")",
":",
"tablestyle",
"=",
"STYLES",
"[",
"style",
"]",
"widths",
"=",
"parse_width",
"(",
"width",
",",
"len",
"(",
"values",
")",
")",
"assert",
"isinstance",
"(",
"format_spec",
",",
"string_types",
")",
"|",
"isinstance",
"(",
"format_spec",
",",
"list",
")",
",",
"\"format_spec must be a string or list of strings\"",
"if",
"isinstance",
"(",
"format_spec",
",",
"string_types",
")",
":",
"format_spec",
"=",
"[",
"format_spec",
"]",
"*",
"len",
"(",
"list",
"(",
"values",
")",
")",
"# mapping function for string formatting",
"def",
"mapdata",
"(",
"val",
")",
":",
"# unpack",
"width",
",",
"datum",
",",
"prec",
"=",
"val",
"if",
"isinstance",
"(",
"datum",
",",
"string_types",
")",
":",
"return",
"(",
"'{:%s%i}'",
"%",
"(",
"ALIGNMENTS",
"[",
"align",
"]",
",",
"width",
"+",
"ansi_len",
"(",
"datum",
")",
")",
")",
".",
"format",
"(",
"datum",
")",
"elif",
"isinstance",
"(",
"datum",
",",
"Number",
")",
":",
"return",
"(",
"'{:%s%i.%s}'",
"%",
"(",
"ALIGNMENTS",
"[",
"align",
"]",
",",
"width",
",",
"prec",
")",
")",
".",
"format",
"(",
"datum",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Elements in the values array must be strings, ints, or floats'",
")",
"# string formatter",
"data",
"=",
"map",
"(",
"mapdata",
",",
"zip",
"(",
"widths",
",",
"values",
",",
"format_spec",
")",
")",
"# build the row string",
"return",
"format_line",
"(",
"data",
",",
"tablestyle",
".",
"row",
")"
] | Returns a formatted row of data
Parameters
----------
values : array_like
An iterable array of data (numbers or strings), each value is printed in a separate column
width : int
The width of each column (Default: 11)
format_spec : string
The precision format string used to format numbers in the values array (Default: '5g')
align : string
The alignment to use ('left', 'center', or 'right'). (Default: 'right')
style : namedtuple, optional
A line formatting style
Returns
-------
rowstr : string
A string consisting of the full row of data to print | [
"Returns",
"a",
"formatted",
"row",
"of",
"data"
] | 50ab4b96706fce8ee035a4d48cb456e3271eab3d | https://github.com/nirum/tableprint/blob/50ab4b96706fce8ee035a4d48cb456e3271eab3d/tableprint/printer.py#L163-L216 |
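A companion sketch for `row` (same package-level assumption as above). Note that `format_spec` only applies to numeric values, and a list of specs can be supplied per column:

```python
import tableprint

# Default '5g' precision, right-aligned; strings pass through untouched.
print(tableprint.row([1, 0.0234567, 'ok']))

# One format spec per column ('3f' and '2f' are ordinary str.format precision specs).
print(tableprint.row([3.14159, 2.71828], format_spec=['3f', '2f']))
```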
3,102 | nirum/tableprint | tableprint/printer.py | top | def top(n, width=WIDTH, style=STYLE):
"""Prints the top row of a table"""
return hrule(n, width, linestyle=STYLES[style].top) | python | def top(n, width=WIDTH, style=STYLE):
return hrule(n, width, linestyle=STYLES[style].top) | [
"def",
"top",
"(",
"n",
",",
"width",
"=",
"WIDTH",
",",
"style",
"=",
"STYLE",
")",
":",
"return",
"hrule",
"(",
"n",
",",
"width",
",",
"linestyle",
"=",
"STYLES",
"[",
"style",
"]",
".",
"top",
")"
] | Prints the top row of a table | [
"Prints",
"the",
"top",
"row",
"of",
"a",
"table"
] | 50ab4b96706fce8ee035a4d48cb456e3271eab3d | https://github.com/nirum/tableprint/blob/50ab4b96706fce8ee035a4d48cb456e3271eab3d/tableprint/printer.py#L245-L247 |
3,103 | nirum/tableprint | tableprint/printer.py | banner | def banner(message, width=30, style='banner', out=sys.stdout):
"""Prints a banner message
Parameters
----------
message : string
The message to print in the banner
width : int
The minimum width of the banner (Default: 30)
style : string
A line formatting style (Default: 'banner')
out : writer
An object that has write() and flush() methods (Default: sys.stdout)
"""
out.write(header([message], width=max(width, len(message)), style=style) + '\n')
out.flush() | python | def banner(message, width=30, style='banner', out=sys.stdout):
out.write(header([message], width=max(width, len(message)), style=style) + '\n')
out.flush() | [
"def",
"banner",
"(",
"message",
",",
"width",
"=",
"30",
",",
"style",
"=",
"'banner'",
",",
"out",
"=",
"sys",
".",
"stdout",
")",
":",
"out",
".",
"write",
"(",
"header",
"(",
"[",
"message",
"]",
",",
"width",
"=",
"max",
"(",
"width",
",",
"len",
"(",
"message",
")",
")",
",",
"style",
"=",
"style",
")",
"+",
"'\\n'",
")",
"out",
".",
"flush",
"(",
")"
] | Prints a banner message
Parameters
----------
message : string
The message to print in the banner
width : int
The minimum width of the banner (Default: 30)
style : string
A line formatting style (Default: 'banner')
out : writer
An object that has write() and flush() methods (Default: sys.stdout) | [
"Prints",
"a",
"banner",
"message"
] | 50ab4b96706fce8ee035a4d48cb456e3271eab3d | https://github.com/nirum/tableprint/blob/50ab4b96706fce8ee035a4d48cb456e3271eab3d/tableprint/printer.py#L255-L273 |
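`banner` is just `header` applied to a one-element list; a sketch:

```python
import tableprint

tableprint.banner('Training started')   # at least 30 characters wide (the default)
tableprint.banner('OK', width=10)       # width is max(width, len(message)), so 10 here
```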
3,104 | nirum/tableprint | tableprint/printer.py | dataframe | def dataframe(df, **kwargs):
"""Print table with data from the given pandas DataFrame
Parameters
----------
df : DataFrame
A pandas DataFrame with the table to print
"""
table(df.values, list(df.columns), **kwargs) | python | def dataframe(df, **kwargs):
table(df.values, list(df.columns), **kwargs) | [
"def",
"dataframe",
"(",
"df",
",",
"*",
"*",
"kwargs",
")",
":",
"table",
"(",
"df",
".",
"values",
",",
"list",
"(",
"df",
".",
"columns",
")",
",",
"*",
"*",
"kwargs",
")"
] | Print table with data from the given pandas DataFrame
Parameters
----------
df : DataFrame
A pandas DataFrame with the table to print | [
"Print",
"table",
"with",
"data",
"from",
"the",
"given",
"pandas",
"DataFrame"
] | 50ab4b96706fce8ee035a4d48cb456e3271eab3d | https://github.com/nirum/tableprint/blob/50ab4b96706fce8ee035a4d48cb456e3271eab3d/tableprint/printer.py#L276-L284 |
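`dataframe` forwards to `table(df.values, list(df.columns), **kwargs)`, so any `table` keyword (width, style, and so on) passes straight through; a sketch assuming pandas is installed:

```python
import pandas as pd
import tableprint

df = pd.DataFrame({'x': [1.0, 2.0], 'y': [3.5, 4.5]})
tableprint.dataframe(df)             # prints the whole table to stdout
tableprint.dataframe(df, width=15)   # extra kwargs reach tableprint.table
```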
3,105 | fralau/mkdocs_macros_plugin | macros/plugin.py | MacrosPlugin.on_config | def on_config(self, config):
"Fetch the variables and functions"
#print("Here is the config:", config)
# fetch variables from YAML file:
self._variables = config.get(YAML_SUBSET)
# add variables and functions from the module:
module_reader.load_variables(self._variables, config)
print("Variables:", self.variables) | python | def on_config(self, config):
"Fetch the variables and functions"
#print("Here is the config:", config)
# fetch variables from YAML file:
self._variables = config.get(YAML_SUBSET)
# add variables and functions from the module:
module_reader.load_variables(self._variables, config)
print("Variables:", self.variables) | [
"def",
"on_config",
"(",
"self",
",",
"config",
")",
":",
"#print(\"Here is the config:\", config)",
"# fetch variables from YAML file:",
"self",
".",
"_variables",
"=",
"config",
".",
"get",
"(",
"YAML_SUBSET",
")",
"# add variables and functions from the module:",
"module_reader",
".",
"load_variables",
"(",
"self",
".",
"_variables",
",",
"config",
")",
"print",
"(",
"\"Variables:\"",
",",
"self",
".",
"variables",
")"
] | Fetch the variables and functions | [
"Fetch",
"the",
"variables",
"and",
"functions"
] | 8a02189395adae3acd2d18d9edcf0790ff7b4904 | https://github.com/fralau/mkdocs_macros_plugin/blob/8a02189395adae3acd2d18d9edcf0790ff7b4904/macros/plugin.py#L41-L51 |
3,106 | fralau/mkdocs_macros_plugin | macros/plugin.py | MacrosPlugin.on_page_markdown | def on_page_markdown(self, markdown, page, config,
site_navigation=None, **kwargs):
"Provide a hook for defining functions from an external module"
# the site_navigation argument has been made optional
# (deleted in post 1.0 mkdocs, but maintained here
# for backward compatibility)
if not self.variables:
return markdown
else:
# Create template and get the variables
md_template = Template(markdown)
# Execute the jinja2 template and return
return md_template.render(**self.variables) | python | def on_page_markdown(self, markdown, page, config,
site_navigation=None, **kwargs):
"Provide a hook for defining functions from an external module"
# the site_navigation argument has been made optional
# (deleted in post 1.0 mkdocs, but maintained here
# for backward compatibility)
if not self.variables:
return markdown
else:
# Create template and get the variables
md_template = Template(markdown)
# Execute the jinja2 template and return
return md_template.render(**self.variables) | [
"def",
"on_page_markdown",
"(",
"self",
",",
"markdown",
",",
"page",
",",
"config",
",",
"site_navigation",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# the site_navigation argument has been made optional",
"# (deleted in post 1.0 mkdocs, but maintained here",
"# for backward compatibility)",
"if",
"not",
"self",
".",
"variables",
":",
"return",
"markdown",
"else",
":",
"# Create templae and get the variables",
"md_template",
"=",
"Template",
"(",
"markdown",
")",
"# Execute the jinja2 template and return",
"return",
"md_template",
".",
"render",
"(",
"*",
"*",
"self",
".",
"variables",
")"
] | Provide a hook for defining functions from an external module | [
"Provide",
"a",
"hook",
"for",
"defining",
"functions",
"from",
"an",
"external",
"module"
] | 8a02189395adae3acd2d18d9edcf0790ff7b4904 | https://github.com/fralau/mkdocs_macros_plugin/blob/8a02189395adae3acd2d18d9edcf0790ff7b4904/macros/plugin.py#L54-L71 |
3,107 | fralau/mkdocs_macros_plugin | macros/module_reader.py | load_variables | def load_variables(variables, config):
"""
Add the template functions, via the python module
located in the same directory as the Yaml config file.
The python module must contain the following hook:
declare_variables(variables, macro):
variables['a'] = 5
@macro
def bar(x):
....
@macro
def baz(x):
....
"""
def macro(v, name=''):
"""
Registers a variable as a macro in the template,
i.e. in the variables dictionary:
macro(myfunc)
Optionally, you can assign a different name:
macro(myfunc, 'funcname')
You can also use it as a decorator:
@macro
def foo(a):
return a ** 2
More info:
https://stackoverflow.com/questions/6036082/call-a-python-function-from-jinja2
"""
name = name or v.__name__
variables[name] = v
return v
# determine the package name, from the filename:
python_module = config.get('python_module') or DEFAULT_MODULE_NAME
# get the directory of the yaml file:
config_file = config['config_file_path']
yaml_dir = os.path.dirname(config_file)
# print("Found yaml directory: %s" % yaml_dir)
# that's the directory of the package:
repackage.add(yaml_dir)
try:
module = importlib.import_module(python_module)
print("Found module '%s'" % python_module)
# execute the hook, passing the template decorator function
module.declare_variables(variables, macro)
except ModuleNotFoundError:
print("No module found.") | python | def load_variables(variables, config):
def macro(v, name=''):
"""
Registers a variable as a macro in the template,
i.e. in the variables dictionary:
macro(myfunc)
Optionally, you can assign a different name:
macro(myfunc, 'funcname')
You can also use it as a decorator:
@macro
def foo(a):
return a ** 2
More info:
https://stackoverflow.com/questions/6036082/call-a-python-function-from-jinja2
"""
name = name or v.__name__
variables[name] = v
return v
# determine the package name, from the filename:
python_module = config.get('python_module') or DEFAULT_MODULE_NAME
# get the directory of the yaml file:
config_file = config['config_file_path']
yaml_dir = os.path.dirname(config_file)
# print("Found yaml directory: %s" % yaml_dir)
# that's the directory of the package:
repackage.add(yaml_dir)
try:
module = importlib.import_module(python_module)
print("Found module '%s'" % python_module)
# execute the hook, passing the template decorator function
module.declare_variables(variables, macro)
except ModuleNotFoundError:
print("No module found.") | [
"def",
"load_variables",
"(",
"variables",
",",
"config",
")",
":",
"def",
"macro",
"(",
"v",
",",
"name",
"=",
"''",
")",
":",
"\"\"\"\n Registers a variable as a macro in the template,\n i.e. in the variables dictionary:\n\n macro(myfunc)\n\n Optionally, you can assign a different name:\n\n macro(myfunc, 'funcname')\n\n\n You can also use it as a decorator:\n\n @macro\n def foo(a):\n return a ** 2\n\n More info:\n https://stackoverflow.com/questions/6036082/call-a-python-function-from-jinja2\n \"\"\"",
"name",
"=",
"name",
"or",
"v",
".",
"__name__",
"variables",
"[",
"name",
"]",
"=",
"v",
"return",
"v",
"# determine the package name, from the filename:",
"python_module",
"=",
"config",
".",
"get",
"(",
"'python_module'",
")",
"or",
"DEFAULT_MODULE_NAME",
"# get the directory of the yaml file:",
"config_file",
"=",
"config",
"[",
"'config_file_path'",
"]",
"yaml_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"config_file",
")",
"# print(\"Found yaml directory: %s\" % yaml_dir)",
"# that's the directory of the package:",
"repackage",
".",
"add",
"(",
"yaml_dir",
")",
"try",
":",
"module",
"=",
"importlib",
".",
"import_module",
"(",
"python_module",
")",
"print",
"(",
"\"Found module '%s'\"",
"%",
"python_module",
")",
"# execute the hook, passing the template decorator function",
"module",
".",
"declare_variables",
"(",
"variables",
",",
"macro",
")",
"except",
"ModuleNotFoundError",
":",
"print",
"(",
"\"No module found.\"",
")"
] | Add the template functions, via the python module
located in the same directory as the Yaml config file.
The python module must contain the following hook:
declare_variables(variables, macro):
variables['a'] = 5
@macro
def bar(x):
....
@macro
def baz(x):
.... | [
"Add",
"the",
"template",
"functions",
"via",
"the",
"python",
"module",
"located",
"in",
"the",
"same",
"directory",
"as",
"the",
"Yaml",
"config",
"file",
"."
] | 8a02189395adae3acd2d18d9edcf0790ff7b4904 | https://github.com/fralau/mkdocs_macros_plugin/blob/8a02189395adae3acd2d18d9edcf0790ff7b4904/macros/module_reader.py#L18-L84 |
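Rows 3,105-3,107 describe the plugin's old hook API: `on_config` collects variables, `load_variables` imports a module sitting next to `mkdocs.yml` and calls its `declare_variables(variables, macro)` hook, and `on_page_markdown` renders each page through Jinja2. A sketch of the companion module that hook expects (the file name is an assumption; it must match the `python_module` config key, or the plugin's `DEFAULT_MODULE_NAME`):

```python
# main.py -- placed next to mkdocs.yml

def declare_variables(variables, macro):
    # Plain values become Jinja2 variables: {{ project_version }}
    variables['project_version'] = '1.2.3'

    # Decorated functions become callable macros: {{ shout('hello') }}
    @macro
    def shout(text):
        return text.upper() + '!'

    # macro() also works as a plain call with an explicit template name.
    macro(len, 'length')
```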
3,108 | mayeut/pybase64 | pybase64/_fallback.py | b64decode | def b64decode(s, altchars=None, validate=False):
"""Decode bytes encoded with the standard Base64 alphabet.
Argument ``s`` is a :term:`bytes-like object` or ASCII string to
decode.
Optional ``altchars`` must be a :term:`bytes-like object` or ASCII
string of length 2 which specifies the alternative alphabet used instead
of the '+' and '/' characters.
If ``validate`` is ``False`` (the default), characters that are neither in
the normal base-64 alphabet nor the alternative alphabet are discarded
prior to the padding check.
If ``validate`` is ``True``, these non-alphabet characters in the input
result in a :exc:`binascii.Error`.
The result is returned as a :class:`bytes` object.
A :exc:`binascii.Error` is raised if ``s`` is incorrectly padded.
"""
if version_info < (3, 0) or validate:
if validate and len(s) % 4 != 0:
raise BinAsciiError('Incorrect padding')
s = _get_bytes(s)
if altchars is not None:
altchars = _get_bytes(altchars)
assert len(altchars) == 2, repr(altchars)
if version_info < (3, 0):
map = maketrans(altchars, b'+/')
else:
map = bytes.maketrans(altchars, b'+/')
s = s.translate(map)
try:
result = builtin_decode(s, altchars)
except TypeError as e:
raise BinAsciiError(str(e))
if validate:
# check length of result vs length of input
padding = 0
if len(s) > 1 and s[-2] in (b'=', 61):
padding = padding + 1
if len(s) > 0 and s[-1] in (b'=', 61):
padding = padding + 1
if 3 * (len(s) / 4) - padding != len(result):
raise BinAsciiError('Non-base64 digit found')
return result
return builtin_decode(s, altchars) | python | def b64decode(s, altchars=None, validate=False):
if version_info < (3, 0) or validate:
if validate and len(s) % 4 != 0:
raise BinAsciiError('Incorrect padding')
s = _get_bytes(s)
if altchars is not None:
altchars = _get_bytes(altchars)
assert len(altchars) == 2, repr(altchars)
if version_info < (3, 0):
map = maketrans(altchars, b'+/')
else:
map = bytes.maketrans(altchars, b'+/')
s = s.translate(map)
try:
result = builtin_decode(s, altchars)
except TypeError as e:
raise BinAsciiError(str(e))
if validate:
# check length of result vs length of input
padding = 0
if len(s) > 1 and s[-2] in (b'=', 61):
padding = padding + 1
if len(s) > 0 and s[-1] in (b'=', 61):
padding = padding + 1
if 3 * (len(s) / 4) - padding != len(result):
raise BinAsciiError('Non-base64 digit found')
return result
return builtin_decode(s, altchars) | [
"def",
"b64decode",
"(",
"s",
",",
"altchars",
"=",
"None",
",",
"validate",
"=",
"False",
")",
":",
"if",
"version_info",
"<",
"(",
"3",
",",
"0",
")",
"or",
"validate",
":",
"if",
"validate",
"and",
"len",
"(",
"s",
")",
"%",
"4",
"!=",
"0",
":",
"raise",
"BinAsciiError",
"(",
"'Incorrect padding'",
")",
"s",
"=",
"_get_bytes",
"(",
"s",
")",
"if",
"altchars",
"is",
"not",
"None",
":",
"altchars",
"=",
"_get_bytes",
"(",
"altchars",
")",
"assert",
"len",
"(",
"altchars",
")",
"==",
"2",
",",
"repr",
"(",
"altchars",
")",
"if",
"version_info",
"<",
"(",
"3",
",",
"0",
")",
":",
"map",
"=",
"maketrans",
"(",
"altchars",
",",
"b'+/'",
")",
"else",
":",
"map",
"=",
"bytes",
".",
"maketrans",
"(",
"altchars",
",",
"b'+/'",
")",
"s",
"=",
"s",
".",
"translate",
"(",
"map",
")",
"try",
":",
"result",
"=",
"builtin_decode",
"(",
"s",
",",
"altchars",
")",
"except",
"TypeError",
"as",
"e",
":",
"raise",
"BinAsciiError",
"(",
"str",
"(",
"e",
")",
")",
"if",
"validate",
":",
"# check length of result vs length of input",
"padding",
"=",
"0",
"if",
"len",
"(",
"s",
")",
">",
"1",
"and",
"s",
"[",
"-",
"2",
"]",
"in",
"(",
"b'='",
",",
"61",
")",
":",
"padding",
"=",
"padding",
"+",
"1",
"if",
"len",
"(",
"s",
")",
">",
"0",
"and",
"s",
"[",
"-",
"1",
"]",
"in",
"(",
"b'='",
",",
"61",
")",
":",
"padding",
"=",
"padding",
"+",
"1",
"if",
"3",
"*",
"(",
"len",
"(",
"s",
")",
"/",
"4",
")",
"-",
"padding",
"!=",
"len",
"(",
"result",
")",
":",
"raise",
"BinAsciiError",
"(",
"'Non-base64 digit found'",
")",
"return",
"result",
"return",
"builtin_decode",
"(",
"s",
",",
"altchars",
")"
] | Decode bytes encoded with the standard Base64 alphabet.
Argument ``s`` is a :term:`bytes-like object` or ASCII string to
decode.
Optional ``altchars`` must be a :term:`bytes-like object` or ASCII
string of length 2 which specifies the alternative alphabet used instead
of the '+' and '/' characters.
If ``validate`` is ``False`` (the default), characters that are neither in
the normal base-64 alphabet nor the alternative alphabet are discarded
prior to the padding check.
If ``validate`` is ``True``, these non-alphabet characters in the input
result in a :exc:`binascii.Error`.
The result is returned as a :class:`bytes` object.
A :exc:`binascii.Error` is raised if ``s`` is incorrectly padded. | [
"Decode",
"bytes",
"encoded",
"with",
"the",
"standard",
"Base64",
"alphabet",
"."
] | 861c48675fd6e37c129e1d7a1233074f8d54449e | https://github.com/mayeut/pybase64/blob/861c48675fd6e37c129e1d7a1233074f8d54449e/pybase64/_fallback.py#L40-L86 |
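A sketch of the `validate` behavior documented above, assuming `pybase64` re-exports `b64decode` at package level (its public API mirrors the stdlib `base64` module):

```python
import pybase64

print(pybase64.b64decode(b'aGVsbG8='))                   # b'hello'
print(pybase64.b64decode(b'aGVs bG8=', validate=False))  # space discarded -> b'hello'
try:
    pybase64.b64decode(b'aGVs bG8=', validate=True)      # non-alphabet byte, bad length
except Exception as exc:                                 # binascii.Error in practice
    print(exc)
```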
3,109 | mayeut/pybase64 | pybase64/_fallback.py | b64encode | def b64encode(s, altchars=None):
"""Encode bytes using the standard Base64 alphabet.
Argument ``s`` is a :term:`bytes-like object` to encode.
Optional ``altchars`` must be a byte string of length 2 which specifies
an alternative alphabet for the '+' and '/' characters. This allows an
application to e.g. generate url or filesystem safe Base64 strings.
The result is returned as a :class:`bytes` object.
"""
if altchars is not None:
altchars = _get_bytes(altchars)
assert len(altchars) == 2, repr(altchars)
if version_info < (3, 0):
if isinstance(s, text_type):
raise TypeError('a bytes-like object is required, not \''
+ type(s).__name__ + '\'')
return builtin_encode(s, altchars) | python | def b64encode(s, altchars=None):
if altchars is not None:
altchars = _get_bytes(altchars)
assert len(altchars) == 2, repr(altchars)
if version_info < (3, 0):
if isinstance(s, text_type):
raise TypeError('a bytes-like object is required, not \''
+ type(s).__name__ + '\'')
return builtin_encode(s, altchars) | [
"def",
"b64encode",
"(",
"s",
",",
"altchars",
"=",
"None",
")",
":",
"if",
"altchars",
"is",
"not",
"None",
":",
"altchars",
"=",
"_get_bytes",
"(",
"altchars",
")",
"assert",
"len",
"(",
"altchars",
")",
"==",
"2",
",",
"repr",
"(",
"altchars",
")",
"if",
"version_info",
"<",
"(",
"3",
",",
"0",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"text_type",
")",
":",
"raise",
"TypeError",
"(",
"'a bytes-like object is required, not \\''",
"+",
"type",
"(",
"s",
")",
".",
"__name__",
"+",
"'\\''",
")",
"return",
"builtin_encode",
"(",
"s",
",",
"altchars",
")"
] | Encode bytes using the standard Base64 alphabet.
Argument ``s`` is a :term:`bytes-like object` to encode.
Optional ``altchars`` must be a byte string of length 2 which specifies
an alternative alphabet for the '+' and '/' characters. This allows an
application to e.g. generate url or filesystem safe Base64 strings.
The result is returned as a :class:`bytes` object. | [
"Encode",
"bytes",
"using",
"the",
"standard",
"Base64",
"alphabet",
"."
] | 861c48675fd6e37c129e1d7a1233074f8d54449e | https://github.com/mayeut/pybase64/blob/861c48675fd6e37c129e1d7a1233074f8d54449e/pybase64/_fallback.py#L89-L107 |
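And the `altchars` path, with a worked byte pair: `b'\xfb\xff'` encodes to `b'+/8='` under the standard alphabet, so swapping in `b'-_'` yields the URL-safe form:

```python
import pybase64

print(pybase64.b64encode(b'\xfb\xff'))                  # b'+/8='
print(pybase64.b64encode(b'\xfb\xff', altchars=b'-_'))  # b'-_8='
```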
3,110 | kalbhor/MusicTools | musictools/musictools.py | get_song_urls | def get_song_urls(song_input):
"""
Gather all urls, titles for a search query
from youtube
"""
YOUTUBECLASS = 'spf-prefetch'
html = requests.get("https://www.youtube.com/results",
params={'search_query': song_input})
soup = BeautifulSoup(html.text, 'html.parser')
soup_section = soup.findAll('a', {'rel': YOUTUBECLASS})
# Use generator over list, since storage isn't important
song_urls = ('https://www.youtube.com' + i.get('href')
for i in soup_section)
song_titles = (i.get('title') for i in soup_section)
youtube_list = list(zip(song_urls, song_titles))
del song_urls
del song_titles
return youtube_list | python | def get_song_urls(song_input):
YOUTUBECLASS = 'spf-prefetch'
html = requests.get("https://www.youtube.com/results",
params={'search_query': song_input})
soup = BeautifulSoup(html.text, 'html.parser')
soup_section = soup.findAll('a', {'rel': YOUTUBECLASS})
# Use generator over list, since storage isn't important
song_urls = ('https://www.youtube.com' + i.get('href')
for i in soup_section)
song_titles = (i.get('title') for i in soup_section)
youtube_list = list(zip(song_urls, song_titles))
del song_urls
del song_titles
return youtube_list | [
"def",
"get_song_urls",
"(",
"song_input",
")",
":",
"YOUTUBECLASS",
"=",
"'spf-prefetch'",
"html",
"=",
"requests",
".",
"get",
"(",
"\"https://www.youtube.com/results\"",
",",
"params",
"=",
"{",
"'search_query'",
":",
"song_input",
"}",
")",
"soup",
"=",
"BeautifulSoup",
"(",
"html",
".",
"text",
",",
"'html.parser'",
")",
"soup_section",
"=",
"soup",
".",
"findAll",
"(",
"'a'",
",",
"{",
"'rel'",
":",
"YOUTUBECLASS",
"}",
")",
"# Use generator over list, since storage isn't important",
"song_urls",
"=",
"(",
"'https://www.youtube.com'",
"+",
"i",
".",
"get",
"(",
"'href'",
")",
"for",
"i",
"in",
"soup_section",
")",
"song_titles",
"=",
"(",
"i",
".",
"get",
"(",
"'title'",
")",
"for",
"i",
"in",
"soup_section",
")",
"youtube_list",
"=",
"list",
"(",
"zip",
"(",
"song_urls",
",",
"song_titles",
")",
")",
"del",
"song_urls",
"del",
"song_titles",
"return",
"youtube_list"
] | Gather all urls, titles for a search query
from youtube | [
"Gather",
"all",
"urls",
"titles",
"for",
"a",
"search",
"query",
"from",
"youtube"
] | 324159448553033173bb050458c6a56e3cfa2738 | https://github.com/kalbhor/MusicTools/blob/324159448553033173bb050458c6a56e3cfa2738/musictools/musictools.py#L46-L69 |
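A hypothetical call (the scraper keys off YouTube's `spf-prefetch` anchor class, which dates this snapshot; against modern YouTube markup it would return an empty list):

```python
from musictools.musictools import get_song_urls  # assumed import path from the row

results = get_song_urls('daft punk around the world')
for url, title in results[:3]:
    print(title, '->', url)
```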
3,111 | kalbhor/MusicTools | musictools/musictools.py | download_song | def download_song(song_url, song_title):
"""
Download a song using youtube url and song title
"""
outtmpl = song_title + '.%(ext)s'
ydl_opts = {
'format': 'bestaudio/best',
'outtmpl': outtmpl,
'postprocessors': [
{'key': 'FFmpegExtractAudio','preferredcodec': 'mp3',
'preferredquality': '192',
},
{'key': 'FFmpegMetadata'},
],
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
info_dict = ydl.extract_info(song_url, download=True) | python | def download_song(song_url, song_title):
outtmpl = song_title + '.%(ext)s'
ydl_opts = {
'format': 'bestaudio/best',
'outtmpl': outtmpl,
'postprocessors': [
{'key': 'FFmpegExtractAudio','preferredcodec': 'mp3',
'preferredquality': '192',
},
{'key': 'FFmpegMetadata'},
],
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
info_dict = ydl.extract_info(song_url, download=True) | [
"def",
"download_song",
"(",
"song_url",
",",
"song_title",
")",
":",
"outtmpl",
"=",
"song_title",
"+",
"'.%(ext)s'",
"ydl_opts",
"=",
"{",
"'format'",
":",
"'bestaudio/best'",
",",
"'outtmpl'",
":",
"outtmpl",
",",
"'postprocessors'",
":",
"[",
"{",
"'key'",
":",
"'FFmpegExtractAudio'",
",",
"'preferredcodec'",
":",
"'mp3'",
",",
"'preferredquality'",
":",
"'192'",
",",
"}",
",",
"{",
"'key'",
":",
"'FFmpegMetadata'",
"}",
",",
"]",
",",
"}",
"with",
"youtube_dl",
".",
"YoutubeDL",
"(",
"ydl_opts",
")",
"as",
"ydl",
":",
"info_dict",
"=",
"ydl",
".",
"extract_info",
"(",
"song_url",
",",
"download",
"=",
"True",
")"
] | Download a song using youtube url and song title | [
"Download",
"a",
"song",
"using",
"youtube",
"url",
"and",
"song",
"title"
] | 324159448553033173bb050458c6a56e3cfa2738 | https://github.com/kalbhor/MusicTools/blob/324159448553033173bb050458c6a56e3cfa2738/musictools/musictools.py#L72-L90 |
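Feeding the first search hit into `download_song`, continuing the sketch above (ffmpeg must be on PATH for the `FFmpegExtractAudio` postprocessor to produce the mp3):

```python
from musictools.musictools import get_song_urls, download_song  # assumed path

url, title = get_song_urls('daft punk around the world')[0]
download_song(url, title)   # saves '<title>.mp3' at 192 kbps in the working dir
```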
3,112 | kalbhor/MusicTools | musictools/musictools.py | add_album_art | def add_album_art(file_name, album_art):
"""
Add album_art in .mp3's tags
"""
img = requests.get(album_art, stream=True) # Gets album art from url
img = img.raw
audio = EasyMP3(file_name, ID3=ID3)
try:
audio.add_tags()
except _util.error:
pass
audio.tags.add(
APIC(
encoding=3, # UTF-8
mime='image/png',
type=3, # 3 is for album art
desc='Cover',
data=img.read() # Reads and adds album art
)
)
audio.save()
return album_art | python | def add_album_art(file_name, album_art):
img = requests.get(album_art, stream=True) # Gets album art from url
img = img.raw
audio = EasyMP3(file_name, ID3=ID3)
try:
audio.add_tags()
except _util.error:
pass
audio.tags.add(
APIC(
encoding=3, # UTF-8
mime='image/png',
type=3, # 3 is for album art
desc='Cover',
data=img.read() # Reads and adds album art
)
)
audio.save()
return album_art | [
"def",
"add_album_art",
"(",
"file_name",
",",
"album_art",
")",
":",
"img",
"=",
"requests",
".",
"get",
"(",
"album_art",
",",
"stream",
"=",
"True",
")",
"# Gets album art from url",
"img",
"=",
"img",
".",
"raw",
"audio",
"=",
"EasyMP3",
"(",
"file_name",
",",
"ID3",
"=",
"ID3",
")",
"try",
":",
"audio",
".",
"add_tags",
"(",
")",
"except",
"_util",
".",
"error",
":",
"pass",
"audio",
".",
"tags",
".",
"add",
"(",
"APIC",
"(",
"encoding",
"=",
"3",
",",
"# UTF-8",
"mime",
"=",
"'image/png'",
",",
"type",
"=",
"3",
",",
"# 3 is for album art",
"desc",
"=",
"'Cover'",
",",
"data",
"=",
"img",
".",
"read",
"(",
")",
"# Reads and adds album art",
")",
")",
"audio",
".",
"save",
"(",
")",
"return",
"album_art"
] | Add album_art in .mp3's tags | [
"Add",
"album_art",
"in",
".",
"mp3",
"s",
"tags"
] | 324159448553033173bb050458c6a56e3cfa2738 | https://github.com/kalbhor/MusicTools/blob/324159448553033173bb050458c6a56e3cfa2738/musictools/musictools.py#L113-L139 |
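A sketch that attaches cover art and then checks the resulting ID3 frame (the artwork URL is a placeholder; any direct PNG link works). With `desc='Cover'`, mutagen keys the frame as `'APIC:Cover'`:

```python
from mutagen.mp3 import MP3
from musictools.musictools import add_album_art  # assumed path

add_album_art('song.mp3', 'https://example.com/cover.png')  # hypothetical URL
audio = MP3('song.mp3')
print('APIC:Cover' in audio.tags)   # True once the cover frame is attached
```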
3,113 | kalbhor/MusicTools | musictools/musictools.py | add_metadata | def add_metadata(file_name, title, artist, album):
"""
As the method name suggests
"""
tags = EasyMP3(file_name)
if title:
tags["title"] = title
if artist:
tags["artist"] = artist
if album:
tags["album"] = album
tags.save()
return file_name | python | def add_metadata(file_name, title, artist, album):
tags = EasyMP3(file_name)
if title:
tags["title"] = title
if artist:
tags["artist"] = artist
if album:
tags["album"] = album
tags.save()
return file_name | [
"def",
"add_metadata",
"(",
"file_name",
",",
"title",
",",
"artist",
",",
"album",
")",
":",
"tags",
"=",
"EasyMP3",
"(",
"file_name",
")",
"if",
"title",
":",
"tags",
"[",
"\"title\"",
"]",
"=",
"title",
"if",
"artist",
":",
"tags",
"[",
"\"artist\"",
"]",
"=",
"artist",
"if",
"album",
":",
"tags",
"[",
"\"album\"",
"]",
"=",
"album",
"tags",
".",
"save",
"(",
")",
"return",
"file_name"
] | As the method name suggests | [
"As",
"the",
"method",
"name",
"suggests"
] | 324159448553033173bb050458c6a56e3cfa2738 | https://github.com/kalbhor/MusicTools/blob/324159448553033173bb050458c6a56e3cfa2738/musictools/musictools.py#L142-L156 |
3,114 | kalbhor/MusicTools | musictools/musictools.py | revert_metadata | def revert_metadata(files):
"""
Removes all tags from a mp3 file
"""
for file_path in files:
tags = EasyMP3(file_path)
tags.delete()
tags.save() | python | def revert_metadata(files):
for file_path in files:
tags = EasyMP3(file_path)
tags.delete()
tags.save() | [
"def",
"revert_metadata",
"(",
"files",
")",
":",
"for",
"file_path",
"in",
"files",
":",
"tags",
"=",
"EasyMP3",
"(",
"file_path",
")",
"tags",
".",
"delete",
"(",
")",
"tags",
".",
"save",
"(",
")"
] | Removes all tags from a mp3 file | [
"Removes",
"all",
"tags",
"from",
"a",
"mp3",
"file"
] | 324159448553033173bb050458c6a56e3cfa2738 | https://github.com/kalbhor/MusicTools/blob/324159448553033173bb050458c6a56e3cfa2738/musictools/musictools.py#L166-L173 |
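Rows 3,113-3,114 pair naturally: `add_metadata` writes the Easy-ID3 keys and `revert_metadata` strips every tag again. Note the asymmetry: the first takes a single file, the second an iterable of paths. A sketch:

```python
from musictools.musictools import add_metadata, revert_metadata  # assumed path

add_metadata('song.mp3', 'Around the World', 'Daft Punk', 'Homework')
revert_metadata(['song.mp3'])   # deletes all tags, including the ones just set
```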
3,115 | Tivix/django-braintree | django_braintree/models.py | UserVaultManager.get_user_vault_instance_or_none | def get_user_vault_instance_or_none(self, user):
"""Returns a vault_id string or None"""
qset = self.filter(user=user)
if not qset:
return None
if qset.count() > 1:
raise Exception('This app does not currently support multiple vault ids')
return qset.get() | python | def get_user_vault_instance_or_none(self, user):
qset = self.filter(user=user)
if not qset:
return None
if qset.count() > 1:
raise Exception('This app does not currently support multiple vault ids')
return qset.get() | [
"def",
"get_user_vault_instance_or_none",
"(",
"self",
",",
"user",
")",
":",
"qset",
"=",
"self",
".",
"filter",
"(",
"user",
"=",
"user",
")",
"if",
"not",
"qset",
":",
"return",
"None",
"if",
"qset",
".",
"count",
"(",
")",
">",
"1",
":",
"raise",
"Exception",
"(",
"'This app does not currently support multiple vault ids'",
")",
"return",
"qset",
".",
"get",
"(",
")"
] | Returns a vault_id string or None | [
"Returns",
"a",
"vault_id",
"string",
"or",
"None"
] | 7beb2c8392c2a454c36b353818f3e1db20511ef9 | https://github.com/Tivix/django-braintree/blob/7beb2c8392c2a454c36b353818f3e1db20511ef9/django_braintree/models.py#L11-L20 |
3,116 | Tivix/django-braintree | django_braintree/models.py | UserVaultManager.charge | def charge(self, user, vault_id=None):
"""If vault_id is not passed this will assume that there is only one instane of user and vault_id in the db."""
assert self.is_in_vault(user)
if vault_id:
user_vault = self.get(user=user, vault_id=vault_id)
else:
user_vault = self.get(user=user) | python | def charge(self, user, vault_id=None):
assert self.is_in_vault(user)
if vault_id:
user_vault = self.get(user=user, vault_id=vault_id)
else:
user_vault = self.get(user=user) | [
"def",
"charge",
"(",
"self",
",",
"user",
",",
"vault_id",
"=",
"None",
")",
":",
"assert",
"self",
".",
"is_in_vault",
"(",
"user",
")",
"if",
"vault_id",
":",
"user_vault",
"=",
"self",
".",
"get",
"(",
"user",
"=",
"user",
",",
"vault_id",
"=",
"vault_id",
")",
"else",
":",
"user_vault",
"=",
"self",
".",
"get",
"(",
"user",
"=",
"user",
")"
] | If vault_id is not passed this will assume that there is only one instance of user and vault_id in the db. | [
"If",
"vault_id",
"is",
"not",
"passed",
"this",
"will",
"assume",
"that",
"there",
"is",
"only",
"one",
"instane",
"of",
"user",
"and",
"vault_id",
"in",
"the",
"db",
"."
] | 7beb2c8392c2a454c36b353818f3e1db20511ef9 | https://github.com/Tivix/django-braintree/blob/7beb2c8392c2a454c36b353818f3e1db20511ef9/django_braintree/models.py#L25-L31 |
3,117 | Tivix/django-braintree | django_braintree/forms.py | UserCCDetailsForm.save | def save(self, prepend_vault_id=''):
"""
Adds or updates a users CC to the vault.
@prepend_vault_id: any string to prepend all vault id's with in case the same braintree account is used by
multiple projects/apps.
"""
assert self.is_valid()
cc_details_map = { # cc details
'number': self.cleaned_data['cc_number'],
'cardholder_name': self.cleaned_data['name'],
'expiration_date': '%s/%s' %\
(self.cleaned_data['expiration_month'], self.cleaned_data['expiration_year']),
'cvv': self.cleaned_data['cvv'],
'billing_address': {
'postal_code': self.cleaned_data['zip_code'],
}
}
if self.__user_vault:
try:
# get customer info, its credit card and then update that credit card
response = Customer.find(self.__user_vault.vault_id)
cc_info = response.credit_cards[0]
return CreditCard.update(cc_info.token, params=cc_details_map)
except Exception, e:
logging.error('Was not able to get customer from vault. %s' % e)
self.__user_vault.delete() # delete the stale instance from our db
# in case the above updating fails or user was never in the vault
new_customer_vault_id = '%s%s' % (prepend_vault_id, md5_hash()[:24])
respone = Customer.create({ # creating a customer, but we really just want to store their CC details
'id': new_customer_vault_id, # vault id, uniquely identifies customer. We're not caring about tokens (used for storing multiple CC's per user)
'credit_card': cc_details_map
})
if respone.is_success: # save a new UserVault instance
UserVault.objects.create(user=self.__user, vault_id=new_customer_vault_id)
return respone | python | def save(self, prepend_vault_id=''):
assert self.is_valid()
cc_details_map = { # cc details
'number': self.cleaned_data['cc_number'],
'cardholder_name': self.cleaned_data['name'],
'expiration_date': '%s/%s' %\
(self.cleaned_data['expiration_month'], self.cleaned_data['expiration_year']),
'cvv': self.cleaned_data['cvv'],
'billing_address': {
'postal_code': self.cleaned_data['zip_code'],
}
}
if self.__user_vault:
try:
# get customer info, its credit card and then update that credit card
response = Customer.find(self.__user_vault.vault_id)
cc_info = response.credit_cards[0]
return CreditCard.update(cc_info.token, params=cc_details_map)
except Exception, e:
logging.error('Was not able to get customer from vault. %s' % e)
self.__user_vault.delete() # delete the stale instance from our db
# in case the above updating fails or user was never in the vault
new_customer_vault_id = '%s%s' % (prepend_vault_id, md5_hash()[:24])
respone = Customer.create({ # creating a customer, but we really just want to store their CC details
'id': new_customer_vault_id, # vault id, uniquely identifies customer. We're not caring about tokens (used for storing multiple CC's per user)
'credit_card': cc_details_map
})
if respone.is_success: # save a new UserVault instance
UserVault.objects.create(user=self.__user, vault_id=new_customer_vault_id)
return respone | [
"def",
"save",
"(",
"self",
",",
"prepend_vault_id",
"=",
"''",
")",
":",
"assert",
"self",
".",
"is_valid",
"(",
")",
"cc_details_map",
"=",
"{",
"# cc details",
"'number'",
":",
"self",
".",
"cleaned_data",
"[",
"'cc_number'",
"]",
",",
"'cardholder_name'",
":",
"self",
".",
"cleaned_data",
"[",
"'name'",
"]",
",",
"'expiration_date'",
":",
"'%s/%s'",
"%",
"(",
"self",
".",
"cleaned_data",
"[",
"'expiration_month'",
"]",
",",
"self",
".",
"cleaned_data",
"[",
"'expiration_year'",
"]",
")",
",",
"'cvv'",
":",
"self",
".",
"cleaned_data",
"[",
"'cvv'",
"]",
",",
"'billing_address'",
":",
"{",
"'postal_code'",
":",
"self",
".",
"cleaned_data",
"[",
"'zip_code'",
"]",
",",
"}",
"}",
"if",
"self",
".",
"__user_vault",
":",
"try",
":",
"# get customer info, its credit card and then update that credit card",
"response",
"=",
"Customer",
".",
"find",
"(",
"self",
".",
"__user_vault",
".",
"vault_id",
")",
"cc_info",
"=",
"response",
".",
"credit_cards",
"[",
"0",
"]",
"return",
"CreditCard",
".",
"update",
"(",
"cc_info",
".",
"token",
",",
"params",
"=",
"cc_details_map",
")",
"except",
"Exception",
",",
"e",
":",
"logging",
".",
"error",
"(",
"'Was not able to get customer from vault. %s'",
"%",
"e",
")",
"self",
".",
"__user_vault",
".",
"delete",
"(",
")",
"# delete the stale instance from our db",
"# in case the above updating fails or user was never in the vault",
"new_customer_vault_id",
"=",
"'%s%s'",
"%",
"(",
"prepend_vault_id",
",",
"md5_hash",
"(",
")",
"[",
":",
"24",
"]",
")",
"respone",
"=",
"Customer",
".",
"create",
"(",
"{",
"# creating a customer, but we really just want to store their CC details",
"'id'",
":",
"new_customer_vault_id",
",",
"# vault id, uniquely identifies customer. We're not caring about tokens (used for storing multiple CC's per user)",
"'credit_card'",
":",
"cc_details_map",
"}",
")",
"if",
"respone",
".",
"is_success",
":",
"# save a new UserVault instance",
"UserVault",
".",
"objects",
".",
"create",
"(",
"user",
"=",
"self",
".",
"__user",
",",
"vault_id",
"=",
"new_customer_vault_id",
")",
"return",
"respone"
] | Adds or updates a users CC to the vault.
@prepend_vault_id: any string to prepend all vault id's with in case the same braintree account is used by
multiple projects/apps. | [
"Adds",
"or",
"updates",
"a",
"users",
"CC",
"to",
"the",
"vault",
"."
] | 7beb2c8392c2a454c36b353818f3e1db20511ef9 | https://github.com/Tivix/django-braintree/blob/7beb2c8392c2a454c36b353818f3e1db20511ef9/django_braintree/forms.py#L93-L133 |
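A hypothetical Django view around `UserCCDetailsForm.save` (the constructor signature `(user, data)` is an assumption inferred from the private `__user` attribute above, and braintree must already be configured with merchant credentials for `save` to succeed):

```python
from django.http import HttpResponse
from django_braintree.forms import UserCCDetailsForm  # assumed path from the row

def update_card(request):
    form = UserCCDetailsForm(request.user, request.POST)  # assumed signature
    if not form.is_valid():
        return HttpResponse('invalid card details', status=400)
    result = form.save(prepend_vault_id='myapp_')  # namespaces vault ids per app
    return HttpResponse('ok' if result.is_success else 'declined')
```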
3,118 | dropbox/pygerduty | pygerduty/events.py | Events.resolve_incident | def resolve_incident(self, incident_key,
description=None, details=None):
""" Causes the referenced incident to enter resolved state.
Send a resolve event when the problem that caused the initial
trigger has been fixed.
"""
return self.create_event(description, "resolve",
details, incident_key) | python | def resolve_incident(self, incident_key,
description=None, details=None):
return self.create_event(description, "resolve",
details, incident_key) | [
"def",
"resolve_incident",
"(",
"self",
",",
"incident_key",
",",
"description",
"=",
"None",
",",
"details",
"=",
"None",
")",
":",
"return",
"self",
".",
"create_event",
"(",
"description",
",",
"\"resolve\"",
",",
"details",
",",
"incident_key",
")"
] | Causes the referenced incident to enter resolved state.
Send a resolve event when the problem that caused the initial
trigger has been fixed. | [
"Causes",
"the",
"referenced",
"incident",
"to",
"enter",
"resolved",
"state",
".",
"Send",
"a",
"resolve",
"event",
"when",
"the",
"problem",
"that",
"caused",
"the",
"initial",
"trigger",
"has",
"been",
"fixed",
"."
] | 11b28bfb66306aa7fc2b95ab9df65eb97ea831cf | https://github.com/dropbox/pygerduty/blob/11b28bfb66306aa7fc2b95ab9df65eb97ea831cf/pygerduty/events.py#L57-L65 |
3,119 | dropbox/pygerduty | pygerduty/common.py | clean_response | def clean_response(response):
'''Recurse through dictionary and replace any keys "self" with
"self_"'''
if type(response) is list:
for elem in response:
clean_response(elem)
elif type(response) is dict:
for key, val in response.items():
if key == 'self':
val = response.pop('self')
response['self_'] = val
clean_response(val)
else:
clean_response(response[key])
return response | python | def clean_response(response):
'''Recurse through dictionary and replace any keys "self" with
"self_"'''
if type(response) is list:
for elem in response:
clean_response(elem)
elif type(response) is dict:
for key, val in response.items():
if key == 'self':
val = response.pop('self')
response['self_'] = val
clean_response(val)
else:
clean_response(response[key])
return response | [
"def",
"clean_response",
"(",
"response",
")",
":",
"if",
"type",
"(",
"response",
")",
"is",
"list",
":",
"for",
"elem",
"in",
"response",
":",
"clean_response",
"(",
"elem",
")",
"elif",
"type",
"(",
"response",
")",
"is",
"dict",
":",
"for",
"key",
",",
"val",
"in",
"response",
".",
"items",
"(",
")",
":",
"if",
"key",
"==",
"'self'",
":",
"val",
"=",
"response",
".",
"pop",
"(",
"'self'",
")",
"response",
"[",
"'self_'",
"]",
"=",
"val",
"clean_response",
"(",
"val",
")",
"else",
":",
"clean_response",
"(",
"response",
"[",
"key",
"]",
")",
"return",
"response"
] | Recurse through dictionary and replace any keys "self" with
"self_" | [
"Recurse",
"through",
"dictionary",
"and",
"replace",
"any",
"keys",
"self",
"with",
"self_"
] | 11b28bfb66306aa7fc2b95ab9df65eb97ea831cf | https://github.com/dropbox/pygerduty/blob/11b28bfb66306aa7fc2b95ab9df65eb97ea831cf/pygerduty/common.py#L55-L69 |
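A worked example of the rename: PagerDuty's REST payloads use `self` as a key, which would collide with Python's `self` when the dict is turned into object attributes:

```python
from pygerduty.common import clean_response  # path from the row above

resp = {
    'self': 'https://api.pagerduty.com/users/PXPGF42',
    'teams': [{'self': 'https://api.pagerduty.com/teams/PQ9K7I8'}],
}
print(clean_response(resp))
# {'self_': '...users/PXPGF42', 'teams': [{'self_': '...teams/PQ9K7I8'}]}
```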
3,120 | dropbox/pygerduty | pygerduty/__init__.py | PagerDuty.acknowledge_incident | def acknowledge_incident(self, service_key, incident_key,
description=None, details=None):
""" Causes the referenced incident to enter the acknowledged state.
Send an acknowledge event when someone is presently working on the
incident.
"""
return self.create_event(service_key, description, "acknowledge",
details, incident_key) | python | def acknowledge_incident(self, service_key, incident_key,
description=None, details=None):
return self.create_event(service_key, description, "acknowledge",
details, incident_key) | [
"def",
"acknowledge_incident",
"(",
"self",
",",
"service_key",
",",
"incident_key",
",",
"description",
"=",
"None",
",",
"details",
"=",
"None",
")",
":",
"return",
"self",
".",
"create_event",
"(",
"service_key",
",",
"description",
",",
"\"acknowledge\"",
",",
"details",
",",
"incident_key",
")"
] | Causes the referenced incident to enter the acknowledged state.
Send an acknowledge event when someone is presently working on the
incident. | [
"Causes",
"the",
"referenced",
"incident",
"to",
"enter",
"the",
"acknowledged",
"state",
".",
"Send",
"an",
"acknowledge",
"event",
"when",
"someone",
"is",
"presently",
"working",
"on",
"the",
"incident",
"."
] | 11b28bfb66306aa7fc2b95ab9df65eb97ea831cf | https://github.com/dropbox/pygerduty/blob/11b28bfb66306aa7fc2b95ab9df65eb97ea831cf/pygerduty/__init__.py#L573-L581 |
3,121 | dropbox/pygerduty | pygerduty/__init__.py | PagerDuty.trigger_incident | def trigger_incident(self, service_key, description,
incident_key=None, details=None,
client=None, client_url=None, contexts=None):
""" Report a new or ongoing problem. When PagerDuty receives a trigger,
it will either open a new incident, or add a new log entry to an
existing incident.
"""
return self.create_event(service_key, description, "trigger",
details, incident_key,
client=client, client_url=client_url, contexts=contexts) | python | def trigger_incident(self, service_key, description,
incident_key=None, details=None,
client=None, client_url=None, contexts=None):
return self.create_event(service_key, description, "trigger",
details, incident_key,
client=client, client_url=client_url, contexts=contexts) | [
"def",
"trigger_incident",
"(",
"self",
",",
"service_key",
",",
"description",
",",
"incident_key",
"=",
"None",
",",
"details",
"=",
"None",
",",
"client",
"=",
"None",
",",
"client_url",
"=",
"None",
",",
"contexts",
"=",
"None",
")",
":",
"return",
"self",
".",
"create_event",
"(",
"service_key",
",",
"description",
",",
"\"trigger\"",
",",
"details",
",",
"incident_key",
",",
"client",
"=",
"client",
",",
"client_url",
"=",
"client_url",
",",
"contexts",
"=",
"contexts",
")"
] | Report a new or ongoing problem. When PagerDuty receives a trigger,
it will either open a new incident, or add a new log entry to an
existing incident. | [
"Report",
"a",
"new",
"or",
"ongoing",
"problem",
".",
"When",
"PagerDuty",
"receives",
"a",
"trigger",
"it",
"will",
"either",
"open",
"a",
"new",
"incident",
"or",
"add",
"a",
"new",
"log",
"entry",
"to",
"an",
"existing",
"incident",
"."
] | 11b28bfb66306aa7fc2b95ab9df65eb97ea831cf | https://github.com/dropbox/pygerduty/blob/11b28bfb66306aa7fc2b95ab9df65eb97ea831cf/pygerduty/__init__.py#L583-L593 |
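Rows 3,118-3,121 together give the Events-API lifecycle: trigger opens (or appends to) an incident, acknowledge marks it in progress, resolve closes it. A sketch against the newer `Events` class from row 3,118 (the constructor taking a single integration key, and `trigger_incident` returning the incident key, are assumptions about this pygerduty version):

```python
from pygerduty.events import Events

events = Events('YOUR_SERVICE_KEY')  # one instance per service integration key
key = events.trigger_incident('Disk usage above 90% on db-1')  # assumed return value
events.acknowledge_incident(key, description='Investigating')
events.resolve_incident(key, description='Old logs rotated out')
```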
3,122 | tanyaschlusser/array2gif | array2gif/core.py | try_fix_dataset | def try_fix_dataset(dataset):
"""Transpose the image data if it's in PIL format."""
if isinstance(dataset, numpy.ndarray):
if len(dataset.shape) == 3: # NumPy 3D
if dataset.shape[-1] == 3:
return dataset.transpose((2, 0, 1))
elif len(dataset.shape) == 4: # NumPy 4D
if dataset.shape[-1] == 3:
return dataset.transpose((0, 3, 1, 2))
# Otherwise couldn't fix it.
return dataset
# List of Numpy 3D arrays.
for i, d in enumerate(dataset):
if not isinstance(d, numpy.ndarray):
return dataset
if not (len(d.shape) == 3 and d.shape[-1] == 3):
return dataset
dataset[i] = d.transpose()
return dataset | python | def try_fix_dataset(dataset):
if isinstance(dataset, numpy.ndarray):
if len(dataset.shape) == 3: # NumPy 3D
if dataset.shape[-1] == 3:
return dataset.transpose((2, 0, 1))
elif len(dataset.shape) == 4: # NumPy 4D
if dataset.shape[-1] == 3:
return dataset.transpose((0, 3, 1, 2))
# Otherwise couldn't fix it.
return dataset
# List of Numpy 3D arrays.
for i, d in enumerate(dataset):
if not isinstance(d, numpy.ndarray):
return dataset
if not (len(d.shape) == 3 and d.shape[-1] == 3):
return dataset
dataset[i] = d.transpose()
return dataset | [
"def",
"try_fix_dataset",
"(",
"dataset",
")",
":",
"if",
"isinstance",
"(",
"dataset",
",",
"numpy",
".",
"ndarray",
")",
":",
"if",
"len",
"(",
"dataset",
".",
"shape",
")",
"==",
"3",
":",
"# NumPy 3D",
"if",
"dataset",
".",
"shape",
"[",
"-",
"1",
"]",
"==",
"3",
":",
"return",
"dataset",
".",
"transpose",
"(",
"(",
"2",
",",
"0",
",",
"1",
")",
")",
"elif",
"len",
"(",
"dataset",
".",
"shape",
")",
"==",
"4",
":",
"# NumPy 4D",
"if",
"dataset",
".",
"shape",
"[",
"-",
"1",
"]",
"==",
"3",
":",
"return",
"dataset",
".",
"transpose",
"(",
"(",
"0",
",",
"3",
",",
"1",
",",
"2",
")",
")",
"# Otherwise couldn't fix it.",
"return",
"dataset",
"# List of Numpy 3D arrays.",
"for",
"i",
",",
"d",
"in",
"enumerate",
"(",
"dataset",
")",
":",
"if",
"not",
"isinstance",
"(",
"d",
",",
"numpy",
".",
"ndarray",
")",
":",
"return",
"dataset",
"if",
"not",
"(",
"len",
"(",
"d",
".",
"shape",
")",
"==",
"3",
"and",
"d",
".",
"shape",
"[",
"-",
"1",
"]",
"==",
"3",
")",
":",
"return",
"dataset",
"dataset",
"[",
"i",
"]",
"=",
"d",
".",
"transpose",
"(",
")",
"return",
"dataset"
] | Transpose the image data if it's in PIL format. | [
"Transpose",
"the",
"image",
"data",
"if",
"it",
"s",
"in",
"PIL",
"format",
"."
] | b229da6c8e979314810f59ed0a15ea0f16f71243 | https://github.com/tanyaschlusser/array2gif/blob/b229da6c8e979314810f59ed0a15ea0f16f71243/array2gif/core.py#L77-L95 |
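A sketch of the transposition: array2gif wants channels first, `(3, rows, cols)`, while PIL-style arrays put channels last:

```python
import numpy as np
from array2gif.core import try_fix_dataset  # path from the row above

pil_style = np.zeros((4, 5, 3), dtype=np.uint8)   # (rows, cols, rgb)
print(try_fix_dataset(pil_style).shape)           # (3, 4, 5): channels first

frames = np.zeros((10, 4, 5, 3), dtype=np.uint8)  # 4D animation, channels last
print(try_fix_dataset(frames).shape)              # (10, 3, 4, 5)
```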
3,123 | tanyaschlusser/array2gif | array2gif/core.py | get_image | def get_image(dataset):
"""Convert the NumPy array to two nested lists with r,g,b tuples."""
dim, nrow, ncol = dataset.shape
uint8_dataset = dataset.astype('uint8')
if not (uint8_dataset == dataset).all():
message = (
"\nYour image was cast to a `uint8` (`<img>.astype(uint8)`), "
"but some information was lost.\nPlease check your gif and "
"convert to uint8 beforehand if the gif looks wrong."
)
warnings.warn(message)
image = [[
struct.pack(
'BBB',
uint8_dataset[0, i, j],
uint8_dataset[1, i, j],
uint8_dataset[2, i, j]
)
for j in range(ncol)]
for i in range(nrow)]
return image | python | def get_image(dataset):
dim, nrow, ncol = dataset.shape
uint8_dataset = dataset.astype('uint8')
if not (uint8_dataset == dataset).all():
message = (
"\nYour image was cast to a `uint8` (`<img>.astype(uint8)`), "
"but some information was lost.\nPlease check your gif and "
"convert to uint8 beforehand if the gif looks wrong."
)
warnings.warn(message)
image = [[
struct.pack(
'BBB',
uint8_dataset[0, i, j],
uint8_dataset[1, i, j],
uint8_dataset[2, i, j]
)
for j in range(ncol)]
for i in range(nrow)]
return image | [
"def",
"get_image",
"(",
"dataset",
")",
":",
"dim",
",",
"nrow",
",",
"ncol",
"=",
"dataset",
".",
"shape",
"uint8_dataset",
"=",
"dataset",
".",
"astype",
"(",
"'uint8'",
")",
"if",
"not",
"(",
"uint8_dataset",
"==",
"dataset",
")",
".",
"all",
"(",
")",
":",
"message",
"=",
"(",
"\"\\nYour image was cast to a `uint8` (`<img>.astype(uint8)`), \"",
"\"but some information was lost.\\nPlease check your gif and \"",
"\"convert to uint8 beforehand if the gif looks wrong.\"",
")",
"warnings",
".",
"warn",
"(",
"message",
")",
"image",
"=",
"[",
"[",
"struct",
".",
"pack",
"(",
"'BBB'",
",",
"uint8_dataset",
"[",
"0",
",",
"i",
",",
"j",
"]",
",",
"uint8_dataset",
"[",
"1",
",",
"i",
",",
"j",
"]",
",",
"uint8_dataset",
"[",
"2",
",",
"i",
",",
"j",
"]",
")",
"for",
"j",
"in",
"range",
"(",
"ncol",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"nrow",
")",
"]",
"return",
"image"
] | Convert the NumPy array to two nested lists with r,g,b tuples. | [
"Convert",
"the",
"NumPy",
"array",
"to",
"two",
"nested",
"lists",
"with",
"r",
"g",
"b",
"tuples",
"."
] | b229da6c8e979314810f59ed0a15ea0f16f71243 | https://github.com/tanyaschlusser/array2gif/blob/b229da6c8e979314810f59ed0a15ea0f16f71243/array2gif/core.py#L98-L118 |
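Each pixel becomes three packed bytes; a worked example on a 2x2 pure-red frame:

```python
import numpy as np
from array2gif.core import get_image

dataset = np.zeros((3, 2, 2), dtype=np.uint8)
dataset[0] = 255                 # red channel only
image = get_image(dataset)
print(image[0][0])               # b'\xff\x00\x00' -- struct.pack('BBB', 255, 0, 0)
```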
3,124 | tanyaschlusser/array2gif | array2gif/core.py | get_colors | def get_colors(image):
"""Return a Counter containing each color and how often it appears.
"""
colors = Counter(pixel for row in image for pixel in row)
if len(colors) > 256:
msg = (
"The maximum number of distinct colors in a GIF is 256 but "
"this image has {} colors and can't be encoded properly."
)
raise RuntimeError(msg.format(len(colors)))
return colors | python | def get_colors(image):
colors = Counter(pixel for row in image for pixel in row)
if len(colors) > 256:
msg = (
"The maximum number of distinct colors in a GIF is 256 but "
"this image has {} colors and can't be encoded properly."
)
raise RuntimeError(msg.format(len(colors)))
return colors | [
"def",
"get_colors",
"(",
"image",
")",
":",
"colors",
"=",
"Counter",
"(",
"pixel",
"for",
"row",
"in",
"image",
"for",
"pixel",
"in",
"row",
")",
"if",
"len",
"(",
"colors",
")",
">",
"256",
":",
"msg",
"=",
"(",
"\"The maximum number of distinct colors in a GIF is 256 but \"",
"\"this image has {} colors and can't be encoded properly.\"",
")",
"raise",
"RuntimeError",
"(",
"msg",
".",
"format",
"(",
"len",
"(",
"colors",
")",
")",
")",
"return",
"colors"
] | Return a Counter containing each color and how often it appears. | [
"Return",
"a",
"Counter",
"containing",
"each",
"color",
"and",
"how",
"often",
"it",
"appears",
"."
] | b229da6c8e979314810f59ed0a15ea0f16f71243 | https://github.com/tanyaschlusser/array2gif/blob/b229da6c8e979314810f59ed0a15ea0f16f71243/array2gif/core.py#L167-L177 |
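`get_colors` then counts those packed pixels; more than 256 distinct colors is a hard error because a GIF palette tops out at 256 entries:

```python
from array2gif.core import get_colors

image = [[b'\xff\x00\x00', b'\x00\x00\xff'],
         [b'\xff\x00\x00', b'\xff\x00\x00']]
print(get_colors(image))   # Counter({b'\xff\x00\x00': 3, b'\x00\x00\xff': 1})
```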
3,125 | tanyaschlusser/array2gif | array2gif/core.py | _get_global_color_table | def _get_global_color_table(colors):
"""Return a color table sorted in descending order of count.
"""
global_color_table = b''.join(c[0] for c in colors.most_common())
full_table_size = 2**(1+int(get_color_table_size(len(colors)), 2))
repeats = 3 * (full_table_size - len(colors))
zeros = struct.pack('<{}x'.format(repeats))
return global_color_table + zeros | python | def _get_global_color_table(colors):
global_color_table = b''.join(c[0] for c in colors.most_common())
full_table_size = 2**(1+int(get_color_table_size(len(colors)), 2))
repeats = 3 * (full_table_size - len(colors))
zeros = struct.pack('<{}x'.format(repeats))
return global_color_table + zeros | [
"def",
"_get_global_color_table",
"(",
"colors",
")",
":",
"global_color_table",
"=",
"b''",
".",
"join",
"(",
"c",
"[",
"0",
"]",
"for",
"c",
"in",
"colors",
".",
"most_common",
"(",
")",
")",
"full_table_size",
"=",
"2",
"**",
"(",
"1",
"+",
"int",
"(",
"get_color_table_size",
"(",
"len",
"(",
"colors",
")",
")",
",",
"2",
")",
")",
"repeats",
"=",
"3",
"*",
"(",
"full_table_size",
"-",
"len",
"(",
"colors",
")",
")",
"zeros",
"=",
"struct",
".",
"pack",
"(",
"'<{}x'",
".",
"format",
"(",
"repeats",
")",
")",
"return",
"global_color_table",
"+",
"zeros"
] | Return a color table sorted in descending order of count. | [
"Return",
"a",
"color",
"table",
"sorted",
"in",
"descending",
"order",
"of",
"count",
"."
] | b229da6c8e979314810f59ed0a15ea0f16f71243 | https://github.com/tanyaschlusser/array2gif/blob/b229da6c8e979314810f59ed0a15ea0f16f71243/array2gif/core.py#L180-L187 |
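A quick sanity check of `get_colors` on a toy input: the pixels must already be in array2gif's packed form, i.e. each pixel a `struct.pack('BBB', r, g, b)` byte string as produced by the conversion function above. A minimal sketch using only the standard library; the 256-color ceiling comes from the GIF global color table.

```python
import struct
from collections import Counter

# A 2x2 image in the packed-pixel format used above: nested lists of
# struct.pack('BBB', r, g, b) byte strings.
red = struct.pack('BBB', 255, 0, 0)
blue = struct.pack('BBB', 0, 0, 255)
image = [[red, blue],
         [red, red]]

colors = Counter(pixel for row in image for pixel in row)
print(colors[red], colors[blue])  # -> 3 1
# get_colors(image) would return this Counter; it raises RuntimeError
# only when more than 256 distinct colors are present.
```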
3,126 | tanyaschlusser/array2gif | array2gif/core.py | _get_image_data | def _get_image_data(image, colors):
"""Performs the LZW compression as described by Matthew Flickinger.
This isn't fast, but it works.
http://www.matthewflickinger.com/lab/whatsinagif/lzw_image_data.asp
"""
lzw_code_size, coded_bits = _lzw_encode(image, colors)
coded_bytes = ''.join(
'{{:0{}b}}'.format(nbits).format(val) for val, nbits in coded_bits)
coded_bytes = '0' * ((8 - len(coded_bytes)) % 8) + coded_bytes
coded_data = list(
reversed([
int(coded_bytes[8*i:8*(i+1)], 2)
for i in range(len(coded_bytes) // 8)
])
)
output = [struct.pack('<B', lzw_code_size)]
# Must output the data in blocks of length 255
block_length = min(255, len(coded_data))
while block_length > 0:
block = struct.pack(
'<{}B'.format(block_length + 1),
block_length,
*coded_data[:block_length]
)
output.append(block)
coded_data = coded_data[block_length:]
block_length = min(255, len(coded_data))
return b''.join(output) | python | def _get_image_data(image, colors):
lzw_code_size, coded_bits = _lzw_encode(image, colors)
coded_bytes = ''.join(
'{{:0{}b}}'.format(nbits).format(val) for val, nbits in coded_bits)
coded_bytes = '0' * ((8 - len(coded_bytes)) % 8) + coded_bytes
coded_data = list(
reversed([
int(coded_bytes[8*i:8*(i+1)], 2)
for i in range(len(coded_bytes) // 8)
])
)
output = [struct.pack('<B', lzw_code_size)]
# Must output the data in blocks of length 255
block_length = min(255, len(coded_data))
while block_length > 0:
block = struct.pack(
'<{}B'.format(block_length + 1),
block_length,
*coded_data[:block_length]
)
output.append(block)
coded_data = coded_data[block_length:]
block_length = min(255, len(coded_data))
return b''.join(output) | [
"def",
"_get_image_data",
"(",
"image",
",",
"colors",
")",
":",
"lzw_code_size",
",",
"coded_bits",
"=",
"_lzw_encode",
"(",
"image",
",",
"colors",
")",
"coded_bytes",
"=",
"''",
".",
"join",
"(",
"'{{:0{}b}}'",
".",
"format",
"(",
"nbits",
")",
".",
"format",
"(",
"val",
")",
"for",
"val",
",",
"nbits",
"in",
"coded_bits",
")",
"coded_bytes",
"=",
"'0'",
"*",
"(",
"(",
"8",
"-",
"len",
"(",
"coded_bytes",
")",
")",
"%",
"8",
")",
"+",
"coded_bytes",
"coded_data",
"=",
"list",
"(",
"reversed",
"(",
"[",
"int",
"(",
"coded_bytes",
"[",
"8",
"*",
"i",
":",
"8",
"*",
"(",
"i",
"+",
"1",
")",
"]",
",",
"2",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"coded_bytes",
")",
"//",
"8",
")",
"]",
")",
")",
"output",
"=",
"[",
"struct",
".",
"pack",
"(",
"'<B'",
",",
"lzw_code_size",
")",
"]",
"# Must output the data in blocks of length 255",
"block_length",
"=",
"min",
"(",
"255",
",",
"len",
"(",
"coded_data",
")",
")",
"while",
"block_length",
">",
"0",
":",
"block",
"=",
"struct",
".",
"pack",
"(",
"'<{}B'",
".",
"format",
"(",
"block_length",
"+",
"1",
")",
",",
"block_length",
",",
"*",
"coded_data",
"[",
":",
"block_length",
"]",
")",
"output",
".",
"append",
"(",
"block",
")",
"coded_data",
"=",
"coded_data",
"[",
"block_length",
":",
"]",
"block_length",
"=",
"min",
"(",
"255",
",",
"len",
"(",
"coded_data",
")",
")",
"return",
"b''",
".",
"join",
"(",
"output",
")"
] | Performs the LZW compression as described by Matthew Flickinger.
This isn't fast, but it works.
http://www.matthewflickinger.com/lab/whatsinagif/lzw_image_data.asp | [
"Performs",
"the",
"LZW",
"compression",
"as",
"described",
"by",
"Matthew",
"Flickinger",
"."
] | b229da6c8e979314810f59ed0a15ea0f16f71243 | https://github.com/tanyaschlusser/array2gif/blob/b229da6c8e979314810f59ed0a15ea0f16f71243/array2gif/core.py#L311-L339 |
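The 255-byte blocking at the end of `_get_image_data` is mandated by the GIF specification: image data is stored as sub-blocks, each a length byte (at most 255) followed by that many data bytes. A standalone sketch of just that framing step (an illustrative helper, not part of the library's API):

```python
import struct

def frame_sub_blocks(data: bytes) -> bytes:
    """Split raw LZW output into GIF sub-blocks: a length byte (<= 255)
    followed by that many data bytes, repeated until the data runs out."""
    out = []
    for i in range(0, len(data), 255):
        chunk = data[i:i + 255]
        out.append(struct.pack('<B', len(chunk)) + chunk)
    return b''.join(out)

# 600 bytes of payload -> blocks of 255, 255 and 90 bytes.
framed = frame_sub_blocks(bytes(600))
assert [framed[0], framed[256], framed[512]] == [255, 255, 90]
```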
3,127 | tanyaschlusser/array2gif | array2gif/core.py | write_gif | def write_gif(dataset, filename, fps=10):
"""Write a NumPy array to GIF 89a format.
Or write a list of NumPy arrays to an animation (GIF 89a format).
- Positional arguments::
:param dataset: A NumPy array or list of arrays with shape
rgb x rows x cols and integer values in [0, 255].
:param filename: The output file that will contain the GIF image.
:param fps: The (integer) frames/second of the animation (default 10).
:type dataset: a NumPy array or list of NumPy arrays.
:return: None
- Example: a minimal array, with one red pixel, would look like this::
import numpy as np
one_red_pixel = np.array([[[255]], [[0]], [[0]]])
write_gif(one_red_pixel, 'red_pixel.gif')
:raises: ValueError
"""
try:
check_dataset(dataset)
except ValueError as e:
dataset = try_fix_dataset(dataset)
check_dataset(dataset)
delay_time = 100 // int(fps)
def encode(d):
four_d = isinstance(dataset, numpy.ndarray) and len(dataset.shape) == 4
if four_d or not isinstance(dataset, numpy.ndarray):
return _make_animated_gif(d, delay_time=delay_time)
else:
return _make_gif(d)
with open(filename, 'wb') as outfile:
outfile.write(HEADER)
for block in encode(dataset):
outfile.write(block)
outfile.write(TRAILER) | python | def write_gif(dataset, filename, fps=10):
try:
check_dataset(dataset)
except ValueError as e:
dataset = try_fix_dataset(dataset)
check_dataset(dataset)
delay_time = 100 // int(fps)
def encode(d):
four_d = isinstance(dataset, numpy.ndarray) and len(dataset.shape) == 4
if four_d or not isinstance(dataset, numpy.ndarray):
return _make_animated_gif(d, delay_time=delay_time)
else:
return _make_gif(d)
with open(filename, 'wb') as outfile:
outfile.write(HEADER)
for block in encode(dataset):
outfile.write(block)
outfile.write(TRAILER) | [
"def",
"write_gif",
"(",
"dataset",
",",
"filename",
",",
"fps",
"=",
"10",
")",
":",
"try",
":",
"check_dataset",
"(",
"dataset",
")",
"except",
"ValueError",
"as",
"e",
":",
"dataset",
"=",
"try_fix_dataset",
"(",
"dataset",
")",
"check_dataset",
"(",
"dataset",
")",
"delay_time",
"=",
"100",
"//",
"int",
"(",
"fps",
")",
"def",
"encode",
"(",
"d",
")",
":",
"four_d",
"=",
"isinstance",
"(",
"dataset",
",",
"numpy",
".",
"ndarray",
")",
"and",
"len",
"(",
"dataset",
".",
"shape",
")",
"==",
"4",
"if",
"four_d",
"or",
"not",
"isinstance",
"(",
"dataset",
",",
"numpy",
".",
"ndarray",
")",
":",
"return",
"_make_animated_gif",
"(",
"d",
",",
"delay_time",
"=",
"delay_time",
")",
"else",
":",
"return",
"_make_gif",
"(",
"d",
")",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"HEADER",
")",
"for",
"block",
"in",
"encode",
"(",
"dataset",
")",
":",
"outfile",
".",
"write",
"(",
"block",
")",
"outfile",
".",
"write",
"(",
"TRAILER",
")"
] | Write a NumPy array to GIF 89a format.
Or write a list of NumPy arrays to an animation (GIF 89a format).
- Positional arguments::
:param dataset: A NumPy array or list of arrays with shape
rgb x rows x cols and integer values in [0, 255].
:param filename: The output file that will contain the GIF image.
:param fps: The (integer) frames/second of the animation (default 10).
:type dataset: a NumPy array or list of NumPy arrays.
:return: None
- Example: a minimal array, with one red pixel, would look like this::
import numpy as np
one_red_pixel = np.array([[[255]], [[0]], [[0]]])
write_gif(one_red_pixel, 'red_pixel.gif')
:raises: ValueError | [
"Write",
"a",
"NumPy",
"array",
"to",
"GIF",
"89a",
"format",
"."
] | b229da6c8e979314810f59ed0a15ea0f16f71243 | https://github.com/tanyaschlusser/array2gif/blob/b229da6c8e979314810f59ed0a15ea0f16f71243/array2gif/core.py#L386-L426 |
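Building on the docstring's single-pixel example, a hedged sketch of the animated path: passing a list of arrays (rather than one array) should route through `_make_animated_gif`, and `fps=2` yields `delay_time = 50` hundredths of a second per frame. The import assumes the package exposes `write_gif` at the top level.

```python
import numpy as np
from array2gif import write_gif  # assumes write_gif is exported at top level

# Two 16x16 frames in (rgb, rows, cols) layout: solid red, then solid blue.
red = np.zeros((3, 16, 16), dtype=np.uint8)
red[0] = 255
blue = np.zeros((3, 16, 16), dtype=np.uint8)
blue[2] = 255

# A list of arrays takes the animated-GIF path; fps=2 -> delay_time = 50.
write_gif([red, blue], 'blink.gif', fps=2)
```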
3,128 | hovren/crisp | crisp/timesync.py | good_sequences_to_track | def good_sequences_to_track(flow, motion_threshold=1.0):
"""Get list of good frames to do tracking in.
Looking at the optical flow, this function chooses a span of frames
that fulfill certain criteria.
These include
* not being too short or too long
* not too low or too high mean flow magnitude
* a low max value (avoids motion blur)
Currently, the cost function for a sequence is hard coded. Sorry about that.
Parameters
-------------
flow : ndarray
The optical flow magnitude
motion_threshold : float
The maximum amount of motion to consider for sequence endpoints.
Returns
------------
sequences : list
Sorted list of (a, b, score) elements (highest score first) of sequences
where a sequence is frames with frame indices in the span [a, b].
"""
endpoints = []
in_low = False
for i, val in enumerate(flow):
if val < motion_threshold:
if not in_low:
endpoints.append(i)
in_low = True
else:
if in_low:
endpoints.append(i-1) # Previous was last in a low spot
in_low = False
def mean_score_func(m):
mu = 15
sigma = 8
top_val = normpdf(mu, mu, sigma)
return normpdf(m, mu, sigma) / top_val
def max_score_func(m):
mu = 40
sigma = 8
if m <= mu:
return 1.
else:
top_val = normpdf(mu, mu, sigma)
return normpdf(m, mu, sigma) / top_val
def length_score_func(l):
mu = 30
sigma = 10
top_val = normpdf(mu, mu, sigma)
return normpdf(l, mu, sigma) / top_val
min_length = 5 # frames
sequences = []
for k, i in enumerate(endpoints[:-1]):
for j in endpoints[k+1:]:
length = j - i
if length < min_length:
continue
seq = flow[i:j+1]
m_score = mean_score_func(np.mean(seq))
mx_score = max_score_func(np.max(seq))
l_score = length_score_func(length)
logger.debug("%d, %d scores: (mean=%.5f, max=%.5f, length=%.5f)" % (i,j,m_score, mx_score, l_score))
if min(m_score, mx_score, l_score) < 0.2:
continue
score = m_score + mx_score + l_score
sequences.append((i, j, score))
return sorted(sequences, key=lambda x: x[2], reverse=True) | python | def good_sequences_to_track(flow, motion_threshold=1.0):
endpoints = []
in_low = False
for i, val in enumerate(flow):
if val < motion_threshold:
if not in_low:
endpoints.append(i)
in_low = True
else:
if in_low:
endpoints.append(i-1) # Previous was last in a low spot
in_low = False
def mean_score_func(m):
mu = 15
sigma = 8
top_val = normpdf(mu, mu, sigma)
return normpdf(m, mu, sigma) / top_val
def max_score_func(m):
mu = 40
sigma = 8
if m <= mu:
return 1.
else:
top_val = normpdf(mu, mu, sigma)
return normpdf(m, mu, sigma) / top_val
def length_score_func(l):
mu = 30
sigma = 10
top_val = normpdf(mu, mu, sigma)
return normpdf(l, mu, sigma) / top_val
min_length = 5 # frames
sequences = []
for k, i in enumerate(endpoints[:-1]):
for j in endpoints[k+1:]:
length = j - i
if length < min_length:
continue
seq = flow[i:j+1]
m_score = mean_score_func(np.mean(seq))
mx_score = max_score_func(np.max(seq))
l_score = length_score_func(length)
logger.debug("%d, %d scores: (mean=%.5f, max=%.5f, length=%.5f)" % (i,j,m_score, mx_score, l_score))
if min(m_score, mx_score, l_score) < 0.2:
continue
score = m_score + mx_score + l_score
sequences.append((i, j, score))
return sorted(sequences, key=lambda x: x[2], reverse=True) | [
"def",
"good_sequences_to_track",
"(",
"flow",
",",
"motion_threshold",
"=",
"1.0",
")",
":",
"endpoints",
"=",
"[",
"]",
"in_low",
"=",
"False",
"for",
"i",
",",
"val",
"in",
"enumerate",
"(",
"flow",
")",
":",
"if",
"val",
"<",
"motion_threshold",
":",
"if",
"not",
"in_low",
":",
"endpoints",
".",
"append",
"(",
"i",
")",
"in_low",
"=",
"True",
"else",
":",
"if",
"in_low",
":",
"endpoints",
".",
"append",
"(",
"i",
"-",
"1",
")",
"# Previous was last in a low spot",
"in_low",
"=",
"False",
"def",
"mean_score_func",
"(",
"m",
")",
":",
"mu",
"=",
"15",
"sigma",
"=",
"8",
"top_val",
"=",
"normpdf",
"(",
"mu",
",",
"mu",
",",
"sigma",
")",
"return",
"normpdf",
"(",
"m",
",",
"mu",
",",
"sigma",
")",
"/",
"top_val",
"def",
"max_score_func",
"(",
"m",
")",
":",
"mu",
"=",
"40",
"sigma",
"=",
"8",
"if",
"m",
"<=",
"mu",
":",
"return",
"1.",
"else",
":",
"top_val",
"=",
"normpdf",
"(",
"mu",
",",
"mu",
",",
"sigma",
")",
"return",
"normpdf",
"(",
"m",
",",
"mu",
",",
"sigma",
")",
"/",
"top_val",
"def",
"length_score_func",
"(",
"l",
")",
":",
"mu",
"=",
"30",
"sigma",
"=",
"10",
"top_val",
"=",
"normpdf",
"(",
"mu",
",",
"mu",
",",
"sigma",
")",
"return",
"normpdf",
"(",
"l",
",",
"mu",
",",
"sigma",
")",
"/",
"top_val",
"min_length",
"=",
"5",
"# frames",
"sequences",
"=",
"[",
"]",
"for",
"k",
",",
"i",
"in",
"enumerate",
"(",
"endpoints",
"[",
":",
"-",
"1",
"]",
")",
":",
"for",
"j",
"in",
"endpoints",
"[",
"k",
"+",
"1",
":",
"]",
":",
"length",
"=",
"j",
"-",
"i",
"if",
"length",
"<",
"min_length",
":",
"continue",
"seq",
"=",
"flow",
"[",
"i",
":",
"j",
"+",
"1",
"]",
"m_score",
"=",
"mean_score_func",
"(",
"np",
".",
"mean",
"(",
"seq",
")",
")",
"mx_score",
"=",
"max_score_func",
"(",
"np",
".",
"max",
"(",
"seq",
")",
")",
"l_score",
"=",
"length_score_func",
"(",
"length",
")",
"logger",
".",
"debug",
"(",
"\"%d, %d scores: (mean=%.5f, max=%.5f, length=%.5f)\"",
"%",
"(",
"i",
",",
"j",
",",
"m_score",
",",
"mx_score",
",",
"l_score",
")",
")",
"if",
"min",
"(",
"m_score",
",",
"mx_score",
",",
"l_score",
")",
"<",
"0.2",
":",
"continue",
"score",
"=",
"m_score",
"+",
"mx_score",
"+",
"l_score",
"sequences",
".",
"append",
"(",
"(",
"i",
",",
"j",
",",
"score",
")",
")",
"return",
"sorted",
"(",
"sequences",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"2",
"]",
",",
"reverse",
"=",
"True",
")"
] | Get list of good frames to do tracking in.
Looking at the optical flow, this function chooses a span of frames
that fulfill certain criteria.
These include
* not being too short or too long
* not too low or too high mean flow magnitude
* a low max value (avoids motion blur)
Currently, the cost function for a sequence is hard coded. Sorry about that.
Parameters
-------------
flow : ndarray
The optical flow magnitude
motion_threshold : float
The maximum amount of motion to consider for sequence endpoints.
Returns
------------
sequences : list
Sorted list of (a, b, score) elements (highest score first) of sequences
where a sequence is frames with frame indices in the span [a, b]. | [
"Get",
"list",
"of",
"good",
"frames",
"to",
"do",
"tracking",
"in",
"."
] | 65cae19e7cfae5a397859096c9ef666e0f4e7f1b | https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/timesync.py#L336-L411 |
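The three score functions above share one pattern: a Gaussian bump rescaled by its own peak, so the ideal value scores exactly 1.0. `normpdf` is not defined in this record; the sketch below assumes it is the standard normal density.

```python
import numpy as np

def normpdf(x, mu, sigma):
    # Assumed definition: the Gaussian density used by the score functions.
    return np.exp(-0.5 * ((x - mu) / sigma) ** 2) / (sigma * np.sqrt(2 * np.pi))

def peak_normalized_score(value, mu, sigma):
    # Each score is the pdf divided by its peak, so it is 1.0 at value == mu.
    return normpdf(value, mu, sigma) / normpdf(mu, mu, sigma)

print(peak_normalized_score(15, mu=15, sigma=8))            # 1.0 (ideal mean flow)
print(round(peak_normalized_score(31, mu=15, sigma=8), 3))  # 0.135, two sigmas out
```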
3,129 | hovren/crisp | crisp/calibration.py | AutoCalibrator.initialize | def initialize(self, gyro_rate, slices=None, skip_estimation=False):
"""Prepare calibrator for calibration
This method does three things:
1. Create slices from the video stream, if not already provided
2. Estimate time offset
3. Estimate rotation between camera and gyroscope
Parameters
------------------
gyro_rate : float
Estimated gyroscope sample rate
slices : list of Slice, optional
Slices to use for optimization
skip_estimation : bool
Do not estimate initial time offset and rotation.
Raises
--------------------
InitializationError
If the initialization fails
"""
self.params['user']['gyro_rate'] = gyro_rate
for p in ('gbias_x', 'gbias_y', 'gbias_z'):
self.params['initialized'][p] = 0.0
if slices is not None:
self.slices = slices
if self.slices is None:
self.slices = videoslice.Slice.from_stream_randomly(self.video)
logger.debug("Number of slices: {:d}".format(len(self.slices)))
if len(self.slices) < 2:
logger.error("Calibration requires at least 2 video slices to proceed, got %d", len(self.slices))
raise InitializationError("Calibration requires at least 2 video slices to proceed, got {:d}".format(len(self.slices)))
if not skip_estimation:
time_offset = self.find_initial_offset()
# TODO: Detect when time offset initialization fails, and raise InitializationError
R = self.find_initial_rotation()
if R is None:
raise InitializationError("Failed to calculate initial rotation") | python | def initialize(self, gyro_rate, slices=None, skip_estimation=False):
self.params['user']['gyro_rate'] = gyro_rate
for p in ('gbias_x', 'gbias_y', 'gbias_z'):
self.params['initialized'][p] = 0.0
if slices is not None:
self.slices = slices
if self.slices is None:
self.slices = videoslice.Slice.from_stream_randomly(self.video)
logger.debug("Number of slices: {:d}".format(len(self.slices)))
if len(self.slices) < 2:
logger.error("Calibration requires at least 2 video slices to proceed, got %d", len(self.slices))
raise InitializationError("Calibration requires at least 2 video slices to proceed, got {:d}".format(len(self.slices)))
if not skip_estimation:
time_offset = self.find_initial_offset()
# TODO: Detect when time offset initialization fails, and raise InitializationError
R = self.find_initial_rotation()
if R is None:
raise InitializationError("Failed to calculate initial rotation") | [
"def",
"initialize",
"(",
"self",
",",
"gyro_rate",
",",
"slices",
"=",
"None",
",",
"skip_estimation",
"=",
"False",
")",
":",
"self",
".",
"params",
"[",
"'user'",
"]",
"[",
"'gyro_rate'",
"]",
"=",
"gyro_rate",
"for",
"p",
"in",
"(",
"'gbias_x'",
",",
"'gbias_y'",
",",
"'gbias_z'",
")",
":",
"self",
".",
"params",
"[",
"'initialized'",
"]",
"[",
"p",
"]",
"=",
"0.0",
"if",
"slices",
"is",
"not",
"None",
":",
"self",
".",
"slices",
"=",
"slices",
"if",
"self",
".",
"slices",
"is",
"None",
":",
"self",
".",
"slices",
"=",
"videoslice",
".",
"Slice",
".",
"from_stream_randomly",
"(",
"self",
".",
"video",
")",
"logger",
".",
"debug",
"(",
"\"Number of slices: {:d}\"",
".",
"format",
"(",
"len",
"(",
"self",
".",
"slices",
")",
")",
")",
"if",
"len",
"(",
"self",
".",
"slices",
")",
"<",
"2",
":",
"logger",
".",
"error",
"(",
"\"Calibration requires at least 2 video slices to proceed, got %d\"",
",",
"len",
"(",
"self",
".",
"slices",
")",
")",
"raise",
"InitializationError",
"(",
"\"Calibration requires at least 2 video slices to proceed, got {:d}\"",
".",
"format",
"(",
"len",
"(",
"self",
".",
"slices",
")",
")",
")",
"if",
"not",
"skip_estimation",
":",
"time_offset",
"=",
"self",
".",
"find_initial_offset",
"(",
")",
"# TODO: Detect when time offset initialization fails, and raise InitializationError",
"R",
"=",
"self",
".",
"find_initial_rotation",
"(",
")",
"if",
"R",
"is",
"None",
":",
"raise",
"InitializationError",
"(",
"\"Failed to calculate initial rotation\"",
")"
] | Prepare calibrator for calibration
This method does three things:
1. Create slices from the video stream, if not already provided
2. Estimate time offset
3. Estimate rotation between camera and gyroscope
Parameters
------------------
gyro_rate : float
Estimated gyroscope sample rate
slices : list of Slice, optional
Slices to use for optimization
skip_estimation : bool
Do not estimate initial time offset and rotation.
Raises
--------------------
InitializationError
If the initialization fails | [
"Prepare",
"calibrator",
"for",
"calibration"
] | 65cae19e7cfae5a397859096c9ef666e0f4e7f1b | https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/calibration.py#L90-L134 |
3,130 | hovren/crisp | crisp/calibration.py | AutoCalibrator.video_time_to_gyro_sample | def video_time_to_gyro_sample(self, t):
"""Convert video time to gyroscope sample index and interpolation factor
Parameters
-------------------
t : float
Video timestamp
Returns
--------------------
n : int
Sample index that precedes t
tau : float
Interpolation factor [0.0-1.0]. If tau=0, then t falls exactly on n. If tau=1, then t falls exactly on n+1.
"""
f_g = self.parameter['gyro_rate']
d_c = self.parameter['time_offset']
n = f_g * (t + d_c)
n0 = int(np.floor(n))
tau = n - n0
return n0, tau | python | def video_time_to_gyro_sample(self, t):
f_g = self.parameter['gyro_rate']
d_c = self.parameter['time_offset']
n = f_g * (t + d_c)
n0 = int(np.floor(n))
tau = n - n0
return n0, tau | [
"def",
"video_time_to_gyro_sample",
"(",
"self",
",",
"t",
")",
":",
"f_g",
"=",
"self",
".",
"parameter",
"[",
"'gyro_rate'",
"]",
"d_c",
"=",
"self",
".",
"parameter",
"[",
"'time_offset'",
"]",
"n",
"=",
"f_g",
"*",
"(",
"t",
"+",
"d_c",
")",
"n0",
"=",
"int",
"(",
"np",
".",
"floor",
"(",
"n",
")",
")",
"tau",
"=",
"n",
"-",
"n0",
"return",
"n0",
",",
"tau"
] | Convert video time to gyroscope sample index and interpolation factor
Parameters
-------------------
t : float
Video timestamp
Returns
--------------------
n : int
Sample index that precedes t
tau : float
Interpolation factor [0.0-1.0]. If tau=0, then t falls on exactly n. If tau=1 then t falls exactly on n+1 | [
"Convert",
"video",
"time",
"to",
"gyroscope",
"sample",
"index",
"and",
"interpolation",
"factor"
] | 65cae19e7cfae5a397859096c9ef666e0f4e7f1b | https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/calibration.py#L137-L157 |
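A worked example of the index split n = f_g * (t + d_c), with illustrative values for the gyro rate and time offset:

```python
import numpy as np

f_g = 200.0   # gyro rate in Hz (example value)
d_c = 0.015   # camera-to-gyro time offset in seconds (example value)
t = 1.0       # video timestamp in seconds

n = f_g * (t + d_c)    # 203.0: fractional gyro sample index
n0 = int(np.floor(n))  # 203
tau = n - n0           # 0.0 -> t falls exactly on sample 203
print(n0, tau)
```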
3,131 | hovren/crisp | crisp/calibration.py | AutoCalibrator.parameter | def parameter(self):
"""Return the current best value of a parameter"""
D = {}
for source in PARAM_SOURCE_ORDER:
D.update(self.params[source])
return D | python | def parameter(self):
D = {}
for source in PARAM_SOURCE_ORDER:
D.update(self.params[source])
return D | [
"def",
"parameter",
"(",
"self",
")",
":",
"D",
"=",
"{",
"}",
"for",
"source",
"in",
"PARAM_SOURCE_ORDER",
":",
"D",
".",
"update",
"(",
"self",
".",
"params",
"[",
"source",
"]",
")",
"return",
"D"
] | Return a dict with the current best value of each parameter | [
"Return",
"the",
"current",
"best",
"value",
"of",
"a",
"parameter"
] | 65cae19e7cfae5a397859096c9ef666e0f4e7f1b | https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/calibration.py#L160-L165 |
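The property is a layered-dict merge: later sources in `PARAM_SOURCE_ORDER` override earlier ones. A minimal stand-in (the source names and their ordering here are assumptions for illustration):

```python
PARAM_SOURCE_ORDER = ('user', 'initialized', 'calibrated')
params = {
    'user': {'gyro_rate': 200.0},
    'initialized': {'time_offset': 0.01, 'gbias_x': 0.0},
    'calibrated': {'time_offset': 0.0123},  # the refined value wins
}

best = {}
for source in PARAM_SOURCE_ORDER:
    best.update(params[source])  # later sources overwrite earlier keys
print(best['time_offset'])  # 0.0123
```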
3,132 | hovren/crisp | crisp/calibration.py | AutoCalibrator.print_params | def print_params(self):
"""Print the current best set of parameters"""
print("Parameters")
print("--------------------")
for param in PARAM_ORDER:
print(' {:>11s} = {}'.format(param, self.parameter[param])) | python | def print_params(self):
print("Parameters")
print("--------------------")
for param in PARAM_ORDER:
print(' {:>11s} = {}'.format(param, self.parameter[param])) | [
"def",
"print_params",
"(",
"self",
")",
":",
"print",
"(",
"\"Parameters\"",
")",
"print",
"(",
"\"--------------------\"",
")",
"for",
"param",
"in",
"PARAM_ORDER",
":",
"print",
"(",
"' {:>11s} = {}'",
".",
"format",
"(",
"param",
",",
"self",
".",
"parameter",
"[",
"param",
"]",
")",
")"
] | Print the current best set of parameters | [
"Print",
"the",
"current",
"best",
"set",
"of",
"parameters"
] | 65cae19e7cfae5a397859096c9ef666e0f4e7f1b | https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/calibration.py#L328-L333 |
3,133 | hovren/crisp | crisp/camera.py | AtanCameraModel.from_hdf | def from_hdf(cls, filename):
"""Load camera model params from a HDF5 file
The HDF5 file should contain the following datasets:
wc : (2,) float with distortion center
lgamma : float distortion parameter
readout : float readout value
size : (2,) int image size
fps : float frame rate
K : (3, 3) float camera matrix
Parameters
--------------------
filename : str
Path to file with parameters
Returns
---------------------
AtanCameraModel
Camera model instance
"""
import h5py
with h5py.File(filename, 'r') as f:
wc = f["wc"].value
lgamma = f["lgamma"].value
K = f["K"].value
readout = f["readout"].value
image_size = f["size"].value
fps = f["fps"].value
instance = cls(image_size, fps, readout, K, wc, lgamma)
return instance | python | def from_hdf(cls, filename):
import h5py
with h5py.File(filename, 'r') as f:
wc = f["wc"].value
lgamma = f["lgamma"].value
K = f["K"].value
readout = f["readout"].value
image_size = f["size"].value
fps = f["fps"].value
instance = cls(image_size, fps, readout, K, wc, lgamma)
return instance | [
"def",
"from_hdf",
"(",
"cls",
",",
"filename",
")",
":",
"import",
"h5py",
"with",
"h5py",
".",
"File",
"(",
"filename",
",",
"'r'",
")",
"as",
"f",
":",
"wc",
"=",
"f",
"[",
"\"wc\"",
"]",
".",
"value",
"lgamma",
"=",
"f",
"[",
"\"lgamma\"",
"]",
".",
"value",
"K",
"=",
"f",
"[",
"\"K\"",
"]",
".",
"value",
"readout",
"=",
"f",
"[",
"\"readout\"",
"]",
".",
"value",
"image_size",
"=",
"f",
"[",
"\"size\"",
"]",
".",
"value",
"fps",
"=",
"f",
"[",
"\"fps\"",
"]",
".",
"value",
"instance",
"=",
"cls",
"(",
"image_size",
",",
"fps",
",",
"readout",
",",
"K",
",",
"wc",
",",
"lgamma",
")",
"return",
"instance"
] | Load camera model params from an HDF5 file
The HDF5 file should contain the following datasets:
wc : (2,) float with distortion center
lgamma : float distortion parameter
readout : float readout value
size : (2,) int image size
fps : float frame rate
K : (3, 3) float camera matrix
Parameters
--------------------
filename : str
Path to file with parameters
Returns
---------------------
AtanCameraModel
Camera model instance | [
"Load",
"camera",
"model",
"params",
"from",
"a",
"HDF5",
"file"
] | 65cae19e7cfae5a397859096c9ef666e0f4e7f1b | https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/camera.py#L128-L158 |
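To produce a file this loader accepts, write the six datasets it reads; the values below are placeholders, not calibrated numbers. Note also that `f[name].value` used above is the legacy h5py accessor, removed in h5py 3.x; `f[name][()]` is the modern equivalent.

```python
import h5py
import numpy as np

with h5py.File('camera.hdf5', 'w') as f:
    f['wc'] = np.array([320.0, 240.0])   # distortion center (placeholder)
    f['lgamma'] = 0.8                    # distortion parameter (placeholder)
    f['readout'] = 0.026                 # rolling-shutter readout time
    f['size'] = np.array([640, 480])     # image size
    f['fps'] = 30.0
    f['K'] = np.array([[850.0, 0.0, 320.0],
                       [0.0, 850.0, 240.0],
                       [0.0, 0.0, 1.0]])  # camera matrix
```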
3,134 | hovren/crisp | crisp/camera.py | AtanCameraModel.invert | def invert(self, points):
"""Invert the distortion
Parameters
------------------
points : ndarray
Input image points
Returns
-----------------
ndarray
Undistorted points
"""
X = points if not points.ndim == 1 else points.reshape((points.size, 1))
wx, wy = self.wc
# Switch to polar coordinates
rn = np.sqrt((X[0,:] - wx)**2 + (X[1,:] - wy)**2)
phi = np.arctan2(X[1,:] - wy, X[0,:]-wx)
# 'atan' method
        r = np.tan(rn * self.lgamma) / self.lgamma
# Switch back to rectangular coordinates
Y = np.ones(X.shape)
Y[0,:] = wx + r * np.cos(phi)
Y[1,:]= wy + r * np.sin(phi)
return Y | python | def invert(self, points):
X = points if not points.ndim == 1 else points.reshape((points.size, 1))
wx, wy = self.wc
# Switch to polar coordinates
rn = np.sqrt((X[0,:] - wx)**2 + (X[1,:] - wy)**2)
phi = np.arctan2(X[1,:] - wy, X[0,:]-wx)
# 'atan' method
        r = np.tan(rn * self.lgamma) / self.lgamma
# Switch back to rectangular coordinates
Y = np.ones(X.shape)
Y[0,:] = wx + r * np.cos(phi)
Y[1,:]= wy + r * np.sin(phi)
return Y | [
"def",
"invert",
"(",
"self",
",",
"points",
")",
":",
"X",
"=",
"points",
"if",
"not",
"points",
".",
"ndim",
"==",
"1",
"else",
"points",
".",
"reshape",
"(",
"(",
"points",
".",
"size",
",",
"1",
")",
")",
"wx",
",",
"wy",
"=",
"self",
".",
"wc",
"# Switch to polar coordinates",
"rn",
"=",
"np",
".",
"sqrt",
"(",
"(",
"X",
"[",
"0",
",",
":",
"]",
"-",
"wx",
")",
"**",
"2",
"+",
"(",
"X",
"[",
"1",
",",
":",
"]",
"-",
"wy",
")",
"**",
"2",
")",
"phi",
"=",
"np",
".",
"arctan2",
"(",
"X",
"[",
"1",
",",
":",
"]",
"-",
"wy",
",",
"X",
"[",
"0",
",",
":",
"]",
"-",
"wx",
")",
"# 'atan' method",
"r",
"=",
"np",
".",
"tan",
"(",
"rn",
"*",
"self",
".",
"lgamma",
")",
"/",
"self",
".",
"lgamma",
"# Switch back to rectangular coordinates",
"Y",
"=",
"np",
".",
"ones",
"(",
"X",
".",
"shape",
")",
"Y",
"[",
"0",
",",
":",
"]",
"=",
"wx",
"+",
"r",
"*",
"np",
".",
"cos",
"(",
"phi",
")",
"Y",
"[",
"1",
",",
":",
"]",
"=",
"wy",
"+",
"r",
"*",
"np",
".",
"sin",
"(",
"phi",
")",
"return",
"Y"
] | Invert the distortion
Parameters
------------------
points : ndarray
Input image points
Returns
-----------------
ndarray
Undistorted points | [
"Invert",
"the",
"distortion"
] | 65cae19e7cfae5a397859096c9ef666e0f4e7f1b | https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/camera.py#L160-L187 |
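The method undoes the 'atan' radial model r_n = arctan(r * lgamma) / lgamma about the distortion center wc. A self-contained round-trip check, with the forward model written out explicitly (the forward helper is inferred from the inverse shown above, not copied from the library):

```python
import numpy as np

wc = np.array([2.0, 3.0])  # distortion center (example values)
lgamma = 0.5               # distortion parameter (example value)

def apply_atan(points):
    # Hypothetical forward model: r_n = arctan(r * lgamma) / lgamma
    d = points - wc[:, None]
    r = np.sqrt(np.sum(d**2, axis=0))
    phi = np.arctan2(d[1], d[0])
    rn = np.arctan(r * lgamma) / lgamma
    return np.vstack((wc[0] + rn * np.cos(phi), wc[1] + rn * np.sin(phi)))

def invert_atan(points):
    # Same math as AtanCameraModel.invert, acting on 2xN arrays.
    d = points - wc[:, None]
    rn = np.sqrt(np.sum(d**2, axis=0))
    phi = np.arctan2(d[1], d[0])
    r = np.tan(rn * lgamma) / lgamma
    return np.vstack((wc[0] + r * np.cos(phi), wc[1] + r * np.sin(phi)))

pts = np.array([[5.0, 1.0], [7.0, 2.0]])  # two points as columns
assert np.allclose(invert_atan(apply_atan(pts)), pts)
```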
3,135 | hovren/crisp | crisp/camera.py | Kinect.purge_bad_timestamp_files | def purge_bad_timestamp_files(file_list):
"Given a list of image files, find bad frames, remove them and modify file_list"
MAX_INITIAL_BAD_FRAMES = 15
bad_ts = Kinect.detect_bad_timestamps(Kinect.timestamps_from_file_list(file_list))
# Trivial case
if not bad_ts:
return file_list
# No bad frames after the initial allowed
last_bad = max(bad_ts)
if last_bad >= MAX_INITIAL_BAD_FRAMES:
raise Exception('Only 15 initial bad frames are allowed, but last bad frame is %d' % last_bad)
# Remove all frames up to the last bad frame
for i in range(last_bad + 1):
os.remove(file_list[i])
# Purge from the list
file_list = file_list[last_bad+1:]
return file_list | python | def purge_bad_timestamp_files(file_list):
"Given a list of image files, find bad frames, remove them and modify file_list"
MAX_INITIAL_BAD_FRAMES = 15
bad_ts = Kinect.detect_bad_timestamps(Kinect.timestamps_from_file_list(file_list))
# Trivial case
if not bad_ts:
return file_list
# No bad frames after the initial allowed
last_bad = max(bad_ts)
if last_bad >= MAX_INITIAL_BAD_FRAMES:
raise Exception('Only 15 initial bad frames are allowed, but last bad frame is %d' % last_bad)
# Remove all frames up to the last bad frame
for i in range(last_bad + 1):
os.remove(file_list[i])
# Purge from the list
file_list = file_list[last_bad+1:]
return file_list | [
"def",
"purge_bad_timestamp_files",
"(",
"file_list",
")",
":",
"MAX_INITIAL_BAD_FRAMES",
"=",
"15",
"bad_ts",
"=",
"Kinect",
".",
"detect_bad_timestamps",
"(",
"Kinect",
".",
"timestamps_from_file_list",
"(",
"file_list",
")",
")",
"# Trivial case",
"if",
"not",
"bad_ts",
":",
"return",
"file_list",
"# No bad frames after the initial allowed",
"last_bad",
"=",
"max",
"(",
"bad_ts",
")",
"if",
"last_bad",
">=",
"MAX_INITIAL_BAD_FRAMES",
":",
"raise",
"Exception",
"(",
"'Only 15 initial bad frames are allowed, but last bad frame is %d'",
"%",
"last_bad",
")",
"# Remove all frames up to the last bad frame",
"for",
"i",
"in",
"range",
"(",
"last_bad",
"+",
"1",
")",
":",
"os",
".",
"remove",
"(",
"file_list",
"[",
"i",
"]",
")",
"# Purge from the list",
"file_list",
"=",
"file_list",
"[",
"last_bad",
"+",
"1",
":",
"]",
"return",
"file_list"
] | Given a list of image files, find bad frames, remove them and modify file_list | [
"Given",
"a",
"list",
"of",
"image",
"files",
"find",
"bad",
"frames",
"remove",
"them",
"and",
"modify",
"file_list"
] | 65cae19e7cfae5a397859096c9ef666e0f4e7f1b | https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/camera.py#L498-L519 |
3,136 | hovren/crisp | crisp/camera.py | Kinect.depth_file_for_nir_file | def depth_file_for_nir_file(video_filename, depth_file_list):
"""Returns the corresponding depth filename given a NIR filename"""
(root, filename) = os.path.split(video_filename)
needle_ts = int(filename.split('-')[2].split('.')[0])
haystack_ts_list = np.array(Kinect.timestamps_from_file_list(depth_file_list))
haystack_idx = np.flatnonzero(haystack_ts_list == needle_ts)[0]
depth_filename = depth_file_list[haystack_idx]
return depth_filename | python | def depth_file_for_nir_file(video_filename, depth_file_list):
(root, filename) = os.path.split(video_filename)
needle_ts = int(filename.split('-')[2].split('.')[0])
haystack_ts_list = np.array(Kinect.timestamps_from_file_list(depth_file_list))
haystack_idx = np.flatnonzero(haystack_ts_list == needle_ts)[0]
depth_filename = depth_file_list[haystack_idx]
return depth_filename | [
"def",
"depth_file_for_nir_file",
"(",
"video_filename",
",",
"depth_file_list",
")",
":",
"(",
"root",
",",
"filename",
")",
"=",
"os",
".",
"path",
".",
"split",
"(",
"video_filename",
")",
"needle_ts",
"=",
"int",
"(",
"filename",
".",
"split",
"(",
"'-'",
")",
"[",
"2",
"]",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
")",
"haystack_ts_list",
"=",
"np",
".",
"array",
"(",
"Kinect",
".",
"timestamps_from_file_list",
"(",
"depth_file_list",
")",
")",
"haystack_idx",
"=",
"np",
".",
"flatnonzero",
"(",
"haystack_ts_list",
"==",
"needle_ts",
")",
"[",
"0",
"]",
"depth_filename",
"=",
"depth_file_list",
"[",
"haystack_idx",
"]",
"return",
"depth_filename"
] | Returns the corresponding depth filename given a NIR filename | [
"Returns",
"the",
"corresponding",
"depth",
"filename",
"given",
"a",
"NIR",
"filename"
] | 65cae19e7cfae5a397859096c9ef666e0f4e7f1b | https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/camera.py#L522-L529 |
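The matching key is the integer timestamp embedded in the filename, i.e. field 2 of a dash-separated name with the extension stripped. A sketch with a hypothetical filename:

```python
import os

# '<prefix>-<type>-<timestamp>.<ext>' -> split on '-' and take field 2.
video_filename = '/data/kinect/frame-nir-1234567.pgm'  # hypothetical name
_, filename = os.path.split(video_filename)
needle_ts = int(filename.split('-')[2].split('.')[0])
print(needle_ts)  # 1234567
```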
3,137 | hovren/crisp | crisp/camera.py | Kinect.find_nir_file_with_missing_depth | def find_nir_file_with_missing_depth(video_file_list, depth_file_list):
"Remove all files without its own counterpart. Returns new lists of files"
new_video_list = []
new_depth_list = []
for fname in video_file_list:
try:
depth_file = Kinect.depth_file_for_nir_file(fname, depth_file_list)
new_video_list.append(fname)
new_depth_list.append(depth_file)
except IndexError: # Missing file
pass
# Purge bad files
bad_nir = [f for f in video_file_list if f not in new_video_list]
bad_depth = [f for f in depth_file_list if f not in new_depth_list]
return (new_video_list, new_depth_list, bad_nir, bad_depth) | python | def find_nir_file_with_missing_depth(video_file_list, depth_file_list):
"Remove all files without its own counterpart. Returns new lists of files"
new_video_list = []
new_depth_list = []
for fname in video_file_list:
try:
depth_file = Kinect.depth_file_for_nir_file(fname, depth_file_list)
new_video_list.append(fname)
new_depth_list.append(depth_file)
except IndexError: # Missing file
pass
# Purge bad files
bad_nir = [f for f in video_file_list if f not in new_video_list]
bad_depth = [f for f in depth_file_list if f not in new_depth_list]
return (new_video_list, new_depth_list, bad_nir, bad_depth) | [
"def",
"find_nir_file_with_missing_depth",
"(",
"video_file_list",
",",
"depth_file_list",
")",
":",
"new_video_list",
"=",
"[",
"]",
"new_depth_list",
"=",
"[",
"]",
"for",
"fname",
"in",
"video_file_list",
":",
"try",
":",
"depth_file",
"=",
"Kinect",
".",
"depth_file_for_nir_file",
"(",
"fname",
",",
"depth_file_list",
")",
"new_video_list",
".",
"append",
"(",
"fname",
")",
"new_depth_list",
".",
"append",
"(",
"depth_file",
")",
"except",
"IndexError",
":",
"# Missing file",
"pass",
"# Purge bad files",
"bad_nir",
"=",
"[",
"f",
"for",
"f",
"in",
"video_file_list",
"if",
"f",
"not",
"in",
"new_video_list",
"]",
"bad_depth",
"=",
"[",
"f",
"for",
"f",
"in",
"depth_file_list",
"if",
"f",
"not",
"in",
"new_depth_list",
"]",
"return",
"(",
"new_video_list",
",",
"new_depth_list",
",",
"bad_nir",
",",
"bad_depth",
")"
] | Remove all files without a counterpart. Returns new lists of files | [
"Remove",
"all",
"files",
"without",
"its",
"own",
"counterpart",
".",
"Returns",
"new",
"lists",
"of",
"files"
] | 65cae19e7cfae5a397859096c9ef666e0f4e7f1b | https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/camera.py#L543-L559 |
3,138 | hovren/crisp | crisp/videoslice.py | fill_sampling | def fill_sampling(slice_list, N):
"""Given a list of slices, draw N samples such that each slice contributes as much as possible
Parameters
--------------------------
slice_list : list of Slice
List of slices
N : int
Number of samples to draw
"""
A = [len(s.inliers) for s in slice_list]
N_max = np.sum(A)
if N > N_max:
raise ValueError("Tried to draw {:d} samples from a pool of only {:d} items".format(N, N_max))
samples_from = np.zeros((len(A),), dtype='int') # Number of samples to draw from each group
remaining = N
while remaining > 0:
remaining_groups = np.flatnonzero(samples_from - np.array(A))
if remaining < len(remaining_groups):
np.random.shuffle(remaining_groups)
for g in remaining_groups[:remaining]:
samples_from[g] += 1
else:
# Give each group the allowed number of samples. Constrain to their max size.
to_each = max(1, int(remaining / len(remaining_groups)))
samples_from = np.min(np.vstack((samples_from + to_each, A)), axis=0)
# Update remaining count
remaining = int(N - np.sum(samples_from))
if not remaining == 0:
raise ValueError("Still {:d} samples left! This is an error in the selection.")
# Construct index list of selected samples
samples = []
for s, a, n in zip(slice_list, A, samples_from):
if a == n:
samples.append(np.array(s.inliers)) # all
elif a == 0:
            samples.append(np.array([], dtype=int))
else:
chosen = np.random.choice(s.inliers, n, replace=False)
samples.append(np.array(chosen))
return samples | python | def fill_sampling(slice_list, N):
A = [len(s.inliers) for s in slice_list]
N_max = np.sum(A)
if N > N_max:
raise ValueError("Tried to draw {:d} samples from a pool of only {:d} items".format(N, N_max))
samples_from = np.zeros((len(A),), dtype='int') # Number of samples to draw from each group
remaining = N
while remaining > 0:
remaining_groups = np.flatnonzero(samples_from - np.array(A))
if remaining < len(remaining_groups):
np.random.shuffle(remaining_groups)
for g in remaining_groups[:remaining]:
samples_from[g] += 1
else:
# Give each group the allowed number of samples. Constrain to their max size.
to_each = max(1, int(remaining / len(remaining_groups)))
samples_from = np.min(np.vstack((samples_from + to_each, A)), axis=0)
# Update remaining count
remaining = int(N - np.sum(samples_from))
if not remaining == 0:
raise ValueError("Still {:d} samples left! This is an error in the selection.")
# Construct index list of selected samples
samples = []
for s, a, n in zip(slice_list, A, samples_from):
if a == n:
samples.append(np.array(s.inliers)) # all
elif a == 0:
samples.append(np.arange([]))
else:
chosen = np.random.choice(s.inliers, n, replace=False)
samples.append(np.array(chosen))
return samples | [
"def",
"fill_sampling",
"(",
"slice_list",
",",
"N",
")",
":",
"A",
"=",
"[",
"len",
"(",
"s",
".",
"inliers",
")",
"for",
"s",
"in",
"slice_list",
"]",
"N_max",
"=",
"np",
".",
"sum",
"(",
"A",
")",
"if",
"N",
">",
"N_max",
":",
"raise",
"ValueError",
"(",
"\"Tried to draw {:d} samples from a pool of only {:d} items\"",
".",
"format",
"(",
"N",
",",
"N_max",
")",
")",
"samples_from",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"A",
")",
",",
")",
",",
"dtype",
"=",
"'int'",
")",
"# Number of samples to draw from each group",
"remaining",
"=",
"N",
"while",
"remaining",
">",
"0",
":",
"remaining_groups",
"=",
"np",
".",
"flatnonzero",
"(",
"samples_from",
"-",
"np",
".",
"array",
"(",
"A",
")",
")",
"if",
"remaining",
"<",
"len",
"(",
"remaining_groups",
")",
":",
"np",
".",
"random",
".",
"shuffle",
"(",
"remaining_groups",
")",
"for",
"g",
"in",
"remaining_groups",
"[",
":",
"remaining",
"]",
":",
"samples_from",
"[",
"g",
"]",
"+=",
"1",
"else",
":",
"# Give each group the allowed number of samples. Constrain to their max size.",
"to_each",
"=",
"max",
"(",
"1",
",",
"int",
"(",
"remaining",
"/",
"len",
"(",
"remaining_groups",
")",
")",
")",
"samples_from",
"=",
"np",
".",
"min",
"(",
"np",
".",
"vstack",
"(",
"(",
"samples_from",
"+",
"to_each",
",",
"A",
")",
")",
",",
"axis",
"=",
"0",
")",
"# Update remaining count",
"remaining",
"=",
"int",
"(",
"N",
"-",
"np",
".",
"sum",
"(",
"samples_from",
")",
")",
"if",
"not",
"remaining",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"Still {:d} samples left! This is an error in the selection.\"",
")",
"# Construct index list of selected samples",
"samples",
"=",
"[",
"]",
"for",
"s",
",",
"a",
",",
"n",
"in",
"zip",
"(",
"slice_list",
",",
"A",
",",
"samples_from",
")",
":",
"if",
"a",
"==",
"n",
":",
"samples",
".",
"append",
"(",
"np",
".",
"array",
"(",
"s",
".",
"inliers",
")",
")",
"# all",
"elif",
"a",
"==",
"0",
":",
"samples",
".",
"append",
"(",
"np",
".",
"arange",
"(",
"[",
"]",
")",
")",
"else",
":",
"chosen",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"s",
".",
"inliers",
",",
"n",
",",
"replace",
"=",
"False",
")",
"samples",
".",
"append",
"(",
"np",
".",
"array",
"(",
"chosen",
")",
")",
"return",
"samples"
] | Given a list of slices, draw N samples such that each slice contributes as much as possible
Parameters
--------------------------
slice_list : list of Slice
List of slices
N : int
Number of samples to draw | [
"Given",
"a",
"list",
"of",
"slices",
"draw",
"N",
"samples",
"such",
"that",
"each",
"slice",
"contributes",
"as",
"much",
"as",
"possible"
] | 65cae19e7cfae5a397859096c9ef666e0f4e7f1b | https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/videoslice.py#L117-L162 |
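The subtle part of `fill_sampling` is the allocation loop that decides how many samples each slice contributes; below it is isolated as a standalone helper (a sketch mirroring the logic above, not the library API):

```python
import numpy as np

def allocate(A, N):
    # Decide how many samples to draw from each slice whose inlier counts
    # are in A, so that every slice contributes as evenly as possible.
    samples_from = np.zeros(len(A), dtype=int)
    remaining = N
    while remaining > 0:
        open_groups = np.flatnonzero(samples_from - np.array(A))
        if remaining < len(open_groups):
            np.random.shuffle(open_groups)
            for g in open_groups[:remaining]:
                samples_from[g] += 1
        else:
            to_each = max(1, remaining // len(open_groups))
            samples_from = np.minimum(samples_from + to_each, A)
        remaining = N - int(samples_from.sum())
    return samples_from

print(allocate([4, 2, 6], 9))  # e.g. [4 2 3] -- every slice contributes
```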
3,139 | hovren/crisp | crisp/videoslice.py | Slice.estimate_rotation | def estimate_rotation(self, camera, ransac_threshold=7.0):
"""Estimate the rotation between first and last frame
It uses RANSAC where the error metric is the reprojection error of the points
from the last frame to the first frame.
Parameters
-----------------
camera : CameraModel
Camera model
ransac_threshold : float
Distance threshold (in pixels) for a reprojected point to count as an inlier
"""
if self.axis is None:
x = self.points[:, 0, :].T
y = self.points[:, -1, :].T
inlier_ratio = 0.5
R, t, dist, idx = rotations.estimate_rotation_procrustes_ransac(x, y,
camera,
ransac_threshold,
inlier_ratio=inlier_ratio,
do_translation=False)
if R is not None:
self.axis, self.angle = rotations.rotation_matrix_to_axis_angle(R)
if self.angle < 0: # Constrain to positive angles
self.angle = -self.angle
self.axis = -self.axis
self.inliers = idx
return self.axis is not None | python | def estimate_rotation(self, camera, ransac_threshold=7.0):
if self.axis is None:
x = self.points[:, 0, :].T
y = self.points[:, -1, :].T
inlier_ratio = 0.5
R, t, dist, idx = rotations.estimate_rotation_procrustes_ransac(x, y,
camera,
ransac_threshold,
inlier_ratio=inlier_ratio,
do_translation=False)
if R is not None:
self.axis, self.angle = rotations.rotation_matrix_to_axis_angle(R)
if self.angle < 0: # Constrain to positive angles
self.angle = -self.angle
self.axis = -self.axis
self.inliers = idx
return self.axis is not None | [
"def",
"estimate_rotation",
"(",
"self",
",",
"camera",
",",
"ransac_threshold",
"=",
"7.0",
")",
":",
"if",
"self",
".",
"axis",
"is",
"None",
":",
"x",
"=",
"self",
".",
"points",
"[",
":",
",",
"0",
",",
":",
"]",
".",
"T",
"y",
"=",
"self",
".",
"points",
"[",
":",
",",
"-",
"1",
",",
":",
"]",
".",
"T",
"inlier_ratio",
"=",
"0.5",
"R",
",",
"t",
",",
"dist",
",",
"idx",
"=",
"rotations",
".",
"estimate_rotation_procrustes_ransac",
"(",
"x",
",",
"y",
",",
"camera",
",",
"ransac_threshold",
",",
"inlier_ratio",
"=",
"inlier_ratio",
",",
"do_translation",
"=",
"False",
")",
"if",
"R",
"is",
"not",
"None",
":",
"self",
".",
"axis",
",",
"self",
".",
"angle",
"=",
"rotations",
".",
"rotation_matrix_to_axis_angle",
"(",
"R",
")",
"if",
"self",
".",
"angle",
"<",
"0",
":",
"# Constrain to positive angles",
"self",
".",
"angle",
"=",
"-",
"self",
".",
"angle",
"self",
".",
"axis",
"=",
"-",
"self",
".",
"axis",
"self",
".",
"inliers",
"=",
"idx",
"return",
"self",
".",
"axis",
"is",
"not",
"None"
] | Estimate the rotation between first and last frame
It uses RANSAC where the error metric is the reprojection error of the points
from the last frame to the first frame.
Parameters
-----------------
camera : CameraModel
Camera model
ransac_threshold : float
Distance threshold (in pixels) for a reprojected point to count as an inlier | [
"Estimate",
"the",
"rotation",
"between",
"first",
"and",
"last",
"frame"
] | 65cae19e7cfae5a397859096c9ef666e0f4e7f1b | https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/videoslice.py#L31-L61 |
3,140 | hovren/crisp | crisp/videoslice.py | Slice.from_stream_randomly | def from_stream_randomly(video_stream, step_bounds=(5, 15), length_bounds=(2, 15), max_start=None, min_distance=10, min_slice_points=10):
"""Create slices from a video stream using random sampling
Parameters
-----------------
video_stream : VideoStream
A video stream
step_bounds : tuple
Range bounds (inclusive) of possible step lengths
length_bounds : tuple
Range bounds (inclusive) of possible slice lengths
max_start : int
Maximum frame number to start from
min_distance : float
Minimum (initial) distance between tracked points
min_slice_points : int
Minimum number of points to keep a slice
Returns
-------------------
list of Slice
List of slices
"""
new_step = lambda: int(np.random.uniform(low=step_bounds[0], high=step_bounds[1]))
new_length = lambda: int(np.random.uniform(low=length_bounds[0], high=length_bounds[1]))
seq_frames = []
slices = []
seq_start_points = None
next_seq_start = new_step() if max_start is None else min(new_step(), max_start)
next_seq_length = new_length()
for i, im in enumerate(video_stream):
if next_seq_start <= i < next_seq_start + next_seq_length:
im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
seq_frames.append(im)
if len(seq_frames) == 1:
max_corners = 400
quality_level = 0.07
seq_start_points = cv2.goodFeaturesToTrack(im, max_corners, quality_level, min_distance)
elif len(seq_frames) == next_seq_length:
points, status = tracking.track_retrack(seq_frames, seq_start_points)
if points.shape[0] >= min_slice_points:
s = Slice(next_seq_start, i, points)
slices.append(s)
logger.debug('{0:4d} {1:3d} {2:5d} {3:>5d}-{4:<5d}'.format(len(slices)-1, points.shape[1], points.shape[0], next_seq_start, i))
seq_frames = []
next_seq_start = i + new_step()
next_seq_length = new_length()
return slices | python | def from_stream_randomly(video_stream, step_bounds=(5, 15), length_bounds=(2, 15), max_start=None, min_distance=10, min_slice_points=10):
new_step = lambda: int(np.random.uniform(low=step_bounds[0], high=step_bounds[1]))
new_length = lambda: int(np.random.uniform(low=length_bounds[0], high=length_bounds[1]))
seq_frames = []
slices = []
seq_start_points = None
next_seq_start = new_step() if max_start is None else min(new_step(), max_start)
next_seq_length = new_length()
for i, im in enumerate(video_stream):
if next_seq_start <= i < next_seq_start + next_seq_length:
im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
seq_frames.append(im)
if len(seq_frames) == 1:
max_corners = 400
quality_level = 0.07
seq_start_points = cv2.goodFeaturesToTrack(im, max_corners, quality_level, min_distance)
elif len(seq_frames) == next_seq_length:
points, status = tracking.track_retrack(seq_frames, seq_start_points)
if points.shape[0] >= min_slice_points:
s = Slice(next_seq_start, i, points)
slices.append(s)
logger.debug('{0:4d} {1:3d} {2:5d} {3:>5d}-{4:<5d}'.format(len(slices)-1, points.shape[1], points.shape[0], next_seq_start, i))
seq_frames = []
next_seq_start = i + new_step()
next_seq_length = new_length()
return slices | [
"def",
"from_stream_randomly",
"(",
"video_stream",
",",
"step_bounds",
"=",
"(",
"5",
",",
"15",
")",
",",
"length_bounds",
"=",
"(",
"2",
",",
"15",
")",
",",
"max_start",
"=",
"None",
",",
"min_distance",
"=",
"10",
",",
"min_slice_points",
"=",
"10",
")",
":",
"new_step",
"=",
"lambda",
":",
"int",
"(",
"np",
".",
"random",
".",
"uniform",
"(",
"low",
"=",
"step_bounds",
"[",
"0",
"]",
",",
"high",
"=",
"step_bounds",
"[",
"1",
"]",
")",
")",
"new_length",
"=",
"lambda",
":",
"int",
"(",
"np",
".",
"random",
".",
"uniform",
"(",
"low",
"=",
"length_bounds",
"[",
"0",
"]",
",",
"high",
"=",
"length_bounds",
"[",
"1",
"]",
")",
")",
"seq_frames",
"=",
"[",
"]",
"slices",
"=",
"[",
"]",
"seq_start_points",
"=",
"None",
"next_seq_start",
"=",
"new_step",
"(",
")",
"if",
"max_start",
"is",
"None",
"else",
"min",
"(",
"new_step",
"(",
")",
",",
"max_start",
")",
"next_seq_length",
"=",
"new_length",
"(",
")",
"for",
"i",
",",
"im",
"in",
"enumerate",
"(",
"video_stream",
")",
":",
"if",
"next_seq_start",
"<=",
"i",
"<",
"next_seq_start",
"+",
"next_seq_length",
":",
"im",
"=",
"cv2",
".",
"cvtColor",
"(",
"im",
",",
"cv2",
".",
"COLOR_BGR2GRAY",
")",
"seq_frames",
".",
"append",
"(",
"im",
")",
"if",
"len",
"(",
"seq_frames",
")",
"==",
"1",
":",
"max_corners",
"=",
"400",
"quality_level",
"=",
"0.07",
"seq_start_points",
"=",
"cv2",
".",
"goodFeaturesToTrack",
"(",
"im",
",",
"max_corners",
",",
"quality_level",
",",
"min_distance",
")",
"elif",
"len",
"(",
"seq_frames",
")",
"==",
"next_seq_length",
":",
"points",
",",
"status",
"=",
"tracking",
".",
"track_retrack",
"(",
"seq_frames",
",",
"seq_start_points",
")",
"if",
"points",
".",
"shape",
"[",
"0",
"]",
">=",
"min_slice_points",
":",
"s",
"=",
"Slice",
"(",
"next_seq_start",
",",
"i",
",",
"points",
")",
"slices",
".",
"append",
"(",
"s",
")",
"logger",
".",
"debug",
"(",
"'{0:4d} {1:3d} {2:5d} {3:>5d}-{4:<5d}'",
".",
"format",
"(",
"len",
"(",
"slices",
")",
"-",
"1",
",",
"points",
".",
"shape",
"[",
"1",
"]",
",",
"points",
".",
"shape",
"[",
"0",
"]",
",",
"next_seq_start",
",",
"i",
")",
")",
"seq_frames",
"=",
"[",
"]",
"next_seq_start",
"=",
"i",
"+",
"new_step",
"(",
")",
"next_seq_length",
"=",
"new_length",
"(",
")",
"return",
"slices"
] | Create slices from a video stream using random sampling
Parameters
-----------------
video_stream : VideoStream
A video stream
step_bounds : tuple
Range bounds (inclusive) of possible step lengths
length_bounds : tuple
Range bounds (inclusive) of possible slice lengths
max_start : int
Maximum frame number to start from
min_distance : float
Minimum (initial) distance between tracked points
min_slice_points : int
Minimum number of points to keep a slice
Returns
-------------------
list of Slice
List of slices | [
"Create",
"slices",
"from",
"a",
"video",
"stream",
"using",
"random",
"sampling"
] | 65cae19e7cfae5a397859096c9ef666e0f4e7f1b | https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/videoslice.py#L64-L115 |
3,141 | hovren/crisp | crisp/rotations.py | estimate_rotation_procrustes_ransac | def estimate_rotation_procrustes_ransac(x, y, camera, threshold, inlier_ratio=0.75, do_translation=False):
"""Calculate rotation between two sets of image coordinates using ransac.
Inlier criteria is the reprojection error of y into image 1.
Parameters
-------------------------
x : array 2xN image coordinates in image 1
y : array 2xN image coordinates in image 2
camera : Camera model
threshold : float pixel distance threshold to accept as inlier
do_translation : bool Try to estimate the translation as well
Returns
------------------------
R : array 3x3 The rotation that best fulfills X = RY
    t : array 3x1 translation estimate, or None if do_translation is False
    residual : array pixel distances ||x - xhat|| where xhat ~ KRY (and lens distortion)
inliers : array Indices of the points (in X and Y) that are RANSAC inliers
"""
assert x.shape == y.shape
assert x.shape[0] == 2
X = camera.unproject(x)
Y = camera.unproject(y)
data = np.vstack((X, Y, x))
assert data.shape[0] == 8
model_func = lambda data: procrustes(data[:3], data[3:6], remove_mean=do_translation)
def eval_func(model, data):
Y = data[3:6].reshape(3,-1)
x = data[6:].reshape(2,-1)
R, t = model
Xhat = np.dot(R, Y) if t is None else np.dot(R, Y) + t
xhat = camera.project(Xhat)
dist = np.sqrt(np.sum((x-xhat)**2, axis=0))
return dist
inlier_selection_prob = 0.99999
model_points = 2
ransac_iterations = int(np.log(1 - inlier_selection_prob) / np.log(1-inlier_ratio**model_points))
model_est, ransac_consensus_idx = ransac.RANSAC(model_func, eval_func, data, model_points, ransac_iterations, threshold, recalculate=True)
if model_est is not None:
(R, t) = model_est
dist = eval_func((R, t), data)
else:
dist = None
R, t = None, None
ransac_consensus_idx = []
return R, t, dist, ransac_consensus_idx | python | def estimate_rotation_procrustes_ransac(x, y, camera, threshold, inlier_ratio=0.75, do_translation=False):
assert x.shape == y.shape
assert x.shape[0] == 2
X = camera.unproject(x)
Y = camera.unproject(y)
data = np.vstack((X, Y, x))
assert data.shape[0] == 8
model_func = lambda data: procrustes(data[:3], data[3:6], remove_mean=do_translation)
def eval_func(model, data):
Y = data[3:6].reshape(3,-1)
x = data[6:].reshape(2,-1)
R, t = model
Xhat = np.dot(R, Y) if t is None else np.dot(R, Y) + t
xhat = camera.project(Xhat)
dist = np.sqrt(np.sum((x-xhat)**2, axis=0))
return dist
inlier_selection_prob = 0.99999
model_points = 2
ransac_iterations = int(np.log(1 - inlier_selection_prob) / np.log(1-inlier_ratio**model_points))
model_est, ransac_consensus_idx = ransac.RANSAC(model_func, eval_func, data, model_points, ransac_iterations, threshold, recalculate=True)
if model_est is not None:
(R, t) = model_est
dist = eval_func((R, t), data)
else:
dist = None
R, t = None, None
ransac_consensus_idx = []
return R, t, dist, ransac_consensus_idx | [
"def",
"estimate_rotation_procrustes_ransac",
"(",
"x",
",",
"y",
",",
"camera",
",",
"threshold",
",",
"inlier_ratio",
"=",
"0.75",
",",
"do_translation",
"=",
"False",
")",
":",
"assert",
"x",
".",
"shape",
"==",
"y",
".",
"shape",
"assert",
"x",
".",
"shape",
"[",
"0",
"]",
"==",
"2",
"X",
"=",
"camera",
".",
"unproject",
"(",
"x",
")",
"Y",
"=",
"camera",
".",
"unproject",
"(",
"y",
")",
"data",
"=",
"np",
".",
"vstack",
"(",
"(",
"X",
",",
"Y",
",",
"x",
")",
")",
"assert",
"data",
".",
"shape",
"[",
"0",
"]",
"==",
"8",
"model_func",
"=",
"lambda",
"data",
":",
"procrustes",
"(",
"data",
"[",
":",
"3",
"]",
",",
"data",
"[",
"3",
":",
"6",
"]",
",",
"remove_mean",
"=",
"do_translation",
")",
"def",
"eval_func",
"(",
"model",
",",
"data",
")",
":",
"Y",
"=",
"data",
"[",
"3",
":",
"6",
"]",
".",
"reshape",
"(",
"3",
",",
"-",
"1",
")",
"x",
"=",
"data",
"[",
"6",
":",
"]",
".",
"reshape",
"(",
"2",
",",
"-",
"1",
")",
"R",
",",
"t",
"=",
"model",
"Xhat",
"=",
"np",
".",
"dot",
"(",
"R",
",",
"Y",
")",
"if",
"t",
"is",
"None",
"else",
"np",
".",
"dot",
"(",
"R",
",",
"Y",
")",
"+",
"t",
"xhat",
"=",
"camera",
".",
"project",
"(",
"Xhat",
")",
"dist",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"sum",
"(",
"(",
"x",
"-",
"xhat",
")",
"**",
"2",
",",
"axis",
"=",
"0",
")",
")",
"return",
"dist",
"inlier_selection_prob",
"=",
"0.99999",
"model_points",
"=",
"2",
"ransac_iterations",
"=",
"int",
"(",
"np",
".",
"log",
"(",
"1",
"-",
"inlier_selection_prob",
")",
"/",
"np",
".",
"log",
"(",
"1",
"-",
"inlier_ratio",
"**",
"model_points",
")",
")",
"model_est",
",",
"ransac_consensus_idx",
"=",
"ransac",
".",
"RANSAC",
"(",
"model_func",
",",
"eval_func",
",",
"data",
",",
"model_points",
",",
"ransac_iterations",
",",
"threshold",
",",
"recalculate",
"=",
"True",
")",
"if",
"model_est",
"is",
"not",
"None",
":",
"(",
"R",
",",
"t",
")",
"=",
"model_est",
"dist",
"=",
"eval_func",
"(",
"(",
"R",
",",
"t",
")",
",",
"data",
")",
"else",
":",
"dist",
"=",
"None",
"R",
",",
"t",
"=",
"None",
",",
"None",
"ransac_consensus_idx",
"=",
"[",
"]",
"return",
"R",
",",
"t",
",",
"dist",
",",
"ransac_consensus_idx"
] | Calculate rotation between two sets of image coordinates using RANSAC.
The inlier criterion is the reprojection error of y into image 1.
Parameters
-------------------------
x : array 2xN image coordinates in image 1
y : array 2xN image coordinates in image 2
camera : Camera model
threshold : float pixel distance threshold to accept as inlier
do_translation : bool Try to estimate the translation as well
Returns
------------------------
R : array 3x3 The rotation that best fulfills X = RY
t : array 3x1 translation estimate, or None if do_translation is False
residual : array pixel distances ||x - xhat|| where xhat ~ KRY (and lens distortion)
inliers : array Indices of the points (in X and Y) that are RANSAC inliers | [
"Calculate",
"rotation",
"between",
"two",
"sets",
"of",
"image",
"coordinates",
"using",
"ransac",
".",
"Inlier",
"criteria",
"is",
"the",
"reprojection",
"error",
"of",
"y",
"into",
"image",
"1",
"."
] | 65cae19e7cfae5a397859096c9ef666e0f4e7f1b | https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/rotations.py#L270-L325 |
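The iteration count above is the standard RANSAC bound N = log(1 - p) / log(1 - w^s), where p is the desired probability of drawing at least one all-inlier minimal sample, w the inlier ratio, and s the minimal sample size (two point correspondences here). A worked instance with this record's defaults:

```python
import numpy as np

p = 0.99999  # target probability of one uncontaminated minimal sample
w = 0.75     # assumed inlier ratio (the default above)
s = 2        # minimal sample size: two correspondences

iterations = int(np.log(1 - p) / np.log(1 - w**s))
print(iterations)  # 13 -> matches ransac_iterations for the defaults above
```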
3,142 | hovren/crisp | crisp/ransac.py | RANSAC | def RANSAC(model_func, eval_func, data, num_points, num_iter, threshold, recalculate=False):
"""Apply RANSAC.
    This RANSAC implementation will choose the best model based on the number of points in the consensus set. At evaluation time the model is created using num_points points. Then, if recalculate is True, it is recalculated using the points in the consensus set.
Parameters
------------
model_func: Takes a data parameter of size DxK where K is the number of points needed to construct the model and returns the model (Mx1 vector)
    eval_func: Takes a model parameter (Mx1) and one or more data points (DxC, C>=1) and calculates the score of the point(s) relative to the selected model
data : array (DxN) where D is dimensionality and N number of samples
"""
M = None
max_consensus = 0
all_idx = list(range(data.shape[1]))
final_consensus = []
for k in range(num_iter):
np.random.shuffle(all_idx)
model_set = all_idx[:num_points]
x = data[:, model_set]
m = model_func(x)
model_error = eval_func(m, data)
assert model_error.ndim == 1
assert model_error.size == data.shape[1]
consensus_idx = np.flatnonzero(model_error < threshold)
if len(consensus_idx) > max_consensus:
M = m
max_consensus = len(consensus_idx)
final_consensus = consensus_idx
# Recalculate using current consensus set?
if recalculate and len(final_consensus) > 0:
final_consensus_set = data[:, final_consensus]
M = model_func(final_consensus_set)
return (M, final_consensus) | python | def RANSAC(model_func, eval_func, data, num_points, num_iter, threshold, recalculate=False):
M = None
max_consensus = 0
all_idx = list(range(data.shape[1]))
final_consensus = []
for k in range(num_iter):
np.random.shuffle(all_idx)
model_set = all_idx[:num_points]
x = data[:, model_set]
m = model_func(x)
model_error = eval_func(m, data)
assert model_error.ndim == 1
assert model_error.size == data.shape[1]
consensus_idx = np.flatnonzero(model_error < threshold)
if len(consensus_idx) > max_consensus:
M = m
max_consensus = len(consensus_idx)
final_consensus = consensus_idx
# Recalculate using current consensus set?
if recalculate and len(final_consensus) > 0:
final_consensus_set = data[:, final_consensus]
M = model_func(final_consensus_set)
return (M, final_consensus) | [
"def",
"RANSAC",
"(",
"model_func",
",",
"eval_func",
",",
"data",
",",
"num_points",
",",
"num_iter",
",",
"threshold",
",",
"recalculate",
"=",
"False",
")",
":",
"M",
"=",
"None",
"max_consensus",
"=",
"0",
"all_idx",
"=",
"list",
"(",
"range",
"(",
"data",
".",
"shape",
"[",
"1",
"]",
")",
")",
"final_consensus",
"=",
"[",
"]",
"for",
"k",
"in",
"range",
"(",
"num_iter",
")",
":",
"np",
".",
"random",
".",
"shuffle",
"(",
"all_idx",
")",
"model_set",
"=",
"all_idx",
"[",
":",
"num_points",
"]",
"x",
"=",
"data",
"[",
":",
",",
"model_set",
"]",
"m",
"=",
"model_func",
"(",
"x",
")",
"model_error",
"=",
"eval_func",
"(",
"m",
",",
"data",
")",
"assert",
"model_error",
".",
"ndim",
"==",
"1",
"assert",
"model_error",
".",
"size",
"==",
"data",
".",
"shape",
"[",
"1",
"]",
"consensus_idx",
"=",
"np",
".",
"flatnonzero",
"(",
"model_error",
"<",
"threshold",
")",
"if",
"len",
"(",
"consensus_idx",
")",
">",
"max_consensus",
":",
"M",
"=",
"m",
"max_consensus",
"=",
"len",
"(",
"consensus_idx",
")",
"final_consensus",
"=",
"consensus_idx",
"# Recalculate using current consensus set?",
"if",
"recalculate",
"and",
"len",
"(",
"final_consensus",
")",
">",
"0",
":",
"final_consensus_set",
"=",
"data",
"[",
":",
",",
"final_consensus",
"]",
"M",
"=",
"model_func",
"(",
"final_consensus_set",
")",
"return",
"(",
"M",
",",
"final_consensus",
")"
] | Apply RANSAC.
This RANSAC implementation chooses the best model based on the number of points in its consensus set. At evaluation time each candidate model is created from num_points points; if recalculate is True, the winning model is then refitted using all points in its consensus set.
Parameters
------------
model_func: Takes a data parameter of size DxK where K is the number of points needed to construct the model and returns the model (Mx1 vector)
eval_func: Takes a model parameter (Lx1) and one or more data points (DxC, C>=1) and calculates the score of the point(s) relative to the selected model
data : array (DxN) where D is dimensionality and N number of samples | [
"Apply",
"RANSAC",
"."
] | 65cae19e7cfae5a397859096c9ef666e0f4e7f1b | https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/ransac.py#L5-L41 |
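The `RANSAC` routine above is model-agnostic: it only needs a `model_func` that fits a model to `num_points` columns of `data` and an `eval_func` that returns one score per column. Below is a minimal sketch driving it for robust 2D line fitting. The line-fitting helpers are illustrative stand-ins, not part of crisp, and the `crisp.ransac` import path is assumed from the file location shown above.

```python
import numpy as np
from crisp.ransac import RANSAC  # assumed import path

def fit_line(points):
    # points is 2xK; least-squares fit of y = a*x + b
    x, y = points
    A = np.column_stack([x, np.ones_like(x)])
    coeffs, *_ = np.linalg.lstsq(A, y, rcond=None)
    return coeffs  # model M = (a, b)

def line_error(model, points):
    # 1D array of per-point residuals, as eval_func must return
    a, b = model
    x, y = points
    return np.abs(y - (a * x + b))

rng = np.random.default_rng(0)
x = np.linspace(0.0, 10.0, 100)
y = 2.0 * x + 1.0 + rng.normal(0.0, 0.05, size=x.size)
y[::10] += 5.0                      # inject gross outliers every 10th sample
data = np.vstack([x, y])            # DxN with D = 2

model, inliers = RANSAC(fit_line, line_error, data,
                        num_points=2, num_iter=100,
                        threshold=0.5, recalculate=True)
print(model, len(inliers))          # roughly (2.0, 1.0) and ~90 inliers
```

Note that `eval_func` must return a 1D array with one entry per data column; the asserts inside `RANSAC` enforce exactly that.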
3,143 | hovren/crisp | crisp/imu.py | IMU.integrate | def integrate(self, pose_correction=np.eye(3), uniform=True):
"""Integrate angular velocity measurements to rotations.
Parameters
-------------
pose_correction : (3,3) ndarray, optional
Rotation matrix that describes the relative pose between the IMU and something else (e.g. camera).
uniform : bool
If True (default), assume uniform sample rate. This will use a faster integration method.
Returns
-------------
rotations : (4, N) ndarray
Rotations as unit quaternions with scalar as first element.
"""
if uniform:
dt = float(self.timestamps[1]-self.timestamps[0]) # Must be python float for fastintegrate to work
return fastintegrate.integrate_gyro_quaternion_uniform(self.gyro_data_corrected, dt)
else:
N = len(self.timestamps)
integrated = np.zeros((4, N))
integrated[:,0] = np.array([1, 0, 0, 0]) # Initial rotation (no rotation)
# Iterate over all
for i in range(1, len(self.timestamps)):
w = pose_correction.dot(self.gyro_data[:, i]) # Change to correct coordinate frame
dt = float(self.timestamps[i] - self.timestamps[i - 1])
qprev = integrated[:, i - 1].flatten()
A = np.array([[0, -w[0], -w[1], -w[2]],
[w[0], 0, w[2], -w[1]],
[w[1], -w[2], 0, w[0]],
[w[2], w[1], -w[0], 0]])
qnew = (np.eye(4) + (dt/2.0) * A).dot(qprev)
qnorm = np.sqrt(np.sum(qnew ** 2))
qnew = qnew / qnorm if qnorm > 0 else 0
integrated[:, i] = qnew
#print "%d, %s, %s, %s, %s" % (i, w, dt, qprev, qnew)
return integrated | python | def integrate(self, pose_correction=np.eye(3), uniform=True):
if uniform:
dt = float(self.timestamps[1]-self.timestamps[0]) # Must be python float for fastintegrate to work
return fastintegrate.integrate_gyro_quaternion_uniform(self.gyro_data_corrected, dt)
else:
N = len(self.timestamps)
integrated = np.zeros((4, N))
integrated[:,0] = np.array([1, 0, 0, 0]) # Initial rotation (no rotation)
# Iterate over all
for i in range(1, len(self.timestamps)):
w = pose_correction.dot(self.gyro_data[:, i]) # Change to correct coordinate frame
dt = float(self.timestamps[i] - self.timestamps[i - 1])
qprev = integrated[:, i - 1].flatten()
A = np.array([[0, -w[0], -w[1], -w[2]],
[w[0], 0, w[2], -w[1]],
[w[1], -w[2], 0, w[0]],
[w[2], w[1], -w[0], 0]])
qnew = (np.eye(4) + (dt/2.0) * A).dot(qprev)
qnorm = np.sqrt(np.sum(qnew ** 2))
qnew = qnew / qnorm if qnorm > 0 else 0
integrated[:, i] = qnew
#print "%d, %s, %s, %s, %s" % (i, w, dt, qprev, qnew)
return integrated | [
"def",
"integrate",
"(",
"self",
",",
"pose_correction",
"=",
"np",
".",
"eye",
"(",
"3",
")",
",",
"uniform",
"=",
"True",
")",
":",
"if",
"uniform",
":",
"dt",
"=",
"float",
"(",
"self",
".",
"timestamps",
"[",
"1",
"]",
"-",
"self",
".",
"timestamps",
"[",
"0",
"]",
")",
"# Must be python float for fastintegrate to work",
"return",
"fastintegrate",
".",
"integrate_gyro_quaternion_uniform",
"(",
"self",
".",
"gyro_data_corrected",
",",
"dt",
")",
"else",
":",
"N",
"=",
"len",
"(",
"self",
".",
"timestamps",
")",
"integrated",
"=",
"np",
".",
"zeros",
"(",
"(",
"4",
",",
"N",
")",
")",
"integrated",
"[",
":",
",",
"0",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"1",
",",
"0",
",",
"0",
",",
"0",
"]",
")",
"# Initial rotation (no rotation)",
"# Iterate over all",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"self",
".",
"timestamps",
")",
")",
":",
"w",
"=",
"pose_correction",
".",
"dot",
"(",
"self",
".",
"gyro_data",
"[",
":",
",",
"i",
"]",
")",
"# Change to correct coordinate frame",
"dt",
"=",
"float",
"(",
"self",
".",
"timestamps",
"[",
"i",
"]",
"-",
"self",
".",
"timestamps",
"[",
"i",
"-",
"1",
"]",
")",
"qprev",
"=",
"integrated",
"[",
":",
",",
"i",
"-",
"1",
"]",
".",
"flatten",
"(",
")",
"A",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"0",
",",
"-",
"w",
"[",
"0",
"]",
",",
"-",
"w",
"[",
"1",
"]",
",",
"-",
"w",
"[",
"2",
"]",
"]",
",",
"[",
"w",
"[",
"0",
"]",
",",
"0",
",",
"w",
"[",
"2",
"]",
",",
"-",
"w",
"[",
"1",
"]",
"]",
",",
"[",
"w",
"[",
"1",
"]",
",",
"-",
"w",
"[",
"2",
"]",
",",
"0",
",",
"w",
"[",
"0",
"]",
"]",
",",
"[",
"w",
"[",
"2",
"]",
",",
"w",
"[",
"1",
"]",
",",
"-",
"w",
"[",
"0",
"]",
",",
"0",
"]",
"]",
")",
"qnew",
"=",
"(",
"np",
".",
"eye",
"(",
"4",
")",
"+",
"(",
"dt",
"/",
"2.0",
")",
"*",
"A",
")",
".",
"dot",
"(",
"qprev",
")",
"qnorm",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"sum",
"(",
"qnew",
"**",
"2",
")",
")",
"qnew",
"=",
"qnew",
"/",
"qnorm",
"if",
"qnorm",
">",
"0",
"else",
"0",
"integrated",
"[",
":",
",",
"i",
"]",
"=",
"qnew",
"#print \"%d, %s, %s, %s, %s\" % (i, w, dt, qprev, qnew)",
"return",
"integrated"
] | Integrate angular velocity measurements to rotations.
Parameters
-------------
pose_correction : (3,3) ndarray, optional
Rotation matrix that describes the relative pose between the IMU and something else (e.g. camera).
uniform : bool
If True (default), assume uniform sample rate. This will use a faster integration method.
Returns
-------------
rotations : (4, N) ndarray
Rotations as unit quaternions with scalar as first element. | [
"Integrate",
"angular",
"velocity",
"measurements",
"to",
"rotations",
"."
] | 65cae19e7cfae5a397859096c9ef666e0f4e7f1b | https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/imu.py#L119-L157 |
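The non-uniform branch above applies, at each sample, the first-order update q_new = (I + (dt/2) A(w)) q_prev followed by renormalization. A self-contained NumPy sketch of a single such step, using the same skew matrix and scalar-first convention:

```python
import numpy as np

def quat_step(q, w, dt):
    # One first-order quaternion integration step (scalar-first convention),
    # mirroring the loop body of the non-uniform branch above.
    A = np.array([[0.0,  -w[0], -w[1], -w[2]],
                  [w[0],  0.0,   w[2], -w[1]],
                  [w[1], -w[2],  0.0,   w[0]],
                  [w[2],  w[1], -w[0],  0.0]])
    q_new = (np.eye(4) + 0.5 * dt * A).dot(q)
    return q_new / np.linalg.norm(q_new)

q = np.array([1.0, 0.0, 0.0, 0.0])    # identity rotation
w = np.array([0.0, 0.0, np.pi])       # pi rad/s about z
for _ in range(100):
    q = quat_step(q, w, dt=0.01)      # one second in total
print(np.round(q, 3))                 # approximately [0, 0, 0, 1]: pi about z
```

One caveat in the loop above: the guard `qnew / qnorm if qnorm > 0 else 0` replaces the whole quaternion with a scalar zero when the norm vanishes, which would corrupt every later step; for finite `w` and `dt` the norm stays positive, so in practice the branch is never taken.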
3,144 | hovren/crisp | crisp/stream.py | GyroStream.from_csv | def from_csv(cls, filename):
"""Create gyro stream from CSV data
Load data from a CSV file.
The data must be formatted with three values per line: (x, y, z)
where x, y, z are the measured angular velocities (in rad/s) about the respective axes.
Parameters
-------------------
filename : str
Path to the CSV file
Returns
---------------------
GyroStream
A gyroscope stream
"""
instance = cls()
instance.data = np.loadtxt(filename, delimiter=',')
return instance | python | def from_csv(cls, filename):
instance = cls()
instance.data = np.loadtxt(filename, delimiter=',')
return instance | [
"def",
"from_csv",
"(",
"cls",
",",
"filename",
")",
":",
"instance",
"=",
"cls",
"(",
")",
"instance",
".",
"data",
"=",
"np",
".",
"loadtxt",
"(",
"filename",
",",
"delimiter",
"=",
"','",
")",
"return",
"instance"
] | Create gyro stream from CSV data
Load data from a CSV file.
The data must be formatted with three values per line: (x, y, z)
where x, y, z are the measured angular velocities (in rad/s) about the respective axes.
Parameters
-------------------
filename : str
Path to the CSV file
Returns
---------------------
GyroStream
A gyroscope stream | [
"Create",
"gyro",
"stream",
"from",
"CSV",
"data"
] | 65cae19e7cfae5a397859096c9ef666e0f4e7f1b | https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/stream.py#L36-L55 |
3,145 | hovren/crisp | crisp/stream.py | GyroStream.from_data | def from_data(cls, data):
"""Create gyroscope stream from data array
Parameters
-------------------
data : (N, 3) ndarray
Data array of angular velocities (rad/s)
Returns
-------------------
GyroStream
Stream object
"""
if not data.shape[1] == 3:
raise ValueError("Gyroscope data must have shape (N, 3)")
instance = cls()
instance.data = data
return instance | python | def from_data(cls, data):
if not data.shape[1] == 3:
raise ValueError("Gyroscope data must have shape (N, 3)")
instance = cls()
instance.data = data
return instance | [
"def",
"from_data",
"(",
"cls",
",",
"data",
")",
":",
"if",
"not",
"data",
".",
"shape",
"[",
"1",
"]",
"==",
"3",
":",
"raise",
"ValueError",
"(",
"\"Gyroscope data must have shape (N, 3)\"",
")",
"instance",
"=",
"cls",
"(",
")",
"instance",
".",
"data",
"=",
"data",
"return",
"instance"
] | Create gyroscope stream from data array
Parameters
-------------------
data : (N, 3) ndarray
Data array of angular velocities (rad/s)
Returns
-------------------
GyroStream
Stream object | [
"Create",
"gyroscope",
"stream",
"from",
"data",
"array"
] | 65cae19e7cfae5a397859096c9ef666e0f4e7f1b | https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/stream.py#L58-L76 |
3,146 | hovren/crisp | crisp/stream.py | GyroStream.integrate | def integrate(self, dt):
"""Integrate gyro measurements to orientation using a uniform sample rate.
Parameters
-------------------
dt : float
Sample distance in seconds
Returns
----------------
orientation : (4, N) ndarray
Gyroscope orientation in quaternion form (s, q1, q2, q3)
"""
if not dt == self.__last_dt:
self.__last_q = fastintegrate.integrate_gyro_quaternion_uniform(self.data, dt)
self.__last_dt = dt
return self.__last_q | python | def integrate(self, dt):
if not dt == self.__last_dt:
self.__last_q = fastintegrate.integrate_gyro_quaternion_uniform(self.data, dt)
self.__last_dt = dt
return self.__last_q | [
"def",
"integrate",
"(",
"self",
",",
"dt",
")",
":",
"if",
"not",
"dt",
"==",
"self",
".",
"__last_dt",
":",
"self",
".",
"__last_q",
"=",
"fastintegrate",
".",
"integrate_gyro_quaternion_uniform",
"(",
"self",
".",
"data",
",",
"dt",
")",
"self",
".",
"__last_dt",
"=",
"dt",
"return",
"self",
".",
"__last_q"
] | Integrate gyro measurements to orientation using a uniform sample rate.
Parameters
-------------------
dt : float
Sample distance in seconds
Returns
----------------
orientation : (4, N) ndarray
Gyroscope orientation in quaternion form (s, q1, q2, q3) | [
"Integrate",
"gyro",
"measurements",
"to",
"orientation",
"using",
"a",
"uniform",
"sample",
"rate",
"."
] | 65cae19e7cfae5a397859096c9ef666e0f4e7f1b | https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/stream.py#L83-L99 |
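Entries 3,144 through 3,146 together form the typical `GyroStream` workflow: construct a stream from an `(N, 3)` array (or a CSV file) and integrate at the fixed sample rate. A usage sketch follows; the `crisp.stream` import path is assumed from the file location, and the 200 Hz rate is an arbitrary example value.

```python
import numpy as np
from crisp.stream import GyroStream  # assumed import path

rate = 200.0                          # Hz (example value)
w = np.zeros((1000, 3))
w[:, 2] = 0.5                         # constant 0.5 rad/s about z
stream = GyroStream.from_data(w)      # or: GyroStream.from_csv('gyro.csv')
q = stream.integrate(1.0 / rate)      # (4, N) quaternions, scalar first
print(q.shape)                        # (4, 1000)
```

Note that `integrate` caches its result keyed on `dt` (the `__last_dt` check above), so repeated calls with the same sample spacing reuse the previous integration.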
3,147 | hovren/crisp | crisp/znccpyr.py | gaussian_kernel | def gaussian_kernel(gstd):
"""Generate odd sized truncated Gaussian
The generated filter kernel has a cutoff at $3\sigma$
and is normalized to sum to 1
Parameters
-------------
gstd : float
Standard deviation of filter
Returns
-------------
g : ndarray
Array with kernel coefficients
"""
Nc = np.ceil(gstd*3)*2+1
x = np.linspace(-(Nc-1)/2,(Nc-1)/2,Nc,endpoint=True)
g = np.exp(-.5*((x/gstd)**2))
g = g/np.sum(g)
return g | python | def gaussian_kernel(gstd):
Nc = np.ceil(gstd*3)*2+1
x = np.linspace(-(Nc-1)/2,(Nc-1)/2,Nc,endpoint=True)
g = np.exp(-.5*((x/gstd)**2))
g = g/np.sum(g)
return g | [
"def",
"gaussian_kernel",
"(",
"gstd",
")",
":",
"Nc",
"=",
"np",
".",
"ceil",
"(",
"gstd",
"*",
"3",
")",
"*",
"2",
"+",
"1",
"x",
"=",
"np",
".",
"linspace",
"(",
"-",
"(",
"Nc",
"-",
"1",
")",
"/",
"2",
",",
"(",
"Nc",
"-",
"1",
")",
"/",
"2",
",",
"Nc",
",",
"endpoint",
"=",
"True",
")",
"g",
"=",
"np",
".",
"exp",
"(",
"-",
".5",
"*",
"(",
"(",
"x",
"/",
"gstd",
")",
"**",
"2",
")",
")",
"g",
"=",
"g",
"/",
"np",
".",
"sum",
"(",
"g",
")",
"return",
"g"
] | Generate odd sized truncated Gaussian
The generated filter kernel has a cutoff at $3\sigma$
and is normalized to sum to 1
Parameters
-------------
gstd : float
Standard deviation of filter
Returns
-------------
g : ndarray
Array with kernel coefficients | [
"Generate",
"odd",
"sized",
"truncated",
"Gaussian"
] | 65cae19e7cfae5a397859096c9ef666e0f4e7f1b | https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/znccpyr.py#L18-L39 |
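The kernel is a sampled Gaussian truncated at 3 sigma. The sketch below reproduces the construction (adding the `int` cast that current NumPy requires for `np.linspace`'s `num` argument, since `np.ceil` returns a float) and checks the documented properties:

```python
import numpy as np

def gaussian_kernel_checked(gstd):
    # Same construction as above, with an int cast on the tap count.
    n = int(np.ceil(gstd * 3) * 2 + 1)
    x = np.linspace(-(n - 1) / 2, (n - 1) / 2, n)
    g = np.exp(-0.5 * (x / gstd) ** 2)
    return g / g.sum()

g = gaussian_kernel_checked(2.0)
assert g.size == 13                 # odd length: 2*ceil(3*sigma) + 1
assert np.isclose(g.sum(), 1.0)     # normalized to sum to 1
assert np.allclose(g, g[::-1])      # symmetric about the center tap
```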
3,148 | hovren/crisp | crisp/znccpyr.py | subsample | def subsample(time_series, downsample_factor):
"""Subsample with Gaussian prefilter
The prefilter will have the filter size $\sigma_g=.5*ssfactor$
Parameters
--------------
time_series : ndarray
Input signal
downsample_factor : float
Downsampling factor
Returns
--------------
ts_out : ndarray
The downsampled signal
"""
Ns = np.int(np.floor(np.size(time_series)/downsample_factor))
g = gaussian_kernel(0.5*downsample_factor)
ts_blur = np.convolve(time_series,g,'same')
ts_out = np.zeros((Ns,1), dtype='float64')
for k in range(0,Ns):
cpos = (k+.5)*downsample_factor-.5
cfrac = cpos-np.floor(cpos)
cind = np.floor(cpos)
if cfrac>0:
ts_out[k]=ts_blur[cind]*(1-cfrac)+ts_blur[cind+1]*cfrac
else:
ts_out[k]=ts_blur[cind]
return ts_out | python | def subsample(time_series, downsample_factor):
Ns = np.int(np.floor(np.size(time_series)/downsample_factor))
g = gaussian_kernel(0.5*downsample_factor)
ts_blur = np.convolve(time_series,g,'same')
ts_out = np.zeros((Ns,1), dtype='float64')
for k in range(0,Ns):
cpos = (k+.5)*downsample_factor-.5
cfrac = cpos-np.floor(cpos)
cind = np.floor(cpos)
if cfrac>0:
ts_out[k]=ts_blur[cind]*(1-cfrac)+ts_blur[cind+1]*cfrac
else:
ts_out[k]=ts_blur[cind]
return ts_out | [
"def",
"subsample",
"(",
"time_series",
",",
"downsample_factor",
")",
":",
"Ns",
"=",
"np",
".",
"int",
"(",
"np",
".",
"floor",
"(",
"np",
".",
"size",
"(",
"time_series",
")",
"/",
"downsample_factor",
")",
")",
"g",
"=",
"gaussian_kernel",
"(",
"0.5",
"*",
"downsample_factor",
")",
"ts_blur",
"=",
"np",
".",
"convolve",
"(",
"time_series",
",",
"g",
",",
"'same'",
")",
"ts_out",
"=",
"np",
".",
"zeros",
"(",
"(",
"Ns",
",",
"1",
")",
",",
"dtype",
"=",
"'float64'",
")",
"for",
"k",
"in",
"range",
"(",
"0",
",",
"Ns",
")",
":",
"cpos",
"=",
"(",
"k",
"+",
".5",
")",
"*",
"downsample_factor",
"-",
".5",
"cfrac",
"=",
"cpos",
"-",
"np",
".",
"floor",
"(",
"cpos",
")",
"cind",
"=",
"np",
".",
"floor",
"(",
"cpos",
")",
"if",
"cfrac",
">",
"0",
":",
"ts_out",
"[",
"k",
"]",
"=",
"ts_blur",
"[",
"cind",
"]",
"*",
"(",
"1",
"-",
"cfrac",
")",
"+",
"ts_blur",
"[",
"cind",
"+",
"1",
"]",
"*",
"cfrac",
"else",
":",
"ts_out",
"[",
"k",
"]",
"=",
"ts_blur",
"[",
"cind",
"]",
"return",
"ts_out"
] | Subsample with Gaussian prefilter
The prefilter will have the filter size $\sigma_g=.5*ssfactor$
Parameters
--------------
time_series : ndarray
Input signal
downsample_factor : float
Downsampling factor
Returns
--------------
ts_out : ndarray
The downsampled signal | [
"Subsample",
"with",
"Gaussian",
"prefilter"
] | 65cae19e7cfae5a397859096c9ef666e0f4e7f1b | https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/znccpyr.py#L41-L71 |
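Two portability caveats if one tries to run `subsample` today: `np.int` was removed in NumPy 1.24, and `cind = np.floor(cpos)` produces a float, which recent NumPy rejects as an array index. A self-contained sketch of the same prefilter-then-interpolate decimation with explicit casts:

```python
import numpy as np

def subsample_fixed(ts, factor):
    # Gaussian prefilter at sigma = 0.5*factor (3-sigma truncation), then
    # linear interpolation at the decimated sample positions; int casts
    # added because np.int is gone in NumPy >= 1.24 and float indices raise.
    sigma = 0.5 * factor
    half = int(np.ceil(3.0 * sigma))
    g = np.exp(-0.5 * (np.arange(-half, half + 1) / sigma) ** 2)
    g /= g.sum()
    blur = np.convolve(ts, g, 'same')
    n_out = int(np.floor(ts.size / factor))
    out = np.zeros(n_out)
    for k in range(n_out):
        cpos = (k + 0.5) * factor - 0.5
        cind = int(np.floor(cpos))
        frac = cpos - cind
        out[k] = blur[cind] if frac == 0 else \
            blur[cind] * (1.0 - frac) + blur[cind + 1] * frac
    return out

ts = np.sin(np.linspace(0.0, 4.0 * np.pi, 400))
print(subsample_fixed(ts, 4.0).shape)    # (100,)
```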
3,149 | hovren/crisp | crisp/znccpyr.py | upsample | def upsample(time_series, scaling_factor):
"""Upsample using linear interpolation
The function uses replication of the value at edges
Parameters
--------------
time_series : ndarray
Input signal
scaling_factor : float
The factor to upsample with
Returns
--------------
ts_out : ndarray
The upsampled signal
"""
Ns0 = np.size(time_series)
Ns = np.int(np.floor(np.size(time_series)*scaling_factor))
ts_out = np.zeros((Ns,1), dtype='float64')
for k in range(0,Ns):
cpos = int(np.min([Ns0-1,np.max([0.,(k+0.5)/scaling_factor-0.5])]))
cfrac = cpos-np.floor(cpos)
cind = int(np.floor(cpos))
#print "cpos=%f cfrac=%f cind=%d", (cpos,cfrac,cind)
if cfrac>0:
ts_out[k]=time_series[cind]*(1-cfrac)+time_series[cind+1]*cfrac
else:
ts_out[k]=time_series[cind]
return ts_out | python | def upsample(time_series, scaling_factor):
Ns0 = np.size(time_series)
Ns = np.int(np.floor(np.size(time_series)*scaling_factor))
ts_out = np.zeros((Ns,1), dtype='float64')
for k in range(0,Ns):
cpos = int(np.min([Ns0-1,np.max([0.,(k+0.5)/scaling_factor-0.5])]))
cfrac = cpos-np.floor(cpos)
cind = int(np.floor(cpos))
#print "cpos=%f cfrac=%f cind=%d", (cpos,cfrac,cind)
if cfrac>0:
ts_out[k]=time_series[cind]*(1-cfrac)+time_series[cind+1]*cfrac
else:
ts_out[k]=time_series[cind]
return ts_out | [
"def",
"upsample",
"(",
"time_series",
",",
"scaling_factor",
")",
":",
"Ns0",
"=",
"np",
".",
"size",
"(",
"time_series",
")",
"Ns",
"=",
"np",
".",
"int",
"(",
"np",
".",
"floor",
"(",
"np",
".",
"size",
"(",
"time_series",
")",
"*",
"scaling_factor",
")",
")",
"ts_out",
"=",
"np",
".",
"zeros",
"(",
"(",
"Ns",
",",
"1",
")",
",",
"dtype",
"=",
"'float64'",
")",
"for",
"k",
"in",
"range",
"(",
"0",
",",
"Ns",
")",
":",
"cpos",
"=",
"int",
"(",
"np",
".",
"min",
"(",
"[",
"Ns0",
"-",
"1",
",",
"np",
".",
"max",
"(",
"[",
"0.",
",",
"(",
"k",
"+",
"0.5",
")",
"/",
"scaling_factor",
"-",
"0.5",
"]",
")",
"]",
")",
")",
"cfrac",
"=",
"cpos",
"-",
"np",
".",
"floor",
"(",
"cpos",
")",
"cind",
"=",
"int",
"(",
"np",
".",
"floor",
"(",
"cpos",
")",
")",
"#print \"cpos=%f cfrac=%f cind=%d\", (cpos,cfrac,cind)",
"if",
"cfrac",
">",
"0",
":",
"ts_out",
"[",
"k",
"]",
"=",
"time_series",
"[",
"cind",
"]",
"*",
"(",
"1",
"-",
"cfrac",
")",
"+",
"time_series",
"[",
"cind",
"+",
"1",
"]",
"*",
"cfrac",
"else",
":",
"ts_out",
"[",
"k",
"]",
"=",
"time_series",
"[",
"cind",
"]",
"return",
"ts_out"
] | Upsample using linear interpolation
The function uses replication of the value at edges
Parameters
--------------
time_series : ndarray
Input signal
scaling_factor : float
The factor to upsample with
Returns
--------------
ts_out : ndarray
The upsampled signal | [
"Upsample",
"using",
"linear",
"interpolation"
] | 65cae19e7cfae5a397859096c9ef666e0f4e7f1b | https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/znccpyr.py#L73-L103 |
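One detail worth flagging in `upsample`: because `cpos` is cast to `int` before `cfrac` is computed, `cfrac` is always zero, so as written the loop reduces to floor-index sampling rather than the linear interpolation the docstring describes (and `np.int` again fails on NumPy >= 1.24). A corrected sketch of the intended behavior:

```python
import numpy as np

def upsample_fixed(ts, factor):
    # Linear interpolation with edge replication, keeping cpos fractional
    # and casting only the index.
    n0 = ts.size
    n_out = int(np.floor(n0 * factor))
    out = np.zeros(n_out)
    for k in range(n_out):
        cpos = min(n0 - 1.0, max(0.0, (k + 0.5) / factor - 0.5))
        cind = int(np.floor(cpos))
        frac = cpos - cind
        out[k] = ts[cind] if cind + 1 >= n0 else \
            ts[cind] * (1.0 - frac) + ts[cind + 1] * frac
    return out

print(upsample_fixed(np.array([0.0, 1.0]), 2.0))   # [0.   0.25 0.75 1.  ]
```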
3,150 | hovren/crisp | crisp/znccpyr.py | find_shift_pyr | def find_shift_pyr(ts1,ts2,nlevels):
"""
Find shift that best aligns two time series
The returned shift aligns the timeseries ts1 with ts2.
This is sought using zero mean normalized cross correlation (ZNCC) in a coarse to fine search with an octave pyramid on nlevels levels.
Parameters
----------------
ts1 : list_like
The first timeseries
ts2 : list_like
The second timeseries
nlevels : int
Number of levels in pyramid
Returns
----------------
ts1_shift : float
How many samples to shift ts1 to align with ts2
"""
pyr1 = create_pyramid(ts1,nlevels)
pyr2 = create_pyramid(ts2,nlevels)
logger.debug("pyramid size = %d" % len(pyr1))
logger.debug("size of first element %d " % np.size(pyr1[0]))
logger.debug("size of last element %d " % np.size(pyr1[-1]))
ishift, corrfn = zncc(pyr1[-1],pyr2[-1])
for k in range(1,nlevels+1):
ishift, corrfn = refine_correlation(pyr1[-k-1],pyr2[-k-1],ishift*2)
return ishift | python | def find_shift_pyr(ts1,ts2,nlevels):
pyr1 = create_pyramid(ts1,nlevels)
pyr2 = create_pyramid(ts2,nlevels)
logger.debug("pyramid size = %d" % len(pyr1))
logger.debug("size of first element %d " % np.size(pyr1[0]))
logger.debug("size of last element %d " % np.size(pyr1[-1]))
ishift, corrfn = zncc(pyr1[-1],pyr2[-1])
for k in range(1,nlevels+1):
ishift, corrfn = refine_correlation(pyr1[-k-1],pyr2[-k-1],ishift*2)
return ishift | [
"def",
"find_shift_pyr",
"(",
"ts1",
",",
"ts2",
",",
"nlevels",
")",
":",
"pyr1",
"=",
"create_pyramid",
"(",
"ts1",
",",
"nlevels",
")",
"pyr2",
"=",
"create_pyramid",
"(",
"ts2",
",",
"nlevels",
")",
"logger",
".",
"debug",
"(",
"\"pyramid size = %d\"",
"%",
"len",
"(",
"pyr1",
")",
")",
"logger",
".",
"debug",
"(",
"\"size of first element %d \"",
"%",
"np",
".",
"size",
"(",
"pyr1",
"[",
"0",
"]",
")",
")",
"logger",
".",
"debug",
"(",
"\"size of last element %d \"",
"%",
"np",
".",
"size",
"(",
"pyr1",
"[",
"-",
"1",
"]",
")",
")",
"ishift",
",",
"corrfn",
"=",
"zncc",
"(",
"pyr1",
"[",
"-",
"1",
"]",
",",
"pyr2",
"[",
"-",
"1",
"]",
")",
"for",
"k",
"in",
"range",
"(",
"1",
",",
"nlevels",
"+",
"1",
")",
":",
"ishift",
",",
"corrfn",
"=",
"refine_correlation",
"(",
"pyr1",
"[",
"-",
"k",
"-",
"1",
"]",
",",
"pyr2",
"[",
"-",
"k",
"-",
"1",
"]",
",",
"ishift",
"*",
"2",
")",
"return",
"ishift"
] | Find shift that best aligns two time series
The returned shift aligns the timeseries ts1 with ts2.
This is sought using zero mean normalized cross correlation (ZNCC) in a coarse to fine search with an octave pyramid on nlevels levels.
Parameters
----------------
ts1 : list_like
The first timeseries
ts2 : list_like
The second timeseries
nlevels : int
Number of levels in pyramid
Returns
----------------
ts1_shift : float
How many samples to shift ts1 to align with ts2 | [
"Find",
"shift",
"that",
"best",
"aligns",
"two",
"time",
"series"
] | 65cae19e7cfae5a397859096c9ef666e0f4e7f1b | https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/znccpyr.py#L245-L278 |
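An end-to-end sketch with two synthetic signals offset by a known number of samples. The import path is assumed from the file location, and the sign of the estimate follows the library's ZNCC convention (only its magnitude is checked here):

```python
import numpy as np
from crisp.znccpyr import find_shift_pyr  # assumed import path

rng = np.random.default_rng(1)
ts2 = rng.normal(size=4096)
true_shift = 130
ts1 = np.roll(ts2, -true_shift)            # ts1 leads ts2 by true_shift samples
est = find_shift_pyr(ts1, ts2, nlevels=5)  # coarsest level: 4096/32 = 128 samples
print(est)                                 # magnitude roughly equal to true_shift
```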
3,151 | hovren/crisp | examples/gopro_dataset_example.py | to_rot_matrix | def to_rot_matrix(r):
"Convert combined axis angle vector to rotation matrix"
theta = np.linalg.norm(r)
v = r/theta
R = crisp.rotations.axis_angle_to_rotation_matrix(v, theta)
return R | python | def to_rot_matrix(r):
"Convert combined axis angle vector to rotation matrix"
theta = np.linalg.norm(r)
v = r/theta
R = crisp.rotations.axis_angle_to_rotation_matrix(v, theta)
return R | [
"def",
"to_rot_matrix",
"(",
"r",
")",
":",
"theta",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"r",
")",
"v",
"=",
"r",
"/",
"theta",
"R",
"=",
"crisp",
".",
"rotations",
".",
"axis_angle_to_rotation_matrix",
"(",
"v",
",",
"theta",
")",
"return",
"R"
] | Convert combined axis angle vector to rotation matrix | [
"Convert",
"combined",
"axis",
"angle",
"vector",
"to",
"rotation",
"matrix"
] | 65cae19e7cfae5a397859096c9ef666e0f4e7f1b | https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/examples/gopro_dataset_example.py#L42-L47 |
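A quick check with a pi/2 rotation about z, using `to_rot_matrix` as defined above (which in turn needs `crisp.rotations` imported). Note that a zero-length `r` would divide by zero here, so the identity rotation needs separate handling if it can occur:

```python
import numpy as np

r = (np.pi / 2.0) * np.array([0.0, 0.0, 1.0])  # combined axis-angle: pi/2 about z
R = to_rot_matrix(r)
print(np.round(R, 3))
# Expected, assuming the usual right-handed convention:
# [[ 0. -1.  0.]
#  [ 1.  0.  0.]
#  [ 0.  0.  1.]]
```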
3,152 | codejamninja/sphinx-markdown-builder | sphinx_markdown_builder/doctree2md.py | add_pass_thru | def add_pass_thru(pass_thrus):
""" Decorator adds explicit pass-through visit and depart methods
"""
def meth(self, node):
pass
def dec(cls):
for element_name in pass_thrus:
for meth_prefix in ('visit_', 'depart_'):
meth_name = meth_prefix + element_name
if hasattr(cls, meth_name):
raise ValueError('method name {} already defined'
.format(meth_name))
setattr(cls, meth_name, meth)
return cls
return dec | python | def add_pass_thru(pass_thrus):
def meth(self, node):
pass
def dec(cls):
for element_name in pass_thrus:
for meth_prefix in ('visit_', 'depart_'):
meth_name = meth_prefix + element_name
if hasattr(cls, meth_name):
raise ValueError('method name {} already defined'
.format(meth_name))
setattr(cls, meth_name, meth)
return cls
return dec | [
"def",
"add_pass_thru",
"(",
"pass_thrus",
")",
":",
"def",
"meth",
"(",
"self",
",",
"node",
")",
":",
"pass",
"def",
"dec",
"(",
"cls",
")",
":",
"for",
"element_name",
"in",
"pass_thrus",
":",
"for",
"meth_prefix",
"in",
"(",
"'visit_'",
",",
"'depart_'",
")",
":",
"meth_name",
"=",
"meth_prefix",
"+",
"element_name",
"if",
"hasattr",
"(",
"cls",
",",
"meth_name",
")",
":",
"raise",
"ValueError",
"(",
"'method name {} already defined'",
".",
"format",
"(",
"meth_name",
")",
")",
"setattr",
"(",
"cls",
",",
"meth_name",
",",
"meth",
")",
"return",
"cls",
"return",
"dec"
] | Decorator adds explicit pass-through visit and depart methods | [
"Decorator",
"adds",
"explicit",
"pass",
"-",
"through",
"visit",
"and",
"depart",
"methods"
] | a28f48df937d4b0e158ba453e5e1c66824299196 | https://github.com/codejamninja/sphinx-markdown-builder/blob/a28f48df937d4b0e158ba453e5e1c66824299196/sphinx_markdown_builder/doctree2md.py#L256-L272 |
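A usage sketch independent of docutils. The element names are placeholders; the decorator attaches a no-op `visit_*`/`depart_*` pair for each, and raises `ValueError` if the class already defines one of those names:

```python
@add_pass_thru(('comment', 'substitution_definition'))  # placeholder element names
class MyTranslator:
    pass

t = MyTranslator()
t.visit_comment(None)                    # attached no-op
t.depart_substitution_definition(None)   # attached no-op
assert hasattr(MyTranslator, 'depart_comment')
```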
3,153 | codejamninja/sphinx-markdown-builder | sphinx_markdown_builder/doctree2md.py | IndentLevel.write | def write(self):
""" Add ``self.content`` with current ``prefix`` and ``first_prefix``
Add processed ``self.content`` to ``self.base``. The first line has
``first_prefix`` prepended, further lines have ``prefix`` prepended.
Empty (all whitespace) lines get written as bare newlines, to
avoid ugly extra whitespace.
"""
string = ''.join(self.content)
lines = string.splitlines(True)
if len(lines) == 0:
return
texts = [self.first_prefix + lines[0]]
for line in lines[1:]:
if line.strip() == '': # avoid prefix for empty lines
texts.append('\n')
else:
texts.append(self.prefix + line)
self.base.append(''.join(texts)) | python | def write(self):
string = ''.join(self.content)
lines = string.splitlines(True)
if len(lines) == 0:
return
texts = [self.first_prefix + lines[0]]
for line in lines[1:]:
if line.strip() == '': # avoid prefix for empty lines
texts.append('\n')
else:
texts.append(self.prefix + line)
self.base.append(''.join(texts)) | [
"def",
"write",
"(",
"self",
")",
":",
"string",
"=",
"''",
".",
"join",
"(",
"self",
".",
"content",
")",
"lines",
"=",
"string",
".",
"splitlines",
"(",
"True",
")",
"if",
"len",
"(",
"lines",
")",
"==",
"0",
":",
"return",
"texts",
"=",
"[",
"self",
".",
"first_prefix",
"+",
"lines",
"[",
"0",
"]",
"]",
"for",
"line",
"in",
"lines",
"[",
"1",
":",
"]",
":",
"if",
"line",
".",
"strip",
"(",
")",
"==",
"''",
":",
"# avoid prefix for empty lines",
"texts",
".",
"append",
"(",
"'\\n'",
")",
"else",
":",
"texts",
".",
"append",
"(",
"self",
".",
"prefix",
"+",
"line",
")",
"self",
".",
"base",
".",
"append",
"(",
"''",
".",
"join",
"(",
"texts",
")",
")"
] | Add ``self.content`` with current ``prefix`` and ``first_prefix``
Add processed ``self.content`` to ``self.base``. The first line has
``first_prefix`` prepended, further lines have ``prefix`` prepended.
Empty (all whitespace) lines get written as bare newlines, to
avoid ugly extra whitespace. | [
"Add",
"self",
".",
"contents",
"with",
"current",
"prefix",
"and",
"first_prefix"
] | a28f48df937d4b0e158ba453e5e1c66824299196 | https://github.com/codejamninja/sphinx-markdown-builder/blob/a28f48df937d4b0e158ba453e5e1c66824299196/sphinx_markdown_builder/doctree2md.py#L206-L225 |
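The constructor is not part of this entry, so the prefixing rules can be illustrated with a stand-in object carrying only the four attributes `write` uses:

```python
from types import SimpleNamespace
from sphinx_markdown_builder.doctree2md import IndentLevel  # assumed import path

# Stand-in for an IndentLevel instance (the real __init__ is not shown here).
lvl = SimpleNamespace(base=[],
                      content=['first line\n', '   \n', 'third line\n'],
                      prefix='    ',
                      first_prefix='1.  ')
IndentLevel.write(lvl)
print(lvl.base[0], end='')
# 1.  first line
#                      <- all-whitespace line becomes a bare newline
#     third line
```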
3,154 | sbuss/bitmerchant | bitmerchant/wallet/bip32.py | Wallet.identifier | def identifier(self):
"""Get the identifier for this node.
Extended keys can be identified by the Hash160 (RIPEMD160 after SHA256)
of the serialized public key. This corresponds exactly to the data used in
traditional Bitcoin addresses. It is not advised to represent this data
in base58 format though, as it may be interpreted as an address that
way (and wallet software is not required to accept payment to the chain
key itself).
"""
key = self.get_public_key_hex()
return ensure_bytes(hexlify(hash160(unhexlify(ensure_bytes(key))))) | python | def identifier(self):
key = self.get_public_key_hex()
return ensure_bytes(hexlify(hash160(unhexlify(ensure_bytes(key))))) | [
"def",
"identifier",
"(",
"self",
")",
":",
"key",
"=",
"self",
".",
"get_public_key_hex",
"(",
")",
"return",
"ensure_bytes",
"(",
"hexlify",
"(",
"hash160",
"(",
"unhexlify",
"(",
"ensure_bytes",
"(",
"key",
")",
")",
")",
")",
")"
] | Get the identifier for this node.
Extended keys can be identified by the Hash160 (RIPEMD160 after SHA256)
of the serialized public key. This corresponds exactly to the data used in
traditional Bitcoin addresses. It is not advised to represent this data
in base58 format though, as it may be interpreted as an address that
way (and wallet software is not required to accept payment to the chain
key itself). | [
"Get",
"the",
"identifier",
"for",
"this",
"node",
"."
] | 901de06489805c396a922f955eeef2da04734e3e | https://github.com/sbuss/bitmerchant/blob/901de06489805c396a922f955eeef2da04734e3e/bitmerchant/wallet/bip32.py#L153-L164 |
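Hash160 here is the usual RIPEMD160(SHA256(key)) construction. A standalone equivalent using only hashlib is sketched below; note that `'ripemd160'` is only available when the underlying OpenSSL build provides it. The example key is the compressed secp256k1 generator point:

```python
import hashlib
from binascii import hexlify, unhexlify

def hash160_hex(pubkey_hex):
    # RIPEMD160(SHA256(pubkey)), the identifier computed above
    sha = hashlib.sha256(unhexlify(pubkey_hex)).digest()
    ripe = hashlib.new('ripemd160', sha).digest()
    return hexlify(ripe)

g_compressed = b'0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798'
print(hash160_hex(g_compressed))
```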
3,155 | sbuss/bitmerchant | bitmerchant/wallet/bip32.py | Wallet.get_child | def get_child(self, child_number, is_prime=None, as_private=True):
"""Derive a child key.
:param child_number: The number of the child key to compute
:type child_number: int
:param is_prime: If True, the child is calculated via private
derivation. If False, then public derivation is used. If None,
then it is figured out from the value of child_number.
:type is_prime: bool, defaults to None
:param as_private: If True, strips private key from the result.
Defaults to False. If there is no private key present, this is
ignored.
:type as_private: bool
Positive child_numbers (>= 0, < 2,147,483,648) produce publicly
derived children. (prime=False)
Negative numbers (> -2,147,483,648, < 0) use private derivation.
(prime=True)
NOTE: Python can't do -0, so if you want the privately derived 0th
child you need to manually set is_prime=True.
NOTE: negative numbered children are provided as a convenience
because nobody wants to remember the above numbers. Negative numbers
are considered 'prime children', which is described in the BIP32 spec
as a leading 1 in a 32 bit unsigned int.
This derivation is fully described at
https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#child-key-derivation-functions # nopep8
"""
boundary = 0x80000000
# Note: If this boundary check gets removed, then children above
# the boundary should use private (prime) derivation.
if abs(child_number) >= boundary:
raise ValueError("Invalid child number %s" % child_number)
# If is_prime isn't set, then we can infer it from the child_number
if is_prime is None:
# Prime children are either < 0 or > 0x80000000
if child_number < 0:
child_number = abs(child_number)
is_prime = True
else:
is_prime = False
else:
# Otherwise is_prime is set so the child_number should be between
# 0 and 0x80000000
if child_number < 0 or child_number >= boundary:
raise ValueError(
"Invalid child number. Must be between 0 and %s" %
boundary)
if not self.private_key and is_prime:
raise ValueError(
"Cannot compute a prime child without a private key")
if is_prime:
# Even though we take child_number as an int < boundary, the
# internal derivation needs it to be the larger number.
child_number = child_number + boundary
child_number_hex = long_to_hex(child_number, 8)
if is_prime:
# Let data = concat(0x00, self.key, child_number)
data = b'00' + self.private_key.get_key()
else:
data = self.get_public_key_hex()
data += child_number_hex
# Compute a 64 Byte I that is the HMAC-SHA512, using self.chain_code
# as the seed, and data as the message.
I = hmac.new(
unhexlify(ensure_bytes(self.chain_code)),
msg=unhexlify(ensure_bytes(data)),
digestmod=sha512).digest()
# Split I into its 32 Byte components.
I_L, I_R = I[:32], I[32:]
if long_or_int(hexlify(I_L), 16) >= SECP256k1.order:
raise InvalidPrivateKeyError("The derived key is too large.")
c_i = hexlify(I_R)
private_exponent = None
public_pair = None
if self.private_key:
# Use private information for derivation
# I_L is added to the current key's secret exponent (mod n), where
# n is the order of the ECDSA curve in use.
private_exponent = (
(long_or_int(hexlify(I_L), 16) +
long_or_int(self.private_key.get_key(), 16))
% SECP256k1.order)
# I_R is the child's chain code
else:
# Only use public information for this derivation
g = SECP256k1.generator
I_L_long = long_or_int(hexlify(I_L), 16)
point = (_ECDSA_Public_key(g, g * I_L_long).point +
self.public_key.to_point())
# I_R is the child's chain code
public_pair = PublicPair(point.x(), point.y())
child = self.__class__(
chain_code=c_i,
depth=self.depth + 1, # we have to go deeper...
parent_fingerprint=self.fingerprint,
child_number=child_number_hex,
private_exponent=private_exponent,
public_pair=public_pair,
network=self.network)
if child.public_key.to_point() == INFINITY:
raise InfinityPointException("The point at infinity is invalid.")
if not as_private:
return child.public_copy()
return child | python | def get_child(self, child_number, is_prime=None, as_private=True):
boundary = 0x80000000
# Note: If this boundary check gets removed, then children above
# the boundary should use private (prime) derivation.
if abs(child_number) >= boundary:
raise ValueError("Invalid child number %s" % child_number)
# If is_prime isn't set, then we can infer it from the child_number
if is_prime is None:
# Prime children are either < 0 or > 0x80000000
if child_number < 0:
child_number = abs(child_number)
is_prime = True
else:
is_prime = False
else:
# Otherwise is_prime is set so the child_number should be between
# 0 and 0x80000000
if child_number < 0 or child_number >= boundary:
raise ValueError(
"Invalid child number. Must be between 0 and %s" %
boundary)
if not self.private_key and is_prime:
raise ValueError(
"Cannot compute a prime child without a private key")
if is_prime:
# Even though we take child_number as an int < boundary, the
# internal derivation needs it to be the larger number.
child_number = child_number + boundary
child_number_hex = long_to_hex(child_number, 8)
if is_prime:
# Let data = concat(0x00, self.key, child_number)
data = b'00' + self.private_key.get_key()
else:
data = self.get_public_key_hex()
data += child_number_hex
# Compute a 64 Byte I that is the HMAC-SHA512, using self.chain_code
# as the seed, and data as the message.
I = hmac.new(
unhexlify(ensure_bytes(self.chain_code)),
msg=unhexlify(ensure_bytes(data)),
digestmod=sha512).digest()
# Split I into its 32 Byte components.
I_L, I_R = I[:32], I[32:]
if long_or_int(hexlify(I_L), 16) >= SECP256k1.order:
raise InvalidPrivateKeyError("The derived key is too large.")
c_i = hexlify(I_R)
private_exponent = None
public_pair = None
if self.private_key:
# Use private information for derivation
# I_L is added to the current key's secret exponent (mod n), where
# n is the order of the ECDSA curve in use.
private_exponent = (
(long_or_int(hexlify(I_L), 16) +
long_or_int(self.private_key.get_key(), 16))
% SECP256k1.order)
# I_R is the child's chain code
else:
# Only use public information for this derivation
g = SECP256k1.generator
I_L_long = long_or_int(hexlify(I_L), 16)
point = (_ECDSA_Public_key(g, g * I_L_long).point +
self.public_key.to_point())
# I_R is the child's chain code
public_pair = PublicPair(point.x(), point.y())
child = self.__class__(
chain_code=c_i,
depth=self.depth + 1, # we have to go deeper...
parent_fingerprint=self.fingerprint,
child_number=child_number_hex,
private_exponent=private_exponent,
public_pair=public_pair,
network=self.network)
if child.public_key.to_point() == INFINITY:
raise InfinityPointException("The point at infinity is invalid.")
if not as_private:
return child.public_copy()
return child | [
"def",
"get_child",
"(",
"self",
",",
"child_number",
",",
"is_prime",
"=",
"None",
",",
"as_private",
"=",
"True",
")",
":",
"boundary",
"=",
"0x80000000",
"# Note: If this boundary check gets removed, then children above",
"# the boundary should use private (prime) derivation.",
"if",
"abs",
"(",
"child_number",
")",
">=",
"boundary",
":",
"raise",
"ValueError",
"(",
"\"Invalid child number %s\"",
"%",
"child_number",
")",
"# If is_prime isn't set, then we can infer it from the child_number",
"if",
"is_prime",
"is",
"None",
":",
"# Prime children are either < 0 or > 0x80000000",
"if",
"child_number",
"<",
"0",
":",
"child_number",
"=",
"abs",
"(",
"child_number",
")",
"is_prime",
"=",
"True",
"else",
":",
"is_prime",
"=",
"False",
"else",
":",
"# Otherwise is_prime is set so the child_number should be between",
"# 0 and 0x80000000",
"if",
"child_number",
"<",
"0",
"or",
"child_number",
">=",
"boundary",
":",
"raise",
"ValueError",
"(",
"\"Invalid child number. Must be between 0 and %s\"",
"%",
"boundary",
")",
"if",
"not",
"self",
".",
"private_key",
"and",
"is_prime",
":",
"raise",
"ValueError",
"(",
"\"Cannot compute a prime child without a private key\"",
")",
"if",
"is_prime",
":",
"# Even though we take child_number as an int < boundary, the",
"# internal derivation needs it to be the larger number.",
"child_number",
"=",
"child_number",
"+",
"boundary",
"child_number_hex",
"=",
"long_to_hex",
"(",
"child_number",
",",
"8",
")",
"if",
"is_prime",
":",
"# Let data = concat(0x00, self.key, child_number)",
"data",
"=",
"b'00'",
"+",
"self",
".",
"private_key",
".",
"get_key",
"(",
")",
"else",
":",
"data",
"=",
"self",
".",
"get_public_key_hex",
"(",
")",
"data",
"+=",
"child_number_hex",
"# Compute a 64 Byte I that is the HMAC-SHA512, using self.chain_code",
"# as the seed, and data as the message.",
"I",
"=",
"hmac",
".",
"new",
"(",
"unhexlify",
"(",
"ensure_bytes",
"(",
"self",
".",
"chain_code",
")",
")",
",",
"msg",
"=",
"unhexlify",
"(",
"ensure_bytes",
"(",
"data",
")",
")",
",",
"digestmod",
"=",
"sha512",
")",
".",
"digest",
"(",
")",
"# Split I into its 32 Byte components.",
"I_L",
",",
"I_R",
"=",
"I",
"[",
":",
"32",
"]",
",",
"I",
"[",
"32",
":",
"]",
"if",
"long_or_int",
"(",
"hexlify",
"(",
"I_L",
")",
",",
"16",
")",
">=",
"SECP256k1",
".",
"order",
":",
"raise",
"InvalidPrivateKeyError",
"(",
"\"The derived key is too large.\"",
")",
"c_i",
"=",
"hexlify",
"(",
"I_R",
")",
"private_exponent",
"=",
"None",
"public_pair",
"=",
"None",
"if",
"self",
".",
"private_key",
":",
"# Use private information for derivation",
"# I_L is added to the current key's secret exponent (mod n), where",
"# n is the order of the ECDSA curve in use.",
"private_exponent",
"=",
"(",
"(",
"long_or_int",
"(",
"hexlify",
"(",
"I_L",
")",
",",
"16",
")",
"+",
"long_or_int",
"(",
"self",
".",
"private_key",
".",
"get_key",
"(",
")",
",",
"16",
")",
")",
"%",
"SECP256k1",
".",
"order",
")",
"# I_R is the child's chain code",
"else",
":",
"# Only use public information for this derivation",
"g",
"=",
"SECP256k1",
".",
"generator",
"I_L_long",
"=",
"long_or_int",
"(",
"hexlify",
"(",
"I_L",
")",
",",
"16",
")",
"point",
"=",
"(",
"_ECDSA_Public_key",
"(",
"g",
",",
"g",
"*",
"I_L_long",
")",
".",
"point",
"+",
"self",
".",
"public_key",
".",
"to_point",
"(",
")",
")",
"# I_R is the child's chain code",
"public_pair",
"=",
"PublicPair",
"(",
"point",
".",
"x",
"(",
")",
",",
"point",
".",
"y",
"(",
")",
")",
"child",
"=",
"self",
".",
"__class__",
"(",
"chain_code",
"=",
"c_i",
",",
"depth",
"=",
"self",
".",
"depth",
"+",
"1",
",",
"# we have to go deeper...",
"parent_fingerprint",
"=",
"self",
".",
"fingerprint",
",",
"child_number",
"=",
"child_number_hex",
",",
"private_exponent",
"=",
"private_exponent",
",",
"public_pair",
"=",
"public_pair",
",",
"network",
"=",
"self",
".",
"network",
")",
"if",
"child",
".",
"public_key",
".",
"to_point",
"(",
")",
"==",
"INFINITY",
":",
"raise",
"InfinityPointException",
"(",
"\"The point at infinity is invalid.\"",
")",
"if",
"not",
"as_private",
":",
"return",
"child",
".",
"public_copy",
"(",
")",
"return",
"child"
] | Derive a child key.
:param child_number: The number of the child key to compute
:type child_number: int
:param is_prime: If True, the child is calculated via private
derivation. If False, then public derivation is used. If None,
then it is figured out from the value of child_number.
:type is_prime: bool, defaults to None
:param as_private: If True, strips private key from the result.
Defaults to False. If there is no private key present, this is
ignored.
:type as_private: bool
Positive child_numbers (>= 0, < 2,147,483,648) produce publicly
derived children. (prime=False)
Negative numbers (> -2,147,483,648, < 0) use private derivation.
(prime=True)
NOTE: Python can't do -0, so if you want the privately derived 0th
child you need to manually set is_prime=True.
NOTE: negative numbered children are provided as a convenience
because nobody wants to remember the above numbers. Negative numbers
are considered 'prime children', which is described in the BIP32 spec
as a leading 1 in a 32 bit unsigned int.
This derivation is fully described at
https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#child-key-derivation-functions # nopep8 | [
"Derive",
"a",
"child",
"key",
"."
] | 901de06489805c396a922f955eeef2da04734e3e | https://github.com/sbuss/bitmerchant/blob/901de06489805c396a922f955eeef2da04734e3e/bitmerchant/wallet/bip32.py#L247-L363 |
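A sketch of the derivation spellings the docstring describes. The top-level import path is assumed; `new_random_wallet`, `public_copy`, and wallet equality all appear elsewhere in this module:

```python
from bitmerchant.wallet import Wallet  # assumed import path

w = Wallet.new_random_wallet()
a = w.get_child(0)                    # public (non-prime) derivation
b = w.get_child(-5)                   # negative number => prime derivation
c = w.get_child(5, is_prime=True)     # explicit spelling of the same child
assert b == c
pub = w.public_copy()
pub.get_child(0)                      # fine: public derivation needs no private key
# pub.get_child(5, is_prime=True) would raise ValueError (no private key)
```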
3,156 | sbuss/bitmerchant | bitmerchant/wallet/bip32.py | Wallet.crack_private_key | def crack_private_key(self, child_private_key):
"""Crack the parent private key given a child private key.
BIP32 has a vulnerability/feature that allows you to recover the
master private key if you're given a master public key and any of its
publicly-derived child private keys. This is a pretty serious security
vulnerability that looks as innocuous as this:
>>> w = Wallet.new_random_wallet()
>>> child = w.get_child(0, is_prime=False)
>>> w_pub = w.public_copy()
>>> assert w_pub.private_key is None
>>> master_public_key = w_pub.serialize_b58(private=False)
>>> # Now you put master_public_key on your website
>>> # and give somebody a private key
>>> public_master = Wallet.deserialize(master_public_key)
>>> cracked_private_master = public_master.crack_private_key(child)
>>> assert w == cracked_private_master # :(
Implementation details from http://bitcoinmagazine.com/8396/deterministic-wallets-advantages-flaw/ # nopep8
"""
if self.private_key:
raise AssertionError("You already know the private key")
if child_private_key.parent_fingerprint != self.fingerprint:
raise ValueError("This is not a valid child")
if child_private_key.child_number >= 0x80000000:
raise ValueError(
"Cannot crack private keys from private derivation")
# Duplicate the public child derivation
child_number_hex = long_to_hex(child_private_key.child_number, 8)
data = self.get_public_key_hex() + child_number_hex
I = hmac.new(
unhexlify(ensure_bytes(self.chain_code)),
msg=unhexlify(ensure_bytes(data)),
digestmod=sha512).digest()
I_L, I_R = I[:32], I[32:]
# Public derivation is the same as private derivation plus some offset
# knowing the child's private key allows us to find this offset just
# by subtracting the child's private key from the parent I_L data
privkey = PrivateKey(long_or_int(hexlify(I_L), 16),
network=self.network)
parent_private_key = child_private_key.private_key - privkey
return self.__class__(
chain_code=self.chain_code,
depth=self.depth,
parent_fingerprint=self.parent_fingerprint,
child_number=self.child_number,
private_key=parent_private_key,
network=self.network) | python | def crack_private_key(self, child_private_key):
if self.private_key:
raise AssertionError("You already know the private key")
if child_private_key.parent_fingerprint != self.fingerprint:
raise ValueError("This is not a valid child")
if child_private_key.child_number >= 0x80000000:
raise ValueError(
"Cannot crack private keys from private derivation")
# Duplicate the public child derivation
child_number_hex = long_to_hex(child_private_key.child_number, 8)
data = self.get_public_key_hex() + child_number_hex
I = hmac.new(
unhexlify(ensure_bytes(self.chain_code)),
msg=unhexlify(ensure_bytes(data)),
digestmod=sha512).digest()
I_L, I_R = I[:32], I[32:]
# Public derivation is the same as private derivation plus some offset
# knowing the child's private key allows us to find this offset just
# by subtracting the child's private key from the parent I_L data
privkey = PrivateKey(long_or_int(hexlify(I_L), 16),
network=self.network)
parent_private_key = child_private_key.private_key - privkey
return self.__class__(
chain_code=self.chain_code,
depth=self.depth,
parent_fingerprint=self.parent_fingerprint,
child_number=self.child_number,
private_key=parent_private_key,
network=self.network) | [
"def",
"crack_private_key",
"(",
"self",
",",
"child_private_key",
")",
":",
"if",
"self",
".",
"private_key",
":",
"raise",
"AssertionError",
"(",
"\"You already know the private key\"",
")",
"if",
"child_private_key",
".",
"parent_fingerprint",
"!=",
"self",
".",
"fingerprint",
":",
"raise",
"ValueError",
"(",
"\"This is not a valid child\"",
")",
"if",
"child_private_key",
".",
"child_number",
">=",
"0x80000000",
":",
"raise",
"ValueError",
"(",
"\"Cannot crack private keys from private derivation\"",
")",
"# Duplicate the public child derivation",
"child_number_hex",
"=",
"long_to_hex",
"(",
"child_private_key",
".",
"child_number",
",",
"8",
")",
"data",
"=",
"self",
".",
"get_public_key_hex",
"(",
")",
"+",
"child_number_hex",
"I",
"=",
"hmac",
".",
"new",
"(",
"unhexlify",
"(",
"ensure_bytes",
"(",
"self",
".",
"chain_code",
")",
")",
",",
"msg",
"=",
"unhexlify",
"(",
"ensure_bytes",
"(",
"data",
")",
")",
",",
"digestmod",
"=",
"sha512",
")",
".",
"digest",
"(",
")",
"I_L",
",",
"I_R",
"=",
"I",
"[",
":",
"32",
"]",
",",
"I",
"[",
"32",
":",
"]",
"# Public derivation is the same as private derivation plus some offset",
"# knowing the child's private key allows us to find this offset just",
"# by subtracting the child's private key from the parent I_L data",
"privkey",
"=",
"PrivateKey",
"(",
"long_or_int",
"(",
"hexlify",
"(",
"I_L",
")",
",",
"16",
")",
",",
"network",
"=",
"self",
".",
"network",
")",
"parent_private_key",
"=",
"child_private_key",
".",
"private_key",
"-",
"privkey",
"return",
"self",
".",
"__class__",
"(",
"chain_code",
"=",
"self",
".",
"chain_code",
",",
"depth",
"=",
"self",
".",
"depth",
",",
"parent_fingerprint",
"=",
"self",
".",
"parent_fingerprint",
",",
"child_number",
"=",
"self",
".",
"child_number",
",",
"private_key",
"=",
"parent_private_key",
",",
"network",
"=",
"self",
".",
"network",
")"
] | Crack the parent private key given a child private key.
BIP32 has a vulnerability/feature that allows you to recover the
master private key if you're given a master public key and any of its
publicly-derived child private keys. This is a pretty serious security
vulnerability that looks as innocuous as this:
>>> w = Wallet.new_random_wallet()
>>> child = w.get_child(0, is_prime=False)
>>> w_pub = w.public_copy()
>>> assert w_pub.private_key is None
>>> master_public_key = w_pub.serialize_b58(private=False)
>>> # Now you put master_public_key on your website
>>> # and give somebody a private key
>>> public_master = Wallet.deserialize(master_public_key)
>>> cracked_private_master = public_master.crack_private_key(child)
>>> assert w == cracked_private_master # :(
Implementation details from http://bitcoinmagazine.com/8396/deterministic-wallets-advantages-flaw/ # nopep8 | [
"Crack",
"the",
"parent",
"private",
"key",
"given",
"a",
"child",
"private",
"key",
"."
] | 901de06489805c396a922f955eeef2da04734e3e | https://github.com/sbuss/bitmerchant/blob/901de06489805c396a922f955eeef2da04734e3e/bitmerchant/wallet/bip32.py#L375-L424 |
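The standard mitigation is hardened (prime) derivation: the attack only works for publicly derived children, and the method itself rejects anything else, as the `child_number >= 0x80000000` check above shows. A sketch, reusing the API from the previous entries:

```python
from bitmerchant.wallet import Wallet  # assumed import path, as above

w = Wallet.new_random_wallet()
hardened = w.get_child(0, is_prime=True)
public_master = Wallet.deserialize(w.serialize_b58(private=False))
try:
    public_master.crack_private_key(hardened)
except ValueError:
    print('hardened children cannot be used to recover the parent key')
```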
3,157 | CloverHealth/pytest-pgsql | deploy.py | _pypi_push | def _pypi_push(dist):
"""Push created package to PyPI.
Requires the following defined environment variables:
- TWINE_USERNAME: The PyPI username to upload this package under
- TWINE_PASSWORD: The password to the user's account
Args:
dist (str):
The distribution to push. Must be a valid directory; shell globs are
NOT allowed.
"""
# Register all distributions and wheels with PyPI. We have to list the dist
# directory and register each file individually because `twine` doesn't
# handle globs.
for filename in os.listdir(dist):
full_path = os.path.join(dist, filename)
if os.path.isfile(full_path):
# This will fail if the project has never been uploaded, so use check=false
_shell('twine register ' + shlex.quote(full_path), check=False)
_shell('twine upload ' + shlex.quote(dist + '/*')) | python | def _pypi_push(dist):
# Register all distributions and wheels with PyPI. We have to list the dist
# directory and register each file individually because `twine` doesn't
# handle globs.
for filename in os.listdir(dist):
full_path = os.path.join(dist, filename)
if os.path.isfile(full_path):
# This will fail if the project has never been uploaded, so use check=false
_shell('twine register ' + shlex.quote(full_path), check=False)
_shell('twine upload ' + shlex.quote(dist + '/*')) | [
"def",
"_pypi_push",
"(",
"dist",
")",
":",
"# Register all distributions and wheels with PyPI. We have to list the dist",
"# directory and register each file individually because `twine` doesn't",
"# handle globs.",
"for",
"filename",
"in",
"os",
".",
"listdir",
"(",
"dist",
")",
":",
"full_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dist",
",",
"filename",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"full_path",
")",
":",
"# This will fail if the project has never been uploaded, so use check=false",
"_shell",
"(",
"'twine register '",
"+",
"shlex",
".",
"quote",
"(",
"full_path",
")",
",",
"check",
"=",
"False",
")",
"_shell",
"(",
"'twine upload '",
"+",
"shlex",
".",
"quote",
"(",
"dist",
"+",
"'/*'",
")",
")"
] | Push created package to PyPI.
Requires the following defined environment variables:
- TWINE_USERNAME: The PyPI username to upload this package under
- TWINE_PASSWORD: The password to the user's account
Args:
dist (str):
The distribution to push. Must be a valid directory; shell globs are
NOT allowed. | [
"Push",
"created",
"package",
"to",
"PyPI",
"."
] | a863ed4b652053e315dfa039d978b56f03664c07 | https://github.com/CloverHealth/pytest-pgsql/blob/a863ed4b652053e315dfa039d978b56f03664c07/deploy.py#L19-L40 |
3,158 | CloverHealth/pytest-pgsql | deploy.py | deploy | def deploy(target):
"""Deploys the package and documentation.
Proceeds in the following steps:
1. Ensures proper environment variables are set and checks that we are on Circle CI
2. Tags the repository with the new version
3. Creates a standard distribution and a wheel
4. Updates version.py to have the proper version
5. Commits the ChangeLog, AUTHORS, and version.py file
6. Pushes to PyPI
7. Pushes the tags and newly committed files
Raises:
`EnvironmentError`:
- Not running on CircleCI
- `*_PYPI_USERNAME` and/or `*_PYPI_PASSWORD` environment variables
are missing
- Attempting to deploy to production from a branch that isn't master
"""
# Ensure proper environment
if not os.getenv(CIRCLECI_ENV_VAR): # pragma: no cover
raise EnvironmentError('Must be on CircleCI to run this script')
current_branch = os.getenv('CIRCLE_BRANCH')
if (target == 'PROD') and (current_branch != 'master'):
raise EnvironmentError(
f'Refusing to deploy to production from branch {current_branch!r}. '
f'Production deploys can only be made from master.')
if target in ('PROD', 'TEST'):
pypi_username = os.getenv(f'{target}_PYPI_USERNAME')
pypi_password = os.getenv(f'{target}_PYPI_PASSWORD')
else:
raise ValueError(f"Deploy target must be 'PROD' or 'TEST', got {target!r}.")
if not (pypi_username and pypi_password): # pragma: no cover
raise EnvironmentError(
f"Missing '{target}_PYPI_USERNAME' and/or '{target}_PYPI_PASSWORD' "
f"environment variables. These are required to push to PyPI.")
# Twine requires these environment variables to be set. Subprocesses will
# inherit these when we invoke them, so no need to pass them on the command
# line. We want to avoid that in case something's logging each command run.
os.environ['TWINE_USERNAME'] = pypi_username
os.environ['TWINE_PASSWORD'] = pypi_password
# Set up git on circle to push to the current branch
_shell('git config --global user.email "[email protected]"')
_shell('git config --global user.name "Circle CI"')
_shell('git config push.default current')
# Obtain the version to deploy
ret = _shell('make version', stdout=subprocess.PIPE)
version = ret.stdout.decode('utf-8').strip()
print(f'Deploying version {version!r}...')
# Tag the version
_shell(f'git tag -f -a {version} -m "Version {version}"')
# Update the version
_shell(
f'sed -i.bak "s/^__version__ = .*/__version__ = {version!r}/" */version.py')
# Create a standard distribution and a wheel
_shell('python setup.py sdist bdist_wheel')
# Add the updated ChangeLog and AUTHORS
_shell('git add ChangeLog AUTHORS */version.py')
# Start the commit message with "Merge" so that PBR will ignore it in the
# ChangeLog. Use [skip ci] to ensure CircleCI doesn't recursively deploy.
_shell('git commit --no-verify -m "Merge autogenerated files [skip ci]"')
# Push the distributions to PyPI.
_pypi_push('dist')
# Push the tag and AUTHORS / ChangeLog after successful PyPI deploy
_shell('git push --follow-tags')
print(f'Deployment complete. Latest version is {version}.') | python | def deploy(target):
# Ensure proper environment
if not os.getenv(CIRCLECI_ENV_VAR): # pragma: no cover
raise EnvironmentError('Must be on CircleCI to run this script')
current_branch = os.getenv('CIRCLE_BRANCH')
if (target == 'PROD') and (current_branch != 'master'):
raise EnvironmentError(
f'Refusing to deploy to production from branch {current_branch!r}. '
f'Production deploys can only be made from master.')
if target in ('PROD', 'TEST'):
pypi_username = os.getenv(f'{target}_PYPI_USERNAME')
pypi_password = os.getenv(f'{target}_PYPI_PASSWORD')
else:
raise ValueError(f"Deploy target must be 'PROD' or 'TEST', got {target!r}.")
if not (pypi_username and pypi_password): # pragma: no cover
raise EnvironmentError(
f"Missing '{target}_PYPI_USERNAME' and/or '{target}_PYPI_PASSWORD' "
f"environment variables. These are required to push to PyPI.")
# Twine requires these environment variables to be set. Subprocesses will
# inherit these when we invoke them, so no need to pass them on the command
# line. We want to avoid that in case something's logging each command run.
os.environ['TWINE_USERNAME'] = pypi_username
os.environ['TWINE_PASSWORD'] = pypi_password
# Set up git on circle to push to the current branch
_shell('git config --global user.email "[email protected]"')
_shell('git config --global user.name "Circle CI"')
_shell('git config push.default current')
# Obtain the version to deploy
ret = _shell('make version', stdout=subprocess.PIPE)
version = ret.stdout.decode('utf-8').strip()
print(f'Deploying version {version!r}...')
# Tag the version
_shell(f'git tag -f -a {version} -m "Version {version}"')
# Update the version
_shell(
f'sed -i.bak "s/^__version__ = .*/__version__ = {version!r}/" */version.py')
# Create a standard distribution and a wheel
_shell('python setup.py sdist bdist_wheel')
# Add the updated ChangeLog and AUTHORS
_shell('git add ChangeLog AUTHORS */version.py')
# Start the commit message with "Merge" so that PBR will ignore it in the
# ChangeLog. Use [skip ci] to ensure CircleCI doesn't recursively deploy.
_shell('git commit --no-verify -m "Merge autogenerated files [skip ci]"')
# Push the distributions to PyPI.
_pypi_push('dist')
# Push the tag and AUTHORS / ChangeLog after successful PyPI deploy
_shell('git push --follow-tags')
print(f'Deployment complete. Latest version is {version}.') | [
"def",
"deploy",
"(",
"target",
")",
":",
"# Ensure proper environment",
"if",
"not",
"os",
".",
"getenv",
"(",
"CIRCLECI_ENV_VAR",
")",
":",
"# pragma: no cover",
"raise",
"EnvironmentError",
"(",
"'Must be on CircleCI to run this script'",
")",
"current_branch",
"=",
"os",
".",
"getenv",
"(",
"'CIRCLE_BRANCH'",
")",
"if",
"(",
"target",
"==",
"'PROD'",
")",
"and",
"(",
"current_branch",
"!=",
"'master'",
")",
":",
"raise",
"EnvironmentError",
"(",
"f'Refusing to deploy to production from branch {current_branch!r}. '",
"f'Production deploys can only be made from master.'",
")",
"if",
"target",
"in",
"(",
"'PROD'",
",",
"'TEST'",
")",
":",
"pypi_username",
"=",
"os",
".",
"getenv",
"(",
"f'{target}_PYPI_USERNAME'",
")",
"pypi_password",
"=",
"os",
".",
"getenv",
"(",
"f'{target}_PYPI_PASSWORD'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"f\"Deploy target must be 'PROD' or 'TEST', got {target!r}.\"",
")",
"if",
"not",
"(",
"pypi_username",
"and",
"pypi_password",
")",
":",
"# pragma: no cover",
"raise",
"EnvironmentError",
"(",
"f\"Missing '{target}_PYPI_USERNAME' and/or '{target}_PYPI_PASSWORD' \"",
"f\"environment variables. These are required to push to PyPI.\"",
")",
"# Twine requires these environment variables to be set. Subprocesses will",
"# inherit these when we invoke them, so no need to pass them on the command",
"# line. We want to avoid that in case something's logging each command run.",
"os",
".",
"environ",
"[",
"'TWINE_USERNAME'",
"]",
"=",
"pypi_username",
"os",
".",
"environ",
"[",
"'TWINE_PASSWORD'",
"]",
"=",
"pypi_password",
"# Set up git on circle to push to the current branch",
"_shell",
"(",
"'git config --global user.email \"[email protected]\"'",
")",
"_shell",
"(",
"'git config --global user.name \"Circle CI\"'",
")",
"_shell",
"(",
"'git config push.default current'",
")",
"# Obtain the version to deploy",
"ret",
"=",
"_shell",
"(",
"'make version'",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"version",
"=",
"ret",
".",
"stdout",
".",
"decode",
"(",
"'utf-8'",
")",
".",
"strip",
"(",
")",
"print",
"(",
"f'Deploying version {version!r}...'",
")",
"# Tag the version",
"_shell",
"(",
"f'git tag -f -a {version} -m \"Version {version}\"'",
")",
"# Update the version",
"_shell",
"(",
"f'sed -i.bak \"s/^__version__ = .*/__version__ = {version!r}/\" */version.py'",
")",
"# Create a standard distribution and a wheel",
"_shell",
"(",
"'python setup.py sdist bdist_wheel'",
")",
"# Add the updated ChangeLog and AUTHORS",
"_shell",
"(",
"'git add ChangeLog AUTHORS */version.py'",
")",
"# Start the commit message with \"Merge\" so that PBR will ignore it in the",
"# ChangeLog. Use [skip ci] to ensure CircleCI doesn't recursively deploy.",
"_shell",
"(",
"'git commit --no-verify -m \"Merge autogenerated files [skip ci]\"'",
")",
"# Push the distributions to PyPI.",
"_pypi_push",
"(",
"'dist'",
")",
"# Push the tag and AUTHORS / ChangeLog after successful PyPI deploy",
"_shell",
"(",
"'git push --follow-tags'",
")",
"print",
"(",
"f'Deployment complete. Latest version is {version}.'",
")"
] | Deploys the package and documentation.
Proceeds in the following steps:
1. Ensures proper environment variables are set and checks that we are on Circle CI
2. Tags the repository with the new version
3. Updates version.py to have the proper version
4. Creates a standard distribution and a wheel
5. Commits the ChangeLog, AUTHORS, and version.py file
6. Pushes to PyPI
7. Pushes the tags and newly committed files
Raises:
`EnvironmentError`:
- Not running on CircleCI
- `*_PYPI_USERNAME` and/or `*_PYPI_PASSWORD` environment variables
are missing
- Attempting to deploy to production from a branch that isn't master | [
"Deploys",
"the",
"package",
"and",
"documentation",
"."
] | a863ed4b652053e315dfa039d978b56f03664c07 | https://github.com/CloverHealth/pytest-pgsql/blob/a863ed4b652053e315dfa039d978b56f03664c07/deploy.py#L43-L124 |
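A minimal usage sketch for the record above (not from the source): invoking `deploy` from a CI job. `CIRCLECI_ENV_VAR` is a constant defined elsewhere in deploy.py, so the concrete variable name below is an assumption, as is the `deploy` module import.

```python
# Hypothetical invocation of deploy() from within a CircleCI job.
# Assumes CIRCLECI_ENV_VAR resolves to 'CIRCLECI' (not shown in the
# record) and that TEST_PYPI_USERNAME / TEST_PYPI_PASSWORD are set.
import os

os.environ.setdefault('CIRCLE_BRANCH', 'master')

from deploy import deploy  # module name inferred from deploy.py

deploy('TEST')    # push to the test index
# deploy('PROD')  # allowed only when CIRCLE_BRANCH == 'master'
```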
3,159 | pyvisa/pyvisa-sim | pyvisa-sim/parser.py | _get_triplet | def _get_triplet(dd):
"""Return a triplet from a dialogue dictionary.
:param dd: Dialogue dictionary.
:type dd: Dict[str, str]
:return: (query, response, error response)
:rtype: (str, str | NoResponse, str | NoResponse)
"""
return _s(dd['q']), _s(dd.get('r', NoResponse)), _s(dd.get('e', NoResponse)) | python | def _get_triplet(dd):
return _s(dd['q']), _s(dd.get('r', NoResponse)), _s(dd.get('e', NoResponse)) | [
"def",
"_get_triplet",
"(",
"dd",
")",
":",
"return",
"_s",
"(",
"dd",
"[",
"'q'",
"]",
")",
",",
"_s",
"(",
"dd",
".",
"get",
"(",
"'r'",
",",
"NoResponse",
")",
")",
",",
"_s",
"(",
"dd",
".",
"get",
"(",
"'e'",
",",
"NoResponse",
")",
")"
] | Return a triplet from a dialogue dictionary.
:param dd: Dialogue dictionary.
:type dd: Dict[str, str]
:return: (query, response, error response)
:rtype: (str, str | NoResponse, str | NoResponse) | [
"Return",
"a",
"triplet",
"from",
"a",
"dialogue",
"dictionary",
"."
] | 9836166b6b57c165fc63a276f87fe81f106a4e26 | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/parser.py#L71-L79 |
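A minimal sketch of the function above, assuming `_get_triplet`, `_s`, and `NoResponse` from parser.py are in scope and that `_s()` leaves plain strings unchanged; the dialogue dictionary itself is illustrative.

```python
# _get_triplet() pulls (query, response, error response) from a setter
# dictionary; missing 'r' or 'e' keys fall back to the NoResponse
# sentinel, and each value is passed through _s().
dd = {'q': 'FREQ {:.2f}', 'r': 'OK'}       # no explicit error response
query, response, error = _get_triplet(dd)
assert query == 'FREQ {:.2f}' and response == 'OK'
assert error is NoResponse                 # 'e' key was absent
```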
3,160 | pyvisa/pyvisa-sim | pyvisa-sim/parser.py | _load | def _load(content_or_fp):
"""YAML Parse a file or str and check version.
"""
try:
data = yaml.load(content_or_fp, Loader=yaml.loader.BaseLoader)
except Exception as e:
raise type(e)('Malformed yaml file:\n%r' % format_exc())
try:
ver = data['spec']
except:
raise ValueError('The file does not specify a spec version')
try:
ver = tuple(map(int, (ver.split("."))))
except:
raise ValueError("Invalid spec version format. Expect 'X.Y'"
" (X and Y integers), found %s" % ver)
if ver > SPEC_VERSION_TUPLE:
raise ValueError('The spec version of the file is '
'%s but the parser is %s. '
'Please update pyvisa-sim.' % (ver, SPEC_VERSION))
return data | python | def _load(content_or_fp):
try:
data = yaml.load(content_or_fp, Loader=yaml.loader.BaseLoader)
except Exception as e:
raise type(e)('Malformed yaml file:\n%r' % format_exc())
try:
ver = data['spec']
except:
raise ValueError('The file does not specify a spec version')
try:
ver = tuple(map(int, (ver.split("."))))
except:
raise ValueError("Invalid spec version format. Expect 'X.Y'"
" (X and Y integers), found %s" % ver)
if ver > SPEC_VERSION_TUPLE:
raise ValueError('The spec version of the file is '
'%s but the parser is %s. '
'Please update pyvisa-sim.' % (ver, SPEC_VERSION))
return data | [
"def",
"_load",
"(",
"content_or_fp",
")",
":",
"try",
":",
"data",
"=",
"yaml",
".",
"load",
"(",
"content_or_fp",
",",
"Loader",
"=",
"yaml",
".",
"loader",
".",
"BaseLoader",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"type",
"(",
"e",
")",
"(",
"'Malformed yaml file:\\n%r'",
"%",
"format_exc",
"(",
")",
")",
"try",
":",
"ver",
"=",
"data",
"[",
"'spec'",
"]",
"except",
":",
"raise",
"ValueError",
"(",
"'The file does not specify a spec version'",
")",
"try",
":",
"ver",
"=",
"tuple",
"(",
"map",
"(",
"int",
",",
"(",
"ver",
".",
"split",
"(",
"\".\"",
")",
")",
")",
")",
"except",
":",
"raise",
"ValueError",
"(",
"\"Invalid spec version format. Expect 'X.Y'\"",
"\" (X and Y integers), found %s\"",
"%",
"ver",
")",
"if",
"ver",
">",
"SPEC_VERSION_TUPLE",
":",
"raise",
"ValueError",
"(",
"'The spec version of the file is '",
"'%s but the parser is %s. '",
"'Please update pyvisa-sim.'",
"%",
"(",
"ver",
",",
"SPEC_VERSION",
")",
")",
"return",
"data"
] | Parse a YAML file or string and check the spec version. | [
"YAML",
"Parse",
"a",
"file",
"or",
"str",
"and",
"check",
"version",
"."
] | 9836166b6b57c165fc63a276f87fe81f106a4e26 | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/parser.py#L82-L106 |
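A sketch of feeding `_load` an in-memory YAML document; the `'1.0'` spec value is illustrative and must not exceed the parser's `SPEC_VERSION`.

```python
# _load() accepts a string or file-like object; BaseLoader keeps every
# scalar as a string, and the 'spec' field is validated as 'X.Y'.
from io import StringIO

doc = StringIO('spec: "1.0"\nresources: {}\n')
data = _load(doc)              # ValueError if 'spec' missing or newer
assert data['spec'] == '1.0'   # still a string under BaseLoader
```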
3,161 | pyvisa/pyvisa-sim | pyvisa-sim/parser.py | parse_resource | def parse_resource(name):
"""Parse a resource file
"""
with closing(pkg_resources.resource_stream(__name__, name)) as fp:
rbytes = fp.read()
return _load(StringIO(rbytes.decode('utf-8'))) | python | def parse_resource(name):
with closing(pkg_resources.resource_stream(__name__, name)) as fp:
rbytes = fp.read()
return _load(StringIO(rbytes.decode('utf-8'))) | [
"def",
"parse_resource",
"(",
"name",
")",
":",
"with",
"closing",
"(",
"pkg_resources",
".",
"resource_stream",
"(",
"__name__",
",",
"name",
")",
")",
"as",
"fp",
":",
"rbytes",
"=",
"fp",
".",
"read",
"(",
")",
"return",
"_load",
"(",
"StringIO",
"(",
"rbytes",
".",
"decode",
"(",
"'utf-8'",
")",
")",
")"
] | Parse a resource file | [
"Parse",
"a",
"resource",
"file"
] | 9836166b6b57c165fc63a276f87fe81f106a4e26 | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/parser.py#L109-L115 |
3,162 | pyvisa/pyvisa-sim | pyvisa-sim/parser.py | update_component | def update_component(name, comp, component_dict):
"""Get a component from a component dict.
"""
for dia in component_dict.get('dialogues', ()):
try:
comp.add_dialogue(*_get_pair(dia))
except Exception as e:
msg = 'In device %s, malformed dialogue %s\n%r'
raise Exception(msg % (name, dia, e))
for prop_name, prop_dict in component_dict.get('properties', {}).items():
try:
getter = (_get_pair(prop_dict['getter'])
if 'getter' in prop_dict else None)
setter = (_get_triplet(prop_dict['setter'])
if 'setter' in prop_dict else None)
comp.add_property(prop_name, prop_dict.get('default', ''),
getter, setter, prop_dict.get('specs', {}))
except Exception as e:
msg = 'In device %s, malformed property %s\n%r'
raise type(e)(msg % (name, prop_name, format_exc())) | python | def update_component(name, comp, component_dict):
for dia in component_dict.get('dialogues', ()):
try:
comp.add_dialogue(*_get_pair(dia))
except Exception as e:
msg = 'In device %s, malformed dialogue %s\n%r'
raise Exception(msg % (name, dia, e))
for prop_name, prop_dict in component_dict.get('properties', {}).items():
try:
getter = (_get_pair(prop_dict['getter'])
if 'getter' in prop_dict else None)
setter = (_get_triplet(prop_dict['setter'])
if 'setter' in prop_dict else None)
comp.add_property(prop_name, prop_dict.get('default', ''),
getter, setter, prop_dict.get('specs', {}))
except Exception as e:
msg = 'In device %s, malformed property %s\n%r'
raise type(e)(msg % (name, prop_name, format_exc())) | [
"def",
"update_component",
"(",
"name",
",",
"comp",
",",
"component_dict",
")",
":",
"for",
"dia",
"in",
"component_dict",
".",
"get",
"(",
"'dialogues'",
",",
"(",
")",
")",
":",
"try",
":",
"comp",
".",
"add_dialogue",
"(",
"*",
"_get_pair",
"(",
"dia",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"msg",
"=",
"'In device %s, malformed dialogue %s\\n%r'",
"raise",
"Exception",
"(",
"msg",
"%",
"(",
"name",
",",
"dia",
",",
"e",
")",
")",
"for",
"prop_name",
",",
"prop_dict",
"in",
"component_dict",
".",
"get",
"(",
"'properties'",
",",
"{",
"}",
")",
".",
"items",
"(",
")",
":",
"try",
":",
"getter",
"=",
"(",
"_get_pair",
"(",
"prop_dict",
"[",
"'getter'",
"]",
")",
"if",
"'getter'",
"in",
"prop_dict",
"else",
"None",
")",
"setter",
"=",
"(",
"_get_triplet",
"(",
"prop_dict",
"[",
"'setter'",
"]",
")",
"if",
"'setter'",
"in",
"prop_dict",
"else",
"None",
")",
"comp",
".",
"add_property",
"(",
"prop_name",
",",
"prop_dict",
".",
"get",
"(",
"'default'",
",",
"''",
")",
",",
"getter",
",",
"setter",
",",
"prop_dict",
".",
"get",
"(",
"'specs'",
",",
"{",
"}",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"msg",
"=",
"'In device %s, malformed property %s\\n%r'",
"raise",
"type",
"(",
"e",
")",
"(",
"msg",
"%",
"(",
"name",
",",
"prop_name",
",",
"format_exc",
"(",
")",
")",
")"
] | Update a component from a component dict. | [
"Get",
"a",
"component",
"from",
"a",
"component",
"dict",
"."
] | 9836166b6b57c165fc63a276f87fe81f106a4e26 | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/parser.py#L126-L147 |
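A sketch of the dictionary shape `update_component` consumes, with an illustrative dialogue and property. In the YAML files the specs arrive as strings and are converted elsewhere; here `'type'` is given directly as a callable, which is the form `Property.validate_value` (record 3,176 below) ultimately applies.

```python
# update_component() wires dialogues and properties onto an existing
# Component instance 'comp'; every query/response below is made up.
component_dict = {
    'dialogues': [
        {'q': '*IDN?', 'r': 'ACME,Model1,1234,1.0'},
    ],
    'properties': {
        'frequency': {
            'default': '100.0',
            'getter': {'q': 'FREQ?', 'r': '{:.2f}'},
            'setter': {'q': 'FREQ {:.2f}', 'r': 'OK', 'e': 'ERR'},
            'specs': {'type': float, 'min': 1.0, 'max': 1e6},
        },
    },
}
update_component('device 1', comp, component_dict)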
3,163 | pyvisa/pyvisa-sim | pyvisa-sim/parser.py | get_bases | def get_bases(definition_dict, loader):
"""Collect dependencies.
"""
bases = definition_dict.get('bases', ())
if bases:
bases = (loader.get_comp_dict(required_version=SPEC_VERSION_TUPLE[0],
**b)
for b in bases)
return SimpleChainmap(definition_dict, *bases)
else:
return definition_dict | python | def get_bases(definition_dict, loader):
bases = definition_dict.get('bases', ())
if bases:
bases = (loader.get_comp_dict(required_version=SPEC_VERSION_TUPLE[0],
**b)
for b in bases)
return SimpleChainmap(definition_dict, *bases)
else:
return definition_dict | [
"def",
"get_bases",
"(",
"definition_dict",
",",
"loader",
")",
":",
"bases",
"=",
"definition_dict",
".",
"get",
"(",
"'bases'",
",",
"(",
")",
")",
"if",
"bases",
":",
"bases",
"=",
"(",
"loader",
".",
"get_comp_dict",
"(",
"required_version",
"=",
"SPEC_VERSION_TUPLE",
"[",
"0",
"]",
",",
"*",
"*",
"b",
")",
"for",
"b",
"in",
"bases",
")",
"return",
"SimpleChainmap",
"(",
"definition_dict",
",",
"*",
"bases",
")",
"else",
":",
"return",
"definition_dict"
] | Collect dependencies. | [
"Collect",
"dependencies",
"."
] | 9836166b6b57c165fc63a276f87fe81f106a4e26 | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/parser.py#L150-L161 |
3,164 | pyvisa/pyvisa-sim | pyvisa-sim/parser.py | get_channel | def get_channel(device, ch_name, channel_dict, loader, resource_dict):
"""Get a channels from a channels dictionary.
:param name: name of the device
:param device_dict: device dictionary
:rtype: Device
"""
channel_dict = get_bases(channel_dict, loader)
r_ids = resource_dict.get('channel_ids', {}).get(ch_name, [])
ids = r_ids if r_ids else channel_dict.get('ids', {})
can_select = False if channel_dict.get('can_select') == 'False' else True
channels = Channels(device, ids, can_select)
update_component(ch_name, channels, channel_dict)
return channels | python | def get_channel(device, ch_name, channel_dict, loader, resource_dict):
channel_dict = get_bases(channel_dict, loader)
r_ids = resource_dict.get('channel_ids', {}).get(ch_name, [])
ids = r_ids if r_ids else channel_dict.get('ids', {})
can_select = False if channel_dict.get('can_select') == 'False' else True
channels = Channels(device, ids, can_select)
update_component(ch_name, channels, channel_dict)
return channels | [
"def",
"get_channel",
"(",
"device",
",",
"ch_name",
",",
"channel_dict",
",",
"loader",
",",
"resource_dict",
")",
":",
"channel_dict",
"=",
"get_bases",
"(",
"channel_dict",
",",
"loader",
")",
"r_ids",
"=",
"resource_dict",
".",
"get",
"(",
"'channel_ids'",
",",
"{",
"}",
")",
".",
"get",
"(",
"ch_name",
",",
"[",
"]",
")",
"ids",
"=",
"r_ids",
"if",
"r_ids",
"else",
"channel_dict",
".",
"get",
"(",
"'ids'",
",",
"{",
"}",
")",
"can_select",
"=",
"False",
"if",
"channel_dict",
".",
"get",
"(",
"'can_select'",
")",
"==",
"'False'",
"else",
"True",
"channels",
"=",
"Channels",
"(",
"device",
",",
"ids",
",",
"can_select",
")",
"update_component",
"(",
"ch_name",
",",
"channels",
",",
"channel_dict",
")",
"return",
"channels"
] | Get a Channels object from a channel dictionary.
:param device: parent device
:param ch_name: name of the channel
:param channel_dict: channel dictionary
:rtype: Channels | [
"Get",
"a",
"channels",
"from",
"a",
"channels",
"dictionary",
"."
] | 9836166b6b57c165fc63a276f87fe81f106a4e26 | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/parser.py#L164-L181 |
3,165 | pyvisa/pyvisa-sim | pyvisa-sim/parser.py | get_device | def get_device(name, device_dict, loader, resource_dict):
"""Get a device from a device dictionary.
:param name: name of the device
:param device_dict: device dictionary
:rtype: Device
"""
device = Device(name, device_dict.get('delimiter', ';').encode('utf-8'))
device_dict = get_bases(device_dict, loader)
err = device_dict.get('error', {})
device.add_error_handler(err)
for itype, eom_dict in device_dict.get('eom', {}).items():
device.add_eom(itype, *_get_pair(eom_dict))
update_component(name, device, device_dict)
for ch_name, ch_dict in device_dict.get('channels', {}).items():
device.add_channels(ch_name, get_channel(device, ch_name, ch_dict,
loader, resource_dict))
return device | python | def get_device(name, device_dict, loader, resource_dict):
device = Device(name, device_dict.get('delimiter', ';').encode('utf-8'))
device_dict = get_bases(device_dict, loader)
err = device_dict.get('error', {})
device.add_error_handler(err)
for itype, eom_dict in device_dict.get('eom', {}).items():
device.add_eom(itype, *_get_pair(eom_dict))
update_component(name, device, device_dict)
for ch_name, ch_dict in device_dict.get('channels', {}).items():
device.add_channels(ch_name, get_channel(device, ch_name, ch_dict,
loader, resource_dict))
return device | [
"def",
"get_device",
"(",
"name",
",",
"device_dict",
",",
"loader",
",",
"resource_dict",
")",
":",
"device",
"=",
"Device",
"(",
"name",
",",
"device_dict",
".",
"get",
"(",
"'delimiter'",
",",
"';'",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"device_dict",
"=",
"get_bases",
"(",
"device_dict",
",",
"loader",
")",
"err",
"=",
"device_dict",
".",
"get",
"(",
"'error'",
",",
"{",
"}",
")",
"device",
".",
"add_error_handler",
"(",
"err",
")",
"for",
"itype",
",",
"eom_dict",
"in",
"device_dict",
".",
"get",
"(",
"'eom'",
",",
"{",
"}",
")",
".",
"items",
"(",
")",
":",
"device",
".",
"add_eom",
"(",
"itype",
",",
"*",
"_get_pair",
"(",
"eom_dict",
")",
")",
"update_component",
"(",
"name",
",",
"device",
",",
"device_dict",
")",
"for",
"ch_name",
",",
"ch_dict",
"in",
"device_dict",
".",
"get",
"(",
"'channels'",
",",
"{",
"}",
")",
".",
"items",
"(",
")",
":",
"device",
".",
"add_channels",
"(",
"ch_name",
",",
"get_channel",
"(",
"device",
",",
"ch_name",
",",
"ch_dict",
",",
"loader",
",",
"resource_dict",
")",
")",
"return",
"device"
] | Get a device from a device dictionary.
:param name: name of the device
:param device_dict: device dictionary
:rtype: Device | [
"Get",
"a",
"device",
"from",
"a",
"device",
"dictionary",
"."
] | 9836166b6b57c165fc63a276f87fe81f106a4e26 | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/parser.py#L184-L207 |
3,166 | pyvisa/pyvisa-sim | pyvisa-sim/parser.py | get_devices | def get_devices(filename, bundled):
"""Get a Devices object from a file.
:param filename: full path of the file to parse or name of the resource.
:param bundled: boolean indicating if the file is a bundled resource.
:rtype: Devices
"""
loader = Loader(filename, bundled)
data = loader.data
devices = Devices()
# Iterate through the resources and generate each individual device
# on demand.
for resource_name, resource_dict in data.get('resources', {}).items():
device_name = resource_dict['device']
dd = loader.get_device_dict(device_name,
resource_dict.get('filename', None),
resource_dict.get('bundled', False),
SPEC_VERSION_TUPLE[0])
devices.add_device(resource_name,
get_device(device_name, dd, loader, resource_dict))
return devices | python | def get_devices(filename, bundled):
loader = Loader(filename, bundled)
data = loader.data
devices = Devices()
# Iterate through the resources and generate each individual device
# on demand.
for resource_name, resource_dict in data.get('resources', {}).items():
device_name = resource_dict['device']
dd = loader.get_device_dict(device_name,
resource_dict.get('filename', None),
resource_dict.get('bundled', False),
SPEC_VERSION_TUPLE[0])
devices.add_device(resource_name,
get_device(device_name, dd, loader, resource_dict))
return devices | [
"def",
"get_devices",
"(",
"filename",
",",
"bundled",
")",
":",
"loader",
"=",
"Loader",
"(",
"filename",
",",
"bundled",
")",
"data",
"=",
"loader",
".",
"data",
"devices",
"=",
"Devices",
"(",
")",
"# Iterate through the resources and generate each individual device",
"# on demand.",
"for",
"resource_name",
",",
"resource_dict",
"in",
"data",
".",
"get",
"(",
"'resources'",
",",
"{",
"}",
")",
".",
"items",
"(",
")",
":",
"device_name",
"=",
"resource_dict",
"[",
"'device'",
"]",
"dd",
"=",
"loader",
".",
"get_device_dict",
"(",
"device_name",
",",
"resource_dict",
".",
"get",
"(",
"'filename'",
",",
"None",
")",
",",
"resource_dict",
".",
"get",
"(",
"'bundled'",
",",
"False",
")",
",",
"SPEC_VERSION_TUPLE",
"[",
"0",
"]",
")",
"devices",
".",
"add_device",
"(",
"resource_name",
",",
"get_device",
"(",
"device_name",
",",
"dd",
",",
"loader",
",",
"resource_dict",
")",
")",
"return",
"devices"
] | Get a Devices object from a file.
:param filename: full path of the file to parse or name of the resource.
:param bundled: boolean indicating if the file is a bundled resource.
:rtype: Devices | [
"Get",
"a",
"Devices",
"object",
"from",
"a",
"file",
"."
] | 9836166b6b57c165fc63a276f87fe81f106a4e26 | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/parser.py#L270-L298 |
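A sketch of the top-level file layout `get_devices` walks; the path is hypothetical and the `devices` section is elided.

```python
# get_devices() builds one simulated Device per entry under
# 'resources'. A matching definition file might look like:
#
#   spec: "1.0"
#   resources:
#     ASRL1::INSTR:
#       device: device 1
#   devices:
#     device 1:
#       ...   # dialogues, properties, eom, etc.
#
devices = get_devices('instruments.yaml', bundled=False)  # hypothetical path
```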
3,167 | pyvisa/pyvisa-sim | pyvisa-sim/channels.py | ChannelProperty.init_value | def init_value(self, string_value):
"""Create an empty defaultdict holding the default value.
"""
value = self.validate_value(string_value)
self._value = defaultdict(lambda: value) | python | def init_value(self, string_value):
value = self.validate_value(string_value)
self._value = defaultdict(lambda: value) | [
"def",
"init_value",
"(",
"self",
",",
"string_value",
")",
":",
"value",
"=",
"self",
".",
"validate_value",
"(",
"string_value",
")",
"self",
".",
"_value",
"=",
"defaultdict",
"(",
"lambda",
":",
"value",
")"
] | Create an empty defaultdict holding the default value. | [
"Create",
"an",
"empty",
"defaultdict",
"holding",
"the",
"default",
"value",
"."
] | 9836166b6b57c165fc63a276f87fe81f106a4e26 | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/channels.py#L30-L35 |
3,168 | pyvisa/pyvisa-sim | pyvisa-sim/channels.py | ChannelProperty.set_value | def set_value(self, string_value):
"""Set the current value for a channel.
"""
value = self.validate_value(string_value)
self._value[self._channel._selected] = value | python | def set_value(self, string_value):
value = self.validate_value(string_value)
self._value[self._channel._selected] = value | [
"def",
"set_value",
"(",
"self",
",",
"string_value",
")",
":",
"value",
"=",
"self",
".",
"validate_value",
"(",
"string_value",
")",
"self",
".",
"_value",
"[",
"self",
".",
"_channel",
".",
"_selected",
"]",
"=",
"value"
] | Set the current value for a channel. | [
"Set",
"the",
"current",
"value",
"for",
"a",
"channel",
"."
] | 9836166b6b57c165fc63a276f87fe81f106a4e26 | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/channels.py#L43-L48 |
3,169 | pyvisa/pyvisa-sim | pyvisa-sim/channels.py | Channels.add_dialogue | def add_dialogue(self, query, response):
"""Add dialogue to channel.
:param query: query string
:param response: response string
"""
self._dialogues['__default__'][to_bytes(query)] = to_bytes(response) | python | def add_dialogue(self, query, response):
self._dialogues['__default__'][to_bytes(query)] = to_bytes(response) | [
"def",
"add_dialogue",
"(",
"self",
",",
"query",
",",
"response",
")",
":",
"self",
".",
"_dialogues",
"[",
"'__default__'",
"]",
"[",
"to_bytes",
"(",
"query",
")",
"]",
"=",
"to_bytes",
"(",
"response",
")"
] | Add dialogue to channel.
:param query: query string
:param response: response string | [
"Add",
"dialogue",
"to",
"channel",
"."
] | 9836166b6b57c165fc63a276f87fe81f106a4e26 | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/channels.py#L93-L99 |
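A one-line usage sketch; both arguments go through `to_bytes` (record 3,175 below), so backslash escapes like `\n` become real terminator bytes.

```python
# Register a fixed query/response pair on a Channels instance; it is
# stored under the '__default__' key until a channel-specific match.
channels.add_dialogue('STATUS?', 'READY')
assert channels._dialogues['__default__'][b'STATUS?'] == b'READY'
```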
3,170 | pyvisa/pyvisa-sim | pyvisa-sim/channels.py | Channels.add_property | def add_property(self, name, default_value, getter_pair, setter_triplet,
specs):
"""Add property to channel
:param name: property name
:param default_value: default value as string
:param getter_pair: (query, response)
:param setter_triplet: (query, response, error)
:param specs: specification of the Property
"""
self._properties[name] = ChannelProperty(self, name,
default_value, specs)
if getter_pair:
query, response = getter_pair
self._getters['__default__'][to_bytes(query)] = name, response
if setter_triplet:
query, response, error = setter_triplet
self._setters.append((name,
stringparser.Parser(query),
to_bytes(response),
to_bytes(error))) | python | def add_property(self, name, default_value, getter_pair, setter_triplet,
specs):
self._properties[name] = ChannelProperty(self, name,
default_value, specs)
if getter_pair:
query, response = getter_pair
self._getters['__default__'][to_bytes(query)] = name, response
if setter_triplet:
query, response, error = setter_triplet
self._setters.append((name,
stringparser.Parser(query),
to_bytes(response),
to_bytes(error))) | [
"def",
"add_property",
"(",
"self",
",",
"name",
",",
"default_value",
",",
"getter_pair",
",",
"setter_triplet",
",",
"specs",
")",
":",
"self",
".",
"_properties",
"[",
"name",
"]",
"=",
"ChannelProperty",
"(",
"self",
",",
"name",
",",
"default_value",
",",
"specs",
")",
"if",
"getter_pair",
":",
"query",
",",
"response",
"=",
"getter_pair",
"self",
".",
"_getters",
"[",
"'__default__'",
"]",
"[",
"to_bytes",
"(",
"query",
")",
"]",
"=",
"name",
",",
"response",
"if",
"setter_triplet",
":",
"query",
",",
"response",
",",
"error",
"=",
"setter_triplet",
"self",
".",
"_setters",
".",
"append",
"(",
"(",
"name",
",",
"stringparser",
".",
"Parser",
"(",
"query",
")",
",",
"to_bytes",
"(",
"response",
")",
",",
"to_bytes",
"(",
"error",
")",
")",
")"
] | Add property to channel
:param name: property name
:param default_value: default value as string
:param getter_pair: (query, response)
:param setter_triplet: (query, response, error)
:param specs: specification of the Property | [
"Add",
"property",
"to",
"channel"
] | 9836166b6b57c165fc63a276f87fe81f106a4e26 | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/channels.py#L101-L123 |
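A usage sketch with made-up SCPI-like queries. The `{ch_id}` field in the setter is the hook `_match_setters` (record 3,172 below) uses to select the channel; the positional `{:.3f}` value then arrives as `parsed['0']`.

```python
# Wire a per-channel 'voltage' property: a (query, response) getter
# pair and a (query, response, error) setter triplet whose query is
# compiled with stringparser for later matching.
channels.add_property(
    'voltage',
    '0.0',                                   # default value, as a string
    ('VOLT?', '{:.3f}'),                     # getter pair
    ('VOLT {ch_id},{:.3f}', 'OK', 'ERR'),    # setter triplet
    {},                                      # no specs constraints
)
```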
3,171 | pyvisa/pyvisa-sim | pyvisa-sim/channels.py | Channels.match | def match(self, query):
"""Try to find a match for a query in the channel commands.
"""
if not self.can_select:
ch_id = self._device._properties['selected_channel'].get_value()
if ch_id in self._ids:
self._selected = ch_id
else:
return
response = self._match_dialog(query,
self._dialogues['__default__'])
if response is not None:
return response
response = self._match_getters(query,
self._getters['__default__'])
if response is not None:
return response
else:
for ch_id in self._ids:
self._selected = ch_id
response = self._match_dialog(query,
self._dialogues[ch_id])
if response is not None:
return response
response = self._match_getters(query,
self._getters[ch_id])
if response is not None:
return response
return self._match_setters(query) | python | def match(self, query):
if not self.can_select:
ch_id = self._device._properties['selected_channel'].get_value()
if ch_id in self._ids:
self._selected = ch_id
else:
return
response = self._match_dialog(query,
self._dialogues['__default__'])
if response is not None:
return response
response = self._match_getters(query,
self._getters['__default__'])
if response is not None:
return response
else:
for ch_id in self._ids:
self._selected = ch_id
response = self._match_dialog(query,
self._dialogues[ch_id])
if response is not None:
return response
response = self._match_getters(query,
self._getters[ch_id])
if response is not None:
return response
return self._match_setters(query) | [
"def",
"match",
"(",
"self",
",",
"query",
")",
":",
"if",
"not",
"self",
".",
"can_select",
":",
"ch_id",
"=",
"self",
".",
"_device",
".",
"_properties",
"[",
"'selected_channel'",
"]",
".",
"get_value",
"(",
")",
"if",
"ch_id",
"in",
"self",
".",
"_ids",
":",
"self",
".",
"_selected",
"=",
"ch_id",
"else",
":",
"return",
"response",
"=",
"self",
".",
"_match_dialog",
"(",
"query",
",",
"self",
".",
"_dialogues",
"[",
"'__default__'",
"]",
")",
"if",
"response",
"is",
"not",
"None",
":",
"return",
"response",
"response",
"=",
"self",
".",
"_match_getters",
"(",
"query",
",",
"self",
".",
"_getters",
"[",
"'__default__'",
"]",
")",
"if",
"response",
"is",
"not",
"None",
":",
"return",
"response",
"else",
":",
"for",
"ch_id",
"in",
"self",
".",
"_ids",
":",
"self",
".",
"_selected",
"=",
"ch_id",
"response",
"=",
"self",
".",
"_match_dialog",
"(",
"query",
",",
"self",
".",
"_dialogues",
"[",
"ch_id",
"]",
")",
"if",
"response",
"is",
"not",
"None",
":",
"return",
"response",
"response",
"=",
"self",
".",
"_match_getters",
"(",
"query",
",",
"self",
".",
"_getters",
"[",
"ch_id",
"]",
")",
"if",
"response",
"is",
"not",
"None",
":",
"return",
"response",
"return",
"self",
".",
"_match_setters",
"(",
"query",
")"
] | Try to find a match for a query in the channel commands. | [
"Try",
"to",
"find",
"a",
"match",
"for",
"a",
"query",
"in",
"the",
"channel",
"commands",
"."
] | 9836166b6b57c165fc63a276f87fe81f106a4e26 | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/channels.py#L125-L160 |
3,172 | pyvisa/pyvisa-sim | pyvisa-sim/channels.py | Channels._match_setters | def _match_setters(self, query):
"""Try to find a match
"""
q = query.decode('utf-8')
for name, parser, response, error_response in self._setters:
try:
parsed = parser(q)
logger.debug('Found response in setter of %s' % name)
except ValueError:
continue
try:
if isinstance(parsed, dict) and 'ch_id' in parsed:
self._selected = parsed['ch_id']
self._properties[name].set_value(parsed['0'])
else:
self._properties[name].set_value(parsed)
return response
except ValueError:
if isinstance(error_response, bytes):
return error_response
return self._device.error_response('command_error')
return None | python | def _match_setters(self, query):
q = query.decode('utf-8')
for name, parser, response, error_response in self._setters:
try:
parsed = parser(q)
logger.debug('Found response in setter of %s' % name)
except ValueError:
continue
try:
if isinstance(parsed, dict) and 'ch_id' in parsed:
self._selected = parsed['ch_id']
self._properties[name].set_value(parsed['0'])
else:
self._properties[name].set_value(parsed)
return response
except ValueError:
if isinstance(error_response, bytes):
return error_response
return self._device.error_response('command_error')
return None | [
"def",
"_match_setters",
"(",
"self",
",",
"query",
")",
":",
"q",
"=",
"query",
".",
"decode",
"(",
"'utf-8'",
")",
"for",
"name",
",",
"parser",
",",
"response",
",",
"error_response",
"in",
"self",
".",
"_setters",
":",
"try",
":",
"parsed",
"=",
"parser",
"(",
"q",
")",
"logger",
".",
"debug",
"(",
"'Found response in setter of %s'",
"%",
"name",
")",
"except",
"ValueError",
":",
"continue",
"try",
":",
"if",
"isinstance",
"(",
"parsed",
",",
"dict",
")",
"and",
"'ch_id'",
"in",
"parsed",
":",
"self",
".",
"_selected",
"=",
"parsed",
"[",
"'ch_id'",
"]",
"self",
".",
"_properties",
"[",
"name",
"]",
".",
"set_value",
"(",
"parsed",
"[",
"'0'",
"]",
")",
"else",
":",
"self",
".",
"_properties",
"[",
"name",
"]",
".",
"set_value",
"(",
"parsed",
")",
"return",
"response",
"except",
"ValueError",
":",
"if",
"isinstance",
"(",
"error_response",
",",
"bytes",
")",
":",
"return",
"error_response",
"return",
"self",
".",
"_device",
".",
"error_response",
"(",
"'command_error'",
")",
"return",
"None"
] | Try to find a match in the channel setters. | [
"Try",
"to",
"find",
"a",
"match"
] | 9836166b6b57c165fc63a276f87fe81f106a4e26 | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/channels.py#L162-L185 |
3,173 | pyvisa/pyvisa-sim | pyvisa-sim/sessions.py | Session.get_session_class | def get_session_class(cls, interface_type, resource_class):
"""Return the session class for a given interface type and resource class.
:type interface_type: constants.InterfaceType
:type resource_class: str
:return: Session
"""
try:
return cls._session_classes[(interface_type, resource_class)]
except KeyError:
raise ValueError('No class registered for %s, %s' % (interface_type, resource_class)) | python | def get_session_class(cls, interface_type, resource_class):
try:
return cls._session_classes[(interface_type, resource_class)]
except KeyError:
raise ValueError('No class registered for %s, %s' % (interface_type, resource_class)) | [
"def",
"get_session_class",
"(",
"cls",
",",
"interface_type",
",",
"resource_class",
")",
":",
"try",
":",
"return",
"cls",
".",
"_session_classes",
"[",
"(",
"interface_type",
",",
"resource_class",
")",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"'No class registered for %s, %s'",
"%",
"(",
"interface_type",
",",
"resource_class",
")",
")"
] | Return the session class for a given interface type and resource class.
:type interface_type: constants.InterfaceType
:type resource_class: str
:return: Session | [
"Return",
"the",
"session",
"class",
"for",
"a",
"given",
"interface",
"type",
"and",
"resource",
"class",
"."
] | 9836166b6b57c165fc63a276f87fe81f106a4e26 | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/sessions.py#L42-L52 |
3,174 | pyvisa/pyvisa-sim | pyvisa-sim/sessions.py | Session.register | def register(cls, interface_type, resource_class):
"""Register a session class for a given interface type and resource class.
:type interface_type: constants.InterfaceType
:type resource_class: str
"""
def _internal(python_class):
if (interface_type, resource_class) in cls._session_classes:
logger.warning('%s is already registered in the ResourceManager. '
'Overwriting with %s' % ((interface_type, resource_class), python_class))
python_class.session_type = (interface_type, resource_class)
cls._session_classes[(interface_type, resource_class)] = python_class
return python_class
return _internal | python | def register(cls, interface_type, resource_class):
def _internal(python_class):
if (interface_type, resource_class) in cls._session_classes:
logger.warning('%s is already registered in the ResourceManager. '
'Overwriting with %s' % ((interface_type, resource_class), python_class))
python_class.session_type = (interface_type, resource_class)
cls._session_classes[(interface_type, resource_class)] = python_class
return python_class
return _internal | [
"def",
"register",
"(",
"cls",
",",
"interface_type",
",",
"resource_class",
")",
":",
"def",
"_internal",
"(",
"python_class",
")",
":",
"if",
"(",
"interface_type",
",",
"resource_class",
")",
"in",
"cls",
".",
"_session_classes",
":",
"logger",
".",
"warning",
"(",
"'%s is already registered in the ResourceManager. '",
"'Overwriting with %s'",
"%",
"(",
"(",
"interface_type",
",",
"resource_class",
")",
",",
"python_class",
")",
")",
"python_class",
".",
"session_type",
"=",
"(",
"interface_type",
",",
"resource_class",
")",
"cls",
".",
"_session_classes",
"[",
"(",
"interface_type",
",",
"resource_class",
")",
"]",
"=",
"python_class",
"return",
"python_class",
"return",
"_internal"
] | Register a session class for a given interface type and resource class.
:type interface_type: constants.InterfaceType
:type resource_class: str | [
"Register",
"a",
"session",
"class",
"for",
"a",
"given",
"interface",
"type",
"and",
"resource",
"class",
"."
] | 9836166b6b57c165fc63a276f87fe81f106a4e26 | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/sessions.py#L55-L69 |
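A sketch combining `register` with `get_session_class` from record 3,173 above; the subclass body is elided and the interface/resource pair is illustrative.

```python
# Register a simulated session class, then look it up again.
from pyvisa import constants

@Session.register(constants.InterfaceType.gpib, 'INSTR')
class GPIBInstrSession(Session):
    pass  # real implementations override read/write, etc.

assert Session.get_session_class(
    constants.InterfaceType.gpib, 'INSTR') is GPIBInstrSession
```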
3,175 | pyvisa/pyvisa-sim | pyvisa-sim/component.py | to_bytes | def to_bytes(val):
"""Takes a text message and return a tuple
"""
if val is NoResponse:
return val
val = val.replace('\\r', '\r').replace('\\n', '\n')
return val.encode() | python | def to_bytes(val):
if val is NoResponse:
return val
val = val.replace('\\r', '\r').replace('\\n', '\n')
return val.encode() | [
"def",
"to_bytes",
"(",
"val",
")",
":",
"if",
"val",
"is",
"NoResponse",
":",
"return",
"val",
"val",
"=",
"val",
".",
"replace",
"(",
"'\\\\r'",
",",
"'\\r'",
")",
".",
"replace",
"(",
"'\\\\n'",
",",
"'\\n'",
")",
"return",
"val",
".",
"encode",
"(",
")"
] | Takes a text message and returns it as bytes. | [
"Takes",
"a",
"text",
"message",
"and",
"return",
"a",
"tuple"
] | 9836166b6b57c165fc63a276f87fe81f106a4e26 | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/component.py#L16-L22 |
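A short sketch of the escape handling, assuming `to_bytes` and `NoResponse` are in scope:

```python
# Literal '\\r' / '\\n' sequences (as read from YAML by BaseLoader)
# become real terminator bytes; the NoResponse sentinel passes through.
assert to_bytes('*IDN?\\n') == b'*IDN?\n'
assert to_bytes(NoResponse) is NoResponse
```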
3,176 | pyvisa/pyvisa-sim | pyvisa-sim/component.py | Property.validate_value | def validate_value(self, string_value):
"""Validate that a value match the Property specs.
"""
specs = self.specs
if 'type' in specs:
value = specs['type'](string_value)
else:
value = string_value
if 'min' in specs and value < specs['min']:
raise ValueError
if 'max' in specs and value > specs['max']:
raise ValueError
if 'valid' in specs and value not in specs['valid']:
raise ValueError
return value | python | def validate_value(self, string_value):
specs = self.specs
if 'type' in specs:
value = specs['type'](string_value)
else:
value = string_value
if 'min' in specs and value < specs['min']:
raise ValueError
if 'max' in specs and value > specs['max']:
raise ValueError
if 'valid' in specs and value not in specs['valid']:
raise ValueError
return value | [
"def",
"validate_value",
"(",
"self",
",",
"string_value",
")",
":",
"specs",
"=",
"self",
".",
"specs",
"if",
"'type'",
"in",
"specs",
":",
"value",
"=",
"specs",
"[",
"'type'",
"]",
"(",
"string_value",
")",
"else",
":",
"value",
"=",
"string_value",
"if",
"'min'",
"in",
"specs",
"and",
"value",
"<",
"specs",
"[",
"'min'",
"]",
":",
"raise",
"ValueError",
"if",
"'max'",
"in",
"specs",
"and",
"value",
">",
"specs",
"[",
"'max'",
"]",
":",
"raise",
"ValueError",
"if",
"'valid'",
"in",
"specs",
"and",
"value",
"not",
"in",
"specs",
"[",
"'valid'",
"]",
":",
"raise",
"ValueError",
"return",
"value"
] | Validate that a value matches the Property specs. | [
"Validate",
"that",
"a",
"value",
"match",
"the",
"Property",
"specs",
"."
] | 9836166b6b57c165fc63a276f87fe81f106a4e26 | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/component.py#L77-L92 |
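A sketch of the constraint checks. The `Property(name, default, specs)` constructor is not shown in these records, so that signature is an assumption (it mirrors the `ChannelProperty(self, name, default_value, specs)` call in record 3,170 above).

```python
# validate_value() applies the optional 'type', 'min', 'max' and
# 'valid' constraints; a violation raises a bare ValueError.
prop = Property('frequency', '100.0',          # hypothetical signature
                {'type': float, 'min': 1.0, 'max': 1e6})
assert prop.validate_value('250.5') == 250.5
try:
    prop.validate_value('0.1')                 # below specs['min']
except ValueError:
    print('rejected: below minimum')
```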
3,177 | pyvisa/pyvisa-sim | pyvisa-sim/component.py | Component._match_dialog | def _match_dialog(self, query, dialogues=None):
"""Tries to match in dialogues
:param query: message tuple
:type query: Tuple[bytes]
:return: response if found or None
:rtype: Tuple[bytes] | None
"""
if dialogues is None:
dialogues = self._dialogues
# Try to match in the queries
if query in dialogues:
response = dialogues[query]
logger.debug('Found response in queries: %s' % repr(response))
return response | python | def _match_dialog(self, query, dialogues=None):
if dialogues is None:
dialogues = self._dialogues
# Try to match in the queries
if query in dialogues:
response = dialogues[query]
logger.debug('Found response in queries: %s' % repr(response))
return response | [
"def",
"_match_dialog",
"(",
"self",
",",
"query",
",",
"dialogues",
"=",
"None",
")",
":",
"if",
"dialogues",
"is",
"None",
":",
"dialogues",
"=",
"self",
".",
"_dialogues",
"# Try to match in the queries",
"if",
"query",
"in",
"dialogues",
":",
"response",
"=",
"dialogues",
"[",
"query",
"]",
"logger",
".",
"debug",
"(",
"'Found response in queries: %s'",
"%",
"repr",
"(",
"response",
")",
")",
"return",
"response"
] | Tries to match in dialogues
:param query: message tuple
:type query: Tuple[bytes]
:return: response if found or None
:rtype: Tuple[bytes] | None | [
"Tries",
"to",
"match",
"in",
"dialogues"
] | 9836166b6b57c165fc63a276f87fe81f106a4e26 | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/component.py#L158-L174 |
3,178 | pyvisa/pyvisa-sim | pyvisa-sim/component.py | Component._match_getters | def _match_getters(self, query, getters=None):
"""Tries to match in getters
:param query: message tuple
:type query: Tuple[bytes]
:return: response if found or None
:rtype: Tuple[bytes] | None
"""
if getters is None:
getters = self._getters
if query in getters:
name, response = getters[query]
logger.debug('Found response in getter of %s' % name)
response = response.format(self._properties[name].get_value())
return response.encode('utf-8') | python | def _match_getters(self, query, getters=None):
if getters is None:
getters = self._getters
if query in getters:
name, response = getters[query]
logger.debug('Found response in getter of %s' % name)
response = response.format(self._properties[name].get_value())
return response.encode('utf-8') | [
"def",
"_match_getters",
"(",
"self",
",",
"query",
",",
"getters",
"=",
"None",
")",
":",
"if",
"getters",
"is",
"None",
":",
"getters",
"=",
"self",
".",
"_getters",
"if",
"query",
"in",
"getters",
":",
"name",
",",
"response",
"=",
"getters",
"[",
"query",
"]",
"logger",
".",
"debug",
"(",
"'Found response in getter of %s'",
"%",
"name",
")",
"response",
"=",
"response",
".",
"format",
"(",
"self",
".",
"_properties",
"[",
"name",
"]",
".",
"get_value",
"(",
")",
")",
"return",
"response",
".",
"encode",
"(",
"'utf-8'",
")"
] | Tries to match in getters
:param query: message tuple
:type query: Tuple[bytes]
:return: response if found or None
:rtype: Tuple[bytes] | None | [
"Tries",
"to",
"match",
"in",
"getters"
] | 9836166b6b57c165fc63a276f87fe81f106a4e26 | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/component.py#L176-L191 |
3,179 | pyvisa/pyvisa-sim | pyvisa-sim/component.py | Component._match_setters | def _match_setters(self, query):
"""Tries to match in setters
:param query: message tuple
:type query: Tuple[bytes]
:return: response if found or None
:rtype: Tuple[bytes] | None
"""
q = query.decode('utf-8')
for name, parser, response, error_response in self._setters:
try:
value = parser(q)
logger.debug('Found response in setter of %s' % name)
except ValueError:
continue
try:
self._properties[name].set_value(value)
return response
except ValueError:
if isinstance(error_response, bytes):
return error_response
return self.error_response('command_error')
return None | python | def _match_setters(self, query):
q = query.decode('utf-8')
for name, parser, response, error_response in self._setters:
try:
value = parser(q)
logger.debug('Found response in setter of %s' % name)
except ValueError:
continue
try:
self._properties[name].set_value(value)
return response
except ValueError:
if isinstance(error_response, bytes):
return error_response
return self.error_response('command_error')
return None | [
"def",
"_match_setters",
"(",
"self",
",",
"query",
")",
":",
"q",
"=",
"query",
".",
"decode",
"(",
"'utf-8'",
")",
"for",
"name",
",",
"parser",
",",
"response",
",",
"error_response",
"in",
"self",
".",
"_setters",
":",
"try",
":",
"value",
"=",
"parser",
"(",
"q",
")",
"logger",
".",
"debug",
"(",
"'Found response in setter of %s'",
"%",
"name",
")",
"except",
"ValueError",
":",
"continue",
"try",
":",
"self",
".",
"_properties",
"[",
"name",
"]",
".",
"set_value",
"(",
"value",
")",
"return",
"response",
"except",
"ValueError",
":",
"if",
"isinstance",
"(",
"error_response",
",",
"bytes",
")",
":",
"return",
"error_response",
"return",
"self",
".",
"error_response",
"(",
"'command_error'",
")",
"return",
"None"
] | Tries to match in setters
:param query: message tuple
:type query: Tuple[bytes]
:return: response if found or None
:rtype: Tuple[bytes] | None | [
"Tries",
"to",
"match",
"in",
"setters"
] | 9836166b6b57c165fc63a276f87fe81f106a4e26 | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/component.py#L193-L217 |
3,180 | pyvisa/pyvisa-sim | pyvisa-sim/devices.py | Device.add_error_handler | def add_error_handler(self, error_input):
"""Add error handler to the device
"""
if isinstance(error_input, dict):
error_response = error_input.get('response', {})
cerr = error_response.get('command_error', NoResponse)
qerr = error_response.get('query_error', NoResponse)
response_dict = {'command_error': cerr,
'query_error': qerr}
register_list = error_input.get('status_register', [])
for register_dict in register_list:
query = register_dict['q']
register = StatusRegister(register_dict)
self._status_registers[to_bytes(query)] = register
for key in register.keys():
self._error_map[key] = register
queue_list = error_input.get('error_queue', [])
for queue_dict in queue_list:
query = queue_dict['q']
err_queue = ErrorQueue(queue_dict)
self._error_queues[to_bytes(query)] = err_queue
else:
response_dict = {'command_error': error_input,
'query_error': error_input}
for key, value in response_dict.items():
self._error_response[key] = to_bytes(value) | python | def add_error_handler(self, error_input):
if isinstance(error_input, dict):
error_response = error_input.get('response', {})
cerr = error_response.get('command_error', NoResponse)
qerr = error_response.get('query_error', NoResponse)
response_dict = {'command_error': cerr,
'query_error': qerr}
register_list = error_input.get('status_register', [])
for register_dict in register_list:
query = register_dict['q']
register = StatusRegister(register_dict)
self._status_registers[to_bytes(query)] = register
for key in register.keys():
self._error_map[key] = register
queue_list = error_input.get('error_queue', [])
for queue_dict in queue_list:
query = queue_dict['q']
err_queue = ErrorQueue(queue_dict)
self._error_queues[to_bytes(query)] = err_queue
else:
response_dict = {'command_error': error_input,
'query_error': error_input}
for key, value in response_dict.items():
self._error_response[key] = to_bytes(value) | [
"def",
"add_error_handler",
"(",
"self",
",",
"error_input",
")",
":",
"if",
"isinstance",
"(",
"error_input",
",",
"dict",
")",
":",
"error_response",
"=",
"error_input",
".",
"get",
"(",
"'response'",
",",
"{",
"}",
")",
"cerr",
"=",
"error_response",
".",
"get",
"(",
"'command_error'",
",",
"NoResponse",
")",
"qerr",
"=",
"error_response",
".",
"get",
"(",
"'query_error'",
",",
"NoResponse",
")",
"response_dict",
"=",
"{",
"'command_error'",
":",
"cerr",
",",
"'query_error'",
":",
"qerr",
"}",
"register_list",
"=",
"error_input",
".",
"get",
"(",
"'status_register'",
",",
"[",
"]",
")",
"for",
"register_dict",
"in",
"register_list",
":",
"query",
"=",
"register_dict",
"[",
"'q'",
"]",
"register",
"=",
"StatusRegister",
"(",
"register_dict",
")",
"self",
".",
"_status_registers",
"[",
"to_bytes",
"(",
"query",
")",
"]",
"=",
"register",
"for",
"key",
"in",
"register",
".",
"keys",
"(",
")",
":",
"self",
".",
"_error_map",
"[",
"key",
"]",
"=",
"register",
"queue_list",
"=",
"error_input",
".",
"get",
"(",
"'error_queue'",
",",
"[",
"]",
")",
"for",
"queue_dict",
"in",
"queue_list",
":",
"query",
"=",
"queue_dict",
"[",
"'q'",
"]",
"err_queue",
"=",
"ErrorQueue",
"(",
"queue_dict",
")",
"self",
".",
"_error_queues",
"[",
"to_bytes",
"(",
"query",
")",
"]",
"=",
"err_queue",
"else",
":",
"response_dict",
"=",
"{",
"'command_error'",
":",
"error_input",
",",
"'query_error'",
":",
"error_input",
"}",
"for",
"key",
",",
"value",
"in",
"response_dict",
".",
"items",
"(",
")",
":",
"self",
".",
"_error_response",
"[",
"key",
"]",
"=",
"to_bytes",
"(",
"value",
")"
] | Add error handler to the device | [
"Add",
"error",
"handler",
"to",
"the",
"device"
] | 9836166b6b57c165fc63a276f87fe81f106a4e26 | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/devices.py#L156-L189 |
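A sketch of the two accepted shapes of `error_input`. Only the top-level keys (`response`, `status_register`, `error_queue`) and the `q` field are grounded in the code above; the remaining register/queue entry fields are assumptions based on pyvisa-sim's definition-file format, and all values are illustrative.

```python
# Simple form: one string used for both command and query errors.
device.add_error_handler('ERROR')

# Structured form: explicit responses plus a status register and an
# error queue, each keyed by the query that reads it.
device.add_error_handler({
    'response': {'command_error': 'CMD_ERR', 'query_error': 'QRY_ERR'},
    'status_register': [
        {'q': '*ESR?', 'command_error': '32', 'default': '0'},
    ],
    'error_queue': [
        {'q': 'SYST:ERR?', 'default': '0, No error',
         'command_error': '1, Command error'},
    ],
})
```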
3,181 | pyvisa/pyvisa-sim | pyvisa-sim/devices.py | Device.add_eom | def add_eom(self, type_class, query_termination, response_termination):
"""Add default end of message for a given interface type and resource class.
:param type_class: interface type and resource class as strings joined by space
:param query_termination: end of message used in queries.
:param response_termination: end of message used in responses.
"""
interface_type, resource_class = type_class.split(' ')
interface_type = getattr(constants.InterfaceType,
interface_type.lower())
self._eoms[(interface_type,
resource_class)] = (to_bytes(query_termination),
to_bytes(response_termination)) | python | def add_eom(self, type_class, query_termination, response_termination):
interface_type, resource_class = type_class.split(' ')
interface_type = getattr(constants.InterfaceType,
interface_type.lower())
self._eoms[(interface_type,
resource_class)] = (to_bytes(query_termination),
to_bytes(response_termination)) | [
"def",
"add_eom",
"(",
"self",
",",
"type_class",
",",
"query_termination",
",",
"response_termination",
")",
":",
"interface_type",
",",
"resource_class",
"=",
"type_class",
".",
"split",
"(",
"' '",
")",
"interface_type",
"=",
"getattr",
"(",
"constants",
".",
"InterfaceType",
",",
"interface_type",
".",
"lower",
"(",
")",
")",
"self",
".",
"_eoms",
"[",
"(",
"interface_type",
",",
"resource_class",
")",
"]",
"=",
"(",
"to_bytes",
"(",
"query_termination",
")",
",",
"to_bytes",
"(",
"response_termination",
")",
")"
] | Add default end of message for a given interface type and resource class.
:param type_class: interface type and resource class as strings joined by space
:param query_termination: end of message used in queries.
:param response_termination: end of message used in responses. | [
"Add",
"default",
"end",
"of",
"message",
"for",
"a",
"given",
"interface",
"type",
"and",
"resource",
"class",
"."
] | 9836166b6b57c165fc63a276f87fe81f106a4e26 | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/devices.py#L200-L212 |
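A one-call sketch; the first argument is split on a space and the interface type is lowercased to find the matching `pyvisa.constants.InterfaceType` member.

```python
# Queries to this serial-style device must end in CRLF; its responses
# are terminated with LF. The '\\r\\n' escapes become real bytes via
# to_bytes().
device.add_eom('ASRL INSTR', '\\r\\n', '\\n')
```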
3,182 | pyvisa/pyvisa-sim | pyvisa-sim/devices.py | Device.write | def write(self, data):
"""Write data into the device input buffer.
:param data: bytes of length 1
:type data: bytes
"""
logger.debug('Writing into device input buffer: %r' % data)
if not isinstance(data, bytes):
raise TypeError('data must be an instance of bytes')
if len(data) != 1:
msg = 'data must have a length of 1, not %d'
raise ValueError(msg % len(data))
self._input_buffer.extend(data)
l = len(self._query_eom)
if not self._input_buffer.endswith(self._query_eom):
return
try:
message = bytes(self._input_buffer[:-l])
queries = (message.split(self.delimiter) if self.delimiter
else [message])
for query in queries:
response = self._match(query)
eom = self._response_eom
if response is None:
response = self.error_response('command_error')
if response is not NoResponse:
self._output_buffer.extend(response)
self._output_buffer.extend(eom)
finally:
self._input_buffer = bytearray() | python | def write(self, data):
logger.debug('Writing into device input buffer: %r' % data)
if not isinstance(data, bytes):
raise TypeError('data must be an instance of bytes')
if len(data) != 1:
msg = 'data must have a length of 1, not %d'
raise ValueError(msg % len(data))
self._input_buffer.extend(data)
l = len(self._query_eom)
if not self._input_buffer.endswith(self._query_eom):
return
try:
message = bytes(self._input_buffer[:-l])
queries = (message.split(self.delimiter) if self.delimiter
else [message])
for query in queries:
response = self._match(query)
eom = self._response_eom
if response is None:
response = self.error_response('command_error')
if response is not NoResponse:
self._output_buffer.extend(response)
self._output_buffer.extend(eom)
finally:
self._input_buffer = bytearray() | [
"def",
"write",
"(",
"self",
",",
"data",
")",
":",
"logger",
".",
"debug",
"(",
"'Writing into device input buffer: %r'",
"%",
"data",
")",
"if",
"not",
"isinstance",
"(",
"data",
",",
"bytes",
")",
":",
"raise",
"TypeError",
"(",
"'data must be an instance of bytes'",
")",
"if",
"len",
"(",
"data",
")",
"!=",
"1",
":",
"msg",
"=",
"'data must have a length of 1, not %d'",
"raise",
"ValueError",
"(",
"msg",
"%",
"len",
"(",
"data",
")",
")",
"self",
".",
"_input_buffer",
".",
"extend",
"(",
"data",
")",
"l",
"=",
"len",
"(",
"self",
".",
"_query_eom",
")",
"if",
"not",
"self",
".",
"_input_buffer",
".",
"endswith",
"(",
"self",
".",
"_query_eom",
")",
":",
"return",
"try",
":",
"message",
"=",
"bytes",
"(",
"self",
".",
"_input_buffer",
"[",
":",
"-",
"l",
"]",
")",
"queries",
"=",
"(",
"message",
".",
"split",
"(",
"self",
".",
"delimiter",
")",
"if",
"self",
".",
"delimiter",
"else",
"[",
"message",
"]",
")",
"for",
"query",
"in",
"queries",
":",
"response",
"=",
"self",
".",
"_match",
"(",
"query",
")",
"eom",
"=",
"self",
".",
"_response_eom",
"if",
"response",
"is",
"None",
":",
"response",
"=",
"self",
".",
"error_response",
"(",
"'command_error'",
")",
"if",
"response",
"is",
"not",
"NoResponse",
":",
"self",
".",
"_output_buffer",
".",
"extend",
"(",
"response",
")",
"self",
".",
"_output_buffer",
".",
"extend",
"(",
"eom",
")",
"finally",
":",
"self",
".",
"_input_buffer",
"=",
"bytearray",
"(",
")"
] | Write data into the device input buffer.
:param data: bytes of length 1
:type data: bytes | [
"Write",
"data",
"into",
"the",
"device",
"input",
"buffer",
"."
] | 9836166b6b57c165fc63a276f87fe81f106a4e26 | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/devices.py#L214-L250 |
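A sketch of the byte-at-a-time protocol, together with `read` from record 3,183 below; it assumes the device's query EOM is `\n` and that `*IDN?` matches one of its dialogues.

```python
# The session layer feeds write() exactly one byte per call; once the
# query EOM arrives, the response is queued and drained via read().
for b in b'*IDN?\n':
    device.write(bytes([b]))      # single-byte chunks, as required

out = bytearray()
chunk = device.read()
while chunk:                      # read() returns b'' when drained
    out.extend(chunk)
    chunk = device.read()
print(bytes(out))                 # e.g. b'ACME,Model1,1234,1.0\n'
```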
3,183 | pyvisa/pyvisa-sim | pyvisa-sim/devices.py | Device.read | def read(self):
"""Return a single byte from the output buffer
"""
if self._output_buffer:
b, self._output_buffer = (self._output_buffer[0:1],
self._output_buffer[1:])
return b
return b'' | python | def read(self):
if self._output_buffer:
b, self._output_buffer = (self._output_buffer[0:1],
self._output_buffer[1:])
return b
return b'' | [
"def",
"read",
"(",
"self",
")",
":",
"if",
"self",
".",
"_output_buffer",
":",
"b",
",",
"self",
".",
"_output_buffer",
"=",
"(",
"self",
".",
"_output_buffer",
"[",
"0",
":",
"1",
"]",
",",
"self",
".",
"_output_buffer",
"[",
"1",
":",
"]",
")",
"return",
"b",
"return",
"b''"
] | Return a single byte from the output buffer | [
"Return",
"a",
"single",
"byte",
"from",
"the",
"output",
"buffer"
] | 9836166b6b57c165fc63a276f87fe81f106a4e26 | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/devices.py#L252-L260 |
3,184 | pyvisa/pyvisa-sim | pyvisa-sim/devices.py | Device._match | def _match(self, query):
"""Tries to match in dialogues, getters and setters and subcomponents
:param query: message tuple
:type query: Tuple[bytes]
:return: response if found or None
:rtype: Tuple[bytes] | None
"""
response = self._match_dialog(query)
if response is not None:
return response
response = self._match_getters(query)
if response is not None:
return response
response = self._match_registers(query)
if response is not None:
return response
response = self._match_errors_queues(query)
if response is not None:
return response
response = self._match_setters(query)
if response is not None:
return response
if response is None:
for channel in self._channels.values():
response = channel.match(query)
if response:
return response
return None | python | def _match(self, query):
response = self._match_dialog(query)
if response is not None:
return response
response = self._match_getters(query)
if response is not None:
return response
response = self._match_registers(query)
if response is not None:
return response
response = self._match_errors_queues(query)
if response is not None:
return response
response = self._match_setters(query)
if response is not None:
return response
if response is None:
for channel in self._channels.values():
response = channel.match(query)
if response:
return response
return None | [
"def",
"_match",
"(",
"self",
",",
"query",
")",
":",
"response",
"=",
"self",
".",
"_match_dialog",
"(",
"query",
")",
"if",
"response",
"is",
"not",
"None",
":",
"return",
"response",
"response",
"=",
"self",
".",
"_match_getters",
"(",
"query",
")",
"if",
"response",
"is",
"not",
"None",
":",
"return",
"response",
"response",
"=",
"self",
".",
"_match_registers",
"(",
"query",
")",
"if",
"response",
"is",
"not",
"None",
":",
"return",
"response",
"response",
"=",
"self",
".",
"_match_errors_queues",
"(",
"query",
")",
"if",
"response",
"is",
"not",
"None",
":",
"return",
"response",
"response",
"=",
"self",
".",
"_match_setters",
"(",
"query",
")",
"if",
"response",
"is",
"not",
"None",
":",
"return",
"response",
"if",
"response",
"is",
"None",
":",
"for",
"channel",
"in",
"self",
".",
"_channels",
".",
"values",
"(",
")",
":",
"response",
"=",
"channel",
".",
"match",
"(",
"query",
")",
"if",
"response",
":",
"return",
"response",
"return",
"None"
] | Tries to match in dialogues, getters and setters and subcomponents
:param query: message tuple
:type query: Tuple[bytes]
:return: response if found or None
:rtype: Tuple[bytes] | None | [
"Tries",
"to",
"match",
"in",
"dialogues",
"getters",
"and",
"setters",
"and",
"subcomponents"
] | 9836166b6b57c165fc63a276f87fe81f106a4e26 | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/devices.py#L262-L296 |
3,185 | pyvisa/pyvisa-sim | pyvisa-sim/devices.py | Device._match_registers | def _match_registers(self, query):
"""Tries to match in status registers
:param query: message tuple
:type query: Tuple[bytes]
:return: response if found or None
:rtype: Tuple[bytes] | None
"""
if query in self._status_registers:
register = self._status_registers[query]
response = register.value
logger.debug('Found response in status register: %s',
repr(response))
register.clear()
return response | python | def _match_registers(self, query):
if query in self._status_registers:
register = self._status_registers[query]
response = register.value
logger.debug('Found response in status register: %s',
repr(response))
register.clear()
return response | [
"def",
"_match_registers",
"(",
"self",
",",
"query",
")",
":",
"if",
"query",
"in",
"self",
".",
"_status_registers",
":",
"register",
"=",
"self",
".",
"_status_registers",
"[",
"query",
"]",
"response",
"=",
"register",
".",
"value",
"logger",
".",
"debug",
"(",
"'Found response in status register: %s'",
",",
"repr",
"(",
"response",
")",
")",
"register",
".",
"clear",
"(",
")",
"return",
"response"
] | Tries to match in status registers
:param query: message tuple
:type query: Tuple[bytes]
:return: response if found or None
:rtype: Tuple[bytes] | None | [
"Tries",
"to",
"match",
"in",
"status",
"registers"
] | 9836166b6b57c165fc63a276f87fe81f106a4e26 | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/devices.py#L298-L313 |
3,186 | pyvisa/pyvisa-sim | pyvisa-sim/devices.py | Device._match_errors_queues | def _match_errors_queues(self, query):
"""Tries to match in error queues
:param query: message tuple
:type query: Tuple[bytes]
:return: response if found or None
:rtype: Tuple[bytes] | None
"""
if query in self._error_queues:
queue = self._error_queues[query]
response = queue.value
logger.debug('Found response in error queue: %s',
repr(response))
return response | python | def _match_errors_queues(self, query):
if query in self._error_queues:
queue = self._error_queues[query]
response = queue.value
logger.debug('Found response in error queue: %s',
repr(response))
return response | [
"def",
"_match_errors_queues",
"(",
"self",
",",
"query",
")",
":",
"if",
"query",
"in",
"self",
".",
"_error_queues",
":",
"queue",
"=",
"self",
".",
"_error_queues",
"[",
"query",
"]",
"response",
"=",
"queue",
".",
"value",
"logger",
".",
"debug",
"(",
"'Found response in error queue: %s'",
",",
"repr",
"(",
"response",
")",
")",
"return",
"response"
] | Tries to match in error queues
:param query: message tuple
:type query: Tuple[bytes]
:return: response if found or None
:rtype: Tuple[bytes] | None | [
"Tries",
"to",
"match",
"in",
"error",
"queues"
] | 9836166b6b57c165fc63a276f87fe81f106a4e26 | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/devices.py#L315-L329 |
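An illustrative FIFO error queue matching the contract above: reading .value pops the oldest error (or a default when empty). The real pyvisa-sim ErrorQueue API may differ:

from collections import deque

class ErrorQueue(object):
    def __init__(self, default=b'0, No error'):
        self._queue = deque()
        self._default = default

    def append(self, error):
        self._queue.append(error)

    @property
    def value(self):
        # Pop the oldest error; fall back to the "no error" default.
        return self._queue.popleft() if self._queue else self._default

queue = ErrorQueue()
queue.append(b'-113, Undefined header')
assert queue.value == b'-113, Undefined header'
assert queue.value == b'0, No error'   # queue drained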
3,187 | pyvisa/pyvisa-sim | pyvisa-sim/devices.py | Devices.add_device | def add_device(self, resource_name, device):
"""Bind device to resource name
"""
if device.resource_name is not None:
msg = 'The device %r is already assigned to %s'
raise ValueError(msg % (device, device.resource_name))
device.resource_name = resource_name
self._internal[device.resource_name] = device | python | def add_device(self, resource_name, device):
if device.resource_name is not None:
msg = 'The device %r is already assigned to %s'
raise ValueError(msg % (device, device.resource_name))
device.resource_name = resource_name
self._internal[device.resource_name] = device | [
"def",
"add_device",
"(",
"self",
",",
"resource_name",
",",
"device",
")",
":",
"if",
"device",
".",
"resource_name",
"is",
"not",
"None",
":",
"msg",
"=",
"'The device %r is already assigned to %s'",
"raise",
"ValueError",
"(",
"msg",
"%",
"(",
"device",
",",
"device",
".",
"resource_name",
")",
")",
"device",
".",
"resource_name",
"=",
"resource_name",
"self",
".",
"_internal",
"[",
"device",
".",
"resource_name",
"]",
"=",
"device"
] | Bind device to resource name | [
"Bind",
"device",
"to",
"resource",
"name"
] | 9836166b6b57c165fc63a276f87fe81f106a4e26 | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/devices.py#L343-L353 |
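A self-contained sketch of the bind-once guard in add_device, with a stand-in Device class:

class Device(object):
    def __init__(self):
        self.resource_name = None

devices = {}

def add_device(resource_name, device):
    # Refuse to rebind a device that already has a resource name.
    if device.resource_name is not None:
        raise ValueError('The device %r is already assigned to %s'
                         % (device, device.resource_name))
    device.resource_name = resource_name
    devices[resource_name] = device

device = Device()
add_device('GPIB0::8::INSTR', device)
try:
    add_device('GPIB0::9::INSTR', device)   # second bind raises
except ValueError:
    pass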
3,188 | percipient/django-querysetsequence | queryset_sequence/pagination.py | SequenceCursorPagination.get_ordering | def get_ordering(self, *args, **kwargs):
"""Take whatever the expected ordering is and then first order by QuerySet."""
result = super(SequenceCursorPagination, self).get_ordering(*args, **kwargs)
# Because paginate_queryset sets self.ordering after reading it...we
# need to only modify it sometimes. (This allows re-use of the
# paginator, which probably only happens in tests.)
if result[0] != '#':
result = ('#', ) + result
return result | python | def get_ordering(self, *args, **kwargs):
result = super(SequenceCursorPagination, self).get_ordering(*args, **kwargs)
# Because paginate_queryset sets self.ordering after reading it...we
# need to only modify it sometimes. (This allows re-use of the
# paginator, which probably only happens in tests.)
if result[0] != '#':
result = ('#', ) + result
return result | [
"def",
"get_ordering",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"super",
"(",
"SequenceCursorPagination",
",",
"self",
")",
".",
"get_ordering",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# Because paginate_queryset sets self.ordering after reading it...we",
"# need to only modify it sometimes. (This allows re-use of the",
"# paginator, which probably only happens in tests.)",
"if",
"result",
"[",
"0",
"]",
"!=",
"'#'",
":",
"result",
"=",
"(",
"'#'",
",",
")",
"+",
"result",
"return",
"result"
] | Take whatever the expected ordering is and then first order by QuerySet. | [
"Take",
"whatever",
"the",
"expected",
"ordering",
"is",
"and",
"then",
"first",
"order",
"by",
"QuerySet",
"."
] | 7bf324b08af6268821d235c18482847d7bf75eaa | https://github.com/percipient/django-querysetsequence/blob/7bf324b08af6268821d235c18482847d7bf75eaa/queryset_sequence/pagination.py#L155-L165 |
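The '#'-prefixing above in isolation: prepend the QuerySet marker only when it is not already first, so repeated calls are safe (a minimal sketch, not the DRF API):

def with_queryset_ordering(ordering):
    ordering = tuple(ordering)
    return ordering if ordering and ordering[0] == '#' else ('#',) + ordering

assert with_queryset_ordering(('created', 'pk')) == ('#', 'created', 'pk')
assert with_queryset_ordering(('#', 'created')) == ('#', 'created')   # unchanged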
3,189 | percipient/django-querysetsequence | queryset_sequence/pagination.py | SequenceCursorPagination.decode_cursor | def decode_cursor(self, request):
"""
Given a request with a cursor, return a `Cursor` instance.
Differs from the standard CursorPagination to handle a tuple in the
position field.
"""
# Determine if we have a cursor, and if so then decode it.
encoded = request.query_params.get(self.cursor_query_param)
if encoded is None:
return None
try:
querystring = b64decode(encoded.encode('ascii')).decode('ascii')
tokens = urlparse.parse_qs(querystring, keep_blank_values=True)
offset = tokens.get('o', ['0'])[0]
offset = _positive_int(offset, cutoff=self.offset_cutoff)
reverse = tokens.get('r', ['0'])[0]
reverse = bool(int(reverse))
# The difference. Don't get just the 0th entry: get all entries.
position = tokens.get('p', None)
except (TypeError, ValueError):
raise NotFound(self.invalid_cursor_message)
return Cursor(offset=offset, reverse=reverse, position=position) | python | def decode_cursor(self, request):
# Determine if we have a cursor, and if so then decode it.
encoded = request.query_params.get(self.cursor_query_param)
if encoded is None:
return None
try:
querystring = b64decode(encoded.encode('ascii')).decode('ascii')
tokens = urlparse.parse_qs(querystring, keep_blank_values=True)
offset = tokens.get('o', ['0'])[0]
offset = _positive_int(offset, cutoff=self.offset_cutoff)
reverse = tokens.get('r', ['0'])[0]
reverse = bool(int(reverse))
# The difference. Don't get just the 0th entry: get all entries.
position = tokens.get('p', None)
except (TypeError, ValueError):
raise NotFound(self.invalid_cursor_message)
return Cursor(offset=offset, reverse=reverse, position=position) | [
"def",
"decode_cursor",
"(",
"self",
",",
"request",
")",
":",
"# Determine if we have a cursor, and if so then decode it.",
"encoded",
"=",
"request",
".",
"query_params",
".",
"get",
"(",
"self",
".",
"cursor_query_param",
")",
"if",
"encoded",
"is",
"None",
":",
"return",
"None",
"try",
":",
"querystring",
"=",
"b64decode",
"(",
"encoded",
".",
"encode",
"(",
"'ascii'",
")",
")",
".",
"decode",
"(",
"'ascii'",
")",
"tokens",
"=",
"urlparse",
".",
"parse_qs",
"(",
"querystring",
",",
"keep_blank_values",
"=",
"True",
")",
"offset",
"=",
"tokens",
".",
"get",
"(",
"'o'",
",",
"[",
"'0'",
"]",
")",
"[",
"0",
"]",
"offset",
"=",
"_positive_int",
"(",
"offset",
",",
"cutoff",
"=",
"self",
".",
"offset_cutoff",
")",
"reverse",
"=",
"tokens",
".",
"get",
"(",
"'r'",
",",
"[",
"'0'",
"]",
")",
"[",
"0",
"]",
"reverse",
"=",
"bool",
"(",
"int",
"(",
"reverse",
")",
")",
"# The difference. Don't get just the 0th entry: get all entries.",
"position",
"=",
"tokens",
".",
"get",
"(",
"'p'",
",",
"None",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"NotFound",
"(",
"self",
".",
"invalid_cursor_message",
")",
"return",
"Cursor",
"(",
"offset",
"=",
"offset",
",",
"reverse",
"=",
"reverse",
",",
"position",
"=",
"position",
")"
] | Given a request with a cursor, return a `Cursor` instance.
Differs from the standard CursorPagination to handle a tuple in the
position field. | [
"Given",
"a",
"request",
"with",
"a",
"cursor",
"return",
"a",
"Cursor",
"instance",
"."
] | 7bf324b08af6268821d235c18482847d7bf75eaa | https://github.com/percipient/django-querysetsequence/blob/7bf324b08af6268821d235c18482847d7bf75eaa/queryset_sequence/pagination.py#L184-L211 |
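A standalone round trip of the cursor encoding that decode_cursor parses: a base64-wrapped querystring whose 'p' key keeps all values, so composite positions survive. The field names 'o', 'r', and 'p' match the code above:

from base64 import b64decode, b64encode
from urllib.parse import parse_qs, urlencode

encoded = b64encode(urlencode(
    [('o', '0'), ('r', '1'), ('p', '3'), ('p', 'alice')]).encode('ascii'))

tokens = parse_qs(b64decode(encoded).decode('ascii'), keep_blank_values=True)
offset = int(tokens.get('o', ['0'])[0])
reverse = bool(int(tokens.get('r', ['0'])[0]))
position = tokens.get('p', None)   # all values, not just the first

assert (offset, reverse, position) == (0, True, ['3', 'alice'])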
3,190 | percipient/django-querysetsequence | queryset_sequence/__init__.py | multiply_iterables | def multiply_iterables(it1, it2):
"""
Element-wise multiplication of iterables.
"""
assert len(it1) == len(it2),\
"Can not element-wise multiply iterables of different length."
return list(map(mul, it1, it2)) | python | def multiply_iterables(it1, it2):
assert len(it1) == len(it2),\
"Can not element-wise multiply iterables of different length."
return list(map(mul, it1, it2)) | [
"def",
"multiply_iterables",
"(",
"it1",
",",
"it2",
")",
":",
"assert",
"len",
"(",
"it1",
")",
"==",
"len",
"(",
"it2",
")",
",",
"\"Can not element-wise multiply iterables of different length.\"",
"return",
"list",
"(",
"map",
"(",
"mul",
",",
"it1",
",",
"it2",
")",
")"
] | Element-wise multiplication of iterables. | [
"Element",
"-",
"wise",
"multiplication",
"of",
"iterables",
"."
] | 7bf324b08af6268821d235c18482847d7bf75eaa | https://github.com/percipient/django-querysetsequence/blob/7bf324b08af6268821d235c18482847d7bf75eaa/queryset_sequence/__init__.py#L29-L35 |
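Usage sketch, assuming the multiply_iterables definition above is in scope: the comparator multiplies raw field-by-field comparison results by a +1/-1 'reverses' vector, so a -1 flips that field's ordering:

raw_cmps = [0, 1, -1]    # cmp() results per field for two objects
reverses = [1, -1, 1]    # the second field was requested descending
assert multiply_iterables(raw_cmps, reverses) == [0, -1, -1]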
3,191 | percipient/django-querysetsequence | queryset_sequence/__init__.py | ComparatorMixin._generate_comparator | def _generate_comparator(cls, field_names):
"""
Construct a comparator function based on the field names. The comparator
returns the first non-zero comparison value.
Inputs:
field_names (iterable of strings): The field names to sort on.
Returns:
A comparator function.
"""
# Ensure that field names is a list and not a tuple.
field_names = list(field_names)
# For fields that start with a '-', reverse the ordering of the
# comparison.
reverses = [1] * len(field_names)
for i, field_name in enumerate(field_names):
if field_name[0] == '-':
reverses[i] = -1
field_names[i] = field_name[1:]
field_names = [f.replace(LOOKUP_SEP, '.') for f in field_names]
def comparator(i1, i2):
# Get a tuple of values for comparison.
v1 = attrgetter(*field_names)(i1)
v2 = attrgetter(*field_names)(i2)
# If there's only one arg supplied, attrgetter returns a single
# item, directly return the result in this case.
if len(field_names) == 1:
return cls._cmp(v1, v2) * reverses[0]
# Compare each field for the two items, reversing if necessary.
order = multiply_iterables(list(map(cls._cmp, v1, v2)), reverses)
try:
# The first non-zero element.
return next(dropwhile(__not__, order))
except StopIteration:
# Everything was equivalent.
return 0
return comparator | python | def _generate_comparator(cls, field_names):
# Ensure that field names is a list and not a tuple.
field_names = list(field_names)
# For fields that start with a '-', reverse the ordering of the
# comparison.
reverses = [1] * len(field_names)
for i, field_name in enumerate(field_names):
if field_name[0] == '-':
reverses[i] = -1
field_names[i] = field_name[1:]
field_names = [f.replace(LOOKUP_SEP, '.') for f in field_names]
def comparator(i1, i2):
# Get a tuple of values for comparison.
v1 = attrgetter(*field_names)(i1)
v2 = attrgetter(*field_names)(i2)
# If there's only one arg supplied, attrgetter returns a single
# item, directly return the result in this case.
if len(field_names) == 1:
return cls._cmp(v1, v2) * reverses[0]
# Compare each field for the two items, reversing if necessary.
order = multiply_iterables(list(map(cls._cmp, v1, v2)), reverses)
try:
# The first non-zero element.
return next(dropwhile(__not__, order))
except StopIteration:
# Everything was equivalent.
return 0
return comparator | [
"def",
"_generate_comparator",
"(",
"cls",
",",
"field_names",
")",
":",
"# Ensure that field names is a list and not a tuple.",
"field_names",
"=",
"list",
"(",
"field_names",
")",
"# For fields that start with a '-', reverse the ordering of the",
"# comparison.",
"reverses",
"=",
"[",
"1",
"]",
"*",
"len",
"(",
"field_names",
")",
"for",
"i",
",",
"field_name",
"in",
"enumerate",
"(",
"field_names",
")",
":",
"if",
"field_name",
"[",
"0",
"]",
"==",
"'-'",
":",
"reverses",
"[",
"i",
"]",
"=",
"-",
"1",
"field_names",
"[",
"i",
"]",
"=",
"field_name",
"[",
"1",
":",
"]",
"field_names",
"=",
"[",
"f",
".",
"replace",
"(",
"LOOKUP_SEP",
",",
"'.'",
")",
"for",
"f",
"in",
"field_names",
"]",
"def",
"comparator",
"(",
"i1",
",",
"i2",
")",
":",
"# Get a tuple of values for comparison.",
"v1",
"=",
"attrgetter",
"(",
"*",
"field_names",
")",
"(",
"i1",
")",
"v2",
"=",
"attrgetter",
"(",
"*",
"field_names",
")",
"(",
"i2",
")",
"# If there's only one arg supplied, attrgetter returns a single",
"# item, directly return the result in this case.",
"if",
"len",
"(",
"field_names",
")",
"==",
"1",
":",
"return",
"cls",
".",
"_cmp",
"(",
"v1",
",",
"v2",
")",
"*",
"reverses",
"[",
"0",
"]",
"# Compare each field for the two items, reversing if necessary.",
"order",
"=",
"multiply_iterables",
"(",
"list",
"(",
"map",
"(",
"cls",
".",
"_cmp",
",",
"v1",
",",
"v2",
")",
")",
",",
"reverses",
")",
"try",
":",
"# The first non-zero element.",
"return",
"next",
"(",
"dropwhile",
"(",
"__not__",
",",
"order",
")",
")",
"except",
"StopIteration",
":",
"# Everything was equivalent.",
"return",
"0",
"return",
"comparator"
] | Construct a comparator function based on the field names. The comparator
returns the first non-zero comparison value.
Inputs:
field_names (iterable of strings): The field names to sort on.
Returns:
A comparator function. | [
"Construct",
"a",
"comparator",
"function",
"based",
"on",
"the",
"field",
"names",
".",
"The",
"comparator",
"returns",
"the",
"first",
"non",
"-",
"zero",
"comparison",
"value",
"."
] | 7bf324b08af6268821d235c18482847d7bf75eaa | https://github.com/percipient/django-querysetsequence/blob/7bf324b08af6268821d235c18482847d7bf75eaa/queryset_sequence/__init__.py#L83-L127 |
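A usage sketch of the generated comparator over plain objects; the Row class is hypothetical, and the hand-rolled comparator below is equivalent to _generate_comparator(('-name', 'pk')):

from functools import cmp_to_key
from operator import attrgetter

class Row(object):
    def __init__(self, name, pk):
        self.name, self.pk = name, pk

def cmp(a, b):
    return (a > b) - (a < b)

def comparator(i1, i2):
    # '-name' descending, then 'pk' ascending as the tie-breaker.
    for field, sign in (('name', -1), ('pk', 1)):
        order = cmp(attrgetter(field)(i1), attrgetter(field)(i2)) * sign
        if order:
            return order
    return 0

rows = [Row('a', 2), Row('b', 1), Row('a', 1)]
rows.sort(key=cmp_to_key(comparator))
assert [(r.name, r.pk) for r in rows] == [('b', 1), ('a', 1), ('a', 2)]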
3,192 | percipient/django-querysetsequence | queryset_sequence/__init__.py | QuerySetSequence._filter_or_exclude_querysets | def _filter_or_exclude_querysets(self, negate, **kwargs):
"""
Similar to QuerySet._filter_or_exclude, but run over the QuerySets in
the QuerySetSequence instead of over each QuerySet's fields.
"""
# Ensure negate is a boolean.
negate = bool(negate)
for kwarg, value in kwargs.items():
parts = kwarg.split(LOOKUP_SEP)
# Ensure this is being used to filter QuerySets.
if parts[0] != '#':
raise ValueError("Keyword '%s' is not a valid keyword to filter over, "
"it must begin with '#'." % kwarg)
# Don't allow __ multiple times.
if len(parts) > 2:
raise ValueError("Keyword '%s' must not contain multiple "
"lookup seperators." % kwarg)
# The actual lookup is the second part.
try:
lookup = parts[1]
except IndexError:
lookup = 'exact'
# Math operators that all have the same logic.
LOOKUP_TO_OPERATOR = {
'exact': eq,
'iexact': eq,
'gt': gt,
'gte': ge,
'lt': lt,
'lte': le,
}
try:
operator = LOOKUP_TO_OPERATOR[lookup]
# These expect integers, this matches the logic in
# IntegerField.get_prep_value(). (Essentially treat the '#'
# field as an IntegerField.)
if value is not None:
value = int(value)
self._queryset_idxs = filter(lambda i: operator(i, value) != negate, self._queryset_idxs)
continue
except KeyError:
# It wasn't one of the above operators, keep trying.
pass
# Some of these seem to get handled as bytes.
if lookup in ('contains', 'icontains'):
value = six.text_type(value)
self._queryset_idxs = filter(lambda i: (value in six.text_type(i)) != negate, self._queryset_idxs)
elif lookup == 'in':
self._queryset_idxs = filter(lambda i: (i in value) != negate, self._queryset_idxs)
elif lookup in ('startswith', 'istartswith'):
value = six.text_type(value)
self._queryset_idxs = filter(lambda i: six.text_type(i).startswith(value) != negate, self._queryset_idxs)
elif lookup in ('endswith', 'iendswith'):
value = six.text_type(value)
self._queryset_idxs = filter(lambda i: six.text_type(i).endswith(value) != negate, self._queryset_idxs)
elif lookup == 'range':
# Inclusive range (both endpoints included).
start, end = value
self._queryset_idxs = filter(lambda i: (start <= i <= end) != negate, self._queryset_idxs)
else:
# Any other field lookup is not supported, e.g. date, year, month,
# day, week_day, hour, minute, second, isnull, search, regex, and
# iregex.
raise ValueError("Unsupported lookup '%s'" % lookup)
# Convert back to a list on Python 3.
self._queryset_idxs = list(self._queryset_idxs)
# Finally, keep only the QuerySets we care about!
self._querysets = [self._querysets[i] for i in self._queryset_idxs] | python | def _filter_or_exclude_querysets(self, negate, **kwargs):
# Ensure negate is a boolean.
negate = bool(negate)
for kwarg, value in kwargs.items():
parts = kwarg.split(LOOKUP_SEP)
# Ensure this is being used to filter QuerySets.
if parts[0] != '#':
raise ValueError("Keyword '%s' is not a valid keyword to filter over, "
"it must begin with '#'." % kwarg)
# Don't allow __ multiple times.
if len(parts) > 2:
raise ValueError("Keyword '%s' must not contain multiple "
"lookup seperators." % kwarg)
# The actual lookup is the second part.
try:
lookup = parts[1]
except IndexError:
lookup = 'exact'
# Math operators that all have the same logic.
LOOKUP_TO_OPERATOR = {
'exact': eq,
'iexact': eq,
'gt': gt,
'gte': ge,
'lt': lt,
'lte': le,
}
try:
operator = LOOKUP_TO_OPERATOR[lookup]
# These expect integers, this matches the logic in
# IntegerField.get_prep_value(). (Essentially treat the '#'
# field as an IntegerField.)
if value is not None:
value = int(value)
self._queryset_idxs = filter(lambda i: operator(i, value) != negate, self._queryset_idxs)
continue
except KeyError:
# It wasn't one of the above operators, keep trying.
pass
# Some of these seem to get handled as bytes.
if lookup in ('contains', 'icontains'):
value = six.text_type(value)
self._queryset_idxs = filter(lambda i: (value in six.text_type(i)) != negate, self._queryset_idxs)
elif lookup == 'in':
self._queryset_idxs = filter(lambda i: (i in value) != negate, self._queryset_idxs)
elif lookup in ('startswith', 'istartswith'):
value = six.text_type(value)
self._queryset_idxs = filter(lambda i: six.text_type(i).startswith(value) != negate, self._queryset_idxs)
elif lookup in ('endswith', 'iendswith'):
value = six.text_type(value)
self._queryset_idxs = filter(lambda i: six.text_type(i).endswith(value) != negate, self._queryset_idxs)
elif lookup == 'range':
# Inclusive range (both endpoints included).
start, end = value
self._queryset_idxs = filter(lambda i: (start <= i <= end) != negate, self._queryset_idxs)
else:
# Any other field lookup is not supported, e.g. date, year, month,
# day, week_day, hour, minute, second, isnull, search, regex, and
# iregex.
raise ValueError("Unsupported lookup '%s'" % lookup)
# Convert back to a list on Python 3.
self._queryset_idxs = list(self._queryset_idxs)
# Finally, keep only the QuerySets we care about!
self._querysets = [self._querysets[i] for i in self._queryset_idxs] | [
"def",
"_filter_or_exclude_querysets",
"(",
"self",
",",
"negate",
",",
"*",
"*",
"kwargs",
")",
":",
"# Ensure negate is a boolean.",
"negate",
"=",
"bool",
"(",
"negate",
")",
"for",
"kwarg",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"parts",
"=",
"kwarg",
".",
"split",
"(",
"LOOKUP_SEP",
")",
"# Ensure this is being used to filter QuerySets.",
"if",
"parts",
"[",
"0",
"]",
"!=",
"'#'",
":",
"raise",
"ValueError",
"(",
"\"Keyword '%s' is not a valid keyword to filter over, \"",
"\"it must begin with '#'.\"",
"%",
"kwarg",
")",
"# Don't allow __ multiple times.",
"if",
"len",
"(",
"parts",
")",
">",
"2",
":",
"raise",
"ValueError",
"(",
"\"Keyword '%s' must not contain multiple \"",
"\"lookup seperators.\"",
"%",
"kwarg",
")",
"# The actual lookup is the second part.",
"try",
":",
"lookup",
"=",
"parts",
"[",
"1",
"]",
"except",
"IndexError",
":",
"lookup",
"=",
"'exact'",
"# Math operators that all have the same logic.",
"LOOKUP_TO_OPERATOR",
"=",
"{",
"'exact'",
":",
"eq",
",",
"'iexact'",
":",
"eq",
",",
"'gt'",
":",
"gt",
",",
"'gte'",
":",
"ge",
",",
"'lt'",
":",
"lt",
",",
"'lte'",
":",
"le",
",",
"}",
"try",
":",
"operator",
"=",
"LOOKUP_TO_OPERATOR",
"[",
"lookup",
"]",
"# These expect integers, this matches the logic in",
"# IntegerField.get_prep_value(). (Essentially treat the '#'",
"# field as an IntegerField.)",
"if",
"value",
"is",
"not",
"None",
":",
"value",
"=",
"int",
"(",
"value",
")",
"self",
".",
"_queryset_idxs",
"=",
"filter",
"(",
"lambda",
"i",
":",
"operator",
"(",
"i",
",",
"value",
")",
"!=",
"negate",
",",
"self",
".",
"_queryset_idxs",
")",
"continue",
"except",
"KeyError",
":",
"# It wasn't one of the above operators, keep trying.",
"pass",
"# Some of these seem to get handled as bytes.",
"if",
"lookup",
"in",
"(",
"'contains'",
",",
"'icontains'",
")",
":",
"value",
"=",
"six",
".",
"text_type",
"(",
"value",
")",
"self",
".",
"_queryset_idxs",
"=",
"filter",
"(",
"lambda",
"i",
":",
"(",
"value",
"in",
"six",
".",
"text_type",
"(",
"i",
")",
")",
"!=",
"negate",
",",
"self",
".",
"_queryset_idxs",
")",
"elif",
"lookup",
"==",
"'in'",
":",
"self",
".",
"_queryset_idxs",
"=",
"filter",
"(",
"lambda",
"i",
":",
"(",
"i",
"in",
"value",
")",
"!=",
"negate",
",",
"self",
".",
"_queryset_idxs",
")",
"elif",
"lookup",
"in",
"(",
"'startswith'",
",",
"'istartswith'",
")",
":",
"value",
"=",
"six",
".",
"text_type",
"(",
"value",
")",
"self",
".",
"_queryset_idxs",
"=",
"filter",
"(",
"lambda",
"i",
":",
"six",
".",
"text_type",
"(",
"i",
")",
".",
"startswith",
"(",
"value",
")",
"!=",
"negate",
",",
"self",
".",
"_queryset_idxs",
")",
"elif",
"lookup",
"in",
"(",
"'endswith'",
",",
"'iendswith'",
")",
":",
"value",
"=",
"six",
".",
"text_type",
"(",
"value",
")",
"self",
".",
"_queryset_idxs",
"=",
"filter",
"(",
"lambda",
"i",
":",
"six",
".",
"text_type",
"(",
"i",
")",
".",
"endswith",
"(",
"value",
")",
"!=",
"negate",
",",
"self",
".",
"_queryset_idxs",
")",
"elif",
"lookup",
"==",
"'range'",
":",
"# Inclusive include.",
"start",
",",
"end",
"=",
"value",
"self",
".",
"_queryset_idxs",
"=",
"filter",
"(",
"lambda",
"i",
":",
"(",
"start",
"<=",
"i",
"<=",
"end",
")",
"!=",
"negate",
",",
"self",
".",
"_queryset_idxs",
")",
"else",
":",
"# Any other field lookup is not supported, e.g. date, year, month,",
"# day, week_day, hour, minute, second, isnull, search, regex, and",
"# iregex.",
"raise",
"ValueError",
"(",
"\"Unsupported lookup '%s'\"",
"%",
"lookup",
")",
"# Convert back to a list on Python 3.",
"self",
".",
"_queryset_idxs",
"=",
"list",
"(",
"self",
".",
"_queryset_idxs",
")",
"# Finally, keep only the QuerySets we care about!",
"self",
".",
"_querysets",
"=",
"[",
"self",
".",
"_querysets",
"[",
"i",
"]",
"for",
"i",
"in",
"self",
".",
"_queryset_idxs",
"]"
] | Similar to QuerySet._filter_or_exclude, but run over the QuerySets in
the QuerySetSequence instead of over each QuerySet's fields. | [
"Similar",
"to",
"QuerySet",
".",
"_filter_or_exclude",
"but",
"run",
"over",
"the",
"QuerySets",
"in",
"the",
"QuerySetSequence",
"instead",
"of",
"over",
"each",
"QuerySet",
"s",
"fields",
"."
] | 7bf324b08af6268821d235c18482847d7bf75eaa | https://github.com/percipient/django-querysetsequence/blob/7bf324b08af6268821d235c18482847d7bf75eaa/queryset_sequence/__init__.py#L445-L527 |
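A sketch of what the '#' lookups above do, reproduced for the 'lt' case only and run over plain lists standing in for QuerySets:

from operator import lt

querysets = [['a1', 'a2'], ['b1'], ['c1', 'c2', 'c3']]
idxs = range(len(querysets))

# Equivalent of .filter(**{'#__lt': 2}): keep QuerySets whose index is < 2.
negate = False
idxs = [i for i in idxs if lt(i, 2) != negate]
assert [querysets[i] for i in idxs] == [['a1', 'a2'], ['b1']]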
3,193 | cognitect/transit-python | transit/rolling_cache.py | RollingCache.decode | def decode(self, name, as_map_key=False):
"""Always returns the name"""
if is_cache_key(name) and (name in self.key_to_value):
return self.key_to_value[name]
return self.encache(name) if is_cacheable(name, as_map_key) else name | python | def decode(self, name, as_map_key=False):
if is_cache_key(name) and (name in self.key_to_value):
return self.key_to_value[name]
return self.encache(name) if is_cacheable(name, as_map_key) else name | [
"def",
"decode",
"(",
"self",
",",
"name",
",",
"as_map_key",
"=",
"False",
")",
":",
"if",
"is_cache_key",
"(",
"name",
")",
"and",
"(",
"name",
"in",
"self",
".",
"key_to_value",
")",
":",
"return",
"self",
".",
"key_to_value",
"[",
"name",
"]",
"return",
"self",
".",
"encache",
"(",
"name",
")",
"if",
"is_cacheable",
"(",
"name",
",",
"as_map_key",
")",
"else",
"name"
] | Returns the value for a cache key, otherwise the name | [
"Returns",
"the",
"value",
"for",
"a",
"cache",
"key",
"otherwise",
"the",
"name"
] | 59e27e7d322feaa3a7e8eb3de06ae96d8adb614f | https://github.com/cognitect/transit-python/blob/59e27e7d322feaa3a7e8eb3de06ae96d8adb614f/transit/rolling_cache.py#L61-L65 |
3,194 | cognitect/transit-python | transit/rolling_cache.py | RollingCache.encode | def encode(self, name, as_map_key=False):
"""Returns the name the first time and the key after that"""
if name in self.key_to_value:
return self.key_to_value[name]
return self.encache(name) if is_cacheable(name, as_map_key) else name | python | def encode(self, name, as_map_key=False):
if name in self.key_to_value:
return self.key_to_value[name]
return self.encache(name) if is_cacheable(name, as_map_key) else name | [
"def",
"encode",
"(",
"self",
",",
"name",
",",
"as_map_key",
"=",
"False",
")",
":",
"if",
"name",
"in",
"self",
".",
"key_to_value",
":",
"return",
"self",
".",
"key_to_value",
"[",
"name",
"]",
"return",
"self",
".",
"encache",
"(",
"name",
")",
"if",
"is_cacheable",
"(",
"name",
",",
"as_map_key",
")",
"else",
"name"
] | Returns the name the first time and the key after that | [
"Returns",
"the",
"name",
"the",
"first",
"time",
"and",
"the",
"key",
"after",
"that"
] | 59e27e7d322feaa3a7e8eb3de06ae96d8adb614f | https://github.com/cognitect/transit-python/blob/59e27e7d322feaa3a7e8eb3de06ae96d8adb614f/transit/rolling_cache.py#L67-L71 |
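A toy illustration of the caching contract shared by encode and decode: the first encode of a cacheable string returns the string itself (and records a key), later encodes return the short key. This is simplified - the real RollingCache also checks is_cacheable/is_cache_key and rolls over when full - and the '^0' key format is assumed from Transit's conventions:

class TinyCache(object):
    def __init__(self):
        self.key_to_value = {}
        self._n = 0

    def encache(self, name):
        # Record a key for later hits, but emit the full name this time.
        self.key_to_value[name] = '^%d' % self._n
        self._n += 1
        return name

    def encode(self, name):
        if name in self.key_to_value:
            return self.key_to_value[name]
        return self.encache(name)

cache = TinyCache()
assert cache.encode('~:very-long-keyword') == '~:very-long-keyword'
assert cache.encode('~:very-long-keyword') == '^0'   # cached on reuse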
3,195 | cognitect/transit-python | transit/sosjson.py | read_chunk | def read_chunk(stream):
"""Ignore whitespace outside of strings. If we hit a string, read it in
its entirety.
"""
chunk = stream.read(1)
while chunk in SKIP:
chunk = stream.read(1)
if chunk == "\"":
chunk += stream.read(1)
while not chunk.endswith("\""):
if chunk[-1] == ESCAPE:
chunk += stream.read(2)
else:
chunk += stream.read(1)
return chunk | python | def read_chunk(stream):
chunk = stream.read(1)
while chunk in SKIP:
chunk = stream.read(1)
if chunk == "\"":
chunk += stream.read(1)
while not chunk.endswith("\""):
if chunk[-1] == ESCAPE:
chunk += stream.read(2)
else:
chunk += stream.read(1)
return chunk | [
"def",
"read_chunk",
"(",
"stream",
")",
":",
"chunk",
"=",
"stream",
".",
"read",
"(",
"1",
")",
"while",
"chunk",
"in",
"SKIP",
":",
"chunk",
"=",
"stream",
".",
"read",
"(",
"1",
")",
"if",
"chunk",
"==",
"\"\\\"\"",
":",
"chunk",
"+=",
"stream",
".",
"read",
"(",
"1",
")",
"while",
"not",
"chunk",
".",
"endswith",
"(",
"\"\\\"\"",
")",
":",
"if",
"chunk",
"[",
"-",
"1",
"]",
"==",
"ESCAPE",
":",
"chunk",
"+=",
"stream",
".",
"read",
"(",
"2",
")",
"else",
":",
"chunk",
"+=",
"stream",
".",
"read",
"(",
"1",
")",
"return",
"chunk"
] | Ignore whitespace outside of strings. If we hit a string, read it in
its entirety. | [
"Ignore",
"whitespace",
"outside",
"of",
"strings",
".",
"If",
"we",
"hit",
"a",
"string",
"read",
"it",
"in",
"its",
"entirety",
"."
] | 59e27e7d322feaa3a7e8eb3de06ae96d8adb614f | https://github.com/cognitect/transit-python/blob/59e27e7d322feaa3a7e8eb3de06ae96d8adb614f/transit/sosjson.py#L25-L39 |
3,196 | cognitect/transit-python | transit/sosjson.py | yield_json | def yield_json(stream):
"""Uses array and object delimiter counts for balancing.
"""
buff = u""
arr_count = 0
obj_count = 0
while True:
buff += read_chunk(stream)
# If we finish parsing all objs or arrays, yield a finished JSON
# entity.
if buff.endswith('{'):
obj_count += 1
if buff.endswith('['):
arr_count += 1
if buff.endswith(']'):
arr_count -= 1
if obj_count == arr_count == 0:
json_item = copy(buff)
buff = u""
yield json_item
if buff.endswith('}'):
obj_count -= 1
if obj_count == arr_count == 0:
json_item = copy(buff)
buff = u""
yield json_item | python | def yield_json(stream):
buff = u""
arr_count = 0
obj_count = 0
while True:
buff += read_chunk(stream)
# If we finish parsing all objs or arrays, yield a finished JSON
# entity.
if buff.endswith('{'):
obj_count += 1
if buff.endswith('['):
arr_count += 1
if buff.endswith(']'):
arr_count -= 1
if obj_count == arr_count == 0:
json_item = copy(buff)
buff = u""
yield json_item
if buff.endswith('}'):
obj_count -= 1
if obj_count == arr_count == 0:
json_item = copy(buff)
buff = u""
yield json_item | [
"def",
"yield_json",
"(",
"stream",
")",
":",
"buff",
"=",
"u\"\"",
"arr_count",
"=",
"0",
"obj_count",
"=",
"0",
"while",
"True",
":",
"buff",
"+=",
"read_chunk",
"(",
"stream",
")",
"# If we finish parsing all objs or arrays, yield a finished JSON",
"# entity.",
"if",
"buff",
".",
"endswith",
"(",
"'{'",
")",
":",
"obj_count",
"+=",
"1",
"if",
"buff",
".",
"endswith",
"(",
"'['",
")",
":",
"arr_count",
"+=",
"1",
"if",
"buff",
".",
"endswith",
"(",
"']'",
")",
":",
"arr_count",
"-=",
"1",
"if",
"obj_count",
"==",
"arr_count",
"==",
"0",
":",
"json_item",
"=",
"copy",
"(",
"buff",
")",
"buff",
"=",
"u\"\"",
"yield",
"json_item",
"if",
"buff",
".",
"endswith",
"(",
"'}'",
")",
":",
"obj_count",
"-=",
"1",
"if",
"obj_count",
"==",
"arr_count",
"==",
"0",
":",
"json_item",
"=",
"copy",
"(",
"buff",
")",
"buff",
"=",
"u\"\"",
"yield",
"json_item"
] | Uses array and object delimiter counts for balancing. | [
"Uses",
"array",
"and",
"object",
"delimiter",
"counts",
"for",
"balancing",
"."
] | 59e27e7d322feaa3a7e8eb3de06ae96d8adb614f | https://github.com/cognitect/transit-python/blob/59e27e7d322feaa3a7e8eb3de06ae96d8adb614f/transit/sosjson.py#L51-L77 |
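A usage sketch of the splitter, assuming yield_json and read_chunk above are in scope: two top-level JSON values on one stream come out as separate strings once their brackets balance. Note that whatever characters SKIP defines (at least whitespace, per read_chunk) are consumed between tokens and so are absent from the yielded text:

import io

gen = yield_json(io.StringIO('{"a": [1, 2]} ["x"]'))
first = next(gen)    # yielded when '}' rebalances the object
second = next(gen)   # yielded when ']' rebalances the array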
3,197 | cognitect/transit-python | transit/writer.py | Marshaler.are_stringable_keys | def are_stringable_keys(self, m):
"""Test whether the keys within a map are stringable - a simple map,
that can be optimized and whose keys can be cached
"""
for x in m.keys():
if len(self.handlers[x].tag(x)) != 1:
return False
return True | python | def are_stringable_keys(self, m):
for x in m.keys():
if len(self.handlers[x].tag(x)) != 1:
return False
return True | [
"def",
"are_stringable_keys",
"(",
"self",
",",
"m",
")",
":",
"for",
"x",
"in",
"m",
".",
"keys",
"(",
")",
":",
"if",
"len",
"(",
"self",
".",
"handlers",
"[",
"x",
"]",
".",
"tag",
"(",
"x",
")",
")",
"!=",
"1",
":",
"return",
"False",
"return",
"True"
] | Test whether the keys within a map are stringable - a simple map
that can be optimized and whose keys can be cached | [
"Test",
"whether",
"the",
"keys",
"within",
"a",
"map",
"are",
"stringable",
"-",
"a",
"simple",
"map",
"that",
"can",
"be",
"optimized",
"and",
"whose",
"keys",
"can",
"be",
"cached"
] | 59e27e7d322feaa3a7e8eb3de06ae96d8adb614f | https://github.com/cognitect/transit-python/blob/59e27e7d322feaa3a7e8eb3de06ae96d8adb614f/transit/writer.py#L112-L119 |
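An illustration of the single-character-tag rule with a toy tag lookup (the real handler registry dispatches on the object, not just its type): scalar handlers use one-character tags such as 's' or 'i', so a map whose keys all produce one-character tags can be emitted as a plain string-keyed map:

tags = {str: 's', int: 'i', frozenset: 'set'}

def are_stringable_keys(m):
    return all(len(tags[type(k)]) == 1 for k in m)

assert are_stringable_keys({'a': 1, 2: 'b'})          # 's' and 'i'
assert not are_stringable_keys({frozenset([1]): 1})   # 'set' is three chars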
3,198 | cognitect/transit-python | transit/writer.py | Marshaler.marshal_top | def marshal_top(self, obj, cache=None):
"""Given a complete object that needs to be marshaled into Transit
data, and optionally a cache, dispatch accordingly, and flush the data
directly into the IO stream.
"""
if not cache:
cache = RollingCache()
handler = self.handlers[obj]
tag = handler.tag(obj)
if tag:
if len(tag) == 1:
self.marshal(TaggedValue(QUOTE, obj), False, cache)
else:
self.marshal(obj, False, cache)
self.flush()
else:
raise AssertionError("Handler must provide a non-nil tag: " + str(handler)) | python | def marshal_top(self, obj, cache=None):
if not cache:
cache = RollingCache()
handler = self.handlers[obj]
tag = handler.tag(obj)
if tag:
if len(tag) == 1:
self.marshal(TaggedValue(QUOTE, obj), False, cache)
else:
self.marshal(obj, False, cache)
self.flush()
else:
raise AssertionError("Handler must provide a non-nil tag: " + str(handler)) | [
"def",
"marshal_top",
"(",
"self",
",",
"obj",
",",
"cache",
"=",
"None",
")",
":",
"if",
"not",
"cache",
":",
"cache",
"=",
"RollingCache",
"(",
")",
"handler",
"=",
"self",
".",
"handlers",
"[",
"obj",
"]",
"tag",
"=",
"handler",
".",
"tag",
"(",
"obj",
")",
"if",
"tag",
":",
"if",
"len",
"(",
"tag",
")",
"==",
"1",
":",
"self",
".",
"marshal",
"(",
"TaggedValue",
"(",
"QUOTE",
",",
"obj",
")",
",",
"False",
",",
"cache",
")",
"else",
":",
"self",
".",
"marshal",
"(",
"obj",
",",
"False",
",",
"cache",
")",
"self",
".",
"flush",
"(",
")",
"else",
":",
"raise",
"AssertionError",
"(",
"\"Handler must provide a non-nil tag: \"",
"+",
"str",
"(",
"handler",
")",
")"
] | Given a complete object that needs to be marshaled into Transit
data, and optionally a cache, dispatch accordingly, and flush the data
directly into the IO stream. | [
"Given",
"a",
"complete",
"object",
"that",
"needs",
"to",
"be",
"marshaled",
"into",
"Transit",
"data",
"and",
"optionally",
"a",
"cache",
"dispatch",
"accordingly",
"and",
"flush",
"the",
"data",
"directly",
"into",
"the",
"IO",
"stream",
"."
] | 59e27e7d322feaa3a7e8eb3de06ae96d8adb614f | https://github.com/cognitect/transit-python/blob/59e27e7d322feaa3a7e8eb3de06ae96d8adb614f/transit/writer.py#L209-L227 |
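Why marshal_top quotes one-character tags: a bare scalar is not a valid top-level Transit value, so it is wrapped in a quote tagged value first. A toy sketch with a stand-in tag lookup (transit-python's QUOTE constant is assumed to render as "~#'"):

def top_level_rep(obj):
    tag = 's' if isinstance(obj, str) else 'map'   # stands in for handler.tag
    if len(tag) == 1:
        return ["~#'", obj]   # scalar: emit quoted
    return obj                # composite: emit as-is

assert top_level_rep('hello') == ["~#'", 'hello']
assert top_level_rep({'a': 1}) == {'a': 1}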
3,199 | cognitect/transit-python | transit/writer.py | Marshaler.dispatch_map | def dispatch_map(self, rep, as_map_key, cache):
"""Used to determine and dipatch the writing of a map - a simple
map with strings as keys, or a complex map, whose keys are also
compound types.
"""
if self.are_stringable_keys(rep):
return self.emit_map(rep, as_map_key, cache)
return self.emit_cmap(rep, as_map_key, cache) | python | def dispatch_map(self, rep, as_map_key, cache):
if self.are_stringable_keys(rep):
return self.emit_map(rep, as_map_key, cache)
return self.emit_cmap(rep, as_map_key, cache) | [
"def",
"dispatch_map",
"(",
"self",
",",
"rep",
",",
"as_map_key",
",",
"cache",
")",
":",
"if",
"self",
".",
"are_stringable_keys",
"(",
"rep",
")",
":",
"return",
"self",
".",
"emit_map",
"(",
"rep",
",",
"as_map_key",
",",
"cache",
")",
"return",
"self",
".",
"emit_cmap",
"(",
"rep",
",",
"as_map_key",
",",
"cache",
")"
] | Used to determine and dispatch the writing of a map - a simple
map with strings as keys, or a complex map whose keys are also
compound types. | [
"Used",
"to",
"determine",
"and",
"dipatch",
"the",
"writing",
"of",
"a",
"map",
"-",
"a",
"simple",
"map",
"with",
"strings",
"as",
"keys",
"or",
"a",
"complex",
"map",
"whose",
"keys",
"are",
"also",
"compound",
"types",
"."
] | 59e27e7d322feaa3a7e8eb3de06ae96d8adb614f | https://github.com/cognitect/transit-python/blob/59e27e7d322feaa3a7e8eb3de06ae96d8adb614f/transit/writer.py#L229-L236 |
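The dispatch rule in practice, with a toy predicate standing in for are_stringable_keys: string-keyed maps take the fast emit_map path, while maps with composite keys need the cmap representation:

def dispatch(rep):
    simple = all(isinstance(k, str) for k in rep)
    return 'emit_map' if simple else 'emit_cmap'

assert dispatch({'a': 1}) == 'emit_map'
assert dispatch({(1, 2): 'point'}) == 'emit_cmap'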