repository_name
stringlengths 5
67
| func_path_in_repository
stringlengths 4
234
| func_name
stringlengths 0
314
| whole_func_string
stringlengths 52
3.87M
| language
stringclasses 6
values | func_code_string
stringlengths 39
1.84M
| func_code_tokens
listlengths 15
672k
| func_documentation_string
stringlengths 1
47.2k
| func_documentation_tokens
listlengths 1
3.92k
| split_name
stringclasses 1
value | func_code_url
stringlengths 85
339
|
---|---|---|---|---|---|---|---|---|---|---|
gccxml/pygccxml
|
pygccxml/parser/declarations_cache.py
|
configuration_signature
|
def configuration_signature(config):
    """
    Return a signature for a configuration (xml_generator_configuration_t)
    object.

    This can then be used as a key in the cache.
    This method must take into account anything about
    a configuration that could cause the declarations generated
    to be different between runs.
    """
    sig = hashlib.sha1()
    if isinstance(config, cxx_parsers_cfg.xml_generator_configuration_t):
        # Generator binary and working directory both influence the XML
        # output, as do the compiler flags. The original code performed
        # this exact isinstance() check twice; a single branch suffices.
        # Encoding is utf-8 throughout (one call previously relied on the
        # implicit default, which is the same but inconsistent).
        sig.update(str(config.xml_generator_path).encode('utf-8'))
        sig.update(str(config.working_directory).encode('utf-8'))
        sig.update(str(config.cflags).encode('utf-8'))
    # Include paths and (un)defined symbols change the preprocessed input,
    # hence the generated declarations, so they are part of the signature.
    for path in config.include_paths:
        sig.update(str(path).encode('utf-8'))
    for symbol in config.define_symbols:
        sig.update(str(symbol).encode('utf-8'))
    for symbol in config.undefine_symbols:
        sig.update(str(symbol).encode('utf-8'))
    return sig.hexdigest()
|
python
|
def configuration_signature(config):
sig = hashlib.sha1()
if isinstance(config, cxx_parsers_cfg.xml_generator_configuration_t):
sig.update(str(config.xml_generator_path).encode())
sig.update(str(config.working_directory).encode('utf-8'))
if isinstance(config, cxx_parsers_cfg.xml_generator_configuration_t):
sig.update(str(config.cflags).encode('utf-8'))
for p in config.include_paths:
sig.update(str(p).encode('utf-8'))
for s in config.define_symbols:
sig.update(str(s).encode('utf-8'))
for u in config.undefine_symbols:
sig.update(str(u).encode('utf-8'))
return sig.hexdigest()
|
[
"def",
"configuration_signature",
"(",
"config",
")",
":",
"sig",
"=",
"hashlib",
".",
"sha1",
"(",
")",
"if",
"isinstance",
"(",
"config",
",",
"cxx_parsers_cfg",
".",
"xml_generator_configuration_t",
")",
":",
"sig",
".",
"update",
"(",
"str",
"(",
"config",
".",
"xml_generator_path",
")",
".",
"encode",
"(",
")",
")",
"sig",
".",
"update",
"(",
"str",
"(",
"config",
".",
"working_directory",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"if",
"isinstance",
"(",
"config",
",",
"cxx_parsers_cfg",
".",
"xml_generator_configuration_t",
")",
":",
"sig",
".",
"update",
"(",
"str",
"(",
"config",
".",
"cflags",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"for",
"p",
"in",
"config",
".",
"include_paths",
":",
"sig",
".",
"update",
"(",
"str",
"(",
"p",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"for",
"s",
"in",
"config",
".",
"define_symbols",
":",
"sig",
".",
"update",
"(",
"str",
"(",
"s",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"for",
"u",
"in",
"config",
".",
"undefine_symbols",
":",
"sig",
".",
"update",
"(",
"str",
"(",
"u",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"return",
"sig",
".",
"hexdigest",
"(",
")"
] |
Return a signature for a configuration (xml_generator_configuration_t)
object.
This can then be used as a key in the cache.
This method must take into account anything about
a configuration that could cause the declarations generated
to be different between runs.
|
[
"Return",
"a",
"signature",
"for",
"a",
"configuration",
"(",
"xml_generator_configuration_t",
")",
"object",
"."
] |
train
|
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/parser/declarations_cache.py#L37-L61
|
gccxml/pygccxml
|
pygccxml/parser/declarations_cache.py
|
file_cache_t.__load
|
def __load(file_name):
    """
    Load the pickled cache from ``file_name`` and return the cache dict.

    A nonexistent file is created empty and an empty cache is returned.
    A corrupt or unreadable cache file is truncated and an empty cache is
    returned instead of propagating the unpickling error.

    :raises RuntimeError: if ``file_name`` exists but is not a regular file.
    """
    if os.path.exists(file_name) and not os.path.isfile(file_name):
        raise RuntimeError(
            'Cache should be initialized with valid full file name')
    if not os.path.exists(file_name):
        # First run: create an empty cache file so later opens succeed.
        open(file_name, 'w+b').close()
        return {}
    # The context manager closes the file exactly once on every path;
    # the original closed it explicitly in the except branch and then
    # again in the finally clause.
    with open(file_name, 'rb') as cache_file_obj:
        try:
            file_cache_t.logger.info('Loading cache file "%s".', file_name)
            start_time = timeit.default_timer()
            cache = pickle.load(cache_file_obj)
            file_cache_t.logger.debug(
                "Cache file has been loaded in %.1f secs",
                (timeit.default_timer() - start_time))
            file_cache_t.logger.debug(
                "Found cache in file: [%s] entries: %s",
                file_name, len(list(cache.keys())))
        except (pickle.UnpicklingError, AttributeError, EOFError,
                ImportError, IndexError) as error:
            file_cache_t.logger.exception(
                "Error occurred while reading cache file: %s",
                error)
            file_cache_t.logger.info(
                "Invalid cache file: [%s] Regenerating.",
                file_name)
            open(file_name, 'w+b').close()  # Create empty file
            cache = {}  # Empty cache
    return cache
|
python
|
def __load(file_name):
if os.path.exists(file_name) and not os.path.isfile(file_name):
raise RuntimeError(
'Cache should be initialized with valid full file name')
if not os.path.exists(file_name):
open(file_name, 'w+b').close()
return {}
cache_file_obj = open(file_name, 'rb')
try:
file_cache_t.logger.info('Loading cache file "%s".', file_name)
start_time = timeit.default_timer()
cache = pickle.load(cache_file_obj)
file_cache_t.logger.debug(
"Cache file has been loaded in %.1f secs",
(timeit.default_timer() - start_time))
file_cache_t.logger.debug(
"Found cache in file: [%s] entries: %s",
file_name, len(list(cache.keys())))
except (pickle.UnpicklingError, AttributeError, EOFError,
ImportError, IndexError) as error:
file_cache_t.logger.exception(
"Error occurred while reading cache file: %s",
error)
cache_file_obj.close()
file_cache_t.logger.info(
"Invalid cache file: [%s] Regenerating.",
file_name)
open(file_name, 'w+b').close()
cache = {}
finally:
cache_file_obj.close()
return cache
|
[
"def",
"__load",
"(",
"file_name",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"file_name",
")",
"and",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"file_name",
")",
":",
"raise",
"RuntimeError",
"(",
"'Cache should be initialized with valid full file name'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"file_name",
")",
":",
"open",
"(",
"file_name",
",",
"'w+b'",
")",
".",
"close",
"(",
")",
"return",
"{",
"}",
"cache_file_obj",
"=",
"open",
"(",
"file_name",
",",
"'rb'",
")",
"try",
":",
"file_cache_t",
".",
"logger",
".",
"info",
"(",
"'Loading cache file \"%s\".'",
",",
"file_name",
")",
"start_time",
"=",
"timeit",
".",
"default_timer",
"(",
")",
"cache",
"=",
"pickle",
".",
"load",
"(",
"cache_file_obj",
")",
"file_cache_t",
".",
"logger",
".",
"debug",
"(",
"\"Cache file has been loaded in %.1f secs\"",
",",
"(",
"timeit",
".",
"default_timer",
"(",
")",
"-",
"start_time",
")",
")",
"file_cache_t",
".",
"logger",
".",
"debug",
"(",
"\"Found cache in file: [%s] entries: %s\"",
",",
"file_name",
",",
"len",
"(",
"list",
"(",
"cache",
".",
"keys",
"(",
")",
")",
")",
")",
"except",
"(",
"pickle",
".",
"UnpicklingError",
",",
"AttributeError",
",",
"EOFError",
",",
"ImportError",
",",
"IndexError",
")",
"as",
"error",
":",
"file_cache_t",
".",
"logger",
".",
"exception",
"(",
"\"Error occurred while reading cache file: %s\"",
",",
"error",
")",
"cache_file_obj",
".",
"close",
"(",
")",
"file_cache_t",
".",
"logger",
".",
"info",
"(",
"\"Invalid cache file: [%s] Regenerating.\"",
",",
"file_name",
")",
"open",
"(",
"file_name",
",",
"'w+b'",
")",
".",
"close",
"(",
")",
"# Create empty file",
"cache",
"=",
"{",
"}",
"# Empty cache",
"finally",
":",
"cache_file_obj",
".",
"close",
"(",
")",
"return",
"cache"
] |
Load pickled cache from file and return the object.
|
[
"Load",
"pickled",
"cache",
"from",
"file",
"and",
"return",
"the",
"object",
"."
] |
train
|
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/parser/declarations_cache.py#L179-L212
|
gccxml/pygccxml
|
pygccxml/parser/declarations_cache.py
|
file_cache_t.update
|
def update(self, source_file, configuration, declarations, included_files):
    """
    Update a cached record with the current key and value contents.

    Builds a full record (source/config signatures, included files and
    their signatures, declarations) and stores it, marking the cache as
    needing a flush.
    """
    record = record_t(
        source_signature=file_signature(source_file),
        config_signature=configuration_signature(configuration),
        included_files=included_files,
        included_files_signature=list(
            map(
                file_signature,
                included_files)),
        declarations=declarations)
    # Switched over to holding full record in cache so we don't have
    # to keep creating records in the next method.
    # Mark the hit before inserting: the original computed record.key()
    # twice and performed two dict lookups for no benefit.
    record.was_hit = True
    self.__cache[record.key()] = record
    self.__needs_flushed = True
|
python
|
def update(self, source_file, configuration, declarations, included_files):
record = record_t(
source_signature=file_signature(source_file),
config_signature=configuration_signature(configuration),
included_files=included_files,
included_files_signature=list(
map(
file_signature,
included_files)),
declarations=declarations)
self.__cache[record.key()] = record
self.__cache[record.key()].was_hit = True
self.__needs_flushed = True
|
[
"def",
"update",
"(",
"self",
",",
"source_file",
",",
"configuration",
",",
"declarations",
",",
"included_files",
")",
":",
"record",
"=",
"record_t",
"(",
"source_signature",
"=",
"file_signature",
"(",
"source_file",
")",
",",
"config_signature",
"=",
"configuration_signature",
"(",
"configuration",
")",
",",
"included_files",
"=",
"included_files",
",",
"included_files_signature",
"=",
"list",
"(",
"map",
"(",
"file_signature",
",",
"included_files",
")",
")",
",",
"declarations",
"=",
"declarations",
")",
"# Switched over to holding full record in cache so we don't have",
"# to keep creating records in the next method.",
"self",
".",
"__cache",
"[",
"record",
".",
"key",
"(",
")",
"]",
"=",
"record",
"self",
".",
"__cache",
"[",
"record",
".",
"key",
"(",
")",
"]",
".",
"was_hit",
"=",
"True",
"self",
".",
"__needs_flushed",
"=",
"True"
] |
Update a cached record with the current key and value contents.
|
[
"Update",
"a",
"cached",
"record",
"with",
"the",
"current",
"key",
"and",
"value",
"contents",
"."
] |
train
|
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/parser/declarations_cache.py#L234-L250
|
gccxml/pygccxml
|
pygccxml/parser/declarations_cache.py
|
file_cache_t.cached_value
|
def cached_value(self, source_file, configuration):
    """
    Attempt to lookup the cached declarations for the given file and
    configuration.

    Returns None if declaration not found or signature check fails.
    """
    key = record_t.create_key(source_file, configuration)
    record = self.__cache.get(key)
    if record is None:
        return None
    if not self.__is_valid_signature(record):
        # some file has been changed; drop the stale entry
        del self.__cache[key]
        return None
    record.was_hit = True  # Record cache hit
    return record.declarations
|
python
|
def cached_value(self, source_file, configuration):
key = record_t.create_key(source_file, configuration)
if key not in self.__cache:
return None
record = self.__cache[key]
if self.__is_valid_signature(record):
record.was_hit = True
return record.declarations
del self.__cache[key]
return None
|
[
"def",
"cached_value",
"(",
"self",
",",
"source_file",
",",
"configuration",
")",
":",
"key",
"=",
"record_t",
".",
"create_key",
"(",
"source_file",
",",
"configuration",
")",
"if",
"key",
"not",
"in",
"self",
".",
"__cache",
":",
"return",
"None",
"record",
"=",
"self",
".",
"__cache",
"[",
"key",
"]",
"if",
"self",
".",
"__is_valid_signature",
"(",
"record",
")",
":",
"record",
".",
"was_hit",
"=",
"True",
"# Record cache hit",
"return",
"record",
".",
"declarations",
"# some file has been changed",
"del",
"self",
".",
"__cache",
"[",
"key",
"]",
"return",
"None"
] |
Attempt to lookup the cached declarations for the given file and
configuration.
Returns None if declaration not found or signature check fails.
|
[
"Attempt",
"to",
"lookup",
"the",
"cached",
"declarations",
"for",
"the",
"given",
"file",
"and",
"configuration",
"."
] |
train
|
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/parser/declarations_cache.py#L252-L271
|
gccxml/pygccxml
|
pygccxml/parser/linker.py
|
linker_t.instance
|
def instance(self, inst):
    """
    Called by __parse_xml_file in source_reader.
    """
    self.__inst = inst
    # Only declarations with a real source location need their file name
    # normalized through the files map; guard clauses keep access cheap.
    if not isinstance(inst, declarations.declaration_t):
        return
    location = inst.location
    if location is not None and location.file_name != '':
        location.file_name = self.__files[location.file_name]
|
python
|
def instance(self, inst):
self.__inst = inst
if isinstance(inst, declarations.declaration_t) and \
inst.location is not None and \
inst.location.file_name != '':
inst.location.file_name = self.__files[inst.location.file_name]
|
[
"def",
"instance",
"(",
"self",
",",
"inst",
")",
":",
"self",
".",
"__inst",
"=",
"inst",
"# use inst, to reduce attribute access time",
"if",
"isinstance",
"(",
"inst",
",",
"declarations",
".",
"declaration_t",
")",
"and",
"inst",
".",
"location",
"is",
"not",
"None",
"and",
"inst",
".",
"location",
".",
"file_name",
"!=",
"''",
":",
"inst",
".",
"location",
".",
"file_name",
"=",
"self",
".",
"__files",
"[",
"inst",
".",
"location",
".",
"file_name",
"]"
] |
Called by __parse_xml_file in source_reader.
|
[
"Called",
"by",
"__parse_xml_file",
"in",
"source_reader",
"."
] |
train
|
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/parser/linker.py#L35-L47
|
gccxml/pygccxml
|
release_utils/utils.py
|
find_version
|
def find_version(file_path):
    """
    Find the version of pygccxml.

    Used by setup.py and the sphinx's conf.py.
    Inspired by https://packaging.python.org/single_source_version/

    Args:
        file_path (str): path to the file containing the version.
    """
    version_file = os.path.join(
        os.path.dirname(__file__), os.path.normpath(file_path))
    with io.open(version_file, encoding="utf8") as fp:
        content = fp.read()
    match = re.search(
        r"^__version__ = ['\"]([^'\"]*)['\"]", content, re.M)
    if not match:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
|
python
|
def find_version(file_path):
with io.open(
os.path.join(
os.path.dirname(__file__),
os.path.normpath(file_path)),
encoding="utf8") as fp:
content = fp.read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
content, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
|
[
"def",
"find_version",
"(",
"file_path",
")",
":",
"with",
"io",
".",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"os",
".",
"path",
".",
"normpath",
"(",
"file_path",
")",
")",
",",
"encoding",
"=",
"\"utf8\"",
")",
"as",
"fp",
":",
"content",
"=",
"fp",
".",
"read",
"(",
")",
"version_match",
"=",
"re",
".",
"search",
"(",
"r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"",
",",
"content",
",",
"re",
".",
"M",
")",
"if",
"version_match",
":",
"return",
"version_match",
".",
"group",
"(",
"1",
")",
"raise",
"RuntimeError",
"(",
"\"Unable to find version string.\"",
")"
] |
Find the version of pygccxml.
Used by setup.py and the sphinx's conf.py.
Inspired by https://packaging.python.org/single_source_version/
Args:
file_path (str): path to the file containing the version.
|
[
"Find",
"the",
"version",
"of",
"pygccxml",
"."
] |
train
|
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/release_utils/utils.py#L12-L34
|
gccxml/pygccxml
|
pygccxml/parser/scanner.py
|
scanner_t.__read_byte_size
|
def __read_byte_size(decl, attrs):
    """Using duck typing to set the size instead of in constructor"""
    # The XML attribute reports the size in bits; store bytes.
    bits = attrs.get(XML_AN_SIZE, 0)
    decl.byte_size = int(bits) / 8
|
python
|
def __read_byte_size(decl, attrs):
size = attrs.get(XML_AN_SIZE, 0)
decl.byte_size = int(size) / 8
|
[
"def",
"__read_byte_size",
"(",
"decl",
",",
"attrs",
")",
":",
"size",
"=",
"attrs",
".",
"get",
"(",
"XML_AN_SIZE",
",",
"0",
")",
"# Make sure the size is in bytes instead of bits",
"decl",
".",
"byte_size",
"=",
"int",
"(",
"size",
")",
"/",
"8"
] |
Using duck typing to set the size instead of in constructor
|
[
"Using",
"duck",
"typing",
"to",
"set",
"the",
"size",
"instead",
"of",
"in",
"constructor"
] |
train
|
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/parser/scanner.py#L338-L342
|
gccxml/pygccxml
|
pygccxml/parser/scanner.py
|
scanner_t.__read_byte_offset
|
def __read_byte_offset(decl, attrs):
    """Using duck typing to set the offset instead of in constructor"""
    # The XML attribute reports the offset in bits; store bytes.
    bits = attrs.get(XML_AN_OFFSET, 0)
    decl.byte_offset = int(bits) / 8
|
python
|
def __read_byte_offset(decl, attrs):
offset = attrs.get(XML_AN_OFFSET, 0)
decl.byte_offset = int(offset) / 8
|
[
"def",
"__read_byte_offset",
"(",
"decl",
",",
"attrs",
")",
":",
"offset",
"=",
"attrs",
".",
"get",
"(",
"XML_AN_OFFSET",
",",
"0",
")",
"# Make sure the size is in bytes instead of bits",
"decl",
".",
"byte_offset",
"=",
"int",
"(",
"offset",
")",
"/",
"8"
] |
Using duck typing to set the offset instead of in constructor
|
[
"Using",
"duck",
"typing",
"to",
"set",
"the",
"offset",
"instead",
"of",
"in",
"constructor"
] |
train
|
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/parser/scanner.py#L345-L349
|
gccxml/pygccxml
|
pygccxml/parser/scanner.py
|
scanner_t.__read_byte_align
|
def __read_byte_align(decl, attrs):
    """Using duck typing to set the alignment"""
    # The XML attribute reports the alignment in bits; store bytes.
    bits = attrs.get(XML_AN_ALIGN, 0)
    decl.byte_align = int(bits) / 8
|
python
|
def __read_byte_align(decl, attrs):
align = attrs.get(XML_AN_ALIGN, 0)
decl.byte_align = int(align) / 8
|
[
"def",
"__read_byte_align",
"(",
"decl",
",",
"attrs",
")",
":",
"align",
"=",
"attrs",
".",
"get",
"(",
"XML_AN_ALIGN",
",",
"0",
")",
"# Make sure the size is in bytes instead of bits",
"decl",
".",
"byte_align",
"=",
"int",
"(",
"align",
")",
"/",
"8"
] |
Using duck typing to set the alignment
|
[
"Using",
"duck",
"typing",
"to",
"set",
"the",
"alignment"
] |
train
|
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/parser/scanner.py#L352-L356
|
gccxml/pygccxml
|
pygccxml/parser/declarations_joiner.py
|
bind_aliases
|
def bind_aliases(decls):
    """
    This function binds between class and it's typedefs.

    :param decls: list of all declarations
    :rtype: None
    """
    seen_classes = set()
    for typedef in decls:
        if not isinstance(typedef, declarations.typedef_t):
            continue
        naked_type = declarations.remove_alias(typedef.decl_type)
        if not isinstance(naked_type, declarations.declarated_t):
            continue
        cls = naked_type.declaration
        if not isinstance(cls, declarations.class_types):
            continue
        if id(cls) not in seen_classes:
            # First typedef seen for this class: reset stale aliases.
            seen_classes.add(id(cls))
            del cls.aliases[:]
        cls.aliases.append(typedef)
|
python
|
def bind_aliases(decls):
visited = set()
typedefs = [
decl for decl in decls if isinstance(decl, declarations.typedef_t)]
for decl in typedefs:
type_ = declarations.remove_alias(decl.decl_type)
if not isinstance(type_, declarations.declarated_t):
continue
cls_inst = type_.declaration
if not isinstance(cls_inst, declarations.class_types):
continue
if id(cls_inst) not in visited:
visited.add(id(cls_inst))
del cls_inst.aliases[:]
cls_inst.aliases.append(decl)
|
[
"def",
"bind_aliases",
"(",
"decls",
")",
":",
"visited",
"=",
"set",
"(",
")",
"typedefs",
"=",
"[",
"decl",
"for",
"decl",
"in",
"decls",
"if",
"isinstance",
"(",
"decl",
",",
"declarations",
".",
"typedef_t",
")",
"]",
"for",
"decl",
"in",
"typedefs",
":",
"type_",
"=",
"declarations",
".",
"remove_alias",
"(",
"decl",
".",
"decl_type",
")",
"if",
"not",
"isinstance",
"(",
"type_",
",",
"declarations",
".",
"declarated_t",
")",
":",
"continue",
"cls_inst",
"=",
"type_",
".",
"declaration",
"if",
"not",
"isinstance",
"(",
"cls_inst",
",",
"declarations",
".",
"class_types",
")",
":",
"continue",
"if",
"id",
"(",
"cls_inst",
")",
"not",
"in",
"visited",
":",
"visited",
".",
"add",
"(",
"id",
"(",
"cls_inst",
")",
")",
"del",
"cls_inst",
".",
"aliases",
"[",
":",
"]",
"cls_inst",
".",
"aliases",
".",
"append",
"(",
"decl",
")"
] |
This function binds between class and it's typedefs.
:param decls: list of all declarations
:rtype: None
|
[
"This",
"function",
"binds",
"between",
"class",
"and",
"it",
"s",
"typedefs",
"."
] |
train
|
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/parser/declarations_joiner.py#L9-L32
|
d11wtq/dockerpty
|
features/environment.py
|
before_all
|
def before_all(ctx):
    """
    Pulls down busybox:latest before anything is tested.
    """
    client = get_client()
    ctx.client = client
    try:
        # If the image is already present locally, inspect succeeds and
        # no pull is needed.
        client.inspect_image(IMAGE)
    except NotFound:
        client.pull(IMAGE)
|
python
|
def before_all(ctx):
ctx.client = get_client()
try:
ctx.client.inspect_image(IMAGE)
except NotFound:
ctx.client.pull(IMAGE)
|
[
"def",
"before_all",
"(",
"ctx",
")",
":",
"ctx",
".",
"client",
"=",
"get_client",
"(",
")",
"try",
":",
"ctx",
".",
"client",
".",
"inspect_image",
"(",
"IMAGE",
")",
"except",
"NotFound",
":",
"ctx",
".",
"client",
".",
"pull",
"(",
"IMAGE",
")"
] |
Pulls down busybox:latest before anything is tested.
|
[
"Pulls",
"down",
"busybox",
":",
"latest",
"before",
"anything",
"is",
"tested",
"."
] |
train
|
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/features/environment.py#L25-L34
|
d11wtq/dockerpty
|
features/environment.py
|
after_scenario
|
def after_scenario(ctx, scenario):
    """
    Cleans up docker containers used as test fixtures after test completes.

    Cleanup is deliberately best-effort: any error from the docker client
    is ignored so a failed teardown never masks the scenario result.
    """
    if hasattr(ctx, 'container') and hasattr(ctx, 'client'):
        try:
            ctx.client.remove_container(ctx.container, force=True)
        except Exception:
            # A bare ``except:`` would also swallow KeyboardInterrupt and
            # SystemExit; catching Exception keeps the best-effort intent
            # without hiding interpreter-exit signals.
            pass
|
python
|
def after_scenario(ctx, scenario):
if hasattr(ctx, 'container') and hasattr(ctx, 'client'):
try:
ctx.client.remove_container(ctx.container, force=True)
except:
pass
|
[
"def",
"after_scenario",
"(",
"ctx",
",",
"scenario",
")",
":",
"if",
"hasattr",
"(",
"ctx",
",",
"'container'",
")",
"and",
"hasattr",
"(",
"ctx",
",",
"'client'",
")",
":",
"try",
":",
"ctx",
".",
"client",
".",
"remove_container",
"(",
"ctx",
".",
"container",
",",
"force",
"=",
"True",
")",
"except",
":",
"pass"
] |
Cleans up docker containers used as test fixtures after test completes.
|
[
"Cleans",
"up",
"docker",
"containers",
"used",
"as",
"test",
"fixtures",
"after",
"test",
"completes",
"."
] |
train
|
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/features/environment.py#L37-L46
|
d11wtq/dockerpty
|
dockerpty/io.py
|
set_blocking
|
def set_blocking(fd, blocking=True):
    """
    Set the given file-descriptor blocking or non-blocking.

    Returns the original blocking status.
    """
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    was_blocking = not bool(flags & os.O_NONBLOCK)
    if blocking:
        flags &= ~os.O_NONBLOCK
    else:
        flags |= os.O_NONBLOCK
    fcntl.fcntl(fd, fcntl.F_SETFL, flags)
    return was_blocking
|
python
|
def set_blocking(fd, blocking=True):
old_flag = fcntl.fcntl(fd, fcntl.F_GETFL)
if blocking:
new_flag = old_flag & ~ os.O_NONBLOCK
else:
new_flag = old_flag | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, new_flag)
return not bool(old_flag & os.O_NONBLOCK)
|
[
"def",
"set_blocking",
"(",
"fd",
",",
"blocking",
"=",
"True",
")",
":",
"old_flag",
"=",
"fcntl",
".",
"fcntl",
"(",
"fd",
",",
"fcntl",
".",
"F_GETFL",
")",
"if",
"blocking",
":",
"new_flag",
"=",
"old_flag",
"&",
"~",
"os",
".",
"O_NONBLOCK",
"else",
":",
"new_flag",
"=",
"old_flag",
"|",
"os",
".",
"O_NONBLOCK",
"fcntl",
".",
"fcntl",
"(",
"fd",
",",
"fcntl",
".",
"F_SETFL",
",",
"new_flag",
")",
"return",
"not",
"bool",
"(",
"old_flag",
"&",
"os",
".",
"O_NONBLOCK",
")"
] |
Set the given file-descriptor blocking or non-blocking.
Returns the original blocking status.
|
[
"Set",
"the",
"given",
"file",
"-",
"descriptor",
"blocking",
"or",
"non",
"-",
"blocking",
"."
] |
train
|
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/dockerpty/io.py#L25-L41
|
d11wtq/dockerpty
|
dockerpty/io.py
|
select
|
def select(read_streams, write_streams, timeout=0):
    """
    Select the streams from `read_streams` that are ready for reading, and
    streams from `write_streams` ready for writing.

    Uses `select.select()` internally but only returns two lists of ready streams.
    """
    try:
        readable, writable, _ = builtin_select.select(
            read_streams,
            write_streams,
            [],
            timeout,
        )
        return readable, writable
    except builtin_select.error as e:
        # POSIX signals interrupt select()
        no = e.errno if six.PY3 else e[0]
        if no != errno.EINTR:
            raise e
        return ([], [])
|
python
|
def select(read_streams, write_streams, timeout=0):
exception_streams = []
try:
return builtin_select.select(
read_streams,
write_streams,
exception_streams,
timeout,
)[0:2]
except builtin_select.error as e:
no = e.errno if six.PY3 else e[0]
if no == errno.EINTR:
return ([], [])
else:
raise e
|
[
"def",
"select",
"(",
"read_streams",
",",
"write_streams",
",",
"timeout",
"=",
"0",
")",
":",
"exception_streams",
"=",
"[",
"]",
"try",
":",
"return",
"builtin_select",
".",
"select",
"(",
"read_streams",
",",
"write_streams",
",",
"exception_streams",
",",
"timeout",
",",
")",
"[",
"0",
":",
"2",
"]",
"except",
"builtin_select",
".",
"error",
"as",
"e",
":",
"# POSIX signals interrupt select()",
"no",
"=",
"e",
".",
"errno",
"if",
"six",
".",
"PY3",
"else",
"e",
"[",
"0",
"]",
"if",
"no",
"==",
"errno",
".",
"EINTR",
":",
"return",
"(",
"[",
"]",
",",
"[",
"]",
")",
"else",
":",
"raise",
"e"
] |
Select the streams from `read_streams` that are ready for reading, and
streams from `write_streams` ready for writing.
Uses `select.select()` internally but only returns two lists of ready streams.
|
[
"Select",
"the",
"streams",
"from",
"read_streams",
"that",
"are",
"ready",
"for",
"reading",
"and",
"streams",
"from",
"write_streams",
"ready",
"for",
"writing",
"."
] |
train
|
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/dockerpty/io.py#L44-L67
|
d11wtq/dockerpty
|
dockerpty/io.py
|
Stream.read
|
def read(self, n=4096):
    """
    Return `n` bytes of data from the Stream, or None at end of stream.
    """
    while True:
        try:
            fd = self.fd
            # Sockets expose recv(); plain file descriptors go through
            # os.read().
            if hasattr(fd, 'recv'):
                return fd.recv(n)
            return os.read(fd.fileno(), n)
        except EnvironmentError as e:
            # Retry on recoverable errors (e.g. EINTR); re-raise others.
            if e.errno not in Stream.ERRNO_RECOVERABLE:
                raise e
|
python
|
def read(self, n=4096):
while True:
try:
if hasattr(self.fd, 'recv'):
return self.fd.recv(n)
return os.read(self.fd.fileno(), n)
except EnvironmentError as e:
if e.errno not in Stream.ERRNO_RECOVERABLE:
raise e
|
[
"def",
"read",
"(",
"self",
",",
"n",
"=",
"4096",
")",
":",
"while",
"True",
":",
"try",
":",
"if",
"hasattr",
"(",
"self",
".",
"fd",
",",
"'recv'",
")",
":",
"return",
"self",
".",
"fd",
".",
"recv",
"(",
"n",
")",
"return",
"os",
".",
"read",
"(",
"self",
".",
"fd",
".",
"fileno",
"(",
")",
",",
"n",
")",
"except",
"EnvironmentError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"not",
"in",
"Stream",
".",
"ERRNO_RECOVERABLE",
":",
"raise",
"e"
] |
Return `n` bytes of data from the Stream, or None at end of stream.
|
[
"Return",
"n",
"bytes",
"of",
"data",
"from",
"the",
"Stream",
"or",
"None",
"at",
"end",
"of",
"stream",
"."
] |
train
|
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/dockerpty/io.py#L112-L124
|
d11wtq/dockerpty
|
dockerpty/io.py
|
Stream.write
|
def write(self, data):
    """
    Write `data` to the Stream. Not all data may be written right away.
    Use select to find when the stream is writeable, and call do_write()
    to flush the internal buffer.
    """
    if data:
        # Queue the payload and opportunistically flush what we can now.
        self.buffer += data
        self.do_write()
        return len(data)
    return None
|
python
|
def write(self, data):
if not data:
return None
self.buffer += data
self.do_write()
return len(data)
|
[
"def",
"write",
"(",
"self",
",",
"data",
")",
":",
"if",
"not",
"data",
":",
"return",
"None",
"self",
".",
"buffer",
"+=",
"data",
"self",
".",
"do_write",
"(",
")",
"return",
"len",
"(",
"data",
")"
] |
Write `data` to the Stream. Not all data may be written right away.
Use select to find when the stream is writeable, and call do_write()
to flush the internal buffer.
|
[
"Write",
"data",
"to",
"the",
"Stream",
".",
"Not",
"all",
"data",
"may",
"be",
"written",
"right",
"away",
".",
"Use",
"select",
"to",
"find",
"when",
"the",
"stream",
"is",
"writeable",
"and",
"call",
"do_write",
"()",
"to",
"flush",
"the",
"internal",
"buffer",
"."
] |
train
|
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/dockerpty/io.py#L127-L140
|
d11wtq/dockerpty
|
dockerpty/io.py
|
Stream.do_write
|
def do_write(self):
    """
    Flushes as much pending data from the internal write buffer as possible.
    """
    while True:
        try:
            fd = self.fd
            # Sockets expose send(); plain descriptors use os.write().
            if hasattr(fd, 'send'):
                written = fd.send(self.buffer)
            else:
                written = os.write(fd.fileno(), self.buffer)
            self.buffer = self.buffer[written:]
            # try to close after writes if a close was requested
            if self.close_requested and not self.buffer:
                self.close()
            return written
        except EnvironmentError as e:
            # Retry on recoverable errors; re-raise anything else.
            if e.errno not in Stream.ERRNO_RECOVERABLE:
                raise e
|
python
|
def do_write(self):
while True:
try:
written = 0
if hasattr(self.fd, 'send'):
written = self.fd.send(self.buffer)
else:
written = os.write(self.fd.fileno(), self.buffer)
self.buffer = self.buffer[written:]
if self.close_requested and len(self.buffer) == 0:
self.close()
return written
except EnvironmentError as e:
if e.errno not in Stream.ERRNO_RECOVERABLE:
raise e
|
[
"def",
"do_write",
"(",
"self",
")",
":",
"while",
"True",
":",
"try",
":",
"written",
"=",
"0",
"if",
"hasattr",
"(",
"self",
".",
"fd",
",",
"'send'",
")",
":",
"written",
"=",
"self",
".",
"fd",
".",
"send",
"(",
"self",
".",
"buffer",
")",
"else",
":",
"written",
"=",
"os",
".",
"write",
"(",
"self",
".",
"fd",
".",
"fileno",
"(",
")",
",",
"self",
".",
"buffer",
")",
"self",
".",
"buffer",
"=",
"self",
".",
"buffer",
"[",
"written",
":",
"]",
"# try to close after writes if a close was requested",
"if",
"self",
".",
"close_requested",
"and",
"len",
"(",
"self",
".",
"buffer",
")",
"==",
"0",
":",
"self",
".",
"close",
"(",
")",
"return",
"written",
"except",
"EnvironmentError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"not",
"in",
"Stream",
".",
"ERRNO_RECOVERABLE",
":",
"raise",
"e"
] |
Flushes as much pending data from the internal write buffer as possible.
|
[
"Flushes",
"as",
"much",
"pending",
"data",
"from",
"the",
"internal",
"write",
"buffer",
"as",
"possible",
"."
] |
train
|
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/dockerpty/io.py#L142-L164
|
d11wtq/dockerpty
|
dockerpty/io.py
|
Demuxer.read
|
def read(self, n=4096):
    """
    Read up to `n` bytes of data from the Stream, after demuxing.

    Less than `n` bytes of data may be returned depending on the available
    payload, but the number of bytes returned will never exceed `n`.

    Because demuxing involves scanning 8-byte headers, the actual amount of
    data read from the underlying stream may be greater than `n`.
    """
    size = self._next_packet_size(n)
    if size <= 0:
        return
    data = b''
    # Keep reading until the whole packet payload has been collected.
    while len(data) < size:
        nxt = self.stream.read(size - len(data))
        if not nxt:
            # the stream has closed, return what data we got
            return data
        data += nxt
    return data
|
python
|
def read(self, n=4096):
size = self._next_packet_size(n)
if size <= 0:
return
else:
data = six.binary_type()
while len(data) < size:
nxt = self.stream.read(size - len(data))
if not nxt:
return data
data = data + nxt
return data
|
[
"def",
"read",
"(",
"self",
",",
"n",
"=",
"4096",
")",
":",
"size",
"=",
"self",
".",
"_next_packet_size",
"(",
"n",
")",
"if",
"size",
"<=",
"0",
":",
"return",
"else",
":",
"data",
"=",
"six",
".",
"binary_type",
"(",
")",
"while",
"len",
"(",
"data",
")",
"<",
"size",
":",
"nxt",
"=",
"self",
".",
"stream",
".",
"read",
"(",
"size",
"-",
"len",
"(",
"data",
")",
")",
"if",
"not",
"nxt",
":",
"# the stream has closed, return what data we got",
"return",
"data",
"data",
"=",
"data",
"+",
"nxt",
"return",
"data"
] |
Read up to `n` bytes of data from the Stream, after demuxing.
Less than `n` bytes of data may be returned depending on the available
payload, but the number of bytes returned will never exceed `n`.
Because demuxing involves scanning 8-byte headers, the actual amount of
data read from the underlying stream may be greater than `n`.
|
[
"Read",
"up",
"to",
"n",
"bytes",
"of",
"data",
"from",
"the",
"Stream",
"after",
"demuxing",
"."
] |
train
|
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/dockerpty/io.py#L224-L247
|
d11wtq/dockerpty
|
dockerpty/io.py
|
Pump.flush
|
def flush(self, n=4096):
"""
Flush `n` bytes of data from the reader Stream to the writer Stream.
Returns the number of bytes that were actually flushed. A return value
of zero is not an error.
If EOF has been reached, `None` is returned.
"""
try:
read = self.from_stream.read(n)
if read is None or len(read) == 0:
self.eof = True
if self.propagate_close:
self.to_stream.close()
return None
return self.to_stream.write(read)
except OSError as e:
if e.errno != errno.EPIPE:
raise e
|
python
|
def flush(self, n=4096):
try:
read = self.from_stream.read(n)
if read is None or len(read) == 0:
self.eof = True
if self.propagate_close:
self.to_stream.close()
return None
return self.to_stream.write(read)
except OSError as e:
if e.errno != errno.EPIPE:
raise e
|
[
"def",
"flush",
"(",
"self",
",",
"n",
"=",
"4096",
")",
":",
"try",
":",
"read",
"=",
"self",
".",
"from_stream",
".",
"read",
"(",
"n",
")",
"if",
"read",
"is",
"None",
"or",
"len",
"(",
"read",
")",
"==",
"0",
":",
"self",
".",
"eof",
"=",
"True",
"if",
"self",
".",
"propagate_close",
":",
"self",
".",
"to_stream",
".",
"close",
"(",
")",
"return",
"None",
"return",
"self",
".",
"to_stream",
".",
"write",
"(",
"read",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"!=",
"errno",
".",
"EPIPE",
":",
"raise",
"e"
] |
Flush `n` bytes of data from the reader Stream to the writer Stream.
Returns the number of bytes that were actually flushed. A return value
of zero is not an error.
If EOF has been reached, `None` is returned.
|
[
"Flush",
"n",
"bytes",
"of",
"data",
"from",
"the",
"reader",
"Stream",
"to",
"the",
"writer",
"Stream",
"."
] |
train
|
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/dockerpty/io.py#L356-L378
|
d11wtq/dockerpty
|
dockerpty/io.py
|
Pump.is_done
|
def is_done(self):
"""
Returns True if the read stream is done (either it's returned EOF or
the pump doesn't have wait_for_output set), and the write
side does not have pending bytes to send.
"""
return (not self.wait_for_output or self.eof) and \
not (hasattr(self.to_stream, 'needs_write') and self.to_stream.needs_write())
|
python
|
def is_done(self):
return (not self.wait_for_output or self.eof) and \
not (hasattr(self.to_stream, 'needs_write') and self.to_stream.needs_write())
|
[
"def",
"is_done",
"(",
"self",
")",
":",
"return",
"(",
"not",
"self",
".",
"wait_for_output",
"or",
"self",
".",
"eof",
")",
"and",
"not",
"(",
"hasattr",
"(",
"self",
".",
"to_stream",
",",
"'needs_write'",
")",
"and",
"self",
".",
"to_stream",
".",
"needs_write",
"(",
")",
")"
] |
Returns True if the read stream is done (either it's returned EOF or
the pump doesn't have wait_for_output set), and the write
side does not have pending bytes to send.
|
[
"Returns",
"True",
"if",
"the",
"read",
"stream",
"is",
"done",
"(",
"either",
"it",
"s",
"returned",
"EOF",
"or",
"the",
"pump",
"doesn",
"t",
"have",
"wait_for_output",
"set",
")",
"and",
"the",
"write",
"side",
"does",
"not",
"have",
"pending",
"bytes",
"to",
"send",
"."
] |
train
|
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/dockerpty/io.py#L380-L388
|
d11wtq/dockerpty
|
dockerpty/__init__.py
|
start
|
def start(client, container, interactive=True, stdout=None, stderr=None, stdin=None, logs=None):
"""
Present the PTY of the container inside the current process.
This is just a wrapper for PseudoTerminal(client, container).start()
"""
operation = RunOperation(client, container, interactive=interactive, stdout=stdout,
stderr=stderr, stdin=stdin, logs=logs)
PseudoTerminal(client, operation).start()
|
python
|
def start(client, container, interactive=True, stdout=None, stderr=None, stdin=None, logs=None):
operation = RunOperation(client, container, interactive=interactive, stdout=stdout,
stderr=stderr, stdin=stdin, logs=logs)
PseudoTerminal(client, operation).start()
|
[
"def",
"start",
"(",
"client",
",",
"container",
",",
"interactive",
"=",
"True",
",",
"stdout",
"=",
"None",
",",
"stderr",
"=",
"None",
",",
"stdin",
"=",
"None",
",",
"logs",
"=",
"None",
")",
":",
"operation",
"=",
"RunOperation",
"(",
"client",
",",
"container",
",",
"interactive",
"=",
"interactive",
",",
"stdout",
"=",
"stdout",
",",
"stderr",
"=",
"stderr",
",",
"stdin",
"=",
"stdin",
",",
"logs",
"=",
"logs",
")",
"PseudoTerminal",
"(",
"client",
",",
"operation",
")",
".",
"start",
"(",
")"
] |
Present the PTY of the container inside the current process.
This is just a wrapper for PseudoTerminal(client, container).start()
|
[
"Present",
"the",
"PTY",
"of",
"the",
"container",
"inside",
"the",
"current",
"process",
"."
] |
train
|
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/dockerpty/__init__.py#L20-L30
|
d11wtq/dockerpty
|
dockerpty/__init__.py
|
exec_command
|
def exec_command(
client, container, command, interactive=True, stdout=None, stderr=None, stdin=None):
"""
Run provided command via exec API in provided container.
This is just a wrapper for PseudoTerminal(client, container).exec_command()
"""
exec_id = exec_create(client, container, command, interactive=interactive)
operation = ExecOperation(client, exec_id,
interactive=interactive, stdout=stdout, stderr=stderr, stdin=stdin)
PseudoTerminal(client, operation).start()
|
python
|
def exec_command(
client, container, command, interactive=True, stdout=None, stderr=None, stdin=None):
exec_id = exec_create(client, container, command, interactive=interactive)
operation = ExecOperation(client, exec_id,
interactive=interactive, stdout=stdout, stderr=stderr, stdin=stdin)
PseudoTerminal(client, operation).start()
|
[
"def",
"exec_command",
"(",
"client",
",",
"container",
",",
"command",
",",
"interactive",
"=",
"True",
",",
"stdout",
"=",
"None",
",",
"stderr",
"=",
"None",
",",
"stdin",
"=",
"None",
")",
":",
"exec_id",
"=",
"exec_create",
"(",
"client",
",",
"container",
",",
"command",
",",
"interactive",
"=",
"interactive",
")",
"operation",
"=",
"ExecOperation",
"(",
"client",
",",
"exec_id",
",",
"interactive",
"=",
"interactive",
",",
"stdout",
"=",
"stdout",
",",
"stderr",
"=",
"stderr",
",",
"stdin",
"=",
"stdin",
")",
"PseudoTerminal",
"(",
"client",
",",
"operation",
")",
".",
"start",
"(",
")"
] |
Run provided command via exec API in provided container.
This is just a wrapper for PseudoTerminal(client, container).exec_command()
|
[
"Run",
"provided",
"command",
"via",
"exec",
"API",
"in",
"provided",
"container",
"."
] |
train
|
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/dockerpty/__init__.py#L33-L44
|
d11wtq/dockerpty
|
dockerpty/tty.py
|
size
|
def size(fd):
"""
Return a tuple (rows,cols) representing the size of the TTY `fd`.
The provided file descriptor should be the stdout stream of the TTY.
If the TTY size cannot be determined, returns None.
"""
if not os.isatty(fd.fileno()):
return None
try:
dims = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, 'hhhh'))
except:
try:
dims = (os.environ['LINES'], os.environ['COLUMNS'])
except:
return None
return dims
|
python
|
def size(fd):
if not os.isatty(fd.fileno()):
return None
try:
dims = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, 'hhhh'))
except:
try:
dims = (os.environ['LINES'], os.environ['COLUMNS'])
except:
return None
return dims
|
[
"def",
"size",
"(",
"fd",
")",
":",
"if",
"not",
"os",
".",
"isatty",
"(",
"fd",
".",
"fileno",
"(",
")",
")",
":",
"return",
"None",
"try",
":",
"dims",
"=",
"struct",
".",
"unpack",
"(",
"'hh'",
",",
"fcntl",
".",
"ioctl",
"(",
"fd",
",",
"termios",
".",
"TIOCGWINSZ",
",",
"'hhhh'",
")",
")",
"except",
":",
"try",
":",
"dims",
"=",
"(",
"os",
".",
"environ",
"[",
"'LINES'",
"]",
",",
"os",
".",
"environ",
"[",
"'COLUMNS'",
"]",
")",
"except",
":",
"return",
"None",
"return",
"dims"
] |
Return a tuple (rows,cols) representing the size of the TTY `fd`.
The provided file descriptor should be the stdout stream of the TTY.
If the TTY size cannot be determined, returns None.
|
[
"Return",
"a",
"tuple",
"(",
"rows",
"cols",
")",
"representing",
"the",
"size",
"of",
"the",
"TTY",
"fd",
"."
] |
train
|
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/dockerpty/tty.py#L26-L46
|
d11wtq/dockerpty
|
dockerpty/tty.py
|
Terminal.start
|
def start(self):
"""
Saves the current terminal attributes and makes the tty raw.
This method returns None immediately.
"""
if os.isatty(self.fd.fileno()) and self.israw():
self.original_attributes = termios.tcgetattr(self.fd)
tty.setraw(self.fd)
|
python
|
def start(self):
if os.isatty(self.fd.fileno()) and self.israw():
self.original_attributes = termios.tcgetattr(self.fd)
tty.setraw(self.fd)
|
[
"def",
"start",
"(",
"self",
")",
":",
"if",
"os",
".",
"isatty",
"(",
"self",
".",
"fd",
".",
"fileno",
"(",
")",
")",
"and",
"self",
".",
"israw",
"(",
")",
":",
"self",
".",
"original_attributes",
"=",
"termios",
".",
"tcgetattr",
"(",
"self",
".",
"fd",
")",
"tty",
".",
"setraw",
"(",
"self",
".",
"fd",
")"
] |
Saves the current terminal attributes and makes the tty raw.
This method returns None immediately.
|
[
"Saves",
"the",
"current",
"terminal",
"attributes",
"and",
"makes",
"the",
"tty",
"raw",
"."
] |
train
|
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/dockerpty/tty.py#L100-L109
|
d11wtq/dockerpty
|
dockerpty/tty.py
|
Terminal.stop
|
def stop(self):
"""
Restores the terminal attributes back to before setting raw mode.
If the raw terminal was not started, does nothing.
"""
if self.original_attributes is not None:
termios.tcsetattr(
self.fd,
termios.TCSADRAIN,
self.original_attributes,
)
|
python
|
def stop(self):
if self.original_attributes is not None:
termios.tcsetattr(
self.fd,
termios.TCSADRAIN,
self.original_attributes,
)
|
[
"def",
"stop",
"(",
"self",
")",
":",
"if",
"self",
".",
"original_attributes",
"is",
"not",
"None",
":",
"termios",
".",
"tcsetattr",
"(",
"self",
".",
"fd",
",",
"termios",
".",
"TCSADRAIN",
",",
"self",
".",
"original_attributes",
",",
")"
] |
Restores the terminal attributes back to before setting raw mode.
If the raw terminal was not started, does nothing.
|
[
"Restores",
"the",
"terminal",
"attributes",
"back",
"to",
"before",
"setting",
"raw",
"mode",
"."
] |
train
|
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/dockerpty/tty.py#L112-L124
|
d11wtq/dockerpty
|
dockerpty/pty.py
|
WINCHHandler.start
|
def start(self):
"""
Start trapping WINCH signals and resizing the PTY.
This method saves the previous WINCH handler so it can be restored on
`stop()`.
"""
def handle(signum, frame):
if signum == signal.SIGWINCH:
self.pty.resize()
self.original_handler = signal.signal(signal.SIGWINCH, handle)
|
python
|
def start(self):
def handle(signum, frame):
if signum == signal.SIGWINCH:
self.pty.resize()
self.original_handler = signal.signal(signal.SIGWINCH, handle)
|
[
"def",
"start",
"(",
"self",
")",
":",
"def",
"handle",
"(",
"signum",
",",
"frame",
")",
":",
"if",
"signum",
"==",
"signal",
".",
"SIGWINCH",
":",
"self",
".",
"pty",
".",
"resize",
"(",
")",
"self",
".",
"original_handler",
"=",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGWINCH",
",",
"handle",
")"
] |
Start trapping WINCH signals and resizing the PTY.
This method saves the previous WINCH handler so it can be restored on
`stop()`.
|
[
"Start",
"trapping",
"WINCH",
"signals",
"and",
"resizing",
"the",
"PTY",
"."
] |
train
|
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/dockerpty/pty.py#L57-L69
|
d11wtq/dockerpty
|
dockerpty/pty.py
|
WINCHHandler.stop
|
def stop(self):
"""
Stop trapping WINCH signals and restore the previous WINCH handler.
"""
if self.original_handler is not None:
signal.signal(signal.SIGWINCH, self.original_handler)
|
python
|
def stop(self):
if self.original_handler is not None:
signal.signal(signal.SIGWINCH, self.original_handler)
|
[
"def",
"stop",
"(",
"self",
")",
":",
"if",
"self",
".",
"original_handler",
"is",
"not",
"None",
":",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGWINCH",
",",
"self",
".",
"original_handler",
")"
] |
Stop trapping WINCH signals and restore the previous WINCH handler.
|
[
"Stop",
"trapping",
"WINCH",
"signals",
"and",
"restore",
"the",
"previous",
"WINCH",
"handler",
"."
] |
train
|
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/dockerpty/pty.py#L71-L77
|
d11wtq/dockerpty
|
dockerpty/pty.py
|
RunOperation.start
|
def start(self, sockets=None, **kwargs):
"""
Present the PTY of the container inside the current process.
This will take over the current process' TTY until the container's PTY
is closed.
"""
pty_stdin, pty_stdout, pty_stderr = sockets or self.sockets()
pumps = []
if pty_stdin and self.interactive:
pumps.append(io.Pump(io.Stream(self.stdin), pty_stdin, wait_for_output=False))
if pty_stdout:
pumps.append(io.Pump(pty_stdout, io.Stream(self.stdout), propagate_close=False))
if pty_stderr:
pumps.append(io.Pump(pty_stderr, io.Stream(self.stderr), propagate_close=False))
if not self._container_info()['State']['Running']:
self.client.start(self.container, **kwargs)
return pumps
|
python
|
def start(self, sockets=None, **kwargs):
pty_stdin, pty_stdout, pty_stderr = sockets or self.sockets()
pumps = []
if pty_stdin and self.interactive:
pumps.append(io.Pump(io.Stream(self.stdin), pty_stdin, wait_for_output=False))
if pty_stdout:
pumps.append(io.Pump(pty_stdout, io.Stream(self.stdout), propagate_close=False))
if pty_stderr:
pumps.append(io.Pump(pty_stderr, io.Stream(self.stderr), propagate_close=False))
if not self._container_info()['State']['Running']:
self.client.start(self.container, **kwargs)
return pumps
|
[
"def",
"start",
"(",
"self",
",",
"sockets",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"pty_stdin",
",",
"pty_stdout",
",",
"pty_stderr",
"=",
"sockets",
"or",
"self",
".",
"sockets",
"(",
")",
"pumps",
"=",
"[",
"]",
"if",
"pty_stdin",
"and",
"self",
".",
"interactive",
":",
"pumps",
".",
"append",
"(",
"io",
".",
"Pump",
"(",
"io",
".",
"Stream",
"(",
"self",
".",
"stdin",
")",
",",
"pty_stdin",
",",
"wait_for_output",
"=",
"False",
")",
")",
"if",
"pty_stdout",
":",
"pumps",
".",
"append",
"(",
"io",
".",
"Pump",
"(",
"pty_stdout",
",",
"io",
".",
"Stream",
"(",
"self",
".",
"stdout",
")",
",",
"propagate_close",
"=",
"False",
")",
")",
"if",
"pty_stderr",
":",
"pumps",
".",
"append",
"(",
"io",
".",
"Pump",
"(",
"pty_stderr",
",",
"io",
".",
"Stream",
"(",
"self",
".",
"stderr",
")",
",",
"propagate_close",
"=",
"False",
")",
")",
"if",
"not",
"self",
".",
"_container_info",
"(",
")",
"[",
"'State'",
"]",
"[",
"'Running'",
"]",
":",
"self",
".",
"client",
".",
"start",
"(",
"self",
".",
"container",
",",
"*",
"*",
"kwargs",
")",
"return",
"pumps"
] |
Present the PTY of the container inside the current process.
This will take over the current process' TTY until the container's PTY
is closed.
|
[
"Present",
"the",
"PTY",
"of",
"the",
"container",
"inside",
"the",
"current",
"process",
"."
] |
train
|
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/dockerpty/pty.py#L128-L151
|
d11wtq/dockerpty
|
dockerpty/pty.py
|
RunOperation.israw
|
def israw(self, **kwargs):
"""
Returns True if the PTY should operate in raw mode.
If the container was not started with tty=True, this will return False.
"""
if self.raw is None:
info = self._container_info()
self.raw = self.stdout.isatty() and info['Config']['Tty']
return self.raw
|
python
|
def israw(self, **kwargs):
if self.raw is None:
info = self._container_info()
self.raw = self.stdout.isatty() and info['Config']['Tty']
return self.raw
|
[
"def",
"israw",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"raw",
"is",
"None",
":",
"info",
"=",
"self",
".",
"_container_info",
"(",
")",
"self",
".",
"raw",
"=",
"self",
".",
"stdout",
".",
"isatty",
"(",
")",
"and",
"info",
"[",
"'Config'",
"]",
"[",
"'Tty'",
"]",
"return",
"self",
".",
"raw"
] |
Returns True if the PTY should operate in raw mode.
If the container was not started with tty=True, this will return False.
|
[
"Returns",
"True",
"if",
"the",
"PTY",
"should",
"operate",
"in",
"raw",
"mode",
"."
] |
train
|
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/dockerpty/pty.py#L153-L164
|
d11wtq/dockerpty
|
dockerpty/pty.py
|
RunOperation.sockets
|
def sockets(self):
"""
Returns a tuple of sockets connected to the pty (stdin,stdout,stderr).
If any of the sockets are not attached in the container, `None` is
returned in the tuple.
"""
info = self._container_info()
def attach_socket(key):
if info['Config']['Attach{0}'.format(key.capitalize())]:
socket = self.client.attach_socket(
self.container,
{key: 1, 'stream': 1, 'logs': self.logs},
)
stream = io.Stream(socket)
if info['Config']['Tty']:
return stream
else:
return io.Demuxer(stream)
else:
return None
return map(attach_socket, ('stdin', 'stdout', 'stderr'))
|
python
|
def sockets(self):
info = self._container_info()
def attach_socket(key):
if info['Config']['Attach{0}'.format(key.capitalize())]:
socket = self.client.attach_socket(
self.container,
{key: 1, 'stream': 1, 'logs': self.logs},
)
stream = io.Stream(socket)
if info['Config']['Tty']:
return stream
else:
return io.Demuxer(stream)
else:
return None
return map(attach_socket, ('stdin', 'stdout', 'stderr'))
|
[
"def",
"sockets",
"(",
"self",
")",
":",
"info",
"=",
"self",
".",
"_container_info",
"(",
")",
"def",
"attach_socket",
"(",
"key",
")",
":",
"if",
"info",
"[",
"'Config'",
"]",
"[",
"'Attach{0}'",
".",
"format",
"(",
"key",
".",
"capitalize",
"(",
")",
")",
"]",
":",
"socket",
"=",
"self",
".",
"client",
".",
"attach_socket",
"(",
"self",
".",
"container",
",",
"{",
"key",
":",
"1",
",",
"'stream'",
":",
"1",
",",
"'logs'",
":",
"self",
".",
"logs",
"}",
",",
")",
"stream",
"=",
"io",
".",
"Stream",
"(",
"socket",
")",
"if",
"info",
"[",
"'Config'",
"]",
"[",
"'Tty'",
"]",
":",
"return",
"stream",
"else",
":",
"return",
"io",
".",
"Demuxer",
"(",
"stream",
")",
"else",
":",
"return",
"None",
"return",
"map",
"(",
"attach_socket",
",",
"(",
"'stdin'",
",",
"'stdout'",
",",
"'stderr'",
")",
")"
] |
Returns a tuple of sockets connected to the pty (stdin,stdout,stderr).
If any of the sockets are not attached in the container, `None` is
returned in the tuple.
|
[
"Returns",
"a",
"tuple",
"of",
"sockets",
"connected",
"to",
"the",
"pty",
"(",
"stdin",
"stdout",
"stderr",
")",
"."
] |
train
|
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/dockerpty/pty.py#L166-L191
|
d11wtq/dockerpty
|
dockerpty/pty.py
|
RunOperation.resize
|
def resize(self, height, width, **kwargs):
"""
resize pty within container
"""
self.client.resize(self.container, height=height, width=width)
|
python
|
def resize(self, height, width, **kwargs):
self.client.resize(self.container, height=height, width=width)
|
[
"def",
"resize",
"(",
"self",
",",
"height",
",",
"width",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"client",
".",
"resize",
"(",
"self",
".",
"container",
",",
"height",
"=",
"height",
",",
"width",
"=",
"width",
")"
] |
resize pty within container
|
[
"resize",
"pty",
"within",
"container"
] |
train
|
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/dockerpty/pty.py#L193-L197
|
d11wtq/dockerpty
|
dockerpty/pty.py
|
ExecOperation.start
|
def start(self, sockets=None, **kwargs):
"""
start execution
"""
stream = sockets or self.sockets()
pumps = []
if self.interactive:
pumps.append(io.Pump(io.Stream(self.stdin), stream, wait_for_output=False))
pumps.append(io.Pump(stream, io.Stream(self.stdout), propagate_close=False))
# FIXME: since exec_start returns a single socket, how do we
# distinguish between stdout and stderr?
# pumps.append(io.Pump(stream, io.Stream(self.stderr), propagate_close=False))
return pumps
|
python
|
def start(self, sockets=None, **kwargs):
stream = sockets or self.sockets()
pumps = []
if self.interactive:
pumps.append(io.Pump(io.Stream(self.stdin), stream, wait_for_output=False))
pumps.append(io.Pump(stream, io.Stream(self.stdout), propagate_close=False))
return pumps
|
[
"def",
"start",
"(",
"self",
",",
"sockets",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"stream",
"=",
"sockets",
"or",
"self",
".",
"sockets",
"(",
")",
"pumps",
"=",
"[",
"]",
"if",
"self",
".",
"interactive",
":",
"pumps",
".",
"append",
"(",
"io",
".",
"Pump",
"(",
"io",
".",
"Stream",
"(",
"self",
".",
"stdin",
")",
",",
"stream",
",",
"wait_for_output",
"=",
"False",
")",
")",
"pumps",
".",
"append",
"(",
"io",
".",
"Pump",
"(",
"stream",
",",
"io",
".",
"Stream",
"(",
"self",
".",
"stdout",
")",
",",
"propagate_close",
"=",
"False",
")",
")",
"# FIXME: since exec_start returns a single socket, how do we",
"# distinguish between stdout and stderr?",
"# pumps.append(io.Pump(stream, io.Stream(self.stderr), propagate_close=False))",
"return",
"pumps"
] |
start execution
|
[
"start",
"execution"
] |
train
|
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/dockerpty/pty.py#L227-L242
|
d11wtq/dockerpty
|
dockerpty/pty.py
|
ExecOperation.israw
|
def israw(self, **kwargs):
"""
Returns True if the PTY should operate in raw mode.
If the exec was not started with tty=True, this will return False.
"""
if self.raw is None:
self.raw = self.stdout.isatty() and self.is_process_tty()
return self.raw
|
python
|
def israw(self, **kwargs):
if self.raw is None:
self.raw = self.stdout.isatty() and self.is_process_tty()
return self.raw
|
[
"def",
"israw",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"raw",
"is",
"None",
":",
"self",
".",
"raw",
"=",
"self",
".",
"stdout",
".",
"isatty",
"(",
")",
"and",
"self",
".",
"is_process_tty",
"(",
")",
"return",
"self",
".",
"raw"
] |
Returns True if the PTY should operate in raw mode.
If the exec was not started with tty=True, this will return False.
|
[
"Returns",
"True",
"if",
"the",
"PTY",
"should",
"operate",
"in",
"raw",
"mode",
"."
] |
train
|
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/dockerpty/pty.py#L244-L254
|
d11wtq/dockerpty
|
dockerpty/pty.py
|
ExecOperation.sockets
|
def sockets(self):
"""
Return a single socket which is processing all I/O to exec
"""
socket = self.client.exec_start(self.exec_id, socket=True, tty=self.interactive)
stream = io.Stream(socket)
if self.is_process_tty():
return stream
else:
return io.Demuxer(stream)
|
python
|
def sockets(self):
socket = self.client.exec_start(self.exec_id, socket=True, tty=self.interactive)
stream = io.Stream(socket)
if self.is_process_tty():
return stream
else:
return io.Demuxer(stream)
|
[
"def",
"sockets",
"(",
"self",
")",
":",
"socket",
"=",
"self",
".",
"client",
".",
"exec_start",
"(",
"self",
".",
"exec_id",
",",
"socket",
"=",
"True",
",",
"tty",
"=",
"self",
".",
"interactive",
")",
"stream",
"=",
"io",
".",
"Stream",
"(",
"socket",
")",
"if",
"self",
".",
"is_process_tty",
"(",
")",
":",
"return",
"stream",
"else",
":",
"return",
"io",
".",
"Demuxer",
"(",
"stream",
")"
] |
Return a single socket which is processing all I/O to exec
|
[
"Return",
"a",
"single",
"socket",
"which",
"is",
"processing",
"all",
"I",
"/",
"O",
"to",
"exec"
] |
train
|
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/dockerpty/pty.py#L256-L265
|
d11wtq/dockerpty
|
dockerpty/pty.py
|
ExecOperation.resize
|
def resize(self, height, width, **kwargs):
"""
resize pty of an execed process
"""
self.client.exec_resize(self.exec_id, height=height, width=width)
|
python
|
def resize(self, height, width, **kwargs):
self.client.exec_resize(self.exec_id, height=height, width=width)
|
[
"def",
"resize",
"(",
"self",
",",
"height",
",",
"width",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"client",
".",
"exec_resize",
"(",
"self",
".",
"exec_id",
",",
"height",
"=",
"height",
",",
"width",
"=",
"width",
")"
] |
resize pty of an execed process
|
[
"resize",
"pty",
"of",
"an",
"execed",
"process"
] |
train
|
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/dockerpty/pty.py#L267-L271
|
d11wtq/dockerpty
|
dockerpty/pty.py
|
ExecOperation._exec_info
|
def _exec_info(self):
"""
Caching wrapper around client.exec_inspect
"""
if self._info is None:
self._info = self.client.exec_inspect(self.exec_id)
return self._info
|
python
|
def _exec_info(self):
if self._info is None:
self._info = self.client.exec_inspect(self.exec_id)
return self._info
|
[
"def",
"_exec_info",
"(",
"self",
")",
":",
"if",
"self",
".",
"_info",
"is",
"None",
":",
"self",
".",
"_info",
"=",
"self",
".",
"client",
".",
"exec_inspect",
"(",
"self",
".",
"exec_id",
")",
"return",
"self",
".",
"_info"
] |
Caching wrapper around client.exec_inspect
|
[
"Caching",
"wrapper",
"around",
"client",
".",
"exec_inspect"
] |
train
|
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/dockerpty/pty.py#L279-L285
|
d11wtq/dockerpty
|
dockerpty/pty.py
|
PseudoTerminal.resize
|
def resize(self, size=None):
"""
Resize the container's PTY.
If `size` is not None, it must be a tuple of (height,width), otherwise
it will be determined by the size of the current TTY.
"""
if not self.operation.israw():
return
size = size or tty.size(self.operation.stdout)
if size is not None:
rows, cols = size
try:
self.operation.resize(height=rows, width=cols)
except IOError: # Container already exited
pass
|
python
|
def resize(self, size=None):
if not self.operation.israw():
return
size = size or tty.size(self.operation.stdout)
if size is not None:
rows, cols = size
try:
self.operation.resize(height=rows, width=cols)
except IOError:
pass
|
[
"def",
"resize",
"(",
"self",
",",
"size",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"operation",
".",
"israw",
"(",
")",
":",
"return",
"size",
"=",
"size",
"or",
"tty",
".",
"size",
"(",
"self",
".",
"operation",
".",
"stdout",
")",
"if",
"size",
"is",
"not",
"None",
":",
"rows",
",",
"cols",
"=",
"size",
"try",
":",
"self",
".",
"operation",
".",
"resize",
"(",
"height",
"=",
"rows",
",",
"width",
"=",
"cols",
")",
"except",
"IOError",
":",
"# Container already exited",
"pass"
] |
Resize the container's PTY.
If `size` is not None, it must be a tuple of (height,width), otherwise
it will be determined by the size of the current TTY.
|
[
"Resize",
"the",
"container",
"s",
"PTY",
"."
] |
train
|
https://github.com/d11wtq/dockerpty/blob/f8d17d893c6758b7cc25825e99f6b02202632a97/dockerpty/pty.py#L340-L358
|
mhe/pynrrd
|
nrrd/formatters.py
|
format_number
|
def format_number(x):
"""Format number to string
Function converts a number to string. For numbers of class :class:`float`, up to 17 digits will be used to print
the entire floating point number. Any padding zeros will be removed at the end of the number.
See :ref:`user-guide:int` and :ref:`user-guide:double` for more information on the format.
.. note::
IEEE754-1985 standard says that 17 significant decimal digits are required to adequately represent a
64-bit floating point number. Not all fractional numbers can be exactly represented in floating point. An
example is 0.1 which will be approximated as 0.10000000000000001.
Parameters
----------
x : :class:`int` or :class:`float`
Number to convert to string
Returns
-------
vector : :class:`str`
String of number :obj:`x`
"""
if isinstance(x, float):
# Helps prevent loss of precision as using str() in Python 2 only prints 12 digits of precision.
# However, IEEE754-1985 standard says that 17 significant decimal digits is required to adequately represent a
# floating point number.
# The g option is used rather than f because g precision uses significant digits while f is just the number of
# digits after the decimal. (NRRD C implementation uses g).
value = '{:.17g}'.format(x)
else:
value = str(x)
return value
|
python
|
def format_number(x):
if isinstance(x, float):
value = '{:.17g}'.format(x)
else:
value = str(x)
return value
|
[
"def",
"format_number",
"(",
"x",
")",
":",
"if",
"isinstance",
"(",
"x",
",",
"float",
")",
":",
"# Helps prevent loss of precision as using str() in Python 2 only prints 12 digits of precision.",
"# However, IEEE754-1985 standard says that 17 significant decimal digits is required to adequately represent a",
"# floating point number.",
"# The g option is used rather than f because g precision uses significant digits while f is just the number of",
"# digits after the decimal. (NRRD C implementation uses g).",
"value",
"=",
"'{:.17g}'",
".",
"format",
"(",
"x",
")",
"else",
":",
"value",
"=",
"str",
"(",
"x",
")",
"return",
"value"
] |
Format number to string
Function converts a number to string. For numbers of class :class:`float`, up to 17 digits will be used to print
the entire floating point number. Any padding zeros will be removed at the end of the number.
See :ref:`user-guide:int` and :ref:`user-guide:double` for more information on the format.
.. note::
IEEE754-1985 standard says that 17 significant decimal digits are required to adequately represent a
64-bit floating point number. Not all fractional numbers can be exactly represented in floating point. An
example is 0.1 which will be approximated as 0.10000000000000001.
Parameters
----------
x : :class:`int` or :class:`float`
Number to convert to string
Returns
-------
vector : :class:`str`
String of number :obj:`x`
|
[
"Format",
"number",
"to",
"string"
] |
train
|
https://github.com/mhe/pynrrd/blob/96dd875b302031ea27e2d3aaa611dc6f2dfc7979/nrrd/formatters.py#L4-L38
|
mhe/pynrrd
|
nrrd/formatters.py
|
format_optional_vector
|
def format_optional_vector(x):
"""Format a (N,) :class:`numpy.ndarray` into a NRRD optional vector string
Function converts a (N,) :class:`numpy.ndarray` or :obj:`None` into a string using NRRD vector format. If the input
:obj:`x` is :obj:`None`, then :obj:`vector` will be 'none'
See :ref:`user-guide:int vector` and :ref:`user-guide:double vector` for more information on the format.
Parameters
----------
x : (N,) :class:`numpy.ndarray` or :obj:`None`
Vector to convert to NRRD vector string
Returns
-------
vector : :class:`str`
String containing NRRD vector
"""
# If vector is None or all elements are NaN, then return none
# Otherwise format the vector as normal
if x is None or np.all(np.isnan(x)):
return 'none'
else:
return format_vector(x)
|
python
|
def format_optional_vector(x):
if x is None or np.all(np.isnan(x)):
return 'none'
else:
return format_vector(x)
|
[
"def",
"format_optional_vector",
"(",
"x",
")",
":",
"# If vector is None or all elements are NaN, then return none",
"# Otherwise format the vector as normal",
"if",
"x",
"is",
"None",
"or",
"np",
".",
"all",
"(",
"np",
".",
"isnan",
"(",
"x",
")",
")",
":",
"return",
"'none'",
"else",
":",
"return",
"format_vector",
"(",
"x",
")"
] |
Format a (N,) :class:`numpy.ndarray` into a NRRD optional vector string
Function converts a (N,) :class:`numpy.ndarray` or :obj:`None` into a string using NRRD vector format. If the input
:obj:`x` is :obj:`None`, then :obj:`vector` will be 'none'
See :ref:`user-guide:int vector` and :ref:`user-guide:double vector` for more information on the format.
Parameters
----------
x : (N,) :class:`numpy.ndarray` or :obj:`None`
Vector to convert to NRRD vector string
Returns
-------
vector : :class:`str`
String containing NRRD vector
|
[
"Format",
"a",
"(",
"N",
")",
":",
"class",
":",
"numpy",
".",
"ndarray",
"into",
"a",
"NRRD",
"optional",
"vector",
"string"
] |
train
|
https://github.com/mhe/pynrrd/blob/96dd875b302031ea27e2d3aaa611dc6f2dfc7979/nrrd/formatters.py#L60-L84
|
mhe/pynrrd
|
nrrd/writer.py
|
write
|
def write(filename, data, header=None, detached_header=False, relative_data_path=True, custom_field_map=None,
compression_level=9, index_order='F'):
"""Write :class:`numpy.ndarray` to NRRD file
The :obj:`filename` parameter specifies the absolute or relative filename to write the NRRD file to. If the
:obj:`filename` extension is .nhdr, then the :obj:`detached_header` parameter is set to true automatically. If the
:obj:`detached_header` parameter is set to :obj:`True` and the :obj:`filename` ends in .nrrd, then the header file
will have the same path and base name as the :obj:`filename` but with an extension of .nhdr. In all other cases,
the header and data are saved in the same file.
:obj:`header` is an optional parameter containing the fields and values to be added to the NRRD header.
.. note::
The following fields are automatically generated based on the :obj:`data` parameter ignoring these values
in the :obj:`header`: 'type', 'endian', 'dimension', 'sizes'. In addition, the generated fields will be
added to the given :obj:`header`. Thus, one can check the generated fields by viewing the passed
:obj:`header`.
.. note::
The default encoding field used if not specified in :obj:`header` is 'gzip'.
.. note::
The :obj:`index_order` parameter must be consistent with the index order specified in :meth:`read`.
Reading an NRRD file in C-order and then writing as Fortran-order or vice versa will result in the data
being transposed in the NRRD file.
See :ref:`user-guide:Writing NRRD files` for more information on writing NRRD files.
Parameters
----------
filename : :class:`str`
Filename of the NRRD file
data : :class:`numpy.ndarray`
Data to save to the NRRD file
detached_header : :obj:`bool`, optional
Whether the header and data should be saved in separate files. Defaults to :obj:`False`
relative_data_path : :class:`bool`
Whether the data filename in detached header is saved with a relative path or absolute path.
This parameter is ignored if there is no detached header. Defaults to :obj:`True`
custom_field_map : :class:`dict` (:class:`str`, :class:`str`), optional
Dictionary used for parsing custom field types where the key is the custom field name and the value is a
string identifying datatype for the custom field.
compression_level : :class:`int`
Integer between 1 to 9 specifying the compression level when using a compressed encoding (gzip or bzip). A value
of :obj:`1` compresses the data the least amount and is the fastest, while a value of :obj:`9` compresses the
data the most and is the slowest.
index_order : {'C', 'F'}, optional
Specifies the index order used for writing. Either 'C' (C-order) where the dimensions are ordered from
slowest-varying to fastest-varying (e.g. (z, y, x)), or 'F' (Fortran-order) where the dimensions are ordered
from fastest-varying to slowest-varying (e.g. (x, y, z)).
See Also
--------
:meth:`read`, :meth:`read_header`, :meth:`read_data`
"""
if header is None:
header = {}
# Infer a number of fields from the NumPy array and overwrite values in the header dictionary.
# Get type string identifier from the NumPy datatype
header['type'] = _TYPEMAP_NUMPY2NRRD[data.dtype.str[1:]]
# If the datatype contains more than one byte and the encoding is not ASCII, then set the endian header value
# based on the datatype's endianness. Otherwise, delete the endian field from the header if present
if data.dtype.itemsize > 1 and header.get('encoding', '').lower() not in ['ascii', 'text', 'txt']:
header['endian'] = _NUMPY2NRRD_ENDIAN_MAP[data.dtype.str[:1]]
elif 'endian' in header:
del header['endian']
# If space is specified in the header, then space dimension can not. See
# http://teem.sourceforge.net/nrrd/format.html#space
if 'space' in header.keys() and 'space dimension' in header.keys():
del header['space dimension']
# Update the dimension and sizes fields in the header based on the data. Since NRRD expects meta data to be in
# Fortran order we are required to reverse the shape in the case of the array being in C order. E.g., data was read
# using index_order='C'.
header['dimension'] = data.ndim
header['sizes'] = list(data.shape) if index_order == 'F' else list(data.shape[::-1])
# The default encoding is 'gzip'
if 'encoding' not in header:
header['encoding'] = 'gzip'
# A bit of magic in handling options here.
# If *.nhdr filename provided, this overrides `detached_header=False`
# If *.nrrd filename provided AND detached_header=True, separate header and data files written.
# If detached_header=True and data file is present, then write the files separately
# For all other cases, header & data written to same file.
if filename.endswith('.nhdr'):
detached_header = True
if 'data file' not in header:
# Get the base filename without the extension
base_filename = os.path.splitext(filename)[0]
# Get the appropriate data filename based on encoding, see here for information on the standard detached
# filename: http://teem.sourceforge.net/nrrd/format.html#encoding
if header['encoding'] == 'raw':
data_filename = '%s.raw' % base_filename
elif header['encoding'] in ['ASCII', 'ascii', 'text', 'txt']:
data_filename = '%s.txt' % base_filename
elif header['encoding'] in ['gzip', 'gz']:
data_filename = '%s.raw.gz' % base_filename
elif header['encoding'] in ['bzip2', 'bz2']:
data_filename = '%s.raw.bz2' % base_filename
else:
raise NRRDError('Invalid encoding specification while writing NRRD file: %s' % header['encoding'])
header['data file'] = os.path.basename(data_filename) \
if relative_data_path else os.path.abspath(data_filename)
else:
# TODO This will cause issues for relative data files because it will not save in the correct spot
data_filename = header['data file']
elif filename.endswith('.nrrd') and detached_header:
data_filename = filename
header['data file'] = os.path.basename(data_filename) \
if relative_data_path else os.path.abspath(data_filename)
filename = '%s.nhdr' % os.path.splitext(filename)[0]
else:
# Write header & data as one file
data_filename = filename
detached_header = False
with open(filename, 'wb') as fh:
fh.write(b'NRRD0005\n')
fh.write(b'# This NRRD file was generated by pynrrd\n')
fh.write(b'# on ' + datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S').encode('ascii') + b'(GMT).\n')
fh.write(b'# Complete NRRD file format specification at:\n')
fh.write(b'# http://teem.sourceforge.net/nrrd/format.html\n')
# Copy the options since dictionaries are mutable when passed as an argument
# Thus, to prevent changes to the actual options, a copy is made
# Empty ordered_options list is made (will be converted into dictionary)
local_options = header.copy()
ordered_options = []
# Loop through field order and add the key/value if present
# Remove the key/value from the local options so that we know not to add it again
for field in _NRRD_FIELD_ORDER:
if field in local_options:
ordered_options.append((field, local_options[field]))
del local_options[field]
# Leftover items are assumed to be the custom field/value options
# So get current size and any items past this index will be a custom value
custom_field_start_index = len(ordered_options)
# Add the leftover items to the end of the list and convert the options into a dictionary
ordered_options.extend(local_options.items())
ordered_options = OrderedDict(ordered_options)
for x, (field, value) in enumerate(ordered_options.items()):
# Get the field_type based on field and then get corresponding
# value as a str using _format_field_value
field_type = _get_field_type(field, custom_field_map)
value_str = _format_field_value(value, field_type)
# Custom fields are written as key/value pairs with a := instead of : delimeter
if x >= custom_field_start_index:
fh.write(('%s:=%s\n' % (field, value_str)).encode('ascii'))
else:
fh.write(('%s: %s\n' % (field, value_str)).encode('ascii'))
# Write the closing extra newline
fh.write(b'\n')
# If header & data in the same file is desired, write data in the file
if not detached_header:
_write_data(data, fh, header, compression_level=compression_level, index_order=index_order)
# If detached header desired, write data to different file
if detached_header:
with open(data_filename, 'wb') as data_fh:
_write_data(data, data_fh, header, compression_level=compression_level, index_order=index_order)
|
python
|
def write(filename, data, header=None, detached_header=False, relative_data_path=True, custom_field_map=None,
compression_level=9, index_order='F'):
if header is None:
header = {}
header['type'] = _TYPEMAP_NUMPY2NRRD[data.dtype.str[1:]]
if data.dtype.itemsize > 1 and header.get('encoding', '').lower() not in ['ascii', 'text', 'txt']:
header['endian'] = _NUMPY2NRRD_ENDIAN_MAP[data.dtype.str[:1]]
elif 'endian' in header:
del header['endian']
if 'space' in header.keys() and 'space dimension' in header.keys():
del header['space dimension']
header['dimension'] = data.ndim
header['sizes'] = list(data.shape) if index_order == 'F' else list(data.shape[::-1])
if 'encoding' not in header:
header['encoding'] = 'gzip'
if filename.endswith('.nhdr'):
detached_header = True
if 'data file' not in header:
base_filename = os.path.splitext(filename)[0]
if header['encoding'] == 'raw':
data_filename = '%s.raw' % base_filename
elif header['encoding'] in ['ASCII', 'ascii', 'text', 'txt']:
data_filename = '%s.txt' % base_filename
elif header['encoding'] in ['gzip', 'gz']:
data_filename = '%s.raw.gz' % base_filename
elif header['encoding'] in ['bzip2', 'bz2']:
data_filename = '%s.raw.bz2' % base_filename
else:
raise NRRDError('Invalid encoding specification while writing NRRD file: %s' % header['encoding'])
header['data file'] = os.path.basename(data_filename) \
if relative_data_path else os.path.abspath(data_filename)
else:
data_filename = header['data file']
elif filename.endswith('.nrrd') and detached_header:
data_filename = filename
header['data file'] = os.path.basename(data_filename) \
if relative_data_path else os.path.abspath(data_filename)
filename = '%s.nhdr' % os.path.splitext(filename)[0]
else:
data_filename = filename
detached_header = False
with open(filename, 'wb') as fh:
fh.write(b'NRRD0005\n')
fh.write(b'
fh.write(b'
fh.write(b'
fh.write(b'
local_options = header.copy()
ordered_options = []
for field in _NRRD_FIELD_ORDER:
if field in local_options:
ordered_options.append((field, local_options[field]))
del local_options[field]
custom_field_start_index = len(ordered_options)
ordered_options.extend(local_options.items())
ordered_options = OrderedDict(ordered_options)
for x, (field, value) in enumerate(ordered_options.items()):
field_type = _get_field_type(field, custom_field_map)
value_str = _format_field_value(value, field_type)
if x >= custom_field_start_index:
fh.write(('%s:=%s\n' % (field, value_str)).encode('ascii'))
else:
fh.write(('%s: %s\n' % (field, value_str)).encode('ascii'))
fh.write(b'\n')
if not detached_header:
_write_data(data, fh, header, compression_level=compression_level, index_order=index_order)
if detached_header:
with open(data_filename, 'wb') as data_fh:
_write_data(data, data_fh, header, compression_level=compression_level, index_order=index_order)
|
[
"def",
"write",
"(",
"filename",
",",
"data",
",",
"header",
"=",
"None",
",",
"detached_header",
"=",
"False",
",",
"relative_data_path",
"=",
"True",
",",
"custom_field_map",
"=",
"None",
",",
"compression_level",
"=",
"9",
",",
"index_order",
"=",
"'F'",
")",
":",
"if",
"header",
"is",
"None",
":",
"header",
"=",
"{",
"}",
"# Infer a number of fields from the NumPy array and overwrite values in the header dictionary.",
"# Get type string identifier from the NumPy datatype",
"header",
"[",
"'type'",
"]",
"=",
"_TYPEMAP_NUMPY2NRRD",
"[",
"data",
".",
"dtype",
".",
"str",
"[",
"1",
":",
"]",
"]",
"# If the datatype contains more than one byte and the encoding is not ASCII, then set the endian header value",
"# based on the datatype's endianness. Otherwise, delete the endian field from the header if present",
"if",
"data",
".",
"dtype",
".",
"itemsize",
">",
"1",
"and",
"header",
".",
"get",
"(",
"'encoding'",
",",
"''",
")",
".",
"lower",
"(",
")",
"not",
"in",
"[",
"'ascii'",
",",
"'text'",
",",
"'txt'",
"]",
":",
"header",
"[",
"'endian'",
"]",
"=",
"_NUMPY2NRRD_ENDIAN_MAP",
"[",
"data",
".",
"dtype",
".",
"str",
"[",
":",
"1",
"]",
"]",
"elif",
"'endian'",
"in",
"header",
":",
"del",
"header",
"[",
"'endian'",
"]",
"# If space is specified in the header, then space dimension can not. See",
"# http://teem.sourceforge.net/nrrd/format.html#space",
"if",
"'space'",
"in",
"header",
".",
"keys",
"(",
")",
"and",
"'space dimension'",
"in",
"header",
".",
"keys",
"(",
")",
":",
"del",
"header",
"[",
"'space dimension'",
"]",
"# Update the dimension and sizes fields in the header based on the data. Since NRRD expects meta data to be in",
"# Fortran order we are required to reverse the shape in the case of the array being in C order. E.g., data was read",
"# using index_order='C'.",
"header",
"[",
"'dimension'",
"]",
"=",
"data",
".",
"ndim",
"header",
"[",
"'sizes'",
"]",
"=",
"list",
"(",
"data",
".",
"shape",
")",
"if",
"index_order",
"==",
"'F'",
"else",
"list",
"(",
"data",
".",
"shape",
"[",
":",
":",
"-",
"1",
"]",
")",
"# The default encoding is 'gzip'",
"if",
"'encoding'",
"not",
"in",
"header",
":",
"header",
"[",
"'encoding'",
"]",
"=",
"'gzip'",
"# A bit of magic in handling options here.",
"# If *.nhdr filename provided, this overrides `detached_header=False`",
"# If *.nrrd filename provided AND detached_header=True, separate header and data files written.",
"# If detached_header=True and data file is present, then write the files separately",
"# For all other cases, header & data written to same file.",
"if",
"filename",
".",
"endswith",
"(",
"'.nhdr'",
")",
":",
"detached_header",
"=",
"True",
"if",
"'data file'",
"not",
"in",
"header",
":",
"# Get the base filename without the extension",
"base_filename",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"[",
"0",
"]",
"# Get the appropriate data filename based on encoding, see here for information on the standard detached",
"# filename: http://teem.sourceforge.net/nrrd/format.html#encoding",
"if",
"header",
"[",
"'encoding'",
"]",
"==",
"'raw'",
":",
"data_filename",
"=",
"'%s.raw'",
"%",
"base_filename",
"elif",
"header",
"[",
"'encoding'",
"]",
"in",
"[",
"'ASCII'",
",",
"'ascii'",
",",
"'text'",
",",
"'txt'",
"]",
":",
"data_filename",
"=",
"'%s.txt'",
"%",
"base_filename",
"elif",
"header",
"[",
"'encoding'",
"]",
"in",
"[",
"'gzip'",
",",
"'gz'",
"]",
":",
"data_filename",
"=",
"'%s.raw.gz'",
"%",
"base_filename",
"elif",
"header",
"[",
"'encoding'",
"]",
"in",
"[",
"'bzip2'",
",",
"'bz2'",
"]",
":",
"data_filename",
"=",
"'%s.raw.bz2'",
"%",
"base_filename",
"else",
":",
"raise",
"NRRDError",
"(",
"'Invalid encoding specification while writing NRRD file: %s'",
"%",
"header",
"[",
"'encoding'",
"]",
")",
"header",
"[",
"'data file'",
"]",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"data_filename",
")",
"if",
"relative_data_path",
"else",
"os",
".",
"path",
".",
"abspath",
"(",
"data_filename",
")",
"else",
":",
"# TODO This will cause issues for relative data files because it will not save in the correct spot",
"data_filename",
"=",
"header",
"[",
"'data file'",
"]",
"elif",
"filename",
".",
"endswith",
"(",
"'.nrrd'",
")",
"and",
"detached_header",
":",
"data_filename",
"=",
"filename",
"header",
"[",
"'data file'",
"]",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"data_filename",
")",
"if",
"relative_data_path",
"else",
"os",
".",
"path",
".",
"abspath",
"(",
"data_filename",
")",
"filename",
"=",
"'%s.nhdr'",
"%",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"[",
"0",
"]",
"else",
":",
"# Write header & data as one file",
"data_filename",
"=",
"filename",
"detached_header",
"=",
"False",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"fh",
":",
"fh",
".",
"write",
"(",
"b'NRRD0005\\n'",
")",
"fh",
".",
"write",
"(",
"b'# This NRRD file was generated by pynrrd\\n'",
")",
"fh",
".",
"write",
"(",
"b'# on '",
"+",
"datetime",
".",
"utcnow",
"(",
")",
".",
"strftime",
"(",
"'%Y-%m-%d %H:%M:%S'",
")",
".",
"encode",
"(",
"'ascii'",
")",
"+",
"b'(GMT).\\n'",
")",
"fh",
".",
"write",
"(",
"b'# Complete NRRD file format specification at:\\n'",
")",
"fh",
".",
"write",
"(",
"b'# http://teem.sourceforge.net/nrrd/format.html\\n'",
")",
"# Copy the options since dictionaries are mutable when passed as an argument",
"# Thus, to prevent changes to the actual options, a copy is made",
"# Empty ordered_options list is made (will be converted into dictionary)",
"local_options",
"=",
"header",
".",
"copy",
"(",
")",
"ordered_options",
"=",
"[",
"]",
"# Loop through field order and add the key/value if present",
"# Remove the key/value from the local options so that we know not to add it again",
"for",
"field",
"in",
"_NRRD_FIELD_ORDER",
":",
"if",
"field",
"in",
"local_options",
":",
"ordered_options",
".",
"append",
"(",
"(",
"field",
",",
"local_options",
"[",
"field",
"]",
")",
")",
"del",
"local_options",
"[",
"field",
"]",
"# Leftover items are assumed to be the custom field/value options",
"# So get current size and any items past this index will be a custom value",
"custom_field_start_index",
"=",
"len",
"(",
"ordered_options",
")",
"# Add the leftover items to the end of the list and convert the options into a dictionary",
"ordered_options",
".",
"extend",
"(",
"local_options",
".",
"items",
"(",
")",
")",
"ordered_options",
"=",
"OrderedDict",
"(",
"ordered_options",
")",
"for",
"x",
",",
"(",
"field",
",",
"value",
")",
"in",
"enumerate",
"(",
"ordered_options",
".",
"items",
"(",
")",
")",
":",
"# Get the field_type based on field and then get corresponding",
"# value as a str using _format_field_value",
"field_type",
"=",
"_get_field_type",
"(",
"field",
",",
"custom_field_map",
")",
"value_str",
"=",
"_format_field_value",
"(",
"value",
",",
"field_type",
")",
"# Custom fields are written as key/value pairs with a := instead of : delimeter",
"if",
"x",
">=",
"custom_field_start_index",
":",
"fh",
".",
"write",
"(",
"(",
"'%s:=%s\\n'",
"%",
"(",
"field",
",",
"value_str",
")",
")",
".",
"encode",
"(",
"'ascii'",
")",
")",
"else",
":",
"fh",
".",
"write",
"(",
"(",
"'%s: %s\\n'",
"%",
"(",
"field",
",",
"value_str",
")",
")",
".",
"encode",
"(",
"'ascii'",
")",
")",
"# Write the closing extra newline",
"fh",
".",
"write",
"(",
"b'\\n'",
")",
"# If header & data in the same file is desired, write data in the file",
"if",
"not",
"detached_header",
":",
"_write_data",
"(",
"data",
",",
"fh",
",",
"header",
",",
"compression_level",
"=",
"compression_level",
",",
"index_order",
"=",
"index_order",
")",
"# If detached header desired, write data to different file",
"if",
"detached_header",
":",
"with",
"open",
"(",
"data_filename",
",",
"'wb'",
")",
"as",
"data_fh",
":",
"_write_data",
"(",
"data",
",",
"data_fh",
",",
"header",
",",
"compression_level",
"=",
"compression_level",
",",
"index_order",
"=",
"index_order",
")"
] |
Write :class:`numpy.ndarray` to NRRD file
The :obj:`filename` parameter specifies the absolute or relative filename to write the NRRD file to. If the
:obj:`filename` extension is .nhdr, then the :obj:`detached_header` parameter is set to true automatically. If the
:obj:`detached_header` parameter is set to :obj:`True` and the :obj:`filename` ends in .nrrd, then the header file
will have the same path and base name as the :obj:`filename` but with an extension of .nhdr. In all other cases,
the header and data are saved in the same file.
:obj:`header` is an optional parameter containing the fields and values to be added to the NRRD header.
.. note::
The following fields are automatically generated based on the :obj:`data` parameter ignoring these values
in the :obj:`header`: 'type', 'endian', 'dimension', 'sizes'. In addition, the generated fields will be
added to the given :obj:`header`. Thus, one can check the generated fields by viewing the passed
:obj:`header`.
.. note::
The default encoding field used if not specified in :obj:`header` is 'gzip'.
.. note::
The :obj:`index_order` parameter must be consistent with the index order specified in :meth:`read`.
Reading an NRRD file in C-order and then writing as Fortran-order or vice versa will result in the data
being transposed in the NRRD file.
See :ref:`user-guide:Writing NRRD files` for more information on writing NRRD files.
Parameters
----------
filename : :class:`str`
Filename of the NRRD file
data : :class:`numpy.ndarray`
Data to save to the NRRD file
detached_header : :obj:`bool`, optional
Whether the header and data should be saved in separate files. Defaults to :obj:`False`
relative_data_path : :class:`bool`
Whether the data filename in detached header is saved with a relative path or absolute path.
This parameter is ignored if there is no detached header. Defaults to :obj:`True`
custom_field_map : :class:`dict` (:class:`str`, :class:`str`), optional
Dictionary used for parsing custom field types where the key is the custom field name and the value is a
string identifying datatype for the custom field.
compression_level : :class:`int`
Integer between 1 to 9 specifying the compression level when using a compressed encoding (gzip or bzip). A value
of :obj:`1` compresses the data the least amount and is the fastest, while a value of :obj:`9` compresses the
data the most and is the slowest.
index_order : {'C', 'F'}, optional
Specifies the index order used for writing. Either 'C' (C-order) where the dimensions are ordered from
slowest-varying to fastest-varying (e.g. (z, y, x)), or 'F' (Fortran-order) where the dimensions are ordered
from fastest-varying to slowest-varying (e.g. (x, y, z)).
See Also
--------
:meth:`read`, :meth:`read_header`, :meth:`read_data`
|
[
"Write",
":",
"class",
":",
"numpy",
".",
"ndarray",
"to",
"NRRD",
"file"
] |
train
|
https://github.com/mhe/pynrrd/blob/96dd875b302031ea27e2d3aaa611dc6f2dfc7979/nrrd/writer.py#L97-L272
|
mhe/pynrrd
|
nrrd/parsers.py
|
parse_vector
|
def parse_vector(x, dtype=None):
"""Parse NRRD vector from string into (N,) :class:`numpy.ndarray`.
See :ref:`user-guide:int vector` and :ref:`user-guide:double vector` for more information on the format.
Parameters
----------
x : :class:`str`
String containing NRRD vector
dtype : data-type, optional
Datatype to use for the resulting Numpy array. Datatype can be :class:`float`, :class:`int` or :obj:`None`. If
:obj:`dtype` is :obj:`None`, then it will be automatically determined by checking any of the vector elements
for fractional numbers. If found, then the vector will be converted to :class:`float`, otherwise :class:`int`.
Default is to automatically determine datatype.
Returns
-------
vector : (N,) :class:`numpy.ndarray`
Vector that is parsed from the :obj:`x` string
"""
if x[0] != '(' or x[-1] != ')':
raise NRRDError('Vector should be enclosed by parentheses.')
# Always convert to float and then truncate to integer if desired
# The reason why is parsing a floating point string to int will fail (i.e. int('25.1') will fail)
vector = np.array([float(x) for x in x[1:-1].split(',')])
# If using automatic datatype detection, then start by converting to float and determining if the number is whole
# Truncate to integer if dtype is int also
if dtype is None:
vector_trunc = vector.astype(int)
if np.all((vector - vector_trunc) == 0):
vector = vector_trunc
elif dtype == int:
vector = vector.astype(int)
elif dtype != float:
raise NRRDError('dtype should be None for automatic type detection, float or int')
return vector
|
python
|
def parse_vector(x, dtype=None):
if x[0] != '(' or x[-1] != ')':
raise NRRDError('Vector should be enclosed by parentheses.')
vector = np.array([float(x) for x in x[1:-1].split(',')])
if dtype is None:
vector_trunc = vector.astype(int)
if np.all((vector - vector_trunc) == 0):
vector = vector_trunc
elif dtype == int:
vector = vector.astype(int)
elif dtype != float:
raise NRRDError('dtype should be None for automatic type detection, float or int')
return vector
|
[
"def",
"parse_vector",
"(",
"x",
",",
"dtype",
"=",
"None",
")",
":",
"if",
"x",
"[",
"0",
"]",
"!=",
"'('",
"or",
"x",
"[",
"-",
"1",
"]",
"!=",
"')'",
":",
"raise",
"NRRDError",
"(",
"'Vector should be enclosed by parentheses.'",
")",
"# Always convert to float and then truncate to integer if desired",
"# The reason why is parsing a floating point string to int will fail (i.e. int('25.1') will fail)",
"vector",
"=",
"np",
".",
"array",
"(",
"[",
"float",
"(",
"x",
")",
"for",
"x",
"in",
"x",
"[",
"1",
":",
"-",
"1",
"]",
".",
"split",
"(",
"','",
")",
"]",
")",
"# If using automatic datatype detection, then start by converting to float and determining if the number is whole",
"# Truncate to integer if dtype is int also",
"if",
"dtype",
"is",
"None",
":",
"vector_trunc",
"=",
"vector",
".",
"astype",
"(",
"int",
")",
"if",
"np",
".",
"all",
"(",
"(",
"vector",
"-",
"vector_trunc",
")",
"==",
"0",
")",
":",
"vector",
"=",
"vector_trunc",
"elif",
"dtype",
"==",
"int",
":",
"vector",
"=",
"vector",
".",
"astype",
"(",
"int",
")",
"elif",
"dtype",
"!=",
"float",
":",
"raise",
"NRRDError",
"(",
"'dtype should be None for automatic type detection, float or int'",
")",
"return",
"vector"
] |
Parse NRRD vector from string into (N,) :class:`numpy.ndarray`.
See :ref:`user-guide:int vector` and :ref:`user-guide:double vector` for more information on the format.
Parameters
----------
x : :class:`str`
String containing NRRD vector
dtype : data-type, optional
Datatype to use for the resulting Numpy array. Datatype can be :class:`float`, :class:`int` or :obj:`None`. If
:obj:`dtype` is :obj:`None`, then it will be automatically determined by checking any of the vector elements
for fractional numbers. If found, then the vector will be converted to :class:`float`, otherwise :class:`int`.
Default is to automatically determine datatype.
Returns
-------
vector : (N,) :class:`numpy.ndarray`
Vector that is parsed from the :obj:`x` string
|
[
"Parse",
"NRRD",
"vector",
"from",
"string",
"into",
"(",
"N",
")",
":",
"class",
":",
"numpy",
".",
"ndarray",
"."
] |
train
|
https://github.com/mhe/pynrrd/blob/96dd875b302031ea27e2d3aaa611dc6f2dfc7979/nrrd/parsers.py#L6-L46
|
mhe/pynrrd
|
nrrd/parsers.py
|
parse_matrix
|
def parse_matrix(x, dtype=None):
"""Parse NRRD matrix from string into (M,N) :class:`numpy.ndarray`.
See :ref:`user-guide:int matrix` and :ref:`user-guide:double matrix` for more information on the format.
Parameters
----------
x : :class:`str`
String containing NRRD matrix
dtype : data-type, optional
Datatype to use for the resulting Numpy array. Datatype can be :class:`float`, :class:`int` or :obj:`None`. If
:obj:`dtype` is :obj:`None`, then it will be automatically determined by checking any of the elements
for fractional numbers. If found, then the matrix will be converted to :class:`float`, otherwise :class:`int`.
Default is to automatically determine datatype.
Returns
-------
matrix : (M,N) :class:`numpy.ndarray`
Matrix that is parsed from the :obj:`x` string
"""
# Split input by spaces, convert each row into a vector and stack them vertically to get a matrix
matrix = [parse_vector(x, dtype=float) for x in x.split()]
# Get the size of each row vector and then remove duplicate sizes
# There should be exactly one value in the matrix because all row sizes need to be the same
if len(np.unique([len(x) for x in matrix])) != 1:
raise NRRDError('Matrix should have same number of elements in each row')
matrix = np.vstack(matrix)
# If using automatic datatype detection, then start by converting to float and determining if the number is whole
# Truncate to integer if dtype is int also
if dtype is None:
matrix_trunc = matrix.astype(int)
if np.all((matrix - matrix_trunc) == 0):
matrix = matrix_trunc
elif dtype == int:
matrix = matrix.astype(int)
elif dtype != float:
raise NRRDError('dtype should be None for automatic type detection, float or int')
return matrix
|
python
|
def parse_matrix(x, dtype=None):
matrix = [parse_vector(x, dtype=float) for x in x.split()]
if len(np.unique([len(x) for x in matrix])) != 1:
raise NRRDError('Matrix should have same number of elements in each row')
matrix = np.vstack(matrix)
if dtype is None:
matrix_trunc = matrix.astype(int)
if np.all((matrix - matrix_trunc) == 0):
matrix = matrix_trunc
elif dtype == int:
matrix = matrix.astype(int)
elif dtype != float:
raise NRRDError('dtype should be None for automatic type detection, float or int')
return matrix
|
[
"def",
"parse_matrix",
"(",
"x",
",",
"dtype",
"=",
"None",
")",
":",
"# Split input by spaces, convert each row into a vector and stack them vertically to get a matrix",
"matrix",
"=",
"[",
"parse_vector",
"(",
"x",
",",
"dtype",
"=",
"float",
")",
"for",
"x",
"in",
"x",
".",
"split",
"(",
")",
"]",
"# Get the size of each row vector and then remove duplicate sizes",
"# There should be exactly one value in the matrix because all row sizes need to be the same",
"if",
"len",
"(",
"np",
".",
"unique",
"(",
"[",
"len",
"(",
"x",
")",
"for",
"x",
"in",
"matrix",
"]",
")",
")",
"!=",
"1",
":",
"raise",
"NRRDError",
"(",
"'Matrix should have same number of elements in each row'",
")",
"matrix",
"=",
"np",
".",
"vstack",
"(",
"matrix",
")",
"# If using automatic datatype detection, then start by converting to float and determining if the number is whole",
"# Truncate to integer if dtype is int also",
"if",
"dtype",
"is",
"None",
":",
"matrix_trunc",
"=",
"matrix",
".",
"astype",
"(",
"int",
")",
"if",
"np",
".",
"all",
"(",
"(",
"matrix",
"-",
"matrix_trunc",
")",
"==",
"0",
")",
":",
"matrix",
"=",
"matrix_trunc",
"elif",
"dtype",
"==",
"int",
":",
"matrix",
"=",
"matrix",
".",
"astype",
"(",
"int",
")",
"elif",
"dtype",
"!=",
"float",
":",
"raise",
"NRRDError",
"(",
"'dtype should be None for automatic type detection, float or int'",
")",
"return",
"matrix"
] |
Parse NRRD matrix from string into (M,N) :class:`numpy.ndarray`.
See :ref:`user-guide:int matrix` and :ref:`user-guide:double matrix` for more information on the format.
Parameters
----------
x : :class:`str`
String containing NRRD matrix
dtype : data-type, optional
Datatype to use for the resulting Numpy array. Datatype can be :class:`float`, :class:`int` or :obj:`None`. If
:obj:`dtype` is :obj:`None`, then it will be automatically determined by checking any of the elements
for fractional numbers. If found, then the matrix will be converted to :class:`float`, otherwise :class:`int`.
Default is to automatically determine datatype.
Returns
-------
matrix : (M,N) :class:`numpy.ndarray`
Matrix that is parsed from the :obj:`x` string
|
[
"Parse",
"NRRD",
"matrix",
"from",
"string",
"into",
"(",
"M",
"N",
")",
":",
"class",
":",
"numpy",
".",
"ndarray",
"."
] |
train
|
https://github.com/mhe/pynrrd/blob/96dd875b302031ea27e2d3aaa611dc6f2dfc7979/nrrd/parsers.py#L79-L122
|
mhe/pynrrd
|
nrrd/parsers.py
|
parse_optional_matrix
|
def parse_optional_matrix(x):
"""Parse optional NRRD matrix from string into (M,N) :class:`numpy.ndarray` of :class:`float`.
Function parses optional NRRD matrix from string into an (M,N) :class:`numpy.ndarray` of :class:`float`. This
function works the same as :meth:`parse_matrix` except if a row vector in the matrix is none, the resulting row in
the returned matrix will be all NaNs.
See :ref:`user-guide:double matrix` for more information on the format.
Parameters
----------
x : :class:`str`
String containing NRRD matrix
Returns
-------
matrix : (M,N) :class:`numpy.ndarray` of :class:`float`
Matrix that is parsed from the :obj:`x` string
"""
# Split input by spaces to get each row and convert into a vector. The row can be 'none', in which case it will
# return None
matrix = [parse_optional_vector(x, dtype=float) for x in x.split()]
# Get the size of each row vector, 0 if None
sizes = np.array([0 if x is None else len(x) for x in matrix])
# Get sizes of each row vector removing duplicate sizes
# Since each row vector should be same size, the unique sizes should return one value for the row size or it may
# return a second one (0) if there are None vectors
unique_sizes = np.unique(sizes)
if len(unique_sizes) != 1 and (len(unique_sizes) != 2 or unique_sizes.min() != 0):
raise NRRDError('Matrix should have same number of elements in each row')
# Create a vector row of NaN's that matches same size of remaining vector rows
# Stack the vector rows together to create matrix
nan_row = np.full((unique_sizes.max()), np.nan)
matrix = np.vstack([nan_row if x is None else x for x in matrix])
return matrix
|
python
|
def parse_optional_matrix(x):
matrix = [parse_optional_vector(x, dtype=float) for x in x.split()]
sizes = np.array([0 if x is None else len(x) for x in matrix])
unique_sizes = np.unique(sizes)
if len(unique_sizes) != 1 and (len(unique_sizes) != 2 or unique_sizes.min() != 0):
raise NRRDError('Matrix should have same number of elements in each row')
nan_row = np.full((unique_sizes.max()), np.nan)
matrix = np.vstack([nan_row if x is None else x for x in matrix])
return matrix
|
[
"def",
"parse_optional_matrix",
"(",
"x",
")",
":",
"# Split input by spaces to get each row and convert into a vector. The row can be 'none', in which case it will",
"# return None",
"matrix",
"=",
"[",
"parse_optional_vector",
"(",
"x",
",",
"dtype",
"=",
"float",
")",
"for",
"x",
"in",
"x",
".",
"split",
"(",
")",
"]",
"# Get the size of each row vector, 0 if None",
"sizes",
"=",
"np",
".",
"array",
"(",
"[",
"0",
"if",
"x",
"is",
"None",
"else",
"len",
"(",
"x",
")",
"for",
"x",
"in",
"matrix",
"]",
")",
"# Get sizes of each row vector removing duplicate sizes",
"# Since each row vector should be same size, the unique sizes should return one value for the row size or it may",
"# return a second one (0) if there are None vectors",
"unique_sizes",
"=",
"np",
".",
"unique",
"(",
"sizes",
")",
"if",
"len",
"(",
"unique_sizes",
")",
"!=",
"1",
"and",
"(",
"len",
"(",
"unique_sizes",
")",
"!=",
"2",
"or",
"unique_sizes",
".",
"min",
"(",
")",
"!=",
"0",
")",
":",
"raise",
"NRRDError",
"(",
"'Matrix should have same number of elements in each row'",
")",
"# Create a vector row of NaN's that matches same size of remaining vector rows",
"# Stack the vector rows together to create matrix",
"nan_row",
"=",
"np",
".",
"full",
"(",
"(",
"unique_sizes",
".",
"max",
"(",
")",
")",
",",
"np",
".",
"nan",
")",
"matrix",
"=",
"np",
".",
"vstack",
"(",
"[",
"nan_row",
"if",
"x",
"is",
"None",
"else",
"x",
"for",
"x",
"in",
"matrix",
"]",
")",
"return",
"matrix"
] |
Parse optional NRRD matrix from string into (M,N) :class:`numpy.ndarray` of :class:`float`.
Function parses optional NRRD matrix from string into an (M,N) :class:`numpy.ndarray` of :class:`float`. This
function works the same as :meth:`parse_matrix` except if a row vector in the matrix is none, the resulting row in
the returned matrix will be all NaNs.
See :ref:`user-guide:double matrix` for more information on the format.
Parameters
----------
x : :class:`str`
String containing NRRD matrix
Returns
-------
matrix : (M,N) :class:`numpy.ndarray` of :class:`float`
Matrix that is parsed from the :obj:`x` string
|
[
"Parse",
"optional",
"NRRD",
"matrix",
"from",
"string",
"into",
"(",
"M",
"N",
")",
":",
"class",
":",
"numpy",
".",
"ndarray",
"of",
":",
"class",
":",
"float",
"."
] |
train
|
https://github.com/mhe/pynrrd/blob/96dd875b302031ea27e2d3aaa611dc6f2dfc7979/nrrd/parsers.py#L125-L165
|
mhe/pynrrd
|
nrrd/parsers.py
|
parse_number_list
|
def parse_number_list(x, dtype=None):
"""Parse NRRD number list from string into (N,) :class:`numpy.ndarray`.
See :ref:`user-guide:int list` and :ref:`user-guide:double list` for more information on the format.
Parameters
----------
x : :class:`str`
String containing NRRD number list
dtype : data-type, optional
Datatype to use for the resulting Numpy array. Datatype can be :class:`float`, :class:`int` or :obj:`None`. If
:obj:`dtype` is :obj:`None`, then it will be automatically determined by checking for fractional numbers. If
found, then the string will be converted to :class:`float`, otherwise :class:`int`. Default is to automatically
determine datatype.
Returns
-------
vector : (N,) :class:`numpy.ndarray`
Vector that is parsed from the :obj:`x` string
"""
# Always convert to float and then perform truncation to integer if necessary
number_list = np.array([float(x) for x in x.split()])
if dtype is None:
number_list_trunc = number_list.astype(int)
# If there is no difference between the truncated number list and the number list, then that means that the
# number list was all integers and we can just return that
if np.all((number_list - number_list_trunc) == 0):
number_list = number_list_trunc
elif dtype == int:
number_list = number_list.astype(int)
elif dtype != float:
raise NRRDError('dtype should be None for automatic type detection, float or int')
return number_list
|
python
|
def parse_number_list(x, dtype=None):
number_list = np.array([float(x) for x in x.split()])
if dtype is None:
number_list_trunc = number_list.astype(int)
if np.all((number_list - number_list_trunc) == 0):
number_list = number_list_trunc
elif dtype == int:
number_list = number_list.astype(int)
elif dtype != float:
raise NRRDError('dtype should be None for automatic type detection, float or int')
return number_list
|
[
"def",
"parse_number_list",
"(",
"x",
",",
"dtype",
"=",
"None",
")",
":",
"# Always convert to float and then perform truncation to integer if necessary",
"number_list",
"=",
"np",
".",
"array",
"(",
"[",
"float",
"(",
"x",
")",
"for",
"x",
"in",
"x",
".",
"split",
"(",
")",
"]",
")",
"if",
"dtype",
"is",
"None",
":",
"number_list_trunc",
"=",
"number_list",
".",
"astype",
"(",
"int",
")",
"# If there is no difference between the truncated number list and the number list, then that means that the",
"# number list was all integers and we can just return that",
"if",
"np",
".",
"all",
"(",
"(",
"number_list",
"-",
"number_list_trunc",
")",
"==",
"0",
")",
":",
"number_list",
"=",
"number_list_trunc",
"elif",
"dtype",
"==",
"int",
":",
"number_list",
"=",
"number_list",
".",
"astype",
"(",
"int",
")",
"elif",
"dtype",
"!=",
"float",
":",
"raise",
"NRRDError",
"(",
"'dtype should be None for automatic type detection, float or int'",
")",
"return",
"number_list"
] |
Parse NRRD number list from string into (N,) :class:`numpy.ndarray`.
See :ref:`user-guide:int list` and :ref:`user-guide:double list` for more information on the format.
Parameters
----------
x : :class:`str`
String containing NRRD number list
dtype : data-type, optional
Datatype to use for the resulting Numpy array. Datatype can be :class:`float`, :class:`int` or :obj:`None`. If
:obj:`dtype` is :obj:`None`, then it will be automatically determined by checking for fractional numbers. If
found, then the string will be converted to :class:`float`, otherwise :class:`int`. Default is to automatically
determine datatype.
Returns
-------
vector : (N,) :class:`numpy.ndarray`
Vector that is parsed from the :obj:`x` string
|
[
"Parse",
"NRRD",
"number",
"list",
"from",
"string",
"into",
"(",
"N",
")",
":",
"class",
":",
"numpy",
".",
"ndarray",
"."
] |
train
|
https://github.com/mhe/pynrrd/blob/96dd875b302031ea27e2d3aaa611dc6f2dfc7979/nrrd/parsers.py#L168-L204
|
mhe/pynrrd
|
nrrd/parsers.py
|
parse_number_auto_dtype
|
def parse_number_auto_dtype(x):
"""Parse number from string with automatic type detection.
Parses input string and converts to a number using automatic type detection. If the number contains any
fractional parts, then the number will be converted to float, otherwise the number will be converted to an int.
See :ref:`user-guide:int` and :ref:`user-guide:double` for more information on the format.
Parameters
----------
x : :class:`str`
String representation of number
Returns
-------
result : :class:`int` or :class:`float`
Number parsed from :obj:`x` string
"""
value = float(x)
if value.is_integer():
value = int(value)
return value
|
python
|
def parse_number_auto_dtype(x):
value = float(x)
if value.is_integer():
value = int(value)
return value
|
[
"def",
"parse_number_auto_dtype",
"(",
"x",
")",
":",
"value",
"=",
"float",
"(",
"x",
")",
"if",
"value",
".",
"is_integer",
"(",
")",
":",
"value",
"=",
"int",
"(",
"value",
")",
"return",
"value"
] |
Parse number from string with automatic type detection.
Parses input string and converts to a number using automatic type detection. If the number contains any
fractional parts, then the number will be converted to float, otherwise the number will be converted to an int.
See :ref:`user-guide:int` and :ref:`user-guide:double` for more information on the format.
Parameters
----------
x : :class:`str`
String representation of number
Returns
-------
result : :class:`int` or :class:`float`
Number parsed from :obj:`x` string
|
[
"Parse",
"number",
"from",
"string",
"with",
"automatic",
"type",
"detection",
"."
] |
train
|
https://github.com/mhe/pynrrd/blob/96dd875b302031ea27e2d3aaa611dc6f2dfc7979/nrrd/parsers.py#L207-L231
|
mhe/pynrrd
|
nrrd/reader.py
|
_determine_datatype
|
def _determine_datatype(fields):
"""Determine the numpy dtype of the data."""
# Convert the NRRD type string identifier into a NumPy string identifier using a map
np_typestring = _TYPEMAP_NRRD2NUMPY[fields['type']]
# This is only added if the datatype has more than one byte and is not using ASCII encoding
# Note: Endian is not required for ASCII encoding
if np.dtype(np_typestring).itemsize > 1 and fields['encoding'] not in ['ASCII', 'ascii', 'text', 'txt']:
if 'endian' not in fields:
raise NRRDError('Header is missing required field: "endian".')
elif fields['endian'] == 'big':
np_typestring = '>' + np_typestring
elif fields['endian'] == 'little':
np_typestring = '<' + np_typestring
else:
raise NRRDError('Invalid endian value in header: "%s"' % fields['endian'])
return np.dtype(np_typestring)
|
python
|
def _determine_datatype(fields):
np_typestring = _TYPEMAP_NRRD2NUMPY[fields['type']]
if np.dtype(np_typestring).itemsize > 1 and fields['encoding'] not in ['ASCII', 'ascii', 'text', 'txt']:
if 'endian' not in fields:
raise NRRDError('Header is missing required field: "endian".')
elif fields['endian'] == 'big':
np_typestring = '>' + np_typestring
elif fields['endian'] == 'little':
np_typestring = '<' + np_typestring
else:
raise NRRDError('Invalid endian value in header: "%s"' % fields['endian'])
return np.dtype(np_typestring)
|
[
"def",
"_determine_datatype",
"(",
"fields",
")",
":",
"# Convert the NRRD type string identifier into a NumPy string identifier using a map",
"np_typestring",
"=",
"_TYPEMAP_NRRD2NUMPY",
"[",
"fields",
"[",
"'type'",
"]",
"]",
"# This is only added if the datatype has more than one byte and is not using ASCII encoding",
"# Note: Endian is not required for ASCII encoding",
"if",
"np",
".",
"dtype",
"(",
"np_typestring",
")",
".",
"itemsize",
">",
"1",
"and",
"fields",
"[",
"'encoding'",
"]",
"not",
"in",
"[",
"'ASCII'",
",",
"'ascii'",
",",
"'text'",
",",
"'txt'",
"]",
":",
"if",
"'endian'",
"not",
"in",
"fields",
":",
"raise",
"NRRDError",
"(",
"'Header is missing required field: \"endian\".'",
")",
"elif",
"fields",
"[",
"'endian'",
"]",
"==",
"'big'",
":",
"np_typestring",
"=",
"'>'",
"+",
"np_typestring",
"elif",
"fields",
"[",
"'endian'",
"]",
"==",
"'little'",
":",
"np_typestring",
"=",
"'<'",
"+",
"np_typestring",
"else",
":",
"raise",
"NRRDError",
"(",
"'Invalid endian value in header: \"%s\"'",
"%",
"fields",
"[",
"'endian'",
"]",
")",
"return",
"np",
".",
"dtype",
"(",
"np_typestring",
")"
] |
Determine the numpy dtype of the data.
|
[
"Determine",
"the",
"numpy",
"dtype",
"of",
"the",
"data",
"."
] |
train
|
https://github.com/mhe/pynrrd/blob/96dd875b302031ea27e2d3aaa611dc6f2dfc7979/nrrd/reader.py#L145-L163
|
mhe/pynrrd
|
nrrd/reader.py
|
_validate_magic_line
|
def _validate_magic_line(line):
"""For NRRD files, the first four characters are always "NRRD", and
remaining characters give information about the file format version
>>> _validate_magic_line('NRRD0005')
8
>>> _validate_magic_line('NRRD0006')
Traceback (most recent call last):
...
NrrdError: NRRD file version too new for this library.
>>> _validate_magic_line('NRRD')
Traceback (most recent call last):
...
NrrdError: Invalid NRRD magic line: NRRD
"""
if not line.startswith('NRRD'):
raise NRRDError('Invalid NRRD magic line. Is this an NRRD file?')
try:
version = int(line[4:])
if version > 5:
raise NRRDError('Unsupported NRRD file version (version: %i). This library only supports v%i and below.'
% (version, 5))
except ValueError:
raise NRRDError('Invalid NRRD magic line: %s' % line)
return len(line)
|
python
|
def _validate_magic_line(line):
if not line.startswith('NRRD'):
raise NRRDError('Invalid NRRD magic line. Is this an NRRD file?')
try:
version = int(line[4:])
if version > 5:
raise NRRDError('Unsupported NRRD file version (version: %i). This library only supports v%i and below.'
% (version, 5))
except ValueError:
raise NRRDError('Invalid NRRD magic line: %s' % line)
return len(line)
|
[
"def",
"_validate_magic_line",
"(",
"line",
")",
":",
"if",
"not",
"line",
".",
"startswith",
"(",
"'NRRD'",
")",
":",
"raise",
"NRRDError",
"(",
"'Invalid NRRD magic line. Is this an NRRD file?'",
")",
"try",
":",
"version",
"=",
"int",
"(",
"line",
"[",
"4",
":",
"]",
")",
"if",
"version",
">",
"5",
":",
"raise",
"NRRDError",
"(",
"'Unsupported NRRD file version (version: %i). This library only supports v%i and below.'",
"%",
"(",
"version",
",",
"5",
")",
")",
"except",
"ValueError",
":",
"raise",
"NRRDError",
"(",
"'Invalid NRRD magic line: %s'",
"%",
"line",
")",
"return",
"len",
"(",
"line",
")"
] |
For NRRD files, the first four characters are always "NRRD", and
remaining characters give information about the file format version
>>> _validate_magic_line('NRRD0005')
8
>>> _validate_magic_line('NRRD0006')
Traceback (most recent call last):
...
NrrdError: NRRD file version too new for this library.
>>> _validate_magic_line('NRRD')
Traceback (most recent call last):
...
NrrdError: Invalid NRRD magic line: NRRD
|
[
"For",
"NRRD",
"files",
"the",
"first",
"four",
"characters",
"are",
"always",
"NRRD",
"and",
"remaining",
"characters",
"give",
"information",
"about",
"the",
"file",
"format",
"version"
] |
train
|
https://github.com/mhe/pynrrd/blob/96dd875b302031ea27e2d3aaa611dc6f2dfc7979/nrrd/reader.py#L166-L193
|
mhe/pynrrd
|
nrrd/reader.py
|
read_header
|
def read_header(file, custom_field_map=None):
"""Read contents of header and parse values from :obj:`file`
:obj:`file` can be a filename indicating where the NRRD header is located or a string iterator object. If a
filename is specified, then the file will be opened and closed after the header is read from it. If not specifying
a filename, the :obj:`file` parameter can be any sort of iterator that returns a string each time :meth:`next` is
called. The two common objects that meet these requirements are file objects and a list of strings. When
:obj:`file` is a file object, it must be opened with the binary flag ('b') on platforms where that makes a
difference, such as Windows.
See :ref:`user-guide:Reading NRRD files` for more information on reading NRRD files.
Parameters
----------
file : :class:`str` or string iterator
Filename, file object or string iterator object to read NRRD header from
custom_field_map : :class:`dict` (:class:`str`, :class:`str`), optional
Dictionary used for parsing custom field types where the key is the custom field name and the value is a
string identifying datatype for the custom field.
Returns
-------
header : :class:`dict` (:class:`str`, :obj:`Object`)
Dictionary containing the header fields and their corresponding parsed value
See Also
--------
:meth:`read`, :meth:`read_data`
"""
# If the file is a filename rather than the file handle, then open the file and call this function again with the
# file handle. Since read function uses a filename, it is easy to think read_header is the same syntax.
if isinstance(file, str) and file.count('\n') == 0:
with open(file, 'rb') as fh:
header = read_header(fh, custom_field_map)
return header
# Collect number of bytes in the file header (for seeking below)
header_size = 0
# Get iterator for the file and extract the first line, the magic line
it = iter(file)
magic_line = next(it)
# Depending on what type file is, decoding may or may not be necessary. Decode if necessary, otherwise skip.
need_decode = False
if hasattr(magic_line, 'decode'):
need_decode = True
magic_line = magic_line.decode('ascii', 'ignore')
# Validate the magic line and increment header size by size of the line
header_size += _validate_magic_line(magic_line)
# Create empty header
# This is an OrderedDict rather than an ordinary dict because an OrderedDict will keep it's order that key/values
# are added for when looping back through it. The added benefit of this is that saving the header will save the
# fields in the same order.
header = OrderedDict()
# Loop through each line
for line in it:
header_size += len(line)
if need_decode:
line = line.decode('ascii', 'ignore')
# Trailing whitespace ignored per the NRRD spec
line = line.rstrip()
# Skip comments starting with # (no leading whitespace is allowed)
# Or, stop reading the header once a blank line is encountered. This separates header from data.
if line.startswith('#'):
continue
elif line == '':
break
# Read the field and value from the line, split using regex to search for := or : delimiter
field, value = re.split(r':=?', line, 1)
# Remove whitespace before and after the field and value
field, value = field.strip(), value.strip()
# Check if the field has been added already
if field in header.keys():
dup_message = "Duplicate header field: '%s'" % str(field)
if not ALLOW_DUPLICATE_FIELD:
raise NRRDError(dup_message)
warnings.warn(dup_message)
# Get the datatype of the field based on it's field name and custom field map
field_type = _get_field_type(field, custom_field_map)
# Parse the field value using the datatype retrieved
# Place it in the header dictionary
header[field] = _parse_field_value(value, field_type)
# Reading the file line by line is buffered and so the header is not in the correct position for reading data if
# the file contains the data in it as well. The solution is to set the file pointer to just behind the header.
if hasattr(file, 'seek'):
file.seek(header_size)
return header
|
python
|
def read_header(file, custom_field_map=None):
if isinstance(file, str) and file.count('\n') == 0:
with open(file, 'rb') as fh:
header = read_header(fh, custom_field_map)
return header
header_size = 0
it = iter(file)
magic_line = next(it)
need_decode = False
if hasattr(magic_line, 'decode'):
need_decode = True
magic_line = magic_line.decode('ascii', 'ignore')
header_size += _validate_magic_line(magic_line)
header = OrderedDict()
for line in it:
header_size += len(line)
if need_decode:
line = line.decode('ascii', 'ignore')
line = line.rstrip()
if line.startswith('
continue
elif line == '':
break
field, value = re.split(r':=?', line, 1)
field, value = field.strip(), value.strip()
if field in header.keys():
dup_message = "Duplicate header field: '%s'" % str(field)
if not ALLOW_DUPLICATE_FIELD:
raise NRRDError(dup_message)
warnings.warn(dup_message)
field_type = _get_field_type(field, custom_field_map)
header[field] = _parse_field_value(value, field_type)
if hasattr(file, 'seek'):
file.seek(header_size)
return header
|
[
"def",
"read_header",
"(",
"file",
",",
"custom_field_map",
"=",
"None",
")",
":",
"# If the file is a filename rather than the file handle, then open the file and call this function again with the",
"# file handle. Since read function uses a filename, it is easy to think read_header is the same syntax.",
"if",
"isinstance",
"(",
"file",
",",
"str",
")",
"and",
"file",
".",
"count",
"(",
"'\\n'",
")",
"==",
"0",
":",
"with",
"open",
"(",
"file",
",",
"'rb'",
")",
"as",
"fh",
":",
"header",
"=",
"read_header",
"(",
"fh",
",",
"custom_field_map",
")",
"return",
"header",
"# Collect number of bytes in the file header (for seeking below)",
"header_size",
"=",
"0",
"# Get iterator for the file and extract the first line, the magic line",
"it",
"=",
"iter",
"(",
"file",
")",
"magic_line",
"=",
"next",
"(",
"it",
")",
"# Depending on what type file is, decoding may or may not be necessary. Decode if necessary, otherwise skip.",
"need_decode",
"=",
"False",
"if",
"hasattr",
"(",
"magic_line",
",",
"'decode'",
")",
":",
"need_decode",
"=",
"True",
"magic_line",
"=",
"magic_line",
".",
"decode",
"(",
"'ascii'",
",",
"'ignore'",
")",
"# Validate the magic line and increment header size by size of the line",
"header_size",
"+=",
"_validate_magic_line",
"(",
"magic_line",
")",
"# Create empty header",
"# This is an OrderedDict rather than an ordinary dict because an OrderedDict will keep it's order that key/values",
"# are added for when looping back through it. The added benefit of this is that saving the header will save the",
"# fields in the same order.",
"header",
"=",
"OrderedDict",
"(",
")",
"# Loop through each line",
"for",
"line",
"in",
"it",
":",
"header_size",
"+=",
"len",
"(",
"line",
")",
"if",
"need_decode",
":",
"line",
"=",
"line",
".",
"decode",
"(",
"'ascii'",
",",
"'ignore'",
")",
"# Trailing whitespace ignored per the NRRD spec",
"line",
"=",
"line",
".",
"rstrip",
"(",
")",
"# Skip comments starting with # (no leading whitespace is allowed)",
"# Or, stop reading the header once a blank line is encountered. This separates header from data.",
"if",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"continue",
"elif",
"line",
"==",
"''",
":",
"break",
"# Read the field and value from the line, split using regex to search for := or : delimiter",
"field",
",",
"value",
"=",
"re",
".",
"split",
"(",
"r':=?'",
",",
"line",
",",
"1",
")",
"# Remove whitespace before and after the field and value",
"field",
",",
"value",
"=",
"field",
".",
"strip",
"(",
")",
",",
"value",
".",
"strip",
"(",
")",
"# Check if the field has been added already",
"if",
"field",
"in",
"header",
".",
"keys",
"(",
")",
":",
"dup_message",
"=",
"\"Duplicate header field: '%s'\"",
"%",
"str",
"(",
"field",
")",
"if",
"not",
"ALLOW_DUPLICATE_FIELD",
":",
"raise",
"NRRDError",
"(",
"dup_message",
")",
"warnings",
".",
"warn",
"(",
"dup_message",
")",
"# Get the datatype of the field based on it's field name and custom field map",
"field_type",
"=",
"_get_field_type",
"(",
"field",
",",
"custom_field_map",
")",
"# Parse the field value using the datatype retrieved",
"# Place it in the header dictionary",
"header",
"[",
"field",
"]",
"=",
"_parse_field_value",
"(",
"value",
",",
"field_type",
")",
"# Reading the file line by line is buffered and so the header is not in the correct position for reading data if",
"# the file contains the data in it as well. The solution is to set the file pointer to just behind the header.",
"if",
"hasattr",
"(",
"file",
",",
"'seek'",
")",
":",
"file",
".",
"seek",
"(",
"header_size",
")",
"return",
"header"
] |
Read contents of header and parse values from :obj:`file`
:obj:`file` can be a filename indicating where the NRRD header is located or a string iterator object. If a
filename is specified, then the file will be opened and closed after the header is read from it. If not specifying
a filename, the :obj:`file` parameter can be any sort of iterator that returns a string each time :meth:`next` is
called. The two common objects that meet these requirements are file objects and a list of strings. When
:obj:`file` is a file object, it must be opened with the binary flag ('b') on platforms where that makes a
difference, such as Windows.
See :ref:`user-guide:Reading NRRD files` for more information on reading NRRD files.
Parameters
----------
file : :class:`str` or string iterator
Filename, file object or string iterator object to read NRRD header from
custom_field_map : :class:`dict` (:class:`str`, :class:`str`), optional
Dictionary used for parsing custom field types where the key is the custom field name and the value is a
string identifying datatype for the custom field.
Returns
-------
header : :class:`dict` (:class:`str`, :obj:`Object`)
Dictionary containing the header fields and their corresponding parsed value
See Also
--------
:meth:`read`, :meth:`read_data`
|
[
"Read",
"contents",
"of",
"header",
"and",
"parse",
"values",
"from",
":",
"obj",
":",
"file"
] |
train
|
https://github.com/mhe/pynrrd/blob/96dd875b302031ea27e2d3aaa611dc6f2dfc7979/nrrd/reader.py#L196-L298
|
mhe/pynrrd
|
nrrd/reader.py
|
read_data
|
def read_data(header, fh=None, filename=None, index_order='F'):
"""Read data from file into :class:`numpy.ndarray`
The two parameters :obj:`fh` and :obj:`filename` are optional depending on the parameters but it never hurts to
specify both. The file handle (:obj:`fh`) is necessary if the header is attached with the NRRD data. However, if
the NRRD data is detached from the header, then the :obj:`filename` parameter is required to obtain the absolute
path to the data file.
See :ref:`user-guide:Reading NRRD files` for more information on reading NRRD files.
Parameters
----------
header : :class:`dict` (:class:`str`, :obj:`Object`)
Parsed fields/values obtained from :meth:`read_header` function
fh : file-object, optional
File object pointing to first byte of data. Only necessary if data is attached to header.
filename : :class:`str`, optional
Filename of the header file. Only necessary if data is detached from the header. This is used to get the
absolute data path.
index_order : {'C', 'F'}, optional
Specifies the index order of the resulting data array. Either 'C' (C-order) where the dimensions are ordered from
slowest-varying to fastest-varying (e.g. (z, y, x)), or 'F' (Fortran-order) where the dimensions are ordered
from fastest-varying to slowest-varying (e.g. (x, y, z)).
Returns
-------
data : :class:`numpy.ndarray`
Data read from NRRD file
See Also
--------
:meth:`read`, :meth:`read_header`
"""
if index_order not in ['F', 'C']:
raise NRRDError('Invalid index order')
# Check that the required fields are in the header
for field in _NRRD_REQUIRED_FIELDS:
if field not in header:
raise NRRDError('Header is missing required field: "%s".' % field)
if header['dimension'] != len(header['sizes']):
raise NRRDError('Number of elements in sizes does not match dimension. Dimension: %i, len(sizes): %i' % (
header['dimension'], len(header['sizes'])))
# Determine the data type from the header
dtype = _determine_datatype(header)
# Determine the byte skip, line skip and the data file
# These all can be written with or without the space according to the NRRD spec, so we check them both
line_skip = header.get('lineskip', header.get('line skip', 0))
byte_skip = header.get('byteskip', header.get('byte skip', 0))
data_filename = header.get('datafile', header.get('data file', None))
# If the data file is separate from the header file, then open the data file to read from that instead
if data_filename is not None:
# If the pathname is relative, then append the current directory from the filename
if not os.path.isabs(data_filename):
if filename is None:
raise NRRDError('Filename parameter must be specified when a relative data file path is given')
data_filename = os.path.join(os.path.dirname(filename), data_filename)
# Override the fh parameter with the data filename
# Note that this is opened without a "with" block, thus it must be closed manually in all circumstances
fh = open(data_filename, 'rb')
# Get the total number of data points by multiplying the size of each dimension together
total_data_points = header['sizes'].prod()
# Skip the number of lines requested when line_skip >= 0
# Irrespective of the NRRD file having attached/detached header
# Lines are skipped before getting to the beginning of the data
if line_skip >= 0:
for _ in range(line_skip):
fh.readline()
else:
# Must close the file because if the file was opened above from detached filename, there is no "with" block to
# close it for us
fh.close()
raise NRRDError('Invalid lineskip, allowed values are greater than or equal to 0')
# Skip the requested number of bytes or seek backward, and then parse the data using NumPy
if byte_skip < -1:
# Must close the file because if the file was opened above from detached filename, there is no "with" block to
# close it for us
fh.close()
raise NRRDError('Invalid byteskip, allowed values are greater than or equal to -1')
elif byte_skip >= 0:
fh.seek(byte_skip, os.SEEK_CUR)
elif byte_skip == -1 and header['encoding'] not in ['gzip', 'gz', 'bzip2', 'bz2']:
fh.seek(-dtype.itemsize * total_data_points, os.SEEK_END)
else:
# The only case left should be: byte_skip == -1 and header['encoding'] == 'gzip'
byte_skip = -dtype.itemsize * total_data_points
# If a compression encoding is used, then byte skip AFTER decompressing
if header['encoding'] == 'raw':
data = np.fromfile(fh, dtype)
elif header['encoding'] in ['ASCII', 'ascii', 'text', 'txt']:
data = np.fromfile(fh, dtype, sep=' ')
else:
# Handle compressed data now
# Construct the decompression object based on encoding
if header['encoding'] in ['gzip', 'gz']:
decompobj = zlib.decompressobj(zlib.MAX_WBITS | 16)
elif header['encoding'] in ['bzip2', 'bz2']:
decompobj = bz2.BZ2Decompressor()
else:
# Must close the file because if the file was opened above from detached filename, there is no "with" block
# to close it for us
fh.close()
raise NRRDError('Unsupported encoding: "%s"' % header['encoding'])
# Loop through the file and read a chunk at a time (see _READ_CHUNKSIZE why it is read in chunks)
decompressed_data = bytearray()
# Read all of the remaining data from the file
# Obtain the length of the compressed data since we will be using it repeatedly, more efficient
compressed_data = fh.read()
compressed_data_len = len(compressed_data)
start_index = 0
# Loop through data and decompress it chunk by chunk
while start_index < compressed_data_len:
# Calculate the end index = start index plus chunk size
# Set to the string length to read the remaining chunk at the end
end_index = min(start_index + _READ_CHUNKSIZE, compressed_data_len)
# Decompress and append data
decompressed_data += decompobj.decompress(compressed_data[start_index:end_index])
# Update start index
start_index = end_index
# Delete the compressed data since we do not need it anymore
# This could potentially be using a lot of memory
del compressed_data
# Byte skip is applied AFTER the decompression. Skip first x bytes of the decompressed data and parse it using
# NumPy
data = np.frombuffer(decompressed_data[byte_skip:], dtype)
# Close the file, even if opened using "with" block, closing it manually does not hurt
fh.close()
if total_data_points != data.size:
raise NRRDError('Size of the data does not equal the product of all the dimensions: {0}-{1}={2}'
.format(total_data_points, data.size, total_data_points - data.size))
# In the NRRD header, the fields are specified in Fortran order, i.e, the first index is the one that changes
# fastest and last index changes slowest. This needs to be taken into consideration since numpy uses C-order
# indexing.
# The array shape from NRRD (x,y,z) needs to be reversed as numpy expects (z,y,x).
data = np.reshape(data, tuple(header['sizes'][::-1]))
# Transpose data to enable Fortran indexing if requested.
if index_order == 'F':
data = data.T
return data
|
python
|
def read_data(header, fh=None, filename=None, index_order='F'):
if index_order not in ['F', 'C']:
raise NRRDError('Invalid index order')
for field in _NRRD_REQUIRED_FIELDS:
if field not in header:
raise NRRDError('Header is missing required field: "%s".' % field)
if header['dimension'] != len(header['sizes']):
raise NRRDError('Number of elements in sizes does not match dimension. Dimension: %i, len(sizes): %i' % (
header['dimension'], len(header['sizes'])))
dtype = _determine_datatype(header)
line_skip = header.get('lineskip', header.get('line skip', 0))
byte_skip = header.get('byteskip', header.get('byte skip', 0))
data_filename = header.get('datafile', header.get('data file', None))
if data_filename is not None:
if not os.path.isabs(data_filename):
if filename is None:
raise NRRDError('Filename parameter must be specified when a relative data file path is given')
data_filename = os.path.join(os.path.dirname(filename), data_filename)
fh = open(data_filename, 'rb')
total_data_points = header['sizes'].prod()
if line_skip >= 0:
for _ in range(line_skip):
fh.readline()
else:
fh.close()
raise NRRDError('Invalid lineskip, allowed values are greater than or equal to 0')
if byte_skip < -1:
fh.close()
raise NRRDError('Invalid byteskip, allowed values are greater than or equal to -1')
elif byte_skip >= 0:
fh.seek(byte_skip, os.SEEK_CUR)
elif byte_skip == -1 and header['encoding'] not in ['gzip', 'gz', 'bzip2', 'bz2']:
fh.seek(-dtype.itemsize * total_data_points, os.SEEK_END)
else:
byte_skip = -dtype.itemsize * total_data_points
if header['encoding'] == 'raw':
data = np.fromfile(fh, dtype)
elif header['encoding'] in ['ASCII', 'ascii', 'text', 'txt']:
data = np.fromfile(fh, dtype, sep=' ')
else:
if header['encoding'] in ['gzip', 'gz']:
decompobj = zlib.decompressobj(zlib.MAX_WBITS | 16)
elif header['encoding'] in ['bzip2', 'bz2']:
decompobj = bz2.BZ2Decompressor()
else:
fh.close()
raise NRRDError('Unsupported encoding: "%s"' % header['encoding'])
decompressed_data = bytearray()
compressed_data = fh.read()
compressed_data_len = len(compressed_data)
start_index = 0
while start_index < compressed_data_len:
end_index = min(start_index + _READ_CHUNKSIZE, compressed_data_len)
decompressed_data += decompobj.decompress(compressed_data[start_index:end_index])
start_index = end_index
del compressed_data
data = np.frombuffer(decompressed_data[byte_skip:], dtype)
fh.close()
if total_data_points != data.size:
raise NRRDError('Size of the data does not equal the product of all the dimensions: {0}-{1}={2}'
.format(total_data_points, data.size, total_data_points - data.size))
data = np.reshape(data, tuple(header['sizes'][::-1]))
if index_order == 'F':
data = data.T
return data
|
[
"def",
"read_data",
"(",
"header",
",",
"fh",
"=",
"None",
",",
"filename",
"=",
"None",
",",
"index_order",
"=",
"'F'",
")",
":",
"if",
"index_order",
"not",
"in",
"[",
"'F'",
",",
"'C'",
"]",
":",
"raise",
"NRRDError",
"(",
"'Invalid index order'",
")",
"# Check that the required fields are in the header",
"for",
"field",
"in",
"_NRRD_REQUIRED_FIELDS",
":",
"if",
"field",
"not",
"in",
"header",
":",
"raise",
"NRRDError",
"(",
"'Header is missing required field: \"%s\".'",
"%",
"field",
")",
"if",
"header",
"[",
"'dimension'",
"]",
"!=",
"len",
"(",
"header",
"[",
"'sizes'",
"]",
")",
":",
"raise",
"NRRDError",
"(",
"'Number of elements in sizes does not match dimension. Dimension: %i, len(sizes): %i'",
"%",
"(",
"header",
"[",
"'dimension'",
"]",
",",
"len",
"(",
"header",
"[",
"'sizes'",
"]",
")",
")",
")",
"# Determine the data type from the header",
"dtype",
"=",
"_determine_datatype",
"(",
"header",
")",
"# Determine the byte skip, line skip and the data file",
"# These all can be written with or without the space according to the NRRD spec, so we check them both",
"line_skip",
"=",
"header",
".",
"get",
"(",
"'lineskip'",
",",
"header",
".",
"get",
"(",
"'line skip'",
",",
"0",
")",
")",
"byte_skip",
"=",
"header",
".",
"get",
"(",
"'byteskip'",
",",
"header",
".",
"get",
"(",
"'byte skip'",
",",
"0",
")",
")",
"data_filename",
"=",
"header",
".",
"get",
"(",
"'datafile'",
",",
"header",
".",
"get",
"(",
"'data file'",
",",
"None",
")",
")",
"# If the data file is separate from the header file, then open the data file to read from that instead",
"if",
"data_filename",
"is",
"not",
"None",
":",
"# If the pathname is relative, then append the current directory from the filename",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"data_filename",
")",
":",
"if",
"filename",
"is",
"None",
":",
"raise",
"NRRDError",
"(",
"'Filename parameter must be specified when a relative data file path is given'",
")",
"data_filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"filename",
")",
",",
"data_filename",
")",
"# Override the fh parameter with the data filename",
"# Note that this is opened without a \"with\" block, thus it must be closed manually in all circumstances",
"fh",
"=",
"open",
"(",
"data_filename",
",",
"'rb'",
")",
"# Get the total number of data points by multiplying the size of each dimension together",
"total_data_points",
"=",
"header",
"[",
"'sizes'",
"]",
".",
"prod",
"(",
")",
"# Skip the number of lines requested when line_skip >= 0",
"# Irrespective of the NRRD file having attached/detached header",
"# Lines are skipped before getting to the beginning of the data",
"if",
"line_skip",
">=",
"0",
":",
"for",
"_",
"in",
"range",
"(",
"line_skip",
")",
":",
"fh",
".",
"readline",
"(",
")",
"else",
":",
"# Must close the file because if the file was opened above from detached filename, there is no \"with\" block to",
"# close it for us",
"fh",
".",
"close",
"(",
")",
"raise",
"NRRDError",
"(",
"'Invalid lineskip, allowed values are greater than or equal to 0'",
")",
"# Skip the requested number of bytes or seek backward, and then parse the data using NumPy",
"if",
"byte_skip",
"<",
"-",
"1",
":",
"# Must close the file because if the file was opened above from detached filename, there is no \"with\" block to",
"# close it for us",
"fh",
".",
"close",
"(",
")",
"raise",
"NRRDError",
"(",
"'Invalid byteskip, allowed values are greater than or equal to -1'",
")",
"elif",
"byte_skip",
">=",
"0",
":",
"fh",
".",
"seek",
"(",
"byte_skip",
",",
"os",
".",
"SEEK_CUR",
")",
"elif",
"byte_skip",
"==",
"-",
"1",
"and",
"header",
"[",
"'encoding'",
"]",
"not",
"in",
"[",
"'gzip'",
",",
"'gz'",
",",
"'bzip2'",
",",
"'bz2'",
"]",
":",
"fh",
".",
"seek",
"(",
"-",
"dtype",
".",
"itemsize",
"*",
"total_data_points",
",",
"os",
".",
"SEEK_END",
")",
"else",
":",
"# The only case left should be: byte_skip == -1 and header['encoding'] == 'gzip'",
"byte_skip",
"=",
"-",
"dtype",
".",
"itemsize",
"*",
"total_data_points",
"# If a compression encoding is used, then byte skip AFTER decompressing",
"if",
"header",
"[",
"'encoding'",
"]",
"==",
"'raw'",
":",
"data",
"=",
"np",
".",
"fromfile",
"(",
"fh",
",",
"dtype",
")",
"elif",
"header",
"[",
"'encoding'",
"]",
"in",
"[",
"'ASCII'",
",",
"'ascii'",
",",
"'text'",
",",
"'txt'",
"]",
":",
"data",
"=",
"np",
".",
"fromfile",
"(",
"fh",
",",
"dtype",
",",
"sep",
"=",
"' '",
")",
"else",
":",
"# Handle compressed data now",
"# Construct the decompression object based on encoding",
"if",
"header",
"[",
"'encoding'",
"]",
"in",
"[",
"'gzip'",
",",
"'gz'",
"]",
":",
"decompobj",
"=",
"zlib",
".",
"decompressobj",
"(",
"zlib",
".",
"MAX_WBITS",
"|",
"16",
")",
"elif",
"header",
"[",
"'encoding'",
"]",
"in",
"[",
"'bzip2'",
",",
"'bz2'",
"]",
":",
"decompobj",
"=",
"bz2",
".",
"BZ2Decompressor",
"(",
")",
"else",
":",
"# Must close the file because if the file was opened above from detached filename, there is no \"with\" block",
"# to close it for us",
"fh",
".",
"close",
"(",
")",
"raise",
"NRRDError",
"(",
"'Unsupported encoding: \"%s\"'",
"%",
"header",
"[",
"'encoding'",
"]",
")",
"# Loop through the file and read a chunk at a time (see _READ_CHUNKSIZE why it is read in chunks)",
"decompressed_data",
"=",
"bytearray",
"(",
")",
"# Read all of the remaining data from the file",
"# Obtain the length of the compressed data since we will be using it repeatedly, more efficient",
"compressed_data",
"=",
"fh",
".",
"read",
"(",
")",
"compressed_data_len",
"=",
"len",
"(",
"compressed_data",
")",
"start_index",
"=",
"0",
"# Loop through data and decompress it chunk by chunk",
"while",
"start_index",
"<",
"compressed_data_len",
":",
"# Calculate the end index = start index plus chunk size",
"# Set to the string length to read the remaining chunk at the end",
"end_index",
"=",
"min",
"(",
"start_index",
"+",
"_READ_CHUNKSIZE",
",",
"compressed_data_len",
")",
"# Decompress and append data",
"decompressed_data",
"+=",
"decompobj",
".",
"decompress",
"(",
"compressed_data",
"[",
"start_index",
":",
"end_index",
"]",
")",
"# Update start index",
"start_index",
"=",
"end_index",
"# Delete the compressed data since we do not need it anymore",
"# This could potentially be using a lot of memory",
"del",
"compressed_data",
"# Byte skip is applied AFTER the decompression. Skip first x bytes of the decompressed data and parse it using",
"# NumPy",
"data",
"=",
"np",
".",
"frombuffer",
"(",
"decompressed_data",
"[",
"byte_skip",
":",
"]",
",",
"dtype",
")",
"# Close the file, even if opened using \"with\" block, closing it manually does not hurt",
"fh",
".",
"close",
"(",
")",
"if",
"total_data_points",
"!=",
"data",
".",
"size",
":",
"raise",
"NRRDError",
"(",
"'Size of the data does not equal the product of all the dimensions: {0}-{1}={2}'",
".",
"format",
"(",
"total_data_points",
",",
"data",
".",
"size",
",",
"total_data_points",
"-",
"data",
".",
"size",
")",
")",
"# In the NRRD header, the fields are specified in Fortran order, i.e, the first index is the one that changes",
"# fastest and last index changes slowest. This needs to be taken into consideration since numpy uses C-order",
"# indexing.",
"# The array shape from NRRD (x,y,z) needs to be reversed as numpy expects (z,y,x).",
"data",
"=",
"np",
".",
"reshape",
"(",
"data",
",",
"tuple",
"(",
"header",
"[",
"'sizes'",
"]",
"[",
":",
":",
"-",
"1",
"]",
")",
")",
"# Transpose data to enable Fortran indexing if requested.",
"if",
"index_order",
"==",
"'F'",
":",
"data",
"=",
"data",
".",
"T",
"return",
"data"
] |
Read data from file into :class:`numpy.ndarray`
The two parameters :obj:`fh` and :obj:`filename` are optional depending on the parameters but it never hurts to
specify both. The file handle (:obj:`fh`) is necessary if the header is attached with the NRRD data. However, if
the NRRD data is detached from the header, then the :obj:`filename` parameter is required to obtain the absolute
path to the data file.
See :ref:`user-guide:Reading NRRD files` for more information on reading NRRD files.
Parameters
----------
header : :class:`dict` (:class:`str`, :obj:`Object`)
Parsed fields/values obtained from :meth:`read_header` function
fh : file-object, optional
File object pointing to first byte of data. Only necessary if data is attached to header.
filename : :class:`str`, optional
Filename of the header file. Only necessary if data is detached from the header. This is used to get the
absolute data path.
index_order : {'C', 'F'}, optional
Specifies the index order of the resulting data array. Either 'C' (C-order) where the dimensions are ordered from
slowest-varying to fastest-varying (e.g. (z, y, x)), or 'F' (Fortran-order) where the dimensions are ordered
from fastest-varying to slowest-varying (e.g. (x, y, z)).
Returns
-------
data : :class:`numpy.ndarray`
Data read from NRRD file
See Also
--------
:meth:`read`, :meth:`read_header`
|
[
"Read",
"data",
"from",
"file",
"into",
":",
"class",
":",
"numpy",
".",
"ndarray"
] |
train
|
https://github.com/mhe/pynrrd/blob/96dd875b302031ea27e2d3aaa611dc6f2dfc7979/nrrd/reader.py#L301-L466
|
mhe/pynrrd
|
nrrd/reader.py
|
read
|
def read(filename, custom_field_map=None, index_order='F'):
"""Read a NRRD file and return the header and data
See :ref:`user-guide:Reading NRRD files` for more information on reading NRRD files.
.. note::
Users should be aware that the `index_order` argument needs to be consistent between `nrrd.read` and `nrrd.write`. I.e., reading an array with `index_order='F'` will result in a transposed version of the original data and hence the writer needs to be aware of this.
Parameters
----------
filename : :class:`str`
Filename of the NRRD file
custom_field_map : :class:`dict` (:class:`str`, :class:`str`), optional
Dictionary used for parsing custom field types where the key is the custom field name and the value is a
string identifying datatype for the custom field.
index_order : {'C', 'F'}, optional
Specifies the index order of the resulting data array. Either 'C' (C-order) where the dimensions are ordered from
slowest-varying to fastest-varying (e.g. (z, y, x)), or 'F' (Fortran-order) where the dimensions are ordered
from fastest-varying to slowest-varying (e.g. (x, y, z)).
Returns
-------
data : :class:`numpy.ndarray`
Data read from NRRD file
header : :class:`dict` (:class:`str`, :obj:`Object`)
Dictionary containing the header fields and their corresponding parsed value
See Also
--------
:meth:`write`, :meth:`read_header`, :meth:`read_data`
"""
"""Read a NRRD file and return a tuple (data, header)."""
with open(filename, 'rb') as fh:
header = read_header(fh, custom_field_map)
data = read_data(header, fh, filename, index_order)
return data, header
|
python
|
def read(filename, custom_field_map=None, index_order='F'):
with open(filename, 'rb') as fh:
header = read_header(fh, custom_field_map)
data = read_data(header, fh, filename, index_order)
return data, header
|
[
"def",
"read",
"(",
"filename",
",",
"custom_field_map",
"=",
"None",
",",
"index_order",
"=",
"'F'",
")",
":",
"\"\"\"Read a NRRD file and return a tuple (data, header).\"\"\"",
"with",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"fh",
":",
"header",
"=",
"read_header",
"(",
"fh",
",",
"custom_field_map",
")",
"data",
"=",
"read_data",
"(",
"header",
",",
"fh",
",",
"filename",
",",
"index_order",
")",
"return",
"data",
",",
"header"
] |
Read a NRRD file and return the header and data
See :ref:`user-guide:Reading NRRD files` for more information on reading NRRD files.
.. note::
Users should be aware that the `index_order` argument needs to be consistent between `nrrd.read` and `nrrd.write`. I.e., reading an array with `index_order='F'` will result in a transposed version of the original data and hence the writer needs to be aware of this.
Parameters
----------
filename : :class:`str`
Filename of the NRRD file
custom_field_map : :class:`dict` (:class:`str`, :class:`str`), optional
Dictionary used for parsing custom field types where the key is the custom field name and the value is a
string identifying datatype for the custom field.
index_order : {'C', 'F'}, optional
Specifies the index order of the resulting data array. Either 'C' (C-order) where the dimensions are ordered from
slowest-varying to fastest-varying (e.g. (z, y, x)), or 'F' (Fortran-order) where the dimensions are ordered
from fastest-varying to slowest-varying (e.g. (x, y, z)).
Returns
-------
data : :class:`numpy.ndarray`
Data read from NRRD file
header : :class:`dict` (:class:`str`, :obj:`Object`)
Dictionary containing the header fields and their corresponding parsed value
See Also
--------
:meth:`write`, :meth:`read_header`, :meth:`read_data`
|
[
"Read",
"a",
"NRRD",
"file",
"and",
"return",
"the",
"header",
"and",
"data"
] |
train
|
https://github.com/mhe/pynrrd/blob/96dd875b302031ea27e2d3aaa611dc6f2dfc7979/nrrd/reader.py#L469-L506
|
bspaans/python-mingus
|
mingus/core/meter.py
|
valid_beat_duration
|
def valid_beat_duration(duration):
"""Return True when log2(duration) is an integer."""
if duration == 0:
return False
elif duration == 1:
return True
else:
r = duration
while r != 1:
if r % 2 == 1:
return False
r /= 2
return True
|
python
|
def valid_beat_duration(duration):
if duration == 0:
return False
elif duration == 1:
return True
else:
r = duration
while r != 1:
if r % 2 == 1:
return False
r /= 2
return True
|
[
"def",
"valid_beat_duration",
"(",
"duration",
")",
":",
"if",
"duration",
"==",
"0",
":",
"return",
"False",
"elif",
"duration",
"==",
"1",
":",
"return",
"True",
"else",
":",
"r",
"=",
"duration",
"while",
"r",
"!=",
"1",
":",
"if",
"r",
"%",
"2",
"==",
"1",
":",
"return",
"False",
"r",
"/=",
"2",
"return",
"True"
] |
Return True when log2(duration) is an integer.
|
[
"Return",
"True",
"when",
"log2",
"(",
"duration",
")",
"is",
"an",
"integer",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/core/meter.py#L30-L42
|
bspaans/python-mingus
|
mingus/containers/track.py
|
Track.add_notes
|
def add_notes(self, note, duration=None):
"""Add a Note, note as string or NoteContainer to the last Bar.
If the Bar is full, a new one will automatically be created.
If the Bar is not full but the note can't fit in, this method will
return False. True otherwise.
An InstrumentRangeError exception will be raised if an Instrument is
attached to the Track, but the note turns out not to be within the
range of the Instrument.
"""
if self.instrument != None:
if not self.instrument.can_play_notes(note):
raise InstrumentRangeError, \
"Note '%s' is not in range of the instrument (%s)" % (note,
self.instrument)
if duration == None:
duration = 4
# Check whether the last bar is full, if so create a new bar and add the
# note there
if len(self.bars) == 0:
self.bars.append(Bar())
last_bar = self.bars[-1]
if last_bar.is_full():
self.bars.append(Bar(last_bar.key, last_bar.meter))
# warning should hold note if it doesn't fit
return self.bars[-1].place_notes(note, duration)
|
python
|
def add_notes(self, note, duration=None):
if self.instrument != None:
if not self.instrument.can_play_notes(note):
raise InstrumentRangeError, \
"Note '%s' is not in range of the instrument (%s)" % (note,
self.instrument)
if duration == None:
duration = 4
if len(self.bars) == 0:
self.bars.append(Bar())
last_bar = self.bars[-1]
if last_bar.is_full():
self.bars.append(Bar(last_bar.key, last_bar.meter))
return self.bars[-1].place_notes(note, duration)
|
[
"def",
"add_notes",
"(",
"self",
",",
"note",
",",
"duration",
"=",
"None",
")",
":",
"if",
"self",
".",
"instrument",
"!=",
"None",
":",
"if",
"not",
"self",
".",
"instrument",
".",
"can_play_notes",
"(",
"note",
")",
":",
"raise",
"InstrumentRangeError",
",",
"\"Note '%s' is not in range of the instrument (%s)\"",
"%",
"(",
"note",
",",
"self",
".",
"instrument",
")",
"if",
"duration",
"==",
"None",
":",
"duration",
"=",
"4",
"# Check whether the last bar is full, if so create a new bar and add the",
"# note there",
"if",
"len",
"(",
"self",
".",
"bars",
")",
"==",
"0",
":",
"self",
".",
"bars",
".",
"append",
"(",
"Bar",
"(",
")",
")",
"last_bar",
"=",
"self",
".",
"bars",
"[",
"-",
"1",
"]",
"if",
"last_bar",
".",
"is_full",
"(",
")",
":",
"self",
".",
"bars",
".",
"append",
"(",
"Bar",
"(",
"last_bar",
".",
"key",
",",
"last_bar",
".",
"meter",
")",
")",
"# warning should hold note if it doesn't fit",
"return",
"self",
".",
"bars",
"[",
"-",
"1",
"]",
".",
"place_notes",
"(",
"note",
",",
"duration",
")"
] |
Add a Note, note as string or NoteContainer to the last Bar.
If the Bar is full, a new one will automatically be created.
If the Bar is not full but the note can't fit in, this method will
return False. True otherwise.
An InstrumentRangeError exception will be raised if an Instrument is
attached to the Track, but the note turns out not to be within the
range of the Instrument.
|
[
"Add",
"a",
"Note",
"note",
"as",
"string",
"or",
"NoteContainer",
"to",
"the",
"last",
"Bar",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/track.py#L51-L80
|
bspaans/python-mingus
|
mingus/containers/track.py
|
Track.get_notes
|
def get_notes(self):
"""Return an iterator that iterates through every bar in the this
track."""
for bar in self.bars:
for beat, duration, notes in bar:
yield beat, duration, notes
|
python
|
def get_notes(self):
for bar in self.bars:
for beat, duration, notes in bar:
yield beat, duration, notes
|
[
"def",
"get_notes",
"(",
"self",
")",
":",
"for",
"bar",
"in",
"self",
".",
"bars",
":",
"for",
"beat",
",",
"duration",
",",
"notes",
"in",
"bar",
":",
"yield",
"beat",
",",
"duration",
",",
"notes"
] |
Return an iterator that iterates through every bar in the this
track.
|
[
"Return",
"an",
"iterator",
"that",
"iterates",
"through",
"every",
"bar",
"in",
"the",
"this",
"track",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/track.py#L82-L87
|
bspaans/python-mingus
|
mingus/containers/track.py
|
Track.from_chords
|
def from_chords(self, chords, duration=1):
"""Add chords to the Track.
The given chords should be a list of shorthand strings or list of
list of shorthand strings, etc.
Each sublist divides the value by 2.
If a tuning is set, chords will be expanded so they have a proper
fingering.
Example:
>>> t = Track().from_chords(['C', ['Am', 'Dm'], 'G7', 'C#'], 1)
"""
tun = self.get_tuning()
def add_chord(chord, duration):
if type(chord) == list:
for c in chord:
add_chord(c, duration * 2)
else:
chord = NoteContainer().from_chord(chord)
if tun:
chord = tun.find_chord_fingering(chord,
return_best_as_NoteContainer=True)
if not self.add_notes(chord, duration):
# This should be the standard behaviour of add_notes
dur = self.bars[-1].value_left()
self.add_notes(chord, dur)
# warning should hold note
self.add_notes(chord, value.subtract(duration, dur))
for c in chords:
if c is not None:
add_chord(c, duration)
else:
self.add_notes(None, duration)
return self
|
python
|
def from_chords(self, chords, duration=1):
tun = self.get_tuning()
def add_chord(chord, duration):
if type(chord) == list:
for c in chord:
add_chord(c, duration * 2)
else:
chord = NoteContainer().from_chord(chord)
if tun:
chord = tun.find_chord_fingering(chord,
return_best_as_NoteContainer=True)
if not self.add_notes(chord, duration):
dur = self.bars[-1].value_left()
self.add_notes(chord, dur)
self.add_notes(chord, value.subtract(duration, dur))
for c in chords:
if c is not None:
add_chord(c, duration)
else:
self.add_notes(None, duration)
return self
|
[
"def",
"from_chords",
"(",
"self",
",",
"chords",
",",
"duration",
"=",
"1",
")",
":",
"tun",
"=",
"self",
".",
"get_tuning",
"(",
")",
"def",
"add_chord",
"(",
"chord",
",",
"duration",
")",
":",
"if",
"type",
"(",
"chord",
")",
"==",
"list",
":",
"for",
"c",
"in",
"chord",
":",
"add_chord",
"(",
"c",
",",
"duration",
"*",
"2",
")",
"else",
":",
"chord",
"=",
"NoteContainer",
"(",
")",
".",
"from_chord",
"(",
"chord",
")",
"if",
"tun",
":",
"chord",
"=",
"tun",
".",
"find_chord_fingering",
"(",
"chord",
",",
"return_best_as_NoteContainer",
"=",
"True",
")",
"if",
"not",
"self",
".",
"add_notes",
"(",
"chord",
",",
"duration",
")",
":",
"# This should be the standard behaviour of add_notes",
"dur",
"=",
"self",
".",
"bars",
"[",
"-",
"1",
"]",
".",
"value_left",
"(",
")",
"self",
".",
"add_notes",
"(",
"chord",
",",
"dur",
")",
"# warning should hold note",
"self",
".",
"add_notes",
"(",
"chord",
",",
"value",
".",
"subtract",
"(",
"duration",
",",
"dur",
")",
")",
"for",
"c",
"in",
"chords",
":",
"if",
"c",
"is",
"not",
"None",
":",
"add_chord",
"(",
"c",
",",
"duration",
")",
"else",
":",
"self",
".",
"add_notes",
"(",
"None",
",",
"duration",
")",
"return",
"self"
] |
Add chords to the Track.
The given chords should be a list of shorthand strings or list of
list of shorthand strings, etc.
Each sublist divides the value by 2.
If a tuning is set, chords will be expanded so they have a proper
fingering.
Example:
>>> t = Track().from_chords(['C', ['Am', 'Dm'], 'G7', 'C#'], 1)
|
[
"Add",
"chords",
"to",
"the",
"Track",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/track.py#L89-L127
|
bspaans/python-mingus
|
mingus/containers/track.py
|
Track.get_tuning
|
def get_tuning(self):
"""Return a StringTuning object.
If an instrument is set and has a tuning it will be returned.
Otherwise the track's one will be used.
"""
if self.instrument and self.instrument.tuning:
return self.instrument.tuning
return self.tuning
|
python
|
def get_tuning(self):
if self.instrument and self.instrument.tuning:
return self.instrument.tuning
return self.tuning
|
[
"def",
"get_tuning",
"(",
"self",
")",
":",
"if",
"self",
".",
"instrument",
"and",
"self",
".",
"instrument",
".",
"tuning",
":",
"return",
"self",
".",
"instrument",
".",
"tuning",
"return",
"self",
".",
"tuning"
] |
Return a StringTuning object.
If an instrument is set and has a tuning it will be returned.
Otherwise the track's one will be used.
|
[
"Return",
"a",
"StringTuning",
"object",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/track.py#L129-L137
|
bspaans/python-mingus
|
mingus/containers/track.py
|
Track.set_tuning
|
def set_tuning(self, tuning):
"""Set the tuning attribute on both the Track and its instrument (when
available).
Tuning should be a StringTuning or derivative object.
"""
if self.instrument:
self.instrument.tuning = tuning
self.tuning = tuning
return self
|
python
|
def set_tuning(self, tuning):
if self.instrument:
self.instrument.tuning = tuning
self.tuning = tuning
return self
|
[
"def",
"set_tuning",
"(",
"self",
",",
"tuning",
")",
":",
"if",
"self",
".",
"instrument",
":",
"self",
".",
"instrument",
".",
"tuning",
"=",
"tuning",
"self",
".",
"tuning",
"=",
"tuning",
"return",
"self"
] |
Set the tuning attribute on both the Track and its instrument (when
available).
Tuning should be a StringTuning or derivative object.
|
[
"Set",
"the",
"tuning",
"attribute",
"on",
"both",
"the",
"Track",
"and",
"its",
"instrument",
"(",
"when",
"available",
")",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/track.py#L139-L148
|
bspaans/python-mingus
|
mingus/containers/track.py
|
Track.transpose
|
def transpose(self, interval, up=True):
"""Transpose all the notes in the track up or down the interval.
Call transpose() on every Bar.
"""
for bar in self.bars:
bar.transpose(interval, up)
return self
|
python
|
def transpose(self, interval, up=True):
for bar in self.bars:
bar.transpose(interval, up)
return self
|
[
"def",
"transpose",
"(",
"self",
",",
"interval",
",",
"up",
"=",
"True",
")",
":",
"for",
"bar",
"in",
"self",
".",
"bars",
":",
"bar",
".",
"transpose",
"(",
"interval",
",",
"up",
")",
"return",
"self"
] |
Transpose all the notes in the track up or down the interval.
Call transpose() on every Bar.
|
[
"Transpose",
"all",
"the",
"notes",
"in",
"the",
"track",
"up",
"or",
"down",
"the",
"interval",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/track.py#L150-L157
|
bspaans/python-mingus
|
mingus/containers/bar.py
|
Bar.set_meter
|
def set_meter(self, meter):
"""Set the meter of this bar.
Meters in mingus are represented by a single tuple.
If the format of the meter is not recognised, a MeterFormatError
will be raised.
"""
# warning should raise exception
if _meter.valid_beat_duration(meter[1]):
self.meter = (meter[0], meter[1])
self.length = meter[0] * (1.0 / meter[1])
elif meter == (0, 0):
self.meter = (0, 0)
self.length = 0.0
else:
raise MeterFormatError("The meter argument '%s' is not an "
"understood representation of a meter. "
"Expecting a tuple." % meter)
|
python
|
def set_meter(self, meter):
if _meter.valid_beat_duration(meter[1]):
self.meter = (meter[0], meter[1])
self.length = meter[0] * (1.0 / meter[1])
elif meter == (0, 0):
self.meter = (0, 0)
self.length = 0.0
else:
raise MeterFormatError("The meter argument '%s' is not an "
"understood representation of a meter. "
"Expecting a tuple." % meter)
|
[
"def",
"set_meter",
"(",
"self",
",",
"meter",
")",
":",
"# warning should raise exception",
"if",
"_meter",
".",
"valid_beat_duration",
"(",
"meter",
"[",
"1",
"]",
")",
":",
"self",
".",
"meter",
"=",
"(",
"meter",
"[",
"0",
"]",
",",
"meter",
"[",
"1",
"]",
")",
"self",
".",
"length",
"=",
"meter",
"[",
"0",
"]",
"*",
"(",
"1.0",
"/",
"meter",
"[",
"1",
"]",
")",
"elif",
"meter",
"==",
"(",
"0",
",",
"0",
")",
":",
"self",
".",
"meter",
"=",
"(",
"0",
",",
"0",
")",
"self",
".",
"length",
"=",
"0.0",
"else",
":",
"raise",
"MeterFormatError",
"(",
"\"The meter argument '%s' is not an \"",
"\"understood representation of a meter. \"",
"\"Expecting a tuple.\"",
"%",
"meter",
")"
] |
Set the meter of this bar.
Meters in mingus are represented by a single tuple.
If the format of the meter is not recognised, a MeterFormatError
will be raised.
|
[
"Set",
"the",
"meter",
"of",
"this",
"bar",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/bar.py#L54-L72
|
bspaans/python-mingus
|
mingus/containers/bar.py
|
Bar.place_notes
|
def place_notes(self, notes, duration):
"""Place the notes on the current_beat.
Notes can be strings, Notes, list of strings, list of Notes or a
NoteContainer.
Raise a MeterFormatError if the duration is not valid.
Return True if succesful, False otherwise (ie. the Bar hasn't got
enough room for a note of that duration).
"""
# note should be able to be one of strings, lists, Notes or
# NoteContainers
if hasattr(notes, 'notes'):
pass
elif hasattr(notes, 'name'):
notes = NoteContainer(notes)
elif type(notes) == str:
notes = NoteContainer(notes)
elif type(notes) == list:
notes = NoteContainer(notes)
if self.current_beat + 1.0 / duration <= self.length or self.length\
== 0.0:
self.bar.append([self.current_beat, duration, notes])
self.current_beat += 1.0 / duration
return True
else:
return False
|
python
|
def place_notes(self, notes, duration):
if hasattr(notes, 'notes'):
pass
elif hasattr(notes, 'name'):
notes = NoteContainer(notes)
elif type(notes) == str:
notes = NoteContainer(notes)
elif type(notes) == list:
notes = NoteContainer(notes)
if self.current_beat + 1.0 / duration <= self.length or self.length\
== 0.0:
self.bar.append([self.current_beat, duration, notes])
self.current_beat += 1.0 / duration
return True
else:
return False
|
[
"def",
"place_notes",
"(",
"self",
",",
"notes",
",",
"duration",
")",
":",
"# note should be able to be one of strings, lists, Notes or",
"# NoteContainers",
"if",
"hasattr",
"(",
"notes",
",",
"'notes'",
")",
":",
"pass",
"elif",
"hasattr",
"(",
"notes",
",",
"'name'",
")",
":",
"notes",
"=",
"NoteContainer",
"(",
"notes",
")",
"elif",
"type",
"(",
"notes",
")",
"==",
"str",
":",
"notes",
"=",
"NoteContainer",
"(",
"notes",
")",
"elif",
"type",
"(",
"notes",
")",
"==",
"list",
":",
"notes",
"=",
"NoteContainer",
"(",
"notes",
")",
"if",
"self",
".",
"current_beat",
"+",
"1.0",
"/",
"duration",
"<=",
"self",
".",
"length",
"or",
"self",
".",
"length",
"==",
"0.0",
":",
"self",
".",
"bar",
".",
"append",
"(",
"[",
"self",
".",
"current_beat",
",",
"duration",
",",
"notes",
"]",
")",
"self",
".",
"current_beat",
"+=",
"1.0",
"/",
"duration",
"return",
"True",
"else",
":",
"return",
"False"
] |
Place the notes on the current_beat.
Notes can be strings, Notes, list of strings, list of Notes or a
NoteContainer.
Raise a MeterFormatError if the duration is not valid.
Return True if succesful, False otherwise (ie. the Bar hasn't got
enough room for a note of that duration).
|
[
"Place",
"the",
"notes",
"on",
"the",
"current_beat",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/bar.py#L74-L101
|
bspaans/python-mingus
|
mingus/containers/bar.py
|
Bar.place_notes_at
|
def place_notes_at(self, notes, at):
"""Place notes at the given index."""
for x in self.bar:
if x[0] == at:
x[0][2] += notes
|
python
|
def place_notes_at(self, notes, at):
for x in self.bar:
if x[0] == at:
x[0][2] += notes
|
[
"def",
"place_notes_at",
"(",
"self",
",",
"notes",
",",
"at",
")",
":",
"for",
"x",
"in",
"self",
".",
"bar",
":",
"if",
"x",
"[",
"0",
"]",
"==",
"at",
":",
"x",
"[",
"0",
"]",
"[",
"2",
"]",
"+=",
"notes"
] |
Place notes at the given index.
|
[
"Place",
"notes",
"at",
"the",
"given",
"index",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/bar.py#L103-L107
|
bspaans/python-mingus
|
mingus/containers/bar.py
|
Bar.remove_last_entry
|
def remove_last_entry(self):
"""Remove the last NoteContainer in the Bar."""
self.current_beat -= 1.0 / self.bar[-1][1]
self.bar = self.bar[:-1]
return self.current_beat
|
python
|
def remove_last_entry(self):
self.current_beat -= 1.0 / self.bar[-1][1]
self.bar = self.bar[:-1]
return self.current_beat
|
[
"def",
"remove_last_entry",
"(",
"self",
")",
":",
"self",
".",
"current_beat",
"-=",
"1.0",
"/",
"self",
".",
"bar",
"[",
"-",
"1",
"]",
"[",
"1",
"]",
"self",
".",
"bar",
"=",
"self",
".",
"bar",
"[",
":",
"-",
"1",
"]",
"return",
"self",
".",
"current_beat"
] |
Remove the last NoteContainer in the Bar.
|
[
"Remove",
"the",
"last",
"NoteContainer",
"in",
"the",
"Bar",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/bar.py#L116-L120
|
bspaans/python-mingus
|
mingus/containers/bar.py
|
Bar.is_full
|
def is_full(self):
"""Return False if there is room in this Bar for another
NoteContainer, True otherwise."""
if self.length == 0.0:
return False
if len(self.bar) == 0:
return False
if self.current_beat >= self.length - 0.001:
return True
return False
|
python
|
def is_full(self):
if self.length == 0.0:
return False
if len(self.bar) == 0:
return False
if self.current_beat >= self.length - 0.001:
return True
return False
|
[
"def",
"is_full",
"(",
"self",
")",
":",
"if",
"self",
".",
"length",
"==",
"0.0",
":",
"return",
"False",
"if",
"len",
"(",
"self",
".",
"bar",
")",
"==",
"0",
":",
"return",
"False",
"if",
"self",
".",
"current_beat",
">=",
"self",
".",
"length",
"-",
"0.001",
":",
"return",
"True",
"return",
"False"
] |
Return False if there is room in this Bar for another
NoteContainer, True otherwise.
|
[
"Return",
"False",
"if",
"there",
"is",
"room",
"in",
"this",
"Bar",
"for",
"another",
"NoteContainer",
"True",
"otherwise",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/bar.py#L122-L131
|
bspaans/python-mingus
|
mingus/containers/bar.py
|
Bar.change_note_duration
|
def change_note_duration(self, at, to):
"""Change the note duration at the given index to the given
duration."""
if valid_beat_duration(to):
diff = 0
for x in self.bar:
if diff != 0:
x[0][0] -= diff
if x[0] == at:
cur = x[0][1]
x[0][1] = to
diff = 1 / cur - 1 / to
|
python
|
def change_note_duration(self, at, to):
if valid_beat_duration(to):
diff = 0
for x in self.bar:
if diff != 0:
x[0][0] -= diff
if x[0] == at:
cur = x[0][1]
x[0][1] = to
diff = 1 / cur - 1 / to
|
[
"def",
"change_note_duration",
"(",
"self",
",",
"at",
",",
"to",
")",
":",
"if",
"valid_beat_duration",
"(",
"to",
")",
":",
"diff",
"=",
"0",
"for",
"x",
"in",
"self",
".",
"bar",
":",
"if",
"diff",
"!=",
"0",
":",
"x",
"[",
"0",
"]",
"[",
"0",
"]",
"-=",
"diff",
"if",
"x",
"[",
"0",
"]",
"==",
"at",
":",
"cur",
"=",
"x",
"[",
"0",
"]",
"[",
"1",
"]",
"x",
"[",
"0",
"]",
"[",
"1",
"]",
"=",
"to",
"diff",
"=",
"1",
"/",
"cur",
"-",
"1",
"/",
"to"
] |
Change the note duration at the given index to the given
duration.
|
[
"Change",
"the",
"note",
"duration",
"at",
"the",
"given",
"index",
"to",
"the",
"given",
"duration",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/bar.py#L133-L144
|
bspaans/python-mingus
|
mingus/containers/bar.py
|
Bar.get_range
|
def get_range(self):
"""Return the highest and the lowest note in a tuple."""
(min, max) = (100000, -1)
for cont in self.bar:
for note in cont[2]:
if int(note) < int(min):
min = note
elif int(note) > int(max):
max = note
return (min, max)
|
python
|
def get_range(self):
(min, max) = (100000, -1)
for cont in self.bar:
for note in cont[2]:
if int(note) < int(min):
min = note
elif int(note) > int(max):
max = note
return (min, max)
|
[
"def",
"get_range",
"(",
"self",
")",
":",
"(",
"min",
",",
"max",
")",
"=",
"(",
"100000",
",",
"-",
"1",
")",
"for",
"cont",
"in",
"self",
".",
"bar",
":",
"for",
"note",
"in",
"cont",
"[",
"2",
"]",
":",
"if",
"int",
"(",
"note",
")",
"<",
"int",
"(",
"min",
")",
":",
"min",
"=",
"note",
"elif",
"int",
"(",
"note",
")",
">",
"int",
"(",
"max",
")",
":",
"max",
"=",
"note",
"return",
"(",
"min",
",",
"max",
")"
] |
Return the highest and the lowest note in a tuple.
|
[
"Return",
"the",
"highest",
"and",
"the",
"lowest",
"note",
"in",
"a",
"tuple",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/bar.py#L146-L155
|
bspaans/python-mingus
|
mingus/containers/bar.py
|
Bar.transpose
|
def transpose(self, interval, up=True):
"""Transpose the notes in the bar up or down the interval.
Call transpose() on all NoteContainers in the bar.
"""
for cont in self.bar:
cont[2].transpose(interval, up)
|
python
|
def transpose(self, interval, up=True):
for cont in self.bar:
cont[2].transpose(interval, up)
|
[
"def",
"transpose",
"(",
"self",
",",
"interval",
",",
"up",
"=",
"True",
")",
":",
"for",
"cont",
"in",
"self",
".",
"bar",
":",
"cont",
"[",
"2",
"]",
".",
"transpose",
"(",
"interval",
",",
"up",
")"
] |
Transpose the notes in the bar up or down the interval.
Call transpose() on all NoteContainers in the bar.
|
[
"Transpose",
"the",
"notes",
"in",
"the",
"bar",
"up",
"or",
"down",
"the",
"interval",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/bar.py#L175-L181
|
bspaans/python-mingus
|
mingus/containers/bar.py
|
Bar.determine_chords
|
def determine_chords(self, shorthand=False):
"""Return a list of lists [place_in_beat, possible_chords]."""
chords = []
for x in self.bar:
chords.append([x[0], x[2].determine(shorthand)])
return chords
|
python
|
def determine_chords(self, shorthand=False):
chords = []
for x in self.bar:
chords.append([x[0], x[2].determine(shorthand)])
return chords
|
[
"def",
"determine_chords",
"(",
"self",
",",
"shorthand",
"=",
"False",
")",
":",
"chords",
"=",
"[",
"]",
"for",
"x",
"in",
"self",
".",
"bar",
":",
"chords",
".",
"append",
"(",
"[",
"x",
"[",
"0",
"]",
",",
"x",
"[",
"2",
"]",
".",
"determine",
"(",
"shorthand",
")",
"]",
")",
"return",
"chords"
] |
Return a list of lists [place_in_beat, possible_chords].
|
[
"Return",
"a",
"list",
"of",
"lists",
"[",
"place_in_beat",
"possible_chords",
"]",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/bar.py#L183-L188
|
bspaans/python-mingus
|
mingus/containers/bar.py
|
Bar.determine_progression
|
def determine_progression(self, shorthand=False):
"""Return a list of lists [place_in_beat, possible_progressions]."""
res = []
for x in self.bar:
res.append([x[0], progressions.determine(x[2].get_note_names(),
self.key.key, shorthand)])
return res
|
python
|
def determine_progression(self, shorthand=False):
res = []
for x in self.bar:
res.append([x[0], progressions.determine(x[2].get_note_names(),
self.key.key, shorthand)])
return res
|
[
"def",
"determine_progression",
"(",
"self",
",",
"shorthand",
"=",
"False",
")",
":",
"res",
"=",
"[",
"]",
"for",
"x",
"in",
"self",
".",
"bar",
":",
"res",
".",
"append",
"(",
"[",
"x",
"[",
"0",
"]",
",",
"progressions",
".",
"determine",
"(",
"x",
"[",
"2",
"]",
".",
"get_note_names",
"(",
")",
",",
"self",
".",
"key",
".",
"key",
",",
"shorthand",
")",
"]",
")",
"return",
"res"
] |
Return a list of lists [place_in_beat, possible_progressions].
|
[
"Return",
"a",
"list",
"of",
"lists",
"[",
"place_in_beat",
"possible_progressions",
"]",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/bar.py#L190-L196
|
bspaans/python-mingus
|
mingus/containers/bar.py
|
Bar.get_note_names
|
def get_note_names(self):
"""Return a list of unique note names in the Bar."""
res = []
for cont in self.bar:
for x in cont[2].get_note_names():
if x not in res:
res.append(x)
return res
|
python
|
def get_note_names(self):
res = []
for cont in self.bar:
for x in cont[2].get_note_names():
if x not in res:
res.append(x)
return res
|
[
"def",
"get_note_names",
"(",
"self",
")",
":",
"res",
"=",
"[",
"]",
"for",
"cont",
"in",
"self",
".",
"bar",
":",
"for",
"x",
"in",
"cont",
"[",
"2",
"]",
".",
"get_note_names",
"(",
")",
":",
"if",
"x",
"not",
"in",
"res",
":",
"res",
".",
"append",
"(",
"x",
")",
"return",
"res"
] |
Return a list of unique note names in the Bar.
|
[
"Return",
"a",
"list",
"of",
"unique",
"note",
"names",
"in",
"the",
"Bar",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/bar.py#L198-L205
|
bspaans/python-mingus
|
mingus/midi/midi_file_in.py
|
MidiFile.parse_midi_file_header
|
def parse_midi_file_header(self, fp):
"""Read the header of a MIDI file and return a tuple containing the
format type, number of tracks and parsed time division information."""
# Check header
try:
if fp.read(4) != 'MThd':
raise HeaderError('Not a valid MIDI file header. Byte %d.'
% self.bytes_read)
self.bytes_read += 4
except:
raise IOError("Couldn't read from file.")
# Parse chunk size
try:
chunk_size = self.bytes_to_int(fp.read(4))
self.bytes_read += 4
except:
raise IOError("Couldn't read chunk size from file. Byte %d."
% self.bytes_read)
# Expect chunk size to be at least 6
if chunk_size < 6:
return False
try:
format_type = self.bytes_to_int(fp.read(2))
self.bytes_read += 2
if format_type not in [0, 1, 2]:
raise FormatError('%d is not a valid MIDI format.'
% format_type)
except:
raise IOError("Couldn't read format type from file.")
try:
number_of_tracks = self.bytes_to_int(fp.read(2))
time_division = self.parse_time_division(fp.read(2))
self.bytes_read += 4
except:
raise IOError("Couldn't read number of tracks "
"and/or time division from tracks.")
chunk_size -= 6
if chunk_size % 2 == 1:
raise FormatError("Won't parse this.")
fp.read(chunk_size / 2)
self.bytes_read += chunk_size / 2
return (format_type, number_of_tracks, time_division)
|
python
|
def parse_midi_file_header(self, fp):
try:
if fp.read(4) != 'MThd':
raise HeaderError('Not a valid MIDI file header. Byte %d.'
% self.bytes_read)
self.bytes_read += 4
except:
raise IOError("Couldn't read from file.")
try:
chunk_size = self.bytes_to_int(fp.read(4))
self.bytes_read += 4
except:
raise IOError("Couldn't read chunk size from file. Byte %d."
% self.bytes_read)
if chunk_size < 6:
return False
try:
format_type = self.bytes_to_int(fp.read(2))
self.bytes_read += 2
if format_type not in [0, 1, 2]:
raise FormatError('%d is not a valid MIDI format.'
% format_type)
except:
raise IOError("Couldn't read format type from file.")
try:
number_of_tracks = self.bytes_to_int(fp.read(2))
time_division = self.parse_time_division(fp.read(2))
self.bytes_read += 4
except:
raise IOError("Couldn't read number of tracks "
"and/or time division from tracks.")
chunk_size -= 6
if chunk_size % 2 == 1:
raise FormatError("Won't parse this.")
fp.read(chunk_size / 2)
self.bytes_read += chunk_size / 2
return (format_type, number_of_tracks, time_division)
|
[
"def",
"parse_midi_file_header",
"(",
"self",
",",
"fp",
")",
":",
"# Check header",
"try",
":",
"if",
"fp",
".",
"read",
"(",
"4",
")",
"!=",
"'MThd'",
":",
"raise",
"HeaderError",
"(",
"'Not a valid MIDI file header. Byte %d.'",
"%",
"self",
".",
"bytes_read",
")",
"self",
".",
"bytes_read",
"+=",
"4",
"except",
":",
"raise",
"IOError",
"(",
"\"Couldn't read from file.\"",
")",
"# Parse chunk size",
"try",
":",
"chunk_size",
"=",
"self",
".",
"bytes_to_int",
"(",
"fp",
".",
"read",
"(",
"4",
")",
")",
"self",
".",
"bytes_read",
"+=",
"4",
"except",
":",
"raise",
"IOError",
"(",
"\"Couldn't read chunk size from file. Byte %d.\"",
"%",
"self",
".",
"bytes_read",
")",
"# Expect chunk size to be at least 6",
"if",
"chunk_size",
"<",
"6",
":",
"return",
"False",
"try",
":",
"format_type",
"=",
"self",
".",
"bytes_to_int",
"(",
"fp",
".",
"read",
"(",
"2",
")",
")",
"self",
".",
"bytes_read",
"+=",
"2",
"if",
"format_type",
"not",
"in",
"[",
"0",
",",
"1",
",",
"2",
"]",
":",
"raise",
"FormatError",
"(",
"'%d is not a valid MIDI format.'",
"%",
"format_type",
")",
"except",
":",
"raise",
"IOError",
"(",
"\"Couldn't read format type from file.\"",
")",
"try",
":",
"number_of_tracks",
"=",
"self",
".",
"bytes_to_int",
"(",
"fp",
".",
"read",
"(",
"2",
")",
")",
"time_division",
"=",
"self",
".",
"parse_time_division",
"(",
"fp",
".",
"read",
"(",
"2",
")",
")",
"self",
".",
"bytes_read",
"+=",
"4",
"except",
":",
"raise",
"IOError",
"(",
"\"Couldn't read number of tracks \"",
"\"and/or time division from tracks.\"",
")",
"chunk_size",
"-=",
"6",
"if",
"chunk_size",
"%",
"2",
"==",
"1",
":",
"raise",
"FormatError",
"(",
"\"Won't parse this.\"",
")",
"fp",
".",
"read",
"(",
"chunk_size",
"/",
"2",
")",
"self",
".",
"bytes_read",
"+=",
"chunk_size",
"/",
"2",
"return",
"(",
"format_type",
",",
"number_of_tracks",
",",
"time_division",
")"
] |
Read the header of a MIDI file and return a tuple containing the
format type, number of tracks and parsed time division information.
|
[
"Read",
"the",
"header",
"of",
"a",
"MIDI",
"file",
"and",
"return",
"a",
"tuple",
"containing",
"the",
"format",
"type",
"number",
"of",
"tracks",
"and",
"parsed",
"time",
"division",
"information",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/midi/midi_file_in.py#L171-L215
|
bspaans/python-mingus
|
mingus/midi/midi_file_in.py
|
MidiFile.parse_time_division
|
def parse_time_division(self, bytes):
"""Parse the time division found in the header of a MIDI file and
return a dictionary with the boolean fps set to indicate whether to
use frames per second or ticks per beat.
If fps is True, the values SMPTE_frames and clock_ticks will also be
set. If fps is False, ticks_per_beat will hold the value.
"""
# If highest bit is set, time division is set in frames per second
# otherwise in ticks_per_beat
value = self.bytes_to_int(bytes)
if not value & 0x8000:
return {'fps': False, 'ticks_per_beat': value & 0x7FFF}
else:
SMPTE_frames = (value & 0x7F00) >> 2
if SMPTE_frames not in [24, 25, 29, 30]:
raise TimeDivisionError, \
"'%d' is not a valid value for the number of SMPTE frames"\
% SMPTE_frames
clock_ticks = (value & 0x00FF) >> 2
return {'fps': True, 'SMPTE_frames': SMPTE_frames,
'clock_ticks': clock_ticks}
|
python
|
def parse_time_division(self, bytes):
value = self.bytes_to_int(bytes)
if not value & 0x8000:
return {'fps': False, 'ticks_per_beat': value & 0x7FFF}
else:
SMPTE_frames = (value & 0x7F00) >> 2
if SMPTE_frames not in [24, 25, 29, 30]:
raise TimeDivisionError, \
"'%d' is not a valid value for the number of SMPTE frames"\
% SMPTE_frames
clock_ticks = (value & 0x00FF) >> 2
return {'fps': True, 'SMPTE_frames': SMPTE_frames,
'clock_ticks': clock_ticks}
|
[
"def",
"parse_time_division",
"(",
"self",
",",
"bytes",
")",
":",
"# If highest bit is set, time division is set in frames per second",
"# otherwise in ticks_per_beat",
"value",
"=",
"self",
".",
"bytes_to_int",
"(",
"bytes",
")",
"if",
"not",
"value",
"&",
"0x8000",
":",
"return",
"{",
"'fps'",
":",
"False",
",",
"'ticks_per_beat'",
":",
"value",
"&",
"0x7FFF",
"}",
"else",
":",
"SMPTE_frames",
"=",
"(",
"value",
"&",
"0x7F00",
")",
">>",
"2",
"if",
"SMPTE_frames",
"not",
"in",
"[",
"24",
",",
"25",
",",
"29",
",",
"30",
"]",
":",
"raise",
"TimeDivisionError",
",",
"\"'%d' is not a valid value for the number of SMPTE frames\"",
"%",
"SMPTE_frames",
"clock_ticks",
"=",
"(",
"value",
"&",
"0x00FF",
")",
">>",
"2",
"return",
"{",
"'fps'",
":",
"True",
",",
"'SMPTE_frames'",
":",
"SMPTE_frames",
",",
"'clock_ticks'",
":",
"clock_ticks",
"}"
] |
Parse the time division found in the header of a MIDI file and
return a dictionary with the boolean fps set to indicate whether to
use frames per second or ticks per beat.
If fps is True, the values SMPTE_frames and clock_ticks will also be
set. If fps is False, ticks_per_beat will hold the value.
|
[
"Parse",
"the",
"time",
"division",
"found",
"in",
"the",
"header",
"of",
"a",
"MIDI",
"file",
"and",
"return",
"a",
"dictionary",
"with",
"the",
"boolean",
"fps",
"set",
"to",
"indicate",
"whether",
"to",
"use",
"frames",
"per",
"second",
"or",
"ticks",
"per",
"beat",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/midi/midi_file_in.py#L220-L241
|
bspaans/python-mingus
|
mingus/midi/midi_file_in.py
|
MidiFile.parse_track
|
def parse_track(self, fp):
"""Parse a MIDI track from its header to its events.
Return a list of events and the number of bytes that were read.
"""
events = []
chunk_size = self.parse_track_header(fp)
bytes = chunk_size
while chunk_size > 0:
(delta_time, chunk_delta) = self.parse_varbyte_as_int(fp)
chunk_size -= chunk_delta
(event, chunk_delta) = self.parse_midi_event(fp)
chunk_size -= chunk_delta
events.append([delta_time, event])
if chunk_size < 0:
print 'yikes.', self.bytes_read, chunk_size
return events
|
python
|
def parse_track(self, fp):
events = []
chunk_size = self.parse_track_header(fp)
bytes = chunk_size
while chunk_size > 0:
(delta_time, chunk_delta) = self.parse_varbyte_as_int(fp)
chunk_size -= chunk_delta
(event, chunk_delta) = self.parse_midi_event(fp)
chunk_size -= chunk_delta
events.append([delta_time, event])
if chunk_size < 0:
print 'yikes.', self.bytes_read, chunk_size
return events
|
[
"def",
"parse_track",
"(",
"self",
",",
"fp",
")",
":",
"events",
"=",
"[",
"]",
"chunk_size",
"=",
"self",
".",
"parse_track_header",
"(",
"fp",
")",
"bytes",
"=",
"chunk_size",
"while",
"chunk_size",
">",
"0",
":",
"(",
"delta_time",
",",
"chunk_delta",
")",
"=",
"self",
".",
"parse_varbyte_as_int",
"(",
"fp",
")",
"chunk_size",
"-=",
"chunk_delta",
"(",
"event",
",",
"chunk_delta",
")",
"=",
"self",
".",
"parse_midi_event",
"(",
"fp",
")",
"chunk_size",
"-=",
"chunk_delta",
"events",
".",
"append",
"(",
"[",
"delta_time",
",",
"event",
"]",
")",
"if",
"chunk_size",
"<",
"0",
":",
"print",
"'yikes.'",
",",
"self",
".",
"bytes_read",
",",
"chunk_size",
"return",
"events"
] |
Parse a MIDI track from its header to its events.
Return a list of events and the number of bytes that were read.
|
[
"Parse",
"a",
"MIDI",
"track",
"from",
"its",
"header",
"to",
"its",
"events",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/midi/midi_file_in.py#L243-L259
|
bspaans/python-mingus
|
mingus/midi/midi_file_in.py
|
MidiFile.parse_midi_event
|
def parse_midi_event(self, fp):
"""Parse a MIDI event.
Return a dictionary and the number of bytes read.
"""
chunk_size = 0
try:
ec = self.bytes_to_int(fp.read(1))
chunk_size += 1
self.bytes_read += 1
except:
raise IOError("Couldn't read event type "
"and channel data from file.")
# Get the nibbles
event_type = (ec & 0xf0) >> 4
channel = ec & 0x0f
# I don't know what these events are supposed to do, but I keep finding
# them. The parser ignores them.
if event_type < 8:
raise FormatError('Unknown event type %d. Byte %d.' % (event_type,
self.bytes_read))
# Meta events can have strings of variable length
if event_type == 0x0f:
try:
meta_event = self.bytes_to_int(fp.read(1))
(length, chunk_delta) = self.parse_varbyte_as_int(fp)
data = fp.read(length)
chunk_size += 1 + chunk_delta + length
self.bytes_read += 1 + length
except:
raise IOError("Couldn't read meta event from file.")
return ({'event': event_type, 'meta_event': meta_event,
'data': data}, chunk_size)
elif event_type in [12, 13]:
# Program change and Channel aftertouch events only have one
# parameter
try:
param1 = fp.read(1)
chunk_size += 1
self.bytes_read += 1
except:
raise IOError("Couldn't read MIDI event parameters from file.")
param1 = self.bytes_to_int(param1)
return ({'event': event_type, 'channel': channel,
'param1': param1}, chunk_size)
else:
try:
param1 = fp.read(1)
param2 = fp.read(1)
chunk_size += 2
self.bytes_read += 2
except:
raise IOError("Couldn't read MIDI event parameters from file.")
param1 = self.bytes_to_int(param1)
param2 = self.bytes_to_int(param2)
return ({'event': event_type, 'channel': channel, 'param1': param1,
'param2': param2}, chunk_size)
|
python
|
def parse_midi_event(self, fp):
chunk_size = 0
try:
ec = self.bytes_to_int(fp.read(1))
chunk_size += 1
self.bytes_read += 1
except:
raise IOError("Couldn't read event type "
"and channel data from file.")
event_type = (ec & 0xf0) >> 4
channel = ec & 0x0f
if event_type < 8:
raise FormatError('Unknown event type %d. Byte %d.' % (event_type,
self.bytes_read))
if event_type == 0x0f:
try:
meta_event = self.bytes_to_int(fp.read(1))
(length, chunk_delta) = self.parse_varbyte_as_int(fp)
data = fp.read(length)
chunk_size += 1 + chunk_delta + length
self.bytes_read += 1 + length
except:
raise IOError("Couldn't read meta event from file.")
return ({'event': event_type, 'meta_event': meta_event,
'data': data}, chunk_size)
elif event_type in [12, 13]:
try:
param1 = fp.read(1)
chunk_size += 1
self.bytes_read += 1
except:
raise IOError("Couldn't read MIDI event parameters from file.")
param1 = self.bytes_to_int(param1)
return ({'event': event_type, 'channel': channel,
'param1': param1}, chunk_size)
else:
try:
param1 = fp.read(1)
param2 = fp.read(1)
chunk_size += 2
self.bytes_read += 2
except:
raise IOError("Couldn't read MIDI event parameters from file.")
param1 = self.bytes_to_int(param1)
param2 = self.bytes_to_int(param2)
return ({'event': event_type, 'channel': channel, 'param1': param1,
'param2': param2}, chunk_size)
|
[
"def",
"parse_midi_event",
"(",
"self",
",",
"fp",
")",
":",
"chunk_size",
"=",
"0",
"try",
":",
"ec",
"=",
"self",
".",
"bytes_to_int",
"(",
"fp",
".",
"read",
"(",
"1",
")",
")",
"chunk_size",
"+=",
"1",
"self",
".",
"bytes_read",
"+=",
"1",
"except",
":",
"raise",
"IOError",
"(",
"\"Couldn't read event type \"",
"\"and channel data from file.\"",
")",
"# Get the nibbles",
"event_type",
"=",
"(",
"ec",
"&",
"0xf0",
")",
">>",
"4",
"channel",
"=",
"ec",
"&",
"0x0f",
"# I don't know what these events are supposed to do, but I keep finding",
"# them. The parser ignores them.",
"if",
"event_type",
"<",
"8",
":",
"raise",
"FormatError",
"(",
"'Unknown event type %d. Byte %d.'",
"%",
"(",
"event_type",
",",
"self",
".",
"bytes_read",
")",
")",
"# Meta events can have strings of variable length",
"if",
"event_type",
"==",
"0x0f",
":",
"try",
":",
"meta_event",
"=",
"self",
".",
"bytes_to_int",
"(",
"fp",
".",
"read",
"(",
"1",
")",
")",
"(",
"length",
",",
"chunk_delta",
")",
"=",
"self",
".",
"parse_varbyte_as_int",
"(",
"fp",
")",
"data",
"=",
"fp",
".",
"read",
"(",
"length",
")",
"chunk_size",
"+=",
"1",
"+",
"chunk_delta",
"+",
"length",
"self",
".",
"bytes_read",
"+=",
"1",
"+",
"length",
"except",
":",
"raise",
"IOError",
"(",
"\"Couldn't read meta event from file.\"",
")",
"return",
"(",
"{",
"'event'",
":",
"event_type",
",",
"'meta_event'",
":",
"meta_event",
",",
"'data'",
":",
"data",
"}",
",",
"chunk_size",
")",
"elif",
"event_type",
"in",
"[",
"12",
",",
"13",
"]",
":",
"# Program change and Channel aftertouch events only have one",
"# parameter",
"try",
":",
"param1",
"=",
"fp",
".",
"read",
"(",
"1",
")",
"chunk_size",
"+=",
"1",
"self",
".",
"bytes_read",
"+=",
"1",
"except",
":",
"raise",
"IOError",
"(",
"\"Couldn't read MIDI event parameters from file.\"",
")",
"param1",
"=",
"self",
".",
"bytes_to_int",
"(",
"param1",
")",
"return",
"(",
"{",
"'event'",
":",
"event_type",
",",
"'channel'",
":",
"channel",
",",
"'param1'",
":",
"param1",
"}",
",",
"chunk_size",
")",
"else",
":",
"try",
":",
"param1",
"=",
"fp",
".",
"read",
"(",
"1",
")",
"param2",
"=",
"fp",
".",
"read",
"(",
"1",
")",
"chunk_size",
"+=",
"2",
"self",
".",
"bytes_read",
"+=",
"2",
"except",
":",
"raise",
"IOError",
"(",
"\"Couldn't read MIDI event parameters from file.\"",
")",
"param1",
"=",
"self",
".",
"bytes_to_int",
"(",
"param1",
")",
"param2",
"=",
"self",
".",
"bytes_to_int",
"(",
"param2",
")",
"return",
"(",
"{",
"'event'",
":",
"event_type",
",",
"'channel'",
":",
"channel",
",",
"'param1'",
":",
"param1",
",",
"'param2'",
":",
"param2",
"}",
",",
"chunk_size",
")"
] |
Parse a MIDI event.
Return a dictionary and the number of bytes read.
|
[
"Parse",
"a",
"MIDI",
"event",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/midi/midi_file_in.py#L261-L320
|
bspaans/python-mingus
|
mingus/midi/midi_file_in.py
|
MidiFile.parse_track_header
|
def parse_track_header(self, fp):
"""Return the size of the track chunk."""
# Check the header
try:
h = fp.read(4)
self.bytes_read += 4
except:
raise IOError("Couldn't read track header from file. Byte %d."
% self.bytes_read)
if h != 'MTrk':
raise HeaderError('Not a valid Track header. Byte %d.'
% self.bytes_read)
# Parse the size of the header
try:
chunk_size = fp.read(4)
self.bytes_read += 4
except:
raise IOError("Couldn't read track chunk size from file.")
chunk_size = self.bytes_to_int(chunk_size)
return chunk_size
|
python
|
def parse_track_header(self, fp):
try:
h = fp.read(4)
self.bytes_read += 4
except:
raise IOError("Couldn't read track header from file. Byte %d."
% self.bytes_read)
if h != 'MTrk':
raise HeaderError('Not a valid Track header. Byte %d.'
% self.bytes_read)
try:
chunk_size = fp.read(4)
self.bytes_read += 4
except:
raise IOError("Couldn't read track chunk size from file.")
chunk_size = self.bytes_to_int(chunk_size)
return chunk_size
|
[
"def",
"parse_track_header",
"(",
"self",
",",
"fp",
")",
":",
"# Check the header",
"try",
":",
"h",
"=",
"fp",
".",
"read",
"(",
"4",
")",
"self",
".",
"bytes_read",
"+=",
"4",
"except",
":",
"raise",
"IOError",
"(",
"\"Couldn't read track header from file. Byte %d.\"",
"%",
"self",
".",
"bytes_read",
")",
"if",
"h",
"!=",
"'MTrk'",
":",
"raise",
"HeaderError",
"(",
"'Not a valid Track header. Byte %d.'",
"%",
"self",
".",
"bytes_read",
")",
"# Parse the size of the header",
"try",
":",
"chunk_size",
"=",
"fp",
".",
"read",
"(",
"4",
")",
"self",
".",
"bytes_read",
"+=",
"4",
"except",
":",
"raise",
"IOError",
"(",
"\"Couldn't read track chunk size from file.\"",
")",
"chunk_size",
"=",
"self",
".",
"bytes_to_int",
"(",
"chunk_size",
")",
"return",
"chunk_size"
] |
Return the size of the track chunk.
|
[
"Return",
"the",
"size",
"of",
"the",
"track",
"chunk",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/midi/midi_file_in.py#L322-L342
|
bspaans/python-mingus
|
mingus/midi/midi_file_in.py
|
MidiFile.parse_midi_file
|
def parse_midi_file(self, file):
"""Parse a MIDI file.
Return the header -as a tuple containing respectively the MIDI
format, the number of tracks and the time division-, the parsed
track data and the number of bytes read.
"""
try:
f = open(file, 'r')
except:
raise IOError('File not found')
self.bytes_read = 0
header = self.parse_midi_file_header(f)
tracks = header[1]
result = []
while tracks > 0:
events = self.parse_track(f)
result.append(events)
tracks -= 1
f.close()
return (header, result)
|
python
|
def parse_midi_file(self, file):
try:
f = open(file, 'r')
except:
raise IOError('File not found')
self.bytes_read = 0
header = self.parse_midi_file_header(f)
tracks = header[1]
result = []
while tracks > 0:
events = self.parse_track(f)
result.append(events)
tracks -= 1
f.close()
return (header, result)
|
[
"def",
"parse_midi_file",
"(",
"self",
",",
"file",
")",
":",
"try",
":",
"f",
"=",
"open",
"(",
"file",
",",
"'r'",
")",
"except",
":",
"raise",
"IOError",
"(",
"'File not found'",
")",
"self",
".",
"bytes_read",
"=",
"0",
"header",
"=",
"self",
".",
"parse_midi_file_header",
"(",
"f",
")",
"tracks",
"=",
"header",
"[",
"1",
"]",
"result",
"=",
"[",
"]",
"while",
"tracks",
">",
"0",
":",
"events",
"=",
"self",
".",
"parse_track",
"(",
"f",
")",
"result",
".",
"append",
"(",
"events",
")",
"tracks",
"-=",
"1",
"f",
".",
"close",
"(",
")",
"return",
"(",
"header",
",",
"result",
")"
] |
Parse a MIDI file.
Return the header -as a tuple containing respectively the MIDI
format, the number of tracks and the time division-, the parsed
track data and the number of bytes read.
|
[
"Parse",
"a",
"MIDI",
"file",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/midi/midi_file_in.py#L344-L364
|
bspaans/python-mingus
|
mingus/midi/midi_file_in.py
|
MidiFile.parse_varbyte_as_int
|
def parse_varbyte_as_int(self, fp, return_bytes_read=True):
"""Read a variable length byte from the file and return the
corresponding integer."""
result = 0
bytes_read = 0
r = 0x80
while r & 0x80:
try:
r = self.bytes_to_int(fp.read(1))
self.bytes_read += 1
except:
raise IOError("Couldn't read variable length byte from file.")
if r & 0x80:
result = (result << 7) + (r & 0x7F)
else:
result = (result << 7) + r
bytes_read += 1
if not return_bytes_read:
return result
else:
return (result, bytes_read)
|
python
|
def parse_varbyte_as_int(self, fp, return_bytes_read=True):
result = 0
bytes_read = 0
r = 0x80
while r & 0x80:
try:
r = self.bytes_to_int(fp.read(1))
self.bytes_read += 1
except:
raise IOError("Couldn't read variable length byte from file.")
if r & 0x80:
result = (result << 7) + (r & 0x7F)
else:
result = (result << 7) + r
bytes_read += 1
if not return_bytes_read:
return result
else:
return (result, bytes_read)
|
[
"def",
"parse_varbyte_as_int",
"(",
"self",
",",
"fp",
",",
"return_bytes_read",
"=",
"True",
")",
":",
"result",
"=",
"0",
"bytes_read",
"=",
"0",
"r",
"=",
"0x80",
"while",
"r",
"&",
"0x80",
":",
"try",
":",
"r",
"=",
"self",
".",
"bytes_to_int",
"(",
"fp",
".",
"read",
"(",
"1",
")",
")",
"self",
".",
"bytes_read",
"+=",
"1",
"except",
":",
"raise",
"IOError",
"(",
"\"Couldn't read variable length byte from file.\"",
")",
"if",
"r",
"&",
"0x80",
":",
"result",
"=",
"(",
"result",
"<<",
"7",
")",
"+",
"(",
"r",
"&",
"0x7F",
")",
"else",
":",
"result",
"=",
"(",
"result",
"<<",
"7",
")",
"+",
"r",
"bytes_read",
"+=",
"1",
"if",
"not",
"return_bytes_read",
":",
"return",
"result",
"else",
":",
"return",
"(",
"result",
",",
"bytes_read",
")"
] |
Read a variable length byte from the file and return the
corresponding integer.
|
[
"Read",
"a",
"variable",
"length",
"byte",
"from",
"the",
"file",
"and",
"return",
"the",
"corresponding",
"integer",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/midi/midi_file_in.py#L366-L386
|
bspaans/python-mingus
|
mingus/containers/note.py
|
Note.set_note
|
def set_note(self, name='C', octave=4, dynamics={}):
"""Set the note to name in octave with dynamics.
Return the objects if it succeeded, raise an NoteFormatError
otherwise.
"""
dash_index = name.split('-')
if len(dash_index) == 1:
if notes.is_valid_note(name):
self.name = name
self.octave = octave
self.dynamics = dynamics
return self
else:
raise NoteFormatError("The string '%s' is not a valid "
"representation of a note in mingus" % name)
elif len(dash_index) == 2:
if notes.is_valid_note(dash_index[0]):
self.name = dash_index[0]
self.octave = int(dash_index[1])
self.dynamics = dynamics
return self
else:
raise NoteFormatError("The string '%s' is not a valid "
"representation of a note in mingus" % name)
return False
|
python
|
def set_note(self, name='C', octave=4, dynamics={}):
dash_index = name.split('-')
if len(dash_index) == 1:
if notes.is_valid_note(name):
self.name = name
self.octave = octave
self.dynamics = dynamics
return self
else:
raise NoteFormatError("The string '%s' is not a valid "
"representation of a note in mingus" % name)
elif len(dash_index) == 2:
if notes.is_valid_note(dash_index[0]):
self.name = dash_index[0]
self.octave = int(dash_index[1])
self.dynamics = dynamics
return self
else:
raise NoteFormatError("The string '%s' is not a valid "
"representation of a note in mingus" % name)
return False
|
[
"def",
"set_note",
"(",
"self",
",",
"name",
"=",
"'C'",
",",
"octave",
"=",
"4",
",",
"dynamics",
"=",
"{",
"}",
")",
":",
"dash_index",
"=",
"name",
".",
"split",
"(",
"'-'",
")",
"if",
"len",
"(",
"dash_index",
")",
"==",
"1",
":",
"if",
"notes",
".",
"is_valid_note",
"(",
"name",
")",
":",
"self",
".",
"name",
"=",
"name",
"self",
".",
"octave",
"=",
"octave",
"self",
".",
"dynamics",
"=",
"dynamics",
"return",
"self",
"else",
":",
"raise",
"NoteFormatError",
"(",
"\"The string '%s' is not a valid \"",
"\"representation of a note in mingus\"",
"%",
"name",
")",
"elif",
"len",
"(",
"dash_index",
")",
"==",
"2",
":",
"if",
"notes",
".",
"is_valid_note",
"(",
"dash_index",
"[",
"0",
"]",
")",
":",
"self",
".",
"name",
"=",
"dash_index",
"[",
"0",
"]",
"self",
".",
"octave",
"=",
"int",
"(",
"dash_index",
"[",
"1",
"]",
")",
"self",
".",
"dynamics",
"=",
"dynamics",
"return",
"self",
"else",
":",
"raise",
"NoteFormatError",
"(",
"\"The string '%s' is not a valid \"",
"\"representation of a note in mingus\"",
"%",
"name",
")",
"return",
"False"
] |
Set the note to name in octave with dynamics.
Return the objects if it succeeded, raise an NoteFormatError
otherwise.
|
[
"Set",
"the",
"note",
"to",
"name",
"in",
"octave",
"with",
"dynamics",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/note.py#L70-L95
|
bspaans/python-mingus
|
mingus/containers/note.py
|
Note.change_octave
|
def change_octave(self, diff):
"""Change the octave of the note to the current octave + diff."""
self.octave += diff
if self.octave < 0:
self.octave = 0
|
python
|
def change_octave(self, diff):
self.octave += diff
if self.octave < 0:
self.octave = 0
|
[
"def",
"change_octave",
"(",
"self",
",",
"diff",
")",
":",
"self",
".",
"octave",
"+=",
"diff",
"if",
"self",
".",
"octave",
"<",
"0",
":",
"self",
".",
"octave",
"=",
"0"
] |
Change the octave of the note to the current octave + diff.
|
[
"Change",
"the",
"octave",
"of",
"the",
"note",
"to",
"the",
"current",
"octave",
"+",
"diff",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/note.py#L111-L115
|
bspaans/python-mingus
|
mingus/containers/note.py
|
Note.transpose
|
def transpose(self, interval, up=True):
"""Transpose the note up or down the interval.
Examples:
>>> a = Note('A')
>>> a.transpose('3')
>>> a
'C#-5'
>>> a.transpose('3', False)
>>> a
'A-4'
"""
(old, o_octave) = (self.name, self.octave)
self.name = intervals.from_shorthand(self.name, interval, up)
if up:
if self < Note(old, o_octave):
self.octave += 1
else:
if self > Note(old, o_octave):
self.octave -= 1
|
python
|
def transpose(self, interval, up=True):
(old, o_octave) = (self.name, self.octave)
self.name = intervals.from_shorthand(self.name, interval, up)
if up:
if self < Note(old, o_octave):
self.octave += 1
else:
if self > Note(old, o_octave):
self.octave -= 1
|
[
"def",
"transpose",
"(",
"self",
",",
"interval",
",",
"up",
"=",
"True",
")",
":",
"(",
"old",
",",
"o_octave",
")",
"=",
"(",
"self",
".",
"name",
",",
"self",
".",
"octave",
")",
"self",
".",
"name",
"=",
"intervals",
".",
"from_shorthand",
"(",
"self",
".",
"name",
",",
"interval",
",",
"up",
")",
"if",
"up",
":",
"if",
"self",
"<",
"Note",
"(",
"old",
",",
"o_octave",
")",
":",
"self",
".",
"octave",
"+=",
"1",
"else",
":",
"if",
"self",
">",
"Note",
"(",
"old",
",",
"o_octave",
")",
":",
"self",
".",
"octave",
"-=",
"1"
] |
Transpose the note up or down the interval.
Examples:
>>> a = Note('A')
>>> a.transpose('3')
>>> a
'C#-5'
>>> a.transpose('3', False)
>>> a
'A-4'
|
[
"Transpose",
"the",
"note",
"up",
"or",
"down",
"the",
"interval",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/note.py#L129-L148
|
bspaans/python-mingus
|
mingus/containers/note.py
|
Note.from_int
|
def from_int(self, integer):
"""Set the Note corresponding to the integer.
0 is a C on octave 0, 12 is a C on octave 1, etc.
Example:
>>> Note().from_int(12)
'C-1'
"""
self.name = notes.int_to_note(integer % 12)
self.octave = integer // 12
return self
|
python
|
def from_int(self, integer):
self.name = notes.int_to_note(integer % 12)
self.octave = integer // 12
return self
|
[
"def",
"from_int",
"(",
"self",
",",
"integer",
")",
":",
"self",
".",
"name",
"=",
"notes",
".",
"int_to_note",
"(",
"integer",
"%",
"12",
")",
"self",
".",
"octave",
"=",
"integer",
"//",
"12",
"return",
"self"
] |
Set the Note corresponding to the integer.
0 is a C on octave 0, 12 is a C on octave 1, etc.
Example:
>>> Note().from_int(12)
'C-1'
|
[
"Set",
"the",
"Note",
"corresponding",
"to",
"the",
"integer",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/note.py#L150-L161
|
bspaans/python-mingus
|
mingus/containers/note.py
|
Note.from_hertz
|
def from_hertz(self, hertz, standard_pitch=440):
"""Set the Note name and pitch, calculated from the hertz value.
The standard_pitch argument can be used to set the pitch of A-4,
from which the rest is calculated.
"""
value = ((log((float(hertz) * 1024) / standard_pitch, 2) +
1.0 / 24) * 12 + 9) # notes.note_to_int("A")
self.name = notes.int_to_note(int(value) % 12)
self.octave = int(value / 12) - 6
return self
|
python
|
def from_hertz(self, hertz, standard_pitch=440):
value = ((log((float(hertz) * 1024) / standard_pitch, 2) +
1.0 / 24) * 12 + 9)
self.name = notes.int_to_note(int(value) % 12)
self.octave = int(value / 12) - 6
return self
|
[
"def",
"from_hertz",
"(",
"self",
",",
"hertz",
",",
"standard_pitch",
"=",
"440",
")",
":",
"value",
"=",
"(",
"(",
"log",
"(",
"(",
"float",
"(",
"hertz",
")",
"*",
"1024",
")",
"/",
"standard_pitch",
",",
"2",
")",
"+",
"1.0",
"/",
"24",
")",
"*",
"12",
"+",
"9",
")",
"# notes.note_to_int(\"A\")",
"self",
".",
"name",
"=",
"notes",
".",
"int_to_note",
"(",
"int",
"(",
"value",
")",
"%",
"12",
")",
"self",
".",
"octave",
"=",
"int",
"(",
"value",
"/",
"12",
")",
"-",
"6",
"return",
"self"
] |
Set the Note name and pitch, calculated from the hertz value.
The standard_pitch argument can be used to set the pitch of A-4,
from which the rest is calculated.
|
[
"Set",
"the",
"Note",
"name",
"and",
"pitch",
"calculated",
"from",
"the",
"hertz",
"value",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/note.py#L184-L194
|
bspaans/python-mingus
|
mingus/containers/note.py
|
Note.to_shorthand
|
def to_shorthand(self):
"""Give the traditional Helmhotz pitch notation.
Examples:
>>> Note('C-4').to_shorthand()
"c'"
>>> Note('C-3').to_shorthand()
'c'
>>> Note('C-2').to_shorthand()
'C'
>>> Note('C-1').to_shorthand()
'C,'
"""
if self.octave < 3:
res = self.name
else:
res = str.lower(self.name)
o = self.octave - 3
while o < -1:
res += ','
o += 1
while o > 0:
res += "'"
o -= 1
return res
|
python
|
def to_shorthand(self):
if self.octave < 3:
res = self.name
else:
res = str.lower(self.name)
o = self.octave - 3
while o < -1:
res += ','
o += 1
while o > 0:
res += "'"
o -= 1
return res
|
[
"def",
"to_shorthand",
"(",
"self",
")",
":",
"if",
"self",
".",
"octave",
"<",
"3",
":",
"res",
"=",
"self",
".",
"name",
"else",
":",
"res",
"=",
"str",
".",
"lower",
"(",
"self",
".",
"name",
")",
"o",
"=",
"self",
".",
"octave",
"-",
"3",
"while",
"o",
"<",
"-",
"1",
":",
"res",
"+=",
"','",
"o",
"+=",
"1",
"while",
"o",
">",
"0",
":",
"res",
"+=",
"\"'\"",
"o",
"-=",
"1",
"return",
"res"
] |
Give the traditional Helmhotz pitch notation.
Examples:
>>> Note('C-4').to_shorthand()
"c'"
>>> Note('C-3').to_shorthand()
'c'
>>> Note('C-2').to_shorthand()
'C'
>>> Note('C-1').to_shorthand()
'C,'
|
[
"Give",
"the",
"traditional",
"Helmhotz",
"pitch",
"notation",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/note.py#L196-L220
|
bspaans/python-mingus
|
mingus/containers/note.py
|
Note.from_shorthand
|
def from_shorthand(self, shorthand):
"""Convert from traditional Helmhotz pitch notation.
Examples:
>>> Note().from_shorthand("C,,")
'C-0'
>>> Note().from_shorthand("C")
'C-2'
>>> Note().from_shorthand("c'")
'C-4'
"""
name = ''
octave = 0
for x in shorthand:
if x in ['a', 'b', 'c', 'd', 'e', 'f', 'g']:
name = str.upper(x)
octave = 3
elif x in ['A', 'B', 'C', 'D', 'E', 'F', 'G']:
name = x
octave = 2
elif x in ['#', 'b']:
name += x
elif x == ',':
octave -= 1
elif x == "'":
octave += 1
return self.set_note(name, octave, {})
|
python
|
def from_shorthand(self, shorthand):
name = ''
octave = 0
for x in shorthand:
if x in ['a', 'b', 'c', 'd', 'e', 'f', 'g']:
name = str.upper(x)
octave = 3
elif x in ['A', 'B', 'C', 'D', 'E', 'F', 'G']:
name = x
octave = 2
elif x in ['
name += x
elif x == ',':
octave -= 1
elif x == "'":
octave += 1
return self.set_note(name, octave, {})
|
[
"def",
"from_shorthand",
"(",
"self",
",",
"shorthand",
")",
":",
"name",
"=",
"''",
"octave",
"=",
"0",
"for",
"x",
"in",
"shorthand",
":",
"if",
"x",
"in",
"[",
"'a'",
",",
"'b'",
",",
"'c'",
",",
"'d'",
",",
"'e'",
",",
"'f'",
",",
"'g'",
"]",
":",
"name",
"=",
"str",
".",
"upper",
"(",
"x",
")",
"octave",
"=",
"3",
"elif",
"x",
"in",
"[",
"'A'",
",",
"'B'",
",",
"'C'",
",",
"'D'",
",",
"'E'",
",",
"'F'",
",",
"'G'",
"]",
":",
"name",
"=",
"x",
"octave",
"=",
"2",
"elif",
"x",
"in",
"[",
"'#'",
",",
"'b'",
"]",
":",
"name",
"+=",
"x",
"elif",
"x",
"==",
"','",
":",
"octave",
"-=",
"1",
"elif",
"x",
"==",
"\"'\"",
":",
"octave",
"+=",
"1",
"return",
"self",
".",
"set_note",
"(",
"name",
",",
"octave",
",",
"{",
"}",
")"
] |
Convert from traditional Helmhotz pitch notation.
Examples:
>>> Note().from_shorthand("C,,")
'C-0'
>>> Note().from_shorthand("C")
'C-2'
>>> Note().from_shorthand("c'")
'C-4'
|
[
"Convert",
"from",
"traditional",
"Helmhotz",
"pitch",
"notation",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/note.py#L222-L248
|
bspaans/python-mingus
|
mingus/core/intervals.py
|
interval
|
def interval(key, start_note, interval):
"""Return the note found at the interval starting from start_note in the
given key.
Raise a KeyError exception if start_note is not a valid note.
Example:
>>> interval('C', 'D', 1)
'E'
"""
if not notes.is_valid_note(start_note):
raise KeyError("The start note '%s' is not a valid note" % start_note)
notes_in_key = keys.get_notes(key)
for n in notes_in_key:
if n[0] == start_note[0]:
index = notes_in_key.index(n)
return notes_in_key[(index + interval) % 7]
|
python
|
def interval(key, start_note, interval):
if not notes.is_valid_note(start_note):
raise KeyError("The start note '%s' is not a valid note" % start_note)
notes_in_key = keys.get_notes(key)
for n in notes_in_key:
if n[0] == start_note[0]:
index = notes_in_key.index(n)
return notes_in_key[(index + interval) % 7]
|
[
"def",
"interval",
"(",
"key",
",",
"start_note",
",",
"interval",
")",
":",
"if",
"not",
"notes",
".",
"is_valid_note",
"(",
"start_note",
")",
":",
"raise",
"KeyError",
"(",
"\"The start note '%s' is not a valid note\"",
"%",
"start_note",
")",
"notes_in_key",
"=",
"keys",
".",
"get_notes",
"(",
"key",
")",
"for",
"n",
"in",
"notes_in_key",
":",
"if",
"n",
"[",
"0",
"]",
"==",
"start_note",
"[",
"0",
"]",
":",
"index",
"=",
"notes_in_key",
".",
"index",
"(",
"n",
")",
"return",
"notes_in_key",
"[",
"(",
"index",
"+",
"interval",
")",
"%",
"7",
"]"
] |
Return the note found at the interval starting from start_note in the
given key.
Raise a KeyError exception if start_note is not a valid note.
Example:
>>> interval('C', 'D', 1)
'E'
|
[
"Return",
"the",
"note",
"found",
"at",
"the",
"interval",
"starting",
"from",
"start_note",
"in",
"the",
"given",
"key",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/core/intervals.py#L37-L53
|
bspaans/python-mingus
|
mingus/core/intervals.py
|
get_interval
|
def get_interval(note, interval, key='C'):
"""Return the note an interval (in half notes) away from the given note.
This will produce mostly theoretical sound results, but you should use
the minor and major functions to work around the corner cases.
"""
intervals = map(lambda x: (notes.note_to_int(key) + x) % 12, [
0,
2,
4,
5,
7,
9,
11,
])
key_notes = keys.get_notes(key)
for x in key_notes:
if x[0] == note[0]:
result = (intervals[key_notes.index(x)] + interval) % 12
if result in intervals:
return key_notes[intervals.index(result)] + note[1:]
else:
return notes.diminish(key_notes[intervals.index((result + 1) % 12)]
+ note[1:])
|
python
|
def get_interval(note, interval, key='C'):
intervals = map(lambda x: (notes.note_to_int(key) + x) % 12, [
0,
2,
4,
5,
7,
9,
11,
])
key_notes = keys.get_notes(key)
for x in key_notes:
if x[0] == note[0]:
result = (intervals[key_notes.index(x)] + interval) % 12
if result in intervals:
return key_notes[intervals.index(result)] + note[1:]
else:
return notes.diminish(key_notes[intervals.index((result + 1) % 12)]
+ note[1:])
|
[
"def",
"get_interval",
"(",
"note",
",",
"interval",
",",
"key",
"=",
"'C'",
")",
":",
"intervals",
"=",
"map",
"(",
"lambda",
"x",
":",
"(",
"notes",
".",
"note_to_int",
"(",
"key",
")",
"+",
"x",
")",
"%",
"12",
",",
"[",
"0",
",",
"2",
",",
"4",
",",
"5",
",",
"7",
",",
"9",
",",
"11",
",",
"]",
")",
"key_notes",
"=",
"keys",
".",
"get_notes",
"(",
"key",
")",
"for",
"x",
"in",
"key_notes",
":",
"if",
"x",
"[",
"0",
"]",
"==",
"note",
"[",
"0",
"]",
":",
"result",
"=",
"(",
"intervals",
"[",
"key_notes",
".",
"index",
"(",
"x",
")",
"]",
"+",
"interval",
")",
"%",
"12",
"if",
"result",
"in",
"intervals",
":",
"return",
"key_notes",
"[",
"intervals",
".",
"index",
"(",
"result",
")",
"]",
"+",
"note",
"[",
"1",
":",
"]",
"else",
":",
"return",
"notes",
".",
"diminish",
"(",
"key_notes",
"[",
"intervals",
".",
"index",
"(",
"(",
"result",
"+",
"1",
")",
"%",
"12",
")",
"]",
"+",
"note",
"[",
"1",
":",
"]",
")"
] |
Return the note an interval (in half notes) away from the given note.
This will produce mostly theoretical sound results, but you should use
the minor and major functions to work around the corner cases.
|
[
"Return",
"the",
"note",
"an",
"interval",
"(",
"in",
"half",
"notes",
")",
"away",
"from",
"the",
"given",
"note",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/core/intervals.py#L210-L233
|
bspaans/python-mingus
|
mingus/core/intervals.py
|
measure
|
def measure(note1, note2):
"""Return an integer in the range of 0-11, determining the half note steps
between note1 and note2.
Examples:
>>> measure('C', 'D')
2
>>> measure('D', 'C')
10
"""
res = notes.note_to_int(note2) - notes.note_to_int(note1)
if res < 0:
return 12 - res * -1
else:
return res
|
python
|
def measure(note1, note2):
res = notes.note_to_int(note2) - notes.note_to_int(note1)
if res < 0:
return 12 - res * -1
else:
return res
|
[
"def",
"measure",
"(",
"note1",
",",
"note2",
")",
":",
"res",
"=",
"notes",
".",
"note_to_int",
"(",
"note2",
")",
"-",
"notes",
".",
"note_to_int",
"(",
"note1",
")",
"if",
"res",
"<",
"0",
":",
"return",
"12",
"-",
"res",
"*",
"-",
"1",
"else",
":",
"return",
"res"
] |
Return an integer in the range of 0-11, determining the half note steps
between note1 and note2.
Examples:
>>> measure('C', 'D')
2
>>> measure('D', 'C')
10
|
[
"Return",
"an",
"integer",
"in",
"the",
"range",
"of",
"0",
"-",
"11",
"determining",
"the",
"half",
"note",
"steps",
"between",
"note1",
"and",
"note2",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/core/intervals.py#L235-L249
|
bspaans/python-mingus
|
mingus/core/intervals.py
|
augment_or_diminish_until_the_interval_is_right
|
def augment_or_diminish_until_the_interval_is_right(note1, note2, interval):
"""A helper function for the minor and major functions.
You should probably not use this directly.
"""
cur = measure(note1, note2)
while cur != interval:
if cur > interval:
note2 = notes.diminish(note2)
elif cur < interval:
note2 = notes.augment(note2)
cur = measure(note1, note2)
# We are practically done right now, but we need to be able to create the
# minor seventh of Cb and get Bbb instead of B######### as the result
val = 0
for token in note2[1:]:
if token == '#':
val += 1
elif token == 'b':
val -= 1
# These are some checks to see if we have generated too much #'s or too much
# b's. In these cases we need to convert #'s to b's and vice versa.
if val > 6:
val = val % 12
val = -12 + val
elif val < -6:
val = val % -12
val = 12 + val
# Rebuild the note
result = note2[0]
while val > 0:
result = notes.augment(result)
val -= 1
while val < 0:
result = notes.diminish(result)
val += 1
return result
|
python
|
def augment_or_diminish_until_the_interval_is_right(note1, note2, interval):
cur = measure(note1, note2)
while cur != interval:
if cur > interval:
note2 = notes.diminish(note2)
elif cur < interval:
note2 = notes.augment(note2)
cur = measure(note1, note2)
val = 0
for token in note2[1:]:
if token == '
val += 1
elif token == 'b':
val -= 1
if val > 6:
val = val % 12
val = -12 + val
elif val < -6:
val = val % -12
val = 12 + val
result = note2[0]
while val > 0:
result = notes.augment(result)
val -= 1
while val < 0:
result = notes.diminish(result)
val += 1
return result
|
[
"def",
"augment_or_diminish_until_the_interval_is_right",
"(",
"note1",
",",
"note2",
",",
"interval",
")",
":",
"cur",
"=",
"measure",
"(",
"note1",
",",
"note2",
")",
"while",
"cur",
"!=",
"interval",
":",
"if",
"cur",
">",
"interval",
":",
"note2",
"=",
"notes",
".",
"diminish",
"(",
"note2",
")",
"elif",
"cur",
"<",
"interval",
":",
"note2",
"=",
"notes",
".",
"augment",
"(",
"note2",
")",
"cur",
"=",
"measure",
"(",
"note1",
",",
"note2",
")",
"# We are practically done right now, but we need to be able to create the",
"# minor seventh of Cb and get Bbb instead of B######### as the result",
"val",
"=",
"0",
"for",
"token",
"in",
"note2",
"[",
"1",
":",
"]",
":",
"if",
"token",
"==",
"'#'",
":",
"val",
"+=",
"1",
"elif",
"token",
"==",
"'b'",
":",
"val",
"-=",
"1",
"# These are some checks to see if we have generated too much #'s or too much",
"# b's. In these cases we need to convert #'s to b's and vice versa.",
"if",
"val",
">",
"6",
":",
"val",
"=",
"val",
"%",
"12",
"val",
"=",
"-",
"12",
"+",
"val",
"elif",
"val",
"<",
"-",
"6",
":",
"val",
"=",
"val",
"%",
"-",
"12",
"val",
"=",
"12",
"+",
"val",
"# Rebuild the note",
"result",
"=",
"note2",
"[",
"0",
"]",
"while",
"val",
">",
"0",
":",
"result",
"=",
"notes",
".",
"augment",
"(",
"result",
")",
"val",
"-=",
"1",
"while",
"val",
"<",
"0",
":",
"result",
"=",
"notes",
".",
"diminish",
"(",
"result",
")",
"val",
"+=",
"1",
"return",
"result"
] |
A helper function for the minor and major functions.
You should probably not use this directly.
|
[
"A",
"helper",
"function",
"for",
"the",
"minor",
"and",
"major",
"functions",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/core/intervals.py#L251-L290
|
bspaans/python-mingus
|
mingus/core/intervals.py
|
invert
|
def invert(interval):
"""Invert an interval.
Example:
>>> invert(['C', 'E'])
['E', 'C']
"""
interval.reverse()
res = list(interval)
interval.reverse()
return res
|
python
|
def invert(interval):
interval.reverse()
res = list(interval)
interval.reverse()
return res
|
[
"def",
"invert",
"(",
"interval",
")",
":",
"interval",
".",
"reverse",
"(",
")",
"res",
"=",
"list",
"(",
"interval",
")",
"interval",
".",
"reverse",
"(",
")",
"return",
"res"
] |
Invert an interval.
Example:
>>> invert(['C', 'E'])
['E', 'C']
|
[
"Invert",
"an",
"interval",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/core/intervals.py#L292-L302
|
bspaans/python-mingus
|
mingus/core/intervals.py
|
determine
|
def determine(note1, note2, shorthand=False):
"""Name the interval between note1 and note2.
Examples:
>>> determine('C', 'E')
'major third'
>>> determine('C', 'Eb')
'minor third'
>>> determine('C', 'E#')
'augmented third'
>>> determine('C', 'Ebb')
'diminished third'
This works for all intervals. Note that there are corner cases for major
fifths and fourths:
>>> determine('C', 'G')
'perfect fifth'
>>> determine('C', 'F')
'perfect fourth'
"""
# Corner case for unisons ('A' and 'Ab', for instance)
if note1[0] == note2[0]:
def get_val(note):
"""Private function: count the value of accidentals."""
r = 0
for x in note[1:]:
if x == 'b':
r -= 1
elif x == '#':
r += 1
return r
x = get_val(note1)
y = get_val(note2)
if x == y:
if not shorthand:
return 'major unison'
return '1'
elif x < y:
if not shorthand:
return 'augmented unison'
return '#1'
elif x - y == 1:
if not shorthand:
return 'minor unison'
return 'b1'
else:
if not shorthand:
return 'diminished unison'
return 'bb1'
# Other intervals
n1 = notes.fifths.index(note1[0])
n2 = notes.fifths.index(note2[0])
number_of_fifth_steps = n2 - n1
if n2 < n1:
number_of_fifth_steps = len(notes.fifths) - n1 + n2
# [name, shorthand_name, half notes for major version of this interval]
fifth_steps = [
['unison', '1', 0],
['fifth', '5', 7],
['second', '2', 2],
['sixth', '6', 9],
['third', '3', 4],
['seventh', '7', 11],
['fourth', '4', 5],
]
# Count half steps between note1 and note2
half_notes = measure(note1, note2)
# Get the proper list from the number of fifth steps
current = fifth_steps[number_of_fifth_steps]
# maj = number of major steps for this interval
maj = current[2]
# if maj is equal to the half steps between note1 and note2 the interval is
# major or perfect
if maj == half_notes:
# Corner cases for perfect fifths and fourths
if current[0] == 'fifth':
if not shorthand:
return 'perfect fifth'
elif current[0] == 'fourth':
if not shorthand:
return 'perfect fourth'
if not shorthand:
return 'major ' + current[0]
return current[1]
elif maj + 1 <= half_notes:
# if maj + 1 is equal to half_notes, the interval is augmented.
if not shorthand:
return 'augmented ' + current[0]
return '#' * (half_notes - maj) + current[1]
elif maj - 1 == half_notes:
# etc.
if not shorthand:
return 'minor ' + current[0]
return 'b' + current[1]
elif maj - 2 >= half_notes:
if not shorthand:
return 'diminished ' + current[0]
return 'b' * (maj - half_notes) + current[1]
|
python
|
def determine(note1, note2, shorthand=False):
if note1[0] == note2[0]:
def get_val(note):
r = 0
for x in note[1:]:
if x == 'b':
r -= 1
elif x == '
r += 1
return r
x = get_val(note1)
y = get_val(note2)
if x == y:
if not shorthand:
return 'major unison'
return '1'
elif x < y:
if not shorthand:
return 'augmented unison'
return '
elif x - y == 1:
if not shorthand:
return 'minor unison'
return 'b1'
else:
if not shorthand:
return 'diminished unison'
return 'bb1'
n1 = notes.fifths.index(note1[0])
n2 = notes.fifths.index(note2[0])
number_of_fifth_steps = n2 - n1
if n2 < n1:
number_of_fifth_steps = len(notes.fifths) - n1 + n2
fifth_steps = [
['unison', '1', 0],
['fifth', '5', 7],
['second', '2', 2],
['sixth', '6', 9],
['third', '3', 4],
['seventh', '7', 11],
['fourth', '4', 5],
]
half_notes = measure(note1, note2)
current = fifth_steps[number_of_fifth_steps]
maj = current[2]
if maj == half_notes:
if current[0] == 'fifth':
if not shorthand:
return 'perfect fifth'
elif current[0] == 'fourth':
if not shorthand:
return 'perfect fourth'
if not shorthand:
return 'major ' + current[0]
return current[1]
elif maj + 1 <= half_notes:
if not shorthand:
return 'augmented ' + current[0]
return '
elif maj - 1 == half_notes:
if not shorthand:
return 'minor ' + current[0]
return 'b' + current[1]
elif maj - 2 >= half_notes:
if not shorthand:
return 'diminished ' + current[0]
return 'b' * (maj - half_notes) + current[1]
|
[
"def",
"determine",
"(",
"note1",
",",
"note2",
",",
"shorthand",
"=",
"False",
")",
":",
"# Corner case for unisons ('A' and 'Ab', for instance)",
"if",
"note1",
"[",
"0",
"]",
"==",
"note2",
"[",
"0",
"]",
":",
"def",
"get_val",
"(",
"note",
")",
":",
"\"\"\"Private function: count the value of accidentals.\"\"\"",
"r",
"=",
"0",
"for",
"x",
"in",
"note",
"[",
"1",
":",
"]",
":",
"if",
"x",
"==",
"'b'",
":",
"r",
"-=",
"1",
"elif",
"x",
"==",
"'#'",
":",
"r",
"+=",
"1",
"return",
"r",
"x",
"=",
"get_val",
"(",
"note1",
")",
"y",
"=",
"get_val",
"(",
"note2",
")",
"if",
"x",
"==",
"y",
":",
"if",
"not",
"shorthand",
":",
"return",
"'major unison'",
"return",
"'1'",
"elif",
"x",
"<",
"y",
":",
"if",
"not",
"shorthand",
":",
"return",
"'augmented unison'",
"return",
"'#1'",
"elif",
"x",
"-",
"y",
"==",
"1",
":",
"if",
"not",
"shorthand",
":",
"return",
"'minor unison'",
"return",
"'b1'",
"else",
":",
"if",
"not",
"shorthand",
":",
"return",
"'diminished unison'",
"return",
"'bb1'",
"# Other intervals",
"n1",
"=",
"notes",
".",
"fifths",
".",
"index",
"(",
"note1",
"[",
"0",
"]",
")",
"n2",
"=",
"notes",
".",
"fifths",
".",
"index",
"(",
"note2",
"[",
"0",
"]",
")",
"number_of_fifth_steps",
"=",
"n2",
"-",
"n1",
"if",
"n2",
"<",
"n1",
":",
"number_of_fifth_steps",
"=",
"len",
"(",
"notes",
".",
"fifths",
")",
"-",
"n1",
"+",
"n2",
"# [name, shorthand_name, half notes for major version of this interval]",
"fifth_steps",
"=",
"[",
"[",
"'unison'",
",",
"'1'",
",",
"0",
"]",
",",
"[",
"'fifth'",
",",
"'5'",
",",
"7",
"]",
",",
"[",
"'second'",
",",
"'2'",
",",
"2",
"]",
",",
"[",
"'sixth'",
",",
"'6'",
",",
"9",
"]",
",",
"[",
"'third'",
",",
"'3'",
",",
"4",
"]",
",",
"[",
"'seventh'",
",",
"'7'",
",",
"11",
"]",
",",
"[",
"'fourth'",
",",
"'4'",
",",
"5",
"]",
",",
"]",
"# Count half steps between note1 and note2",
"half_notes",
"=",
"measure",
"(",
"note1",
",",
"note2",
")",
"# Get the proper list from the number of fifth steps",
"current",
"=",
"fifth_steps",
"[",
"number_of_fifth_steps",
"]",
"# maj = number of major steps for this interval",
"maj",
"=",
"current",
"[",
"2",
"]",
"# if maj is equal to the half steps between note1 and note2 the interval is",
"# major or perfect",
"if",
"maj",
"==",
"half_notes",
":",
"# Corner cases for perfect fifths and fourths",
"if",
"current",
"[",
"0",
"]",
"==",
"'fifth'",
":",
"if",
"not",
"shorthand",
":",
"return",
"'perfect fifth'",
"elif",
"current",
"[",
"0",
"]",
"==",
"'fourth'",
":",
"if",
"not",
"shorthand",
":",
"return",
"'perfect fourth'",
"if",
"not",
"shorthand",
":",
"return",
"'major '",
"+",
"current",
"[",
"0",
"]",
"return",
"current",
"[",
"1",
"]",
"elif",
"maj",
"+",
"1",
"<=",
"half_notes",
":",
"# if maj + 1 is equal to half_notes, the interval is augmented.",
"if",
"not",
"shorthand",
":",
"return",
"'augmented '",
"+",
"current",
"[",
"0",
"]",
"return",
"'#'",
"*",
"(",
"half_notes",
"-",
"maj",
")",
"+",
"current",
"[",
"1",
"]",
"elif",
"maj",
"-",
"1",
"==",
"half_notes",
":",
"# etc.",
"if",
"not",
"shorthand",
":",
"return",
"'minor '",
"+",
"current",
"[",
"0",
"]",
"return",
"'b'",
"+",
"current",
"[",
"1",
"]",
"elif",
"maj",
"-",
"2",
">=",
"half_notes",
":",
"if",
"not",
"shorthand",
":",
"return",
"'diminished '",
"+",
"current",
"[",
"0",
"]",
"return",
"'b'",
"*",
"(",
"maj",
"-",
"half_notes",
")",
"+",
"current",
"[",
"1",
"]"
] |
Name the interval between note1 and note2.
Examples:
>>> determine('C', 'E')
'major third'
>>> determine('C', 'Eb')
'minor third'
>>> determine('C', 'E#')
'augmented third'
>>> determine('C', 'Ebb')
'diminished third'
This works for all intervals. Note that there are corner cases for major
fifths and fourths:
>>> determine('C', 'G')
'perfect fifth'
>>> determine('C', 'F')
'perfect fourth'
|
[
"Name",
"the",
"interval",
"between",
"note1",
"and",
"note2",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/core/intervals.py#L304-L408
|
bspaans/python-mingus
|
mingus/core/intervals.py
|
from_shorthand
|
def from_shorthand(note, interval, up=True):
"""Return the note on interval up or down.
Examples:
>>> from_shorthand('A', 'b3')
'C'
>>> from_shorthand('D', '2')
'E'
>>> from_shorthand('E', '2', False)
'D'
"""
# warning should be a valid note.
if not notes.is_valid_note(note):
return False
# [shorthand, interval function up, interval function down]
shorthand_lookup = [
['1', major_unison, major_unison],
['2', major_second, minor_seventh],
['3', major_third, minor_sixth],
['4', major_fourth, major_fifth],
['5', major_fifth, major_fourth],
['6', major_sixth, minor_third],
['7', major_seventh, minor_second],
]
# Looking up last character in interval in shorthand_lookup and calling that
# function.
val = False
for shorthand in shorthand_lookup:
if shorthand[0] == interval[-1]:
if up:
val = shorthand[1](note)
else:
val = shorthand[2](note)
# warning Last character in interval should be 1-7
if val == False:
return False
# Collect accidentals
for x in interval:
if x == '#':
if up:
val = notes.augment(val)
else:
val = notes.diminish(val)
elif x == 'b':
if up:
val = notes.diminish(val)
else:
val = notes.augment(val)
else:
return val
|
python
|
def from_shorthand(note, interval, up=True):
if not notes.is_valid_note(note):
return False
shorthand_lookup = [
['1', major_unison, major_unison],
['2', major_second, minor_seventh],
['3', major_third, minor_sixth],
['4', major_fourth, major_fifth],
['5', major_fifth, major_fourth],
['6', major_sixth, minor_third],
['7', major_seventh, minor_second],
]
val = False
for shorthand in shorthand_lookup:
if shorthand[0] == interval[-1]:
if up:
val = shorthand[1](note)
else:
val = shorthand[2](note)
if val == False:
return False
for x in interval:
if x == '
if up:
val = notes.augment(val)
else:
val = notes.diminish(val)
elif x == 'b':
if up:
val = notes.diminish(val)
else:
val = notes.augment(val)
else:
return val
|
[
"def",
"from_shorthand",
"(",
"note",
",",
"interval",
",",
"up",
"=",
"True",
")",
":",
"# warning should be a valid note.",
"if",
"not",
"notes",
".",
"is_valid_note",
"(",
"note",
")",
":",
"return",
"False",
"# [shorthand, interval function up, interval function down]",
"shorthand_lookup",
"=",
"[",
"[",
"'1'",
",",
"major_unison",
",",
"major_unison",
"]",
",",
"[",
"'2'",
",",
"major_second",
",",
"minor_seventh",
"]",
",",
"[",
"'3'",
",",
"major_third",
",",
"minor_sixth",
"]",
",",
"[",
"'4'",
",",
"major_fourth",
",",
"major_fifth",
"]",
",",
"[",
"'5'",
",",
"major_fifth",
",",
"major_fourth",
"]",
",",
"[",
"'6'",
",",
"major_sixth",
",",
"minor_third",
"]",
",",
"[",
"'7'",
",",
"major_seventh",
",",
"minor_second",
"]",
",",
"]",
"# Looking up last character in interval in shorthand_lookup and calling that",
"# function.",
"val",
"=",
"False",
"for",
"shorthand",
"in",
"shorthand_lookup",
":",
"if",
"shorthand",
"[",
"0",
"]",
"==",
"interval",
"[",
"-",
"1",
"]",
":",
"if",
"up",
":",
"val",
"=",
"shorthand",
"[",
"1",
"]",
"(",
"note",
")",
"else",
":",
"val",
"=",
"shorthand",
"[",
"2",
"]",
"(",
"note",
")",
"# warning Last character in interval should be 1-7",
"if",
"val",
"==",
"False",
":",
"return",
"False",
"# Collect accidentals",
"for",
"x",
"in",
"interval",
":",
"if",
"x",
"==",
"'#'",
":",
"if",
"up",
":",
"val",
"=",
"notes",
".",
"augment",
"(",
"val",
")",
"else",
":",
"val",
"=",
"notes",
".",
"diminish",
"(",
"val",
")",
"elif",
"x",
"==",
"'b'",
":",
"if",
"up",
":",
"val",
"=",
"notes",
".",
"diminish",
"(",
"val",
")",
"else",
":",
"val",
"=",
"notes",
".",
"augment",
"(",
"val",
")",
"else",
":",
"return",
"val"
] |
Return the note on interval up or down.
Examples:
>>> from_shorthand('A', 'b3')
'C'
>>> from_shorthand('D', '2')
'E'
>>> from_shorthand('E', '2', False)
'D'
|
[
"Return",
"the",
"note",
"on",
"interval",
"up",
"or",
"down",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/core/intervals.py#L410-L463
|
bspaans/python-mingus
|
mingus/core/intervals.py
|
is_consonant
|
def is_consonant(note1, note2, include_fourths=True):
"""Return True if the interval is consonant.
A consonance is a harmony, chord, or interval considered stable, as
opposed to a dissonance.
This function tests whether the given interval is consonant. This
basically means that it checks whether the interval is (or sounds like)
a unison, third, sixth, perfect fourth or perfect fifth.
In classical music the fourth is considered dissonant when used
contrapuntal, which is why you can choose to exclude it.
"""
return (is_perfect_consonant(note1, note2, include_fourths) or
is_imperfect_consonant(note1, note2))
|
python
|
def is_consonant(note1, note2, include_fourths=True):
return (is_perfect_consonant(note1, note2, include_fourths) or
is_imperfect_consonant(note1, note2))
|
[
"def",
"is_consonant",
"(",
"note1",
",",
"note2",
",",
"include_fourths",
"=",
"True",
")",
":",
"return",
"(",
"is_perfect_consonant",
"(",
"note1",
",",
"note2",
",",
"include_fourths",
")",
"or",
"is_imperfect_consonant",
"(",
"note1",
",",
"note2",
")",
")"
] |
Return True if the interval is consonant.
A consonance is a harmony, chord, or interval considered stable, as
opposed to a dissonance.
This function tests whether the given interval is consonant. This
basically means that it checks whether the interval is (or sounds like)
a unison, third, sixth, perfect fourth or perfect fifth.
In classical music the fourth is considered dissonant when used
contrapuntal, which is why you can choose to exclude it.
|
[
"Return",
"True",
"if",
"the",
"interval",
"is",
"consonant",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/core/intervals.py#L465-L479
|
bspaans/python-mingus
|
mingus/core/intervals.py
|
is_perfect_consonant
|
def is_perfect_consonant(note1, note2, include_fourths=True):
"""Return True if the interval is a perfect consonant one.
Perfect consonances are either unisons, perfect fourths or fifths, or
octaves (which is the same as a unison in this model).
Perfect fourths are usually included as well, but are considered
dissonant when used contrapuntal, which is why you can exclude them.
"""
dhalf = measure(note1, note2)
return dhalf in [0, 7] or include_fourths and dhalf == 5
|
python
|
def is_perfect_consonant(note1, note2, include_fourths=True):
dhalf = measure(note1, note2)
return dhalf in [0, 7] or include_fourths and dhalf == 5
|
[
"def",
"is_perfect_consonant",
"(",
"note1",
",",
"note2",
",",
"include_fourths",
"=",
"True",
")",
":",
"dhalf",
"=",
"measure",
"(",
"note1",
",",
"note2",
")",
"return",
"dhalf",
"in",
"[",
"0",
",",
"7",
"]",
"or",
"include_fourths",
"and",
"dhalf",
"==",
"5"
] |
Return True if the interval is a perfect consonant one.
Perfect consonances are either unisons, perfect fourths or fifths, or
octaves (which is the same as a unison in this model).
Perfect fourths are usually included as well, but are considered
dissonant when used contrapuntal, which is why you can exclude them.
|
[
"Return",
"True",
"if",
"the",
"interval",
"is",
"a",
"perfect",
"consonant",
"one",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/core/intervals.py#L481-L491
|
bspaans/python-mingus
|
mingus/containers/composition.py
|
Composition.add_track
|
def add_track(self, track):
"""Add a track to the composition.
Raise an UnexpectedObjectError if the argument is not a
mingus.containers.Track object.
"""
if not hasattr(track, 'bars'):
raise UnexpectedObjectError("Unexpected object '%s', "
"expecting a mingus.containers.Track object" % track)
self.tracks.append(track)
self.selected_tracks = [len(self.tracks) - 1]
|
python
|
def add_track(self, track):
if not hasattr(track, 'bars'):
raise UnexpectedObjectError("Unexpected object '%s', "
"expecting a mingus.containers.Track object" % track)
self.tracks.append(track)
self.selected_tracks = [len(self.tracks) - 1]
|
[
"def",
"add_track",
"(",
"self",
",",
"track",
")",
":",
"if",
"not",
"hasattr",
"(",
"track",
",",
"'bars'",
")",
":",
"raise",
"UnexpectedObjectError",
"(",
"\"Unexpected object '%s', \"",
"\"expecting a mingus.containers.Track object\"",
"%",
"track",
")",
"self",
".",
"tracks",
".",
"append",
"(",
"track",
")",
"self",
".",
"selected_tracks",
"=",
"[",
"len",
"(",
"self",
".",
"tracks",
")",
"-",
"1",
"]"
] |
Add a track to the composition.
Raise an UnexpectedObjectError if the argument is not a
mingus.containers.Track object.
|
[
"Add",
"a",
"track",
"to",
"the",
"composition",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/composition.py#L55-L65
|
bspaans/python-mingus
|
mingus/containers/composition.py
|
Composition.add_note
|
def add_note(self, note):
"""Add a note to the selected tracks.
Everything container.Track supports in __add__ is accepted.
"""
for n in self.selected_tracks:
self.tracks[n] + note
|
python
|
def add_note(self, note):
for n in self.selected_tracks:
self.tracks[n] + note
|
[
"def",
"add_note",
"(",
"self",
",",
"note",
")",
":",
"for",
"n",
"in",
"self",
".",
"selected_tracks",
":",
"self",
".",
"tracks",
"[",
"n",
"]",
"+",
"note"
] |
Add a note to the selected tracks.
Everything container.Track supports in __add__ is accepted.
|
[
"Add",
"a",
"note",
"to",
"the",
"selected",
"tracks",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/containers/composition.py#L67-L73
|
bspaans/python-mingus
|
mingus/midi/pyfluidsynth.py
|
cfunc
|
def cfunc(name, result, *args):
"""Build and apply a ctypes prototype complete with parameter flags."""
atypes = []
aflags = []
for arg in args:
atypes.append(arg[1])
aflags.append((arg[2], arg[0]) + arg[3:])
return CFUNCTYPE(result, *atypes)((name, _fl), tuple(aflags))
|
python
|
def cfunc(name, result, *args):
atypes = []
aflags = []
for arg in args:
atypes.append(arg[1])
aflags.append((arg[2], arg[0]) + arg[3:])
return CFUNCTYPE(result, *atypes)((name, _fl), tuple(aflags))
|
[
"def",
"cfunc",
"(",
"name",
",",
"result",
",",
"*",
"args",
")",
":",
"atypes",
"=",
"[",
"]",
"aflags",
"=",
"[",
"]",
"for",
"arg",
"in",
"args",
":",
"atypes",
".",
"append",
"(",
"arg",
"[",
"1",
"]",
")",
"aflags",
".",
"append",
"(",
"(",
"arg",
"[",
"2",
"]",
",",
"arg",
"[",
"0",
"]",
")",
"+",
"arg",
"[",
"3",
":",
"]",
")",
"return",
"CFUNCTYPE",
"(",
"result",
",",
"*",
"atypes",
")",
"(",
"(",
"name",
",",
"_fl",
")",
",",
"tuple",
"(",
"aflags",
")",
")"
] |
Build and apply a ctypes prototype complete with parameter flags.
|
[
"Build",
"and",
"apply",
"a",
"ctypes",
"prototype",
"complete",
"with",
"parameter",
"flags",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/midi/pyfluidsynth.py#L41-L48
|
bspaans/python-mingus
|
mingus/midi/pyfluidsynth.py
|
fluid_synth_write_s16_stereo
|
def fluid_synth_write_s16_stereo(synth, len):
"""Return generated samples in stereo 16-bit format.
Return value is a Numpy array of samples.
"""
import numpy
buf = create_string_buffer(len * 4)
fluid_synth_write_s16(synth, len, buf, 0, 2, buf, 1, 2)
return numpy.fromstring(buf[:], dtype=numpy.int16)
|
python
|
def fluid_synth_write_s16_stereo(synth, len):
import numpy
buf = create_string_buffer(len * 4)
fluid_synth_write_s16(synth, len, buf, 0, 2, buf, 1, 2)
return numpy.fromstring(buf[:], dtype=numpy.int16)
|
[
"def",
"fluid_synth_write_s16_stereo",
"(",
"synth",
",",
"len",
")",
":",
"import",
"numpy",
"buf",
"=",
"create_string_buffer",
"(",
"len",
"*",
"4",
")",
"fluid_synth_write_s16",
"(",
"synth",
",",
"len",
",",
"buf",
",",
"0",
",",
"2",
",",
"buf",
",",
"1",
",",
"2",
")",
"return",
"numpy",
".",
"fromstring",
"(",
"buf",
"[",
":",
"]",
",",
"dtype",
"=",
"numpy",
".",
"int16",
")"
] |
Return generated samples in stereo 16-bit format.
Return value is a Numpy array of samples.
|
[
"Return",
"generated",
"samples",
"in",
"stereo",
"16",
"-",
"bit",
"format",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/midi/pyfluidsynth.py#L132-L140
|
bspaans/python-mingus
|
mingus/midi/pyfluidsynth.py
|
Synth.start
|
def start(self, driver=None):
"""Start audio output driver in separate background thread.
Call this function any time after creating the Synth object.
If you don't call this function, use get_samples() to generate
samples.
Optional keyword argument:
driver: which audio driver to use for output
Possible choices:
'alsa', 'oss', 'jack', 'portaudio'
'sndmgr', 'coreaudio', 'Direct Sound',
'dsound', 'pulseaudio'
Not all drivers will be available for every platform, it depends on
which drivers were compiled into FluidSynth for your platform.
"""
if driver is not None:
assert driver in [
'alsa',
'oss',
'jack',
'portaudio',
'sndmgr',
'coreaudio',
'Direct Sound',
'dsound',
'pulseaudio'
]
fluid_settings_setstr(self.settings, 'audio.driver', driver)
self.audio_driver = new_fluid_audio_driver(self.settings, self.synth)
|
python
|
def start(self, driver=None):
if driver is not None:
assert driver in [
'alsa',
'oss',
'jack',
'portaudio',
'sndmgr',
'coreaudio',
'Direct Sound',
'dsound',
'pulseaudio'
]
fluid_settings_setstr(self.settings, 'audio.driver', driver)
self.audio_driver = new_fluid_audio_driver(self.settings, self.synth)
|
[
"def",
"start",
"(",
"self",
",",
"driver",
"=",
"None",
")",
":",
"if",
"driver",
"is",
"not",
"None",
":",
"assert",
"driver",
"in",
"[",
"'alsa'",
",",
"'oss'",
",",
"'jack'",
",",
"'portaudio'",
",",
"'sndmgr'",
",",
"'coreaudio'",
",",
"'Direct Sound'",
",",
"'dsound'",
",",
"'pulseaudio'",
"]",
"fluid_settings_setstr",
"(",
"self",
".",
"settings",
",",
"'audio.driver'",
",",
"driver",
")",
"self",
".",
"audio_driver",
"=",
"new_fluid_audio_driver",
"(",
"self",
".",
"settings",
",",
"self",
".",
"synth",
")"
] |
Start audio output driver in separate background thread.
Call this function any time after creating the Synth object.
If you don't call this function, use get_samples() to generate
samples.
Optional keyword argument:
driver: which audio driver to use for output
Possible choices:
'alsa', 'oss', 'jack', 'portaudio'
'sndmgr', 'coreaudio', 'Direct Sound',
'dsound', 'pulseaudio'
Not all drivers will be available for every platform, it depends on
which drivers were compiled into FluidSynth for your platform.
|
[
"Start",
"audio",
"output",
"driver",
"in",
"separate",
"background",
"thread",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/midi/pyfluidsynth.py#L164-L194
|
bspaans/python-mingus
|
mingus/midi/pyfluidsynth.py
|
Synth.program_select
|
def program_select(self, chan, sfid, bank, preset):
"""Select a program."""
return fluid_synth_program_select(self.synth, chan, sfid, bank, preset)
|
python
|
def program_select(self, chan, sfid, bank, preset):
return fluid_synth_program_select(self.synth, chan, sfid, bank, preset)
|
[
"def",
"program_select",
"(",
"self",
",",
"chan",
",",
"sfid",
",",
"bank",
",",
"preset",
")",
":",
"return",
"fluid_synth_program_select",
"(",
"self",
".",
"synth",
",",
"chan",
",",
"sfid",
",",
"bank",
",",
"preset",
")"
] |
Select a program.
|
[
"Select",
"a",
"program",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/midi/pyfluidsynth.py#L210-L212
|
bspaans/python-mingus
|
mingus/midi/pyfluidsynth.py
|
Synth.noteon
|
def noteon(self, chan, key, vel):
"""Play a note."""
if key < 0 or key > 128:
return False
if chan < 0:
return False
if vel < 0 or vel > 128:
return False
return fluid_synth_noteon(self.synth, chan, key, vel)
|
python
|
def noteon(self, chan, key, vel):
if key < 0 or key > 128:
return False
if chan < 0:
return False
if vel < 0 or vel > 128:
return False
return fluid_synth_noteon(self.synth, chan, key, vel)
|
[
"def",
"noteon",
"(",
"self",
",",
"chan",
",",
"key",
",",
"vel",
")",
":",
"if",
"key",
"<",
"0",
"or",
"key",
">",
"128",
":",
"return",
"False",
"if",
"chan",
"<",
"0",
":",
"return",
"False",
"if",
"vel",
"<",
"0",
"or",
"vel",
">",
"128",
":",
"return",
"False",
"return",
"fluid_synth_noteon",
"(",
"self",
".",
"synth",
",",
"chan",
",",
"key",
",",
"vel",
")"
] |
Play a note.
|
[
"Play",
"a",
"note",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/midi/pyfluidsynth.py#L214-L222
|
bspaans/python-mingus
|
mingus/midi/pyfluidsynth.py
|
Synth.noteoff
|
def noteoff(self, chan, key):
"""Stop a note."""
if key < 0 or key > 128:
return False
if chan < 0:
return False
return fluid_synth_noteoff(self.synth, chan, key)
|
python
|
def noteoff(self, chan, key):
if key < 0 or key > 128:
return False
if chan < 0:
return False
return fluid_synth_noteoff(self.synth, chan, key)
|
[
"def",
"noteoff",
"(",
"self",
",",
"chan",
",",
"key",
")",
":",
"if",
"key",
"<",
"0",
"or",
"key",
">",
"128",
":",
"return",
"False",
"if",
"chan",
"<",
"0",
":",
"return",
"False",
"return",
"fluid_synth_noteoff",
"(",
"self",
".",
"synth",
",",
"chan",
",",
"key",
")"
] |
Stop a note.
|
[
"Stop",
"a",
"note",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/midi/pyfluidsynth.py#L224-L230
|
bspaans/python-mingus
|
mingus/midi/pyfluidsynth.py
|
Synth.cc
|
def cc(self, chan, ctrl, val):
"""Send control change value.
The controls that are recognized are dependent on the
SoundFont. Values are always 0 to 127. Typical controls
include:
1: vibrato
7: volume
10: pan (left to right)
11: expression (soft to loud)
64: sustain
91: reverb
93: chorus
"""
return fluid_synth_cc(self.synth, chan, ctrl, val)
|
python
|
def cc(self, chan, ctrl, val):
return fluid_synth_cc(self.synth, chan, ctrl, val)
|
[
"def",
"cc",
"(",
"self",
",",
"chan",
",",
"ctrl",
",",
"val",
")",
":",
"return",
"fluid_synth_cc",
"(",
"self",
".",
"synth",
",",
"chan",
",",
"ctrl",
",",
"val",
")"
] |
Send control change value.
The controls that are recognized are dependent on the
SoundFont. Values are always 0 to 127. Typical controls
include:
1: vibrato
7: volume
10: pan (left to right)
11: expression (soft to loud)
64: sustain
91: reverb
93: chorus
|
[
"Send",
"control",
"change",
"value",
"."
] |
train
|
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/midi/pyfluidsynth.py#L242-L256
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.