Code-with-docstring dataset extract (lang: python for every record). Each record below lists: body_hash (64-character hash), path, name, repository_name, repository_stars, and the function body with its docstring.

body_hash: 2a7a13e065d3bb7dfedf58f41cefe5eb3e4f772d0e4d3785fdc3151617023fc6
path: custom_components/nuvo_serial/helpers.py | name: get_sources_from_dict | repository: lleo19/hacs-nuvo-serial (2 stars) | lang: python

@callback
def get_sources_from_dict(data: MappingProxyType[str, Any]) -> list[Any]:
    """Munge Sources."""
    sources_config = data[CONF_SOURCES]
    source_id_name = {int(index): name for index, name in sources_config.items()}
    source_name_id = {v: k for k, v in source_id_name.items()}
    source_names = sorted(source_name_id.keys(), key=lambda v: source_name_id[v])
    return [source_id_name, source_name_id, source_names]

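A quick standalone check of the munging step above; the sources config here is made up, while the real mapping comes from the integration's CONF_SOURCES config entry:

# Hypothetical sources config keyed by stringified source indices.
sources_config = {"2": "Turntable", "1": "Streamer", "3": "CD"}

source_id_name = {int(index): name for index, name in sources_config.items()}
source_name_id = {v: k for k, v in source_id_name.items()}
source_names = sorted(source_name_id.keys(), key=lambda v: source_name_id[v])

print(source_id_name)  # {2: 'Turntable', 1: 'Streamer', 3: 'CD'}
print(source_name_id)  # {'Turntable': 2, 'Streamer': 1, 'CD': 3}
print(source_names)    # ['Streamer', 'Turntable', 'CD']
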
body_hash: 1952dce78bdde5646f1b7fc2213f6396e3f928f822d5d30919e9a1ef5939c81a
path: custom_components/nuvo_serial/helpers.py | name: get_sources | repository: lleo19/hacs-nuvo-serial (2 stars) | lang: python

@callback
def get_sources(config_entry: ConfigEntry) -> list[Any]:
    """Get the Nuvo Sources."""
    if CONF_SOURCES in config_entry.options:
        data = config_entry.options
    else:
        data = config_entry.data
    return get_sources_from_dict(data)

body_hash: 0456cd5242ea816c7027aa9fe079325e9f334805817f6e8a7050584adc071829
path: custom_components/nuvo_serial/helpers.py | name: get_zones | repository: lleo19/hacs-nuvo-serial (2 stars) | lang: python

@callback
def get_zones(config_entry: ConfigEntry) -> dict[str, str]:
    """Get the Nuvo Zones."""
    if CONF_ZONES in config_entry.options:
        data = config_entry.options
    else:
        data = config_entry.data
    zone: dict[str, str] = data[CONF_ZONES]
    return zone

body_hash: 057f351585d7ddc5ad16dd13d74c8718218095fdc377d7654e51e1747daaeeec
path: markov/words.py | name: close | repository: karolciba/playground (0 stars) | lang: python

def close(self, database=None):
    """If opened from a database, persist it; optionally save to the specified file."""
    import pickle
    if self._database or database:
        target = self._database or database
        pickle.dump([self._dictionary, self._words_ids, self._words_vector], open(target, 'wb'))

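A hypothetical loading counterpart, assuming the same three-list pickle layout that close() writes; the function name is an illustration, not part of the source record:

import pickle

def load(database):
    # Restore the [dictionary, words_ids, words_vector] triple dumped by close().
    with open(database, "rb") as fh:
        dictionary, words_ids, words_vector = pickle.load(fh)
    return dictionary, words_ids, words_vector
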
body_hash: b68cf4ca89f9aa1cca6d124431187da381d4e19e2fc139aa6299a4f869acf5f6
path: build_library/generate_au_zip.py | name: CreateTempDir | repository: dongsupark/scripts (6 stars) | lang: python

def CreateTempDir():
    """Creates a tempdir and returns the name of the tempdir."""
    temp_dir = tempfile.mkdtemp(suffix='au', prefix='tmp')
    logging.debug('Using tempdir = %s', temp_dir)
    return temp_dir

body_hash: af40e174ff0ae5a055d42eb26ff11b98f048a531003e37bcda8753b00163f5e0
path: build_library/generate_au_zip.py | name: _SplitAndStrip | repository: dongsupark/scripts (6 stars) | lang: python

def _SplitAndStrip(data):
    """Prunes the ldd output and returns a list of needed library names.
    Example of data:
        linux-vdso.so.1 => (0x00007ffffc96a000)
        libbz2.so.1 => /lib/libbz2.so.1 (0x00007f3ff8782000)
        libc.so.6 => /lib/libc.so.6 (0x00007f3ff83ff000)
        /lib64/ld-linux-x86-64.so.2 (0x00007f3ff89b3000)
    Args:
        data: list of libraries from ldd output
    Returns:
        list of libraries that we should copy
    """
    return_list = []
    for line in data.split('\n'):
        if 'not found' in line:
            raise _LibNotFound(line)
        line = re.sub(r'.*not a dynamic executable.*', '', line)
        line = re.sub(r'.* =>\s+', '', line)
        line = re.sub(r'\(0x.*\)\s?', '', line)
        line = line.strip()
        if not len(line):
            continue
        logging.debug('MATCHED line = %s', line)
        return_list.append(line)
    return return_list

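The pruning logic in isolation; the regexes are copied from the function and the sample input mirrors the docstring:

import re

data = """linux-vdso.so.1 =>  (0x00007ffffc96a000)
libbz2.so.1 => /lib/libbz2.so.1 (0x00007f3ff8782000)
/lib64/ld-linux-x86-64.so.2 (0x00007f3ff89b3000)"""

libs = []
for line in data.split("\n"):
    line = re.sub(r".*not a dynamic executable.*", "", line)
    line = re.sub(r".* =>\s+", "", line)      # drop the "name =>" prefix
    line = re.sub(r"\(0x.*\)\s?", "", line)   # drop the load address
    line = line.strip()
    if line:
        libs.append(line)

print(libs)  # ['/lib/libbz2.so.1', '/lib64/ld-linux-x86-64.so.2']
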
body_hash: c876f88e84101929203c568c47fcb1bda241de7721e51ef02c8ed13124d8b649
path: build_library/generate_au_zip.py | name: DepsToCopy | repository: dongsupark/scripts (6 stars) | lang: python

def DepsToCopy(ldd_files, allow_list):
    """Returns a list of deps for a given dynamic executables list.
    Args:
        ldd_files: List of dynamic files that need to have the deps evaluated
        allow_list: List of files that we should allow
    Returns:
        List of files that are dependencies
    """
    libs = set()
    for file_name in ldd_files:
        logging.debug('Running ldd on %s', file_name)
        cmd = ['/usr/bin/ldd', file_name]
        stdout_data = ''
        stderr_data = ''
        try:
            proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout_data, stderr_data = proc.communicate(input=None)
        except subprocess.CalledProcessError as e:
            logging.error('Command %s failed', cmd)
            logging.error('error code %s', e.returncode)
            logging.error('output %s', e.output)
            raise
        if not stdout_data:
            continue
        stdout_data = stdout_data.decode('utf8')
        stderr_data = stderr_data.decode('utf8')
        logging.debug('ldd for %s = stdout = %s stderr =%s', file_name, stdout_data, stderr_data)
        try:
            libs |= set(_SplitAndStrip(stdout_data))
        except _LibNotFound as ex:
            logging.error('ldd for %s failed: %s', file_name, ex)
            sys.exit(1)
    result = _ExcludeDenylist(list(libs), DENY_LIST)
    _EnforceAllowList(list(libs), allow_list=allow_list)
    return result

body_hash: 31d38b04937b5e97be089c65082ff7f5df70ab44ef73ed3281778a7a6a015e92
path: build_library/generate_au_zip.py | name: CopyRequiredFiles | repository: dongsupark/scripts (6 stars) | lang: python

def CopyRequiredFiles(dest_files_root, allow_list):
    """Generates a list of files that are required for the au-generator zip file.
    Args:
        dest_files_root: location of the directory where we should copy the files
        allow_list: List of files that we should allow
    """
    if not dest_files_root:
        logging.error('Invalid option passed for dest_files_root')
        sys.exit(1)
    all_files = DYNAMIC_EXECUTABLES + STATIC_FILES
    all_files = list(map(os.path.expanduser, all_files))
    for file_name in all_files:
        if not os.path.isfile(file_name):
            logging.error('file = %s does not exist', file_name)
            sys.exit(1)
    logging.debug('Given files that need to be copied = %s' % ''.join(all_files))
    for file_name in all_files:
        logging.debug('Copying file %s to %s', file_name, dest_files_root)
        try:
            shutil.copy2(file_name, dest_files_root)
        except EnvironmentError:
            logging.exception("Copying '%s' to %s failed", file_name, dest_files_root)
            sys.exit(1)
    libraries = DepsToCopy(ldd_files=DYNAMIC_EXECUTABLES, allow_list=allow_list)
    lib_dir = os.path.join(dest_files_root, LIB_DIR)
    os.mkdir(lib_dir)
    for file_name in libraries:
        logging.debug('Copying file %s to %s', file_name, lib_dir)
        try:
            shutil.copy2(file_name, lib_dir)
        except EnvironmentError:
            logging.exception("Copying '%s' to %s failed", file_name, lib_dir)
            sys.exit(1)
    for source_dir, target_dir in RECURSE_DIRS.items():
        logging.debug('Processing directory %s', source_dir)
        full_path = os.path.expanduser(source_dir)
        if not os.path.isdir(full_path):
            logging.error("Directory given for %s expanded to %s doesn't exist.", source_dir, full_path)
            sys.exit(1)
        dest = os.path.join(dest_files_root, target_dir)
        logging.debug('Copying directory %s to %s.', full_path, target_dir)
        try:
            shutil.copytree(full_path, dest)
        except EnvironmentError:
            logging.exception("Copying tree '%s' to %s failed", full_path, dest)
            sys.exit(1)

body_hash: e71f3e600219a028549b0cbf4c4711cb5ba473ba02fd34c0afa329488f955c56
path: build_library/generate_au_zip.py | name: WrapExecutableFiles | repository: dongsupark/scripts (6 stars) | lang: python

def WrapExecutableFiles(dest_files_root, ld_linux):
    """Our dynamically linked executables have to be invoked using the library
    versions they were linked with inside the chroot (from libc on), as well
    as the dynamic linker they were built with inside the chroot.

    So, this code moves the execs to backup names, and then creates a shell
    script wrapper which invokes them in the proper way.
    """
    for src_exec in DYNAMIC_EXECUTABLES:
        base_exec = os.path.basename(src_exec)
        local_exec = os.path.join(dest_files_root, base_exec)
        local_exec_wrapped = local_exec + '.bin'
        shutil.move(local_exec, local_exec_wrapped)
        fd = os.open(local_exec, os.O_WRONLY | os.O_CREAT, 475)
        with os.fdopen(fd, 'w') as script:
            script.write('#!/bin/sh\n')
            script.write('# Auto-generated wrapper script\n')
            script.write('thisdir="$(dirname "$0")"\n')
            script.write('LD_LIBRARY_PATH=\n')
            script.write('exec "$thisdir/%s/%s" --library-path "$thisdir/%s" "$thisdir/%s.bin" "$@"\n'
                         % (LIB_DIR, ld_linux, LIB_DIR, base_exec))

body_hash: bde3539e1b6e25f9570f8925c738e1872ee6f1e05bb464bedf110d5d9116da22
path: build_library/generate_au_zip.py | name: CleanUp | repository: dongsupark/scripts (6 stars) | lang: python

def CleanUp(temp_dir):
    """Cleans up the tempdir.
    Args:
        temp_dir: name of the directory to cleanup
    """
    if os.path.exists(temp_dir):
        shutil.rmtree(temp_dir, ignore_errors=True)
        logging.debug('Removed tempdir = %s', temp_dir)

body_hash: 01aa8d406b547421ae4a9dc5656d12667f7cf604ad97186ca5dd096fd1635a7d
path: build_library/generate_au_zip.py | name: GenerateZipFile | repository: dongsupark/scripts (6 stars) | lang: python

def GenerateZipFile(base_name, root_dir):
    """Returns true if able to generate zip file.
    Args:
        base_name: name of the zip file
        root_dir: location of the directory that we should zip
    Returns:
        True if successfully generates the zip file otherwise False
    """
    logging.debug('Generating zip file %s with contents from %s', base_name, root_dir)
    current_dir = os.getcwd()
    os.chdir(root_dir)
    try:
        subprocess.Popen(['zip', '-r', '-9', base_name, '.'], stdout=subprocess.PIPE).communicate()[0]
    except OSError as e:
        logging.error('Execution failed:%s', e.strerror)
        return False
    finally:
        os.chdir(current_dir)
    return True

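A usage sketch with illustrative paths; because the function chdirs into root_dir before running zip, base_name should be an absolute path so the archive does not end up inside the tree being zipped:

if GenerateZipFile("/tmp/au-generator.zip", "/tmp/tmp12au/au-generator"):
    print("zip written to /tmp/au-generator.zip")
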
body_hash: 4c6fbbe4cac0e3eff0981f2aab8ecf758880a5843a5242e4168f48dad1eb1e9a
path: build_library/generate_au_zip.py | name: _ExcludeDenylist | repository: dongsupark/scripts (6 stars) | lang: python

def _ExcludeDenylist(library_list, deny_list=[]):
    """Deletes the set of files from deny_list from the library_list.
    Args:
        library_list: List of the library names to filter through deny_list
        deny_list: List of the deny listed names to filter
    Returns:
        Filtered library_list
    """
    if not deny_list:
        return library_list
    return_list = []
    pattern = re.compile('|'.join(deny_list))
    logging.debug('PATTERN: %s=', pattern)
    for library in library_list:
        if pattern.search(library):
            logging.debug('DENY-LISTED = %s=', library)
            continue
        return_list.append(library)
    logging.debug('Returning return_list=%s=', return_list)
    return return_list

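The deny-list filter in isolation; library names and patterns here are made up for illustration:

import re

library_list = ["/lib/libbz2.so.1", "/lib/libc.so.6", "/lib64/ld-linux-x86-64.so.2"]
deny_list = [r"libc\.so", r"ld-linux"]

pattern = re.compile("|".join(deny_list))
kept = [lib for lib in library_list if not pattern.search(lib)]
print(kept)  # ['/lib/libbz2.so.1']
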
body_hash: 8064db96581957148845f892f270e2d1be8b2600fc4b4d495af85a4fbfba51cc
path: build_library/generate_au_zip.py | name: _EnforceAllowList | repository: dongsupark/scripts (6 stars) | lang: python

def _EnforceAllowList(library_list, allow_list=[]):
    """Ensures that library_list contains all the items from allow_list.
    Args:
        library_list: List of the library names to check
        allow_list: List of the items that ought to be in the library_list
    """
    for allow_item in allow_list:
        pattern = re.compile(allow_item)
        logging.debug('PATTERN: %s=', pattern)
        found = False
        for library in library_list:
            if pattern.search(library):
                found = True
                break
        if not found:
            logging.error('Required ALLOW_LIST items %s not found!!!' % allow_item)
            exit(1)

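A non-exiting variant of the same check, useful for seeing the matching behavior on its own; this is a sketch, not the function above, and the sample names are illustrative:

import re

def all_allow_items_present(library_list, allow_list):
    # True only if every allow-list pattern matches at least one library.
    return all(any(re.search(pattern, lib) for lib in library_list)
               for pattern in allow_list)

print(all_allow_items_present(["/lib/libc.so.6"], [r"libc\.so"]))   # True
print(all_allow_items_present(["/lib/libc.so.6"], [r"ld-linux"]))   # False
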
body_hash: 078a25e9f817d9a80aced399949718845b17d838c1878e4b9e636eafd3197653
path: build_library/generate_au_zip.py | name: CopyZipToFinalDestination | repository: dongsupark/scripts (6 stars) | lang: python

def CopyZipToFinalDestination(output_dir, zip_file_name):
    """Copies the generated zip file to a final destination.
    Args:
        output_dir: Directory where the file should be copied to
        zip_file_name: name of the zip file that should be copied
    Returns:
        True on Success False on Failure
    """
    if not os.path.isfile(zip_file_name):
        logging.error("Zip file %s doesn't exist. Returning False", zip_file_name)
        return False
    if not os.path.isdir(output_dir):
        logging.debug('Creating %s', output_dir)
        os.makedirs(output_dir)
    logging.debug('Copying %s to %s', zip_file_name, output_dir)
    shutil.copy2(zip_file_name, output_dir)
    return True

body_hash: 25355aec21c8b6d9ef1a583ae2528236738c64d1d1591901ddf6a7bebfb9eb83
path: build_library/generate_au_zip.py | name: main | repository: dongsupark/scripts (6 stars) | lang: python

def main():
    """Main function to start the script."""
    parser = optparse.OptionParser()
    parser.add_option('-d', '--debug', dest='debug', action='store_true', default=False,
                      help='Verbose Default: False')
    parser.add_option('-o', '--output-dir', dest='output_dir', default='/tmp/au-generator',
                      help='Specify the output location for copying the zipfile')
    parser.add_option('-z', '--zip-name', dest='zip_name', default='au-generator.zip',
                      help='Name of the zip file')
    parser.add_option('-k', '--keep-temp', dest='keep_temp', default=False, action='store_true',
                      help='Keep the temp files...')
    parser.add_option('-a', '--arch', dest='arch', default='amd64',
                      help='Arch amd64/arm64. Default: amd64')
    options, args = parser.parse_args()
    if options.debug:
        logging.getLogger().setLevel(logging.DEBUG)
    logging.debug('Options are %s ', options)
    temp_dir = CreateTempDir()
    dest_files_root = os.path.join(temp_dir, 'au-generator')
    os.makedirs(dest_files_root)
    if options.arch == 'arm64':
        ld_linux = LD_LINUX_ARM64
        allow_list = ALLOW_LIST_ARM64
    else:
        ld_linux = LD_LINUX_AMD64
        allow_list = ALLOW_LIST_AMD64
    CopyRequiredFiles(dest_files_root=dest_files_root, allow_list=allow_list)
    WrapExecutableFiles(dest_files_root=dest_files_root, ld_linux=ld_linux)
    zip_file_name = os.path.join(temp_dir, options.zip_name)
    GenerateZipFile(zip_file_name, dest_files_root)
    CopyZipToFinalDestination(options.output_dir, zip_file_name)
    logging.info('Generated %s/%s' % (options.output_dir, options.zip_name))
    if not options.keep_temp:
        CleanUp(temp_dir)

body_hash: 121d6fd32ab11e4bd1ed85ad6e991679c829ce4df300cf4a4914f6ae19439fc9
path: clastic/server.py | name: enable_tty_echo | repository: mahmoud/clastic (140 stars) | lang: python

def enable_tty_echo(tty=None):
    """
    Re-enables proper console behavior, primarily for when a reload is
    triggered at a PDB prompt.

    TODO: context manager for ignoring signals
    """
    if tty is None:
        tty = sys.stdin
    if not tty.isatty():
        return
    try:
        import termios
    except ImportError:
        return
    attr_list = termios.tcgetattr(tty)
    attr_list[3] |= termios.ECHO
    try:
        orig_handler = signal.getsignal(signal.SIGTTOU)
    except AttributeError:
        termios.tcsetattr(tty, termios.TCSANOW, attr_list)
    else:
        try:
            signal.signal(signal.SIGTTOU, signal.SIG_IGN)
            termios.tcsetattr(tty, termios.TCSANOW, attr_list)
        finally:
            signal.signal(signal.SIGTTOU, orig_handler)
    return

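A typical call-site sketch (assumes enable_tty_echo and sys are in scope); the call is a no-op on non-TTY streams or on platforms without termios:

enable_tty_echo()            # defaults to sys.stdin
enable_tty_echo(sys.stdout)  # any TTY-backed stream works
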
body_hash: 0717c8f73ca57e97c2262537f9baf04b0c1c067d39c47705c3ca351ebc64532f
path: mainflux/transport.py | name: create_transport | repository: molodoj88/mainfluxpy (1 star) | lang: python

def create_transport(self, channel):
    """
    Creates transport for channel based on app.config.TRANSPORT.
    :param channel: channel object
    :return:
    """
    if self.transport == MQTT:
        return self._create_mqtt_transport(channel, self.port)
    elif self.transport == HTTP:
        return self._create_http_transport(channel, self.port)
    elif self.transport == COAP:
        return self._create_coap_transport(channel, self.port)
    else:
        raise NotSupportedTransportError(f"Transport '{self.transport}' is not supported")

body_hash: 51dd037a7c09ddc6b3e2da4ac7fee21055e5b401643902220c4e0f109ad638fb
path: mainflux/transport.py | name: send_message | repository: molodoj88/mainfluxpy (1 star) | lang: python

async def send_message(self, message):
    """
    Base method for sending message.
    :param message: message object
    :return:
    """
    raise NotImplementedError

body_hash: fd10aa5793feb47fa2f5c905a67c6fce33505f1fe1895b4077b5c35050129ca2
path: mainflux/transport.py | name: send_message | repository: molodoj88/mainfluxpy (1 star) | lang: python

async def send_message(self, message: Message):
    """
    Sends message to a topic.
    :param message: message.Message instance
    :return:
    """
    while not self.connected:
        await asyncio.sleep(0.01)
    await self.message_queue.put(message)

body_hash: d7565b621f687bc7d8b832e1d5eba6b6fd00d569c02935902dae9738b5afb675
path: mainflux/transport.py | name: connect | repository: molodoj88/mainfluxpy (1 star) | lang: python

async def connect(self):
    """
    Connects to a broker.
    :return:
    """
    while not self.connected:
        await self._connect()
        print('Connected to mqtt broker on address %s' % self.address)
        self.connected = True

body_hash: 87bff07cb027eaab79c7aee5825f6e5b96bade29b0e4f9bf4add4c022a5d42f8
path: mainflux/transport.py | name: _subscribe | repository: molodoj88/mainfluxpy (1 star) | lang: python

async def _subscribe(self, topic: str):
    """
    Subscribes to the provided topic and waits for messages asynchronously,
    passing each delivered message to the message-received callback.
    :param topic: topic
    :return:
    """
    try:
        topics = [(topic, QOS_0)]
        await self.mqtt_client.subscribe(topics)
        while True:
            if not self.connected:
                break
            message = await self.mqtt_client.deliver_message()
            self.message_received_callback(message)
    except ConnectException as ce:
        print('Connection failed: %s' % ce)

body_hash: 48cd7776fcb1f6350a4045657837645eb15c07db5cf6260174d10bbec1648849
path: mainflux/transport.py | name: subscribe | repository: molodoj88/mainfluxpy (1 star) | lang: python

async def subscribe(self, topic: str, message_received_cb: Callable):
    """
    Subscribes to sub channel over mqtt and sets callback for received message.
    :param topic: topic to subscribe
    :param message_received_cb: callback for received message (first argument should be message: str)
    :return:
    """
    if message_received_cb is not None:
        self._message_received_cb = message_received_cb
    while not self.connected:
        await asyncio.sleep(0.01)
    await self._subscribe(topic)

body_hash: 987ad74c40b48b4e679dab09b4b9620909ac0233527a3f4370d52f7310940c35
path: Packs/IntegrationsAndIncidentsHealthCheck/Scripts/GetFailedTasks/GetFailedTasks.py | name: get_rest_api_instance_to_use | repository: nagesh-coditas/content (1 star) | lang: python

def get_rest_api_instance_to_use():
    """
    This function checks if there is more than one instance of demisto rest api.

    Returns:
        Demisto Rest Api instance to use
    """
    all_instances = demisto.getModules()
    number_of_rest_api_instances = 0
    rest_api_instance_to_use = None
    for instance_name in all_instances:
        if all_instances[instance_name]['brand'] == BRAND and all_instances[instance_name]['state'] == 'active':
            rest_api_instance_to_use = instance_name
            number_of_rest_api_instances += 1
    if number_of_rest_api_instances > 1:
        return_error("GetFailedTasks: This script can only run with a single instance of the Demisto REST API. "
                     "Specify the instance name in the 'rest_api_instance' argument.")
    return rest_api_instance_to_use

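The counting logic against a fabricated getModules() payload; the BRAND value is an assumption about the module-level constant, which is not shown in this record:

BRAND = "Demisto REST API"  # assumed value of the module-level constant
all_instances = {
    "rest_api_1": {"brand": BRAND, "state": "active"},
    "rest_api_2": {"brand": BRAND, "state": "disabled"},
    "mail_sender": {"brand": "EWS", "state": "active"},
}

active = [name for name, mod in all_instances.items()
          if mod["brand"] == BRAND and mod["state"] == "active"]
print(active)  # ['rest_api_1'] -- exactly one, so no error would be raised
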
body_hash: cc03943450e916b86cab84c10f4a2fbe7b1c185c270e98b073e516f5bc81d1ad
path: Packs/IntegrationsAndIncidentsHealthCheck/Scripts/GetFailedTasks/GetFailedTasks.py | name: get_tenant_name | repository: nagesh-coditas/content (1 star) | lang: python

def get_tenant_name():
    """
    Gets the tenant name from the server url.
    :return: tenant name.
    :rtype: ``str``
    """
    server_url = demisto.executeCommand('GetServerURL', {})[0].get('Contents')
    tenant_name = ''
    if '/acc_' in server_url:
        tenant_name = server_url.split('acc_')[-1]
    return tenant_name

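The URL-parsing step in isolation, with a hypothetical multi-tenant server URL:

server_url = "https://demisto.example.com/acc_Tenant1"
tenant_name = server_url.split("acc_")[-1] if "/acc_" in server_url else ""
print(tenant_name)  # Tenant1
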
body_hash: fc0c7835d9ea435fb6f947fe38e4cf87ac463674ccc577a7f0a9b2d131f1f2f0
path: htmlfun/core.py | name: el | repository: nslamberth/htmlfun (0 stars) | lang: python

def el(tag_type, *content):
    """
    Returns a list of strings that represents an HTML element.

    If the first argument passed to *content is a dict, then the dict
    is unpacked into attribute pairs for the element.

    >>> el('div', {'class': 'navbar'}, "This is my Navbar!")
    ['<div class="navbar">', 'This is my Navbar!', '</div>']
    """
    result = []
    try:
        if isinstance(content[0], dict):
            attrs_dict, content = content[0], content[1:]
            attrs_pairs = []
            for key in attrs_dict:
                attrs_pairs.append('%s="%s"' % (key, attrs_dict[key]))
            attrs_string = ' '.join(attrs_pairs)
            open_tag = '<%s %s>' % (tag_type, attrs_string)
        else:
            open_tag = '<%s>' % tag_type
    except IndexError:
        open_tag = '<%s>' % tag_type
    close_tag = '</%s>' % tag_type
    result.append(open_tag)
    for item in content:
        result.append(item)
    result.append(close_tag)
    return result

body_hash: f54c079e85040f33212cf2d39447e354c0efac967d0f453319ac37713dbe28ab
path: htmlfun/core.py | name: void_el | repository: nslamberth/htmlfun (0 stars) | lang: python

def void_el(tag_type, *content):
    """Same as el but for void elements."""
    result = []
    try:
        if isinstance(content[0], dict):
            attrs_dict, content = content[0], content[1:]
            attrs_pairs = []
            for key in attrs_dict:
                attrs_pairs.append('%s="%s"' % (key, attrs_dict[key]))
            attrs_string = ' '.join(attrs_pairs)
            open_tag = '<%s %s>' % (tag_type, attrs_string)
        else:
            open_tag = '<%s>' % tag_type
    except IndexError:
        open_tag = '<%s>' % tag_type
    result.append(open_tag)
    for item in content:
        result.append(item)
    return result

1a1ce7059f6109c902a3959704d547a90938a925b528526542fb82bedbebea49 | def flatten(list_of_elements):
'Returns a generator that flattens a nested list of html elements.'
for tag in list_of_elements:
if isinstance(tag, str):
(yield tag)
else:
for nested_tag in flatten(tag):
(yield nested_tag) | Returns a generator that flattens a nested list of html elements. | htmlfun/core.py | flatten | nslamberth/htmlfun | 0 | python | def flatten(list_of_elements):
for tag in list_of_elements:
if isinstance(tag, str):
(yield tag)
else:
for nested_tag in flatten(tag):
(yield nested_tag) | def flatten(list_of_elements):
for tag in list_of_elements:
if isinstance(tag, str):
(yield tag)
else:
for nested_tag in flatten(tag):
(yield nested_tag)<|docstring|>Returns a generator that flattens a nested list of html elements.<|endoftext|> |
5a5607b9d5a3dc9db48150dd3fae8f647177af598bcd184dd3bb4e00b23c88e7 | def build_doc(list_of_tags):
'Create final doc from list_of_tags.'
wrapped = el('html', list_of_tags)
return ''.join(list(flatten((['<!DOCTYPE HTML>'] + wrapped)))) | Create final doc from list_of_tags. | htmlfun/core.py | build_doc | nslamberth/htmlfun | 0 | python | def build_doc(list_of_tags):
wrapped = el('html', list_of_tags)
    return ''.join(list(flatten((['<!DOCTYPE HTML>'] + wrapped)))) | def build_doc(list_of_tags):
wrapped = el('html', list_of_tags)
    return ''.join(list(flatten((['<!DOCTYPE HTML>'] + wrapped))))<|docstring|>Create final doc from list_of_tags.<|endoftext|>
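Taken together, the four htmlfun helpers above compose into a full page. A minimal end-to-end sketch, assuming only the functions defined in this file (el, void_el, flatten, build_doc); the page content is invented for illustration:

example_page = build_doc([
    el('head', void_el('meta', {'charset': 'utf-8'})),
    el('body', el('div', {'class': 'navbar'}, 'This is my Navbar!')),
])
print(example_page)
# -> '<!DOCTYPE HTML><html><head><meta charset="utf-8"></head><body><div class="navbar">This is my Navbar!</div></body></html>'

Nested lists at any depth are handled by flatten, which is why el can simply append child lists without joining them first.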
7f8b05d2657adb8bf008456dca8a8c4a6489b6e47c59773593d89d16e55af451 | def __init__(self, jsondict=None, strict=True):
' Initialize all valid properties.\n\n :raises: FHIRValidationError on validation errors, unless strict is False\n :param dict jsondict: A JSON dictionary to use for initialization\n :param bool strict: If True (the default), invalid variables will raise a TypeError\n '
self.authorizingPrescription = None
" Medication order that authorizes the dispense.\n List of `FHIRReference` items referencing `['MedicationRequest']` (represented as `dict` in JSON). "
self.category = None
' Type of medication dispense.\n Type `CodeableConcept` (represented as `dict` in JSON). '
self.context = None
" Encounter / Episode associated with event.\n Type `FHIRReference` referencing `['Encounter', 'EpisodeOfCare']` (represented as `dict` in JSON). "
self.daysSupply = None
' Amount of medication expressed as a timing amount.\n Type `Quantity` (represented as `dict` in JSON). '
self.destination = None
" Where the medication was sent.\n Type `FHIRReference` referencing `['Location']` (represented as `dict` in JSON). "
self.detectedIssue = None
" Clinical issue with action.\n List of `FHIRReference` items referencing `['DetectedIssue']` (represented as `dict` in JSON). "
self.dosageInstruction = None
' How the medication is to be used by the patient or administered by\n the caregiver.\n List of `Dosage` items (represented as `dict` in JSON). '
self.eventHistory = None
" A list of relevant lifecycle events.\n List of `FHIRReference` items referencing `['Provenance']` (represented as `dict` in JSON). "
self.identifier = None
' External identifier.\n List of `Identifier` items (represented as `dict` in JSON). '
self.location = None
" Where the dispense occurred.\n Type `FHIRReference` referencing `['Location']` (represented as `dict` in JSON). "
self.medicationCodeableConcept = None
' What medication was supplied.\n Type `CodeableConcept` (represented as `dict` in JSON). '
self.medicationReference = None
" What medication was supplied.\n Type `FHIRReference` referencing `['Medication']` (represented as `dict` in JSON). "
self.note = None
' Information about the dispense.\n List of `Annotation` items (represented as `dict` in JSON). '
self.partOf = None
" Event that dispense is part of.\n List of `FHIRReference` items referencing `['Procedure']` (represented as `dict` in JSON). "
self.performer = None
' Who performed event.\n List of `MedicationDispensePerformer` items (represented as `dict` in JSON). '
self.quantity = None
' Amount dispensed.\n Type `Quantity` (represented as `dict` in JSON). '
self.receiver = None
" Who collected the medication.\n List of `FHIRReference` items referencing `['Patient', 'Practitioner']` (represented as `dict` in JSON). "
self.status = None
' preparation | in-progress | cancelled | on-hold | completed |\n entered-in-error | stopped | declined | unknown.\n Type `str`. '
self.statusReasonCodeableConcept = None
' Why a dispense was not performed.\n Type `CodeableConcept` (represented as `dict` in JSON). '
self.statusReasonReference = None
" Why a dispense was not performed.\n Type `FHIRReference` referencing `['DetectedIssue']` (represented as `dict` in JSON). "
self.subject = None
" Who the dispense is for.\n Type `FHIRReference` referencing `['Patient', 'Group']` (represented as `dict` in JSON). "
self.substitution = None
' Whether a substitution was performed on the dispense.\n Type `MedicationDispenseSubstitution` (represented as `dict` in JSON). '
self.supportingInformation = None
" Information that supports the dispensing of the medication.\n List of `FHIRReference` items referencing `['Resource']` (represented as `dict` in JSON). "
self.type = None
' Trial fill, partial fill, emergency fill, etc..\n Type `CodeableConcept` (represented as `dict` in JSON). '
self.whenHandedOver = None
' When product was given out.\n Type `FHIRDate` (represented as `str` in JSON). '
self.whenPrepared = None
' When product was packaged and reviewed.\n Type `FHIRDate` (represented as `str` in JSON). '
super(MedicationDispense, self).__init__(jsondict=jsondict, strict=strict) | Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError | fhir/resources/medicationdispense.py | __init__ | mmabey/fhir.resources | 0 | python | def __init__(self, jsondict=None, strict=True):
' Initialize all valid properties.\n\n :raises: FHIRValidationError on validation errors, unless strict is False\n :param dict jsondict: A JSON dictionary to use for initialization\n :param bool strict: If True (the default), invalid variables will raise a TypeError\n '
self.authorizingPrescription = None
" Medication order that authorizes the dispense.\n List of `FHIRReference` items referencing `['MedicationRequest']` (represented as `dict` in JSON). "
self.category = None
' Type of medication dispense.\n Type `CodeableConcept` (represented as `dict` in JSON). '
self.context = None
" Encounter / Episode associated with event.\n Type `FHIRReference` referencing `['Encounter', 'EpisodeOfCare']` (represented as `dict` in JSON). "
self.daysSupply = None
' Amount of medication expressed as a timing amount.\n Type `Quantity` (represented as `dict` in JSON). '
self.destination = None
" Where the medication was sent.\n Type `FHIRReference` referencing `['Location']` (represented as `dict` in JSON). "
self.detectedIssue = None
" Clinical issue with action.\n List of `FHIRReference` items referencing `['DetectedIssue']` (represented as `dict` in JSON). "
self.dosageInstruction = None
' How the medication is to be used by the patient or administered by\n the caregiver.\n List of `Dosage` items (represented as `dict` in JSON). '
self.eventHistory = None
" A list of relevant lifecycle events.\n List of `FHIRReference` items referencing `['Provenance']` (represented as `dict` in JSON). "
self.identifier = None
' External identifier.\n List of `Identifier` items (represented as `dict` in JSON). '
self.location = None
" Where the dispense occurred.\n Type `FHIRReference` referencing `['Location']` (represented as `dict` in JSON). "
self.medicationCodeableConcept = None
' What medication was supplied.\n Type `CodeableConcept` (represented as `dict` in JSON). '
self.medicationReference = None
" What medication was supplied.\n Type `FHIRReference` referencing `['Medication']` (represented as `dict` in JSON). "
self.note = None
' Information about the dispense.\n List of `Annotation` items (represented as `dict` in JSON). '
self.partOf = None
" Event that dispense is part of.\n List of `FHIRReference` items referencing `['Procedure']` (represented as `dict` in JSON). "
self.performer = None
' Who performed event.\n List of `MedicationDispensePerformer` items (represented as `dict` in JSON). '
self.quantity = None
' Amount dispensed.\n Type `Quantity` (represented as `dict` in JSON). '
self.receiver = None
" Who collected the medication.\n List of `FHIRReference` items referencing `['Patient', 'Practitioner']` (represented as `dict` in JSON). "
self.status = None
' preparation | in-progress | cancelled | on-hold | completed |\n entered-in-error | stopped | declined | unknown.\n Type `str`. '
self.statusReasonCodeableConcept = None
' Why a dispense was not performed.\n Type `CodeableConcept` (represented as `dict` in JSON). '
self.statusReasonReference = None
" Why a dispense was not performed.\n Type `FHIRReference` referencing `['DetectedIssue']` (represented as `dict` in JSON). "
self.subject = None
" Who the dispense is for.\n Type `FHIRReference` referencing `['Patient', 'Group']` (represented as `dict` in JSON). "
self.substitution = None
' Whether a substitution was performed on the dispense.\n Type `MedicationDispenseSubstitution` (represented as `dict` in JSON). '
self.supportingInformation = None
" Information that supports the dispensing of the medication.\n List of `FHIRReference` items referencing `['Resource']` (represented as `dict` in JSON). "
self.type = None
' Trial fill, partial fill, emergency fill, etc..\n Type `CodeableConcept` (represented as `dict` in JSON). '
self.whenHandedOver = None
' When product was given out.\n Type `FHIRDate` (represented as `str` in JSON). '
self.whenPrepared = None
' When product was packaged and reviewed.\n Type `FHIRDate` (represented as `str` in JSON). '
super(MedicationDispense, self).__init__(jsondict=jsondict, strict=strict) | def __init__(self, jsondict=None, strict=True):
' Initialize all valid properties.\n\n :raises: FHIRValidationError on validation errors, unless strict is False\n :param dict jsondict: A JSON dictionary to use for initialization\n :param bool strict: If True (the default), invalid variables will raise a TypeError\n '
self.authorizingPrescription = None
" Medication order that authorizes the dispense.\n List of `FHIRReference` items referencing `['MedicationRequest']` (represented as `dict` in JSON). "
self.category = None
' Type of medication dispense.\n Type `CodeableConcept` (represented as `dict` in JSON). '
self.context = None
" Encounter / Episode associated with event.\n Type `FHIRReference` referencing `['Encounter', 'EpisodeOfCare']` (represented as `dict` in JSON). "
self.daysSupply = None
' Amount of medication expressed as a timing amount.\n Type `Quantity` (represented as `dict` in JSON). '
self.destination = None
" Where the medication was sent.\n Type `FHIRReference` referencing `['Location']` (represented as `dict` in JSON). "
self.detectedIssue = None
" Clinical issue with action.\n List of `FHIRReference` items referencing `['DetectedIssue']` (represented as `dict` in JSON). "
self.dosageInstruction = None
' How the medication is to be used by the patient or administered by\n the caregiver.\n List of `Dosage` items (represented as `dict` in JSON). '
self.eventHistory = None
" A list of relevant lifecycle events.\n List of `FHIRReference` items referencing `['Provenance']` (represented as `dict` in JSON). "
self.identifier = None
' External identifier.\n List of `Identifier` items (represented as `dict` in JSON). '
self.location = None
" Where the dispense occurred.\n Type `FHIRReference` referencing `['Location']` (represented as `dict` in JSON). "
self.medicationCodeableConcept = None
' What medication was supplied.\n Type `CodeableConcept` (represented as `dict` in JSON). '
self.medicationReference = None
" What medication was supplied.\n Type `FHIRReference` referencing `['Medication']` (represented as `dict` in JSON). "
self.note = None
' Information about the dispense.\n List of `Annotation` items (represented as `dict` in JSON). '
self.partOf = None
" Event that dispense is part of.\n List of `FHIRReference` items referencing `['Procedure']` (represented as `dict` in JSON). "
self.performer = None
' Who performed event.\n List of `MedicationDispensePerformer` items (represented as `dict` in JSON). '
self.quantity = None
' Amount dispensed.\n Type `Quantity` (represented as `dict` in JSON). '
self.receiver = None
" Who collected the medication.\n List of `FHIRReference` items referencing `['Patient', 'Practitioner']` (represented as `dict` in JSON). "
self.status = None
' preparation | in-progress | cancelled | on-hold | completed |\n entered-in-error | stopped | declined | unknown.\n Type `str`. '
self.statusReasonCodeableConcept = None
' Why a dispense was not performed.\n Type `CodeableConcept` (represented as `dict` in JSON). '
self.statusReasonReference = None
" Why a dispense was not performed.\n Type `FHIRReference` referencing `['DetectedIssue']` (represented as `dict` in JSON). "
self.subject = None
" Who the dispense is for.\n Type `FHIRReference` referencing `['Patient', 'Group']` (represented as `dict` in JSON). "
self.substitution = None
' Whether a substitution was performed on the dispense.\n Type `MedicationDispenseSubstitution` (represented as `dict` in JSON). '
self.supportingInformation = None
" Information that supports the dispensing of the medication.\n List of `FHIRReference` items referencing `['Resource']` (represented as `dict` in JSON). "
self.type = None
' Trial fill, partial fill, emergency fill, etc..\n Type `CodeableConcept` (represented as `dict` in JSON). '
self.whenHandedOver = None
' When product was given out.\n Type `FHIRDate` (represented as `str` in JSON). '
self.whenPrepared = None
' When product was packaged and reviewed.\n Type `FHIRDate` (represented as `str` in JSON). '
super(MedicationDispense, self).__init__(jsondict=jsondict, strict=strict)<|docstring|>Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError<|endoftext|> |
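For orientation, a sketch of how this class is typically instantiated; the import path follows the entry's path field, the jsondict fields are invented for illustration, and the exact validation and attribute behaviour is an assumption about the fhir.resources R4 package:

from fhir.resources.medicationdispense import MedicationDispense

dispense = MedicationDispense(jsondict={
    'resourceType': 'MedicationDispense',
    'status': 'completed',
    'medicationCodeableConcept': {'text': 'Aspirin 81 mg oral tablet'},  # made-up example medication
    'subject': {'reference': 'Patient/example'},
}, strict=True)
print(dispense.status)             # 'completed'
print(dispense.subject.reference)  # 'Patient/example'

With strict=True (the default), a malformed jsondict raises FHIRValidationError rather than silently dropping fields.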
093c7b0d8064e95192ce7fbb2477aef32f2603d7689560145437915872694d26 | def __init__(self, jsondict=None, strict=True):
' Initialize all valid properties.\n\n :raises: FHIRValidationError on validation errors, unless strict is False\n :param dict jsondict: A JSON dictionary to use for initialization\n :param bool strict: If True (the default), invalid variables will raise a TypeError\n '
self.actor = None
" Individual who was performing.\n Type `FHIRReference` referencing `['Practitioner', 'PractitionerRole', 'Organization', 'Patient', 'Device', 'RelatedPerson']` (represented as `dict` in JSON). "
self.function = None
' Who performed the dispense and what they did.\n Type `CodeableConcept` (represented as `dict` in JSON). '
super(MedicationDispensePerformer, self).__init__(jsondict=jsondict, strict=strict) | Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError | fhir/resources/medicationdispense.py | __init__ | mmabey/fhir.resources | 0 | python | def __init__(self, jsondict=None, strict=True):
' Initialize all valid properties.\n\n :raises: FHIRValidationError on validation errors, unless strict is False\n :param dict jsondict: A JSON dictionary to use for initialization\n :param bool strict: If True (the default), invalid variables will raise a TypeError\n '
self.actor = None
" Individual who was performing.\n Type `FHIRReference` referencing `['Practitioner', 'PractitionerRole', 'Organization', 'Patient', 'Device', 'RelatedPerson']` (represented as `dict` in JSON). "
self.function = None
' Who performed the dispense and what they did.\n Type `CodeableConcept` (represented as `dict` in JSON). '
super(MedicationDispensePerformer, self).__init__(jsondict=jsondict, strict=strict) | def __init__(self, jsondict=None, strict=True):
' Initialize all valid properties.\n\n :raises: FHIRValidationError on validation errors, unless strict is False\n :param dict jsondict: A JSON dictionary to use for initialization\n :param bool strict: If True (the default), invalid variables will raise a TypeError\n '
self.actor = None
" Individual who was performing.\n Type `FHIRReference` referencing `['Practitioner', 'PractitionerRole', 'Organization', 'Patient', 'Device', 'RelatedPerson']` (represented as `dict` in JSON). "
self.function = None
' Who performed the dispense and what they did.\n Type `CodeableConcept` (represented as `dict` in JSON). '
super(MedicationDispensePerformer, self).__init__(jsondict=jsondict, strict=strict)<|docstring|>Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError<|endoftext|> |
05537cf29638b889347634cefac2316792c56cb26203a19650d4455a4529ca55 | def __init__(self, jsondict=None, strict=True):
' Initialize all valid properties.\n\n :raises: FHIRValidationError on validation errors, unless strict is False\n :param dict jsondict: A JSON dictionary to use for initialization\n :param bool strict: If True (the default), invalid variables will raise a TypeError\n '
self.reason = None
' Why was substitution made.\n List of `CodeableConcept` items (represented as `dict` in JSON). '
self.responsibleParty = None
" Who is responsible for the substitution.\n List of `FHIRReference` items referencing `['Practitioner', 'PractitionerRole']` (represented as `dict` in JSON). "
self.type = None
' Code signifying whether a different drug was dispensed from what\n was prescribed.\n Type `CodeableConcept` (represented as `dict` in JSON). '
self.wasSubstituted = None
' Whether a substitution was or was not performed on the dispense.\n Type `bool`. '
super(MedicationDispenseSubstitution, self).__init__(jsondict=jsondict, strict=strict) | Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError | fhir/resources/medicationdispense.py | __init__ | mmabey/fhir.resources | 0 | python | def __init__(self, jsondict=None, strict=True):
' Initialize all valid properties.\n\n :raises: FHIRValidationError on validation errors, unless strict is False\n :param dict jsondict: A JSON dictionary to use for initialization\n :param bool strict: If True (the default), invalid variables will raise a TypeError\n '
self.reason = None
' Why was substitution made.\n List of `CodeableConcept` items (represented as `dict` in JSON). '
self.responsibleParty = None
" Who is responsible for the substitution.\n List of `FHIRReference` items referencing `['Practitioner', 'PractitionerRole']` (represented as `dict` in JSON). "
self.type = None
' Code signifying whether a different drug was dispensed from what\n was prescribed.\n Type `CodeableConcept` (represented as `dict` in JSON). '
self.wasSubstituted = None
' Whether a substitution was or was not performed on the dispense.\n Type `bool`. '
super(MedicationDispenseSubstitution, self).__init__(jsondict=jsondict, strict=strict) | def __init__(self, jsondict=None, strict=True):
' Initialize all valid properties.\n\n :raises: FHIRValidationError on validation errors, unless strict is False\n :param dict jsondict: A JSON dictionary to use for initialization\n :param bool strict: If True (the default), invalid variables will raise a TypeError\n '
self.reason = None
' Why was substitution made.\n List of `CodeableConcept` items (represented as `dict` in JSON). '
self.responsibleParty = None
" Who is responsible for the substitution.\n List of `FHIRReference` items referencing `['Practitioner', 'PractitionerRole']` (represented as `dict` in JSON). "
self.type = None
' Code signifying whether a different drug was dispensed from what\n was prescribed.\n Type `CodeableConcept` (represented as `dict` in JSON). '
self.wasSubstituted = None
' Whether a substitution was or was not performed on the dispense.\n Type `bool`. '
super(MedicationDispenseSubstitution, self).__init__(jsondict=jsondict, strict=strict)<|docstring|>Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError<|endoftext|> |
4a5289b72e6b06336c3441b94b1a23dfd2293ccb40a01228c92d6b30595c2bc8 | def memoized_property(func=None, settable=False, deletable=True, classlevel=False):
'Return the property attribute that only calls its getter on the first access.\n\n The result is cached as a private attribue of the same name as the property, and\n on subsequent accesses is returned, preventing the need to call the getter extra\n times.\n\n The decorator may be called in the same was as the builtin `property`, or\n providing options - see examples.\n\n Arguments:\n func: the getter function to be decorated.\n settable: whether to create a setter.\n deletable: whether to create a deleter.\n classlevel: whether to memoize at class level i.e. all instances share value.\n\n Examples:\n Use a decorator:\n >>> class DeepThought(object):\n ... @memoized_property\n ... def answer(self):\n ... print("Running expensive getter...")\n ... return 42\n\n >>> dt = DeepThought()\n\n Initially, the getter is called:\n >>> dt.answer\n Running expensive getter...\n 42\n\n Subsequent accesses do not call the getter:\n >>> dt.answer\n 42\n\n By default, a setter is not created:\n >>> dt.answer = 6 * 9\n Traceback (most recent call last):\n ...\n AttributeError: can\'t set attribute\n\n But a deleter is:\n >>> del dt.answer\n >>> dt.answer\n Running expensive getter...\n 42\n\n The behaviours can be altered as follows:\n >>> class DeepThought(object):\n ... @memoized_property(settable=True, deletable=False)\n ... def answer(self):\n ... print("Running expensive getter...")\n ... return 42\n\n >>> dt = DeepThought()\n >>> dt.answer\n Running expensive getter...\n 42\n\n >>> dt.answer = 6 * 9\n >>> dt.answer\n 54\n\n >>> del dt.answer\n Traceback (most recent call last):\n ...\n AttributeError: can\'t delete attribute\n\n The memoization can be done at the class level\n >>> class DeepThought(object):\n ... @memoized_property(classlevel=True)\n ... def answer(self):\n ... print("Running expensive getter...")\n ... return 42\n\n >>> dt1, dt2 = DeepThought(), DeepThought()\n >>> dt1.answer\n Running expensive getter...\n 42\n\n >>> dt2.answer\n 42\n\n Of course, that means the property is only settable at class level:\n >>> class DeepThought(object):\n ... @memoized_property(classlevel=True, settable=True)\n ... def answer(self):\n ... print("Running expensive getter...")\n ... return 42\n\n >>> dt1, dt2 = DeepThought(), DeepThought()\n >>> dt1.answer = -1\n >>> dt1.answer\n -1\n\n >>> dt2.answer\n -1\n\n '
if (func is None):
return partial(memoized_property, settable=settable, deletable=deletable, classlevel=classlevel)
attr_name = ('_' + func.__name__)
lookup = (type if classlevel else (lambda x: x))
def fset(self, value):
setattr(lookup(self), attr_name, value)
def fdel(self):
delattr(lookup(self), attr_name)
@wraps(func)
def fget(self):
if (not hasattr(self, attr_name)):
fset(self, func(self))
return getattr(self, attr_name)
return property(fget, (fset if settable else None), (fdel if deletable else None)) | Return the property attribute that only calls its getter on the first access.
The result is cached as a private attribute of the same name as the property, and
on subsequent accesses is returned, preventing the need to call the getter extra
times.
The decorator may be called in the same way as the builtin `property`, or
providing options - see examples.
Arguments:
func: the getter function to be decorated.
settable: whether to create a setter.
deletable: whether to create a deleter.
classlevel: whether to memoize at class level i.e. all instances share value.
Examples:
Use a decorator:
>>> class DeepThought(object):
... @memoized_property
... def answer(self):
... print("Running expensive getter...")
... return 42
>>> dt = DeepThought()
Initially, the getter is called:
>>> dt.answer
Running expensive getter...
42
Subsequent accesses do not call the getter:
>>> dt.answer
42
By default, a setter is not created:
>>> dt.answer = 6 * 9
Traceback (most recent call last):
...
AttributeError: can't set attribute
But a deleter is:
>>> del dt.answer
>>> dt.answer
Running expensive getter...
42
The behaviours can be altered as follows:
>>> class DeepThought(object):
... @memoized_property(settable=True, deletable=False)
... def answer(self):
... print("Running expensive getter...")
... return 42
>>> dt = DeepThought()
>>> dt.answer
Running expensive getter...
42
>>> dt.answer = 6 * 9
>>> dt.answer
54
>>> del dt.answer
Traceback (most recent call last):
...
AttributeError: can't delete attribute
The memoization can be done at the class level
>>> class DeepThought(object):
... @memoized_property(classlevel=True)
... def answer(self):
... print("Running expensive getter...")
... return 42
>>> dt1, dt2 = DeepThought(), DeepThought()
>>> dt1.answer
Running expensive getter...
42
>>> dt2.answer
42
Of course, that means the property is only settable at class level:
>>> class DeepThought(object):
... @memoized_property(classlevel=True, settable=True)
... def answer(self):
... print("Running expensive getter...")
... return 42
>>> dt1, dt2 = DeepThought(), DeepThought()
>>> dt1.answer = -1
>>> dt1.answer
-1
>>> dt2.answer
-1 | src/memoprop/decorators.py | memoized_property | lewisacidic/memoized-property | 1 | python | def memoized_property(func=None, settable=False, deletable=True, classlevel=False):
'Return the property attribute that only calls its getter on the first access.\n\n The result is cached as a private attribue of the same name as the property, and\n on subsequent accesses is returned, preventing the need to call the getter extra\n times.\n\n The decorator may be called in the same was as the builtin `property`, or\n providing options - see examples.\n\n Arguments:\n func: the getter function to be decorated.\n settable: whether to create a setter.\n deletable: whether to create a deleter.\n classlevel: whether to memoize at class level i.e. all instances share value.\n\n Examples:\n Use a decorator:\n >>> class DeepThought(object):\n ... @memoized_property\n ... def answer(self):\n ... print("Running expensive getter...")\n ... return 42\n\n >>> dt = DeepThought()\n\n Initially, the getter is called:\n >>> dt.answer\n Running expensive getter...\n 42\n\n Subsequent accesses do not call the getter:\n >>> dt.answer\n 42\n\n By default, a setter is not created:\n >>> dt.answer = 6 * 9\n Traceback (most recent call last):\n ...\n AttributeError: can\'t set attribute\n\n But a deleter is:\n >>> del dt.answer\n >>> dt.answer\n Running expensive getter...\n 42\n\n The behaviours can be altered as follows:\n >>> class DeepThought(object):\n ... @memoized_property(settable=True, deletable=False)\n ... def answer(self):\n ... print("Running expensive getter...")\n ... return 42\n\n >>> dt = DeepThought()\n >>> dt.answer\n Running expensive getter...\n 42\n\n >>> dt.answer = 6 * 9\n >>> dt.answer\n 54\n\n >>> del dt.answer\n Traceback (most recent call last):\n ...\n AttributeError: can\'t delete attribute\n\n The memoization can be done at the class level\n >>> class DeepThought(object):\n ... @memoized_property(classlevel=True)\n ... def answer(self):\n ... print("Running expensive getter...")\n ... return 42\n\n >>> dt1, dt2 = DeepThought(), DeepThought()\n >>> dt1.answer\n Running expensive getter...\n 42\n\n >>> dt2.answer\n 42\n\n Of course, that means the property is only settable at class level:\n >>> class DeepThought(object):\n ... @memoized_property(classlevel=True, settable=True)\n ... def answer(self):\n ... print("Running expensive getter...")\n ... return 42\n\n >>> dt1, dt2 = DeepThought(), DeepThought()\n >>> dt1.answer = -1\n >>> dt1.answer\n -1\n\n >>> dt2.answer\n -1\n\n '
if (func is None):
return partial(memoized_property, settable=settable, deletable=deletable, classlevel=classlevel)
attr_name = ('_' + func.__name__)
lookup = (type if classlevel else (lambda x: x))
def fset(self, value):
setattr(lookup(self), attr_name, value)
def fdel(self):
delattr(lookup(self), attr_name)
@wraps(func)
def fget(self):
if (not hasattr(self, attr_name)):
fset(self, func(self))
return getattr(self, attr_name)
return property(fget, (fset if settable else None), (fdel if deletable else None)) | def memoized_property(func=None, settable=False, deletable=True, classlevel=False):
'Return the property attribute that only calls its getter on the first access.\n\n The result is cached as a private attribue of the same name as the property, and\n on subsequent accesses is returned, preventing the need to call the getter extra\n times.\n\n The decorator may be called in the same was as the builtin `property`, or\n providing options - see examples.\n\n Arguments:\n func: the getter function to be decorated.\n settable: whether to create a setter.\n deletable: whether to create a deleter.\n classlevel: whether to memoize at class level i.e. all instances share value.\n\n Examples:\n Use a decorator:\n >>> class DeepThought(object):\n ... @memoized_property\n ... def answer(self):\n ... print("Running expensive getter...")\n ... return 42\n\n >>> dt = DeepThought()\n\n Initially, the getter is called:\n >>> dt.answer\n Running expensive getter...\n 42\n\n Subsequent accesses do not call the getter:\n >>> dt.answer\n 42\n\n By default, a setter is not created:\n >>> dt.answer = 6 * 9\n Traceback (most recent call last):\n ...\n AttributeError: can\'t set attribute\n\n But a deleter is:\n >>> del dt.answer\n >>> dt.answer\n Running expensive getter...\n 42\n\n The behaviours can be altered as follows:\n >>> class DeepThought(object):\n ... @memoized_property(settable=True, deletable=False)\n ... def answer(self):\n ... print("Running expensive getter...")\n ... return 42\n\n >>> dt = DeepThought()\n >>> dt.answer\n Running expensive getter...\n 42\n\n >>> dt.answer = 6 * 9\n >>> dt.answer\n 54\n\n >>> del dt.answer\n Traceback (most recent call last):\n ...\n AttributeError: can\'t delete attribute\n\n The memoization can be done at the class level\n >>> class DeepThought(object):\n ... @memoized_property(classlevel=True)\n ... def answer(self):\n ... print("Running expensive getter...")\n ... return 42\n\n >>> dt1, dt2 = DeepThought(), DeepThought()\n >>> dt1.answer\n Running expensive getter...\n 42\n\n >>> dt2.answer\n 42\n\n Of course, that means the property is only settable at class level:\n >>> class DeepThought(object):\n ... @memoized_property(classlevel=True, settable=True)\n ... def answer(self):\n ... print("Running expensive getter...")\n ... return 42\n\n >>> dt1, dt2 = DeepThought(), DeepThought()\n >>> dt1.answer = -1\n >>> dt1.answer\n -1\n\n >>> dt2.answer\n -1\n\n '
if (func is None):
return partial(memoized_property, settable=settable, deletable=deletable, classlevel=classlevel)
attr_name = ('_' + func.__name__)
lookup = (type if classlevel else (lambda x: x))
def fset(self, value):
setattr(lookup(self), attr_name, value)
def fdel(self):
delattr(lookup(self), attr_name)
@wraps(func)
def fget(self):
if (not hasattr(self, attr_name)):
fset(self, func(self))
return getattr(self, attr_name)
return property(fget, (fset if settable else None), (fdel if deletable else None))<|docstring|>Return the property attribute that only calls its getter on the first access.
The result is cached as a private attribute of the same name as the property, and
on subsequent accesses is returned, preventing the need to call the getter extra
times.
The decorator may be called in the same way as the builtin `property`, or
providing options - see examples.
Arguments:
func: the getter function to be decorated.
settable: whether to create a setter.
deletable: whether to create a deleter.
classlevel: whether to memoize at class level i.e. all instances share value.
Examples:
Use a decorator:
>>> class DeepThought(object):
... @memoized_property
... def answer(self):
... print("Running expensive getter...")
... return 42
>>> dt = DeepThought()
Initially, the getter is called:
>>> dt.answer
Running expensive getter...
42
Subsequent accesses do not call the getter:
>>> dt.answer
42
By default, a setter is not created:
>>> dt.answer = 6 * 9
Traceback (most recent call last):
...
AttributeError: can't set attribute
But a deleter is:
>>> del dt.answer
>>> dt.answer
Running expensive getter...
42
The behaviours can be altered as follows:
>>> class DeepThought(object):
... @memoized_property(settable=True, deletable=False)
... def answer(self):
... print("Running expensive getter...")
... return 42
>>> dt = DeepThought()
>>> dt.answer
Running expensive getter...
42
>>> dt.answer = 6 * 9
>>> dt.answer
54
>>> del dt.answer
Traceback (most recent call last):
...
AttributeError: can't delete attribute
The memoization can be done at the class level
>>> class DeepThought(object):
... @memoized_property(classlevel=True)
... def answer(self):
... print("Running expensive getter...")
... return 42
>>> dt1, dt2 = DeepThought(), DeepThought()
>>> dt1.answer
Running expensive getter...
42
>>> dt2.answer
42
Of course, that means the property is only settable at class level:
>>> class DeepThought(object):
... @memoized_property(classlevel=True, settable=True)
... def answer(self):
... print("Running expensive getter...")
... return 42
>>> dt1, dt2 = DeepThought(), DeepThought()
>>> dt1.answer = -1
>>> dt1.answer
-1
>>> dt2.answer
-1<|endoftext|> |
e4d87ce4286760e35d7c040c66ca169d63db6f1833ba93e7cfc235e384b36681 | def on_cuda(self):
'Returns true or false depending on if the module is on cuda or not. Unfortunately\n there is no API method in PyTorch for this so we get this from the first parameter of the\n model and cache it.\n NOTE: this must be called outside of the init() method, because the cuda status of the module\n gets set by the modelwrapper.\n '
if (self._on_cuda is None):
self._on_cuda = next(self.parameters()).is_cuda
return self._on_cuda | Returns true or false depending on if the module is on cuda or not. Unfortunately
there is no API method in PyTorch for this so we get this from the first parameter of the
model and cache it.
NOTE: this must be called outside of the init() method, because the cuda status of the module
gets set by the modelwrapper. | gatelfpytorchjson/CustomModule.py | on_cuda | GateNLP/gate-lf-pytorch-wrapper | 1 | python | def on_cuda(self):
'Returns true or false depending on if the module is on cuda or not. Unfortunately\n there is no API method in PyTorch for this so we get this from the first parameter of the\n model and cache it.\n NOTE: this must be called outside of the init() method, because the cuda status of the module\n gets set by the modelwrapper.\n '
if (self._on_cuda is None):
self._on_cuda = next(self.parameters()).is_cuda
return self._on_cuda | def on_cuda(self):
'Returns true or false depending on if the module is on cuda or not. Unfortunately\n there is no API method in PyTorch for this so we get this from the first parameter of the\n model and cache it.\n NOTE: this must be called outside of the init() method, because the cuda status of the module\n gets set by the modelwrapper.\n '
if (self._on_cuda is None):
self._on_cuda = next(self.parameters()).is_cuda
return self._on_cuda<|docstring|>Returns true or false depending on if the module is on cuda or not. Unfortunately
there is no API method in PyTorch for this so we get this from the first parameter of the
model and cache it.
NOTE: this must be called outside of the init() method, because the cuda status of the module
gets set by the modelwrapper.<|endoftext|> |
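The idiom on_cuda relies on can be demonstrated outside the wrapper; a minimal sketch in which the Linear layer is just a stand-in module:

import torch
import torch.nn as nn

model = nn.Linear(4, 2)
print(next(model.parameters()).is_cuda)      # False on a CPU-only setup
if torch.cuda.is_available():
    model.cuda()
    print(next(model.parameters()).is_cuda)  # True once the parameters have moved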
ff36440b5643b275cc7b93b829e1e1b7f4248e9856db2441fc6df4e9754dedfd | def SRFF(init=0, has_ce=False):
    'An S-R flip-flop.'
dff = FF(init=init, has_ce=has_ce, has_set=True, has_reset=True)
dff(dff)
args = []
if has_ce:
args += ['CE', dff.CE]
args += ['S', dff.SET, 'R', dff.RESET, 'CLK', dff.CLK, 'O', dff.O]
return AnonymousCircuit(args) | A S-R flip-flop. | mantle/xilinx/mantle3/FF.py | SRFF | phanrahan/mantle | 33 | python | def SRFF(init=0, has_ce=False):
dff = FF(init=init, has_ce=has_ce, has_set=True, has_reset=True)
dff(dff)
args = []
if has_ce:
args += ['CE', dff.CE]
args += ['S', dff.SET, 'R', dff.RESET, 'CLK', dff.CLK, 'O', dff.O]
return AnonymousCircuit(args) | def SRFF(init=0, has_ce=False):
dff = FF(init=init, has_ce=has_ce, has_set=True, has_reset=True)
dff(dff)
args = []
if has_ce:
args += ['CE', dff.CE]
args += ['S', dff.SET, 'R', dff.RESET, 'CLK', dff.CLK, 'O', dff.O]
    return AnonymousCircuit(args)<|docstring|>An S-R flip-flop.<|endoftext|>
dcc947e88322c146e84729e4c33941f5e17f1c4eb7ec9f55616ecd5eeee9dccf | def RSFF(init=0, has_ce=False):
'A R-S flip-flop.'
dff = FF(init=init, has_ce=has_ce, has_set=True, has_reset=True)
dff(dff)
args = []
if has_ce:
args += ['CE', dff.CE]
args += ['S', dff.SET, 'R', dff.RESET, 'CLK', dff.CLK, 'O', dff.O]
return AnonymousCircuit(args) | A R-S flip-flop. | mantle/xilinx/mantle3/FF.py | RSFF | phanrahan/mantle | 33 | python | def RSFF(init=0, has_ce=False):
dff = FF(init=init, has_ce=has_ce, has_set=True, has_reset=True)
dff(dff)
args = []
if has_ce:
args += ['CE', dff.CE]
args += ['S', dff.SET, 'R', dff.RESET, 'CLK', dff.CLK, 'O', dff.O]
return AnonymousCircuit(args) | def RSFF(init=0, has_ce=False):
dff = FF(init=init, has_ce=has_ce, has_set=True, has_reset=True)
dff(dff)
args = []
if has_ce:
args += ['CE', dff.CE]
args += ['S', dff.SET, 'R', dff.RESET, 'CLK', dff.CLK, 'O', dff.O]
    return AnonymousCircuit(args)<|docstring|>An R-S flip-flop.<|endoftext|>
9b110e789d847afc191cb876d48892767fbbf42ddcee7b58c4c7ac68caab7f42 | def JKFF(init=0, has_ce=False, has_set=False, has_reset=False):
'A J-K flip-flop.'
dff = FF(init=init, has_ce=has_ce, has_set=has_set, has_reset=has_reset)
lut = LUT3((((~ I0) & I1) | (I0 & (~ I2))))
dff(lut)
wire(dff.O, lut.I0)
return AnonymousCircuit('J', lut.I1, 'K', lut.I2, 'O', dff.O) | A J-K flip-flop. | mantle/xilinx/mantle3/FF.py | JKFF | phanrahan/mantle | 33 | python | def JKFF(init=0, has_ce=False, has_set=False, has_reset=False):
dff = FF(init=init, has_ce=has_ce, has_set=has_set, has_reset=has_reset)
lut = LUT3((((~ I0) & I1) | (I0 & (~ I2))))
dff(lut)
wire(dff.O, lut.I0)
return AnonymousCircuit('J', lut.I1, 'K', lut.I2, 'O', dff.O) | def JKFF(init=0, has_ce=False, has_set=False, has_reset=False):
dff = FF(init=init, has_ce=has_ce, has_set=has_set, has_reset=has_reset)
lut = LUT3((((~ I0) & I1) | (I0 & (~ I2))))
dff(lut)
wire(dff.O, lut.I0)
return AnonymousCircuit('J', lut.I1, 'K', lut.I2, 'O', dff.O)<|docstring|>A J-K flip-flop.<|endoftext|> |
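The LUT3 expression above encodes the usual J-K next-state equation, with I0 wired to the current output Q, I1 to J and I2 to K. A plain-Python truth table (independent of the magma/mantle circuit machinery) makes the behaviour explicit:

def jk_next_state(q, j, k):
    # next Q = (~Q & J) | (Q & ~K), mirroring the LUT3 expression
    return int(((not q) and j) or (q and (not k)))

for q in (0, 1):
    for j in (0, 1):
        for k in (0, 1):
            print('Q=%d J=%d K=%d -> %d' % (q, j, k, jk_next_state(q, j, k)))
# J=K=0 holds, J=1 K=0 sets, J=0 K=1 resets, J=K=1 toggles

Tying J = K = T recovers the toggle behaviour of the TFF defined next, whose LUT2 computes Q ^ T.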
07a49917765bdd7d2d8c53e5a365a1d4b529d0daa38badbd642543e618515ca3 | def TFF(init=0, has_ce=False, has_set=False, has_reset=False):
'A T flip-flop.'
tff = FF(init=init, has_ce=has_ce, has_set=has_set, has_reset=has_reset)
lut = LUT2((I0 ^ I1))
tff(lut)
wire(tff.O, lut.I0)
return AnonymousCircuit('I', lut.I1, 'O', tff.O) | A T flip-flop. | mantle/xilinx/mantle3/FF.py | TFF | phanrahan/mantle | 33 | python | def TFF(init=0, has_ce=False, has_set=False, has_reset=False):
tff = FF(init=init, has_ce=has_ce, has_set=has_set, has_reset=has_reset)
lut = LUT2((I0 ^ I1))
tff(lut)
wire(tff.O, lut.I0)
return AnonymousCircuit('I', lut.I1, 'O', tff.O) | def TFF(init=0, has_ce=False, has_set=False, has_reset=False):
tff = FF(init=init, has_ce=has_ce, has_set=has_set, has_reset=has_reset)
lut = LUT2((I0 ^ I1))
tff(lut)
wire(tff.O, lut.I0)
return AnonymousCircuit('I', lut.I1, 'O', tff.O)<|docstring|>A T flip-flop.<|endoftext|> |
cc7b2e391b506efd3832f40dc221724ce9c3c0f1ee7bdde6784d6d56fc27f5e7 | def load_data(filename):
'\n load the json file into data list\n '
data = list()
with open(filename) as json_data:
questions = json.load(json_data)['questions']
for q in questions:
q_text = nltk.word_tokenize(q['text'])
label = q['category']
if label:
data.append((q_text, label))
return data | load the json file into data list | dan/dan.py | load_data | akarasahin/nlp-hw | 0 | python | def load_data(filename):
'\n \n '
data = list()
with open(filename) as json_data:
questions = json.load(json_data)['questions']
for q in questions:
q_text = nltk.word_tokenize(q['text'])
label = q['category']
if label:
data.append((q_text, label))
return data | def load_data(filename):
'\n \n '
data = list()
with open(filename) as json_data:
questions = json.load(json_data)['questions']
for q in questions:
q_text = nltk.word_tokenize(q['text'])
label = q['category']
if label:
data.append((q_text, label))
return data<|docstring|>load the json file into data list<|endoftext|> |
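load_data expects a JSON file with a top-level 'questions' list whose items carry 'text' and 'category' fields. A small sketch (file name and question invented; nltk's 'punkt' tokenizer data must be installed for word_tokenize):

import json

with open('sample_questions.json', 'w') as f:
    json.dump({'questions': [{'text': 'This author wrote Hamlet.',
                              'category': 'Literature'}]}, f)

print(load_data('sample_questions.json'))
# [(['This', 'author', 'wrote', 'Hamlet', '.'], 'Literature')]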
36f505f78e64ef24f922e55df58bbaa0782726bee74fbbd26721b3d165b3b381 | def load_words(exs):
    '\n vocabulary building\n\n Keyword arguments:\n exs: list of input questions-type pairs\n '
words = set()
word2ind = {kPAD: 0, kUNK: 1}
ind2word = {0: kPAD, 1: kUNK}
for (q_text, _) in exs:
for w in q_text:
words.add(w)
words = sorted(words)
for w in words:
idx = len(word2ind)
word2ind[w] = idx
ind2word[idx] = w
words = ([kPAD, kUNK] + words)
    return (words, word2ind, ind2word) | vocabulary building
Keyword arguments:
exs: list of input questions-type pairs | dan/dan.py | load_words | akarasahin/nlp-hw | 0 | python | def load_words(exs):
    '\n vocabulary building\n\n Keyword arguments:\n exs: list of input questions-type pairs\n '
words = set()
word2ind = {kPAD: 0, kUNK: 1}
ind2word = {0: kPAD, 1: kUNK}
for (q_text, _) in exs:
for w in q_text:
words.add(w)
words = sorted(words)
for w in words:
idx = len(word2ind)
word2ind[w] = idx
ind2word[idx] = w
words = ([kPAD, kUNK] + words)
return (words, word2ind, ind2word) | def load_words(exs):
    '\n vocabulary building\n\n Keyword arguments:\n exs: list of input questions-type pairs\n '
words = set()
word2ind = {kPAD: 0, kUNK: 1}
ind2word = {0: kPAD, 1: kUNK}
for (q_text, _) in exs:
for w in q_text:
words.add(w)
words = sorted(words)
for w in words:
idx = len(word2ind)
word2ind[w] = idx
ind2word[idx] = w
words = ([kPAD, kUNK] + words)
    return (words, word2ind, ind2word)<|docstring|>vocabulary building
Keyword arguments:
exs: list of input questions-type pairs<|endoftext|> |
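A sketch of the mappings load_words produces; the concrete strings behind the module-level kPAD and kUNK constants are not shown in this file, so they are left symbolic in the expected output:

exs = [(['deep', 'averaging', 'network'], 'CS'),
       (['deep', 'learning'], 'CS')]
words, word2ind, ind2word = load_words(exs)
print(word2ind)
# {kPAD: 0, kUNK: 1, 'averaging': 2, 'deep': 3, 'learning': 4, 'network': 5}
print(ind2word[3])  # 'deep'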
499cb3ada5399096516598fa14171adcf0d4d4cb00425a689803197ece37fd6c | def batchify(batch):
'\n Gather a batch of individual examples into one batch, \n which includes the question text, question length and labels \n\n Keyword arguments:\n batch: list of outputs from vectorize function\n '
question_len = list()
label_list = list()
for ex in batch:
question_len.append(len(ex[0]))
label_list.append(ex[1])
target_labels = torch.LongTensor(label_list)
x1 = torch.LongTensor(len(question_len), max(question_len)).zero_()
for i in range(len(question_len)):
question_text = batch[i][0]
vec = torch.LongTensor(question_text)
x1[(i, :len(question_text))].copy_(vec)
q_batch = {'text': x1, 'len': torch.FloatTensor(question_len), 'labels': target_labels}
return q_batch | Gather a batch of individual examples into one batch,
which includes the question text, question length and labels
Keyword arguments:
batch: list of outputs from vectorize function | dan/dan.py | batchify | akarasahin/nlp-hw | 0 | python | def batchify(batch):
'\n Gather a batch of individual examples into one batch, \n which includes the question text, question length and labels \n\n Keyword arguments:\n batch: list of outputs from vectorize function\n '
question_len = list()
label_list = list()
for ex in batch:
question_len.append(len(ex[0]))
label_list.append(ex[1])
target_labels = torch.LongTensor(label_list)
x1 = torch.LongTensor(len(question_len), max(question_len)).zero_()
for i in range(len(question_len)):
question_text = batch[i][0]
vec = torch.LongTensor(question_text)
x1[(i, :len(question_text))].copy_(vec)
q_batch = {'text': x1, 'len': torch.FloatTensor(question_len), 'labels': target_labels}
return q_batch | def batchify(batch):
'\n Gather a batch of individual examples into one batch, \n which includes the question text, question length and labels \n\n Keyword arguments:\n batch: list of outputs from vectorize function\n '
question_len = list()
label_list = list()
for ex in batch:
question_len.append(len(ex[0]))
label_list.append(ex[1])
target_labels = torch.LongTensor(label_list)
x1 = torch.LongTensor(len(question_len), max(question_len)).zero_()
for i in range(len(question_len)):
question_text = batch[i][0]
vec = torch.LongTensor(question_text)
x1[(i, :len(question_text))].copy_(vec)
q_batch = {'text': x1, 'len': torch.FloatTensor(question_len), 'labels': target_labels}
return q_batch<|docstring|>Gather a batch of individual examples into one batch,
which includes the question text, question length and labels
Keyword arguments:
batch: list of outputs from vectorize function<|endoftext|> |
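A sketch of the padding behaviour on a toy batch; each element is a (vectorized question, label index) pair, matching the vectorize output further down:

batch = [([3, 4, 5], 0), ([6, 7], 1)]
out = batchify(batch)
print(out['text'])    # tensor([[3, 4, 5], [6, 7, 0]]) -- the shorter question is zero-padded
print(out['len'])     # tensor([3., 2.])
print(out['labels'])  # tensor([0, 1])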
107ecc7640cb9b3f8f4c96474e8daa9e1f06229dd745b646cfbb38d94e96e2d8 | def evaluate(data_loader, model, device):
    '\n evaluate the current model, get the accuracy for dev/test set\n\n Keyword arguments:\n data_loader: pytorch built-in data loader output\n model: model to be evaluated\n device: cpu or gpu\n '
model.eval()
num_examples = 0
error = 0
for (idx, batch) in enumerate(data_loader):
question_text = batch['text'].to(device)
question_len = batch['len']
        labels = batch['labels']
        logits = model(question_text, question_len)  # forward pass to get class logits
        (top_n, top_i) = logits.topk(1)
num_examples += question_text.size(0)
error += torch.nonzero((top_i.squeeze() - torch.LongTensor(labels))).size(0)
accuracy = (1 - (error / num_examples))
print('accuracy', accuracy)
return accuracy | evaluate the current model, get the accuracy for dev/test set
Keyword arguments:
data_loader: pytorch built-in data loader output
model: model to be evaluated
device: cpu or gpu | dan/dan.py | evaluate | akarasahin/nlp-hw | 0 | python | def evaluate(data_loader, model, device):
    '\n evaluate the current model, get the accuracy for dev/test set\n\n Keyword arguments:\n data_loader: pytorch built-in data loader output\n model: model to be evaluated\n device: cpu or gpu\n '
model.eval()
num_examples = 0
error = 0
for (idx, batch) in enumerate(data_loader):
question_text = batch['text'].to(device)
question_len = batch['len']
        labels = batch['labels']
        logits = model(question_text, question_len)  # forward pass to get class logits
        (top_n, top_i) = logits.topk(1)
num_examples += question_text.size(0)
error += torch.nonzero((top_i.squeeze() - torch.LongTensor(labels))).size(0)
accuracy = (1 - (error / num_examples))
print('accuracy', accuracy)
return accuracy | def evaluate(data_loader, model, device):
    '\n evaluate the current model, get the accuracy for dev/test set\n\n Keyword arguments:\n data_loader: pytorch built-in data loader output\n model: model to be evaluated\n device: cpu or gpu\n '
model.eval()
num_examples = 0
error = 0
for (idx, batch) in enumerate(data_loader):
question_text = batch['text'].to(device)
question_len = batch['len']
        labels = batch['labels']
        logits = model(question_text, question_len)  # forward pass to get class logits
        (top_n, top_i) = logits.topk(1)
num_examples += question_text.size(0)
error += torch.nonzero((top_i.squeeze() - torch.LongTensor(labels))).size(0)
accuracy = (1 - (error / num_examples))
print('accuracy', accuracy)
return accuracy<|docstring|>evaluate the current model, get the accuracy for dev/test set
Keyword arguments:
data_loader: pytorch built-in data loader output
model: model to be evaluated
device: cpu or gpu<|endoftext|>
d55d81b914c2f796caab0e6ca8ffc83b5bbad39f98bf1e71eebe28cbdc9e6bb9 | def train(args, model, train_data_loader, dev_data_loader, accuracy, device):
    '\n Train the current model\n\n Keyword arguments:\n args: arguments \n model: model to be trained\n train_data_loader: pytorch built-in data loader output for training examples\n dev_data_loader: pytorch built-in data loader output for dev examples\n accuracy: previous best accuracy\n device: cpu or gpu\n '
model.train()
optimizer = torch.optim.Adamax(model.parameters())
criterion = nn.CrossEntropyLoss()
print_loss_total = 0
epoch_loss_total = 0
start = time.time()
for (idx, batch) in enumerate(train_data_loader):
question_text = batch['text'].to(device)
question_len = batch['len']
        labels = batch['labels']
        logits = model(question_text, question_len)  # forward pass
        loss = criterion(logits, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print_loss_total += loss.data.numpy()
epoch_loss_total += loss.data.numpy()
if (((idx % args.checkpoint) == 0) and (idx > 0)):
print_loss_avg = (print_loss_total / args.checkpoint)
print(('number of steps: %d, loss: %.5f time: %.5f' % (idx, print_loss_avg, (time.time() - start))))
print_loss_total = 0
curr_accuracy = evaluate(dev_data_loader, model, device)
if (accuracy < curr_accuracy):
torch.save(model, args.save_model)
accuracy = curr_accuracy
return accuracy | Train the current model
Keyword arguments:
args: arguments
model: model to be trained
train_data_loader: pytorch built-in data loader output for training examples
dev_data_loader: pytorch built-in data loader output for dev examples
accuracy: previous best accuracy
device: cpu or gpu | dan/dan.py | train | akarasahin/nlp-hw | 0 | python | def train(args, model, train_data_loader, dev_data_loader, accuracy, device):
    '\n Train the current model\n\n Keyword arguments:\n args: arguments \n model: model to be trained\n train_data_loader: pytorch built-in data loader output for training examples\n dev_data_loader: pytorch built-in data loader output for dev examples\n accuracy: previous best accuracy\n device: cpu or gpu\n '
model.train()
optimizer = torch.optim.Adamax(model.parameters())
criterion = nn.CrossEntropyLoss()
print_loss_total = 0
epoch_loss_total = 0
start = time.time()
for (idx, batch) in enumerate(train_data_loader):
question_text = batch['text'].to(device)
question_len = batch['len']
        labels = batch['labels']
        logits = model(question_text, question_len)  # forward pass
        loss = criterion(logits, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print_loss_total += loss.data.numpy()
epoch_loss_total += loss.data.numpy()
if (((idx % args.checkpoint) == 0) and (idx > 0)):
print_loss_avg = (print_loss_total / args.checkpoint)
print(('number of steps: %d, loss: %.5f time: %.5f' % (idx, print_loss_avg, (time.time() - start))))
print_loss_total = 0
curr_accuracy = evaluate(dev_data_loader, model, device)
if (accuracy < curr_accuracy):
torch.save(model, args.save_model)
accuracy = curr_accuracy
return accuracy | def train(args, model, train_data_loader, dev_data_loader, accuracy, device):
    '\n Train the current model\n\n Keyword arguments:\n args: arguments \n model: model to be trained\n train_data_loader: pytorch built-in data loader output for training examples\n dev_data_loader: pytorch built-in data loader output for dev examples\n accuracy: previous best accuracy\n device: cpu or gpu\n '
model.train()
optimizer = torch.optim.Adamax(model.parameters())
criterion = nn.CrossEntropyLoss()
print_loss_total = 0
epoch_loss_total = 0
start = time.time()
for (idx, batch) in enumerate(train_data_loader):
question_text = batch['text'].to(device)
question_len = batch['len']
        labels = batch['labels']
        logits = model(question_text, question_len)  # forward pass
        loss = criterion(logits, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print_loss_total += loss.data.numpy()
epoch_loss_total += loss.data.numpy()
if (((idx % args.checkpoint) == 0) and (idx > 0)):
print_loss_avg = (print_loss_total / args.checkpoint)
print(('number of steps: %d, loss: %.5f time: %.5f' % (idx, print_loss_avg, (time.time() - start))))
print_loss_total = 0
curr_accuracy = evaluate(dev_data_loader, model, device)
if (accuracy < curr_accuracy):
torch.save(model, args.save_model)
accuracy = curr_accuracy
return accuracy<|docstring|>Train the current model
Keyword arguments:
args: arguments
model: model to be trained
train_data_loader: pytorch built-in data loader output for training examples
dev_data_loader: pytorch built-in data loader output for dev examples
accuracy: previous best accuracy
device: cpu or gpu<|endoftext|>
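A hypothetical wiring of the training pieces above. train_dataset, dev_dataset, num_epochs and the args fields other than checkpoint and save_model are assumptions, not shown in this file:

from torch.utils.data import DataLoader

train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True, collate_fn=batchify)
dev_loader = DataLoader(dev_dataset, batch_size=128, collate_fn=batchify)

best_accuracy = 0.0
for epoch in range(num_epochs):  # num_epochs is a made-up knob
    best_accuracy = train(args, model, train_loader, dev_loader, best_accuracy, device)
print('best dev accuracy:', best_accuracy)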
805c687edcca5dd5e3a1b41418b91d5488b33cdb1599143713010f4c3e15c43b | @staticmethod
def vectorize(ex, word2ind):
"\n vectorize a single example based on the word2ind dict. \n\n Keyword arguments:\n exs: list of input questions-type pairs\n ex: tokenized question sentence (list)\n label: type of question sentence\n\n Output: vectorized sentence(python list) and label(int)\n e.g. ['text', 'test', 'is', 'fun'] -> [0, 2, 3, 4]\n "
    vec_text = [word2ind.get(w, word2ind[kUNK]) for w in ex]  # unseen tokens fall back to the kUNK index
return vec_text | vectorize a single example based on the word2ind dict.
Keyword arguments:
exs: list of input questions-type pairs
ex: tokenized question sentence (list)
label: type of question sentence
Output: vectorized sentence(python list) and label(int)
e.g. ['text', 'test', 'is', 'fun'] -> [0, 2, 3, 4] | dan/dan.py | vectorize | akarasahin/nlp-hw | 0 | python | @staticmethod
def vectorize(ex, word2ind):
"\n vectorize a single example based on the word2ind dict. \n\n Keyword arguments:\n exs: list of input questions-type pairs\n ex: tokenized question sentence (list)\n label: type of question sentence\n\n Output: vectorized sentence(python list) and label(int)\n e.g. ['text', 'test', 'is', 'fun'] -> [0, 2, 3, 4]\n "
    vec_text = [word2ind.get(w, word2ind[kUNK]) for w in ex]  # unseen tokens fall back to the kUNK index
return vec_text | @staticmethod
def vectorize(ex, word2ind):
"\n vectorize a single example based on the word2ind dict. \n\n Keyword arguments:\n exs: list of input questions-type pairs\n ex: tokenized question sentence (list)\n label: type of question sentence\n\n Output: vectorized sentence(python list) and label(int)\n e.g. ['text', 'test', 'is', 'fun'] -> [0, 2, 3, 4]\n "
    vec_text = [word2ind.get(w, word2ind[kUNK]) for w in ex]  # unseen tokens fall back to the kUNK index
return vec_text<|docstring|>vectorize a single example based on the word2ind dict.
Keyword arguments:
exs: list of input questions-type pairs
ex: tokenized question sentence (list)
label: type of question sentence
Output: vectorized sentence(python list) and label(int)
e.g. ['text', 'test', 'is', 'fun'] -> [0, 2, 3, 4]<|endoftext|> |
03915d0adb50d6d8840fa711a73c4661ad07b14fcee6bf9122249ab3809523cd | def forward(self, input_text, text_len, is_prob=False):
'\n Model forward pass, returns the logits of the predictions.\n \n Keyword arguments:\n input_text : vectorized question text \n text_len : batch * 1, text length for each question\n in_prob: if True, output the softmax of last layer\n\n '
logits = torch.LongTensor(([0.0] * self.n_classes))
return logits | Model forward pass, returns the logits of the predictions.
Keyword arguments:
input_text : vectorized question text
text_len : batch * 1, text length for each question
in_prob: if True, output the softmax of last layer | dan/dan.py | forward | akarasahin/nlp-hw | 0 | python | def forward(self, input_text, text_len, is_prob=False):
'\n Model forward pass, returns the logits of the predictions.\n \n Keyword arguments:\n input_text : vectorized question text \n text_len : batch * 1, text length for each question\n in_prob: if True, output the softmax of last layer\n\n '
logits = torch.LongTensor(([0.0] * self.n_classes))
return logits | def forward(self, input_text, text_len, is_prob=False):
'\n Model forward pass, returns the logits of the predictions.\n \n Keyword arguments:\n input_text : vectorized question text \n text_len : batch * 1, text length for each question\n in_prob: if True, output the softmax of last layer\n\n '
logits = torch.LongTensor(([0.0] * self.n_classes))
return logits<|docstring|>Model forward pass, returns the logits of the predictions.
Keyword arguments:
input_text : vectorized question text
text_len : batch * 1, text length for each question
is_prob: if True, output the softmax of last layer<|endoftext|>
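The forward body above is likewise a placeholder returning a constant tensor; a minimal deep-averaging-network forward pass consistent with its docstring might look like the sketch below. The embedding/classifier modules and the divide-by-length averaging are assumptions (padding is assumed to embed to zeros), not the repo's actual architecture:

```python
import torch

def dan_forward(embedding, classifier, input_text, text_len, is_prob=False):
    embeds = embedding(input_text)                    # batch x seq_len x dim
    averaged = embeds.sum(dim=1) / text_len.float()   # batch x dim, mean over real tokens
    logits = classifier(averaged)                     # batch x n_classes
    return torch.softmax(logits, dim=1) if is_prob else logits

embedding = torch.nn.Embedding(100, 16, padding_idx=0)
classifier = torch.nn.Linear(16, 5)
tokens = torch.randint(1, 100, (2, 7))
lengths = torch.tensor([[7.0], [4.0]])
print(dan_forward(embedding, classifier, tokens, lengths).shape)  # torch.Size([2, 5])
```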
8d0990eeb6d4510623743de8c577a23b30a66c19e6b0eb89f426f7e20b9a9a36 | def lookup(self, var):
'\n Look up a Parameter instance in the current environment.\n\n Returns either a NameExpr or a Suspension.\n '
while self:
if (self.var == var):
return self.val
self = self.env
raise UndefinedNameError('Variable not in environment: {}'.format(var)) | Look up a Parameter instance in the current environment.
Returns either a NameExpr or a Suspension. | church/environment.py | lookup | mdickinson/alonzo | 0 | python | def lookup(self, var):
'\n Look up a Parameter instance in the current environment.\n\n Returns either a NameExpr or a Suspension.\n '
while self:
if (self.var == var):
return self.val
self = self.env
raise UndefinedNameError('Variable not in environment: {}'.format(var)) | def lookup(self, var):
'\n Look up a Parameter instance in the current environment.\n\n Returns either a NameExpr or a Suspension.\n '
while self:
if (self.var == var):
return self.val
self = self.env
raise UndefinedNameError('Variable not in environment: {}'.format(var))<|docstring|>Look up a Parameter instance in the current environment.
Returns either a NameExpr or a Suspension.<|endoftext|> |
cf6630ba7014db09fd3cdaeb308b0eac44ae4d5b5a9ca842d52e63aba9a92be4 | def lookup_by_name(self, name):
'\n Look up a name (a string) and recover the corresponding binding\n and value.\n\n Returns a pair (binding, value).\n '
while self:
if (self.var.name == name):
return (self.var, self.val)
self = self.env
raise UndefinedNameError('Undefined name: {}'.format(name)) | Look up a name (a string) and recover the corresponding binding
and value.
Returns a pair (binding, value). | church/environment.py | lookup_by_name | mdickinson/alonzo | 0 | python | def lookup_by_name(self, name):
'\n Look up a name (a string) and recover the corresponding binding\n and value.\n\n Returns a pair (binding, value).\n '
while self:
if (self.var.name == name):
return (self.var, self.val)
self = self.env
raise UndefinedNameError('Undefined name: {}'.format(name)) | def lookup_by_name(self, name):
'\n Look up a name (a string) and recover the corresponding binding\n and value.\n\n Returns a pair (binding, value).\n '
while self:
if (self.var.name == name):
return (self.var, self.val)
self = self.env
raise UndefinedNameError('Undefined name: {}'.format(name))<|docstring|>Look up a name (a string) and recover the corresponding binding
and value.
Returns a pair (binding, value).<|endoftext|> |
237b751e9e5751f952e5b1cec995c75c7ae7bfa6630d15c849237f1643661d04 | def __init__(self, var, val, env):
'\n Parameters\n ----------\n var : Parameter\n val : NameExpr or Suspension\n env : Environment\n '
self.var = var
self.val = val
self.env = env | Parameters
----------
var : Parameter
val : NameExpr or Suspension
env : Environment | church/environment.py | __init__ | mdickinson/alonzo | 0 | python | def __init__(self, var, val, env):
'\n Parameters\n ----------\n var : Parameter\n val : NameExpr or Suspension\n env : Environment\n '
self.var = var
self.val = val
self.env = env | def __init__(self, var, val, env):
'\n Parameters\n ----------\n var : Parameter\n val : NameExpr or Suspension\n env : Environment\n '
self.var = var
self.val = val
self.env = env<|docstring|>Parameters
----------
var : Parameter
val : NameExpr or Suspension
env : Environment<|endoftext|> |
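The three church/environment.py records above describe a linked-list environment: each frame holds one (var, val) binding plus a pointer to the enclosing frame, and lookups walk the chain outward. A simplified stand-in showing that walk (the real val types are NameExpr/Suspension per the docstrings):

```python
class Parameter:
    def __init__(self, name):
        self.name = name

class Environment:
    def __init__(self, var, val, env):
        self.var, self.val, self.env = var, val, env

    def lookup(self, var):
        env = self
        while env:                 # the empty environment is None / falsy
            if env.var == var:
                return env.val
            env = env.env
        raise KeyError(var)

x, y = Parameter('x'), Parameter('y')
inner = Environment(y, 'bound-to-y', Environment(x, 'bound-to-x', None))
print(inner.lookup(x))   # 'bound-to-x', found one frame up the chain
```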
65f07467e5695b315f37d2f9f66fa15409211b178393cc9716ff9662d22fdb2d | def run_migrations_offline():
"\n Run migrations in 'offline' mode.\n\n This configures the context with just a URL and not an Engine, though an\n Engine is acceptable here as well. By skipping the Engine creation we\n don't even need a DBAPI to be available.\n\n Calls to context.execute() here emit the given string to the script output.\n "
url = config.get_main_option('sqlalchemy.url')
context.configure(url=url, target_metadata=target_metadata, literal_binds=True, dialect_opts={'paramstyle': 'named'})
with context.begin_transaction():
context.run_migrations() | Run migrations in 'offline' mode.
This configures the context with just a URL and not an Engine, though an
Engine is acceptable here as well. By skipping the Engine creation we
don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the script output. | db/env.py | run_migrations_offline | atomist-playground/docker-flask-example | 239 | python | def run_migrations_offline():
"\n Run migrations in 'offline' mode.\n\n This configures the context with just a URL and not an Engine, though an\n Engine is acceptable here as well. By skipping the Engine creation we\n don't even need a DBAPI to be available.\n\n Calls to context.execute() here emit the given string to the script output.\n "
url = config.get_main_option('sqlalchemy.url')
context.configure(url=url, target_metadata=target_metadata, literal_binds=True, dialect_opts={'paramstyle': 'named'})
with context.begin_transaction():
context.run_migrations() | def run_migrations_offline():
"\n Run migrations in 'offline' mode.\n\n This configures the context with just a URL and not an Engine, though an\n Engine is acceptable here as well. By skipping the Engine creation we\n don't even need a DBAPI to be available.\n\n Calls to context.execute() here emit the given string to the script output.\n "
url = config.get_main_option('sqlalchemy.url')
context.configure(url=url, target_metadata=target_metadata, literal_binds=True, dialect_opts={'paramstyle': 'named'})
with context.begin_transaction():
context.run_migrations()<|docstring|>Run migrations in 'offline' mode.
This configures the context with just a URL and not an Engine, though an
Engine is acceptable here as well. By skipping the Engine creation we
don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the script output.<|endoftext|> |
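For context, Alembic takes the offline branch above when migrations are emitted as SQL rather than executed; both paths can be driven programmatically (a sketch assuming a standard alembic.ini alongside this env.py):

```python
from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")             # assumed location of the project config
command.upgrade(cfg, "head", sql=True)  # offline mode: prints SQL, no DBAPI needed
# command.upgrade(cfg, "head")          # online mode: connects and applies
```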
f882bae881b01539cb086338f26090d328aa6b9b3e465927cb45ffaf5438bc97 | def run_migrations_online():
"\n Run migrations in 'online' mode.\n\n In this scenario we need to create an Engine and associate a connection\n with the context.\n "
def process_revision_directives(context, revision, directives):
if config.cmd_opts.autogenerate:
script = directives[0]
if script.upgrade_ops.is_empty():
directives[:] = []
connectable = engine_from_config(config.get_section(config.config_ini_section), prefix='sqlalchemy.', poolclass=pool.NullPool)
with connectable.connect() as connection:
context.configure(connection=connection, target_metadata=target_metadata, process_revision_directives=process_revision_directives)
with context.begin_transaction():
context.run_migrations() | Run migrations in 'online' mode.
In this scenario we need to create an Engine and associate a connection
with the context. | db/env.py | run_migrations_online | atomist-playground/docker-flask-example | 239 | python | def run_migrations_online():
"\n Run migrations in 'online' mode.\n\n In this scenario we need to create an Engine and associate a connection\n with the context.\n "
def process_revision_directives(context, revision, directives):
if config.cmd_opts.autogenerate:
script = directives[0]
if script.upgrade_ops.is_empty():
directives[:] = []
connectable = engine_from_config(config.get_section(config.config_ini_section), prefix='sqlalchemy.', poolclass=pool.NullPool)
with connectable.connect() as connection:
context.configure(connection=connection, target_metadata=target_metadata, process_revision_directives=process_revision_directives)
with context.begin_transaction():
context.run_migrations() | def run_migrations_online():
"\n Run migrations in 'online' mode.\n\n In this scenario we need to create an Engine and associate a connection\n with the context.\n "
def process_revision_directives(context, revision, directives):
if config.cmd_opts.autogenerate:
script = directives[0]
if script.upgrade_ops.is_empty():
directives[:] = []
connectable = engine_from_config(config.get_section(config.config_ini_section), prefix='sqlalchemy.', poolclass=pool.NullPool)
with connectable.connect() as connection:
context.configure(connection=connection, target_metadata=target_metadata, process_revision_directives=process_revision_directives)
with context.begin_transaction():
context.run_migrations()<|docstring|>Run migrations in 'online' mode.
In this scenario we need to create an Engine and associate a connection
with the context.<|endoftext|> |
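The process_revision_directives hook in the record above is the standard Alembic trick for aborting empty autogenerated revisions: when `alembic revision --autogenerate` detects no schema drift, clearing the directives list suppresses file creation. The same check in isolation:

```python
def skip_empty_revisions(context, revision, directives):
    # directives[0] is the MigrationScript produced by autogenerate;
    # emptying the list tells Alembic not to write a revision file.
    script = directives[0]
    if script.upgrade_ops.is_empty():
        directives[:] = []
```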
2e10dea2ed51398e2aa994b4ac58fb64e73e66e10e19e54058beae0bd4bb1441 | @click.command(name='validate', short_help=help.SHORT_HELP)
@click.option('--template-file', '-t', type=click.Path(exists=True), help=help.TEMPLATE_FILE)
def validate(template_file):
'\n Validate a SCF template.\n '
tcsam.tcsam_validate(Template.get_template_data(template_file)) | Validate a SCF template. | tcfcli/cmds/validate/cli.py | validate | Juliiii/scfcli | 1 | python | @click.command(name='validate', short_help=help.SHORT_HELP)
@click.option('--template-file', '-t', type=click.Path(exists=True), help=help.TEMPLATE_FILE)
def validate(template_file):
'\n \n '
tcsam.tcsam_validate(Template.get_template_data(template_file)) | @click.command(name='validate', short_help=help.SHORT_HELP)
@click.option('--template-file', '-t', type=click.Path(exists=True), help=help.TEMPLATE_FILE)
def validate(template_file):
'\n \n '
tcsam.tcsam_validate(Template.get_template_data(template_file))<|docstring|>Validate a SCF template.<|endoftext|> |
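A usage sketch for the Click command above; the `scf` entry-point name is an assumption about how tcfcli installs itself, and template.yaml must exist since the option is declared with exists=True:

```python
# Shell (assumed entry point): scf validate --template-file template.yaml
from click.testing import CliRunner
from tcfcli.cmds.validate.cli import validate

result = CliRunner().invoke(validate, ["--template-file", "template.yaml"])
print(result.exit_code)  # 0 when the SCF template passes tcsam validation
```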
364108c3d7fe5392a18dc4aa3b1271bcb91d229493c09b2a4a0980e5adac3b09 | @property
def archive_name(self) -> str:
"Name of the archive\n\n Example: 'pypi__futures_3_1_1'\n\n This includes the version so that Bazel graph shows it.\n\n The naming convention matches:\n https://github.com/bazelbuild/rules_python#canonical-whl_library-naming\n "
version_label = self.version.replace('.', '_')
if (self.platform != 'purelib'):
return 'pypi__{}_{}__{}'.format(self.name, version_label, self.platform)
return 'pypi__{}_{}'.format(self.name, version_label) | Name of the archive
Example: 'pypi__futures_3_1_1'
This includes the version so that Bazel graph shows it.
The naming convention matches:
https://github.com/bazelbuild/rules_python#canonical-whl_library-naming | src/rules_pygen/rules_generator.py | archive_name | tubular/rules_pygen | 18 | python | @property
def archive_name(self) -> str:
"Name of the archive\n\n Example: 'pypi__futures_3_1_1'\n\n This includes the version so that Bazel graph shows it.\n\n The naming convention matches:\n https://github.com/bazelbuild/rules_python#canonical-whl_library-naming\n "
version_label = self.version.replace('.', '_')
if (self.platform != 'purelib'):
return 'pypi__{}_{}__{}'.format(self.name, version_label, self.platform)
return 'pypi__{}_{}'.format(self.name, version_label) | @property
def archive_name(self) -> str:
"Name of the archive\n\n Example: 'pypi__futures_3_1_1'\n\n This includes the version so that Bazel graph shows it.\n\n The naming convention matches:\n https://github.com/bazelbuild/rules_python#canonical-whl_library-naming\n "
version_label = self.version.replace('.', '_')
if (self.platform != 'purelib'):
return 'pypi__{}_{}__{}'.format(self.name, version_label, self.platform)
return 'pypi__{}_{}'.format(self.name, version_label)<|docstring|>Name of the archive
Example: 'pypi__futures_3_1_1'
This includes the version so that Bazel graph shows it.
The naming convention matches:
https://github.com/bazelbuild/rules_python#canonical-whl_library-naming<|endoftext|> |
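The naming rule that archive_name encodes, traced by hand with hypothetical values:

```python
name, version, platform = "futures", "3.1.1", "purelib"
version_label = version.replace(".", "_")              # '3_1_1'
if platform != "purelib":
    print("pypi__{}_{}__{}".format(name, version_label, platform))
else:
    print("pypi__{}_{}".format(name, version_label))   # pypi__futures_3_1_1
```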
76475df08a880965e2d35d7d5223b48e3da6b17938a486b4567c62b7db9a4a0d | @property
def lib_path(self) -> str:
"Path of the lib inside the archive\n\n Example: '@pypi__futures_3_1_1//:pkg\n "
return '@{}//:pkg'.format(self.archive_name) | Path of the lib inside the archive
Example: '@pypi__futures_3_1_1//:pkg' | src/rules_pygen/rules_generator.py | lib_path | tubular/rules_pygen | 18 | python | @property
def lib_path(self) -> str:
"Path of the lib inside the archive\n\n Example: '@pypi__futures_3_1_1//:pkg\n "
return '@{}//:pkg'.format(self.archive_name) | @property
def lib_path(self) -> str:
"Path of the lib inside the archive\n\n Example: '@pypi__futures_3_1_1//:pkg\n "
return '@{}//:pkg'.format(self.archive_name)<|docstring|>Path of the lib inside the archive
Example: '@pypi__futures_3_1_1//:pkg'<|endoftext|>
d950619cc0e35bbc57243913803b94c51ebe953ab4326f139096dba58ba80c62 | def verify(self, platforms: typing.Set) -> bool:
'Verify that this dependency has the necessary wheels.'
if (len(self.wheels) == 1):
if (self.wheels[0].platform == 'purelib'):
return True
existing_platforms = {w.platform for w in self.wheels}
if (platforms == existing_platforms):
return True
else:
logger.error('Verification shows missing platform(s): %s', (platforms - existing_platforms))
return False | Verify that this dependency has the necessary wheels. | src/rules_pygen/rules_generator.py | verify | tubular/rules_pygen | 18 | python | def verify(self, platforms: typing.Set) -> bool:
if (len(self.wheels) == 1):
if (self.wheels[0].platform == 'purelib'):
return True
existing_platforms = {w.platform for w in self.wheels}
if (platforms == existing_platforms):
return True
else:
logger.error('Verification shows missing platform(s): %s', (platforms - existing_platforms))
return False | def verify(self, platforms: typing.Set) -> bool:
if (len(self.wheels) == 1):
if (self.wheels[0].platform == 'purelib'):
return True
existing_platforms = {w.platform for w in self.wheels}
if (platforms == existing_platforms):
return True
else:
logger.error('Verification shows missing platform(s): %s', (platforms - existing_platforms))
return False<|docstring|>Verify that this dependency has the necessary wheels.<|endoftext|> |
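verify() above passes either for a single pure-Python ('purelib') wheel or when every required platform has a wheel; the set comparison at its core, shown standalone:

```python
required = {"linux", "macos"}
found = {"linux"}            # platforms of the wheels actually downloaded
print(required == found)     # False -> verify() would log the gap and return False
print(required - found)      # {'macos'}: the missing platform it reports
```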
2f41632ae3fb79d704f6e2b42fcef73e43f4a6cd488e250587cdeb400a1f3a42 | def run(self) -> None:
'Main entrypoint into builder.'
logger.info('Validating')
self._validate()
logger.info('Getting wheel links via pip')
wheel_links = self._get_wheel_links()
logger.info('\nParsing dependencies from wheels\n')
deps = self._parse_wheel_dependencies(wheel_links)
logger.info('\nGenerating output file\n')
self._gen_output_file(deps) | Main entrypoint into builder. | src/rules_pygen/rules_generator.py | run | tubular/rules_pygen | 18 | python | def run(self) -> None:
logger.info('Validating')
self._validate()
logger.info('Getting wheel links via pip')
wheel_links = self._get_wheel_links()
logger.info('\nParsing dependencies from wheels\n')
deps = self._parse_wheel_dependencies(wheel_links)
logger.info('\nGenerating output file\n')
self._gen_output_file(deps) | def run(self) -> None:
logger.info('Validating')
self._validate()
logger.info('Getting wheel links via pip')
wheel_links = self._get_wheel_links()
logger.info('\nParsing dependencies from wheels\n')
deps = self._parse_wheel_dependencies(wheel_links)
logger.info('\nGenerating output file\n')
self._gen_output_file(deps)<|docstring|>Main entrypoint into builder.<|endoftext|> |
9f53f5a5cbea1da9013f11db24286aec0dad4fc76b48654a96fe9b9d4c576ba6 | def _get_wheelname_from_link(self, wheel_link: str) -> str:
'Give a wheel url, return the filename.\n\n >>> _get_wheelname_from_link(\'https://foo/__packages/idna_ssl-1.1.0-py3-none-any.whl\')\n "idna_ssl-1.1.0-py3-none-any.whl"\n '
logger.debug('getting filename for %s', wheel_link)
match = WHEEL_FILENAME_RE.search(wheel_link)
if match:
return match.group('filename')
else:
return '' | Give a wheel url, return the filename.
>>> _get_wheelname_from_link('https://foo/__packages/idna_ssl-1.1.0-py3-none-any.whl')
"idna_ssl-1.1.0-py3-none-any.whl" | src/rules_pygen/rules_generator.py | _get_wheelname_from_link | tubular/rules_pygen | 18 | python | def _get_wheelname_from_link(self, wheel_link: str) -> str:
'Give a wheel url, return the filename.\n\n >>> _get_wheelname_from_link(\'https://foo/__packages/idna_ssl-1.1.0-py3-none-any.whl\')\n "idna_ssl-1.1.0-py3-none-any.whl"\n '
logger.debug('getting filename for %s', wheel_link)
match = WHEEL_FILENAME_RE.search(wheel_link)
if match:
return match.group('filename')
else:
return '' | def _get_wheelname_from_link(self, wheel_link: str) -> str:
'Give a wheel url, return the filename.\n\n >>> _get_wheelname_from_link(\'https://foo/__packages/idna_ssl-1.1.0-py3-none-any.whl\')\n "idna_ssl-1.1.0-py3-none-any.whl"\n '
logger.debug('getting filename for %s', wheel_link)
match = WHEEL_FILENAME_RE.search(wheel_link)
if match:
return match.group('filename')
else:
return ''<|docstring|>Give a wheel url, return the filename.
>>> _get_wheelname_from_link('https://foo/__packages/idna_ssl-1.1.0-py3-none-any.whl')
"idna_ssl-1.1.0-py3-none-any.whl"<|endoftext|> |
785468568096918bab40b88f9fb1f2382f54b41d972e81a4e464ecd01bcd25de | def _parse_wheel_dependencies(self, wheel_links: str) -> typing.Set[DependencyInfo]:
"Parse wheel dependencies\n\n Build a set of dependency structs and the wheels that correspond\n with them.\n\n 1. For each wheel that pip downloaded, create a DependencyInfo\n struct and add the wheel to it\n 2. Then find any additional wheels that belong to that dependency\n for other platforms\n\n NOTE: wheel_links contains all kinds of wheels that pip found, part\n of those links aren't useful to use because they're not the right\n version/platform combination.\n "
all_deps = set([])
for wheel_filepath in glob.glob('{}/*.whl'.format(self.wheel_dir)):
logger.info('\nProcessing wheelinfo for %s', wheel_filepath)
wheel = Wheel(wheel_filepath)
extra_deps = {}
for extra in wheel.extras():
extra_deps[extra] = list(wheel.dependencies(extra=extra))
logger.debug('Wheel name is: %s', wheel.name())
dependency = DependencyInfo(name=wheel.name(), deps=(set(wheel.dependencies()) - BLACKLIST), extras=extra_deps)
wheel_filename = os.path.basename(wheel_filepath)
match = WHEEL_FILE_RE.search(wheel_filename)
if match:
(name, version) = (match.group('name'), match.group('ver'))
match_prefix = '{}-{}'.format(name, version)
else:
raise PyBazelRuleGeneratorException('Could not parse wheel file name.')
logger.debug('Will find additional wheels for other platforms using prefix: %s', match_prefix)
for (additional_filename, additional_link) in wheel_links.items():
if additional_filename.startswith(match_prefix):
logger.info('Found additional wheel: %s', additional_filename)
is_compatible = _check_compatibility(additional_filename, self.desired_python)
if (not is_compatible):
continue
filepath = os.path.abspath(os.path.join(self.wheel_dir, additional_filename))
logger.debug('Considering %s', additional_filename)
if (wheel_filename != additional_filename):
logger.debug('%s does not equal %s', wheel_filename, additional_filename)
_download(additional_link, filepath)
logger.debug('Matched %s %s', match_prefix, additional_filename)
wi = WheelInfo(name=name, filepath=filepath, url=additional_link, version=version)
dependency.add_wheel(wi)
if (dependency.name not in BLACKLIST):
if (not dependency.verify({'macos', 'linux'})):
raise PyBazelRuleGeneratorException('Dependency {} is missing wheels!'.format(dependency))
all_deps.add(dependency)
return all_deps | Parse wheel dependencies
Build a set of dependency structs and the wheels that correspond
with them.
1. For each wheel that pip downloaded, create a DependencyInfo
struct and add the wheel to it
2. Then find any additional wheels that belong to that dependency
for other platforms
NOTE: wheel_links contains all kinds of wheels that pip found, part
of those links aren't useful to use because they're not the right
version/platform combination. | src/rules_pygen/rules_generator.py | _parse_wheel_dependencies | tubular/rules_pygen | 18 | python | def _parse_wheel_dependencies(self, wheel_links: str) -> typing.Set[DependencyInfo]:
"Parse wheel dependencies\n\n Build a set of dependency structs and the wheels that correspond\n with them.\n\n 1. For each wheel that pip downloaded, create a DependencyInfo\n struct and add the wheel to it\n 2. Then find any additional wheels that belong to that dependency\n for other platforms\n\n NOTE: wheel_links contains all kinds of wheels that pip found, part\n of those links aren't useful to use because they're not the right\n version/platform combination.\n "
all_deps = set([])
for wheel_filepath in glob.glob('{}/*.whl'.format(self.wheel_dir)):
logger.info('\nProcessing wheelinfo for %s', wheel_filepath)
wheel = Wheel(wheel_filepath)
extra_deps = {}
for extra in wheel.extras():
extra_deps[extra] = list(wheel.dependencies(extra=extra))
logger.debug('Wheel name is: %s', wheel.name())
dependency = DependencyInfo(name=wheel.name(), deps=(set(wheel.dependencies()) - BLACKLIST), extras=extra_deps)
wheel_filename = os.path.basename(wheel_filepath)
match = WHEEL_FILE_RE.search(wheel_filename)
if match:
(name, version) = (match.group('name'), match.group('ver'))
match_prefix = '{}-{}'.format(name, version)
else:
raise PyBazelRuleGeneratorException('Could not parse wheel file name.')
logger.debug('Will find additional wheels for other platforms using prefix: %s', match_prefix)
for (additional_filename, additional_link) in wheel_links.items():
if additional_filename.startswith(match_prefix):
logger.info('Found additional wheel: %s', additional_filename)
is_compatible = _check_compatibility(additional_filename, self.desired_python)
if (not is_compatible):
continue
filepath = os.path.abspath(os.path.join(self.wheel_dir, additional_filename))
logger.debug('Considering %s', additional_filename)
if (wheel_filename != additional_filename):
logger.debug('%s does not equal %s', wheel_filename, additional_filename)
_download(additional_link, filepath)
logger.debug('Matched %s %s', match_prefix, additional_filename)
wi = WheelInfo(name=name, filepath=filepath, url=additional_link, version=version)
dependency.add_wheel(wi)
if (dependency.name not in BLACKLIST):
if (not dependency.verify({'macos', 'linux'})):
raise PyBazelRuleGeneratorException('Dependency {} is missing wheels!'.format(dependency))
all_deps.add(dependency)
return all_deps | def _parse_wheel_dependencies(self, wheel_links: str) -> typing.Set[DependencyInfo]:
"Parse wheel dependencies\n\n Build a set of dependency structs and the wheels that correspond\n with them.\n\n 1. For each wheel that pip downloaded, create a DependencyInfo\n struct and add the wheel to it\n 2. Then find any additional wheels that belong to that dependency\n for other platforms\n\n NOTE: wheel_links contains all kinds of wheels that pip found, part\n of those links aren't useful to use because they're not the right\n version/platform combination.\n "
all_deps = set([])
for wheel_filepath in glob.glob('{}/*.whl'.format(self.wheel_dir)):
logger.info('\nProcessing wheelinfo for %s', wheel_filepath)
wheel = Wheel(wheel_filepath)
extra_deps = {}
for extra in wheel.extras():
extra_deps[extra] = list(wheel.dependencies(extra=extra))
logger.debug('Wheel name is: %s', wheel.name())
dependency = DependencyInfo(name=wheel.name(), deps=(set(wheel.dependencies()) - BLACKLIST), extras=extra_deps)
wheel_filename = os.path.basename(wheel_filepath)
match = WHEEL_FILE_RE.search(wheel_filename)
if match:
(name, version) = (match.group('name'), match.group('ver'))
match_prefix = '{}-{}'.format(name, version)
else:
raise PyBazelRuleGeneratorException('Could not parse wheel file name.')
logger.debug('Will find additional wheels for other platforms using prefix: %s', match_prefix)
for (additional_filename, additional_link) in wheel_links.items():
if additional_filename.startswith(match_prefix):
logger.info('Found additional wheel: %s', additional_filename)
is_compatible = _check_compatibility(additional_filename, self.desired_python)
if (not is_compatible):
continue
filepath = os.path.abspath(os.path.join(self.wheel_dir, additional_filename))
logger.debug('Considering %s', additional_filename)
if (wheel_filename != additional_filename):
logger.debug('%s does not equal %s', wheel_filename, additional_filename)
_download(additional_link, filepath)
logger.debug('Matched %s %s', match_prefix, additional_filename)
wi = WheelInfo(name=name, filepath=filepath, url=additional_link, version=version)
dependency.add_wheel(wi)
if (dependency.name not in BLACKLIST):
if (not dependency.verify({'macos', 'linux'})):
raise PyBazelRuleGeneratorException('Dependency {} is missing wheels!'.format(dependency))
all_deps.add(dependency)
return all_deps<|docstring|>Parse wheel dependencies
Build a set of dependency structs and the wheels that correspond
with them.
1. For each wheel that pip downloaded, create a DependencyInfo
struct and add the wheel to it
2. Then find any additional wheels that belong to that dependency
for other platforms
NOTE: wheel_links contains all kinds of wheels that pip found, part
of those links aren't useful to use because they're not the right
version/platform combination.<|endoftext|> |
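The method above leans on WHEEL_FILE_RE exposing name/ver groups to build the '{name}-{version}' match prefix; a hedged reconstruction of that step (the repo's actual regex may be stricter):

```python
import re

# Assumed shape: {name}-{version}-{python tag}-{abi tag}-{platform tag}.whl
WHEEL_FILE_RE = re.compile(r"^(?P<name>.+?)-(?P<ver>\d[^-]*)-")

m = WHEEL_FILE_RE.search("cryptography-2.8-cp37-cp37m-manylinux1_x86_64.whl")
print(m.group("name"), m.group("ver"))                  # cryptography 2.8
print("{}-{}".format(m.group("name"), m.group("ver")))  # prefix for platform matching
```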
f422aad23544465eea42812123ac606dc8d7db7330b8c0afd06f16af473e84f0 | def _gen_output_file(self, deps) -> None:
'Output a file with the following structure\n\n def pypi_libraries():\n py_library(\n name="virtualenv",\n deps=[\n ] + ["@pypi_virtualenv//:pkg"],\n licenses=["notice"],\n visibility=["//visibility:public"],\n )\n\n def pypi_archives():\n existing_rules = native.existing_rules()\n if "pypi_asn1crypto" not in existing_rules:\n http_archive(\n name="pypi_asn1crypto",\n url="https://files.pythonhosted...",\n sha256="2f1adbb7546ed199e3c90ef23ec95c5cf3585bac7d11fb7eb562a3fe89c64e87",\n build_file_content=_BUILD_FILE_CONTENT,\n type="zip",\n )\n '
f = tempfile.NamedTemporaryFile(delete=False, mode='w+t')
sorted_deps = list(deps)
sorted_deps.sort(key=operator.attrgetter('name'))
f.write(HEADER)
f.write('\ndef pypi_libraries():\n\n')
for dependency in sorted_deps:
logger.debug('Writing py_library for %s', dependency)
f.write((_space(4) + 'py_library(\n'))
f.write((_space(8) + 'name = "{}",\n'.format(dependency.name)))
f.write((_space(8) + 'deps = [\n'))
for subdependency in dependency.dependencies:
f.write((_space(12) + '"{}",\n'.format(subdependency)))
f.write((_space(8) + ']'))
logger.debug('Found %r dependency wheels', dependency.wheels)
if (len(dependency.wheels) == 0):
raise PyBazelRuleGeneratorException('No wheels for dependency: {}'.format(dependency))
if (len(dependency.wheels) == 1):
f.write(' + ["{}"],\n'.format(dependency.wheels[0].lib_path))
else:
f.write(' + select({\n')
sorted_wheels = dependency.wheels
sorted_wheels.sort(key=operator.attrgetter('platform'))
for wheel in sorted_wheels:
if (wheel.platform == 'linux'):
f.write((_space(12) + '"@//tool_bazel:linux": ["{}"],\n'.format(wheel.lib_path)))
elif (wheel.platform == 'macos'):
f.write((_space(12) + '"@//tool_bazel:macos": ["{}"],\n'.format(wheel.lib_path)))
f.write((_space(8) + '}),\n'))
f.write((_space(8) + 'visibility=["//visibility:public"],\n'))
f.write((_space(4) + ')\n\n'))
f.write('\n\ndef pypi_archives():\n')
f.write((_space(4) + 'existing_rules = native.existing_rules()'))
for dependency in sorted_deps:
sorted_wheels = dependency.wheels
sorted_wheels.sort(key=operator.attrgetter('platform'))
for wheel in sorted_wheels:
f.write(ARCHIVE_TMPL.format(archive_name=wheel.archive_name, url=wheel.url, sha256=wheel.sha256sum))
f.write(FOOTER.format(self.bzl_path))
logger.info('Finished writing to output file: %s', self.output_file)
f.close()
shutil.copy(f.name, self.output_file)
os.remove(f.name) | Output a file with the following structure
def pypi_libraries():
py_library(
name="virtualenv",
deps=[
] + ["@pypi_virtualenv//:pkg"],
licenses=["notice"],
visibility=["//visibility:public"],
)
def pypi_archives():
existing_rules = native.existing_rules()
if "pypi_asn1crypto" not in existing_rules:
http_archive(
name="pypi_asn1crypto",
url="https://files.pythonhosted...",
sha256="2f1adbb7546ed199e3c90ef23ec95c5cf3585bac7d11fb7eb562a3fe89c64e87",
build_file_content=_BUILD_FILE_CONTENT,
type="zip",
) | src/rules_pygen/rules_generator.py | _gen_output_file | tubular/rules_pygen | 18 | python | def _gen_output_file(self, deps) -> None:
'Output a file with the following structure\n\n def pypi_libraries():\n py_library(\n name="virtualenv",\n deps=[\n ] + ["@pypi_virtualenv//:pkg"],\n licenses=["notice"],\n visibility=["//visibility:public"],\n )\n\n def pypi_archives():\n existing_rules = native.existing_rules()\n if "pypi_asn1crypto" not in existing_rules:\n http_archive(\n name="pypi_asn1crypto",\n url="https://files.pythonhosted...",\n sha256="2f1adbb7546ed199e3c90ef23ec95c5cf3585bac7d11fb7eb562a3fe89c64e87",\n build_file_content=_BUILD_FILE_CONTENT,\n type="zip",\n )\n '
f = tempfile.NamedTemporaryFile(delete=False, mode='w+t')
sorted_deps = list(deps)
sorted_deps.sort(key=operator.attrgetter('name'))
f.write(HEADER)
f.write('\ndef pypi_libraries():\n\n')
for dependency in sorted_deps:
logger.debug('Writing py_library for %s', dependency)
f.write((_space(4) + 'py_library(\n'))
f.write((_space(8) + 'name = "{}",\n'.format(dependency.name)))
f.write((_space(8) + 'deps = [\n'))
for subdependency in dependency.dependencies:
f.write((_space(12) + '"{}",\n'.format(subdependency)))
f.write((_space(8) + ']'))
logger.debug('Found %r dependency wheels', dependency.wheels)
if (len(dependency.wheels) == 0):
raise PyBazelRuleGeneratorException('No wheels for dependency: {}'.format(dependency))
if (len(dependency.wheels) == 1):
f.write(' + ["{}"],\n'.format(dependency.wheels[0].lib_path))
else:
f.write(' + select({\n')
sorted_wheels = dependency.wheels
sorted_wheels.sort(key=operator.attrgetter('platform'))
for wheel in sorted_wheels:
if (wheel.platform == 'linux'):
f.write((_space(12) + '"@//tool_bazel:linux": ["{}"],\n'.format(wheel.lib_path)))
elif (wheel.platform == 'macos'):
f.write((_space(12) + '"@//tool_bazel:macos": ["{}"],\n'.format(wheel.lib_path)))
f.write((_space(8) + '}),\n'))
f.write((_space(8) + 'visibility=["//visibility:public"],\n'))
f.write((_space(4) + ')\n\n'))
f.write('\n\ndef pypi_archives():\n')
f.write((_space(4) + 'existing_rules = native.existing_rules()'))
for dependency in sorted_deps:
sorted_wheels = dependency.wheels
sorted_wheels.sort(key=operator.attrgetter('platform'))
for wheel in sorted_wheels:
f.write(ARCHIVE_TMPL.format(archive_name=wheel.archive_name, url=wheel.url, sha256=wheel.sha256sum))
f.write(FOOTER.format(self.bzl_path))
logger.info('Finished writing to output file: %s', self.output_file)
f.close()
shutil.copy(f.name, self.output_file)
os.remove(f.name) | def _gen_output_file(self, deps) -> None:
'Output a file with the following structure\n\n def pypi_libraries():\n py_library(\n name="virtualenv",\n deps=[\n ] + ["@pypi_virtualenv//:pkg"],\n licenses=["notice"],\n visibility=["//visibility:public"],\n )\n\n def pypi_archives():\n existing_rules = native.existing_rules()\n if "pypi_asn1crypto" not in existing_rules:\n http_archive(\n name="pypi_asn1crypto",\n url="https://files.pythonhosted...",\n sha256="2f1adbb7546ed199e3c90ef23ec95c5cf3585bac7d11fb7eb562a3fe89c64e87",\n build_file_content=_BUILD_FILE_CONTENT,\n type="zip",\n )\n '
f = tempfile.NamedTemporaryFile(delete=False, mode='w+t')
sorted_deps = list(deps)
sorted_deps.sort(key=operator.attrgetter('name'))
f.write(HEADER)
f.write('\ndef pypi_libraries():\n\n')
for dependency in sorted_deps:
logger.debug('Writing py_library for %s', dependency)
f.write((_space(4) + 'py_library(\n'))
f.write((_space(8) + 'name = "{}",\n'.format(dependency.name)))
f.write((_space(8) + 'deps = [\n'))
for subdependency in dependency.dependencies:
f.write((_space(12) + '"{}",\n'.format(subdependency)))
f.write((_space(8) + ']'))
logger.debug('Found %r dependency wheels', dependency.wheels)
if (len(dependency.wheels) == 0):
raise PyBazelRuleGeneratorException('No wheels for dependency: {}'.format(dependency))
if (len(dependency.wheels) == 1):
f.write(' + ["{}"],\n'.format(dependency.wheels[0].lib_path))
else:
f.write(' + select({\n')
sorted_wheels = dependency.wheels
sorted_wheels.sort(key=operator.attrgetter('platform'))
for wheel in sorted_wheels:
if (wheel.platform == 'linux'):
f.write((_space(12) + '"@//tool_bazel:linux": ["{}"],\n'.format(wheel.lib_path)))
elif (wheel.platform == 'macos'):
f.write((_space(12) + '"@//tool_bazel:macos": ["{}"],\n'.format(wheel.lib_path)))
f.write((_space(8) + '}),\n'))
f.write((_space(8) + 'visibility=["//visibility:public"],\n'))
f.write((_space(4) + ')\n\n'))
f.write('\n\ndef pypi_archives():\n')
f.write((_space(4) + 'existing_rules = native.existing_rules()'))
for dependency in sorted_deps:
sorted_wheels = dependency.wheels
sorted_wheels.sort(key=operator.attrgetter('platform'))
for wheel in sorted_wheels:
f.write(ARCHIVE_TMPL.format(archive_name=wheel.archive_name, url=wheel.url, sha256=wheel.sha256sum))
f.write(FOOTER.format(self.bzl_path))
logger.info('Finished writing to output file: %s', self.output_file)
f.close()
shutil.copy(f.name, self.output_file)
os.remove(f.name)<|docstring|>Output a file with the following structure
def pypi_libraries():
py_library(
name="virtualenv",
deps=[
] + ["@pypi_virtualenv//:pkg"],
licenses=["notice"],
visibility=["//visibility:public"],
)
def pypi_archives():
existing_rules = native.existing_rules()
if "pypi_asn1crypto" not in existing_rules:
http_archive(
name="pypi_asn1crypto",
url="https://files.pythonhosted...",
sha256="2f1adbb7546ed199e3c90ef23ec95c5cf3585bac7d11fb7eb562a3fe89c64e87",
build_file_content=_BUILD_FILE_CONTENT,
type="zip",
)<|endoftext|> |
9e65c4c47f9f2e377cdc1f540894e9b89158408653b2cbacc6a281659e756eb7 | def __init__(self, request, name):
'Initializes the fake attribute.'
self.request = request
self.name = name | Initializes the fake attribute. | plugins/helpers/pkipplib.py | __init__ | renesugar/mac_apt | 1 | python | def __init__(self, request, name):
self.request = request
self.name = name | def __init__(self, request, name):
self.request = request
self.name = name<|docstring|>Initializes the fake attribute.<|endoftext|> |
cd940c39b203ce39a1158f235495749b0d8b0b6481b1752d754b91275176d5c7 | def __setitem__(self, key, value):
'Appends the value to the real attribute.'
attributeslist = getattr(self.request, ('_%s_attributes' % self.name))
for i in range(len(attributeslist)):
attribute = attributeslist[i]
for j in range(len(attribute)):
(attrname, attrvalue) = attribute[j]
if (attrname == key):
attribute[j][1].append(value)
return
attribute.append((key, [value])) | Appends the value to the real attribute. | plugins/helpers/pkipplib.py | __setitem__ | renesugar/mac_apt | 1 | python | def __setitem__(self, key, value):
attributeslist = getattr(self.request, ('_%s_attributes' % self.name))
for i in range(len(attributeslist)):
attribute = attributeslist[i]
for j in range(len(attribute)):
(attrname, attrvalue) = attribute[j]
if (attrname == key):
attribute[j][1].append(value)
return
attribute.append((key, [value])) | def __setitem__(self, key, value):
attributeslist = getattr(self.request, ('_%s_attributes' % self.name))
for i in range(len(attributeslist)):
attribute = attributeslist[i]
for j in range(len(attribute)):
(attrname, attrvalue) = attribute[j]
if (attrname == key):
attribute[j][1].append(value)
return
attribute.append((key, [value]))<|docstring|>Appends the value to the real attribute.<|endoftext|> |
01a76e021b433b0ff5713f0f0b83bedc0d5b133f9908e01e5b181e1e9dae9eee | def __getitem__(self, key):
"Returns an attribute's value."
answer = []
attributeslist = getattr(self.request, ('_%s_attributes' % self.name))
for i in range(len(attributeslist)):
attribute = attributeslist[i]
for j in range(len(attribute)):
(attrname, attrvalue) = attribute[j]
if (attrname == key):
answer.extend(attrvalue)
if answer:
return answer
raise KeyError(key) | Returns an attribute's value. | plugins/helpers/pkipplib.py | __getitem__ | renesugar/mac_apt | 1 | python | def __getitem__(self, key):
answer = []
attributeslist = getattr(self.request, ('_%s_attributes' % self.name))
for i in range(len(attributeslist)):
attribute = attributeslist[i]
for j in range(len(attribute)):
(attrname, attrvalue) = attribute[j]
if (attrname == key):
answer.extend(attrvalue)
if answer:
return answer
raise KeyError(key) | def __getitem__(self, key):
answer = []
attributeslist = getattr(self.request, ('_%s_attributes' % self.name))
for i in range(len(attributeslist)):
attribute = attributeslist[i]
for j in range(len(attribute)):
(attrname, attrvalue) = attribute[j]
if (attrname == key):
answer.extend(attrvalue)
if answer:
return answer
raise KeyError(key)<|docstring|>Returns an attribute's value.<|endoftext|> |
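The three FakeAttribute records above give IPP requests their dict-like facade: __setitem__ appends (tag-type, value) pairs under an attribute name, and __getitem__ collects every value stored under it. A hedged usage sketch (upstream pkipplib names the request class IPPRequest; this mac_apt fork may differ):

```python
from pkipplib import pkipplib  # Python 2 era library; assumed import path

request = pkipplib.IPPRequest(operation_id=0x000B)  # 0x000B: Get-Printer-Attributes
request.operation["attributes-charset"] = ("charset", "utf-8")
request.operation["attributes-natural-language"] = ("naturalLanguage", "en-us")
print(request.operation["attributes-charset"])      # [('charset', 'utf-8')]
```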
0d295d9486fe25b8eeb3128a260add097ac43cf3cd98c1613ada585d3dfff3f0 | def __init__(self, data='', version=IPP_VERSION, operation_id=None, request_id=None, debug=False):
"Initializes an IPP Message object.\n \n Parameters :\n \n data : the complete IPP Message's content.\n debug : a boolean value to output debug info on stderr.\n "
self.debug = debug
self._data = data
self.parsed = False
self.setVersion(version)
self.setOperationId(operation_id)
self.setRequestId(request_id)
self.data = ''
for attrtype in self.attributes_types:
setattr(self, ('_%s_attributes' % attrtype), [[]])
self.tags = ([None] * 256)
self.tags[1] = 'operation-attributes-tag'
self.tags[2] = 'job-attributes-tag'
self.tags[3] = 'end-of-attributes-tag'
self.tags[4] = 'printer-attributes-tag'
self.tags[5] = 'unsupported-attributes-tag'
self.tags[6] = 'subscription-attributes-tag'
self.tags[7] = 'event_notification-attributes-tag'
self.tags[16] = 'unsupported'
self.tags[17] = 'reserved-for-future-default'
self.tags[18] = 'unknown'
self.tags[19] = 'no-value'
self.tags[21] = 'not-settable'
self.tags[22] = 'delete-attribute'
self.tags[23] = 'admin-define'
self.tags[32] = 'generic-integer'
self.tags[33] = 'integer'
self.tags[34] = 'boolean'
self.tags[35] = 'enum'
self.tags[48] = 'octetString-with-an-unspecified-format'
self.tags[49] = 'dateTime'
self.tags[50] = 'resolution'
self.tags[51] = 'rangeOfInteger'
self.tags[52] = 'begCollection'
self.tags[53] = 'textWithLanguage'
self.tags[54] = 'nameWithLanguage'
self.tags[55] = 'endCollection'
self.tags[64] = 'generic-character-string'
self.tags[65] = 'textWithoutLanguage'
self.tags[66] = 'nameWithoutLanguage'
self.tags[68] = 'keyword'
self.tags[69] = 'uri'
self.tags[70] = 'uriScheme'
self.tags[71] = 'charset'
self.tags[72] = 'naturalLanguage'
self.tags[73] = 'mimeMediaType'
self.tags[74] = 'memberAttrName'
self.tagvalues = {}
for i in range(len(self.tags)):
value = self.tags[i]
if (value is not None):
self.tagvalues[value] = i | Initializes an IPP Message object.
Parameters :
data : the complete IPP Message's content.
debug : a boolean value to output debug info on stderr. | plugins/helpers/pkipplib.py | __init__ | renesugar/mac_apt | 1 | python | def __init__(self, data='', version=IPP_VERSION, operation_id=None, request_id=None, debug=False):
"Initializes an IPP Message object.\n \n Parameters :\n \n data : the complete IPP Message's content.\n debug : a boolean value to output debug info on stderr.\n "
self.debug = debug
self._data = data
self.parsed = False
self.setVersion(version)
self.setOperationId(operation_id)
self.setRequestId(request_id)
self.data = ''
for attrtype in self.attributes_types:
setattr(self, ('_%s_attributes' % attrtype), [[]])
self.tags = ([None] * 256)
self.tags[1] = 'operation-attributes-tag'
self.tags[2] = 'job-attributes-tag'
self.tags[3] = 'end-of-attributes-tag'
self.tags[4] = 'printer-attributes-tag'
self.tags[5] = 'unsupported-attributes-tag'
self.tags[6] = 'subscription-attributes-tag'
self.tags[7] = 'event_notification-attributes-tag'
self.tags[16] = 'unsupported'
self.tags[17] = 'reserved-for-future-default'
self.tags[18] = 'unknown'
self.tags[19] = 'no-value'
self.tags[21] = 'not-settable'
self.tags[22] = 'delete-attribute'
self.tags[23] = 'admin-define'
self.tags[32] = 'generic-integer'
self.tags[33] = 'integer'
self.tags[34] = 'boolean'
self.tags[35] = 'enum'
self.tags[48] = 'octetString-with-an-unspecified-format'
self.tags[49] = 'dateTime'
self.tags[50] = 'resolution'
self.tags[51] = 'rangeOfInteger'
self.tags[52] = 'begCollection'
self.tags[53] = 'textWithLanguage'
self.tags[54] = 'nameWithLanguage'
self.tags[55] = 'endCollection'
self.tags[64] = 'generic-character-string'
self.tags[65] = 'textWithoutLanguage'
self.tags[66] = 'nameWithoutLanguage'
self.tags[68] = 'keyword'
self.tags[69] = 'uri'
self.tags[70] = 'uriScheme'
self.tags[71] = 'charset'
self.tags[72] = 'naturalLanguage'
self.tags[73] = 'mimeMediaType'
self.tags[74] = 'memberAttrName'
self.tagvalues = {}
for i in range(len(self.tags)):
value = self.tags[i]
if (value is not None):
self.tagvalues[value] = i | def __init__(self, data='', version=IPP_VERSION, operation_id=None, request_id=None, debug=False):
"Initializes an IPP Message object.\n \n Parameters :\n \n data : the complete IPP Message's content.\n debug : a boolean value to output debug info on stderr.\n "
self.debug = debug
self._data = data
self.parsed = False
self.setVersion(version)
self.setOperationId(operation_id)
self.setRequestId(request_id)
self.data = ''
for attrtype in self.attributes_types:
setattr(self, ('_%s_attributes' % attrtype), [[]])
self.tags = ([None] * 256)
self.tags[1] = 'operation-attributes-tag'
self.tags[2] = 'job-attributes-tag'
self.tags[3] = 'end-of-attributes-tag'
self.tags[4] = 'printer-attributes-tag'
self.tags[5] = 'unsupported-attributes-tag'
self.tags[6] = 'subscription-attributes-tag'
self.tags[7] = 'event_notification-attributes-tag'
self.tags[16] = 'unsupported'
self.tags[17] = 'reserved-for-future-default'
self.tags[18] = 'unknown'
self.tags[19] = 'no-value'
self.tags[21] = 'not-settable'
self.tags[22] = 'delete-attribute'
self.tags[23] = 'admin-define'
self.tags[32] = 'generic-integer'
self.tags[33] = 'integer'
self.tags[34] = 'boolean'
self.tags[35] = 'enum'
self.tags[48] = 'octetString-with-an-unspecified-format'
self.tags[49] = 'dateTime'
self.tags[50] = 'resolution'
self.tags[51] = 'rangeOfInteger'
self.tags[52] = 'begCollection'
self.tags[53] = 'textWithLanguage'
self.tags[54] = 'nameWithLanguage'
self.tags[55] = 'endCollection'
self.tags[64] = 'generic-character-string'
self.tags[65] = 'textWithoutLanguage'
self.tags[66] = 'nameWithoutLanguage'
self.tags[68] = 'keyword'
self.tags[69] = 'uri'
self.tags[70] = 'uriScheme'
self.tags[71] = 'charset'
self.tags[72] = 'naturalLanguage'
self.tags[73] = 'mimeMediaType'
self.tags[74] = 'memberAttrName'
self.tagvalues = {}
for i in range(len(self.tags)):
value = self.tags[i]
if (value is not None):
self.tagvalues[value] = i<|docstring|>Initializes an IPP Message object.
Parameters :
data : the complete IPP Message's content.
debug : a boolean value to output debug info on stderr.<|endoftext|> |
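The tag tables that __init__ builds above mirror RFC 2910's registry and are kept two-way; the round-trip in miniature:

```python
tags = [None] * 256
tags[0x01] = 'operation-attributes-tag'   # delimiter tags sit below 0x10
tags[0x47] = 'charset'                    # value tags start at 0x10
tagvalues = {name: i for i, name in enumerate(tags) if name is not None}
print(tagvalues['charset'])                           # 71 (0x47)
print(tags[tagvalues['operation-attributes-tag']])    # round-trips to the name
```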
a469fec3a558ac28fb825df4a8ed6925588557b265ca6bb4334c9483d62e4dea | def __getattr__(self, name):
'Fakes attribute access.'
if (name in self.attributes_types):
return FakeAttribute(self, name)
else:
raise AttributeError(name) | Fakes attribute access. | plugins/helpers/pkipplib.py | __getattr__ | renesugar/mac_apt | 1 | python | def __getattr__(self, name):
if (name in self.attributes_types):
return FakeAttribute(self, name)
else:
raise AttributeError(name) | def __getattr__(self, name):
if (name in self.attributes_types):
return FakeAttribute(self, name)
else:
raise AttributeError(name)<|docstring|>Fakes attribute access.<|endoftext|> |
7aa935379cf3aed6761f20236df9337bd375d8c81bf522d670b56fc8cf8cb723 | def __str__(self):
'Returns the parsed IPP message in a readable form.'
if (not self.parsed):
return ''
mybuffer = []
mybuffer.append(('IPP version : %s.%s' % self.version))
mybuffer.append(('IPP operation Id : 0x%04x' % self.operation_id))
mybuffer.append(('IPP request Id : 0x%08x' % self.request_id))
for attrtype in self.attributes_types:
for attribute in getattr(self, ('_%s_attributes' % attrtype)):
if attribute:
mybuffer.append(('%s attributes :' % attrtype.title()))
for (name, value) in attribute:
mybuffer.append((' %s : %s' % (name, value)))
if self.data:
mybuffer.append(('IPP datas : %s' % repr(self.data)))
return '\n'.join(mybuffer) | Returns the parsed IPP message in a readable form. | plugins/helpers/pkipplib.py | __str__ | renesugar/mac_apt | 1 | python | def __str__(self):
if (not self.parsed):
return ''
mybuffer = []
mybuffer.append(('IPP version : %s.%s' % self.version))
mybuffer.append(('IPP operation Id : 0x%04x' % self.operation_id))
mybuffer.append(('IPP request Id : 0x%08x' % self.request_id))
for attrtype in self.attributes_types:
for attribute in getattr(self, ('_%s_attributes' % attrtype)):
if attribute:
mybuffer.append(('%s attributes :' % attrtype.title()))
for (name, value) in attribute:
mybuffer.append((' %s : %s' % (name, value)))
if self.data:
mybuffer.append(('IPP datas : %s' % repr(self.data)))
return '\n'.join(mybuffer) | def __str__(self):
if (not self.parsed):
return ''
mybuffer = []
mybuffer.append(('IPP version : %s.%s' % self.version))
mybuffer.append(('IPP operation Id : 0x%04x' % self.operation_id))
mybuffer.append(('IPP request Id : 0x%08x' % self.request_id))
for attrtype in self.attributes_types:
for attribute in getattr(self, ('_%s_attributes' % attrtype)):
if attribute:
mybuffer.append(('%s attributes :' % attrtype.title()))
for (name, value) in attribute:
mybuffer.append((' %s : %s' % (name, value)))
if self.data:
mybuffer.append(('IPP datas : %s' % repr(self.data)))
return '\n'.join(mybuffer)<|docstring|>Returns the parsed IPP message in a readable form.<|endoftext|> |
1763f620e77c4c3bcf36353d248b785faab2ba4d41ba1f97fb7f76d68098d67a | def logDebug(self, msg):
'Prints a debug message.'
if self.debug:
sys.stderr.write(('%s\n' % msg))
sys.stderr.flush() | Prints a debug message. | plugins/helpers/pkipplib.py | logDebug | renesugar/mac_apt | 1 | python | def logDebug(self, msg):
if self.debug:
sys.stderr.write(('%s\n' % msg))
sys.stderr.flush() | def logDebug(self, msg):
if self.debug:
sys.stderr.write(('%s\n' % msg))
sys.stderr.flush()<|docstring|>Prints a debug message.<|endoftext|> |
08ebef5ba3217f405ff0db629b6e25f08a0fb1a1c796c5d652ddec53e5537430 | def setVersion(self, version):
"Sets the request's operation id."
if (version is not None):
try:
self.version = [int(p) for p in version.split('.')]
except AttributeError:
if (len(version) == 2):
self.version = version
else:
try:
self.version = [int(p) for p in str(float(version)).split('.')]
except ValueError:
self.version = [int(p) for p in IPP_VERSION.split('.')] | Sets the request's version. | plugins/helpers/pkipplib.py | setVersion | renesugar/mac_apt | 1 | python | def setVersion(self, version):
if (version is not None):
try:
self.version = [int(p) for p in version.split('.')]
except AttributeError:
if (len(version) == 2):
self.version = version
else:
try:
self.version = [int(p) for p in str(float(version)).split('.')]
except ValueError:
self.version = [int(p) for p in IPP_VERSION.split('.')] | def setVersion(self, version):
if (version is not None):
try:
self.version = [int(p) for p in version.split('.')]
except AttributeError:
if (len(version) == 2):
self.version = version
else:
try:
self.version = [int(p) for p in str(float(version)).split('.')]
except ValueError:
self.version = [int(p) for p in IPP_VERSION.split('.')]<|docstring|>Sets the request's version.<|endoftext|>
81b5b12739ca7ef7dfbd29bbd3e5aa9c6c5ea38d98e90b53e1f941bc9b04299a | def setOperationId(self, opid):
"Sets the request's operation id."
self.operation_id = opid | Sets the request's operation id. | plugins/helpers/pkipplib.py | setOperationId | renesugar/mac_apt | 1 | python | def setOperationId(self, opid):
self.operation_id = opid | def setOperationId(self, opid):
self.operation_id = opid<|docstring|>Sets the request's operation id.<|endoftext|> |
68124269d0e28e2fab41d24fd6818cdaa2d9b10b5ffe0ca26ce54d8bd0c46c2d | def setRequestId(self, reqid):
"Sets the request's request id."
self.request_id = reqid | Sets the request's request id. | plugins/helpers/pkipplib.py | setRequestId | renesugar/mac_apt | 1 | python | def setRequestId(self, reqid):
self.request_id = reqid | def setRequestId(self, reqid):
self.request_id = reqid<|docstring|>Sets the request's request id.<|endoftext|> |
98915bffc304235735a5de6200db0514bb201367c25c76e6dadad63714110e52 | def dump(self):
'Generates an IPP Message.\n \n Returns the message as a string of text.\n '
mybuffer = []
if (None not in (self.version, self.operation_id)):
mybuffer.append((chr(self.version[0]) + chr(self.version[1])))
mybuffer.append(pack('>H', self.operation_id))
mybuffer.append(pack('>I', (self.request_id or 1)))
for attrtype in self.attributes_types:
for attribute in getattr(self, ('_%s_attributes' % attrtype)):
if attribute:
mybuffer.append(chr(self.tagvalues[('%s-attributes-tag' % attrtype)]))
for (attrname, value) in attribute:
nameprinted = 0
for (vtype, val) in value:
mybuffer.append(chr(self.tagvalues[vtype]))
if (not nameprinted):
mybuffer.append(pack('>H', len(attrname)))
mybuffer.append(attrname)
nameprinted = 1
else:
mybuffer.append(pack('>H', 0))
if (vtype in ('integer', 'enum')):
mybuffer.append(pack('>H', 4))
mybuffer.append(pack('>I', val))
elif (vtype == 'boolean'):
mybuffer.append(pack('>H', 1))
mybuffer.append(chr(val))
else:
mybuffer.append(pack('>H', len(val)))
mybuffer.append(val)
mybuffer.append(chr(self.tagvalues['end-of-attributes-tag']))
mybuffer.append(self.data)
return ''.join(mybuffer) | Generates an IPP Message.
Returns the message as a string of text. | plugins/helpers/pkipplib.py | dump | renesugar/mac_apt | 1 | python | def dump(self):
'Generates an IPP Message.\n \n Returns the message as a string of text.\n '
mybuffer = []
if (None not in (self.version, self.operation_id)):
mybuffer.append((chr(self.version[0]) + chr(self.version[1])))
mybuffer.append(pack('>H', self.operation_id))
mybuffer.append(pack('>I', (self.request_id or 1)))
for attrtype in self.attributes_types:
for attribute in getattr(self, ('_%s_attributes' % attrtype)):
if attribute:
mybuffer.append(chr(self.tagvalues[('%s-attributes-tag' % attrtype)]))
for (attrname, value) in attribute:
nameprinted = 0
for (vtype, val) in value:
mybuffer.append(chr(self.tagvalues[vtype]))
if (not nameprinted):
mybuffer.append(pack('>H', len(attrname)))
mybuffer.append(attrname)
nameprinted = 1
else:
mybuffer.append(pack('>H', 0))
if (vtype in ('integer', 'enum')):
mybuffer.append(pack('>H', 4))
mybuffer.append(pack('>I', val))
elif (vtype == 'boolean'):
mybuffer.append(pack('>H', 1))
mybuffer.append(chr(val))
else:
mybuffer.append(pack('>H', len(val)))
mybuffer.append(val)
mybuffer.append(chr(self.tagvalues['end-of-attributes-tag']))
mybuffer.append(self.data)
return ''.join(mybuffer) | def dump(self):
'Generates an IPP Message.\n \n Returns the message as a string of text.\n '
mybuffer = []
if (None not in (self.version, self.operation_id)):
mybuffer.append((chr(self.version[0]) + chr(self.version[1])))
mybuffer.append(pack('>H', self.operation_id))
mybuffer.append(pack('>I', (self.request_id or 1)))
for attrtype in self.attributes_types:
for attribute in getattr(self, ('_%s_attributes' % attrtype)):
if attribute:
mybuffer.append(chr(self.tagvalues[('%s-attributes-tag' % attrtype)]))
for (attrname, value) in attribute:
nameprinted = 0
for (vtype, val) in value:
mybuffer.append(chr(self.tagvalues[vtype]))
if (not nameprinted):
mybuffer.append(pack('>H', len(attrname)))
mybuffer.append(attrname)
nameprinted = 1
else:
mybuffer.append(pack('>H', 0))
if (vtype in ('integer', 'enum')):
mybuffer.append(pack('>H', 4))
mybuffer.append(pack('>I', val))
elif (vtype == 'boolean'):
mybuffer.append(pack('>H', 1))
mybuffer.append(chr(val))
else:
mybuffer.append(pack('>H', len(val)))
mybuffer.append(val)
mybuffer.append(chr(self.tagvalues['end-of-attributes-tag']))
mybuffer.append(self.data)
return ''.join(mybuffer)<|docstring|>Generates an IPP Message.
Returns the message as a string of text.<|endoftext|> |
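dump() above and the parse()/parseTag() records that follow are inverses over the implemented subset; a hedged round-trip sketch against upstream pkipplib (Python 2 era API, class name assumed IPPRequest):

```python
from pkipplib import pkipplib

req = pkipplib.IPPRequest(operation_id=0x000B, request_id=1)
req.operation["attributes-charset"] = ("charset", "utf-8")
wire = req.dump()              # version bytes + op id + request id + tagged groups

echo = pkipplib.IPPRequest(wire)
echo.parse()
print(echo.operation_id == 0x000B)             # True
print(echo.operation["attributes-charset"])    # [('charset', 'utf-8')]
```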
c4940cdc29554912018ce06a73ae6aeb87cb0919261b5a850ba672dcb714b687 | def parse(self):
'Parses an IPP Request.\n \n NB : Only a subset of RFC2910 is implemented.\n '
self._curname = None
self._curattributes = None
self.setVersion((self._data[0], self._data[1]))
self.setOperationId(unpack('>H', self._data[2:4])[0])
self.setRequestId(unpack('>I', self._data[4:8])[0])
self.position = 8
endofattributes = self.tagvalues['end-of-attributes-tag']
maxdelimiter = self.tagvalues['event_notification-attributes-tag']
nulloffset = (lambda : 0)
try:
tag = self._data[self.position]
while (tag != endofattributes):
self.position += 1
name = self.tags[tag]
if (name is not None):
func = getattr(self, name.replace('-', '_'), nulloffset)
self.position += func()
if (self._data[self.position] > maxdelimiter):
self.position -= 1
continue
oldtag = tag
tag = self._data[self.position]
if (tag == oldtag):
self._curattributes.append([])
except IndexError:
raise IPPError('Unexpected end of IPP message.')
self.data = self._data[(self.position + 1):]
self.parsed = True | Parses an IPP Request.
NB : Only a subset of RFC2910 is implemented. | plugins/helpers/pkipplib.py | parse | renesugar/mac_apt | 1 | python | def parse(self):
'Parses an IPP Request.\n \n NB : Only a subset of RFC2910 is implemented.\n '
self._curname = None
self._curattributes = None
self.setVersion((self._data[0], self._data[1]))
self.setOperationId(unpack('>H', self._data[2:4])[0])
self.setRequestId(unpack('>I', self._data[4:8])[0])
self.position = 8
endofattributes = self.tagvalues['end-of-attributes-tag']
maxdelimiter = self.tagvalues['event_notification-attributes-tag']
nulloffset = (lambda : 0)
try:
tag = self._data[self.position]
while (tag != endofattributes):
self.position += 1
name = self.tags[tag]
if (name is not None):
func = getattr(self, name.replace('-', '_'), nulloffset)
self.position += func()
if (self._data[self.position] > maxdelimiter):
self.position -= 1
continue
oldtag = tag
tag = self._data[self.position]
if (tag == oldtag):
self._curattributes.append([])
except IndexError:
raise IPPError('Unexpected end of IPP message.')
self.data = self._data[(self.position + 1):]
self.parsed = True | def parse(self):
'Parses an IPP Request.\n \n NB : Only a subset of RFC2910 is implemented.\n '
self._curname = None
self._curattributes = None
self.setVersion((self._data[0], self._data[1]))
self.setOperationId(unpack('>H', self._data[2:4])[0])
self.setRequestId(unpack('>I', self._data[4:8])[0])
self.position = 8
endofattributes = self.tagvalues['end-of-attributes-tag']
maxdelimiter = self.tagvalues['event_notification-attributes-tag']
nulloffset = (lambda : 0)
try:
tag = self._data[self.position]
while (tag != endofattributes):
self.position += 1
name = self.tags[tag]
if (name is not None):
func = getattr(self, name.replace('-', '_'), nulloffset)
self.position += func()
if (self._data[self.position] > maxdelimiter):
self.position -= 1
continue
oldtag = tag
tag = self._data[self.position]
if (tag == oldtag):
self._curattributes.append([])
except IndexError:
raise IPPError('Unexpected end of IPP message.')
self.data = self._data[(self.position + 1):]
self.parsed = True<|docstring|>Parses an IPP Request.
NB : Only a subset of RFC2910 is implemented.<|endoftext|> |
4fcf94f305aee9c599af2b49fee4585e0e42e392f8b88eee32df6c7ab952122d | def parseTag(self):
'Extracts information from an IPP tag.'
pos = self.position
tagtype = self.tags[self._data[pos]]
pos += 1
posend = pos2 = (pos + 2)
namelength = unpack('>H', self._data[pos:pos2])[0]
if (not namelength):
name = self._curname
else:
posend += namelength
self._curname = name = self._data[pos2:posend].decode('utf8')
pos2 = (posend + 2)
valuelength = unpack('>H', self._data[posend:pos2])[0]
posend = (pos2 + valuelength)
value = self._data[pos2:posend]
if (tagtype in ('integer', 'enum')):
value = unpack('>I', value)[0]
elif (tagtype == 'boolean'):
value = bool(value)
try:
(oldname, oldval) = self._curattributes[(- 1)][(- 1)]
if (oldname == name):
oldval.append((tagtype, value))
else:
raise IndexError()
except IndexError:
self._curattributes[(- 1)].append((name, [(tagtype, value)]))
self.logDebug(('%s(%s) : %s' % (name, tagtype, value)))
return (posend - self.position) | Extracts information from an IPP tag. | plugins/helpers/pkipplib.py | parseTag | renesugar/mac_apt | 1 | python | def parseTag(self):
pos = self.position
tagtype = self.tags[self._data[pos]]
pos += 1
posend = pos2 = (pos + 2)
namelength = unpack('>H', self._data[pos:pos2])[0]
if (not namelength):
name = self._curname
else:
posend += namelength
self._curname = name = self._data[pos2:posend].decode('utf8')
pos2 = (posend + 2)
valuelength = unpack('>H', self._data[posend:pos2])[0]
posend = (pos2 + valuelength)
value = self._data[pos2:posend]
if (tagtype in ('integer', 'enum')):
value = unpack('>I', value)[0]
elif (tagtype == 'boolean'):
value = bool(value)
try:
(oldname, oldval) = self._curattributes[(- 1)][(- 1)]
if (oldname == name):
oldval.append((tagtype, value))
else:
raise IndexError()
except IndexError:
self._curattributes[(- 1)].append((name, [(tagtype, value)]))
self.logDebug(('%s(%s) : %s' % (name, tagtype, value)))
return (posend - self.position) | def parseTag(self):
pos = self.position
tagtype = self.tags[self._data[pos]]
pos += 1
posend = pos2 = (pos + 2)
namelength = unpack('>H', self._data[pos:pos2])[0]
if (not namelength):
name = self._curname
else:
posend += namelength
self._curname = name = self._data[pos2:posend].decode('utf8')
pos2 = (posend + 2)
valuelength = unpack('>H', self._data[posend:pos2])[0]
posend = (pos2 + valuelength)
value = self._data[pos2:posend]
if (tagtype in ('integer', 'enum')):
value = unpack('>I', value)[0]
elif (tagtype == 'boolean'):
value = bool(value)
try:
(oldname, oldval) = self._curattributes[(- 1)][(- 1)]
if (oldname == name):
oldval.append((tagtype, value))
else:
raise IndexError()
except IndexError:
self._curattributes[(- 1)].append((name, [(tagtype, value)]))
self.logDebug(('%s(%s) : %s' % (name, tagtype, value)))
return (posend - self.position)<|docstring|>Extracts information from an IPP tag.<|endoftext|> |
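parseTag above decodes one RFC 2910 attribute: a value-tag byte, a 2-byte big-endian name length, the name, a 2-byte value length, then the value. A standalone sketch of the same offsets on a hand-packed attribute:

    from struct import pack, unpack

    name, value = b'attributes-charset', b'utf-8'
    attr = b'\x47' + pack('>H', len(name)) + name + pack('>H', len(value)) + value

    pos = 1                                            # skip the value-tag byte
    namelength = unpack('>H', attr[pos:pos + 2])[0]
    assert attr[pos + 2:pos + 2 + namelength] == name
    pos2 = pos + 2 + namelength
    valuelength = unpack('>H', attr[pos2:pos2 + 2])[0]
    assert attr[pos2 + 2:pos2 + 2 + valuelength] == value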
559bb0dc6e6d63c8c64dacd9d7681b5f64fedd4e268d5fa6e0bbcb30170875de | def operation_attributes_tag(self):
'Indicates that the parser enters into an operation-attributes-tag group.'
self._curattributes = self._operation_attributes
return self.parseTag() | Indicates that the parser enters into an operation-attributes-tag group. | plugins/helpers/pkipplib.py | operation_attributes_tag | renesugar/mac_apt | 1 | python | def operation_attributes_tag(self):
self._curattributes = self._operation_attributes
return self.parseTag() | def operation_attributes_tag(self):
self._curattributes = self._operation_attributes
return self.parseTag()<|docstring|>Indicates that the parser enters into an operation-attributes-tag group.<|endoftext|> |
554189674a544cb818525dcbb28ae4e27c7ffe5ce1c83f28eb5365e808ce4e97 | def job_attributes_tag(self):
'Indicates that the parser enters into a job-attributes-tag group.'
self._curattributes = self._job_attributes
return self.parseTag() | Indicates that the parser enters into a job-attributes-tag group. | plugins/helpers/pkipplib.py | job_attributes_tag | renesugar/mac_apt | 1 | python | def job_attributes_tag(self):
self._curattributes = self._job_attributes
return self.parseTag() | def job_attributes_tag(self):
self._curattributes = self._job_attributes
return self.parseTag()<|docstring|>Indicates that the parser enters into a job-attributes-tag group.<|endoftext|> |
a9456739a61977692f56793df3e18045e2cbe0fa21958e2d8cf0adca89fb5c10 | def printer_attributes_tag(self):
'Indicates that the parser enters into a printer-attributes-tag group.'
self._curattributes = self._printer_attributes
return self.parseTag() | Indicates that the parser enters into a printer-attributes-tag group. | plugins/helpers/pkipplib.py | printer_attributes_tag | renesugar/mac_apt | 1 | python | def printer_attributes_tag(self):
self._curattributes = self._printer_attributes
return self.parseTag() | def printer_attributes_tag(self):
self._curattributes = self._printer_attributes
return self.parseTag()<|docstring|>Indicates that the parser enters into a printer-attributes-tag group.<|endoftext|> |
02fb07e4c212a57f4fc4a14654eea58092de6ffc28f0fbe8a6f6a7f59463fadd | def unsupported_attributes_tag(self):
'Indicates that the parser enters into an unsupported-attributes-tag group.'
self._curattributes = self._unsupported_attributes
return self.parseTag() | Indicates that the parser enters into an unsupported-attributes-tag group. | plugins/helpers/pkipplib.py | unsupported_attributes_tag | renesugar/mac_apt | 1 | python | def unsupported_attributes_tag(self):
self._curattributes = self._unsupported_attributes
return self.parseTag() | def unsupported_attributes_tag(self):
self._curattributes = self._unsupported_attributes
return self.parseTag()<|docstring|>Indicates that the parser enters into an unsupported-attributes-tag group.<|endoftext|> |
b981df372d1aa973ff74574c7f446122ccc708ebe1fb19ef6e42455e4cd2849c | def subscription_attributes_tag(self):
'Indicates that the parser enters into a subscription-attributes-tag group.'
self._curattributes = self._subscription_attributes
return self.parseTag() | Indicates that the parser enters into a subscription-attributes-tag group. | plugins/helpers/pkipplib.py | subscription_attributes_tag | renesugar/mac_apt | 1 | python | def subscription_attributes_tag(self):
self._curattributes = self._subscription_attributes
return self.parseTag() | def subscription_attributes_tag(self):
self._curattributes = self._subscription_attributes
return self.parseTag()<|docstring|>Indicates that the parser enters into a subscription-attributes-tag group.<|endoftext|> |
144066b63460080a614605b699f0ce3f4118a35d64cbb8f84382a376750d9e97 | def event_notification_attributes_tag(self):
'Indicates that the parser enters into an event-notification-attributes-tag group.'
self._curattributes = self._event_notification_attributes
return self.parseTag() | Indicates that the parser enters into an event-notification-attributes-tag group. | plugins/helpers/pkipplib.py | event_notification_attributes_tag | renesugar/mac_apt | 1 | python | def event_notification_attributes_tag(self):
self._curattributes = self._event_notification_attributes
return self.parseTag() | def event_notification_attributes_tag(self):
self._curattributes = self._event_notification_attributes
return self.parseTag()<|docstring|>Indicates that the parser enters into an event-notification-attributes-tag group.<|endoftext|> |
e58bfffe52a3dc8adedebb412632a50dc6041d5dc1b95ca8fbe44484dfd767da | def __init__(self, url=None, username=None, password=None, charset='utf-8', language='en-us', debug=False):
'Initializes the CUPS instance.'
if (url is not None):
self.url = url.replace('ipp://', 'http://')
if self.url.endswith('/'):
self.url = self.url[:(- 1)]
else:
self.url = self.getDefaultURL()
self.username = username
self.password = password
self.charset = charset
self.language = language
self.debug = debug
self.lastError = None
self.lastErrorMessage = None
self.requestId = None | Initializes the CUPS instance. | plugins/helpers/pkipplib.py | __init__ | renesugar/mac_apt | 1 | python | def __init__(self, url=None, username=None, password=None, charset='utf-8', language='en-us', debug=False):
if (url is not None):
self.url = url.replace('ipp://', 'http://')
if self.url.endswith('/'):
self.url = self.url[:(- 1)]
else:
self.url = self.getDefaultURL()
self.username = username
self.password = password
self.charset = charset
self.language = language
self.debug = debug
self.lastError = None
self.lastErrorMessage = None
self.requestId = None | def __init__(self, url=None, username=None, password=None, charset='utf-8', language='en-us', debug=False):
if (url is not None):
self.url = url.replace('ipp://', 'http://')
if self.url.endswith('/'):
self.url = self.url[:(- 1)]
else:
self.url = self.getDefaultURL()
self.username = username
self.password = password
self.charset = charset
self.language = language
self.debug = debug
self.lastError = None
self.lastErrorMessage = None
self.requestId = None<|docstring|>Initializes the CUPS instance.<|endoftext|> |
b21aeb5663819d12e788b9b3d8d4980d17ab5feccff559ffc7d8de7707a918d8 | def getDefaultURL(self):
'Builds a default URL.'
server = (os.environ.get('CUPS_SERVER') or 'localhost')
port = (os.environ.get('IPP_PORT') or 631)
if server.startswith('/'):
return ('http://localhost:%s' % port)
else:
return ('http://%s:%s' % (server, port)) | Builds a default URL. | plugins/helpers/pkipplib.py | getDefaultURL | renesugar/mac_apt | 1 | python | def getDefaultURL(self):
server = (os.environ.get('CUPS_SERVER') or 'localhost')
port = (os.environ.get('IPP_PORT') or 631)
if server.startswith('/'):
return ('http://localhost:%s' % port)
else:
return ('http://%s:%s' % (server, port)) | def getDefaultURL(self):
server = (os.environ.get('CUPS_SERVER') or 'localhost')
port = (os.environ.get('IPP_PORT') or 631)
if server.startswith('/'):
return ('http://localhost:%s' % port)
else:
return ('http://%s:%s' % (server, port))<|docstring|>Builds a default URL.<|endoftext|> |
d431ed5fad43917f5e10e2fc4bcabfca169225567a8b7aae9a02d6be8375514c | def identifierToURI(self, service, ident):
'Transforms an identifier into a particular URI depending on requested service.'
return ('%s/%s/%s' % (self.url.replace('http://', 'ipp://'), service, ident)) | Transforms an identifier into a particular URI depending on requested service. | plugins/helpers/pkipplib.py | identifierToURI | renesugar/mac_apt | 1 | python | def identifierToURI(self, service, ident):
return ('%s/%s/%s' % (self.url.replace('http://', 'ipp://'), service, ident)) | def identifierToURI(self, service, ident):
return ('%s/%s/%s' % (self.url.replace('http://', 'ipp://'), service, ident))<|docstring|>Transforms an identifier into a particular URI depending on requested service.<|endoftext|> |
43bf782bf422da755cb66f9fb7e0ac8804943d945e6ce8ae4b5e23d3366ff517 | def nextRequestId(self):
'Increments the current request id and returns the new value.'
try:
self.requestId += 1
except TypeError:
self.requestId = 1
return self.requestId | Increments the current request id and returns the new value. | plugins/helpers/pkipplib.py | nextRequestId | renesugar/mac_apt | 1 | python | def nextRequestId(self):
try:
self.requestId += 1
except TypeError:
self.requestId = 1
return self.requestId | def nextRequestId(self):
try:
self.requestId += 1
except TypeError:
self.requestId = 1
return self.requestId<|docstring|>Increments the current request id and returns the new value.<|endoftext|> |
a79356f793e62fd76b60ae4bc0f3a86bfa73ec47c5875c7d969ae10bdad8b1e0 | def newRequest(self, operationid=None):
'Generates a new empty request.'
if (operationid is not None):
req = IPPRequest(operation_id=operationid, request_id=self.nextRequestId(), debug=self.debug)
req.operation['attributes-charset'] = ('charset', self.charset)
req.operation['attributes-natural-language'] = ('naturalLanguage', self.language)
return req | Generates a new empty request. | plugins/helpers/pkipplib.py | newRequest | renesugar/mac_apt | 1 | python | def newRequest(self, operationid=None):
if (operationid is not None):
req = IPPRequest(operation_id=operationid, request_id=self.nextRequestId(), debug=self.debug)
req.operation['attributes-charset'] = ('charset', self.charset)
req.operation['attributes-natural-language'] = ('naturalLanguage', self.language)
return req | def newRequest(self, operationid=None):
if (operationid is not None):
req = IPPRequest(operation_id=operationid, request_id=self.nextRequestId(), debug=self.debug)
req.operation['attributes-charset'] = ('charset', self.charset)
req.operation['attributes-natural-language'] = ('naturalLanguage', self.language)
return req<|docstring|>Generates a new empty request.<|endoftext|> |
88fce4f901460a874fe5379a8ab89ec055b6c081e428525061fed0393560f207 | def doRequest(self, req, url=None):
'Sends a request to the CUPS server.\n returns a new IPPRequest object, containing the parsed answer.\n '
'connexion = urllib2.Request(url=url or self.url, data=req.dump())\n connexion.add_header("Content-Type", "application/ipp")\n if self.username :\n pwmanager = urllib2.HTTPPasswordMgrWithDefaultRealm()\n pwmanager.add_password(None, "%s%s" % (connexion.get_host(), connexion.get_selector()), self.username, self.password or "")\n authhandler = urllib2.HTTPBasicAuthHandler(pwmanager) \n opener = urllib2.build_opener(authhandler)\n urllib2.install_opener(opener)\n self.lastError = None \n self.lastErrorMessage = None\n try : \n response = urllib2.urlopen(connexion)\n except (urllib2.URLError, urllib2.HTTPError, socket.error) as error : \n self.lastError = error\n self.lastErrorMessage = str(error)\n return None\n else : \n datas = response.read()\n ippresponse = IPPRequest(datas)\n ippresponse.parse()\n return ippresponse\n '
raise IPPError('doRequest() not implemented!') | Sends a request to the CUPS server.
returns a new IPPRequest object, containing the parsed answer. | plugins/helpers/pkipplib.py | doRequest | renesugar/mac_apt | 1 | python | def doRequest(self, req, url=None):
'Sends a request to the CUPS server.\n returns a new IPPRequest object, containing the parsed answer.\n '
'connexion = urllib2.Request(url=url or self.url, data=req.dump())\n connexion.add_header("Content-Type", "application/ipp")\n if self.username :\n pwmanager = urllib2.HTTPPasswordMgrWithDefaultRealm()\n pwmanager.add_password(None, "%s%s" % (connexion.get_host(), connexion.get_selector()), self.username, self.password or "")\n authhandler = urllib2.HTTPBasicAuthHandler(pwmanager) \n opener = urllib2.build_opener(authhandler)\n urllib2.install_opener(opener)\n self.lastError = None \n self.lastErrorMessage = None\n try : \n response = urllib2.urlopen(connexion)\n except (urllib2.URLError, urllib2.HTTPError, socket.error) as error : \n self.lastError = error\n self.lastErrorMessage = str(error)\n return None\n else : \n datas = response.read()\n ippresponse = IPPRequest(datas)\n ippresponse.parse()\n return ippresponse\n '
raise IPPError('doRequest() not implemented!') | def doRequest(self, req, url=None):
'Sends a request to the CUPS server.\n returns a new IPPRequest object, containing the parsed answer.\n '
'connexion = urllib2.Request(url=url or self.url, data=req.dump())\n connexion.add_header("Content-Type", "application/ipp")\n if self.username :\n pwmanager = urllib2.HTTPPasswordMgrWithDefaultRealm()\n pwmanager.add_password(None, "%s%s" % (connexion.get_host(), connexion.get_selector()), self.username, self.password or "")\n authhandler = urllib2.HTTPBasicAuthHandler(pwmanager) \n opener = urllib2.build_opener(authhandler)\n urllib2.install_opener(opener)\n self.lastError = None \n self.lastErrorMessage = None\n try : \n response = urllib2.urlopen(connexion)\n except (urllib2.URLError, urllib2.HTTPError, socket.error) as error : \n self.lastError = error\n self.lastErrorMessage = str(error)\n return None\n else : \n datas = response.read()\n ippresponse = IPPRequest(datas)\n ippresponse.parse()\n return ippresponse\n '
raise IPPError('doRequest() not implemented!')<|docstring|>Sends a request to the CUPS server.
returns a new IPPRequest object, containing the parsed answer.<|endoftext|> |
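The stored body above deliberately raises IPPError and keeps the old Python 2 urllib2 transport only as a dead string. A hedged Python 3 port of that dead code could look roughly as follows; req.dump() and IPPRequest are taken from the dead string, and nothing here is part of the stored body.

    import urllib.request

    def do_request_py3(self, req, url=None):
        # sketch of a replacement body for CUPS.doRequest, assuming Python 3
        target = url or self.url
        request = urllib.request.Request(url=target, data=req.dump(),
                                         headers={'Content-Type': 'application/ipp'})
        opener = urllib.request.build_opener()
        if self.username:
            pwmanager = urllib.request.HTTPPasswordMgrWithDefaultRealm()
            pwmanager.add_password(None, target, self.username, self.password or '')
            opener = urllib.request.build_opener(
                urllib.request.HTTPBasicAuthHandler(pwmanager))
        self.lastError = None
        self.lastErrorMessage = None
        try:
            response = opener.open(request)
        except OSError as error:                 # URLError subclasses OSError in Python 3
            self.lastError = error
            self.lastErrorMessage = str(error)
            return None
        ippresponse = IPPRequest(response.read())
        ippresponse.parse()
        return ippresponse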
7258921fb0e54e6674a7fbed409b0c5a059995e9edf4231a58ee78c4723eefa0 | def getPPD(self, queuename):
'Retrieves the PPD for a particular queuename.'
req = self.newRequest(IPP_GET_PRINTER_ATTRIBUTES)
req.operation['printer-uri'] = ('uri', self.identifierToURI('printers', queuename))
for attrib in ('printer-uri-supported', 'printer-type', 'member-uris'):
req.operation['requested-attributes'] = ('nameWithoutLanguage', attrib)
return self.doRequest(req) | Retrieves the PPD for a particular queuename. | plugins/helpers/pkipplib.py | getPPD | renesugar/mac_apt | 1 | python | def getPPD(self, queuename):
req = self.newRequest(IPP_GET_PRINTER_ATTRIBUTES)
req.operation['printer-uri'] = ('uri', self.identifierToURI('printers', queuename))
for attrib in ('printer-uri-supported', 'printer-type', 'member-uris'):
req.operation['requested-attributes'] = ('nameWithoutLanguage', attrib)
return self.doRequest(req) | def getPPD(self, queuename):
req = self.newRequest(IPP_GET_PRINTER_ATTRIBUTES)
req.operation['printer-uri'] = ('uri', self.identifierToURI('printers', queuename))
for attrib in ('printer-uri-supported', 'printer-type', 'member-uris'):
req.operation['requested-attributes'] = ('nameWithoutLanguage', attrib)
return self.doRequest(req)<|docstring|>Retrieves the PPD for a particular queuename.<|endoftext|> |
e820a66fe9409030fb0dae9aa0b9b444cf6fb626256365430c3dde04b15a18c9 | def getDefault(self):
"Retrieves CUPS' default printer."
return self.doRequest(self.newRequest(CUPS_GET_DEFAULT)) | Retrieves CUPS' default printer. | plugins/helpers/pkipplib.py | getDefault | renesugar/mac_apt | 1 | python | def getDefault(self):
return self.doRequest(self.newRequest(CUPS_GET_DEFAULT)) | def getDefault(self):
return self.doRequest(self.newRequest(CUPS_GET_DEFAULT))<|docstring|>Retrieves CUPS' default printer.<|endoftext|> |
2692688f0c0cffd5ccfa62c7f31799071e832188b396464dd324de27e7e03a6e | def getJobAttributes(self, jobid):
"Retrieves a print job's attributes."
req = self.newRequest(IPP_GET_JOB_ATTRIBUTES)
req.operation['job-uri'] = ('uri', self.identifierToURI('jobs', jobid))
return self.doRequest(req) | Retrieves a print job's attributes. | plugins/helpers/pkipplib.py | getJobAttributes | renesugar/mac_apt | 1 | python | def getJobAttributes(self, jobid):
req = self.newRequest(IPP_GET_JOB_ATTRIBUTES)
req.operation['job-uri'] = ('uri', self.identifierToURI('jobs', jobid))
return self.doRequest(req) | def getJobAttributes(self, jobid):
req = self.newRequest(IPP_GET_JOB_ATTRIBUTES)
req.operation['job-uri'] = ('uri', self.identifierToURI('jobs', jobid))
return self.doRequest(req)<|docstring|>Retrieves a print job's attributes.<|endoftext|> |
c03d6cc52e1bdf944b6d94d83cc2ea8ae0e848459dc0dafe685dbd2512bc0436 | def getPrinters(self):
'Returns the list of print queue names.'
req = self.newRequest(CUPS_GET_PRINTERS)
req.operation['requested-attributes'] = ('keyword', 'printer-name')
req.operation['printer-type'] = ('enum', 0)
req.operation['printer-type-mask'] = ('enum', CUPS_PRINTER_CLASS)
return [printer[1] for printer in self.doRequest(req).printer['printer-name']] | Returns the list of print queue names. | plugins/helpers/pkipplib.py | getPrinters | renesugar/mac_apt | 1 | python | def getPrinters(self):
'Returns the list of print queue names.'
req.operation['requested-attributes'] = ('keyword', 'printer-name')
req.operation['printer-type'] = ('enum', 0)
req.operation['printer-type-mask'] = ('enum', CUPS_PRINTER_CLASS)
return [printer[1] for printer in self.doRequest(req).printer['printer-name']] | def getPrinters(self):
req = self.newRequest(CUPS_GET_PRINTERS)
req.operation['requested-attributes'] = ('keyword', 'printer-name')
req.operation['printer-type'] = ('enum', 0)
req.operation['printer-type-mask'] = ('enum', CUPS_PRINTER_CLASS)
return [printer[1] for printer in self.doRequest(req).printer['printer-name']]<|docstring|>Returns the list of print queue names.<|endoftext|>
0b7043f89c062d3dbe60b2cfe505f411831442d16e683dd09490d6ce2e63f62a | def getDevices(self):
'Returns a list of devices as (deviceclass, deviceinfo, devicemakeandmodel, deviceuri) tuples.'
answer = self.doRequest(self.newRequest(CUPS_GET_DEVICES))
return zip([d[1] for d in answer.printer['device-class']], [d[1] for d in answer.printer['device-info']], [d[1] for d in answer.printer['device-make-and-model']], [d[1] for d in answer.printer['device-uri']]) | Returns a list of devices as (deviceclass, deviceinfo, devicemakeandmodel, deviceuri) tuples. | plugins/helpers/pkipplib.py | getDevices | renesugar/mac_apt | 1 | python | def getDevices(self):
answer = self.doRequest(self.newRequest(CUPS_GET_DEVICES))
return zip([d[1] for d in answer.printer['device-class']], [d[1] for d in answer.printer['device-info']], [d[1] for d in answer.printer['device-make-and-model']], [d[1] for d in answer.printer['device-uri']]) | def getDevices(self):
answer = self.doRequest(self.newRequest(CUPS_GET_DEVICES))
return zip([d[1] for d in answer.printer['device-class']], [d[1] for d in answer.printer['device-info']], [d[1] for d in answer.printer['device-make-and-model']], [d[1] for d in answer.printer['device-uri']])<|docstring|>Returns a list of devices as (deviceclass, deviceinfo, devicemakeandmodel, deviceuri) tuples.<|endoftext|> |
cc373fc5c49ca8588bfef16451d20c0b273155127447a6e2c6e8c8921fcc9394 | def getPPDs(self):
'Returns a list of PPDs as (ppdnaturallanguage, ppdmake, ppdmakeandmodel, ppdname) tuples.'
answer = self.doRequest(self.newRequest(CUPS_GET_PPDS))
return zip([d[1] for d in answer.printer['ppd-natural-language']], [d[1] for d in answer.printer['ppd-make']], [d[1] for d in answer.printer['ppd-make-and-model']], [d[1] for d in answer.printer['ppd-name']]) | Returns a list of PPDs as (ppdnaturallanguage, ppdmake, ppdmakeandmodel, ppdname) tuples. | plugins/helpers/pkipplib.py | getPPDs | renesugar/mac_apt | 1 | python | def getPPDs(self):
answer = self.doRequest(self.newRequest(CUPS_GET_PPDS))
return zip([d[1] for d in answer.printer['ppd-natural-language']], [d[1] for d in answer.printer['ppd-make']], [d[1] for d in answer.printer['ppd-make-and-model']], [d[1] for d in answer.printer['ppd-name']]) | def getPPDs(self):
answer = self.doRequest(self.newRequest(CUPS_GET_PPDS))
return zip([d[1] for d in answer.printer['ppd-natural-language']], [d[1] for d in answer.printer['ppd-make']], [d[1] for d in answer.printer['ppd-make-and-model']], [d[1] for d in answer.printer['ppd-name']])<|docstring|>Returns a list of PPDs as (ppdnaturallanguage, ppdmake, ppdmakeandmodel, ppdname) tuples.<|endoftext|> |
263ea5563efb86a56022a94b30abe46b4ea9eb42991ede75dc5280cd747cf04b | def createSubscription(self, uri, events=['all'], userdata=None, recipient=None, pullmethod=None, charset=None, naturallanguage=None, leaseduration=None, timeinterval=None, jobid=None):
'Creates a job, printer or server subscription.\n \n uri : the subscription\'s uri, e.g. ipp://server\n events : a list of events to subscribe to, e.g. ["printer-added", "printer-deleted"]\n recipient : the notifier\'s uri\n pullmethod : the pull method to use\n charset : the charset to use when sending notifications\n naturallanguage : the language to use when sending notifications\n leaseduration : the duration of the lease in seconds\n timeinterval : the interval of time during notifications\n jobid : the optional job id in case of a job subscription\n '
if (jobid is not None):
opid = IPP_CREATE_JOB_SUBSCRIPTION
uritype = 'job-uri'
else:
opid = IPP_CREATE_PRINTER_SUBSCRIPTION
uritype = 'printer-uri'
req = self.newRequest(opid)
req.operation[uritype] = ('uri', uri)
for event in events:
req.subscription['notify-events'] = ('keyword', event)
if (userdata is not None):
req.subscription['notify-user-data'] = ('octetString-with-an-unspecified-format', userdata)
if (recipient is not None):
req.subscription['notify-recipient'] = ('uri', recipient)
if (pullmethod is not None):
req.subscription['notify-pull-method'] = ('keyword', pullmethod)
if (charset is not None):
req.subscription['notify-charset'] = ('charset', charset)
if (naturallanguage is not None):
req.subscription['notify-natural-language'] = ('naturalLanguage', naturallanguage)
if (leaseduration is not None):
req.subscription['notify-lease-duration'] = ('integer', leaseduration)
if (timeinterval is not None):
req.subscription['notify-time-interval'] = ('integer', timeinterval)
if (jobid is not None):
req.subscription['notify-job-id'] = ('integer', jobid)
return self.doRequest(req) | Creates a job, printer or server subscription.
uri : the subscription's uri, e.g. ipp://server
events : a list of events to subscribe to, e.g. ["printer-added", "printer-deleted"]
recipient : the notifier's uri
pullmethod : the pull method to use
charset : the charset to use when sending notifications
naturallanguage : the language to use when sending notifications
leaseduration : the duration of the lease in seconds
timeinterval : the interval of time during notifications
jobid : the optional job id in case of a job subscription | plugins/helpers/pkipplib.py | createSubscription | renesugar/mac_apt | 1 | python | def createSubscription(self, uri, events=['all'], userdata=None, recipient=None, pullmethod=None, charset=None, naturallanguage=None, leaseduration=None, timeinterval=None, jobid=None):
'Creates a job, printer or server subscription.\n \n uri : the subscription\'s uri, e.g. ipp://server\n events : a list of events to subscribe to, e.g. ["printer-added", "printer-deleted"]\n recipient : the notifier\'s uri\n pullmethod : the pull method to use\n charset : the charset to use when sending notifications\n naturallanguage : the language to use when sending notifications\n leaseduration : the duration of the lease in seconds\n timeinterval : the interval of time during notifications\n jobid : the optional job id in case of a job subscription\n '
if (jobid is not None):
opid = IPP_CREATE_JOB_SUBSCRIPTION
uritype = 'job-uri'
else:
opid = IPP_CREATE_PRINTER_SUBSCRIPTION
uritype = 'printer-uri'
req = self.newRequest(opid)
req.operation[uritype] = ('uri', uri)
for event in events:
req.subscription['notify-events'] = ('keyword', event)
if (userdata is not None):
req.subscription['notify-user-data'] = ('octetString-with-an-unspecified-format', userdata)
if (recipient is not None):
req.subscription['notify-recipient'] = ('uri', recipient)
if (pullmethod is not None):
req.subscription['notify-pull-method'] = ('keyword', pullmethod)
if (charset is not None):
req.subscription['notify-charset'] = ('charset', charset)
if (naturallanguage is not None):
req.subscription['notify-natural-language'] = ('naturalLanguage', naturallanguage)
if (leaseduration is not None):
req.subscription['notify-lease-duration'] = ('integer', leaseduration)
if (timeinterval is not None):
req.subscription['notify-time-interval'] = ('integer', timeinterval)
if (jobid is not None):
req.subscription['notify-job-id'] = ('integer', jobid)
return self.doRequest(req) | def createSubscription(self, uri, events=['all'], userdata=None, recipient=None, pullmethod=None, charset=None, naturallanguage=None, leaseduration=None, timeinterval=None, jobid=None):
'Creates a job, printer or server subscription.\n \n uri : the subscription\'s uri, e.g. ipp://server\n events : a list of events to subscribe to, e.g. ["printer-added", "printer-deleted"]\n recipient : the notifier\'s uri\n pullmethod : the pull method to use\n charset : the charset to use when sending notifications\n naturallanguage : the language to use when sending notifications\n leaseduration : the duration of the lease in seconds\n timeinterval : the interval of time during notifications\n jobid : the optional job id in case of a job subscription\n '
if (jobid is not None):
opid = IPP_CREATE_JOB_SUBSCRIPTION
uritype = 'job-uri'
else:
opid = IPP_CREATE_PRINTER_SUBSCRIPTION
uritype = 'printer-uri'
req = self.newRequest(opid)
req.operation[uritype] = ('uri', uri)
for event in events:
req.subscription['notify-events'] = ('keyword', event)
if (userdata is not None):
req.subscription['notify-user-data'] = ('octetString-with-an-unspecified-format', userdata)
if (recipient is not None):
req.subscription['notify-recipient'] = ('uri', recipient)
if (pullmethod is not None):
req.subscription['notify-pull-method'] = ('keyword', pullmethod)
if (charset is not None):
req.subscription['notify-charset'] = ('charset', charset)
if (naturallanguage is not None):
req.subscription['notify-natural-language'] = ('naturalLanguage', naturallanguage)
if (leaseduration is not None):
req.subscription['notify-lease-duration'] = ('integer', leaseduration)
if (timeinterval is not None):
req.subscription['notify-time-interval'] = ('integer', timeinterval)
if (jobid is not None):
req.subscription['notify-job-id'] = ('integer', jobid)
return self.doRequest(req)<|docstring|>Creates a job, printer or server subscription.
uri : the subscription's uri, e.g. ipp://server
events : a list of events to subscribe to, e.g. ["printer-added", "printer-deleted"]
recipient : the notifier's uri
pullmethod : the pull method to use
charset : the charset to use when sending notifications
naturallanguage : the language to use when sending notifications
leaseduration : the duration of the lease in seconds
timeinterval : the interval of time during notifications
jobid : the optional job id in case of a job subscription<|endoftext|> |
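A usage sketch for createSubscription above; the server URL, notifier URI and credentials are hypothetical, and note that this vendored copy's doRequest only raises IPPError, so the call demonstrates request construction rather than a live round trip.

    cups = CUPS(url='http://localhost:631', username='admin', password='secret')
    answer = cups.createSubscription(
        'ipp://localhost:631',
        events=['printer-added', 'printer-deleted'],
        recipient='ipp://notifier.example.com:631',
        leaseduration=3600)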
074dff02adfdb364c39e77782fff0ca653aca5139b1f30fe2e31b462937c23ca | def cancelSubscription(self, uri, subscriptionid, jobid=None):
"Cancels a subscription.\n \n uri : the subscription's uri.\n subscriptionid : the subscription's id.\n jobid : the optional job's id.\n "
req = self.newRequest(IPP_CANCEL_SUBSCRIPTION)
if (jobid is not None):
uritype = 'job-uri'
else:
uritype = 'printer-uri'
req.operation[uritype] = ('uri', uri)
req.event_notification['notify-subscription-id'] = ('integer', subscriptionid)
return self.doRequest(req) | Cancels a subscription.
uri : the subscription's uri.
subscriptionid : the subscription's id.
jobid : the optional job's id. | plugins/helpers/pkipplib.py | cancelSubscription | renesugar/mac_apt | 1 | python | def cancelSubscription(self, uri, subscriptionid, jobid=None):
"Cancels a subscription.\n \n uri : the subscription's uri.\n subscriptionid : the subscription's id.\n jobid : the optional job's id.\n "
req = self.newRequest(IPP_CANCEL_SUBSCRIPTION)
if (jobid is not None):
uritype = 'job-uri'
else:
uritype = 'printer-uri'
req.operation[uritype] = ('uri', uri)
req.event_notification['notify-subscription-id'] = ('integer', subscriptionid)
return self.doRequest(req) | def cancelSubscription(self, uri, subscriptionid, jobid=None):
"Cancels a subscription.\n \n uri : the subscription's uri.\n subscriptionid : the subscription's id.\n jobid : the optional job's id.\n "
req = self.newRequest(IPP_CANCEL_SUBSCRIPTION)
if (jobid is not None):
uritype = 'job-uri'
else:
uritype = 'printer-uri'
req.operation[uritype] = ('uri', uri)
req.event_notification['notify-subscription-id'] = ('integer', subscriptionid)
return self.doRequest(req)<|docstring|>Cancels a subscription.
uri : the subscription's uri.
subscriptionid : the subscription's id.
jobid : the optional job's id.<|endoftext|> |
4bf97fe1156d2fecb051813ab882029bb74a9da05a858cc740fddc42aca010ff | def compute_metrics(rpeaks_truth: Sequence[Union[(np.ndarray, Sequence[int])]], rpeaks_pred: Sequence[Union[(np.ndarray, Sequence[int])]], fs: Real, thr: float=0.075, verbose: int=0) -> float:
' finished, checked,\n\n metric (scoring) function modified from the official one, with errors fixed\n\n Parameters\n ----------\n rpeaks_truth: sequence,\n sequence of ground truths of rpeaks locations from multiple records\n rpeaks_pred: sequence,\n predictions of rpeaks locations for multiple records\n fs: real number,\n sampling frequency of ECG signal\n thr: float, default 0.075,\n threshold for a prediction to be true positive,\n with units in seconds,\n verbose: int, default 0,\n print verbosity\n\n Returns\n -------\n rec_acc: float,\n accuracy of predictions\n '
assert (len(rpeaks_truth) == len(rpeaks_pred)), f'number of records does not match, truth indicates {len(rpeaks_truth)}, while pred indicates {len(rpeaks_pred)}'
n_records = len(rpeaks_truth)
record_flags = np.ones((len(rpeaks_truth),), dtype=float)
thr_ = (thr * fs)
if (verbose >= 1):
print(f'number of records = {n_records}')
print(f'threshold in number of sample points = {thr_}')
for (idx, (truth_arr, pred_arr)) in enumerate(zip(rpeaks_truth, rpeaks_pred)):
false_negative = 0
false_positive = 0
true_positive = 0
extended_truth_arr = np.concatenate((truth_arr.astype(int), [int((9.5 * fs))]))
for (j, t_ind) in enumerate(extended_truth_arr[:(- 1)]):
next_t_ind = extended_truth_arr[(j + 1)]
loc = np.where((np.abs((pred_arr - t_ind)) <= thr_))[0]
if (j == 0):
err = np.where(((pred_arr >= ((0.5 * fs) + thr_)) & (pred_arr <= (t_ind - thr_))))[0]
else:
err = np.array([], dtype=int)
err = np.append(err, np.where(((pred_arr >= (t_ind + thr_)) & (pred_arr <= (next_t_ind - thr_))))[0])
false_positive += len(err)
if (len(loc) >= 1):
true_positive += 1
false_positive += (len(loc) - 1)
elif (len(loc) == 0):
false_negative += 1
if ((false_negative + false_positive) > 1):
record_flags[idx] = 0
elif ((false_negative == 1) and (false_positive == 0)):
record_flags[idx] = 0.3
elif ((false_negative == 0) and (false_positive == 1)):
record_flags[idx] = 0.7
if (verbose >= 2):
print(f'''for the {idx}-th record,
true positive = {true_positive}
false positive = {false_positive}
false negative = {false_negative}''')
rec_acc = round((np.sum(record_flags) / n_records), 4)
print(f'QRS_acc: {rec_acc}')
print('Scoring complete.')
return rec_acc | finished, checked,
metric (scoring) function modified from the official one, with errors fixed
Parameters
----------
rpeaks_truth: sequence,
sequence of ground truths of rpeaks locations from multiple records
rpeaks_pred: sequence,
predictions of rpeaks locations for multiple records
fs: real number,
sampling frequency of ECG signal
thr: float, default 0.075,
threshold for a prediction to be true positive,
with units in seconds,
verbose: int, default 0,
print verbosity
Returns
-------
rec_acc: float,
accuracy of predictions | torch_ecg/databases/cpsc_databases/cpsc2019.py | compute_metrics | DeepPSP/torch_ecg | 9 | python | def compute_metrics(rpeaks_truth: Sequence[Union[(np.ndarray, Sequence[int])]], rpeaks_pred: Sequence[Union[(np.ndarray, Sequence[int])]], fs: Real, thr: float=0.075, verbose: int=0) -> float:
' finished, checked,\n\n metric (scoring) function modified from the official one, with errors fixed\n\n Parameters\n ----------\n rpeaks_truth: sequence,\n sequence of ground truths of rpeaks locations from multiple records\n rpeaks_pred: sequence,\n predictions of rpeaks locations for multiple records\n fs: real number,\n sampling frequency of ECG signal\n thr: float, default 0.075,\n threshold for a prediction to be true positive,\n with units in seconds,\n verbose: int, default 0,\n print verbosity\n\n Returns\n -------\n rec_acc: float,\n accuracy of predictions\n '
assert (len(rpeaks_truth) == len(rpeaks_pred)), f'number of records does not match, truth indicates {len(rpeaks_truth)}, while pred indicates {len(rpeaks_pred)}'
n_records = len(rpeaks_truth)
record_flags = np.ones((len(rpeaks_truth),), dtype=float)
thr_ = (thr * fs)
if (verbose >= 1):
print(f'number of records = {n_records}')
print(f'threshold in number of sample points = {thr_}')
for (idx, (truth_arr, pred_arr)) in enumerate(zip(rpeaks_truth, rpeaks_pred)):
false_negative = 0
false_positive = 0
true_positive = 0
extended_truth_arr = np.concatenate((truth_arr.astype(int), [int((9.5 * fs))]))
for (j, t_ind) in enumerate(extended_truth_arr[:(- 1)]):
next_t_ind = extended_truth_arr[(j + 1)]
loc = np.where((np.abs((pred_arr - t_ind)) <= thr_))[0]
if (j == 0):
err = np.where(((pred_arr >= ((0.5 * fs) + thr_)) & (pred_arr <= (t_ind - thr_))))[0]
else:
err = np.array([], dtype=int)
err = np.append(err, np.where(((pred_arr >= (t_ind + thr_)) & (pred_arr <= (next_t_ind - thr_))))[0])
false_positive += len(err)
if (len(loc) >= 1):
true_positive += 1
false_positive += (len(loc) - 1)
elif (len(loc) == 0):
false_negative += 1
if ((false_negative + false_positive) > 1):
record_flags[idx] = 0
elif ((false_negative == 1) and (false_positive == 0)):
record_flags[idx] = 0.3
elif ((false_negative == 0) and (false_positive == 1)):
record_flags[idx] = 0.7
if (verbose >= 2):
print(f'''for the {idx}-th record,
true positive = {true_positive}
false positive = {false_positive}
false negative = {false_negative}''')
rec_acc = round((np.sum(record_flags) / n_records), 4)
print(f'QRS_acc: {rec_acc}')
print('Scoring complete.')
return rec_acc | def compute_metrics(rpeaks_truth: Sequence[Union[(np.ndarray, Sequence[int])]], rpeaks_pred: Sequence[Union[(np.ndarray, Sequence[int])]], fs: Real, thr: float=0.075, verbose: int=0) -> float:
' finished, checked,\n\n metric (scoring) function modified from the official one, with errors fixed\n\n Parameters\n ----------\n rpeaks_truth: sequence,\n sequence of ground truths of rpeaks locations from multiple records\n rpeaks_pred: sequence,\n predictions of rpeaks locations for multiple records\n fs: real number,\n sampling frequency of ECG signal\n thr: float, default 0.075,\n threshold for a prediction to be true positive,\n with units in seconds,\n verbose: int, default 0,\n print verbosity\n\n Returns\n -------\n rec_acc: float,\n accuracy of predictions\n '
assert (len(rpeaks_truth) == len(rpeaks_pred)), f'number of records does not match, truth indicates {len(rpeaks_truth)}, while pred indicates {len(rpeaks_pred)}'
n_records = len(rpeaks_truth)
record_flags = np.ones((len(rpeaks_truth),), dtype=float)
thr_ = (thr * fs)
if (verbose >= 1):
print(f'number of records = {n_records}')
print(f'threshold in number of sample points = {thr_}')
for (idx, (truth_arr, pred_arr)) in enumerate(zip(rpeaks_truth, rpeaks_pred)):
false_negative = 0
false_positive = 0
true_positive = 0
extended_truth_arr = np.concatenate((truth_arr.astype(int), [int((9.5 * fs))]))
for (j, t_ind) in enumerate(extended_truth_arr[:(- 1)]):
next_t_ind = extended_truth_arr[(j + 1)]
loc = np.where((np.abs((pred_arr - t_ind)) <= thr_))[0]
if (j == 0):
err = np.where(((pred_arr >= ((0.5 * fs) + thr_)) & (pred_arr <= (t_ind - thr_))))[0]
else:
err = np.array([], dtype=int)
err = np.append(err, np.where(((pred_arr >= (t_ind + thr_)) & (pred_arr <= (next_t_ind - thr_))))[0])
false_positive += len(err)
if (len(loc) >= 1):
true_positive += 1
false_positive += (len(loc) - 1)
elif (len(loc) == 0):
false_negative += 1
if ((false_negative + false_positive) > 1):
record_flags[idx] = 0
elif ((false_negative == 1) and (false_positive == 0)):
record_flags[idx] = 0.3
elif ((false_negative == 0) and (false_positive == 1)):
record_flags[idx] = 0.7
if (verbose >= 2):
print(f'''for the {idx}-th record,
true positive = {true_positive}
false positive = {false_positive}
false negative = {false_negative}''')
rec_acc = round((np.sum(record_flags) / n_records), 4)
print(f'QRS_acc: {rec_acc}')
print('Scoring complete.')
return rec_acc<|docstring|>finished, checked,
metric (scoring) function modified from the official one, with errors fixed
Parameters
----------
rpeaks_truth: sequence,
sequence of ground truths of rpeaks locations from multiple records
rpeaks_pred: sequence,
predictions of rpeaks locations for multiple records
fs: real number,
sampling frequency of ECG signal
thr: float, default 0.075,
threshold for a prediction to be true positive,
with units in seconds,
verbose: int, default 0,
print verbosity
Returns
-------
rec_acc: float,
accuracy of predictions<|endoftext|> |
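A toy run of the scoring rule above: a perfectly detected record scores 1.0 and a record with exactly one false negative scores 0.3, so these two records average to 0.65 (thr_ = 0.075 * 500 = 37.5 samples).

    import numpy as np

    fs = 500
    truth = [np.array([400, 900, 1400, 1900]), np.array([500, 1000, 1500])]
    pred = [np.array([402, 898, 1405, 1901]), np.array([500, 1000])]  # one missed beat
    acc = compute_metrics(truth, pred, fs)        # prints QRS_acc: 0.65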
d87b2de548dd0f5be17e19cb17c521833a8bba361659c36c7b6eb937b3a565ef | def __init__(self, db_dir: str, working_dir: Optional[str]=None, verbose: int=2, **kwargs: Any) -> NoReturn:
' finished, to be improved,\n\n Parameters\n ----------\n db_dir: str,\n storage path of the database\n working_dir: str, optional,\n working directory, to store intermediate files and log file\n verbose: int, default 2,\n log verbosity\n kwargs: auxiliary keyword arguments\n '
super().__init__(db_name='CPSC2019', db_dir=db_dir, working_dir=working_dir, verbose=verbose, **kwargs)
self.fs = 500
self.spacing = (1000 / self.fs)
self.rec_ext = 'mat'
self.ann_ext = 'mat'
self.rec_dir = os.path.join(self.db_dir, 'data')
self.ann_dir = os.path.join(self.db_dir, 'ref')
self.n_records = 2000
self._all_records = [f'data_{i:05d}' for i in range(1, (1 + self.n_records))]
self._all_annotations = [f'R_{i:05d}' for i in range(1, (1 + self.n_records))]
self._ls_rec()
self.data_dir = self.rec_dir
self.ref_dir = self.ann_dir | finished, to be improved,
Parameters
----------
db_dir: str,
storage path of the database
working_dir: str, optional,
working directory, to store intermediate files and log file
verbose: int, default 2,
log verbosity
kwargs: auxiliary keyword arguments | torch_ecg/databases/cpsc_databases/cpsc2019.py | __init__ | DeepPSP/torch_ecg | 9 | python | def __init__(self, db_dir: str, working_dir: Optional[str]=None, verbose: int=2, **kwargs: Any) -> NoReturn:
' finished, to be improved,\n\n Parameters\n ----------\n db_dir: str,\n storage path of the database\n working_dir: str, optional,\n working directory, to store intermediate files and log file\n verbose: int, default 2,\n log verbosity\n kwargs: auxiliary keyword arguments\n '
super().__init__(db_name='CPSC2019', db_dir=db_dir, working_dir=working_dir, verbose=verbose, **kwargs)
self.fs = 500
self.spacing = (1000 / self.fs)
self.rec_ext = 'mat'
self.ann_ext = 'mat'
self.rec_dir = os.path.join(self.db_dir, 'data')
self.ann_dir = os.path.join(self.db_dir, 'ref')
self.n_records = 2000
self._all_records = [f'data_{i:05d}' for i in range(1, (1 + self.n_records))]
self._all_annotations = [f'R_{i:05d}' for i in range(1, (1 + self.n_records))]
self._ls_rec()
self.data_dir = self.rec_dir
self.ref_dir = self.ann_dir | def __init__(self, db_dir: str, working_dir: Optional[str]=None, verbose: int=2, **kwargs: Any) -> NoReturn:
' finished, to be improved,\n\n Parameters\n ----------\n db_dir: str,\n storage path of the database\n working_dir: str, optional,\n working directory, to store intermediate files and log file\n verbose: int, default 2,\n log verbosity\n kwargs: auxiliary keyword arguments\n '
super().__init__(db_name='CPSC2019', db_dir=db_dir, working_dir=working_dir, verbose=verbose, **kwargs)
self.fs = 500
self.spacing = (1000 / self.fs)
self.rec_ext = 'mat'
self.ann_ext = 'mat'
self.rec_dir = os.path.join(self.db_dir, 'data')
self.ann_dir = os.path.join(self.db_dir, 'ref')
self.n_records = 2000
self._all_records = [f'data_{i:05d}' for i in range(1, (1 + self.n_records))]
self._all_annotations = [f'R_{i:05d}' for i in range(1, (1 + self.n_records))]
self._ls_rec()
self.data_dir = self.rec_dir
self.ref_dir = self.ann_dir<|docstring|>finished, to be improved,
Parameters
----------
db_dir: str,
storage path of the database
working_dir: str, optional,
working directory, to store intermediate files and log file
verbose: int, default 2,
log verbosity
kwargs: auxiliary keyword arguments<|endoftext|>
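A usage sketch for the reader initialized above; the enclosing class name CPSC2019 is inferred from db_name='CPSC2019' and the local path is hypothetical.

    reader = CPSC2019(db_dir='/data/CPSC2019')
    print(len(reader.all_records))                # up to 2000 ten-second records
    sig = reader.load_data(1, keep_dim=False)     # single-lead ECG sampled at 500 Hz
    rpeaks = reader.load_ann(1, keep_dim=False)   # R-peak sample indices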
f44cdc6728cc4a6b13b00ae8f24451ac95c21e63bed4a0c9a683c33c82bde5c5 | def _ls_rec(self) -> NoReturn:
' finished, checked,\n '
records_fn = os.path.join(self.db_dir, 'records.json')
if os.path.isfile(records_fn):
with open(records_fn, 'r') as f:
records_json = json.load(f)
self._all_records = records_json['rec']
self._all_annotations = records_json['ann']
return
print(f'Please allow some time for the reader to confirm the existence of corresponding data files and annotation files...')
self._all_records = [rec for rec in self._all_records if os.path.isfile(os.path.join(self.rec_dir, f'{rec}.{self.rec_ext}'))]
self._all_annotations = [ann for ann in self._all_annotations if os.path.isfile(os.path.join(self.ann_dir, f'{ann}.{self.ann_ext}'))]
common = (set([rec.split('_')[1] for rec in self._all_records]) & set([ann.split('_')[1] for ann in self._all_annotations]))
common = sorted(list(common))
self._all_records = [f'data_{item}' for item in common]
self._all_annotations = [f'R_{item}' for item in common]
with open(records_fn, 'w') as f:
records_json = {'rec': self._all_records, 'ann': self._all_annotations}
json.dump(records_json, f, ensure_ascii=False) | finished, checked, | torch_ecg/databases/cpsc_databases/cpsc2019.py | _ls_rec | DeepPSP/torch_ecg | 9 | python | def _ls_rec(self) -> NoReturn:
' \n '
records_fn = os.path.join(self.db_dir, 'records.json')
if os.path.isfile(records_fn):
with open(records_fn, 'r') as f:
records_json = json.load(f)
self._all_records = records_json['rec']
self._all_annotations = records_json['ann']
return
print(f'Please allow some time for the reader to confirm the existence of corresponding data files and annotation files...')
self._all_records = [rec for rec in self._all_records if os.path.isfile(os.path.join(self.rec_dir, f'{rec}.{self.rec_ext}'))]
self._all_annotations = [ann for ann in self._all_annotations if os.path.isfile(os.path.join(self.ann_dir, f'{ann}.{self.ann_ext}'))]
common = (set([rec.split('_')[1] for rec in self._all_records]) & set([ann.split('_')[1] for ann in self._all_annotations]))
common = sorted(list(common))
self._all_records = [f'data_{item}' for item in common]
self._all_annotations = [f'R_{item}' for item in common]
with open(records_fn, 'w') as f:
records_json = {'rec': self._all_records, 'ann': self._all_annotations}
json.dump(records_json, f, ensure_ascii=False) | def _ls_rec(self) -> NoReturn:
' \n '
records_fn = os.path.join(self.db_dir, 'records.json')
if os.path.isfile(records_fn):
with open(records_fn, 'r') as f:
records_json = json.load(f)
self._all_records = records_json['rec']
self._all_annotations = records_json['ann']
return
print(f'Please allow some time for the reader to confirm the existence of corresponding data files and annotation files...')
self._all_records = [rec for rec in self._all_records if os.path.isfile(os.path.join(self.rec_dir, f'{rec}.{self.rec_ext}'))]
self._all_annotations = [ann for ann in self._all_annotations if os.path.isfile(os.path.join(self.ann_dir, f'{ann}.{self.ann_ext}'))]
common = (set([rec.split('_')[1] for rec in self._all_records]) & set([ann.split('_')[1] for ann in self._all_annotations]))
common = sorted(list(common))
self._all_records = [f'data_{item}' for item in common]
self._all_annotations = [f'R_{item}' for item in common]
with open(records_fn, 'w') as f:
records_json = {'rec': self._all_records, 'ann': self._all_annotations}
json.dump(records_json, f, ensure_ascii=False)<|docstring|>finished, checked,<|endoftext|> |
295644d344d1ef74d8a34ebfe9e47ae99bf5a554b0233609bc1211fd5e179a7f | def get_subject_id(self, rec_no: int) -> int:
' not finished,\n\n Parameters\n ----------\n rec_no: int,\n number of the record, NOTE that rec_no starts from 1\n\n Returns\n -------\n pid: int,\n the `subject_id` corr. to `rec_no`\n '
pid = 0
raise NotImplementedError | not finished,
Parameters
----------
rec_no: int,
number of the record, NOTE that rec_no starts from 1
Returns
-------
pid: int,
the `subject_id` corr. to `rec_no` | torch_ecg/databases/cpsc_databases/cpsc2019.py | get_subject_id | DeepPSP/torch_ecg | 9 | python | def get_subject_id(self, rec_no: int) -> int:
' not finished,\n\n Parameters\n ----------\n rec_no: int,\n number of the record, NOTE that rec_no starts from 1\n\n Returns\n -------\n pid: int,\n the `subject_id` corr. to `rec_no`\n '
pid = 0
raise NotImplementedError | def get_subject_id(self, rec_no: int) -> int:
' not finished,\n\n Parameters\n ----------\n rec_no: int,\n number of the record, NOTE that rec_no starts from 1\n\n Returns\n -------\n pid: int,\n the `subject_id` corr. to `rec_no`\n '
pid = 0
raise NotImplementedError<|docstring|>not finished,
Parameters
----------
rec_no: int,
number of the record, NOTE that rec_no starts from 1
Returns
-------
pid: int,
the `subject_id` corr. to `rec_no`<|endoftext|> |
a0527c851718ef3849c4bcf8c1b226bac4743ec8f89dfad62aaacabc9d020701 | def load_data(self, rec: Union[(int, str)], units: str='mV', keep_dim: bool=True) -> np.ndarray:
' finished, checked,\n\n Parameters\n ----------\n rec: int or str,\n number of the record, NOTE that rec_no starts from 1,\n or the record name\n units: str, default "mV",\n units of the output signal, one of "mV" or "μV" ("uV")\n keep_dim: bool, default True,\n whether or not to flatten the data of shape (n,1)\n \n Returns\n -------\n data: ndarray,\n the ecg data\n '
fp = os.path.join(self.data_dir, f'{self._get_rec_name(rec)}.{self.rec_ext}')
data = loadmat(fp)['ecg']
if (units.lower() in ['uv', 'μv']):
data = (1000 * data).astype(int)
if (not keep_dim):
data = data.flatten()
return data | finished, checked,
Parameters
----------
rec: int or str,
number of the record, NOTE that rec_no starts from 1,
or the record name
units: str, default "mV",
units of the output signal, one of "mV" or "μV" ("uV")
keep_dim: bool, default True,
whether or not to flatten the data of shape (n,1)
Returns
-------
data: ndarray,
the ecg data | torch_ecg/databases/cpsc_databases/cpsc2019.py | load_data | DeepPSP/torch_ecg | 9 | python | def load_data(self, rec: Union[(int, str)], units: str='mV', keep_dim: bool=True) -> np.ndarray:
' finished, checked,\n\n Parameters\n ----------\n rec: int or str,\n number of the record, NOTE that rec_no starts from 1,\n or the record name\n units: str, default "mV",\n units of the output signal, one of "mV" or "μV" ("uV")\n keep_dim: bool, default True,\n whether or not to flatten the data of shape (n,1)\n \n Returns\n -------\n data: ndarray,\n the ecg data\n '
fp = os.path.join(self.data_dir, f'{self._get_rec_name(rec)}.{self.rec_ext}')
data = loadmat(fp)['ecg']
if (units.lower() in ['uv', 'μv']):
data = (1000 * data).astype(int)
if (not keep_dim):
data = data.flatten()
return data | def load_data(self, rec: Union[(int, str)], units: str='mV', keep_dim: bool=True) -> np.ndarray:
' finished, checked,\n\n Parameters\n ----------\n rec: int or str,\n number of the record, NOTE that rec_no starts from 1,\n or the record name\n units: str, default "mV",\n units of the output signal, one of "mV" or "μV" ("uV")\n keep_dim: bool, default True,\n whether or not to flatten the data of shape (n,1)\n \n Returns\n -------\n data: ndarray,\n the ecg data\n '
fp = os.path.join(self.data_dir, f'{self._get_rec_name(rec)}.{self.rec_ext}')
data = loadmat(fp)['ecg']
if (units.lower() in ['uv', 'μv']):
data = (1000 * data).astype(int)
if (not keep_dim):
data = data.flatten()
return data<|docstring|>finished, checked,
Parameters
----------
rec: int or str,
number of the record, NOTE that rec_no starts from 1,
or the record name
units: str, default "mV",
units of the output signal, one of "mV" or "μV" ("uV")
keep_dim: bool, default True,
whether or not to flatten the data of shape (n,1)
Returns
-------
data: ndarray,
the ecg data<|endoftext|> |
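Continuing the reader sketch above, the unit handling in load_data can be checked directly: the μV output is the mV signal scaled by 1000 and cast to int.

    import numpy as np

    mv = reader.load_data(1, units='mV', keep_dim=False)
    uv = reader.load_data(1, units='uV', keep_dim=False)
    assert np.array_equal(uv, (1000 * mv).astype(int))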
0e31cf3528c769ba52efa8d7819ae55f4c1608ca6b38771c170319510789c196 | def load_ann(self, rec: Union[(int, str)], keep_dim: bool=True) -> np.ndarray:
' finished, checked,\n\n Parameters\n ----------\n rec: int or str,\n number of the record, NOTE that rec_no starts from 1,\n or the record name\n keep_dim: bool, default True,\n whether or not to flatten the data of shape (n,1)\n \n Returns\n -------\n ann: ndarray,\n array of indices of R peaks\n '
fp = os.path.join(self.ann_dir, f'{self._get_ann_name(rec)}.{self.ann_ext}')
ann = loadmat(fp)['R_peak'].astype(int)
if (not keep_dim):
ann = ann.flatten()
return ann | finished, checked,
Parameters
----------
rec: int or str,
number of the record, NOTE that rec_no starts from 1,
or the record name
keep_dim: bool, default True,
whether or not to flatten the data of shape (n,1)
Returns
-------
ann: ndarray,
array of indices of R peaks | torch_ecg/databases/cpsc_databases/cpsc2019.py | load_ann | DeepPSP/torch_ecg | 9 | python | def load_ann(self, rec: Union[(int, str)], keep_dim: bool=True) -> np.ndarray:
' finished, checked,\n\n Parameters\n ----------\n rec: int or str,\n number of the record, NOTE that rec_no starts from 1,\n or the record name\n keep_dim: bool, default True,\n whether or not to flatten the data of shape (n,1)\n \n Returns\n -------\n ann: ndarray,\n array of indices of R peaks\n '
fp = os.path.join(self.ann_dir, f'{self._get_ann_name(rec)}.{self.ann_ext}')
ann = loadmat(fp)['R_peak'].astype(int)
if (not keep_dim):
ann = ann.flatten()
return ann | def load_ann(self, rec: Union[(int, str)], keep_dim: bool=True) -> np.ndarray:
' finished, checked,\n\n Parameters\n ----------\n rec: int or str,\n number of the record, NOTE that rec_no starts from 1,\n or the record name\n keep_dim: bool, default True,\n whether or not to flatten the data of shape (n,1)\n \n Returns\n -------\n ann: ndarray,\n array of indices of R peaks\n '
fp = os.path.join(self.ann_dir, f'{self._get_ann_name(rec)}.{self.ann_ext}')
ann = loadmat(fp)['R_peak'].astype(int)
if (not keep_dim):
ann = ann.flatten()
return ann<|docstring|>finished, checked,
Parameters
----------
rec: int or str,
number of the record, NOTE that rec_no starts from 1,
or the record name
keep_dim: bool, default True,
whether or not to flatten the data of shape (n,1)
Returns
-------
ann: ndarray,
array of indices of R peaks<|endoftext|> |
f14c2335e8961ae93716cb097828f75085cbe107d1ab0f7fa573dc29995d2ef6 | def load_rpeaks(self, rec: Union[(int, str)], keep_dim: bool=True) -> Dict[(str, np.ndarray)]:
'\n alias of `self.load_ann`\n '
return self.load_ann(rec=rec, keep_dim=keep_dim) | alias of `self.load_ann` | torch_ecg/databases/cpsc_databases/cpsc2019.py | load_rpeaks | DeepPSP/torch_ecg | 9 | python | def load_rpeaks(self, rec: Union[(int, str)], keep_dim: bool=True) -> Dict[(str, np.ndarray)]:
'\n \n '
return self.load_ann(rec=rec, keep_dim=keep_dim) | def load_rpeaks(self, rec: Union[(int, str)], keep_dim: bool=True) -> Dict[(str, np.ndarray)]:
'\n \n '
return self.load_ann(rec=rec, keep_dim=keep_dim)<|docstring|>alias of `self.load_ann`<|endoftext|> |
b054c7180f23617758b1fd4be06905b8df9fa45308c810bcbbc66d2981a74d94 | def _get_rec_name(self, rec: Union[(int, str)]) -> str:
' finished, checked,\n\n Parameters\n ----------\n rec: int or str,\n number of the record, NOTE that rec_no starts from 1,\n or the record name\n\n Returns\n -------\n rec_name: str,\n filename of the record\n '
if isinstance(rec, int):
assert (rec in range(1, (self.n_records + 1))), f'rec should be in range(1,{(self.n_records + 1)})'
rec_name = self.all_records[(rec - 1)]
elif isinstance(rec, str):
assert (rec in self.all_records), f'rec {rec} not found'
rec_name = rec
return rec_name | finished, checked,
Parameters
----------
rec: int or str,
number of the record, NOTE that rec_no starts from 1,
or the record name
Returns
-------
rec_name: str,
filename of the record | torch_ecg/databases/cpsc_databases/cpsc2019.py | _get_rec_name | DeepPSP/torch_ecg | 9 | python | def _get_rec_name(self, rec: Union[(int, str)]) -> str:
' finished, checked,\n\n Parameters\n ----------\n rec: int or str,\n number of the record, NOTE that rec_no starts from 1,\n or the record name\n\n Returns\n -------\n rec_name: str,\n filename of the record\n '
if isinstance(rec, int):
assert (rec in range(1, (self.n_records + 1))), f'rec should be in range(1,{(self.n_records + 1)})'
rec_name = self.all_records[(rec - 1)]
elif isinstance(rec, str):
assert (rec in self.all_records), f'rec {rec} not found'
rec_name = rec
return rec_name | def _get_rec_name(self, rec: Union[(int, str)]) -> str:
' finished, checked,\n\n Parameters\n ----------\n rec: int or str,\n number of the record, NOTE that rec_no starts from 1,\n or the record name\n\n Returns\n -------\n rec_name: str,\n filename of the record\n '
if isinstance(rec, int):
assert (rec in range(1, (self.n_records + 1))), f'rec should be in range(1,{(self.n_records + 1)})'
rec_name = self.all_records[(rec - 1)]
elif isinstance(rec, str):
assert (rec in self.all_records), f'rec {rec} not found'
rec_name = rec
return rec_name<|docstring|>finished, checked,
Parameters
----------
rec: int or str,
number of the record, NOTE that rec_no starts from 1,
or the record name
Returns
-------
rec_name: str,
filename of the record<|endoftext|> |
227232da170484bd67b19090f69a90aa2dc770506bd1baed1e9df3f1035600a8 | def _get_ann_name(self, rec: Union[(int, str)]) -> str:
' finished, checked,\n\n Parameters\n ----------\n rec: int or str,\n number of the record, NOTE that rec_no starts from 1,\n or the record name\n\n Returns\n -------\n ann_name: str,\n filename of annotations of the record `rec`\n '
rec_name = self._get_rec_name(rec)
ann_name = rec_name.replace('data', 'R')
return ann_name | finished, checked,
Parameters
----------
rec: int or str,
number of the record, NOTE that rec_no starts from 1,
or the record name
Returns
-------
ann_name: str,
filename of annotations of the record `rec` | torch_ecg/databases/cpsc_databases/cpsc2019.py | _get_ann_name | DeepPSP/torch_ecg | 9 | python | def _get_ann_name(self, rec: Union[(int, str)]) -> str:
' finished, checked,\n\n Parameters\n ----------\n rec: int or str,\n number of the record, NOTE that rec_no starts from 1,\n or the record name\n\n Returns\n -------\n ann_name: str,\n filename of annotations of the record `rec`\n '
rec_name = self._get_rec_name(rec)
ann_name = rec_name.replace('data', 'R')
return ann_name | def _get_ann_name(self, rec: Union[(int, str)]) -> str:
' finished, checked,\n\n Parameters\n ----------\n rec: int or str,\n number of the record, NOTE that rec_no starts from 1,\n or the record name\n\n Returns\n -------\n ann_name: str,\n filename of annotations of the record `rec`\n '
rec_name = self._get_rec_name(rec)
ann_name = rec_name.replace('data', 'R')
return ann_name<|docstring|>finished, checked,
Parameters
----------
rec: int or str,
number of the record, NOTE that rec_no starts from 1,
or the record name
Returns
-------
ann_name: str,
filename of annotations of the record `rec`<|endoftext|> |
b0599455cbd3315b685a5ec97ea2f48bed70687c116c9eb1333d44ba75b8633f | def plot(self, rec: Union[(int, str)], data: Optional[np.ndarray]=None, ann: Optional[np.ndarray]=None, ticks_granularity: int=0, **kwargs: Any) -> NoReturn:
' finished, checked,\n\n Parameters\n ----------\n rec: int or str,\n number of the record, NOTE that rec_no starts from 1,\n or the record name\n data: ndarray, optional,\n ecg signal to plot,\n if given, data of `rec` will not be used,\n this is useful when plotting filtered data\n ann: ndarray, optional,\n annotations (rpeak indices) for `data`,\n ignored if `data` is None\n ticks_granularity: int, default 0,\n the granularity to plot axis ticks, the higher the more,\n 0 (no ticks) --> 1 (major ticks) --> 2 (major + minor ticks)\n '
if ('plt' not in dir()):
import matplotlib.pyplot as plt
if (data is None):
_data = self.load_data(rec, units='μV', keep_dim=False)
else:
units = self._auto_infer_units(data)
if (units == 'mV'):
_data = (data * 1000)
elif (units == 'μV'):
_data = data.copy()
duration = (len(_data) / self.fs)
secs = np.linspace(0, duration, len(_data))
if ((ann is None) or (data is None)):
rpeak_secs = (self.load_rpeaks(rec, keep_dim=False) / self.fs)
else:
rpeak_secs = (np.array(ann) / self.fs)
fig_sz_w = int((DEFAULT_FIG_SIZE_PER_SEC * duration))
y_range = np.max(np.abs(_data))
fig_sz_h = ((6 * y_range) / 1500)
(fig, ax) = plt.subplots(figsize=(fig_sz_w, fig_sz_h))
ax.plot(secs, _data, color='black', linewidth='2.0')
ax.axhline(y=0, linestyle='-', linewidth='1.0', color='red')
if (ticks_granularity >= 1):
ax.xaxis.set_major_locator(plt.MultipleLocator(0.2))
ax.yaxis.set_major_locator(plt.MultipleLocator(500))
ax.grid(which='major', linestyle='-', linewidth='0.5', color='red')
if (ticks_granularity >= 2):
ax.xaxis.set_minor_locator(plt.MultipleLocator(0.04))
ax.yaxis.set_minor_locator(plt.MultipleLocator(100))
ax.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
for r in rpeak_secs:
ax.axvspan((r - 0.01), (r + 0.01), color='green', alpha=0.9)
ax.axvspan((r - 0.075), (r + 0.075), color='green', alpha=0.3)
ax.set_xlim(secs[0], secs[(- 1)])
ax.set_ylim((- y_range), y_range)
ax.set_xlabel('Time [s]')
ax.set_ylabel('Voltage [μV]')
if kwargs.get('save_path', None):
plt.savefig(kwargs['save_path'], dpi=200, bbox_inches='tight')
else:
plt.show() | finished, checked,
Parameters
----------
rec: int or str,
number of the record, NOTE that rec_no starts from 1,
or the record name
data: ndarray, optional,
ecg signal to plot,
if given, data of `rec` will not be used,
this is useful when plotting filtered data
ann: ndarray, optional,
annotations (rpeak indices) for `data`,
ignored if `data` is None
ticks_granularity: int, default 0,
the granularity to plot axis ticks, the higher the more,
0 (no ticks) --> 1 (major ticks) --> 2 (major + minor ticks) | torch_ecg/databases/cpsc_databases/cpsc2019.py | plot | DeepPSP/torch_ecg | 9 | python | def plot(self, rec: Union[(int, str)], data: Optional[np.ndarray]=None, ann: Optional[np.ndarray]=None, ticks_granularity: int=0) -> NoReturn:
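A hedged usage sketch of `plot`; `db` is again an assumed reader instance, and `filtered` / `rpeaks` stand in for a user-supplied filtered signal and its rpeak sample indices:

# hypothetical usage: raw record with a fine grid, then a filtered copy
db.plot(1, ticks_granularity=2)
db.plot(
    1,
    data=filtered,          # filtered signal; units (mV vs μV) are auto-inferred
    ann=rpeaks,             # rpeak indices in samples of `filtered`
    ticks_granularity=1,
    save_path='rec_1.png',  # forwarded via **kwargs to plt.savefig
)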