| _id (string, lengths 2-7) | title (string, lengths 1-88) | partition (string, 3 classes) | text (string, lengths 31-13.1k) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q400
|
PoolManager.shutdown
|
train
|
def shutdown(cls):
"""Close all connections on in all pools"""
for pid in list(cls._pools.keys()):
|
python
|
{
"resource": ""
}
|
q401
|
PoolManager.size
|
train
|
def size(cls, pid):
"""Return the number of connections in the pool
:param str pid: The pool id
:rtype: int
"""
with cls._lock:
|
python
|
{
"resource": ""
}
|
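The q401 snippet counts a pool's connections while holding a class-level lock. A minimal sketch of that pattern with hypothetical bookkeeping (the `DemoPoolManager` name and `_pools` layout here are illustrative, not the library's internals):

```python
import threading

class DemoPoolManager(object):
    _lock = threading.Lock()
    _pools = {}          # pid -> list of connection objects (illustrative)

    @classmethod
    def size(cls, pid):
        """Return the number of connections tracked for pool `pid`."""
        with cls._lock:
            return len(cls._pools.get(pid, []))

DemoPoolManager._pools['abc123'] = ['conn1', 'conn2']
print(DemoPoolManager.size('abc123'))  # 2
```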
q402
|
PoolManager.report
|
train
|
def report(cls):
"""Return the state of the all of the registered pools.
:rtype: dict
"""
return {
'timestamp': datetime.datetime.utcnow().isoformat(),
|
python
|
{
"resource": ""
}
|
q403
|
PoolManager._maybe_remove_pool
|
train
|
def _maybe_remove_pool(cls, pid):
"""If the pool has no open connections, remove it
|
python
|
{
"resource": ""
}
|
q404
|
Session.close
|
train
|
def close(self):
"""Explicitly close the connection and remove it from the connection
pool if pooling is enabled. If the connection is already closed
:raises: psycopg2.InterfaceError
"""
if not self._conn:
raise psycopg2.InterfaceError('Connection not open')
LOGGER.info('Closing connection %r in %s', self._conn, self.pid)
|
python
|
{
"resource": ""
}
|
q405
|
Session.pid
|
train
|
def pid(self):
"""Return the pool ID used for connection pooling.
:rtype: str
"""
return hashlib.md5(':'.join([self.__class__.__name__,
|
python
|
{
"resource": ""
}
|
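The q405 snippet derives a pool ID by hashing joined identifying strings with `hashlib.md5`. A hedged sketch of that idea; the `pool_id` helper and its inputs (class name plus URI) are assumptions for illustration, not the library's exact inputs:

```python
import hashlib

def pool_id(class_name, uri):
    """Derive a stable pool key by hashing the joined identifying strings."""
    return hashlib.md5(':'.join([class_name, uri]).encode('utf-8')).hexdigest()

# The same inputs always map to the same pool key
print(pool_id('Session', 'postgresql://postgres@localhost:5432/postgres'))
```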
q406
|
Session.set_encoding
|
train
|
def set_encoding(self, value=DEFAULT_ENCODING):
"""Set the client encoding for the session if the value specified
is different than the current client encoding.
:param str value: The
|
python
|
{
"resource": ""
}
|
q407
|
Session._cleanup
|
train
|
def _cleanup(self):
"""Remove the connection from the stack, closing out the cursor"""
if self._cursor:
LOGGER.debug('Closing the cursor on %s', self.pid)
self._cursor.close()
|
python
|
{
"resource": ""
}
|
q408
|
Session._get_cursor
|
train
|
def _get_cursor(self, connection, name=None):
"""Return a cursor for the given cursor_factory. Specify a name to
use server-side cursors.
:param connection: The connection to create a cursor on
:type connection: psycopg2.extensions.connection
:param str name: A cursor name for a server side cursor
:rtype: psycopg2.extensions.cursor
|
python
|
{
"resource": ""
}
|
q409
|
Session._register_unicode
|
train
|
def _register_unicode(connection):
"""Register the cursor to be able to receive Unicode string.
:type connection: psycopg2.extensions.connection
:param connection: Where to register things
"""
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE,
|
python
|
{
"resource": ""
}
|
q410
|
Session._status
|
train
|
def _status(self):
"""Return the current connection status as an integer value.
The status should match one of the following constants:
- queries.Session.INTRANS: Connection established, in transaction
|
python
|
{
"resource": ""
}
|
q411
|
Results.items
|
train
|
def items(self):
"""Return all of the rows that are in the result set.
:rtype: list
"""
if not self.cursor.rowcount:
return []
|
python
|
{
"resource": ""
}
|
q412
|
get_current_user
|
train
|
def get_current_user():
"""Return the current username for the logged in user
:rtype: str
"""
if pwd is None:
|
python
|
{
"resource": ""
}
|
q413
|
uri
|
train
|
def uri(host='localhost', port=5432, dbname='postgres', user='postgres',
password=None):
"""Return a PostgreSQL connection URI for the specified values.
:param str host: Host to connect to
:param int port: Port to connect on
:param str dbname: The database name
:param str user: User to connect as
:param str password: The password to use, None for no password
:return str:
|
python
|
{
"resource": ""
}
|
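The q413 docstring describes building a PostgreSQL connection URI from host, port, dbname, user and an optional password. A hedged sketch of one way such a URI can be assembled; the `build_uri` name and formatting details are assumptions, since the function body above is cut off:

```python
def build_uri(host='localhost', port=5432, dbname='postgres', user='postgres',
              password=None):
    """Assemble a PostgreSQL connection URI from its components."""
    if password:
        return 'postgresql://{}:{}@{}:{}/{}'.format(user, password, host, port, dbname)
    return 'postgresql://{}@{}:{}/{}'.format(user, host, port, dbname)

print(build_uri(dbname='mydb'))  # postgresql://postgres@localhost:5432/mydb
```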
q414
|
uri_to_kwargs
|
train
|
def uri_to_kwargs(uri):
"""Return a URI as kwargs for connecting to PostgreSQL with psycopg2,
applying default values for non-specified areas of the URI.
:param str uri: The connection URI
:rtype: dict
"""
parsed = urlparse(uri)
default_user = get_current_user()
password = unquote(parsed.password) if parsed.password else None
kwargs = {'host': parsed.hostname,
'port': parsed.port,
'dbname': parsed.path[1:] or default_user,
'user': parsed.username or default_user,
'password': password}
values
|
python
|
{
"resource": ""
}
|
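As a usage illustration of the parsing shown in q414, `urlparse` splits a connection URI into the hostname, port, path and credentials that the kwargs dict is built from. This is a sketch of the general technique only; it omits the library's handling of defaults and query-string options:

```python
from urllib.parse import urlparse, unquote

parsed = urlparse('postgresql://scott:tiger%21@db.example.com:6432/sales')
kwargs = {'host': parsed.hostname,
          'port': parsed.port,
          'dbname': parsed.path[1:],
          'user': parsed.username,
          'password': unquote(parsed.password) if parsed.password else None}
print(kwargs['host'], kwargs['port'], kwargs['dbname'], kwargs['password'])
# db.example.com 6432 sales tiger!
```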
q415
|
TornadoSession._ensure_pool_exists
|
train
|
def _ensure_pool_exists(self):
"""Create the pool in the pool manager if it does not exist."""
if self.pid not
|
python
|
{
"resource": ""
}
|
q416
|
TornadoSession._create_connection
|
train
|
def _create_connection(self, future):
"""Create a new PostgreSQL connection
:param tornado.concurrent.Future future: future for new conn result
"""
LOGGER.debug('Creating a new connection for %s', self.pid)
# Create a new PostgreSQL connection
kwargs = utils.uri_to_kwargs(self._uri)
try:
connection = self._psycopg2_connect(kwargs)
except (psycopg2.Error, OSError, socket.error) as error:
future.set_exception(error)
return
# Add the connection for use in _poll_connection
fd = connection.fileno()
self._connections[fd] = connection
def on_connected(cf):
"""Invoked by the IOLoop when the future is complete for the
connection
:param Future cf: The future for the initial connection
"""
if cf.exception():
self._cleanup_fd(fd, True)
future.set_exception(cf.exception())
else:
try:
# Add the connection to the pool
LOGGER.debug('Connection established for %s', self.pid)
self._pool_manager.add(self.pid, connection)
except (ValueError, pool.PoolException) as err:
LOGGER.exception('Failed to add %r to the pool', self.pid)
self._cleanup_fd(fd)
future.set_exception(err)
return
self._pool_manager.lock(self.pid, connection, self)
# Added in because psycopg2cffi connects and leaves the
# connection in a weird state: consts.STATUS_DATESTYLE,
|
python
|
{
"resource": ""
}
|
q417
|
TornadoSession._exec_cleanup
|
train
|
def _exec_cleanup(self, cursor, fd):
"""Close the cursor, remove any references to the fd in internal state
and remove the fd from the ioloop.
:param psycopg2.extensions.cursor cursor: The cursor to close
:param int fd: The connection file descriptor
"""
LOGGER.debug('Closing cursor and cleaning %s', fd)
try:
cursor.close()
except (psycopg2.Error, psycopg2.Warning) as error:
LOGGER.debug('Error closing the cursor: %s', error)
self._cleanup_fd(fd)
# If the cleanup callback exists, remove it
if self._cleanup_callback:
|
python
|
{
"resource": ""
}
|
q418
|
TornadoSession._cleanup_fd
|
train
|
def _cleanup_fd(self, fd, close=False):
"""Ensure the socket socket is removed from the IOLoop, the
connection stack, and futures stack.
:param int fd: The fd # to cleanup
"""
self._ioloop.remove_handler(fd)
|
python
|
{
"resource": ""
}
|
q419
|
TornadoSession._on_io_events
|
train
|
def _on_io_events(self, fd=None, _events=None):
"""Invoked by Tornado's IOLoop when there are events for the fd
:param int fd: The file descriptor for the event
:param int _events: The events raised
"""
if
|
python
|
{
"resource": ""
}
|
q420
|
TornadoSession._poll_connection
|
train
|
def _poll_connection(self, fd):
"""Check with psycopg2 to see what action to take. If the state is
POLL_OK, we should have a pending callback for that fd.
:param int fd: The socket fd for the postgresql connection
"""
try:
state = self._connections[fd].poll()
except (OSError, socket.error) as error:
self._ioloop.remove_handler(fd)
if fd in self._futures and not self._futures[fd].done():
self._futures[fd].set_exception(
psycopg2.OperationalError('Connection error (%s)' % error)
)
except (psycopg2.Error, psycopg2.Warning) as error:
if fd in self._futures and not self._futures[fd].done():
self._futures[fd].set_exception(error)
else:
if state == extensions.POLL_OK:
if fd in self._futures and not self._futures[fd].done():
self._futures[fd].set_result(True)
|
python
|
{
"resource": ""
}
|
q421
|
main
|
train
|
def main():
"""Setup.py entry point."""
import codecs
setuptools.setup(
name='wcwidth',
version='0.1.7',
description=("Measures number of Terminal column cells "
"of wide-character codes"),
long_description=codecs.open(
os.path.join(HERE, 'README.rst'), 'r', 'utf8').read(),
author='Jeff Quast',
author_email='[email protected]',
license='MIT',
packages=['wcwidth', 'wcwidth.tests'],
url='https://github.com/jquast/wcwidth',
include_package_data=True,
test_suite='wcwidth.tests',
zip_safe=True,
classifiers=[
'Intended Audience :: Developers',
'Natural Language :: English',
'Development Status :: 3 - Alpha',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.7',
|
python
|
{
"resource": ""
}
|
q422
|
SetupUpdate._do_readme_update
|
train
|
def _do_readme_update(self):
"""Patch README.rst to reflect the data files used in release."""
import codecs
import glob
# read in,
data_in = codecs.open(
os.path.join(HERE, 'README.rst'), 'r', 'utf8').read()
# search for beginning and end positions,
pos_begin = data_in.find(self.README_PATCH_FROM)
assert pos_begin != -1, (pos_begin, self.README_PATCH_FROM)
pos_begin += len(self.README_PATCH_FROM)
pos_end = data_in.find(self.README_PATCH_TO)
assert pos_end != -1, (pos_end, self.README_PATCH_TO)
glob_pattern = os.path.join(HERE, 'data', '*.txt')
file_descriptions = [
self._describe_file_header(fpath)
|
python
|
{
"resource": ""
}
|
q423
|
SetupUpdate._do_east_asian
|
train
|
def _do_east_asian(self):
"""Fetch and update east-asian tables."""
self._do_retrieve(self.EAW_URL, self.EAW_IN)
(version, date, values) = self._parse_east_asian(
fname=self.EAW_IN,
properties=(u'W', u'F',)
|
python
|
{
"resource": ""
}
|
q424
|
SetupUpdate._do_zero_width
|
train
|
def _do_zero_width(self):
"""Fetch and update zero width tables."""
self._do_retrieve(self.UCD_URL, self.UCD_IN)
(version, date, values) = self._parse_category(
fname=self.UCD_IN,
categories=('Me', 'Mn',)
|
python
|
{
"resource": ""
}
|
q425
|
SetupUpdate._make_table
|
train
|
def _make_table(values):
"""Return a tuple of lookup tables for given values."""
import collections
table = collections.deque()
start, end = values[0], values[0]
for num, value in enumerate(values):
if num == 0:
table.append((value, value,))
continue
start, end = table.pop()
|
python
|
{
"resource": ""
}
|
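The q425 snippet collapses a sorted list of code point values into (start, end) lookup ranges. A minimal sketch of the same range-merging idea, independent of the setup script's deque-based implementation (the `make_ranges` name is illustrative):

```python
def make_ranges(values):
    """Collapse a sorted list of integers into inclusive (start, end) ranges."""
    ranges = []
    for value in values:
        if ranges and value == ranges[-1][1] + 1:
            ranges[-1] = (ranges[-1][0], value)   # extend the current run
        else:
            ranges.append((value, value))         # start a new run
    return tuple(ranges)

print(make_ranges([0x300, 0x301, 0x302, 0x483, 0x484]))
# ((768, 770), (1155, 1156))
```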
q426
|
SetupUpdate._do_retrieve
|
train
|
def _do_retrieve(url, fname):
"""Retrieve given url to target filepath fname."""
folder = os.path.dirname(fname)
if not os.path.exists(folder):
os.makedirs(folder)
print("{}/ created.".format(folder))
|
python
|
{
"resource": ""
}
|
q427
|
SetupUpdate._parse_east_asian
|
train
|
def _parse_east_asian(fname, properties=(u'W', u'F',)):
"""Parse unicode east-asian width tables."""
version, date, values = None, None, []
print("parsing {} ..".format(fname))
for line in open(fname, 'rb'):
uline = line.decode('utf-8')
if version is None:
version = uline.split(None, 1)[1].rstrip()
continue
elif date is None:
date = uline.split(':', 1)[1].rstrip()
continue
if uline.startswith('#') or not uline.lstrip():
|
python
|
{
"resource": ""
}
|
q428
|
SetupUpdate._parse_category
|
train
|
def _parse_category(fname, categories):
"""Parse unicode category tables."""
version, date, values = None, None, []
print("parsing {} ..".format(fname))
for line in open(fname, 'rb'):
uline = line.decode('utf-8')
if version is None:
version = uline.split(None, 1)[1].rstrip()
continue
elif date is None:
date = uline.split(':', 1)[1].rstrip()
continue
if uline.startswith('#') or not uline.lstrip():
continue
|
python
|
{
"resource": ""
}
|
q429
|
SetupUpdate._do_write
|
train
|
def _do_write(fname, variable, version, date, table):
"""Write combining tables to filesystem as python code."""
# pylint: disable=R0914
# Too many local variables (19/15) (col 4)
print("writing {} ..".format(fname))
import unicodedata
import datetime
import string
utc_now = datetime.datetime.utcnow()
indent = 4
with open(fname, 'w') as fout:
fout.write(
'"""{variable_proper} table. Created by setup.py."""\n'
"# Generated: {iso_utc}\n"
"# Source: {version}\n"
"# Date: {date}\n"
"{variable} = (".format(iso_utc=utc_now.isoformat(),
version=version,
|
python
|
{
"resource": ""
}
|
q430
|
report_ucs_msg
|
train
|
def report_ucs_msg(ucs, wcwidth_libc, wcwidth_local):
"""
Return string report of combining character differences.
:param ucs: unicode point.
:type ucs: unicode
:param wcwidth_libc: libc-wcwidth's reported character length.
:type comb_py: int
:param wcwidth_local: wcwidth's reported character length.
|
python
|
{
"resource": ""
}
|
q431
|
validate_args
|
train
|
def validate_args(opts):
"""Validate and return options provided by docopt parsing."""
if opts['--wide'] is None:
opts['--wide'] = 2
else:
assert opts['--wide'] in ("1", "2"), opts['--wide']
if opts['--alignment'] is None:
opts['--alignment'] = 'left'
else:
assert opts['--alignment'] in ('left', 'right'), opts['--alignment']
|
python
|
{
"resource": ""
}
|
q432
|
Screen.hint_width
|
train
|
def hint_width(self):
"""Width of a column segment."""
return sum((len(self.style.delimiter),
self.wide,
len(self.style.delimiter),
|
python
|
{
"resource": ""
}
|
q433
|
Screen.head_item
|
train
|
def head_item(self):
"""Text of a single column heading."""
delimiter = self.style.attr_minor(self.style.delimiter)
hint = self.style.header_hint * self.wide
heading = (u'{delimiter}{hint}{delimiter}'
.format(delimiter=delimiter, hint=hint))
alignment = lambda *args: (
|
python
|
{
"resource": ""
}
|
q434
|
Screen.msg_intro
|
train
|
def msg_intro(self):
"""Introductory message disabled above heading."""
delim = self.style.attr_minor(self.style.delimiter)
|
python
|
{
"resource": ""
}
|
q435
|
Screen.num_columns
|
train
|
def num_columns(self):
"""Number of columns displayed."""
if self.term.is_a_tty:
|
python
|
{
"resource": ""
}
|
q436
|
Pager.on_resize
|
train
|
def on_resize(self, *args):
"""Signal handler callback for SIGWINCH."""
# pylint: disable=W0613
# Unused argument 'args'
self.screen.style.name_len = min(self.screen.style.name_len,
|
python
|
{
"resource": ""
}
|
q437
|
Pager._set_lastpage
|
train
|
def _set_lastpage(self):
"""Calculate value of class attribute ``last_page``."""
|
python
|
{
"resource": ""
}
|
q438
|
Pager.display_initialize
|
train
|
def display_initialize(self):
"""Display 'please wait' message, and narrow build warning."""
echo(self.term.home + self.term.clear)
echo(self.term.move_y(self.term.height // 2))
echo(self.term.center('Initializing page data ...').rstrip())
flushout()
if LIMIT_UCS == 0x10000:
|
python
|
{
"resource": ""
}
|
q439
|
Pager.initialize_page_data
|
train
|
def initialize_page_data(self):
"""Initialize the page data for the given screen."""
if self.term.is_a_tty:
self.display_initialize()
self.character_generator = self.character_factory(self.screen.wide)
page_data = list()
while True:
try:
page_data.append(next(self.character_generator))
except StopIteration:
|
python
|
{
"resource": ""
}
|
q440
|
Pager.page_data
|
train
|
def page_data(self, idx, offset):
"""
Return character data for page of given index and offset.
:param idx: page index.
:type idx: int
:param offset: scrolling region offset of current page.
:type offset: int
:returns: list of tuples in form of ``(ucs, name)``
:rtype: list[(unicode, unicode)]
"""
size = self.screen.page_size
while offset < 0 and idx:
offset += size
idx -= 1
|
python
|
{
"resource": ""
}
|
q441
|
Pager._run_notty
|
train
|
def _run_notty(self, writer):
"""Pager run method for terminals that are not a tty."""
page_idx = page_offset = 0
while True:
npage_idx, _ = self.draw(writer, page_idx + 1, page_offset)
if npage_idx == self.last_page:
|
python
|
{
"resource": ""
}
|
q442
|
Pager._run_tty
|
train
|
def _run_tty(self, writer, reader):
"""Pager run method for terminals that are a tty."""
# allow window-change signal to reflow screen
signal.signal(signal.SIGWINCH, self.on_resize)
page_idx = page_offset = 0
while True:
if self.dirty:
page_idx, page_offset = self.draw(writer,
page_idx,
page_offset)
self.dirty = self.STATE_CLEAN
inp = reader(timeout=0.25)
if inp is not None:
nxt, noff = self.process_keystroke(inp,
|
python
|
{
"resource": ""
}
|
q443
|
Pager.run
|
train
|
def run(self, writer, reader):
"""
Pager entry point.
In interactive mode (terminal is a tty), run until
``process_keystroke()`` detects quit keystroke ('q'). In
non-interactive mode, exit after displaying all unicode points.
:param writer: callable writes to output stream, receiving
|
python
|
{
"resource": ""
}
|
q444
|
Pager.process_keystroke
|
train
|
def process_keystroke(self, inp, idx, offset):
"""
Process keystroke ``inp``, adjusting screen parameters.
:param inp: return value of Terminal.inkey().
:type inp: blessed.keyboard.Keystroke
:param idx: page index.
:type idx: int
:param offset: scrolling region offset of current page.
:type offset: int
:returns: tuple of next (idx, offset).
:rtype: (int, int)
"""
if inp.lower() in
|
python
|
{
"resource": ""
}
|
q445
|
Pager._process_keystroke_movement
|
train
|
def _process_keystroke_movement(self, inp, idx, offset):
"""Process keystrokes that adjust index and offset."""
term = self.term
if inp in (u'y', u'k') or inp.code in (term.KEY_UP,):
# scroll backward 1 line
idx, offset = (idx, offset - self.screen.num_columns)
elif inp in (u'e', u'j') or inp.code in (term.KEY_ENTER,
term.KEY_DOWN,):
# scroll forward 1 line
idx, offset = (idx, offset + self.screen.num_columns)
elif inp in (u'f', u' ') or inp.code in (term.KEY_PGDOWN,):
# scroll forward 1 page
idx, offset = (idx + 1, offset)
elif inp == u'b' or inp.code in (term.KEY_PGUP,):
# scroll backward 1 page
idx, offset = (max(0, idx - 1), offset)
elif inp.code in (term.KEY_SDOWN,):
# scroll forward 10 pages
|
python
|
{
"resource": ""
}
|
q446
|
Pager.draw
|
train
|
def draw(self, writer, idx, offset):
"""
Draw the current page view to ``writer``.
:param writer: callable writes to output stream, receiving unicode.
:type writer: callable
:param idx: current page index.
:type idx: int
:param offset: scrolling region offset of current page.
:type offset: int
:returns: tuple of next (idx, offset).
:rtype: (int, int)
"""
# as our screen can be resized while we're mid-calculation,
# our self.dirty flag can become re-toggled; because we are
# not re-flowing our pagination, we must
|
python
|
{
"resource": ""
}
|
q447
|
Pager.draw_heading
|
train
|
def draw_heading(self, writer):
"""
Conditionally redraw screen when ``dirty`` attribute is valued REFRESH.
When Pager attribute ``dirty`` is ``STATE_REFRESH``, cursor is moved
to (0,0), screen is cleared, and heading is displayed.
:param writer: callable writes to output stream, receiving unicode.
:returns: True if class attribute ``dirty`` is ``STATE_REFRESH``.
|
python
|
{
"resource": ""
}
|
q448
|
Pager.draw_status
|
train
|
def draw_status(self, writer, idx):
"""
Conditionally draw status bar when output terminal is a tty.
:param writer: callable writes to output stream, receiving unicode.
:param idx: current page position index.
:type idx: int
"""
if self.term.is_a_tty:
writer(self.term.hide_cursor())
style = self.screen.style
writer(self.term.move(self.term.height - 1))
if idx == self.last_page:
last_end = u'(END)'
else:
last_end =
|
python
|
{
"resource": ""
}
|
q449
|
Pager.page_view
|
train
|
def page_view(self, data):
"""
Generator yields text to be displayed for the current unicode pageview.
:param data: The current page's data as tuple of ``(ucs, name)``.
:rtype: generator
"""
if self.term.is_a_tty:
yield self.term.move(self.screen.row_begins, 0)
# sequence clears to end-of-line
clear_eol = self.term.clear_eol
# sequence clears to end-of-screen
clear_eos = self.term.clear_eos
# track our current column and row, where column is
# the whole segment of unicode value text, and draw
# only self.screen.num_columns before end-of-line.
#
# use clear_eol at end of each row to erase over any
# "ghosted" text, and clear_eos at end of screen to
# clear the same, especially for the final page which
# is often short.
col = 0
for ucs, name in data:
val
|
python
|
{
"resource": ""
}
|
q450
|
back_tick
|
train
|
def back_tick(cmd, ret_err=False, as_str=True, raise_err=None):
""" Run command `cmd`, return stdout, or stdout, stderr if `ret_err`
Roughly equivalent to ``check_output`` in Python 2.7
Parameters
----------
cmd : sequence
command to execute
ret_err : bool, optional
If True, return stderr in addition to stdout. If False, just return
stdout
as_str : bool, optional
Whether to decode outputs to unicode string on exit.
raise_err : None or bool, optional
If True, raise RuntimeError for non-zero return code. If None, set to
True when `ret_err` is False, False if `ret_err` is True
Returns
-------
out : str or tuple
If `ret_err` is False, return stripped string containing stdout from
|
python
|
{
"resource": ""
}
|
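The q450 docstring describes a `check_output`-style helper: return stripped stdout, optionally stderr as well, and optionally raise on a non-zero exit code. A minimal sketch under that same contract; `run_cmd` is a hypothetical stand-in, not the library's implementation:

```python
import subprocess

def run_cmd(cmd, ret_err=False, raise_err=None):
    """Run `cmd`, return stripped stdout (and stderr if `ret_err` is True)."""
    if raise_err is None:
        raise_err = not ret_err
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if raise_err and proc.returncode != 0:
        raise RuntimeError('{} failed with code {}'.format(' '.join(cmd),
                                                           proc.returncode))
    out = out.decode().strip()
    return (out, err.decode().strip()) if ret_err else out

print(run_cmd(['echo', 'hello']))  # hello
```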
q451
|
unique_by_index
|
train
|
def unique_by_index(sequence):
""" unique elements in `sequence` in the order in which they occur
Parameters
----------
sequence : iterable
Returns
-------
uniques : list
unique elements of sequence, ordered by the order in which the element
occurs
|
python
|
{
"resource": ""
}
|
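An order-preserving unique is straightforward to sketch; the following is one common way to express what the q451 docstring describes, not necessarily the library's own loop:

```python
def unique_in_order(sequence):
    """Return unique elements of `sequence`, keeping first-occurrence order."""
    seen = set()
    uniques = []
    for element in sequence:
        if element not in seen:
            seen.add(element)
            uniques.append(element)
    return uniques

print(unique_in_order([3, 1, 3, 2, 1]))  # [3, 1, 2]
```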
q452
|
ensure_permissions
|
train
|
def ensure_permissions(mode_flags=stat.S_IWUSR):
"""decorator to ensure a filename has given permissions.
If changed, original permissions are restored after the decorated
modification.
"""
def decorator(f):
def modify(filename, *args, **kwargs):
m = chmod_perms(filename) if exists(filename) else mode_flags
if not m & mode_flags:
os.chmod(filename, m | mode_flags)
try:
|
python
|
{
"resource": ""
}
|
q453
|
get_install_names
|
train
|
def get_install_names(filename):
""" Return install names from library named in `filename`
Returns tuple of install names
tuple will be empty if no install names, or if this is not an object file.
Parameters
----------
filename : str
filename of library
Returns
-------
install_names : tuple
tuple of install names for library `filename`
"""
|
python
|
{
"resource": ""
}
|
q454
|
get_install_id
|
train
|
def get_install_id(filename):
""" Return install id from library named in `filename`
Returns None if no install id, or if this is not an object file.
Parameters
----------
filename : str
filename of library
Returns
-------
install_id : str
install id of library `filename`, or None if no install id
"""
lines
|
python
|
{
"resource": ""
}
|
q455
|
set_install_name
|
train
|
def set_install_name(filename, oldname, newname):
""" Set install name `oldname` to `newname` in library filename
Parameters
----------
filename : str
filename of library
oldname : str
current install name in library
newname : str
replacement name for `oldname`
"""
names = get_install_names(filename)
|
python
|
{
"resource": ""
}
|
q456
|
set_install_id
|
train
|
def set_install_id(filename, install_id):
""" Set install id for library named in `filename`
Parameters
----------
filename : str
filename of library
install_id : str
install id for library `filename`
Raises
------
RuntimeError if `filename` has no install id
"""
|
python
|
{
"resource": ""
}
|
q457
|
get_rpaths
|
train
|
def get_rpaths(filename):
""" Return a tuple of rpaths from the library `filename`
If `filename` is not a library then the returned tuple will be empty.
Parameters
----------
filename : str
filename of library
Returns
-------
rpath : tuple
rpath paths in `filename`
"""
try:
lines = _cmd_out_err(['otool', '-l', filename])
except RuntimeError:
return ()
if not _line0_says_object(lines[0], filename):
return ()
lines = [line.strip() for line in lines]
paths = []
line_no = 1
while line_no < len(lines):
|
python
|
{
"resource": ""
}
|
q458
|
dir2zip
|
train
|
def dir2zip(in_dir, zip_fname):
""" Make a zip file `zip_fname` with contents of directory `in_dir`
The recorded filenames are relative to `in_dir`, so doing a standard zip
unpack of the resulting `zip_fname` in an empty directory will result in
the original directory contents.
Parameters
----------
in_dir : str
Directory path containing files to go in the zip archive
zip_fname : str
Filename of zip archive to write
"""
z = zipfile.ZipFile(zip_fname, 'w',
compression=zipfile.ZIP_DEFLATED)
for root, dirs, files in os.walk(in_dir):
for file in files:
in_fname = pjoin(root, file)
in_stat = os.stat(in_fname)
# Preserve file permissions, but allow copy
info = zipfile.ZipInfo(in_fname)
info.filename = relpath(in_fname, in_dir)
if os.path.sep == '\\':
# Make the path unix friendly on windows.
# PyPI won't accept wheels with windows path separators
|
python
|
{
"resource": ""
}
|
q459
|
find_package_dirs
|
train
|
def find_package_dirs(root_path):
""" Find python package directories in directory `root_path`
Parameters
----------
root_path : str
Directory to search for package subdirectories
Returns
-------
package_sdirs : set
Set of strings where each is a subdirectory of `root_path`, containing
an ``__init__.py`` file. Paths prefixed by `root_path`
"""
package_sdirs =
|
python
|
{
"resource": ""
}
|
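Finding package directories, i.e. subdirectories containing an `__init__.py`, as the q459 docstring describes, can be sketched with `os.listdir`. The `package_dirs` helper below is a hypothetical illustration of that contract:

```python
import os

def package_dirs(root_path):
    """Return subdirectories of `root_path` that contain an __init__.py file."""
    found = set()
    for entry in os.listdir(root_path):
        candidate = os.path.join(root_path, entry)
        if (os.path.isdir(candidate)
                and os.path.isfile(os.path.join(candidate, '__init__.py'))):
            found.add(candidate)
    return found
```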
q460
|
cmp_contents
|
train
|
def cmp_contents(filename1, filename2):
""" Returns True if contents of the files are the same
Parameters
----------
filename1 : str
filename of first file to compare
filename2 : str
filename of second file to compare
Returns
-------
tf : bool
True if binary contents of `filename1` is same as
|
python
|
{
"resource": ""
}
|
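The q460 docstring asks whether two files have identical binary contents. A minimal sketch that satisfies that contract by reading both files as bytes (for large files, a chunked comparison or `filecmp.cmp(..., shallow=False)` avoids loading everything into memory):

```python
def same_contents(filename1, filename2):
    """Return True if the binary contents of the two files are identical."""
    with open(filename1, 'rb') as f1, open(filename2, 'rb') as f2:
        return f1.read() == f2.read()
```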
q461
|
get_archs
|
train
|
def get_archs(libname):
""" Return architecture types from library `libname`
Parameters
----------
libname : str
filename of binary for which to return arch codes
Returns
-------
arch_names : frozenset
Empty (frozen)set if no arch codes. If not empty, contains one or more
of 'ppc', 'ppc64', 'i386', 'x86_64'
"""
if not exists(libname):
raise RuntimeError(libname + " is not a file")
try:
stdout = back_tick(['lipo', '-info', libname])
except RuntimeError:
return frozenset()
lines = [line.strip() for line in stdout.split('\n') if line.strip()]
# For some reason, output from lipo -info on .a file generates this line
if lines[0] == "input file {0} is not a fat file".format(libname):
line = lines[1]
|
python
|
{
"resource": ""
}
|
q462
|
validate_signature
|
train
|
def validate_signature(filename):
""" Remove invalid signatures from a binary file
If the file signature is missing or valid then it will be ignored
Invalid signatures are replaced with an ad-hoc signature. This is the
closest you can get to removing a signature on MacOS
Parameters
----------
filename : str
Filepath to a binary file
"""
out, err = back_tick(['codesign', '--verify', filename],
ret_err=True, as_str=True, raise_err=False)
if not err:
|
python
|
{
"resource": ""
}
|
q463
|
fuse_trees
|
train
|
def fuse_trees(to_tree, from_tree, lib_exts=('.so', '.dylib', '.a')):
""" Fuse path `from_tree` into path `to_tree`
For each file in `from_tree` - check for library file extension (in
`lib_exts` - if present, check if there is a file with matching relative
path in `to_tree`, if so, use :func:`delocate.tools.lipo_fuse` to fuse the
two libraries together and write into `to_tree`. If any of these
conditions are not met, just copy the file from `from_tree` to `to_tree`.
Parameters
---------
to_tree : str
path of tree to fuse into (update into)
from_tree : str
path of tree to fuse from (update from)
lib_exts : sequence, optional
filename extensions for libraries
"""
for from_dirpath, dirnames, filenames in os.walk(from_tree):
to_dirpath = pjoin(to_tree, relpath(from_dirpath, from_tree))
# Copy any missing directories in to_path
for dirname in tuple(dirnames):
to_path = pjoin(to_dirpath, dirname)
if not exists(to_path):
from_path = pjoin(from_dirpath, dirname)
shutil.copytree(from_path, to_path)
# If copying, don't further analyze this directory
|
python
|
{
"resource": ""
}
|
q464
|
fuse_wheels
|
train
|
def fuse_wheels(to_wheel, from_wheel, out_wheel):
""" Fuse `from_wheel` into `to_wheel`, write to `out_wheel`
Parameters
---------
to_wheel : str
filename of wheel to fuse into
from_wheel : str
filename of wheel to fuse from
out_wheel : str
filename of new wheel from fusion of `to_wheel` and `from_wheel`
"""
to_wheel, from_wheel, out_wheel = [
|
python
|
{
"resource": ""
}
|
q465
|
delocate_tree_libs
|
train
|
def delocate_tree_libs(lib_dict, lib_path, root_path):
""" Move needed libraries in `lib_dict` into `lib_path`
`lib_dict` has keys naming libraries required by the files in the
corresponding value. Call the keys, "required libs". Call the values
"requiring objects".
Copy all the required libs to `lib_path`. Fix up the rpaths and install
names in the requiring objects to point to these new copies.
Exception: required libs within the directory tree pointed to by
`root_path` stay where they are, but we modify requiring objects to use
relative paths to these libraries.
Parameters
----------
lib_dict : dict
Dictionary with (key, value) pairs of (``depended_lib_path``,
``dependings_dict``) (see :func:`libsana.tree_libs`)
lib_path : str
Path in which to store copies of libs referred to in keys of
`lib_dict`. Assumed to exist
root_path : str, optional
Root directory of tree analyzed in `lib_dict`. Any required
library within the subtrees of `root_path` does not get copied, but
libraries linking to it have links adjusted to use relative path to
this library.
Returns
-------
copied_libs : dict
Filtered `lib_dict` dict containing only the (key, value) pairs from
`lib_dict` where the keys are the libraries copied to `lib_path``.
"""
copied_libs = {}
delocated_libs = set()
copied_basenames = set()
rp_root_path = realpath(root_path)
rp_lib_path = realpath(lib_path)
# Test for errors first to avoid getting half-way through changing the tree
for required, requirings in lib_dict.items():
if required.startswith('@'): # assume @rpath etc are correct
# But warn, because likely they are not
warnings.warn('Not processing required path {0} because it '
'begins with @'.format(required))
continue
r_ed_base = basename(required)
if relpath(required, rp_root_path).startswith('..'):
# Not local, plan to copy
if r_ed_base in copied_basenames:
raise DelocationError('Already planning to copy library with '
'same basename as: ' + r_ed_base)
|
python
|
{
"resource": ""
}
|
q466
|
copy_recurse
|
train
|
def copy_recurse(lib_path, copy_filt_func = None, copied_libs = None):
""" Analyze `lib_path` for library dependencies and copy libraries
`lib_path` is a directory containing libraries. The libraries might
themselves have dependencies. This function analyzes the dependencies and
copies library dependencies that match the filter `copy_filt_func`. It also
adjusts the depending libraries to use the copy. It keeps iterating over
`lib_path` until all matching dependencies (of dependencies of dependencies
...) have been copied.
Parameters
----------
lib_path : str
Directory containing libraries
copy_filt_func : None or callable, optional
If None, copy any library that found libraries depend on. If callable,
|
python
|
{
"resource": ""
}
|
q467
|
_copy_required
|
train
|
def _copy_required(lib_path, copy_filt_func, copied_libs):
""" Copy libraries required for files in `lib_path` to `lib_path`
Augment `copied_libs` dictionary with any newly copied libraries, modifying
`copied_libs` in-place - see Notes.
This is one pass of ``copy_recurse``
Parameters
----------
lib_path : str
Directory containing libraries
copy_filt_func : None or callable, optional
If None, copy any library that found libraries depend on. If callable,
called on each library name; copy where ``copy_filt_func(libname)`` is
True, don't copy otherwise
copied_libs : dict
See :func:`copy_recurse` for definition.
Notes
-----
If we need to copy another library, add that (``depended_lib_path``,
``dependings_dict``) to `copied_libs`. ``dependings_dict`` has (key,
value) pairs of (``depending_lib_path``, ``install_name``).
``depending_lib_path`` will be the original (canonical) library name, not
the copy in ``lib_path``.
Sometimes we copy a library, that further depends on a library we have
already copied. In this case update ``copied_libs[depended_lib]`` with the
extra dependency (as well as fixing up the install names for the depending
library).
For example, imagine we start with a lib path like this::
my_lib_path/
libA.dylib
libB.dylib
Our input `copied_libs` has keys ``/sys/libA.dylib``, ``/sys/libB.lib``
telling us we previously copied those guys from the ``/sys`` folder.
On a first pass, we
|
python
|
{
"resource": ""
}
|
q468
|
delocate_path
|
train
|
def delocate_path(tree_path, lib_path,
lib_filt_func = None,
copy_filt_func = filter_system_libs):
""" Copy required libraries for files in `tree_path` into `lib_path`
Parameters
----------
tree_path : str
Root path of tree to search for required libraries
lib_path : str
Directory into which we copy required libraries
lib_filt_func : None or str or callable, optional
If None, inspect all files for dependencies on dynamic libraries. If
callable, accepts filename as argument, returns True if we should
inspect the file, False otherwise. If str == "dylibs-only" then inspect
only files with known dynamic library extensions (``.dylib``, ``.so``).
copy_filt_func : None or callable, optional
If callable, called on each library name detected as a dependency; copy
where ``copy_filt_func(libname)`` is True, don't copy otherwise.
Default is callable rejecting only libraries beginning with
``/usr/lib`` or ``/System``. None means copy all libraries. This will
usually end up copying large parts of the system run-time.
Returns
-------
copied_libs : dict
dict containing the (key, value) pairs of (``copied_lib_path``,
``dependings_dict``), where ``copied_lib_path`` is a library real path
that was copied into `lib_sdir` of the wheel packages, and
|
python
|
{
"resource": ""
}
|
q469
|
_merge_lib_dict
|
train
|
def _merge_lib_dict(d1, d2):
""" Merges lib_dict `d2` into lib_dict `d1`
"""
for required, requirings in d2.items():
if required in d1:
|
python
|
{
"resource": ""
}
|
q470
|
delocate_wheel
|
train
|
def delocate_wheel(in_wheel,
out_wheel = None,
lib_sdir = '.dylibs',
lib_filt_func = None,
copy_filt_func = filter_system_libs,
require_archs = None,
check_verbose = False,
):
""" Update wheel by copying required libraries to `lib_sdir` in wheel
Create `lib_sdir` in wheel tree only if we are copying one or more
libraries.
If `out_wheel` is None (the default), overwrite the wheel `in_wheel`
in-place.
Parameters
----------
in_wheel : str
Filename of wheel to process
out_wheel : None or str
Filename of processed wheel to write. If None, overwrite `in_wheel`
lib_sdir : str, optional
Subdirectory name in wheel package directory (or directories) to store
needed libraries.
lib_filt_func : None or str or callable, optional
If None, inspect all files for dependencies on dynamic libraries. If
callable, accepts filename as argument, returns True if we should
inspect the file, False otherwise. If str == "dylibs-only" then inspect
only files with known dynamic library extensions (``.dylib``, ``.so``).
copy_filt_func : None or callable, optional
If callable, called on each library name detected as a dependency; copy
where ``copy_filt_func(libname)`` is True, don't copy otherwise.
Default is callable rejecting only libraries beginning with
``/usr/lib`` or ``/System``. None means copy all libraries. This will
usually end up copying large parts of the system run-time.
require_archs : None or str or sequence, optional
If None, do no checks of architectures in libraries. If sequence,
sequence of architectures (output from ``lipo -info``) that every
library in the wheels should have (e.g. ``['x86_64', 'i386']``). An
empty sequence results in checks that depended libraries have the same
archs as depending libraries. If string, either "intel" (corresponds
to sequence ``['x86_64', 'i386']``) or name of required architecture
(e.g "i386" or "x86_64").
check_verbose : bool, optional
If True, print warning messages about missing required architectures
Returns
-------
copied_libs : dict
dict containing the (key, value) pairs of (``copied_lib_path``,
``dependings_dict``), where ``copied_lib_path`` is a library real path
that was copied into `lib_sdir` of the wheel packages, and
``dependings_dict`` is a dictionary with key, value pairs where the key
is a path in the wheel depending on ``copied_lib_path``, and the value
is the ``install_name`` of ``copied_lib_path`` in the depending
library. The filenames in the keys are relative to the wheel root path.
"""
if lib_filt_func == "dylibs-only":
lib_filt_func = _dylibs_only
in_wheel = abspath(in_wheel)
if out_wheel is None:
out_wheel = in_wheel
else:
out_wheel = abspath(out_wheel)
in_place = in_wheel == out_wheel
with TemporaryDirectory() as tmpdir:
|
python
|
{
"resource": ""
}
|
q471
|
patch_wheel
|
train
|
def patch_wheel(in_wheel, patch_fname, out_wheel=None):
""" Apply ``-p1`` style patch in `patch_fname` to contents of `in_wheel`
If `out_wheel` is None (the default), overwrite the wheel `in_wheel`
in-place.
Parameters
----------
in_wheel : str
Filename of wheel to process
patch_fname : str
Filename of patch file. Will be applied with ``patch -p1 <
patch_fname``
out_wheel : None or str
Filename of patched wheel to write. If None, overwrite `in_wheel`
"""
in_wheel = abspath(in_wheel)
patch_fname = abspath(patch_fname)
if out_wheel is None:
out_wheel = in_wheel
else:
out_wheel = abspath(out_wheel)
if not exists(patch_fname):
raise ValueError("patch file {0} does not
|
python
|
{
"resource": ""
}
|
q472
|
check_archs
|
train
|
def check_archs(copied_libs, require_archs=(), stop_fast=False):
""" Check compatibility of archs in `copied_libs` dict
Parameters
----------
copied_libs : dict
dict containing the (key, value) pairs of (``copied_lib_path``,
``dependings_dict``), where ``copied_lib_path`` is a library real path
that has been copied during delocation, and ``dependings_dict`` is a
dictionary with key, value pairs where the key is a path in the target
being delocated (a wheel or path) depending on ``copied_lib_path``, and
the value is the ``install_name`` of ``copied_lib_path`` in the
depending library.
require_archs : str or sequence, optional
Architectures we require to be present in all library files in wheel.
If an empty sequence, just check that depended libraries do have the
architectures of the depending libraries, with no constraints on what
these architectures are. If a sequence, then a set of required
architectures e.g. ``['i386', 'x86_64']`` to specify dual Intel
architectures. If a string, then a standard architecture name as
returned by ``lipo -info`` or the string "intel", corresponding to the
sequence ``['i386', 'x86_64']``
stop_fast : bool, optional
Whether to give up collecting errors after the first
Returns
-------
bads : set
set of length 2 or 3 tuples. A length 2 tuple is of form
``(depending_lib, missing_archs)`` meaning that an arch in
`require_archs` was missing from ``depending_lib``. A length 3 tuple
is of form ``(depended_lib, depending_lib, missing_archs)`` where
``depended_lib`` is the filename of the library depended on,
|
python
|
{
"resource": ""
}
|
q473
|
bads_report
|
train
|
def bads_report(bads, path_prefix=None):
""" Return a nice report of bad architectures in `bads`
Parameters
----------
bads : set
set of length 2 or 3 tuples. A length 2 tuple is of form
``(depending_lib, missing_archs)`` meaning that an arch in
`require_archs` was missing from ``depending_lib``. A length 3 tuple
is of form ``(depended_lib, depending_lib, missing_archs)`` where
``depended_lib`` is the filename of the library depended on,
``depending_lib`` is the library depending on ``depending_lib`` and
``missing_archs`` is a set of missing architecture strings giving
architectures present in ``depending_lib`` and missing in
``depended_lib``. An empty set means all architectures were present as
required.
path_prefix : None or str, optional
Path prefix to strip from ``depended_lib`` and ``depending_lib``. None
means do not strip anything.
Returns
-------
report : str
A nice report for printing
"""
path_processor = ((lambda x : x) if path_prefix is None
else get_rp_stripper(path_prefix))
reports = []
for result in bads:
if len(result) == 3:
|
python
|
{
"resource": ""
}
|
q474
|
tree_libs
|
train
|
def tree_libs(start_path, filt_func=None):
""" Return analysis of library dependencies within `start_path`
Parameters
----------
start_path : str
root path of tree to search for libraries depending on other libraries.
filt_func : None or callable, optional
If None, inspect all files for library dependencies. If callable,
accepts filename as argument, returns True if we should inspect the
file, False otherwise.
Returns
-------
lib_dict : dict
dictionary with (key, value) pairs of (``libpath``,
``dependings_dict``).
``libpath`` is canonical (``os.path.realpath``) filename of library, or
library name starting with {'@rpath', '@loader_path',
'@executable_path'}.
``dependings_dict`` is a dict with (key, value) pairs of
(``depending_libpath``, ``install_name``), where ``dependings_libpath``
|
python
|
{
"resource": ""
}
|
q475
|
get_prefix_stripper
|
train
|
def get_prefix_stripper(strip_prefix):
""" Return function to strip `strip_prefix` prefix from string if present
Parameters
----------
prefix : str
Prefix to strip from the beginning of string if present
Returns
-------
stripper : func
function such that ``stripper(a_string)`` will strip `prefix` from
|
python
|
{
"resource": ""
}
|
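The q475 docstring describes a factory that returns a prefix-stripping closure. A minimal sketch of that pattern; `prefix_stripper` is an illustrative name, not the library's function:

```python
def prefix_stripper(strip_prefix):
    """Return a function that removes `strip_prefix` from a string if present."""
    n = len(strip_prefix)
    def stripper(a_string):
        return a_string[n:] if a_string.startswith(strip_prefix) else a_string
    return stripper

strip_tmp = prefix_stripper('/tmp/wheel/')
print(strip_tmp('/tmp/wheel/pkg/lib.dylib'))  # pkg/lib.dylib
print(strip_tmp('/usr/lib/libz.dylib'))       # unchanged
```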
q476
|
stripped_lib_dict
|
train
|
def stripped_lib_dict(lib_dict, strip_prefix):
""" Return `lib_dict` with `strip_prefix` removed from start of paths
Use to give form of `lib_dict` that appears relative to some base path
given by `strip_prefix`. Particularly useful for analyzing wheels where we
unpack to a temporary path before analyzing.
Parameters
----------
lib_dict : dict
See :func:`tree_libs` for definition. All depending and depended paths
are canonical (therefore absolute)
strip_prefix : str
Prefix to remove (if present) from all depended and depending library
paths in `lib_dict`
Returns
-------
relative_dict : dict
`lib_dict` with `strip_prefix` removed from beginning of
|
python
|
{
"resource": ""
}
|
q477
|
wheel_libs
|
train
|
def wheel_libs(wheel_fname, filt_func = None):
""" Return analysis of library dependencies with a Python wheel
Use this routine for a dump of the dependency tree.
Parameters
----------
wheel_fname : str
Filename of wheel
filt_func : None or callable, optional
If None, inspect all files for library dependencies. If callable,
accepts filename as argument, returns True if we should inspect the
file, False otherwise.
Returns
-------
lib_dict : dict
dictionary with (key, value) pairs of (``libpath``,
``dependings_dict``). ``libpath`` is library being depended on,
relative to wheel root path if within wheel
|
python
|
{
"resource": ""
}
|
q478
|
rewrite_record
|
train
|
def rewrite_record(bdist_dir):
""" Rewrite RECORD file with hashes for all files in `wheel_sdir`
Copied from :method:`wheel.bdist_wheel.bdist_wheel.write_record`
Will also unsign wheel
Parameters
----------
bdist_dir : str
Path of unpacked wheel file
"""
info_dirs = glob.glob(pjoin(bdist_dir, '*.dist-info'))
if len(info_dirs) != 1:
raise WheelToolsError("Should be exactly one `*.dist_info` directory")
record_path = pjoin(info_dirs[0], 'RECORD')
record_relpath = relpath(record_path, bdist_dir)
# Unsign wheel - because we're invalidating the record hash
sig_path = pjoin(info_dirs[0], 'RECORD.jws')
if exists(sig_path):
os.unlink(sig_path)
def walk():
for dir, dirs, files in os.walk(bdist_dir):
for f in files:
yield pjoin(dir, f)
def skip(path):
"""Wheel hashes every possible file."""
return (path == record_relpath)
with _open_for_csv(record_path, 'w+') as record_file:
writer = csv.writer(record_file)
for path in walk():
|
python
|
{
"resource": ""
}
|
q479
|
add_platforms
|
train
|
def add_platforms(in_wheel, platforms, out_path=None, clobber=False):
""" Add platform tags `platforms` to `in_wheel` filename and WHEEL tags
Add any platform tags in `platforms` that are missing from `in_wheel`
filename.
Add any platform tags in `platforms` that are missing from `in_wheel`
``WHEEL`` file.
Parameters
----------
in_wheel : str
Filename of wheel to which to add platform tags
platforms : iterable
platform tags to add to wheel filename and WHEEL tags - e.g.
``('macosx_10_9_intel', 'macosx_10_9_x86_64')
out_path : None or str, optional
Directory to which to write new wheel. Default is directory containing
`in_wheel`
clobber : bool, optional
If True, overwrite existing output filename, otherwise raise error
Returns
-------
out_wheel : None or str
Absolute path of wheel file written, or None if no wheel file written.
"""
in_wheel = abspath(in_wheel)
out_path = dirname(in_wheel) if out_path is None else abspath(out_path)
wf = WheelFile(in_wheel)
info_fname = _get_wheelinfo_name(wf)
# Check what tags we have
in_fname_tags = wf.parsed_filename.groupdict()['plat'].split('.')
extra_fname_tags = [tag for tag in platforms if tag not in in_fname_tags]
in_wheel_base, ext = splitext(basename(in_wheel))
out_wheel_base = '.'.join([in_wheel_base] + list(extra_fname_tags))
out_wheel = pjoin(out_path, out_wheel_base + ext)
if exists(out_wheel) and not clobber:
raise WheelToolsError('Not overwriting {0}; set clobber=True '
'to overwrite'.format(out_wheel))
with InWheelCtx(in_wheel) as ctx:
info = read_pkg_info(info_fname)
|
python
|
{
"resource": ""
}
|
q480
|
temporal_betweenness_centrality
|
train
|
def temporal_betweenness_centrality(tnet=None, paths=None, calc='time'):
'''
Returns temporal betweenness centrality per node.
Parameters
-----------
Input should be *either* tnet or paths.
data : array or dict
Temporal network input (graphlet or contact). nettype: 'bu', 'bd'.
calc : str
either 'global' or 'time'
paths : pandas dataframe
Output of TenetoBIDS.networkmeasure.shortest_temporal_paths
Returns
--------
:close: array
normalized temporal betweenness centrality.
If calc = 'time', returns (node,time)
If calc = 'global', returns (node)
'''
if tnet is not
|
python
|
{
"resource": ""
}
|
q481
|
allegiance
|
train
|
def allegiance(community):
"""
Computes the allegiance matrix with values representing the probability that
nodes i and j were assigned to the same community by time-varying clustering methods.
parameters
----------
community : array
array of community assignment of size node,time
returns
-------
P : array
module allegiance matrix, with P_ij probability that area i and j are in the same community
Reference:
----------
Bassett, et al. (2013) “Robust detection of dynamic community structure in networks”, Chaos, 23, 1
"""
N = community.shape[0]
C = community.shape[1]
T = P = np.zeros([N, N])
for t in range(len(community[0, :])):
for i in range(len(community[:, 0])):
|
python
|
{
"resource": ""
}
|
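The q481 docstring defines module allegiance as the probability (over time points) that two nodes share a community label. A hedged numpy sketch of that definition, independent of the snippet's own loop, which is cut off above:

```python
import numpy as np

def allegiance_matrix(community):
    """P[i, j] = fraction of time points at which nodes i and j share a community."""
    n_nodes, n_time = community.shape
    P = np.zeros((n_nodes, n_nodes))
    for t in range(n_time):
        same = community[:, t][:, None] == community[:, t][None, :]
        P += same                      # accumulate co-assignment counts
    return P / n_time

labels = np.array([[0, 0, 1],
                   [0, 1, 1],
                   [1, 1, 1]])
print(allegiance_matrix(labels))       # P[0, 1] == 2/3, diagonal == 1
```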
q482
|
rand_poisson
|
train
|
def rand_poisson(nnodes, ncontacts, lam=1, nettype='bu', netinfo=None, netrep='graphlet'):
"""
Generate a random network where intervals between contacts are distributed by a poisson distribution
Parameters
----------
nnodes : int
Number of nodes in networks
ncontacts : int or list
Number of expected contacts (i.e. edges). If list, number of contacts for each node.
Any zeros drawn are ignored so returned degree of network can be smaller than ncontacts.
lam : int or list
Expectation of interval.
nettype : str
'bu' or 'bd'
netinfo : dict
Dictionary of additional information
netrep : str
How the output should be.
If ncontacts is a list, so should lam.
Returns
-------
net : array or dict
Random network with intervals between active edges being Poisson distributed.
"""
if isinstance(ncontacts, list):
if len(ncontacts) != nnodes:
raise ValueError(
'Number of contacts, if a list, should be one per node')
if isinstance(lam, list):
if len(lam) != nnodes:
raise ValueError(
'Lambda value of Poisson distribution, if a list, should be one per node')
if isinstance(lam, list) and not isinstance(ncontacts, list) or not isinstance(lam, list) and isinstance(ncontacts, list):
raise ValueError(
'When one of lambda or ncontacts is given as a list, the other argument must also be a list.')
if nettype == 'bu':
edgen = int((nnodes*(nnodes-1))/2)
elif nettype == 'bd':
edgen = int(nnodes*nnodes)
|
python
|
{
"resource": ""
}
|
q483
|
temporal_efficiency
|
train
|
def temporal_efficiency(tnet=None, paths=None, calc='global'):
r"""
Returns temporal efficiency estimate. BU networks only.
Parameters
----------
Input should be *either* tnet or paths.
data : array or dict
Temporal network input (graphlet or contact). nettype: 'bu', 'bd'.
paths : pandas dataframe
Output of TenetoBIDS.networkmeasure.shortest_temporal_paths
calc : str
Options: 'global' (default) - measure averages over time and nodes;
'node' or 'node_from' average over nodes (i) and time. Giving average efficiency for i to j;
'node_to' measure average over nodes j and time;
Giving average efficiency using paths to j from i;
|
python
|
{
"resource": ""
}
|
q484
|
TemporalNetwork.network_from_array
|
train
|
def network_from_array(self, array):
"""impo
Defines a network from an array.
Parameters
----------
array : array
3D numpy array.
"""
if len(array.shape) == 2:
array = np.array(array, ndmin=3).transpose([1, 2, 0])
teneto.utils.check_TemporalNetwork_input(array, 'array')
uvals = np.unique(array)
if len(uvals) == 2 and 1 in uvals and 0 in uvals:
i, j, t = np.where(array == 1)
self.network = pd.DataFrame(data={'i': i, 'j': j, 't': t})
|
python
|
{
"resource": ""
}
|
q485
|
TemporalNetwork._drop_duplicate_ij
|
train
|
def _drop_duplicate_ij(self):
"""
Drops duplicate entries from the network dataframe.
"""
self.network['ij'] = list(map(lambda x: tuple(sorted(x)), list(
zip(*[self.network['i'].values, self.network['j'].values]))))
|
python
|
{
"resource": ""
}
|
q486
|
TemporalNetwork._drop_diagonal
|
train
|
def _drop_diagonal(self):
"""
Drops self-contacts from the network dataframe.
"""
self.network = self.network.where(
|
python
|
{
"resource": ""
}
|
q487
|
TemporalNetwork.add_edge
|
train
|
def add_edge(self, edgelist):
"""
Adds an edge to the network.
Parameters
----------
edgelist : list
a list (or list of lists) containing the i, j and t indices to be added. For weighted networks the list should also contain a 'weight' key.
Returns
--------
Updates TenetoBIDS.network dataframe with new edge
"""
if not isinstance(edgelist[0], list):
edgelist = [edgelist]
teneto.utils.check_TemporalNetwork_input(edgelist, 'edgelist')
if len(edgelist[0]) == 4:
colnames = ['i', 'j', 't', 'weight']
|
python
|
{
"resource": ""
}
|
q488
|
TemporalNetwork.drop_edge
|
train
|
def drop_edge(self, edgelist):
"""
Removes an edge from network.
Parameters
----------
edgelist : list
a list (or list of lists) containing the i, j and t indices to be removed.
Returns
--------
Updates TenetoBIDS.network dataframe
"""
if not isinstance(edgelist[0], list):
edgelist = [edgelist]
teneto.utils.check_TemporalNetwork_input(edgelist, 'edgelist')
|
python
|
{
"resource": ""
}
|
q489
|
TemporalNetwork.calc_networkmeasure
|
train
|
def calc_networkmeasure(self, networkmeasure, **measureparams):
"""
Calculate network measure.
Parameters
-----------
networkmeasure : str
Function to call. Functions available are in teneto.networkmeasures
measureparams : kwargs
kwargs for teneto.networkmeasure.[networkmeasure]
"""
availablemeasures = [f for f in dir(
teneto.networkmeasures) if not f.startswith('__')]
if networkmeasure not in availablemeasures:
raise ValueError(
|
python
|
{
"resource": ""
}
|
q490
|
TemporalNetwork.generatenetwork
|
train
|
def generatenetwork(self, networktype, **networkparams):
"""
Generate a network
Parameters
-----------
networktype : str
Function to call. Functions available are in teneto.generatenetwork
measureparams : kwargs
kwargs for teneto.generatenetwork.[networktype]
Returns
--------
TenetoBIDS.network is made with the generated network.
"""
availabletypes = [f for f in dir(
teneto.generatenetwork) if not f.startswith('__')]
if networktype not in availabletypes:
raise ValueError(
|
python
|
{
"resource": ""
}
|
q491
|
TemporalNetwork.save_aspickle
|
train
|
def save_aspickle(self, fname):
"""
Saves object as pickle.
fname : str
file path.
"""
if fname[-4:] != '.pkl':
fname += '.pkl'
|
python
|
{
"resource": ""
}
|
q492
|
postpro_fisher
|
train
|
def postpro_fisher(data, report=None):
"""
Performs fisher transform on everything in data.
If report variable is passed, this is added to the report.
"""
if not report:
report = {}
# Due to rounding errors
data[data < -0.99999999999999] = -1
data[data > 0.99999999999999] = 1
fisher_data = 0.5 *
|
python
|
{
"resource": ""
}
|
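The Fisher transform referenced in q492 is z = 0.5 * ln((1 + r) / (1 - r)), which equals arctanh(r); the snippet's own expression is cut off above. A short numpy illustration of the formula:

```python
import numpy as np

r = np.array([-0.5, 0.0, 0.5, 0.9])
z = 0.5 * np.log((1 + r) / (1 - r))
print(np.allclose(z, np.arctanh(r)))  # True
```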
q493
|
postpro_boxcox
|
train
|
def postpro_boxcox(data, report=None):
"""
Performs box cox transform on everything in data.
If report variable is passed, this is added to the report.
"""
if not report:
report = {}
# Note the min value of all time series will now be at least 1.
mindata = 1 - np.nanmin(data)
data = data + mindata
ind = np.triu_indices(data.shape[0], k=1)
boxcox_list = np.array([sp.stats.boxcox(np.squeeze(
data[ind[0][n], ind[1][n], :])) for n in range(0, len(ind[0]))])
boxcox_data = np.zeros(data.shape)
boxcox_data[ind[0], ind[1], :] = np.vstack(boxcox_list[:, 0])
boxcox_data[ind[1], ind[0], :] = np.vstack(boxcox_list[:, 0])
bccheck = np.array(np.transpose(boxcox_data, [2, 0, 1]))
bccheck = (bccheck - bccheck.mean(axis=0)) / bccheck.std(axis=0)
bccheck = np.squeeze(np.mean(bccheck, axis=0))
np.fill_diagonal(bccheck, 0)
report['boxcox'] = {}
report['boxcox']['performed'] = 'yes'
report['boxcox']['lambda'] = [
tuple([ind[0][n], ind[1][n], boxcox_list[n, -1]]) for n in range(0, len(ind[0]))]
report['boxcox']['shift'] = mindata
report['boxcox']['shited_to'] = 1
if np.sum(np.isnan(bccheck)) > 0:
report['boxcox'] = {}
report['boxcox']['performed'] = 'FAILED'
report['boxcox']['failure_reason'] = (
'Box cox transform is returning edges with uniform values through time. '
'This is probably due to one or more
|
python
|
{
"resource": ""
}
|
q494
|
binarize_rdp
|
train
|
def binarize_rdp(netin, level, sign='pos', axis='time'):
"""
Binarizes a network based on RDP compression.
Parameters
----------
netin : array or dict
Network (graphlet or contact representation),
level : float
Delta parameter which is the tolerated error in RDP compression.
sign : str, default='pos'
States the sign of the thresholding. Can be 'pos', 'neg' or 'both'. If "neg", only negative values are thresholded and vice versa.
Returns
-------
netout : array or dict (depending on input)
Binarized network
"""
netin, netinfo = process_input(netin, ['C', 'G', 'TO'])
trajectory = rdp(netin, level)
contacts = []
# Use the trajectory points as threshold
for n in range(trajectory['index'].shape[0]):
if sign == 'pos':
sel = trajectory['trajectory_points'][n][trajectory['trajectory']
[n][trajectory['trajectory_points'][n]] > 0]
elif sign == 'neg':
sel = trajectory['trajectory_points'][n][trajectory['trajectory']
|
python
|
{
"resource": ""
}
|
q495
|
binarize
|
train
|
def binarize(netin, threshold_type, threshold_level, sign='pos', axis='time'):
"""
Binarizes a network, returning the network. General wrapper function for different binarization functions.
Parameters
----------
netin : array or dict
Network (graphlet or contact representation),
threshold_type : str
What type of thresholds to make binarization. Options: 'rdp', 'percent', 'magnitude'.
threshold_level : str
Parameter dependent on threshold type.
If 'rdp', it is the delta (i.e. error allowed in compression).
If 'percent', it is the percentage to keep (e.g. 0.1, means keep 10% of signal).
If 'magnitude', it is the amplitude of signal to keep.
sign : str, default='pos'
States the sign of the thresholding. Can be 'pos', 'neg' or 'both'. If "neg", only negative values are thresholded and vice versa.
axis :
|
python
|
{
"resource": ""
}
|
q496
|
process_input
|
train
|
def process_input(netIn, allowedformats, outputformat='G'):
"""
Takes input network and checks what the input is.
Parameters
----------
netIn : array, dict, or TemporalNetwork
Network (graphlet, contact or object)
allowedformats : str
Which format of network objects that are allowed. Options: 'C', 'TN', 'G'.
outputformat: str, default=G
Target output format. Options: 'C' or 'G'.
Returns
-------
C : dict
OR
G : array
Graphlet representation.
netInfo : dict
Metainformation about network.
OR
tnet : object
object of TemporalNetwork class
"""
inputtype = checkInput(netIn)
# Convert TN to G representation
if inputtype == 'TN' and 'TN' in allowedformats and outputformat != 'TN':
G = netIn.df_to_array()
netInfo = {'nettype': netIn.nettype, 'netshape': netIn.netshape}
elif inputtype == 'TN' and 'TN' in allowedformats and outputformat == 'TN':
TN = netIn
elif inputtype == 'C' and 'C' in allowedformats and outputformat == 'G':
G = contact2graphlet(netIn)
netInfo = dict(netIn)
netInfo.pop('contacts')
elif inputtype == 'C' and 'C' in allowedformats and outputformat == 'TN':
TN = TemporalNetwork(from_dict=netIn)
elif inputtype == 'G' and 'G' in allowedformats and outputformat == 'TN':
TN = TemporalNetwork(from_array=netIn)
# Get network type if not set yet
elif inputtype == 'G' and 'G' in allowedformats:
|
python
|
{
"resource": ""
}
|
q497
|
clean_community_indexes
|
train
|
def clean_community_indexes(communityID):
"""
Takes community assignments as input and returns them reindexed using the smallest numbers possible.
Parameters
----------
communityID : array-like
list or array of integers. Output from community detection algorithms.
Returns
-------
new_communityID : array
cleaned list going from 0 to len(np.unique(communityID))-1
|
python
|
{
"resource": ""
}
|
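Reindexing community labels to consecutive integers, as described in q497, can be sketched with `np.unique` and its `return_inverse` output; whether the library does it this way is not shown above, and `reindex_communities` is an illustrative name:

```python
import numpy as np

def reindex_communities(community_id):
    """Map arbitrary community labels onto 0..n_unique-1, preserving grouping."""
    _, new_id = np.unique(community_id, return_inverse=True)
    return new_id

print(reindex_communities([10, 10, 42, 7, 42]))  # [1 1 2 0 2]
```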
q498
|
multiple_contacts_get_values
|
train
|
def multiple_contacts_get_values(C):
"""
Given a contact representation with repeated contacts, this function removes duplicates and stores the repeat count as a value
Parameters
----------
C : dict
contact representation with multiple repeated contacts.
Returns
-------
:C_out: dict
Contact representation with duplicate contacts removed and the number of duplicates is now in the 'values' field.
"""
d = collections.OrderedDict()
for c in C['contacts']:
ct = tuple(c)
if ct in d:
d[ct] +=
|
python
|
{
"resource": ""
}
|
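Collapsing repeated contacts into per-contact counts, as q498 describes, amounts to counting duplicate (i, j, t) tuples while preserving their first-seen order. A small sketch of that counting step (the variable names are illustrative):

```python
import collections

contacts = [(0, 1, 2), (0, 1, 2), (1, 2, 5), (0, 1, 2)]
counts = collections.OrderedDict()
for c in contacts:
    counts[c] = counts.get(c, 0) + 1   # tally duplicates per contact tuple

print(list(counts.keys()))    # [(0, 1, 2), (1, 2, 5)]
print(list(counts.values()))  # [3, 1]
```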
q499
|
check_distance_funciton_input
|
train
|
def check_distance_funciton_input(distance_func_name, netinfo):
"""
Checks distance_func_name; if it is specified as 'default', selects a default distance function based on the type of the network.
Parameters
----------
distance_func_name : str
distance function name.
netinfo : dict
the output of utils.process_input
Returns
|
python
|
{
"resource": ""
}
|