docstring | function | __index_level_0__ |
---|---|---|
SCP a remote file to the local machine
Args:
remote_path (str)
local_path (str) | def scp_file_remote_to_local(self, remote_path, local_path):
sshadd_command = [
'ssh-add',
'/Users/pyrat/.ssh/ubuntuNode'
]
self.info_log(
"executing command: %s" %
' '.join(sshadd_command)
)
p = subprocess.Popen(sshadd_command)
p.wait()
scp_command = [
'scp',
'-o',
'StrictHostKeyChecking=no',
'%s@%s:"%s"' %
(
self.browser_config.get('username'),
self.get_ip(),
remote_path
),
local_path
]
self.info_log(
"executing command: %s" %
' '.join(scp_command)
)
p = subprocess.Popen(scp_command)
p.wait() | 1,054,908 |
generate background term from SNPs
Args:
vTot: variance of Yc+Yi
vCommon: variance of Yc
XX: kinship matrix
a: common scales, it can be set for debugging purposes
c: independent scales, it can be set for debugging purposes | def _genBgTerm_fromXX(self,vTot,vCommon,XX,a=None,c=None):
vSpecific = vTot-vCommon
SP.random.seed(0)
if c is None: c = SP.randn(self.P)
XX += 1e-3 * SP.eye(XX.shape[0])
L = LA.cholesky(XX,lower=True)
# common effect
R = self.genWeights(self.N,self.P)
A = self.genTraitEffect()
if a is not None: A[0,:] = a
Yc = SP.dot(L,SP.dot(R,A))
Yc*= SP.sqrt(vCommon)/SP.sqrt(Yc.var(0).mean())
# specific effect
R = SP.randn(self.N,self.P)
Yi = SP.dot(L,SP.dot(R,SP.diag(c)))
Yi*= SP.sqrt(vSpecific)/SP.sqrt(Yi.var(0).mean())
return Yc, Yi | 1,055,177 |
Decorator for warning users of deprecated functions before use.
Args:
newmethod (str): Name of method to use instead. | def depricated_name(newmethod):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(
"Function {} is depricated, please use {} instead.".format(func.__name__, newmethod),
category=DeprecationWarning, stacklevel=2
)
warnings.simplefilter('default', DeprecationWarning)
return func(*args, **kwargs)
return wrapper
return decorator | 1,055,274 |
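A minimal usage sketch (hypothetical names, not part of the original row); it assumes `warnings` and `functools.wraps` are imported where the decorator lives:

@depricated_name('new_fetch')
def old_fetch():
    return 42

old_fetch()  # emits a DeprecationWarning pointing at 'new_fetch', then returns 42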
load data file
Args:
cache_genotype: load genotypes fully into memory (default: False)
cache_phenotype: load phenotypes fully into memory (default: True) | def load(self,cache_genotype=False,cache_phenotype=True):
self.f = h5py.File(self.file_name,'r')
self.pheno = self.f['phenotype']
self.geno = self.f['genotype']
#TODO: load all row and column headers for genotype and phenotype
#parse out those we always need for convenience
self.genoM = self.geno['matrix']
self.phenoM = self.pheno['matrix']
self.sample_ID = self.geno['row_header']['sample_ID'][:]
self.genoChrom = self.geno['col_header']['chrom'][:]
self.genoPos = self.geno['col_header']['pos'][:]
if 'pos_cum' in list(self.geno['col_header'].keys()):
self.genoPos_cum = self.geno['col_header']['pos_cum'][:]
else:
self.genoPos_cum = None
self.phenotype_ID = self.pheno['col_header']['phenotype_ID'][:]
#cache?
if cache_genotype:
self.genoM = self.genoM[:]
if cache_phenotype:
self.phenoM = self.phenoM[:]
# Additional pheno col header
headers = list(self.pheno['col_header'].keys())
if 'gene_ID' in headers:
self.eqtl = True
self.geneID = self.pheno['col_header']['gene_ID'][:]
self.gene_pos = SP.array([self.pheno['col_header']['gene_chrom'][:],self.pheno['col_header']['gene_start'][:],self.pheno['col_header']['gene_end'][:]],dtype='int').T
self.geneIDs= list(set(self.geneID))
else:
self.eqtl = False
if 'environment' in headers:
self.E = self.pheno['col_header/environment'][:]
self.Es = list(set(self.E))
else:
self.E = None
#dimensions
self.N = self.genoM.shape[0]
self.S = self.genoM.shape[1]
self.P = self.phenoM.shape[1]
assert (self.genoM.shape[0]==self.phenoM.shape[0]), 'dimension mismatch' | 1,055,395 |
sample a particular set of individuals (Irow) or phenotypes (Icol_pheno) or genotypes (Icol_geno)
Args:
Irow: indices for a set of individuals
Icol_pheno: indices for a set of phenotypes
Icol_geno: indices for a set of SNPs
Returns:
QTLdata object holding the specified subset of the data | def subSample(self,Irow=None,Icol_geno=None,Icol_pheno=None):
C = copy.copy(self)
if Irow is not None:
C.genoM = C.genoM[Irow]
C.phenoM = C.phenoM[Irow]
C.sample_ID = C.sample_ID[Irow]
if Icol_geno is not None:
C.genoM = C.genoM[:,Icol_geno]
C.genoPos = C.genoPos[Icol_geno]
C.genoChrom = C.genoChrom[Icol_geno]
C.genoPos_cum = C.genoPos_cum[Icol_geno]
if Icol_pheno is not None:
C.phenoM = C.phenoM[:,Icol_pheno]
C.phenotype_ID = C.phenotype_ID[Icol_pheno]
if C.eqtl:
C.geneID = C.geneID[Icol_pheno]
C.gene_pos = C.gene_pos[Icol_pheno,:]
C.geneIDs = list(set(C.geneID))
if C.E is not None:
C.E = C.E[Icol_pheno]
C.Es = list(set(C.E))
C.N = C.genoM.shape[0]
C.S = C.genoM.shape[1]
C.P = C.phenoM.shape[1]
return C | 1,055,402 |
Parse the test config to a dictionary
Args:
test_config_string (str) this string comes from the --test-config
flag of the bro executable run command | def test_config_to_dict(test_config_string):
test_config = {}
if test_config_string:
for config in test_config_string.split(','):
key, value = config.split('=')
test_config[key] = value
return test_config | 1,055,407 |
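A quick usage sketch with a hypothetical --test-config value:

test_config_to_dict('register=1,debug=0')
# -> {'register': '1', 'debug': '0'}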
Parse the browser config and look for brome specific config
Args:
browser_config (dict) | def parse_brome_config_from_browser_config(browser_config):
config = {}
brome_keys = [key for key in browser_config if key.find(':') != -1]
for brome_key in brome_keys:
section, option = brome_key.split(':')
value = browser_config[brome_key]
if section not in config:
config[section] = {}
config[section][option] = value
return config | 1,055,408 |
Calculate digest of a readable object
Args:
origin -- a readable object for which to calculate the digest
algorithm -- the algorithm to use. See ``hashlib.algorithms_available`` for supported algorithms.
block_size -- the size of the block to read at each iteration | def calc_digest(origin, algorithm="sha1", block_size=None):
try:
hashM = hashlib.new(algorithm)
except ValueError:
raise ValueError('hash algorithm not supported by the underlying platform: "{0}"'.format(algorithm))
while True:
chunk = origin.read(block_size) if block_size else origin.read()
if not chunk:
break
hashM.update(chunk)
return hashM.hexdigest() | 1,055,621 |
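A minimal usage sketch using only the standard library; the digest shown is the well-known sha1 of b'hello':

import io
calc_digest(io.BytesIO(b'hello'), algorithm='sha1')
# -> 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'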
Returns all possible suffixes of an array (lazy evaluated)
Args:
arr: input array
Returns:
Array of all possible suffixes (as tuples) | def get_suffixes(arr):
arr = tuple(arr)
return (arr[i:] for i in range(len(arr))) | 1,055,749 |
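Usage sketch; materializing the lazy generator shows every suffix as a tuple:

list(get_suffixes([1, 2, 3]))
# -> [(1, 2, 3), (2, 3), (3,)]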
Randomly chooses an item according to defined weights
Args:
item_probabilities: list of (item, probability)-tuples
Returns:
random item according to the given weights | def weighted_choice(item_probabilities):
probability_sum = sum(x[1] for x in item_probabilities)
assert probability_sum > 0
random_value = random.random() * probability_sum
summed_probability = 0
for item, value in item_probabilities:
summed_probability += value
if summed_probability > random_value:
return item | 1,055,750 |
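A usage sketch (assuming `random` is imported at module level); over many draws the empirical frequencies approach the weights:

items = [('a', 0.7), ('b', 0.2), ('c', 0.1)]
counts = {key: 0 for key, _ in items}
for _ in range(10000):
    counts[weighted_choice(items)] += 1
# counts['a'] should land near 7000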
Convenience method to sample from this distribution.
Args:
size (int or tuple): Shape of return value. Each element is drawn
independently from this distribution. | def rvs(self, size=1):
return np.random.multivariate_normal(self.mean, self.cov, size) | 1,055,802 |
calculate digest for the given file or readable/seekable object
Args:
origin -- could be the path of a file or a readable/seekable object ( fileobject, stream, stringIO...)
Returns:
String representing the digest for the given origin | def _calc_digest(self, origin):
if hasattr(origin, 'read') and hasattr(origin, 'seek'):
pos = origin.tell()
digest = hashtools.calc_digest(origin, algorithm=self._conf['hash_alg'])
origin.seek(pos)
else:
digest = hashtools.calc_file_digest(origin, algorithm=self._conf['hash_alg'])
return digest | 1,056,062 |
Make folders recursively for the given path and
check read and write permission on the path
Args:
path -- path to the leaf folder | def _makedirs(self, path):
try:
oldmask = os.umask(0)
os.makedirs(path, self._conf['dmode'])
os.umask(oldmask)
except OSError as e:
if(e.errno == errno.EACCES):
raise Exception('insufficient permissions to write on fsdb folder: "{0}"'.format(path))
elif(e.errno == errno.EEXIST):
fstat = os.stat(path)
if not stat.S_ISDIR(fstat.st_mode):
raise Exception('fsdb folder already exists but it is not a regular folder: "{0}"'.format(path))
elif not os.access(path, os.R_OK | os.W_OK):
raise Exception('insufficient permissions to write on fsdb folder: "{0}"'.format(path))
else:
raise e | 1,056,065 |
Add new element to fsdb.
Args:
origin -- could be the path of a file or a readable/seekable object ( fileobject, stream, stringIO...)
Returns:
String representing the digest of the file | def add(self, origin):
digest = self._calc_digest(origin)
if self.exists(digest):
self.logger.debug('Added File: [{0}] ( Already exists. Skipping transfer)'.format(digest))
return digest
absPath = self.get_file_path(digest)
absFolderPath = os.path.dirname(absPath)
# make all parent directories if they do not exist
self._makedirs(absFolderPath)
self._copy_content(origin, absPath)
self.logger.debug('Added file: "{0}" [{1}]'.format(digest, absPath))
return digest | 1,056,066 |
Remove an existing file from fsdb.
File with the given digest will be removed from fsdb and
the directory tree will be cleaned (remove empty folders)
Args:
digest -- digest of the file to remove | def remove(self, digest):
# remove file
absPath = self.get_file_path(digest)
os.remove(absPath)
# clean directory tree
tmpPath = os.path.dirname(absPath)
while tmpPath != self.fsdbRoot:
if os.path.islink(tmpPath):
raise Exception('fsdb found a link in db tree: "{0}"'.format(tmpPath))
if len(os.listdir(tmpPath)) > 0:
break
os.rmdir(tmpPath)
tmpPath = os.path.dirname(tmpPath)
self.logger.debug('Removed file: "{0}" [{1}]'.format(absPath, digest)) | 1,056,067 |
Retrieve the absolute path to the file with the given digest
Args:
digest -- digest of the file
Returns:
String representing the absolute path of the file | def get_file_path(self, digest):
relPath = Fsdb.generate_tree_path(digest, self._conf['depth'])
return os.path.join(self.fsdbRoot, relPath) | 1,056,069 |
Check the integrity of the file with the given digest
Args:
digest -- digest of the file to check
Returns:
True if the file is not corrupted | def check(self, digest):
path = self.get_file_path(digest)
if self._calc_digest(path) != digest:
self.logger.warning("found corrupted file: '{0}'".format(path))
return False
return True | 1,056,070 |
Generate a relative path from the given fileDigest
relative path has a numbers of directories levels according to @depth
Args:
fileDigest -- digest for which the relative path will be generated
depth -- number of levels to use in relative path generation
Returns:
relative path for the given digest | def generate_tree_path(fileDigest, depth):
if(depth < 0):
raise Exception("depth level can not be negative")
if(os.path.split(fileDigest)[1] != fileDigest):
raise Exception("fileDigest cannot contain path separator")
# calculate min length for the given depth (2^1+2^2+...+2^depth+ 1)
min_len = (2**(depth + 1)) - 1
if(len(fileDigest) < min_len):
raise Exception("fileDigest too short for the given depth")
path = ""
index = 0
for p in range(1, depth + 1):
jump = 2**p
path = os.path.join(path, fileDigest[index:index + jump])
index += jump
path = os.path.join(path, fileDigest[index:])
return path | 1,056,075 |
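A worked sketch (POSIX path separator assumed): with depth 2 the levels consume 2 then 4 characters, and the remainder becomes the leaf name:

Fsdb.generate_tree_path('aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d', 2)
# -> 'aa/f4c6/1ddcc5e8a2dabede0f3b482cd9aea9434d'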
estimate power for a given allele frequency, effect size beta and sample size N
Assumption:
z-score = beta_ML is distributed as p(null) = N(0, 1.0/(2*maf*(1-maf)*N)) under the null hypothesis;
the actual beta_ML is distributed as p(alt) = N(beta, 1.0/(2*maf*(1-maf)*N))
Arguments:
maf: minor allele frequency of the SNP
beta: effect size of the SNP
N: sample size (number of individuals)
Returns:
power: probability to detect a SNP in that study with the given parameters | def power(maf=0.5,beta=0.1, N=100, cutoff=5e-8):
assert maf>=0.0 and maf<=0.5, "maf needs to be between 0.0 and 0.5, got %f" % maf
if beta<0.0:
beta=-beta
std_beta = 1.0/np.sqrt(N*(2.0 * maf*(1.0-maf)))
non_centrality = beta
beta_samples = np.random.normal(loc=non_centrality, scale=std_beta)
n_grid = 100000
beta_in = np.arange(0.5/(n_grid+1.0),(n_grid-0.5)/(n_grid+1.0),1.0/(n_grid+1.0))
beta_theoretical = ((st.norm.isf(beta_in)* std_beta) + non_centrality)
pvals = st.chi2.sf( (beta_theoretical/std_beta)*(beta_theoretical/std_beta) ,1.0)
power = (pvals<cutoff).mean()
return power, pvals | 1,056,242 |
Convert a size value in bytes to its equivalent in IEC notation.
See `<http://physics.nist.gov/cuu/Units/binary.html>`_.
Parameters:
size (int): Number of bytes.
compact (bool): If ``True``, the result contains no spaces.
Return:
String representation of ``size``.
Raises:
ValueError: Negative or out of bounds value for ``size``. | def bytes2iec(size, compact=False):
postfn = lambda text: text.replace(' ', '') if compact else text
if size < 0:
raise ValueError("Negative byte size value {}".format(size))
if size < 1024:
return postfn('{:4d} bytes'.format(size))
scaled = size
for iec_unit in IEC_UNITS[1:]:
scaled /= 1024.0
if scaled < 1024:
return postfn('{:6.1f} {}'.format(scaled, iec_unit))
raise ValueError("Byte size value {} out of bounds".format(size)) | 1,056,295 |
Convert a size specification, optionally containing a scaling
unit in IEC notation, to a number of bytes.
Parameters:
size_spec (str): Number, optionally followed by a unit.
only_positive (bool): Allow only positive values?
Return:
Numeric bytes size.
Raises:
ValueError: Unknown unit specifiers, or bad leading integer. | def iec2bytes(size_spec, only_positive=True):
scale = 1
try:
size = int(0 + size_spec) # return numeric values as-is
except (TypeError, ValueError):
spec = size_spec.strip().lower()
for exp, iec_unit in enumerate(IEC_UNITS[1:], 1):
iec_unit = iec_unit.lower()
if spec.endswith(iec_unit):
spec = spec[:-len(iec_unit)]
scale = 2 ** (10 * exp)
break
elif spec.endswith(iec_unit[0]):
spec = spec[:-1]
scale = 2 ** (10 * exp)
break
else:
if spec.endswith('b'):
spec = spec[:-1]
try:
if '.' in spec:
size = float(spec.strip())
else:
size = int(spec.strip(), base=0)
except (TypeError, ValueError) as cause:
raise ValueError('Invalid bytes size specification {!r}: {}'.format(size_spec, cause))
if only_positive and size < 0:
raise ValueError('Invalid negative bytes size specification {!r}'.format(size_spec))
return int(size * scale) | 1,056,296 |
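A round-trip sketch, assuming IEC_UNITS is the usual ('bytes', 'KiB', 'MiB', ...) tuple:

iec2bytes('1 KiB')   # -> 1024
iec2bytes('2m')      # -> 2097152 (single-letter units are accepted)
bytes2iec(2097152)   # -> '   2.0 MiB'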
Merge adjacent numbers in an iterable of numbers.
Parameters:
numbers (list): List of integers or numeric strings.
indicator (str): Delimiter to indicate generated ranges.
base (int): Passed to the `int()` conversion when comparing numbers.
Return:
list of str: Condensed sequence with either ranges or isolated numbers. | def merge_adjacent(numbers, indicator='..', base=0):
integers = list(sorted([(int("%s" % i, base), i) for i in numbers]))
idx = 0
result = []
while idx < len(numbers):
end = idx + 1
while end < len(numbers) and integers[end-1][0] == integers[end][0] - 1:
end += 1
result.append("%s%s%s" % (integers[idx][1], indicator, integers[end-1][1])
if end > idx + 1
else "%s" % integers[idx][1])
idx = end
return result | 1,056,297 |
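Usage sketch with the default '..' indicator:

merge_adjacent([1, 2, 3, 5, 6, 8])
# -> ['1..3', '5..6', '8']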
Generates sentences from a given corpus
Args:
generation_type: 'markov' | 'hmm' | 'hmm_past'
Returns:
Properly formatted string of generated sentences | def generate_text(self, generation_type='markov'):
assert generation_type in ['markov', 'hmm', 'hmm_past']
if generation_type == "markov":
return self._text_generator(next_token=self._generate_next_token)
elif generation_type == "hmm":
return self._text_generator(next_token=self._generate_next_token_hmm, emit=self._emitHMM)
elif generation_type == "hmm_past":
return self._text_generator(next_token=self._generate_next_token_hmm, emit=self._emitHMM_with_past) | 1,056,317 |
Execute a command
Args:
command (str)
Returns:
process (object) | def execute_command(self, command):
self.runner.info_log("Executing command: %s" % command)
process = Popen(
command,
stdout=open(os.devnull, 'w'),
stderr=open('runner.log', 'a'),
)
return process | 1,056,328 |
Calculate how many padding bytes needed for ``fmt`` to be aligned to
``align``.
Args:
fmt (str): :mod:`struct` format.
align (int): alignment (2, 4, 8, etc.)
Returns:
str: padding format (e.g., various number of 'x').
>>> calc_padding('b', 2)
'x'
>>> calc_padding('b', 3)
'xx' | def calc_padding(fmt, align):
remain = struct.calcsize(fmt) % align
if remain == 0:
return ""
return 'x' * (align - remain) | 1,056,372 |
Align ``offset`` up to ``align`` boundary.
Args:
offset (int): value to be aligned.
align (int): alignment boundary.
Returns:
int: aligned offset.
>>> align_up(3, 2)
4
>>> align_up(3, 1)
3 | def align_up(offset, align):
remain = offset % align
if remain == 0:
return offset
else:
return offset + (align - remain) | 1,056,373 |
Convert 6 bytes into a MAC string.
Args:
bin (str): hex string of length 6.
Returns:
str: String representation of the MAC address in lower case.
Raises:
Exception: if ``len(bin)`` is not 6. | def bin_to_mac(bin, size=6):
if len(bin) != size:
raise Exception("Invalid MAC address: %s" % (bin))
return ':'.join([binascii.hexlify(o) for o in bin]) | 1,056,374 |
Register an event with all servers.
Args:
direction (str): `in`, `out`, `both`, or `girc`.
verb (str): Event name, `all`, or `raw`.
child_fn (function): Handler function.
priority (int): Handler priority (lower priority executes first).
Note: `all` will not match `raw` events. If you wish to receive both
`raw` and all other events, you need to register these separately. | def register_event(self, direction, verb, child_fn, priority=10):
event_managers = []
if direction in ('in', 'both'):
event_managers.append(self._events_in)
if direction in ('out', 'both'):
event_managers.append(self._events_out)
if direction == 'girc':
event_managers.append(self._girc_events)
for event_manager in event_managers:
event_manager.register(verb, child_fn, priority=priority) | 1,056,499 |
Sets user info for this server, to be used before connection.
Args:
nick (str): Nickname to use.
user (str): Username to use.
real (str): Realname to use. | def set_user_info(self, nick, user='*', real='*'):
if self.connected:
raise Exception("Can't set user info now, we're already connected!")
# server will pick up this info when it connects
self.nick = nick
self.connect_info['user'] = {
'nick': nick,
'user': user,
'real': real,
} | 1,056,500 |
Connects to the given server.
Args:
auto_reconnect (bool): Automatically reconnect on disconnection.
Other arguments to this function are as usually supplied to
:meth:`asyncio.BaseEventLoop.create_connection`. | def connect(self, *args, auto_reconnect=False, **kwargs):
connection_info = {
'auto_reconnect': auto_reconnect,
'args': args,
'kwargs': kwargs,
}
self.connect_info['connection'] = connection_info
# confirm we have user info set
if 'user' not in self.connect_info:
raise Exception('`set_user_info` must be called before connecting to server.')
# create connection and run
connection = loop.create_connection(lambda: self,
*args, **kwargs)
asyncio.Task(connection) | 1,056,505 |
Authenticate to a server using SASL plain, or does so on connection.
Args:
name (str): Name to auth with.
password (str): Password to auth with.
identity (str): Identity to auth with (defaults to name). | def sasl_plain(self, name, password, identity=None):
if identity is None:
identity = name
self.sasl('plain', name, password, identity) | 1,056,532 |
Measure a list of states with a measurement matrix in the presence of
measurement noise.
Args:
states (array): states to measure. Shape is NxSTATE_DIM.
measurement_matrix (array): Each state in *states* is measured with this
matrix. Should be MEAS_DIMxSTATE_DIM in shape.
measurement_covariance (array): Measurement noise covariance. Should be
MEAS_DIMxMEAS_DIM.
Returns:
(array): NxMEAS_DIM array of measurements. | def measure_states(states, measurement_matrix, measurement_covariance):
# Sanitise input
measurement_matrix = np.atleast_2d(measurement_matrix)
measurement_covariance = np.atleast_2d(measurement_covariance)
measurement_dim = measurement_matrix.shape[0]
if measurement_covariance.shape != (measurement_dim, measurement_dim):
raise ValueError(("Measurement matrix and covariance have inconsistent "
"shapes {} and {}").format(measurement_matrix.shape,
measurement_covariance.shape))
states = np.atleast_2d(states)
# Special case: no output
if states.shape[0] == 0:
return np.zeros((0, measurement_dim))
# Measure states
measurement_means = measurement_matrix.dot(states.T).T
measurement_noises = np.random.multivariate_normal(
mean=np.zeros(measurement_dim), cov=measurement_covariance,
size=states.shape[0]
)
return measurement_means + measurement_noises | 1,056,552 |
Generate states by simulating a linear system with constant process matrix
and process noise covariance.
Args:
state_count (int): Number of states to generate.
process_matrix (array): Square array
process_covariance (array): Square array specifying process noise
covariance.
initial_state (array or None): If omitted, use zero-filled vector as
initial state. | def generate_states(state_count, process_matrix, process_covariance,
initial_state=None):
# Sanitise input
process_matrix = np.atleast_2d(process_matrix)
process_covariance = np.atleast_2d(process_covariance)
state_dim = process_matrix.shape[0]
if process_matrix.shape != (state_dim, state_dim):
raise ValueError("Process matrix has inconsistent shape: {}".format(
process_matrix.shape))
if process_covariance.shape != (state_dim, state_dim):
raise ValueError("Process covariance has inconsistent shape: {}".format(
process_covariance.shape))
if initial_state is None:
initial_state = np.zeros(process_matrix.shape[0])
states = [initial_state]
while len(states) < state_count:
states.append(
process_matrix.dot(states[-1]) + np.random.multivariate_normal(
mean=np.zeros(state_dim), cov=process_covariance
)
)
return np.vstack(states) | 1,056,553 |
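A minimal end-to-end sketch pairing this row's generate_states with measure_states from the previous row: simulate a 1-D constant-velocity model, then observe position only (the matrices are illustrative choices, not fixed by the API):

import numpy as np

F = np.array([[1.0, 1.0], [0.0, 1.0]])      # process matrix: position += velocity
Q = 0.01 * np.eye(2)                         # process noise covariance
states = generate_states(50, F, Q)           # 50x2 array of [position, velocity]

H = np.array([[1.0, 0.0]])                   # measure position only
R = np.array([[0.1]])                        # measurement noise covariance
measurements = measure_states(states, H, R)  # 50x1 array of noisy positions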
Samples an observation's value.
Args:
value: A numeric value signifying the value to be sampled. | def observe(self, value):
self._buffer.append(value)
if len(self._buffer) == _BUFFER_SIZE:
self._flush() | 1,056,657 |
Retrieves the value estimate for the requested quantile rank.
The requested quantile rank must be registered in the estimator's
invariants a priori!
Args:
rank: A floating point quantile rank along the interval [0, 1].
Returns:
A numeric value for the quantile estimate. | def query(self, rank):
self._flush()
current = self._head
if not current:
return 0
mid_rank = math.floor(rank * self._observations)
max_rank = mid_rank + math.floor(
self._invariant(mid_rank, self._observations) / 2)
rank = 0.0
while current._successor:
rank += current._rank
if rank + current._successor._rank + current._successor._delta > max_rank:
return current._value
current = current._successor
return current._value | 1,056,658 |
Disconnect all servers with a message.
Args:
message (str): Quit message to use on each connection. | def shutdown(self, message=None):
for name, server in self.servers.items():
server.quit(message) | 1,056,751 |
Create an IRC server connection slot.
The server will actually be connected to when
:meth:`girc.client.ServerConnection.connect` is called later.
Args:
server_name (str): Name of the server, to be used for functions and accessing the
server later through the reactor.
Returns:
server (girc.client.ServerConnection): A not-yet-connected server. | def create_server(self, server_name, *args, **kwargs):
server = ServerConnection(name=server_name, reactor=self)
if args or kwargs:
server.set_connect_info(*args, **kwargs)
# register cached events
for verb, infos in self._event_handlers.items():
for info in infos:
server.register_event(info['direction'], verb, info['handler'],
priority=info['priority'])
self.servers[server_name] = server
return server | 1,056,752 |
Register an event with all servers.
Args:
direction (str): `in`, `out`, `both`, `raw`.
verb (str): Event name.
child_fn (function): Handler function.
priority (int): Handler priority (lower priority executes first). | def register_event(self, direction, verb, child_fn, priority=10):
if verb not in self._event_handlers:
self._event_handlers[verb] = []
self._event_handlers[verb].append({
'handler': child_fn,
'direction': direction,
'priority': priority,
})
for name, server in self.servers.items():
server.register_event(direction, verb, child_fn, priority=priority) | 1,056,755 |
Set phenotype matrix
Args:
Y: phenotype matrix [N, P]
standardize: if True, phenotype is standardized (zero mean, unit variance) | def setY(self,Y,standardize=False):
assert Y.shape[0]==self.N, 'CVarianceDecomposition:: Incompatible shape'
assert Y.shape[1]==self.P, 'CVarianceDecomposition:: Incompatible shape'
if standardize:
Y=preprocess.standardize(Y)
#check that missing values match the current structure
assert (~(SP.isnan(Y).any(axis=1))==self.Iok).all(), 'CVarianceDecomposition:: pattern of missing values needs to match Y given at initialization'
self.Y = Y
self.vd.setPheno(Y)
self.optimum = None
self.cache['Sigma'] = None
self.cache['Hessian'] = None
self.cache['Lparams'] = None
self.cache['paramsST']= None | 1,056,935 |
add random effects term for single trait models (no trait-trait covariance matrix)
Args:
K: NxN sample covariance matrix
is_noise: bool labeling the noise term (noise term has K=eye)
normalize: if True, K and Ks are scaled such that K.diagonal().mean()==1
Ks: NxN test cross covariance for predictions | def addSingleTraitTerm(self,K=None,is_noise=False,normalize=True,Ks=None):
assert self.P == 1, 'Incompatible number of traits'
assert K is not None or is_noise, 'Specify covariance structure'
if is_noise:
assert self.noisPos is None, 'noise term already exists'
K = SP.eye(self.Nt)
self.noisPos = self.n_terms
else:
assert K.shape[0]==self.Nt, 'Incompatible shape'
assert K.shape[1]==self.Nt, 'Incompatible shape'
if Ks is not None:
assert Ks.shape[0]==self.N, 'Incompatible shape'
if normalize:
Norm = 1/K.diagonal().mean()
K *= Norm
if Ks is not None: Ks *= Norm
self.vd.addTerm(limix.CSingleTraitTerm(K))
if Ks is not None: self.setKstar(self.n_terms,Ks)
self.n_terms+=1
self.gp = None
self.init = False
self.fast = False
self.optimum = None
self.cache['Sigma'] = None
self.cache['Hessian'] = None
self.cache['Lparams'] = None
self.cache['paramsST']= None | 1,056,937 |
add fixed effect to the model
Args:
F: fixed effect matrix [N,1]
A: design matrix [K,P] (e.g. SP.ones((1,P)) common effect; SP.eye(P) any effect) | def addFixedEffect(self,F=None,A=None):
if A is None:
A = SP.eye(self.P)
if F is None:
F = SP.ones((self.N,1))
assert A.shape[1]==self.P, 'Incompatible shape'
assert F.shape[0]==self.N, 'Incompatible shape'
if F.shape[1]>1:
for m in range(F.shape[1]):
self.vd.addFixedEffTerm(A,F[:,m:m+1])
else:
self.vd.addFixedEffTerm(A,F)
#TODO: what is this gp object doing, is this initialization correct?
self.gp = None
self.init = False
self.fast = False
self.optimum = None
self.cache['Sigma'] = None
self.cache['Hessian'] = None
self.cache['Lparams'] = None
self.cache['paramsST']= None | 1,056,939 |
Initialize GP object
Args:
fast: if fast==True initialize gpkronSum gp | def initGP(self,fast=False):
if fast:
assert self.n_terms==2, 'CVarianceDecomposition: for fast inference number of terms must be == 2'
assert self.P>1, 'CVarianceDecomposition: for fast inference number of traits must be > 1'
self.vd.initGPkronSum()
else:
self.vd.initGP()
self.gp=self.vd.getGP()
self.init=True
self.fast=fast | 1,056,940 |
Uses 2 term single trait model to get covar params for initialization
Args:
termx: non-noise term that is used for initialization | def _getScalesDiag(self,termx=0):
assert self.P>1, 'CVarianceDecomposition:: diagonal init_method allowed only for multi trait models'
assert self.noisPos is not None, 'CVarianceDecomposition:: noise term has to be set'
assert termx<self.n_terms-1, 'CVarianceDecomposition:: termx>=n_terms-1'
assert self.covar_type[self.noisPos] not in ['lowrank','block','fixed'], 'CVarianceDecomposition:: diagonal initialization not possible for such a parametrization'
assert self.covar_type[termx] not in ['lowrank','block','fixed'], 'CVarianceDecomposition:: diagonal initialization not possible for such a parametrization'
scales = []
res = self.estimateHeritabilities(self.vd.getTerm(termx).getK())
scaleg = SP.sqrt(res['varg'].mean())
scalen = SP.sqrt(res['varn'].mean())
for term_i in range(self.n_terms):
if term_i==termx:
_scales = scaleg*self.diag[term_i]
elif term_i==self.noisPos:
_scales = scalen*self.diag[term_i]
else:
_scales = 0.*self.diag[term_i]
if self.offset[term_i]>0:
_scales = SP.concatenate((_scales,SP.array([SP.sqrt(self.offset[term_i])])))
scales.append(_scales)
return SP.concatenate(scales) | 1,056,941 |
Train the gp
Args:
fast: if true and the gp has not been initialized, initializes a kronSum gp
scales0: initial variance components params
fixed0: initial fixed effect params | def trainGP(self,fast=False,scales0=None,fixed0=None,lambd=None):
assert self.n_terms>0, 'CVarianceDecomposition:: No variance component terms'
if not self.init: self.initGP(fast=fast)
# set lambda
if lambd is not None: self.gp.setLambda(lambd)
# set scales0
if scales0 is not None:
self.setScales(scales0)
# init gp params
self.vd.initGPparams()
# set fixed0
if fixed0 is not None:
params = self.gp.getParams()
params['dataTerm'] = fixed0
self.gp.setParams(params)
# LIMIX CVARIANCEDECOMPOSITION TRAINING
conv =self.vd.trainGP()
self.cache['Sigma'] = None
self.cache['Hessian'] = None
return conv | 1,056,944 |
Train the model repeatedly up to a number specified by the user with random restarts and
return a list of all relative minima that have been found
Args:
fast: Boolean. if set to True initialize kronSumGP
verbose: Boolean. If set to True, verbose output is produced. (default True)
n_times: number of re-starts of the optimization. (default 10) | def findLocalOptima(self,fast=False,verbose=True,n_times=10,lambd=None):
if not self.init: self.initGP(fast)
opt_list = []
fixed0 = SP.zeros_like(self.gp.getParams()['dataTerm'])
# minimises n_times
for i in range(n_times):
scales1 = self._getScalesRand()
fixed1 = 1e-1*SP.randn(fixed0.shape[0],fixed0.shape[1])
conv = self.trainGP(fast=fast,scales0=scales1,fixed0=fixed1,lambd=lambd)
if conv:
# compare with previous minima
temp=1
for j in range(len(opt_list)):
if SP.allclose(abs(self.getScales()),abs(opt_list[j]['scales'])):
temp=0
opt_list[j]['counter']+=1
break
if temp==1:
opt = {}
opt['counter'] = 1
opt['LML'] = self.getLML()
opt['scales'] = self.getScales()
opt_list.append(opt)
# sort by LML
LML = SP.array([opt_list[i]['LML'] for i in range(len(opt_list))])
index = LML.argsort()[::-1]
out = []
if verbose:
print("\nLocal mimima\n")
print("n_times\t\tLML")
print("------------------------------------")
for i in range(len(opt_list)):
out.append(opt_list[index[i]])
if verbose:
print(("%d\t\t%f" % (opt_list[index[i]]['counter'], opt_list[index[i]]['LML'])))
print("")
return out | 1,056,946 |
Set the variance component scales (random initialization if scales is None)
Args:
scales: if None, scales are set randomly;
otherwise, the given scales are applied (to term_num if given, else to all terms)
term_num: set scales to term_num | def setScales(self,scales=None,term_num=None):
if scales is None:
for term_i in range(self.n_terms):
n_scales = self.vd.getTerm(term_i).getNumberScales()
self.vd.getTerm(term_i).setScales(SP.array(SP.randn(n_scales)))
elif term_num is None:
assert scales.shape[0]==self.vd.getNumberScales(), 'incompatible shape'
index = 0
for term_i in range(self.n_terms):
index1 = index+self.vd.getTerm(term_i).getNumberScales()
self.vd.getTerm(term_i).setScales(scales[index:index1])
index = index1
else:
assert scales.shape[0]==self.vd.getTerm(term_num).getNumberScales(), 'incompatible shape'
self.vd.getTerm(term_num).setScales(scales) | 1,056,947 |
Returns the Parameters
Args:
term_i: index of the term we are interested in
if term_i==None returns the whole vector of parameters | def getScales(self,term_i=None):
if term_i is None:
RV = self.vd.getScales()
else:
assert term_i<self.n_terms, 'Term index non valid'
RV = self.vd.getScales(term_i)
return RV | 1,056,948 |
Returns explicitly the estimated trait covariance matrix
Args:
term_i: index of the term we are interested in | def getEstTraitCovar(self,term_i=None):
assert self.P>1, 'Trait covars not defined for single trait analysis'
if term_i is None:
RV=SP.zeros((self.P,self.P))
for term_i in range(self.n_terms): RV+=self.vd.getTerm(term_i).getTraitCovar().K()
else:
assert term_i<self.n_terms, 'Term index non valid'
RV = self.vd.getTerm(term_i).getTraitCovar().K()
return RV | 1,056,949 |
Returns the estimated trait correlation matrix
Args:
term_i: index of the term we are interested in | def getEstTraitCorrCoef(self,term_i=None):
cov = self.getEstTraitCovar(term_i)
stds=SP.sqrt(cov.diagonal())[:,SP.newaxis]
RV = cov/stds/stds.T
return RV | 1,056,950 |
Set the kernel for predictions
Args:
term_i: index of the term we are interested in
Ks: (TODO: is this the covariance between train and test or the covariance between test points?) | def setKstar(self,term_i,Ks):
assert Ks.shape[0]==self.N
#if Kss!=None:
#assert Kss.shape[0]==Ks.shape[1]
#assert Kss.shape[1]==Ks.shape[1]
self.vd.getTerm(term_i).getKcf().setK0cross(Ks) | 1,056,959 |
Registers a given index:
* Creates and opens an index for it (if it doesn't exist yet)
* Sets some default values on it (unless they're already set)
Args:
index (PonyWhoosh.Index): An instance of PonyWhoosh.Index class | def register_index(self, index):
self._indexes[index._name] = index
self.create_index(index)
return index | 1,057,132 |
Registers a single model for fulltext search. This basically creates
a simple PonyWhoosh.Index for the model and calls self.register_index on it.
Args:
*fields: all the fields indexed from the model.
**kw: The options for each field, sortedby, stored ... | def register_model(self, *fields, **kw):
index = PonyWhooshIndex(pw=self)
index._kw = kw
index._fields = fields
def inner(model):
index._name = model._table_
if not index._name:
index._name = model.__name__
self._entities[index._name] = model
index._schema_attrs = {}
index._primary_key_is_composite = model._pk_is_composite_
index._primary_key = [f.name for f in model._pk_attrs_]
index._primary_key_type = 'list'
type_attribute = {}
for field in model._attrs_:
if field.is_relation:
continue
assert hasattr(field, "name") and hasattr(field, "py_type")
fname = field.name
if hasattr(field.name, "__name__"):
fname = field.name.__name__
stored = kw.get("stored", False)
if fname in index._primary_key:
kw["stored"] = True
# we're not supporting this kind of data
ftype = field.py_type.__name__
if ftype in ['date', 'datetime', 'datetime.date']:
kw["stored"] = stored
continue
fwhoosh = whoosh.fields.TEXT(**kw)
if field == model._pk_:
index._primary_key_type = ftype
fwhoosh = whoosh.fields.ID(stored=True, unique=True)
if fname in index._fields:
if not field.is_string:
if ftype in ['int', 'float']:
fwhoosh = whoosh.fields.NUMERIC(**kw)
elif ftype == 'bool':
fwhoosh = whoosh.fields.BOOLEAN(stored=True)
type_attribute[fname] = ftype
index._schema_attrs[fname] = fwhoosh
kw["stored"] = stored
index._schema = whoosh.fields.Schema(**index._schema_attrs)
self.register_index(index)
def _middle_save_(obj, status):
writer = index._whoosh.writer(timeout=self.writer_timeout)
dict_obj = obj.to_dict()
def dumps(v):
if sys.version_info[0] < 3:
if isinstance(v, int):
return unicode(v)
if isinstance(v, float):
return '%.9f' % v
return unicode(v)
else:
if isinstance(v, int):
return str(v)
if isinstance(v, float):
return int(float(v))
return str(v)
attrs = {}
if sys.version_info[0] < 3:
for k, v in dict_obj.iteritems():
if k in index._schema_attrs.keys():
attrs[k] = dumps(v)
else:
for k, v in dict_obj.items():
if k in list(index._schema_attrs.keys()):
attrs[k] = dumps(v)
if status == 'inserted':
writer.add_document(**attrs)
elif status == 'updated':
writer.update_document(**attrs)
elif status in set(['marked_to_delete', 'deleted', 'cancelled']):
writer.delete_by_term(primary, attrs[primary])
writer.commit()
return obj._after_save_
index._model = model
model._after_save_ = _middle_save_
model._pw_index_ = index
model.search = model._pw_index_.search
return model
return inner | 1,057,133 |
Function to check if an address is unicast and that the CIDR mask is good
Args:
ip_addr_and_mask: Unicast IP address and mask in the following format 192.168.1.1/24
return_tuple: Set to True it returns an IP and mask in a tuple, set to False returns True or False
Returns: see return_tuple for return options | def ucast_ip_mask(ip_addr_and_mask, return_tuple=True):
regex_ucast_ip_and_mask = __re.compile("^((22[0-3])|(2[0-1][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))/((3[0-2])|([1-2]?[0-9]))$")
if return_tuple:
while not regex_ucast_ip_and_mask.match(ip_addr_and_mask):
print("Not a good unicast IP and CIDR mask combo.")
print("Please try again.")
ip_addr_and_mask = input("Please enter a unicast IP address and mask in the follwing format x.x.x.x/x: ")
ip_cidr_split = ip_addr_and_mask.split("/")
ip_addr = ip_cidr_split[0]
cidr = ip_cidr_split[1]
return ip_addr, cidr
elif not return_tuple:
if not regex_ucast_ip_and_mask.match(ip_addr_and_mask):
return False
else:
return True | 1,057,141 |
Function to check if an address is unicast
Args:
ip_addr: Unicast IP address in the following format 192.168.1.1
return_tuple: Set to True it returns an IP, set to False returns True or False
Returns: see return_tuple for return options | def ucast_ip(ip_addr, return_tuple=True):
regex_ucast_ip = __re.compile("^((22[0-3])|(2[0-1][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))$")
if return_tuple:
while not regex_ucast_ip.match(ip_addr):
print("Not a good unicast IP.")
print("Please try again.")
ip_addr = input("Please enter a unicast IP address in the following format x.x.x.x: ")
return ip_addr
elif not return_tuple:
if not regex_ucast_ip.match(ip_addr):
return False
else:
return True | 1,057,142 |
Function to check if an address is multicast and that the CIDR mask is good
Args:
ip_addr_and_mask: Multicast IP address and mask in the following format 239.1.1.1/24
return_tuple: Set to True it returns an IP and mask in a tuple, set to False returns True or False
Returns: see return_tuple for return options | def mcast_ip_mask(ip_addr_and_mask, return_tuple=True):
regex_mcast_ip_and_mask = __re.compile("^(((2[2-3][4-9])|(23[0-3]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))/((3[0-2])|([1-2][0-9])|[3-9]))$")
if return_tuple:
while not regex_mcast_ip_and_mask.match(ip_addr_and_mask):
print("Not a good multicast IP and CIDR mask combo.")
print("Please try again.")
ip_addr_and_mask = input("Please enter a multicast IP address and mask in the follwing format x.x.x.x/x: ")
ip_cidr_split = ip_addr_and_mask.split("/")
ip_addr = ip_cidr_split[0]
cidr = ip_cidr_split[1]
return ip_addr, cidr
elif not return_tuple:
if not regex_mcast_ip_and_mask.match(ip_addr_and_mask):
return False
else:
return True | 1,057,143 |
Function to check if an address is multicast
Args:
ip_addr: Multicast IP address in the following format 239.1.1.1
return_tuple: Set to True it returns an IP, set to False returns True or False
Returns: see return_tuple for return options | def mcast_ip(ip_addr, return_tuple=True):
regex_mcast_ip = __re.compile("^(((2[2-3][4-9])|(23[0-3]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9])))$")
if return_tuple:
while not regex_mcast_ip.match(ip_addr):
print("Not a good multicast IP.")
print("Please try again.")
ip_addr = input("Please enter a multicast IP address in the following format x.x.x.x: ")
return ip_addr
elif not return_tuple:
if not regex_mcast_ip.match(ip_addr):
return False
else:
return True | 1,057,144 |
Function to check if an address and CIDR mask is good
Args:
ip_addr_and_mask: IP address and mask in the following format 192.168.1.1/24
return_tuple: Set to True it returns an IP and mask in a tuple, set to False returns True or False
Returns: see return_tuple for return options | def ip_mask(ip_addr_and_mask, return_tuple=True):
regex_ip_and_mask = __re.compile("^((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))/((3[0-2])|([1-2]?[0-9]))$")
if return_tuple:
while not regex_ip_and_mask.match(ip_addr_and_mask):
print("Not a good IP and CIDR mask combo.")
print("Please try again.")
ip_addr_and_mask = input("Please enter a IP address and mask in the follwing format x.x.x.x/x: ")
ip_cidr_split = ip_addr_and_mask.split("/")
ip_addr = ip_cidr_split[0]
cidr = ip_cidr_split[1]
return ip_addr, cidr
elif not return_tuple:
if not regex_ip_and_mask.match(ip_addr_and_mask):
return False
else:
return True | 1,057,145 |
Function to check if an address is good
Args:
ip_addr: IP address in the following format 192.168.1.1
return_tuple: Set to True it returns an IP, set to False returns True or False
Returns: see return_tuple for return options | def ip(ip_addr, return_tuple=True):
regex_ip = __re.compile("^((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))$")
if return_tuple:
while not regex_ip.match(ip_addr):
print("Not a good IP.")
print("Please try again.")
ip_addr = input("Please enter a IP address in the following format x.x.x.x: ")
return ip_addr
elif not return_tuple:
if not regex_ip.match(ip_addr):
return False
else:
return True | 1,057,146 |
Function to verify a good CIDR value
Args:
cidr: CIDR value 0 to 32
return_cidr: Set to True it returns a CIDR value, set to False returns True or False
Returns: see return_cidr for return options | def cidr_check(cidr, return_cidr=True):
try:
if int(cidr) < 0 or int(cidr) > 32:
good_cidr = False
else:
good_cidr = True
if return_cidr:
while not good_cidr:
print("Sorry the CIDR value %s is not a valid value must be a value of 0 to 32. Please try again."
% (cidr,))
cidr = input("What is the mask for in CIDR format?: ")
if int(cidr) < 0 or int(cidr) > 32:
good_cidr = False
else:
good_cidr = True
return cidr
elif not return_cidr:
return good_cidr
except ValueError:
LOGGER.critical('Function cidr_check expected a number but got {item}'.format(item=cidr))
raise ValueError("The input needs to be a number!!") | 1,057,147 |
Function to figure out our IP and the neighbor IP on a point-to-point link
Args:
ip_addr: Unicast IP address in the following format 192.168.1.1
cidr: CIDR value of 30, or 31
Returns: returns Our IP and the Neighbor IP in a tuple | def get_neighbor_ip(ip_addr, cidr="30"):
our_octet = None
neighbor_octet = None
try:
ip_addr_split = ip_addr.split(".")
max_counter = 0
if int(cidr) == 30:
ranger = 4
elif int(cidr) == 31:
ranger = 2
while max_counter < 256:
try:
if int(ip_addr_split[3]) >= max_counter and int(ip_addr_split[3]) < (max_counter + ranger):
if ranger == 4:
our_octet = max_counter + 1
neighbor_octet = max_counter + 2
break
elif ranger == 2:
our_octet = max_counter
neighbor_octet = max_counter + 1
break
max_counter += ranger
except UnboundLocalError:
print("The mask between the neighbors must be 30, or 31")
exit("BAD NEIGHBOR MASK")
if int(ip_addr_split[3]) == our_octet:
our_ip_addr = "%s.%s.%s.%i" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], our_octet)
neighbor_ip_addr = "%s.%s.%s.%i" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], neighbor_octet)
elif int(ip_addr_split[3]) == neighbor_octet:
neighbor_ip_addr = "%s.%s.%s.%i" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], our_octet)
our_ip_addr = "%s.%s.%s.%i" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], neighbor_octet)
else:
our_ip_addr = "%s.%s.%s.%i" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], our_octet)
neighbor_ip_addr = "%s.%s.%s.%i" % (ip_addr_split[0], ip_addr_split[1], ip_addr_split[2], neighbor_octet)
return our_ip_addr, neighbor_ip_addr
except IndexError:
LOGGER.critical('Function get_neighbor_ip IndexError ip_addr {item} cidr {cidr}'.format(item=ip_addr,
cidr=cidr))
raise IndexError("You have entered invalid input, you must enter a ipv4 address") | 1,057,148 |
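Usage sketch on a /30 point-to-point link:

get_neighbor_ip('10.1.1.1', cidr='30')
# -> ('10.1.1.1', '10.1.1.2')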
Function to return a whole subnet value from a IP address and CIDR pair
Args:
ip_addr: Unicast or Multicast IP address or subnet in the following format 192.168.1.1, 239.1.1.1
cidr: CIDR value of 0 to 32
Returns: returns the corrected whole subnet | def whole_subnet_maker(ip_addr, cidr):
if not ucast_ip(ip_addr, False) and not mcast_ip(ip_addr, False):
LOGGER.critical('Function whole_subnet_maker ip_addr {item}'.format(item=ip_addr))
raise ValueError("Not a good ipv4 address")
if not cidr_check(cidr, False):
LOGGER.critical('Function whole_subnet_maker cidr {item}'.format(item=cidr))
raise ValueError("Not a good CIDR value should be 0 to 32")
def subnet_corrector(octet, cidr):
cidr_int = int(cidr)
octet_int = int(octet)
if cidr_int >= 24:
cidr_int = __mask_conversion[cidr_int]["OCT4"]
elif cidr_int >= 16:
cidr_int = __mask_conversion[cidr_int]["OCT3"]
elif cidr_int >= 8:
cidr_int = __mask_conversion[cidr_int]["OCT2"]
elif cidr_int >= 1:
cidr_int = __mask_conversion[cidr_int]["OCT1"]
cidr_count = 0
cidr_v = 256 - cidr_int
cidr_2 = 256 - cidr_int
while cidr_count < 300:
if octet_int >= cidr_count and octet_int <= cidr_2:
cidr_int = cidr_count
cidr_count = cidr_2
cidr_2 = cidr_2 + cidr_v
return str(cidr_int)
ip_addr_split = ip_addr.split(".")
if int(cidr) >= 24:
octet = subnet_corrector(ip_addr_split[3], cidr)
completed = ip_addr_split[0] + "." + ip_addr_split[1] + "." + ip_addr_split[2] + "." + octet
return completed
elif int(cidr) >= 16:
octet = subnet_corrector(ip_addr_split[2], cidr)
completed = ip_addr_split[0] + "." + ip_addr_split[1] + "." + octet + ".0"
return completed
elif int(cidr) >= 8:
octet = subnet_corrector(ip_addr_split[1], cidr)
completed = ip_addr_split[0] + "." + octet + ".0.0"
return completed
elif int(cidr) >= 1:
octet = subnet_corrector(ip_addr_split[0], cidr)
completed = octet + ".0.0.0"
return completed
else:
return "0.0.0.0" | 1,057,149 |
Function to return a subnet range value from a IP address and CIDR pair
Args:
ip_net: Unicast or Multicast IP address or subnet in the following format 192.168.1.1, 239.1.1.1
cidr: CIDR value of 1 to 32
Returns: returns a dictionary of info | def subnet_range(ip_net, cidr):
subnets_dict = dict()
subnet = whole_subnet_maker(ip_net, cidr)
subnets_dict['IP'] = ip_net
subnets_dict['NET'] = subnet
subnets_dict['CIDR'] = '%s/%s' % (whole_subnet_maker(ip_net, cidr), cidr)
if int(cidr) >= 24:
subnet_split = subnet.split('.')
first_ip = int(subnet_split[3]) + 1
last_ip = (int(subnet_split[3]) + 1) + (253 - int(__mask_conversion[int(cidr)]['OCT4']))
bcast_ip = (int(subnet_split[3]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT4']))
temp = '%s.%s.%s.' % (subnet_split[0], subnet_split[1], subnet_split[2])
subnets_dict['RANGE'] = '%s%i to %s%i' % (temp, first_ip, temp, last_ip)
subnets_dict['BCAST'] = '%s%i' % (temp, bcast_ip)
subnets_dict['MASK'] = __mask_conversion[int(cidr)]['MASK']
subnets_dict['INVMASK'] = __mask_conversion[int(cidr)]['INVMASK']
subnets_dict['CIDRVAL'] = __mask_conversion[int(cidr)]['CIDR']
elif int(cidr) >= 16:
subnet_split = subnet.split('.')
first_ip = int(subnet_split[2])
last_ip = (int(subnet_split[2]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT3']))
bcast_ip = (int(subnet_split[2]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT3']))
temp = '%s.%s.' % (subnet_split[0], subnet_split[1])
subnets_dict['RANGE'] = '%s%i.1 to %s%i.254' % (temp, first_ip, temp, last_ip)
subnets_dict['BCAST'] = '%s%i.255' % (temp, bcast_ip)
subnets_dict['MASK'] = __mask_conversion[int(cidr)]['MASK']
subnets_dict['INVMASK'] = __mask_conversion[int(cidr)]['INVMASK']
subnets_dict['CIDRVAL'] = __mask_conversion[int(cidr)]['CIDR']
elif int(cidr) >= 8:
subnet_split = subnet.split('.')
first_ip = int(subnet_split[1])
last_ip = (int(subnet_split[1]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT2']))
bcast_ip = (int(subnet_split[1]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT2']))
temp = '%s.' % (subnet_split[0],)
subnets_dict['RANGE'] = '%s%i.0.1 to %s%i.255.254' % (temp, first_ip, temp, last_ip)
subnets_dict['BCAST'] = '%s%i.255.255' % (temp, bcast_ip)
subnets_dict['MASK'] = __mask_conversion[int(cidr)]['MASK']
subnets_dict['INVMASK'] = __mask_conversion[int(cidr)]['INVMASK']
subnets_dict['CIDRVAL'] = __mask_conversion[int(cidr)]['CIDR']
elif int(cidr) >= 1:
subnet_split = subnet.split('.')
first_ip = int(subnet_split[0])
last_ip = (int(subnet_split[0]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT1']))
bcast_ip = (int(subnet_split[0]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT1']))
subnets_dict['RANGE'] = '%i.0.0.1 to %i.255.255.254' % (first_ip, last_ip)
subnets_dict['BCAST'] = '%i.255.255.255' % (bcast_ip,)
subnets_dict['MASK'] = __mask_conversion[int(cidr)]['MASK']
subnets_dict['INVMASK'] = __mask_conversion[int(cidr)]['INVMASK']
subnets_dict['CIDRVAL'] = __mask_conversion[int(cidr)]['CIDR']
return subnets_dict | 1,057,150 |
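Usage sketch (again assuming the standard __mask_conversion table):

info = subnet_range('192.168.1.100', '24')
# info['NET']   -> '192.168.1.0'
# info['CIDR']  -> '192.168.1.0/24'
# info['RANGE'] -> '192.168.1.1 to 192.168.1.254'
# info['BCAST'] -> '192.168.1.255'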
Function to return every subnet a ip can belong to with a longer prefix
Args:
ip_net: Unicast or Multicast IP address or subnet in the following format 192.168.1.1, 239.1.1.1
cidr: CIDR value of 0 to 32
Returns: returns a list of subnets | def all_subnets_longer_prefix(ip_net, cidr):
subnets_list = list()
while int(cidr) <= 32:
try:
subnets_list.append('%s/%s' % (whole_subnet_maker(ip_net, cidr), cidr))
except Exception as e:
LOGGER.critical('Function all_subnets_longer_prefix {item}'.format(item=e))
pass
cidr = str(int(cidr) + 1)
return subnets_list | 1,057,151 |
Function to return every subnet a ip can belong to with a shorter prefix
Args:
ip_net: Unicast or Multicast IP address or subnet in the following format 192.168.1.1, 239.1.1.1
cidr: CIDR value of 0 to 32
include_default: If you want the list to inlclude the default route set to True
Returns: returns a list of subnets | def all_subnets_shorter_prefix(ip_net, cidr, include_default=False):
subnets_list = list()
if include_default:
while int(cidr) >= 0:
try:
subnets_list.append('%s/%s' % (whole_subnet_maker(ip_net, cidr), cidr))
except Exception as e:
LOGGER.critical('Function all_subnets_shorter_prefix {item}'.format(item=e))
cidr = str(int(cidr) - 1)
else:
while int(cidr) > 0:
try:
subnets_list.append('%s/%s' % (whole_subnet_maker(ip_net, cidr), cidr))
except Exception as e:
LOGGER.critical('Function all_subnets_shorter_prefix {item}'.format(item=e))
cidr = str(int(cidr) - 1)
return subnets_list | 1,057,152 |
Function to verify item entered is a number
Args:
check: Thing to check for a number
return_number: Set to True it returns a number value, set to False returns True or False
Returns: Check return_number for return options | def number_check(check, return_number=True):
try:
int(check)
good = True
except ValueError:
LOGGER.critical('Function number_check ValueError {item}'.format(item=check))
good = False
if return_number:
while not good:
print("That is not a number.")
print("Please try again.")
check = input("Please enter a number?: ")
try:
int(check)
good = True
except ValueError:
LOGGER.critical('Function number_check ValueError {item}'.format(item=check))
good = False
return check
else:
return good | 1,057,154 |
Determine if a file is a packet trace that is supported by this module.
Args:
path (str): path to the trace file.
Returns:
bool: True if the file is a valid packet trace. | def is_packet_trace(path):
path = os.path.abspath(path)
if not os.path.isfile(path):
return False
try:
f = open(path, 'rb')
except:
return False
magic = f.read(4)
f.close()
return magic in FILE_TYPE_HANDLER | 1,057,197 |
Read a packet trace file, return a :class:`wltrace.common.WlTrace` object.
This function first reads the file's magic
(first ``MAGIC_LEN`` bytes), and automatically determines the
file type, and call appropriate handler to process the file.
Args:
path (str): the file's path to be loaded.
Returns:
``WlTrace`` object. | def load_trace(path, *args, **kwargs):
with open(path, 'rb') as f:
magic = f.read(MAGIC_LEN)
if magic not in FILE_TYPE_HANDLER:
raise Exception('Unknown file magic: %s' % (binascii.hexlify(magic)))
return FILE_TYPE_HANDLER[magic](path, *args, **kwargs) | 1,057,198 |
Extended pretty printing for location strings.
Args:
format_spec str: Coordinate formatting system to use
Returns:
Human readable string representation of ``Point`` object
Raises:
ValueError: Unknown value for ``format_spec`` | def __format__(self, format_spec='dd'):
text = super(Station.__base__, self).__format__(format_spec)
if self.alt_id:
return '%s (%s - %s)' % (self.name, self.alt_id, text)
else:
return '%s (%s)' % (self.name, text) | 1,057,252 |
Pull locations from a user's config file.
Args:
filename (str): Config file to parse
Returns:
dict: List of locations from config file | def read_locations(filename):
data = ConfigParser()
if filename == '-':
data.read_file(sys.stdin)
else:
data.read(filename)
if not data.sections():
logging.debug('Config file is empty')
locations = {}
for name in data.sections():
if data.has_option(name, 'locator'):
latitude, longitude = utils.from_grid_locator(data.get(name,
'locator'))
else:
latitude = data.getfloat(name, 'latitude')
longitude = data.getfloat(name, 'longitude')
locations[name] = (latitude, longitude)
return locations | 1,057,333 |
Pull locations from a user's CSV file.
Read gpsbabel_'s CSV output format
.. _gpsbabel: http://www.gpsbabel.org/
Args:
filename (str): CSV file to parse
Returns:
tuple of dict and list: List of locations as ``str`` objects | def read_csv(filename):
field_names = ('latitude', 'longitude', 'name')
data = utils.prepare_csv_read(filename, field_names, skipinitialspace=True)
locations = {}
args = []
for index, row in enumerate(data, 1):
name = '%02i:%s' % (index, row['name'])
locations[name] = (row['latitude'], row['longitude'])
args.append(name)
return locations, args | 1,057,334 |
Initialise a new ``LocationsError`` object.
Args:
function (str): Function where error is raised
data (tuple): Location number and data | def __init__(self, function=None, data=None):
super(LocationsError, self).__init__()
self.function = function
self.data = data | 1,057,336 |
Initialise a new ``NumberedPoint`` object.
Args:
latitude (float): Location's latitude
longitude (float): Location's longitude
name (str): Location's name or command line position
units (str): Unit type to be used for distances | def __init__(self, latitude, longitude, name, units='km'):
super(NumberedPoint, self).__init__(latitude, longitude, units)
self.name = name | 1,057,338 |
Returns a transformed Geometry.
Arguments:
geom -- any coercible Geometry value or Envelope
to_sref -- SpatialReference or EPSG ID as int | def transform(geom, to_sref):
# If we have an envelope, assume it's in the target sref.
try:
geom = getattr(geom, 'polygon', Envelope(geom).polygon)
except (TypeError, ValueError):
pass
else:
geom.AssignSpatialReference(to_sref)
try:
geom_sref = geom.GetSpatialReference()
except AttributeError:
return transform(Geometry(geom), to_sref)
if geom_sref is None:
raise Exception('Cannot transform from unknown spatial reference')
# Reproject geom if necessary
if not geom_sref.IsSame(to_sref):
geom = geom.Clone()
geom.TransformTo(to_sref)
return geom | 1,057,592 |
Creates an envelope from lower-left and upper-right coordinates.
Arguments:
args -- min_x, min_y, max_x, max_y or a four-tuple | def __init__(self, *args):
if len(args) == 1:
args = args[0]
try:
extent = list(map(float, args))
except (TypeError, ValueError) as exc:
exc.args = ('Cannot create Envelope from "%s"' % repr(args),)
raise
try:
self.min_x, self.max_x = sorted(extent[::2])
self.min_y, self.max_y = sorted(extent[1::2])
except ValueError as exc:
exc.args = ('Sequence length should be "4", not "%d"' % len(args),)
raise | 1,057,594 |
Expands this envelope by the given Envelope or tuple.
Arguments:
other -- Envelope, two-tuple, or four-tuple | def expand(self, other):
if len(other) == 2:
other += other
mid = len(other) // 2
self.ll = map(min, self.ll, other[:mid])
self.ur = map(max, self.ur, other[mid:]) | 1,057,597 |
Returns true if this envelope intersects another.
Arguments:
other -- Envelope or tuple of (minX, minY, maxX, maxY) | def intersects(self, other):
try:
return (self.min_x <= other.max_x and
self.max_x >= other.min_x and
self.min_y <= other.max_y and
self.max_y >= other.min_y)
except AttributeError:
return self.intersects(Envelope(other)) | 1,057,599 |
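A quick sketch; the second call shows the tuple fallback handled by the ``AttributeError`` branch:

    a = Envelope(0, 0, 2, 2)
    a.intersects(Envelope(1, 1, 3, 3))  # True -- the boxes overlap
    a.intersects((3, 3, 4, 4))          # False -- disjoint, via tuple fallback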
Returns a new envelope rescaled from center by the given factor(s).
Arguments:
xfactor -- int or float X scaling factor
yfactor -- int or float Y scaling factor | def scale(self, xfactor, yfactor=None):
yfactor = xfactor if yfactor is None else yfactor
x, y = self.centroid
xshift = self.width * xfactor * 0.5
yshift = self.height * yfactor * 0.5
return Envelope(x - xshift, y - yshift, x + xshift, y + yshift) | 1,057,600 |
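A sketch of rescaling about the centre, assuming the class's ``centroid``, ``width`` and ``height`` properties behave as their names suggest:

    env = Envelope(0, 0, 2, 2)
    env.scale(2)       # doubles both axes -> Envelope(-1.0, -1.0, 3.0, 3.0)
    env.scale(1, 0.5)  # halves the height only -> Envelope(0.0, 0.5, 2.0, 1.5)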
desc: >
Provides information about the exceptions that a
function or method can raise.
args:
- name: desc
desc: >
Provides a description of what conditions will cause
this exception to be raised
type: str
- name: type
desc: The type of exception raised
type: str | def __init__(
self,
desc,
type,
):
self.desc = type_assert(desc, str)
self.type = type_assert(type, str) | 1,057,677 |
Build managed property interface.
Args:
attr (str): Property's name
Returns:
property: Managed property interface | def _manage_location(attr):
return property(lambda self: getattr(self, '_%s' % attr),
lambda self, value: self._set_location(attr, value)) | 1,057,766 |
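A sketch of how such a property factory is typically wired up; the ``Point`` class and its ``_set_location`` validator are hypothetical stand-ins:

    class Point(object):
        latitude = _manage_location('latitude')

        def __init__(self, latitude):
            self.latitude = latitude  # routed through _set_location()

        def _set_location(self, attr, value):
            # hypothetical validation hook; a real class would range-check
            setattr(self, '_%s' % attr, float(value))

    Point('52.015').latitude  # -> 52.015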
Generate a human readable DM/DMS location string.
Args:
latitude (float): Location's latitude
longitude (float): Location's longitude
mode (str): Coordinate formatting system to use
unistr (bool): Whether to use extended character set | def _dms_formatter(latitude, longitude, mode, unistr=False):
if unistr:
chars = ('°', '′', '″')
else:
chars = ('°', "'", '"')
latitude_dms = tuple(map(abs, utils.to_dms(latitude, mode)))
longitude_dms = tuple(map(abs, utils.to_dms(longitude, mode)))
text = []
if mode == 'dms':
text.append('%%02i%s%%02i%s%%02i%s' % chars % latitude_dms)
else:
text.append('%%02i%s%%05.2f%s' % chars[:2] % latitude_dms)
text.append('S' if latitude < 0 else 'N')
if mode == 'dms':
text.append(', %%03i%s%%02i%s%%02i%s' % chars % longitude_dms)
else:
text.append(', %%03i%s%%05.2f%s' % chars[:2] % longitude_dms)
text.append('W' if longitude < 0 else 'E')
return text | 1,057,767 |
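The pieces are returned as a list so callers can join or reuse them as needed. Assuming ``utils.to_dms`` splits a decimal angle into (degrees, minutes, seconds), output for London-ish coordinates would resemble:

    text = _dms_formatter(51.5074, -0.1278, 'dms', unistr=True)
    # ''.join(text) would resemble '51°30′26″N, 000°07′40″W',
    # give or take rounding in utils.to_dms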
Initialise a new ``TimedPoint`` object.
Args:
latitude (float, tuple or list): Location's latitude
longitude (float, tuple or list): Location's longitude
units (str): Unit type to be used for distances
angle (str): Type for specified angles
timezone (int): Offset from UTC in minutes
time (datetime.datetime): Time associated with the location | def __init__(self, latitude, longitude, units='metric',
angle='degrees', timezone=0, time=None):
super(TimedPoint, self).__init__(latitude, longitude, units, angle,
timezone)
self.time = time | 1,057,768 |
Recursively marshal a Python object to a BSON-compatible dict
that can be passed to PyMongo, Motor, etc...
Args:
obj: object, its members can be nested Python
objects which will be converted to dictionaries
types: tuple-of-types, The BSON primitive types, typically
you would not change this
fields: None-list-of-str, Explicitly marshal only these fields
Returns:
dict | def marshal_bson(
obj,
types=BSON_TYPES,
fields=None,
):
return marshal_dict(
obj,
types,
fields=fields,
) | 1,058,285 |
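A sketch of intended use, assuming ``marshal_dict`` walks an object's attributes recursively; the ``User`` class is hypothetical:

    class User(object):
        def __init__(self, name, tags):
            self.name = name
            self.tags = tags

    doc = marshal_bson(User('alice', ['admin', 'ops']))
    # doc would be {'name': 'alice', 'tags': ['admin', 'ops']},
    # ready to pass to PyMongo's insert_one()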
Yields tile (x, y, z) tuples for a bounding box and zoom levels.
Arguments:
bbox - bounding box as a 4-length sequence
zlevs - sequence of tile zoom levels | def from_bbox(bbox, zlevs):
env = Envelope(bbox)
for z in zlevs:
corners = [to_tile(*coord + (z,)) for coord in (env.ul, env.lr)]
xs, ys = [range(p1, p2 + 1) for p1, p2 in zip(*corners)]
for coord in itertools.product(xs, ys, (z,)):
yield coord | 1,058,442 |
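A usage sketch, assuming WGS84 longitude/latitude bounds and the ``Envelope`` class's ``ul``/``lr`` corner properties; the bounding box is hypothetical:

    # Tiles covering roughly central London at zoom levels 10 and 11
    for x, y, z in from_bbox((-0.5, 51.3, 0.3, 51.7), (10, 11)):
        print('%d/%d/%d' % (z, x, y))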
Returns a tuple of (longitude, latitude) from a map tile xyz coordinate.
See http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames#Lon..2Flat._to_tile_numbers_2
Arguments:
xtile - x tile location as int or float
ytile - y tile location as int or float
zoom - zoom level as int or float | def to_lonlat(xtile, ytile, zoom):
n = 2.0 ** zoom
lon = xtile / n * 360.0 - 180.0
# Calculate latitude in radians and convert to degrees, constrained from
# -90 to 90. Tile values too large for this zoom level are invalid and
# could overflow.
try:
lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / n)))
except OverflowError:
raise ValueError('Invalid tile coordinate for zoom level %d' % zoom)
lat = math.degrees(lat_rad)
return lon, lat | 1,058,443 |
Returns a tuple of (xtile, ytile) from a (longitude, latitude) coordinate.
See http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames
Arguments:
lon - longitude as int or float
lat - latitude as int or float
zoom - zoom level as int or float | def to_tile(lon, lat, zoom):
lat_rad = math.radians(lat)
n = 2.0 ** zoom
xtile = int((lon + 180.0) / 360.0 * n)
ytile = int((1.0 - math.log(math.tan(lat_rad) +
(1 / math.cos(lat_rad))) / math.pi) / 2.0 * n)
return xtile, ytile | 1,058,444 |
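The two functions are near-inverses: ``to_lonlat`` returns the north-west corner of the tile. A round trip at the origin, which is exactly representable:

    to_tile(0.0, 0.0, 1)  # -> (1, 1)
    to_lonlat(1, 1, 1)    # -> (0.0, 0.0), the tile's NW corner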
Initialise a new ``_GpxElem`` object.
Args:
latitude (float): Element's latitude
longitude (float): Element's longitude
name (str): Name for Element
description (str): Element's description
elevation (float): Element's elevation
time (utils.Timestamp): Time the data was generated | def __init__(self, latitude, longitude, name=None, description=None,
elevation=None, time=None):
super(_GpxElem, self).__init__(latitude, longitude, time=time)
self.name = name
self.description = description
self.elevation = elevation | 1,058,881 |
Calculate distances between locations in segments.
Args:
method (str): Method used to calculate distance
Returns:
list of list of float: Groups of distances between points in
segments | def distance(self, method='haversine'):
distances = []
for segment in self:
if len(segment) < 2:
distances.append([])
else:
distances.append(segment.distance(method))
return distances | 1,058,885 |
Calculate bearing between locations in segments.
Args:
format (str): Format of the bearing string to return
Returns:
list of list of float: Groups of bearings between points in
segments | def bearing(self, format='numeric'):
bearings = []
for segment in self:
if len(segment) < 2:
bearings.append([])
else:
bearings.append(segment.bearing(format))
return bearings | 1,058,886 |
Calculate final bearing between locations in segments.
Args:
format (str): Format of the bearing string to return
Returns:
list of list of float: Groups of bearings between points in
segments | def final_bearing(self, format='numeric'):
bearings = []
for segment in self:
if len(segment) < 2:
bearings.append([])
else:
bearings.append(segment.final_bearing(format))
return bearings | 1,058,887 |
Test whether locations are within a given range of ``location``.
Args:
location (Point): Location to test range against
distance (float): Distance to test location is within
Returns:
list of list of Point: Groups of points in range per segment | def range(self, location, distance):
return (segment.range(location, distance) for segment in self) | 1,058,890 |
Calculate destination locations for given distance and bearings.
Args:
bearing (float): Bearing to move on in degrees
distance (float): Distance in kilometres
Returns:
list of list of Point: Groups of points shifted by ``distance``
and ``bearing`` | def destination(self, bearing, distance):
return (segment.destination(bearing, distance) for segment in self) | 1,058,891 |
Calculate sunrise times for locations.
Args:
date (datetime.date): Calculate rise or set for given date
zenith (str): Calculate sunrise events, or end of twilight
Returns:
list of list of datetime.datetime: The time for the sunrise for
each point in each segment | def sunrise(self, date=None, zenith=None):
return (segment.sunrise(date, zenith) for segment in self) | 1,058,892 |
Calculate sunset times for locations.
Args:
date (datetime.date): Calculate rise or set for given date
zenith (str): Calculate sunset events, or start of twilight times
Returns:
list of list of datetime.datetime: The time for the sunset for each
point in each segment | def sunset(self, date=None, zenith=None):
return (segment.sunset(date, zenith) for segment in self) | 1,058,893 |
Calculate sunrise/sunset times for locations.
Args:
date (datetime.date): Calculate rise or set for given date
zenith (str): Calculate rise/set events, or twilight times
Returns:
list of list of 2-tuple of datetime.datetime: The time for the
sunrise and sunset events for each point in each segment | def sun_events(self, date=None, zenith=None):
return (segment.sun_events(date, zenith) for segment in self) | 1,058,894 |
Import information from GPX metadata.
Args:
elements (etree.Element): GPX metadata subtree | def import_metadata(self, elements):
metadata_elem = lambda name: etree.QName(GPX_NS, name)
for child in elements.getchildren():
tag_ns, tag_name = child.tag[1:].split('}')
if not tag_ns == GPX_NS:
continue
if tag_name in ('name', 'desc', 'keywords'):
setattr(self, tag_name, child.text)
elif tag_name == 'time':
self.time = utils.Timestamp.parse_isoformat(child.text)
elif tag_name == 'author':
self.author['name'] = child.findtext(metadata_elem('name'))
aemail = child.find(metadata_elem('email'))
if aemail:
self.author['email'] = '%s@%s' % (aemail.get('id'),
aemail.get('domain'))
self.author['link'] = child.findtext(metadata_elem('link'))
elif tag_name == 'bounds':
self.bounds = {
'minlat': child.get('minlat'),
'maxlat': child.get('maxlat'),
'minlon': child.get('minlon'),
'maxlon': child.get('maxlon'),
}
elif tag_name == 'extensions':
self.extensions = child.getchildren()
elif tag_name == 'copyright':
if child.get('author'):
self.copyright['name'] = child.get('author')
self.copyright['year'] = child.findtext(metadata_elem('year'))
self.copyright['license'] = child.findtext(metadata_elem('license'))
elif tag_name == 'link':
link = {
'href': child.get('href'),
'type': child.findtext(metadata_elem('type')),
'text': child.findtext(metadata_elem('text')),
}
self.link.append(link) | 1,058,897 |
Helper function to simplify ``__repr__`` methods.
Args:
obj: Object to pull argument values for
remap (dict): Argument pairs to remap before output
Returns:
str: Self-documenting representation of ``value`` | def repr_assist(obj, remap=None):
if not remap:
remap = {}
data = []
for arg in inspect.getargspec(getattr(obj.__class__, '__init__'))[0]:
if arg == 'self':
continue
elif arg in remap:
value = remap[arg]
else:
try:
value = getattr(obj, arg)
except AttributeError:
value = getattr(obj, '_%s' % arg)
if isinstance(value, (type(None), list, basestring, datetime.date,
datetime.time)):
data.append(repr(value))
else:
data.append(str(value))
return "%s(%s)" % (obj.__class__.__name__, ', '.join(data)) | 1,058,918 |
Prepare various input types for parsing.
Args:
data (iter): Data to read
method (str): Method to process data with
mode (str): Custom mode to process with, if data is a file
Returns:
list: List suitable for parsing
Raises:
TypeError: Invalid value for data | def prepare_read(data, method='readlines', mode='r'):
if hasattr(data, 'readlines'):
data = getattr(data, method)()
elif isinstance(data, list):
if method == 'read':
return ''.join(data)
elif isinstance(data, basestring):
data = getattr(open(data, mode), method)()
else:
raise TypeError('Unable to handle data of type %r' % type(data))
return data | 1,058,919 |
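The helper accepts an open file, a list of lines, or a filename; a short sketch with a hypothetical file:

    lines = prepare_read('data.txt')             # filename -> list of lines
    lines = prepare_read(open('data.txt'))       # file object, same result
    text = prepare_read(['a\n', 'b\n'], 'read')  # list + 'read' -> 'a\nb\n'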
Prepare various input types for CSV parsing.
Args:
data (iter): Data to read
field_names (tuple of str): Ordered names to assign to fields
Returns:
csv.DictReader: CSV reader suitable for parsing
Raises:
TypeError: Invalid value for data | def prepare_csv_read(data, field_names, *args, **kwargs):
if hasattr(data, 'readlines') or isinstance(data, list):
pass
elif isinstance(data, basestring):
data = open(data)
else:
raise TypeError('Unable to handle data of type %r' % type(data))
return csv.DictReader(data, field_names, *args, **kwargs) | 1,058,920 |
Prepare various input types for XML parsing.
Args:
data (iter): Data to read
objectify (bool): Parse using lxml's objectify data binding
Returns:
etree.ElementTree: Tree suitable for parsing
Raises:
TypeError: Invalid value for data | def prepare_xml_read(data, objectify=False):
mod = _objectify if objectify else etree
if hasattr(data, 'readlines'):
data = mod.parse(data).getroot()
elif isinstance(data, list):
data = mod.fromstring(''.join(data))
elif isinstance(data, basestring):
data = mod.parse(open(data)).getroot()
else:
raise TypeError('Unable to handle data of type %r' % type(data))
return data | 1,058,921 |
Create a simple namespace-aware objectify element creator.
Args:
namespace (str): Namespace to work in
Returns:
function: Namespace-aware element creator | def element_creator(namespace=None):
ELEMENT_MAKER = _objectify.ElementMaker(namespace=namespace,
annotate=False)
def create_elem(tag, attr=None, text=None):
if not attr:
attr = {}
if text:
element = getattr(ELEMENT_MAKER, tag)(text, **attr)
else:
element = getattr(ELEMENT_MAKER, tag)(**attr)
return element
return create_elem | 1,058,922 |
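A sketch of the factory in use, assuming lxml's objectify is available; the GPX namespace here is illustrative:

    create = element_creator('http://www.topografix.com/GPX/1/1')
    elem = create('name', text='Home')
    # etree.tostring(elem) would resemble
    # '<name xmlns="http://www.topografix.com/GPX/1/1">Home</name>'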