Unnamed: 0 (int64) | code (string) | docstring (string)
---|---|---|
382,800 | def flush(self):
address = self.remote_address
while len(self._batches) > 0:
self._socket.sendto(self._batches[0], address)
self._batches.popleft()
return self | Send buffered metrics in batch requests |
382,801 | def fetch_raw(self):
for results in super(LogQuery, self).execute():
if in results and results[]:
yield results[] | Execute the query and return results in batches.
Optional keyword arguments are passed to Query.execute(). Whether
this is real-time or stored logs is dependent on the value of
``fetch_type``.
:return: generator of dict results |
382,802 | def response_handler(msg: Dict[str, str]) -> None:
from wdom.document import getElementByWdomId
id = msg[]
elm = getElementByWdomId(id)
if elm:
elm.on_response(msg)
else:
logger.warning(.format(id)) | Handle response sent by browser. |
382,803 | def estimate_noiseperbl(data):
datamean = data.mean(axis=2).imag
(datameanmin, datameanmax) = rtlib.sigma_clip(datamean.flatten())
good = n.where( (datamean>datameanmin) & (datamean<datameanmax) )
noiseperbl = datamean[good].std()
logger.debug( % (100.*len(good[0])/len(datamean.flatten()), datameanmin, datameanmax, noiseperbl))
return noiseperbl | Takes large data array and sigma clips it to find noise per bl for input to detect_bispectra.
Takes mean across pols and channels for now, as in detect_bispectra. |
382,804 | def worker(self):
fullseqs = self.sample_loci()
liters = itertools.product(*self.imap.values())
hashval = uuid.uuid4().hex
weights = []
for ridx, lidx in enumerate(liters):
a,b,c,d = lidx
sub = {}
for i in lidx:
if self.rmap[i] == "p1":
sub["A"] = fullseqs[i]
elif self.rmap[i] == "p2":
sub["B"] = fullseqs[i]
elif self.rmap[i] == "p3":
sub["C"] = fullseqs[i]
else:
sub["D"] = fullseqs[i]
nex = []
for tax in list("ABCD"):
nex.append(">{} {}".format(tax, sub[tax]))
nsites, nvar = count_var(nex)
if nvar > self.minsnps:
nexus = "{} {}\n".format(4, len(fullseqs[a])) + "\n".join(nex)
treeorder = self.run_tree_inference(nexus, "{}.{}".format(hashval, ridx))
weights.append(treeorder)
rfiles = glob.glob(os.path.join(tempfile.tempdir, "*{}*".format(hashval)))
for rfile in rfiles:
if os.path.exists(rfile):
os.remove(rfile)
trees = ["ABCD", "ACBD", "ADBC"]
wdict = {i:float(weights.count(i))/len(weights) for i in trees}
return wdict | Calculates the quartet weights for the test at a random
subsampled chunk of loci. |
382,805 | def from_config(cls, config, name, section_key="score_caches"):
sentinel_logger.info("Loading RedisSentinel '{0}' from config.".format(name))
section = config[section_key][name]
kwargs = {k: v for k, v in section.items() if k != "class"}
return cls.from_parameters(**kwargs) | score_caches:
redis_sentinel:
class: ores.score_caches.RedisSentinel
prefix: ores-derp
ttl: 9001
socket_timeout: 0.1
cluster: mymaster
hosts:
- localhost:5000
- localhost:5001
- localhost:5002 |
382,806 | def most_probable_alleles(allele_list):
all_alleles = defaultdict()
for allele, pvalue in allele_list:
allele = re.split(, allele)
if len(allele) < 2:
continue
allele = .join([allele[0], allele[1]])
try:
all_alleles[allele].append(float(pvalue))
except KeyError:
all_alleles[allele] = [float(pvalue)]
if len(all_alleles.keys()) <= 2:
return all_alleles.keys()
else:
return sorted(all_alleles.keys(), key=lambda x: \
(-len(all_alleles[x]), sum(all_alleles[x])))[0:2] | This module accepts a list of tuples of (allele, p_value) pairs. It returns the 2 most probable
alleles for that group. |
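The ranking described above (prefer the allele seen most often, break ties by the smaller summed p-value) can be shown with a small self-contained sketch; the allele strings and p-values below are made-up, and the prefix-splitting done by the original function is omitted.

```python
from collections import defaultdict

def pick_top_two(allele_pvalues):
    # Group p-values per allele, rank by call count (descending),
    # then by summed p-value (ascending), and keep the top two.
    grouped = defaultdict(list)
    for allele, pvalue in allele_pvalues:
        grouped[allele].append(float(pvalue))
    return sorted(grouped, key=lambda a: (-len(grouped[a]), sum(grouped[a])))[:2]

calls = [("HLA-A*02:01", 0.01), ("HLA-A*02:01", 0.02), ("HLA-A*01:01", 0.03)]
print(pick_top_two(calls))  # ['HLA-A*02:01', 'HLA-A*01:01']
```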
382,807 | def package_info(pkg_name):
indent = " "
for config, _ in _iter_packages():
if pkg_name == config["name"]:
print("Package:", pkg_name)
print(indent, "Platform:", config["platform"])
print(indent, "Version:", config["version"])
print(indent, "Path:", config["path"])
print(indent, "Worlds:")
for world in config["maps"]:
world_info(world["name"], world_config=world, initial_indent=" ") | Prints the information of a package.
Args:
pkg_name (str): The name of the desired package to get information |
382,808 | def line(self, plunge, bearing, *args, **kwargs):
lon, lat = stereonet_math.line(plunge, bearing)
args, kwargs = self._point_plot_defaults(args, kwargs)
return self.plot([lon], [lat], *args, **kwargs) | Plot points representing linear features on the axes. Additional
arguments and keyword arguments are passed on to `plot`.
Parameters
----------
plunge, bearing : number or sequence of numbers
The plunge and bearing of the line(s) in degrees. The plunge is
measured in degrees downward from the end of the feature specified
by the bearing.
**kwargs
Additional parameters are passed on to `plot`.
Returns
-------
A sequence of Line2D artists representing the point(s) specified by
`strike` and `dip`. |
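This method looks like part of an mplstereonet-style axes class; a hedged usage sketch, assuming the mplstereonet package provides the stereonet projection and the `line` method shown above:

```python
import matplotlib.pyplot as plt
import mplstereonet  # assumption: provides stereonet axes with a line() method

fig, ax = mplstereonet.subplots()
# Two linear features given as (plunge, bearing) pairs in degrees.
ax.line([30, 55], [120, 240], 'ko', markersize=5)
ax.grid()
plt.show()
```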
382,809 | def variance(numbers, type='population'):
mean = average(numbers)
variance = 0
for number in numbers:
variance += (mean - number) ** 2
if type == 'population':
return variance / len(numbers)
else:
return variance / (len(numbers) - 1) | Calculates the population or sample variance of a list of numbers.
A large number means the results are all over the place, while a
small number means the results are comparatively close to the average.
Args:
numbers: a list of integers or floating point numbers to compare.
type: string, 'population' or 'sample', the kind of variance to be computed.
Returns:
The computed population or sample variance.
Defaults to population variance.
Requires:
The math module, average() |
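The population/sample distinction the docstring describes can be checked against the standard library's statistics module:

```python
import statistics

numbers = [2, 4, 4, 4, 5, 5, 7, 9]
print(statistics.pvariance(numbers))  # population variance: 4
print(statistics.variance(numbers))   # sample variance: ~4.571
```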
382,810 | def domain(value,
allow_empty = False,
allow_ips = False,
**kwargs):
is_recursive = kwargs.pop('is_recursive', False)
if not value and not allow_empty:
raise errors.EmptyValueError( % value)
elif not value:
return None
if not isinstance(value, basestring):
raise errors.CannotCoerceError(
% type(value))
if '/' in value:
raise errors.SlashInDomainError()
if '\\' in value:
raise errors.SlashInDomainError()
if '@' in value:
raise errors.AtInDomainError()
if ':' in value:
raise errors.ColonInDomainError()
value = value.strip().lower()
for item in string_.whitespace:
if item in value:
raise errors.WhitespaceInDomainError(
)
if value in SPECIAL_USE_DOMAIN_NAMES:
return value
if allow_ips:
try:
ip_address(value, allow_empty = allow_empty)
is_valid = True
except (ValueError, TypeError, AttributeError):
is_valid = False
if is_valid:
return value
is_valid = DOMAIN_REGEX.match(value)
if not is_valid and not is_recursive:
with_prefix = 'http://' + value
try:
url(with_prefix, force_run = True, is_recursive = True)
except ValueError:
raise errors.InvalidDomainError( % value)
return value | Validate that ``value`` is a valid domain name.
.. caution::
This validator does not verify that ``value`` **exists** as a domain. It
merely verifies that its contents *might* exist as a domain.
.. note::
This validator checks to validate that ``value`` resembles a valid
domain name. It is - generally - compliant with
`RFC 1035 <https://tools.ietf.org/html/rfc1035>`_ and
`RFC 6761 <https://tools.ietf.org/html/rfc6761>`_, however it diverges
in a number of key ways:
* Including authentication (e.g. ``username:[email protected]``) will
fail validation.
* Including a path (e.g. ``domain.dev/path/to/file``) will fail validation.
* Including a port (e.g. ``domain.dev:8080``) will fail validation.
If you are hoping to validate a more complete URL, we recommend that you
see :func:`url <validator_collection.validators.url>`.
.. hint::
Leading and trailing whitespace will be automatically stripped.
:param value: The value to validate.
:type value: :class:`str <python:str>` / :obj:`None <python:None>`
:param allow_empty: If ``True``, returns :obj:`None <python:None>` if
``value`` is empty. If ``False``, raises a
:class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
if ``value`` is empty. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`
:param allow_ips: If ``True``, will succeed when validating IP addresses,
If ``False``, will raise a :class:`InvalidDomainError` if ``value`` is an IP
address. Defaults to ``False``.
:type allow_ips: :class:`bool <python:bool>`
:returns: ``value`` / :obj:`None <python:None>`
:rtype: :class:`str <python:str>` / :obj:`None <python:None>`
:raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False``
:raises CannotCoerceError: if ``value`` is not a :class:`str <python:str>` or
:obj:`None <python:None>`
:raises InvalidDomainError: if ``value`` is not a valid domain name or
empty with ``allow_empty`` set to ``True``
:raises SlashInDomainError: if ``value`` contains a slash or backslash
:raises AtInDomainError: if ``value`` contains an ``@`` symbol
:raises ColonInDomainError: if ``value`` contains a ``:`` symbol
:raises WhitespaceInDomainError: if ``value`` contains whitespace |
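Typical usage, assuming this is the validator_collection library (whose error classes subclass the built-in ValueError / TypeError):

```python
from validator_collection import validators

value = validators.domain('www.example.com')   # returns the cleaned value
print(value)

try:
    validators.domain('not_a_domain!')
except ValueError:
    # InvalidDomainError and related errors derive from ValueError
    print('rejected')
```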
382,811 | def calcTightAnchors(args, d, patches):
centerPoint = (int(args.worldSize/2), int(args.worldSize/2))
anchors = []
if patches == 0:
pass
elif patches == 1:
anchors.append(centerPoint)
elif patches % 2 == 0:
dsout = int((patches-2)//2) + 1
add_anchors(centerPoint, d, dsout, anchors, True)
if d != 0:
anchors = list(set(anchors))
anchors.sort()
if dsout != 1:
return (anchors +
calcTightAnchors(args, d, patches-2)
)[:patches*patches]
else:
dsout = int((patches-1)//2)
add_anchors(centerPoint, d, dsout, anchors, False)
if dsout != 1:
return anchors + calcTightAnchors(d, patches-2)
return anchors | Recursively generates the number of anchor points specified in the
patches argument, such that all patches are d cells away
from their nearest neighbors. |
382,812 | def create_conf_file (self):
cmd_obj = self.distribution.get_command_obj("install")
cmd_obj.ensure_finalized()
data = []
for d in [, , , , , ]:
attr = % d
if cmd_obj.root:
cutoff = len(cmd_obj.root)
if cmd_obj.root:
_drive, tail = os.path.splitdrive(val)
if tail.startswith(os.sep):
tail = tail[1:]
self.install_lib = os.path.join(cmd_obj.root, tail)
else:
self.install_lib = val
data.append("%s = %r" % (attr, cnormpath(val)))
self.distribution.create_conf_file(data, directory=self.install_lib)
return self.get_conf_output() | Create configuration file. |
382,813 | def _set_config(c):
glfw.glfwWindowHint(glfw.GLFW_RED_BITS, c[])
glfw.glfwWindowHint(glfw.GLFW_GREEN_BITS, c[])
glfw.glfwWindowHint(glfw.GLFW_BLUE_BITS, c[])
glfw.glfwWindowHint(glfw.GLFW_ALPHA_BITS, c[])
glfw.glfwWindowHint(glfw.GLFW_ACCUM_RED_BITS, 0)
glfw.glfwWindowHint(glfw.GLFW_ACCUM_GREEN_BITS, 0)
glfw.glfwWindowHint(glfw.GLFW_ACCUM_BLUE_BITS, 0)
glfw.glfwWindowHint(glfw.GLFW_ACCUM_ALPHA_BITS, 0)
glfw.glfwWindowHint(glfw.GLFW_DEPTH_BITS, c[])
glfw.glfwWindowHint(glfw.GLFW_STENCIL_BITS, c[])
glfw.glfwWindowHint(glfw.GLFW_SAMPLES, c[])
glfw.glfwWindowHint(glfw.GLFW_STEREO, c[])
if not c[]:
raise RuntimeError(
) | Set gl configuration for GLFW |
382,814 | def video_in_option(self, param, profile=):
if profile == :
field = param
else:
field = .format(profile, param)
return utils.pretty(
[opt for opt in self.video_in_options.split()
if .format(field) in opt][0]) | Return video input option.
Params:
param - parameter, such as 'DayNightColor'
profile - 'Day', 'Night' or 'Normal' |
382,815 | def open(self):
if self._rpc is not None:
return self._rpc
self.load_config()
if not config.scgi_url:
raise error.UserError("You need to configure a XMLRPC connection, read"
" https://pyrocore.readthedocs.io/en/latest/setup.html")
self._rpc = xmlrpc.RTorrentProxy(config.scgi_url)
self.versions, self.version_info = self._rpc._set_mappings()
self.engine_id = self._rpc.session.name()
time_usec = self._rpc.system.time_usec()
if time_usec < 2**32:
self.LOG.warn("Your xmlrpc-c is broken (64 bit integer support missing,"
" %r returned instead)" % (type(time_usec),))
self.engine_software = "rTorrent %s/%s" % self.versions
if "+ssh:" in config.scgi_url:
self.startup = int(self._rpc.startup_time() or time.time())
else:
self._session_dir = self._rpc.session.path()
if not self._session_dir:
raise error.UserError("You need a session directory, read"
" https://pyrocore.readthedocs.io/en/latest/setup.html")
if not os.path.exists(self._session_dir):
raise error.UserError("Non-existing session directory %r" % self._session_dir)
self._download_dir = os.path.expanduser(self._rpc.directory.default())
if not os.path.exists(self._download_dir):
raise error.UserError("Non-existing download directory %r" % self._download_dir)
self.startup = os.path.getmtime(os.path.join(self._session_dir, "rtorrent.lock"))
self.LOG.debug(repr(self))
return self._rpc | Open connection. |
382,816 | def convert_table(self, markup):
for table in re.findall(self.re["html-table"], markup):
wiki = table
wiki = re.sub(r"<table(.*?)>", "{|\\1", wiki)
wiki = re.sub(r"<tr(.*?)>", "|-\\1", wiki)
wiki = re.sub(r"<td(.*?)>", "|\\1|", wiki)
wiki = wiki.replace("</td>", "\n")
wiki = wiki.replace("</tr>", "\n")
wiki = wiki.replace("</table>", "\n|}")
markup = markup.replace(table, wiki)
return markup | Substitutes <table> content with Wikipedia markup. |
382,817 | def merge(self, schema):
for item in schema.attributes.items():
if item[0] in self.attributes:
continue
self.all.append(item[1])
self.attributes[item[0]] = item[1]
for item in schema.elements.items():
if item[0] in self.elements:
continue
self.all.append(item[1])
self.elements[item[0]] = item[1]
for item in schema.types.items():
if item[0] in self.types:
continue
self.all.append(item[1])
self.types[item[0]] = item[1]
for item in schema.groups.items():
if item[0] in self.groups:
continue
self.all.append(item[1])
self.groups[item[0]] = item[1]
for item in schema.agrps.items():
if item[0] in self.agrps:
continue
self.all.append(item[1])
self.agrps[item[0]] = item[1]
schema.merged = True
return self | Merge the contents from the schema. Only objects not already contained
in this schema's collections are merged. This is to provide for
bidirectional import which produce cyclic includes.
@returns: self
@rtype: L{Schema} |
382,818 | def _setup_redis(self):
if not self.closed:
try:
self.logger.debug("Creating redis connection to host " +
str(self.settings['REDIS_HOST']))
self.redis_conn = redis.StrictRedis(host=self.settings['REDIS_HOST'],
port=self.settings['REDIS_PORT'],
db=self.settings['REDIS_DB'])
self.redis_conn.info()
self.redis_connected = True
self.logger.info("Successfully connected to redis")
except KeyError as e:
self.logger.error("Missing setting: " + str(e),
{'ex': traceback.format_exc()})
except:
self.logger.error("Couldn't connect to redis",
{'ex': traceback.format_exc()})
raise | Returns a Redis Client |
382,819 | def convert_column(data, schemae):
ctype = schemae.converted_type
if ctype == parquet_thrift.ConvertedType.DECIMAL:
scale_factor = Decimal("10e-{}".format(schemae.scale))
if schemae.type == parquet_thrift.Type.INT32 or schemae.type == parquet_thrift.Type.INT64:
return [Decimal(unscaled) * scale_factor for unscaled in data]
return [Decimal(intbig(unscaled)) * scale_factor for unscaled in data]
elif ctype == parquet_thrift.ConvertedType.DATE:
return [datetime.date.fromordinal(d) for d in data]
elif ctype == parquet_thrift.ConvertedType.TIME_MILLIS:
return [datetime.timedelta(milliseconds=d) for d in data]
elif ctype == parquet_thrift.ConvertedType.TIMESTAMP_MILLIS:
return [datetime.datetime.utcfromtimestamp(d / 1000.0) for d in data]
elif ctype == parquet_thrift.ConvertedType.UTF8:
return [codecs.decode(item, "utf-8") for item in data]
elif ctype == parquet_thrift.ConvertedType.UINT_8:
return _convert_unsigned(data, )
elif ctype == parquet_thrift.ConvertedType.UINT_16:
return _convert_unsigned(data, )
elif ctype == parquet_thrift.ConvertedType.UINT_32:
return _convert_unsigned(data, )
elif ctype == parquet_thrift.ConvertedType.UINT_64:
return _convert_unsigned(data, )
elif ctype == parquet_thrift.ConvertedType.JSON:
return [json.loads(s) for s in codecs.iterdecode(data, "utf-8")]
elif ctype == parquet_thrift.ConvertedType.BSON and bson:
return [bson.BSON(s).decode() for s in data]
else:
logger.info("Converted type '%s' not handled",
parquet_thrift.ConvertedType._VALUES_TO_NAMES[ctype])
return data | Convert known types from primitive to rich. |
382,820 | def _add_scheme():
lists = [
urllib.parse.uses_relative,
urllib.parse.uses_netloc,
urllib.parse.uses_query,
]
for l in lists:
l.append('mongodb') | urllib.parse doesn't support the mongodb scheme, but it's easy
to make it so. |
382,821 | def get_body_region(defined):
scope = defined.get_scope()
pymodule = defined.get_module()
lines = pymodule.lines
node = defined.get_ast()
start_line = node.lineno
if defined.get_doc() is None:
start_line = node.body[0].lineno
elif len(node.body) > 1:
start_line = node.body[1].lineno
start = lines.get_line_start(start_line)
scope_start = pymodule.logical_lines.logical_line_in(scope.start)
if scope_start[1] >= start_line:
start = pymodule.source_code.index(, start) + 1
while pymodule.source_code[start].isspace():
start += 1
end = min(lines.get_line_end(scope.end) + 1, len(pymodule.source_code))
return start, end | Return the start and end offsets of function body |
382,822 | def delta_crl_distribution_points(self):
if self._delta_crl_distribution_points is None:
self._delta_crl_distribution_points = []
if self.freshest_crl_value is not None:
for distribution_point in self.freshest_crl_value:
distribution_point_name = distribution_point[]
if distribution_point_name.name == :
continue
for general_name in distribution_point_name.chosen:
if general_name.name == :
self._delta_crl_distribution_points.append(distribution_point)
return self._delta_crl_distribution_points | Returns delta CRL URLs - only applies to complete CRLs
:return:
A list of zero or more DistributionPoint objects |
382,823 | def on_train_begin(self, **kwargs:Any)->None:
"Initializes the best value."
self.best = float('inf') if self.operator == np.less else -float('inf') | Initializes the best value. |
382,824 | def get_selected_subassistant_path(self, **kwargs):
path = [self]
previous_subas_list = None
currently_searching = self.get_subassistant_tree()[1]
while settings.SUBASSISTANT_N_STRING.format(len(path) - 1) in kwargs and \
kwargs[settings.SUBASSISTANT_N_STRING.format(len(path) - 1)]:
for sa, subas_list in currently_searching:
if sa.name == kwargs[settings.SUBASSISTANT_N_STRING.format(len(path) - 1)]:
currently_searching = subas_list
path.append(sa)
break
if subas_list == previous_subas_list:
raise exceptions.AssistantNotFoundException(
.format(
n=kwargs[settings.SUBASSISTANT_N_STRING.format(len(path) - 1)],
p=path))
previous_subas_list = subas_list
return path | Recursively searches self._tree - has format of (Assistant: [list_of_subassistants]) -
for specific path from first to last selected subassistants.
Args:
kwargs: arguments containing names of the given assistants in form of
subassistant_0 = 'name', subassistant_1 = 'another_name', ...
Returns:
list of subassistants objects from tree sorted from first to last |
382,825 | def _browser_init(self):
if self.session:
return
self.session = requests.Session()
headers = {}
if self.user_agent:
headers['User-Agent'] = self.user_agent
self.session.headers.update(headers)
if self._auth_method in [None, "", "HTTPBasicAuth"]:
if self._auth_username is not None:
self.session.auth = (self._auth_username, self._auth_password) | Init the browsing instance if not setup
:rtype: None |
382,826 | def readShocks(self):
for var_name in self.shock_vars:
setattr(self,var_name,getattr(self,var_name+'_hist')[self.t_sim,:]) | Reads values of shock variables for the current period from history arrays. For each
variable X named in self.shock_vars, this attribute of self is set to self.X_hist[self.t_sim,:].
This method is only ever called if self.read_shocks is True. This can be achieved by using
the method makeShockHistory() (or manually after storing a "handcrafted" shock history).
Parameters
----------
None
Returns
-------
None |
382,827 | def getFingerprintForExpression(self, body, sparsity=1.0):
return self._expressions.resolveExpression(self._retina, body, sparsity) | Resolve an expression
Args:
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns:
Fingerprint
Raises:
CorticalioException: if the request was not successful |
382,828 | def ray_triangle_id(triangles,
ray_origins,
ray_directions,
triangles_normal=None,
tree=None,
multiple_hits=True):
triangles = np.asanyarray(triangles, dtype=np.float64)
ray_origins = np.asanyarray(ray_origins, dtype=np.float64)
ray_directions = np.asanyarray(ray_directions, dtype=np.float64)
else:
plane_normals = triangles_normal[ray_candidates]
location, valid = intersections.planes_lines(
plane_origins=plane_origins,
plane_normals=plane_normals,
line_origins=line_origins,
line_directions=line_directions)
if (len(triangle_candidates) == 0 or
not valid.any()):
return [], [], []
barycentric = triangles_mod.points_to_barycentric(
triangle_candidates[valid], location)
hit = np.logical_and((barycentric > -tol.zero).all(axis=1),
(barycentric < (1 + tol.zero)).all(axis=1))
index_tri = ray_candidates[valid][hit]
index_ray = ray_id[valid][hit]
location = location[hit]
vector = location - ray_origins[index_ray]
distance = util.diagonal_dot(vector, ray_directions[index_ray])
forward = distance > -1e-6
index_tri = index_tri[forward]
index_ray = index_ray[forward]
location = location[forward]
distance = distance[forward]
if multiple_hits:
return index_tri, index_ray, location
if len(index_ray) == 0:
return index_tri, index_ray, location
first = np.zeros(len(index_ray), dtype=np.bool)
groups = grouping.group(index_ray)
for group in groups:
index = group[distance[group].argmin()]
first[index] = True
return index_tri[first], index_ray[first], location[first] | Find the intersections between a group of triangles and rays
Parameters
-------------
triangles : (n, 3, 3) float
Triangles in space
ray_origins : (m, 3) float
Ray origin points
ray_directions : (m, 3) float
Ray direction vectors
triangles_normal : (n, 3) float
Normal vector of triangles, optional
tree : rtree.Index
Rtree object holding triangle bounds
Returns
-----------
index_triangle : (h,) int
Index of triangles hit
index_ray : (h,) int
Index of ray that hit triangle
locations : (h, 3) float
Position of intersection in space |
382,829 | def register_service(cls, service):
logger.debug(.format(service.name))
return local_store.instance.register(service) | Add a service to the thread's StackInABox instance.
:param service: StackInABoxService instance to add to the test
For return value and errors see StackInABox.register() |
382,830 | def _import_astorb_to_database(
self,
astorbDictList):
self.log.info()
print "Refreshing the orbital elements database table"
dbSettings = self.settings["database settings"]["atlasMovers"]
insert_list_of_dictionaries_into_database_tables(
dbConn=self.atlasMoversDBConn,
log=self.log,
dictList=astorbDictList,
dbTableName="orbital_elements",
uniqueKeyList=["name"],
dateModified=True,
batchSize=10000,
replace=True,
dbSettings=dbSettings
)
print "Finished refreshing the orbital elements database table"
self.log.info()
return None | *import the astorb orbital elements to database*
**Key Arguments:**
- ``astorbDictList`` -- the astorb database parsed as a list of dictionaries
**Return:**
- None |
382,831 | def persist(self):
os.makedirs(self.__symbol_folder, exist_ok=True)
os.makedirs(self.__aliases_folder, exist_ok=True)
os.makedirs(self.__comments_folder, exist_ok=True)
for name, sym in self.__symbols.items():
with open(self.__get_pickle_path(self.__symbol_folder, name, True), 'wb') as _:
pickle.dump(sym, _)
for name, aliases in self.__aliases.items():
if aliases:
with open(self.__get_pickle_path(self.__aliases_folder, name, True), 'wb') as _:
pickle.dump(aliases, _)
for name, comment in self.__comments.items():
if comment:
with open(self.__get_pickle_path(self.__comments_folder, name, True), 'wb') as _:
pickle.dump(comment, _) | Banana banana |
382,832 | def thumbnail(self):
if self._thumbnail:
return self._thumbnail
thumbnail = self.meta.get(, [])[0]
if thumbnail and isfile(join(self.src_path, thumbnail)):
self._thumbnail = url_from_path(join(
self.name, get_thumb(self.settings, thumbnail)))
self.logger.debug("Thumbnail for %r : %s", self, self._thumbnail)
return self._thumbnail
else:
for f in self.medias:
ext = splitext(f.filename)[1]
if ext.lower() in self.settings[]:
size = f.size
if size is None:
size = get_size(f.src_path)
if size['width'] > size['height']:
self._thumbnail = (url_quote(self.name) + '/' +
f.thumbnail)
self.logger.debug(
"Use 1st landscape image as thumbnail for %r : %s",
self, self._thumbnail)
return self._thumbnail
if not self._thumbnail and self.medias:
for media in self.medias:
if media.thumbnail is not None:
self._thumbnail = (url_quote(self.name) + '/' +
media.thumbnail)
break
else:
self.logger.warning("No thumbnail found for %r", self)
return None
self.logger.debug("Use the 1st image as thumbnail for %r : %s",
self, self._thumbnail)
return self._thumbnail
if not self._thumbnail:
for path, album in self.gallery.get_albums(self.path):
if album.thumbnail:
self._thumbnail = (url_quote(self.name) + '/' +
album.thumbnail)
self.logger.debug(
"Using thumbnail from sub-directory for %r : %s",
self, self._thumbnail)
return self._thumbnail
self.logger.error(, self)
return None | Path to the thumbnail of the album. |
382,833 | def parse(self):
for line in self.stream:
line = line.rstrip()
self.nline += 1
if self.SUPYBOT_EMPTY_REGEX.match(line):
continue
ts, msg = self._parse_supybot_timestamp(line)
if self.SUPYBOT_EMPTY_COMMENT_REGEX.match(msg):
continue
elif self.SUPYBOT_EMPTY_COMMENT_ACTION_REGEX.match(msg):
continue
elif self.SUPYBOT_EMPTY_BOT_REGEX.match(msg):
continue
itype, nick, body = self._parse_supybot_msg(msg)
item = self._build_item(ts, itype, nick, body)
yield item | Parse a Supybot IRC stream.
Returns an iterator of dicts. Each dicts contains information
about the date, type, nick and body of a single log entry.
:returns: iterator of parsed lines
:raises ParseError: when an invalid line is found parsing the given
stream |
382,834 | def compact_bucket(db, buck_key, limit):
records = db.lrange(str(buck_key), 0, -1)
loader = limits.BucketLoader(limit.bucket_class, db, limit,
str(buck_key), records, stop_summarize=True)
buck_record = msgpack.dumps(dict(bucket=loader.bucket.dehydrate(),
uuid=str(uuid.uuid4())))
result = db.linsert(str(buck_key), 'after', loader.last_summarize_rec,
buck_record)
if result < 0:
LOG.warning("Bucket compaction on %s failed; will retry" % buck_key)
return
db.ltrim(str(buck_key), loader.last_summarize_idx + 1, -1) | Perform the compaction operation. This reads in the bucket
information from the database, builds a compacted bucket record,
inserts that record in the appropriate place in the database, then
removes outdated updates.
:param db: A database handle for the Redis database.
:param buck_key: A turnstile.limits.BucketKey instance containing
the bucket key.
:param limit: The turnstile.limits.Limit object corresponding to
the bucket. |
382,835 | def fetch(self):
soup = self.session.get_results_soup()
self.courses = CoursesList(soup) | Fetch this student's courses page. It's recommended to do that when
creating the object (this is the default) because the remote sessions
are short. |
382,836 | def hdfFromKwargs(hdf=None, **kwargs):
if not hdf:
hdf = HDF()
for key, value in kwargs.iteritems():
if isinstance(value, dict):
for k,v in value.iteritems():
dkey = "%s.%s"%(key,k)
args = {dkey:v}
hdfFromKwargs(hdf=hdf, **args)
elif isinstance(value, (list, tuple)):
for i, item in enumerate(value):
ikey = "%s.%s"%(key,i)
if isinstance(item, (list, tuple)):
args = {ikey:item}
hdfFromKwargs(hdf=hdf, **args)
elif isinstance(item, dict):
args = {ikey:item}
hdfFromKwargs(hdf=hdf, **args)
elif getattr(item, "HDF_ATTRIBUTES", False):
attrs = {}
for attr in item.HDF_ATTRIBUTES:
attrs[attr] = getattr(item, attr, "")
hdfFromKwargs(hdf=hdf, **{ikey:attrs})
else:
hdf.setValue(ikey, str(item))
elif getattr(value, "HDF_ATTRIBUTES", False):
attrs = {}
for attr in value.HDF_ATTRIBUTES:
attrs[attr] = getattr(value, attr, "")
hdfFromKwargs(hdf=hdf, **{key:attrs})
else:
hdf.setValue(key, str(value))
return hdf | If given an instance that has toHDF() method that method is invoked to get that object's HDF representation |
382,837 | def set_confound_pipeline(self, confound_pipeline):
self.add_history(inspect.stack()[0][3], locals(), 1)
if not os.path.exists(self.BIDS_dir + + confound_pipeline):
print()
self.get_pipeline_alternatives()
else:
self.confound_pipeline = confound_pipeline | There may be times when the pipeline is updated (e.g. teneto) but you want the confounds from the preprocessing pipeline (e.g. fmriprep).
To do this, you set the confound_pipeline to be the preprocessing pipeline where the confound files are.
Parameters
----------
confound_pipeline : str
Directory in the BIDS_dir where the confounds file is. |
382,838 | def rest(o) -> Optional[ISeq]:
if o is None:
return None
if isinstance(o, ISeq):
s = o.rest
if s is None:
return lseq.EMPTY
return s
n = to_seq(o)
if n is None:
return lseq.EMPTY
return n.rest | If o is a ISeq, return the elements after the first in o. If o is None,
returns an empty seq. Otherwise, coerces o to a seq and returns the rest. |
382,839 | def get_coords(x, y, params):
n_x = x * 2.0 / params.plane_w * params.plane_ratio - 1.0
n_y = y * 2.0 / params.plane_h - 1.0
mb_x = params.zoom * n_x
mb_y = params.zoom * n_y
return mb_x, mb_y | Transforms the given coordinates from plane-space to Mandelbrot-space (real and imaginary).
:param x: X coordinate on the plane.
:param y: Y coordinate on the plane.
:param params: Current application parameters.
:type params: params.Params
:return: Tuple containing the re-mapped coordinates in Mandelbrot-space. |
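The remap is a plain linear transform of pixel coordinates into the complex plane, scaled by the zoom factor. A self-contained version with a stand-in parameter object (the fields mirror the ones used above, but this Params object is hypothetical):

```python
from types import SimpleNamespace

def to_mandelbrot(x, y, params):
    # Normalise pixel coordinates to roughly [-1, 1], then scale by zoom.
    n_x = x * 2.0 / params.plane_w * params.plane_ratio - 1.0
    n_y = y * 2.0 / params.plane_h - 1.0
    return params.zoom * n_x, params.zoom * n_y

p = SimpleNamespace(plane_w=800, plane_h=600, plane_ratio=800 / 600, zoom=1.0)
print(to_mandelbrot(400, 300, p))  # (0.333..., 0.0): the plane centre maps near the origin
```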
382,840 | def read_entity(self, entity_id, mount_point=DEFAULT_MOUNT_POINT):
api_path = .format(
mount_point=mount_point,
id=entity_id,
)
response = self._adapter.get(url=api_path)
return response.json() | Query an entity by its identifier.
Supported methods:
GET: /auth/{mount_point}/entity/id/{id}. Produces: 200 application/json
:param entity_id: Identifier of the entity.
:type entity_id: str
:param mount_point: The "path" the secret engine was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the request.
:rtype: dict |
382,841 | def decompress(images, delete_png=False, delete_json=False, folder=None):
if type(images) == str:
return decompress([images])
filenames = copy(images)
decompressed_images = []
for orig_filename in filenames:
debug(.format(orig_filename))
try:
filename, extension = os.path.splitext(orig_filename)
if folder:
basename = os.path.basename(filename)
new_filename = os.path.join(folder, basename + )
else:
new_filename = filename +
if os.path.isfile(new_filename):
decompressed_images.append(new_filename)
msg = "Aborting decompress, TIFF already exists:" \
" {}".format(orig_filename)
raise AssertionError(msg)
if extension != '.png':
msg = "Aborting decompress, not a " \
"PNG: {}".format(orig_filename)
raise AssertionError(msg)
img = Image.open(orig_filename)
img.load()
info = {}
with open(filename + '.json', 'r') as f:
tags = json.load(f)
for tag,val in tags.items():
if tag == 'palette':
continue
if type(val) == list:
val = tuple(val)
if type(val[0]) == list:
val = tuple(tuple(x) for x in val)
info[int(tag)] = val
if 'palette' in tags:
img.putpalette(tags['palette'])
debug(.format(new_filename))
img.save(new_filename, tiffinfo=info)
decompressed_images.append(new_filename)
if delete_png:
os.remove(orig_filename)
if delete_json:
os.remove(filename + '.json')
except (IOError, AssertionError) as e:
print(.format(e))
return decompressed_images | Reverse compression from tif to png and save them in original format
(ome.tif). TIFF-tags are gotten from json-files named the same as given
images.
Parameters
----------
images : list of filenames
Image to decompress.
delete_png : bool
Whether to delete PNG images.
delete_json : bool
Whether to delete TIFF-tags stored in json files on compress.
Returns
-------
list of filenames
List of decompressed files. |
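A usage sketch of the function above; the PNG filenames are hypothetical, and each is expected to have a matching .json tag file next to it:

```python
# Recreate TIFFs from previously compressed PNG + JSON tag pairs,
# deleting the intermediate files once they have been converted.
tiffs = decompress(['well_A01.png', 'well_A02.png'],
                   delete_png=True, delete_json=True,
                   folder='restored')
print(tiffs)
```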
382,842 | def add_line_to_file(self, line, filename, expect=None, shutit_pexpect_child=None, match_regexp=None, loglevel=logging.DEBUG):
shutit_global.shutit_global_object.yield_to_draw()
if isinstance(line, str):
lines = [line]
elif isinstance(line, list):
lines = line
match_regexp = None
fail = False
for fline in lines:
if match_regexp is None:
this_match_regexp = fline
else:
this_match_regexp = match_regexp
if not self.replace_text(fline,
filename,
pattern=this_match_regexp,
shutit_pexpect_child=shutit_pexpect_child,
expect=expect,
loglevel=loglevel):
fail = True
if fail:
return False
return True | Deprecated.
Use replace/insert_text instead.
Adds line to file if it doesn't exist (unless Force is set, which it is not by default).
Creates the file if it doesn't exist.
Must be exactly the line passed in to match.
Returns True if line(s) added OK, False if not.
If you have a lot of non-unique lines to add, it's a good idea to have a sentinel value to add first, and then if that returns true, force the remainder.
@param line: Line to add. If a list, processed per-item, and match_regexp ignored.
@param filename: Filename to add it to.
@param expect: See send()
@param shutit_pexpect_child: See send()
@param match_regexp: If supplied, a regexp to look for in the file instead of the line itself, handy if the line has awkward characters in it.
@type line: string
@type filename: string
@type match_regexp: string |
382,843 | def listfolder(p):
for entry in scandir.scandir(p):
if entry.is_dir():
yield entry.name | Generator that yields the sub-folders of the given path (folders only). |
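The same idea with the standard library's os.scandir (the scandir package used above is its pre-Python-3.5 backport):

```python
import os

def list_subfolders(path):
    """Yield the names of the immediate sub-directories of path."""
    for entry in os.scandir(path):
        if entry.is_dir():
            yield entry.name

print(sorted(list_subfolders('.')))
```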
382,844 | def handler_view(self, request, resource_name, ids=None):
signal_request.send(sender=self, request=request)
time_start = time.time()
self.update_urls(request, resource_name=resource_name, ids=ids)
resource = self.resource_map[resource_name]
allowed_http_methods = resource.Meta.allowed_methods
if request.method not in allowed_http_methods:
response = HttpResponseNotAllowed(
permitted_methods=allowed_http_methods)
signal_response.send(
sender=self, request=request, response=response,
duration=time.time() - time_start)
return response
if resource.Meta.authenticators and not (
request.method == "GET" and
resource.Meta.disable_get_authentication):
user = resource.authenticate(request)
if user is None or not user.is_authenticated():
response = HttpResponse("Not Authenticated", status=401)
signal_response.send(
sender=self, request=request, response=response,
duration=time.time() - time_start)
return response
kwargs = dict(request=request)
if ids is not None:
kwargs[] = ids.split(",")
try:
if request.method == "GET":
response = self.handler_view_get(resource, **kwargs)
elif request.method == "POST":
response = self.handler_view_post(resource, **kwargs)
elif request.method == "PUT":
response = self.handler_view_put(resource, **kwargs)
elif request.method == "DELETE":
response = self.handler_view_delete(resource, **kwargs)
except JSONAPIError as e:
response = HttpResponse(
json.dumps({"errors": [e.data]}, cls=DatetimeDecimalEncoder),
content_type=self.CONTENT_TYPE, status=e.status)
signal_response.send(sender=self, request=request, response=response,
duration=time.time() - time_start)
return response | Handler for resources.
.. versionadded:: 0.5.7
Content-Type check
:return django.http.HttpResponse |
382,845 | def psicomputations(variance, Z, variational_posterior, return_psi2_n=False):
mu = variational_posterior.mean
S = variational_posterior.variance
psi0 = (variance*(np.square(mu)+S)).sum(axis=1)
Zv = variance * Z
psi1 = np.dot(mu,Zv.T)
if return_psi2_n:
psi2 = psi1[:,:,None] * psi1[:,None,:] + np.dot(S[:,None,:] * Zv[None,:,:], Zv.T)
else:
psi2 = np.dot(S.sum(axis=0) * Zv, Zv.T) + tdot(psi1.T)
return psi0, psi1, psi2 | Compute psi-statistics for ss-linear kernel |
382,846 | def subscribe(self, sr):
if not sr.startswith('t5_'):
sr = self.subreddit(sr).name
data = dict(action='sub', sr=sr)
j = self.post('api', 'subscribe', data=data)
return assert_truthy(j) | Login required. Send POST to subscribe to a subreddit. If ``sr`` is the name of the subreddit, a GET request is sent to retrieve the full id of the subreddit, which is necessary for this API call. Returns True or raises :class:`exceptions.UnexpectedResponse` if non-"truthy" value in response.
URL: ``http://www.reddit.com/api/subscribe/``
:param sr: full id of subreddit or name of subreddit (full id is preferred) |
382,847 | def resource(self, api_path=None, base_path=, chunk_size=None):
if isinstance(self.token, dict):
self.session = self._get_oauth_session()
return super(OAuthClient, self).resource(api_path, base_path, chunk_size)
raise MissingToken("You must set_token() before creating a resource with OAuthClient") | Overrides :meth:`resource` provided by :class:`pysnow.Client` with extras for OAuth
:param api_path: Path to the API to operate on
:param base_path: (optional) Base path override
:param chunk_size: Response stream parser chunk size (in bytes)
:return:
- :class:`Resource` object
:raises:
- InvalidUsage: If a path fails validation |
382,848 | def gen_etree(self):
relations_elem = self.gen_relations()
header = E()
header.append(relations_elem)
self.gen_body()
tree = E()
tree.append(header)
body = E()
for segment in self.body[]:
body.append(segment)
for group in self.body[]:
body.append(group)
tree.append(body)
return tree | convert an RST tree (DGParentedTree -> lxml etree) |
382,849 | def repeat(col, n):
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.repeat(_to_java_column(col), n)) | Repeats a string column n times, and returns it as a new string column.
>>> df = spark.createDataFrame([('ab',)], ['s',])
>>> df.select(repeat(df.s, 3).alias('s')).collect()
[Row(s=u'ababab')] |
382,850 | def rename_pool(service, old_name, new_name):
validator(value=old_name, valid_type=six.string_types)
validator(value=new_name, valid_type=six.string_types)
cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name]
check_call(cmd) | Rename a Ceph pool from old_name to new_name
:param service: six.string_types. The Ceph user name to run the command under
:param old_name: six.string_types
:param new_name: six.string_types
:return: None |
382,851 | def register(cls, config={}):
if cls.accessor is not None:
if cls.instance is None:
cls.instance = cls.accessor(config) | This function is basically a shortcut of boot for accessors
that have only the config dict argument.
Args
----
config (dict): the configuration dictionary |
382,852 | def service_running(service_name, **kwargs):
if init_is_systemd():
return service('is-active', service_name)
else:
if os.path.exists(_UPSTART_CONF.format(service_name)):
try:
cmd = ['status', service_name]
for key, value in six.iteritems(kwargs):
parameter = '%s=%s' % (key, value)
cmd.append(parameter)
output = subprocess.check_output(
cmd, stderr=subprocess.STDOUT).decode()
except subprocess.CalledProcessError:
return False
else:
if ("start/running" in output or
"is running" in output or
"up and running" in output):
return True
elif os.path.exists(_INIT_D_CONF.format(service_name)):
return service('status', service_name)
return False | Determine whether a system service is running.
:param service_name: the name of the service
:param **kwargs: additional args to pass to the service command. This is
used to pass additional key=value arguments to the
service command line for managing specific instance
units (e.g. service ceph-osd status id=2). The kwargs
are ignored in systemd services. |
382,853 | def read(self):
data = None
while True:
last_offset = self.tell()
try:
(chunk, record_type) = self.__try_read_record()
if record_type == _RECORD_TYPE_NONE:
self.__sync()
elif record_type == _RECORD_TYPE_FULL:
if data is not None:
logging.warning(
"Ordering corruption: Got FULL record while already "
"in a chunk at offset %d", last_offset)
return chunk
elif record_type == _RECORD_TYPE_FIRST:
if data is not None:
logging.warning(
"Ordering corruption: Got FIRST record while already "
"in a chunk at offset %d", last_offset)
data = chunk
elif record_type == _RECORD_TYPE_MIDDLE:
if data is None:
logging.warning(
"Ordering corruption: Got MIDDLE record before FIRST "
"record at offset %d", last_offset)
else:
data += chunk
elif record_type == _RECORD_TYPE_LAST:
if data is None:
logging.warning(
"Ordering corruption: Got LAST record but no chunk is in "
"progress at offset %d", last_offset)
else:
result = data + chunk
data = None
return result
else:
raise errors.InvalidRecordError(
"Unsupported record type: %s" % record_type)
except errors.InvalidRecordError, e:
logging.warning("Invalid record encountered at %s (%s). Syncing to "
"the next block", last_offset, e)
data = None
self.__sync() | Reads record from current position in reader.
Returns:
original bytes stored in a single record. |
382,854 | def get_subnets_for_net(self, net):
try:
subnet_list = self.neutronclient.list_subnets(network_id=net)
subnet_dat = subnet_list.get('subnets')
return subnet_dat
except Exception as exc:
LOG.error("Failed to list subnet net %(net)s, Exc: %(exc)s",
{'net': net, 'exc': str(exc)})
return None | Returns the subnets in a network. |
382,855 | async def get_access_token(consumer_key, consumer_secret,
oauth_token, oauth_token_secret,
oauth_verifier, **kwargs):
client = BasePeonyClient(consumer_key=consumer_key,
consumer_secret=consumer_secret,
access_token=oauth_token,
access_token_secret=oauth_token_secret,
api_version="",
suffix="")
response = await client.api.oauth.access_token.get(
_suffix="",
oauth_verifier=oauth_verifier
)
return parse_token(response) | get the access token of the user
Parameters
----------
consumer_key : str
Your consumer key
consumer_secret : str
Your consumer secret
oauth_token : str
OAuth token from :func:`get_oauth_token`
oauth_token_secret : str
OAuth token secret from :func:`get_oauth_token`
oauth_verifier : str
OAuth verifier from :func:`get_oauth_verifier`
Returns
-------
dict
Access tokens |
382,856 | def create_pipeline(self, name, description, **kwargs):
if not (name and description):
return requests.codes.bad_request, None
kwargs.update({'name': name, 'description': description})
new_pl = StreakPipeline(**kwargs)
uri = '/'.join([
self.api_uri,
self.pipelines_suffix
])
code, r_data = self._req(, uri, new_pl.to_dict())
return code, r_data | Creates a pipeline with the provided attributes.
Args:
name required name string
kwargs {name, description, orgWide, aclEntries} user
specifiable ones only
return (status code, pipeline_dict) (as created) |
382,857 | def wait_for_and_switch_to_alert(driver, timeout=settings.LARGE_TIMEOUT):
start_ms = time.time() * 1000.0
stop_ms = start_ms + (timeout * 1000.0)
for x in range(int(timeout * 10)):
try:
alert = driver.switch_to.alert
dummy_variable = alert.text
return alert
except NoAlertPresentException:
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
break
time.sleep(0.1)
raise Exception("Alert was not present after %s seconds!" % timeout) | Wait for a browser alert to appear, and switch to it. This should be usable
as a drop-in replacement for driver.switch_to.alert when the alert box
may not exist yet.
@Params
driver - the webdriver object (required)
timeout - the time to wait for the alert in seconds |
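A hedged usage sketch with a plain Selenium WebDriver session; the URL is a placeholder for any page that pops a JavaScript alert:

```python
from selenium import webdriver

driver = webdriver.Chrome()
driver.get('https://example.test/triggers-an-alert')  # hypothetical page
alert = wait_for_and_switch_to_alert(driver, timeout=10)
print(alert.text)
alert.accept()
driver.quit()
```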
382,858 | def clearData(self):
self._counts = np.zeros_like(self._bins)
self.histo.setOpts(height=self._counts) | Clears all histograms (keeps bins) |
382,859 | def first_time_setup(self):
if not self._auto_unlock_key_position():
pw = password.create_passwords()[0]
attrs = {: self.keyring}
gkr.item_create_sync(self.default_keyring
,gkr.ITEM_GENERIC_SECRET
,self.keyring
,attrs
,pw
,True)
found_pos = self._auto_unlock_key_position()
item_info = gkr.item_get_info_sync(self.default_keyring, found_pos)
gkr.create_sync(self.keyring, item_info.get_secret()) | First time running Open Sesame?
Create keyring and an auto-unlock key in default keyring. Make sure
these things don't already exist. |
382,860 | def sample(self, fraction, seed=None, exact=False):
if seed is None:
seed = abs(hash("%0.20f" % time.time())) % (2 ** 31)
if (fraction > 1 or fraction < 0):
raise ValueError( + str(fraction))
if (self.num_rows() == 0 or self.num_columns() == 0):
return self
else:
with cython_context():
return SFrame(_proxy=self.__proxy__.sample(fraction, seed, exact)) | Sample a fraction of the current SFrame's rows.
Parameters
----------
fraction : float
Fraction of the rows to fetch. Must be between 0 and 1.
if exact is False (default), the number of rows returned is
approximately the fraction times the number of rows.
seed : int, optional
Seed for the random number generator used to sample.
exact: bool, optional
Defaults to False. If exact=True, an exact fraction is returned,
but at a performance penalty.
Returns
-------
out : SFrame
A new SFrame containing sampled rows of the current SFrame.
Examples
--------
Suppose we have an SFrame with 6,145 rows.
>>> import random
>>> sf = SFrame({'id': range(0, 6145)})
Retrieve about 30% of the SFrame rows with repeatable results by
setting the random seed.
>>> len(sf.sample(.3, seed=5))
1783 |
382,861 | def _str_to_datetime(self, str_value):
try:
ldt = [int(f) for f in str_value.split('-')]
dt = datetime.datetime(*ldt)
except (ValueError, TypeError):
return None
return dt | Parses a `YYYY-MM-DD` string into a datetime object. |
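The same parse can be written with strptime; a quick equivalent that also returns None on bad input:

```python
import datetime

def parse_date(value):
    try:
        return datetime.datetime.strptime(value, '%Y-%m-%d')
    except (ValueError, TypeError):
        return None

print(parse_date('2023-06-01'))  # 2023-06-01 00:00:00
print(parse_date('not-a-date'))  # None
```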
382,862 | def render_to_response(self, context, **response_kwargs):
response_kwargs['content_type'] = 'application/json'
return self.response_class(
self.convert_context_to_json(context),
**response_kwargs
) | Returns a JSON response, transforming 'context' to make the payload. |
382,863 | def execute(self, query, args=None):
del self.messages[:]
db = self._get_db()
if isinstance(query, unicode):
query = query.encode(db.unicode_literal.charset)
if args is not None:
query = query % db.literal(args)
try:
r = None
r = self._query(query)
except TypeError, m:
if m.args[0] in ("not enough arguments for format string",
"not all arguments converted"):
self.messages.append((ProgrammingError, m.args[0]))
self.errorhandler(self, ProgrammingError, m.args[0])
else:
self.messages.append((TypeError, m))
self.errorhandler(self, TypeError, m)
except (SystemExit, KeyboardInterrupt):
raise
except:
exc, value, tb = sys.exc_info()
del tb
self.messages.append((exc, value))
self.errorhandler(self, exc, value)
self._executed = query
if not self._defer_warnings: self._warning_check()
return r | Execute a query.
query -- string, query to execute on server
args -- optional sequence or mapping, parameters to use with query.
Note: If args is a sequence, then %s must be used as the
parameter placeholder in the query. If a mapping is used,
%(key)s must be used as the placeholder.
Returns long integer rows affected, if any |
382,864 | def x_runtime(f, *args, **kwargs):
_t0 = now()
r = f(*args, **kwargs)
_t1 = now()
r.headers['X-Runtime'] = '{0}'.format(Decimal(str(_t1 - _t0)))
return r | X-Runtime Flask Response Decorator. |
382,865 | def _update_resource_view(self, log=False):
update = False
if in self.data and self._load_from_hdx(, self.data[]):
update = True
else:
if in self.data:
resource_views = self.get_all_for_resource(self.data[])
for resource_view in resource_views:
if self.data[] == resource_view[]:
self.old_data = self.data
self.data = resource_view.data
update = True
break
if update:
if log:
logger.warning( % self.data[])
self._merge_hdx_update(, )
return update | Check if resource view exists in HDX and if so, update resource view
Returns:
bool: True if updated and False if not |
382,866 | def pseudo_organization(organization, classification, default=None):
if organization and classification:
raise ScrapeValueError()
elif classification:
return _make_pseudo_id(classification=classification)
elif organization:
if isinstance(organization, Organization):
return organization._id
elif isinstance(organization, str):
return organization
else:
return _make_pseudo_id(**organization)
elif default is not None:
return _make_pseudo_id(classification=default)
else:
return None | helper for setting an appropriate ID for organizations |
382,867 | def wait_pid(pid, timeout=None, callback=None):
def check_timeout(delay):
if timeout is not None:
if time.time() >= stop_at:
if callback:
callback(pid)
else:
raise TimeoutExpired
time.sleep(delay)
return min(delay * 2, 0.04)
if timeout is not None:
waitcall = lambda: os.waitpid(pid, os.WNOHANG)
stop_at = time.time() + timeout
else:
waitcall = lambda: os.waitpid(pid, 0)
delay = 0.0001
while 1:
try:
retpid, status = waitcall()
except OSError as err:
if err.errno == errno.EINTR:
delay = check_timeout(delay)
continue
elif err.errno == errno.ECHILD:
while 1:
if pid_exists(pid):
delay = check_timeout(delay)
else:
return
else:
raise
else:
if retpid == 0:
delay = check_timeout(delay)
continue
if os.WIFSIGNALED(status):
return os.WTERMSIG(status)
elif os.WIFEXITED(status):
return os.WEXITSTATUS(status)
else:
raise RuntimeError("unknown process exit status") | Wait for process with pid 'pid' to terminate and return its
exit status code as an integer.
If pid is not a children of os.getpid() (current process) just
waits until the process disappears and return None.
If pid does not exist at all return None immediately.
Raise TimeoutExpired on timeout expired (if specified). |
382,868 | def call(self):
from wx_loader import wx
dlg = wx.TextEntryDialog(None, self.title, self.title, defaultValue=str(self.default))
if dlg.ShowModal() != wx.ID_OK:
return None
return dlg.GetValue() | show a value dialog |
382,869 | def _single_tree_paths(self, tree):
skel = tree.consolidate()
tree = defaultdict(list)
for edge in skel.edges:
svert = edge[0]
evert = edge[1]
tree[svert].append(evert)
tree[evert].append(svert)
def dfs(path, visited):
paths = []
stack = [ (path, visited) ]
while stack:
path, visited = stack.pop(0)
vertex = path[-1]
children = tree[vertex]
visited[vertex] = True
children = [ child for child in children if not visited[child] ]
if len(children) == 0:
paths.append(path)
for child in children:
stack.append(
(path + [child], copy.deepcopy(visited))
)
return paths
root = skel.edges[0,0]
paths = dfs([root], defaultdict(bool))
root = np.argmax([ len(_) for _ in paths ])
root = paths[root][-1]
paths = dfs([ root ], defaultdict(bool))
return [ np.flip(skel.vertices[path], axis=0) for path in paths ] | Get all traversal paths from a single tree. |
382,870 | def get_module_name(package):
distribution = get_distribution(package.DISTRIBUTION_NAME)
entry_info = distribution.get_entry_info(package.DIST_GROUP, package.ENTRY_POINT)
if not entry_info:
raise RuntimeError(
"Can't find entry info for distribution: %r (group: %r, entry point: %r)" % (
package.DISTRIBUTION_NAME, package.DIST_GROUP, package.ENTRY_POINT
)
)
return entry_info.module_name | package must have these attributes:
e.g.:
package.DISTRIBUTION_NAME = "DragonPyEmulator"
package.DIST_GROUP = "console_scripts"
package.ENTRY_POINT = "DragonPy"
:return: a string like: "dragonpy.core.cli" |
382,871 | def template_subst(template, subs, delims=('<', '>')):
subst_text = template
for (k,v) in subs.items():
subst_text = subst_text.replace(
delims[0] + k + delims[1], v)
return subst_text | Perform substitution of content into tagged string.
For substitutions into template input files for external computational
packages, no checks for valid syntax are performed.
Each key in `subs` corresponds to a delimited
substitution tag to be replaced in `template` by the entire text of the
value of that key. For example, the dict ``{"ABC": "text"}`` would
convert ``The <ABC> is working`` to ``The text is working``, using the
default delimiters of '<' and '>'. Substitutions are performed in
iteration order from `subs`; recursive substitution
as the tag parsing proceeds is thus
feasible if an :class:`~collections.OrderedDict` is used and substitution
key/value pairs are added in the proper order.
Start and end delimiters for the tags are modified by `delims`. For
example, to substitute a tag of the form **{\|TAG\|}**, the tuple
``("{|","|}")`` should be passed to `subs_delims`. Any elements in
`delims` past the second are ignored. No checking is
performed for whether the delimiters are "sensible" or not.
Parameters
----------
template
|str| --
Template containing tags delimited by `subs_delims`,
with tag names and substitution contents provided in `subs`
subs
|dict| of |str| --
Each item's key and value are the tag name and corresponding content to
be substituted into the provided template.
delims
iterable of |str| --
Iterable containing the 'open' and 'close' strings used to mark tags
in the template, which are drawn from elements zero and one,
respectively. Any elements beyond these are ignored.
Returns
-------
subst_text
|str| --
String generated from the parsed template, with all tag
substitutions performed. |
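A worked call matching the docstring's own example, plus one with custom delimiters:

```python
print(template_subst("The <ABC> is working", {"ABC": "text"}))
# -> The text is working

print(template_subst("run {|CMD|} now", {"CMD": "make"}, delims=("{|", "|}")))
# -> run make now
```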
382,872 | def emit(self, action, event, **kwargs):
for listener in self._listeners:
listener.put_nowait((action, event, kwargs)) | Send an event to all the client listening for notifications
:param action: Action name
:param event: Event to send
:param kwargs: Add this meta to the notification (project_id for example) |
382,873 | def find_document_type_by_name(self, entity_name, active=,
match_case=True):
all_types = self.get_dictionary()
if match_case:
filtered = filter(
lambda x: x[] == active and x[].find(entity_name) >= 0,
all_types)
else:
token = entity_name.lower()
filtered = filter(
lambda x: x[] == active and x[].lower().find(token) >= 0,
all_types)
return filtered | search document types by name and active(Y/N) status
:param entity_name: entity name
:return: |
382,874 | def normalize_volume(volume):
res = dict()
res[] =
res[] = volume[]
if in volume:
res[] = volume[]
source = volume[]
attachments = source[]
del(source[])
del(source[ + source[]])
res[] = source
atts = list()
for attachment in attachments:
atts.append(Archivant.normalize_attachment(attachment))
res[] = atts
return res | convert volume metadata from es to archivant format
This function modifies the input volume as a side effect.
output example::
{
'id': 'AU0paPZOMZchuDv1iDv8',
'type': 'volume',
'metadata': {'_language': 'en',
'key1': 'value1',
'key2': 'value2',
'key3': 'value3'},
'attachments': [{'id': 'a910e1kjdo2d192d1dko1p2kd1209d',
'type' : 'attachment',
'url': 'fsdb:///624bffa8a6f90813b7982d0e5b4c1475ebec40e3',
'metadata': {'download_count': 0,
'mime': 'application/json',
'name': 'tmp9fyat_',
'notes': 'this file is awsome',
'sha1': '624bffa8a6f90813b7982d0e5b4c1475ebec40e3',
'size': 10}
}]
} |
382,875 | def _set_zoning(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=zoning.zoning, is_container=, presence=False, yang_name="zoning", rest_name="zoning", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u, u: None}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__zoning = t
if hasattr(self, ):
self._set() | Setter method for zoning, mapped from YANG variable /zoning (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_zoning is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_zoning() directly. |
382,876 | def dumps(self, contentType=None, version=None):
buf = six.StringIO()
ret = self.dump(buf, contentType, version)
if ret is None:
return buf.getvalue()
return (ret[0], ret[1], buf.getvalue()) | [OPTIONAL] Identical to :meth:`dump`, except the serialized form
is returned as a string representation. As documented in
:meth:`dump`, the return value can optionally be a three-element
tuple of (contentType, version, data) if the provided content-type
should be overridden or enhanced. The default implementation just
wraps :meth:`dump`. |
382,877 | def isin_start(elems, line):
found = False
elems = [elems] if type(elems) is not list else elems
for e in elems:
if line.lstrip().lower().startswith(e):
found = True
break
return found | Check if an element from a list starts a string.
:type elems: list
:type line: str |
382,878 | def get_languages(self):
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.url + "/languages"
)
return data | :calls: `GET /repos/:owner/:repo/languages <http://developer.github.com/v3/repos>`_
:rtype: dict of string to integer |
382,879 | def _get_cache_size(replace=False):
if not replace:
size = _cached_search_compile.cache_info().currsize
else:
size = _cached_replace_compile.cache_info().currsize
return size | Get size of cache. |
382,880 | def get_session_identifiers(cls, folder=None, inputfile=None):
sessions = []
if inputfile and folder:
raise MQ2Exception(
)
if folder:
if not os.path.isdir(folder):
return sessions
for root, dirs, files in os.walk(folder):
for filename in files:
filename = os.path.join(root, filename)
for ext in SUPPORTED_FILES:
if filename.endswith(ext):
wbook = xlrd.open_workbook(filename)
for sheet in wbook.sheets():
if sheet.name not in sessions:
sessions.append(sheet.name)
elif inputfile:
if os.path.isdir(inputfile):
return sessions
for ext in SUPPORTED_FILES:
if inputfile.endswith(ext):
wbook = xlrd.open_workbook(inputfile)
for sheet in wbook.sheets():
if sheet.name not in sessions:
sessions.append(sheet.name)
return sessions | Retrieve the list of session identifiers contained in the
data on the folder or the inputfile.
For this plugin, it returns the list of excel sheet available.
:kwarg folder: the path to the folder containing the files to
check. This folder may contain sub-folders.
:kwarg inputfile: the path to the input file to use |
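A hedged usage sketch for the session lookup above; ExcelPlugin and the paths are placeholders for whatever MQ2 plugin class actually carries this classmethod:

# List every worksheet name found in the supported spreadsheet files under a folder.
sessions = ExcelPlugin.get_session_identifiers(folder='/path/to/qtl_data')
print(sessions)   # e.g. ['Session1', 'Session2']

# Or restrict the scan to a single workbook (folder and inputfile are mutually exclusive).
sessions = ExcelPlugin.get_session_identifiers(inputfile='/path/to/qtl_data/map.xlsx')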
382,881 | def walk_revctrl(dirname='', ff=''):
file_finder = None
items = []
if not ff:
distutils.log.error()
sys.exit(1)
for ep in pkg_resources.iter_entry_points('setuptools.file_finders'):
if ff == ep.name:
distutils.log.info(, ep.name)
file_finder = ep.load()
finder_items = []
with pythonpath_off():
for item in file_finder(dirname):
if not basename(item).startswith((, , )):
finder_items.append(item)
distutils.log.info(, len(finder_items))
items.extend(finder_items)
if file_finder is None:
distutils.log.error(,
ff, if ff == else ff)
sys.exit(1)
return items or [] | Return files found by the file-finder 'ff'. |
382,882 | def _check_env_vars_set(self, dir_env_var, file_env_var):
return (
os.environ.get(file_env_var) is not None or
os.environ.get(dir_env_var) is not None
) | Check to see if the default cert dir/file environment vars are present.
:return: bool |
382,883 | def _get_subject_uri(self, guid=None):
uri = self.uri + '/subjects'
if guid:
    uri += '/' + urllib.quote_plus(guid)
return uri | Returns the full path that uniquely identifies
the subject endpoint. |
382,884 | def _get_list(self, key, operation, create=False):
return self._get_by_type(key, operation, create, b'list', []) | Get (and maybe create) a list by name.
382,885 | def _init_draw(self):
if self.original is not None:
self.original.set_data(np.random.random((10, 10, 3)))
self.processed.set_data(np.random.random((10, 10, 3))) | Initializes the drawing of the frames by setting the images to
random colors.
This function is called by TimedAnimation. |
382,886 | def walkscan(x0, y0, xn=0.25, xp=0.25, yn=0.25, yp=0.25):
if xn < 0: raise ValueError("Negative x probabilty must be non-negative")
if xp < 0: raise ValueError("Positive x probabilty must be non-negative")
if yn < 0: raise ValueError("Negative y probabilty must be non-negative")
if yp < 0: raise ValueError("Positive y probabilty must be non-negative")
total = xp + xn + yp + yn
xn /= total
xp /= total
yn /= total
yp /= total
cxn = xn
cxp = cxn + xp
cyn = cxp + yn
x, y = x0, y0
while True:
yield x, y
probability = random.random()
if probability <= cxn:
x -= 1
elif probability <= cxp:
x += 1
elif probability <= cyn:
y -= 1
else:
y += 1 | Scan pixels in a random walk pattern with given step probabilities. The
random walk will continue indefinitely unless a skip transformation is used
with the 'stop' parameter set or a clip transformation is used with the
'abort' parameter set to True. The probabilities are normalized to sum to 1.
:param x0: Initial x-coordinate
:type x0: int
:param y0: Initial y-coordinate
:type y0: int
:param xn: Probability of moving in the negative x direction
:type xn: float
:param xp: Probability of moving in the positive x direction
:type xp: float
:param yn: Probability of moving in the negative y direction
:type yn: float
:param yp: Probability of moving in the positive y direction
:type yp: float |
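Because the generator above never terminates on its own, a bounded usage sketch with itertools.islice (assumes walkscan is in scope; the printed path is just one possible outcome):

import itertools
import random

random.seed(0)   # make the walk repeatable for the example

# First five positions of a walk biased toward +x.
steps = list(itertools.islice(walkscan(0, 0, xn=0.1, xp=0.7, yn=0.1, yp=0.1), 5))
print(steps)   # e.g. [(0, 0), (1, 0), (2, 0), (3, 0), (3, 1)]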
382,887 | def add(i):
r=ck.check_writing({:work[]})
if r[]>0: return r
o=i.get(,)
r=ck.access({:,
:work[],
:work[],
:})
if r[]>0: return r
p=r[]
pm=os.path.join(p,cfg[])
pma=os.path.join(p,cfg[])
r=ck.load_text_file({:pm})
if r[]>0: return r
spm=r[]
r=ck.load_text_file({:pma})
if r[]>0: return r
spma=r[]
desc=i.get(,)
license=i.get(,)
copyright=i.get(,)
developer=i.get(,)
developer_email=i.get(,)
developer_webpage=i.get(,)
actions=i.get(,{})
func=i.get(,)
if func!=:
actions[func]={}
quiet=i.get(,)
if quiet!= and o==:
if desc==:
ck.out()
r=ck.inp({:})
desc=r[]
if license== and ck.cfg.get(,)!=:
ck.out()
r=ck.inp({:+ck.cfg[]+})
license=r[]
if license==: license=ck.cfg[]
if copyright== and ck.cfg.get(,)!=:
ck.out()
r=ck.inp({:+ck.cfg[]+})
copyright=r[]
if copyright==: copyright=ck.cfg[]
if developer== and ck.cfg.get(,)!=:
ck.out()
r=ck.inp({:s developer (or Enter to use "default_developer"): stringdefault_developerdefault_developer_emailtextAdd module\+ck.cfg[]+})
developer_email=r[]
if developer_email==: developer_email=ck.cfg[]
if developer_webpage== and ck.cfg.get(,)!=:
ck.out()
r=ck.inp({:s developer webpage (or Enter to use "default_developer_webpage"): stringdefault_developer_webpage*textAdd action function (or Enter to stop): stringtextSupport web (y/N): stringyesyyesfor_webtextAdd action description: stringdescdesc$
return r | Input: {
(repo_uoa) - repo UOA
module_uoa - normally should be 'module' already
data_uoa - UOA of the module to be created
(desc) - module description
(license) - module license
(copyright) - module copyright
(developer) - module developer
(developer_email) - module developer
(developer_webpage) - module developer
(actions) - dict with actions {"func1":{}, "func2":{} ...}
(dict) - other meta description to add to entry
(quiet) - minimal interaction
(func) - just add one dummy action
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
Output of the 'add' kernel function
} |
382,888 | def df2arff(df, dataset_name, pods_data):
def java_simple_date(date_format):
date_format = date_format.replace(, ).replace(, ).replace(, ).replace(, )
return date_format.replace(, ).replace(, ).replace(, ).replace(, )
def tidy_field(atr):
return str(atr).replace(, ).replace(, )
types = {: [str], : [int, np.int64, np.uint8], : [np.float64]}
d = {}
d[] = []
for atr in df.columns:
if isinstance(atr, str):
if len(atr)>8 and atr[:9] == :
import json
elements = json.loads(atr[9:-1])
d[].append((tidy_field(elements[1]),
list(elements[0].keys())))
mask = {}
c = pd.Series(index=df.index)
for key, val in elements[0].items():
mask = df[atr]==val
c[mask] = key
df[atr] = c
continue
if len(atr)>7 and atr[:8] == :
name = atr[8:-1]
d[].append((tidy_field(name), ))
df[atr] = df[atr].astype(int)
continue
if len(atr)>7 and atr[:8]==:
from matplotlib.dates import num2date
elements = atr[8:-1].split()
d[].append((elements[0] + + java_simple_date(elements[1]), ))
df[atr] = num2date(df[atr].values)
df[atr] = df[atr].dt.strftime(elements[1])
continue
if len(atr)>9 and atr[:10]==:
def timestamp2date(values):
import datetime
new = []
for value in values:
new.append(np.datetime64(datetime.datetime.fromtimestamp(value)))
return np.asarray(new)
elements = atr[10:-1].split()
d[].append((elements[0] + + java_simple_date(elements[1]), ))
df[atr] = timestamp2date(df[atr].values)
df[atr] = df[atr].dt.strftime(elements[1])
continue
if len(atr)>10 and atr[:11]==:
elements = atr[11:-1].split()
d[].append((elements[0] + + java_simple_date(elements[1]), ))
df[atr] = df[atr].dt.strftime(elements[1])
continue
if len(atr)>11 and atr[:12]==:
def decyear2date(values):
new = []
for i, decyear in enumerate(values):
year = int(np.floor(decyear))
dec = decyear-year
end = np.datetime64(str(year+1)+)
start = np.datetime64(str(year)+)
diff=end-start
days = dec*(diff/np.timedelta64(1, ))
add = np.timedelta64(int(np.round(days)), )
new.append(start+add)
return np.asarray(new)
elements = atr[12:-1].split()
d[].append((elements[0] + + java_simple_date(elements[1]), ))
df[atr] = decyear2date(df[atr].values)
df[atr] = df[atr].dt.strftime(elements[1])
continue
field = tidy_field(atr)
el = df[atr][0]
type_assigned=False
for t in types:
if isinstance(el, tuple(types[t])):
d[].append((field, t))
type_assigned=True
break
if not type_assigned:
import json
d[].append((field+, ))
df[atr] = df[atr].apply(json.dumps)
d[] = []
for ind, row in df.iterrows():
d[].append(list(row))
import textwrap as tw
width = 78
d[] = dataset_name + "\n\n"
if in pods_data and pods_data[]:
d[] += "\n".join(tw.wrap(pods_data[], width)) + "\n\n"
if in pods_data and pods_data[]:
d[] += "\n".join(tw.wrap(pods_data[], width))
if in pods_data and pods_data[]:
d[] += "\n\n" + "Citation" "\n\n" + "\n".join(tw.wrap(pods_data[], width))
d[] = dataset_name
import arff
string = arff.dumps(d)
import re
string = re.sub(r,
r,
string)
f = open(dataset_name + , )
f.write(string)
f.close() | Write an arff file from a data set loaded in from pods |
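The function above ultimately builds the dictionary format consumed by the liac-arff package; a minimal standalone sketch of that structure (relation name and data invented):

import arff   # liac-arff

d = {
    'relation': 'example',
    'description': 'example dataset\n\nillustrative only',
    'attributes': [('height', 'REAL'),
                   ('label', ['a', 'b'])],   # nominal attribute as a list of values
    'data': [[1.2, 'a'], [3.4, 'b']],
}
print(arff.dumps(d))   # the @RELATION/@ATTRIBUTE/@DATA text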
382,889 | def outliers(df, output_type='values', dtype='number', sensitivity=1.5):
    if dtype in ('number', 'datetime', 'timedelta', 'datetimetz'):
        if not dtype == 'number':
            df = pd.to_numeric(df, errors='coerce')
quart25, quart75 = percentiles(df,q = [.25,.75])
out_range= sensitivity * (quart75 - quart25)
lower_bound,upper_bound = quart25-out_range, quart75+out_range
bool_array = (df < lower_bound)|(df > upper_bound)
else:
value_counts = df.value_counts()
quart25 = cum_percentile(value_counts,.25)
quart75 = cum_percentile(value_counts,.75)
out_values = int(sensitivity * (quart75 - quart25) + quart75 + 1)
if out_values >= len(value_counts):
bool_array = _utils.bc_vec(df,value = False)
else:
outlier_values = value_counts[value_counts <= value_counts.iloc[out_values]].index
bool_array = df.isin(outlier_values)
if output_type == 'values':
return df[bool_array]
return bool_array | Returns potential outliers as either a boolean array or a subset of the original.
Parameters:
df - array_like
Series or dataframe to check
output_type - string, default 'values'
if 'values' is specified, then will output the values in the series that are suspected
outliers. Else, a boolean array will be outputted, where True means the value is an outlier
dtype - string, default 'number'
the way to treat the object. Possible values are 'number','datetime',
'timedelta', 'datetimetz', 'category', or 'object'
sensitivity - number, default 1.5
The value to multiply by the inter-quartile range when determining outliers. This number is used
for categorical data as well. |
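A small usage sketch for the outlier helper above (made-up data; assumes the function and its percentiles/cum_percentile helpers are importable):

import pandas as pd

s = pd.Series([1, 2, 2, 3, 2, 3, 2, 100])     # 100 sits far outside the inter-quartile range
print(outliers(s))                            # subset of s containing 100
print(outliers(s, output_type='bool'))        # boolean mask, True only at the outlier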
382,890 | def Read(self, length):
if not self.IsFile():
raise IOError("%s is not a file." % self.pathspec.last.path)
available = min(self.size - self.offset, length)
if available > 0:
try:
data = self.fd.read_random(self.offset, available,
self.pathspec.last.ntfs_type,
self.pathspec.last.ntfs_id)
except RuntimeError as e:
raise IOError(e)
self.offset += len(data)
return data
return b"" | Read from the file. |
382,891 | def weight_layers(name, bilm_ops, l2_coef=None,
use_top_only=False, do_layer_norm=False, reuse=False):
def _l2_regularizer(weights):
if l2_coef is not None:
return l2_coef * tf.reduce_sum(tf.square(weights))
else:
return 0.0
lm_embeddings = bilm_ops['lm_embeddings']
mask = bilm_ops['mask']
n_lm_layers = int(lm_embeddings.get_shape()[1])
lm_dim = int(lm_embeddings.get_shape()[3])
with tf.control_dependencies([lm_embeddings, mask]):
mask_float = tf.cast(mask, 'float32')
broadcast_mask = tf.expand_dims(mask_float, axis=-1)
def _do_ln(x):
x_masked = x * broadcast_mask
N = tf.reduce_sum(mask_float) * lm_dim
mean = tf.reduce_sum(x_masked) / N
variance = tf.reduce_sum(((x_masked - mean) * broadcast_mask)**2) / N
return tf.nn.batch_normalization(
x, mean, variance, None, None, 1E-12
)
if use_top_only:
layers = tf.split(lm_embeddings, n_lm_layers, axis=1)
sum_pieces = tf.squeeze(layers[-1], squeeze_dims=1)
reg = 0.0
else:
with tf.variable_scope("aggregation", reuse=reuse):
W = tf.get_variable(
'{}_ELMo_W'.format(name),
shape=(n_lm_layers, ),
initializer=tf.zeros_initializer,
regularizer=_l2_regularizer,
trainable=True,
)
normed_weights = tf.split(
tf.nn.softmax(W + 1.0 / n_lm_layers), n_lm_layers
)
layers = tf.split(lm_embeddings, n_lm_layers, axis=1)
pieces = []
for w, t in zip(normed_weights, layers):
if do_layer_norm:
pieces.append(w * _do_ln(tf.squeeze(t, squeeze_dims=1)))
else:
pieces.append(w * tf.squeeze(t, squeeze_dims=1))
sum_pieces = tf.add_n(pieces)
reg = [
r for r in tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
if r.name.find('{}_ELMo_W/'.format(name)) >= 0
]
if len(reg) != 1:
raise ValueError
with tf.variable_scope("aggregation", reuse=reuse):
gamma = tf.get_variable(
'{}_ELMo_gamma'.format(name),
shape=(1, ),
initializer=tf.ones_initializer,
regularizer=None,
trainable=True,
)
weighted_lm_layers = sum_pieces * gamma
weighted_lm_layers_masked = sum_pieces * broadcast_mask
weighted_lm_layers_sum = tf.reduce_sum(weighted_lm_layers_masked, 1)
mask_sum = tf.reduce_sum(mask_float, 1)
mask_sum = tf.maximum(mask_sum, [1])
weighted_lm_layers_mean = weighted_lm_layers_sum / tf.expand_dims(mask_sum, - 1)
word_emb_2n = tf.squeeze(layers[0], [1])
word_emb_1n = tf.slice(word_emb_2n, [0, 0, 0], [-1, -1, lm_dim // 2])
lstm_outputs1 = tf.squeeze(layers[1], [1])
lstm_outputs2 = tf.squeeze(layers[2], [1])
ret = {'weighted_op': weighted_lm_layers,
       'mean_op': weighted_lm_layers_mean,
       'regularization_op': reg,
       'word_emb': word_emb_1n,
       'lstm_outputs1': lstm_outputs1,
       'lstm_outputs2': lstm_outputs2, }
return ret | Weight the layers of a biLM with trainable scalar weights to
compute ELMo representations.
For each output layer, this returns two ops. The first computes
a layer specific weighted average of the biLM layers, and
the second the l2 regularizer loss term.
The regularization terms are also add to tf.GraphKeys.REGULARIZATION_LOSSES
Input:
name = a string prefix used for the trainable variable names
bilm_ops = the tensorflow ops returned to compute internal
representations from a biLM. This is the return value
from BidirectionalLanguageModel(...)(ids_placeholder)
l2_coef: the l2 regularization coefficient $\lambda$.
Pass None or 0.0 for no regularization.
use_top_only: if True, then only use the top layer.
do_layer_norm: if True, then apply layer normalization to each biLM
layer before normalizing
reuse: reuse an aggregation variable scope.
Output:
{
'weighted_op': op to compute weighted average for output,
'regularization_op': op to compute regularization term
} |
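The core of the weighting above is a softmax-normalized scalar mix of the biLM layers scaled by a learned gamma; a plain NumPy sketch of that arithmetic (shapes and values are illustrative, layer normalization omitted):

import numpy as np

def scalar_mix(layers, scalar_weights, gamma):
    # layers: list of (batch, time, dim) arrays; scalar_weights: one raw weight per layer.
    w = np.exp(scalar_weights) / np.sum(np.exp(scalar_weights))   # softmax over layers
    return gamma * sum(wi * layer for wi, layer in zip(w, layers))

layers = [np.random.rand(2, 5, 8) for _ in range(3)]   # three biLM layers
mixed = scalar_mix(layers, scalar_weights=np.zeros(3), gamma=1.0)
print(mixed.shape)   # (2, 5, 8)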
382,892 | def prep_vrn_file(in_file, vcaller, work_dir, somatic_info, writer_class, seg_file=None, params=None):
data = somatic_info.tumor_data
if not params:
params = PARAMS
out_file = os.path.join(work_dir, "%s-%s-prep.csv" % (utils.splitext_plus(os.path.basename(in_file))[0],
vcaller))
if not utils.file_uptodate(out_file, in_file):
ready_bed = None
if ready_bed and utils.file_exists(ready_bed):
sub_file = _create_subset_file(in_file, ready_bed, work_dir, data)
else:
sub_file = in_file
max_depth = max_normal_germline_depth(sub_file, params, somatic_info)
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
writer = writer_class(out_handle)
writer.write_header()
bcf_in = pysam.VariantFile(sub_file)
for rec in bcf_in:
stats = _is_possible_loh(rec, bcf_in, params, somatic_info, max_normal_depth=max_depth)
if chromhacks.is_autosomal(rec.chrom) and stats is not None:
writer.write_row(rec, stats)
return out_file | Select heterozygous variants in the normal sample with sufficient depth.
writer_class implements write_header and write_row to write VCF outputs
from a record and extracted tumor/normal statistics. |
382,893 | def _send(self, data):
if not self._sock:
self.connect()
self._do_send(data) | Send data to statsd. |
382,894 | def to_feather(self, fname):
from pandas.io.feather_format import to_feather
to_feather(self, fname) | Write out the binary feather-format for DataFrames.
.. versionadded:: 0.20.0
Parameters
----------
fname : str
string file path |
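A quick round-trip example for the wrapper above (requires a feather backend such as pyarrow):

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})
df.to_feather('example.feather')              # write the binary feather file
round_tripped = pd.read_feather('example.feather')
print(round_tripped.equals(df))               # True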
382,895 | def _label_path_from_index(self, index):
label_file = os.path.join(self.data_path, 'Annotations', index + '.xml')
assert os.path.exists(label_file), 'Path does not exist: {}'.format(label_file)
return label_file | given image index, find out annotation path
Parameters:
----------
index: int
index of a specific image
Returns:
----------
full path of annotation file |
382,896 | def main():
parser = CommandLine()
if len(sys.argv)==1:
parser.parser.print_help()
sys.exit(1)
parser.parse()
myArgs = parser.args
procsPerAssembly = int(myArgs.maxProcs / myArgs.simultaneous)
setattr(myArgs, "maxProcs", procsPerAssembly)
merparser = MerParse(myArgs.inputConfig, myArgs.sweep, myArgs.sstart,
myArgs.sstop, myArgs.sinterval, myArgs.maxProcs,
asPrefix = myArgs.prefix,
asSI = myArgs.index,
genus = myArgs.genus,
species = myArgs.species)
configPaths = merparser.sweeper_output()
cwd = os.path.abspath(os.getcwd())
allAssembliesDir = os.path.join(cwd, "assemblies")
if not os.path.exists(allAssembliesDir):
os.makedirs(allAssembliesDir)
instances = []
for runName in configPaths:
configPath = configPaths.get(runName)
thisInstance = MerRunner(runName, configPath, myArgs.cleanup)
instances.append(thisInstance)
if len(instances) == 0:
print("There are no meraculous folders in this directory. Exiting")
elif len(instances) > 0:
pool = ThreadPool(myArgs.simultaneous)
results = pool.map(mer_runner_dummy, instances)
pool.close()
pool.join() | 1. Reads in a meraculous config file and outputs all of the associated config
files to $PWD/configs
2. The name of each run and the path to the directory is passed to a
multiprocessing core that controls which assemblies are executed and when. |
382,897 | def _get_resource_type_cls(self, name, resource):
if 'Type' not in resource:
    raise ResourceTypeNotDefined(name)
try:
    return self.inspect_resources[resource['Type']]
except KeyError:
    for custom_member in self._custom_members:
        if custom_member.resource_type == resource['Type']:
return custom_member
return None | Attempts to return troposphere class that represents Type of
provided resource. Attempts to find the troposphere class who's
`resource_type` field is the same as the provided resources `Type`
field.
:param resource: Resource to find troposphere class for
:return: None: If no class found for provided resource
type: Type of provided resource
:raise ResourceTypeNotDefined:
Provided resource does not have a `Type` field |
382,898 | def _cond_select_value_nonrecur(d,cond_match=None,**kwargs):
if('cond_func' in kwargs):
    cond_func = kwargs['cond_func']
else:
    cond_func = _text_cond
if('cond_func_args' in kwargs):
    cond_func_args = kwargs['cond_func_args']
else:
cond_func_args = []
rslt = {}
for key in d:
value = d[key]
if(cond_func(value,cond_match,*cond_func_args)):
rslt[key] = d[key]
else:
pass
return(rslt) | d = {
"ActiveArea":"50829",
"Artist":"315",
"AsShotPreProfileMatrix":"50832",
"AnalogBalance":"50727",
"AsShotICCProfile":"50831",
"AsShotProfileName":"50934",
"AntiAliasStrength":"50738",
"AsShotNeutral":"50728",
"AsShotWhiteXY":"50729"
}
_cond_select_value_nonrecur(d,"50")
_cond_select_value_nonrecur(d,"72")
regex = re.compile("8$")
_cond_select_value_nonrecur(d,regex) |
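A worked call of the selector above, reusing part of the docstring's dictionary (assumes the module's default _text_cond does a substring/regex test on the value, which is not shown in this row):

import re

d = {
    'ActiveArea': '50829',
    'Artist': '315',
    'AnalogBalance': '50727',
    'AsShotNeutral': '50728',
}

# Expected to keep every entry whose value matches the condition.
print(_cond_select_value_nonrecur(d, '50'))              # presumably drops 'Artist': '315'
print(_cond_select_value_nonrecur(d, re.compile('8$')))  # presumably keeps values ending in 8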
382,899 | def get_issues(self, repo, keys):
key1, key2 = keys
key3 = key1[:-1]
url = self.base_url + "/api/0/" + repo + "/" + key1
response = self.session.get(url, params=dict(status='Open'))
if not bool(response):
error = response.json()
code = error[]
if code == :
return []
else:
raise IOError( % (url, error))
issues = []
for result in response.json()[key2]:
idx = six.text_type(result['id'])
result[] = "/".join([self.base_url, repo, key3, idx])
issues.append((repo, result))
return issues | Grab all the issues |