Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k)
---|---|---|
18,500 | def A_multiple_hole_cylinder(Do, L, holes):
side_o = pi*Do*L
cap_circle = pi*Do**2/4*2
A = cap_circle + side_o
for Di, n in holes:
side_i = pi*Di*L
cap_removed = pi*Di**2/4*2
A = A + side_i*n - cap_removed*n
return A | Returns the surface area of a cylinder with multiple holes.
Calculation will naively return a negative value or other impossible
result if the number of cylinders added is physically impossible.
Holes may be of different shapes, but must be perpendicular to the
axis of the cylinder.
.. math::
A = \pi D_o L + 2\cdot \frac{\pi D_o^2}{4} +
\sum_{i}^n \left( \pi D_i L - 2\cdot \frac{\pi D_i^2}{4}\right)
Parameters
----------
Do : float
Diameter of the exterior of the cylinder, [m]
L : float
Length of the cylinder, [m]
holes : list
List of tuples containing (diameter, count) pairs describing each of
the hole sizes.
Returns
-------
A : float
Surface area [m^2]
Examples
--------
>>> A_multiple_hole_cylinder(0.01, 0.1, [(0.005, 1)])
0.004830198704894308 |
18,501 | def project_gdf(gdf, to_crs=None, to_latlong=False):
assert len(gdf) > 0, 'cannot project an empty GeoDataFrame'
start_time = time.time()
if not hasattr(gdf, 'gdf_name'):
gdf.gdf_name = 'unnamed'
if to_crs is not None:
projected_gdf = gdf.to_crs(to_crs)
else:
if to_latlong:
latlong_crs = settings.default_crs
projected_gdf = gdf.to_crs(latlong_crs)
log(.format(gdf.gdf_name, time.time()-start_time))
else:
if (gdf.crs is not None) and ('proj' in gdf.crs) and (gdf.crs['proj'] == 'utm'):
return gdf
avg_longitude = gdf['geometry'].unary_union.centroid.x
utm_zone = int(math.floor((avg_longitude + 180) / 6.) + 1)
utm_crs = {'datum': 'WGS84',
'ellps': 'WGS84',
'proj': 'utm',
'zone': utm_zone,
'units': 'm'}
projected_gdf = gdf.to_crs(utm_crs)
log(.format(gdf.gdf_name, utm_zone, time.time()-start_time))
projected_gdf.gdf_name = gdf.gdf_name
return projected_gdf | Project a GeoDataFrame to the UTM zone appropriate for its geometries'
centroid.
The simple calculation in this function works well for most latitudes, but
won't work for some far northern locations like Svalbard and parts of far
northern Norway.
Parameters
----------
gdf : GeoDataFrame
the gdf to be projected
to_crs : dict
if not None, just project to this CRS instead of to UTM
to_latlong : bool
if True, projects to latlong instead of to UTM
Returns
-------
GeoDataFrame |
18,502 | def dqdv_cycles(cycles, **kwargs):
ica_dfs = list()
cycle_group = cycles.groupby("cycle")
for cycle_number, cycle in cycle_group:
v, dq = dqdv_cycle(cycle, splitter=True, **kwargs)
_ica_df = pd.DataFrame(
{
"voltage": v,
"dq": dq,
}
)
_ica_df["cycle"] = cycle_number
_ica_df = _ica_df[["cycle", "voltage", "dq"]]
ica_dfs.append(_ica_df)
ica_df = pd.concat(ica_dfs)
return ica_df | Convenience function for creating dq-dv data from given capacity and
voltage cycles.
Returns a DataFrame with a 'voltage' and an 'incremental_capacity'
column.
Args:
cycles (pandas.DataFrame): the cycle data ('cycle', 'voltage',
'capacity', 'direction' (1 or -1)).
Returns:
pandas.DataFrame with columns 'cycle', 'voltage', 'dq'.
Example:
>>> cycles_df = my_data.get_cap(
...     categorical_column=True,
...     method="forth-and-forth",
...     label_cycle_number=True,
... )
>>> ica_df = ica.dqdv_cycles(cycles_df) |
18,503 | def _remove_header(self, data, options):
version_info = self._get_version_info(options[])
header_size = version_info[]
if options[][]:
header_size += version_info[]
data = data[header_size:]
return data | Remove header from data |
18,504 | def body(self):
if not self._responses:
return None
if self._responses[-1].code >= 400:
return self._error_message()
return self._deserialize() | Returns the HTTP response body, deserialized if possible.
:rtype: mixed |
18,505 | def _matches(self, entities=None, extensions=None, domains=None,
regex_search=False):
if extensions is not None:
if isinstance(extensions, six.string_types):
extensions = [extensions]
extensions = '(' + '|'.join(extensions) + ')$'
if re.search(extensions, self.filename) is None:
return False
if domains is not None:
domains = listify(domains)
if not set(self.domains) & set(domains):
return False
if entities is not None:
for name, val in entities.items():
if (name not in self.tags) ^ (val is None):
return False
if val is None:
continue
def make_patt(x):
patt = % x
if isinstance(x, (int, float)):
patt = + patt
if not regex_search:
patt = % patt
return patt
ent_patts = [make_patt(x) for x in listify(val)]
patt = .join(ent_patts)
if re.search(patt, str(self.tags[name].value)) is None:
return False
return True | Checks whether the file matches all of the passed entities and
extensions.
Args:
entities (dict): A dictionary of entity names -> regex patterns.
extensions (str, list): One or more file extensions to allow.
domains (str, list): One or more domains the file must match.
regex_search (bool): Whether to require exact match (False) or
regex search (True) when comparing the query string to each
entity.
Returns:
True if _all_ entities and extensions match; False otherwise. |
18,506 | def set_value(file, element, value):
try:
root = ET.parse(file)
relement = root.find(element)
except AttributeError:
log.error("Unable to find element matching %s", element)
return False
relement.text = str(value)
root.write(file)
return True | Sets the value of the matched xpath element
CLI Example:
.. code-block:: bash
salt '*' xml.set_value /tmp/test.xml ".//element" "new value" |
18,507 | def get_commit_date(commit, tz_name):
return set_date_tzinfo(
datetime.fromtimestamp(mktime(commit.committed_date)),
tz_name=tz_name) | Get datetime of the commit committed_date |
18,508 | def calculate_variable(self, variable = None, period = None, use_baseline = False):
if use_baseline:
assert self.baseline_simulation is not None, "self.baseline_simulation is None"
simulation = self.baseline_simulation
else:
assert self.simulation is not None
simulation = self.simulation
tax_benefit_system = simulation.tax_benefit_system
assert period is not None
if not isinstance(period, periods.Period):
period = periods.period(period)
assert simulation is not None
assert tax_benefit_system is not None
assert variable in tax_benefit_system.variables, "{} is not a valid variable".format(variable)
period_size_independent = tax_benefit_system.get_variable(variable).is_period_size_independent
definition_period = tax_benefit_system.get_variable(variable).definition_period
if period_size_independent is False and definition_period != u:
values = simulation.calculate_add(variable, period = period)
elif period_size_independent is True and definition_period == u and period.size_in_months > 1:
values = simulation.calculate(variable, period = period.first_month)
elif period_size_independent is True and definition_period == u and period.size_in_months == 1:
values = simulation.calculate(variable, period = period)
elif period_size_independent is True and definition_period == u and period.size_in_months > 12:
values = simulation.calculate(variable, period = period.start.offset(, ).period())
elif period_size_independent is True and definition_period == u and period.size_in_months == 12:
values = simulation.calculate(variable, period = period)
elif period_size_independent is True and definition_period == u:
values = simulation.calculate(variable, period = period.this_year)
elif definition_period == u:
values = simulation.calculate(variable, period = period)
else:
values = None
assert values is not None, 'Unable to compute values for variable {}'.format(variable)
return values | Compute and return the variable values for period and baseline or reform tax_benefit_system |
18,509 | def get_host_ipv6addr_info(ipv6addr=None, mac=None,
discovered_data=None,
return_fields=None, **api_opts):
infoblox = _get_infoblox(**api_opts)
return infoblox.get_host_ipv6addr_object(ipv6addr, mac, discovered_data, return_fields) | Get host ipv6addr information
CLI Example:
.. code-block:: bash
salt-call infoblox.get_host_ipv6addr_info ipv6addr=2001:db8:85a3:8d3:1349:8a2e:370:7348 |
18,510 | def __step4(self):
step = 0
done = False
row = -1
col = -1
star_col = -1
while not done:
(row, col) = self.__find_a_zero()
if row < 0:
done = True
step = 6
else:
self.marked[row][col] = 2
star_col = self.__find_star_in_row(row)
if star_col >= 0:
col = star_col
self.row_covered[row] = True
self.col_covered[col] = False
else:
done = True
self.Z0_r = row
self.Z0_c = col
step = 5
return step | Find a noncovered zero and prime it. If there is no starred zero
in the row containing this primed zero, Go to Step 5. Otherwise,
cover this row and uncover the column containing the starred
zero. Continue in this manner until there are no uncovered zeros
left. Save the smallest uncovered value and Go to Step 6. |
18,511 | def clear_mpi_env_vars():
removed_environment = {}
for k, v in list(os.environ.items()):
for prefix in ['OMPI_', 'PMI_']:
if k.startswith(prefix):
removed_environment[k] = v
del os.environ[k]
try:
yield
finally:
os.environ.update(removed_environment) | from mpi4py import MPI will call MPI_Init by default. If the child process has MPI environment variables, MPI will think that the child process is an MPI process just like the parent and do bad things such as hang.
This context manager is a hacky way to clear those environment variables temporarily such as when we are starting multiprocessing
Processes. |
18,512 | def sort_languages(self, order=Qt.AscendingOrder):
self.beginResetModel()
self.__languages = sorted(self.__languages, key=lambda x: (x.name), reverse=order)
self.endResetModel() | Sorts the Model languages.
:param order: Order. ( Qt.SortOrder ) |
18,513 | def _close(self, id):
connection = self.connections[id]
connection.connectionLost(Failure(CONNECTION_DONE))
return {} | Respond to a CLOSE command, dumping some data onto the stream. As with
WRITE, this returns an empty acknowledgement.
An occurrence of I{Close} on the wire, together with the response
generated by this method, might have this appearance::
C: -Command: Close
C: -Ask: 1
C: Id: [email protected]>[email protected]:q2q-example:0
C:
S: -Answer: 1
S: |
18,514 | def make_url_fetcher(dispatcher=None,
next_fetcher=weasyprint.default_url_fetcher):
if dispatcher is None:
dispatcher = make_flask_url_dispatcher()
def flask_url_fetcher(url):
redirect_chain = set()
while 1:
result = dispatcher(url)
if result is None:
return next_fetcher(url)
app, base_url, path = result
client = Client(app, response_wrapper=Response)
if isinstance(path, unicode):
path = path.encode()
response = client.get(path, base_url=base_url)
if response.status_code == 200:
return dict(
string=response.data,
mime_type=response.mimetype,
encoding=response.charset,
redirected_url=url)
elif response.status_code in (301, 302, 303, 305, 307):
redirect_chain.add(url)
url = response.location
if url in redirect_chain:
raise ClientRedirectError()
else:
raise ValueError(
% (response.status, base_url, path))
return flask_url_fetcher | Return an function suitable as a ``url_fetcher`` in WeasyPrint.
You generally don’t need to call this directly.
If ``dispatcher`` is not provided, :func:`make_flask_url_dispatcher`
is called to get one. This requires a request context.
Otherwise, it must be a callable that take an URL and return either
``None`` or a ``(wsgi_callable, base_url, path)`` tuple. For None
``next_fetcher`` is used. (By default, fetch normally over the network.)
For a tuple the request is made at the WSGI level.
``wsgi_callable`` must be a Flask application or another WSGI callable.
``base_url`` is the root URL for the application while ``path``
is the path within the application.
Typically ``base_url + path`` is equal or equivalent to the passed URL. |
18,515 | def output(self,delimiter = , freqlist = None):
for type, prob in self:
if freqlist:
if isinstance(type,list) or isinstance(type, tuple):
yield " ".join(type) + delimiter + str(freqlist[type]) + delimiter + str(prob)
else:
yield type + delimiter + str(freqlist[type]) + delimiter + str(prob)
else:
if isinstance(type,list) or isinstance(type, tuple):
yield " ".join(type) + delimiter + str(prob)
else:
yield type + delimiter + str(prob) | Generator yielding formatted strings expressing the type and probability for each item in the distribution |
18,516 | def IsPathSuffix(mod_path, path):
return (mod_path.endswith(path) and
(len(mod_path) == len(path) or
mod_path[:-len(path)].endswith(os.sep))) | Checks whether path is a full path suffix of mod_path.
Args:
mod_path: Must be an absolute path to a source file. Must not have
file extension.
path: A relative path. Must not have file extension.
Returns:
True if path is a full path suffix of mod_path. False otherwise. |
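A quick way to see the "full path component" rule in action; the paths below are hypothetical and the sketch assumes a POSIX `os.sep`:

```python
import os

def IsPathSuffix(mod_path, path):
    return (mod_path.endswith(path) and
            (len(mod_path) == len(path) or
             mod_path[:-len(path)].endswith(os.sep)))

# 'pkg/module' is a whole-component suffix of the first path, but only a
# partial component ('mypkg' vs 'pkg') of the second, so that one is rejected.
assert IsPathSuffix('/srv/app/pkg/module', 'pkg/module')
assert not IsPathSuffix('/srv/app/mypkg/module', 'pkg/module')
```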
18,517 | def parse_requirements() -> Tuple[PackagesType, PackagesType, Set[str]]:
essential_packages: PackagesType = {}
other_packages: PackagesType = {}
duplicates: Set[str] = set()
with open("requirements.txt", "r") as req_file:
section: str = ""
for line in req_file:
line = line.strip()
if line.startswith("
section = parse_section_name(line)
continue
if not line or line.startswith("
continue
module, version = parse_package(line)
if module in essential_packages or module in other_packages:
duplicates.add(module)
if section.startswith("ESSENTIAL"):
essential_packages[module] = version
else:
other_packages[module] = version
return essential_packages, other_packages, duplicates | Parse all dependencies out of the requirements.txt file. |
18,518 | def services(self):
if self._resources is None:
self.__init()
if "services" in self._resources:
url = self._url + "/services"
return _services.Services(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initialize=True)
else:
return None | Gets the services object which will provide the ArcGIS Server's
admin information about services and folders. |
18,519 | def compute(self, sensorToBodyByColumn, sensorToSpecificObjectByColumn):
votesByCell = np.zeros(self.cellCount, dtype="int")
self.activeSegmentsByColumn = []
for (connections,
activeSensorToBodyCells,
activeSensorToSpecificObjectCells) in zip(self.connectionsByColumn,
sensorToBodyByColumn,
sensorToSpecificObjectByColumn):
overlaps = connections.computeActivity({
"sensorToBody": activeSensorToBodyCells,
"sensorToSpecificObject": activeSensorToSpecificObjectCells,
})
activeSegments = np.where(overlaps >= 2)[0]
votes = connections.mapSegmentsToCells(activeSegments)
votes = np.unique(votes)
votesByCell[votes] += 1
self.activeSegmentsByColumn.append(activeSegments)
candidates = np.where(votesByCell == np.max(votesByCell))[0]
self.activeCells = np.intersect1d(self.activeCells, candidates)
if self.activeCells.size == 0:
self.activeCells = candidates
self.inhibitedCells = np.setdiff1d(np.where(votesByCell > 0)[0],
self.activeCells) | Compute the
"body's location relative to a specific object"
from an array of
"sensor's location relative to a specific object"
and an array of
"sensor's location relative to body"
These arrays consist of one module per cortical column.
This is a metric computation, similar to that of the
SensorToSpecificObjectModule, but with voting. In effect, the columns vote
on "the body's location relative to a specific object".
Note: Each column can vote for an arbitrary number of cells, but it can't
vote for a single cell more than once. This is necessary because we don't
want ambiguity in a column to cause some cells to get extra votes. There are
a few ways that this could be biologically plausible:
- Explanation 1: Nearby dendritic segments are independent coincidence
detectors, but perhaps their dendritic spikes don't sum. Meanwhile,
maybe dendritic spikes from far away dendritic segments do sum.
- Explanation 2: Dendritic spikes from different columns are separated
temporally, not spatially. All the spikes from one column "arrive" at
the cell at the same time, but the dendritic spikes from other columns
arrive at other times. With each of these temporally-separated dendritic
spikes, the unsupported cells are inhibited, or the spikes' effects are
summed.
- Explanation 3: Another population of cells within the cortical column
might calculate the "body's location relative to a specific object" in
this same "metric" way, but without tallying any votes. Then it relays
this SDR subcortically, voting 0 or 1 times for each cell.
@param sensorToBodyInputs (list of numpy arrays)
The "sensor's location relative to the body" input from each cortical column
@param sensorToSpecificObjectInputs (list of numpy arrays)
The "sensor's location relative to specific object" input from each
cortical column |
18,520 | def choices(self):
if self._choices:
return self._choices
for n in os.listdir(self._voicedir):
if len(n) == 1 and os.path.isdir(os.path.join(self._voicedir, n)):
self._choices.append(n)
return self._choices | Available choices for characters to be generated. |
18,521 | def swagger_schema(self, request):
if self.parent is None:
return {}
spec = APISpec(
self.parent.name, self.parent.cfg.get(, ),
plugins=[], basePatch=self.prefix
)
for paths, handler in self.handlers.items():
spec.add_tag({
: handler.name,
: utils.dedent(handler.__doc__ or ),
})
for path in paths:
operations = {}
for http_method in handler.methods:
method = getattr(handler, http_method.lower())
operation = OrderedDict({
: [handler.name],
: method.__doc__,
: [],
: {200: {: {: { + handler.name}}}}
})
operation.update(utils.load_yaml_from_docstring(method.__doc__) or {})
operations[http_method.lower()] = operation
spec.add_path(self.prefix + path, operations=operations)
if getattr(handler, , None):
kwargs = {}
if getattr(handler.meta, , None):
kwargs[] = utils.dedent(handler.meta.model.__doc__ or )
spec.definition(handler.name, schema=handler.Schema, **kwargs)
return deepcopy(spec.to_dict()) | Render API Schema. |
18,522 | def pos(self):
tags = []
if self.tree.xpath():
info = self.tree.xpath()[0]
if info == :
tags.append()
if info == :
tags.append()
if info == :
tags.append()
return tags | Tries to decide about the part of speech. |
18,523 | def init(db_url, alembic_ini=None, debug=False, create=False):
engine = create_engine(db_url, echo=debug)
if create:
BASE.metadata.create_all(engine)
if alembic_ini is not None:
from alembic.config import Config
from alembic import command
alembic_cfg = Config(alembic_ini)
command.stamp(alembic_cfg, "head")
scopedsession = scoped_session(sessionmaker(bind=engine))
return scopedsession | Create the tables in the database using the information from the
url obtained.
:arg db_url, URL used to connect to the database. The URL contains
information with regards to the database engine, the host to
connect to, the user and password and the database name.
ie: <engine>://<user>:<password>@<host>/<dbname>
:kwarg alembic_ini, path to the alembic ini file. This is necessary
to be able to use alembic correctly, but not for the unit-tests.
:kwarg debug, a boolean specifying whether we should have the verbose
output of sqlalchemy or not.
:return a session that can be used to query the database. |
18,524 | def get_canonical_request(self, req, cano_headers, signed_headers):
url = urlparse(req.url)
path = self.amz_cano_path(url.path)
split = req.url.split('?', 1)
qs = split[1] if len(split) == 2 else ''
qs = self.amz_cano_querystring(qs)
payload_hash = req.headers['x-amz-content-sha256']
req_parts = [req.method.upper(), path, qs, cano_headers,
signed_headers, payload_hash]
cano_req = '\n'.join(req_parts)
return cano_req | Create the AWS authentication Canonical Request string.
req -- Requests PreparedRequest object. Should already
include an x-amz-content-sha256 header
cano_headers -- Canonical Headers section of Canonical Request, as
returned by get_canonical_headers()
signed_headers -- Signed Headers, as returned by
get_canonical_headers() |
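For orientation, a SigV4 Canonical Request is the six parts joined with newlines. The sketch below assembles one from made-up header values; `cano_headers` and `signed_headers` are assumed to come from `get_canonical_headers()` as the docstring describes:

```python
method = "GET"
path = "/"
qs = "Action=ListUsers&Version=2010-05-08"
cano_headers = "host:iam.amazonaws.com\nx-amz-date:20150830T123600Z\n"
signed_headers = "host;x-amz-date"
# SHA-256 of an empty request body
payload_hash = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"

cano_req = "\n".join([method, path, qs, cano_headers, signed_headers, payload_hash])
print(cano_req)
```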
18,525 | def create_collection(self, name):
assert "/" not in name
if self.provider.readonly:
raise DAVError(HTTP_FORBIDDEN)
path = util.join_uri(self.path, name)
fp = self.provider._loc_to_file_path(path, self.environ)
os.mkdir(fp) | Create a new collection as member of self.
See DAVResource.create_collection() |
18,526 | def get_local_config_file(cls, filename):
if os.path.isfile(filename):
return filename
else:
try:
config_repo = _get_repo()
if len(config_repo) == 0:
raise Exception()
config_repo = os.path.join(config_repo, filename)
if os.path.isfile(config_repo):
return config_repo
else:
raise Exception()
except Exception:
home = os.getenv("HOME", os.path.expanduser("~"))
config_home = os.path.join(home, filename)
if os.path.isfile(config_home):
return config_home
return None | Find local file to setup default values.
There is a pre-fixed logic on how the search of the configuration
file is performed. If the highest priority configuration file is found,
there is no need to search for the next. From highest to lowest
priority:
1. **Local:** Configuration file found in the current working
directory.
2. **Project:** Configuration file found in the root of the current
working ``git`` repository.
3. **User:** Configuration file found in the user's ``$HOME``.
:param str filename: Raw name of the configuration file.
:return: Union[:class:`.str`, :data:`None`] - Configuration file with
the highest priority, :data:`None` if no config file is found. |
18,527 | def mode_key_up(self, viewer, keyname):
if keyname not in self.mode_map:
return False
bnch = self.mode_map[keyname]
if self._kbdmode == bnch.name:
if bnch.type == :
if self._button == 0:
self.reset_mode(viewer)
else:
self._delayed_reset = True
return True
return False | This method is called when a key is pressed in a mode and was
not handled by some other handler with precedence, such as a
subcanvas. |
18,528 | def set_backend(backend_name: str):
global _backend, _backend_name
_backend_name = backend_name
assert not ncluster_globals.task_launched, "Not allowed to change backend after launching a task (this pattern is error-prone)"
if backend_name == 'aws':
_backend = aws_backend
elif backend_name == 'local':
_backend = local_backend
else:
assert False, f"Unknown backend {backend_name}"
ncluster_globals.LOGDIR_ROOT = _backend.LOGDIR_ROOT | Sets backend (local or aws) |
18,529 | def imrescale(img, scale, return_scale=False, interpolation='bilinear'):
h, w = img.shape[:2]
if isinstance(scale, (float, int)):
if scale <= 0:
raise ValueError(
.format(scale))
scale_factor = scale
elif isinstance(scale, tuple):
max_long_edge = max(scale)
max_short_edge = min(scale)
scale_factor = min(max_long_edge / max(h, w),
max_short_edge / min(h, w))
else:
raise TypeError(
.format(
type(scale)))
new_size = _scale_size((w, h), scale_factor)
rescaled_img = imresize(img, new_size, interpolation=interpolation)
if return_scale:
return rescaled_img, scale_factor
else:
return rescaled_img | Resize image while keeping the aspect ratio.
Args:
img (ndarray): The input image.
scale (float or tuple[int]): The scaling factor or maximum size.
If it is a float number, then the image will be rescaled by this
factor, else if it is a tuple of 2 integers, then the image will
be rescaled as large as possible within the scale.
return_scale (bool): Whether to return the scaling factor besides the
rescaled image.
interpolation (str): Same as :func:`resize`.
Returns:
ndarray: The rescaled image. |
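A worked example of the tuple branch with arbitrary sizes: fitting a 1280x720 image inside (1333, 800) while keeping the aspect ratio. The rounding here stands in for `_scale_size`, which is not shown in the snippet:

```python
h, w = 720, 1280
scale = (1333, 800)
max_long_edge, max_short_edge = max(scale), min(scale)
scale_factor = min(max_long_edge / max(h, w), max_short_edge / min(h, w))
new_size = (round(w * scale_factor), round(h * scale_factor))
print(scale_factor, new_size)  # ~1.0414, (1333, 750)
```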
18,530 | def get_courses_in_account(self, account_id, params={}):
if "published" in params:
params["published"] = "true" if params["published"] else ""
url = ACCOUNTS_API.format(account_id) + "/courses"
courses = []
for data in self._get_paged_resource(url, params=params):
courses.append(CanvasCourse(data=data))
return courses | Returns a list of courses for the passed account ID.
https://canvas.instructure.com/doc/api/accounts.html#method.accounts.courses_api |
18,531 | def run(bam_file, data, out_dir):
qsig = config_utils.get_program("qsignature", data["config"])
res_qsig = config_utils.get_resources("qsignature", data["config"])
jvm_opts = " ".join(res_qsig.get("jvm_opts", ["-Xms750m", "-Xmx8g"]))
if not qsig:
logger.info("There is no qsignature tool. Skipping...")
return None
position = dd.get_qsig_file(data)
mixup_check = dd.get_mixup_check(data)
if mixup_check and mixup_check.startswith("qsignature"):
utils.safe_makedir(out_dir)
if not position:
logger.info("There is no qsignature for this species: %s"
% tz.get_in([], data))
return None
if mixup_check == "qsignature_full":
down_bam = bam_file
else:
down_bam = _slice_bam_chr21(bam_file, data)
position = _slice_vcf_chr21(position, out_dir)
out_name = os.path.basename(down_bam).replace("bam", "qsig.vcf")
out_file = os.path.join(out_dir, out_name)
log_file = os.path.join(out_dir, "qsig.log")
cores = dd.get_cores(data)
base_cmd = ("{qsig} {jvm_opts} "
"org.qcmg.sig.SignatureGenerator "
"--noOfThreads {cores} "
"-log {log_file} -i {position} "
"-i {down_bam} ")
if not os.path.exists(out_file):
file_qsign_out = "{0}.qsig.vcf".format(down_bam)
do.run(base_cmd.format(**locals()), "qsignature vcf generation: %s" % dd.get_sample_name(data))
if os.path.exists(file_qsign_out):
with file_transaction(data, out_file) as file_txt_out:
shutil.move(file_qsign_out, file_txt_out)
else:
raise IOError("File doesn't exist %s" % file_qsign_out)
return out_file
return None | Run SignatureGenerator to create normalize vcf that later will be input of qsignature_summary
:param bam_file: (str) path of the bam_file
:param data: (list) list containing the all the dictionary
for this sample
:param out_dir: (str) path of the output
:returns: (string) output normalized vcf file |
18,532 | def xpathContextSetCache(self, active, value, options):
ret = libxml2mod.xmlXPathContextSetCache(self._o, active, value, options)
return ret | Creates/frees an object cache on the XPath context. If
activates XPath objects (xmlXPathObject) will be cached
internally to be reused. @options: 0: This will set the
XPath object caching: @value: This will set the maximum
number of XPath objects to be cached per slot There are 5
slots for: node-set, string, number, boolean, and misc
objects. Use <0 for the default number (100). Other values
for @options have currently no effect. |
18,533 | def parse(string, is_file=False, obj=False):
try:
if obj is False:
if is_file:
return system_json.load(string)
return system_json.loads(string, encoding=)
else:
if is_file:
return system_json.load(
string,
object_hook=lambda d: namedtuple(, d.keys())
(*d.values()), ensure_ascii=False, encoding=)
return system_json.loads(
string,
object_hook=lambda d: namedtuple(, d.keys())
(*d.values()), encoding=)
except (Exception, BaseException) as error:
try:
if current_app.config[]:
raise error
except RuntimeError as flask_error:
raise error
return None | Convert a JSON string to dict/object |
18,534 | def from_requirement(cls, provider, requirement, parent):
candidates = provider.find_matches(requirement)
if not candidates:
raise NoVersionsAvailable(requirement, parent)
return cls(
candidates=candidates,
information=[RequirementInformation(requirement, parent)],
) | Build an instance from a requirement. |
18,535 | def get_chat_administrators(self, *args, **kwargs):
return get_chat_administrators(*args, **self._merge_overrides(**kwargs)).run() | See :func:`get_chat_administrators` |
18,536 | def _run_listeners(self, line):
for regex, callbacks in self.listeners.iteritems():
match = regex.match(line)
if not match:
continue
for callback in callbacks:
callback(*match.groups()) | Each listener's associated regular expression is matched against raw IRC
input. If there is a match, the listener's associated function is called
with all the regular expression's matched subgroups. |
18,537 | def ipfn_df(self, df, aggregates, dimensions, weight_col='total'):
steps = len(aggregates)
tables = [df]
for inc in range(steps - 1):
tables.append(df.copy())
original = df.copy()
inc = 0
for features in dimensions:
if inc == (steps - 1):
table_update = df
table_current = tables[inc]
else:
table_update = tables[inc + 1]
table_current = tables[inc]
tmp = table_current.groupby(features)[weight_col].sum()
xijk = aggregates[inc]
feat_l = []
for feature in features:
feat_l.append(np.unique(table_current[feature]))
table_update.set_index(features, inplace=True)
table_current.set_index(features, inplace=True)
for feature in product(*feat_l):
den = tmp.loc[feature]
if den == 0:
table_update.loc[feature, weight_col] =\
table_current.loc[feature, weight_col] *\
xijk.loc[feature]
else:
table_update.loc[feature, weight_col] = \
table_current.loc[feature, weight_col].astype(float) * \
xijk.loc[feature] / den
table_update.reset_index(inplace=True)
table_current.reset_index(inplace=True)
inc += 1
feat_l = []
max_conv = 0
inc = 0
for features in dimensions:
tmp = df.groupby(features)[weight_col].sum()
ori_ijk = aggregates[inc]
temp_conv = max(abs(tmp / ori_ijk - 1))
if temp_conv > max_conv:
max_conv = temp_conv
inc += 1
return df, max_conv | Runs the ipfn method from a dataframe df, aggregates/marginals and the dimension(s) preserved.
For example:
from ipfn import ipfn
import pandas as pd
age = [30, 30, 30, 30, 40, 40, 40, 40, 50, 50, 50, 50]
distance = [10,20,30,40,10,20,30,40,10,20,30,40]
m = [8., 4., 6., 7., 3., 6., 5., 2., 9., 11., 3., 1.]
df = pd.DataFrame()
df['age'] = age
df['distance'] = distance
df['total'] = m
xip = df.groupby('age')['total'].sum()
xip.loc[30] = 20
xip.loc[40] = 18
xip.loc[50] = 22
xpj = df.groupby('distance')['total'].sum()
xpj.loc[10] = 18
xpj.loc[20] = 16
xpj.loc[30] = 12
xpj.loc[40] = 14
dimensions = [['age'], ['distance']]
aggregates = [xip, xpj]
IPF = ipfn(df, aggregates, dimensions)
df = IPF.iteration()
print(df)
print(df.groupby('age')['total'].sum(), xip) |
18,538 | def edit_distance_matrix(train_x, train_y=None):
if train_y is None:
ret = np.zeros((train_x.shape[0], train_x.shape[0]))
for x_index, x in enumerate(train_x):
for y_index, y in enumerate(train_x):
if x_index == y_index:
ret[x_index][y_index] = 0
elif x_index < y_index:
ret[x_index][y_index] = edit_distance(x, y)
else:
ret[x_index][y_index] = ret[y_index][x_index]
return ret
ret = np.zeros((train_x.shape[0], train_y.shape[0]))
for x_index, x in enumerate(train_x):
for y_index, y in enumerate(train_y):
ret[x_index][y_index] = edit_distance(x, y)
return ret | Calculate the edit distance.
Args:
train_x: A list of neural architectures.
train_y: A list of neural architectures.
Returns:
An edit-distance matrix. |
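The same fill pattern can be sketched with a stand-in distance (absolute length difference) instead of the library's real architecture edit distance; only the symmetric-matrix bookkeeping is the point here:

```python
import numpy as np

def edit_distance(x, y):          # stand-in metric, not the real one
    return abs(len(x) - len(y))

train_x = np.empty(3, dtype=object)
train_x[:] = [[1, 2], [1, 2, 3], [1]]

ret = np.zeros((train_x.shape[0], train_x.shape[0]))
for i, x in enumerate(train_x):
    for j, y in enumerate(train_x):
        if i < j:
            ret[i][j] = edit_distance(x, y)
        elif i > j:
            ret[i][j] = ret[j][i]   # reuse the already-computed upper triangle
print(ret)
```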
18,539 | def transfer(self, transfer_payload=None, *, from_user, to_user):
if self.persist_id is None:
raise EntityNotYetPersistedError((
))
return self.plugin.transfer(self.persist_id, transfer_payload,
from_user=from_user, to_user=to_user) | Transfer this entity to another owner on the backing
persistence layer
Args:
transfer_payload (dict): Payload for the transfer
from_user (any): A user based on the model specified by the
persistence layer
to_user (any): A user based on the model specified by the
persistence layer
Returns:
str: Id of the resulting transfer action on the persistence
layer
Raises:
:exc:`~.EntityNotYetPersistedError`: If the entity being
transferred is not associated with an id on the
persistence layer (:attr:`~Entity.persist_id`) yet
:exc:`~.EntityNotFoundError`: If the entity could not be
found on the persistence layer
:exc:`~.EntityTransferError`: If the entity fails to be
transferred on the persistence layer
:exc:`~.PersistenceError`: If any other unhandled error
in the plugin occurred |
18,540 | def __findFirstMissing(self, array, start, end):
if (start > end):
return end + 1
if (start != array[start]):
return start
mid = int((start + end) / 2)
if (array[mid] == mid):
return self.__findFirstMissing(array, mid + 1, end)
return self.__findFirstMissing(array, start, mid) | Find the smallest elements missing in a sorted array.
Returns:
int: The smallest element missing. |
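A standalone sketch of the same binary-search idea (the original is a private method; this assumes a sorted array whose values start at 0):

```python
def find_first_missing(array, start, end):
    if start > end:
        return end + 1
    if start != array[start]:
        return start
    mid = (start + end) // 2
    if array[mid] == mid:
        return find_first_missing(array, mid + 1, end)
    return find_first_missing(array, start, mid)

arr = [0, 1, 2, 4, 5, 7]
print(find_first_missing(arr, 0, len(arr) - 1))  # 3
```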
18,541 | def colored(cls, color, message):
return getattr(cls, color.upper()) + message + cls.DEFAULT | Small function to wrap a string around a color
Args:
color (str): name of the color to wrap the string with, must be one
of the class properties
message (str): String to wrap with the color
Returns:
str: the colored string |
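A minimal sketch of the same pattern using raw ANSI escape codes; the property names (`RED`, `GREEN`, `DEFAULT`) are assumptions mirroring the docstring, not the actual class:

```python
class Colors:
    RED = '\033[31m'
    GREEN = '\033[32m'
    DEFAULT = '\033[0m'

    @classmethod
    def colored(cls, color, message):
        return getattr(cls, color.upper()) + message + cls.DEFAULT

print(Colors.colored('red', 'something went wrong'))  # red text on ANSI terminals
```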
18,542 | def _raw_to_der(self, raw_signature):
component_length = self._sig_component_length()
if len(raw_signature) != int(2 * component_length):
raise ValueError("Invalid signature")
r_bytes = raw_signature[:component_length]
s_bytes = raw_signature[component_length:]
r = int_from_bytes(r_bytes, "big")
s = int_from_bytes(s_bytes, "big")
return encode_dss_signature(r, s) | Convert signature from RAW encoding to DER encoding. |
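For a concrete picture of the raw-to-DER conversion, here is a sketch for a 64-byte P-256 style signature using the `cryptography` package's `encode_dss_signature` helper; the byte values are dummies:

```python
from cryptography.hazmat.primitives.asymmetric.utils import encode_dss_signature

raw_signature = bytes(range(1, 33)) + bytes(range(33, 65))  # dummy r || s, 32 bytes each
component_length = 32
r = int.from_bytes(raw_signature[:component_length], "big")
s = int.from_bytes(raw_signature[component_length:], "big")
print(encode_dss_signature(r, s).hex())
```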
18,543 | def _pick_selected_option(cls):
for option in cls.select_el:
if not hasattr(option, "selected"):
return None
if option.selected:
return option.value
return None | Select handler for authors. |
18,544 | def cublasCher2k(handle, uplo, trans, n, k, alpha, A, lda, B, ldb, beta, C, ldc):
status = _libcublas.cublasCher2k_v2(handle,
_CUBLAS_FILL_MODE[uplo],
_CUBLAS_OP[trans],
n, k, ctypes.byref(cuda.cuFloatComplex(alpha.real,
alpha.imag)),
int(A), lda, int(B), ldb,
ctypes.byref(cuda.cuFloatComplex(beta.real,
beta.imag)),
int(C), ldc)
cublasCheckStatus(status) | Rank-2k operation on Hermitian matrix. |
18,545 | def _overwrite_special_dates(midnight_utcs,
opens_or_closes,
special_opens_or_closes):
if not len(special_opens_or_closes):
return
len_m, len_oc = len(midnight_utcs), len(opens_or_closes)
if len_m != len_oc:
raise ValueError(
"Found misaligned dates while building calendar.\n"
"Expected midnight_utcs to be the same length as open_or_closes,\n"
"but len(midnight_utcs)=%d, len(open_or_closes)=%d" % len_m, len_oc
)
indexer = midnight_utcs.get_indexer(special_opens_or_closes.index)
opens_or_closes.values[indexer] = special_opens_or_closes.values | Overwrite dates in open_or_closes with corresponding dates in
special_opens_or_closes, using midnight_utcs for alignment. |
18,546 | def establish(self, call_id, timeout, limit=None,
retry=None, max_retries=None):
rejected = 0
retried = 0
results = []
result_queue = self.result_queues[call_id]
try:
with Timeout(timeout, False):
while True:
result = result_queue.get()
if result is None:
rejected += 1
if retry is not None:
if retried == max_retries:
break
retry()
retried += 1
continue
results.append(result)
if len(results) == limit:
break
finally:
del result_queue
self.remove_result_queue(call_id)
if not results:
if rejected:
raise Rejected( % rejected
if rejected != 1 else
)
else:
raise WorkerNotFound()
return results | Waits until the call is accepted by workers and starts to collect the
results. |
18,547 | def from_schema(cls, schema, *args, **kwargs):
return cls(schema.get(u"id", u""), schema, *args, **kwargs) | Construct a resolver from a JSON schema object.
:argument schema schema: the referring schema
:rtype: :class:`RefResolver` |
18,548 | def whoami(self) -> dict:
if not self.access_token:
return {}
self._try_refresh_access_token()
return self.session.get(self.WHOAMI_URL).json() | Returns the basic information about the authenticated character.
Obviously doesn't do anything if this Preston instance is not
authenticated, so it returns an empty dict.
Args:
None
Returns:
character info if authenticated, otherwise an empty dict |
18,549 | def _set_axis_position(self, axes, axis, option):
positions = {: [, ], : [, ]}[axis]
axis = axes.xaxis if axis == else axes.yaxis
if option in [None, False]:
axis.set_visible(False)
for pos in positions:
axes.spines[pos].set_visible(False)
else:
if option is True:
option = positions[0]
if in option:
axis.set_ticklabels([])
axis.set_label_text()
if option != :
option = option.split()[0]
axis.set_ticks_position(option)
axis.set_label_position(option)
if not self.overlaid and not self.show_frame and self.projection != :
pos = (positions[1] if (option and (option == or positions[0] in option))
else positions[0])
axes.spines[pos].set_visible(False) | Set the position and visibility of the xaxis or yaxis by
supplying the axes object, the axis to set, i.e. 'x' or 'y'
and an option to specify the position and visibility of the axis.
The option may be None, 'bare' or positional, i.e. 'left' and
'right' for the yaxis and 'top' and 'bottom' for the xaxis.
May also combine positional and 'bare' into for example 'left-bare'. |
18,550 | def hwpack_names():
ls = hwpack_dir().listdir()
ls = [x.name for x in ls]
ls = [x for x in ls if x != ]
arduino_included = 'arduino' in ls
ls = [x for x in ls if x != 'arduino']
ls.sort()
if arduino_included:
ls = ['arduino'] + ls
return ls | return installed hardware package names. |
18,551 | def stream(self):
stream = self._p4dict.get()
if stream:
return Stream(stream, self._connection) | Which stream, if any, the client is under |
18,552 | def updateObj(self, event):
name = self.objList.get("active")
self.SearchVar.set(name)
self.object_info.set(str(self.kbos.get(name, )))
return | Put this object in the search box |
18,553 | def _checkJobGraphAcylicDFS(self, stack, visited, extraEdges):
if self not in visited:
visited.add(self)
stack.append(self)
for successor in self._children + self._followOns + extraEdges[self]:
successor._checkJobGraphAcylicDFS(stack, visited, extraEdges)
assert stack.pop() == self
if self in stack:
stack.append(self)
raise JobGraphDeadlockException("A cycle of job dependencies has been detected " % stack) | DFS traversal to detect cycles in augmented job graph. |
18,554 | def request(self, host, handler, request_body, verbose):
proxy_handler = urllib2.ProxyHandler()
opener = urllib2.build_opener(proxy_handler)
fhandle = opener.open(request)
return(self.parse_response(fhandle)) | Send xml-rpc request using proxy |
18,555 | def clear_lock(remote=None):
def _do_clear_lock(repo):
def _add_error(errlist, repo, exc):
msg = (
.format(repo[], repo[], exc))
log.debug(msg)
errlist.append(msg)
success = []
failed = []
if os.path.exists(repo[]):
try:
os.remove(repo[])
except OSError as exc:
if exc.errno == errno.EISDIR:
try:
shutil.rmtree(repo[])
except OSError as exc:
_add_error(failed, repo, exc)
else:
_add_error(failed, repo, exc)
else:
msg = .format(repo[])
log.debug(msg)
success.append(msg)
return success, failed
if isinstance(remote, dict):
return _do_clear_lock(remote)
cleared = []
errors = []
for repo in init():
if remote:
try:
if remote not in repo[]:
continue
except TypeError:
if six.text_type(remote) not in repo[]:
continue
success, failed = _do_clear_lock(repo)
cleared.extend(success)
errors.extend(failed)
return cleared, errors | Clear update.lk
``remote`` can either be a dictionary containing repo configuration
information, or a pattern. If the latter, then remotes whose URL
matches the pattern will have their lock files cleared. |
18,556 | def process_result(self, raw_result):
if self.results_class and raw_result:
if isinstance(raw_result, dict_type):
return self.results_class(raw_result)
elif isinstance(raw_result, collections.Iterable):
return [self.results_class(result) for result in raw_result]
return raw_result | Performs actions on the raw result from the XML-RPC response.
If a `results_class` is defined, the response will be converted
into one or more object instances of that class. |
18,557 | def check_validity(self):
if not isinstance(self.pianoroll, np.ndarray):
raise TypeError("`pianoroll` must be a numpy array.")
if not (np.issubdtype(self.pianoroll.dtype, np.bool_)
or np.issubdtype(self.pianoroll.dtype, np.number)):
raise TypeError("The data type of `pianoroll` must be np.bool_ or "
"a subdtype of np.number.")
if self.pianoroll.ndim != 2:
raise ValueError("`pianoroll` must have exactly two dimensions.")
if self.pianoroll.shape[1] != 128:
raise ValueError("The length of the second axis of `pianoroll` "
"must be 128.")
if not isinstance(self.program, int):
raise TypeError("`program` must be int.")
if self.program < 0 or self.program > 127:
raise ValueError("`program` must be in between 0 to 127.")
if not isinstance(self.is_drum, bool):
raise TypeError("`is_drum` must be bool.")
if not isinstance(self.name, string_types):
raise TypeError("`name` must be a string.") | Raise error if any invalid attribute found. |
18,558 | def up(self,x,L_change = 12):
y = L_change*ssd.upsample(x,L_change)
y = signal.sosfilt(self.sos,y)
return y | Upsample and filter the signal |
18,559 | def get_phi_ss(imt, mag, params):
C = params[imt]
if mag <= 5.0:
phi = C["a"]
elif mag > 6.5:
phi = C["b"]
else:
phi = C["a"] + (mag - 5.0) * ((C["b"] - C["a"]) / 1.5)
return phi | Returns the single station phi (or it's variance) for a given magnitude
and intensity measure type according to equation 5.14 of Al Atik (2015) |
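A worked instance of the interpolation branch with made-up coefficients: for a magnitude halfway through the 5.0 to 6.5 range, phi lands halfway between a and b:

```python
C = {"a": 0.4, "b": 0.6}
mag = 5.75
phi = C["a"] + (mag - 5.0) * ((C["b"] - C["a"]) / 1.5)
print(phi)  # 0.4 + 0.75 * (0.2 / 1.5) = 0.5
```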
18,560 | def remove_attribute(self, ont_id: str, operator: Account, attrib_key: str, payer: Account, gas_limit: int,
gas_price: int):
pub_key = operator.get_public_key_bytes()
b58_payer_address = payer.get_address_base58()
tx = self.new_remove_attribute_transaction(ont_id, pub_key, attrib_key, b58_payer_address, gas_limit, gas_price)
tx.sign_transaction(operator)
tx.add_sign_transaction(payer)
return self.__sdk.get_network().send_raw_transaction(tx) | This interface is used to send a Transaction object which is used to remove attribute.
:param ont_id: OntId.
:param operator: an Account object which indicate who will sign for the transaction.
:param attrib_key: a string which is used to indicate which attribute we want to remove.
:param payer: an Account object which indicate who will pay for the transaction.
:param gas_limit: an int value that indicate the gas limit.
:param gas_price: an int value that indicate the gas price.
:return: a hexadecimal transaction hash value. |
18,561 | def copy(self,*args,**kwargs):
deep = kwargs.get('deep', True)
if len(args) > 0:
return self.updated_copy(*args)
else :
return copy.deepcopy(self) if deep else copy.copy(self) | Returns a copy of the current data object
:param flag: if an argument is provided, this returns an updated copy of the current object (i.e. equivalent to obj.copy(); obj.update(flag)), optimising memory use.
:keyword True deep: deep copies the object (object data will be copied as well). |
18,562 | def filter_queryset(self, value, queryset):
filter_kwargs = {self.field_name: value}
return queryset.filter(**filter_kwargs) | Filter the queryset to all instances matching the given attribute. |
18,563 | def download(url: str, filename: str,
skip_cert_verify: bool = True) -> None:
log.info("Downloading from {} to {}", url, filename)
ctx = ssl.create_default_context()
if skip_cert_verify:
ctx.check_hostname, ctx.verify_mode = False, ssl.CERT_NONE
with urllib.request.urlopen(url, context=ctx) as u, open(filename, 'wb') as f:
f.write(u.read()) | Downloads a URL to a file.
Args:
url: URL to download from
filename: file to save to
skip_cert_verify: skip SSL certificate check? |
18,564 | def make_save_locals_impl():
try:
if in sys.builtin_module_names:
import __pypy__
save_locals = __pypy__.locals_to_fast
except:
pass
else:
if in sys.builtin_module_names:
def save_locals_pypy_impl(frame):
save_locals(frame)
return save_locals_pypy_impl
try:
import ctypes
locals_to_fast = ctypes.pythonapi.PyFrame_LocalsToFast
except:
pass
else:
def save_locals_ctypes_impl(frame):
locals_to_fast(ctypes.py_object(frame), ctypes.c_int(0))
return save_locals_ctypes_impl
return None | Factory for the 'save_locals_impl' method. This may seem like a complicated pattern but it is essential that the method is created at
module load time. Inner imports after module load time would cause an occasional debugger deadlock due to the importer lock and debugger
lock being taken in different order in different threads. |
18,565 | def get_child_objective_banks(self, objective_bank_id):
if self._catalog_session is not None:
return self._catalog_session.get_child_catalogs(catalog_id=objective_bank_id)
return ObjectiveBankLookupSession(
self._proxy,
self._runtime).get_objective_banks_by_ids(
list(self.get_child_objective_bank_ids(objective_bank_id))) | Gets the children of the given objective bank.
arg: objective_bank_id (osid.id.Id): the ``Id`` to query
return: (osid.learning.ObjectiveBankList) - the children of the
objective bank
raise: NotFound - ``objective_bank_id`` is not found
raise: NullArgument - ``objective_bank_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
18,566 | def toString(self, format_=, structureSuffix=):
if format_ == :
return % (
self.id, self.sequence, self.id, structureSuffix,
self.structure)
else:
if six.PY3:
return super().toString(format_=format_)
else:
return AARead.toString(self, format_=format_) | Convert the read to a string in PDB format (sequence & structure). This
consists of two FASTA records, one for the sequence then one for the
structure.
@param format_: Either 'fasta-ss' or 'fasta'. In the former case, the
structure information is returned. Otherwise, plain FASTA is
returned.
@param structureSuffix: The C{str} suffix to append to the read id
for the second FASTA record, containing the structure information.
@raise ValueError: If C{format_} is not 'fasta'.
@return: A C{str} representing the read sequence and structure in PDB
FASTA format. |
18,567 | def add_type(type_, func=None):
def _check_type(type_):
if not (isinstance(type_, python.class_types) or
hasattr(type_, '__call__')):
raise TypeError('Unable to add %r as a custom type '
'(expected a class or callable)' % (type_,))
if isinstance(type_, list):
type_ = tuple(type_)
if type_ in TYPE_MAP:
raise KeyError('Type %r already exists' % (type_,))
if isinstance(type_, types.TupleType):
for x in type_:
_check_type(x)
else:
_check_type(type_)
TYPE_MAP[type_] = func | Adds a custom type to L{TYPE_MAP}. A custom type allows fine grain control
of what to encode to an AMF data stream.
@raise TypeError: Unable to add as a custom type (expected a class or callable).
@raise KeyError: Type already exists.
@see: L{get_type} and L{remove_type} |
18,568 | def is_zero_user(self):
return self.upvote_num + self.thank_num + \
self.question_num + self.answer_num == 0 | Return whether the current user is a "zero user" (really four zeros):
0 upvotes, 0 thanks, 0 questions, 0 answers.
:return: whether this is a zero user
:rtype: bool |
18,569 | def is_entailed_by(self, other):
if not set(self.include.keys()).issubset(set(other.include.keys())):
return False
if not self.exclude.issuperset(other.exclude):
return False
if not self.prototype.is_entailed_by(other.prototype):
return False
return True | Means merging other with self does not produce any new information. |
18,570 | def _read_multiline(self, init_data):
result = init_data
first = True
while True:
last_index = self.current_file.tell()
line_raw = self.current_file.readline()
return result | Reads multiline symbols (usually comments)
:param init_data: initial data (parsed from the line containing keyword)
:return: parsed value of the multiline symbol
:rtype: str |
18,571 | def get_title (self):
if self.title is None:
url = u""
if self.base_url:
url = self.base_url
elif self.url:
url = self.url
self.title = url
if "/" in url:
title = url.rsplit("/", 1)[1]
if title:
self.title = title
return self.title | Return title of page the URL refers to.
This is per default the filename or the URL. |
18,572 | def cleanup(self):
for _id, shard in self._shards.items():
if shard.get(, False):
Servers().remove(shard[])
if shard.get(, False):
ReplicaSets().remove(shard[])
for mongos in self._routers:
Servers().remove(mongos)
for config_id in self._configsvrs:
self.configdb_singleton.remove(config_id)
self._configsvrs = []
self._routers = []
self._shards = {} | cleanup configuration: stop and remove all servers |
18,573 | def listModules(self):
modules = list(self.modules.items())
modules.sort()
return [module for name, module in modules] | Return an alphabetical list of all modules. |
18,574 | def _bnot16(ins):
output = _16bit_oper(ins.quad[2])
output.append()
output.append()
REQUIRES.add()
return output | Negates top (Bitwise NOT) of the stack (16 bits in HL) |
18,575 | def find_and_patch_entry(soup, entry):
link = soup.find("a", {"class": "headerlink"}, href="
tag = soup.new_tag("a")
tag["name"] = APPLE_REF_TEMPLATE.format(entry.type, entry.name)
if link:
link.parent.insert(0, tag)
return True
elif entry.anchor.startswith("module-"):
soup.h1.parent.insert(0, tag)
return True
else:
return False | Modify soup so Dash.app can generate TOCs on the fly. |
18,576 | def _handleAnonymousEvents(self, component, action, data, client):
try:
event = self.anonymous_events[component][action][]
self.log("Firing anonymous event: ", component, action,
str(data)[:20], lvl=network)
self.fireEvent(event(action, data, client))
except Exception as e:
self.log("Critical error during anonymous event handling:",
component, action, e,
type(e), lvl=critical, exc=True) | Handler for anonymous (public) events |
18,577 | def _make_git(config_info):
git_args = {}
def _add_value(value, key):
args_key, args_value = _GIT_ARG_FNS[key](value)
git_args[args_key] = args_value
devpipeline_core.toolsupport.args_builder("git", config_info, _GIT_ARGS, _add_value)
if git_args.get("uri"):
return devpipeline_scm.make_simple_scm(Git(git_args), config_info)
else:
raise Exception("No git uri ({})".format(config_info.config.name)) | This function initializes and Git SCM tool object. |
18,578 | def baseglob(pat, base):
return [f for f in glob(pat) if f.startswith(base)] | Given a pattern and a base, return files that match the glob pattern
and also contain the base. |
18,579 | def prefix_dirs(path):
_dirname = posixpath.dirname
path = path.strip()
out = []
while path != :
path = _dirname(path)
out.append(path)
return reversed(out) | Return an iterable of all prefix directories of path, descending from root. |
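A minimal re-implementation sketch showing the intended output; the loop's stop condition is reconstructed (assumed to stop at the root `'/'`, so it expects an absolute path):

```python
import posixpath

def prefix_dirs(path):
    out = []
    path = path.strip()
    while path != '/':
        path = posixpath.dirname(path)
        out.append(path)
    return list(reversed(out))

print(prefix_dirs('/usr/local/bin'))  # ['/', '/usr', '/usr/local']
```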
18,580 | def focus_right(pymux):
" Move focus to the right. "
_move_focus(pymux,
lambda wp: wp.xpos + wp.width + 1,
lambda wp: wp.ypos) | Move focus to the right. |
18,581 | def mode(series):
if series.notnull().sum() == 0:
return np.nan
else:
return series.value_counts().idxmax() | pandas mode is "empty if nothing has 2+ occurrences."
this method always returns something:
nan if the series is empty/nan, breaking ties arbitrarily |
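A quick check of the "always returns something" behaviour with arbitrary values:

```python
import numpy as np
import pandas as pd

def mode(series):
    if series.notnull().sum() == 0:
        return np.nan
    return series.value_counts().idxmax()

print(mode(pd.Series([1, 1, 2, np.nan])))  # 1.0 (most frequent non-null value)
print(mode(pd.Series([np.nan, np.nan])))   # nan (empty/all-nan series)
```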
18,582 | def send(command, data):
global _lock
try:
_lock.acquire()
data = data.encode()
assert len(data) < 1000000,
msg = b % (command.value, len(data), data)
logging.getLogger(__name__).debug( % msg)
_out_file.write(msg)
_out_file.flush()
finally:
_lock.release() | Send command to Training Service.
command: CommandType object.
data: string payload. |
18,583 | def from_parameter(cls: Type[UnlockParameterType], parameter: str) -> Optional[Union[SIGParameter, XHXParameter]]:
sig_param = SIGParameter.from_parameter(parameter)
if sig_param:
return sig_param
else:
xhx_param = XHXParameter.from_parameter(parameter)
if xhx_param:
return xhx_param
return None | Return UnlockParameter instance from parameter string
:param parameter: Parameter string
:return: |
18,584 | def save_act(self, path=None):
if path is None:
path = os.path.join(logger.get_dir(), "model.pkl")
with tempfile.TemporaryDirectory() as td:
save_variables(os.path.join(td, "model"))
arc_name = os.path.join(td, "packed.zip")
with zipfile.ZipFile(arc_name, 'w') as zipf:
for root, dirs, files in os.walk(td):
for fname in files:
file_path = os.path.join(root, fname)
if file_path != arc_name:
zipf.write(file_path, os.path.relpath(file_path, td))
with open(arc_name, "rb") as f:
model_data = f.read()
with open(path, "wb") as f:
cloudpickle.dump((model_data, self._act_params), f) | Save model to a pickle located at `path` |
18,585 | async def peer(client: Client, peer_signed_raw: str) -> ClientResponse:
return await client.post(MODULE + , {: peer_signed_raw}, rtype=RESPONSE_AIOHTTP) | POST a Peer signed raw document
:param client: Client to connect to the api
:param peer_signed_raw: Peer signed raw document
:return: |
18,586 | def removeFileSafely(filename,clobber=True):
if filename is not None and filename.strip() != '':
if os.path.exists(filename) and clobber: os.remove(filename) | Delete the file specified, but only if it exists and clobber is True. |
18,587 | def compute_amount(self):
self.amount = self.base_amount * self.aliquot / 100
return self.amount | Auto-assign and return the total amount for this tax. |
18,588 | def ask_user(d):
msg = bold() % (
version(),
)
print(msg)
msg =
print(msg)
The presentation title will be included on the title slide.projectPresentation titleauthorauthorAuthor name(s)hieroglyph.theme\nnamedescslide_themeSlide Themenamename'] for t in themes]
),
)
print("")
sphinx.quickstart.ask_user(d) | Wrap sphinx.quickstart.ask_user, and add additional questions. |
18,589 | def request(self,
method,
path,
options=None,
payload=None,
heartbeater=None,
retry_count=0):
def _request(authHeaders, options, payload, heartbeater, retry_count):
tenantId = authHeaders[]
requestUrl = self.baseUrl + tenantId + path
if options:
requestUrl += '?' + urlencode(options)
payload = StringProducer(json.dumps(payload)) if payload else None
d = self.agent.request(method=method,
uri=requestUrl,
headers=None,
bodyProducer=payload)
d.addCallback(self.cbRequest,
method,
path,
options,
payload,
heartbeater,
retry_count)
return d
d = self.agent.getAuthHeaders()
d.addCallback(_request, options, payload, heartbeater, retry_count)
return d | Make a request to the Service Registry API.
@param method: HTTP method ('POST', 'GET', etc.).
@type method: C{str}
@param path: Path to be appended to base URL ('/sessions', etc.).
@type path: C{str}
@param options: Options to be encoded as query parameters in the URL.
@type options: C{dict}
@param payload: Optional body
@type payload: C{dict}
@param heartbeater: Optional heartbeater passed in when
creating a session.
@type heartbeater: L{HeartBeater} |
18,590 | def SendReply(self, response, tag=None):
if not isinstance(response, rdfvalue.RDFValue):
raise ValueError("SendReply can only send RDFValues")
if self.rdf_flow.parent_flow_id:
response = rdf_flow_objects.FlowResponse(
client_id=self.rdf_flow.client_id,
request_id=self.rdf_flow.parent_request_id,
response_id=self.GetNextResponseId(),
payload=response,
flow_id=self.rdf_flow.parent_flow_id,
tag=tag)
self.flow_responses.append(response)
else:
reply = rdf_flow_objects.FlowResult(
client_id=self.rdf_flow.client_id,
flow_id=self.rdf_flow.flow_id,
hunt_id=self.rdf_flow.parent_hunt_id,
payload=response,
tag=tag)
self.replies_to_write.append(reply)
self.replies_to_process.append(reply)
self.rdf_flow.num_replies_sent += 1 | Allows this flow to send a message to its parent flow.
If this flow does not have a parent, the message is ignored.
Args:
response: An RDFValue() instance to be sent to the parent.
tag: If specified, tag the result with this tag.
Raises:
ValueError: If responses is not of the correct type. |
18,591 | def get_command_response_from_cache(self, device_id, command, command2):
key = self.create_key_from_command(command, command2)
command_cache = self.get_cache_from_file(device_id)
if device_id not in command_cache:
command_cache[device_id] = {}
return False
elif key not in command_cache[device_id]:
return False
response = command_cache[device_id][key]
expired = False
if response[] < int(time()):
self.logger.info("cache expired for device %s", device_id)
expired = True
if os.path.exists(LOCK_FILE):
self.logger.info("cache locked - will wait to rebuild %s", device_id)
else:
self.logger.info("cache unlocked - will rebuild %s", device_id)
newpid = os.fork()
if newpid == 0:
self.rebuild_cache(device_id, command, command2)
if expired:
self.logger.info("returning expired cached device status %s", device_id)
else:
self.logger.info("returning unexpired cached device status %s", device_id)
return response[] | Gets response |
18,592 | def _negf(ins):
output = _float_oper(ins.quad[2])
output.append()
output.extend(_fpush())
REQUIRES.add()
return output | Changes sign of top of the stack (48 bits) |
18,593 | def shrink_text_file(filename, max_size, removal_marker=None):
file_size = os.path.getsize(filename)
assert file_size > max_size
with open(filename, 'r+b') as output_file:
with open(filename, 'rb') as input_file:
output_file.seek(max_size // 2)
output_file.readline()
if output_file.tell() == file_size:
return
if removal_marker:
output_file.write(removal_marker.encode())
input_file.seek(-max_size // 2, os.SEEK_END)
input_file.readline()
copy_all_lines_from_to(input_file, output_file)
output_file.truncate() | Shrink a text file to approximately maxSize bytes
by removing lines from the middle of the file. |
18,594 | def list_adb_devices_by_usb_id():
out = adb.AdbProxy().devices(['-l'])
clean_lines = new_str(out, 'utf-8').strip().split('\n')
results = []
for line in clean_lines:
tokens = line.strip().split()
if len(tokens) > 2 and tokens[1] == 'device':
results.append(tokens[2])
return results | List the usb id of all android devices connected to the computer that
are detected by adb.
Returns:
A list of strings that are android device usb ids. Empty if there's
none. |
18,595 | def _getDriver(self):
driverCls = get_driver(Provider.GCE)
return driverCls(self._clientEmail,
self._googleJson,
project=self._projectId,
datacenter=self._zone) | Connect to GCE |
18,596 | def set_edist_powerlaw_gamma(self, gmin, gmax, delta, ne_cc):
if not (gmin >= 1):
raise ValueError('must have gmin >= 1; got %r' % (gmin,))
if not (gmax >= gmin):
raise ValueError('must have gmax >= gmin; got %r, %r' % (gmax, gmin))
if not (delta >= 0):
raise ValueError('must have delta >= 0; got %r' % (delta,))
if not (ne_cc >= 0):
raise ValueError('must have ne_cc >= 0; got %r' % (ne_cc,))
self.in_vals[IN_VAL_EDIST] = EDIST_PLG
self.in_vals[IN_VAL_EMIN] = (gmin - 1) * E0_MEV
self.in_vals[IN_VAL_EMAX] = (gmax - 1) * E0_MEV
self.in_vals[IN_VAL_DELTA1] = delta
self.in_vals[IN_VAL_NB] = ne_cc
return self | Set the energy distribution function to a power law in the Lorentz factor
**Call signature**
*gmin*
The minimum Lorentz factor of the distribution
*gmax*
The maximum Lorentz factor of the distribution
*delta*
The power-law index of the distribution
*ne_cc*
The number density of energetic electrons, in cm^-3.
Returns
*self* for convenience in chaining. |
18,597 | def apply_filters(query, args):
pre_joins = []
for querystring_key, filter_value in args.items(multi=True):
if querystring_key in filter_registry:
cls_inst = filter_registry[querystring_key]
query = cls_inst.apply_filter(query, args, pre_joins)
elif querystring_key in PaginationKeys._value_list:
pass
else:
raise InvalidQueryString(querystring_key, filter_value)
return query | Apply all QueryFilters, validating the querystring in the process. |
18,598 | def decaying(start, stop, decay):
def clip(value):
return max(value, stop) if (start > stop) else min(value, stop)
nr_upd = 1.
while True:
yield clip(start * 1./(1. + decay * nr_upd))
nr_upd += 1 | Yield an infinite series of linearly decaying values. |
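With arbitrary hyperparameters, the series starts at roughly `start` and is clipped so it never passes `stop`:

```python
from itertools import islice

def decaying(start, stop, decay):
    def clip(value):
        return max(value, stop) if (start > stop) else min(value, stop)
    nr_upd = 1.
    while True:
        yield clip(start * 1. / (1. + decay * nr_upd))
        nr_upd += 1

print(list(islice(decaying(0.1, 0.01, 1e-4), 3)))
# [0.0999900..., 0.0999800..., 0.0999700...]
```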
18,599 | async def lock(self, container = None):
"Wait for lock acquire"
if container is None:
container = RoutineContainer.get_container(self.scheduler)
if self.locked:
pass
elif self.lockroutine:
await LockedEvent.createMatcher(self)
else:
await container.wait_for_send(LockEvent(self.context, self.key, self))
self.locked = True | Wait for lock acquire |