Unnamed: 0 (int64, 0–389k) | code (stringlengths 26–79.6k) | docstring (stringlengths 1–46.9k) |
---|---|---|
4,700 | def add_section(self, section_name: str) -> None:
section_name = self._whitespace_re.sub(, section_name)
self._sections.append(section_name)
setattr(self, section_name, Section()) | Add a section to the :class:`SampleSheet`. |
4,701 | def load(self, options):
if options.autoblend:
self.autoblend()
for child in self.children:
child.build()
for child in self.children:
child.open_imports(options)
for child in self.children:
child.dereference()
log.debug(, self)
merged = self.merge()
log.debug(, merged)
return merged | Load the schema objects for the root nodes.
- de-references schemas
- merge schemas
@param options: An options dictionary.
@type options: L{options.Options}
@return: The merged schema.
@rtype: L{Schema} |
4,702 | def _set_lookup_prop(self, result_data):
if self._lookup_prop:
return
if result_data.get("id"):
self._lookup_prop = "id"
elif result_data.get("title"):
self._lookup_prop = "name"
else:
return
logger.debug("Setting lookup method for xunit to `%s`", self._lookup_prop) | Set lookup property based on processed testcases if not configured. |
4,703 | def _compute(self, arrays, dates, assets, mask):
return masked_rankdata_2d(
arrays[0],
mask,
self.inputs[0].missing_value,
self._method,
self._ascending,
) | For each row in the input, compute a like-shaped array of per-row
ranks. |
4,704 | def _is_type(self, instance, type):
if type not in self._types:
raise UnknownType(type)
type = self._types[type]
if isinstance(instance, bool):
type = _flatten(type)
if int in type and bool not in type:
return False
return isinstance(instance, type) | Check if an ``instance`` is of the provided (JSON Schema) ``type``. |
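The bool special-case in the row above exists because Python's `bool` is a subclass of `int`, so `isinstance(True, int)` is `True`. A minimal self-contained sketch of the same idea (not the validator above, just an illustration):

```python
# Sketch: map JSON Schema type names to Python types, but keep True/False
# from validating as "integer", since isinstance(True, int) is True in Python.
py_types = {"integer": int, "boolean": bool, "string": str}

def is_json_type(instance, json_type):
    py_type = py_types[json_type]
    if isinstance(instance, bool) and py_type is int:
        return False
    return isinstance(instance, py_type)

assert is_json_type(3, "integer")
assert not is_json_type(True, "integer")   # bool is excluded on purpose
assert is_json_type(True, "boolean")
```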
4,705 | def build(self):
if self._category_text_iter is None:
raise CategoryTextIterNotSetError()
nlp = self.get_nlp()
category_document_iter = (
(category, self._clean_function(raw_text))
for category, raw_text
in self._category_text_iter
)
term_doc_matrix = self._build_from_category_spacy_doc_iter(
(
(category, nlp(text))
for (category, text)
in category_document_iter
if text.strip() != ''
)
)
return term_doc_matrix | Generate a TermDocMatrix from data in parameters.
Returns
----------
term_doc_matrix : TermDocMatrix
The object that this factory class builds. |
4,706 | def definition_name(cls):
outer_definition_name = cls.outer_definition_name()
if outer_definition_name is None:
return six.text_type(cls.__name__)
return u'%s.%s' % (outer_definition_name, cls.__name__) | Helper method for creating definition name.
Names will be generated to include the class's package name,
scope (if the class is nested in another definition) and class
name.
By default, the package name for a definition is derived from
its module name. However, this value can be overridden by
placing a 'package' attribute in the module that contains the
definition class. For example:
package = 'some.alternate.package'
class MyMessage(Message):
...
>>> MyMessage.definition_name()
some.alternate.package.MyMessage
Returns:
Dot-separated fully qualified name of definition. |
4,707 | def __dfs(self, start, weights, depth_limit):
adj = self._adj
stack = [(start, depth_limit, iter(sorted(adj[start], key=weights)))]
visited = {start}
disconnected = defaultdict(list)
edges = defaultdict(list)
while stack:
parent, depth_now, children = stack[-1]
try:
child = next(children)
except StopIteration:
stack.pop()
else:
if child not in visited:
edges[parent].append(child)
visited.add(child)
if depth_now > 1:
front = adj[child].keys() - {parent}
if front:
stack.append((child, depth_now - 1, iter(sorted(front, key=weights))))
elif child not in disconnected:
disconnected[parent].append(child)
return visited, edges, disconnected | modified NX dfs |
4,708 | def bind_kernel(**kwargs):
from IPython.zmq.ipkernel import IPKernelApp
from IPython.parallel.apps.ipengineapp import IPEngineApp
if IPKernelApp.initialized() and isinstance(IPKernelApp._instance, IPKernelApp):
return
if IPEngineApp.initialized():
try:
app = IPEngineApp.instance()
except MultipleInstanceError:
pass
else:
return app.bind_kernel(**kwargs)
raise RuntimeError("bind_kernel be called from an IPEngineApp instance") | Bind an Engine's Kernel to be used as a full IPython kernel.
This allows a running Engine to be used simultaneously as a full IPython kernel
with the QtConsole or other frontends.
This function returns immediately. |
4,709 | def predict_mhci_binding(job, peptfile, allele, peplen, univ_options, mhci_options):
work_dir = os.getcwd()
input_files = {
: peptfile}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
peptides = read_peptide_file(os.path.join(os.getcwd(), ))
if not peptides:
return job.fileStore.writeGlobalFile(job.fileStore.getLocalTempFile())
parameters = [mhci_options[],
allele,
peplen,
input_files[]]
with open(.join([work_dir, ]), ) as predfile:
docker_call(tool=, tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options[], outfile=predfile, interactive=True,
tool_version=mhci_options[])
output_file = job.fileStore.writeGlobalFile(predfile.name)
job.fileStore.logToMaster(
% (univ_options[], allele, peplen))
return output_file | Predict binding for each peptide in `peptfile` to `allele` using the IEDB mhci binding
prediction tool.
:param toil.fileStore.FileID peptfile: The input peptide fasta
:param str allele: Allele to predict binding against
:param str peplen: Length of peptides to process
:param dict univ_options: Dict of universal options used by almost all tools
:param dict mhci_options: Options specific to mhci binding prediction
:return: fsID for file containing the predictions
:rtype: toil.fileStore.FileID |
4,710 | def _set_current_page(self, current_page, last_page):
if not current_page:
current_page = self.resolve_current_page()
if current_page > last_page:
if last_page > 0:
return last_page
return 1
if not self._is_valid_page_number(current_page):
return 1
return current_page | Get the current page for the request.
:param current_page: The current page of results
:type current_page: int
:param last_page: The last page of results
:type last_page: int
:rtype: int |
4,711 | def on_service_modify(self, svc_ref, old_properties):
with self._lock:
try:
service = self.services[svc_ref]
except KeyError:
return self.on_service_arrival(svc_ref)
else:
self._ipopo_instance.update(
self, service, svc_ref, old_properties
)
return None | Called when a service has been modified in the framework
:param svc_ref: A service reference
:param old_properties: Previous properties values
:return: A tuple (added, (service, reference)) if the dependency has
been changed, else None |
4,712 | def cast_to_python(self, value):
if value is not None:
value = UserGroup(self._swimlane, value)
return value | Convert JSON definition to UserGroup object |
4,713 | def to_flat_dict(self, **kwargs):
if kwargs:
return self.filter(**kwargs).to_flat_dict()
return {param.uniquetwig: param for param in self._params} | Convert the :class:`ParameterSet` to a flat dictionary, with keys being
uniquetwigs to access the parameter and values being the :class:`Parameter`
objects themselves.
:return: dict of :class:`Parameter`s |
4,714 | def _file_path(self, uid):
file_name = % (uid)
return os.path.join(self.dayone_journal_path, file_name) | Create and return full file path for DayOne entry |
4,715 | def lambda_not_found_response(*args):
response_data = jsonify(ServiceErrorResponses._NO_LAMBDA_INTEGRATION)
return make_response(response_data, ServiceErrorResponses.HTTP_STATUS_CODE_502) | Constructs a Flask Response for when a Lambda function is not found for an endpoint
:return: a Flask Response |
4,716 | def add_extensions(self, extensions):
for ext in extensions:
if not isinstance(ext, X509Extension):
raise ValueError("One of the elements is not an X509Extension")
add_result = _lib.X509_add_ext(self._x509, ext._extension, -1)
if not add_result:
_raise_current_error() | Add extensions to the certificate.
:param extensions: The extensions to add.
:type extensions: An iterable of :py:class:`X509Extension` objects.
:return: ``None`` |
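If this is the pyOpenSSL-style `X509` wrapper that the `X509Extension` check suggests, usage might look like the following hedged sketch (library and extension values are assumptions, not taken from the row above):

```python
# Hedged usage sketch, assuming pyOpenSSL's crypto module.
from OpenSSL import crypto

cert = crypto.X509()
cert.add_extensions([
    crypto.X509Extension(b"basicConstraints", True, b"CA:TRUE"),
    crypto.X509Extension(b"keyUsage", True, b"keyCertSign, cRLSign"),
])
```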
4,717 | def user_events(self, user_obj=None):
query = dict(user_id=user_obj[]) if user_obj else dict()
return self.event_collection.find(query) | Fetch all events by a specific user. |
4,718 | def getPrefixDirectories(self, engineRoot, delimiter=):
return delimiter.join(self.resolveRoot(self.prefixDirs, engineRoot)) | Returns the list of prefix directories for this library, joined using the specified delimiter |
4,719 | def configureCredentials(self, CAFilePath, KeyPath="", CertificatePath=""):
self._AWSIoTMQTTClient.configureCredentials(CAFilePath, KeyPath, CertificatePath) | **Description**
Used to configure the rootCA, private key and certificate files. Should be called before connect. This is a public
facing API inherited by application level public clients.
**Syntax**
.. code:: python
myShadowClient.clearLastWill("PATH/TO/ROOT_CA", "PATH/TO/PRIVATE_KEY", "PATH/TO/CERTIFICATE")
myJobsClient.clearLastWill("PATH/TO/ROOT_CA", "PATH/TO/PRIVATE_KEY", "PATH/TO/CERTIFICATE")
**Parameters**
*CAFilePath* - Path to read the root CA file. Required for all connection types.
*KeyPath* - Path to read the private key. Required for X.509 certificate based connection.
*CertificatePath* - Path to read the certificate. Required for X.509 certificate based connection.
**Returns**
None |
4,720 | def proto_IC_ramp_gain(abf=exampleABF):
standard_inspect(abf)
swhlab.ap.detect(abf)
swhlab.ap.check_AP_raw(abf)
swhlab.plot.save(abf,tag="01-raw",resize=False)
swhlab.ap.check_AP_deriv(abf)
swhlab.plot.save(abf,tag="02-deriv")
swhlab.ap.check_AP_phase(abf)
swhlab.plot.save(abf,tag="03-phase")
swhlab.ap.plot_values(abf,,continuous=True)
pylab.subplot(211)
pylab.axhline(40,color=,lw=2,ls="--",alpha=.2)
swhlab.plot.save(abf,tag=)
swhlab.ap.plot_values(abf,,continuous=True)
pylab.subplot(211)
pylab.axhline(-100,color=,lw=2,ls="--",alpha=.2)
swhlab.plot.save(abf,tag=) | increasing ramps in (?) pA steps. |
4,721 | def nlargest(self, n=None):
if n is None:
return sorted(self.counts(), key=itemgetter(1), reverse=True)
else:
return heapq.nlargest(n, self.counts(), key=itemgetter(1)) | List the n most common elements and their counts.
List is from the most
common to the least. If n is None, all element counts are listed.
Run time should be O(m log m) where m is len(self)
Args:
n (int): The number of elements to return |
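The same most-common-first pattern can be reproduced with the standard library alone; a small self-contained sketch assuming `counts()` yields `(element, count)` pairs like `collections.Counter.items()` does:

```python
# Sketch of the same pattern with heapq and itemgetter.
import heapq
from collections import Counter
from operator import itemgetter

counts = Counter("abracadabra").items()                  # (element, count) pairs
print(heapq.nlargest(2, counts, key=itemgetter(1)))      # [('a', 5), ('b', 2)]
print(sorted(counts, key=itemgetter(1), reverse=True))   # full list when n is None
```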
4,722 | def _fset(self, name):
def fset(inst, value):
value = self.fparse(inst, value)
setattr(inst, name, value)
return fset | Build and return the property's *fset* method for the member defined by *name*. |
4,723 | def reverse(self):
enabled = self.lib.iperf_get_test_reverse(self._test)
if enabled:
self._reverse = True
else:
self._reverse = False
return self._reverse | Toggles direction of test
:rtype: bool |
4,724 | def write(self, __text: str) -> None:
if __text == os.linesep:
self.handle.write(__text)
else:
frame = inspect.currentframe()
if frame is None:
filename =
lineno = 0
else:
outer = frame.f_back
filename = outer.f_code.co_filename.split(os.sep)[-1]
lineno = outer.f_lineno
self.handle.write(.format(filename[-15:],
lineno, __text)) | Write text to the debug stream.
Args:
__text: Text to write |
4,725 | def parse_safari (url_data):
from ..bookmarks.safari import parse_bookmark_data
for url, name in parse_bookmark_data(url_data.get_content()):
url_data.add_url(url, name=name) | Parse a Safari bookmark file. |
4,726 | def _add_q(self, q_object):
self._criteria = self._criteria._combine(q_object, q_object.connector) | Add a Q-object to the current filter. |
4,727 | def _self_event(self, event_name, cmd, *pargs, **kwargs):
if hasattr(self, event_name):
getattr(self, event_name)(cmd, *pargs, **kwargs) | Call self event |
4,728 | def t_BIN_STRING(self, t):
r"'[01]*'[bB]"
value = t.value[1:-2]
while value and value[0] == '0' and len(value) % 8:
value = value[1:]
return t | r'\'[01]*\'[bB] |
4,729 | def constructor(self, random, args):
self._use_ants = True
candidate = []
while len(candidate) < len(self.components):
feasible_components = []
if len(candidate) == 0:
feasible_components = self.components
else:
remaining_capacity = self.capacity - sum([c.element for c in candidate])
if self.duplicates:
feasible_components = [c for c in self.components if c.element <= remaining_capacity]
else:
feasible_components = [c for c in self.components if c not in candidate and c.element <= remaining_capacity]
if len(feasible_components) == 0:
break
else:
if random.random() <= self.bias:
next_component = max(feasible_components)
else:
next_component = selectors.fitness_proportionate_selection(random, feasible_components, {'num_selected': 1})[0]
candidate.append(next_component)
return candidate | Return a candidate solution for an ant colony optimization. |
4,730 | def parse_int(value, base_unit=None):
convert = {
'kB': {'kB': 1, 'MB': 1024, 'GB': 1024 * 1024, 'TB': 1024 * 1024 * 1024},
'ms': {'ms': 1, 's': 1000, 'min': 1000 * 60, 'h': 1000 * 60 * 60, 'd': 1000 * 60 * 60 * 24},
's': {'ms': -1000, 's': 1, 'min': 60, 'h': 60 * 60, 'd': 60 * 60 * 24},
'min': {'ms': -1000 * 60, 's': -60, 'min': 1, 'h': 60, 'd': 60 * 24}
}
value, unit = strtol(value)
if value is not None:
if not unit:
return value
if base_unit and base_unit not in convert:
base_value, base_unit = strtol(base_unit, False)
else:
base_value = 1
if base_unit in convert and unit in convert[base_unit]:
multiplier = convert[base_unit][unit]
if multiplier < 0:
value /= -multiplier
else:
value *= multiplier
return int(value/base_value) | >>> parse_int('1') == 1
True
>>> parse_int(' 0x400 MB ', '16384kB') == 64
True
>>> parse_int('1MB', 'kB') == 1024
True
>>> parse_int('1000 ms', 's') == 1
True
>>> parse_int('1GB', 'MB') is None
True
>>> parse_int(0) == 0
True |
4,731 | def match_phase(qpi, model, n0, r0, c0=None, pha_offset=0,
fix_pha_offset=False, nrel=.10, rrel=.05, crel=.05,
stop_dn=.0005, stop_dr=.0010, stop_dc=1, min_iter=3,
max_iter=100, ret_center=False, ret_pha_offset=False,
ret_qpi=False, ret_num_iter=False, ret_interim=False,
verbose=0, verbose_h5path="./match_phase_error.h5"):
if not isinstance(qpi, qpimage.QPImage):
raise ValueError("`qpi` must be instance of `QPImage`!")
for var in ["medium index", "pixel size", "wavelength"]:
if var not in qpi:
raise ValueError("meta data not defined in `qpi`!")
if c0 is None:
c0 = [qpi.shape[0] / 2, qpi.shape[1] / 2]
model_kwargs = {"radius": r0,
"sphere_index": n0,
"medium_index": qpi["medium index"],
"wavelength": qpi["wavelength"],
"pixel_size": qpi["pixel size"],
"grid_size": qpi.shape,
"center": c0
}
spi = SpherePhaseInterpolator(model=model,
model_kwargs=model_kwargs,
pha_offset=pha_offset,
nrel=nrel,
rrel=rrel,
verbose=verbose)
recorder = []
interim = []
interim.append([0, spi.params])
phase = qpi.pha
range_ipol = 47
range_off = 13
dc = max(qpi["wavelength"], crel * r0) / qpi["pixel size"]
if verbose:
print("Starting phase fitting.")
ii = 0
message = None
if "identifier" in qpi:
ident = qpi["identifier"]
else:
ident = str(time.time())
while True:
if verbose >= 2:
export_phase_error_hdf5(h5path=verbose_h5path,
identifier=ident,
index=ii,
phase=phase,
mphase=spi.get_phase(),
model=model,
n0=n0,
r0=r0,
spi_params=spi.params)
ii += 1
r_old = spi.radius
n_old = spi.sphere_index
rs = np.linspace(
spi.range_r[0], spi.range_r[1], range_ipol, endpoint=True)
assert np.allclose(np.min(np.abs(rs - spi.radius)), 0)
lsqs = []
for ri in rs:
phasei = spi.get_phase(rintp=ri)
lsqs.append(sq_phase_diff(phase, phasei))
idr = np.argmin(lsqs)
spi.radius = rs[idr]
ns = np.linspace(
spi.range_n[0], spi.range_n[1], range_ipol, endpoint=True)
assert np.allclose(np.min(np.abs(ns - spi.sphere_index)), 0)
lsqs = []
for ni in ns:
phasei = spi.get_phase(nintp=ni)
lsqs.append(sq_phase_diff(phase, phasei))
idn = np.argmin(lsqs)
spi.sphere_index = ns[idn]
x = np.linspace(-dc, dc, range_off, endpoint=True)
assert np.allclose(np.min(np.abs(x)), 0)
xintp, yintp = np.meshgrid(x, x)
lsqs = []
for xoff, yoff in zip(xintp.flatten(), yintp.flatten()):
phasei = spi.get_phase(delta_offset_x=xoff, delta_offset_y=yoff)
err = sq_phase_diff(phase, phasei)
lsqs.append(err)
idc = np.argmin(lsqs)
deltax = xintp.flatten()[idc]
deltay = yintp.flatten()[idc]
spi.posx_offset = spi.posx_offset - deltax
spi.posy_offset = spi.posy_offset - deltay
if not fix_pha_offset:
cabphase = spi.get_phase() - spi.pha_offset
cabphase[np.abs(cabphase) > .01 * np.abs(cabphase).max()] = np.nan
cb_border = max(5, min(cabphase.shape) // 5)
cabphase[cb_border:-cb_border, cb_border:-cb_border] = np.nan
phai_offset = np.nanmean(cabphase - phase)
if np.isnan(phai_offset):
phai_offset = 0
spi.pha_offset = - phai_offset
if verbose == 1:
print("Iteration {}: n={:.5e}, r={:.5e}m".format(ii,
spi.sphere_index,
spi.radius))
elif verbose >= 2:
print("Iteration {}: {}".format(ii, spi.params))
interim.append([ii, spi.params])
if (idn > range_ipol / 2 - range_ipol / 10 and
idn < range_ipol / 2 + range_ipol / 10):
spi.dn /= 2
if verbose >= 2:
print("Halved search interval: spi.dn={:.8f}".format(spi.dn))
if (idr > range_ipol / 2 - range_ipol / 10 and
idr < range_ipol / 2 + range_ipol / 10):
spi.dr /= 2
if verbose >= 2:
print("Halved search interval: spi.dr={:.8f}".format(spi.dr))
if deltax**2 + deltay**2 < dc**2:
dc /= 2
if verbose >= 2:
print("Halved search interval: dc={:.8f}".format(dc))
if ii < min_iter:
if verbose:
print("Keep iterating because `min_iter`={}.".format(min_iter))
continue
elif ii >= max_iter:
ii *= -1
if verbose:
print("Stopping iteration: reached `max_iter`={}".format(
max_iter))
message = "fail, reached maximum number of iterations"
break
if stop_dc:
curoff = np.sqrt(deltax**2 + deltay**2)
if curoff > stop_dc:
if verbose:
print("Keep iterating because center location moved by "
+ "{} > `stop_dc`={}.".format(curoff, stop_dc))
continue
if (abs(spi.radius - r_old) / spi.radius < stop_dr and
abs(spi.sphere_index - n_old) < stop_dn):
if verbose:
print("Stopping iteration: `stop_dr` and `stop_dn` satisfied")
message = "success, satisfied stopping criteria"
break
thisresult = (spi.sphere_index, spi.radius)
recorder.append(thisresult)
if recorder.count(thisresult) > 2:
ii *= -1
warnings.warn("Aborting stuck iteration for {}!".format(qpi))
if verbose:
print("Stop iteration: encountered same parameters twice.")
message = "fail, same parameters encountered twice"
break
if verbose >= 2:
infostring = ""
if not abs(spi.sphere_index - n_old) < stop_dn:
infostring += " delta_n = {} > {}".format(
abs(spi.sphere_index - n_old), stop_dn)
if not abs(spi.radius - r_old) / spi.radius < stop_dr:
infostring += " delta_r = {} > {}".format(
abs(spi.radius - r_old) / spi.radius, stop_dr)
print("Keep iterating: {} (no convergence)".format(infostring))
if verbose:
print("Number of iterations: {}".format(ii))
print("Stopping rationale: {}".format(message))
if verbose >= 2:
export_phase_error_hdf5(h5path=verbose_h5path,
identifier=ident,
index=ii,
phase=phase,
mphase=spi.get_phase(),
model=model,
n0=n0,
r0=r0,
spi_params=spi.params)
res = [spi.sphere_index, spi.radius]
if ret_center:
res += [[spi.posx_offset, spi.posy_offset]]
if ret_pha_offset:
res += [spi.pha_offset]
if ret_qpi:
res += [spi.compute_qpi()]
if ret_num_iter:
res += [ii]
if ret_interim:
res += [interim]
return res | Fit a scattering model to a quantitative phase image
Parameters
----------
qpi: qpimage.QPImage
QPI data to fit (e.g. experimental data)
model: str
Name of the light-scattering model
(see :const:`qpsphere.models.available`)
n0: float
Initial refractive index of the sphere
r0: float
Initial radius of the sphere [m]
c0: tuple of (float, float)
Initial center position of the sphere in ndarray index
coordinates [px]; if set to `None` (default), the center
of the image is used.
pha_offset: float
Initial phase offset [rad]
fix_pha_offset: bool
If True, do not fit the phase offset `pha_offset`. The phase
offset is determined from the mean of all pixels whose absolute
phase is
- below 1% of the modeled phase and
- within a 5px or 20% border (depending on which is larger)
around the phase image.
nrel: float
Determines the border of the interpolation range for the
refractive index: [n-(n-nmed)*nrel, n+(n-nmed)*nrel]
with nmed=qpi["medium_index"] and, initially, n=n0.
rrel: float
Determines the border of the interpolation range for the
radius: [r*(1-rrel), r*(1+rrel)] with, initially, r=r0.
crel: float
Determines the border of the interpolation range for the
center position: [cxy - dc, cxy + dc] with the center
position (along x or y) cxy, and the interval radius dc
defined by dc=max(lambda, crel * r0) with the vacuum
wavelength lambda=qpi["wavelength"].
stop_dn: float
Stopping criterion for refractive index
stop_dr: float
Stopping criterion for radius
stop_dc: float
Stopping criterion for lateral offsets
min_iter: int
Minimum number of fitting iterations to perform
max_iter: int
Maximum number of fitting iterations to perform
ret_center: bool
If True, return the fitted center coordinates
ret_pha_offset: bool
If True, return the fitted phase offset
ret_qpi: bool
If True, return the final fit as a data set
ret_num_iter: bool
If True, return the number of iterations
ret_interim: bool
If True, return intermediate parameters of each iteration
verbose: int
Higher values increase verbosity
verbose_h5path: str
Path to hdf5 output file, created when `verbosity >= 2`
Returns
-------
n: float
Fitted refractive index
r: float
Fitted radius [m]
c: tuple of (float, float)
Only returned if `ret_center` is True
Center position of the sphere in ndarray index coordinates [px]
pha_offset: float
Only returned if `ret_pha_offset` is True
Fitted phase offset [rad]
qpi: qpimage.QPImage
Only returned if `ret_qpi` is True
Simulation using `model` with the final fit parameters
num_iter: int
Only returned if `ret_num_iter` is True
Number of iterations performed; negative number is
returned when iteration fails
interim: list
Only returned if `ret_interim` is True
Intermediate fitting parameters |
4,732 | def resizeEvent(self, event):
if not self.isMaximized() and not self.fullscreen_flag:
self.window_size = self.size()
QMainWindow.resizeEvent(self, event)
self.sig_resized.emit(event) | Reimplement Qt method |
4,733 | def save_dtrajs(self, prefix=, output_dir=,
output_format=, extension=):
r
clustering = self._chain[-1]
reader = self._chain[0]
from pyemma.coordinates.clustering.interface import AbstractClustering
assert isinstance(clustering, AbstractClustering)
trajfiles = None
if isinstance(reader, FeatureReader):
trajfiles = reader.filenames
clustering.save_dtrajs(
trajfiles, prefix, output_dir, output_format, extension) | r"""Saves calculated discrete trajectories. Filenames are taken from
given reader. If data comes from memory dtrajs are written to a default
filename.
Parameters
----------
prefix : str
prepend prefix to filenames.
output_dir : str (optional)
save files to this directory. Defaults to current working directory.
output_format : str
if format is 'ascii' dtrajs will be written as csv files, otherwise
they will be written as NumPy .npy files.
extension : str
file extension to append (eg. '.itraj') |
4,734 | def geoid(self):
if self.valuetype_class.is_geoid():
return self
for c in self.table.columns:
if c.parent == self.name and c.valuetype_class.is_geoid():
return c | Return first child of the column, or self that is marked as a geographic identifier |
4,735 | def reset_namespace(self):
self.shellwidget.reset_namespace(warning=self.reset_warning,
message=True) | Resets the namespace by removing all names defined by the user |
4,736 | def export_true_table():
tester_list = [
("inspect.isroutine", lambda v: inspect.isroutine(v)),
("inspect.isfunction", lambda v: inspect.isfunction(v)),
("inspect.ismethod", lambda v: inspect.ismethod(v)),
("isinstance.property", lambda v: isinstance(v, property)),
("isinstance.staticmethod", lambda v: isinstance(v, staticmethod)),
("isinstance.classmethod", lambda v: isinstance(v, classmethod)),
]
class_attr_value_paris = [
("attribute", MyClass.attribute),
("property_method", MyClass.property_method),
("regular_method", MyClass.regular_method),
("static_method", MyClass.static_method),
("class_method", MyClass.class_method),
("__dict__[]", Base.__dict__["static_method"]),
("__dict__[]", Base.__dict__["class_method"]),
]
myclass = MyClass()
instance_attr_value_paris = [
("attribute", myclass.attribute),
("property_method", myclass.property_method),
("regular_method", myclass.regular_method),
("static_method", myclass.static_method),
("class_method", MyClass.class_method),
]
print(inspect.getargspec(MyClass.regular_method))
print(inspect.getargspec(MyClass.static_method))
print(inspect.getargspec(MyClass.class_method))
print(inspect.getargspec(myclass.regular_method))
print(inspect.getargspec(myclass.static_method))
print(inspect.getargspec(myclass.class_method))
def create_true_table_dataframe(index_tester, column_attr):
df = pd.DataFrame()
for attr, value in column_attr:
col = list()
for name, tester in index_tester:
try:
if tester(value):
flag = 1
else:
flag = 0
except:
flag = -99
col.append(flag)
df[attr] = col
df.index = [name for name, _ in index_tester]
return df
version = "%s.%s" % (sys.version_info.major, sys.version_info.minor)
df = create_true_table_dataframe(tester_list, class_attr_value_paris)
df.to_csv("%s_class.csv" % version, index=True)
df = create_true_table_dataframe(tester_list, instance_attr_value_paris)
df.to_csv("%s_instance.csv" % version, index=True) | Export value, checker function output true table.
Help to organize thought.
klass.__dict__ only contains attributes defined in the class definition itself; attributes inherited from parent classes are not included. |
4,737 | def keep_recent_datasets(max_dataset_history, info=None):
history = settings.value(, [])
if isinstance(history, str):
history = [history]
if info is not None and info.filename is not None:
new_dataset = info.filename
if new_dataset in history:
lg.debug(new_dataset + )
history.remove(new_dataset)
if len(history) > max_dataset_history:
lg.debug( + history[-1])
history.pop()
lg.debug( + new_dataset + )
history.insert(0, new_dataset)
settings.setValue(, history)
return None
else:
return history | Keep track of the most recent recordings.
Parameters
----------
max_dataset_history : int
maximum number of datasets to remember
info : str, optional TODO
path to file
Returns
-------
list of str
paths to most recent datasets (only if you don't specify
new_dataset) |
4,738 | def set_referencepixel(self, pix):
assert len(pix) == len(self._coord["crpix"])
self._coord["crpix"] = pix[::-1] | Set the reference pixel of the given axis in this coordinate. |
4,739 | def get_default_gateway():
try:
with open("/proc/self/net/route") as routes:
for line in routes:
parts = line.split()
if '00000000' == parts[1]:
hip = parts[2]
if hip is not None and len(hip) == 8:
return "%i.%i.%i.%i" % (int(hip[6:8], 16), int(hip[4:6], 16), int(hip[2:4], 16), int(hip[0:2], 16))
except:
logger.warn("get_default_gateway: ", exc_info=True) | Attempts to read /proc/self/net/route to determine the default gateway in use.
:return: String - the ip address of the default gateway or None if not found/possible/non-existant |
4,740 | def stats (self, antnames):
nbyant = np.zeros (self.nants, dtype=np.int)
sum = np.zeros (self.nants, dtype=np.complex)
sumsq = np.zeros (self.nants)
q = np.abs (self.normvis - 1)
for i in range (self.nsamps):
i1, i2 = self.blidxs[i]
nbyant[i1] += 1
nbyant[i2] += 1
sum[i1] += q[i]
sum[i2] += q[i]
sumsq[i1] += q[i]**2
sumsq[i2] += q[i]**2
avg = sum / nbyant
std = np.sqrt (sumsq / nbyant - avg**2)
navg = 1. / np.median (avg)
nstd = 1. / np.median (std)
for i in range (self.nants):
print ( %
(i, antnames[i], nbyant[i], avg[i], std[i], avg[i] * navg, std[i] * nstd)) | XXX may be out of date. |
4,741 | def _finish_transaction_with_retry(self, command_name, explict_retry):
try:
return self._finish_transaction(command_name, explict_retry)
except ServerSelectionTimeoutError:
raise
except ConnectionFailure as exc:
try:
return self._finish_transaction(command_name, True)
except ServerSelectionTimeoutError:
raise exc
except OperationFailure as exc:
if exc.code not in _RETRYABLE_ERROR_CODES:
raise
try:
return self._finish_transaction(command_name, True)
except ServerSelectionTimeoutError:
raise exc | Run commit or abort with one retry after any retryable error.
:Parameters:
- `command_name`: Either "commitTransaction" or "abortTransaction".
- `explict_retry`: True when this is an explict commit retry attempt,
ie the application called session.commit_transaction() twice. |
4,742 | def get_label_at_address(self, address, offset = None):
if offset:
address = address + offset
module = self.get_name()
function = None
offset = address - self.get_base()
start = self.get_entry_point()
if start and start <= address:
function = "start"
offset = address - start
try:
symbol = self.get_symbol_at_address(address)
if symbol:
(SymbolName, SymbolAddress, SymbolSize) = symbol
new_offset = address - SymbolAddress
if new_offset <= offset:
function = SymbolName
offset = new_offset
except WindowsError:
pass
return _ModuleContainer.parse_label(module, function, offset) | Creates a label from the given memory address.
If the address belongs to the module, the label is made relative to
it's base address.
@type address: int
@param address: Memory address.
@type offset: None or int
@param offset: (Optional) Offset value.
@rtype: str
@return: Label pointing to the given address. |
def computeExpectations(self, A_n, output='averages', compute_uncertainty=True, uncertainty_method=None, warning_cutoff=1.0e-10, return_theta=False, useGeneral = False, state_dependent = False):
dims = len(np.shape(A_n))
N = self.N
K = self.K
if dims == 3:
print("expecting dim=1 or dim=2")
return None
if (useGeneral):
state_list = np.zeros([K,2],int)
if (state_dependent):
for k in range(K):
state_list[k,0] = k
state_list[k,1] = k
A_in = A_n
else:
A_in = np.zeros([1,N], dtype=np.float64)
if dims == 2:
A_n = kn_to_n(A_n, N_k=self.N_k)
A_in[0,:] = A_n
for k in range(K):
state_list[k,0] = 0
state_list[k,1] = k
general_results = self.computeGeneralExpectations(A_in, self.u_kn, state_list,
compute_uncertainty=compute_uncertainty,
uncertainty_method=uncertainty_method,
warning_cutoff=warning_cutoff,
return_theta=return_theta)
returns = []
if output == 'averages':
returns.append(general_results[0])
if compute_uncertainty:
indices = np.eye(K,dtype=bool)
returns.append(np.sqrt(general_results[1][indices]))
if output == 'differences':
A_im = np.matrix(general_results[0])
A_ij = A_im - A_im.transpose()
returns.append(np.array(A_ij))
if compute_uncertainty:
return np.sqrt(general_results[1])
if return_theta:
returns.append(general_results[2])
else:
if dims == 2:
A_n = kn_to_n(A_n, N_k=self.N_k)
A_n = np.array(A_n, np.float64)
Log_W_nk = np.zeros([N, K * 2], np.float64)
N_k = np.zeros([K * 2], np.int32)
f_k = np.zeros([K], np.float64)
Log_W_nk[:, 0:K] = self.Log_W_nk
N_k[0:K] = self.N_k
A_i = np.zeros([K], np.float64)
A_min = np.min(A_n)
A_n = A_n - (A_min - 1)
for l in range(K):
Log_W_nk[:, K + l] = np.log(A_n) + self.Log_W_nk[:, l]
f_k[l] = -_logsum(Log_W_nk[:, K + l])
Log_W_nk[:, K + l] += f_k[l]
A_i[l] = np.exp(-f_k[l])
if compute_uncertainty or return_theta:
Theta_ij = self._computeAsymptoticCovarianceMatrix(
np.exp(Log_W_nk), N_k, method=uncertainty_method)
returns = []
if output == 'averages':
if compute_uncertainty:
dA_i = np.zeros([K], np.float64)
for k in range(0, K):
dA_i[k] = np.abs(A_i[k]) * np.sqrt(
Theta_ij[K + k, K + k] + Theta_ij[k, k] - 2.0 * Theta_ij[k, K + k])
A_i += (A_min - 1)
returns.append(np.array(A_i))
if compute_uncertainty:
returns.append(np.array(dA_i))
if output == 'differences':
A_im = np.matrix(A_i)
A_ij = A_im - A_im.transpose()
returns.append(np.array(A_ij))
if compute_uncertainty:
dA_ij = np.zeros([K, K], dtype=np.float64)
for i in range(0, K):
for j in range(0, K):
try:
dA_ij[i, j] = np.sqrt(
+ A_i[i] * Theta_ij[i, i] * A_i[i]
- A_i[i] * Theta_ij[i, j] * A_i[j]
- A_i[i] * Theta_ij[i, K + i] * A_i[i]
+ A_i[i] * Theta_ij[i, K + j] * A_i[j]
- A_i[j] * Theta_ij[j, i] * A_i[i]
+ A_i[j] * Theta_ij[j, j] * A_i[j]
+ A_i[j] * Theta_ij[j, K + i] * A_i[i]
- A_i[j] * Theta_ij[j, K + j] * A_i[j]
- A_i[i] * Theta_ij[K + i, i] * A_i[i]
+ A_i[i] * Theta_ij[K + i, j] * A_i[j]
+ A_i[i] * Theta_ij[K + i, K + i] * A_i[i]
- A_i[i] * Theta_ij[K + i, K + j] * A_i[j]
+ A_i[j] * Theta_ij[K + j, i] * A_i[i]
- A_i[j] * Theta_ij[K + j, j] * A_i[j]
- A_i[j] * Theta_ij[K + j, K + i] * A_i[i]
+ A_i[j] * Theta_ij[K + j, K + j] * A_i[j]
)
except:
dA_ij[i, j] = 0.0
returns.append(dA_ij)
if return_theta:
returns.append(Theta_ij)
return returns | Compute the expectation of an observable of a phase space function.
Compute the expectation of an observable of phase space
function A(x) at all states where potentials are generated,
including states for which no samples were drawn.
We assume observables are not a function of the state. u is not
an observable -- it changes depending on the state. u_k is an
observable; the energy of state k does not depend on the
state. To compute the estimators of the energy at all K
states, use . . .
Parameters
----------
A_n : np.ndarray, float
A_n (N_max np float64 array) - A_n[n] = A(x_n)
output : string, optional
Either output averages, and uncertainties, or output a matrix of differences, with uncertainties.
compute_uncertainty : bool, optional
If False, the uncertainties will not be computed (default: True)
uncertainty_method : string, optional
Choice of method used to compute asymptotic covariance method,
or None to use default See help for computeAsymptoticCovarianceMatrix()
for more information on various methods. (default: None)
warning_cutoff : float, optional
Warn if squared-uncertainty is negative and larger in magnitude than this number (default: 1.0e-10)
return_theta : bool, optional
Whether or not to return the theta matrix. Can be useful for complicated differences.
useGeneral: bool, whether to use the GeneralExpectations formalism = False,
state_dependent: bool, whether the expectations are state-dependent.
Returns
-------
A : np.ndarray, float
if output is 'averages'
A_i (K np float64 array) - A_i[i] is the estimate for the expectation of A(x) for state i.
if output is 'differences'
dA : np.ndarray, float
dA_i (K np float64 array) - dA_i[i] is uncertainty estimate (one standard deviation) for A_i[i]
or
dA_ij (K np float64 array) - dA_ij[i,j] is uncertainty estimate (one standard deviation) for the difference in A beteen i and j
Notes
-----
The reported statistical uncertainty should, in the asymptotic limit,
reflect one standard deviation for the normal distribution of the estimate.
The true expectation should fall within the interval [-dA, +dA] centered on the estimate 68% of the time, and within
the interval [-2 dA, +2 dA] centered on the estimate 95% of the time.
This will break down in cases where the number of samples is not large enough to reach the asymptotic normal limit.
This 'breakdown' can be exacerbated by the computation of observables like indicator functions for histograms that are sparsely populated.
References
----------
See Section IV of [1].
Examples
--------
>>> from pymbar import testsystems
>>> (x_n, u_kn, N_k, s_n) = testsystems.HarmonicOscillatorsTestCase().sample(mode='u_kn')
>>> mbar = MBAR(u_kn, N_k)
>>> A_n = x_n
>>> (A_ij, dA_ij) = mbar.computeExpectations(A_n)
>>> A_n = u_kn[0,:]
>>> (A_ij, dA_ij) = mbar.computeExpectations(A_n, output='differences') |
4,744 | def _trim_text(text, max_width):
width = get_cwidth(text)
if width > max_width:
if len(text) == width:
trimmed_text = (text[:max(1, max_width-3)] + '...')[:max_width]
return trimmed_text, len(trimmed_text)
else:
trimmed_text = ''
for c in text:
if get_cwidth(trimmed_text + c) <= max_width - 3:
trimmed_text += c
trimmed_text += '...'
return (trimmed_text, get_cwidth(trimmed_text))
else:
return text, width | Trim the text to `max_width`, append dots when the text is too long.
Returns (text, width) tuple. |
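For plain ASCII, where display width equals `len(text)`, the same contract reduces to a few lines. A simplified self-contained sketch (not the implementation above, which measures true cell width):

```python
# Sketch assuming display width == len(text) (pure ASCII).
def trim_text(text, max_width):
    if len(text) <= max_width:
        return text, len(text)
    trimmed = (text[:max(1, max_width - 3)] + "...")[:max_width]
    return trimmed, len(trimmed)

print(trim_text("hello world", 8))   # ('hello...', 8)
print(trim_text("short", 8))         # ('short', 5)
```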
4,745 | def _close_list(self):
list_type = self.current_parent_element[][]
tag = LIST_TYPES[list_type]
html = .format(
t=tag
)
self.cleaned_html += html
self.current_parent_element[] =
self.current_parent_element[] = {} | Add a closing list tag corresponding to the currently open
list found in current_parent_element. |
4,746 | def padding(self, px):
if not isinstance(px, list):
px = [px] * 4
x = max(0, self.x - px[3])
y = max(0, self.y - px[0])
x2 = self.x + self.width + px[1]
y2 = self.y + self.height + px[2]
return Box.from_xy(x, y, x2, y2) | Add padding around four sides of box
:param px: padding value in pixels.
Can be an array in the format of [top right bottom left] or single value.
:return: New padding added box |
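A self-contained sketch of the same arithmetic using a plain `(x, y, width, height)` tuple in place of the `Box` class (which is assumed, not shown above):

```python
# Sketch: px is either a single value or [top, right, bottom, left], as documented.
def pad_box(box, px):
    if not isinstance(px, list):
        px = [px] * 4
    x, y, w, h = box
    x1, y1 = max(0, x - px[3]), max(0, y - px[0])
    x2, y2 = x + w + px[1], y + h + px[2]
    return (x1, y1, x2 - x1, y2 - y1)

print(pad_box((10, 10, 100, 50), 5))             # (5, 5, 110, 60)
print(pad_box((10, 10, 100, 50), [1, 2, 3, 4]))  # (6, 9, 106, 54)
```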
4,747 | def prefix_keys(self, prefix, strip_prefix=False):
keys = self.keys(key_from=prefix)
start = 0
if strip_prefix:
start = len(prefix)
for key in keys:
if not key.startswith(prefix):
break
yield key[start:] | Get all keys that begin with ``prefix``.
:param prefix: Lexical prefix for keys to search.
:type prefix: bytes
:param strip_prefix: True to strip the prefix from yielded items.
:type strip_prefix: bool
:yields: All keys in the store that begin with ``prefix``. |
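The early exit in the loop above works because the underlying key iteration is lexically ordered. A self-contained sketch of the same scan over a sorted list:

```python
# Sketch: bisect stands in for keys(key_from=prefix); stop at the first non-matching key.
import bisect

keys = sorted([b"user:alice", b"user:bob", b"video:1"])
prefix = b"user:"
for key in keys[bisect.bisect_left(keys, prefix):]:
    if not key.startswith(prefix):
        break
    print(key[len(prefix):])   # b'alice', b'bob'
```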
4,748 | def _validate_data(self):
msg = "Error! Expected {} timestamps, found {}.".format(
len(self._data_points), len(self._timestamps))
if len(self._data_points) != len(self._timestamps):
raise MonsoonError(msg) | Verifies that the data points contained in the class are valid. |
4,749 | def list_buckets(self):
method = 'GET'
url = get_target_url(self._endpoint_url)
headers = {'User-Agent': self._user_agent}
region = 'us-east-1'
if self._region:
region = self._region
headers = sign_v4(method, url, region,
headers, self._access_key,
self._secret_key,
self._session_token,
None)
response = self._http.urlopen(method, url,
body=None,
headers=headers)
if self._trace_output_stream:
dump_http(method, url, headers, response,
self._trace_output_stream)
if response.status != 200:
raise ResponseError(response, method).get_exception()
try:
return parse_list_buckets(response.data)
except InvalidXMLError:
if self._endpoint_url.endswith("s3.amazonaws.com") and (not self._access_key or not self._secret_key):
raise AccessDenied(response) | List all buckets owned by the user.
Example:
bucket_list = minio.list_buckets()
for bucket in bucket_list:
print(bucket.name, bucket.created_date)
:return: An iterator of buckets owned by the current user. |
4,750 | def transform_26_27(inst, new_inst, i, n, offset,
instructions, new_asm):
if inst.opname in ('JUMP_IF_FALSE', 'JUMP_IF_TRUE'):
i += 1
assert i < n
assert instructions[i].opname == 'POP_TOP'
new_inst.offset = offset
new_inst.opname = (
'POP_JUMP_IF_FALSE' if inst.opname == 'JUMP_IF_FALSE' else 'POP_JUMP_IF_TRUE'
)
new_asm.backpatch[-1].remove(inst)
new_inst.arg = % (inst.offset + inst.arg + 3)
new_asm.backpatch[-1].add(new_inst)
else:
xlate26_27(new_inst)
return xdis.op_size(new_inst.opcode, opcode_27) | Change JUMP_IF_FALSE and JUMP_IF_TRUE to
POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE |
4,751 | def needs_confirmation(self):
if EMAIL_CONFIRMATION:
self.is_active = False
self.save()
return True
else:
return False | Set is_active to False if email confirmation is needed. |
4,752 | def init(self, back=None):
back = self.backends(back)
for fsb in back:
fstr = .format(fsb)
if fstr in self.servers:
self.servers[fstr]() | Initialize the backend, only do so if the fs supports an init function |
4,753 | def symbolic_ref(cwd,
ref,
value=None,
opts='',
git_opts='',
user=None,
password=None,
ignore_retcode=False,
output_encoding=None):
cwd = _expand_path(cwd, user)
command = ['git'] + _format_git_opts(git_opts)
command.append('symbolic-ref')
opts = _format_opts(opts)
if value is not None and any(x in opts for x in ('-d', '--delete')):
raise SaltInvocationError(
)
command.extend(opts)
command.append(ref)
if value:
command.extend(value)
return _git_run(command,
cwd=cwd,
user=user,
password=password,
ignore_retcode=ignore_retcode,
output_encoding=output_encoding)['stdout'] | .. versionadded:: 2015.8.0
Interface to `git-symbolic-ref(1)`_
cwd
The path to the git checkout
ref
Symbolic ref to read/modify
value
If passed, then the symbolic ref will be set to this value and an empty
string will be returned.
If not passed, then the ref to which ``ref`` points will be returned,
unless ``--delete`` is included in ``opts`` (in which case the symbolic
ref will be deleted).
opts
Any additional options to add to the command line, in a single string
git_opts
Any additional options to add to git command itself (not the
``symbolic-refs`` subcommand), in a single string. This is useful for
passing ``-c`` to run git with temporary changes to the git
configuration.
.. versionadded:: 2017.7.0
.. note::
This is only supported in git 1.7.2 and newer.
user
User under which to run the git command. By default, the command is run
by the user under which the minion is running.
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.4
ignore_retcode : False
If ``True``, do not log an error to the minion log if the git command
returns a nonzero exit status.
.. versionadded:: 2015.8.0
output_encoding
Use this option to specify which encoding to use to decode the output
from any git commands which are run. This should not be needed in most
cases.
.. note::
This should only be needed if the files in the repository were
created with filenames using an encoding other than UTF-8 to handle
Unicode characters.
.. versionadded:: 2018.3.1
.. _`git-symbolic-ref(1)`: http://git-scm.com/docs/git-symbolic-ref
CLI Examples:
.. code-block:: bash
# Get ref to which HEAD is pointing
salt myminion git.symbolic_ref /path/to/repo HEAD
# Set/overwrite symbolic ref 'FOO' to local branch 'foo'
salt myminion git.symbolic_ref /path/to/repo FOO refs/heads/foo
# Delete symbolic ref 'FOO'
salt myminion git.symbolic_ref /path/to/repo FOO opts='--delete' |
4,754 | def setData(self, index, value, role=QtCore.Qt.UserRole):
self._stim.overwriteComponent(value, index.row(), index.column())
self.samplerateChanged.emit(self.samplerate()) | Sets the component at *index* to *value* |
4,755 | def is_venv(directory, executable=):
path=os.path.join(directory, , executable)
return os.path.isfile(path) | :param directory: base directory of python environment |
4,756 | def _plotting(self, rank_metric, results, graph_num, outdir,
format, figsize, pheno_pos=, pheno_neg=):
if self._outdir is None: return
top_term = self.res2d.index[:graph_num]
pool = Pool(self._processes)
for gs in top_term:
hit = results.get(gs)[]
NES = if self.module != else
term = gs.replace(,).replace(":","_")
outfile = .format(self.outdir, term, self.module, self.format)
pool.apply_async(gseaplot, args=(rank_metric, term, hit, results.get(gs)[NES],
results.get(gs)[],results.get(gs)[],
results.get(gs)[],
pheno_pos, pheno_neg,
figsize, , outfile))
if self.module == :
outfile2 = "{0}/{1}.heatmap.{2}".format(self.outdir, term, self.format)
pool.apply_async(heatmap, args=(self.heatmat.iloc[hit, :], 0, term,
(self._width, len(hit)/2+2), ,
True, True, outfile2))
pool.close()
pool.join() | Plotting API.
:param rank_metric: sorted pd.Series with rankings values.
:param results: self.results
:param data: preprocessed expression table |
4,757 | def _register_plotter(cls, identifier, module, plotter_name,
plotter_cls=None, summary=, prefer_list=False,
default_slice=None, default_dims={},
show_examples=True,
example_call="filename, name=[], ...",
plugin=None):
full_name = % (module, plotter_name)
if plotter_cls is not None:
docstrings.params[ % (full_name)] = \
plotter_cls.show_keys(
indent=4, func=str,
include_links=None)
doc_str = (
) % full_name
else:
doc_str =
summary = summary or (
% (
module, plotter_name))
if plotter_cls is not None:
_versions.update(get_versions(key=lambda s: s == plugin))
class PlotMethod(cls._plot_method_base_cls):
__doc__ = cls._gen_doc(summary, full_name, identifier,
example_call, doc_str, show_examples)
_default_slice = default_slice
_default_dims = default_dims
_plotter_cls = plotter_cls
_prefer_list = prefer_list
_plugin = plugin
_summary = summary
setattr(cls, identifier, PlotMethod(identifier, module, plotter_name)) | Register a plotter for making plots
This class method registers a plot function for the :class:`Project`
class under the name of the given `identifier`
Parameters
----------
%(Project._register_plotter.parameters)s
Other Parameters
----------------
prefer_list: bool
Determines the `prefer_list` parameter in the `from_dataset`
method. If True, the plotter is expected to work with instances of
:class:`psyplot.InteractiveList` instead of
:class:`psyplot.InteractiveArray`.
%(ArrayList.from_dataset.parameters.default_slice)s
default_dims: dict
Default dimensions that shall be used for plotting (e.g.
{'x': slice(None), 'y': slice(None)} for longitude-latitude plots)
show_examples: bool, optional
If True, examples how to access the plotter documentation are
included in class documentation
example_call: str, optional
The arguments and keyword arguments that shall be included in the
example of the generated plot method. This call will then appear as
``>>> psy.plot.%%(identifier)s(%%(example_call)s)`` in the
documentation
plugin: str
The name of the plugin |
4,758 | def authenticate_user(self, response, **kwargs):
host = urlparse(response.url).hostname
try:
auth_header = self.generate_request_header(response, host)
except KerberosExchangeError:
return response
log.debug("authenticate_user(): Authorization header: {0}".format(
auth_header))
response.request.headers['Authorization'] = auth_header
response.content
response.raw.release_conn()
_r = response.connection.send(response.request, **kwargs)
_r.history.append(response)
log.debug("authenticate_user(): returning {0}".format(_r))
return _r | Handles user authentication with gssapi/kerberos |
4,759 | def partition_items(count, bin_size):
num_bins = int(math.ceil(count / float(bin_size)))
bins = [0] * num_bins
for i in range(count):
bins[i % num_bins] += 1
return bins | Given the total number of items, determine the number of items that
can be added to each bin with a limit on the bin size.
So if you want to partition 11 items into groups of 3, you'll want
three of three and one of two.
>>> partition_items(11, 3)
[3, 3, 3, 2]
But if you only have ten items, you'll have two groups of three and
two of two.
>>> partition_items(10, 3)
[3, 3, 2, 2] |
4,760 | def coef(self):
tbl = self._model_json["output"]["coefficients_table"]
if tbl is None:
return None
return {name: coef for name, coef in zip(tbl["names"], tbl["coefficients"])} | Return the coefficients which can be applied to the non-standardized data.
Note: standardize = True by default; if set to False then coef() returns the coefficients that are fit directly. |
4,761 | def unified(old, new):
for diff in difflib.ndiff(old.splitlines(), new.splitlines()):
if diff[0] == " ":
yield diff
elif diff[0] == "?":
continue
else:
yield termcolor.colored(diff, "red" if diff[0] == "-" else "green", attrs=["bold"]) | Returns a generator yielding a unified diff between `old` and `new`. |
4,762 | def brozzler_new_job(argv=None):
argv = argv or sys.argv
arg_parser = argparse.ArgumentParser(
prog=os.path.basename(argv[0]),
description=,
formatter_class=BetterArgumentDefaultsHelpFormatter)
arg_parser.add_argument(
, metavar=,
help=)
add_rethinkdb_options(arg_parser)
add_common_options(arg_parser, argv)
args = arg_parser.parse_args(args=argv[1:])
configure_logging(args)
rr = rethinker(args)
frontier = brozzler.RethinkDbFrontier(rr)
try:
brozzler.new_job_file(frontier, args.job_conf_file)
except brozzler.InvalidJobConf as e:
print(, args.job_conf_file, file=sys.stderr)
print( + yaml.dump(e.errors).rstrip().replace(, ), file=sys.stderr)
sys.exit(1) | Command line utility entry point for queuing a new brozzler job. Takes a
yaml brozzler job configuration file, creates job, sites, and pages objects
in rethinkdb, which brozzler-workers will look at and start crawling. |
4,763 | def create_temp_file(self, **mkstemp_kwargs) -> Tuple[int, str]:
kwargs = {**self.default_mkstemp_kwargs, **mkstemp_kwargs}
handle, location = tempfile.mkstemp(**kwargs)
self._temp_files.add(location)
return handle, location | Creates a temp file.
:param mkstemp_kwargs: named arguments to be passed to `tempfile.mkstemp`
:return: tuple where the first element is the file handle and the second is the location of the temp file |
4,764 | def pointlist(points, sr):
assert all(isinstance(pt, Point) or len(pt) == 2
for pt in points), "Point(s) not in [x, y] form"
return [coord if isinstance(coord, Point)
else Point(coord[0], coord[1], sr)
for coord in points] | Convert a list of the form [[x, y] ...] to a list of Point instances
with the given x, y coordinates. |
4,765 | def iter_contributors(self, anon=False, number=-1, etag=None):
url = self._build_url(, base_url=self._api)
params = {}
if anon:
params = {: True}
return self._iter(int(number), url, User, params, etag) | Iterate over the contributors to this repository.
:param bool anon: (optional), True lists anonymous contributors as
well
:param int number: (optional), number of contributors to return.
Default: -1 returns all contributors
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`User <github3.users.User>`\ s |
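A hedged usage sketch, assuming the github3.py 0.9-era API this method appears to come from (network access required; the repository name is illustrative only):

```python
# Assumed usage; not confirmed against the row above.
import github3

repo = github3.repository("sigmavirus24", "github3.py")
for user in repo.iter_contributors(number=5):
    print(user.login)
```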
4,766 | def add(self, child):
if isinstance(child, Case):
self.add_case(child)
else:
raise ModelError() | Adds a typed child object to the conditional derived variable.
@param child: Child object to be added. |
4,767 | def time_delay_from_earth_center(self, right_ascension, declination, t_gps):
return self.time_delay_from_location(np.array([0, 0, 0]),
right_ascension,
declination,
t_gps) | Return the time delay from the earth center |
4,768 | def evals_get(self, service_staff_id, start_date, end_date, session):
request = TOPRequest()
request[] = service_staff_id
request[] = start_date
request[] = end_date
self.create(self.execute(request, session))
return self.staff_eval_details | taobao.wangwang.eservice.evals.get - Get evaluation details.
Query the evaluation details for the given user id. A main account id can query evaluations of the shop's sub-accounts, a group administrator can query evaluations of accounts within the group, and a non-administrator sub-account can only query its own evaluations. |
4,769 | def _should_fuzz_node(self, fuzz_node, stage):
if stage == ClientFuzzer.STAGE_ANY:
return True
if fuzz_node.name.lower() == stage.lower():
if self._index_in_path == len(self._fuzz_path) - 1:
return True
else:
return False | The matching stage is either the name of the last node, or ClientFuzzer.STAGE_ANY.
:return: True if we are in the correct model node |
4,770 | def get_state_machine(self):
if self.parent:
if self.is_root_state:
return self.parent
else:
return self.parent.get_state_machine()
return None | Get a reference of the state_machine the state belongs to
:rtype rafcon.core.state_machine.StateMachine
:return: respective state machine |
4,771 | def has_isotropic_cells(self):
return self.is_uniform and np.allclose(self.cell_sides[:-1],
self.cell_sides[1:]) | ``True`` if `grid` is uniform and `cell_sides` are all equal.
Always ``True`` for 1D partitions.
Examples
--------
>>> part = uniform_partition([0, -1], [1, 1], (5, 10))
>>> part.has_isotropic_cells
True
>>> part = uniform_partition([0, -1], [1, 1], (5, 5))
>>> part.has_isotropic_cells
False |
4,772 | def search_google(self, query, *, max_results=100, **kwargs):
response = self._call(
mc_calls.Query,
query,
max_results=max_results,
**kwargs
)
clusters = response.body.get(, [])
results = defaultdict(list)
for cluster in clusters:
result_type = QueryResultType(cluster[][]).name
entries = cluster.get(, [])
if len(entries) > 0:
for entry in entries:
item_key = next(
key
for key in entry
if key not in [, , ]
)
results[f"{result_type}s"].append(entry[item_key])
return dict(results) | Search Google Music for content.
Parameters:
query (str): Search text.
max_results (int, Optional): Maximum number of results per type to retrieve.
Google only accepts values up to 100.
Default: ``100``
kwargs (bool, Optional): Any of ``albums``, ``artists``, ``genres``,
``playlists``, ``podcasts``, ``situations``, ``songs``, ``stations``,
``videos`` set to ``True`` will include that result type in the
returned dict.
Setting none of them will include all result types in the returned dict.
Returns:
dict: A dict of results separated into keys: ``'albums'``, ``'artists'``,
``'genres'``, ``'playlists'``, ```'podcasts'``, ``'situations'``,
``'songs'``, ``'stations'``, ``'videos'``.
Note:
Free account search is restricted so may not contain hits for all result types. |
4,773 | def _approx_eq_(self, other: Any, atol: float) -> bool:
if not isinstance(other, type(self)):
return NotImplemented
if isinstance(other.value, sympy.Mod):
return self.value == other.value
if self.period != other.period:
return False
low = min(self.value, other.value)
high = max(self.value, other.value)
if high - low > self.period / 2:
low += self.period
return cirq.protocols.approx_eq(low, high, atol=atol) | Implementation of `SupportsApproximateEquality` protocol. |
4,774 | def sort(self):
self._matches.sort_values(
by=[constants.SIZE_FIELDNAME, constants.NGRAM_FIELDNAME,
constants.COUNT_FIELDNAME, constants.LABEL_FIELDNAME,
constants.WORK_FIELDNAME, constants.SIGLUM_FIELDNAME],
ascending=[False, True, False, True, True, True], inplace=True) | Sorts all results rows.
Sorts by: size (descending), n-gram, count (descending), label,
text name, siglum. |
4,775 | def up(path, service_names=None):
[janus]
debug_ret = {}
project = __load_project(path)
if isinstance(project, dict):
return project
else:
try:
result = _get_convergence_plans(project, service_names)
ret = project.up(service_names)
if debug:
for container in ret:
if service_names is None or container.get()[1:] in service_names:
container.inspect_if_not_inspected()
debug_ret[container.get()] = container.inspect()
except Exception as inst:
return __handle_except(inst)
return __standardize_result(True, , result, debug_ret) | Create and start containers defined in the docker-compose.yml file
located in path, service_names is a python list, if omitted create and
start all containers
path
Path where the docker-compose file is stored on the server
service_names
If specified will create and start only the specified services
CLI Example:
.. code-block:: bash
salt myminion dockercompose.up /path/where/docker-compose/stored
salt myminion dockercompose.up /path/where/docker-compose/stored '[janus]' |
4,776 | def _run_callback(self, callback):
try:
ret = callback()
if ret is not None:
from . import gen
try:
ret = gen.convert_yielded(ret)
except gen.BadYieldError:
pass
else:
self.add_future(ret, self._discard_future_result)
except Exception:
self.handle_callback_exception(callback) | Runs a callback with error handling.
For use in subclasses. |
4,777 | def extract_single_dist_for_current_platform(self, reqs, dist_key):
distributions = self._resolve_distributions_by_platform(reqs, platforms=[])
try:
matched_dist = assert_single_element(list(
dist
for _, dists in distributions.items()
for dist in dists
if dist.key == dist_key
))
except (StopIteration, ValueError) as e:
raise self.SingleDistExtractionError(
"Exactly one dist was expected to match name {} in requirements {}: {}"
.format(dist_key, reqs, e))
return matched_dist | Resolve a specific distribution from a set of requirements matching the current platform.
:param list reqs: A list of :class:`PythonRequirement` to resolve.
:param str dist_key: The value of `distribution.key` to match for a `distribution` from the
resolved requirements.
:return: The single :class:`pkg_resources.Distribution` matching `dist_key`.
:raises: :class:`self.SingleDistExtractionError` if no dists or multiple dists matched the given
`dist_key`. |
4,778 | def get_notes():
notes = {:{}, :{}}
notes[][] = 26.370
notes[][] = 31.929
notes[] = 0.500
notes[] = 437.000
notes[] = 35
notes[] = 19*2*12
notes[] = 12.000
notes[] = 26.000
notes[][] = 0.588
notes[][] = 0.612
notes[kk] = notes[kk]*1.e-3
return notes | By convention : D is a length of the element, d is a gap |
4,779 | def display_completions_like_readline(event):
b = event.current_buffer
if b.completer is None:
return
complete_event = CompleteEvent(completion_requested=True)
completions = list(b.completer.get_completions(b.document, complete_event))
common_suffix = get_common_complete_suffix(b.document, completions)
if len(completions) == 1:
b.delete_before_cursor(-completions[0].start_position)
b.insert_text(completions[0].text)
elif common_suffix:
b.insert_text(common_suffix)
elif completions:
_display_completions_like_readline(event.cli, completions) | Key binding handler for readline-style tab completion.
This is meant to be as similar as possible to the way how readline displays
completions.
Generate the completions immediately (blocking) and display them above the
prompt in columns.
Usage::
# Call this handler when 'Tab' has been pressed.
registry.add_binding(Keys.ControlI)(display_completions_like_readline) |
4,780 | def wind_shear(shear: str, unit_alt: str = , unit_wind: str = , spoken: bool = False) -> str:
if not shear or not in shear or not in shear:
return
shear = shear[2:].rstrip(unit_wind.upper()).split()
wdir = core.spoken_number(shear[1][:3]) if spoken else shear[1][:3]
return f | Translate wind shear into a readable string
Ex: Wind shear 2000ft from 140 at 30kt |
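A standalone illustration of parsing a METAR wind-shear group of the form implied by the docstring example (e.g. 'WS020/14030KT'); it does not use avwx's core helpers, and the 'WS'/'KT' handling is an assumption about the input format:

def parse_wind_shear(group: str) -> str:
    # 'WS020/14030KT' -> altitude in hundreds of feet, then dddss wind
    if not group.startswith('WS') or '/' not in group:
        return ''
    alt, wind = group[2:].rstrip('KT').split('/')
    return f'Wind shear {int(alt) * 100}ft from {wind[:3]} at {wind[3:]}kt'

print(parse_wind_shear('WS020/14030KT'))  # Wind shear 2000ft from 140 at 30kt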
4,781 | def most_likely_alpha(data, xmin, alpharange=(1.5,3.5), n_alpha=201):
alpha_vector = np.linspace(alpharange[0],alpharange[1],n_alpha)
return alpha_vector[discrete_max_likelihood_arg(data, xmin,
alpharange=alpharange,
n_alpha=n_alpha)] | Return the most likely alpha for the data given an xmin |
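A self-contained sketch of the same grid search for a discrete power law, with an explicit Hurwitz-zeta log-likelihood standing in for the module's discrete_max_likelihood_arg helper (the normalization is the assumption here):

import numpy as np
from scipy.special import zeta

def most_likely_alpha_demo(data, xmin, alpharange=(1.5, 3.5), n_alpha=201):
    alphas = np.linspace(alpharange[0], alpharange[1], n_alpha)
    data = np.asarray(data, dtype=float)
    data = data[data >= xmin]
    # log L(alpha) = -alpha * sum(log x) - n * log(zeta(alpha, xmin))
    loglikes = [-a * np.log(data).sum() - len(data) * np.log(zeta(a, xmin)) for a in alphas]
    return alphas[int(np.argmax(loglikes))]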
4,782 | def output_solution(self, fd, z, z_est, error_sqrsum):
col_width = 11
sep = ("=" * col_width + " ") * 4 + "\n"
fd.write("State Estimation\n")
fd.write("-" * 16 + "\n")
fd.write(sep)
fd.write("Type".center(col_width) + " ")
fd.write("Name".center(col_width) + " ")
fd.write("Measurement".center(col_width) + " ")
fd.write("Estimation".center(col_width) + " ")
fd.write("\n")
fd.write(sep)
c = 0
for t in [PF, PT, QF, QT, PG, QG, VM, VA]:
for meas in self.measurements:
if meas.type == t:
n = meas.b_or_l.name[:col_width].ljust(col_width)
fd.write(t.ljust(col_width) + " ")
fd.write(n + " ")
fd.write("%11.5f " % z[c])
fd.write("%11.5f\n" % z_est[c])
c += 1
fd.write("\nWeighted sum of error squares = %.4f\n" % error_sqrsum) | Prints comparison of measurements and their estimations. |
4,783 | def get_conversion_factor(self, new_unit):
uo_base, ofactor = self.as_base_units
un_base, nfactor = Unit(new_unit).as_base_units
units_new = sorted(un_base.items(),
key=lambda d: _UNAME2UTYPE[d[0]])
units_old = sorted(uo_base.items(),
key=lambda d: _UNAME2UTYPE[d[0]])
factor = ofactor / nfactor
for uo, un in zip(units_old, units_new):
if uo[1] != un[1]:
raise UnitError("Units %s and %s are not compatible!" % (uo, un))
c = ALL_UNITS[_UNAME2UTYPE[uo[0]]]
factor *= (c[uo[0]] / c[un[0]]) ** uo[1]
return factor | Returns a conversion factor between this unit and a new unit.
Compound units are supported, but must have the same powers in each
unit type.
Args:
new_unit: The new unit. |
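A hedged usage sketch, assuming this method sits on a pymatgen-style Unit class and that both unit symbols are registered in ALL_UNITS:

from pymatgen.core.units import Unit  # assumed import path

print(Unit("m").get_conversion_factor("cm"))  # 1 m = 100 cm, so expect 100.0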
4,784 | def _create_flat_pointers(dct, key_stack=()):
for k in dct.keys():
current_key = key_stack + (k,)
if isinstance(dct[k], BaseMapping):
for flat_ptr in _create_flat_pointers(dct[k], current_key):
yield flat_ptr
else:
yield (current_key, dct, k) | Create a flattened dictionary of "key stacks" -> (value container, key) |
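A quick standalone usage sketch, with collections.abc.Mapping standing in for the module's BaseMapping alias (an assumption):

from collections.abc import Mapping

def create_flat_pointers(dct, key_stack=()):
    # Same traversal as above: yield (key path, containing dict, final key) for each leaf.
    for k in dct.keys():
        current_key = key_stack + (k,)
        if isinstance(dct[k], Mapping):
            yield from create_flat_pointers(dct[k], current_key)
        else:
            yield (current_key, dct, k)

cfg = {"db": {"host": "localhost", "port": 5432}, "debug": True}
for key_path, container, key in create_flat_pointers(cfg):
    print(key_path, "->", container[key])
# ('db', 'host') -> localhost
# ('db', 'port') -> 5432
# ('debug',) -> True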
4,785 | async def set(
self, key, value, ttl=SENTINEL, dumps_fn=None, namespace=None, _cas_token=None, _conn=None
):
start = time.monotonic()
dumps = dumps_fn or self._serializer.dumps
ns_key = self.build_key(key, namespace=namespace)
res = await self._set(
ns_key, dumps(value), ttl=self._get_ttl(ttl), _cas_token=_cas_token, _conn=_conn
)
logger.debug("SET %s %d (%.4f)s", ns_key, True, time.monotonic() - start)
return res | Stores the value in the given key with ttl if specified
:param key: str
:param value: obj
:param ttl: int the expiration time in seconds. Due to memcached
restrictions, if you want compatibility use int. In case you
need milliseconds, redis and memory support float ttls
:param dumps_fn: callable alternative to use as dumps function
:param namespace: str alternative namespace to use
:param timeout: int or float in seconds specifying maximum timeout
for the operations to last
:returns: True if the value was set
:raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout |
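A hedged usage sketch with aiocache's in-memory backend (import name and defaults may differ between aiocache versions):

import asyncio
from aiocache import SimpleMemoryCache  # assumed import

async def main():
    cache = SimpleMemoryCache()
    await cache.set("user:1", {"name": "Ada"}, ttl=10)  # expires after 10 seconds
    print(await cache.get("user:1"))

asyncio.run(main())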
4,786 | def refresh(self):
_logger().log(5, )
self.resize()
self._update(self.editor.contentsRect(), 0,
force_update_margins=True) | Refreshes the editor panels (resize and update margins) |
4,787 | def protein_statistics(self):
d = {}
d[] = self.id
d[] = [x.id for x in self.sequences]
d[] = self.num_sequences
if self.representative_sequence:
d[] = self.representative_sequence.id
d[] = self.representative_sequence.gene_name
d[] = self.representative_sequence.uniprot
d[] = self.representative_sequence.description
d[] = self.num_structures
d[] = [x.id for x in self.get_experimental_structures()]
d[] = self.num_structures_experimental
d[] = [x.id for x in self.get_homology_models()]
d[] = self.num_structures_homology
if self.representative_structure:
d[] = self.representative_structure.id
d[] = self.representative_chain
d[] = self.representative_chain_seq_coverage
d[] = self.description
if self.representative_structure.is_experimental:
d[] = self.representative_structure.resolution
d[] = len(self.sequence_alignments)
d[] = len(self.structure_alignments)
return d | Get a dictionary of basic statistics describing this protein |
4,788 | def buildElement(element, items, itemName):
def assertNonnegative(i,name):
if i < 0:
raise RuntimeError("Negative value %s reported for %s" %(i,name) )
else:
return float(i)
itemTimes = []
itemClocks = []
itemMemory = []
for item in items:
itemTimes.append(assertNonnegative(float(item["time"]), "time"))
itemClocks.append(assertNonnegative(float(item["clock"]), "clock"))
itemMemory.append(assertNonnegative(float(item["memory"]), "memory"))
assert len(itemClocks) == len(itemTimes) == len(itemMemory)
itemWaits=[]
for index in range(0,len(itemTimes)):
itemWaits.append(itemTimes[index] - itemClocks[index])
itemWaits.sort()
itemTimes.sort()
itemClocks.sort()
itemMemory.sort()
if len(itemTimes) == 0:
itemTimes.append(0)
itemClocks.append(0)
itemWaits.append(0)
itemMemory.append(0)
element[itemName]=Expando(
total_number=float(len(items)),
total_time=float(sum(itemTimes)),
median_time=float(itemTimes[old_div(len(itemTimes),2)]),
average_time=float(old_div(sum(itemTimes),len(itemTimes))),
min_time=float(min(itemTimes)),
max_time=float(max(itemTimes)),
total_clock=float(sum(itemClocks)),
median_clock=float(itemClocks[old_div(len(itemClocks),2)]),
average_clock=float(old_div(sum(itemClocks),len(itemClocks))),
min_clock=float(min(itemClocks)),
max_clock=float(max(itemClocks)),
total_wait=float(sum(itemWaits)),
median_wait=float(itemWaits[old_div(len(itemWaits),2)]),
average_wait=float(old_div(sum(itemWaits),len(itemWaits))),
min_wait=float(min(itemWaits)),
max_wait=float(max(itemWaits)),
total_memory=float(sum(itemMemory)),
median_memory=float(itemMemory[old_div(len(itemMemory),2)]),
average_memory=float(old_div(sum(itemMemory),len(itemMemory))),
min_memory=float(min(itemMemory)),
max_memory=float(max(itemMemory)),
name=itemName
)
return element[itemName] | Create an element for output. |
4,789 | def _import_all_troposphere_modules(self):
dirname = os.path.join(os.path.dirname(__file__))
module_names = [
pkg_name
for importer, pkg_name, is_pkg in
pkgutil.walk_packages([dirname], prefix="troposphere.")
if not is_pkg and pkg_name not in self.EXCLUDE_MODULES]
module_names.append('troposphere')
modules = []
for name in module_names:
modules.append(importlib.import_module(name))
def members_predicate(m):
return inspect.isclass(m) and not inspect.isbuiltin(m)
members = []
for module in modules:
members.extend((m[1] for m in inspect.getmembers(
module, members_predicate)))
return set(members) | Imports all troposphere modules and returns them |
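The same walk-and-import pattern, sketched for an arbitrary package (the stdlib email package is used here purely as a harmless demo target):

import importlib
import inspect
import pkgutil

def import_all_classes(package_name: str):
    # Import every non-package submodule of a package and collect its classes.
    package = importlib.import_module(package_name)
    members = set()
    for _, name, is_pkg in pkgutil.walk_packages(package.__path__, prefix=package_name + "."):
        if is_pkg:
            continue
        module = importlib.import_module(name)
        members.update(m for _, m in inspect.getmembers(module, inspect.isclass))
    return members

print(len(import_all_classes("email")))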
4,790 | def getFeatureID(self, location):
if not self.contains(location):
return self.EMPTY_FEATURE
return self.SPHERICAL_SURFACE | Returns the feature index associated with the provided location.
In the case of a sphere, it is always the same if the location is valid. |
4,791 | def add_unit(unit,**kwargs):
new_unit = Unit()
new_unit.dimension_id = unit["dimension_id"]
new_unit.name = unit['name']
new_unit.abbreviation = unit['abbreviation']
new_unit.description = unit['description']
new_unit.lf = unit['lf']
new_unit.cf = unit['cf']
if ('project_id' in unit) and (unit['project_id'] is not None):
new_unit.project_id = unit['project_id']
db.DBSession.add(new_unit)
db.DBSession.flush()
return JSONObject(new_unit) | Add the unit defined in the "unit" object to the DB.
If unit["project_id"] is None, the unit is global; otherwise it belongs to a project.
If the unit already exists, an exception is raised.
A minimal example:
.. code-block:: python
new_unit = dict(
name = 'Teaspoons per second',
abbreviation = 'tsp s^-1',
cf = 0, # Constant conversion factor
lf = 1.47867648e-05, # Linear conversion factor
dimension_id = 2,
description = 'A flow of one teaspoon per second.',
)
add_unit(new_unit) |
4,792 | def calculate_dates(self, dt):
period_end = self.cal.open_and_close_for_session(
self.cal.minute_to_session_label(dt),
)[1]
self._period_end = self.cal.execution_time_from_close(period_end)
self._period_start = self._period_end - self.offset
self._period_close = self._period_end | Given a dt, find that day's close and period start (close - offset). |
4,793 | def get_zone_info(self, controller, zone, return_variable):
_LOGGER.debug("Begin - controller= %s, zone= %s, get status", controller, zone)
resp_msg_signature = self.create_response_signature("04 02 00 @zz 07", zone)
send_msg = self.create_send_message("F0 @cc 00 7F 00 00 @kk 01 04 02 00 @zz 07 00 00", controller, zone)
try:
self.lock.acquire()
_LOGGER.debug(, zone)
self.send_data(send_msg)
_LOGGER.debug("Zone: %s Sent: %s", zone, send_msg)
matching_message = self.get_response_message(resp_msg_signature)
if matching_message is not None:
_LOGGER.debug("matching message to use= %s", matching_message)
_LOGGER.debug("matching message length= %s", len(matching_message))
if return_variable == 4:
return_value = [matching_message[11], matching_message[12], matching_message[13]]
else:
return_value = matching_message[return_variable + 11]
else:
return_value = None
_LOGGER.warning("Did not receive expected Russound power state for controller %s and zone %s.", controller, zone)
finally:
self.lock.release()
_LOGGER.debug("Released lock for zone %s", zone)
_LOGGER.debug("End - controller= %s, zone= %s, get status \n", controller, zone)
return return_value | Get all relevant info for the zone.
When called with return_variable == 4, the function returns a list with the current
volume, source and ON/OFF status.
When called with 0, 1 or 2, it returns a single integer for the power, source or volume |
4,794 | def expand(self, percentage):
ex_h = math.ceil(self.height * percentage / 100)
ex_w = math.ceil(self.width * percentage / 100)
x = max(0, self.x - ex_w)
y = max(0, self.y - ex_h)
x2 = self.x + self.width + ex_w
y2 = self.y + self.height + ex_h
return Box.from_xy(x, y, x2, y2) | Expands the box co-ordinates by given percentage on four sides. Ignores negative values.
:param percentage: Percentage to expand
:return: New expanded Box |
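A small worked example, assuming a 100x100 Box anchored at (10, 10) (Box.from_xy and the attribute names come from the surrounding class):

box = Box.from_xy(10, 10, 110, 110)  # width = height = 100
bigger = box.expand(10)              # ceil(100 * 10 / 100) = 10 px added on each side
# bigger spans (0, 0) to (120, 120)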
4,795 | def Offset(self, vtableOffset):
vtable = self.Pos - self.Get(N.SOffsetTFlags, self.Pos)
vtableEnd = self.Get(N.VOffsetTFlags, vtable)
if vtableOffset < vtableEnd:
return self.Get(N.VOffsetTFlags, vtable + vtableOffset)
return 0 | Offset provides access into the Table's vtable.
Deprecated fields are ignored by checking the vtable's length. |
4,796 | def shorter_name(key):
key_short = key
for sep in ['/', '#']:
ind = key_short.rfind(sep)
if ind is not None:
key_short = key_short[ind+1:]
else:
key_short = key_short
return key_short.replace('-', '_').replace('.', '_') | Return a shorter name for an id.
Does this by only taking the last part of the URI,
after the last / and the last #. Also replaces - and . with _.
Parameters
----------
key: str
Some URI
Returns
-------
key_short: str
A shortened, but more ambiguous, identifier |
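For example, with the '/' and '#' separators and the '-'/'.' replacements described in the docstring:

print(shorter_name("http://example.org/ontology#has-part"))  # has_part
print(shorter_name("http://xmlns.com/foaf/0.1/name"))        # name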
4,797 | def find(self, vid=None, pid=None, serial=None, interface=None, \
path=None, release_number=None, manufacturer=None,
product=None, usage=None, usage_page=None):
result = []
for dev in self.device_list:
if vid not in [0, None] and dev.vendor_id != vid:
continue
if pid not in [0, None] and dev.product_id != pid:
continue
if serial and dev.serial_number != serial:
continue
if path and dev.path != path:
continue
if manufacturer and dev.manufacturer_string != manufacturer:
continue
if product and dev.product_string != product:
continue
if release_number != None and dev.release_number != release_number:
continue
if interface != None and dev.interface_number != interface:
continue
if usage != None and dev.usage != usage:
continue
if usage_page != None and dev.usage_page != usage_page:
continue
result.append(dev)
return result | Finds devices in this `Enumeration` object. Optional
arguments can be provided to filter the resulting list based on various
parameters of the HID devices.
Args:
vid: filters by USB Vendor ID
pid: filters by USB Product ID
serial: filters by USB serial string (.iSerialNumber)
interface: filters by interface number (bInterfaceNumber)
release_number: filters by the USB release number (.bcdDevice)
manufacturer: filters by USB manufacturer string (.iManufacturer)
product: filters by USB product string (.iProduct)
usage: filters by HID usage
usage_page: filters by HID usage_page
path: filters by HID API path. |
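A hedged usage sketch, assuming the easyhid-style Enumeration class this method belongs to:

from easyhid import Enumeration  # assumed package providing this class

en = Enumeration()
devices = en.find(vid=0x046D, pid=0xC077)  # all interfaces of one vendor/product pair
for dev in devices:
    print(dev.product_string, dev.path)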
4,798 | def derivative(self, point):
point = self.domain.element(point)
norm = point.norm()
if norm == 0:
raise ValueError()
return InnerProductOperator(point / norm) | Derivative of this operator in ``point``.
``NormOperator().derivative(y)(x) == (y / y.norm()).inner(x)``
This is only applicable in inner product spaces.
Parameters
----------
point : `domain` `element-like`
Point in which to take the derivative.
Returns
-------
derivative : `InnerProductOperator`
Raises
------
ValueError
If ``point.norm() == 0``, in which case the derivative is not well
defined in the Frechet sense.
Notes
-----
The derivative cannot be written in a general sense except in Hilbert
spaces, in which case it is given by
.. math::
(D \|\cdot\|)(y)(x) = \langle y / \|y\|, x \rangle
Examples
--------
>>> r3 = odl.rn(3)
>>> op = NormOperator(r3)
>>> derivative = op.derivative([1, 0, 0])
>>> derivative([1, 0, 0])
1.0 |
4,799 | def get_next_rngruns(self):
available_runs = [result[][] for result in
self.get_results()]
yield from DatabaseManager.get_next_values(available_runs) | Yield the next RngRun values that can be used in this campaign. |