language (stringclasses, 2 values) | func_code_string (stringlengths, 63–466k)
---|---
java
|
public String remove(String content, String selector) {
Element body = parseContent(content);
List<Element> elements = body.select(selector);
if (elements.size() > 0) {
for (Element element : elements) {
element.remove();
}
return body.html();
} else {
// nothing changed
return content;
}
}
|
python
|
def get_vlan_brief_output_last_vlan_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vlan_brief = ET.Element("get_vlan_brief")
config = get_vlan_brief  # generated code: get_vlan_brief becomes the request root
output = ET.SubElement(get_vlan_brief, "output")
last_vlan_id = ET.SubElement(output, "last-vlan-id")
last_vlan_id.text = kwargs.pop('last_vlan_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
python
|
def is_absolute(self):
"""True if the path is absolute (has both a root and, if applicable,
a drive)."""
if not self._root:
return False
return not self._flavour.has_drv or bool(self._drv)
|
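For context, the two-flavour rule above matches the standard pathlib behaviour; a minimal self-check (illustrative, not part of the dataset):
from pathlib import PurePosixPath, PureWindowsPath
# POSIX flavour has no drive concept: a root alone makes the path absolute.
assert PurePosixPath("/usr/bin").is_absolute()
# Windows flavour needs both a drive and a root.
assert PureWindowsPath("C:/Temp").is_absolute()
assert not PureWindowsPath("/Temp").is_absolute()   # root, no drive
assert not PureWindowsPath("C:Temp").is_absolute()  # drive, no root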
python
|
def _CheckAttribute(self, attribute, value):
"""Check that the value is of the expected type.
Args:
attribute: An instance of Attribute().
value: An instance of RDFValue.
Raises:
ValueError: when the value is not of the expected type.
AttributeError: When the attribute is not of type Attribute().
"""
if not isinstance(attribute, Attribute):
raise AttributeError("Attribute %s must be of type aff4.Attribute()" %
attribute)
if not isinstance(value, attribute.attribute_type):
raise ValueError("Value for attribute %s must be of type %s()" %
(attribute, attribute.attribute_type.__name__))
|
python
|
def load_py_config(conf_file):
# type: (str) -> None
""" Import configuration from a python file.
This will just import the file into python. Sky is the limit. The file
has to deal with the configuration all by itself (i.e. call conf.init()).
You will also need to add your src directory to sys.paths if it's not the
current working directory. This is done automatically if you use yaml
config as well.
Args:
conf_file (str):
Path to the py module config. This function will not check the file
name or extension and will just crash if the given file does not
exist or is not a valid python file.
"""
if sys.version_info >= (3, 5):
from importlib import util
spec = util.spec_from_file_location('pelconf', conf_file)
mod = util.module_from_spec(spec)
spec.loader.exec_module(mod)
elif sys.version_info >= (3, 3):
from importlib import machinery
loader = machinery.SourceFileLoader('pelconf', conf_file)
_ = loader.load_module()
else:   # Python 2 and anything below 3.3
import imp
imp.load_source('pelconf', conf_file)
|
python
|
def correlation_matrix(nums_with_uncert):
"""
Calculate the correlation matrix of uncertain variables, oriented by the
order of the inputs
Parameters
----------
nums_with_uncert : array-like
A list of variables that have an associated uncertainty
Returns
-------
corr_matrix : 2d-array-like
A nested list containing correlation coefficients
Example
-------
>>> x = N(1, 0.1)
>>> y = N(10, 0.1)
>>> z = x + 2*y
>>> correlation_matrix([x,y,z])
[[ 0.99969486 0.00254001 0.4489385 ]
[ 0.00254001 0.99982321 0.89458702]
[ 0.4489385 0.89458702 1. ]]
"""
ufuncs = list(map(to_uncertain_func, nums_with_uncert))
data = np.vstack([ufunc._mcpts for ufunc in ufuncs])
return np.corrcoef(data.T, rowvar=0)
|
java
|
static public void main(String ... args) throws Exception {
FileInputStream fin = new FileInputStream(args[0]);
XMLReader<Sample> reader = new XMLReader<Sample>();
for( Map<String,Object> map : reader.read(fin, Sample.class) ) {
System.out.println("Next...");
for( Map.Entry<String,Object> entry : map.entrySet() ) {
System.out.println("\t" + entry.getKey() + " (" + (entry.getValue() != null ? entry.getValue().getClass().getName() : "") + "): " + entry.getValue());
}
}
}
|
python
|
def write(self, filepath, skip_unknown=False):
"""Write the metadata fields to filepath."""
fp = codecs.open(filepath, 'w', encoding='utf-8')
try:
self.write_file(fp, skip_unknown)
finally:
fp.close()
|
python
|
def pfx_path(path):
""" Prefix a path with the OS path separator if it is not already """
if path[0] != os.path.sep: return os.path.sep + path
else: return path
|
python
|
def item(ctx, appid, title):
"""Market-related commands."""
ctx.obj['appid'] = appid
ctx.obj['title'] = title
|
python
|
def customize_ruleset(self, custom_ruleset_file=None):
"""
Updates the ruleset to include a set of custom rules. These rules will
be _added_ to the existing ruleset or replace the existing rule with
the same ID.
Args:
custom_ruleset_file (optional): The filepath to the custom rules.
Defaults to `None`. If `custom_ruleset_file` isn't passed, the
environment variable `BOKCHOY_A11Y_CUSTOM_RULES_FILE` will be
checked. If a filepath isn't specified by either of these
methods, the ruleset will not be updated.
Raises:
`IOError` if the specified file does not exist.
Examples:
To include the rules defined in `axe-core-custom-rules.js`::
page.a11y_audit.config.customize_ruleset(
"axe-core-custom-rules.js"
)
Alternatively, use the environment variable `BOKCHOY_A11Y_CUSTOM_RULES_FILE`
to specify the path to the file containing the custom rules.
Documentation for how to write rules:
https://github.com/dequelabs/axe-core/blob/master/doc/developer-guide.md
An example of a custom rules file can be found at
https://github.com/edx/bok-choy/tree/master/tests/a11y_custom_rules.js
"""
custom_file = custom_ruleset_file or os.environ.get(
"BOKCHOY_A11Y_CUSTOM_RULES_FILE"
)
if not custom_file:
return
with open(custom_file, "r") as additional_rules:
custom_rules = additional_rules.read()
if "var customRules" not in custom_rules:
raise A11yAuditConfigError(
"Custom rules file must include \"var customRules\""
)
self.custom_rules = custom_rules
|
python
|
def get_about(self, element_to_query=None):
"""
Returns ElementTree containing the result of
<host>/System/deviceInfo
or if element_to_query is not None, the value of that element
"""
url = '%s/System/deviceInfo' % self._base
_LOGGING.info('url: %s', url)
response = requests.get(
url, auth=HTTPBasicAuth(self._username, self._password))
_LOGGING.debug('response: %s', response)
_LOGGING.debug("status_code %s", response.status_code)
if response.status_code != 200:
log_response_errors(response)
return None
if element_to_query is None:
return response.text
else:
try:
tree = ElementTree.fromstring(response.text)
element_to_query = './/{%s}%s' % (
self._xml_namespace, element_to_query)
result = tree.findall(element_to_query)
if len(result) > 0:
_LOGGING.debug('element_to_query: %s result: %s',
element_to_query, result[0])
return result[0].text.strip()
else:
_LOGGING.error(
'There was a problem finding element: %s',
element_to_query)
_LOGGING.error('Entire response: %s', response.text)
except AttributeError as attib_err:
_LOGGING.error('Entire response: %s', response.text)
_LOGGING.error(
'There was a problem finding element:'
' %s AttributeError: %s', element_to_query, attib_err)
return
return
|
java
|
@SuppressWarnings("unused")
@Internal
@UsedByGeneratedCode
protected final void addProperty(@Nonnull BeanProperty<T, Object> property) {
ArgumentUtils.requireNonNull("property", property);
beanProperties.put(property.getName(), property);
}
|
java
|
public List<Element> findElements(String tag) {
List<Element> result = new ArrayList<>();
LinkedList<Element> stack = new LinkedList<>(this.getChildren());
if (tag.equalsIgnoreCase(this.getMessageMLTag())) {
result.add(this);
}
while (!stack.isEmpty()) {
Element child = stack.pop();
stack.addAll(0, child.getChildren());
if (tag.equalsIgnoreCase(child.getMessageMLTag())) {
result.add(child);
}
}
return result;
}
|
java
|
public List<StmtEphemeralTableScan> getEphemeralTableScans() {
List<StmtEphemeralTableScan> scans = new ArrayList<>();
if (m_joinTree != null) {
m_joinTree.extractEphemeralTableQueries(scans);
}
return scans;
}
|
java
|
public void setPermissions(String userNameorApikey, EnumSet<Permissions> permissions) {
assertNotEmpty(userNameorApikey, "userNameorApikey");
assertNotEmpty(permissions, "permissions");
final JsonArray jsonPermissions = new JsonArray();
for (Permissions s : permissions) {
final JsonPrimitive permission = new JsonPrimitive(s.toString());
jsonPermissions.add(permission);
}
// get existing permissions
JsonObject perms = getPermissionsObject();
// now set back
JsonElement elem = perms.get("cloudant");
if (elem == null) {
perms.addProperty("_id", "_security");
elem = new JsonObject();
perms.add("cloudant", elem);
}
elem.getAsJsonObject().add(userNameorApikey, jsonPermissions);
HttpConnection put = Http.PUT(apiV2DBSecurityURI, "application/json");
put.setRequestBody(client.getGson().toJson(perms));
// CouchDbExceptions will be thrown for non-2XX cases
client.couchDbClient.executeToResponse(put);
}
|
python
|
def buildURL(self, action, **query):
"""Build a URL relative to the server base_url, with the given
query parameters added."""
base = urllib.parse.urljoin(self.server.base_url, action)
return appendArgs(base, query)
|
java
|
public static java.sql.Time rollTime(java.util.Date startDate, int period, int amount) {
GregorianCalendar gc = new GregorianCalendar();
gc.setTime(startDate);
gc.add(period, amount);
return new java.sql.Time(gc.getTime().getTime());
}
|
python
|
def list_to_csv_response(data, title='report', header=None, widths=None):
""" Make 2D list into a csv response for download data.
"""
response = HttpResponse(content_type="text/csv; charset=UTF-8")
cw = csv.writer(response)
for row in chain([header] if header else [], data):
cw.writerow([force_text(s).encode(response.charset) for s in row])
return response
|
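A hypothetical call site for the helper above (the view name and row data are made up; `header` is optional):
def export_users(request):
    rows = [["alice", 3], ["bob", 5]]
    return list_to_csv_response(rows, title="users", header=["name", "count"])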
java
|
public static String asTokenSeparatedValues(String token, Collection<String> strings) {
StringBuilder newString = new StringBuilder();
boolean first = true;
for(String str : strings) {
if(! first) {
newString.append(token);
}
first = false;
newString.append(str);
}
return newString.toString();
}
|
python
|
def add_func(self, func, *arg, **kwargs):
"""QADATASTRUCT的指标/函数apply入口
Arguments:
func {[type]} -- [description]
Returns:
[type] -- [description]
"""
return self.groupby(level=1, sort=False).apply(func, *arg, **kwargs)
|
python
|
def clamp(n, lower, upper):
"""
Restricts the given number to a lower and upper bound (inclusive)
:param n: input number
:param lower: lower bound (inclusive)
:param upper: upper bound (inclusive)
:return: clamped number
"""
if lower > upper:
lower, upper = upper, lower
return max(min(upper, n), lower)
|
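A few illustrative checks of the behaviour above, including the bound swap:
assert clamp(15, 0, 10) == 10   # above the range: pinned to upper
assert clamp(-3, 0, 10) == 0    # below the range: pinned to lower
assert clamp(5, 10, 0) == 5     # lower > upper: bounds are swapped first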
python
|
def delete(self, event):
"""Abort running task if it exists."""
super(CeleryReceiver, self).delete(event)
AsyncResult(event.id).revoke(terminate=True)
|
python
|
def verify(self, signed):
'''
Recover the message (digest) from the signature using the public key
:param str signed: The signature created with the private key
:rtype: str
:return: The message (digest) recovered from the signature, or an empty
string if the decryption failed
'''
# Allocate a buffer large enough for the signature. Freed by ctypes.
buf = create_string_buffer(libcrypto.RSA_size(self._rsa))
signed = salt.utils.stringutils.to_bytes(signed)
size = libcrypto.RSA_public_decrypt(len(signed), signed, buf, self._rsa, RSA_X931_PADDING)
if size < 0:
raise ValueError('Unable to decrypt message')
return buf[0:size]
|
python
|
def _iter_loci(meta, clusters, s2p, filtered, n_cluster):
"""
Go through all loci and decide whether they are part
of the same TU or not.
:param idx: int cluster id
:param s2p: dict with [loci].coverage[start] = # of sequences there
:param filtered: dict with clusters object
:param n_cluster: int cluster id
:return:
* filtered: dict of cluster objects
* n_cluster: int cluster id
"""
global CONFLICT
loci = dict(zip(meta, [clusters[idc] for idc in meta]))
n_loci = len(meta)
n_loci_prev = n_loci + 1
cicle = 0
# [logger.note("BEFORE %s %s %s" % (c.id, idl, len(c.loci2seq[idl]))) for idl in c.loci2seq]
internal_cluster = {}
if n_loci == 1:
n_cluster += 1
filtered[n_cluster] = clusters[meta[0]]
filtered[n_cluster].update(id=n_cluster)
filtered[n_cluster].set_freq(s2p[1])
while n_loci < n_loci_prev and n_loci != 1:
n_loci_prev = n_loci
cicle += 1
if (cicle % 1) == 0:
logger.debug("_iter_loci:number of cicle: %s with n_loci %s" % (cicle, n_loci))
loci_similarity = _calculate_similarity(loci)
internal_cluster = _merge_similar(loci, loci_similarity)
n_loci = len(internal_cluster)
loci = internal_cluster
logger.debug("_iter_loci: n_loci %s" % n_loci)
if n_loci > 1:
n_internal_cluster = sorted(internal_cluster.keys(), reverse=True)[0]
CONFLICT += 1
internal_cluster = _solve_conflict(internal_cluster, s2p, n_internal_cluster)
internal_cluster = _clean_cluster(internal_cluster)
for idc in internal_cluster:
n_cluster += 1
logger.debug("_iter_loci: add to filtered %s" % n_cluster)
filtered[n_cluster] = internal_cluster[idc]
filtered[n_cluster].id = n_cluster
filtered[n_cluster].update(id=n_cluster)
filtered[n_cluster].set_freq(s2p[1])
logger.debug("_iter_loci: filtered %s" % filtered.keys())
# for new_c in internal_cluster.values():
# [logger.note("%s %s %s %s" % (meta, new_c.id, idl, len(new_c.loci2seq[idl]))) for idl in new_c.loci2seq]
return filtered, n_cluster
|
java
|
public synchronized void reset(short[] shortArray) throws IOException {
_writer.flush();
_writer.position(DATA_START_POSITION);
for(int i = 0; i < shortArray.length; i++) {
_writer.writeShort(shortArray[i]);
}
_writer.flush();
}
|
java
|
public void finalizeConfig() {
// See if not set by configuration, if there are defaults
// in order from the Endpoint, Service, or Bus.
configureConduitFromEndpointInfo(this, endpointInfo);
logConfig();
if (getClient().getDecoupledEndpoint() != null) {
this.endpointInfo.setProperty("org.apache.cxf.ws.addressing.replyto",
getClient().getDecoupledEndpoint());
}
}
|
python
|
def urltool(classqname, filt, reverse):
"""
Dump all urls branching from a class as OpenAPI 3 documentation
The class must be given as a FQPN which points to a Klein() instance.
Apply optional [FILT] as a regular expression searching within urls. For
example, to match all urls beginning with api, you might use '^/api'
"""
filt = re.compile(filt or '.*')
rootCls = namedAny(classqname)
rules = list(_iterClass(rootCls))
arr = []
for item in sorted(rules):
if item.subKlein:
continue
matched = filt.search(item.rulePath)
matched = not matched if reverse else matched
if matched:
arr.append(tuple(item.toOpenAPIPath()))
openapi3 = openapi.OpenAPI()
for pathPath, pathItem in arr:
if pathPath in openapi3.paths:
openapi3.paths[pathPath].merge(pathItem)
else:
openapi3.paths[pathPath] = pathItem
print(yaml.dump(openapi3, default_flow_style=False))
|
python
|
def create_conf_loader(*args, **kwargs): # pragma: no cover
"""Create a default configuration loader.
.. deprecated:: 1.0.0b1
Use :func:`create_config_loader` instead. This function will be removed
in version 1.0.1.
"""
import warnings
warnings.warn(
'"create_conf_loader" has been renamed to "create_config_loader".',
DeprecationWarning
)
return create_config_loader(*args, **kwargs)
|
java
|
public static void setDebugEnabled(ClassLoader classLoader, boolean isDebug)
throws Exception
{
Class<?> greenPepperClass = classLoader.loadClass("com.greenpepper.GreenPepper");
Method setDebugEnabledMethod = greenPepperClass.getMethod("setDebugEnabled", boolean.class);
setDebugEnabledMethod.invoke( null, isDebug );
}
|
python
|
def get_advanced_params():
""" Get `Parameters` struct with parameters that most users do not
want to think about.
"""
p = Parameters()
# Internal format used during the registration process
p.FixedInternalImagePixelType = "float"
p.MovingInternalImagePixelType = "float"
# Image direction
p.UseDirectionCosines = True
# In almost all cases you'd want multi resolution
p.Registration = 'MultiResolutionRegistration'
# Pyramid options
# *RecursiveImagePyramid downsamples the images
# *SmoothingImagePyramid does not downsample
p.FixedImagePyramid = "FixedRecursiveImagePyramid"
p.MovingImagePyramid = "MovingRecursiveImagePyramid"
# Whether transforms are combined by composition or by addition.
# It does not influence the results very much.
p.HowToCombineTransforms = "Compose"
# For out of range pixels
p.DefaultPixelValue = 0
# Interpolator used during interpolation and its order
# 1 means linear interpolation, 3 means cubic.
p.Interpolator = "BSplineInterpolator"
p.BSplineInterpolationOrder = 1
# Interpolator used during interpolation of final level, and its order
p.ResampleInterpolator = "FinalBSplineInterpolator"
p.FinalBSplineInterpolationOrder = 3
# According to the manual, there is currently only one resampler
p.Resampler = "DefaultResampler"
# Done
return p
|
python
|
def triple_exponential_moving_average(data, period):
"""
Triple Exponential Moving Average.
Formula:
TEMA = (3*EMA - 3*EMA(EMA)) + EMA(EMA(EMA))
"""
catch_errors.check_for_period_error(data, period)
tema = ((3 * ema(data, period) - (3 * ema(ema(data, period), period))) +
ema(ema(ema(data, period), period), period)
)
return tema
|
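A minimal sketch of the same recursion, assuming a pandas-based stand-in for the library's `ema` helper (the real `ema` and `catch_errors` come from the snippet's own package):
import pandas as pd

def ema(data, period):
    # Stand-in for the library's ema(); exponential mean with span=period.
    return pd.Series(data).ewm(span=period, adjust=False).mean().to_numpy()

data = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]
e1 = ema(data, 3)
e2 = ema(e1, 3)
e3 = ema(e2, 3)
tema = 3 * e1 - 3 * e2 + e3  # TEMA = 3*EMA - 3*EMA(EMA) + EMA(EMA(EMA))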
java
|
protected void initInterceptors() {
interceptors = new ArrayList<>();
ControllerUtils.collectRouteInterceptors(controllerMethod).forEach(handlerClass -> {
try {
interceptors.add(handlerClass.newInstance());
} catch (InstantiationException | IllegalAccessException e) {
throw new PippoRuntimeException(e);
}
});
}
|
java
|
@Override
public CircuitBreaker circuitBreaker(String name, Supplier<CircuitBreakerConfig> circuitBreakerConfigSupplier) {
return computeIfAbsent(name, () -> CircuitBreaker.of(name, Objects.requireNonNull(Objects.requireNonNull(circuitBreakerConfigSupplier, SUPPLIER_MUST_NOT_BE_NULL).get(), CONFIG_MUST_NOT_BE_NULL)));
}
|
python
|
def verify_response(self, response, otp, nonce, return_response=False):
"""
Returns True if the OTP is valid (status=OK) when return_response=False;
when return_response=True, it returns the server response as a
dictionary instead.
Throws an exception if the OTP is replayed, if the server response
message verification failed, or if the client id is invalid; returns
False otherwise.
"""
try:
status = re.search(r'status=([A-Z0-9_]+)', response) \
.groups()
if len(status) > 1:
message = 'More than one status= returned. Possible attack!'
raise InvalidValidationResponse(message, response)
status = status[0]
except (AttributeError, IndexError):
return False
signature, parameters = \
self.parse_parameters_from_response(response)
# Secret key is specified, so we verify the response message
# signature
if self.key:
generated_signature = \
self.generate_message_signature(parameters)
# Signature located in the response does not match the one we
# have generated
if signature != generated_signature:
logger.warn("signature mismatch for parameters=%r", parameters)
raise SignatureVerificationError(generated_signature,
signature)
param_dict = self.get_parameters_as_dictionary(parameters)
if 'otp' in param_dict and param_dict['otp'] != otp:
message = 'Unexpected OTP in response. Possible attack!'
raise InvalidValidationResponse(message, response, param_dict)
if 'nonce' in param_dict and param_dict['nonce'] != nonce:
message = 'Unexpected nonce in response. Possible attack!'
raise InvalidValidationResponse(message, response, param_dict)
if status == 'OK':
if return_response:
return param_dict
else:
return True
elif status == 'NO_SUCH_CLIENT':
raise InvalidClientIdError(self.client_id)
elif status == 'REPLAYED_OTP':
raise StatusCodeError(status)
return False
|
java
|
public static DiffException createDiffException(final ErrorKeys errorId,
final String message)
{
return new DiffException(errorId.toString() + ":\r\n" + message);
}
|
java
|
public static String getCalendarJavaDateFormat(String dateFormat) {
dateFormat = CmsStringUtil.substitute(dateFormat, "%", ""); // remove all "%"
dateFormat = CmsStringUtil.substitute(dateFormat, "m", "${month}");
dateFormat = CmsStringUtil.substitute(dateFormat, "H", "${hour}");
dateFormat = CmsStringUtil.substitute(dateFormat, "Y", "${4anno}");
dateFormat = dateFormat.toLowerCase();
dateFormat = CmsStringUtil.substitute(dateFormat, "${month}", "M");
dateFormat = CmsStringUtil.substitute(dateFormat, "${hour}", "H");
dateFormat = CmsStringUtil.substitute(dateFormat, "y", "yy");
dateFormat = CmsStringUtil.substitute(dateFormat, "${4anno}", "yyyy");
dateFormat = CmsStringUtil.substitute(dateFormat, "m", "mm"); // minutes with two digits
dateFormat = dateFormat.replace('e', 'd'); // day of month
dateFormat = dateFormat.replace('i', 'h'); // 12 hour format
dateFormat = dateFormat.replace('p', 'a'); // pm/am String
return dateFormat;
}
|
java
|
public static void writeScriptFile(
InputStream stream,
String scriptString,
Reader reader,
LineEndingStyle style,
File scriptfile
) throws IOException
{
try (FileWriter writer = new FileWriter(scriptfile)) {
if (null != scriptString) {
ScriptfileUtils.writeReader(new StringReader(scriptString), writer, style);
} else if (null != reader) {
ScriptfileUtils.writeReader(reader, writer, style);
} else if (null != stream) {
ScriptfileUtils.writeStream(stream, writer, style);
} else {
throw new IllegalArgumentException("no script source argument");
}
}
}
|
python
|
def hex2bin(fin, fout, start=None, end=None, size=None, pad=None):
"""Hex-to-Bin convertor engine.
@return 0 if all OK
@param fin input hex file (filename or file-like object)
@param fout output bin file (filename or file-like object)
@param start start of address range (optional)
@param end end of address range (inclusive; optional)
@param size size of resulting file (in bytes) (optional)
@param pad padding byte (optional)
"""
try:
h = IntelHex(fin)
except HexReaderError:
e = sys.exc_info()[1] # current exception
txt = "ERROR: bad HEX file: %s" % str(e)
print(txt)
return 1
# start, end, size
if size is not None and size != 0:
if end is None:
if start is None:
start = h.minaddr()
end = start + size - 1
else:
if (end+1) >= size:
start = end + 1 - size
else:
start = 0
try:
if pad is not None:
# using .padding attribute rather than pad argument to function call
h.padding = pad
h.tobinfile(fout, start, end)
except IOError:
e = sys.exc_info()[1] # current exception
txt = "ERROR: Could not write to file: %s: %s" % (fout, str(e))
print(txt)
return 1
return 0
|
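A hypothetical invocation of the engine above (the file names are made up; `pad` fills any gaps between records):
rc = hex2bin("firmware.hex", "firmware.bin", pad=0xFF)
assert rc == 0  # 0 means the conversion succeeded, per the docstring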
python
|
def get_queryset(self):
"""
Returns a VersionedQuerySet capable of handling version time
restrictions.
:return: VersionedQuerySet
"""
qs = VersionedQuerySet(self.model, using=self._db)
if hasattr(self, 'instance') and hasattr(self.instance, '_querytime'):
qs.querytime = self.instance._querytime
return qs
|
python
|
def create_marginalized_hist(ax, values, label, percentiles=None,
color='k', fillcolor='gray', linecolor='navy',
linestyle='-',
title=True, expected_value=None,
expected_color='red', rotated=False,
plot_min=None, plot_max=None):
"""Plots a 1D marginalized histogram of the given param from the given
samples.
Parameters
----------
ax : pyplot.Axes
The axes on which to draw the plot.
values : array
The parameter values to plot.
label : str
A label to use for the title.
percentiles : {None, float or array}
What percentiles to draw lines at. If None, will draw lines at
`[5, 50, 95]` (i.e., the bounds on the upper 90th percentile and the
median).
color : {'k', string}
What color to make the histogram; default is black.
fillcolor : {'gray', string, or None}
What color to fill the histogram with. Set to None to not fill the
histogram. Default is 'gray'.
linestyle : str, optional
What line style to use for the histogram. Default is '-'.
linecolor : {'navy', string}
What color to use for the percentile lines. Default is 'navy'.
title : bool, optional
Add a title with an estimated value +/- uncertainty. The estimated value
is the percentile halfway between the max/min of ``percentiles``, while
the uncertainty is given by the max/min of the ``percentiles``. If no
percentiles are specified, defaults to quoting the median +/- 95/5
percentiles.
rotated : {False, bool}
Plot the histogram on the y-axis instead of the x. Default is False.
plot_min : {None, float}
The minimum value to plot. If None, will default to whatever `pyplot`
creates.
plot_max : {None, float}
The maximum value to plot. If None, will default to whatever `pyplot`
creates.
scalefac : {1., float}
Factor to scale the default font sizes by. Default is 1 (no scaling).
"""
if fillcolor is None:
htype = 'step'
else:
htype = 'stepfilled'
if rotated:
orientation = 'horizontal'
else:
orientation = 'vertical'
ax.hist(values, bins=50, histtype=htype, orientation=orientation,
facecolor=fillcolor, edgecolor=color, ls=linestyle, lw=2,
density=True)
if percentiles is None:
percentiles = [5., 50., 95.]
if len(percentiles) > 0:
plotp = numpy.percentile(values, percentiles)
else:
plotp = []
for val in plotp:
if rotated:
ax.axhline(y=val, ls='dashed', color=linecolor, lw=2, zorder=3)
else:
ax.axvline(x=val, ls='dashed', color=linecolor, lw=2, zorder=3)
# plot expected
if expected_value is not None:
if rotated:
ax.axhline(expected_value, color=expected_color, lw=1.5, zorder=2)
else:
ax.axvline(expected_value, color=expected_color, lw=1.5, zorder=2)
if title:
if len(percentiles) > 0:
minp = min(percentiles)
maxp = max(percentiles)
medp = (maxp + minp) / 2.
else:
minp = 5
medp = 50
maxp = 95
values_min = numpy.percentile(values, minp)
values_med = numpy.percentile(values, medp)
values_max = numpy.percentile(values, maxp)
negerror = values_med - values_min
poserror = values_max - values_med
fmt = '${0}$'.format(str_utils.format_value(
values_med, negerror, plus_error=poserror))
if rotated:
ax.yaxis.set_label_position("right")
# sets colored title for marginal histogram
set_marginal_histogram_title(ax, fmt, color,
label=label, rotated=rotated)
# Remove x-ticks
ax.set_xticks([])
# turn off x-labels
ax.set_xlabel('')
# set limits
ymin, ymax = ax.get_ylim()
if plot_min is not None:
ymin = plot_min
if plot_max is not None:
ymax = plot_max
ax.set_ylim(ymin, ymax)
else:
# sets colored title for marginal histogram
set_marginal_histogram_title(ax, fmt, color, label=label)
# Remove y-ticks
ax.set_yticks([])
# turn off y-label
ax.set_ylabel('')
# set limits
xmin, xmax = ax.get_xlim()
if plot_min is not None:
xmin = plot_min
if plot_max is not None:
xmax = plot_max
ax.set_xlim(xmin, xmax)
|
java
|
public BetaConstraints createBetaNodeConstraint(final BuildContext context,
final List<BetaNodeFieldConstraint> list,
final boolean disableIndexing) {
BetaConstraints constraints;
switch ( list.size() ) {
case 0 :
constraints = EmptyBetaConstraints.getInstance();
break;
case 1 :
constraints = new SingleBetaConstraints( list.get( 0 ),
context.getKnowledgeBase().getConfiguration(),
disableIndexing );
break;
case 2 :
constraints = new DoubleBetaConstraints( list.toArray( new BetaNodeFieldConstraint[list.size()] ),
context.getKnowledgeBase().getConfiguration(),
disableIndexing );
break;
case 3 :
constraints = new TripleBetaConstraints( list.toArray( new BetaNodeFieldConstraint[list.size()] ),
context.getKnowledgeBase().getConfiguration(),
disableIndexing );
break;
case 4 :
constraints = new QuadroupleBetaConstraints( list.toArray( new BetaNodeFieldConstraint[list.size()] ),
context.getKnowledgeBase().getConfiguration(),
disableIndexing );
break;
default :
constraints = new DefaultBetaConstraints( list.toArray( new BetaNodeFieldConstraint[list.size()] ),
context.getKnowledgeBase().getConfiguration(),
disableIndexing );
}
return constraints;
}
|
java
|
public String read() throws IOException {
CurrentInProgressMetadataWritable cip = localWritable.get();
if (readAndUpdateVersion(cip)) {
return cip.getPath();
} else {
if (LOG.isDebugEnabled()) {
LOG.debug(fullyQualifiedZNode + " is currently clear.");
}
}
return null;
}
|
java
|
public void putWithVersion(RowCursor cursor, Result<Boolean> cont)
{
_tableService.put(cursor, PutType.PUT, cont);
}
|
java
|
public void updateArray(int i, java.sql.Array array) throws SQLException {
try {
rsetImpl.updateArray(i, array);
} catch (SQLException ex) {
FFDCFilter.processException(ex, "com.ibm.ws.rsadapter.jdbc.WSJdbcResultSet.updateArray", "3993", this);
throw WSJdbcUtil.mapException(this, ex);
} catch (NullPointerException nullX) {
// No FFDC code needed; we might be closed.
throw runtimeXIfNotClosed(nullX);
}
}
|
python
|
def get_driver(self, name, version):
"""Authenticates and creates new API driver to perform scope stuff
:param name: Name of driver
:param version: Version of driver
:return: driver
"""
user_credentials = self.get_user_credentials() # get credentials
return discovery.build(
name, version,
http=self.authenticate(user_credentials)
)
|
java
|
public static byte[] encode(byte[] in) {
return org.apache.commons.codec.binary.Base64.encodeBase64(in);
}
|
java
|
public static IntegerVector subtract(IntegerVector vector1,
IntegerVector vector2) {
if (vector2.length() != vector1.length())
throw new IllegalArgumentException(
"Vectors of different sizes cannot be added");
// If vector is a sparse vector, simply get the non zero values and
// add them to this instance.
if (vector2 instanceof SparseVector)
subtractSparseValues(vector1, vector2);
else if (vector2 instanceof TernaryVector)
subtractTernaryValues(vector1, (TernaryVector)vector2);
else {
// Otherwise, inspect all values of vector, and only add the non
// zero values.
for (int i = 0; i < vector2.length(); ++i) {
int value = vector2.get(i);
// In the case that vector1 is sparse, only add non zero values.
if (value != 0d)
vector1.add(i, -1 * value);
}
}
return vector1;
}
|
python
|
def zscan(self, name, cursor=0, match=None, count=None,
score_cast_func=float):
"""
Incrementally return lists of elements in a sorted set. Also return a
cursor indicating the scan position.
``match`` allows for filtering the members by pattern
``count`` provides a hint for the minimum number of returns
``score_cast_func`` a callable used to cast the score return value
"""
with self.pipe as pipe:
f = Future()
res = pipe.zscan(self.redis_key(name), cursor=cursor,
match=match, count=count,
score_cast_func=score_cast_func)
def cb():
f.set((res[0], [(self.valueparse.decode(k), v)
for k, v in res[1]]))
pipe.on_execute(cb)
return f
|
python
|
def scan_url(self, this_url):
""" Submit a URL to be scanned by VirusTotal.
:param this_url: The URL that should be scanned. This parameter accepts a list of URLs (up to 4 with the
standard request rate) so as to perform a batch scanning request with one single call. The
URLs must be separated by a new line character.
:return: JSON response that contains scan_id and permalink.
"""
params = {'apikey': self.api_key, 'url': this_url}
try:
response = requests.post(self.base + 'url/scan', params=params, proxies=self.proxies)
except requests.RequestException as e:
return dict(error=e.message)
return _return_response_and_status_code(response)
|
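A hypothetical usage sketch, assuming the method above belongs to a wrapper class instantiated with a VirusTotal API key (the class name and key are assumptions, not confirmed by the snippet):
API_KEY = "..."                  # your VirusTotal key (placeholder)
vt = PublicApi(API_KEY)          # assumed wrapper class exposing scan_url()
result = vt.scan_url("http://example.com/")
print(result)                    # dict with response code, scan_id and permalink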
python
|
def get_app_region_products(self, app_uri):
"""获得指定应用所在区域的产品信息
Args:
- app_uri: 应用的完整标识
Returns:
返回产品信息列表,若失败则返回None
"""
apps, retInfo = self.list_apps()
if apps is None:
return None
for app in apps:
if (app.get('uri') == app_uri):
return self.get_region_products(app.get('region'))
return
|
java
|
public static boolean is_chavarganta(String str)
{
String s1 = VarnaUtil.getAntyaVarna(str);
return is_chavarga(s1);
}
|
python
|
def relation_to_role_and_interface(relation_name):
"""
Given the name of a relation, return the role and the name of the interface
that relation uses (where role is one of ``provides``, ``requires``, or ``peers``).
:returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
"""
_metadata = metadata()
for role in ('provides', 'requires', 'peers'):
interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
if interface:
return role, interface
return None, None
|
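An illustrative sketch of the lookup, pretending `metadata()` returns this hypothetical charm metadata (stubbed out for the example only):
def metadata():
    # Hypothetical charm metadata, for illustration only.
    return {
        "provides": {"website": {"interface": "http"}},
        "requires": {"database": {"interface": "mysql"}},
    }

assert relation_to_role_and_interface("website") == ("provides", "http")
assert relation_to_role_and_interface("database") == ("requires", "mysql")
assert relation_to_role_and_interface("missing") == (None, None)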
java
|
protected void setLength(long length) throws IOException {
if(length<0) throw new IllegalArgumentException("Invalid length: "+length);
synchronized(this) {
file.seek(8);
file.writeLong(length);
if(length==0) setFirstIndex(0);
}
}
|
python
|
def show_storage_container_acl(kwargs=None, storage_conn=None, call=None):
'''
.. versionadded:: 2015.8.0
Show a storage container's acl
CLI Example:
.. code-block:: bash
salt-cloud -f show_storage_container_acl my-azure name=myservice
name:
Name of existing container.
lease_id:
If specified, show_storage_container_acl only succeeds if the
container's lease is active and matches this ID.
'''
if call != 'function':
raise SaltCloudSystemExit(
'The show_storage_container_acl function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
if 'name' not in kwargs:
raise SaltCloudSystemExit('A storage container name must be specified as "name"')
if not storage_conn:
storage_conn = get_storage_conn(conn_kwargs=kwargs)
data = storage_conn.get_container_acl(
container_name=kwargs['name'],
x_ms_lease_id=kwargs.get('lease_id', None),
)
return data
|
python
|
def vertex_enumeration_gen(g, qhull_options=None):
"""
Generator version of `vertex_enumeration`.
Parameters
----------
g : NormalFormGame
NormalFormGame instance with 2 players.
qhull_options : str, optional(default=None)
Options to pass to `scipy.spatial.ConvexHull`. See the `Qhull
manual <http://www.qhull.org>`_ for details.
Yields
-------
tuple(ndarray(float, ndim=1))
Tuple of Nash equilibrium mixed actions.
"""
try:
N = g.N
except AttributeError:
raise TypeError('input must be a 2-player NormalFormGame')
if N != 2:
raise NotImplementedError('Implemented only for 2-player games')
brps = [_BestResponsePolytope(
g.players[1-i], idx=i, qhull_options=qhull_options
) for i in range(N)]
labelings_bits_tup = \
tuple(_ints_arr_to_bits(brps[i].labelings) for i in range(N))
equations_tup = tuple(brps[i].equations for i in range(N))
trans_recips = tuple(brps[i].trans_recip for i in range(N))
return _vertex_enumeration_gen(labelings_bits_tup, equations_tup,
trans_recips)
|
python
|
def gen_source(self, ast, name, customize, is_lambda=False, returnNone=False):
"""convert SyntaxTree to Python source code"""
rn = self.return_none
self.return_none = returnNone
old_name = self.name
self.name = name
# if code would be empty, append 'pass'
if len(ast) == 0:
self.println(self.indent, 'pass')
else:
self.customize(customize)
if is_lambda:
self.write(self.traverse(ast, is_lambda=is_lambda))
else:
self.text = self.traverse(ast, is_lambda=is_lambda)
self.println(self.text)
self.name = old_name
self.return_none = rn
|
python
|
def response_change(self, request, obj):
"""Determine the HttpResponse for the change_view stage."""
opts = self.opts.app_label, self.opts.model_name
pk_value = obj._get_pk_val()
if '_continue' in request.POST:
msg = _(
'The %(name)s block was changed successfully. You may edit it again below.'
) % {'name': force_text(self.opts.verbose_name)}
self.message_user(request, msg, messages.SUCCESS)
# We redirect to the save and continue page, which updates the
# parent window in javascript and redirects back to the edit page
# in javascript.
return HttpResponseRedirect(reverse(
'admin:%s_%s_continue' % opts,
args=(pk_value,),
current_app=self.admin_site.name
))
# Update column and close popup - don't bother with a message as they won't see it
return self.response_rerender(request, obj, 'admin/glitter/update_column.html')
|
java
|
private boolean recycle(Segment seg) {
if(_recycleList.size() < _recycleLimit) {
return _recycleList.add(seg);
}
return false;
}
|
python
|
def Weber(mp, rhop, dp, rhog, D, Vterminal=4):
r'''Calculates saltation velocity of the gas for pneumatic conveying,
according to [1]_ as described in [2]_, [3]_, [4]_, and [5]_.
If Vterminal is 3 m/s or less, equation 1 is used; otherwise, equation 2.
.. math::
Fr_s = \left(7 + \frac{8}{3}V_{terminal}\right)\mu^{0.25}
\left(\frac{d_p}{D}\right)^{0.1}
Fr_s = 15\mu^{0.25}\left(\frac{d_p}{D}\right)^{0.1}
Fr_s = \frac{V_{salt}}{\sqrt{gD}}
\mu = \frac{m_p}{\frac{\pi}{4}D^2V \rho_f}
Parameters
----------
mp : float
Solid mass flow rate, [kg/s]
rhop : float
Particle density, [kg/m^3]
dp : float
Particle diameter, [m]
rhog : float
Gas density, [kg/m^3]
D : float
Diameter of pipe, [m]
Vterminal : float
Terminal velocity of particle settling in gas, [m/s]
Returns
-------
V : float
Saltation velocity of gas, [m/s]
Notes
-----
Model is rearranged to be explicit in terms of saltation velocity
internally.
Examples
--------
Examples are only a self-test.
>>> Weber(mp=1, rhop=1000., dp=1E-3, rhog=1.2, D=0.1, Vterminal=4)
15.227445436331474
References
----------
.. [1] Weber, M. 1981. Principles of hydraulic and pneumatic conveying in
pipes. Bulk Solids Handling 1: 57-63.
.. [2] Rabinovich, Evgeny, and Haim Kalman. "Threshold Velocities of
Particle-Fluid Flows in Horizontal Pipes and Ducts: Literature Review."
Reviews in Chemical Engineering 27, no. 5-6 (January 1, 2011).
doi:10.1515/REVCE.2011.011.
.. [3] Setia, G., S. S. Mallick, R. Pan, and P. W. Wypych. "Modeling
Minimum Transport Boundary for Fluidized Dense-Phase Pneumatic Conveying
Systems." Powder Technology 277 (June 2015): 244-51.
doi:10.1016/j.powtec.2015.02.050.
.. [4] Bansal, A., S. S. Mallick, and P. W. Wypych. "Investigating
Straight-Pipe Pneumatic Conveying Characteristics for Fluidized
Dense-Phase Pneumatic Conveying." Particulate Science and Technology
31, no. 4 (July 4, 2013): 348-56. doi:10.1080/02726351.2012.732677.
.. [5] Gomes, L. M., and A. L. Amarante Mesquita. "On the Prediction of
Pickup and Saltation Velocities in Pneumatic Conveying." Brazilian
Journal of Chemical Engineering 31, no. 1 (March 2014): 35-46.
doi:10.1590/S0104-66322014000100005
'''
if Vterminal <= 3:
term1 = (7 + 8/3.*Vterminal)*(dp/D)**0.1
else:
term1 = 15.*(dp/D)**0.1
term2 = 1./(g*D)**0.5
term3 = mp/rhog/(pi/4*D**2)
V = (term1/term2*term3**0.25)**(1/1.25)
return V
|
java
|
static ByteBufferRange decode(String string) {
int prefix = string.indexOf(':');
int sep = string.indexOf('-', prefix + 1);
checkArgument(prefix >= 0 && sep >= 0, "Invalid split string: %s", string);
char[] start = new char[prefix + sep - (prefix + 1)];
string.getChars(0, prefix, start, 0);
string.getChars(prefix + 1, sep, start, prefix);
char[] end = new char[prefix + string.length() - (sep + 1)];
string.getChars(0, prefix, end, 0);
string.getChars(sep + 1, string.length(), end, prefix);
byte[] startBytes, endBytes;
try {
startBytes = Hex.decodeHex(start);
endBytes = Hex.decodeHex(end);
} catch (DecoderException e) {
throw new IllegalArgumentException(format("Invalid split string: %s", string));
}
return new ByteBufferRangeImpl(ByteBuffer.wrap(startBytes), ByteBuffer.wrap(endBytes), -1, false);
}
|
java
|
public static NodeImpl makeIterableAndSetMapKey(final NodeImpl node, final Object key) {
return new NodeImpl( //
node.name, //
node.parent, //
true, //
null, //
key, //
node.kind, //
node.parameterTypes, //
node.parameterIndex, //
node.value, //
node.containerClass, //
node.typeArgumentIndex //
);
}
|
python
|
def mix(color1, color2, pos=0.5):
"""
Return the mix of two colors at a state of :pos:
Returns color1 * pos + color2 * (1 - pos)
"""
opp_pos = 1 - pos
red = color1[0] * pos + color2[0] * opp_pos
green = color1[1] * pos + color2[1] * opp_pos
blue = color1[2] * pos + color2[2] * opp_pos
return int(red), int(green), int(blue)
|
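Two quick checks of the blend above (pos weights color1):
assert mix((255, 0, 0), (0, 0, 255)) == (127, 0, 127)         # even blend
assert mix((255, 0, 0), (0, 0, 255), pos=1.0) == (255, 0, 0)  # all color1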
python
|
def parse_querystring(self, req, name, field):
"""Pull a querystring value from the request."""
return core.get_value(req.GET, name, field)
|
java
|
public static int compareQualifiers(final byte[] a, final int offset_a,
final byte[] b, final int offset_b) {
final long left = Internal.getOffsetFromQualifier(a, offset_a);
final long right = Internal.getOffsetFromQualifier(b, offset_b);
if (left == right) {
return 0;
}
return (left < right) ? -1 : 1;
}
|
python
|
def get_alter_table_sql(self, diff):
"""
Get the ALTER TABLE SQL statement
:param diff: The table diff
:type diff: eloquent.dbal.table_diff.TableDiff
:rtype: list
"""
#sql = self._get_simple_alter_table_sql(diff)
from_table = diff.from_table
if not isinstance(from_table, Table):
raise Exception('SQLite platform requires for the alter table the table diff '
'referencing the original table')
table = from_table.clone()
columns = {}
old_column_names = {}
new_column_names = {}
column_sql = []
for column_name, column in table.get_columns().items():
column_name = column_name.lower()
columns[column_name] = column
old_column_names[column_name] = column.get_name()
new_column_names[column_name] = column.get_name()
for column_name, column in diff.removed_columns.items():
column_name = column_name.lower()
if column_name in columns:
del columns[column_name]
del old_column_names[column_name]
del new_column_names[column_name]
for old_column_name, column in diff.renamed_columns.items():
old_column_name = old_column_name.lower()
if old_column_name in columns:
del columns[old_column_name]
columns[column.get_name().lower()] = column
if old_column_name in new_column_names:
new_column_names[old_column_name] = column.get_name()
for old_column_name, column_diff in diff.changed_columns.items():
if old_column_name in columns:
del columns[old_column_name]
columns[column_diff.column.get_name().lower()] = column_diff.column
if old_column_name in new_column_names:
new_column_names[old_column_name] = column_diff.column.get_name()
for column_name, column in diff.added_columns.items():
columns[column_name.lower()] = column
sql = []
table_sql = []
data_table = Table('__temp__' + table.get_name())
new_table = Table(table.get_name(), columns,
self.get_primary_index_in_altered_table(diff),
self.get_foreign_keys_in_altered_table(diff))
new_table.add_option('alter', True)
sql = self.get_pre_alter_table_index_foreign_key_sql(diff)
sql.append('CREATE TEMPORARY TABLE %s AS SELECT %s FROM %s'
% (data_table.get_name(), ', '.join(old_column_names.values()), table.get_name()))
sql.append(self.get_drop_table_sql(from_table))
sql += self.get_create_table_sql(new_table)
sql.append('INSERT INTO %s (%s) SELECT %s FROM %s'
% (new_table.get_name(),
', '.join(new_column_names.values()),
', '.join(old_column_names.values()),
data_table.get_name()))
sql.append(self.get_drop_table_sql(data_table))
sql += self.get_post_alter_table_index_foreign_key_sql(diff)
return sql
|
java
|
public void stopServer()
{
// _heartbeatState.notifyHeartbeatStop();
SocketPool pool = _clusterSocketPool.get();
if (pool != null) {
pool.getFactory().notifyHeartbeatStop();
}
}
|
python
|
def install(self):
"""Install packages from the packages_dict."""
self.distro = distro_check()
package_list = self.packages_dict.get(self.distro)
self._installer(package_list=package_list.get('packages'))
|
java
|
public void setPresence(Object newPresence, final Respoke.TaskCompletionListener completionListener) {
if (isConnected()) {
Object presenceToSet = newPresence;
if (null == presenceToSet) {
presenceToSet = "available";
}
JSONObject typeData = new JSONObject();
JSONObject data = new JSONObject();
try {
typeData.put("type", presenceToSet);
data.put("presence", typeData);
final Object finalPresence = presenceToSet;
signalingChannel.sendRESTMessage("post", "/v1/presence", data, new RespokeSignalingChannel.RESTListener() {
@Override
public void onSuccess(Object response) {
presence = finalPresence;
Respoke.postTaskSuccess(completionListener);
}
@Override
public void onError(final String errorMessage) {
Respoke.postTaskError(completionListener, errorMessage);
}
});
} catch (JSONException e) {
Respoke.postTaskError(completionListener, "Error encoding presence to json");
}
} else {
Respoke.postTaskError(completionListener, "Can't complete request when not connected. Please reconnect!");
}
}
|
java
|
void findNodes(List<RBBINode> dest, int kind) {
if (fType == kind) {
dest.add(this);
}
if (fLeftChild != null) {
fLeftChild.findNodes(dest, kind);
}
if (fRightChild != null) {
fRightChild.findNodes(dest, kind);
}
}
|
python
|
def wrap_viscm(cmap, dpi=100, saveplot=False):
'''Evaluate goodness of colormap using perceptual deltas.
:param cmap: Colormap instance.
:param dpi=100: dpi for saved image.
:param saveplot=False: Whether to save the plot or not.
'''
from viscm import viscm
viscm(cmap)
fig = plt.gcf()
fig.set_size_inches(22, 10)
plt.show()
if saveplot:
fig.savefig('figures/eval_' + cmap.name + '.png', bbox_inches='tight', dpi=dpi)
fig.savefig('figures/eval_' + cmap.name + '.pdf', bbox_inches='tight', dpi=dpi)
|
python
|
def define_as_input(self, pin, pullup=False):
"""Set the input or output mode for a specified pin. Mode should be
either GPIO.OUT or GPIO.IN.
"""
self._validate_channel(pin)
# Set bit to 1 for input or 0 for output.
self.iodir[int(pin/8)] |= 1 << (int(pin%8))
self._write_iodir()
self.pullup(pin, pullup)
|
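The register arithmetic in the body above, spelled out for one pin (illustrative):
pin = 11
assert int(pin / 8) == 1          # byte index into the iodir register bank
assert 1 << (pin % 8) == 0b1000   # mask setting bit 3 of that byte (input)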
java
|
@Override
public void update(ChannelData inputData) {
this.handshakeErrorTracker = createSSLHandshakeErrorTracker(inputData);
this.sslConfig.updateChannelData(inputData);
}
|
java
|
public static Method getMethodByName(Class<?> clazz, boolean ignoreCase, String methodName) throws SecurityException {
if (null == clazz || StrUtil.isBlank(methodName)) {
return null;
}
final Method[] methods = getMethods(clazz);
if (ArrayUtil.isNotEmpty(methods)) {
for (Method method : methods) {
if (StrUtil.equals(methodName, method.getName(), ignoreCase)) {
return method;
}
}
}
return null;
}
|
java
|
public Matrix4x3d scale(double xyz, Matrix4x3d dest) {
return scale(xyz, xyz, xyz, dest);
}
|
java
|
public static void showAbout() {
final CmsPopup popup = new CmsPopup(Messages.get().key(Messages.GUI_ABOUT_DIALOG_TITLE_0));
FlowPanel container = new FlowPanel();
int height = 450;
if (Window.getClientHeight() < height) {
height = Math.max(300, Window.getClientHeight() - 50);
}
container.setHeight(height + "px");
Frame frame = new Frame();
frame.setWidth("100%");
frame.setHeight("100%");
frame.setUrl(CmsCoreProvider.get().getAboutLink());
container.add(frame);
popup.setMainContent(container);
popup.center();
popup.addDialogClose(null);
CmsPushButton closeButton = new CmsPushButton();
closeButton.setText(Messages.get().key(Messages.GUI_CLOSE_0));
closeButton.setUseMinWidth(true);
closeButton.setButtonStyle(ButtonStyle.TEXT, ButtonColor.BLUE);
closeButton.addClickHandler(new ClickHandler() {
/**
* @see com.google.gwt.event.dom.client.ClickHandler#onClick(com.google.gwt.event.dom.client.ClickEvent)
*/
public void onClick(ClickEvent event) {
popup.hide();
}
});
popup.addButton(closeButton);
popup.addDialogClose(null);
}
|
java
|
public synchronized void setMaxCacheSizeInBytes(final int newsize) {
if (validState) {
log.info(this.getClass().getName() + "::setMaxCacheSizeInBytes newsize=" + newsize
+ " flushing write-cache");
privateSync(true, false);
clearReadCaches();
}
if (newsize >= 1024) { // 1KB minimal
maxCacheSizeInBytes = newsize;
createReadCaches();
}
}
|
python
|
def _subst_libs(env, libs):
"""
Substitute environment variables and split into list.
"""
if SCons.Util.is_String(libs):
libs = env.subst(libs)
if SCons.Util.is_String(libs):
libs = libs.split()
elif SCons.Util.is_Sequence(libs):
_libs = []
for l in libs:
_libs += _subst_libs(env, l)
libs = _libs
else:
# libs is an object (Node, for example)
libs = [libs]
return libs
|
python
|
def update(self):
"""Update hook
"""
super(AnalysisRequestAnalysesView, self).update()
analyses = self.context.getAnalyses(full_objects=True)
self.analyses = dict([(a.getServiceUID(), a) for a in analyses])
self.selected = self.analyses.keys()
|
java
|
protected boolean isLazy(final Class<?> type, final boolean lazy) {
if (isHkExtension(type) && lazy) {
logger.warn("@LazyBinding is ignored, because @HK2Managed set: {}", type.getName());
return false;
}
return lazy;
}
|
python
|
def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True):
'''
Return the data from the executed command to the master server
'''
if not isinstance(rets, list):
rets = [rets]
jids = {}
for ret in rets:
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: %s', jid)
load = jids.setdefault(jid, {})
if ret_cmd == '_syndic_return':
if not load:
load.update({'id': self.opts['id'],
'jid': jid,
'fun': fun,
'arg': ret.get('arg'),
'tgt': ret.get('tgt'),
'tgt_type': ret.get('tgt_type'),
'load': ret.get('__load__'),
'return': {}})
if '__master_id__' in ret:
load['master_id'] = ret['__master_id__']
for key, value in six.iteritems(ret):
if key.startswith('__'):
continue
load['return'][key] = value
else:
load.update({'id': self.opts['id']})
for key, value in six.iteritems(ret):
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], six.string_types):
load['out'] = ret['out']
else:
log.error(
'Invalid outputter %s. This is likely a bug.',
ret['out']
)
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, six.string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
salt.utils.minion.cache_jobs(self.opts, load['jid'], ret)
load = {'cmd': ret_cmd,
'load': list(six.itervalues(jids))}
def timeout_handler(*_):
log.warning(
'The minion failed to return the job information for job %s. '
'This is often due to the master being shut down or '
'overloaded. If the master is running, consider increasing '
'the worker_threads value.', jid
)
return True
if sync:
try:
ret_val = self._send_req_sync(load, timeout=timeout)
except SaltReqTimeoutError:
timeout_handler()
return ''
else:
with tornado.stack_context.ExceptionStackContext(timeout_handler):
ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
log.trace('ret_val = %s', ret_val) # pylint: disable=no-member
return ret_val
|
java
|
public static Recipient[] toRecipient(List<String> to) {
Recipient[] addresses = new Recipient[to.size()];
int i = 0;
for (String t : to) {
addresses[i++] = new Recipient(t);
}
return addresses;
}
|
python
|
def _format_preconditions(preconditions: List[List[icontract._Contract]], prefix: Optional[str] = None) -> List[str]:
"""
Format preconditions as reST.
:param preconditions: preconditions of a function
:param prefix: prefix of the ``:requires:`` and ``:requires else:`` directive
:return: list of lines
"""
if not preconditions:
return []
result = [] # type: List[str]
for i, group in enumerate(preconditions):
if i == 0:
if prefix is not None:
result.append(":{} requires:".format(prefix))
else:
result.append(":requires:")
else:
if prefix is not None:
result.append(":{} requires else:".format(prefix))
else:
result.append(":requires else:")
for precondition in group:
result.append(" * {}".format(_format_contract(contract=precondition)))
return result
|
python
|
def parse_ports(ports_text):
"""Parse ports text
e.g. ports_text = "12345,13000-15000,20000-30000"
"""
ports_set = set()
for bit in ports_text.split(','):
if '-' in bit:
low, high = bit.split('-', 1)
ports_set = ports_set.union(range(int(low), int(high) + 1))
else:
ports_set.add(int(bit))
return sorted(list(ports_set))
|
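For example, using the docstring's sample input shape:
assert parse_ports("12345,13000-13002") == [12345, 13000, 13001, 13002]
assert parse_ports("80,80,443") == [80, 443]  # duplicates collapse via the set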
java
|
private String cleanupExpressions(String drl) {
// execute cleanup
for ( final DSLMappingEntry entry : this.cleanup ) {
drl = entry.getKeyPattern().matcher( drl ).replaceAll( entry.getValuePattern() );
}
return drl;
}
|
python
|
def compile_contracts(self, target_path: Path) -> ContractManager:
""" Store compiled contracts JSON at `target_path`. """
self.checksum_contracts()
if self.overall_checksum is None:
raise ContractSourceManagerCompilationError('Checksumming failed.')
contracts_compiled = self._compile_all_contracts()
target_path.parent.mkdir(parents=True, exist_ok=True)
with target_path.open(mode='w') as target_file:
target_file.write(
json.dumps(
dict(
contracts=contracts_compiled,
contracts_checksums=self.contracts_checksums,
overall_checksum=self.overall_checksum,
contracts_version=None,
),
sort_keys=True,
indent=4,
),
)
return ContractManager(target_path)
|
java
|
RuleReturnScope invoke(TreeParser treeParser) throws Exception
{
assert treeParser != null;
return (RuleReturnScope)method.invoke(treeParser, arguments);
}
|
python
|
def ambiguate(sequence1, sequence2, delete_ambiguous=False):
""" delete_ambiguous: Marks sequences for deletion by replacing all
chars with 'X'. These seqs are deleted later with remove_empty """
delete = False
combination = list()
z = list(zip(sequence1, sequence2))
for (a, b) in z:
if a == b:
combination.append(a)
else:
if a == '-' or b == '-':
combination.append('-')
else:
if delete_ambiguous:
delete = True
ambig = get_ambiguity(a, b)
combination.append(ambig)
if delete:
return 'X' * len(combination)
return ''.join(combination)
|
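A quick illustration, assuming `get_ambiguity` returns IUPAC codes (e.g. A/G -> 'R'):
assert ambiguate("ACGT", "ACGT") == "ACGT"   # identical: unchanged
assert ambiguate("AAGT", "AGGT") == "ARGT"   # A/G mismatch -> IUPAC 'R' (assumed)
assert ambiguate("AAGT", "AGGT", delete_ambiguous=True) == "XXXX"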
python
|
def process_associations(self, limit):
"""
Loop through the xml file and process the article-breed, article-phene,
breed-phene, phene-gene associations, and the external links to LIDA.
:param limit:
:return:
"""
myfile = '/'.join((self.rawdir, self.files['data']['file']))
f = gzip.open(myfile, 'rb')
filereader = io.TextIOWrapper(f, newline="")
filereader.readline() # remove the xml declaration line
for event, elem in ET.iterparse(filereader): # iterparse is not deprecated
self.process_xml_table(
elem, 'Article_Breed', self._process_article_breed_row, limit)
self.process_xml_table(
elem, 'Article_Phene', self._process_article_phene_row, limit)
self.process_xml_table(
elem, 'Breed_Phene', self._process_breed_phene_row, limit)
self.process_xml_table(
elem, 'Lida_Links', self._process_lida_links_row, limit)
self.process_xml_table(
elem, 'Phene_Gene', self._process_phene_gene_row, limit)
self.process_xml_table(
elem, 'Group_MPO', self._process_group_mpo_row, limit)
f.close()
return
|
python
|
def _set_packagebase(self, variant):
self.setEnabled(variant is not None)
self.variant = variant
is_package = isinstance(variant, Package)
prev_index = self.currentIndex()
disabled_tabs = set()
for d in self.tabs.itervalues():
index = d["index"]
if (not d["lazy"]) or (self.currentIndex() == index):
self.widget(index).set_variant(variant)
tab_index = self.tabs["variants"]["index"]
if (isinstance(variant, Variant) and variant.index is not None) \
or (is_package and variant.num_variants):
n = variant.num_variants if is_package else variant.parent.num_variants
label = "variants (%d)" % n
self.setTabEnabled(tab_index, True)
else:
label = "variants"
self.setTabEnabled(tab_index, False)
disabled_tabs.add(tab_index)
self.setTabText(tab_index, label)
tab_index = self.tabs["tools"]["index"]
if variant and variant.tools:
label = "tools (%d)" % len(variant.tools)
self.setTabEnabled(tab_index, True)
else:
label = "tools"
self.setTabEnabled(tab_index, False)
disabled_tabs.add(tab_index)
self.setTabText(tab_index, label)
"""
tab_index = self.tabs["help"]
if self.help_widget.success:
self.setTabEnabled(tab_index, True)
else:
self.setTabEnabled(tab_index, False)
disabled_tabs.add(tab_index)
"""
if prev_index in disabled_tabs:
self.setCurrentIndex(0)
|
python
|
def _get_url(self, url):
"""
Returns normalized url. If schema is not given, would fall to
filesystem (``file:///``) schema.
"""
url = str(url)
if url != 'default' and '://' not in url:
url = ':///'.join(('file', url))
return url
|
java
|
public final ListDatasetsPagedResponse listDatasets(String parent, String filter) {
PROJECT_PATH_TEMPLATE.validate(parent, "listDatasets");
ListDatasetsRequest request =
ListDatasetsRequest.newBuilder().setParent(parent).setFilter(filter).build();
return listDatasets(request);
}
|
java
|
public void setEventAggregates(java.util.Collection<EventAggregate> eventAggregates) {
if (eventAggregates == null) {
this.eventAggregates = null;
return;
}
this.eventAggregates = new java.util.ArrayList<EventAggregate>(eventAggregates);
}
|
java
|
public synchronized void add(@Nonnull String jobFullName, int n) throws IOException {
addWithoutSaving(jobFullName, n);
save();
}
|
python
|
def rollback(name, **kwargs):
'''
Roll back the given dataset to a previous snapshot.
name : string
name of snapshot
recursive : boolean
destroy any snapshots and bookmarks more recent than the one
specified.
recursive_all : boolean
destroy any more recent snapshots and bookmarks, as well as any
clones of those snapshots.
force : boolean
used with the -R option to force an unmount of any clone file
systems that are to be destroyed.
.. warning::
When a dataset is rolled back, all data that has changed since
the snapshot is discarded, and the dataset reverts to the state
at the time of the snapshot. By default, the command refuses to
roll back to a snapshot other than the most recent one.
In order to do so, all intermediate snapshots and bookmarks
must be destroyed by specifying the -r option.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' zfs.rollback myzpool/mydataset@yesterday
'''
## Configure command
# NOTE: initialize the defaults
flags = []
# NOTE: set extra config from kwargs
if kwargs.get('recursive_all', False):
flags.append('-R')
if kwargs.get('recursive', False):
flags.append('-r')
if kwargs.get('force', False):
if kwargs.get('recursive_all', False) or kwargs.get('recursive', False):
flags.append('-f')
else:
log.warning('zfs.rollback - force=True can only be used with recursive_all=True or recursive=True')
## Rollback to snapshot
res = __salt__['cmd.run_all'](
__utils__['zfs.zfs_command'](
command='rollback',
flags=flags,
target=name,
),
python_shell=False,
)
return __utils__['zfs.parse_command_result'](res, 'rolledback')
|
python
|
def get_methods(self):
"""
Yields `MethodClassAnalysis` objects, one for every method of every class
"""
for c in self.classes.values():
for m in c.get_methods():
yield m
|
python
|
def eeg_add_channel(raw, channel, sync_index_eeg=0, sync_index_channel=0, channel_type=None, channel_name=None):
"""
Add a channel to an mne Raw M/EEG object. The channel is synchronized to the EEG data by aligning the two given indices, then appended.
Parameters
----------
raw : mne.io.Raw
Raw EEG data.
channel : list or numpy.array
The channel to be added.
sync_index_eeg : int or list
An index, in the raw data, by which to align the two inputs.
sync_index_channel : int or list
An index, in the channel to add, by which to align the two inputs.
channel_type : str
Channel type. Currently supported fields are 'ecg', 'bio', 'stim', 'eog', 'misc', 'seeg', 'ecog', 'mag', 'eeg', 'ref_meg', 'grad', 'emg', 'hbr' or 'hbo'.
Returns
----------
raw : mne.io.Raw
Raw data in FIF format.
Example
----------
>>> import neurokit as nk
>>> event_index_in_eeg = 42
>>> event_index_in_ecg = 666
>>> raw = nk.eeg_add_channel(raw, ecg, sync_index_raw=event_index_in_eeg, sync_index_channel=event_index_in_ecg, channel_type="ecg")
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- mne
*See Also*
- mne: http://martinos.org/mne/dev/index.html
"""
if channel_name is None:
if isinstance(channel, pd.core.series.Series):
if channel.name is not None:
channel_name = channel.name
else:
channel_name = "Added_Channel"
else:
channel_name = "Added_Channel"
# Compute the distance between the two signals
diff = sync_index_channel - sync_index_eeg
if diff > 0:
channel = list(channel)[diff:len(channel)]
channel = channel + [np.nan]*diff
if diff < 0:
channel = [np.nan]*abs(diff) + list(channel)  # diff is negative: pad the front with abs(diff) NaNs
channel = list(channel)[0:len(channel)]
# Adjust to raw size
if len(channel) < len(raw):
channel = list(channel) + [np.nan]*(len(raw)-len(channel))
else:
channel = list(channel)[0:len(raw)] # Crop to fit the raw data
info = mne.create_info([channel_name], raw.info["sfreq"], ch_types=channel_type)
channel = mne.io.RawArray([channel], info)
raw.add_channels([channel], force_update_info=True)
return(raw)
|
java
|
public Matrix4d translate(Vector3fc offset) {
return translate(offset.x(), offset.y(), offset.z());
}
|
python
|
def global_variable_action(self, text, loc, var):
"""Code executed after recognising a global variable"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("GLOBAL_VAR:",var)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
index = self.symtab.insert_global_var(var.name, var.type)
self.codegen.global_var(var.name)
return index
|
python
|
def to_astropy(self, **kwargs):
"""
Creates a new AstroPy quantity with the same unit information.
Example
-------
>>> from unyt import g, cm
>>> data = [3, 4, 5]*g/cm**3
>>> data.to_astropy()
<Quantity [3., 4., 5.] g / cm3>
"""
return self.value * _astropy.units.Unit(str(self.units), **kwargs)
|