language
stringclasses 2
values | func_code_string
stringlengths 63
466k
|
---|---|
python
|
def direction(self):
    """Return the parameter intent of this variable.

    Possible values are "" (no intent), "(in)", "(out)" and "(inout)".
    The result is computed once and cached in ``self._direction``.
    """
    if self._direction is not None:
        return self._direction
    if hasattr(self, "parameters"):
        # A function that inherited from ValueElement: for our purposes it
        # is always intent(out).
        self._direction = "(out)"
        return self._direction
    intents = [mod for mod in self.modifiers if "intent" in mod]
    self._direction = intents[0].replace("intent", "").strip() if intents else ""
    return self._direction
|
python
|
def _pathogenSamplePlot(self, pathogenName, sampleNames, ax):
    """
    Make an image of a graph giving pathogen read count (Y axis) versus
    sample id (X axis).

    Samples whose unique read count is an outlier (more than sdMultiple
    standard deviations above the mean) and at least
    minReadsForHighlighting are drawn in red and listed in the title.

    @param pathogenName: A C{str} pathogen name.
    @param sampleNames: A sorted C{list} of sample names.
    @param ax: A matplotlib C{axes} instance.
    """
    readCounts = []
    for i, sampleName in enumerate(sampleNames):
        try:
            readCount = self.pathogenNames[pathogenName][sampleName][
                'uniqueReadCount']
        except KeyError:
            # This sample has no entry for the pathogen: count it as zero.
            readCount = 0
        readCounts.append(readCount)
    highlight = 'r'
    normal = 'gray'
    sdMultiple = 2.5
    minReadsForHighlighting = 10
    highlighted = []
    if len(readCounts) == 1:
        # A single sample gives no distribution to compare against, so only
        # the absolute read-count threshold applies.
        if readCounts[0] > minReadsForHighlighting:
            color = [highlight]
            highlighted.append(sampleNames[0])
        else:
            color = [normal]
    else:
        mean = np.mean(readCounts)
        sd = np.std(readCounts)
        color = []
        for readCount, sampleName in zip(readCounts, sampleNames):
            if (readCount > (sdMultiple * sd) + mean and
                    readCount >= minReadsForHighlighting):
                color.append(highlight)
                highlighted.append(sampleName)
            else:
                color.append(normal)
    nSamples = len(sampleNames)
    x = np.arange(nSamples)
    yMin = np.zeros(nSamples)
    ax.set_xticks([])
    ax.set_xlim((-0.5, nSamples - 0.5))
    # One vertical line per sample, colored by its highlight status.
    ax.vlines(x, yMin, readCounts, color=color)
    if highlighted:
        title = '%s\nIn red: %s' % (
            pathogenName, fill(', '.join(highlighted), 50))
    else:
        # Add a newline to keep the first line of each title at the
        # same place as those titles that have an "In red:" second
        # line.
        title = pathogenName + '\n'
    ax.set_title(title, fontsize=10)
    ax.tick_params(axis='both', which='major', labelsize=8)
    ax.tick_params(axis='both', which='minor', labelsize=6)
|
java
|
/**
 * Read a {@code Schema} from the given JSON stream.
 *
 * Accepted forms:
 * - JSON null            -> null schema
 * - string               -> a previously-seen record name, else a simple type name
 * - array                -> a union of schemas
 * - object {"type": ...} -> enum / array / map / record / simple type
 *
 * @param knownRecords names of record schemas already parsed; used to resolve
 *        string references back to records
 * @throws IOException on malformed schema input
 */
private Schema read(JsonReader reader, Set<String> knownRecords) throws IOException {
    JsonToken token = reader.peek();
    switch (token) {
        case NULL:
            return null;
        case STRING: {
            // Simple type or known record type
            String name = reader.nextString();
            if (knownRecords.contains(name)) {
                return Schema.recordOf(name);
            }
            return Schema.of(Schema.Type.valueOf(name.toUpperCase()));
        }
        case BEGIN_ARRAY:
            // Union type
            return readUnion(reader, knownRecords);
        case BEGIN_OBJECT: {
            reader.beginObject();
            String name = reader.nextName();
            // The "type" property is mandatory and must come first.
            if (!"type".equals(name)) {
                throw new IOException("Property \"type\" missing.");
            }
            Schema.Type schemaType = Schema.Type.valueOf(reader.nextString().toUpperCase());
            Schema schema;
            switch (schemaType) {
                case ENUM:
                    schema = readEnum(reader);
                    break;
                case ARRAY:
                    schema = readArray(reader, knownRecords);
                    break;
                case MAP:
                    schema = readMap(reader, knownRecords);
                    break;
                case RECORD:
                    schema = readRecord(reader, knownRecords);
                    break;
                default:
                    // Any other type name is a simple (scalar) schema.
                    schema = Schema.of(schemaType);
            }
            reader.endObject();
            return schema;
        }
    }
    // Any other token kind is not a valid schema start.
    throw new IOException("Malformed schema input.");
}
|
python
|
def get_dim(self, X):
    """
    Get the output dimensionality of this basis.

    This makes a cheap call to transform with the initial parameter values
    to ascertain the dimensionality of the output features.

    Parameters
    ----------
    X : ndarray
        (N, d) array of observations where N is the number of samples, and
        d is the dimensionality of X.

    Returns
    -------
    int :
        The dimensionality of the basis.
    """
    try:
        # Cached from a previous call.
        return self._D
    except AttributeError:
        # Transforming a single sample reveals the feature width cheaply.
        self._D = self.transform(X[[0]], *self.params_values()).shape[1]
        return self._D
|
java
|
/**
 * Build a GVRVideoSceneObjectPlayer wrapping a SimpleExoPlayer that plays
 * the given movie file from the app's assets ("asset:///" URI). Playback
 * restarts from the beginning when the stream ends. Returns null when any
 * step of the setup throws.
 *
 * @param movieFileName asset-relative file name of the movie
 * @return the wrapped player, or null on failure
 */
protected GVRVideoSceneObjectPlayer<ExoPlayer> makeExoPlayer(String movieFileName ) {
    GVRVideoSceneObjectPlayer<ExoPlayer> gvrVideoSceneObjectPlayer = null;
    try {
        //final Context context = activityContext;
        final Context context = gvrContext.getContext();
        final String movieFileNameFinal = movieFileName;
        // Data source that reads straight out of the APK assets.
        DataSource.Factory dataSourceFactory = new DataSource.Factory() {
            @Override
            public DataSource createDataSource() {
                return new AssetDataSource(context);
            }
        };
        final MediaSource mediaSource = new ExtractorMediaSource(Uri.parse("asset:///" + movieFileName),
                dataSourceFactory,
                new DefaultExtractorsFactory(), null, null);
        final SimpleExoPlayer player = ExoPlayerFactory.newSimpleInstance(context,
                new DefaultTrackSelector());
        player.prepare(mediaSource);
        Log.e(TAG, "Load movie " + movieFileNameFinal + ".");
        // Adapter exposing the ExoPlayer through the GVR player interface.
        gvrVideoSceneObjectPlayer = new GVRVideoSceneObjectPlayer<ExoPlayer>() {
            @Override
            public ExoPlayer getPlayer() {
                return player;
            }
            @Override
            public void setSurface(final Surface surface) {
                player.addListener(new Player.DefaultEventListener() {
                    @Override
                    public void onPlayerStateChanged(boolean playWhenReady, int playbackState) {
                        switch (playbackState) {
                            case Player.STATE_BUFFERING:
                                break;
                            case Player.STATE_ENDED:
                                // Loop: rewind to the start when playback ends.
                                player.seekTo(0);
                                break;
                            case Player.STATE_IDLE:
                                break;
                            case Player.STATE_READY:
                                break;
                            default:
                                break;
                        }
                    }
                });
                player.setVideoSurface(surface);
            }
            @Override
            public void release() {
                player.release();
            }
            @Override
            public boolean canReleaseSurfaceImmediately() {
                // The player may still render to the surface during release.
                return false;
            }
            @Override
            public void pause() {
                player.setPlayWhenReady(false);
            }
            @Override
            public void start() {
                Log.e(TAG, "movie start.");
                player.setPlayWhenReady(true);
            }
            @Override
            public boolean isPlaying() {
                return player.getPlayWhenReady();
            }
        };
    }
    catch (Exception e) {
        Log.e(TAG, "Exception makeExoPlayer: " + e);
    }
    return gvrVideoSceneObjectPlayer;
}
|
python
|
def _Purge(self, event, by_tags):
    """Purge all events that have occurred after the given event.step.

    If by_tags is True, purge all events that occurred after the given
    event.step, but only for the tags that the event has. Non-sequential
    event.steps suggest that a TensorFlow restart occurred, and we discard
    the out-of-order events to display a consistent view in TensorBoard.
    Discarding by tags is the safer method, when we are unsure whether a restart
    has occurred, given that threading in supervisor can cause events of
    different tags to arrive with unsynchronized step values.

    If by_tags is False, then purge all events with event.step greater than the
    given event.step. This can be used when we are certain that a TensorFlow
    restart has occurred and these events can be discarded.

    Args:
      event: The event to use as reference for the purge. All events with
        the same tags, but with a greater event.step will be purged.
      by_tags: Bool to dictate whether to discard all out-of-order events or
        only those that are associated with the given reference event.
    """
    ## Keep data in reservoirs that has a step less than event.step
    def _NotExpired(x):
        return x.step < event.step

    num_expired = 0
    if by_tags:
        # Only purge the reservoirs of tags carried by this event.
        for value in event.summary.value:
            if value.tag in self.tensors_by_tag:
                tag_reservoir = self.tensors_by_tag[value.tag]
                num_expired += tag_reservoir.FilterItems(
                    _NotExpired, _TENSOR_RESERVOIR_KEY)
    else:
        # Purge everything newer than event.step regardless of tag.
        for tag_reservoir in six.itervalues(self.tensors_by_tag):
            num_expired += tag_reservoir.FilterItems(
                _NotExpired, _TENSOR_RESERVOIR_KEY)
    if num_expired > 0:
        purge_msg = _GetPurgeMessage(self.most_recent_step,
                                     self.most_recent_wall_time, event.step,
                                     event.wall_time, num_expired)
        # Logger.warn is a deprecated alias of Logger.warning.
        logger.warning(purge_msg)
|
java
|
/**
 * Convert the given string literal to a NotifictionResultEnum value.
 *
 * @throws IllegalArgumentException when the literal is not a valid enumerator
 */
public NotifictionResultEnum createNotifictionResultEnumFromString(EDataType eDataType, String initialValue) {
    final NotifictionResultEnum result = NotifictionResultEnum.get(initialValue);
    if (result != null) {
        return result;
    }
    throw new IllegalArgumentException("The value '" + initialValue + "' is not a valid enumerator of '" + eDataType.getName() + "'");
}
|
python
|
def _validate_alias_file_content(alias_file_path, url=''):
    """
    Make sure every alias name and alias command in the alias file is in a
    valid format.

    Args:
        alias_file_path: The alias file path to import aliases from.
        url: Optional URL shown in error messages instead of the local path.
    """
    alias_table = get_config_parser()
    try:
        alias_table.read(alias_file_path)
        for alias_name, alias_command in reduce_alias_table(alias_table):
            _validate_alias_name(alias_name)
            _validate_alias_command(alias_command)
            _validate_alias_command_level(alias_name, alias_command)
            _validate_pos_args_syntax(alias_name, alias_command)
    except Exception as exception:  # pylint: disable=broad-except
        message = CONFIG_PARSING_ERROR % AliasManager.process_exception_message(exception)
        # Report the URL (when importing from one) rather than the temp path.
        raise CLIError(message.replace(alias_file_path, url or alias_file_path))
|
java
|
/**
 * Limit-bounded insertion sort over an index array: sorts order[start..n)
 * by values[order[i]], moving each element at most {@code limit} positions
 * to the left. An element whose insertion point lies further than
 * {@code limit} away is left in place (the scan window stops at i - limit).
 */
@SuppressWarnings("SameParameterValue")
private static void insertionSort(int[] order, double[] values, int start, int n, int limit) {
    for (int i = start + 1; i < n; i++) {
        int t = order[i];                       // index being inserted
        double v = values[order[i]];            // its sort key
        int m = Math.max(i - limit, start);     // leftmost slot we may reach
        for (int j = i; j >= m; j--) {
            // Insertion point found (or the array start reached).
            if (j == 0 || values[order[j - 1]] <= v) {
                if (j < i) {
                    // Shift the run right by one and drop t into the gap.
                    System.arraycopy(order, j, order, j + 1, i - j);
                    order[j] = t;
                }
                break;
            }
        }
    }
}
|
java
|
/**
 * Check whether the managed SDK is installed and usable: the SDK home
 * exists and is a directory, the gcloud executable is a regular file, and
 * — for pinned (non-LATEST) installs — the VERSION file content matches
 * the expected version exactly.
 *
 * @return true when all checks pass; false when any artifact is missing
 * @throws ManagedSdkVerificationException if the VERSION file cannot be read
 * @throws ManagedSdkVersionMismatchException if a different version is installed
 */
public boolean isInstalled()
    throws ManagedSdkVerificationException, ManagedSdkVersionMismatchException {
  if (getSdkHome() == null) {
    return false;
  }
  if (!Files.isDirectory(getSdkHome())) {
    return false;
  }
  if (!Files.isRegularFile(getGcloudPath())) {
    return false;
  }
  // Verify the versions match up for fixed version installs
  if (version != Version.LATEST) {
    try {
      String versionFileContents =
          new String(Files.readAllBytes(getSdkHome().resolve("VERSION")), StandardCharsets.UTF_8)
              .trim();
      if (!versionFileContents.equals(version.getVersion())) {
        throw new ManagedSdkVersionMismatchException(
            "Installed sdk version: "
                + versionFileContents
                + " does not match expected version: "
                + version.getVersion()
                + ".");
      }
    } catch (IOException ex) {
      throw new ManagedSdkVerificationException(ex);
    }
  }
  return true;
}
|
java
|
/**
 * Return a substring of this Clob's content, per {@link java.sql.Clob}:
 * {@code pos} is 1-based and the result is at most {@code length} chars
 * (shorter when the content ends first).
 *
 * @param pos    1-based start position
 * @param length maximum number of characters to return (>= 0)
 * @throws SQLException if the arguments are invalid or extraction fails
 */
public String getSubString(long pos, int length) throws SQLException {
    if (pos < 1) {
        throw ExceptionMapper.getSqlException("position must be >= 1");
    }
    if (length < 0) {
        // The check rejects only negative lengths, so the message must say
        // ">= 0" (the previous "> 0" contradicted the condition).
        throw ExceptionMapper.getSqlException("length must be >= 0");
    }
    try {
        String val = toString();
        // Clamp the end to the content length instead of throwing.
        return val.substring((int) pos - 1, Math.min((int) pos - 1 + length, val.length()));
    } catch (Exception e) {
        throw new SQLException(e);
    }
}
|
java
|
/**
 * Draw a random bitset with exactly {@code card} of {@code capacity} bits set.
 *
 * Strategy: when fewer than half the bits are wanted, start from all zeros
 * and set random positions; otherwise start from all ones and clear random
 * positions. Random draws may hit bits already in the target state, so on
 * what would be the last step the remaining deficit is recomputed from the
 * actual cardinality and the loop keeps going until the count is exact.
 *
 * @param card     number of bits to set, 0 &lt;= card &lt;= capacity
 * @param capacity total number of bits
 * @param random   source of randomness
 * @return a bitset with exactly {@code card} bits set
 */
public static long[] random(int card, int capacity, Random random) {
    if(card < 0 || card > capacity) {
        throw new IllegalArgumentException("Cannot set " + card + " out of " + capacity + " bits.");
    }
    // FIXME: Avoid recomputing the cardinality.
    if(card < capacity >>> 1) {
        // Sparse case: set bits into an all-zero set.
        long[] bitset = BitsUtil.zero(capacity);
        for(int todo = card; todo > 0; //
            todo = (todo == 1) ? (card - cardinality(bitset)) : (todo - 1)) {
            setI(bitset, random.nextInt(capacity));
        }
        return bitset;
    }
    else {
        // Dense case: clear bits out of an all-one set.
        long[] bitset = BitsUtil.ones(capacity);
        for(int todo = capacity - card; todo > 0; //
            todo = (todo == 1) ? (cardinality(bitset) - card) : (todo - 1)) {
            clearI(bitset, random.nextInt(capacity));
        }
        return bitset;
    }
}
|
java
|
/**
 * Delete a previously sent mass message by its message id via the WeChat
 * mass-message deletion endpoint.
 */
public void delete(long msgId) {
    logger.debug("delete message: {}", msgId);
    final String url = WxEndpoint.get("url.mass.message.delete");
    final String body = String.format("{\"msg_id\":%s}", msgId);
    wxClient.post(url, body);
}
|
python
|
def _derZ(self,x,y,z):
    '''
    Returns the first derivative of the function with respect to Z at each
    value in (x,y,z). Only called internally by HARKinterpolator3D._derZ.
    '''
    m = len(x)
    temp = np.zeros((m,self.funcCount))
    # Evaluate every component function at all query points.
    for j in range(self.funcCount):
        temp[:,j] = self.functions[j](x,y,z)
    # NaNs must not win the argmin below, so treat them as +inf.
    temp[np.isnan(temp)] = np.inf
    # Index of the function forming the lower envelope at each point.
    i = np.argmin(temp,axis=1)
    # NOTE(review): this overwrites the *input* y with the envelope values,
    # and the overwritten y is then passed to derivativeZ below — confirm
    # this shadowing is intentional and not a bug.
    y = temp[np.arange(m),i]
    dfdz = np.zeros_like(x)
    for j in range(self.funcCount):
        c = i == j
        dfdz[c] = self.functions[j].derivativeZ(x[c],y[c],z[c])
    return dfdz
|
python
|
def to_series(self, only=None,
              intensive_columns=["temperature", "pressure"],
              check_data=True):
    """
    Produce a data record for `ChemicalEquation`.

    All possible linear differences for all numeric attributes are computed
    and stored in the returned `pandas.Series` object (see examples below).
    This allows for easy application and manipulation of
    `Hess's law <https://en.wikipedia.org/wiki/Hess%27s_law>`_ to chemical
    equations (see examples below).

    Parameters
    ----------
    only : ``"reactants"``, ``"products"``, optional
        Instead of the standard behaviour (difference of sums), sum numeric
        attributes of either reactants or products only. If given, absolute
        coefficients are used.
    intensive_columns : iterable of `str`, optional
        A set of column names representing intensive properties (e.g. bulk
        properties) whose values are not summable. Those must be constant
        throughout the chemical equation.
    check_data : `bool`, optional
        Whether to check data object for inconsistencies.

    Returns
    -------
    series : `pandas.Series`
        Data record of attribute differences, whose name is the canonical
        string representation of the `ChemicalEquation` or, if `only` is
        given, a string representing either reactants or products (see
        examples below).

    Raises
    ------
    ValueError
        Raised if `self.data` wasn't defined (e.g. is `None`), if `only`
        is something other than ``"reactants"`` or ``"products"``, or if
        two or more distinct values for an intensive property have been
        found.

    Examples
    --------
    >>> from pyrrole import ChemicalEquation
    >>> from pyrrole.atoms import create_data, read_cclib
    >>> data = create_data(
    ...     read_cclib("data/acetate/acetic_acid.out",
    ...                "AcOH(g)"),
    ...     read_cclib("data/acetate/[email protected]",
    ...                "AcOH(aq)"))
    >>> equilibrium = ChemicalEquation("AcOH(g) <=> AcOH(aq)",
    ...                                data)
    >>> equilibrium.to_series()
    charge           0.000000
    enthalpy        -0.010958
    entropy         -0.000198
    freeenergy      -0.010759
    mult             0.000000
    natom            0.000000
    nbasis           0.000000
    nmo              0.000000
    pressure         1.000000
    temperature    298.150000
    Name: AcOH(g) <=> AcOH(aq), dtype: float64

    Sums of either reactants or products can be computed:

    >>> equilibrium.to_series("reactants")
    charge           0.000000
    enthalpy      -228.533374
    entropy          0.031135
    freeenergy    -228.564509
    mult             1.000000
    natom            8.000000
    nbasis          68.000000
    nmo             68.000000
    pressure         1.000000
    temperature    298.150000
    Name: AcOH(g), dtype: float64
    """
    if self.data is None:
        # TODO: should an empty Series be returned?
        raise ValueError("data not defined")
    # TODO: find a way to keep categorical columns. Keep if they match?
    # Only numeric columns participate; split into intensive (constant,
    # non-summable) and extensive (summable) sets.
    columns = self.data.select_dtypes('number').columns
    intensive_columns = [column for column in columns
                         if column in intensive_columns]
    extensive_columns = [column for column in columns
                         if column not in intensive_columns]
    columns = extensive_columns + intensive_columns
    if only is None:
        species = self.species
    elif only == "reactants":
        species = sorted(self.reactants)
    elif only == "products":
        species = sorted(self.products)
    else:
        raise ValueError("only must be either 'reactants' or 'products' "
                         "('{}' given)".format(only))
    if check_data:
        _check_data(self.data.loc[species])
    if all([s in self.data.index for s in species]):
        # Coefficient-weighted sum of extensive properties (reactant
        # coefficients are negative, yielding products minus reactants).
        series = (self.data.loc[species, extensive_columns]
                  .mul(self.coefficient, axis="index").sum("index"))
        for column in intensive_columns:
            # Intensive properties must be single-valued across all species.
            vals = self.data[column].unique()
            if len(vals) > 1:
                raise ValueError("different values for {}: "
                                 "{}".format(column, vals))
            series[column] = vals[0]
    else:
        # Some species lack data: produce an all-NaN record instead.
        series = _pd.Series(_np.nan, index=columns)
    if only is None:
        name = self.__str__()
    else:
        coefficients = self.coefficient[species]
        name = _get_chemical_equation_piece(species, coefficients)
        if only == "reactants":
            # Reactant coefficients are negative; flip the sign so sums are
            # reported with absolute coefficients.
            series[extensive_columns] = -series[extensive_columns]
    # Avoid negative zero
    # (see https://stackoverflow.com/a/11010791/4039050)
    series = series + 0.
    return series.rename(name)
|
java
|
/**
 * (Re)start the periodic cleanup task with the given size threshold and
 * interval, cancelling any previously scheduled run.
 *
 * @param size     size threshold handed to the Cleanup task
 * @param interval delay and period of the cleanup, in milliseconds
 */
public void cleanupParams(int size, long interval) {
    // A cancelled java.util.Timer rejects new tasks with
    // IllegalStateException, so a fresh Timer must replace the old one
    // before scheduling again.
    timer.cancel();
    timer = new Timer();
    timer.schedule(new Cleanup(content, size), interval, interval);
}
|
python
|
def run(self, command):
    """
    Runs a command as if from the command-line
    without the need for using popen or subprocess
    """
    # A plain string is tokenized on whitespace; any other iterable is
    # materialized as an argument list.
    args = command.split() if isinstance(command, basestring) else list(command)
    self.external.omero_cli(args)
|
java
|
/**
 * Replace every match of {@code regex} in {@code input} with the string
 * produced by {@code replacementFunction} for that match. The function's
 * result is treated as literal text (no {@code $n} group references).
 *
 * @throws IllegalArgumentException if the function returns null for a match
 */
public static String replaceAll(final String input, final Pattern regex,
        final Function<MatchResult, String> replacementFunction) {
    final StringBuffer output = new StringBuffer();
    final Matcher matcher = regex.matcher(input);
    while (matcher.find()) {
        final MatchResult match = matcher.toMatchResult();
        final String replacement = replacementFunction.apply(match);
        if (replacement == null) {
            throw new IllegalArgumentException(
                String.format("Replacement function returned null for match %s", match.group()));
        }
        // Skip the append when nothing changed; appendTail picks the text up.
        if (!replacement.equals(match.group())) {
            // quoteReplacement: the function returns literal text, so '$'
            // and '\' must not be interpreted as group references by
            // appendReplacement.
            matcher.appendReplacement(output, Matcher.quoteReplacement(replacement));
        }
    }
    matcher.appendTail(output);
    return output.toString();
}
|
java
|
/**
 * Classify this generic type: PARAMETERIZED (has type arguments),
 * WILDCARD ("*"), TYPE_VARIABLE (named variable without bound),
 * WILDCARD_EXTENDS ("+" with bound) or WILDCARD_SUPER ("-" with bound).
 *
 * @throws IllegalStateException if the combination of parameters, variable
 *         and extension matches no valid category
 */
public GenericUtilities.TypeCategory getTypeCategory() {
    if (hasParameters() && variable == null && extension == null) {
        return GenericUtilities.TypeCategory.PARAMETERIZED;
    } else if (!hasParameters() && variable != null && extension == null) {
        if ("*".equals(variable)) {
            return GenericUtilities.TypeCategory.WILDCARD;
        } else {
            return GenericUtilities.TypeCategory.TYPE_VARIABLE;
        }
    } else if (!hasParameters() && variable != null && extension != null) {
        if ("+".equals(variable)) {
            return GenericUtilities.TypeCategory.WILDCARD_EXTENDS;
        } else if ("-".equals(variable)) {
            return GenericUtilities.TypeCategory.WILDCARD_SUPER;
        }
    }
    // this should never happen
    throw new IllegalStateException("The Generic Object Type is badly initialized");
}
|
java
|
/**
 * Build an info button showing JVM heap statistics (max/total/used/free,
 * formatted in the request locale) plus a memory-usage progress bar for
 * the flex cache view.
 *
 * @param memory current memory status snapshot
 * @return the configured info button
 */
public static CmsInfoButton getJavaStatisticButton(CmsMemoryStatus memory) {
    Map<String, String> infoMap = new LinkedHashMap<String, String>();
    // The memory getters report MB; * 1048576 converts to bytes for
    // formatFilesize. NOTE(review): if the getters return int, the multiply
    // happens in int arithmetic and could overflow — confirm they are long.
    infoMap.put(
        CmsVaadinUtils.getMessageText(Messages.GUI_CACHE_JAVA_HEAP_MAX_0),
        CmsFileUtil.formatFilesize(
            memory.getMaxMemory() * 1048576,
            A_CmsUI.getCmsObject().getRequestContext().getLocale()));
    infoMap.put(
        CmsVaadinUtils.getMessageText(Messages.GUI_CACHE_JAVA_HEAP_TOTAL_0),
        CmsFileUtil.formatFilesize(
            memory.getTotalMemory() * 1048576,
            A_CmsUI.getCmsObject().getRequestContext().getLocale()));
    infoMap.put(
        CmsVaadinUtils.getMessageText(Messages.GUI_CACHE_JAVA_HEAP_USED_0),
        CmsFileUtil.formatFilesize(
            memory.getUsedMemory() * 1048576,
            A_CmsUI.getCmsObject().getRequestContext().getLocale()));
    infoMap.put(
        CmsVaadinUtils.getMessageText(Messages.GUI_CACHE_JAVA_HEAP_FREE_0),
        CmsFileUtil.formatFilesize(
            memory.getFreeMemory() * 1048576,
            A_CmsUI.getCmsObject().getRequestContext().getLocale()));
    CmsInfoButton info = new CmsInfoButton(infoMap);
    // Extra element: label + usage progress bar above the statistics.
    VerticalLayout prog = new VerticalLayout();
    Label label = new Label();
    label.setContentMode(ContentMode.HTML);
    label.setValue(
        "<p>" + CmsVaadinUtils.getMessageText(Messages.GUI_CACHE_FLEXCACHE_LABEL_MEMORY_BLOCK_0) + "</p>");
    prog.addComponent(label);
    prog.addComponent(getProgressBar((((float)memory.getUsage() / 100))));
    info.addAdditionalElement(prog, 0);
    info.setWindowCaption(CmsVaadinUtils.getMessageText(Messages.GUI_CACHE_FLEX_0));
    info.setDescription(CmsVaadinUtils.getMessageText(Messages.GUI_CACHE_FLEX_0));
    return info;
}
|
python
|
def origin_displacement(self):
    """
    planar distance of start point from self.location along :math:`-Z` axis

    Computed by projecting the vector from the location origin to the
    start point onto the reversed local Z direction (``-zDir``).
    """
    return self.start_point.sub(self.location.origin).dot(-self.location.zDir)
|
python
|
def _get_api_url(self, secure=None, **formatters):
    '''Construct a Postmark API url for this endpoint.

    :param secure: Use the https Postmark API; defaults to ``self.secure``.
    :param \\*\\*formatters: :func:`string.format` keyword arguments used to
        format the url.
    :rtype: Postmark API url
    '''
    if self.endpoint is None:
        raise NotImplementedError('endpoint must be defined on a subclass')
    use_https = self.secure if secure is None else secure
    base = POSTMARK_API_URL_SECURE if use_https else POSTMARK_API_URL
    url = urljoin(base, self.endpoint)
    return url.format(**formatters) if formatters else url
|
python
|
def on_use_runtime_value_toggled(self, widget, path):
    """Try to set the use-runtime-value flag for the row at `path`."""
    try:
        port_id = self.list_store[path][self.ID_STORAGE_ID]
        self.toggle_runtime_value_usage(port_id)
    except TypeError:
        logger.exception("Error while trying to change the use_runtime_value flag")
|
python
|
def add_input(self, input):
    '''
    Add a single build XML output file to our data.

    Streams the XML with pulldom, maintaining ``context`` as the stack of
    currently open elements. When ``self.x_name_`` recognises the element
    path it expands that node in full and dispatches it to the registered
    handler.
    '''
    events = xml.dom.pulldom.parse(input)
    context = []
    for (event,node) in events:
        if event == xml.dom.pulldom.START_ELEMENT:
            context.append(node)
            if node.nodeType == xml.dom.Node.ELEMENT_NODE:
                # x_f is falsy when no handler matches this element path.
                x_f = self.x_name_(*context)
                if x_f:
                    events.expandNode(node)
                    # expanding eats the end element, hence walking us out one level
                    context.pop()
                    # call handler
                    (x_f[1])(node)
        elif event == xml.dom.pulldom.END_ELEMENT:
            context.pop()
|
java
|
/**
 * Look up the value(s) bound to the given JPA query parameter, searching
 * named parameters (":name"), positional filter clauses ("?pos") and,
 * failing that, positional update clauses.
 *
 * @return the bound value list
 * @throws IllegalArgumentException if the parameter does not belong to
 *         this query or nothing is bound to it
 */
public List<Object> getClauseValue(Parameter param) {
    Parameter match = null;
    if (typedParameter != null && typedParameter.jpaParameters != null) {
        for (Parameter p : typedParameter.jpaParameters) {
            if (p.equals(param)) {
                match = p;
                if (typedParameter.getType().equals(Type.NAMED)) {
                    // Named parameters are keyed as ":name".
                    List<FilterClause> clauses = typedParameter.getParameters().get(":" + p.getName());
                    if (clauses != null) {
                        return clauses.get(0).getValue();
                    }
                } else {
                    // Positional parameters are keyed as "?position".
                    List<FilterClause> clauses = typedParameter.getParameters().get("?" + p.getPosition());
                    if (clauses != null) {
                        return clauses.get(0).getValue();
                    } else {
                        // Not a filter clause; it may bind an update clause.
                        UpdateClause updateClause = typedParameter.getUpdateParameters().get("?" + p.getPosition());
                        if (updateClause != null) {
                            List<Object> value = new ArrayList<Object>();
                            value.add(updateClause.getValue());
                            return value;
                        }
                    }
                }
                break;
            }
        }
        if (match == null) {
            throw new IllegalArgumentException("parameter is not a parameter of the query");
        }
    }
    // Either no jpaParameters at all, or the parameter matched but had no
    // bound clause.
    logger.error("parameter{} is not a parameter of the query", param);
    throw new IllegalArgumentException("parameter is not a parameter of the query");
}
|
java
|
/**
 * Load the named configuration and, when {@code reset} is true, publish a
 * {@code ConfigurationChangedEvent} on the core event bus so listeners
 * pick up the freshly loaded values.
 *
 * @param confname configuration name, may be null
 * @param rootPath root path the configuration is resolved against
 * @param reset    whether to broadcast a configuration-changed event
 * @return the loaded configuration
 */
public Config loadConfig(final @Nullable String confname, final String rootPath, final boolean reset) {
    final Config config = loadConfig(confname, rootPath);
    if (reset) COREUTILS_CONTEXT.eventBus().post(new ConfigurationChangedEvent(config));
    return config;
}
|
java
|
/**
 * Call {@code setAccessible(true)} on the given member, swallowing
 * {@link SecurityException} so a restrictive SecurityManager degrades to a
 * debug log entry instead of a failure.
 */
private void setAccessibleSafely(AccessibleObject classMember)
{
    try {
        // We do this in a try/catch to avoid false positives caused by existing SecurityManagers.
        classMember.setAccessible(true);
    } catch (SecurityException se) {
        logger.debug("Failed to call setAccessible for [{}]", classMember.toString(), se);
    }
}
|
python
|
def _dump_inline_table(section):
"""Preserve inline table in its compact syntax instead of expanding
into subsection.
https://github.com/toml-lang/toml#user-content-inline-table
"""
retval = ""
if isinstance(section, dict):
val_list = []
for k, v in section.items():
val = _dump_inline_table(v)
val_list.append(k + " = " + val)
retval += "{ " + ", ".join(val_list) + " }\n"
return retval
else:
return str(_dump_value(section))
|
python
|
def pack_command(self, *args):
    "Pack a series of arguments into a value Redis command"
    # RESP encoding: each argument becomes "$<len>\r\n<arg>\r\n".
    parts = [
        SYM_EMPTY.join(
            (SYM_DOLLAR, str(len(arg)).encode(), SYM_CRLF, arg, SYM_CRLF))
        for arg in imap(self.encoder.encode, args)
    ]
    # Prefix with the array header "*<count>\r\n".
    header = SYM_EMPTY.join((SYM_STAR, str(len(args)).encode(), SYM_CRLF))
    return header + SYM_EMPTY.join(parts)
|
java
|
/**
 * Copy the current START_ELEMENT event — the element itself, its local
 * namespace declarations and its attributes — from the input stack to this
 * writer, re-resolving attribute prefixes against the output scope where
 * necessary.
 */
@Override
public final void copyStartElement(InputElementStack elemStack, AttributeCollector ac)
    throws IOException, XMLStreamException
{
    /* In case of repairing stream writer, we can actually just
     * go ahead and first output the element: stream writer should
     * be able to resolve namespace mapping for the element
     * automatically, as necessary.
     */
    String prefix = elemStack.getPrefix();
    String uri = elemStack.getNsURI();
    writeStartElement(prefix, elemStack.getLocalName(), uri);
    /* 04-Sep-2006, TSa: Although we could really just ignore all
     * namespace declarations, some apps prefer (or even expect...)
     * that ns bindings are preserved as much as possible. So, let's
     * just try to output them as they are (could optimize and skip
     * ones related to the start element [same prefix or URI], but
     * for now let's not bother)
     */
    int nsCount = elemStack.getCurrentNsCount();
    if (nsCount > 0) { // yup, got some...
        for (int i = 0; i < nsCount; ++i) {
            writeNamespace(elemStack.getLocalNsPrefix(i), elemStack.getLocalNsURI(i));
        }
    }
    /* And then let's just output attributes, if any (whether to copy
     * implicit, aka "default" attributes, is configurable)
     */
    int attrCount = mCfgCopyDefaultAttrs ? ac.getCount() : ac.getSpecifiedCount();
    /* Unlike in non-ns and simple-ns modes, we can not simply literally
     * copy the attributes here. It is possible that some namespace
     * prefixes have been remapped... so need to be bit more careful.
     */
    if (attrCount > 0) {
        for (int i = 0; i < attrCount; ++i) {
            // First; need to make sure that the prefix-to-ns mapping
            // attribute has is valid... and can not output anything
            // before that's done (since remapping will output a namespace
            // declaration!)
            uri = ac.getURI(i);
            prefix = ac.getPrefix(i);
            // With attributes, missing/empty prefix always means 'no
            // namespace', can take a shortcut:
            if (prefix == null || prefix.length() == 0) {
                ;
            } else {
                // and otherwise we'll always have a prefix as attributes
                // can not make use of the def. namespace...
                prefix = findOrCreateAttrPrefix(prefix, uri, mCurrElem);
            }
            /* Hmmh. Since the prefix we use may be different from what
             * collector has, we can not use pass-through method of
             * the collector, but need to call XmlWriter directly:
             */
            if (prefix == null || prefix.length() == 0) {
                mWriter.writeAttribute(ac.getLocalName(i), ac.getValue(i));
            } else {
                mWriter.writeAttribute(prefix, ac.getLocalName(i), ac.getValue(i));
            }
        }
    }
}
|
python
|
def reelect_app(self, request, app):
    """Try to connect to the same app on a different host from dist-info.

    Walks the app locator's endpoint list, dropping the first endpoint
    after every attempt; when the list is exhausted, falls back to the
    common pool via ``self.proxy.reelect_app``. Raises ``gen.Return`` with
    the connected app (tornado-coroutine style result).
    """
    # disconnect app explicitly to break possibly existing connection
    app.disconnect()
    endpoints_size = len(app.locator.endpoints)
    # try x times, where x is the number of different endpoints in app locator.
    for _ in xrange(0, endpoints_size + 1):
        # last chance to take app from common pool
        if len(app.locator.endpoints) == 0:
            request.logger.info(
                "giving up on connecting to dist-info hosts, falling back to common pool processing")
            app = yield self.proxy.reelect_app(request, app)
            raise gen.Return(app)
        try:
            # always create new locator to prevent locking as we do connect with timeout
            # however lock can be still held during TCP timeout
            locator = Locator(endpoints=app.locator.endpoints)
            request.logger.info("connecting to locator %s", locator.endpoints[0])
            # first try to connect to locator only on remote host with timeout
            yield gen.with_timeout(self.service_connect_timeout, locator.connect())
            request.logger.debug("connected to locator %s for %s", locator.endpoints[0], app.name)
            app = Service(app.name, locator=locator, timeout=RESOLVE_TIMEOUT)
            # try to resolve and connect to application itself
            yield gen.with_timeout(self.service_connect_timeout, app.connect())
            request.logger.debug("connected to application %s via %s", app.name, app.endpoints)
        except gen.TimeoutError:
            # on timeout try next endpoint first
            request.logger.warning("timed out while connecting to application")
            continue
        except ServiceError as err:
            request.logger.warning("got error while resolving app - %s", err)
            if err.category in LOCATORCATEGORY and err.code == ESERVICENOTAVAILABLE:
                # if the application is down - also try next endpoint
                continue
            else:
                raise err
        finally:
            # drop first endpoint to start next connection from different endpoint
            # we do this, as default logic of connection attempts in locator do not fit here
            app.locator.endpoints = app.locator.endpoints[1:]
        # return connected app
        raise gen.Return(app)
    raise PluginApplicationError(42, 42, "could not connect to application")
|
java
|
/**
 * Record the key-group of the element at {@code index}: stores the group's
 * offset relative to {@code firstKeyGroup} and bumps that group's bucket in
 * the counter histogram (used later for a counting sort over key-groups).
 */
protected void reportKeyGroupOfElementAtIndex(int index, int keyGroup) {
    final int keyGroupIndex = keyGroup - firstKeyGroup;
    elementKeyGroups[index] = keyGroupIndex;
    ++counterHistogram[keyGroupIndex];
}
|
python
|
def confd_state_internal_callpoints_typepoint_id(self, **kwargs):
    """Auto Generated Code

    Build the confd-state/internal/callpoints/typepoint/id config element
    tree and hand it to the callback (``self._callback`` by default).
    """
    config = ET.Element("config")
    confd_state = ET.SubElement(
        config, "confd-state",
        xmlns="http://tail-f.com/yang/confd-monitoring")
    typepoint = ET.SubElement(
        ET.SubElement(ET.SubElement(confd_state, "internal"), "callpoints"),
        "typepoint")
    id_elem = ET.SubElement(typepoint, "id")
    id_elem.text = kwargs.pop('id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
java
|
/**
 * Find the attribute of the given element matching a namespace URI and
 * local name, returning its node handle or NULL when absent.
 *
 * NOTE(review): two things look suspicious here and should be confirmed
 * against the slot layout: (1) {@code nsIndex == (gotslot[0] << 16)}
 * compares against a LEFT shift, where extracting a packed ns index would
 * normally be a right shift/mask; (2) {@code type} is never refreshed
 * inside the loop after readSlot, so the loop condition always sees the
 * first slot's type.
 */
public int getAttributeNode(int nodeHandle, String namespaceURI, String name) {
    int nsIndex = m_nsNames.stringToIndex(namespaceURI),
        nameIndex = m_localNames.stringToIndex(name);
    nodeHandle &= NODEHANDLE_MASK;
    nodes.readSlot(nodeHandle, gotslot);
    short type = (short) (gotslot[0] & 0xFFFF);
    // If nodeHandle points to element next slot would be first attribute
    if (type == ELEMENT_NODE)
        nodeHandle++;
    // Iterate through Attribute Nodes
    while (type == ATTRIBUTE_NODE) {
        if ((nsIndex == (gotslot[0] << 16)) && (gotslot[3] == nameIndex))
            return nodeHandle | m_docHandle;
        // Goto next sibling
        nodeHandle = gotslot[2];
        nodes.readSlot(nodeHandle, gotslot);
    }
    return NULL;
}
|
java
|
/**
 * Lift a success-only predicate into a (result, failure) bi-predicate.
 * The predicate is only defined over the success type; any failure
 * (non-null Throwable) therefore evaluates to false.
 */
static <R> BiPredicate<R, Throwable> resultPredicateFor(Predicate<R> resultPredicate) {
    return (result, failure) -> failure == null && resultPredicate.test(result);
}
|
java
|
/**
 * Subtract a range fully contained within this one, returning the at most
 * two remaining pieces (the gap left of {@code contained} and the gap to
 * its right). A piece is omitted when the contained range shares that
 * endpoint with this range.
 */
private ArrayList<Range<T>> subtractContained(Range<T> contained)
{
    ArrayList<Range<T>> difference = new ArrayList<Range<T>>(2);
    if (!left.equals(contained.left))
        difference.add(new Range<T>(left, contained.left, partitioner));
    if (!right.equals(contained.right))
        difference.add(new Range<T>(contained.right, right, partitioner));
    return difference;
}
|
python
|
def parse_value(self, value_string: str):
    """
    Parse the amount string into a Decimal, store it on ``self.value`` and
    return it.
    """
    parsed = Decimal(value_string)
    self.value = parsed
    return parsed
|
java
|
/**
 * Returns a function converting a {@link Calendar} into a LocalDate using
 * the given chronology; delegates to {@code FnLocalDate}.
 */
public static final <T extends Calendar> Function<T, LocalDate> calendarToLocalDate(Chronology chronology) {
    return FnLocalDate.calendarToLocalDate(chronology);
}
|
java
|
/**
 * Set the id field of {@code target} from a JSON node, converting the
 * node's text through the type's registered id handler. A null
 * {@code idValue} leaves the target unchanged.
 */
private void setIdValue(Object target, JsonNode idValue) throws IllegalAccessException {
    Field idField = configuration.getIdField(target.getClass());
    ResourceIdHandler idHandler = configuration.getIdHandler(target.getClass());
    if (idValue != null) {
        idField.set(target, idHandler.fromString(idValue.asText()));
    }
}
|
java
|
/**
 * Drop the cached entry for {@code sequenceName} from the per-database
 * sequence map of the calling broker's database (identified by its JCD
 * alias). The removal is synchronized on the implementation class to
 * serialize concurrent map mutations.
 */
protected void removeSequence(String sequenceName)
{
    // lookup the sequence map for calling DB
    Map mapForDB = (Map) sequencesDBMap.get(getBrokerForClass()
            .serviceConnectionManager().getConnectionDescriptor().getJcdAlias());
    if(mapForDB != null)
    {
        synchronized(SequenceManagerHighLowImpl.class)
        {
            mapForDB.remove(sequenceName);
        }
    }
}
|
java
|
/**
 * Collect the path names of all descendants of {@code dir} (optionally
 * including directories), delegating the recursive walk to getPathNames.
 * Returns an empty set when {@code dir} is missing or not a directory.
 */
static public Set<String> getDescendantPathNames(File dir, boolean includeDirectories) {
    Set<String> paths = new HashSet<String>();
    if( dir.exists() && dir.isDirectory() ) {
        String[] names = dir.list();
        // File.list() can still return null (I/O error, or the directory
        // vanished after the isDirectory() check) — guard against an NPE.
        if (names != null) {
            for(String name : names){
                File child = new File(dir,name);
                getPathNames(child, paths, null, includeDirectories);
            }
        }
    }
    return paths;
}
|
python
|
def make(self):
    """
    Make the lock file.

    Creates the file at ``self.lock_file``; on any failure the process is
    terminated via ``self.die`` with the underlying error message.
    """
    try:
        # Create the lock file
        self.mkfile(self.lock_file)
    except Exception as e:
        self.die('Failed to generate lock file: {}'.format(str(e)))
|
python
|
def output(*args, **kwargs):
    '''Analog of print() but with an indent option
    '''
    indent = kwargs.pop('indent', 0)
    joiner = kwargs.pop('sep', None)
    if joiner is None:
        joiner = u' '
    # Everything is joined into one string below, so print() itself must
    # not insert any separator.
    kwargs['sep'] = u''
    prefix = u' ' * (indent * tab_width)
    text = joiner.join(map(str_, args))
    color = kwargs.pop('color', None)
    if color:
        color.bright = kwargs.pop('bright', None)
        text = ColorText(text, color)
    print(prefix + text, **kwargs)
|
java
|
/**
 * Append category entries to this axis. Only a category axis may carry
 * data; calling this on a value axis raises a RuntimeException.
 *
 * @param values category values to append; null or empty is a no-op
 * @return this axis, for chaining
 */
@Override
public T data(Object... values) {
    if (values == null || values.length == 0) {
        return (T) this;
    }
    if (this.data == null) {
        if (this.type == AxisType.category) {
            data = new ArrayList<Object>();
        } else {
            // Message (zh): "A value axis cannot take category data!"
            throw new RuntimeException("数据轴不能添加类目信息!");
        }
    }
    this.data.addAll(Arrays.asList(values));
    return (T) this;
}
|
java
|
/**
 * Replace this object's landmark list with a defensive copy of the given
 * collection; passing null clears the list.
 */
public void setLandmarks(java.util.Collection<Landmark> landmarks) {
    this.landmarks = (landmarks == null)
            ? null
            : new java.util.ArrayList<Landmark>(landmarks);
}
|
java
|
    /**
     * Estimates the Islamic (hijri) year that starts within the given
     * Gregorian year. Uses a 65-year cycle correction around the 1977 epoch;
     * the two branches exist because Java's {@code /} and {@code %} truncate
     * toward zero, so years before the epoch need their own adjustment.
     *
     * @param year Gregorian year
     * @return approximate Islamic start year
     */
    private static int firstIslamicStartYearFromGrego(int year) {
        // ad hoc conversion, improve under #10752
        // rough est for now, ok for grego 1846-2138,
        // otherwise occasionally wrong (for 3% of years)
        int cycle, offset, shift = 0;
        if (year >= 1977) {
            cycle = (year - 1977) / 65;
            offset = (year - 1977) % 65;
            shift = 2*cycle + ((offset >= 32)? 1: 0);
        } else {
            cycle = (year - 1976) / 65 - 1;
            offset = -(year - 1976) % 65;
            shift = 2*cycle + ((offset <= 32)? 1: 0);
        }
        return year - 579 + shift;
    }
|
python
|
def bind(cls, origin, handler, *, name=None):
"""Bind this object to the given origin and handler.
:param origin: An instance of `Origin`.
:param handler: An instance of `bones.HandlerAPI`.
:return: A subclass of this class.
"""
name = cls.__name__ if name is None else name
attrs = {
"_origin": origin, "_handler": handler,
"__module__": "origin", # Could do better?
}
return type(name, (cls,), attrs)
|
java
|
    /**
     * Enables/disables press-override mode on the installed key hook,
     * clearing any latched press state first. No-op when no hook is set.
     *
     * @param b whether the hook should override key presses
     */
    @Override
    public void setOverriding(boolean b)
    {
        if (this.keyHook != null)
        {
            // Reset transient state so a stale press cannot leak through.
            this.keyHook.isDown = false;
            this.keyHook.justPressed = false;
            this.keyHook.isOverridingPresses = b;
        }
    }
|
python
|
    def delete(self, tag, ref):
        """Delete from the vgroup the member identified by its tag
        and reference number.
        Args::
            tag     tag of the member to delete
            ref     reference number of the member to delete
        Returns::
            None
        Only the link of the member with the vgroup is deleted.
        The member object is not deleted.
        C library equivalent : Vdeletetagref
        """
        # _checkErr raises on a negative status from the C call.
        _checkErr('delete', _C.Vdeletetagref(self._id, tag, ref),
                  "error deleting member")
|
java
|
    /**
     * Builds the skin's scene graph: the mouse and selection handlers, the
     * title/text/description nodes, the switch rectangles and the thumb, and
     * attaches everything to the pane.
     */
    @Override protected void initGraphics() {
        super.initGraphics();
        mouseEventHandler = e -> {
            final EventType TYPE = e.getEventType();
            if (MouseEvent.MOUSE_PRESSED == TYPE) {
                // Toggle on press and notify listeners immediately.
                tile.setActive(!tile.isActive());
                tile.fireEvent(SWITCH_PRESSED);
            } else if(MouseEvent.MOUSE_RELEASED == TYPE) {
                tile.fireEvent(SWITCH_RELEASED);
            }
        };
        selectedListener = o -> moveThumb();
        timeline = new Timeline();
        titleText = new Text();
        titleText.setFill(tile.getTitleColor());
        Helper.enableNode(titleText, !tile.getTitle().isEmpty());
        text = new Text(tile.getText());
        text.setFill(tile.getUnitColor());
        Helper.enableNode(text, tile.isTextVisible());
        description = new Label(tile.getDescription());
        description.setAlignment(tile.getDescriptionAlignment());
        description.setWrapText(true);
        description.setTextFill(tile.getTextColor());
        Helper.enableNode(description, !tile.getDescription().isEmpty());
        switchBorder = new Rectangle();
        switchBackground = new Rectangle();
        // Background and thumb must not steal clicks from the switch itself.
        switchBackground.setMouseTransparent(true);
        switchBackground.setFill(tile.isActive() ? tile.getActiveColor() : tile.getBackgroundColor());
        thumb = new Circle();
        thumb.setMouseTransparent(true);
        thumb.setEffect(shadow);
        getPane().getChildren().addAll(titleText, text, description, switchBorder, switchBackground, thumb);
    }
|
java
|
    /**
     * Animates the content view to the given horizontal translation and
     * alpha, cancelling any animation already in flight.
     *
     * @param x        target translationX in pixels
     * @param alpha    target alpha
     * @param listener animator callbacks (may be null)
     * @param duration animation duration in milliseconds
     */
    public void animateContentViewTranslationX(float x, float alpha,
                                               Animator.AnimatorListener listener,
                                               int duration) {
        if (DBG) Log.v(TAG, "animateContentViewTranslationX - " +
                "x=" + x + ", alpha=" + alpha);
        // Cancel first so a previous animation's listener cannot fire late.
        mContentView.animate().cancel();
        mContentView.animate()
                .alpha(alpha)
                .translationX(x)
                .setListener(listener)
                .setDuration(duration)
                .start();
    }
|
python
|
    def _bind_model(self):
        """
        This method binds the various model objects together in the correct hierarchy
        and adds references to this database object in the groups.

        Raises:
            ValueError: if the first parsed group is not at level 0.
            NotImplementedError: if an entry references no known group.
        """
        if self.groups[0].level != 0:
            self.log.info("Got invalid first group: {0}".format(self.groups[0]))
            raise ValueError("Invalid group tree: first group must have level of 0 (got {0})".format(self.groups[0].level))
        # The KeePassX source code maintains that first group to have incremented
        # level is a child of the previous group with a lower level.
        #
        # [R]
        #  | A (1)
        #  +-| B (2)
        #  | | C (2)
        #  | D (1)
        #  +-| E (2)
        #    | F (2)
        #    +-| G (3)
        #      | H (3)
        #      | I (3)
        #
        class Stack(list):
            """ A class to make parsing code slightly more semantic. """
            def push(self, el):
                self.append(el)
        # This is a different parsing approach than taken by KeePassX (or other python
        # libs), but seems a little more intuitive.  It could be further simplified
        # by noting that current_parent is always parent_stack[-1], but this is a bit
        # more readable.
        parent_stack = Stack([self.root])
        current_parent = self.root
        prev_group = None
        for g in self.groups:
            g.db = self # Bind database to group objects
            if prev_group is not None: # first iteration is exceptional
                if g.level > prev_group.level: # Always true for iteration 1 since root has level of -1
                    # Dropping down a level; the previous group is the parent
                    current_parent = prev_group
                    parent_stack.push(current_parent)
                elif g.level < prev_group.level:
                    # Pop off parents until we have a parent with a level that is less than current
                    while g.level <= current_parent.level:
                        current_parent = parent_stack.pop()
                    parent_stack.push(current_parent) # We want to make sure that the top of the stack always matches current parent
            # bi-directional child-parent binding
            g.parent = current_parent
            current_parent.children.append(g)
            prev_group = g
        # Bind group objects to entries, matching on the stored group id.
        for entry in self.entries:
            for group in self.groups:
                if entry.group_id == group.id:
                    group.entries.append(entry)
                    entry.group = group
                    break
            else:
                # KeePassX adds these to the first group (i.e. root.children[0])
                raise NotImplementedError("Orphaned entries not (yet) supported.")
|
python
|
    def _generic_model(self, z3_model):
        """
        Converts a Z3 model to a name->primitive dict.

        :param z3_model: Z3 model to walk; each entry is a function
            declaration for a model constant.
        :return: dict mapping declaration name to a primitive Python value.
        """
        model = { }
        for m_f in z3_model:
            # Declaration name, decoded from the Z3 C API byte string.
            n = _z3_decl_name_str(m_f.ctx.ctx, m_f.ast).decode()
            # Apply the (zero-arity) declaration, then evaluate it in the
            # model and lower the resulting AST to a primitive.
            m = m_f()
            me = z3_model.eval(m)
            model[n] = self._abstract_to_primitive(me.ctx.ctx, me.ast)
        return model
|
java
|
    /**
     * Runs SLINK steps 3 and 4 for object {@code id} against the first
     * {@code n} already-processed objects.
     *
     * @param id     current object
     * @param ids    all object ids (not used directly here)
     * @param it     iterator over the processed prefix
     * @param n      number of already-processed objects
     * @param pi     pointer representation store
     * @param lambda merge-height store
     * @param m      temporary distance store
     */
    protected void process(DBIDRef id, ArrayDBIDs ids, DBIDArrayIter it, int n, WritableDBIDDataStore pi, WritableDoubleDataStore lambda, WritableDoubleDataStore m) {
        slinkstep3(id, it, n, pi, lambda, m);
        slinkstep4(id, it, n, pi, lambda);
    }
|
python
|
def publish_workflow_submission(self,
user_id,
workflow_id_or_name,
parameters):
"""Publish workflow submission parameters."""
msg = {
"user": user_id,
"workflow_id_or_name": workflow_id_or_name,
"parameters": parameters
}
self._publish(msg)
|
java
|
public static void addHeaders(BoundRequestBuilder builder,
Map<String, String> headerMap) {
for (Entry<String, String> entry : headerMap.entrySet()) {
String name = entry.getKey();
String value = entry.getValue();
builder.addHeader(name, value);
}
}
|
python
|
async def quit(self):
"""
Sends a SMTP 'QUIT' command. - Ends the session.
For further details, please check out `RFC 5321 § 4.1.1.10`_.
Returns:
(int, str): A (code, message) 2-tuple containing the server
response. If the connection is already closed when calling this
method, returns (-1, None).
.. _`RFC 5321 § 4.1.1.10`: https://tools.ietf.org/html/rfc5321#section-4.1.1.10
"""
code = -1
message = None
try:
code, message = await self.do_cmd("QUIT")
except ConnectionError:
# We voluntarily ignore this kind of exceptions since... the
# connection seems already closed.
pass
except SMTPCommandFailedError:
pass
await self.close()
return code, message
|
python
|
def join(
self,
inner_enumerable,
outer_key=lambda x: x,
inner_key=lambda x: x,
result_func=lambda x: x
):
"""
Return enumerable of inner equi-join between two enumerables
:param inner_enumerable: inner enumerable to join to self
:param outer_key: key selector of outer enumerable as lambda expression
:param inner_key: key selector of inner enumerable as lambda expression
:param result_func: lambda expression to transform result of join
:return: new Enumerable object
"""
if not isinstance(inner_enumerable, Enumerable3):
raise TypeError(
u"inner_enumerable parameter must be an instance of Enumerable"
)
return Enumerable3(
itertools.product(
filter(
lambda x: outer_key(x) in map(inner_key, inner_enumerable),
self
),
filter(
lambda y: inner_key(y) in map(outer_key, self),
inner_enumerable
)
)
)\
.where(lambda x: outer_key(x[0]) == inner_key(x[1]))\
.select(result_func)
|
java
|
    /**
     * Reports {@code errorMessage} through the annotation-processing
     * messager as a compile-time error.
     *
     * @param errorMessage message text to report
     */
    public void createErrorMessage(String errorMessage) {
        // Route through a PrintWriter so the message carries a trailing newline.
        StringWriter sw = new StringWriter();
        PrintWriter pw = new PrintWriter(sw);
        pw.println(errorMessage);
        pw.close();
        messager.printMessage(Diagnostic.Kind.ERROR,
                sw.toString());
    }
|
python
|
def convert_simple_rnn(builder, layer, input_names, output_names, keras_layer):
    """Convert a SimpleRNN layer from keras to coreml.
    Parameters
    ----------
    keras_layer: layer
        A keras layer object.
    builder: NeuralNetworkBuilder
        A neural network builder object.
    """
    # Get input and output names
    hidden_size = keras_layer.output_dim
    input_size = keras_layer.input_shape[-1]
    output_all = keras_layer.return_sequences
    reverse_input = keras_layer.go_backwards
    if keras_layer.consume_less not in ['cpu', 'gpu']:
        raise ValueError('Cannot convert Keras layer with consume_less = %s' % keras_layer.consume_less)
    # Keras stores SimpleRNN weights as [W_x, W_h, b] for both consume_less
    # modes (the original cpu/gpu branches were identical), and CoreML
    # expects the transposed layout. The zero-filled placeholders the old
    # code built first were dead stores and have been dropped.
    weights = keras_layer.get_weights()
    W_x = weights[0].T
    W_h = weights[1].T
    b = weights[2]
    # Set activation type
    activation_str = _get_recurrent_activation_name_from_keras(keras_layer.activation)
    # Add to the network
    builder.add_simple_rnn(
        name = layer,
        W_h = W_h, W_x = W_x, b = b,
        hidden_size = hidden_size,
        input_size = input_size,
        activation = activation_str,
        input_names = input_names,
        output_names = output_names,
        output_all=output_all,
        reverse_input=reverse_input)
|
java
|
    /**
     * Returns an assertion like this one, except that the listed double
     * fields of the map values are compared with the given tolerance.
     *
     * @param tolerance            allowed absolute difference
     * @param firstFieldDescriptor first field compared with tolerance
     * @param rest                 further fields compared with tolerance
     */
    public MapWithProtoValuesFluentAssertion<M> usingDoubleToleranceForFieldDescriptorsForValues(
            double tolerance, FieldDescriptor firstFieldDescriptor, FieldDescriptor... rest) {
        return usingConfig(
                config.usingDoubleToleranceForFieldDescriptors(
                        tolerance, asList(firstFieldDescriptor, rest)));
    }
|
java
|
    /**
     * Accumulates, into {@code matches}, the beans among {@code predecessors}
     * that reference {@code reference} (delegating to the single-bean
     * overload for each candidate).
     *
     * @param reference    id being searched for
     * @param predecessors candidate referencing beans
     * @param matches      accumulator of matches; also the return value
     * @param checked      beans already visited, to avoid re-checking
     * @return the {@code matches} accumulator
     */
    private List<Bean> findReferences(BeanId reference, Collection<Bean> predecessors,
            ArrayList<Bean> matches, ArrayList<Bean> checked) {
        for (Bean predecessor : predecessors) {
            findReferences(reference, predecessor, matches, checked);
        }
        return matches;
    }
|
python
|
def compose(*decorators):
    """Helper to compose decorators::
        @a
        @b
        def f():
            pass
    Is equivalent to::
        @compose(a, b)
        def f():
            ...
    """
    def composed(func):
        # Apply right-to-left so that compose(a, b) == a(b(func)),
        # mirroring stacked-decorator order.
        wrapped = func
        for decorator in decorators[::-1]:
            wrapped = decorator(wrapped)
        return wrapped
    return composed
|
java
|
    /**
     * Deletes a damaged cache file, logging a warning in every outcome:
     * deleted, not deletable, or an exception during deletion (swallowed).
     *
     * @param cacheFile damaged cache file to remove
     */
    private static void deleteCacheFile(final File cacheFile) {
        try {
            if (cacheFile.delete()) {
                LOG.warn(String.format(MSG_CACHE_FILE_IS_DAMAGED_AND_DELETED, cacheFile.getPath()));
            } else {
                LOG.warn(String.format(MSG_CACHE_FILE_IS_DAMAGED, cacheFile.getPath()));
            }
        } catch (final Exception e) {
            // Best effort only: a damaged cache must never break the caller.
            LOG.warn(String.format(MSG_CACHE_FILE_IS_DAMAGED, cacheFile.getPath()));
        }
    }
|
python
|
def vhosts():
    '''
    Show the settings as parsed from the config file (currently
    only shows the virtualhost settings) (``apachectl -S``).
    Because each additional virtual host adds to the execution
    time, this command may require a long timeout be specified
    by using ``-t 10``.
    CLI Example:
    .. code-block:: bash
        salt -t 10 '*' apache.vhosts
    '''
    cmd = '{0} -S'.format(_detect_os())
    ret = {}
    namevhost = ''
    out = __salt__['cmd.run'](cmd)
    # Parse "apachectl -S" output: a "<addr> is a NameVirtualHost" header
    # line starts a section, followed by "default ..." / "port ..." members.
    for line in out.splitlines():
        if not line:
            continue
        comps = line.split()
        if 'is a NameVirtualHost' in line:
            namevhost = comps[0]
            ret[namevhost] = {}
        else:
            if comps[0] == 'default':
                # e.g. "default server example.com (/etc/httpd/conf.d/x.conf:1)"
                ret[namevhost]['default'] = {}
                ret[namevhost]['default']['vhost'] = comps[2]
                ret[namevhost]['default']['conf'] = re.sub(
                    r'\(|\)',
                    '',
                    comps[3]
                )
            if comps[0] == 'port':
                # e.g. "port 80 namevhost example.com (/etc/httpd/...:1)"
                ret[namevhost][comps[3]] = {}
                ret[namevhost][comps[3]]['vhost'] = comps[3]
                ret[namevhost][comps[3]]['conf'] = re.sub(
                    r'\(|\)',
                    '',
                    comps[4]
                )
                ret[namevhost][comps[3]]['port'] = comps[1]
    return ret
|
python
|
def set_editor_ids_order(self, ordered_editor_ids):
"""
Order the root file items in the Outline Explorer following the
provided list of editor ids.
"""
if self.ordered_editor_ids != ordered_editor_ids:
self.ordered_editor_ids = ordered_editor_ids
if self.sort_files_alphabetically is False:
self.__sort_toplevel_items()
|
java
|
    /**
     * Shuts the container down: invokes any pre-destroy hook on each cached
     * singleton that was created from a BeanBox, then clears all caches.
     * Exceptions from hooks are swallowed so shutdown always completes.
     */
    public void close() {
        for (Entry<Object, Object> singletons : singletonCache.entrySet()) {
            Object key = singletons.getKey();
            Object obj = singletons.getValue();
            if (key instanceof BeanBox) {
                BeanBox box = (BeanBox) key;
                if (box.getPreDestroy() != null)
                    try {
                        box.getPreDestroy().invoke(obj);
                    } catch (Exception e) {
                        // Eat it here, but usually need log it
                    }
            }
        }
        bindCache.clear();
        beanBoxMetaCache.clear();
        singletonCache.clear();
    }
|
python
|
def NS(s, o):
    """
    Nash Sutcliffe efficiency coefficient.

    input:
        s: simulated values (array-like)
        o: observed values (array-like)
    output:
        ns: 1 minus the ratio of residual sum of squares to the variance
            of the observations; 1.0 means a perfect match.
    """
    residual_ss = np.sum((s - o) ** 2)
    deviation_ss = np.sum((o - np.mean(o)) ** 2)
    return 1 - residual_ss / deviation_ss
|
java
|
    /**
     * Adds a device-diagnostics entry to this report.
     *
     * @param key   diagnostic key
     * @param value diagnostic value
     * @return this report, for chaining
     * @deprecated retained for backwards compatibility
     */
    @Deprecated
    public Report setDeviceInfo(String key, Object value) {
        diagnostics.device.put(key, value);
        return this;
    }
|
java
|
    /**
     * Depth-first search for the element whose ID-typed attribute — or, as a
     * legacy fallback, whose literal "id" attribute — equals {@code name}.
     *
     * @param name id value to look for
     * @return the matching element, or null if none is found in this subtree
     */
    Element getElementById(String name) {
        for (Attr attr : attributes) {
            if (attr.isId() && name.equals(attr.getValue())) {
                return this;
            }
        }
        /*
         * TODO: Remove this behavior.
         * The spec explicitly says that this is a bad idea. From
         * Document.getElementById(): "Attributes with the name "ID"
         * or "id" are not of type ID unless so defined.
         */
        if (name.equals(getAttribute("id"))) {
            return this;
        }
        // Recurse into element children only.
        for (NodeImpl node : children) {
            if (node.getNodeType() == Node.ELEMENT_NODE) {
                Element element = ((ElementImpl) node).getElementById(name);
                if (element != null) {
                    return element;
                }
            }
        }
        return null;
    }
|
java
|
    /**
     * Builds an equality expression comparing this field to {@code value}.
     *
     * @param value float literal; rendered single-quoted into the expression
     * @return the resulting {@code eq} expression
     */
    public Expression<Float> eq(float value) {
        String valueString = "'" + value + "'";
        return new Expression<Float>(this, Operation.eq, valueString);
    }
|
java
|
    /**
     * Sets the mapped field for {@code columnName} on {@code instance} via
     * reflection.
     *
     * @param instance   target object
     * @param columnName column whose backing field is written
     * @param value      value to assign
     * @throws IllegalArgumentException when the column has no mapped field
     */
    public <V> void setColumnValue(T instance, String columnName, V value) {
        Field field = fields.get(columnName);
        if (field == null) {
            throw new IllegalArgumentException("Column not found: "
                    + columnName);
        }
        try {
            field.set(instance, value);
        } catch (IllegalAccessException e) {
            throw new RuntimeException(e); // should never get here
        }
    }
|
python
|
def _fit_strel_to_im_2d(im, strel, r, x, y):
r"""
Helper function to add a structuring element to a 2D image.
Used by RSA. Makes sure if center is less than r pixels from edge of image
that the strel is sliced to fit.
"""
elem = strel.copy()
x_dim, y_dim = im.shape
x_min = x-r
x_max = x+r+1
y_min = y-r
y_max = y+r+1
if x_min < 0:
x_adj = -x_min
elem = elem[x_adj:, :]
x_min = 0
elif x_max > x_dim:
x_adj = x_max - x_dim
elem = elem[:-x_adj, :]
if y_min < 0:
y_adj = -y_min
elem = elem[:, y_adj:]
y_min = 0
elif y_max > y_dim:
y_adj = y_max - y_dim
elem = elem[:, :-y_adj]
ex, ey = elem.shape
im[x_min:x_min+ex, y_min:y_min+ey] += elem
return im
|
java
|
    /**
     * Parses the script once, then runs it either over standard input (when
     * no file arguments were given) or over each named file in turn.
     *
     * @throws CompilationFailedException if the script does not compile
     * @throws IOException               on read/write failure
     * @throws URISyntaxException        if the script source URI is invalid
     */
    private void processFiles() throws CompilationFailedException, IOException, URISyntaxException {
        GroovyShell groovy = new GroovyShell(conf);
        setupContextClassLoader(groovy);
        Script s = groovy.parse(getScriptSource(isScriptFile, script));
        if (args.isEmpty()) {
            // No files: filter stdin to stdout.
            try(BufferedReader reader = new BufferedReader(new InputStreamReader(System.in))) {
                PrintWriter writer = new PrintWriter(System.out);
                processReader(s, reader, writer);
                writer.flush();
            }
        } else {
            Iterator i = args.iterator();
            while (i.hasNext()) {
                String filename = (String) i.next();
                //TODO: These are the arguments for -p and -i.  Why are we searching using Groovy script extensions?
                // Where is this documented?
                File file = huntForTheScriptFile(filename);
                processFile(s, file);
            }
        }
    }
|
java
|
    /**
     * Computes the LevelDB-style masked CRC32C over the record-type byte
     * followed by the payload slice.
     *
     * @param data payload buffer
     * @param off  offset of the payload within {@code data}
     * @param len  payload length
     * @param type record type whose value byte seeds the checksum
     * @return masked CRC32C value
     */
    private int generateCrc(byte[] data, int off, int len, RecordType type) {
        Crc32c crc = new Crc32c();
        crc.update(type.value());
        crc.update(data, off, len);
        return (int) LevelDbConstants.maskCrc(crc.getValue());
    }
|
java
|
    /**
     * Returns all definition-specification option values for the given
     * specification option id (unpaginated, default ordering).
     *
     * @param CPSpecificationOptionId specification option id to match
     * @return matching values
     */
    @Override
    public List<CPDefinitionSpecificationOptionValue> findByCPSpecificationOptionId(
            long CPSpecificationOptionId) {
        return findByCPSpecificationOptionId(CPSpecificationOptionId,
                QueryUtil.ALL_POS, QueryUtil.ALL_POS, null);
    }
|
python
|
def _fulfillment_from_details(data, _depth=0):
    """Load a fulfillment for a signing spec dictionary
    Args:
        data: tx.output[].condition.details dictionary
        _depth: internal recursion counter bounding threshold nesting.
    Raises:
        ThresholdTooDeep: when subconditions nest 100 levels deep.
        UnsupportedTypeError: when ``data['type']`` is not recognized.
    """
    # Guard against maliciously deep threshold trees.
    if _depth == 100:
        raise ThresholdTooDeep()
    if data['type'] == 'ed25519-sha-256':
        public_key = base58.b58decode(data['public_key'])
        return Ed25519Sha256(public_key=public_key)
    if data['type'] == 'threshold-sha-256':
        threshold = ThresholdSha256(data['threshold'])
        # Recursively rebuild and attach each subcondition.
        for cond in data['subconditions']:
            cond = _fulfillment_from_details(cond, _depth+1)
            threshold.add_subfulfillment(cond)
        return threshold
    raise UnsupportedTypeError(data.get('type'))
|
java
|
    /**
     * Installs the finish callback; when the callback also implements
     * {@code GVROnRepeat}, the animation is switched to looping mode.
     *
     * @param callback finish (and possibly repeat) callback, may be null
     * @return this animation, for chaining
     */
    public GVRAnimation setOnFinish(GVROnFinish callback) {
        mOnFinish = callback;
        // Do the instance-of test at set-time, not at use-time
        mOnRepeat = callback instanceof GVROnRepeat ? (GVROnRepeat) callback
                : null;
        if (mOnRepeat != null) {
            mRepeatCount = -1; // loop until iterate() returns false
        }
        return this;
    }
|
python
|
def load_config_from_cli_arguments(self, *args, **kwargs):
"""
Get config values of passed in CLI options.
:param dict kwargs: CLI options
"""
self._load_config_from_cli_argument(key='handlers_package', **kwargs)
self._load_config_from_cli_argument(key='auth', **kwargs)
self._load_config_from_cli_argument(key='user_stream', **kwargs)
self._load_config_from_cli_argument(key='min_seconds_between_errors', **kwargs)
self._load_config_from_cli_argument(key='sleep_seconds_on_consecutive_errors', **kwargs)
|
java
|
    /**
     * Repopulates this message from its serialized byte form by
     * deserializing a temporary instance and copying its map representation
     * into this one.
     *
     * @param data serialized message bytes
     * @return this instance, repopulated
     * @throws RuntimeException wrapping reflective instantiation failures
     */
    @Override
    public UniversalIdStrMessage fromBytes(byte[] data) {
        try {
            UniversalIdStrMessage other = BaseUniversalMessage.fromBytes(data,
                    UniversalIdStrMessage.class);
            fromMap(other.toMap());
            return this;
        } catch (InstantiationException | IllegalAccessException e) {
            throw new RuntimeException(e);
        }
    }
|
java
|
    /**
     * Allocates the plain-data buffer (header overhead plus payload) and
     * fills the init-vector region: from {@code initVector} when provided
     * (truncated to INITV_SIZE), otherwise from the current timestamp plus
     * a random server id.
     *
     * @param payloadSize number of payload bytes to reserve after the header
     * @param initVector  optional caller-supplied IV, may be null
     * @return zero-initialized buffer of OVERHEAD_SIZE + payloadSize bytes
     *         with the IV region populated
     */
    protected byte[] initPlainData(int payloadSize, @Nullable byte[] initVector) {
        byte[] plainData = new byte[OVERHEAD_SIZE + payloadSize];
        if (initVector == null) {
            ByteBuffer byteBuffer = ByteBuffer.wrap(plainData);
            byteBuffer.putLong(INITV_TIMESTAMP_OFFSET, millisToSecsAndMicros(System.currentTimeMillis()));
            byteBuffer.putLong(INITV_SERVERID_OFFSET, ThreadLocalRandom.current().nextLong());
        } else {
            System.arraycopy(initVector, 0, plainData, INITV_BASE, min(INITV_SIZE, initVector.length));
        }
        return plainData;
    }
|
java
|
    /**
     * Flattens the ordered list of portal data types into an immutable,
     * ordered list of data keys used to sequence imports.
     *
     * @param dataTypeImportOrder data types in desired import order
     */
    @Autowired
    public void setDataTypeImportOrder(List<IPortalDataType> dataTypeImportOrder) {
        final ArrayList<PortalDataKey> dataKeyImportOrder =
                new ArrayList<>(dataTypeImportOrder.size() * 2);
        for (final IPortalDataType portalDataType : dataTypeImportOrder) {
            final List<PortalDataKey> supportedDataKeys = portalDataType.getDataKeyImportOrder();
            for (final PortalDataKey portalDataKey : supportedDataKeys) {
                /*
                 * Special Handling:  Need to prevent some keys from entering our
                 * sorted collection because they attempt to import both a group
                 * part and the membership parts of a group (either local or PAGS)
                 * in one go.  We import several entities at once, so it's important
                 * to do these in 2 phases.
                 */
                if (!KEYS_TO_IGNORE.contains(portalDataKey)) {
                    dataKeyImportOrder.add(portalDataKey);
                }
            }
        }
        dataKeyImportOrder.trimToSize();
        this.dataKeyImportOrder = Collections.unmodifiableList(dataKeyImportOrder);
    }
|
java
|
    /**
     * Returns the built-in targeting.
     *
     * @return the current {@code builtInTargeting} value (may be null)
     */
    public com.google.api.ads.admanager.axis.v201902.Targeting getBuiltInTargeting() {
        return builtInTargeting;
    }
|
python
|
def getopenfilenames(parent=None, caption='', basedir='', filters='',
                     selectedfilter='', options=None):
    """Wrapper around QtGui.QFileDialog.getOpenFileNames static method

    Returns a tuple (filenames, selectedfilter) -- when dialog box is canceled,
    returns a tuple (empty list, empty string)
    Compatible with PyQt >=v4.4 (API #1 and #2) and PySide >=v1.0

    All arguments are forwarded unchanged to the shared
    ``_qfiledialog_wrapper`` helper, which papers over the binding
    differences."""
    return _qfiledialog_wrapper('getOpenFileNames', parent=parent,
                                caption=caption, basedir=basedir,
                                filters=filters, selectedfilter=selectedfilter,
                                options=options)
|
java
|
public void setBinding(String binding)
throws JspException {
if (binding!= null && !UIComponentTag.isValueReference(binding)) {
// PENDING i18n
throw new JspException("Invalid Expression:"+binding);
}
this.binding = binding;
}
|
python
|
def create_vhost(self, name, tracing=False):
"""
Create an individual vhost.
:param name: The vhost name
:type name: str
:param tracing: Set to ``True`` to enable tracing
:type tracing: bool
"""
data = {'tracing': True} if tracing else {}
self._api_put(
'/api/vhosts/{0}'.format(urllib.parse.quote_plus(name)),
data=data,
)
|
python
|
    def rmtree_p(self):
        """ Like :meth:`rmtree`, but does not raise an exception if the
        directory does not exist.

        :return: this path object, for chaining. """
        try:
            self.rmtree()
        except OSError:
            # sys.exc_info() instead of "except ... as e" keeps this block
            # compatible with both Python 2 and 3 exception scoping.
            _, e, _ = sys.exc_info()
            # Only "no such file or directory" is tolerated.
            if e.errno != errno.ENOENT:
                raise
        return self
|
python
|
    def populateViewTree(self, view):
        '''
        Populates the View tree.

        Inserts ``view`` under its parent (or at the root when it has no
        parent), marks targets with ``*`` in the "T" column, and binds the
        click handler for tagged items.
        '''
        vuid = view.getUniqueId()
        text = view.__smallStr__()
        if view.getParent() is None:
            self.viewTree.insert('', Tkinter.END, vuid, text=text)
        else:
            self.viewTree.insert(view.getParent().getUniqueId(), Tkinter.END, vuid, text=text, tags=('ttk'))
            self.viewTree.set(vuid, 'T', '*' if view.isTarget() else ' ')
            self.viewTree.tag_bind('ttk', '<1>', self.viewTreeItemClicked)
|
python
|
def compile_excludes(self):
"""Compile a set of regexps for files to be exlcuded from scans."""
self.compiled_exclude_files = []
for pattern in self.exclude_files:
try:
self.compiled_exclude_files.append(re.compile(pattern))
except re.error as e:
raise ValueError(
"Bad python regex in exclude '%s': %s" % (pattern, str(e)))
|
java
|
    /**
     * Counts entities of {@code entityClass} that match the given
     * restrictions.
     *
     * @param criterion optional Hibernate restrictions to apply
     * @return total row count
     * @throws HibernateException on query failure
     */
    public Number getTotalCount(Criterion... criterion) throws HibernateException {
        Criteria criteria = getSession().createCriteria(entityClass);
        addCriterionsToCriteria(criteria, criterion);
        criteria.setProjection(Projections.rowCount());
        return (Long) criteria.uniqueResult();
    }
|
java
|
    /**
     * Binds a track to this view: loads its artwork and fills in artist,
     * title and a mm:ss duration. A null track only stores the reference.
     *
     * @param track track to display, may be null
     */
    public void setModel(SoundCloudTrack track) {
        mModel = track;
        if (mModel != null) {
            Picasso.with(getContext())
                    .load(SoundCloudArtworkHelper.getArtworkUrl(mModel, SoundCloudArtworkHelper.XLARGE))
                    .placeholder(R.color.grey_light)
                    .fit()
                    .centerInside()
                    .into(mArtwork);
            mArtist.setText(mModel.getArtist());
            mTitle.setText(mModel.getTitle());
            // Millisecond duration split into whole minutes and seconds.
            long min = mModel.getDurationInMilli() / 60000;
            long sec = (mModel.getDurationInMilli() % 60000) / 1000;
            mDuration.setText(String.format(getResources().getString(R.string.duration), min, sec));
        }
    }
|
python
|
def main():
    '''
    Organization function
    -setups logging
    -gets inputdata
    -calls plotting function

    Any exception is logged with traceback, a crash notice is printed,
    and the exception is re-raised.
    '''
    args = get_args()
    try:
        utils.make_output_dir(args.outdir)
        utils.init_logs(args)
        args.format = nanoplotter.check_valid_format(args.format)
        settings = vars(args)
        settings["path"] = path.join(args.outdir, args.prefix)
        # Exactly one of these sources is expected to be set by get_args().
        sources = {
            "fastq": args.fastq,
            "bam": args.bam,
            "cram": args.cram,
            "fastq_rich": args.fastq_rich,
            "fastq_minimal": args.fastq_minimal,
            "summary": args.summary,
            "fasta": args.fasta,
            "ubam": args.ubam,
        }
        if args.pickle:
            # Reuse previously extracted data instead of re-parsing input.
            datadf = pickle.load(open(args.pickle, 'rb'))
        else:
            datadf = get_input(
                source=[n for n, s in sources.items() if s][0],
                files=[f for f in sources.values() if f][0],
                threads=args.threads,
                readtype=args.readtype,
                combine="simple",
                barcoded=args.barcoded)
        if args.store:
            pickle.dump(
                obj=datadf,
                file=open(settings["path"] + "NanoPlot-data.pickle", 'wb'))
        if args.raw:
            datadf.to_csv(settings["path"] + "NanoPlot-data.tsv.gz",
                          sep="\t",
                          index=False,
                          compression="gzip")
        settings["statsfile"] = [make_stats(datadf, settings, suffix="")]
        datadf, settings = filter_and_transform_data(datadf, settings)
        if settings["filtered"]:  # Bool set when filter was applied in filter_and_transform_data()
            settings["statsfile"].append(make_stats(datadf, settings, suffix="_post_filtering"))
        if args.barcoded:
            # One plot set per barcode, skipping barcodes with <= 5 reads.
            barcodes = list(datadf["barcode"].unique())
            plots = []
            for barc in barcodes:
                logging.info("Processing {}".format(barc))
                settings["path"] = path.join(args.outdir, args.prefix + barc + "_")
                dfbarc = datadf[datadf["barcode"] == barc]
                if len(dfbarc) > 5:
                    settings["title"] = barc
                    plots.extend(
                        make_plots(dfbarc, settings)
                    )
                else:
                    sys.stderr.write("Found barcode {} less than 5x, ignoring...\n".format(barc))
                    logging.info("Found barcode {} less than 5 times, ignoring".format(barc))
        else:
            plots = make_plots(datadf, settings)
        make_report(plots, settings)
        logging.info("Finished!")
    except Exception as e:
        logging.error(e, exc_info=True)
        print("\n\n\nIf you read this then NanoPlot has crashed :-(")
        print("Please try updating NanoPlot and see if that helps...\n")
        print("If not, please report this issue at https://github.com/wdecoster/NanoPlot/issues")
        print("If you could include the log file that would be really helpful.")
        print("Thanks!\n\n\n")
        raise
|
java
|
    /**
     * Returns the AmqpTemplate configured for the given queue.
     *
     * @param queueName queue to look up
     * @return the template, or null when the context map has not been
     *         initialized or contains no entry for the queue
     */
    public AmqpTemplate getAmqpTemplate(String queueName)
    {
        // Before calling this method, the contextMap must have been
        // initialized via this#initContextMap().
        if (this.contextMap == null)
        {
            return null;
        }
        RabbitmqClusterContext context = this.contextMap.get(queueName);
        if (context == null)
        {
            return null;
        }
        return context.getAmqpTemplate();
    }
|
java
|
    /**
     * Sets the column count and refreshes the layout; a no-op when the
     * count is unchanged.
     *
     * @param n desired number of columns, in [1, 5]
     * @throws IllegalArgumentException when n is outside [1, 5]
     */
    public void setColumns(int n) {
        if (n < 1) throw new IllegalArgumentException("Min column count is 1");
        if (n > 5) throw new IllegalArgumentException("Max column count is 5");
        if (n == columns) return;
        this.columns = n;
        refresh(this.columns);
    }
|
java
|
    /**
     * Finds a free, size-aligned region of exactly {@code size} bytes by
     * walking the region tree, preferring the leftmost (lowest-address) fit.
     *
     * @param size requested region size; must be a power of two
     * @return an aligned region of the requested size, or null when no
     *         region in the tree can satisfy the request
     */
    private Region find(long size) {
        // Power-of-two sizes let availability be tracked as a bitmask.
        validate(!VALIDATING || Long.bitCount(size) == 1);
        Node<Region> currentNode = getRoot();
        Region currentRegion = currentNode.getPayload();
        if (currentRegion == null || (currentRegion.available() & size) == 0) {
            //no region big enough for us...
            return null;
        } else {
            while (true) {
                Region left = currentNode.getLeft().getPayload();
                if (left != null && (left.available() & size) != 0) {
                    // Prefer the left subtree (lower addresses) when it fits.
                    currentNode = currentNode.getLeft();
                    currentRegion = currentNode.getPayload();
                } else if ((currentRegion.availableHere() & size) != 0) {
                    // Carve an aligned region out of the current node itself.
                    long mask = size - 1;
                    long a = (currentRegion.start() + mask) & ~mask;
                    return new Region(a, a + size - 1);
                } else {
                    Region right = currentNode.getRight().getPayload();
                    if (right != null && (right.available() & size) != 0) {
                        currentNode = currentNode.getRight();
                        currentRegion = currentNode.getPayload();
                    } else {
                        // available() promised a fit somewhere below; not finding
                        // one means the bookkeeping is corrupt.
                        throw new AssertionError();
                    }
                }
            }
        }
    }
|
python
|
def detect_direct_function_shadowing(contract):
    """
    Detects and obtains functions which are shadowed immediately by the provided ancestor contract.
    :param contract: The ancestor contract which we check for function shadowing within.
    :return: A list of tuples (overshadowing_function, overshadowed_immediate_base_contract, overshadowed_function)
    -overshadowing_function is the function defined within the provided contract that overshadows another
    definition.
    -overshadowed_immediate_base_contract is the immediate inherited-from contract that provided the shadowed
    function (could have provided it through inheritance, does not need to directly define it).
    -overshadowed_function is the function definition which is overshadowed by the provided contract's definition.
    """
    # Functions this contract itself declares, keyed by full signature.
    functions_declared = {function.full_name: function for function in contract.functions_and_modifiers_not_inherited}
    results = {}
    # Walk bases in reverse so the first match recorded per signature is the
    # most immediate one.
    for base_contract in reversed(contract.immediate_inheritance):
        for base_function in base_contract.functions_and_modifiers:

            # We already found the most immediate shadowed definition for this function, skip to the next.
            if base_function.full_name in results:
                continue

            # If this function is implemented and it collides with a definition in our immediate contract, we add
            # it to our results.
            if base_function.is_implemented and base_function.full_name in functions_declared:
                results[base_function.full_name] = (functions_declared[base_function.full_name], base_contract, base_function)

    return list(results.values())
|
java
|
    /**
     * Swaps in the real Pax Logging delegate for this JCL logger facade.
     *
     * @param paxLoggingManager manager used to obtain the backing logger
     * @param name              logger name
     */
    public void setPaxLoggingManager( PaxLoggingManager paxLoggingManager, String name )
    {
        m_delegate = paxLoggingManager.getLogger( name, JCL_FQCN );
    }
|
java
|
    /**
     * Lazily creates (or re-associates with the current vertex description)
     * the cached "moveTo" start point, marking the geometry as modified.
     */
    protected void _initPathStartPoint() {
        _touch();
        if (m_moveToPoint == null)
            m_moveToPoint = new Point(m_description);
        else
            m_moveToPoint.assignVertexDescription(m_description);
    }
|
python
|
    def load(self, environment, name, globals=None):
        """Loads a template.  This method looks up the template in the cache
        or loads one by calling :meth:`get_source`.  Subclasses should not
        override this method as loaders working on collections of other
        loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`)
        will not call this method but `get_source` directly.

        :param environment: environment providing compile() and the optional
            bytecode cache.
        :param name: template name to look up.
        :param globals: optional mapping of template globals.
        :return: a template instance created via the environment's
            template class.
        """
        code = None
        if globals is None:
            globals = {}

        # first we try to get the source for this template together
        # with the filename and the uptodate function.
        source, filename, uptodate = self.get_source(environment, name)

        # try to load the code from the bytecode cache if there is a
        # bytecode cache configured.
        bcc = environment.bytecode_cache
        if bcc is not None:
            bucket = bcc.get_bucket(environment, name, filename, source)
            code = bucket.code

        # if we don't have code so far (not cached, no longer up to
        # date) etc. we compile the template
        if code is None:
            code = environment.compile(source, name, filename)

        # if the bytecode cache is available and the bucket doesn't
        # have a code so far, we give the bucket the new code and put
        # it back to the bytecode cache.
        if bcc is not None and bucket.code is None:
            bucket.code = code
            bcc.set_bucket(bucket)

        return environment.template_class.from_code(environment, code,
                                                    globals, uptodate)
|
python
|
    def nunique(self, axis=0, dropna=True):
        """
        Count distinct observations over requested axis.
        Return Series with number of distinct observations. Can ignore NaN
        values.
        .. versionadded:: 0.20.0
        Parameters
        ----------
        axis : {0 or 'index', 1 or 'columns'}, default 0
            The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for
            column-wise.
        dropna : bool, default True
            Don't include NaN in the counts.
        Returns
        -------
        Series
        See Also
        --------
        Series.nunique: Method nunique for Series.
        DataFrame.count: Count non-NA cells for each column or row.
        Examples
        --------
        >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})
        >>> df.nunique()
        A    3
        B    1
        dtype: int64
        >>> df.nunique(axis=1)
        0    1
        1    2
        2    2
        dtype: int64
        """
        # Delegate per-column (or per-row) to Series.nunique via apply.
        return self.apply(Series.nunique, axis=axis, dropna=dropna)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.