language | func_code_string
---|---|
java | public void setTopologyPaths(String[] topologyPaths) throws IOException {
if (topologyPaths == null) {
this.topologyPaths = new String[0];
} else {
this.topologyPaths = topologyPaths;
}
} |
java | public static <T, E extends Exception, E2 extends Exception> void parse(final Iterator<? extends T> iter, long offset, long count,
final int processThreadNum, final int queueSize, final Try.Consumer<? super T, E> elementParser, final Try.Runnable<E2> onComplete) throws E, E2 {
parse(N.asList(iter), offset, count, 0, processThreadNum, queueSize, elementParser, onComplete);
} |
python | def render_css_classes(self):
"""
Return a string containing the css classes for the module.
>>> mod = DashboardModule(enabled=False, draggable=True,
... collapsible=True, deletable=True)
>>> mod.render_css_classes()
'dashboard-module disabled draggable collapsible deletable'
>>> mod.css_classes.append('foo')
>>> mod.render_css_classes()
'dashboard-module disabled draggable collapsible deletable foo'
>>> mod.enabled = True
>>> mod.render_css_classes()
'dashboard-module draggable collapsible deletable foo'
"""
ret = ['dashboard-module']
if not self.enabled:
ret.append('disabled')
if self.draggable:
ret.append('draggable')
if self.collapsible:
ret.append('collapsible')
if self.deletable:
ret.append('deletable')
ret += self.css_classes
return ' '.join(ret) |
java | public OvhPayment deposit_depositId_payment_GET(String depositId) throws IOException {
String qPath = "/me/deposit/{depositId}/payment";
StringBuilder sb = path(qPath, depositId);
String resp = exec(qPath, "GET", sb.toString(), null);
return convertTo(resp, OvhPayment.class);
} |
python | def iflatten_dict_values(node, depth=0):
"""
>>> from utool.util_dict import * # NOQA
"""
if isinstance(node, dict):
_iter = (iflatten_dict_values(value) for value in six.itervalues(node))
return util_iter.iflatten(_iter)
else:
return node |
python | def get_base_cnv_regions(data, work_dir, genome_default="transcripts1e4", include_gene_names=True):
"""Retrieve set of target regions for CNV analysis.
Subsets to extended transcript regions for WGS experiments to avoid
long runtimes.
"""
cov_interval = dd.get_coverage_interval(data)
base_regions = get_sv_bed(data, include_gene_names=include_gene_names)
# if we don't have a configured BED or regions to use for SV calling
if not base_regions:
# For genome calls, subset to regions near genes as targets
if cov_interval == "genome":
base_regions = get_sv_bed(data, genome_default, work_dir, include_gene_names=include_gene_names)
if base_regions:
base_regions = remove_exclude_regions(base_regions, base_regions, [data])
# Finally, default to the defined variant regions
if not base_regions:
base_regions = dd.get_variant_regions(data) or dd.get_sample_callable(data)
return bedutils.clean_file(base_regions, data) |
python | def ensure_mkdir_p(mode=0o777):
"""Decorator to ensure `mkdir_p` is called to the function's return value.
"""
def decorator(f):
@functools.wraps(f)
def decorated(*args, **kwargs):
path = f(*args, **kwargs)
mkdir_p(path, mode=mode)
return path
return decorated
return decorator |
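A minimal usage sketch of the decorator above (hypothetical `report_dir` function; assumes `ensure_mkdir_p` and a `mkdir_p(path, mode)` helper like the module's are in scope):

```python
import os

def mkdir_p(path, mode=0o777):
    # Stand-in for the module's helper: create path and parents if missing.
    os.makedirs(path, mode=mode, exist_ok=True)

@ensure_mkdir_p(mode=0o755)
def report_dir(base, name):
    # Returns a path; the decorator guarantees the directory exists afterwards.
    return os.path.join(base, name)

path = report_dir("/tmp", "reports")  # directory created as a side effect
```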
python | def upload(ctx, yes=False):
"""Upload the package to PyPI."""
import callee
version = callee.__version__
# check the package's version
# TODO: add a 'release' to automatically bless a version as release one
if version.endswith('-dev'):
fatal("Can't upload a development version (%s) to PyPI!", version)
# run the upload if it has been confirmed by the user
if not yes:
answer = input("Do you really want to upload to PyPI [y/N]? ")
yes = answer.strip().lower() == 'y'
if not yes:
logging.warning("Aborted -- not uploading to PyPI.")
return -2
logging.debug("Uploading version %s to PyPI...", version)
setup_py_upload = ctx.run('python setup.py sdist upload')
if not setup_py_upload.ok:
fatal("Failed to upload version %s to PyPI!", version,
cause=setup_py_upload)
logging.info("PyPI upload completed successfully.")
# add a Git tag and push
git_tag = ctx.run('git tag %s' % version)
if not git_tag.ok:
fatal("Failed to add a Git tag for uploaded version %s", version,
cause=git_tag)
git_push = ctx.run('git push && git push --tags')
if not git_push.ok:
fatal("Failed to push the release upstream.", cause=git_push) |
java | protected String getRealInputAttrName(String inputAttrName, String id, boolean isUser) {
boolean isInputAttrValueDN = UniqueNameHelper.isDN(id) != null;
boolean isInputAttrIdentifier = isIdentifierTypeProperty(inputAttrName);
if (!isInputAttrIdentifier && isInputAttrValueDN) {
// To suppress the below message from coming in trace.log due to defect 94474
/*
* if (tc.isWarningEnabled()) {
* //Tr.warning(tc, "the propertyForInput " + inputAttrName + " doesn't match the format of input value " + id + ", switch to uniqueName");
* Tr.warning(tc, WIMMessageKey.INVALID_PROPERTY_VALUE_FORMAT, inputAttrName);
* }
*/
inputAttrName = "uniqueName";
//take it as uniqueName because we don't know if it's externalName or uniqueName value
} else if (isInputAttrIdentifier && !isInputAttrValueDN) {
// if dealing with LoginAccount or Group
if (isUser) {
// To suppress the below message from coming in trace.log due to defect 94474
/*
* if (tc.isWarningEnabled()) {
* //Tr.warning(tc, "the propertyForInput " + inputAttrName + " doesn't match the format of input value " + id + ", switch to principalName");
* Tr.warning(tc, WIMMessageKey.INVALID_PROPERTY_VALUE_FORMAT, inputAttrName);
* }
*/
inputAttrName = "principalName"; // loginaccounts
}
//OR
else {
// To suppress the below message from coming in trace.log due to defect 94474
/*
* if (tc.isWarningEnabled()) {
* //Tr.warning(tc, "the propertyForInput " + inputAttrName + " doesn't match the format of input value " + id + ", swith to cn");
* Tr.warning(tc, WIMMessageKey.INVALID_PROPERTY_VALUE_FORMAT, inputAttrName);
* }
*/
inputAttrName = "cn"; // groups
}
}
return inputAttrName;
} |
python | def _read_register(self, reg):
"""Read 16 bit register value."""
self.buf[0] = reg
with self.i2c_device as i2c:
i2c.write(self.buf, end=1, stop=False)
i2c.readinto(self.buf, end=2)
return self.buf[0] << 8 | self.buf[1] |
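The return expression assembles the two buffer bytes big-endian; a standalone check of that bit math (pure Python, no I2C device needed):

```python
buf = bytearray([0x12, 0x34])   # bytes as they come back from the device
value = buf[0] << 8 | buf[1]    # big-endian combine
assert value == int.from_bytes(buf[:2], "big") == 0x1234
```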
python | def merge_strings(list_of_strings: Union[List[str], Tuple[str]]) -> dict:
"""
Pack the list of strings into two arrays: the concatenated chars and the \
individual string lengths. :func:`split_strings()` does the inverse.
:param list_of_strings: The :class:`tuple` or :class:`list` of :class:`str`-s \
or :class:`bytes`-s to pack.
:return: :class:`dict` with "strings" and "lengths" \
:class:`numpy.ndarray`-s.
"""
if not isinstance(list_of_strings, (tuple, list)):
raise TypeError("list_of_strings must be either a tuple or a list")
if len(list_of_strings) == 0:
return {"strings": numpy.array([], dtype="S1"),
"lengths": numpy.array([], dtype=int),
"str": None}
with_str = not isinstance(list_of_strings[0], bytes)
if with_str:
if not isinstance(list_of_strings[0], str):
raise TypeError("list_of_strings must contain either bytes or strings")
strings = numpy.array(["".join(list_of_strings).encode("utf-8")])
else:
merged = bytearray(sum(len(s) for s in list_of_strings))
offset = 0
for s in list_of_strings:
merged[offset:offset + len(s)] = s
offset += len(s)
strings = numpy.frombuffer(merged, dtype="S%d" % len(merged))
lengths = [0] * len(list_of_strings)
for i, s in enumerate(list_of_strings):
lengths[i] = len(s)
lengths = squeeze_bits(numpy.array(lengths, dtype=int))
return {"strings": strings, "lengths": lengths, "str": with_str} |
java | public BuildProject buildProject(String name, String reference) {
return buildProject(name, reference, null);
} |
java | public FoxHttpRequestBuilder addRequestHeader(String name, String value) {
foxHttpRequest.getRequestHeader().addHeader(name, value);
return this;
} |
python | def _rshift_logical(self, shift_amount):
"""
Logical shift right with a concrete shift amount
:param int shift_amount: Number of bits to shift right.
:return: The new StridedInterval after right shifting
:rtype: StridedInterval
"""
if self.is_empty:
return self
# If straddling the south pole, we'll have to split it into two, perform logical right shift on them
# individually, then union the result back together for better precision. Note that it's an improvement from
# the original WrappedIntervals paper.
ssplit = self._ssplit()
if len(ssplit) == 1:
l = self.lower_bound >> shift_amount
u = self.upper_bound >> shift_amount
stride = max(self.stride >> shift_amount, 1)
return StridedInterval(bits=self.bits,
lower_bound=l,
upper_bound=u,
stride=stride,
uninitialized=self.uninitialized
)
else:
a = ssplit[0]._rshift_logical(shift_amount)
b = ssplit[1]._rshift_logical(shift_amount)
return a.union(b) |
java | private final String load(String key, String defaultValue, String comment) {
if(options.containsKey(key)) {
return options.get(key);
} else if (config.getProperty(key) != null) {
return config.getString(key);
} else {
if (defaultValue != null) {
try {
config.addProperty(key, defaultValue);
if (comment != null) {
propertiesConfigurationLayout.setComment(key, comment);
} else {
propertiesConfigurationLayout
.setComment(
key,
"Automatically added default value. Please see Client Properties documentation on ExtWebDriver homepage.");
}
config.save(config.getPath());
} catch (ConfigurationException e) {
logger.error("Error saving updated property file ('" + config.getPath() + "')"
+ e);
}
}
return defaultValue;
}
} |
java | public void shutdown() {
shutdownCalled = true;
if (isRunning()) {
watchdog.waitForProcessStarted();
watchdog.destroyProcess();
watchdog.waitForTerminationAfterDestroy(2, SECONDS);
if (isRunning()) {
watchdog.destroyProcessForcefully();
watchdog.waitForTerminationAfterDestroy(1, SECONDS);
if (isRunning()) {
LOGGER.severe(String.format("Unable to kill process with PID %s", watchdog.getProcessId()));
}
}
}
// if shutdown() was called by something other than the shutdown hook, we don't need the shutdown hook anymore
try {
if (shutDownHook != null) {
Runtime.getRuntime().removeShutdownHook(shutDownHook);
}
} catch (IllegalStateException e) {
// ignore.. happens when the shutdown hook is in use, that's okay
}
} |
python | def to_python(self, value, resource):
"""Dictionary to Python object"""
if isinstance(value, dict):
d = {
self.aliases.get(k, k): self.to_python(v, resource) if isinstance(v, (dict, list)) else v
for k, v in six.iteritems(value)
}
return type(self.class_name, (), d)
elif isinstance(value, list):
return [self.to_python(x, resource) if isinstance(x, (dict, list)) else x for x in value]
else:
return value |
java | public static IConjunct negate( IConjunct conjunct)
{
AnyOf anyOf = new AnyOf();
for( Iterator<IDisjunct> disjuncts = conjunct.getDisjuncts();
disjuncts.hasNext();)
{
Conjunction conjunction = new Conjunction();
for( Iterator<IAssertion> assertions = disjuncts.next().getAssertions();
assertions.hasNext();)
{
conjunction.add( assertions.next().negate());
}
anyOf.add( simplify( conjunction));
}
return Cnf.convert( anyOf);
} |
java | public int initialSize()
{
if (tc.isEntryEnabled()) Tr.entry(tc, "initialSize",this);
if (tc.isEntryEnabled()) Tr.exit(tc, "initialSize",new Integer(_initialSize));
return _initialSize;
} |
java | @GetMapping(ENDPOINT_REDIRECT)
public View redirectToProvider(final HttpServletRequest request, final HttpServletResponse response) {
val wsfedId = request.getParameter(PARAMETER_NAME);
try {
val cfg = configurations.stream().filter(c -> c.getId().equals(wsfedId)).findFirst().orElse(null);
if (cfg == null) {
throw new IllegalArgumentException("Could not locate WsFederation configuration for " + wsfedId);
}
val service = determineService(request);
val id = wsFederationHelper.getRelyingPartyIdentifier(service, cfg);
val url = cfg.getAuthorizationUrl(id, cfg.getId());
wsFederationCookieManager.store(request, response, cfg.getId(), service, cfg);
return new RedirectView(url);
} catch (final Exception e) {
LOGGER.error(e.getMessage(), e);
}
throw new UnauthorizedServiceException(UnauthorizedServiceException.CODE_UNAUTHZ_SERVICE, StringUtils.EMPTY);
} |
python | def slice(self, window_size=1, step_size=1, cumulative=False,
count_only=False, subcorpus=True, feature_name=None):
"""
Returns a generator that yields ``(key, subcorpus)`` tuples for
sequential time windows.
Two common slicing patterns are the "sliding time-window" and the
"time-period" patterns. Whereas time-period slicing divides the corpus
into subcorpora by sequential non-overlapping time periods, subcorpora
generated by time-window slicing can overlap.
.. figure:: _static/images/bibliocoupling/timeline.timeslice.png
:width: 400
:align: center
**Time-period** slicing, with a window-size of 4 years.
.. figure:: _static/images/bibliocoupling/timeline.timewindow.png
:width: 400
:align: center
**Time-window** slicing, with a window-size of 4 years and a
step-size of 1 year.
*Sliding time-window* -- Set ``step_size=1``, and ``window_size`` to
the desired value.
*Time-period* -- ``step_size`` and ``window_size`` should have the same
value.
The value of ``key`` is always the first year in the slice.
Examples
--------
.. code-block:: python
>>> from tethne.readers.wos import read
>>> corpus = read('/path/to/data')
>>> for key, subcorpus in corpus.slice():
... print key, len(subcorpus)
2005, 5
2006, 5
Parameters
----------
window_size : int
(default: 1) Size of the time window, in years.
step_size : int
(default: 1) Number of years to advance window at each step.
Returns
-------
generator
"""
if 'date' not in self.indices:
self.index('date')
start = min(self.indices['date'].keys())
end = max(self.indices['date'].keys())
while start <= end - (window_size - 1):
selector = ('date', range(start, start + window_size, 1))
if cumulative:
year = start + window_size
else:
year = start
if count_only:
yield year, len(self.select(selector))
elif feature_name:
yield year, self.subfeatures(selector, feature_name)
elif subcorpus:
yield year, self.subcorpus(selector)
else:
yield year, self.select(selector)
if cumulative:
window_size += step_size
else:
start += step_size |
python | def have_consistent_saxis(magmoms):
"""
This method checks that all Magmom objects in a list have a
consistent spin quantization axis. To write MAGMOM tags to a
VASP INCAR, a global SAXIS value for all magmoms has to be used.
If saxis are inconsistent, can create consistent set with:
Magmom.get_consistent_set(magmoms)
:param magmoms: list of magmoms (Magmoms, scalars or vectors)
:return: bool
"""
magmoms = [Magmom(magmom) for magmom in magmoms]
ref_saxis = magmoms[0].saxis
match_ref = [magmom.saxis == ref_saxis for magmom in magmoms]
if np.all(match_ref):
return True
else:
return False |
python | def save_file(self, filename, text):
"""Save the given text under the given condition filename and the
current path.
If the current directory is not defined explicitly, the directory
name is constructed with the actual simulation end date. If
such a directory does not exist, it is created immediately.
"""
_defaultdir = self.DEFAULTDIR
try:
if not filename.endswith('.py'):
filename += '.py'
try:
self.DEFAULTDIR = (
'init_' + hydpy.pub.timegrids.sim.lastdate.to_string('os'))
except AttributeError:
pass
path = os.path.join(self.currentpath, filename)
with open(path, 'w', encoding="utf-8") as file_:
file_.write(text)
except BaseException:
objecttools.augment_excmessage(
'While trying to write the conditions file `%s`'
% filename)
finally:
self.DEFAULTDIR = _defaultdir |
java | public static boolean writeImage(BufferedImage image, String filename,
int dpi) throws IOException
{
return writeImage(image, filename, dpi, 1.0f);
} |
java | public void setTranslation(Tuple3D<?> translation) {
this.m03 = translation.getX();
this.m13 = translation.getY();
this.m23 = translation.getZ();
} |
java | public static void createFile(SshKeyPair key, String passphrase,
String comment, int format, File toFile) throws IOException {
SshPrivateKeyFile pub = create(key, passphrase, comment, format);
FileOutputStream out = new FileOutputStream(toFile);
try {
out.write(pub.getFormattedKey());
out.flush();
} finally {
out.close();
}
} |
java | public static int checkElementIndex(int index, int size, final String eid) {
if (isSizeIllegal(size)) {
throw new EidIllegalArgumentException(ensureEid(eid));
}
if (isIndexAndSizeIllegal(index, size)) {
throw new EidIndexOutOfBoundsException(ensureEid(eid));
}
return index;
} |
java | private Delta createDelta(PermissionUpdateRequest request) {
MapDeltaBuilder builder = Deltas.mapBuilder();
for (String permissionString : request.getPermitted()) {
builder.put("perm_" + validated(permissionString), 1);
}
for (String permissionString : request.getRevoked()) {
builder.remove("perm_" + validated(permissionString));
}
if (request.isRevokeRest()) {
builder.removeRest();
}
return builder.build();
} |
java | public static double[] decode(
String cvAccession,
byte[] data,
int dataSize
) {
switch (cvAccession) {
case ACC_NUMPRESS_LINEAR: {
double[] buffer = new double[dataSize * 2];
int nbrOfDoubles = MSNumpress.decodeLinear(data, dataSize, buffer);
double[] result = new double[nbrOfDoubles];
System.arraycopy(buffer, 0, result, 0, nbrOfDoubles);
return result;
}
case ACC_NUMPRESS_SLOF: {
double[] result = new double[dataSize / 2];
MSNumpress.decodeSlof(data, dataSize, result);
return result;
}
case ACC_NUMPRESS_PIC: {
double[] buffer = new double[dataSize * 2];
int nbrOfDoubles = MSNumpress.decodePic(data, dataSize, buffer);
double[] result = new double[nbrOfDoubles];
System.arraycopy(buffer, 0, result, 0, nbrOfDoubles);
return result;
}
}
throw new IllegalArgumentException("'" + cvAccession + "' is not a numpress compression term");
} |
python | def pass_defaults(func):
"""Decorator that returns a function named wrapper.
When invoked, wrapper invokes func with default kwargs appended.
Parameters
----------
func : callable
The function to append the default kwargs to
"""
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
merged = {}
merged.update(self.defaults)
merged.update(kwargs)
return func(self, *args, **merged)
return wrapper |
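A usage sketch (hypothetical `Client` class; assumes `pass_defaults` above is in scope and that instances expose a `defaults` dict, which is what the wrapper reads):

```python
class Client:
    def __init__(self):
        self.defaults = {"timeout": 30, "retries": 3}

    @pass_defaults
    def request(self, url, **kwargs):
        # Receives the defaults merged with, and overridden by, call-site kwargs.
        return url, kwargs

print(Client().request("https://example.com", retries=5))
# ('https://example.com', {'timeout': 30, 'retries': 5})
```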
java | public static Iterable<MBlockPos> getAllInBox(MBlockPos from, MBlockPos to)
{
return new BlockIterator(from, to).asIterable();
} |
java | public List<String> toList() {
ArrayList<String> list = new ArrayList<String>();
for (StringGrabber sg : mSgList) {
list.add(sg.toString());
}
return list;
} |
java | @Override
public void postProcess(BeanDefinitionBuilder builder, XmlRepositoryConfigurationSource config) {
Element element = config.getElement();
ParsingUtils.setPropertyReference(builder, element, SIMPLEDB_TEMPLATE_REF, "simpleDbOperations");
} |
python | def fit_arrays(uv, xy):
""" Performs a generalized fit between matched lists of positions
given by the 2 column arrays xy and uv.
This function fits for translation, rotation, and scale changes
between 'xy' and 'uv', allowing for different scales and
orientations for X and Y axes.
=================================
DEVELOPMENT NOTE:
Checks need to be put in place to verify that
enough objects are available for a fit.
=================================
Output:
(Xo,Yo),Rot,(Scale,Sx,Sy)
where
Xo,Yo: offset,
Rot: rotation,
Scale: average scale change, and
Sx,Sy: scale changes in X and Y separately.
Algorithm and nomenclature provided by: Colin Cox (11 Nov 2004)
"""
if not isinstance(xy,np.ndarray):
# cast input list as numpy ndarray for fitting
xy = np.array(xy)
if not isinstance(uv,np.ndarray):
# cast input list as numpy ndarray for fitting
uv = np.array(uv)
# Set up products used for computing the fit
Sx = xy[:,0].sum()
Sy = xy[:,1].sum()
Su = uv[:,0].sum()
Sv = uv[:,1].sum()
Sux = np.dot(uv[:,0], xy[:,0])
Svx = np.dot(uv[:,1], xy[:,0])
Suy = np.dot(uv[:,0], xy[:,1])
Svy = np.dot(uv[:,1], xy[:,1])
Sxx = np.dot(xy[:,0], xy[:,0])
Syy = np.dot(xy[:,1], xy[:,1])
Sxy = np.dot(xy[:,0], xy[:,1])
n = len(xy[:,0])
M = np.array([[Sx, Sy, n], [Sxx, Sxy, Sx], [Sxy, Syy, Sy]])
U = np.array([Su, Sux, Suy])
V = np.array([Sv, Svx, Svy])
# The fit solution...
# where
# u = P0 + P1*x + P2*y
# v = Q0 + Q1*x + Q2*y
#
try:
invM = np.linalg.inv(M.astype(np.float64))
except np.linalg.LinAlgError:
raise SingularMatrixError(
"Singular matrix: suspected colinear points."
)
P = np.dot(invM, U).astype(np.float64)
Q = np.dot(invM, V).astype(np.float64)
if not (np.all(np.isfinite(P)) and np.all(np.isfinite(Q))):
raise ArithmeticError('Singular matrix.')
# Return the shift, rotation, and scale changes
return build_fit(P, Q, 'general') |
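To make the fitted model concrete, here is a self-contained numpy sketch that generates points under u = P0 + P1*x + P2*y (and likewise for v) and recovers the coefficients. It uses a design matrix instead of the hand-built sums, which solves the same normal equations (note that the M ordering above puts the offset term last in its solution vector):

```python
import numpy as np

rng = np.random.default_rng(0)
xy = rng.random((20, 2))
P_true = np.array([0.5, 1.1, -0.2])   # [P0, P1, P2]
Q_true = np.array([-0.3, 0.25, 0.9])  # [Q0, Q1, Q2]
u = P_true[0] + P_true[1] * xy[:, 0] + P_true[2] * xy[:, 1]
v = Q_true[0] + Q_true[1] * xy[:, 0] + Q_true[2] * xy[:, 1]

# Design-matrix form of the same least-squares problem.
A = np.column_stack([np.ones(len(xy)), xy])
P = np.linalg.lstsq(A, u, rcond=None)[0]
Q = np.linalg.lstsq(A, v, rcond=None)[0]
assert np.allclose(P, P_true) and np.allclose(Q, Q_true)
```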
java | public List<Taglet> getCustomTaglets(Element e) {
switch (e.getKind()) {
case CONSTRUCTOR:
return getConstructorCustomTaglets();
case METHOD:
return getMethodCustomTaglets();
case ENUM_CONSTANT:
case FIELD:
return getFieldCustomTaglets();
case ANNOTATION_TYPE:
case INTERFACE:
case CLASS:
case ENUM:
return getTypeCustomTaglets();
case MODULE:
return getModuleCustomTaglets();
case PACKAGE:
return getPackageCustomTaglets();
case OTHER:
return getOverviewCustomTaglets();
default:
throw new AssertionError("unknown element: " + e + " ,kind: " + e.getKind());
}
} |
java | @Override
protected String doBase(String name, Object value) {
if (value instanceof InetAddress) {
return ((InetAddress) value).getHostAddress();
} else if (value instanceof String) {
return doBase(name, (String) value);
} else {
throw new IndexException("Field '{}' requires an inet address, but found '{}'", name, value);
}
} |
python | def ValidateEndConfig(self, config_obj, errors_fatal=True):
"""Given a generated client config, attempt to check for common errors."""
errors = []
if not config.CONFIG["Client.fleetspeak_enabled"]:
location = config_obj.Get("Client.server_urls", context=self.context)
if not location:
errors.append("Empty Client.server_urls")
for url in location:
if not url.startswith("http"):
errors.append("Bad Client.server_urls specified %s" % url)
key_data = config_obj.GetRaw(
"Client.executable_signing_public_key",
default=None,
context=self.context)
if key_data is None:
errors.append("Missing Client.executable_signing_public_key.")
elif not key_data.startswith("-----BEGIN PUBLIC"):
errors.append(
"Invalid Client.executable_signing_public_key: %s" % key_data)
else:
rsa_key = rdf_crypto.RSAPublicKey()
rsa_key.ParseFromHumanReadable(key_data)
if not config.CONFIG["Client.fleetspeak_enabled"]:
certificate = config_obj.GetRaw(
"CA.certificate", default=None, context=self.context)
if certificate is None or not certificate.startswith("-----BEGIN CERTIF"):
errors.append("CA certificate missing from config.")
for bad_opt in ["Client.private_key"]:
if config_obj.Get(bad_opt, context=self.context, default=""):
errors.append("Client cert in conf, this should be empty at deployment"
" %s" % bad_opt)
if errors_fatal and errors:
for error in errors:
logging.error("Build Config Error: %s", error)
raise RuntimeError("Bad configuration generated. Terminating.")
else:
return errors |
python | def get_image(self, digest, blob, mime_type, index, size=500):
"""Return an image for the given content, only if it already exists in
the image cache."""
# Special case, for now (XXX).
if mime_type.startswith("image/"):
return ""
cache_key = f"img:{index}:{size}:{digest}"
return self.cache.get(cache_key) |
python | def svm_load_model(model_file_name):
"""
svm_load_model(model_file_name) -> model
Load a LIBSVM model from model_file_name and return.
"""
model = libsvm.svm_load_model(model_file_name.encode())
if not model:
print("can't open model file %s" % model_file_name)
return None
model = toPyModel(model)
return model |
java | public Resource parse(XmlPullParser xpp) throws IOException, FHIRFormatError, XmlPullParserException {
if (xpp.getNamespace() == null)
throw new FHIRFormatError("This does not appear to be a FHIR resource (no namespace '"+xpp.getNamespace()+"') (@ /) "+Integer.toString(xpp.getEventType()));
if (!xpp.getNamespace().equals(FHIR_NS))
throw new FHIRFormatError("This does not appear to be a FHIR resource (wrong namespace '"+xpp.getNamespace()+"') (@ /)");
return parseResource(xpp);
} |
java | public void addTransition(DuzztAction action, DuzztState succ) {
transitions.put(action, new DuzztTransition(action, succ));
} |
python | def revs(self, branch='master', limit=None, skip=None, num_datapoints=None):
"""
Returns a dataframe of all revision tags and their timestamps for each project. It will have the columns:
* date
* repository
* rev
:param branch: (optional, default 'master') the branch to work in
:param limit: (optional, default None), the maximum number of revisions to return, None for no limit
:param skip: (optional, default None), the number of revisions to skip. Ex: skip=2 returns every other revision, None for no skipping.
:param num_datapoints: (optional, default=None) if limit and skip are none, and this isn't, then num_datapoints evenly spaced revs will be used
:return: DataFrame
"""
if limit is not None:
limit = math.floor(float(limit) / len(self.repos))
if num_datapoints is not None:
num_datapoints = math.floor(float(num_datapoints) / len(self.repos))
df = pd.DataFrame(columns=['repository', 'rev'])
if _has_joblib:
ds = Parallel(n_jobs=-1, backend='threading', verbose=0)(
delayed(_revs_func)
(x, branch, limit, skip, num_datapoints) for x in self.repos
)
for d in ds:
df = df.append(d)
else:
for repo in self.repos:
try:
revs = repo.revs(branch=branch, limit=limit, skip=skip, num_datapoints=num_datapoints)
revs['repository'] = repo.repo_name
df = df.append(revs)
except GitCommandError:
print('Warning! Repo: %s couldn\'t be inspected' % (repo, ))
df.reset_index()
return df |
java | public String getId()
{
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
SibTr.entry(tc, "getId");
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
SibTr.exit(tc, "getId", aoBrowserSession.toString());
return aoBrowserSession.toString();
} |
python | def create_game(self, map_name):
"""Create a game for the agents to join.
Args:
map_name: The map to use.
"""
map_inst = maps.get(map_name)
map_data = map_inst.data(self._run_config)
if map_name not in self._saved_maps:
for controller in self._controllers:
controller.save_map(map_inst.path, map_data)
self._saved_maps.add(map_name)
# Form the create game message.
create = sc_pb.RequestCreateGame(
local_map=sc_pb.LocalMap(map_path=map_inst.path),
disable_fog=False)
# Set up for two agents.
for _ in range(self._num_agents):
create.player_setup.add(type=sc_pb.Participant)
# Create the game.
self._controllers[0].create_game(create) |
python | def resolve_name(name, module=None):
"""Resolve a dotted name to a module and its parts. This is stolen
wholesale from unittest.TestLoader.loadTestByName.
"""
parts = name.split('.')
parts_copy = parts[:]
if module is None:
while parts_copy: # pragma: no cover
try:
module = __import__('.'.join(parts_copy))
break
except ImportError:
del parts_copy[-1]
if not parts_copy:
raise
parts = parts[1:]
obj = module
for part in parts:
obj = getattr(obj, part)
return obj |
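A quick sanity check of the dotted-name resolution against the standard library (assuming `resolve_name` above is importable):

```python
import json
import os

assert resolve_name('os.path.join') is os.path.join
assert resolve_name('json.decoder.JSONDecoder') is json.decoder.JSONDecoder
```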
python | def _createGsshaPyObjects(self, eventChunk):
"""
Create GSSHAPY PrecipEvent, PrecipValue, and PrecipGage Objects Method
"""
## TODO: Add Support for RADAR file format type values
# Create GSSHAPY PrecipEvent
event = PrecipEvent(description=eventChunk['description'],
nrGag=eventChunk['nrgag'],
nrPds=eventChunk['nrpds'])
# Associate PrecipEvent with PrecipFile
event.precipFile = self
gages = []
for coord in eventChunk['coords']:
# Create GSSHAPY PrecipGage object
gage = PrecipGage(description=coord['description'],
x=coord['x'],
y=coord['y'])
# Associate PrecipGage with PrecipEvent
gage.event = event
# Append to gages list for association with PrecipValues
gages.append(gage)
for valLine in eventChunk['valLines']:
for index, value in enumerate(valLine['values']):
# Create GSSHAPY PrecipValue object
val = PrecipValue(valueType=valLine['type'],
dateTime=valLine['dateTime'],
value=value)
# Associate PrecipValue with PrecipEvent and PrecipGage
val.event = event
val.gage = gages[index] |
python | def _post_activity(self, activity, unserialize=True):
""" Posts a activity to feed """
# I think we always want to post to feed
feed_url = "{proto}://{server}/api/user/{username}/feed".format(
proto=self._pump.protocol,
server=self._pump.client.server,
username=self._pump.client.nickname
)
data = self._pump.request(feed_url, method="POST", data=activity)
if not data:
return False
if "error" in data:
raise PumpException(data["error"])
if unserialize:
if "target" in data:
# we probably want to unserialize target if it's there
# true for collection.{add,remove}
self.unserialize(data["target"])
else:
# copy activity attributes into object
if "author" not in data["object"]:
data["object"]["author"] = data["actor"]
for key in ["to", "cc", "bto", "bcc"]:
if key not in data["object"] and key in data:
data["object"][key] = data[key]
self.unserialize(data["object"])
return True |
python | def pass_allowedremoterelieve_v1(self):
"""Update the outlet link sequence |dam_outlets.R|."""
flu = self.sequences.fluxes.fastaccess
sen = self.sequences.senders.fastaccess
sen.r[0] += flu.allowedremoterelieve |
python | def get_pipeline_stage(self, pipeline_key, stage_key = None, sort_by = None):
'''Gets a list of one/all stage objects in a pipeline. Performs a single GET.
Args:
pipeline_key key for pipeline
stage_key key for stage (default: None i.e. ALL)
sort_by in desc order by 'creationTimestamp' or 'lastUpdatedTimestamp'
may or may not be supported
returns (status code for the GET request, stage data)
The API returns a dict rather than a list, hence the .values() call before returning
'''
if not pipeline_key:
return requests.codes.bad_request, None
uri = '/'.join([
self.api_uri,
self.pipelines_suffix,
pipeline_key,
self.stages_suffix
])
if stage_key:
uri = '/'.join([
uri,
stage_key
])
if sort_by:
if sort_by in ['creationTimestamp', 'lastUpdatedTimestamp']:
uri += self.sort_by_postfix + sort_by
else:
return requests.codes.bad_request, {'success' : 'False',
'error': 'sortBy needs to be \'creationTimestamp\', or \'lastUpdatedTimestamp\''}
code, data = self._req('get', uri)
# format is ambiguous, so we need to rely on user input
if stage_key:
data = list(data.values())
return code, data |
python | def _render(self):
''' Standard rendering of bar graph. '''
cm_chars = self._comp_style(self.icons[_ic] * self._num_complete_chars)
em_chars = self._empt_style(self.icons[_ie] * self._num_empty_chars)
return f'{self._first}{cm_chars}{em_chars}{self._last} {self._lbl}' |
java | public void stop()
{
if (workerTimer != null)
{
try
{
workerTimer.cancel();
}
catch (Throwable e) //NOSONAR
{
LOG.warn(this.name + " cache, stop error " + e.getMessage());
}
}
nodesCache.clear();
propertiesCache.clear();
cache.clear();
} |
python | def _syllable_condenser(self, words_syllables):
"""Reduce a list of [sentence [word [syllable]]] to [sentence [syllable]].
:param words_syllables: Elided text
:return: Text tokenized only at the sentence and syllable level
:rtype : list
"""
sentences_syllables = []
for sentence in words_syllables:
syllables_sentence = []
for word in sentence:
syllables_sentence += word
sentences_syllables.append(syllables_sentence)
return sentences_syllables |
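For example, one sentence of two pre-syllabified words reduces as below; the comprehension is equivalent to the loop above:

```python
words_syllables = [[["ar", "ma"], ["vi", "rum", "que"]]]
condensed = [[syl for word in sentence for syl in word]
             for sentence in words_syllables]
print(condensed)  # [['ar', 'ma', 'vi', 'rum', 'que']]
```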
java | public void setImmunePrototypeProperty(Object value)
{
if ((prototypePropertyAttributes & READONLY) != 0) {
throw new IllegalStateException();
}
prototypeProperty = (value != null) ? value : UniqueTag.NULL_VALUE;
prototypePropertyAttributes = DONTENUM | PERMANENT | READONLY;
} |
java | @Override
public void eSet(int featureID, Object newValue) {
switch (featureID) {
case AfplibPackage.GSCC__CELLWI:
setCELLWI((Integer)newValue);
return;
case AfplibPackage.GSCC__CELLHI:
setCELLHI((Integer)newValue);
return;
case AfplibPackage.GSCC__CELLWFR:
setCELLWFR((Integer)newValue);
return;
case AfplibPackage.GSCC__CELLHFR:
setCELLHFR((Integer)newValue);
return;
}
super.eSet(featureID, newValue);
} |
python | def clear_cache(ip=None):
"""Clear the client cache or remove key matching the given ip."""
if ip:
with ignored(Exception):
client = CLIENT_CACHE[ip]
del CLIENT_CACHE[ip]
client.close()
else:
for client in CLIENT_CACHE.values():
with ignored(Exception):
client.close()
CLIENT_CACHE.clear() |
java | private void readCalendars(Project ganttProject)
{
m_mpxjCalendar = m_projectFile.addCalendar();
m_mpxjCalendar.setName(ProjectCalendar.DEFAULT_BASE_CALENDAR_NAME);
Calendars gpCalendar = ganttProject.getCalendars();
setWorkingDays(m_mpxjCalendar, gpCalendar);
setExceptions(m_mpxjCalendar, gpCalendar);
m_eventManager.fireCalendarReadEvent(m_mpxjCalendar);
} |
python | def prompt(self):
"""
Open a prompt to navigate to a different subreddit or comment"
"""
name = self.term.prompt_input('Enter page: /')
if name:
# Check if opening a submission url or a subreddit url
# Example patterns for submissions:
# comments/571dw3
# /comments/571dw3
# /r/pics/comments/571dw3/
# https://www.reddit.com/r/pics/comments/571dw3/at_disneyland
submission_pattern = re.compile(r'(^|/)comments/(?P<id>.+?)($|/)')
match = submission_pattern.search(name)
if match:
url = 'https://www.reddit.com/comments/{0}'.format(match.group('id'))
self.selected_page = self.open_submission_page(url)
else:
self.selected_page = self.open_subreddit_page(name) |
python | def _writeContaminantTable(self, session, fileObject, mapTable, contaminants, replaceParamFile):
"""
This method writes the contaminant transport mapping table case.
"""
# Write the contaminant mapping table header
fileObject.write('%s\n' % (mapTable.name))
fileObject.write('NUM_CONTAM %s\n' % (mapTable.numContam))
# Write out each contaminant and its values
for contaminant in contaminants:
fileObject.write(
'"%s" "%s" %s\n' % (contaminant.name, contaminant.indexMap.name, contaminant.outputFilename))
# Add trailing zeros to values / replacement parameter
precipConcString = vwp(contaminant.precipConc, replaceParamFile)
partitionString = vwp(contaminant.partition, replaceParamFile)
try:
precipConc = '%.2f' % precipConcString
except:
precipConc = '%s' % precipConcString
try:
partition = '%.2f' % partitionString
except:
partition = '%s' % partitionString
# Write global variables for the contaminant
fileObject.write('PRECIP_CONC%s%s\n' % (' ' * 10, precipConc))
fileObject.write('PARTITION%s%s\n' % (' ' * 12, partition))
fileObject.write('NUM_IDS %s\n' % contaminant.numIDs)
# Write value lines
self._writeValues(session, fileObject, mapTable, contaminant, replaceParamFile) |
java | public void setupSFields()
{
this.getRecord(UserGroup.USER_GROUP_FILE).getField(UserGroup.DESCRIPTION).setupDefaultView(this.getNextLocation(ScreenConstants.NEXT_LOGICAL, ScreenConstants.ANCHOR_DEFAULT), this, ScreenConstants.DEFAULT_DISPLAY);
} |
java | public void setMetricResults(java.util.Collection<CurrentMetricResult> metricResults) {
if (metricResults == null) {
this.metricResults = null;
return;
}
this.metricResults = new java.util.ArrayList<CurrentMetricResult>(metricResults);
} |
python | def _find_human_readable_labels(synsets, synset_to_human):
"""Build a list of human-readable labels.
Args:
synsets: list of strings; each string is a unique WordNet ID.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
Returns:
List of human-readable strings corresponding to each synset.
"""
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans |
python | def cmd(
name,
func=None,
arg=(),
**kwargs):
'''
Execute a runner asynchronous:
USAGE:
.. code-block:: yaml
run_cloud:
runner.cmd:
- func: cloud.create
- arg:
- my-ec2-config
- myinstance
run_cloud:
runner.cmd:
- func: cloud.create
- kwargs:
provider: my-ec2-config
instances: myinstance
'''
ret = {'name': name,
'changes': {},
'comment': '',
'result': True}
if func is None:
func = name
local_opts = {}
local_opts.update(__opts__)
local_opts['async'] = True # ensure this will be run asynchronously
local_opts.update({
'fun': func,
'arg': arg,
'kwarg': kwargs
})
runner = salt.runner.Runner(local_opts)
runner.run()
return ret |
java | @UiThread
public void notifyChildRemoved(int parentPosition, int childPosition) {
int flatParentPosition = getFlatParentPosition(parentPosition);
ExpandableWrapper<P, C> parentWrapper = mFlatItemList.get(flatParentPosition);
parentWrapper.setParent(mParentList.get(parentPosition));
if (parentWrapper.isExpanded()) {
mFlatItemList.remove(flatParentPosition + childPosition + 1);
notifyItemRemoved(flatParentPosition + childPosition + 1);
}
} |
java | static <T1, T2> Flowable<Notification<Tuple2<T1, T2>>> createWithTwoOutParameters(Single<Connection> connection,
String sql, Flowable<List<Object>> parameterGroups, List<ParameterPlaceholder> parameterPlaceholders,
Class<T1> cls1, Class<T2> cls2) {
return connection.toFlowable().flatMap(con -> createWithParameters(con, sql, parameterGroups,
parameterPlaceholders,
(stmt, parameters) -> createWithTwoParameters(stmt, parameters, parameterPlaceholders, cls1, cls2)));
} |
java | public static String findTitle(final String xml) {
// Convert the string to a document to make it easier to get the proper title
Document doc = null;
try {
doc = XMLUtilities.convertStringToDocument(xml);
} catch (Exception ex) {
LOG.debug("Unable to convert String to a DOM Document", ex);
}
return findTitle(doc);
} |
python | def scripthash_to_address(scripthash):
"""
Convert a script hash to a public address.
Args:
scripthash (bytes):
Returns:
str: base58 encoded string representing the wallet address.
"""
sb = bytearray([ADDRESS_VERSION]) + scripthash
c256 = bin_dbl_sha256(sb)[0:4]
outb = sb + bytearray(c256)
return base58.b58encode(bytes(outb)).decode("utf-8") |
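The steps are standard Base58Check encoding: prepend a version byte, append the first four bytes of a double SHA-256 checksum, then base58-encode. A self-contained sketch with hashlib (treating ADDRESS_VERSION as 0x17, a NEO-style assumption, and omitting leading-zero '1' padding since the version byte is nonzero):

```python
import hashlib

B58_ALPHABET = b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'

def scripthash_to_address_sketch(scripthash: bytes, version: int = 0x17) -> str:
    raw = bytes([version]) + scripthash
    checksum = hashlib.sha256(hashlib.sha256(raw).digest()).digest()[:4]
    n = int.from_bytes(raw + checksum, 'big')
    out = bytearray()
    while n:
        n, r = divmod(n, 58)
        out.append(B58_ALPHABET[r])
    out.reverse()
    return out.decode('ascii')

print(scripthash_to_address_sketch(bytes(20)))  # address for a zeroed script hash
```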
java | @Override
public ResultSet getCatalogs() throws SQLException
{
checkClosed();
VoltTable result = new VoltTable(new VoltTable.ColumnInfo("TABLE_CAT",VoltType.STRING));
result.addRow(new Object[] { catalogString });
return new JDBC4ResultSet(null, result);
} |
python | def config_dir(self):
"Return dir(self)."
my_vars = set(self)
skips = self.bad_names | my_vars
yield from (
attr
for attr in dir(type(self))
if (
attr not in skips and
not (
attr.startswith('_') and
not attr.startswith('__') and
hasattr(self, attr[1:])
) and hasattr(self, attr)
)
)
yield from my_vars |
java | @NotNull
public OptionalBoolean mapToBoolean(@NotNull ToBooleanFunction<? super T> mapper) {
if (!isPresent()) return OptionalBoolean.empty();
return OptionalBoolean.of(mapper.applyAsBoolean(value));
} |
python | def gradient_pred(model, ref, ref_rc, alt, alt_rc, mutation_positions, out_annotation_all_outputs,
output_filter_mask=None, out_annotation=None):
"""Gradient-based (saliency) variant effect prediction
Based on the idea of [saliency maps](https://arxiv.org/pdf/1312.6034.pdf) the gradient-based prediction of
variant effects uses the `gradient` function of the Keras backend to estimate the importance of a variant
for a given output. This value is then multiplied by the input, as recommended by
[Shrikumar et al., 2017](https://arxiv.org/pdf/1605.01713.pdf).
# Arguments
model: Keras model
ref: Input sequence with the reference genotype in the mutation position
ref_rc: Reverse complement of the 'ref' argument
alt: Input sequence with the alternative genotype in the mutation position
alt_rc: Reverse complement of the 'alt' argument
mutation_positions: Position on which the mutation was placed in the forward sequences
out_annotation_all_outputs: Output labels of the model.
output_filter_mask: Mask of boolean values indicating which model outputs should be used.
Use this or 'out_annotation'
out_annotation: List of outputs labels for which of the outputs (in case of a multi-task model) the
predictions should be calculated.
# Returns
Dictionary with three different entries:
- ref: Gradient * input at the mutation position using the reference sequence.
Forward or reverse-complement sequence is chosen based on which sequence direction caused
the bigger absolute difference ('diff')
- alt: Gradient * input at the mutation position using the alternative sequence. Forward or
reverse-complement sequence is chosen based on which sequence direction caused the bigger
absolute difference ('diff')
- diff: 'alt' - 'ref'. Forward or reverse-complement sequence is chosen based on which sequence
direction caused the bigger absolute difference.
"""
seqs = {"ref": ref, "ref_rc": ref_rc, "alt": alt, "alt_rc": alt_rc}
for k in seqs:
if not isinstance(seqs[k], (list, tuple, np.ndarray)):
raise Exception("At the moment only models with list, tuple or np.ndarray inputs are supported.")
assert np.all([np.array(get_seq_len(ref)) == np.array(get_seq_len(seqs[k])) for k in seqs.keys() if k != "ref"])
assert get_seq_len(ref)[0] == mutation_positions.shape[0]
assert len(mutation_positions.shape) == 1
# determine which outputs should be selected
if output_filter_mask is None:
if out_annotation is None:
output_filter_mask = np.arange(out_annotation_all_outputs.shape[0])
else:
output_filter_mask = np.where(np.in1d(out_annotation_all_outputs, out_annotation))[0]
# make sure the labels are assigned correctly
out_annotation = out_annotation_all_outputs[output_filter_mask]
# Generate the necessary gradient functions
sal_funcs = __generate_direct_saliency_functions__(model, out_annotation_all_outputs, out_annotation)
# ANALOGOUS TO ISM:
# predict
preds = {}
for k in seqs:
preds[k] = {}
if "_rc" in k:
mutated_positions_here = get_seq_len(ref)[1] - 1 - mutation_positions
else:
mutated_positions_here = mutation_positions
for l in out_annotation:
preds[k][l] = predict_vals(input_data=seqs[k], apply_function=__get_direct_saliencies__,
score_func=sal_funcs[l], mutated_positions=mutated_positions_here, model=model)
diff_ret_dGrad = {}
pred_out = {"ref": {}, "alt": {}}
for k in preds["ref"]:
# TODO make list (and dict)-ready
diff_fwd = general_diff(preds["alt"][k]["dGrad"], preds["ref"][k]["dGrad"])
diff_rc = general_diff(preds["alt_rc"][k]["dGrad"], preds["ref_rc"][k]["dGrad"])
sel = general_sel(diff_fwd, diff_rc)
replace_by_sel(diff_fwd, diff_rc, sel)
diff_ret_dGrad[k] = diff_fwd
# Overwrite the fwd values with rc values if rc was selected
replace_by_sel(preds["ref"][k]["dGrad"], preds["ref_rc"][k]["dGrad"], sel)
replace_by_sel(preds["alt"][k]["dGrad"], preds["alt_rc"][k]["dGrad"], sel)
pred_out["ref"][k] = preds["ref"][k]["dGrad"]
pred_out["alt"][k] = preds["alt"][k]["dGrad"]
return {"diff": pd.DataFrame(diff_ret_dGrad),
"ref": pd.DataFrame(pred_out["ref"]),
"alt": pd.DataFrame(pred_out["alt"])} |
java | public <T extends State> void deserializeFromSequenceFile(final Class<? extends Writable> keyClass,
final Class<T> stateClass, final Path inputFilePath, final Collection<T> states, final boolean deleteAfter) {
this.futures.add(new NamedFuture(this.executor.submit(new Callable<Void>() {
@Override
public Void call() throws Exception {
Configuration conf = new Configuration(ParallelRunner.this.fs.getConf());
WritableShimSerialization.addToHadoopConfiguration(conf);
try (@SuppressWarnings("deprecation") SequenceFile.Reader reader = new SequenceFile.Reader(
ParallelRunner.this.fs, inputFilePath, conf)) {
Writable key = keyClass.newInstance();
T state = stateClass.newInstance();
while (reader.next(key)) {
state = (T) reader.getCurrentValue(state);
states.add(state);
state = stateClass.newInstance();
}
if (deleteAfter) {
HadoopUtils.deletePath(ParallelRunner.this.fs, inputFilePath, false);
}
}
return null;
}
}), "Deserialize state from file " + inputFilePath));
} |
python | def make_class_method_decorator(classkey, modname=None):
"""
register a class to be injectable
classkey is a key that identifies the injected class
REMEMBER to call inject_instance in __init__
Args:
classkey : the class to be injected into
modname : the global __name__ of the module you are injecting from
Returns:
closure_decorate_class_method (func): decorator for injectable methods
Example:
>>> # ENABLE_DOCTEST
>>> import utool as ut
>>> class CheeseShop(object):
... def __init__(self):
... import utool as ut
... ut.inject_all_external_modules(self)
>>> cheeseshop_method = ut.make_class_method_decorator(CheeseShop)
>>> shop1 = CheeseShop()
>>> assert not hasattr(shop1, 'has_cheese'), 'have not injected yet'
>>> @cheeseshop_method
>>> def has_cheese(self):
>>> return False
>>> shop2 = CheeseShop()
>>> assert shop2.has_cheese() is False, 'external method not injected'
>>> print('Cheese shop does not have cheese. All is well.')
"""
global __APP_MODNAME_REGISTER__
#if util_arg.VERBOSE or VERBOSE_CLASS:
if VERBOSE_CLASS:
print('[util_class] register via make_class_method_decorator classkey=%r, modname=%r'
% (classkey, modname))
if modname == '__main__':
# skips reinjects into main
print('WARNING: cannot register classkey=%r functions as __main__' % (classkey,))
return lambda func: func
# register that this module was injected into
if isinstance(classkey, tuple):
classname, _ = classkey
__CLASSNAME_CLASSKEY_REGISTER__[classname].append(modname)
elif isinstance(classkey, type):
classname = classkey.__name__
if modname is not None:
assert modname == classkey.__module__, (
'modname=%r does not agree with __module__=%r' % (
modname, classkey.__module__))
modname = classkey.__module__
# Convert to new classkey format
classkey = (classname, modname)
__CLASSNAME_CLASSKEY_REGISTER__[classname].append(modname)
else:
print('Warning not using classkey for %r %r' % (classkey, modname))
raise AssertionError('classkey no longer supported. Use class_inject_key instead')
closure_decorate_class_method = functools.partial(decorate_class_method,
classkey=classkey)
return closure_decorate_class_method |
python | def get_userstory_by_ref(self, ref):
"""
Get a :class:`UserStory` by ref.
:param ref: :class:`UserStory` reference
"""
response = self.requester.get(
'/{endpoint}/by_ref?ref={us_ref}&project={project_id}',
endpoint=UserStory.endpoint,
us_ref=ref,
project_id=self.id
)
return UserStory.parse(self.requester, response.json()) |
python | def to_name(self) -> str:
"""
Convert to ANSI color name
:return: ANSI color name
"""
return {
self.BLACK: 'black',
self.RED: 'red',
self.GREEN: 'green',
self.YELLOW: 'yellow',
self.BLUE: 'blue',
self.MAGENTA: 'magenta',
self.CYAN: 'cyan',
self.WHITE: 'white'
}[self.color] |
java | public static String buildGlueExpression(List<Column> partitionKeys, List<String> partitionValues)
{
if (partitionValues == null || partitionValues.isEmpty()) {
return null;
}
if (partitionKeys == null || partitionValues.size() != partitionKeys.size()) {
throw new PrestoException(HIVE_METASTORE_ERROR, "Incorrect number of partition values: " + partitionValues);
}
List<String> predicates = new LinkedList<>();
for (int i = 0; i < partitionValues.size(); i++) {
if (!Strings.isNullOrEmpty(partitionValues.get(i))) {
predicates.add(buildPredicate(partitionKeys.get(i), partitionValues.get(i)));
}
}
return JOINER.join(predicates);
} |
python | def do_dot2(self, args, arguments):
"""
::
Usage:
dot2 FILENAME FORMAT
Export the dot file FILENAME to the given output FORMAT.
Arguments:
FILENAME The filename
FORMAT the export format, pdf, png, ...
"""
filename = arguments['FILENAME']
output_format = arguments['FORMAT']
base = filename.replace(".dot", "")
out = base + "." + output_format
if output_format == "pdf":
exec_command = "dot -Tps %s | epstopdf --filter --ooutput %s" % (
file, out)
else:
exec_command = "dot -T%s %s -o %s 2>/tmp/err" % (output_format, file, out)
os.system(exec_command) |
java | @SuppressWarnings("ConstantConditions")
public static NaaccrDictionary getDefaultUserDictionaryByVersion(String naaccrVersion) {
if (naaccrVersion == null)
throw new RuntimeException("Version is required for getting the default user dictionary.");
if (!NaaccrFormat.isVersionSupported(naaccrVersion))
throw new RuntimeException("Unsupported default user dictionary version: " + naaccrVersion);
NaaccrDictionary result = _INTERNAL_DICTIONARIES.get("user_" + naaccrVersion);
if (result == null) {
String resName = "user-defined-naaccr-dictionary-" + naaccrVersion + ".xml";
try (Reader reader = new InputStreamReader(Thread.currentThread().getContextClassLoader().getResource(resName).openStream(), StandardCharsets.UTF_8)) {
result = readDictionary(reader);
_INTERNAL_DICTIONARIES.put("user_" + naaccrVersion, result);
}
catch (IOException e) {
throw new RuntimeException("Unable to get base dictionary for version " + naaccrVersion, e);
}
}
return result;
} |
java | protected void initFailedJobCommandFactory() {
if (failedJobCommandFactory == null) {
failedJobCommandFactory = new DefaultFailedJobCommandFactory();
}
if (postParseListeners == null) {
postParseListeners = new ArrayList<BpmnParseListener>();
}
postParseListeners.add(new DefaultFailedJobParseListener());
} |
python | def _set_interface_reliable_messaging(self, v, load=False):
"""
Setter method for interface_reliable_messaging, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/mpls_interface/rsvp/interface_reliable_messaging (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_reliable_messaging is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_reliable_messaging() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=interface_reliable_messaging.interface_reliable_messaging, is_container='container', presence=True, yang_name="interface-reliable-messaging", rest_name="reliable-messaging", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure RSVP Reliable messaging on this interface', u'alt-name': u'reliable-messaging'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_reliable_messaging must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=interface_reliable_messaging.interface_reliable_messaging, is_container='container', presence=True, yang_name="interface-reliable-messaging", rest_name="reliable-messaging", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure RSVP Reliable messaging on this interface', u'alt-name': u'reliable-messaging'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""",
})
self.__interface_reliable_messaging = t
if hasattr(self, '_set'):
self._set() |
python | def create(request):
"""Create a new poll"""
errors = []
success = False
listOfResponses = ['', '', ''] # 3 Blank lines by default
title = ''
description = ''
id = ''
if request.method == 'POST': # User saved the form
# Retrieve parameters
title = request.form.get('title')
description = request.form.get('description')
listOfResponses = []
for rep in request.form.getlist('rep[]'):
if rep != '':
listOfResponses.append(rep)
# Test if everything is ok
if title == "":
errors.append("Please set a title !")
if len(listOfResponses) == 0:
errors.append("Please set at least one response !")
# Can we save the new question ?
if len(errors) == 0:
# Yes. Let's save the data
curDB.execute("INSERT INTO Poll (title, description) VALUES (?, ?)", (title, description))
# The id of the poll
id = curDB.lastrowid
# Insert responses
for rep in listOfResponses:
curDB.execute("INSERT INTO Response (pollId, title) VALUES (?, ?)", (id, rep))
coxDB.commit()
success = True
# Minimum of 3 lines of questions
while len(listOfResponses) < 3:
listOfResponses.append('')
return {'errors': errors, 'success': success, 'listOfResponses': listOfResponses, 'title': title, 'description': description, 'id': id} |
java | private void addJQueryFile(Content head, DocPath filePath) {
HtmlTree jqueryScriptFile = HtmlTree.SCRIPT(
pathToRoot.resolve(DocPaths.JQUERY_FILES.resolve(filePath)).getPath());
head.addContent(jqueryScriptFile);
} |
java | public void loginAndRedirectBack(Object userIdentifier, String defaultLandingUrl) {
login(userIdentifier);
RedirectToLoginUrl.redirectToOriginalUrl(this, defaultLandingUrl);
} |
python | def get_next_line(self):
""" Gets next line of random_file and starts over when reaching end of file"""
try:
line = next(self.random_file).strip()
# keep track of which document we are currently looking at, to later avoid having the same doc as t1
if line == "":
self.current_random_doc = self.current_random_doc + 1
line = next(self.random_file).strip()
except StopIteration:
self.random_file.close()
self.random_file = open(self.corpus_path, "r", encoding=self.encoding)
line = next(self.random_file).strip()
return line |
python | def get_host(url):
"""
Given a url, return its scheme, host and port (None if it's not there).
For example: ::
>>> get_host('http://google.com/mail/')
('http', 'google.com', None)
>>> get_host('google.com:80')
('http', 'google.com', 80)
"""
# This code is actually similar to urlparse.urlsplit, but much
# simplified for our needs.
port = None
scheme = 'http'
if '//' in url:
scheme, url = url.split('://', 1)
if '/' in url:
url, _path = url.split('/', 1)
if ':' in url:
url, port = url.split(':', 1)
port = int(port)
return scheme, url, port |
java | public StringGrabber replaceEnclosedIn(String startToken, String endToken, String replacement) {
StringCropper cropper = getCropper();
final List<StrPosition> stringEnclosedInWithDetails = cropper.getStringEnclosedInWithDetails(sb.toString(), startToken, endToken);
final List<Integer> splitList = new ArrayList<Integer>();
for (StrPosition sp : stringEnclosedInWithDetails) {
int splitPointFirst = sp.startIndex - 1;
int splitPointSecond = sp.endIndex;
splitList.add(splitPointFirst);
splitList.add(splitPointSecond);
}
final Integer[] splitIndexes = splitList.toArray(new Integer[] {});
List<String> splitStringList = cropper.splitByIndex(sb.toString(), splitIndexes);
final StringBuilder tempSb = new StringBuilder();
final int strNum = splitStringList.size();
boolean nextIsValue = false;
int countOfReplacement = 0;
for (int i = 0; i < strNum; i++) {
String strPart = splitStringList.get(i);
if (nextIsValue) {
// position to add into replacement
tempSb.append(replacement);
countOfReplacement++;
if (strPart.startsWith(endToken)) {
// the enclosed string was blank
tempSb.append(strPart);
}
} else {
tempSb.append(strPart);
}
nextIsValue = strPart.endsWith(startToken);
}
sb = tempSb;
return StringGrabber.this;
} |
python | def write_chunks(self, data, start, step, count) -> None:
'''
Split data to count equal parts.
Write the chunks using offsets calculated from start, step and stop.
Args:
data (bytes): The data.
start (int): First offset.
step (int): Offset increment.
count (int): The number of offsets.
'''
self.mglo.write_chunks(data, start, step, count) |
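A pure-Python model of the layout this requests, splitting the data into count equal parts written at offsets start, start+step, ... (a sketch of the arithmetic, not the ModernGL internals):

```python
def chunk_layout(data: bytes, start: int, step: int, count: int):
    size = len(data) // count  # equal parts
    return [(start + i * step, data[i * size:(i + 1) * size])
            for i in range(count)]

print(chunk_layout(b'abcdef', start=0, step=10, count=3))
# [(0, b'ab'), (10, b'cd'), (20, b'ef')]
```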
python | def delete(self, config_file=None):
"""Deletes the credentials file specified in `config_file`. If no
file is specified, it deletes the default user credential file.
Args:
config_file (str): Path to the configuration file. If `None`, the
default user credential file is deleted.
.. Tip::
To see if there is a default user credential file stored, do the
following::
>>> creds = Credentials()
>>> print(creds)
Credentials(username=eschbacher, key=abcdefg,
base_url=https://eschbacher.carto.com/)
"""
path_to_remove = config_file or _DEFAULT_PATH
try:
os.remove(path_to_remove)
print('Credentials at {} successfully removed.'.format(
path_to_remove))
except OSError:
warnings.warn('No credential file found at {}.'.format(
path_to_remove)) |
java | private void execute(final Level level, final Object message, final Throwable t) {
if (!m_logger.isEnabledFor(level)) return;
if (m_asynchLoggerPool == null) {
m_logger.log(level, message, t);
return;
}
final Runnable runnableLoggingTask = createRunnableLoggingTask(level, message, t);
try {
m_asynchLoggerPool.execute(runnableLoggingTask);
} catch (RejectedExecutionException e) {
m_logger.log(Level.DEBUG, "Failed to execute logging task. Running in-line", e);
runnableLoggingTask.run();
}
} |
python | def register(self, name):
"""
Register configuration for an editor instance.
Arguments:
name (string): Config name from available ones in
``settings.CODEMIRROR_SETTINGS``.
Raises:
UnknowConfigError: If given config name does not exist in
``settings.CODEMIRROR_SETTINGS``.
Returns:
dict: Registred config dict.
"""
if name not in settings.CODEMIRROR_SETTINGS:
msg = ("Given config name '{}' does not exists in "
"'settings.CODEMIRROR_SETTINGS'.")
raise UnknowConfigError(msg.format(name))
parameters = copy.deepcopy(self.default_internal_config)
parameters.update(copy.deepcopy(
settings.CODEMIRROR_SETTINGS[name]
))
# Add asset bundles name
if 'css_bundle_name' not in parameters:
css_template_name = settings.CODEMIRROR_BUNDLE_CSS_NAME
parameters['css_bundle_name'] = css_template_name.format(
settings_name=name
)
if 'js_bundle_name' not in parameters:
js_template_name = settings.CODEMIRROR_BUNDLE_JS_NAME
parameters['js_bundle_name'] = js_template_name.format(
settings_name=name
)
self.registry[name] = parameters
return parameters |
python | def is_manifestation_model(instance, attribute, value):
"""Must include a ``manifestationOfWork`` key."""
instance_name = instance.__class__.__name__
is_creation_model(instance, attribute, value)
manifestation_of = value.get('manifestationOfWork')
if not isinstance(manifestation_of, str):
err_str = ("'manifestationOfWork' must be given as a string in the "
"'{attr}' parameter of a '{cls}'. Given "
"'{value}'").format(attr=attribute.name,
cls=instance_name,
value=manifestation_of)
print(err_str) |
java | public static String getText(Activity context, int id) {
TextView view = findViewById(context, id);
String text = "";
if (view != null) {
text = view.getText().toString();
} else {
Log.e("Caffeine", "Null view given to getText(). \"\" will be returned.");
}
return text;
} |
java | public Interval<C> coalesce(final Interval<C> that) {
if(this.overlaps(that)) {
return new Interval(this.dimension, this.range.span(that.range));
}
return NULL;
} |
java | public MultimapWithProtoValuesFluentAssertion<M> ignoringFieldAbsenceOfFieldsForValues(
int firstFieldNumber, int... rest) {
return usingConfig(config.ignoringFieldAbsenceOfFields(asList(firstFieldNumber, rest)));
} |
python | def create_indexes(self, indexes):
"""Create one or more indexes on this collection.
>>> from pymongo import IndexModel, ASCENDING, DESCENDING
>>> index1 = IndexModel([("hello", DESCENDING),
... ("world", ASCENDING)], name="hello_world")
>>> index2 = IndexModel([("goodbye", DESCENDING)])
>>> db.test.create_indexes([index1, index2])
["hello_world", "goodbye_-1"]
:Parameters:
- `indexes`: A list of :class:`~pymongo.operations.IndexModel`
instances.
.. note:: `create_indexes` uses the `createIndexes`_ command
introduced in MongoDB **2.6** and cannot be used with earlier
versions.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
.. versionadded:: 3.0
.. _createIndexes: https://docs.mongodb.com/manual/reference/command/createIndexes/
"""
if not isinstance(indexes, list):
raise TypeError("indexes must be a list")
names = []
def gen_indexes():
for index in indexes:
if not isinstance(index, IndexModel):
raise TypeError("%r is not an instance of "
"pymongo.operations.IndexModel" % (index,))
document = index.document
names.append(document["name"])
yield document
cmd = SON([('createIndexes', self.name),
('indexes', list(gen_indexes()))])
with self._socket_for_writes() as sock_info:
self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
write_concern=self.write_concern,
parse_write_concern_error=True)
return names |
java | public WikiFormat removeStyle(WikiStyle style)
{
if (!fStyles.contains(style)) {
return this;
}
WikiFormat clone = getClone();
clone.fStyles.remove(style);
return clone;
} |
java | public EqualsVerifierApi<T> suppress(Warning... warnings) {
Collections.addAll(warningsToSuppress, warnings);
Validations.validateWarnings(warningsToSuppress);
Validations.validateWarningsAndFields(warningsToSuppress, allIncludedFields, allExcludedFields);
Validations.validateNonnullFields(nonnullFields, warningsToSuppress);
return this;
} |
python | def get_db_prep_value(self, value, connection, prepared=False):
"""Convert a value to DB storage.
Returns the state name.
"""
if not prepared:
value = self.get_prep_value(value)
return value.state.name |
java | public HttpSession getSession(boolean arg0) {
final HttpSession httpSession = request.getSession(arg0);
if (httpSession == null) {
return null;
}
else {
return new HttpSessionWrapper(httpSession);
}
} |
python | def do_action_for(self, context, request):
"""/@@API/doActionFor: Perform workflow transition on values returned
by jsonapi "read" function.
Required parameters:
- action: The workflow transition to apply to found objects.
Parameters used to locate objects are the same as used for the "read"
method.
"""
savepoint = transaction.savepoint()
workflow = getToolByName(context, 'portal_workflow')
uc = getToolByName(context, 'uid_catalog')
action = request.get('action', '')
if not action:
raise BadRequest("No action specified in request")
ret = {
"url": router.url_for("doActionFor", force_external=True),
"success": True,
"error": False,
}
data = read(context, request)
objects = data.get('objects', [])
if len(objects) == 0:
raise BadRequest("No matching objects found")
for obj_dict in objects:
try:
obj = uc(UID=obj_dict['UID'])[0].getObject()
workflow.doActionFor(obj, action)
obj.reindexObject()
except Exception as e:
savepoint.rollback()
msg = "Cannot execute '{0}' on {1} ({2})".format(
action, obj, e.message)
msg = msg.replace("${action_id}", action)
raise BadRequest(msg)
return ret |
java | protected ListView<T> newChoices(final String id, final IModel<CheckboxModelBean<T>> model)
{
final ListView<T> choices = new ListView<T>("choices", model.getObject().getChoices())
{
/** The serialVersionUID. */
private static final long serialVersionUID = 1L;
/**
* {@inheritDoc}
*/
@Override
protected void populateItem(final ListItem<T> item)
{
item.add(new Check<>("checkbox", item.getModel()));
item.add(new Label("label", new PropertyModel<String>(item.getDefaultModel(),
model.getObject().getLabelPropertyExpression())));
}
};
choices.setReuseItems(true);
return choices;
} |