language | func_code_string
---|---
python | def get_input(*args, secret=False, required=False, blank=False, **kwargs):
    """
    secret: Don't show user input when they are typing.
    required: Keep prompting if the user enters an empty value.
    blank: Turn all empty strings into None.
    """
    while True:
        if secret:
            value = getpass.getpass(*args, **kwargs)
        else:
            value = input(*args, **kwargs)
        if blank:
            value = value if value else None
        if not required or value:
            break
    return value |
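A hypothetical usage sketch for the helper above (it assumes `getpass` is imported at module level, which the `secret=True` branch requires; the prompts are illustrative):

```python
# Prompt until a non-empty, hidden value is entered.
token = get_input("API token: ", secret=True, required=True)

# blank=True maps empty strings to None, handy for optional fields.
nickname = get_input("Nickname (optional): ", blank=True)
if nickname is None:
    print("no nickname set")
```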
python | def object_build(self, node, obj):
    """Recursive method which creates a partial AST from real objects
    (only functions, classes, and methods are handled).
    """
    if obj in self._done:
        return self._done[obj]
    self._done[obj] = node
    for name in dir(obj):
        try:
            member = getattr(obj, name)
        except AttributeError:
            # damned ExtensionClass.Base, I know you're there!
            attach_dummy_node(node, name)
            continue
        if inspect.ismethod(member):
            member = member.__func__
        if inspect.isfunction(member):
            _build_from_function(node, name, member, self._module)
        elif inspect.isbuiltin(member):
            if not _io_discrepancy(member) and self.imported_member(
                node, member, name
            ):
                continue
            object_build_methoddescriptor(node, member, name)
        elif inspect.isclass(member):
            if self.imported_member(node, member, name):
                continue
            if member in self._done:
                class_node = self._done[member]
                if class_node not in node.locals.get(name, ()):
                    node.add_local_node(class_node, name)
            else:
                class_node = object_build_class(node, member, name)
                # recursion
                self.object_build(class_node, member)
            if name == "__class__" and class_node.parent is None:
                class_node.parent = self._done[self._module]
        elif inspect.ismethoddescriptor(member):
            assert isinstance(member, object)
            object_build_methoddescriptor(node, member, name)
        elif inspect.isdatadescriptor(member):
            assert isinstance(member, object)
            object_build_datadescriptor(node, member, name)
        elif isinstance(member, _CONSTANTS):
            attach_const_node(node, name, member)
        elif inspect.isroutine(member):
            # This should be called for Jython, where some builtin
            # methods aren't caught by the isbuiltin branch.
            _build_from_function(node, name, member, self._module)
        else:
            # create an empty node so that the name is actually defined
            attach_dummy_node(node, name, member)
    return None |
java | private void setUserInfo(CmsUser user, String key, String value) {
    if (!CmsStringUtil.isEmptyOrWhitespaceOnly(value)) {
        user.getAdditionalInfo().put(key, value);
    }
} |
java | public static String firstRegistryOf(String... checkFirst) {
    for (String registry : checkFirst) {
        if (registry != null) {
            return registry;
        }
    }
    // Check environment as last resort
    return System.getenv("DOCKER_REGISTRY");
} |
java | @Override
public byte[] get(String table, String key) {
    String CQL = MessageFormat.format(CQL_SELECT_ONE, calcTableName(table));
    Row row = getSessionManager().executeOne(CQL, getConsistencyLevelGet(), key);
    ByteBuffer data = row != null ? row.getBytes(columnValue) : null;
    return data != null ? data.array() : null;
} |
java | private static IndexableExpression getIndexableExpressionFromFilters(
        ExpressionType targetComparator, ExpressionType altTargetComparator,
        AbstractExpression coveringExpr, int coveringColId, StmtTableScan tableScan,
        List<AbstractExpression> filtersToCover,
        boolean allowIndexedJoinFilters, boolean filterAction)
{
    List<AbstractExpression> binding = null;
    AbstractExpression indexableExpr = null;
    AbstractExpression otherExpr = null;
    ComparisonExpression normalizedExpr = null;
    AbstractExpression originalFilter = null;
    for (AbstractExpression filter : filtersToCover) {
        // ENG-8203: Not going to try to use an index with a sub-query expression
        if (filter.hasSubquerySubexpression()) {
            // Including RowSubqueryExpression and SelectSubqueryExpression;
            // SelectSubqueryExpression can also be a scalar sub-query
            continue;
        }
        // Expression type must be resolvable by an index scan
        if ((filter.getExpressionType() == targetComparator) ||
                (filter.getExpressionType() == altTargetComparator)) {
            normalizedExpr = (ComparisonExpression) filter;
            indexableExpr = filter.getLeft();
            otherExpr = filter.getRight();
            binding = bindingIfValidIndexedFilterOperand(tableScan, indexableExpr, otherExpr,
                                                         coveringExpr, coveringColId);
            if (binding != null) {
                if ( ! allowIndexedJoinFilters) {
                    if (otherExpr.hasTupleValueSubexpression()) {
                        // This filter can not be used with the index, possibly due to interactions
                        // with IN LIST processing that would require a three-way NLIJ.
                        binding = null;
                        continue;
                    }
                }
                // Additional restrictions apply to LIKE pattern arguments
                if (targetComparator == ExpressionType.COMPARE_LIKE) {
                    if (otherExpr instanceof ParameterValueExpression) {
                        ParameterValueExpression pve = (ParameterValueExpression) otherExpr;
                        // Can't use an index for parameterized LIKE filters,
                        // e.g. "T1.column LIKE ?"
                        // UNLESS the parameter was artificially substituted
                        // for a user-specified constant AND that constant was a prefix pattern.
                        // In that case, the parameter has to be added to the bound list
                        // for this index/statement.
                        ConstantValueExpression cve = pve.getOriginalValue();
                        if (cve == null || ! cve.isPrefixPatternString()) {
                            binding = null; // the filter is not usable, so the binding is invalid
                            continue;
                        }
                        // Remember that the binding list returned by
                        // bindingIfValidIndexedFilterOperand above
                        // is often a "shared object" and is intended to be treated as immutable.
                        // To add a parameter to it, first copy the List.
                        List<AbstractExpression> moreBinding =
                                new ArrayList<>(binding);
                        moreBinding.add(pve);
                        binding = moreBinding;
                    } else if (otherExpr instanceof ConstantValueExpression) {
                        // Can't use an index for non-prefix LIKE filters,
                        // e.g. " T1.column LIKE '%ish' "
                        ConstantValueExpression cve = (ConstantValueExpression) otherExpr;
                        if ( ! cve.isPrefixPatternString()) {
                            // The constant is not an index-friendly prefix pattern.
                            binding = null; // the filter is not usable, so the binding is invalid
                            continue;
                        }
                    } else {
                        // Other cases are not indexable, e.g. " T1.column LIKE T2.column "
                        binding = null; // the filter is not usable, so the binding is invalid
                        continue;
                    }
                }
                if (targetComparator == ExpressionType.COMPARE_IN) {
                    if (otherExpr.hasTupleValueSubexpression()) {
                        // This is a fancy edge case where the expression could only be indexed
                        // if it:
                        // A) does not reference the indexed table and
                        // B) has ee support for a three-way NLIJ where the table referenced in
                        // the list element expression feeds values from its current row to the
                        // Materialized scan which then re-evaluates its expressions to
                        // re-populate the temp table that drives the injected NLIJ with
                        // this index scan.
                        // This is a slightly more twisted variant of the three-way NLIJ that
                        // would be needed to support compound key indexing on a combination
                        // of (fixed) IN LIST elements and join key values from other tables.
                        // Punt for now on indexing this IN LIST filter.
                        binding = null; // the filter is not usable, so the binding is invalid
                        continue;
                    }
                    if (otherExpr instanceof ParameterValueExpression) {
                        // It's OK to use an index for a parameterized IN filter,
                        // e.g. "T1.column IN ?"
                        // EVEN if the parameter was -- someday -- artificially substituted
                        // for an entire user-specified list of constants.
                        // As of now, that is beyond the capabilities of the ad hoc statement
                        // parameterizer, so "T1.column IN (3, 4)" can use the plan for
                        // "T1.column IN (?, ?)" that might have been originally cached for
                        // "T1.column IN (1, 2)" but "T1.column IN (1, 2, 3)" would need its own
                        // "T1.column IN (?, ?, ?)" plan, etc. per list element count.
                    }
                    //TODO: Some day, there may be an optimization here that allows an entire
                    // IN LIST of constants to be serialized as a single value instead of a
                    // VectorValue composed of ConstantValue arguments.
                    // What's TBD is whether that would get its own AbstractExpression class or
                    // just be a special case of ConstantValueExpression.
                    else {
                        assert (otherExpr instanceof VectorValueExpression);
                    }
                }
                originalFilter = filter;
                if (filterAction == EXCLUDE_FROM_POST_FILTERS) {
                    filtersToCover.remove(filter);
                }
                break;
            }
        }
        if ((filter.getExpressionType() == ComparisonExpression.reverses.get(targetComparator)) ||
                (filter.getExpressionType() == ComparisonExpression.reverses.get(altTargetComparator))) {
            normalizedExpr = (ComparisonExpression) filter;
            normalizedExpr = normalizedExpr.reverseOperator();
            indexableExpr = filter.getRight();
            otherExpr = filter.getLeft();
            binding = bindingIfValidIndexedFilterOperand(tableScan, indexableExpr, otherExpr,
                                                         coveringExpr, coveringColId);
            if (binding != null) {
                if ( ! allowIndexedJoinFilters) {
                    if (otherExpr.hasTupleValueSubexpression()) {
                        // This filter can not be used with the index, probably due to interactions
                        // with IN LIST processing of another key component that would require a
                        // three-way NLIJ to be injected.
                        binding = null;
                        continue;
                    }
                }
                originalFilter = filter;
                if (filterAction == EXCLUDE_FROM_POST_FILTERS) {
                    filtersToCover.remove(filter);
                }
                break;
            }
        }
    }
    if (binding == null) {
        // ran out of candidate filters.
        return null;
    }
    return new IndexableExpression(originalFilter, normalizedExpr, binding);
} |
python | def add_deviation_element(keyword, element):
    """Add an element to the <keyword>'s list of deviations.
    Can be used by plugins that add support for specific extension
    statements."""
    if keyword in _valid_deviations:
        _valid_deviations[keyword].append(element)
    else:
        _valid_deviations[keyword] = [element] |
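The if/else above can also be collapsed with `dict.setdefault`; a behavior-preserving alternative, shown only as a style note:

```python
def add_deviation_element(keyword, element):
    """Equivalent one-liner: create the list on first use, then append."""
    _valid_deviations.setdefault(keyword, []).append(element)
```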
java | public void updateConfigInfo(final HttpServletRequest request,
                             final XSLTConfig xcfg)
        throws ServletException {
    PresentationState ps = getPresentationState(request);
    if (ps == null) {
        // Still can't do a thing
        return;
    }
    if (xcfg.nextCfg == null) {
        xcfg.nextCfg = new XSLTFilterConfigInfo();
    } else {
        xcfg.cfg.updateFrom(xcfg.nextCfg);
    }
    xcfg.cfg.setAppRoot(ps.getAppRoot());
    xcfg.nextCfg.setAppRoot(ps.getAppRoot());
    /* Transfer the state */
    if (ps.getNoXSLTSticky()) {
        xcfg.cfg.setDontFilter(true);
        xcfg.nextCfg.setDontFilter(true);
    } else {
        xcfg.cfg.setDontFilter(ps.getNoXSLT());
        ps.setNoXSLT(false);
    }
    /* ============== Don't filter ================= */
    if (xcfg.cfg.getDontFilter()) {
        // I think that's enough
        return;
    }
    /* ============== Locale ================= */
    Locale l = request.getLocale();
    String lang = l.getLanguage();
    if ((lang == null) || (lang.length() == 0)) {
        lang = xcfg.cfg.getDefaultLang();
    }
    String country = l.getCountry();
    if ((country == null) || (country.length() == 0)) {
        country = xcfg.cfg.getDefaultCountry();
    }
    xcfg.cfg.setLocaleInfo(XSLTFilterConfigInfo.makeLocale(lang, country));
    /* locale always sticky */
    xcfg.nextCfg.setLocaleInfo(XSLTFilterConfigInfo.makeLocale(lang, country));
    /* ============== Browser type ================= */
    String temp = ps.getBrowserType();
    if (temp != null) {
        xcfg.cfg.setBrowserType(temp);
    }
    if (!ps.getBrowserTypeSticky()) {
        ps.setBrowserType(null);
    } else {
        xcfg.nextCfg.setBrowserType(temp);
    }
    /* ============== Skin name ================= */
    temp = ps.getSkinName();
    if (temp != null) {
        xcfg.cfg.setSkinName(temp);
    }
    if (!ps.getSkinNameSticky()) {
        ps.setSkinName(null);
    } else {
        xcfg.nextCfg.setSkinName(temp);
    }
    /* ============== Content type ================= */
    xcfg.cfg.setContentType(ps.getContentType());
    if (!ps.getContentTypeSticky()) {
        ps.setContentType(null);
    } else {
        xcfg.nextCfg.setContentType(ps.getContentType());
    }
    /* ============== Refresh ================= */
    xcfg.cfg.setForceReload(ps.getForceXSLTRefresh());
    ps.setForceXSLTRefresh(false);
    /* I don't think we ever want to allow this
       info.setReloadAlways(ps.getForceXSLTRefreshAlways());
     */
} |
python | def fetch_gene_fasta(gene_bed, fasta_obj):
    """Retrieve gene sequences in FASTA format.

    Parameters
    ----------
    gene_bed : BedLine
        BedLine object representing a single gene
    fasta_obj : pysam.Fastafile
        fasta object for indexed retrieval of sequence

    Returns
    -------
    gene_fasta : str
        sequence of gene in FASTA format
    """
    gene_fasta = ''
    strand = gene_bed.strand
    exons = gene_bed.get_exons()
    if strand == '-':
        exons.reverse()  # order exons 5' to 3', so reverse if '-' strand
    # iterate over exons
    for i, exon in enumerate(exons):
        exon_seq = fasta_obj.fetch(reference=gene_bed.chrom,
                                   start=exon[0],
                                   end=exon[1]).upper()
        if strand == '-':
            exon_seq = utils.rev_comp(exon_seq)
        exon_fasta = '>{0};exon{1}\n{2}\n'.format(gene_bed.gene_name,
                                                  i, exon_seq)
        # get splice site sequence
        if len(exons) == 1:
            # splice sites don't matter if there is no splicing
            ss_fasta = ''
        elif i == 0:
            # first exon only, get 5' SS
            ss_fasta = _fetch_5ss_fasta(fasta_obj, gene_bed.gene_name, i,
                                        gene_bed.chrom, strand, exon[0], exon[1])
        elif i == (len(exons) - 1):
            # last exon only, get 3' SS
            ss_fasta = _fetch_3ss_fasta(fasta_obj, gene_bed.gene_name, i,
                                        gene_bed.chrom, strand, exon[0], exon[1])
        else:
            # middle exon, get both 5' and 3' SS
            fasta_3ss = _fetch_3ss_fasta(fasta_obj, gene_bed.gene_name, i,
                                         gene_bed.chrom, strand, exon[0], exon[1])
            fasta_5ss = _fetch_5ss_fasta(fasta_obj, gene_bed.gene_name, i,
                                         gene_bed.chrom, strand, exon[0], exon[1])
            ss_fasta = fasta_5ss + fasta_3ss
        gene_fasta += exon_fasta + ss_fasta
    return gene_fasta |
python | def delete_cloud_integration(self, id, **kwargs):  # noqa: E501
    """Delete a specific cloud integration  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_cloud_integration(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :return: ResponseContainerCloudIntegration
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.delete_cloud_integration_with_http_info(id, **kwargs)  # noqa: E501
    else:
        (data) = self.delete_cloud_integration_with_http_info(id, **kwargs)  # noqa: E501
        return data |
java | public void trim(int ntrees) {
    if (ntrees > trees.size()) {
        throw new IllegalArgumentException("The new model size is larger than the current size.");
    }
    if (ntrees <= 0) {
        throw new IllegalArgumentException("Invalid new model size: " + ntrees);
    }
    List<Tree> model = new ArrayList<>(ntrees);
    for (int i = 0; i < ntrees; i++) {
        model.add(trees.get(i));
    }
    trees = model;
} |
python | def to_df(self, variables=None, format='wide', sparse=True,
          sampling_rate=None, include_sparse=True, include_dense=True,
          **kwargs):
    ''' Merge columns into a single pandas DataFrame.

    Args:
        variables (list): Optional list of variable names to retain;
            if None, all variables are written out.
        format (str): Whether to return a DataFrame in 'wide' or 'long'
            format. In 'wide' format, each row is defined by a unique
            onset/duration, and each variable is in a separate column. In
            'long' format, each row is a unique combination of onset,
            duration, and variable name, and a single 'amplitude' column
            provides the value.
        sparse (bool): If True, variables will be kept in a sparse
            format provided they are all internally represented as such.
            If False, a dense matrix (i.e., uniform sampling rate for all
            events) will be exported. Will be ignored if at least one
            variable is dense.
        sampling_rate (float): If a dense matrix is written out, the
            sampling rate (in Hz) to use for downsampling. Defaults to the
            value currently set in the instance.
        kwargs: Optional keyword arguments to pass onto each Variable's
            to_df() call (e.g., condition, entities, and timing).
        include_sparse (bool): Whether or not to include sparse Variables.
        include_dense (bool): Whether or not to include dense Variables.

    Returns: A pandas DataFrame.
    '''
    if not include_sparse and not include_dense:
        raise ValueError("You can't exclude both dense and sparse "
                         "variables! That leaves nothing!")
    if variables is None:
        variables = list(self.variables.keys())
    if not include_sparse:
        variables = [v for v in variables if
                     isinstance(self.variables[v], DenseRunVariable)]
    if not include_dense:
        variables = [v for v in variables if not
                     isinstance(self.variables[v], DenseRunVariable)]
    if not variables:
        return None
    _vars = [self.variables[v] for v in variables]
    if sparse and all(isinstance(v, SimpleVariable) for v in _vars):
        variables = _vars
    else:
        sampling_rate = sampling_rate or self.sampling_rate
        # Make sure all variables have the same sampling rate
        variables = list(self.resample(sampling_rate, variables,
                                       force_dense=True,
                                       in_place=False).values())
    return super(BIDSRunVariableCollection, self).to_df(variables, format,
                                                        **kwargs) |
java | public static <T> String toJson(T t) {
    if (Objects.isNull(t)) {
        log.warn("t is blank.");
        return "";
    }
    try {
        return OBJECT_MAPPER.writeValueAsString(t);
    } catch (Exception e) {
        log.error(e.getMessage(), e);
        return "";
    }
} |
java | public Observable<Void> validateWorkflowAsync(String resourceGroupName, String workflowName, WorkflowInner validate) {
    return validateWorkflowWithServiceResponseAsync(resourceGroupName, workflowName, validate).map(new Func1<ServiceResponse<Void>, Void>() {
        @Override
        public Void call(ServiceResponse<Void> response) {
            return response.body();
        }
    });
} |
java | public static void checkXerbla() {
    double[] x = new double[9];
    System.out.println("Check whether we're catching XERBLA errors. If you see something like \"** On entry to DGEMM parameter number 4 had an illegal value\", it didn't work!");
    try {
        NativeBlas.dgemm('N', 'N', 3, -1, 3, 1.0, x, 0, 3, x, 0, 3, 0.0, x, 0, 3);
    } catch (IllegalArgumentException e) {
        check("checking XERBLA", e.getMessage().contains("XERBLA"));
        return;
    }
    assert (false); // shouldn't happen
} |
java | private final int getLowestSetBit() {
    if (intLen == 0)
        return -1;
    int j, b;
    for (j = intLen - 1; (j > 0) && (value[j + offset] == 0); j--)
        ;
    b = value[j + offset];
    if (b == 0)
        return -1;
    return ((intLen - 1 - j) << 5) + Integer.numberOfTrailingZeros(b);
} |
java | public String generate(Config config) {
    isNotNull(config, "Config");
    StringBuilder xml = new StringBuilder();
    XmlGenerator gen = new XmlGenerator(xml);
    xml.append("<hazelcast ")
            .append("xmlns=\"http://www.hazelcast.com/schema/config\"\n")
            .append("xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n")
            .append("xsi:schemaLocation=\"http://www.hazelcast.com/schema/config ")
            .append("http://www.hazelcast.com/schema/config/hazelcast-config-4.0.xsd\">");
    gen.open("group")
            .node("name", config.getGroupConfig().getName())
            .node("password", getOrMaskValue(config.getGroupConfig().getPassword()))
            .close()
            .node("license-key", getOrMaskValue(config.getLicenseKey()))
            .node("instance-name", config.getInstanceName());
    manCenterXmlGenerator(gen, config);
    gen.appendProperties(config.getProperties());
    securityXmlGenerator(gen, config);
    wanReplicationXmlGenerator(gen, config);
    networkConfigXmlGenerator(gen, config);
    advancedNetworkConfigXmlGenerator(gen, config);
    mapConfigXmlGenerator(gen, config);
    replicatedMapConfigXmlGenerator(gen, config);
    cacheConfigXmlGenerator(gen, config);
    queueXmlGenerator(gen, config);
    multiMapXmlGenerator(gen, config);
    collectionXmlGenerator(gen, "list", config.getListConfigs().values());
    collectionXmlGenerator(gen, "set", config.getSetConfigs().values());
    topicXmlGenerator(gen, config);
    semaphoreXmlGenerator(gen, config);
    lockXmlGenerator(gen, config);
    countDownLatchXmlGenerator(gen, config);
    ringbufferXmlGenerator(gen, config);
    atomicLongXmlGenerator(gen, config);
    atomicReferenceXmlGenerator(gen, config);
    executorXmlGenerator(gen, config);
    durableExecutorXmlGenerator(gen, config);
    scheduledExecutorXmlGenerator(gen, config);
    eventJournalXmlGenerator(gen, config);
    merkleTreeXmlGenerator(gen, config);
    partitionGroupXmlGenerator(gen, config);
    cardinalityEstimatorXmlGenerator(gen, config);
    listenerXmlGenerator(gen, config);
    serializationXmlGenerator(gen, config);
    reliableTopicXmlGenerator(gen, config);
    liteMemberXmlGenerator(gen, config);
    nativeMemoryXmlGenerator(gen, config);
    servicesXmlGenerator(gen, config);
    hotRestartXmlGenerator(gen, config);
    flakeIdGeneratorXmlGenerator(gen, config);
    crdtReplicationXmlGenerator(gen, config);
    pnCounterXmlGenerator(gen, config);
    quorumXmlGenerator(gen, config);
    cpSubsystemConfig(gen, config);
    xml.append("</hazelcast>");
    return format(xml.toString(), INDENT);
} |
python | def compile_dir(env, src_path, dst_path, pattern=r'^.*\.html$', encoding='utf-8', base_dir=None):
    """Compiles a directory of Jinja2 templates to python code.

    :param env: a Jinja2 Environment instance.
    :param src_path: path to the source directory.
    :param dst_path: path to the destination directory.
    :param pattern: a regular expression matching the template filenames to compile.
    :param encoding: template encoding.
    :param base_dir: the base path to be removed from the compiled template filename.
    """
    from os import path, listdir, mkdir
    file_re = re.compile(pattern)
    if base_dir is None:
        base_dir = src_path
    for filename in listdir(src_path):
        src_name = path.join(src_path, filename)
        dst_name = path.join(dst_path, filename)
        if path.isdir(src_name):
            mkdir(dst_name)
            compile_dir(env, src_name, dst_name, encoding=encoding, base_dir=base_dir)
        elif path.isfile(src_name) and file_re.match(filename):
            compile_file(env, src_name, dst_name, encoding=encoding, base_dir=base_dir) |
python | def publish(self, event, *args, **kwargs):
    """
    Publish an event and return a list of values returned by its
    subscribers.

    :param event: The event to publish.
    :param args: The positional arguments to pass to the event's
        subscribers.
    :param kwargs: The keyword arguments to pass to the event's
        subscribers.
    """
    result = []
    for subscriber in self.get_subscribers(event):
        try:
            value = subscriber(event, *args, **kwargs)
        except Exception:
            logger.exception('Exception during event publication')
            value = None
        result.append(value)
    logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
                 event, args, kwargs, result)
    return result |
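A small usage sketch of the publish/collect pattern above; only `publish` appears in the row, so the `hub` object and its `subscribe` registration method are assumptions for illustration:

```python
def on_saved(event, path):
    return 'saved %s' % path

hub.subscribe('file-saved', on_saved)            # hypothetical registration API
results = hub.publish('file-saved', '/tmp/report.txt')
# results == ['saved /tmp/report.txt']; a subscriber that raises contributes None.
```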
python | def delete(self, *names: str, pipeline=False):
    """Delete one or more keys specified by names.

    Args:
        names (str): Names of keys to delete
        pipeline (bool): If True, queue the delete on the transaction
            pipeline instead of executing it immediately. Defaults to False.
    """
    if pipeline:
        self._pipeline.delete(*names)
    else:
        self._db.delete(*names) |
python | def standardGeographyQuery(self,
                           sourceCountry=None,
                           optionalCountryDataset=None,
                           geographyLayers=None,
                           geographyIDs=None,
                           geographyQuery=None,
                           returnSubGeographyLayer=False,
                           subGeographyLayer=None,
                           subGeographyQuery=None,
                           outSR=4326,
                           returnGeometry=False,
                           returnCentroids=False,
                           generalizationLevel=0,
                           useFuzzySearch=False,
                           featureLimit=1000):
    """
    The GeoEnrichment service provides a helper method that returns
    standard geography IDs and features for the supported geographic
    levels in the United States and Canada.

    As indicated throughout this documentation guide, the GeoEnrichment
    service uses the concept of a study area to define the location of
    the point or area that you want to enrich with additional
    information. Locations can also be passed as one or many named
    statistical areas. This form of a study area lets you define an
    area by the ID of a standard geographic statistical feature, such
    as a census or postal area. For example, to obtain enrichment
    information for a U.S. state, county or ZIP Code or a Canadian
    province or postal code, the Standard Geography Query helper method
    allows you to search and query standard geography areas so that
    they can be used in the GeoEnrichment method to obtain facts about
    the location.

    The most common workflow for this service is to find a FIPS
    (standard geography ID) for a geographic name. For example, you can
    use this service to find the FIPS for the county of San Diego which
    is 06073. You can then use this FIPS ID within the GeoEnrichment
    service study area definition to get geometry and optional
    demographic data for the county. This study area definition is
    passed as a parameter to the GeoEnrichment service to return data
    defined in the enrichment pack and optionally return geometry for
    the feature.

    For examples and more help with this function see:
    http://resources.arcgis.com/en/help/arcgis-rest-api/#/Standard_geography_query/02r30000000q000000/

    Inputs:
        sourceCountry - Optional parameter to specify the source country
            for the search. Use this parameter to limit the search and
            query of standard geographic features to one country. This
            parameter supports both the two-digit and three-digit country
            codes illustrated in the coverage table.
        optionalCountryDataset - Optional parameter to specify a
            specific dataset within a defined country.
        geographyLayers - Optional parameter to specify which standard
            geography layers are being queried or searched. If this
            parameter is not provided, all layers within the defined
            country will be queried.
        geographyIDs - Optional parameter to specify which IDs for the
            standard geography layers are being queried or searched. You
            can use this parameter to return attributes and/or geometry for
            standard geographic areas for administrative areas where you
            already know the ID, for example, if you know the Federal
            Information Processing Standard (FIPS) Codes for a U.S. state
            or county; or, in Canada, to return the geometry and attributes
            for a Forward Sortation Area (FSA).
        geographyQuery - Optional parameter to specify the text to query
            and search the standard geography layers specified. You can use
            this parameter to query and find standard geography features
            that meet an input term, for example, for a list of all the
            U.S. counties that contain the word "orange". The
            geographyQuery parameter can be a string that contains one or
            more words.
        returnSubGeographyLayer - Use this optional parameter to return
            all the subgeographic areas that are within a parent geography.
            For example, you could return all the U.S. counties for a given
            U.S. state or you could return all the Canadian postal areas
            (FSAs) within a Census Metropolitan Area (city).
            When this parameter is set to true, the output features will be
            defined in the subGeographyLayer. The output geometries will be
            in the spatial reference system defined by outSR.
        subGeographyLayer - Use this optional parameter to return all
            the subgeographic areas that are within a parent geography. For
            example, you could return all the U.S. counties within a given
            U.S. state or you could return all the Canadian postal areas
            (FSAs) within a Census Metropolitan Areas (city).
            When this parameter is set to true, the output features will be
            defined in the subGeographyLayer. The output geometries will be
            in the spatial reference system defined by outSR.
        subGeographyQuery - Optional parameter to filter the results of
            the subgeography features that are returned by a search term.
            You can use this parameter to query and find subgeography
            features that meet an input term. This parameter is used to
            filter the list of subgeography features that are within a
            parent geography. For example, you may want a list of all the
            ZIP Codes that are within "San Diego County" and filter the
            results so that only ZIP Codes that start with "921" are
            included in the output response. The subgeography query is a
            string that contains one or more words.
        outSR - Optional parameter to request the output geometries in a
            specified spatial reference system.
        returnGeometry - Optional parameter to request the output
            geometries in the response.
        returnCentroids - Optional Boolean parameter to request the
            output geometry to return the center point for each feature.
            Use this parameter to return all the geometries as points. For
            example, you could return all U.S. ZIP Code centroids (points)
            rather than providing the boundaries.
        generalizationLevel - Optional integer that specifies the level
            of generalization or detail in the area representations of the
            administrative boundary or standard geographic data layers.
            Values must be whole integers from 0 through 6, where 0 is most
            detailed and 6 is most generalized.
        useFuzzySearch - Optional Boolean parameter to define if text
            provided in the geographyQuery parameter should utilize fuzzy
            search logic. Fuzzy searches are based on the Levenshtein
            Distance or Edit Distance algorithm.
        featureLimit - Optional integer value where you can limit the
            number of features that are returned from the geographyQuery.
    """
    url = self._base_url + self._url_standard_geography_query_execute
    params = {
        "f": "json"
    }
    if sourceCountry is not None:
        params['sourceCountry'] = sourceCountry
    if optionalCountryDataset is not None:
        params['optionalCountryDataset'] = optionalCountryDataset
    if geographyLayers is not None:
        params['geographylayers'] = geographyLayers
    if geographyIDs is not None:
        params['geographyids'] = json.dumps(geographyIDs)
    if geographyQuery is not None:
        params['geographyQuery'] = geographyQuery
    if returnSubGeographyLayer is not None and \
            isinstance(returnSubGeographyLayer, bool):
        params['returnSubGeographyLayer'] = returnSubGeographyLayer
    if subGeographyLayer is not None:
        params['subGeographyLayer'] = json.dumps(subGeographyLayer)
    if subGeographyQuery is not None:
        params['subGeographyQuery'] = subGeographyQuery
    if outSR is not None and \
            isinstance(outSR, int):
        params['outSR'] = outSR
    if returnGeometry is not None and \
            isinstance(returnGeometry, bool):
        params['returnGeometry'] = returnGeometry
    if returnCentroids is not None and \
            isinstance(returnCentroids, bool):
        params['returnCentroids'] = returnCentroids
    if generalizationLevel is not None and \
            isinstance(generalizationLevel, int):
        params['generalizationLevel'] = generalizationLevel
    if useFuzzySearch is not None and \
            isinstance(useFuzzySearch, bool):
        params['useFuzzySearch'] = json.dumps(useFuzzySearch)
    # Fall back to the documented default of 1000; the original code assigned
    # the local variable in the None branch without ever adding it to params.
    if not isinstance(featureLimit, int):
        featureLimit = 1000
    params['featureLimit'] = featureLimit
    return self._post(url=url,
                      param_dict=params,
                      securityHandler=self._securityHandler,
                      proxy_url=self._proxy_url,
                      proxy_port=self._proxy_port) |
java | public void marshall(CreateResourceServerRequest createResourceServerRequest, ProtocolMarshaller protocolMarshaller) {
    if (createResourceServerRequest == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
        protocolMarshaller.marshall(createResourceServerRequest.getUserPoolId(), USERPOOLID_BINDING);
        protocolMarshaller.marshall(createResourceServerRequest.getIdentifier(), IDENTIFIER_BINDING);
        protocolMarshaller.marshall(createResourceServerRequest.getName(), NAME_BINDING);
        protocolMarshaller.marshall(createResourceServerRequest.getScopes(), SCOPES_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
} |
python | def relaxation_operators(p):
    """
    Return the amplitude damping Kraus operators
    """
    k0 = np.array([[1.0, 0.0], [0.0, np.sqrt(1 - p)]])
    k1 = np.array([[0.0, np.sqrt(p)], [0.0, 0.0]])
    return k0, k1 |
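A quick numerical check of the operators above: Kraus operators of a trace-preserving channel must satisfy the completeness relation K0†K0 + K1†K1 = I, which holds here for any damping probability p:

```python
import numpy as np

k0, k1 = relaxation_operators(0.1)
completeness = k0.conj().T @ k0 + k1.conj().T @ k1
assert np.allclose(completeness, np.eye(2))  # (1-p) + p = 1 on the |1> diagonal
```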
java | private boolean getBooleanAttributeValue(Node source) {
    Attr attribute = (Attr) source;
    String value = attribute.getValue();
    return "true".equalsIgnoreCase(value);
} |
java | private void handleHttpResponse(HttpResponse response, Class<? extends Response> responseClass, KickflipCallback cb) throws IOException {
    //Object parsedResponse = response.parseAs(responseClass);
    if (isSuccessResponse(response)) {
        // Http Success
        handleKickflipResponse(response, responseClass, cb);
        //cb.onSuccess(responseClass.cast(parsedResponse));
    } else {
        // Http Failure
        if (VERBOSE)
            Log.i(TAG, String.format("RESPONSE (F): %s body: %s", shortenUrlString(response.getRequest().getUrl().toString()), response.getContent().toString()));
        postExceptionToCallback(cb, UNKNOWN_ERROR_CODE);
    }
} |
java | synchronized void onExport(ExportTraceServiceRequest request) {
    if (isCompleted() || exportRequestObserver == null) {
        return;
    }
    try {
        exportRequestObserver.onNext(request);
    } catch (Exception e) { // Catch client side exceptions.
        onComplete(e);
    }
} |
java | public Locale getLocale()
        throws JspException
{
    Locale loc = null;
    if (_language != null || _country != null) {
        // language is required
        if (_language == null) {
            String s = Bundle.getString("Tags_LocaleRequiresLanguage", new Object[]{_country});
            registerTagError(s, null);
            return super.getUserLocale();
        }
        if (_country == null)
            loc = new Locale(_language);
        else
            loc = new Locale(_language, _country);
    }
    else
        loc = super.getUserLocale();
    return loc;
} |
python | def listdir(store, path=None):
    """Obtain a directory listing for the given path. If `store` provides a `listdir`
    method, this will be called, otherwise will fall back to implementation via the
    `MutableMapping` interface."""
    path = normalize_storage_path(path)
    if hasattr(store, 'listdir'):
        # pass through
        return store.listdir(path)
    else:
        # slow version, iterate through all keys
        return _listdir_from_keys(store, path) |
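The `_listdir_from_keys` fallback is not shown in this row; a from-scratch sketch of the same idea, deriving one directory level from flat '/'-separated keys (the name and exact behavior are assumptions):

```python
def listdir_from_keys(store, path):
    # Collect the first path component of every key under the given prefix.
    prefix = path + '/' if path else ''
    children = set()
    for key in store:
        if key.startswith(prefix) and len(key) > len(prefix):
            children.add(key[len(prefix):].split('/')[0])
    return sorted(children)

store = {'a/b': b'1', 'a/c/d': b'2', 'e': b'3'}
assert listdir_from_keys(store, 'a') == ['b', 'c']
assert listdir_from_keys(store, '') == ['a', 'e']
```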
java | public INDArray[] output(boolean train, @NonNull INDArray[] input, INDArray[] inputMasks) {
    return output(train, input, inputMasks, null);
} |
java | private boolean matches(File file, byte[] digest, Configuration conf) throws IOException {
    byte[] fileDigest = getDigest(file, conf);
    return Arrays.equals(fileDigest, digest);
} |
java | private boolean allConnectionsFromOtherStage(final ExecutionGroupVertex groupVertex, final boolean forward) {
    if (forward) {
        for (int i = 0; i < groupVertex.getNumberOfBackwardLinks(); i++) {
            if (this.stage == groupVertex.getBackwardEdge(i).getSourceVertex().getStageNumber()) {
                return false;
            }
        }
    } else {
        for (int i = 0; i < groupVertex.getNumberOfForwardLinks(); i++) {
            if (this.stage == groupVertex.getForwardEdge(i).getTargetVertex().getStageNumber()) {
                return false;
            }
        }
    }
    return true;
} |
java | @Override
public void eUnset(int featureID)
{
    switch (featureID)
    {
        case SimpleAntlrPackage.OPTIONS__OPTION_VALUES:
            getOptionValues().clear();
            return;
    }
    super.eUnset(featureID);
} |
python | def validate_submission(self, filename):
    """Validates submission.

    Args:
        filename: submission filename

    Returns:
        submission metadata or None if submission is invalid
    """
    self._prepare_temp_dir()
    # Convert filename to an absolute path; a relative path might cause problems
    # with mounting the directory in Docker
    filename = os.path.abspath(filename)
    # extract submission
    if not self._extract_submission(filename):
        return None
    # verify submission size
    if not self._verify_submission_size():
        return None
    # Load metadata
    metadata = self._load_and_verify_metadata()
    if not metadata:
        return None
    submission_type = metadata['type']
    # verify docker container size
    if not self._verify_docker_image_size(metadata['container_gpu']):
        return None
    # Try to run submission on sample data
    self._prepare_sample_data(submission_type)
    if not self._run_submission(metadata):
        logging.error('Failure while running submission')
        return None
    if not self._verify_output(submission_type):
        logging.warning('Some of the outputs of your submission are invalid or '
                        'missing. Your submission will still be evaluated, '
                        'but you might get a lower score.')
    return metadata |
python | def _categorize(self, category):
    """Remove torrents with unwanted category from self.torrents"""
    self.torrents = [result for result in self.torrents
                     if result.category == category] |
java | @Override
public String getMessage() {
    if (getCause() instanceof UnknownHostException) {
        return "Host (" + getHost() + ") is unreachable !!!!";
    }
    if (getCause() instanceof ConnectException) {
        return "Host (" + getHost() + ") is unreachable at Port (" + getPort() + ")!!!!";
    }
    return getCause().getMessage();
} |
python | def _set_country(self, c):
    """
    callback if we used Tor's GETINFO ip-to-country
    """
    self.location.countrycode = c.split()[0].split('=')[1].strip().upper() |
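The reply format this callback expects is inferred from the chained splits, so the literal below is an assumption; the parse itself can be exercised standalone:

```python
reply = 'ip-to-country/198.51.100.7=us'   # assumed shape of Tor's GETINFO reply
countrycode = reply.split()[0].split('=')[1].strip().upper()
assert countrycode == 'US'
```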
java | private static boolean authenticateAuthorizationHeader(Map<String, String> headers) {
    String hdr = headers.get(Listener.AUTHORIZATION_HEADER_NAME);
    if (hdr == null)
        hdr = headers.get(Listener.AUTHORIZATION_HEADER_NAME.toLowerCase());
    headers.remove(Listener.AUTHORIZATION_HEADER_NAME);
    headers.remove(Listener.AUTHORIZATION_HEADER_NAME.toLowerCase());
    if (hdr != null && hdr.startsWith("Basic"))
        return checkBasicAuthenticationHeader(hdr, headers);
    else if (hdr != null && hdr.startsWith("Bearer"))
        return checkBearerAuthenticationHeader(hdr, headers);
    return false;
} |
python | def update_features(self, poly):
    """Evaluate wavelength at each feature's xpos using the provided polynomial."""
    for feature in self.features:
        feature.wavelength = poly(feature.xpos) |
java | @Override
public void writeStringData(String value, int offset, int length)
{
    char[] cBuf = _charBuffer;
    int cBufLength = cBuf.length;
    for (int i = 0; i < length; i += cBufLength) {
        int sublen = Math.min(length - i, cBufLength);
        value.getChars(offset + i, offset + i + sublen, cBuf, 0);
        writeStringChunk(cBuf, 0, sublen);
    }
} |
java | public WxCardAPISignature createWxCardJsAPISignature(WxCardAPISignature wxCardAPISignature) {
    if (wxCardAPITicket == null || wxCardAPITicket.expired()) {
        getWxCardAPITicket();
    }
    long timestamp = System.currentTimeMillis() / 1000;
    String nonce = RandomStringGenerator.getRandomStringByLength(16);
    String ticket = wxCardAPITicket.getTicket();
    List<String> parameters = new ArrayList<>();
    if (wxCardAPISignature.isChooseCard()) {
        parameters.add(wxClient.getClientId());
    }
    parameters.add(ticket);
    parameters.add(wxCardAPISignature.getCardId());
    parameters.add(nonce);
    parameters.add(String.valueOf(timestamp));
    if (!(wxCardAPISignature.getCardType() == null || "".equals(wxCardAPISignature.getCardType()))) {
        parameters.add(wxCardAPISignature.getCardType());
    }
    if (!(wxCardAPISignature.getCode() == null || "".equals(wxCardAPISignature.getCode()))) {
        parameters.add(wxCardAPISignature.getCode());
    }
    if (!(wxCardAPISignature.getBalance() == null || "".equals(wxCardAPISignature.getBalance()))) {
        parameters.add(wxCardAPISignature.getBalance());
    }
    if (!(wxCardAPISignature.getOpenId() == null || "".equals(wxCardAPISignature.getOpenId()))) {
        parameters.add(wxCardAPISignature.getOpenId());
    }
    if (!(wxCardAPISignature.getLocationId() == null || "".equals(wxCardAPISignature.getLocationId()))) {
        parameters.add(wxCardAPISignature.getLocationId());
    }
    try {
        // toArray() with no argument returns Object[], which cannot be cast to
        // String[]; pass a typed array instead.
        String signature = SHA1.getSHA1(parameters.toArray(new String[0]));
        wxCardAPISignature.setNonce(nonce);
        wxCardAPISignature.setTimestamp(timestamp);
        wxCardAPISignature.setSignature(signature);
        return wxCardAPISignature;
    } catch (AesException e) {
        logger.error("createWxCardJsAPISignature failed", e);
        throw new WxRuntimeException(999, e.getMessage());
    }
} |
java | public static synchronized ILoadBalancer getNamedLoadBalancer(String name) {
    ILoadBalancer lb = namedLBMap.get(name);
    if (lb != null) {
        return lb;
    } else {
        try {
            lb = registerNamedLoadBalancerFromclientConfig(name, getNamedConfig(name));
        } catch (ClientException e) {
            throw new RuntimeException("Unable to create load balancer", e);
        }
        namedLBMap.put(name, lb);
        return lb;
    }
} |
java | public void setContainer(DatePickerContainer container) {
    this.container = container;
    options.container = container == DatePickerContainer.SELF ? getElement().getId() : container.getCssName();
} |
java | private static String adopt2JavaPattern(String pattern)
{
    pattern = normalizePath(pattern);
    // any character except '/', one or more times
    pattern = pattern.replaceAll("\\" + ANY_NAME, "[^/]+");
    // any character except '/' exactly one time
    pattern = pattern.replaceAll(ANY_CHAR, "[^/]{1}");
    return pattern;
} |
java | @Override
public void start() {
    mTerminated = false;
    mStarted = true;
    // First, sort the nodes (if necessary). This will ensure that sortedNodes
    // contains the animation nodes in the correct order.
    sortNodes();
    int numSortedNodes = mSortedNodes.size();
    for (int i = 0; i < numSortedNodes; ++i) {
        Node node = mSortedNodes.get(i);
        // First, clear out the old listeners
        ArrayList<AnimatorListener> oldListeners = node.animation.getListeners();
        if (oldListeners != null && oldListeners.size() > 0) {
            final ArrayList<AnimatorListener> clonedListeners = new
                    ArrayList<AnimatorListener>(oldListeners);
            for (AnimatorListener listener : clonedListeners) {
                if (listener instanceof DependencyListener ||
                        listener instanceof AnimatorSetListener) {
                    node.animation.removeListener(listener);
                }
            }
        }
    }
    // nodesToStart holds the list of nodes to be started immediately. We don't want to
    // start the animations in the loop directly because we first need to set up
    // dependencies on all of the nodes. For example, we don't want to start an animation
    // when some other animation also wants to start when the first animation begins.
    final ArrayList<Node> nodesToStart = new ArrayList<Node>();
    for (int i = 0; i < numSortedNodes; ++i) {
        Node node = mSortedNodes.get(i);
        if (mSetListener == null) {
            mSetListener = new AnimatorSetListener(this);
        }
        if (node.dependencies == null || node.dependencies.size() == 0) {
            nodesToStart.add(node);
        } else {
            int numDependencies = node.dependencies.size();
            for (int j = 0; j < numDependencies; ++j) {
                Dependency dependency = node.dependencies.get(j);
                dependency.node.animation.addListener(
                        new DependencyListener(this, node, dependency.rule));
            }
            node.tmpDependencies = (ArrayList<Dependency>) node.dependencies.clone();
        }
        node.animation.addListener(mSetListener);
    }
    // Now that all dependencies are set up, start the animations that should be started.
    if (mStartDelay <= 0) {
        for (Node node : nodesToStart) {
            node.animation.start();
            mPlayingSet.add(node.animation);
        }
    } else {
        mDelayAnim = ValueAnimator.ofFloat(0f, 1f);
        mDelayAnim.setDuration(mStartDelay);
        mDelayAnim.addListener(new AnimatorListenerAdapter() {
            boolean canceled = false;
            public void onAnimationCancel(Animator anim) {
                canceled = true;
            }
            public void onAnimationEnd(Animator anim) {
                if (!canceled) {
                    int numNodes = nodesToStart.size();
                    for (int i = 0; i < numNodes; ++i) {
                        Node node = nodesToStart.get(i);
                        node.animation.start();
                        mPlayingSet.add(node.animation);
                    }
                }
            }
        });
        mDelayAnim.start();
    }
    if (mListeners != null) {
        ArrayList<AnimatorListener> tmpListeners =
                (ArrayList<AnimatorListener>) mListeners.clone();
        int numListeners = tmpListeners.size();
        for (int i = 0; i < numListeners; ++i) {
            tmpListeners.get(i).onAnimationStart(this);
        }
    }
    if (mNodes.size() == 0 && mStartDelay == 0) {
        // Handle unusual case where empty AnimatorSet is started - should send out
        // end event immediately since the event will not be sent out at all otherwise
        mStarted = false;
        if (mListeners != null) {
            ArrayList<AnimatorListener> tmpListeners =
                    (ArrayList<AnimatorListener>) mListeners.clone();
            int numListeners = tmpListeners.size();
            for (int i = 0; i < numListeners; ++i) {
                tmpListeners.get(i).onAnimationEnd(this);
            }
        }
    }
} |
java | public static <R> Func0<R> toFunc(final Action0 action, final R result) {
    return new Func0<R>() {
        @Override
        public R call() {
            action.call();
            return result;
        }
    };
} |
java | public void invalidate(@Nullable Uri uri) {
    if (uri != null) {
        cache.clearKeyUri(uri.toString());
    }
} |
java | public Content throwsTagOutput(Element element, DocTree throwsTag) {
    ContentBuilder body = new ContentBuilder();
    CommentHelper ch = utils.getCommentHelper(element);
    Element exception = ch.getException(configuration, throwsTag);
    Content excName;
    if (exception == null) {
        excName = new RawHtml(ch.getExceptionName(throwsTag).toString());
    } else if (exception.asType() == null) {
        excName = new RawHtml(utils.getFullyQualifiedName(exception));
    } else {
        LinkInfoImpl link = new LinkInfoImpl(configuration, LinkInfoImpl.Kind.MEMBER,
                                             exception.asType());
        link.excludeTypeBounds = true;
        excName = htmlWriter.getLink(link);
    }
    body.addContent(HtmlTree.CODE(excName));
    List<? extends DocTree> description = ch.getDescription(configuration, throwsTag);
    Content desc = htmlWriter.commentTagsToContent(throwsTag, element, description, false);
    if (desc != null && !desc.isEmpty()) {
        body.addContent(" - ");
        body.addContent(desc);
    }
    HtmlTree result = HtmlTree.DD(body);
    return result;
} |
java | public DenseMatrix inverse() {
    int m = lu.nrows();
    int n = lu.ncols();
    if (m != n) {
        throw new IllegalArgumentException(String.format("Matrix is not square: %d x %d", m, n));
    }
    DenseMatrix inv = Matrix.zeros(n, n);
    for (int i = 0; i < n; i++) {
        inv.set(i, piv[i], 1.0);
    }
    solve(inv);
    return inv;
} |
python | def sign(self, encoded):
    """ Return authentication signature of encoded bytes """
    signature = self._hmac.copy()
    signature.update(encoded)
    return signature.hexdigest().encode('utf-8') |
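A self-contained sketch of the copy-then-update idiom used above, with the stdlib `hmac` module; the key and digest choice are placeholders. Copying the keyed HMAC per message avoids redoing the key setup each time:

```python
import hashlib
import hmac

base = hmac.new(b'secret-key', digestmod=hashlib.sha256)  # keyed once

def sign(encoded: bytes) -> bytes:
    sig = base.copy()    # cheap per-message copy of the keyed state
    sig.update(encoded)
    return sig.hexdigest().encode('utf-8')

print(sign(b'hello'))
```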
java | public static SameDiff restoreFromTrainingConfigZip(File file) throws IOException {
    ZipFile zipFile = new ZipFile(file);
    ZipEntry config = zipFile.getEntry(TRAINING_CONFIG_JSON_ZIP_ENTRY_NAME);
    TrainingConfig trainingConfig = null;
    try (InputStream stream = zipFile.getInputStream(config)) {
        byte[] read = IOUtils.toByteArray(stream);
        trainingConfig = ObjectMapperHolder.getJsonMapper().readValue(read, TrainingConfig.class);
    }
    SameDiff ret = null;
    ZipEntry sameDiffFile = zipFile.getEntry(SAMEDIFF_FILE_ENTRY_NAME);
    try (InputStream stream = zipFile.getInputStream(sameDiffFile)) {
        byte[] read = IOUtils.toByteArray(stream);
        ret = SameDiff.fromFlatBuffers(ByteBuffer.wrap(read));
    }
    ret.setTrainingConfig(trainingConfig);
    ret.initializeTraining();
    return ret;
} |
python | def dframe(self, dimensions=None, multi_index=False):
    """Convert dimension values to DataFrame.

    Returns a pandas dataframe of columns along each dimension,
    either completely flat or indexed by key dimensions.

    Args:
        dimensions: Dimensions to return as columns
        multi_index: Convert key dimensions to (multi-)index

    Returns:
        DataFrame of columns corresponding to each dimension
    """
    import pandas as pd
    if dimensions is None:
        dimensions = [d.name for d in self.dimensions()]
    else:
        dimensions = [self.get_dimension(d, strict=True).name for d in dimensions]
    column_names = dimensions
    dim_vals = OrderedDict([(dim, self.dimension_values(dim)) for dim in column_names])
    df = pd.DataFrame(dim_vals)
    if multi_index:
        df = df.set_index([d for d in dimensions if d in self.kdims])
    return df |
python | def function(self, rule, args, **kwargs):
    """
    Callback method for rule tree traversing. Will be called at proper time
    from :py:class:`pynspect.rules.FunctionRule.traverse` method.

    :param pynspect.rules.Rule rule: Reference to rule.
    :param args: Optional function arguments.
    :param dict kwargs: Optional callback arguments.
    """
    return ('<div class="pynspect-rule-function">'
            '<h3 class="pynspect-rule-function-name">{}</h3>'
            '<ul class="pynspect-rule-function-arguments">{}</ul>'
            '</div>').format(
        rule.function,
        ''.join('<li class="pynspect-rule-function-argument">{}</li>'.format(v)
                for v in args)) |
java | @Override
@SuppressWarnings("unchecked")
public PlainDate apply(PlainDate entity) {
    ChronoOperator<PlainDate> operator = (ChronoOperator<PlainDate>) this.opDelegate;
    return operator.apply(entity);
} |
java | protected void doPropagateAssertObject(InternalFactHandle factHandle,
                                       PropagationContext context,
                                       InternalWorkingMemory workingMemory,
                                       ObjectSink sink) {
    sink.assertObject( factHandle,
                       context,
                       workingMemory );
} |
python | def reshape(x, input_dim):
    '''
    Reshapes x into a matrix with input_dim columns
    '''
    x = np.array(x)
    if x.size == input_dim:
        x = x.reshape((1, input_dim))
    return x |
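Two illustrative calls for the helper above, showing the single-point case gaining a row axis and already-batched input passing through unchanged:

```python
import numpy as np

assert reshape([1.0, 2.0, 3.0], 3).shape == (1, 3)   # flat input becomes one row
assert reshape(np.ones((2, 3)), 3).shape == (2, 3)   # size != input_dim: untouched
```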
java | public Date getThere(Date start, Distance distance)
{
    TimeSpan timeSpan = getTimeSpan(distance);
    return timeSpan.addDate(start);
} |
java | public static <T, E> AtomFeedParser<T, E> create(
        HttpResponse response,
        XmlNamespaceDictionary namespaceDictionary,
        Class<T> feedClass,
        Class<E> entryClass)
        throws IOException, XmlPullParserException {
    InputStream content = response.getContent();
    try {
        Atom.checkContentType(response.getContentType());
        XmlPullParser parser = Xml.createParser();
        parser.setInput(content, null);
        AtomFeedParser<T, E> result =
                new AtomFeedParser<T, E>(namespaceDictionary, parser, content, feedClass, entryClass);
        content = null;
        return result;
    } finally {
        if (content != null) {
            content.close();
        }
    }
} |
python | def _invariant(self, rank, n):
    """Computes the delta value for the sample."""
    minimum = n + 1
    for i in self._invariants:
        delta = i._delta(rank, n)
        if delta < minimum:
            minimum = delta
    return math.floor(minimum) |
java | protected Uberspect instantiateUberspector(String classname)
{
    Object o = null;
    try {
        o = ClassUtils.getNewInstance(classname);
    } catch (ClassNotFoundException e) {
        this.log.warn(String.format("The specified uberspector [%s]"
            + " does not exist or is not accessible to the current classloader.", classname));
    } catch (IllegalAccessException e) {
        this.log.warn(String.format("The specified uberspector [%s] does not have a public default constructor.",
            classname));
    } catch (InstantiationException e) {
        this.log.warn(String.format("The specified uberspector [%s] cannot be instantiated.", classname));
    } catch (ExceptionInInitializerError e) {
        this.log.warn(String.format("Exception while instantiating the Uberspector [%s]: %s", classname, e
            .getMessage()));
    }
    if (!(o instanceof Uberspect)) {
        if (o != null) {
            this.log.warn("The specified class for Uberspect [" + classname + "] does not implement "
                + Uberspect.class.getName());
        }
        return null;
    }
    return (Uberspect) o;
} |
python | def pad(self, minibatch):
    """Pad a batch of examples using this field.

    If ``self.nesting_field.sequential`` is ``False``, each example in the batch must
    be a list of string tokens, and pads them as if by a ``Field`` with
    ``sequential=True``. Otherwise, each example must be a list of list of tokens.
    Using ``self.nesting_field``, pads the list of tokens to
    ``self.nesting_field.fix_length`` if provided, or otherwise to the length of the
    longest list of tokens in the batch. Next, using this field, pads the result by
    filling short examples with ``self.nesting_field.pad_token``.

    Example:
        >>> import pprint
        >>> pp = pprint.PrettyPrinter(indent=4)
        >>>
        >>> nesting_field = Field(pad_token='<c>', init_token='<w>', eos_token='</w>')
        >>> field = NestedField(nesting_field, init_token='<s>', eos_token='</s>')
        >>> minibatch = [
        ...     [list('john'), list('loves'), list('mary')],
        ...     [list('mary'), list('cries')],
        ... ]
        >>> padded = field.pad(minibatch)
        >>> pp.pprint(padded)
        [   [   ['<w>', '<s>', '</w>', '<c>', '<c>', '<c>', '<c>'],
                ['<w>', 'j', 'o', 'h', 'n', '</w>', '<c>'],
                ['<w>', 'l', 'o', 'v', 'e', 's', '</w>'],
                ['<w>', 'm', 'a', 'r', 'y', '</w>', '<c>'],
                ['<w>', '</s>', '</w>', '<c>', '<c>', '<c>', '<c>']],
            [   ['<w>', '<s>', '</w>', '<c>', '<c>', '<c>', '<c>'],
                ['<w>', 'm', 'a', 'r', 'y', '</w>', '<c>'],
                ['<w>', 'c', 'r', 'i', 'e', 's', '</w>'],
                ['<w>', '</s>', '</w>', '<c>', '<c>', '<c>', '<c>'],
                ['<c>', '<c>', '<c>', '<c>', '<c>', '<c>', '<c>']]]

    Arguments:
        minibatch (list): Each element is a list of string if
            ``self.nesting_field.sequential`` is ``False``, a list of list of string
            otherwise.

    Returns:
        list: The padded minibatch, or (padded, sentence_lens, word_lengths) if
        ``self.include_lengths`` is ``True``.
    """
    minibatch = list(minibatch)
    if not self.nesting_field.sequential:
        return super(NestedField, self).pad(minibatch)
    # Save values of attributes to be monkeypatched
    old_pad_token = self.pad_token
    old_init_token = self.init_token
    old_eos_token = self.eos_token
    old_fix_len = self.nesting_field.fix_length
    # Monkeypatch the attributes
    if self.nesting_field.fix_length is None:
        max_len = max(len(xs) for ex in minibatch for xs in ex)
        fix_len = max_len + 2 - (self.nesting_field.init_token,
                                 self.nesting_field.eos_token).count(None)
        self.nesting_field.fix_length = fix_len
    self.pad_token = [self.pad_token] * self.nesting_field.fix_length
    if self.init_token is not None:
        # self.init_token = self.nesting_field.pad([[self.init_token]])[0]
        self.init_token = [self.init_token]
    if self.eos_token is not None:
        # self.eos_token = self.nesting_field.pad([[self.eos_token]])[0]
        self.eos_token = [self.eos_token]
    # Do padding
    old_include_lengths = self.include_lengths
    self.include_lengths = True
    self.nesting_field.include_lengths = True
    padded, sentence_lengths = super(NestedField, self).pad(minibatch)
    padded_with_lengths = [self.nesting_field.pad(ex) for ex in padded]
    word_lengths = []
    final_padded = []
    max_sen_len = len(padded[0])
    for (pad, lens), sentence_len in zip(padded_with_lengths, sentence_lengths):
        if sentence_len == max_sen_len:
            lens = lens
            pad = pad
        elif self.pad_first:
            lens[:(max_sen_len - sentence_len)] = (
                [0] * (max_sen_len - sentence_len))
            pad[:(max_sen_len - sentence_len)] = (
                [self.pad_token] * (max_sen_len - sentence_len))
        else:
            lens[-(max_sen_len - sentence_len):] = (
                [0] * (max_sen_len - sentence_len))
            pad[-(max_sen_len - sentence_len):] = (
                [self.pad_token] * (max_sen_len - sentence_len))
        word_lengths.append(lens)
        final_padded.append(pad)
    padded = final_padded
    # Restore monkeypatched attributes
    self.nesting_field.fix_length = old_fix_len
    self.pad_token = old_pad_token
    self.init_token = old_init_token
    self.eos_token = old_eos_token
    self.include_lengths = old_include_lengths
    if self.include_lengths:
        return padded, sentence_lengths, word_lengths
    return padded |
python | def make_funcs(dataset, setdir, store):
    """Functions available for listing columns and filters."""
    return {
        'cat': lambda *lists: [x for lst in lists for x in lst],
        'comments': lambda: None,
        'detail_route': detail_route,
        'format': lambda fmt, *args: fmt.format(*args),
        'get': partial(getnode, dataset, setdir, store),
        'join': lambda sep, *args: sep.join([x for x in args if x]),
        'len': len,
        'link': (lambda href, title, target=None:
                 {'href': href or "",
                  'title': title or "",
                  'target': '_blank' if target is None else target}),
        'list': lambda *x: filter(None, list(x)),
        'max': max,
        'min': min,
        'status': lambda: ['#STATUS#'],
        'sum': sum,
        'tags': lambda: ['#TAGS#'],
        'trace': print_trace,
    } |
python | def _get_sorted_inputs(filename, delimiter="\n"):
    """Returning inputs sorted according to decreasing length.

    This causes inputs of similar lengths to be processed in the same batch,
    facilitating early stopping for short sequences.
    Longer sequences are sorted first so that if you're going to get OOMs,
    you'll see it in the first batch.

    Args:
        filename: path to file with inputs, 1 per line.
        delimiter: str, delimits records in the file.

    Returns:
        a sorted list of inputs
    """
    tf.logging.info("Getting sorted inputs")
    with tf.gfile.Open(filename) as f:
        text = f.read()
        records = text.split(delimiter)
        inputs = [record.strip() for record in records]
        # Strip the last empty line.
        if not inputs[-1]:
            inputs.pop()
    input_lens = [(i, -len(line.split())) for i, line in enumerate(inputs)]
    sorted_input_lens = sorted(input_lens, key=operator.itemgetter(1))
    # We'll need the keys to rearrange the inputs back into their original order
    sorted_keys = {}
    sorted_inputs = []
    for i, (index, _) in enumerate(sorted_input_lens):
        sorted_inputs.append(inputs[index])
        sorted_keys[index] = i
    return sorted_inputs, sorted_keys |
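The sort-then-restore bookkeeping above can be demonstrated without TensorFlow; a minimal standalone version of the same index arithmetic:

```python
inputs = ['a b c', 'x', 'p q']
# Longest first, mirroring the negative-length sort key used above.
order = sorted(range(len(inputs)), key=lambda i: -len(inputs[i].split()))
sorted_inputs = [inputs[i] for i in order]
sorted_keys = {idx: pos for pos, idx in enumerate(order)}
# After processing sorted_inputs, outputs are rearranged back via sorted_keys:
restored = [sorted_inputs[sorted_keys[i]] for i in range(len(inputs))]
assert restored == inputs
```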
java | @Override
public GetSizeConstraintSetResult getSizeConstraintSet(GetSizeConstraintSetRequest request) {
    request = beforeClientExecution(request);
    return executeGetSizeConstraintSet(request);
} |
python | def basis_selector_oracle(qubits: List[int], bitstring: str) -> Program:
    """
    Defines an oracle that selects the ith element of the computational basis.

    Flips the sign of the state :math:`\\vert x\\rangle`
    if and only if x==bitstring and does nothing otherwise.

    :param qubits: The qubits the oracle is called on. The qubits are assumed to be ordered from
        most significant qubit to least significant qubit.
    :param bitstring: The desired bitstring, given as a string of ones and zeros. e.g. "101"
    :return: A program representing this oracle.
    """
    if len(qubits) != len(bitstring):
        raise ValueError("The bitstring should be the same length as the number of qubits.")
    oracle_prog = Program()
    # In the case of one qubit, we just want to flip the phase of state relative to the other.
    if len(bitstring) == 1:
        oracle_prog.inst(Z(qubits[0]))
        return oracle_prog
    else:
        bitflip_prog = Program()
        for i, qubit in enumerate(qubits):
            if bitstring[i] == '0':
                bitflip_prog.inst(X(qubit))
        oracle_prog += bitflip_prog
        controls = qubits[:-1]
        target = qubits[-1]
        operation = np.array([[1, 0], [0, -1]])
        gate_name = 'Z'
        n_qubit_controlled_z = (ControlledProgramBuilder()
                                .with_controls(controls)
                                .with_target(target)
                                .with_operation(operation)
                                .with_gate_name(gate_name)
                                .build())
        oracle_prog += n_qubit_controlled_z
        oracle_prog += bitflip_prog
    return oracle_prog |
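A dense reference for what the oracle implements: a diagonal ±1 matrix with -1 only at the index encoded by the bitstring. This check is independent of pyQuil and only uses the definition from the docstring:

```python
import numpy as np

def oracle_matrix(bitstring: str) -> np.ndarray:
    # +1 everywhere except -1 at the basis index the bitstring encodes.
    diag = np.ones(2 ** len(bitstring))
    diag[int(bitstring, 2)] = -1.0
    return np.diag(diag)

U = oracle_matrix('101')
state = np.zeros(8)
state[0b101] = 1.0
assert np.allclose(U @ state, -state)   # sign flips only on |101>
```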
python | def ResolvePrefix(self, subject, attribute_prefix, timestamp=None,
                  limit=None):
    """Resolve all attributes for a subject starting with a prefix."""
    subject = utils.SmartUnicode(subject)
    if timestamp in [None, self.NEWEST_TIMESTAMP, self.ALL_TIMESTAMPS]:
        start, end = 0, (2**63) - 1
    # Does timestamp represent a range?
    elif isinstance(timestamp, (list, tuple)):
        start, end = timestamp  # pylint: disable=unpacking-non-sequence
    else:
        raise ValueError("Invalid timestamp: %s" % timestamp)
    start = int(start)
    end = int(end)
    # TODO(hanuszczak): Make this function accept only one attribute prefix and
    # only a unicode object.
    if isinstance(attribute_prefix, string_types):
        attribute_prefix = [attribute_prefix]
    try:
        record = self.subjects[subject]
    except KeyError:
        return []
    # Holds all the attributes which matched. Keys are attribute names, values
    # are lists of timestamped data.
    results = {}
    nr_results = 0
    for prefix in attribute_prefix:
        for attribute, values in iteritems(record):
            if limit and nr_results >= limit:
                break
            # TODO(hanuszczak): After resolving the TODO comment above this call to
            # `unicode` should be redundant.
            if str(attribute).startswith(prefix):
                for encoded_value, ts in values:
                    results_list = results.setdefault(attribute, [])
                    # If we are always after the latest ts we clear older ones.
                    if (results_list and timestamp in [self.NEWEST_TIMESTAMP, None] and
                            results_list[0][1] < ts):
                        results_list = []
                        results[attribute] = results_list
                    # Timestamp outside the range, drop it.
                    elif ts < start or ts > end:
                        continue
                    value = self._value_converter.Decode(attribute, encoded_value)
                    results_list.append((attribute, ts, value))
                    nr_results += 1
                    if limit and nr_results >= limit:
                        break
    result = []
    for attribute_name, values in sorted(iteritems(results)):
        # Values are triples of (attribute_name, timestamp, data). We want to
        # sort by timestamp.
        for _, ts, data in sorted(values, key=lambda x: x[1], reverse=True):
            # Return triples (attribute_name, data, timestamp).
            result.append((attribute_name, data, ts))
    return result |
java | public ClassLoader getExternalClassLoader()
{
Map<String,String> opts = _ap.getAnnotationProcessorEnvironment().getOptions();
String classpath = opts.get("-classpath");
if ( classpath != null )
{
String [] cpEntries = classpath.split( File.pathSeparator );
ArrayList<URL> a = new ArrayList<URL>();
for ( String e : cpEntries )
{
try
{
File f = (new File(e)).getCanonicalFile();
URL u = f.toURI().toURL();
a.add(u);
}
catch (Exception ex)
{
System.err.println( "getExternalClassLoader(): bad cp entry=" + e );
System.err.println( "Exception processing e=" + ex );
}
}
URL[] urls = a.toArray(new URL[a.size()]);
return new URLClassLoader( urls, ControlChecker.class.getClassLoader() );
}
return null;
} |
java | private void addFile(File root, File file, String prefix, ZipOutputStream zos) throws IOException {
ZipEntry jarEntry = new ZipEntry(prefix + normalizePath(root, file));
FileInputStream fis = new FileInputStream(file);
try {
zos.putNextEntry(jarEntry);
StreamUtils.copyStream(fis, zos, false);
} finally {
fis.close();
}
} |
python | def recurse(self, value, max_depth=6, _depth=0, **kwargs):
"""
Given ``value``, recurse (using the parent serializer) to handle
coercing of newly defined values.
"""
string_max_length = kwargs.get('string_max_length', None)
_depth += 1
if _depth >= max_depth:
try:
value = text_type(repr(value))[:string_max_length]
except Exception as e:
import traceback
traceback.print_exc()
self.manager.logger.exception(e)
return text_type(type(value))
return self.manager.transform(value, max_depth=max_depth,
_depth=_depth, **kwargs) |
python | def __EncodedAttribute_decode_gray8(self, da, extract_as=ExtractAs.Numpy):
"""Decode a 8 bits grayscale image (JPEG_GRAY8 or GRAY8) and returns a 8 bits gray scale image.
:param da: :class:`DeviceAttribute` that contains the image
:type da: :class:`DeviceAttribute`
:param extract_as: defaults to ExtractAs.Numpy
:type extract_as: ExtractAs
:return: the decoded data
- In case String is chosen as extract method, a tuple is returned:
width<int>, height<int>, buffer<str>
- In case Numpy is chosen as extract method, a :class:`numpy.ndarray` is
returned with ndim=2, shape=(height, width) and dtype=numpy.uint8.
- In case Tuple or List are chosen, a tuple<tuple<int>> or list<list<int>>
is returned.
.. warning::
The PyTango calls that return a :class:`DeviceAttribute`
(like :meth:`DeviceProxy.read_attribute` or :meth:`DeviceProxy.command_inout`)
automatically extract the contents by default. This method requires
that the given :class:`DeviceAttribute` is obtained from a
call which **DOESN'T** extract the contents. Example::
dev = tango.DeviceProxy("a/b/c")
da = dev.read_attribute("my_attr", extract_as=tango.ExtractAs.Nothing)
enc = tango.EncodedAttribute()
data = enc.decode_gray8(da)
"""
if hasattr(da, 'value'):
raise TypeError("DeviceAttribute argument must have been obtained from "
"a call which doesn't extract the contents")
if extract_as not in _allowed_extract:
raise TypeError("extract_as must be one of Numpy, String, Tuple, List")
return self._decode_gray8(da, extract_as) |
python | def isometric(lat: float, ell: Ellipsoid = None, deg: bool = True):
"""
computes isometric latitude of a point on an ellipsoid
Parameters
----------
lat : float or numpy.ndarray of float
geodetic latitude
ell : Ellipsoid, optional
reference ellipsoid (default WGS84)
deg : bool, optional
degrees input/output (False: radians in/out)
Returns
-------
isolat : float or numpy.ndarray of float
isometric latitude
Notes
-----
Isometric latitude is an auxiliary latitude proportional to the spacing
of parallels of latitude on an ellipsoidal mercator projection.
Based on Deakin, R.E., 2010, 'The Loxodrome on an Ellipsoid', Lecture Notes,
School of Mathematical and Geospatial Sciences, RMIT University,
January 2010
"""
if ell is None:
ell = Ellipsoid()
f = ell.f # flattening of ellipsoid
if deg is True:
lat = np.deg2rad(lat)
e2 = f * (2 - f) # eccentricity-squared
e = np.sqrt(e2) # eccentricity of ellipsoid
x = e * np.sin(lat)
y = (1 - x) / (1 + x)
z = np.pi / 4 + lat / 2
# calculate the isometric latitude
isolat = np.log(np.tan(z) * (y**(e / 2)))
if deg is True:
isolat = np.degrees(isolat)
return isolat |
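A rough numeric check of the formula above, assuming the default WGS84 ellipsoid:

# At 45 degrees geodetic latitude the isometric latitude is about 50.23 degrees.
psi = isometric(45.0)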
java | private TTTState<I, D> getAnyState(Iterable<? extends I> suffix) {
return getAnySuccessor(hypothesis.getInitialState(), suffix);
} |
java | @Override
public byte[] getValue() {
StringBuilder sb = new StringBuilder(64);
sb.append(component).append(MetricUtils.AT).append(taskId).append(MetricUtils.AT)
.append(streamId).append(MetricUtils.AT).append(metricType).append(MetricUtils.AT)
.append(host).append(MetricUtils.AT).append(port).append(MetricUtils.AT)
.append(metricGroup).append(MetricUtils.AT).append(metricName);
return sb.toString().getBytes();
} |
python | def or_where(self, key, operator, value):
"""Make or_where clause
:@param key
:@param operator
:@param value
:@type key, operator, value: string
:@return self
"""
if len(self._queries) > 0:
self._current_query_index += 1
self.__store_query({"key": key, "operator": operator, "value": value})
return self |
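A hypothetical chained usage, assuming a where()/get() API alongside or_where as in typical query builders:

# matches records with age > 30 OR name == "Ada"
result = query.where("age", ">", 30).or_where("name", "=", "Ada").get()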
java | public Geometry getGeometryN(int n) {
if (isEmpty()) {
return null;
}
if (n >= 0 && n < points.length) {
return points[n];
}
throw new ArrayIndexOutOfBoundsException(n);
// return this;
} |
python | def coombs_winners(self, profile):
"""
Returns an integer list that represents all possible winners of a profile under Coombs rule.
:ivar Profile profile: A Profile object that represents an election profile.
"""
elecType = profile.getElecType()
if elecType == "soc" or elecType == "csv":
return self.coombssoc_winners(profile)
elif elecType == "toc":
return self.coombstoc_winners(profile)
else:
print("ERROR: unsupported profile type")
exit() |
java | private static boolean postAggregatorDirectColumnIsOk(
final RowSignature aggregateRowSignature,
final DruidExpression expression,
final RexNode rexNode
)
{
if (!expression.isDirectColumnAccess()) {
return false;
}
// Check if a cast is necessary.
final ExprType toExprType = Expressions.exprTypeForValueType(
aggregateRowSignature.getColumnType(expression.getDirectColumn())
);
final ExprType fromExprType = Expressions.exprTypeForValueType(
Calcites.getValueTypeForSqlTypeName(rexNode.getType().getSqlTypeName())
);
return toExprType.equals(fromExprType);
} |
java | public void trace(final Marker marker, final String message) {
log.trace(marker, sanitize(message));
} |
python | def post(self, path, payload, callback=None, timeout=None, no_response=False, **kwargs): # pragma: no cover
"""
Perform a POST on a certain path.
:param path: the path
:param payload: the request payload
:param callback: the callback function to invoke upon response
:param timeout: the timeout of the request
:param no_response: whether to mark the request with the CoAP No-Response option
:return: the response
"""
request = self.mk_request(defines.Codes.POST, path)
request.token = generate_random_token(2)
request.payload = payload
if no_response:
request.add_no_response()
request.type = defines.Types["NON"]
for k, v in kwargs.items():
if hasattr(request, k):
setattr(request, k, v)
return self.send_request(request, callback, timeout, no_response=no_response) |
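A usage sketch, with the client object, path, and payload purely illustrative:

response = client.post("sensors/led", '{"on": true}', timeout=5)  # blocking call
client.post("sensors/led", '{"on": true}', callback=on_done)      # async; on_done is hypothetical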
python | def find_lines(config=None, config_path=None, regex=None, saltenv='base'):
'''
Return all the lines (as text) that match the expression in the ``regex``
argument.
config
The configuration sent as text.
.. note::
This argument is ignored when ``config_path`` is specified.
config_path
The absolute or remote path to the file with the configuration to be
parsed. This argument supports the usual Salt filesystem URIs, e.g.,
``salt://``, ``https://``, ``ftp://``, ``s3://``, etc.
regex
The regular expression to match the lines against.
saltenv: ``base``
Salt fileserver environment from which to retrieve the file. This
argument is ignored when ``config_path`` is not a ``salt://`` URL.
CLI Example:
.. code-block:: bash
salt '*' ciscoconfparse.find_lines config_path=https://bit.ly/2mAdq7z regex='ip address'
Output example:
.. code-block:: text
cisco-ios-router:
- ip address dhcp
- ip address 172.20.0.1 255.255.255.0
- no ip address
'''
lines = find_objects(config=config,
config_path=config_path,
regex=regex,
saltenv=saltenv)
return [line.text for line in lines] |
python | def get_vpn(self, vpn_name):
"""Returns a specific VPN name details from CPNR server."""
request_url = self._build_url(['VPN', vpn_name])
return self._do_request('GET', request_url) |
java | @Override
public GetReplicationJobsResult getReplicationJobs(GetReplicationJobsRequest request) {
request = beforeClientExecution(request);
return executeGetReplicationJobs(request);
} |
java | public Stream<String> containing(double lat, double lon) {
return shapes.keySet().stream()
// select if contains lat lon
.filter(name -> shapes.get(name).contains(lat, lon));
} |
java | private String zeroEndByteArrayToString(byte[] bytes) throws IOException
{
int i;
for (i = 0; i < bytes.length && bytes[i] != 0; i++);
return new String( bytes, 0, i );
} |
python | def to_pngs(pdf_path):
''' Converts a multi-page pdfs to a list of pngs via the `sips` command
:returns: A list of converted pngs
'''
pdf_list = split_pdf(pdf_path)
pngs = []
for pdf in pdf_list:
pngs.append(to_png(pdf))
os.remove(pdf) # Clean up
return pngs |
python | def is_date(value,
minimum = None,
maximum = None,
coerce_value = False,
**kwargs):
"""Indicate whether ``value`` is a :class:`date <python:datetime.date>`.
:param value: The value to evaluate.
:param minimum: If supplied, will make sure that ``value`` is on or after
this value.
:type minimum: :class:`datetime <python:datetime.datetime>` /
:class:`date <python:datetime.date>` / compliant :class:`str <python:str>`
/ :obj:`None <python:None>`
:param maximum: If supplied, will make sure that ``value`` is on or before this
value.
:type maximum: :class:`datetime <python:datetime.datetime>` /
:class:`date <python:datetime.date>` / compliant :class:`str <python:str>`
/ :obj:`None <python:None>`
:param coerce_value: If ``True``, will return ``True`` if ``value`` can be
coerced to a :class:`date <python:datetime.date>`. If ``False``,
will only return ``True`` if ``value`` is a date value only. Defaults to
``False``.
:type coerce_value: :class:`bool <python:bool>`
:returns: ``True`` if ``value`` is valid, ``False`` if it is not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator
"""
try:
value = validators.date(value,
minimum = minimum,
maximum = maximum,
coerce_value = coerce_value,
**kwargs)
except SyntaxError as error:
raise error
except Exception:
return False
return True |
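A behavior sketch inferred from the docstring above (the exact coercion rules live in the underlying validator):

from datetime import date
is_date(date(2024, 1, 31))                            # True: already a date
is_date("not a date")                                 # False: cannot be interpreted as a date
is_date(date(2024, 1, 31), minimum=date(2025, 1, 1))  # False: earlier than the minimum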
java | private boolean tryResolve(final CLClause c, final int pivot, final CLClause d) {
assert !c.dumped() && !c.satisfied();
assert !d.dumped() && !d.satisfied();
boolean res = true;
assert this.seen.empty();
this.stats.steps++;
for (int i = 0; i < c.lits().size(); i++) {
final int lit = c.lits().get(i);
if (lit == pivot) { continue; }
assert marked(lit) == 0;
mark(lit);
}
this.stats.steps++;
for (int p = 0; res && p < d.lits().size(); p++) {
final int lit = d.lits().get(p);
if (lit == -pivot) { continue; }
final int m = marked(lit);
if (m > 0) { continue; }
if (m < 0) { res = false; } else { mark(lit); }
}
unmark();
return res;
} |
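The same check as a short Python illustration (not the library's API): resolving c and d on the pivot is rejected exactly when the resolvent would contain a complementary pair of literals.

def try_resolve(c, d, pivot):
    # Literals are nonzero ints; -x denotes the negation of x.
    marked = {lit for lit in c if lit != pivot}
    for lit in d:
        if lit == -pivot:
            continue
        if -lit in marked:
            return False  # resolvent would contain both x and -x
        marked.add(lit)
    return True

assert try_resolve([1, 2], [-1, 3], 1)       # resolvent {2, 3} is fine
assert not try_resolve([1, 2], [-1, -2], 1)  # resolvent {2, -2} is tautological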
python | def add(self, val):
"""Add the element *val* to the list."""
_lists = self._lists
_keys = self._keys
_maxes = self._maxes
key = self._key(val)
if _maxes:
pos = bisect_right(_maxes, key)
if pos == len(_maxes):
pos -= 1
_lists[pos].append(val)
_keys[pos].append(key)
_maxes[pos] = key
else:
idx = bisect_right(_keys[pos], key)
_lists[pos].insert(idx, val)
_keys[pos].insert(idx, key)
self._expand(pos)
else:
_lists.append([val])
_keys.append([key])
_maxes.append(key)
self._len += 1 |
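A usage sketch, assuming a SortedListWithKey-style container built around this add method:

sl = SortedListWithKey(key=lambda x: -x)  # hypothetical construction; descending order
for v in [3, 1, 2]:
    sl.add(v)
assert list(sl) == [3, 2, 1]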
java | @SuppressWarnings("rawtypes")
@Override
protected Object convertToType(Class type, Object value) throws Throwable {
return KeyFactory.stringToKey(value.toString());
} |
java | public void close()
{
LOGGER.entering(CLASS_NAME, "close");
synchronized (lock)
{
if (serviceAgent != null)
{
serviceRegistration.unregister();
if (LOGGER.isLoggable(Level.FINER)) LOGGER.finer("Service Agent " + this + " stopping...");
serviceAgent.stop();
if (LOGGER.isLoggable(Level.FINE)) LOGGER.fine("Service Agent " + this + " stopped successfully");
}
}
LOGGER.exiting(CLASS_NAME, "close");
} |
python | def begin_training(self, get_gold_tuples=None, sgd=None, component_cfg=None, **cfg):
"""Allocate models, pre-process training data and acquire a trainer and
optimizer. Used as a contextmanager.
get_gold_tuples (function): Function returning gold data
component_cfg (dict): Config parameters for specific components.
**cfg: Config parameters.
RETURNS: An optimizer.
DOCS: https://spacy.io/api/language#begin_training
"""
if get_gold_tuples is None:
get_gold_tuples = lambda: []
else:
# Populate vocab
for _, annots_brackets in get_gold_tuples():
for annots, _ in annots_brackets:
for word in annots[1]:
_ = self.vocab[word] # noqa: F841
if cfg.get("device", -1) >= 0:
util.use_gpu(cfg["device"])
if self.vocab.vectors.data.shape[1] >= 1:
self.vocab.vectors.data = Model.ops.asarray(self.vocab.vectors.data)
link_vectors_to_models(self.vocab)
if self.vocab.vectors.data.shape[1]:
cfg["pretrained_vectors"] = self.vocab.vectors.name
if sgd is None:
sgd = create_default_optimizer(Model.ops)
self._optimizer = sgd
if component_cfg is None:
component_cfg = {}
for name, proc in self.pipeline:
if hasattr(proc, "begin_training"):
kwargs = component_cfg.get(name, {})
kwargs.update(cfg)
proc.begin_training(
get_gold_tuples,
pipeline=self.pipeline,
sgd=self._optimizer,
**kwargs
)
return self._optimizer |
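A typical call pattern for the method above, following the linked docs (train_data is a placeholder for gold tuples):

optimizer = nlp.begin_training(lambda: train_data)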
python | def get_keygrip(user_id, sp=subprocess):
"""Get a keygrip of the primary GPG key of the specified user."""
args = gpg_command(['--list-keys', '--with-keygrip', user_id])
output = check_output(args=args, sp=sp).decode('utf-8')
return re.findall(r'Keygrip = (\w+)', output)[0] |
java | public DirectedMultigraph<String> readDirectedMultigraph(
File f, Indexer<String> vertexLabels) throws IOException {
throw new UnsupportedOperationException();
} |
python | def _set_range_common(self, k_sugar, k_start, k_end, value):
"""
Checks to see if the client-side convenience key is present, and if so
converts the sugar convenience key into its real server-side
equivalents.
:param string k_sugar: The client-side convenience key
:param string k_start: The server-side key specifying the beginning of
the range
:param string k_end: The server-side key specifying the end of the
range
"""
if not isinstance(value, (list, tuple, _Unspec)):
raise ArgumentError.pyexc(
"Range specification for {0} must be a list, tuple or UNSPEC"
.format(k_sugar))
if self._user_options.get(k_start, UNSPEC) is not UNSPEC or (
self._user_options.get(k_end, UNSPEC) is not UNSPEC):
raise ArgumentError.pyexc(
"Cannot specify {0} with either {1} or {2}"
.format(k_sugar, k_start, k_end))
if not value:
self._set_common(k_start, UNSPEC, set_user=False)
self._set_common(k_end, UNSPEC, set_user=False)
self._user_options[k_sugar] = UNSPEC
return
if len(value) not in (1, 2):
raise ArgumentError.pyexc("Range specification "
"must have one or two elements",
value)
value = value[::]
if len(value) == 1:
value.append(UNSPEC)
for p, ix in ((k_start, 0), (k_end, 1)):
self._set_common(p, value[ix], set_user=False)
self._user_options[k_sugar] = value |
python | def remove_words(self, words):
""" Remove a list of words from the word frequency list
Args:
words (list): The list of words to remove """
for word in words:
self._dictionary.pop(word.lower())
self._update_dictionary() |
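A small usage sketch (the object name is hypothetical); note that pop() raises KeyError for words not already in the frequency list:

wf.remove_words(["teh", "wrod"])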
java | @Override
public void saturate(IAtomContainer atomContainer) throws CDKException {
logger.info("Saturating atomContainer by adjusting bond orders...");
boolean allSaturated = allSaturated(atomContainer);
if (!allSaturated) {
logger.info("Saturating bond orders is needed...");
IBond[] bonds = new IBond[atomContainer.getBondCount()];
for (int i = 0; i < bonds.length; i++)
bonds[i] = atomContainer.getBond(i);
boolean succeeded = saturate(bonds, atomContainer);
if (!succeeded) {
throw new CDKException("Could not saturate this atomContainer!");
}
}
} |
python | def view_task(self, task):
"""View the given task
:param task: the task to view
:type task: :class:`jukeboxcore.djadapter.models.Task`
:returns: None
:rtype: None
:raises: None
"""
log.debug('Viewing task %s', task.name)
self.cur_task = None
self.pages_tabw.setCurrentIndex(7)
self.task_dep_le.setText(task.name)
statusmap = {"New": 0, "Open": 1, "Done": 2}
self.task_status_cb.setCurrentIndex(statusmap.get(task.status, -1))
dt = dt_to_qdatetime(task.deadline) if task.deadline else None
self.task_deadline_de.setDateTime(dt)
self.task_link_le.setText(task.element.name)
userrootdata = treemodel.ListItemData(['Username', 'First', 'Last', 'Email'])
userrootitem = treemodel.TreeItem(userrootdata)
for user in task.users.all():
userdata = djitemdata.UserItemData(user)
treemodel.TreeItem(userdata, userrootitem)
self.task_user_model = treemodel.TreeModel(userrootitem)
self.task_user_tablev.setModel(self.task_user_model)
self.cur_task = task |
python | def _bse_cli_list_basis_sets(args):
'''Handles the list-basis-sets subcommand'''
metadata = api.filter_basis_sets(args.substr, args.family, args.role, args.data_dir)
if args.no_description:
liststr = metadata.keys()
else:
liststr = format_columns([(k, v['description']) for k, v in metadata.items()])
return '\n'.join(liststr) |
python | def enable_mfa_device(self,
user_name,
serial_number,
authentication_code_1,
authentication_code_2):
"""Enable MFA Device for user."""
user = self.get_user(user_name)
if serial_number in user.mfa_devices:
raise IAMConflictException(
"EntityAlreadyExists",
"Device {0} already exists".format(serial_number)
)
user.enable_mfa_device(
serial_number,
authentication_code_1,
authentication_code_2
) |
python | def create_with_validation (raw_properties):
""" Creates new 'PropertySet' instances after checking
that all properties are valid and converting implicit
properties into gristed form.
"""
assert is_iterable_typed(raw_properties, basestring)
properties = [property.create_from_string(s) for s in raw_properties]
property.validate(properties)
return create(properties) |
java | private long pendingVideoMessages() {
IMessageOutput out = msgOutReference.get();
if (out != null) {
OOBControlMessage pendingRequest = new OOBControlMessage();
pendingRequest.setTarget("ConnectionConsumer");
pendingRequest.setServiceName("pendingVideoCount");
out.sendOOBControlMessage(this, pendingRequest);
if (pendingRequest.getResult() != null) {
return (Long) pendingRequest.getResult();
}
}
return 0;
} |