language | func_code_string |
---|---|
java | public <T> T cast(Object obj, Class<T> clz) {
if (!clz.isAssignableFrom(obj.getClass())) {
return null;
}
return clz.cast(obj);
} |
python | def intersect_exposure_and_aggregate_hazard(self):
    """This function intersects the exposure with the aggregate hazard.
    If the exposure is a continuous raster exposure, this function
    will set the aggregate hazard layer.
    Otherwise, this function will set the impact layer.
    """
    LOGGER.info('ANALYSIS : Intersect Exposure and Aggregate Hazard')
    if is_raster_layer(self.exposure):
        self.set_state_process(
            'impact function',
            'Zonal stats between exposure and aggregate hazard')
        # Be careful, our own zonal stats will take care of different
        # projections between the two layers. We don't want to reproject
        # rasters.
        # noinspection PyTypeChecker
        self._aggregate_hazard_impacted = zonal_stats(
            self.exposure, self._aggregate_hazard_impacted)
        self.debug_layer(self._aggregate_hazard_impacted)
        self.set_state_process('impact function', 'Add default values')
        self._aggregate_hazard_impacted = add_default_values(
            self._aggregate_hazard_impacted)
        self.debug_layer(self._aggregate_hazard_impacted)
        # I know it's redundant, it's just to be sure that we don't have
        # any impact layer for that IF.
        self._exposure_summary = None
    else:
        indivisible_keys = [f['key'] for f in indivisible_exposure]
        geometry = self.exposure.geometryType()
        exposure = self.exposure.keywords.get('exposure')
        is_divisible = exposure not in indivisible_keys
        if geometry in [
                QgsWkbTypes.LineGeometry,
                QgsWkbTypes.PolygonGeometry] and is_divisible:
            self.set_state_process(
                'exposure', 'Make exposure layer valid')
            self._exposure = clean_layer(self.exposure)
            self.debug_layer(self.exposure)
            self.set_state_process(
                'impact function', 'Make aggregate hazard layer valid')
            self._aggregate_hazard_impacted = clean_layer(
                self._aggregate_hazard_impacted)
            self.debug_layer(self._aggregate_hazard_impacted)
            self.set_state_process(
                'impact function',
                'Intersect divisible features with the aggregate hazard')
            self._exposure_summary = intersection(
                self._exposure, self._aggregate_hazard_impacted)
            self.debug_layer(self._exposure_summary)
            # If the layer has the size field, it means we need to
            # recompute counts based on the old and new size.
            fields = self._exposure_summary.keywords['inasafe_fields']
            if size_field['key'] in fields:
                self.set_state_process(
                    'impact function',
                    'Recompute counts')
                LOGGER.info(
                    'InaSAFE will not use these counts, as we have ratios '
                    'since the exposure preparation step.')
                self._exposure_summary = recompute_counts(
                    self._exposure_summary)
                self.debug_layer(self._exposure_summary)
        else:
            self.set_state_process(
                'impact function',
                'Highest class of hazard is assigned to the exposure')
            self._exposure_summary = assign_highest_value(
                self._exposure, self._aggregate_hazard_impacted)
            self.debug_layer(self._exposure_summary)
        # Set the title using the definition. The title will be
        # overwritten anyway by the standard title; set this as fallback.
        self._exposure_summary.keywords['title'] = (
            layer_purpose_exposure_summary['name'])
        if qgis_version() >= 21800:
            self._exposure_summary.setName(
                self._exposure_summary.keywords['title'])
        else:
            self._exposure_summary.setLayerName(
                self._exposure_summary.keywords['title']) |
python | def _parse (self):
    """Parse the BDF mime structure and record the locations of the binary
    blobs. Sets up various data fields in the BDFData object."""
    feedparser = FeedParser (Message)
    binarychunks = {}
    sizeinfo = None
    headxml = None
    self.fp.seek (0, 0)
    while True:
        data = self.fp.readline ()
        if not data:
            break
        feedparser.feed (data)
        skip = (data == '\n' and
                len (feedparser._msgstack) == 3 and
                feedparser._msgstack[-1].get_content_type () in ('application/octet-stream',
                                                                 'binary/octet-stream'))
        if skip:
            # We just finished reading the headers for a huge binary blob.
            # Time to remember where the data chunk is and pretend it doesn't
            # exist.
            msg = feedparser._msgstack[-1]
            ident = msg['Content-Location']
            assert ident.endswith ('.bin'), 'confusion #1 in hacky MIME parsing!'
            binarychunks[ident] = self.fp.tell ()
            if sizeinfo is None:
                headxml, sizeinfo, tagpfx = _extract_size_info (feedparser)
            kind = ident.split ('/')[-1]
            assert kind in sizeinfo, 'no size info for binary chunk kind %s in MIME!' % kind
            self.fp.seek (sizeinfo[kind] + 1, 1)  # skip ahead by data chunk size
            sample = self.fp.read (16)
            assert sample.startswith ('--MIME'), 'crap, unexpected chunk size in MIME parsing: %r' % sample
            self.fp.seek (-16, 1)  # go back
            # check that two major kinds of data are read at least once
            if any([k.split('/')[3] == '3' for k in binarychunks.iterkeys()]):
                break
    if headxml is None:
        raise RuntimeError ('never found any binary data')
    self.mimemsg = feedparser.close ()
    self.headxml = headxml
    self.sizeinfo = sizeinfo
    self.binarychunks = binarychunks
    headsize, intsize = self.calc_intsize()
    # Compute some miscellaneous parameters that we'll need.
    # self.n_integrations = len (self.mimemsg.get_payload ()) - 1
    self.n_integrations = os.stat(self.fp.name).st_size/intsize
    self.n_antennas = int (headxml.find (tagpfx + nanttag).text)
    self.n_baselines = (self.n_antennas * (self.n_antennas - 1)) // 2
    ds = headxml.find (tagpfx + dstag)
    nbb = 0
    nspw = 0
    nchan = 0
    crosspolstr = None
    for bb in ds.findall (tagpfx + basebandtag):
        nbb += 1
        for spw in bb.getchildren ():
            nspw += 1
            nchan += int (spw.get ('numSpectralPoint'))
            if crosspolstr is None:
                crosspolstr = spw.get ('crossPolProducts')
            elif spw.get ('crossPolProducts') != crosspolstr:
                raise Exception ('can only handle spectral windows with identical cross pol products')
    self.n_basebands = nbb
    self.n_spws = nspw
    self.n_channels = nchan
    self.crosspols = crosspolstr.split ()
    self.n_pols = len(self.crosspols)
    # if bdf info pkl not present, write it
    if os.path.exists(os.path.dirname(self.pklname)) and self.pklname and (not os.path.exists(self.pklname)):
        logger.info('Writing bdf pkl info to %s...' % (self.pklname))
        with open(self.pklname, 'wb') as pkl:
            pickle.dump( (self.mimemsg, self.headxml, self.sizeinfo, self.binarychunks, self.n_integrations, self.n_antennas, self.n_baselines, self.n_basebands, self.n_spws, self.n_channels, self.crosspols), pkl)
    return self |
java | public void setViews(Collection<View> views) throws IOException {
BulkChange bc = new BulkChange(this);
try {
this.views.clear();
for (View v : views) {
addView(v);
}
} finally {
bc.commit();
}
} |
python | def as_proximal_lang_operator(op, norm_bound=None):
    """Wrap ``op`` as a ``proximal.BlackBox``.
    This is intended to be used with the `ProxImaL language solvers
    <https://github.com/comp-imaging/proximal>`_.
    For documentation on the proximal language (ProxImaL) see [Hei+2016].
    Parameters
    ----------
    op : `Operator`
        Linear operator to be wrapped. Its domain and range must implement
        ``shape``, and elements in these need to implement ``asarray``.
    norm_bound : float, optional
        An upper bound on the spectral norm of the operator. Note that this is
        the norm as defined by ProxImaL, and hence uses the unweighted spaces.
    Returns
    -------
    proximal_lang_operator : ``proximal.BlackBox``
        The wrapped operator.
    Notes
    -----
    If the data representation of ``op``'s domain and range is of type
    `NumpyTensorSpace` this incurs no significant overhead. If the data
    space is implemented with CUDA or some other non-local representation,
    the overhead is significant.
    References
    ----------
    [Hei+2016] Heide, F et al. *ProxImaL: Efficient Image Optimization using
    Proximal Algorithms*. ACM Transactions on Graphics (TOG), 2016.
    """
    # TODO: use out parameter once "as editable array" is added
    def forward(inp, out):
        out[:] = op(inp).asarray()
    def adjoint(inp, out):
        out[:] = op.adjoint(inp).asarray()
    import proximal
    return proximal.LinOpFactory(input_shape=op.domain.shape,
                                 output_shape=op.range.shape,
                                 forward=forward,
                                 adjoint=adjoint,
                                 norm_bound=norm_bound) |
java | public void setOptimizationOptions(Map<String, Boolean> options) {
if (options == null) throw new IllegalArgumentException("provided option map must not be null");
optimizationOptions = options;
} |
python | def get_key(keyfile=None):
    """
    Read the key content from secret_file
    """
    keyfile = keyfile or application_path(settings.SECRETKEY.SECRET_FILE)
    with open(keyfile, 'rb') as f:
        return f.read() |
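A minimal usage sketch for the row above, assuming `get_key` and its `settings`/`application_path` helpers are importable; the key file path and contents here are hypothetical:

```python
# Hypothetical demo: write a key file, then read it back with get_key.
with open('/tmp/secret.key', 'wb') as f:
    f.write(b'super-secret-key-material')

key = get_key('/tmp/secret.key')  # an explicit path bypasses the settings lookup
assert key == b'super-secret-key-material'
```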
python | def on_right_click(self, widget):
    """Cycle the cell's marker state on right click."""
    if self.opened:
        return
    self.state = (self.state + 1) % 3
    self.set_icon()
    self.game.check_if_win() |
java | @SuppressWarnings("unchecked")
@Override
protected void initParser() throws MtasConfigException {
super.initParser();
if (config != null) {
// always word, no mappings
wordType = new MtasParserType<>(MAPPING_TYPE_WORD, null, false);
for (int i = 0; i < config.children.size(); i++) {
MtasConfiguration current = config.children.get(i);
if (current.name.equals("filters")) {
for (int j = 0; j < current.children.size(); j++) {
if (current.children.get(j).name.equals("filter")) {
MtasConfiguration filter = current.children.get(j);
String typeFilter = filter.attributes.get("type");
String nameFilter = filter.attributes.get("name");
if(typeFilter!=null) {
if(typeFilter.equals(FILTER_TYPE_REPLACE)) {
String value = filter.attributes.get("value");
String replace = filter.attributes.get("replace");
if(nameFilter!=null && value!=null && replace!=null) {
String[] names = nameFilter.split(Pattern.quote(","));
for(String name : names) {
try {
int nameInt = Integer.parseInt(name);
HashMap<String, String> nameMap;
if(!filterReplace.containsKey(nameInt)) {
nameMap = new HashMap<>();
filterReplace.put(nameInt, nameMap);
} else {
nameMap = filterReplace.get(nameInt);
}
nameMap.put(value, replace);
} catch (NumberFormatException e) {
log.info(e);
}
}
} else {
throw new MtasConfigException("no name, value or replace for filter "
+ typeFilter );
}
} else {
throw new MtasConfigException("unknown filter type "
+ typeFilter );
}
} else {
throw new MtasConfigException("no type provided for filter" );
}
}
}
} else if (current.name.equals("mappings")) {
for (int j = 0; j < current.children.size(); j++) {
if (current.children.get(j).name.equals("mapping")) {
MtasConfiguration mapping = current.children.get(j);
String typeMapping = mapping.attributes.get("type");
String nameMapping = mapping.attributes.get("name");
if ((typeMapping != null)) {
if (typeMapping.equals(MAPPING_TYPE_WORD)) {
MtasCRMParserMappingWordAnnotation m = new MtasCRMParserMappingWordAnnotation();
m.processConfig(mapping);
wordType.addItem(m);
} else if (typeMapping.equals(MAPPING_TYPE_WORD_ANNOTATION)
&& (nameMapping != null)) {
MtasCRMParserMappingWordAnnotation m = new MtasCRMParserMappingWordAnnotation();
m.processConfig(mapping);
if (wordAnnotationTypes.containsKey(nameMapping)) {
wordAnnotationTypes.get(nameMapping).addItem(m);
} else {
MtasParserType<MtasParserMapping<?>> t = new MtasParserType<>(
typeMapping, nameMapping, false);
t.addItem(m);
wordAnnotationTypes.put(nameMapping, t);
}
} else if (typeMapping.equals(MAPPING_TYPE_CRM_SENTENCE)) {
MtasCRMParserMappingCRMSentence m = new MtasCRMParserMappingCRMSentence();
m.processConfig(mapping);
if (crmSentenceTypes.containsKey(nameMapping)) {
crmSentenceTypes.get(nameMapping).addItem(m);
} else {
MtasParserType<MtasParserMapping<?>> t = new MtasParserType<>(
MAPPING_TYPE_GROUP, nameMapping, true);
t.addItem(m);
crmSentenceTypes.put(nameMapping, t);
}
} else if (typeMapping.equals(MAPPING_TYPE_CRM_CLAUSE)) {
MtasCRMParserMappingCRMSentence m = new MtasCRMParserMappingCRMSentence();
m.processConfig(mapping);
if (crmClauseTypes.containsKey(nameMapping)) {
crmClauseTypes.get(nameMapping).addItem(m);
} else {
MtasParserType<MtasParserMapping<?>> t = new MtasParserType<>(
MAPPING_TYPE_GROUP, nameMapping, true);
t.addItem(m);
crmClauseTypes.put(nameMapping, t);
}
} else if (typeMapping.equals(MAPPING_TYPE_CRM_PAIR)) {
MtasCRMParserMappingCRMPair m = new MtasCRMParserMappingCRMPair();
m.processConfig(mapping);
if (crmPairTypes.containsKey(nameMapping)) {
crmPairTypes.get(nameMapping).addItem(m);
} else {
MtasParserType<MtasParserMapping<?>> t = new MtasParserType<>(
MAPPING_TYPE_RELATION, nameMapping, true);
t.addItem(m);
crmPairTypes.put(nameMapping, t);
}
} else {
throw new MtasConfigException("unknown mapping type "
+ typeMapping + " or missing name");
}
}
}
}
} else if (current.name.equals("functions")) {
for (int j = 0; j < current.children.size(); j++) {
if (current.children.get(j).name.equals("function")) {
MtasConfiguration function = current.children.get(j);
String nameFunction = function.attributes.get("name");
String typeFunction = function.attributes.get("type");
String splitFunction = function.attributes.get("split");
if (nameFunction != null && typeFunction != null) {
MtasCRMParserFunction mtasCRMParserFunction = new MtasCRMParserFunction(
typeFunction, splitFunction);
if (!functions.containsKey(typeFunction)) {
functions.put(typeFunction,
new HashMap<String, MtasCRMParserFunction>());
}
functions.get(typeFunction).put(nameFunction,
mtasCRMParserFunction);
MtasConfiguration subCurrent = current.children.get(j);
for (int k = 0; k < subCurrent.children.size(); k++) {
if (subCurrent.children.get(k).name.equals("condition")) {
MtasConfiguration subSubCurrent = subCurrent.children
.get(k);
if (subSubCurrent.attributes.containsKey("value")) {
String[] valuesCondition = subSubCurrent.attributes
.get("value").split(Pattern.quote(","));
ArrayList<MtasCRMParserFunctionOutput> valueOutputList = new ArrayList<>();
for (int l = 0; l < subSubCurrent.children.size(); l++) {
if (subSubCurrent.children.get(l).name
.equals("output")) {
String valueOutput = subSubCurrent.children
.get(l).attributes.get("value");
String nameOutput = subSubCurrent.children
.get(l).attributes.get("name");
if (nameOutput != null) {
MtasCRMParserFunctionOutput o = new MtasCRMParserFunctionOutput(
nameOutput, valueOutput);
valueOutputList.add(o);
}
}
}
if (!valueOutputList.isEmpty()) {
for (String valueCondition : valuesCondition) {
if (mtasCRMParserFunction.output
.containsKey(valueCondition)) {
mtasCRMParserFunction.output.get(valueCondition)
.addAll(
(Collection<? extends MtasCRMParserFunctionOutput>) valueOutputList
.clone());
} else {
mtasCRMParserFunction.output.put(valueCondition,
(ArrayList<MtasCRMParserFunctionOutput>) valueOutputList
.clone());
}
}
}
}
}
}
}
}
}
}
}
}
} |
python | def expand (self, user=False, vars=False, glob=False, resolve=False):
    """Return a new :class:`Path` with various expansions performed. All
    expansions are disabled by default but can be enabled by passing in
    true values in the keyword arguments.
    user : bool (default False)
      Expand ``~`` and ``~user`` home-directory constructs. If a username is
      unmatched or ``$HOME`` is unset, no change is made. Calls
      :func:`os.path.expanduser`.
    vars : bool (default False)
      Expand ``$var`` and ``${var}`` environment variable constructs. Unknown
      variables are not substituted. Calls :func:`os.path.expandvars`.
    glob : bool (default False)
      Evaluate the path as a :mod:`glob` expression and use the matched path.
      If the glob does not match anything, do not change anything. If the
      glob matches more than one path, raise an :exc:`IOError`.
    resolve : bool (default False)
      Call :meth:`resolve` on the return value before returning it.
    """
    from os import path
    text = text_type (self)
    if user:
        text = path.expanduser (text)
    if vars:
        text = path.expandvars (text)
    if glob:
        # Import inside the branch: the module-level name would otherwise
        # shadow the boolean `glob` argument checked just above.
        from glob import glob
        results = glob (text)
        if len (results) == 1:
            text = results[0]
        elif len (results) > 1:
            raise IOError ('glob of %r should\'ve returned 0 or 1 matches; got %d'
                           % (text, len (results)))
    other = self.__class__ (text)
    if resolve:
        other = other.resolve ()
    return other |
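A usage sketch for `expand` above, assuming a `Path` class exposing this method; the environment variable and file layout are hypothetical:

```python
import os

os.environ['DATASET'] = 'run42'
p = Path('~/data/$DATASET/*.fits')
# Expands "~", substitutes $DATASET, then globs; per the docstring,
# an IOError is raised if the glob matches more than one path.
resolved = p.expand(user=True, vars=True, glob=True)
```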
python | def path_to_tuple(path, windows=False):
    """
    Split `path` into individual parts and form a tuple (used as key).
    """
    if windows:
        path_tup = tuple(path.split('\\'))
    else:
        path_tup = tuple(path.split('/'))
    # Normalize UTF-8 encoding to consistent form so cache lookups will work, see
    # https://docs.python.org/3.6/library/unicodedata.html#unicodedata.normalize
    path_tup = tuple(normalize('NFD', part) for part in path_tup)
    return path_tup |
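A quick check of the behavior above; note that NFD normalization means a precomposed "é" and its decomposed form map to the same tuple key:

```python
assert path_to_tuple('a/b/c.txt') == ('a', 'b', 'c.txt')
assert path_to_tuple('a\\b\\c.txt', windows=True) == ('a', 'b', 'c.txt')
# Precomposed U+00E9 and decomposed 'e' + U+0301 collide after NFD:
assert path_to_tuple('caf\u00e9') == path_to_tuple('cafe\u0301')
```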
java | private String getWord(String[] nodeFeatures) {
String word = nodeFeatures[formIndex];
// Filter if necessary.
if (filter != null && !filter.accept(word))
return IteratorFactory.EMPTY_TOKEN;
return word;
} |
java | public void setDbWorkUser(String dbWorkUser) {
setExtProperty(CmsDbPoolV11.KEY_DATABASE_POOL + '.' + getPool() + '.' + CmsDbPoolV11.KEY_POOL_USER, dbWorkUser);
} |
python | def deliver(self, project, new_project_name, to_user, share_users, force_send, path_filter, user_message):
    """
    Remove access to project_name for to_user, copy to new_project_name if not None,
    send message to service to email user so they can have access.
    :param project: RemoteProject pre-existing project to be delivered
    :param new_project_name: str name of non-existing project to copy project_name to, if None we don't copy
    :param to_user: RemoteUser user we are handing over the project to
    :param share_users: [RemoteUser] who will have project shared with them once to_user accepts the project
    :param force_send: boolean enables resending of email for existing projects
    :param path_filter: PathFilter: filters what files are shared
    :param user_message: str message to be sent with the share
    :return: str email we sent deliver to
    """
    if self._is_current_user(to_user):
        raise ShareWithSelfError(SHARE_WITH_SELF_MESSAGE.format("deliver"))
    if not to_user.email:
        self._raise_user_missing_email_exception("deliver")
    self.remove_user_permission(project, to_user)
    if new_project_name:
        project = self._copy_project(project, new_project_name, path_filter)
    return self._share_project(D4S2Api.DELIVER_DESTINATION, project, to_user,
                               force_send, user_message=user_message, share_users=share_users) |
java | @Override
public int getCacheSize() {
CacheConfig commonCacheConfig = ServerCache.getCacheService().getCacheConfig();
if (commonCacheConfig != null) {
return commonCacheConfig.cacheSize;
}
return 0;
} |
python | def POST(self):  # pylint: disable=arguments-differ
    """ Display main course list page """
    if not self.app.welcome_page:
        raise web.seeother("/courselist")
    return self.show_page(self.app.welcome_page) |
java | public static User decode(int contextId, String encodedString) {
// Added proxy call to help in testing
return decode(contextId, encodedString, User.getAuthenticationExtension());
} |
python | def is_valid_channel(self,
                      channel,
                      conda_url='https://conda.anaconda.org',
                      non_blocking=True):
    """Check if a conda channel is valid."""
    logger.debug(str((channel, conda_url)))
    if non_blocking:
        method = self._is_valid_channel
        return self._create_worker(method, channel, conda_url)
    else:
        return self._is_valid_channel(channel, conda_url=conda_url) |
java | private OnItemLongClickListener createItemLongClickListener() {
return new OnItemLongClickListener() {
@Override
public boolean onItemLongClick(AdapterView<?> parent, View view, int position,
long id) {
Pair<Integer, Integer> itemPosition =
getItemPosition(position - getHeaderViewsCount());
int groupIndex = itemPosition.first;
int childIndex = itemPosition.second;
long packedId;
if (childIndex != -1) {
packedId = getPackedPositionForChild(groupIndex, childIndex);
} else if (groupIndex != -1) {
packedId = getPackedPositionForGroup(groupIndex);
} else {
packedId = getPackedPositionForChild(Integer.MAX_VALUE, position);
}
return notifyOnItemLongClicked(view, getPackedPosition(position), packedId);
}
};
} |
java | public java.rmi.Remote getPort(Class serviceEndpointInterface) throws javax.xml.rpc.ServiceException {
try {
if (com.google.api.ads.adwords.axis.v201809.cm.AdGroupExtensionSettingServiceInterface.class.isAssignableFrom(serviceEndpointInterface)) {
com.google.api.ads.adwords.axis.v201809.cm.AdGroupExtensionSettingServiceSoapBindingStub _stub = new com.google.api.ads.adwords.axis.v201809.cm.AdGroupExtensionSettingServiceSoapBindingStub(new java.net.URL(AdGroupExtensionSettingServiceInterfacePort_address), this);
_stub.setPortName(getAdGroupExtensionSettingServiceInterfacePortWSDDServiceName());
return _stub;
}
}
catch (java.lang.Throwable t) {
throw new javax.xml.rpc.ServiceException(t);
}
throw new javax.xml.rpc.ServiceException("There is no stub implementation for the interface: " + (serviceEndpointInterface == null ? "null" : serviceEndpointInterface.getName()));
} |
java | public java.util.List<AttachmentDetails> getAttachmentSet() {
if (attachmentSet == null) {
attachmentSet = new com.amazonaws.internal.SdkInternalList<AttachmentDetails>();
}
return attachmentSet;
} |
java | private static ConfigurationModule addAll(final ConfigurationModule conf,
final OptionalParameter<String> param,
final File folder) {
ConfigurationModule result = conf;
final File[] files = folder.listFiles();
if (files != null) {
for (final File f : files) {
if (f.canRead() && f.exists() && f.isFile()) {
result = result.set(param, f.getAbsolutePath());
}
}
}
return result;
} |
java | public static List<CommonSynonymDictionary.SynonymItem> convert(List<Term> sentence, boolean withUndefinedItem)
{
List<CommonSynonymDictionary.SynonymItem> synonymItemList = new ArrayList<CommonSynonymDictionary.SynonymItem>(sentence.size());
for (Term term : sentence)
{
CommonSynonymDictionary.SynonymItem item = get(term.word);
if (item == null)
{
if (withUndefinedItem)
{
item = CommonSynonymDictionary.SynonymItem.createUndefined(term.word);
synonymItemList.add(item);
}
}
else
{
synonymItemList.add(item);
}
}
return synonymItemList;
} |
python | def rss_parse(self, response):
    """
    Extracts all article links and initiates crawling them.
    :param obj response: The scrapy response
    """
    # get last_update zip url
    match = re.match(re_export, response.text)
    if match:
        last_update_zip_url = match.group(1)
        # fetch zip file
        r = requests.get(last_update_zip_url)
        # unzip
        z = zipfile.ZipFile(io.BytesIO(r.content))
        extracted = z.namelist()
        z.extractall('/tmp')
        csv_file_path = '/tmp/%s' % extracted[0]
        # read csv to get all urls
        urls = set()  # set to remove duplicates
        with open(csv_file_path) as csv_file:
            csv_reader = csv.reader(csv_file, delimiter='\t')
            for row in csv_reader:
                urls.add(row[-1])
        # rm the file
        os.remove(csv_file_path)
        for url in urls:
            yield scrapy.Request(url, lambda resp: self.article_parse(
                resp, 'gdelt')) |
java | private MonitorInformation getMonitorInformation(L location) {
final K monitorKey = getLocationKey(location);
MonitorInformation monitorInformation = monitorInformations.get(monitorKey);
if (monitorInformation == null) {
// Not found, let's call delegate
if (delegate.isMonitored(location)) {
monitorInformation = new MonitorInformation(true, delegate.getMonitor(location));
} else {
monitorInformation = NULL_MONITOR_INFORMATION;
}
monitorInformations.put(monitorKey, monitorInformation);
}
return monitorInformation;
} |
python | def ExportInstance(r, instance, mode, destination, shutdown=None,
                   remove_instance=None, x509_key_name=None,
                   destination_x509_ca=None):
    """
    Exports an instance.
    @type instance: string
    @param instance: Instance name
    @type mode: string
    @param mode: Export mode
    @rtype: string
    @return: Job ID
    """
    body = {
        "destination": destination,
        "mode": mode,
    }
    if shutdown is not None:
        body["shutdown"] = shutdown
    if remove_instance is not None:
        body["remove_instance"] = remove_instance
    if x509_key_name is not None:
        body["x509_key_name"] = x509_key_name
    if destination_x509_ca is not None:
        body["destination_x509_ca"] = destination_x509_ca
    return r.request("put", "/2/instances/%s/export" % instance, content=body) |
java | private void resolveTypesUsingImports(Expression expression) {
if (expression instanceof NodeWithType) {
NodeWithType<?, ?> nodeWithType = ((NodeWithType<?, ?>) expression);
nodeWithType.setType(getQualifiedName(nodeWithType.getType()));
}
// Recurse downward in the expression
expression
.getChildNodes()
.stream()
.filter(Expression.class::isInstance)
.map(Expression.class::cast)
.forEach(this::resolveTypesUsingImports);
} |
python | def is_public(self):
    """Return True iff this function should be considered public."""
    if self.dunder_all is not None:
        return self.name in self.dunder_all
    else:
        return not self.name.startswith('_') |
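The `__all__` handling above is easy to get wrong; a small illustration, assuming a `Function` object with `name` and `dunder_all` attributes (the constructor call here is hypothetical):

```python
# With no __all__, visibility falls back to the underscore convention.
f = Function(name='_helper', dunder_all=None)
assert not f.is_public()

# With __all__ present, membership wins even for underscored names.
g = Function(name='_helper', dunder_all=['_helper'])
assert g.is_public()
```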
python | def get(identifier, namespace='cid', domain='compound', operation=None, output='JSON', searchtype=None, **kwargs):
    """Request wrapper that automatically handles async requests."""
    if (searchtype and searchtype != 'xref') or namespace in ['formula']:
        response = request(identifier, namespace, domain, None, 'JSON', searchtype, **kwargs).read()
        status = json.loads(response.decode())
        if 'Waiting' in status and 'ListKey' in status['Waiting']:
            identifier = status['Waiting']['ListKey']
            namespace = 'listkey'
            while 'Waiting' in status and 'ListKey' in status['Waiting']:
                time.sleep(2)
                response = request(identifier, namespace, domain, operation, 'JSON', **kwargs).read()
                status = json.loads(response.decode())
            if not output == 'JSON':
                response = request(identifier, namespace, domain, operation, output, searchtype, **kwargs).read()
    else:
        response = request(identifier, namespace, domain, operation, output, searchtype, **kwargs).read()
    return response |
python | def run_setup(setup_script, args):
    """Run a distutils setup script, sandboxed in its directory"""
    setup_dir = os.path.abspath(os.path.dirname(setup_script))
    with setup_context(setup_dir):
        try:
            sys.argv[:] = [setup_script] + list(args)
            sys.path.insert(0, setup_dir)
            # reset to include setup dir, w/clean callback list
            working_set.__init__()
            working_set.callbacks.append(lambda dist: dist.activate())
            # __file__ should be a byte string on Python 2 (#712)
            dunder_file = (
                setup_script
                if isinstance(setup_script, str) else
                setup_script.encode(sys.getfilesystemencoding())
            )
            with DirectorySandbox(setup_dir):
                ns = dict(__file__=dunder_file, __name__='__main__')
                _execfile(setup_script, ns)
        except SystemExit as v:
            if v.args and v.args[0]:
                raise |
java | private VisibilityModifier getVisibility(int flags) {
if (hasFlag(flags, Opcodes.ACC_PRIVATE)) {
return VisibilityModifier.PRIVATE;
} else if (hasFlag(flags, Opcodes.ACC_PROTECTED)) {
return VisibilityModifier.PROTECTED;
} else if (hasFlag(flags, Opcodes.ACC_PUBLIC)) {
return VisibilityModifier.PUBLIC;
} else {
return VisibilityModifier.DEFAULT;
}
} |
python | def pre_release(version):
    """Generates new docs, release announcements and creates a local tag."""
    create_branch(version)
    changelog(version, write_out=True)
    check_call(["git", "commit", "-a", "-m", f"Preparing release {version}"])
    print()
    print(f"{Fore.GREEN}Please push your branch to your fork and open a PR.") |
java | @Override
public Iterable<JavaFileObject> list(JavaFileManager.Location location,
String packageName,
Set<JavaFileObject.Kind> kinds,
boolean recurse)
throws IOException {
Iterable<JavaFileObject> stdList = stdFileManager.list(location, packageName, kinds, recurse);
if (location==CLASS_PATH && packageName.equals("REPL")) {
// if the desired list is for our JShell package, lazily iterate over
// first the standard list then any generated classes.
return () -> new Iterator<JavaFileObject>() {
boolean stdDone = false;
Iterator<? extends JavaFileObject> it;
@Override
public boolean hasNext() {
if (it == null) {
it = stdList.iterator();
}
if (it.hasNext()) {
return true;
}
if (stdDone) {
return false;
} else {
stdDone = true;
it = generatedClasses().iterator();
return it.hasNext();
}
}
@Override
public JavaFileObject next() {
if (!hasNext()) {
throw new NoSuchElementException();
}
return it.next();
}
};
} else {
return stdList;
}
} |
python | def extend_array(edges, binsz, lo, hi):
    """Extend an array to encompass lo and hi values."""
    numlo = int(np.ceil((edges[0] - lo) / binsz))
    numhi = int(np.ceil((hi - edges[-1]) / binsz))
    edges = copy.deepcopy(edges)
    if numlo > 0:
        edges_lo = np.linspace(edges[0] - numlo * binsz, edges[0], numlo + 1)
        edges = np.concatenate((edges_lo[:-1], edges))
    if numhi > 0:
        edges_hi = np.linspace(edges[-1], edges[-1] + numhi * binsz, numhi + 1)
        edges = np.concatenate((edges, edges_hi[1:]))
    return edges |
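A worked example of the padding arithmetic above, assuming `numpy` is imported as `np`:

```python
import numpy as np

edges = np.array([1.0, 2.0, 3.0])
# numlo = ceil((1 - (-0.5)) / 1) = 2 bins below; numhi = ceil((4.5 - 3) / 1) = 2 above
out = extend_array(edges, binsz=1.0, lo=-0.5, hi=4.5)
assert np.allclose(out, [-1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
```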
python | def new_(self, df=pd.DataFrame(), db=None, quiet=False,
         nbload_libs=True):
    """
    Returns a new DataSwim instance from a dataframe
    """
    ds2 = Ds(df, db, nbload_libs)
    if quiet is False:
        self.ok("A new instance was created")
    return ds2 |
java | @Override
public int read() throws IOException {
if (ostart >= ofinish) {
int temp;
for (temp = 0; temp == 0; temp = getMoreData())
;
if (temp == -1) {
return -1;
}
}
return obuffer[ostart++] & 255;
} |
java | @Override
public void setIsolationLevel(IsolationLevel level) throws IllegalStateException {
if(level != IsolationLevels.SNAPSHOT){
throw new IllegalStateException("Only IsolationLevels.SNAPSHOT level supported.");
}else{
super.setIsolationLevel(level);
}
} |
python | def validate(self, auth_rest):
    """Validate that user credentials are in the right format for SHA-1.
    :param auth_rest: User credentials' part without auth_type
    :return: Dict with a hash and a salt part of user credentials
    :raises ValueError: If credentials' part doesn't contain delimiter
        between a salt and a hash.
    """
    try:
        auth_salt, auth_hash = auth_rest.split('$')
    except ValueError:
        raise ValueError("Missing '$' in %s" % auth_rest)
    if len(auth_salt) == 0:
        raise ValueError("Salt must have non-zero length!")
    if len(auth_hash) != 40:
        raise ValueError("Hash must have 40 chars!")
    if not all(c in string.hexdigits for c in auth_hash):
        raise ValueError("Hash must be hexadecimal!")
    return dict(salt=auth_salt, hash=auth_hash) |
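A sketch of the credential format the validator expects: a salt, a `$` delimiter, then a 40-character hex SHA-1 digest (the `validator` instance and values are hypothetical):

```python
import hashlib

salt = 'mysalt'
digest = hashlib.sha1(b'key-material').hexdigest()  # always 40 hex chars
parts = validator.validate('%s$%s' % (salt, digest))
assert parts == {'salt': 'mysalt', 'hash': digest}
# A missing delimiter, empty salt, wrong length, or non-hex digest
# all raise ValueError per the checks above.
```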
java | public CMAPreviewApiKey fetchOne(String spaceId, String keyId) {
assertNotNull(spaceId, "spaceId");
assertNotNull(keyId, "keyId");
return service.fetchOne(spaceId, keyId).blockingFirst();
} |
python | def get_cli_parser(func, skip_first=0, parser=None):
    """makes a parser for parsing cli arguments for `func`.
    :param callable func: the function the parser will parse
    :param int skip_first: skip this many first arguments of the func
    :param ArgumentParser parser: bind func to this parser.
    """
    help_msg, func_args = _get_func_args(func)
    if not parser:
        parser = ArgumentParser(description=help_msg)
    for i, arg in enumerate(func_args):
        arg_name, arg_type, arg_default, arg_required, arg_help = arg
        if i < skip_first:
            continue
        if arg_default is not _empty:
            parser.add_argument(
                arg_name, type=arg_type, default=arg_default,
                required=arg_required, help=arg_help
            )
        else:
            parser.add_argument(
                arg_name, type=arg_type, required=arg_required, help=arg_help
            )
    return parser |
python | def rc_channels_raw_send(self, time_boot_ms, port, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, rssi, force_mavlink1=False):
    '''
    The RAW values of the RC channels received. The standard PPM
    modulation is as follows: 1000 microseconds: 0%, 2000
    microseconds: 100%. Individual receivers/transmitters
    might violate this specification.

    time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
    port         : Servo output port (set of 8 outputs = 1 port). Most MAVs will just use one, but this allows for more than 8 servos. (uint8_t)
    chan1_raw    : RC channel 1 value, in microseconds. A value of UINT16_MAX implies the channel is unused. (uint16_t)
    chan2_raw    : RC channel 2 value, in microseconds. A value of UINT16_MAX implies the channel is unused. (uint16_t)
    chan3_raw    : RC channel 3 value, in microseconds. A value of UINT16_MAX implies the channel is unused. (uint16_t)
    chan4_raw    : RC channel 4 value, in microseconds. A value of UINT16_MAX implies the channel is unused. (uint16_t)
    chan5_raw    : RC channel 5 value, in microseconds. A value of UINT16_MAX implies the channel is unused. (uint16_t)
    chan6_raw    : RC channel 6 value, in microseconds. A value of UINT16_MAX implies the channel is unused. (uint16_t)
    chan7_raw    : RC channel 7 value, in microseconds. A value of UINT16_MAX implies the channel is unused. (uint16_t)
    chan8_raw    : RC channel 8 value, in microseconds. A value of UINT16_MAX implies the channel is unused. (uint16_t)
    rssi         : Receive signal strength indicator, 0: 0%, 100: 100%, 255: invalid/unknown. (uint8_t)
    '''
    return self.send(self.rc_channels_raw_encode(time_boot_ms, port, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, rssi), force_mavlink1=force_mavlink1) |
java | @Override
public void setAction(final Runnable action) {
super.setAction(new Runnable() {
@Override
public void run() {
if (!isRunning) {
thread = new Thread(wrappedAction(action), "wunderboss-daemon-thread[" + name + "]");
thread.start();
isRunning = true;
}
}
});
} |
python | def convert_ulog2csv(ulog_file_name, messages, output, delimiter):
    """
    Converts a ULog file to a CSV file.
    :param ulog_file_name: The ULog filename to open and read
    :param messages: A list of message names
    :param output: Output file path
    :param delimiter: CSV delimiter
    :return: None
    """
    msg_filter = messages.split(',') if messages else None
    ulog = ULog(ulog_file_name, msg_filter)
    data = ulog.data_list
    output_file_prefix = ulog_file_name
    # strip '.ulg'
    if output_file_prefix.lower().endswith('.ulg'):
        output_file_prefix = output_file_prefix[:-4]
    # write to different output path?
    if output:
        base_name = os.path.basename(output_file_prefix)
        output_file_prefix = os.path.join(output, base_name)
    for d in data:
        fmt = '{0}_{1}_{2}.csv'
        output_file_name = fmt.format(output_file_prefix, d.name, d.multi_id)
        fmt = 'Writing {0} ({1} data points)'
        # print(fmt.format(output_file_name, len(d.data['timestamp'])))
        with open(output_file_name, 'w') as csvfile:
            # use same field order as in the log, except for the timestamp
            data_keys = [f.field_name for f in d.field_data]
            data_keys.remove('timestamp')
            data_keys.insert(0, 'timestamp')  # we want timestamp at first position
            # we don't use np.savetxt, because we have multiple arrays with
            # potentially different data types. However the following is quite
            # slow...
            # write the header
            csvfile.write(delimiter.join(data_keys) + '\n')
            # write the data
            last_elem = len(data_keys) - 1
            for i in range(len(d.data['timestamp'])):
                for k in range(len(data_keys)):
                    csvfile.write(str(d.data[data_keys[k]][i]))
                    if k != last_elem:
                        csvfile.write(delimiter)
                csvfile.write('\n') |
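A hypothetical invocation of the converter above; per the naming pattern in the loop, this would write one CSV per (message name, multi_id) pair into `/tmp` (the log and topic names are assumptions):

```python
# Expected outputs look like /tmp/flight_vehicle_attitude_0.csv, etc.
convert_ulog2csv('flight.ulg', 'vehicle_attitude,vehicle_gps_position',
                 output='/tmp', delimiter=',')
```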
java | public PagedList<SiteInner> beginChangeVnet(final String resourceGroupName, final String name, final VirtualNetworkProfile vnetInfo) {
ServiceResponse<Page<SiteInner>> response = beginChangeVnetSinglePageAsync(resourceGroupName, name, vnetInfo).toBlocking().single();
return new PagedList<SiteInner>(response.body()) {
@Override
public Page<SiteInner> nextPage(String nextPageLink) {
return beginChangeVnetNextSinglePageAsync(nextPageLink).toBlocking().single().body();
}
};
} |
java | public static void addEnvContextParam(Document doc, Element root) {
Element ctxParam = doc.createElement("context-param");
Element paramName = doc.createElement("param-name");
paramName.appendChild(doc.createTextNode("shiroEnvironmentClass"));
ctxParam.appendChild(paramName);
Element paramValue = doc.createElement("param-value");
paramValue.appendChild(doc.createTextNode("com.meltmedia.cadmium.servlets.shiro.WebEnvironment"));
ctxParam.appendChild(paramValue);
addRelativeTo(root, ctxParam, "listener", false);
} |
java | public com.google.api.ads.adwords.axis.v201809.cm.CampaignLabel getCampaignLabel() {
return campaignLabel;
} |
python | def accuracy(Ntp, Ntn, Nfp, Nfn, eps=numpy.spacing(1)):
    """Accuracy
    Parameters
    ----------
    Ntp : int >= 0
        Number of true positives.
    Ntn : int >= 0
        Number of true negatives.
    Nfp : int >= 0
        Number of false positives.
    Nfn : int >= 0
        Number of false negatives.
    eps : float
        eps.
        Default value numpy.spacing(1)
    Returns
    -------
    acc : float
        Accuracy
    """
    return float((Ntp + Ntn) / (Ntp + Ntn + Nfn + Nfp + eps)) |
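A quick numeric check of the formula, accuracy = (Ntp + Ntn) / (Ntp + Ntn + Nfp + Nfn):

```python
# 90 true positives, 5 true negatives, 3 false positives, 2 false negatives:
# (90 + 5) / (90 + 5 + 3 + 2) = 95 / 100 = 0.95 (up to the eps regularizer).
assert abs(accuracy(90, 5, 3, 2) - 0.95) < 1e-9
```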
python | def start_element (self, tag, attrs):
    """Search for links and store found URLs in a list."""
    log.debug(LOG_CHECK, "LinkFinder tag %s attrs %s", tag, attrs)
    log.debug(LOG_CHECK, "line %d col %d old line %d old col %d", self.parser.lineno(), self.parser.column(), self.parser.last_lineno(), self.parser.last_column())
    if tag == "base" and not self.base_ref:
        self.base_ref = attrs.get_true("href", u'')
    tagattrs = self.tags.get(tag, self.universal_attrs)
    # parse URLs in tag (possibly multiple URLs in CSS styles)
    for attr in tagattrs.intersection(attrs):
        if tag == "meta" and not is_meta_url(attr, attrs):
            continue
        if tag == "form" and not is_form_get(attr, attrs):
            continue
        # name of this link
        name = self.get_link_name(tag, attrs, attr)
        # possible codebase
        base = u''
        if tag == 'applet':
            base = attrs.get_true('codebase', u'')
        if not base:
            base = self.base_ref
        # note: value can be None
        value = attrs.get(attr)
        if tag == 'link' and attrs.get('rel') == 'dns-prefetch':
            if ':' in value:
                value = value.split(':', 1)[1]
            value = 'dns:' + value.rstrip('/')
        # parse tag for URLs
        self.parse_tag(tag, attr, value, name, base)
    log.debug(LOG_CHECK, "LinkFinder finished tag %s", tag) |
java | public final void invalidateKey(Data key, String dataStructureName, String sourceUuid) {
checkNotNull(key, "key cannot be null");
checkNotNull(sourceUuid, "sourceUuid cannot be null");
Invalidation invalidation = newKeyInvalidation(key, dataStructureName, sourceUuid);
invalidateInternal(invalidation, getPartitionId(key));
} |
python | def show_new_activity(last_seen=None, cap=1000, template='grouped', include=None, exclude=None):
    """
    Inclusion tag to show new activity,
    either since user was last seen or today (if not last_seen).
    Note that passing in last_seen is up to you.
    Usage: {% show_new_activity %}
    Or, to show since last seen: {% show_new_activity last_seen %}
    Can also cap the number of items returned. Default is 1000.
    Usage: {% show_new_activity last_seen 50 %}
    Allows passing template, controlling level of detail.
    Template choices are:
    * 'plain': simple list
    * 'grouped': items are grouped by content type
    * 'detailed': items are grouped and can use custom template snippets
    Usage: {% show_new_activity last_seen 50 'plain' %}
    If no template choice argument is passed, 'grouped' will be used.
    Also accepts "include" and "exclude" options to control which activities are returned.
    Content types should be passed in by name.
    * 'include' will **only** return passed content types
    * 'exclude' will **not** return passed content types
    Include is evaluated before exclude.
    Usage: {% show_new_activity last_seen 50 'plain' exclude="comment,post" %}
    """
    if not last_seen or last_seen == '':
        last_seen = datetime.date.today()
    actions = Activity.objects.filter(timestamp__gte=last_seen)
    if include:
        include_types = include.split(',')
        actions = actions.filter(content_type__model__in=include_types)
    if exclude:
        exclude_types = exclude.split(',')
        actions = actions.exclude(content_type__model__in=exclude_types)
    # Now apply cap
    actions = actions[:cap]
    if template == 'detailed':
        template = 'activity_monitor/includes/detailed.html'
        actions = group_activities(actions)
    elif template == 'grouped':
        template = 'activity_monitor/includes/grouped_list.html'
        actions = group_activities(actions)
    else:
        template = 'activity_monitor/includes/activity_list.html'
    return {'actions': actions, 'selected_template': template} |
python | def get_iface_mode(iface):
    """Return the interface mode.
    params:
    - iface: the iwconfig interface
    """
    p = subprocess.Popen(["iwconfig", iface], stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    output, err = p.communicate()
    match = re.search(br"mode:([a-zA-Z]*)", output.lower())
    if match:
        return plain_str(match.group(1))
    return "unknown" |
python | def _execute(self, app_, file_):
    """Run app with file as input.
    :param app_: application to run.
    :param file_: file to run app with.
    :return: success True, else False
    :rtype: bool
    """
    app_name = os.path.basename(app_)
    args = [app_]
    args.extend(self.args[app_])
    args.append(file_)
    process = subprocess.Popen(args)
    time.sleep(1)
    status = {True: Status.SUCCESS, False: Status.FAILED}
    crashed = process.poll()
    result = status[crashed is None]
    self.stats_.add(app_name, result)
    if result is Status.SUCCESS:
        # process did not crash, so just terminate it
        process.terminate() |
python | def check_basic_auth(self, username, password):
    """
    This function is called to check if a username /
    password combination is valid via the htpasswd file.
    """
    valid = self.users.check_password(
        username, password
    )
    if not valid:
        log.warning('Invalid login from %s', username)
        valid = False
    return (
        valid,
        username
    ) |
java | protected void store(SecurityEvent auditEvent) throws Exception {
if (myDataStore == null)
throw new InternalErrorException("No data store provided to persist audit events");
myDataStore.store(auditEvent);
} |
java | public void marshall(RecoveryPointByResource recoveryPointByResource, ProtocolMarshaller protocolMarshaller) {
if (recoveryPointByResource == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(recoveryPointByResource.getRecoveryPointArn(), RECOVERYPOINTARN_BINDING);
protocolMarshaller.marshall(recoveryPointByResource.getCreationDate(), CREATIONDATE_BINDING);
protocolMarshaller.marshall(recoveryPointByResource.getStatus(), STATUS_BINDING);
protocolMarshaller.marshall(recoveryPointByResource.getEncryptionKeyArn(), ENCRYPTIONKEYARN_BINDING);
protocolMarshaller.marshall(recoveryPointByResource.getBackupSizeBytes(), BACKUPSIZEBYTES_BINDING);
protocolMarshaller.marshall(recoveryPointByResource.getBackupVaultName(), BACKUPVAULTNAME_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
java | @Override
public String getOptStringProperty(final String name) throws OptionsException {
Object val = getOptProperty(name);
if (val == null) {
return null;
}
if (!(val instanceof String)) {
throw new OptionsException("org.bedework.calenv.bad.option.value");
}
return (String)val;
} |
java | @NotNull
protected final JpaStream findStream(@NotNull final StreamId streamId) {
Contract.requireArgNotNull("streamId", streamId);
verifyStreamEntityExists(streamId);
final String sql = createJpqlStreamSelect(streamId);
final TypedQuery<JpaStream> query = getEm().createQuery(sql, JpaStream.class);
setJpqlParameters(query, streamId);
final List<JpaStream> streams = query.getResultList();
if (streams.size() == 0) {
throw new StreamNotFoundException(streamId);
}
final JpaStream stream = streams.get(0);
if (stream.getState() == StreamState.SOFT_DELETED) {
// TODO Remove after event store has a way to distinguish between
// never-existing and soft deleted
// streams
throw new StreamNotFoundException(streamId);
}
return stream;
} |
java | @ArgumentsChecked
@Throws({ IllegalNullArgumentException.class, IllegalNullElementsException.class })
public static <T> void noNullElements(final boolean condition, @Nonnull final T[] array) {
if (condition) {
Check.noNullElements(array);
}
} |
python | def _smooth_fragment_list(self, real_wave_mfcc_audio_length, ns_string):
    """
    Remove NONSPEECH fragments from list if needed,
    and set HEAD/TAIL begin/end.
    """
    self.log(u"Called _smooth_fragment_list")
    self.smflist[0].begin = TimeValue("0.000")
    self.smflist[-1].end = real_wave_mfcc_audio_length
    if ns_string in [None, gc.PPV_TASK_ADJUST_BOUNDARY_NONSPEECH_REMOVE]:
        self.log(u"Remove all NONSPEECH fragments")
        self.smflist.remove_nonspeech_fragments(zero_length_only=False)
    else:
        self.log(u"Remove NONSPEECH fragments with zero length only")
        self.smflist.remove_nonspeech_fragments(zero_length_only=True) |
java | public static String disentangleBiMap(final BiMap<Character, Character> rules,
final String obfuscated)
{
return obfuscateBiMap(rules.inverse(), obfuscated);
} |
python | def from_epw_file(cls, epwfile, timestep=1):
    """Create a wea object using the solar irradiance values in an epw file.
    Args:
        epwfile: Full path to epw weather file.
        timestep: An optional integer to set the number of time steps per hour.
            Default is 1 for one value per hour. Note that this input
            will only do a linear interpolation over the data in the EPW
            file. While such linear interpolations are suitable for most
            thermal simulations, where thermal lag "smooths over" the effect
            of momentary increases in solar energy, it is not recommended
            for daylight simulations, where momentary increases in solar
            energy can mean the difference between glare and visual comfort.
    """
    is_leap_year = False  # epw file is always for 8760 hours
    epw = EPW(epwfile)
    direct_normal, diffuse_horizontal = \
        cls._get_data_collections(epw.direct_normal_radiation.values,
                                  epw.diffuse_horizontal_radiation.values,
                                  epw.metadata, 1, is_leap_year)
    if timestep != 1:
        print ("Note: timesteps greater than 1 on epw-generated Wea's \n" +
               "are suitable for thermal models but are not recommended \n" +
               "for daylight models.")
        # interpolate the data
        direct_normal = direct_normal.interpolate_to_timestep(timestep)
        diffuse_horizontal = diffuse_horizontal.interpolate_to_timestep(timestep)
        # create sunpath to check if the sun is up at a given timestep
        sp = Sunpath.from_location(epw.location)
        # add correct values to the empty data collection
        for i, dt in enumerate(cls._get_datetimes(timestep, is_leap_year)):
            # set irradiance values to 0 when the sun is not up
            sun = sp.calculate_sun_from_date_time(dt)
            if sun.altitude < 0:
                direct_normal[i] = 0
                diffuse_horizontal[i] = 0
    return cls(epw.location, direct_normal, diffuse_horizontal,
               timestep, is_leap_year) |
python | def search_files_sql_builder(search):
    """
    Create and populate an instance of :class:`meteorpi_db.SQLBuilder` for a given
    :class:`meteorpi_model.FileRecordSearch`. This can then be used to retrieve the results of the search, materialise
    them into :class:`meteorpi_model.FileRecord` instances etc.
    :param FileRecordSearch search:
        The search to realise
    :return:
        A :class:`meteorpi_db.SQLBuilder` configured from the supplied search
    """
    b = SQLBuilder(tables="""archive_files f
        INNER JOIN archive_semanticTypes s2 ON f.semanticType=s2.uid
        INNER JOIN archive_observations o ON f.observationId=o.uid
        INNER JOIN archive_semanticTypes s ON o.obsType=s.uid
        INNER JOIN archive_observatories l ON o.observatory=l.uid""", where_clauses=[])
    b.add_set_membership(search.obstory_ids, 'l.publicId')
    b.add_sql(search.repository_fname, 'f.repositoryFname = %s')
    b.add_sql(search.observation_type, 's.name = %s')
    b.add_sql(search.observation_id, 'o.uid = %s')
    b.add_sql(search.time_min, 'f.fileTime > %s')
    b.add_sql(search.time_max, 'f.fileTime < %s')
    b.add_sql(search.lat_min, 'l.latitude >= %s')
    b.add_sql(search.lat_max, 'l.latitude <= %s')
    b.add_sql(search.long_min, 'l.longitude >= %s')
    b.add_sql(search.long_max, 'l.longitude <= %s')
    b.add_sql(search.mime_type, 'f.mimeType = %s')
    b.add_sql(search.semantic_type, 's2.name = %s')
    b.add_metadata_query_properties(meta_constraints=search.meta_constraints, id_column="fileId", id_table="f")
    # Check for import / export filters
    if search.exclude_imported:
        b.where_clauses.append('NOT EXISTS (SELECT * FROM archive_observationImport i WHERE i.observationId = o.uid)')
    if search.exclude_export_to is not None:
        b.where_clauses.append("""
            NOT EXISTS (SELECT * FROM archive_fileExport ex
            INNER JOIN archive_exportConfig c ON ex.exportConfig = c.uid
            WHERE ex.fileId = f.uid AND c.exportConfigID = %s)
        """)
        b.sql_args.append(SQLBuilder.map_value(search.exclude_export_to))
    return b |
python | def Ncomp_SVHT_MG_DLD_approx(X, zscore=True):
    """ This function implements the approximate calculation of the
    optimal hard threshold for singular values, by Matan Gavish
    and David L. Donoho:
    "The optimal hard threshold for singular values is 4 / sqrt(3)"
    http://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=6846297
    Parameters
    ----------
    X: 2-D numpy array of size [n_T, n_V]
        The data to estimate the optimal rank for selecting principal
        components.
    zscore: Boolean
        Whether to z-score the data before calculating number of components.
    Returns
    -------
    ncomp: integer
        The optimal number of components determined by the method of MG
        and DLD
    """
    beta = X.shape[0] / X.shape[1]
    if beta > 1:
        beta = 1 / beta
    omega = 0.56 * beta ** 3 - 0.95 * beta ** 2 + 1.82 * beta + 1.43
    if zscore:
        sing = np.linalg.svd(_zscore(X), False, False)
    else:
        sing = np.linalg.svd(X, False, False)
    thresh = omega * np.median(sing)
    ncomp = int(np.sum(np.logical_and(sing > thresh, np.logical_not(
        np.isclose(sing, thresh)))))
    # In the lines above, we look for the singular values larger than
    # the threshold, but exclude those that happen to be "just" larger
    # than the threshold by an amount close to the numerical precision.
    # This is to prevent close-to-zero singular values from being included
    # when the median of the singular values is close to 0 (which could
    # happen when the input X has lower rank than its minimal size).
    return ncomp |
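A self-contained sketch of the MG-DLD threshold on synthetic data: a rank-3 signal plus noise should recover roughly 3 components. This re-derives the threshold inline rather than calling the function above, and skips z-scoring for brevity:

```python
import numpy as np

rng = np.random.RandomState(0)
n_t, n_v, rank = 200, 100, 3
# Strong rank-3 signal buried in unit-variance Gaussian noise.
X = rng.randn(n_t, rank) @ rng.randn(rank, n_v) * 5 + rng.randn(n_t, n_v)

beta = min(n_t / n_v, n_v / n_t)
omega = 0.56 * beta ** 3 - 0.95 * beta ** 2 + 1.82 * beta + 1.43
sing = np.linalg.svd(X, False, False)
ncomp = int(np.sum(sing > omega * np.median(sing)))
# At this signal-to-noise level ncomp should come out as 3.
```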
java | @Override
public Element useMarker(SVGPlot plot, Element parent, double x, double y, int stylenr, double size) {
Element marker = plot.svgCircle(x, y, size * .5);
final String col;
if(stylenr == -1) {
col = dotcolor;
}
else if(stylenr == -2) {
col = greycolor;
}
else {
col = colors.getColor(stylenr);
}
SVGUtil.setStyle(marker, SVGConstants.CSS_FILL_PROPERTY + ":" + col);
parent.appendChild(marker);
return marker;
} |
java | public String cql() {
if (sb == null) {
throw new AbacusException("This CQLBuilder has been closed after cql() was called previously");
}
init(true);
String cql = null;
try {
cql = sb.toString();
} finally {
Objectory.recycle(sb);
sb = null;
activeStringBuilderCounter.decrementAndGet();
}
if (logger.isDebugEnabled()) {
logger.debug(cql);
}
return cql;
} |
python | def is_uid(uid, validate=False):
    """Checks if the passed in uid is a valid UID
    :param uid: The uid to check
    :param validate: If False, checks if uid is a valid 32-character
        alphanumeric uid. If True, also verifies if a brain exists for the
        uid passed in
    :type uid: string
    :return: True if a valid uid
    :rtype: bool
    """
    if not isinstance(uid, basestring):
        return False
    if uid == '0':
        return True
    if len(uid) != 32:
        return False
    if not UID_RX.match(uid):
        return False
    if not validate:
        return True
    # Check if a brain for this uid exists
    uc = get_tool('uid_catalog')
    brains = uc(UID=uid)
    if brains:
        assert (len(brains) == 1)
    return len(brains) > 0 |
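A sketch of what passes the format checks above; the catalog lookup in `validate=True` mode needs a live portal, so only the format path is shown (the UID value is hypothetical):

```python
assert is_uid('0')                                  # special-cased shortcut
assert is_uid('6edb59a42527421a8c0ba9e1e964c118')   # 32 alphanumeric chars
assert not is_uid('not-a-uid')                      # wrong length/charset
assert not is_uid(42)                               # not a string
```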
python | def is_causal_sink(graph: BELGraph, node: BaseEntity) -> bool:
    """Return true if the node is a causal sink.
    - Does have causal in edge(s)
    - Doesn't have any causal out edge(s)
    """
    return has_causal_in_edges(graph, node) and not has_causal_out_edges(graph, node) |
java | public synchronized EvernoteBusinessNotebookHelper getBusinessNotebookHelper() throws TException, EDAMUserException, EDAMSystemException {
if (mBusinessNotebookHelper == null || isBusinessAuthExpired()) {
mBusinessNotebookHelper = createBusinessNotebookHelper();
}
return mBusinessNotebookHelper;
} |
python | def connect_sdb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
    """
    :type aws_access_key_id: string
    :param aws_access_key_id: Your AWS Access Key ID
    :type aws_secret_access_key: string
    :param aws_secret_access_key: Your AWS Secret Access Key
    :rtype: :class:`boto.sdb.connection.SDBConnection`
    :return: A connection to Amazon's SDB
    """
    from boto.sdb.connection import SDBConnection
    return SDBConnection(aws_access_key_id, aws_secret_access_key, **kwargs) |
java | public static Couple<String, String> parse(String pathstub) {
String path = pathstub, type = null;
int dot = pathstub.lastIndexOf('.') + 1;
int slash = pathstub.lastIndexOf('/');
if (dot > 0 && dot > slash) {
path = pathstub.substring(0, dot - 1);
type = pathstub.substring(dot);
}
return new Couple<String, String>(path, type);
} |
python | def edit_tc_pool(self, zone_name, owner_name, ttl, pool_info, rdata_info, backup_record):
    """Updates an existing TC Pool in the specified zone.
    :param zone_name: The zone that contains the RRSet. The trailing dot is optional.
    :param owner_name: The owner name for the RRSet.
        If no trailing dot is supplied, the owner_name is assumed to be relative (foo).
        If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.)
    :param ttl: The updated TTL value for the RRSet.
    :param pool_info: dict of information about the pool
    :param rdata_info: dict of information about the records in the pool.
        The keys in the dict are the A and CNAME records that make up the pool.
        The values are the rdataInfo for each of the records
    :param backup_record: dict of information about the backup (all-fail) records in the pool.
        There are two key/value in the dict:
        rdata - the A or CNAME for the backup record
        failoverDelay - the time to wait to fail over (optional, defaults to 0)
    """
    rrset = self._build_tc_rrset(backup_record, pool_info, rdata_info, ttl)
    return self.rest_api_connection.put("/v1/zones/" + zone_name + "/rrsets/A/" + owner_name, json.dumps(rrset)) |
java | static <T> T convertToCustomClass(Object object, Class<T> clazz) {
return deserializeToClass(object, clazz, ErrorPath.EMPTY);
} |
python | def copy_to(source, dest, engine_or_conn, **flags):
    """Export a query or select to a file. For flags, see the PostgreSQL
    documentation at http://www.postgresql.org/docs/9.5/static/sql-copy.html.
    Examples: ::
        select = MyTable.select()
        with open('/path/to/file.tsv', 'w') as fp:
            copy_to(select, fp, conn)

        query = session.query(MyModel)
        with open('/path/to/file/csv', 'w') as fp:
            copy_to(query, fp, engine, format='csv', null='.')
    :param source: SQLAlchemy query or select
    :param dest: Destination file pointer, in write mode
    :param engine_or_conn: SQLAlchemy engine, connection, or raw_connection
    :param **flags: Options passed through to COPY
    If an existing connection is passed to `engine_or_conn`, it is the caller's
    responsibility to commit and close.
    """
    dialect = postgresql.dialect()
    statement = getattr(source, 'statement', source)
    compiled = statement.compile(dialect=dialect)
    conn, autoclose = raw_connection_from(engine_or_conn)
    cursor = conn.cursor()
    query = cursor.mogrify(compiled.string, compiled.params).decode()
    formatted_flags = '({})'.format(format_flags(flags)) if flags else ''
    copy = 'COPY ({}) TO STDOUT {}'.format(query, formatted_flags)
    cursor.copy_expert(copy, dest)
    if autoclose:
        conn.close() |
java | public OperationFuture<List<Server>> powerOff(Group... groups) {
return serverService().powerOff(
getServerSearchCriteria(groups)
);
} |
python | def get_profiles(self, overwrite=False):
    """Get all the minimum needs profiles.
    :returns: The minimum needs by name.
    :rtype: list
    """
    def sort_by_locale(unsorted_profiles, locale):
        """Sort the profiles by language settings.
        The profiles that are in the same language as the QGIS' locale
        will be sorted out first.
        :param unsorted_profiles: The user profiles
        :type unsorted_profiles: list
        :param locale: The language settings string
        :type locale: str
        :returns: Ordered profiles
        :rtype: list
        """
        if locale is None:
            return unsorted_profiles
        locale = '_%s' % locale[:2]
        profiles_our_locale = []
        profiles_remaining = []
        for profile_name in unsorted_profiles:
            if locale in profile_name:
                profiles_our_locale.append(profile_name)
            else:
                profiles_remaining.append(profile_name)
        return profiles_our_locale + profiles_remaining

    # We ignore an empty root_directory to avoid loading the min needs
    # profile into the test directory when tests are running.
    if not self.root_directory:
        profiles = []
        return profiles
    else:
        locale_minimum_needs_dir = os.path.join(
            self.root_directory, 'minimum_needs')
        path_name = resources_path('minimum_needs')
        if not os.path.exists(locale_minimum_needs_dir):
            os.makedirs(locale_minimum_needs_dir)
        # load default min needs profile
        for file_name in os.listdir(path_name):
            source_file = os.path.join(path_name, file_name)
            destination_file = os.path.join(
                locale_minimum_needs_dir, file_name)
            if not os.path.exists(destination_file) or overwrite:
                copy(source_file, destination_file)
        # move old min needs profile under user profile to inasafe
        # subdirectory
        self.move_old_profile(locale_minimum_needs_dir)
        profiles = [
            profile[:-5] for profile in
            os.listdir(locale_minimum_needs_dir) if
            profile[-5:] == '.json']
        profiles = sort_by_locale(profiles, self.locale)
        return profiles |
java | private ConfigurationRequest getConfigurationRequest(Map<String, String> userNames) {
ConfigurationRequest configurationRequest = new ConfigurationRequest();
configurationRequest.addField(new TextField("sender",
"Sender",
"[email protected]",
"The sender of sent out mail alerts",
ConfigurationField.Optional.OPTIONAL));
configurationRequest.addField(new TextField("subject",
"E-Mail Subject",
"Graylog alert for stream: ${stream.title}: ${check_result.resultDescription}",
"The subject of sent out mail alerts",
ConfigurationField.Optional.NOT_OPTIONAL));
configurationRequest.addField(new TextField("body",
"E-Mail Body",
FormattedEmailAlertSender.bodyTemplate,
"The template to generate the body from",
ConfigurationField.Optional.OPTIONAL,
TextField.Attribute.TEXTAREA));
configurationRequest.addField(new ListField(CK_USER_RECEIVERS,
"User Receivers",
Collections.emptyList(),
userNames,
"Graylog usernames that should receive this alert",
ConfigurationField.Optional.OPTIONAL));
configurationRequest.addField(new ListField(CK_EMAIL_RECEIVERS,
"E-Mail Receivers",
Collections.emptyList(),
Collections.emptyMap(),
"E-Mail addresses that should receive this alert",
ConfigurationField.Optional.OPTIONAL,
ListField.Attribute.ALLOW_CREATE));
return configurationRequest;
} |
java | public long getLength(String path)
{
try {
ZipEntry entry = getZipEntry(path);
            return entry != null ? entry.getSize() : -1;
} catch (IOException e) {
log.log(Level.FINE, e.toString(), e);
return -1;
}
} |
java | public void execute( Example example )
{
try
{
List<Fixture> fixtures = getFixtureList();
Example headers = example.at( 0, 0 );
if (columns == null) {
columns = getHeaderColumns(headers);
}
if (example.hasSibling()) {
RowFixtureSplitter splitter = new RowFixtureSplitter();
splitter.split(example.at(1), fixtures, columns);
for (RowFixture rowFixture : splitter.getMatch()) {
Example row = rowFixture.getRow();
executeRow(row.firstChild(), headers, rowFixture.getAdapter());
if (shouldStop(stats)) {
row.addChild().annotate(Annotations.stopped());
break;
}
}
if (mustProcessMissing() && canContinue(stats)) {
for (Example row : splitter.getMissing()) {
missingRow(row);
if (shouldStop(stats)) {
row.addChild().annotate(Annotations.stopped());
break;
}
}
}
if (mustProcessSurplus() && canContinue(stats)) {
for (Fixture adapter : splitter.getSurplus()) {
addSurplusRow(example, headers, adapter);
if (shouldStop(stats)) {
example.lastSibling().addChild().annotate(Annotations.stopped());
break;
}
}
}
}
}
catch (Exception e)
{
stats.exception();
example.firstChild().annotate( exception( e ) );
if (shouldStop( stats ))
{
example.addChild().annotate(Annotations.stopped());
}
}
} |
python | def setComponents(self, components):
        """Clears and sets the components contained in this widget

        :param components: list of documentation for subclasses of AbstractStimulusComponent
        :type components: list<dict>
        """
        layout = self.layout()
        # Clear any existing widgets first so the new components
        # replace, rather than append to, the previous contents.
        while layout.count():
            item = layout.takeAt(0)
            if item.widget() is not None:
                item.widget().deleteLater()
        for comp in components:
            attrWidget = ComponentAttributerChecker(comp)
            layout.addWidget(attrWidget) |
java | @Nonnull
public static FileIOError createDir (@Nonnull final File aDir)
{
ValueEnforcer.notNull (aDir, "Directory");
// Does the directory already exist?
if (aDir.exists ())
return EFileIOErrorCode.TARGET_ALREADY_EXISTS.getAsIOError (EFileIOOperation.CREATE_DIR, aDir);
// Is the parent directory writable?
final File aParentDir = aDir.getParentFile ();
if (aParentDir != null && aParentDir.exists () && !aParentDir.canWrite ())
return EFileIOErrorCode.SOURCE_PARENT_NOT_WRITABLE.getAsIOError (EFileIOOperation.CREATE_DIR, aDir);
try
{
final EFileIOErrorCode eError = aDir.mkdir () ? EFileIOErrorCode.NO_ERROR : EFileIOErrorCode.OPERATION_FAILED;
return eError.getAsIOError (EFileIOOperation.CREATE_DIR, aDir);
}
catch (final SecurityException ex)
{
return EFileIOErrorCode.getSecurityAsIOError (EFileIOOperation.CREATE_DIR, ex);
}
} |
python | def user(self, extra_params=None):
"""
        The User currently assigned to the Ticket, or None if unassigned
"""
if self.get('assigned_to_id', None):
users = self.space.users(
id=self['assigned_to_id'],
extra_params=extra_params
)
if users:
return users[0] |
java | @Override
public final void visitClass(@Nullable JavaTypeElement oldType, @Nullable JavaTypeElement newType) {
depth++;
doVisitClass(oldType, newType);
} |
java | public static int getColorAttr(Context ctx, @AttrRes int colorAttrId){
int[] attrs = new int[] { colorAttrId /* index 0 */};
TypedArray ta = ctx.obtainStyledAttributes(attrs);
int colorFromTheme = ta.getColor(0, 0);
ta.recycle();
return colorFromTheme;
} |
python | def make_saml_response(binding, http_args):
"""
Creates a SAML response.
:param binding: SAML response binding
:param http_args: http arguments
:return: response.Response
"""
if binding == BINDING_HTTP_REDIRECT:
headers = dict(http_args["headers"])
return SeeOther(str(headers["Location"]))
return Response(http_args["data"], headers=http_args["headers"]) |
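A hedged usage sketch; the http_args shape is inferred from the body above (headers as (name, value) pairs plus a "data" payload), and the binding constant is the standard pysaml2 one.

from saml2 import BINDING_HTTP_REDIRECT

redirect_args = {
    'headers': [('Location', 'https://idp.example.org/sso?SAMLRequest=...')],
    'data': '',
}
resp = make_saml_response(BINDING_HTTP_REDIRECT, redirect_args)
# resp is a SeeOther redirect to the IdP; any other binding yields a
# plain Response wrapping http_args['data'].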
python | def init_log_rate(output_f, N=None, message='', print_rate=None):
"""Initialze the log_rate function. Returnas a partial function to call for
each event.
If N is not specified but print_rate is specified, the initial N is
set to 100, and after the first message, the N value is adjusted to
emit print_rate messages per second
"""
if print_rate and not N:
N = 100
if not N:
N = 5000
d = [0, # number of items processed
time(), # start time. This one gets replaced after first message
N, # ticker to next message
N, # frequency to log a message
message,
print_rate,
deque([], maxlen=4) # Deque for averaging last N rates
]
assert isinstance(output_f, Callable)
f = partial(_log_rate, output_f, d)
f.always = output_f
f.count = lambda: d[0]
return f |
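A hedged usage sketch: it assumes, as the partial suggests, that the returned function can be called with no extra arguments; the rows iterable and process function are illustrative placeholders.

log = init_log_rate(print, message='rows processed: ', print_rate=5)
for row in rows:
    process(row)
    log()            # emits a progress line roughly 5 times per second
print('total rows:', log.count())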
java | public void closeElement() throws XMLException, IOException {
if (!Type.START_ELEMENT.equals(event.type))
throw new IOException("Invalid call of closeElement: it must be called on a start element");
if (event.isClosed) return;
ElementContext ctx = event.context.getFirst();
do {
try { next(); }
catch (EOFException e) {
throw new XMLException(getPosition(), "Unexpected end", "closeElement(" + ctx.text.asString() + ")");
}
if (Type.END_ELEMENT.equals(event.type)) {
if (event.context.getFirst() == ctx)
return;
}
} while (true);
} |
java | public static RelationGraph from(@NonNull Collection<RelationEdge> edges) {
RelationGraph gPrime = new RelationGraph();
edges.forEach(e -> {
if (!gPrime.containsVertex(e.getFirstVertex())) {
gPrime.addVertex(e.getFirstVertex());
}
if (!gPrime.containsVertex(e.getSecondVertex())) {
gPrime.addVertex(e.getSecondVertex());
}
gPrime.addEdge(e);
});
return gPrime;
} |
java | public PlanetFactorySchematicResponse getUniverseSchematicsSchematicId(Integer schematicId, String datasource,
String ifNoneMatch) throws ApiException {
ApiResponse<PlanetFactorySchematicResponse> resp = getUniverseSchematicsSchematicIdWithHttpInfo(schematicId,
datasource, ifNoneMatch);
return resp.getData();
} |
java | private int drainToSize(int minPooled, int maxDiscard)
{
// Stop draining if we have reached the maximum drain amount.
// This slows the drain process, allowing for the pool to become
// active before becoming fully drained (to minimum level).
int numDiscarded = 0;
Object o = null;
while (numDiscarded < maxDiscard) {
o = buffer.popWithLimit(minPooled);
if (o != null) {
++numDiscarded;
if (discardStrategy != null) {
discardStrategy.discard(o);
}
} else
break;
}
if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
Tr.debug(tc, "drainToSize: numDiscarded=" + numDiscarded + ", inactive=" + ivInactiveNoDrainCount + ", " + this);
if (beanPerf != null) { // Update PMI data
beanPerf.poolDrained(buffer.size(), numDiscarded);
}
return numDiscarded;
} |
java | @Override
public void showLoading() {
final FragmentIf fragmentIf = getFragmentIf();
fragmentIf.executeOnUIThread(new Runnable() {
@Override
public void run() {
if (loading != null) {
loading.show(fragmentIf);
}
}
});
} |
java | @Override
public void onAdd(Response response) {
notifyWebSocket("response", response);
logger.info("Reporter observed response for user [" + response.getUser().getUsername() + "]");
} |
java | public int compareSwappedTo(IntDoublePair other) {
int fdiff = Double.compare(this.second, other.second);
if(fdiff != 0) {
return fdiff;
}
    // Integer.compare avoids the overflow that plain subtraction can cause.
    return Integer.compare(this.first, other.first);
} |
java | public Observable<DetectorResponseInner> getHostingEnvironmentDetectorResponseAsync(String resourceGroupName, String name, String detectorName) {
return getHostingEnvironmentDetectorResponseWithServiceResponseAsync(resourceGroupName, name, detectorName).map(new Func1<ServiceResponse<DetectorResponseInner>, DetectorResponseInner>() {
@Override
public DetectorResponseInner call(ServiceResponse<DetectorResponseInner> response) {
return response.body();
}
});
} |
python | def submit(self, job):
"""
Submits a given job
:param job: The job to submit
:type job: pyqueue.job.JobInterface
"""
script = self._printer.generate(job)
stdin, stdout, stderr = self._ssh.exec_command('sbatch')
stdin.write(script)
stdin.flush()
stdin.channel.shutdown_write()
return stdout.read() |
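Sketch of the call pattern; client stands in for whatever object owns the _ssh connection and _printer used by submit() above, and job is any pyqueue.job.JobInterface implementation.

output = client.submit(job)
print(output)  # e.g. b'Submitted batch job 123456' on a Slurm cluster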
java | public String showRPN() throws Exception
{
final StringBuilder sb = new StringBuilder();
showRPN(sb);
return sb.toString();
} |
java | public void setMaxBoxWd(Integer newMaxBoxWd) {
Integer oldMaxBoxWd = maxBoxWd;
maxBoxWd = newMaxBoxWd;
if (eNotificationRequired())
eNotify(new ENotificationImpl(this, Notification.SET, AfplibPackage.FNC__MAX_BOX_WD, oldMaxBoxWd, maxBoxWd));
} |
python | def _children(self):
"""Yield all direct children of this object."""
if isinstance(self.condition, CodeExpression):
yield self.condition
for codeobj in self.body._children():
yield codeobj
for codeobj in self.else_body._children():
yield codeobj |
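A small depth-first walk built on the _children() generator above; it assumes every node in this code-object hierarchy exposes the same generator, which holds for the class shown but is otherwise an assumption.

def walk(codeobj):
    # Yield the node itself, then recurse into each direct child.
    yield codeobj
    for child in codeobj._children():
        yield from walk(child)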
java | public Client<? extends Options, ? extends OptionsBuilder, ? extends RequestBuilder> newClient() {
if (clientClassName == null) {
return new DefaultClient();
} else {
try {
return (Client) Thread.currentThread().getContextClassLoader().loadClass(clientClassName).newInstance();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
} |
python | def get_column_type(column, column_values):
"""
    Returns the type of the given column based on its row values in the given RunSetResult.
@param column: the column to return the correct ColumnType for
@param column_values: the column values to consider
@return: a tuple of a type object describing the column - the concrete ColumnType is stored in the attribute 'type',
the display unit of the column, which may be None,
the source unit of the column, which may be None,
and the scale factor to convert from the source unit to the display unit.
If no scaling is necessary for conversion, this value is 1.
"""
try:
return _get_column_type_heur(column, column_values)
except util.TableDefinitionError as e:
        logging.error("Column type couldn't be determined: {}".format(e))
return ColumnType.text, None, None, 1 |
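A hedged sketch of consuming the 4-tuple documented above; column and column_values would come from benchexec's table-generator internals and are placeholders here.

col_type, display_unit, source_unit, scale = get_column_type(
    column, column_values)
if scale != 1:
    # Convert stored values from the source unit to the display unit.
    column_values = [v * scale for v in column_values]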
java | public static StringBuilder replaceResources(StringBuilder sb, ResourceBundle reg, Map<String, Object> map, PropertyOwner propertyOwner)
{
return Utility.replaceResources(sb, reg, map, propertyOwner, false);
} |