language | func_code_string
---|---
java | public static Endpoint determineEndpointForRequest(final RequestAbstractType authnRequest,
final SamlRegisteredServiceServiceProviderMetadataFacade adaptor,
final String binding) {
var endpoint = (Endpoint) null;
if (authnRequest instanceof LogoutRequest) {
endpoint = adaptor.getSingleLogoutService(binding);
} else {
val endpointReq = getAssertionConsumerServiceFromRequest(authnRequest, binding);
endpoint = endpointReq == null
? adaptor.getAssertionConsumerService(binding)
: endpointReq;
}
if (endpoint == null || StringUtils.isBlank(endpoint.getBinding())) {
throw new SamlException("Assertion consumer service does not define a binding");
}
val location = StringUtils.isBlank(endpoint.getResponseLocation()) ? endpoint.getLocation() : endpoint.getResponseLocation();
if (StringUtils.isBlank(location)) {
throw new SamlException("Assertion consumer service does not define a target location");
}
return endpoint;
} |
python | def makeLabel(self, value):
"""Create a label for the specified value.
Create a label string containing the value and its units (if any),
based on the values of self.step, self.span, and self.unitSystem.
"""
value, prefix = format_units(value, self.step,
system=self.unitSystem)
span, spanPrefix = format_units(self.span, self.step,
system=self.unitSystem)
if prefix:
prefix += " "
if value < 0.1:
return "%g %s" % (float(value), prefix)
elif value < 1.0:
return "%.2f %s" % (float(value), prefix)
if span > 10 or spanPrefix != prefix:
if type(value) is float:
return "%.1f %s" % (value, prefix)
else:
return "%d %s" % (int(value), prefix)
elif span > 3:
return "%.1f %s" % (float(value), prefix)
elif span > 0.1:
return "%.2f %s" % (float(value), prefix)
else:
return "%g %s" % (float(value), prefix) |
java | public T addParams(Iterable<? extends TemplateParam> newParams) {
Set<String> seenParamKeys = new HashSet<>();
if (this.params == null) {
this.params = ImmutableList.copyOf(newParams);
} else {
for (TemplateParam oldParam : this.params) {
seenParamKeys.add(oldParam.name());
}
this.params =
ImmutableList.<TemplateParam>builder().addAll(this.params).addAll(newParams).build();
}
// Check new params.
for (TemplateParam param : newParams) {
if (param.name().equals("ij")) {
errorReporter.report(param.nameLocation(), INVALID_PARAM_NAMED_IJ);
}
if (!seenParamKeys.add(param.name())) {
errorReporter.report(param.nameLocation(), PARAM_ALREADY_DECLARED, param.name());
}
}
return (T) this;
} |
java | @Override
public boolean eIsSet(int featureID) {
switch (featureID) {
case AfplibPackage.TEXT_ORIENTATION__IAXIS:
return IAXIS_EDEFAULT == null ? iAxis != null : !IAXIS_EDEFAULT.equals(iAxis);
case AfplibPackage.TEXT_ORIENTATION__BAXIS:
return BAXIS_EDEFAULT == null ? bAxis != null : !BAXIS_EDEFAULT.equals(bAxis);
}
return super.eIsSet(featureID);
} |
java | public DERObject toASN1Object()
{
ASN1EncodableVector v = new ASN1EncodableVector();
v.add(algId);
v.add(keyData);
return new DERSequence(v);
} |
python | def set_gid(self):
"""Change the group of the running process"""
if self.group:
gid = getgrnam(self.group).gr_gid
try:
os.setgid(gid)
except Exception:
message = ("Unable to switch ownership to {0}:{1}. " +
"Did you start the daemon as root?")
print(message.format(self.user, self.group))
sys.exit(1) |
python | def child(self, fragment):
"""
Returns a path of a child item represented by `fragment`.
"""
return os.path.join(self.path, FS(fragment).path) |
python | def put(text, cbname):
""" Put the given string into the given clipboard. """
global _lastSel
_checkTkInit()
if cbname == 'CLIPBOARD':
_theRoot.clipboard_clear()
if text:
# for clipboard_append, kwds can be -displayof, -format, or -type
_theRoot.clipboard_append(text)
return
if cbname == 'PRIMARY':
_lastSel = text
_theRoot.selection_handle(ch_handler, selection='PRIMARY')
# we need to claim/own it so that ch_handler is used
_theRoot.selection_own(selection='PRIMARY')
# could add command arg for a func to be called when we lose ownership
return
raise RuntimeError("Unexpected clipboard name: "+str(cbname)) |
java | public void print(int i) throws IOException {
if (writer != null) {
writer.write(String.valueOf(i));
} else {
write(String.valueOf(i));
}
} |
java | private void recycleHeaderIfExists(WrapperView wv) {
View header = wv.mHeader;
if (header != null) {
// reset the headers visibility when adding it to the cache
header.setVisibility(View.VISIBLE);
mHeaderCache.add(header);
}
} |
java | @Override
public <T> int insertBatch(String entityName, List<T> entities) {
return insertBatch(entityName, entities.toArray());
} |
java | public void setReplacementTags(java.util.Collection<MessageTag> replacementTags) {
if (replacementTags == null) {
this.replacementTags = null;
return;
}
this.replacementTags = new com.amazonaws.internal.SdkInternalList<MessageTag>(replacementTags);
} |
python | def ranges_intersect(rset):
"""
Recursively calls the range_intersect() - pairwise version.
>>> ranges_intersect([(48, 65), (45, 55), (50, 56)])
[50, 55]
"""
if not rset:
return None
a = rset[0]
for b in rset[1:]:
if not a:
return None
a = range_intersect(a, b)
return a |
python | def login(self, user_id, password, svctype = "Android NDrive App ver", auth = 0):
"""Log in Naver and get cookie
    Args:
user_id: Naver account's login id
password: Naver account's login password
Returns:
True: Login success
False: Login failed
Remarks:
self.cookie is a dictionary with 5 keys: path, domain, NID_AUT, nid_inf, NID_SES
"""
self.user_id = user_id
self.password = password
if self.user_id == None or self.password == None:
print "[*] Error __init__: user_id and password is needed"
return False
try:
cookie = naver_login(user_id, password)
except:
return False
self.session.cookies.set('NID_AUT', cookie["NID_AUT"])
self.session.cookies.set('NID_SES', cookie["NID_SES"])
s = self.getRegisterUserInfo(svctype, auth)
if s is True:
return True
else:
print "[*] Error getRegisterUserInfo: failed"
return False |
java | public void updateTreeContent(List<CmsGalleryTreeEntry> galleryTreeEntries, List<String> selectedGalleries) {
clearList();
m_selectedGalleries = selectedGalleries;
if (!galleryTreeEntries.isEmpty()) {
m_itemIterator = new TreeItemGenerator(galleryTreeEntries);
loadMoreItems();
} else {
showIsEmptyLabel();
}
onContentChange();
} |
python | def json_2_application(json_obj):
"""
transform JSON obj coming from Ariane to ariane_clip3 object
:param json_obj: the JSON obj coming from Ariane
:return: ariane_clip3 Application object
"""
LOGGER.debug("Application.json_2_application")
return Application(appid=json_obj['applicationID'],
name=json_obj['applicationName'],
description=json_obj['applicationDescription'],
short_name=json_obj['applicationShortName'],
color_code=json_obj['applicationColorCode'],
company_id=json_obj['applicationCompanyID'],
team_id=json_obj['applicationTeamID'],
osi_ids=json_obj['applicationOSInstancesID']) |
python | def upsampling_feature_passthrough(early_feat, late_feat, filters, name, kernel_size=(1, 1)):
"""
An upsampling feature passthrough layer inspired by yolo9000 and the tiling layer.
It can be proven, that this layer does the same as conv(concat(early_feat, tile_2d(late_feat))).
This layer has no activation function.
:param early_feat: The early feature layer of shape [batch_size, h * s_x, w * s_y, _].
s_x and s_y are integers computed internally describing the scale between the layers.
:param late_feat: The late feature layer of shape [batch_size, h, w, _].
:param filters: The number of convolution filters.
:param name: The name of the layer.
:param kernel_size: The size of the kernel. Default (1x1).
:return: The output tensor of shape [batch_size, h * s_x, w * s_y, outputs]
"""
_, h_early, w_early, c_early = early_feat.get_shape().as_list()
_, h_late, w_late, c_late = late_feat.get_shape().as_list()
s_x = int(w_early / w_late)
s_y = int(h_early / h_late)
assert h_late * s_y == h_early and w_late * s_x == w_early
with tf.variable_scope(name) as scope:
tiled = tile_2d(late_feat, s_x, s_y, "tile_2d", reorder_required=False)
concated = tf.concat([early_feat, tiled], axis=-1)
return tf.layers.conv2d(concated, filters=filters, kernel_size=kernel_size, strides=(1, 1), padding="same") |
python | def _copy(src, dst):
"""Copy src to dst, copying recursively if src is a directory."""
try:
shutil.copy(src, dst)
except IsADirectoryError:
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
shutil.copytree(src, dst) |
python | def mkstemp(suffix=None, prefix=None, dir=None, text=False):
"""
Args:
suffix (`pathlike` or `None`): suffix or `None` to use the default
prefix (`pathlike` or `None`): prefix or `None` to use the default
dir (`pathlike` or `None`): temp dir or `None` to use the default
text (bool): if the file should be opened in text mode
Returns:
Tuple[`int`, `fsnative`]:
A tuple containing the file descriptor and the file path
Raises:
EnvironmentError
Like :func:`python3:tempfile.mkstemp` but always returns a `fsnative`
path.
"""
suffix = fsnative() if suffix is None else path2fsn(suffix)
prefix = gettempprefix() if prefix is None else path2fsn(prefix)
dir = gettempdir() if dir is None else path2fsn(dir)
return tempfile.mkstemp(suffix, prefix, dir, text) |
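A minimal usage sketch for the wrapper above (the enclosing module is not shown here, so treat the function as already in scope; the names are illustrative):

```python
import os

fd, path = mkstemp(suffix=".log", prefix="demo-")  # wrapper defined above
try:
    os.write(fd, b"hello\n")
finally:
    os.close(fd)
    os.remove(path)
```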
python | def any_embedded_linux(self):
"""Check whether the current board is any embedded Linux device."""
return self.any_raspberry_pi or self.any_beaglebone or \
self.any_orange_pi or self.any_giant_board or self.any_jetson_board |
python | def preloop(self):
''' Keep persistent command history. '''
if not self.already_prelooped:
self.already_prelooped = True
open('.psiturk_history', 'a').close() # create file if it doesn't exist
readline.read_history_file('.psiturk_history')
for i in range(readline.get_current_history_length()):
if readline.get_history_item(i) is not None:
self.history.append(readline.get_history_item(i)) |
java | public static HttpResponseStatus parseLine(AsciiString line) {
try {
int space = line.forEachByte(FIND_ASCII_SPACE);
return space == -1 ? valueOf(line.parseInt()) : valueOf(line.parseInt(0, space), line.toString(space + 1));
} catch (Exception e) {
throw new IllegalArgumentException("malformed status line: " + line, e);
}
} |
java | @Override protected void render() {
TvShowViewModel tvShow = getContent();
Picasso.with(context)
.load(tvShow.getPoster())
.placeholder(R.drawable.tv_show_placeholder)
.into(thumbnailImageView);
titleTextView.setText(tvShow.getTitle().toUpperCase());
seasonsCounterTextView.setText(tvShow.getNumberOfSeasons() + " seasons");
} |
python | def normalize_dictionary(data_dict):
"""
Converts all the keys in "data_dict" to strings. The keys must be
convertible using str().
"""
    for key, value in list(data_dict.items()):  # snapshot: the dict is mutated below
if not isinstance(key, str):
del data_dict[key]
data_dict[str(key)] = value
return data_dict |
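For illustration, a quick call with hypothetical data:

```python
d = normalize_dictionary({1: "one", "two": 2})
print(d)  # {'two': 2, '1': 'one'} -- all keys are now strings
```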
java | private static void reorderLine(Bidi bidi, byte minLevel, byte maxLevel) {
/* nothing to do? */
if (maxLevel<=(minLevel|1)) {
return;
}
BidiRun[] runs;
BidiRun tempRun;
byte[] levels;
int firstRun, endRun, limitRun, runCount;
/*
* Reorder only down to the lowest odd level
* and reorder at an odd minLevel in a separate, simpler loop.
* See comments above for why minLevel is always incremented.
*/
++minLevel;
runs = bidi.runs;
levels = bidi.levels;
runCount = bidi.runCount;
/* do not include the WS run at paraLevel<=old minLevel except in the simple loop */
if (bidi.trailingWSStart < bidi.length) {
--runCount;
}
while (--maxLevel >= minLevel) {
firstRun = 0;
/* loop for all sequences of runs */
for ( ; ; ) {
/* look for a sequence of runs that are all at >=maxLevel */
/* look for the first run of such a sequence */
while (firstRun < runCount && levels[runs[firstRun].start] < maxLevel) {
++firstRun;
}
if (firstRun >= runCount) {
break; /* no more such runs */
}
/* look for the limit run of such a sequence (the run behind it) */
for (limitRun = firstRun; ++limitRun < runCount &&
levels[runs[limitRun].start]>=maxLevel; ) {}
/* Swap the entire sequence of runs from firstRun to limitRun-1. */
endRun = limitRun - 1;
while (firstRun < endRun) {
tempRun = runs[firstRun];
runs[firstRun] = runs[endRun];
runs[endRun] = tempRun;
++firstRun;
--endRun;
}
if (limitRun == runCount) {
break; /* no more such runs */
} else {
firstRun = limitRun + 1;
}
}
}
/* now do maxLevel==old minLevel (==odd!), see above */
if ((minLevel & 1) == 0) {
firstRun = 0;
/* include the trailing WS run in this complete reordering */
if (bidi.trailingWSStart == bidi.length) {
--runCount;
}
/* Swap the entire sequence of all runs. (endRun==runCount) */
while (firstRun < runCount) {
tempRun = runs[firstRun];
runs[firstRun] = runs[runCount];
runs[runCount] = tempRun;
++firstRun;
--runCount;
}
}
} |
java | @Override
public void eUnset(int featureID)
{
switch (featureID)
{
case TypesPackage.JVM_OPERATION__STATIC:
setStatic(STATIC_EDEFAULT);
return;
case TypesPackage.JVM_OPERATION__FINAL:
setFinal(FINAL_EDEFAULT);
return;
case TypesPackage.JVM_OPERATION__ABSTRACT:
setAbstract(ABSTRACT_EDEFAULT);
return;
case TypesPackage.JVM_OPERATION__RETURN_TYPE:
setReturnType((JvmTypeReference)null);
return;
case TypesPackage.JVM_OPERATION__DEFAULT_VALUE:
setDefaultValue((JvmAnnotationValue)null);
return;
case TypesPackage.JVM_OPERATION__SYNCHRONIZED:
setSynchronized(SYNCHRONIZED_EDEFAULT);
return;
case TypesPackage.JVM_OPERATION__DEFAULT:
setDefault(DEFAULT_EDEFAULT);
return;
case TypesPackage.JVM_OPERATION__NATIVE:
setNative(NATIVE_EDEFAULT);
return;
case TypesPackage.JVM_OPERATION__STRICT_FLOATING_POINT:
setStrictFloatingPoint(STRICT_FLOATING_POINT_EDEFAULT);
return;
}
super.eUnset(featureID);
} |
python | def rm_watch(self, wd, rec=False, quiet=True):
"""
Removes watch(s).
@param wd: Watch Descriptor of the file or directory to unwatch.
Also accepts a list of WDs.
@type wd: int or list of int.
@param rec: Recursively removes watches on every already watched
subdirectories and subfiles.
@type rec: bool
@param quiet: If False raises a WatchManagerError exception on
error. See example not_quiet.py
@type quiet: bool
@return: dict of watch descriptors associated to booleans values.
True if the corresponding wd has been successfully
removed, False otherwise.
@rtype: dict of {int: bool}
"""
lwd = self.__format_param(wd)
if rec:
lwd = self.__get_sub_rec(lwd)
ret_ = {} # return {wd: bool, ...}
for awd in lwd:
# remove watch
wd_ = self._inotify_wrapper.inotify_rm_watch(self._fd, awd)
if wd_ < 0:
ret_[awd] = False
err = ('rm_watch: cannot remove WD=%d, %s' % \
(awd, self._inotify_wrapper.str_errno()))
if quiet:
log.error(err)
continue
raise WatchManagerError(err, ret_)
# Remove watch from our dictionary
if awd in self._wmd:
del self._wmd[awd]
ret_[awd] = True
log.debug('Watch WD=%d (%s) removed', awd, self.get_path(awd))
return ret_ |
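A hedged usage sketch; this method matches the pyinotify `WatchManager` API, so the example assumes that library:

```python
import pyinotify

wm = pyinotify.WatchManager()
wdd = wm.add_watch('/tmp', pyinotify.IN_MODIFY, rec=True)  # {path: wd}
result = wm.rm_watch(wdd['/tmp'], rec=True)
# result maps each removed watch descriptor to True/False
```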
java | public static void main(final String[] args) throws ServletException, IOException, JspException {
final Map<String, String> config = toMap(args);
System.setProperty("lucee.cli.call", "true");
final boolean useRMI = "true".equalsIgnoreCase(config.get("rmi"));
File root;
final String param = config.get("webroot");
if (Util.isEmpty(param, true)) {
root = new File("."); // working directory that the java command was called from
config.put("webroot", root.getAbsolutePath());
}
else {
root = new File(param);
root.mkdirs();
}
// System.out.println("set webroot to: " + root.getAbsolutePath());
String servletName = config.get("servlet-name");
if (Util.isEmpty(servletName, true)) servletName = "CFMLServlet";
if (useRMI) {
final CLIFactory factory = new CLIFactory(root, servletName, config);
factory.setDaemon(false);
factory.start();
}
else {
final CLIInvokerImpl invoker = new CLIInvokerImpl(root, servletName);
invoker.invoke(config);
}
} |
python | def execute(self, i, o):
"""
Executes the command.
:type i: cleo.inputs.input.Input
:type o: cleo.outputs.output.Output
"""
super(InstallCommand, self).execute(i, o)
database = i.get_option('database')
repository = DatabaseMigrationRepository(self._resolver, 'migrations')
repository.set_source(database)
repository.create_repository()
o.writeln('<info>Migration table created successfully</info>') |
java | public static boolean hasNamedAnnotation(AnnotatedConstruct ac, Pattern pattern) {
for (AnnotationMirror annotation : ac.getAnnotationMirrors()) {
if (pattern.matcher(getName(annotation.getAnnotationType().asElement())).matches()) {
return true;
}
}
return false;
} |
python | def perform_smooth(x_values, y_values, span=None, smoother_cls=None):
"""
Convenience function to run the basic smoother.
Parameters
----------
x_values : iterable
List of x value observations
y_ values : iterable
list of y value observations
span : float, optional
Fraction of data to use as the window
smoother_cls : Class
The class of smoother to use to smooth the data
Returns
-------
smoother : object
The smoother object with results stored on it.
"""
if smoother_cls is None:
smoother_cls = DEFAULT_BASIC_SMOOTHER
smoother = smoother_cls()
smoother.specify_data_set(x_values, y_values)
smoother.set_span(span)
smoother.compute()
return smoother |
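A usage sketch under the assumption that `DEFAULT_BASIC_SMOOTHER` and the smoother interface behave as the docstring describes:

```python
x = [i / 10.0 for i in range(100)]
y = [xi ** 2 for xi in x]
smoother = perform_smooth(x, y, span=0.3)
# Results live on the returned object; the exact attribute names
# depend on the smoother class used (illustrative only).
```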
java | @Override
public void validate(final List<Diagnostic> diags) {
// Validate each row.
List beanList = this.getBeanList();
WComponent row = getRepeatedComponent();
for (int i = 0; i < beanList.size(); i++) {
Object rowData = beanList.get(i);
UIContext rowContext = getRowContext(rowData, i);
UIContextHolder.pushContext(rowContext);
try {
row.validate(diags);
} finally {
UIContextHolder.popContext();
}
}
} |
python | def point_window_unitxy(x, y, affine):
""" Given an x, y and a geotransform
Returns
- rasterio window representing 2x2 window whose center points encompass point
- the cartesian x, y coordinates of the point on the unit square
defined by the array center points.
((row1, row2), (col1, col2)), (unitx, unity)
"""
fcol, frow = ~affine * (x, y)
r, c = int(round(frow)), int(round(fcol))
# The new source window for our 2x2 array
new_win = ((r - 1, r + 1), (c - 1, c + 1))
# the new x, y coords on the unit square
unitxy = (0.5 - (c - fcol),
0.5 + (r - frow))
return new_win, unitxy |
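A worked example, assuming the `affine` package (the `~affine * (x, y)` inverse-transform idiom matches it):

```python
from affine import Affine

# 1-unit pixels, top-left corner at (0, 10), north-up
transform = Affine(1.0, 0.0, 0.0,
                   0.0, -1.0, 10.0)
win, (ux, uy) = point_window_unitxy(2.4, 7.3, transform)
print(win)     # ((2, 4), (1, 3)) -- the 2x2 source window
print(ux, uy)  # approximately 0.9 0.8 -- position on the unit square
```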
java | @Override
public ListOfferingPromotionsResult listOfferingPromotions(ListOfferingPromotionsRequest request) {
request = beforeClientExecution(request);
return executeListOfferingPromotions(request);
} |
python | def after_request(self, func: Callable) -> Callable:
"""Add an after request function to the Blueprint.
This is designed to be used as a decorator, and has the same arguments
as :meth:`~quart.Quart.after_request`. It applies only to requests that
are routed to an endpoint in this blueprint. An example usage,
.. code-block:: python
blueprint = Blueprint(__name__)
@blueprint.after_request
def after():
...
"""
self.record_once(lambda state: state.app.after_request(func, self.name))
return func |
java | @NonNull
public SourceParams setMetaData(@NonNull Map<String, String> metaData) {
mMetaData = metaData;
return this;
} |
java | final void setBodyType(JmsBodyType value) {
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
SibTr.entry(this, tc, "setBodyType", value);
setSubtype(value.toByte());
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
SibTr.exit(this, tc, "setBodyType");
} |
python | def unsubscribe_list(self, list_id):
"""
Unsubscribe to a list
:param list_id: list ID number
:return: :class:`~responsebot.models.List` object
"""
return List(tweepy_list_to_json(self._client.unsubscribe_list(list_id=list_id))) |
python | def edit(env, securitygroup_id, rule_id, remote_ip, remote_group,
direction, ethertype, port_max, port_min, protocol):
"""Edit a security group rule in a security group."""
mgr = SoftLayer.NetworkManager(env.client)
data = {}
if remote_ip:
data['remote_ip'] = remote_ip
if remote_group:
data['remote_group'] = remote_group
if direction:
data['direction'] = direction
if ethertype:
data['ethertype'] = ethertype
if port_max is not None:
data['port_max'] = port_max
if port_min is not None:
data['port_min'] = port_min
if protocol:
data['protocol'] = protocol
ret = mgr.edit_securitygroup_rule(securitygroup_id, rule_id, **data)
if not ret:
raise exceptions.CLIAbort("Failed to edit security group rule")
table = formatting.Table(REQUEST_BOOL_COLUMNS)
table.add_row([ret['requestId']])
env.fout(table) |
python | def copy_group(from_file, to_file, key):
"""Recursively copy all groups/datasets/attributes from from_file[key] to
to_file. Datasets are not overwritten, attributes are.
"""
if not key in to_file:
from_file.copy(key, to_file, key)
else:
# also make sure any additional attributes are copied
for attr in from_file[key].attrs:
to_file.attrs[attr] = from_file[key].attrs[attr]
if hasattr(from_file[key], 'keys'):
for subkey in from_file[key].keys():
copy_group(from_file, to_file, '/'.join([key,subkey])) |
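A short usage sketch; the `copy()`/`attrs`/`keys()` calls above match the h5py API, so h5py is assumed here:

```python
import h5py

with h5py.File("src.h5", "r") as src, h5py.File("dst.h5", "a") as dst:
    for key in src.keys():
        copy_group(src, dst, key)  # merges groups, keeps existing datasets
```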
java | public Subject replaceCallerSubject(Subject callerSubject) {
SubjectThreadContext subjectThreadContext = getSubjectThreadContext();
Subject replacedCallerSubject = subjectThreadContext.getCallerSubject();
subjectThreadContext.setCallerSubject(callerSubject);
return replacedCallerSubject;
} |
python | def _dismantle_callsign(self, callsign, timestamp=timestamp_now):
""" try to identify the callsign's identity by analyzing it in the following order:
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Raises:
KeyError: Callsign could not be identified
"""
entire_callsign = callsign.upper()
if re.search('[/A-Z0-9\-]{3,15}', entire_callsign): # make sure the call has at least 3 characters
if re.search('\-\d{1,3}$', entire_callsign): # cut off any -10 / -02 appendixes
callsign = re.sub('\-\d{1,3}$', '', entire_callsign)
if re.search('/[A-Z0-9]{1,4}/[A-Z0-9]{1,4}$', callsign):
callsign = re.sub('/[A-Z0-9]{1,4}$', '', callsign) # cut off 2. appendix DH1TW/HC2/P -> DH1TW/HC2
# multiple character appendix (callsign/xxx)
if re.search('[A-Z0-9]{4,10}/[A-Z0-9]{2,4}$', callsign): # case call/xxx, but ignoring /p and /m or /5
appendix = re.search('/[A-Z0-9]{2,4}$', callsign)
appendix = re.sub('/', '', appendix.group(0))
self._logger.debug("appendix: " + appendix)
            if appendix == 'MM': # special case Maritime Mobile
#self._mm = True
return {
'adif': 999,
'continent': '',
'country': 'MARITIME MOBILE',
'cqz': 0,
'latitude': 0.0,
'longitude': 0.0
}
elif appendix == 'AM': # special case Aeronautic Mobile
return {
'adif': 998,
'continent': '',
                    'country': 'AIRCRAFT MOBILE',
'cqz': 0,
'latitude': 0.0,
'longitude': 0.0
}
elif appendix == 'QRP': # special case QRP
callsign = re.sub('/QRP', '', callsign)
return self._iterate_prefix(callsign, timestamp)
elif appendix == 'QRPP': # special case QRPP
callsign = re.sub('/QRPP', '', callsign)
return self._iterate_prefix(callsign, timestamp)
elif appendix == 'BCN': # filter all beacons
callsign = re.sub('/BCN', '', callsign)
data = self._iterate_prefix(callsign, timestamp).copy()
data[const.BEACON] = True
return data
elif appendix == "LH": # Filter all Lighthouses
callsign = re.sub('/LH', '', callsign)
return self._iterate_prefix(callsign, timestamp)
elif re.search('[A-Z]{3}', appendix): #case of US county(?) contest N3HBX/UAL
callsign = re.sub('/[A-Z]{3}$', '', callsign)
return self._iterate_prefix(callsign, timestamp)
else:
# check if the appendix is a valid country prefix
return self._iterate_prefix(re.sub('/', '', appendix), timestamp)
# Single character appendix (callsign/x)
elif re.search('/[A-Z0-9]$', callsign): # case call/p or /b /m or /5 etc.
appendix = re.search('/[A-Z0-9]$', callsign)
appendix = re.sub('/', '', appendix.group(0))
if appendix == 'B': # special case Beacon
callsign = re.sub('/B', '', callsign)
data = self._iterate_prefix(callsign, timestamp).copy()
data[const.BEACON] = True
return data
elif re.search('\d$', appendix):
area_nr = re.search('\d$', appendix).group(0)
callsign = re.sub('/\d$', '', callsign) #remove /number
                if len(re.findall(r'\d+', callsign)) == 1: #call has just one digit e.g. DH1TW
callsign = re.sub('[\d]+', area_nr, callsign)
else: # call has several digits e.g. 7N4AAL
                    pass # no two-digit prefix countries known where the appendix would change the entity
return self._iterate_prefix(callsign, timestamp)
else:
return self._iterate_prefix(callsign, timestamp)
# regular callsigns, without prefix or appendix
elif re.match('^[\d]{0,1}[A-Z]{1,2}\d([A-Z]{1,4}|\d{3,3}|\d{1,3}[A-Z])[A-Z]{0,5}$', callsign):
return self._iterate_prefix(callsign, timestamp)
# callsigns with prefixes (xxx/callsign)
elif re.search('^[A-Z0-9]{1,4}/', entire_callsign):
pfx = re.search('^[A-Z0-9]{1,4}/', entire_callsign)
pfx = re.sub('/', '', pfx.group(0))
#make sure that the remaining part is actually a callsign (avoid: OZ/JO81)
rest = re.search('/[A-Z0-9]+', entire_callsign)
rest = re.sub('/', '', rest.group(0))
if re.match('^[\d]{0,1}[A-Z]{1,2}\d([A-Z]{1,4}|\d{3,3}|\d{1,3}[A-Z])[A-Z]{0,5}$', rest):
return self._iterate_prefix(pfx)
if entire_callsign in callsign_exceptions:
return self._iterate_prefix(callsign_exceptions[entire_callsign])
self._logger.debug("Could not decode " + callsign)
raise KeyError("Callsign could not be decoded") |
java | protected void doPaint(Graphics2D g, JComponent c, int width, int height, Object[] extendedCacheKeys) {
int x = focusInsets.left;
int y = focusInsets.top;
width -= focusInsets.left + focusInsets.right;
height -= focusInsets.top + focusInsets.bottom;
switch (state) {
case BACKGROUND_DISABLED:
case BACKGROUND_ENABLED:
case BACKGROUND_SELECTED:
paintBackground(g, c, x, y, width, height);
break;
case BORDER_DISABLED:
case BORDER_ENABLED:
case BORDER_FOCUSED:
paintBorder(g, c, x, y, width, height);
break;
}
} |
java | private Map<String, String> getAttributeTokens(LDAPConnection ldapConnection,
String username) throws GuacamoleException {
// Get attributes from configuration information
List<String> attrList = confService.getAttributes();
// If there are no attributes there is no reason to search LDAP
if (attrList.isEmpty())
return Collections.<String, String>emptyMap();
// Build LDAP query parameters
String[] attrArray = attrList.toArray(new String[attrList.size()]);
String userDN = getUserBindDN(username);
Map<String, String> tokens = new HashMap<String, String>();
try {
// Get LDAP attributes by querying LDAP
LDAPEntry userEntry = ldapConnection.read(userDN, attrArray);
if (userEntry == null)
return Collections.<String, String>emptyMap();
LDAPAttributeSet attrSet = userEntry.getAttributeSet();
if (attrSet == null)
return Collections.<String, String>emptyMap();
// Convert each retrieved attribute into a corresponding token
for (Object attrObj : attrSet) {
LDAPAttribute attr = (LDAPAttribute)attrObj;
tokens.put(TokenName.fromAttribute(attr.getName()), attr.getStringValue());
}
}
catch (LDAPException e) {
throw new GuacamoleServerException("Could not query LDAP user attributes.", e);
}
return tokens;
} |
python | def delete_service(self, stack, service):
"""删除服务
删除指定名称服务,并自动销毁服务已部署的所有容器和存储卷。
Args:
- stack: 服务所属的服务组名称
- service: 服务名
Returns:
返回一个tuple对象,其格式为(<result>, <ResponseInfo>)
- result 成功返回空dict{},失败返回{"error": "<errMsg string>"}
- ResponseInfo 请求的Response信息
"""
url = '{0}/v3/stacks/{1}/services/{2}'.format(self.host, stack, service)
return self.__delete(url) |
python | def serialize_number(x, fmt=SER_BINARY, outlen=None):
""" Serializes `x' to a string of length `outlen' in format `fmt' """
ret = b''
if fmt == SER_BINARY:
while x:
x, r = divmod(x, 256)
ret = six.int2byte(int(r)) + ret
if outlen is not None:
assert len(ret) <= outlen
ret = ret.rjust(outlen, b'\0')
return ret
assert fmt == SER_COMPACT
while x:
x, r = divmod(x, len(COMPACT_DIGITS))
ret = COMPACT_DIGITS[r:r + 1] + ret
if outlen is not None:
assert len(ret) <= outlen
ret = ret.rjust(outlen, COMPACT_DIGITS[0:1])
return ret |
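A quick check of the binary branch (pure arithmetic, no assumptions beyond the constants above):

```python
data = serialize_number(65535, fmt=SER_BINARY, outlen=4)
assert data == b"\x00\x00\xff\xff"  # 0xFFFF, left-padded with NUL bytes
```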
java | public TapStream tapCustom(final String id, final RequestMessage message)
throws ConfigurationException, IOException {
final TapConnectionProvider conn = new TapConnectionProvider(addrs);
final TapStream ts = new TapStream();
conn.broadcastOp(new BroadcastOpFactory() {
public Operation newOp(final MemcachedNode n,
final CountDownLatch latch) {
Operation op = conn.getOpFactory().tapCustom(id, message,
new TapOperation.Callback() {
public void receivedStatus(OperationStatus status) {
}
public void gotData(ResponseMessage tapMessage) {
rqueue.add(tapMessage);
messagesRead++;
}
public void gotAck(MemcachedNode node, TapOpcode opcode,
int opaque) {
rqueue.add(new TapAck(conn, node, opcode, opaque, this));
}
public void complete() {
latch.countDown();
}
});
ts.addOp((TapOperation)op);
return op;
}
});
synchronized (omap) {
omap.put(ts, conn);
}
return ts;
} |
python | def kick(self, channel, nick, reason=''):
"""
Kick someone from a channel.
Required arguments:
* channel - Channel to kick them from.
* nick - Nick to kick.
Optional arguments:
* reason - Reason for the kick.
"""
with self.lock:
self.is_in_channel(channel)
self.send('KICK %s %s :%s' % (channel, nick, reason))
if self.readable():
msg = self._recv(expected_replies=('KICK',))
if msg[0] == 'KICK':
channel = msg[1]
if not self.hide_called_events:
self.stepback()
if self.compare(self.current_nick, nick):
del self.channels[channel] |
python | def send(self, data, room=None, skip_sid=None, namespace=None,
callback=None):
"""Send a message to one or more connected clients.
The only difference with the :func:`socketio.Server.send` method is
that when the ``namespace`` argument is not given the namespace
associated with the class is used.
"""
return self.server.send(data, room=room, skip_sid=skip_sid,
namespace=namespace or self.namespace,
callback=callback) |
python | def _learner_interpret(learn:Learner, ds_type:DatasetType=DatasetType.Valid):
"Create a `ClassificationInterpretation` object from `learner` on `ds_type` with `tta`."
return ClassificationInterpretation.from_learner(learn, ds_type=ds_type) |
java | public BufferedImage getImageResource (String rset, String path)
throws IOException
{
// grab the resource bundles in the specified resource set
ResourceBundle[] bundles = getResourceSet(rset);
if (bundles == null) {
throw new FileNotFoundException(
"Unable to locate image resource [set=" + rset + ", path=" + path + "]");
}
String localePath = getLocalePath(path);
// look for the resource in any of the bundles
for (ResourceBundle bundle : bundles) {
BufferedImage image;
// try a localized version first
if (localePath != null) {
image = bundle.getImageResource(localePath, false);
if (image != null) {
return image;
}
}
// if we didn't find that, try generic
image = bundle.getImageResource(path, false);
if (image != null) {
return image;
}
}
throw new FileNotFoundException(
"Unable to locate image resource [set=" + rset + ", path=" + path + "]");
} |
java | public void fatal( Object messagePattern, Object arg )
{
if( m_delegate.isFatalEnabled() )
{
String msgStr = (String) messagePattern;
msgStr = MessageFormatter.format( msgStr, arg );
m_delegate.fatal( msgStr, null );
}
} |
java | @Override
public void run() {
long now = System.currentTimeMillis();
final long stamp = lock.readLock();
try {
for (MemoryPartition partition : partitions.values()) {
partition.removeOldEntries(now);
}
} finally {
lock.unlockRead(stamp);
}
} |
java | public static Annotation getAnnotation(Properties attributes) {
float llx = 0, lly = 0, urx = 0, ury = 0;
String value;
value = attributes.getProperty(ElementTags.LLX);
if (value != null) {
llx = Float.parseFloat(value + "f");
}
value = attributes.getProperty(ElementTags.LLY);
if (value != null) {
lly = Float.parseFloat(value + "f");
}
value = attributes.getProperty(ElementTags.URX);
if (value != null) {
urx = Float.parseFloat(value + "f");
}
value = attributes.getProperty(ElementTags.URY);
if (value != null) {
ury = Float.parseFloat(value + "f");
}
String title = attributes.getProperty(ElementTags.TITLE);
String text = attributes.getProperty(ElementTags.CONTENT);
if (title != null || text != null) {
return new Annotation(title, text, llx, lly, urx, ury);
}
value = attributes.getProperty(ElementTags.URL);
if (value != null) {
return new Annotation(llx, lly, urx, ury, value);
}
value = attributes.getProperty(ElementTags.NAMED);
if (value != null) {
return new Annotation(llx, lly, urx, ury, Integer.parseInt(value));
}
String file = attributes.getProperty(ElementTags.FILE);
String destination = attributes.getProperty(ElementTags.DESTINATION);
String page = (String) attributes.remove(ElementTags.PAGE);
if (file != null) {
if (destination != null) {
return new Annotation(llx, lly, urx, ury, file, destination);
}
if (page != null) {
return new Annotation(llx, lly, urx, ury, file, Integer
.parseInt(page));
}
}
return new Annotation("", "", llx, lly, urx, ury);
} |
python | def select_host(self, metric):
"""
Returns the carbon host that has data for the given metric.
"""
key = self.keyfunc(metric)
nodes = []
servers = set()
for node in self.hash_ring.get_nodes(key):
server, instance = node
if server in servers:
continue
servers.add(server)
nodes.append(node)
if len(servers) >= self.replication_factor:
break
available = [n for n in nodes if self.is_available(n)]
return random.choice(available or nodes) |
python | def find_program_variables(code):
"""
Return a dict describing program variables::
{'var_name': ('uniform|attribute|varying', type), ...}
"""
vars = {}
lines = code.split('\n')
for line in lines:
m = re.match(r"\s*" + re_prog_var_declaration + r"\s*(=|;)", line)
if m is not None:
vtype, dtype, names = m.groups()[:3]
for name in names.split(','):
vars[name.strip()] = (vtype, dtype)
return vars |
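Illustrative input and output per the docstring; the regex `re_prog_var_declaration` is defined elsewhere in the library, so the result shown is what it would plausibly yield:

```python
code = """
uniform mat4 u_model;
attribute vec3 a_position;
"""
# find_program_variables(code) would return something like:
# {'u_model': ('uniform', 'mat4'), 'a_position': ('attribute', 'vec3')}
```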
python | def log_variable(variable, gradient=None):
r'''
We introduce a function for logging a tensor variable's current state.
It logs scalar values for the mean, standard deviation, minimum and maximum.
Furthermore it logs a histogram of its state and (if given) of an optimization gradient.
'''
name = variable.name.replace(':', '_')
mean = tf.reduce_mean(variable)
tf.summary.scalar(name='%s/mean' % name, tensor=mean)
    tf.summary.scalar(name='%s/stddev' % name, tensor=tf.sqrt(tf.reduce_mean(tf.square(variable - mean))))
tf.summary.scalar(name='%s/max' % name, tensor=tf.reduce_max(variable))
tf.summary.scalar(name='%s/min' % name, tensor=tf.reduce_min(variable))
tf.summary.histogram(name=name, values=variable)
if gradient is not None:
if isinstance(gradient, tf.IndexedSlices):
grad_values = gradient.values
else:
grad_values = gradient
if grad_values is not None:
tf.summary.histogram(name='%s/gradients' % name, values=grad_values) |
python | def decode_array(values):
"""
Decode the values which are bytestrings.
"""
out = []
for val in values:
try:
out.append(val.decode('utf8'))
except AttributeError:
out.append(val)
return out |
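A one-line demonstration with mixed bytes and non-bytes input:

```python
print(decode_array([b"caf\xc3\xa9", "plain", 42]))
# -> ['café', 'plain', 42]  (non-bytes values pass through unchanged)
```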
java | public static PipeDataElement newInstance(String name, Object data) {
if (!data.getClass().isArray()) throw new IllegalArgumentException("data is expected to be an array!");
if (Array.getLength(data) == 0) throw new IllegalArgumentException("data can not be empty!");
Class<?> componentType = data.getClass().getComponentType();
if (componentType == Object.class) throw new IllegalArgumentException("Can not create PipeBlob from Object[]");
if (componentType == Boolean.class || componentType == Short.class ||
componentType == Integer.class || componentType == Long.class ||
componentType == Float.class || componentType == Double.class) {
return newInstance(name, ArrayUtils.toPrimitiveArray(data, componentType));
}
if (componentType.isArray()) {
PipeBlobBuilder arrayBlob = new PipeBlobBuilder("array");
for (int i = 0, size = Array.getLength(data); i < size; ++i) {
arrayBlob.add(Integer.toString(i), Array.get(data, i));
}
return new PipeDataElement(name, arrayBlob.build());
}
if (componentType == String.class) {
return new PipeDataElement(name, (String[]) data);
} else if (componentType == DevState.class) {
return new PipeDataElement(name, (DevState[]) data);
} else if (componentType == DevEncoded.class) {
return new PipeDataElement(name, (DevEncoded[]) data);
} else if (componentType == boolean.class) {
return new PipeDataElement(name, (boolean[]) data);
} else if (componentType == short.class) {
return new PipeDataElement(name, (short[]) data);
} else if (componentType == int.class) {
return new PipeDataElement(name, (int[]) data);
} else if (componentType == long.class) {
return new PipeDataElement(name, (long[]) data);
} else if (componentType == float.class) {
return new PipeDataElement(name, (float[]) data);
} else if (componentType == double.class) {
return new PipeDataElement(name, (double[]) data);
} else {
throw new IllegalArgumentException("An array of ComponentType is not supported: " + componentType.getSimpleName());
}
} |
python | def search_items(query,
fields=None,
sorts=None,
params=None,
archive_session=None,
config=None,
config_file=None,
http_adapter_kwargs=None,
request_kwargs=None,
max_retries=None):
"""Search for items on Archive.org.
:type query: str
:param query: The Archive.org search query to yield results for. Refer to
https://archive.org/advancedsearch.php#raw for help formatting your
query.
:type fields: list
:param fields: (optional) The metadata fields to return in the search results.
:type params: dict
:param params: (optional) The URL parameters to send with each request sent to the
Archive.org Advancedsearch Api.
:type config: dict
    :param config: (optional) Configuration options for session.
:type config_file: str
:param config_file: (optional) A path to a config file used to configure your session.
:type http_adapter_kwargs: dict
:param http_adapter_kwargs: (optional) Keyword arguments that
:py:class:`requests.adapters.HTTPAdapter` takes.
:type request_kwargs: dict
:param request_kwargs: (optional) Keyword arguments that
:py:class:`requests.Request` takes.
:type max_retries: int, object
:param max_retries: The number of times to retry a failed request.
This can also be an `urllib3.Retry` object.
If you need more control (e.g. `status_forcelist`), use a
`ArchiveSession` object, and mount your own adapter after the
session object has been initialized. For example::
>>> s = get_session()
>>> s.mount_http_adapter()
>>> search_results = s.search_items('nasa')
See :meth:`ArchiveSession.mount_http_adapter`
for more details.
:returns: A :class:`Search` object, yielding search results.
"""
if not archive_session:
archive_session = get_session(config, config_file, http_adapter_kwargs)
return archive_session.search_items(query,
fields=fields,
sorts=sorts,
params=params,
request_kwargs=request_kwargs,
max_retries=max_retries) |
java | public ButtonTemplateBuilder addUrlButton(String title, String url,
WebViewHeightRatioType ratioType) {
Button button = ButtonFactory.createUrlButton(title, url, ratioType);
this.payload.addButton(button);
return this;
} |
java | public SDVariable logSumExp(String name, SDVariable input, int... dimensions) {
validateNumerical("logSumExp reduction", input);
SDVariable ret = f().logSumExp(input, dimensions);
return updateVariableNameAndReference(ret, name);
} |
java | @Nullable
private Status validateInitialMetadata(Metadata headers) {
Integer httpStatus = headers.get(HTTP2_STATUS);
if (httpStatus == null) {
return Status.INTERNAL.withDescription("Missing HTTP status code");
}
String contentType = headers.get(GrpcUtil.CONTENT_TYPE_KEY);
if (!GrpcUtil.isGrpcContentType(contentType)) {
return GrpcUtil.httpStatusToGrpcStatus(httpStatus)
.augmentDescription("invalid content-type: " + contentType);
}
return null;
} |
python | def conversion_transfer(conversion, version=3):
""" convert between mdf4 and mdf3 channel conversions
Parameters
----------
conversion : block
channel conversion
version : int
target mdf version
Returns
-------
conversion : block
channel conversion for specified version
"""
if version <= 3:
if conversion is None:
conversion = v3b.ChannelConversion(conversion_type=v3c.CONVERSION_TYPE_NONE)
else:
conversion_type = conversion["conversion_type"]
if conversion["id"] == b"CC":
pass
else:
unit = conversion.unit.strip(" \r\n\t\0").encode("latin-1")
if conversion_type == v4c.CONVERSION_TYPE_NON:
conversion = v3b.ChannelConversion(
unit=unit, conversion_type=v3c.CONVERSION_TYPE_NONE
)
elif conversion_type == v4c.CONVERSION_TYPE_LIN:
conversion = v3b.ChannelConversion(
unit=unit,
conversion_type=v3c.CONVERSION_TYPE_LINEAR,
a=conversion.a,
b=conversion.b,
)
elif conversion_type == v4c.CONVERSION_TYPE_RAT:
conversion = v3b.ChannelConversion(
unit=unit,
conversion_type=v3c.CONVERSION_TYPE_RAT,
P1=conversion.P1,
P2=conversion.P2,
P3=conversion.P3,
P4=conversion.P4,
P5=conversion.P5,
P6=conversion.P6,
)
elif conversion_type == v4c.CONVERSION_TYPE_TAB:
conversion_ = {}
conversion_["ref_param_nr"] = conversion.val_param_nr // 2
for i in range(conversion.val_param_nr // 2):
conversion_[f"raw_{i}"] = conversion[f"raw_{i}"]
conversion_[f"phys_{i}"] = conversion[f"phys_{i}"]
conversion = v3b.ChannelConversion(
unit=unit,
conversion_type=v3c.CONVERSION_TYPE_TAB,
**conversion_,
)
elif conversion_type == v4c.CONVERSION_TYPE_TABI:
conversion_ = {}
conversion_["ref_param_nr"] = conversion.val_param_nr // 2
for i in range(conversion.val_param_nr // 2):
conversion_[f"raw_{i}"] = conversion[f"raw_{i}"]
conversion_[f"phys_{i}"] = conversion[f"phys_{i}"]
conversion = v3b.ChannelConversion(
unit=unit,
conversion_type=v3c.CONVERSION_TYPE_TABI,
**conversion_,
)
elif conversion_type == v4c.CONVERSION_TYPE_ALG:
formula = conversion.formula.replace("X", "X1")
conversion = v3b.ChannelConversion(
formula=formula,
unit=unit,
conversion_type=v3c.CONVERSION_TYPE_FORMULA,
)
elif conversion_type == v4c.CONVERSION_TYPE_RTAB:
nr = (conversion.val_param_nr - 1) // 3
kargs = {
"ref_param_nr": nr,
"conversion_type": v3c.CONVERSION_TYPE_TABI,
}
for i in range(nr):
l_ = conversion[f"lower_{i}"]
u_ = conversion[f"upper_{i}"]
p_ = conversion[f"phys_{i}"]
kargs[f"raw_{i}"] = l_
kargs[f"raw_{i}"] = u_ - 0.000_001
kargs[f"phys_{i}"] = p_
kargs[f"phys_{i}"] = p_
conversion = v3b.ChannelConversion(unit=unit, **kargs)
elif conversion_type == v4c.CONVERSION_TYPE_TABX:
nr = conversion.val_param_nr
kargs = {
"ref_param_nr": nr,
"unit": unit,
"conversion_type": v3c.CONVERSION_TYPE_TABX,
}
for i in range(nr):
kargs[f"param_val_{i}"] = conversion[f"val_{i}"]
kargs[f"text_{i}"] = conversion.referenced_blocks[
f"text_{i}"
].text
conversion = v3b.ChannelConversion(**kargs)
elif conversion_type == v4c.CONVERSION_TYPE_RTABX:
nr = conversion.val_param_nr // 2
kargs = {
"ref_param_nr": nr + 1,
"unit": unit,
"conversion_type": v3c.CONVERSION_TYPE_RTABX,
}
for i in range(nr):
kargs[f"lower_{i}"] = conversion[f"lower_{i}"]
kargs[f"upper_{i}"] = conversion[f"upper_{i}"]
kargs[f"text_{i}"] = conversion.referenced_blocks[
f"text_{i}"
].text
new_conversion = v3b.ChannelConversion(**kargs)
new_conversion.referenced_blocks["default_addr"] = v3b.TextBlock(
text=conversion.referenced_blocks["default_addr"].text
)
conversion = new_conversion
else:
if conversion is None or conversion["id"] == b"##CC":
pass
else:
conversion_type = conversion["conversion_type"]
unit = conversion["unit_field"].decode("latin-1").strip(" \r\n\t\0")
if conversion_type == v3c.CONVERSION_TYPE_NONE:
conversion = v4b.ChannelConversion(
conversion_type=v4c.CONVERSION_TYPE_NON
)
elif conversion_type == v3c.CONVERSION_TYPE_LINEAR:
conversion = v4b.ChannelConversion(
conversion_type=v4c.CONVERSION_TYPE_LIN,
a=conversion.a,
b=conversion.b,
)
elif conversion_type == v3c.CONVERSION_TYPE_RAT:
conversion = v4b.ChannelConversion(
conversion_type=v4c.CONVERSION_TYPE_RAT,
P1=conversion.P1,
P2=conversion.P2,
P3=conversion.P3,
P4=conversion.P4,
P5=conversion.P5,
P6=conversion.P6,
)
elif conversion_type == v3c.CONVERSION_TYPE_FORMULA:
formula = conversion.formula
conversion = v4b.ChannelConversion(
conversion_type=v4c.CONVERSION_TYPE_ALG, formula=formula
)
elif conversion_type == v3c.CONVERSION_TYPE_TAB:
conversion_ = {}
conversion_["val_param_nr"] = conversion.ref_param_nr * 2
                for i in range(conversion.ref_param_nr):
conversion_[f"raw_{i}"] = conversion[f"raw_{i}"]
conversion_[f"phys_{i}"] = conversion[f"phys_{i}"]
conversion = v4b.ChannelConversion(
conversion_type=v4c.CONVERSION_TYPE_TAB, **conversion_
)
elif conversion_type == v3c.CONVERSION_TYPE_TABI:
conversion_ = {}
conversion_["val_param_nr"] = conversion.ref_param_nr * 2
for i in range(conversion.ref_param_nr):
conversion_[f"raw_{i}"] = conversion[f"raw_{i}"]
conversion_[f"phys_{i}"] = conversion[f"phys_{i}"]
conversion = v4b.ChannelConversion(
conversion_type=v4c.CONVERSION_TYPE_TABI, **conversion_
)
elif conversion_type == v3c.CONVERSION_TYPE_TABX:
nr = conversion["ref_param_nr"]
kargs = {
"val_param_nr": nr,
"ref_param_nr": nr + 1,
"conversion_type": v4c.CONVERSION_TYPE_TABX,
}
for i in range(nr):
kargs[f"val_{i}"] = conversion[f"param_val_{i}"]
kargs[f"text_{i}"] = conversion[f"text_{i}"]
conversion = v4b.ChannelConversion(**kargs)
elif conversion_type == v3c.CONVERSION_TYPE_RTABX:
# print('IN', conversion)
nr = conversion["ref_param_nr"] - 1
kargs = {
"val_param_nr": nr * 2,
"ref_param_nr": nr + 1,
"conversion_type": v4c.CONVERSION_TYPE_RTABX,
"default_addr": conversion.referenced_blocks["default_addr"].text,
}
for i in range(nr):
kargs[f"lower_{i}"] = conversion[f"lower_{i}"]
kargs[f"upper_{i}"] = conversion[f"upper_{i}"]
kargs[f"text_{i}"] = conversion.referenced_blocks[f"text_{i}"].text
conversion = v4b.ChannelConversion(**kargs)
# print('OUT', conversion)
# print('\n'*3)
conversion.unit = unit
return conversion |
python | def populate_ast_nsarg_orthologs(ast, species):
"""Recursively collect NSArg orthologs for BEL AST
This requires bo.collect_nsarg_norms() to be run first so NSArg.canonical is available
Args:
ast: AST at recursive point in belobj
        species: dictionary of species ids vs labels used for ortholog annotation
"""
ortholog_namespace = "EG"
if isinstance(ast, NSArg):
if re.match(ortholog_namespace, ast.canonical):
orthologs = bel.terms.orthologs.get_orthologs(
ast.canonical, list(species.keys())
)
for species_id in species:
if species_id in orthologs:
orthologs[species_id]["species_label"] = species[species_id]
ast.orthologs = copy.deepcopy(orthologs)
# Recursively process every NSArg by processing BELAst and Functions
if hasattr(ast, "args"):
for arg in ast.args:
populate_ast_nsarg_orthologs(arg, species)
return ast |
java | public boolean insert(String key, int value, boolean overwrite)
{
if ((null == key) || key.length() == 0 || (key.indexOf(UNUSED_CHAR) != -1))
{
return false;
}
if ((value < 0) || ((value & LEAF_BIT) != 0))
{
return false;
}
value = setLeafValue(value);
int[] ids = this.charMap.toIdList(key + UNUSED_CHAR);
int fromState = 1; // 根节点的index为1
int toState = 1;
int index = 0;
while (index < ids.length)
{
int c = ids[index];
toState = getBase(fromState) + c; // to = base[from] + c
expandArray(toState);
if (isEmpty(toState))
{
deleteFreeLink(toState);
setCheck(toState, fromState); // check[to] = from
if (index == ids.length - 1) // Leaf
{
++this.size;
setBase(toState, value); // base[to] = value
}
else
{
int nextChar = ids[(index + 1)];
setBase(toState, getNextFreeBase(nextChar)); // base[to] = free_state - c
}
}
else if (getCheck(toState) != fromState) // 冲突
{
solveConflict(fromState, c);
continue;
}
fromState = toState;
++index;
}
if (overwrite)
{
setBase(toState, value);
}
return true;
} |
java | public static boolean isUntouchedEmpty(String name, Map<String, String> data, Options options) {
return isEmptyInput(name, data, options._inputMode())
&& options.skipUntouched().orElse(false)
&& (options.touchedChecker() == null || ! options.touchedChecker().apply(name, data));
} |
python | def _create_state_data(self, context, resp_args, relay_state):
"""
Adds the frontend idp entity id to state
See super class satosa.frontends.saml2.SAMLFrontend#save_state
:type context: satosa.context.Context
:type resp_args: dict[str, str | saml2.samlp.NameIDPolicy]
:type relay_state: str
:rtype: dict[str, dict[str, str] | str]
"""
state = super()._create_state_data(context, resp_args, relay_state)
state["target_entity_id"] = context.target_entity_id_from_path()
return state |
java | public void saveField(BaseField field)
{
String strFieldName = field.getFieldName(); // Fieldname only
String strData = field.getString();
this.setProperty(strFieldName, strData);
} |
python | def flatten_all_paths(self, group_filter=lambda x: True,
path_filter=lambda x: True,
path_conversions=CONVERSIONS):
"""Forward the tree of this document into the more general
flatten_all_paths function and return the result."""
return flatten_all_paths(self.tree.getroot(), group_filter,
path_filter, path_conversions) |
java | public static long getPreviousIntervalStart(long time, int intervalInMinutes, int offsetInMinutes) {
long interval = MINUTE_IN_MS * intervalInMinutes;
long offset = calculateOffsetInMs(intervalInMinutes, offsetInMinutes);
return (interval * ((time + LOCAL_UTC_OFFSET - offset) / (interval))) + offset - LOCAL_UTC_OFFSET;
} |
java | public static CPDefinitionSpecificationOptionValue fetchByCPSpecificationOptionId_Last(
long CPSpecificationOptionId,
OrderByComparator<CPDefinitionSpecificationOptionValue> orderByComparator) {
return getPersistence()
.fetchByCPSpecificationOptionId_Last(CPSpecificationOptionId,
orderByComparator);
} |
java | Map<String, CmsSSLMode> getWebserverList() {
Map<String, CmsSSLMode> ret = new LinkedHashMap<String, CmsSSLMode>();
for (I_CmsEditableGroupRow row : m_workplaceServerGroup.getRows()) {
CmsWorkplaceServerWidget widget = (CmsWorkplaceServerWidget)row.getComponent();
ret.put(widget.getServer(), widget.getSSLMode());
}
return ret;
} |
python | def _move_consonant(self, letters: list, positions: List[int]) -> List[str]:
"""
Given a list of consonant positions, move the consonants according to certain
consonant syllable behavioral rules for gathering and grouping.
:param letters:
:param positions:
:return:
"""
for pos in positions:
previous_letter = letters[pos - 1]
consonant = letters[pos]
next_letter = letters[pos + 1]
if self._contains_vowels(next_letter) and self._starts_with_vowel(next_letter):
return string_utils.move_consonant_right(letters, [pos])
if self._contains_vowels(previous_letter) and self._ends_with_vowel(
previous_letter) and len(previous_letter) == 1:
return string_utils.move_consonant_left(letters, [pos])
if previous_letter + consonant in self.constants.ASPIRATES:
return string_utils.move_consonant_left(letters, [pos])
if consonant + next_letter in self.constants.ASPIRATES:
return string_utils.move_consonant_right(letters, [pos])
if next_letter[0] == consonant:
return string_utils.move_consonant_left(letters, [pos])
if consonant in self.constants.MUTES and next_letter[0] in self.constants.LIQUIDS:
return string_utils.move_consonant_right(letters, [pos])
if consonant in ['k', 'K'] and next_letter[0] in ['w', 'W']:
return string_utils.move_consonant_right(letters, [pos])
if self._contains_consonants(next_letter[0]) and self._starts_with_vowel(
previous_letter[-1]):
return string_utils.move_consonant_left(letters, [pos])
# fall through case
if self._contains_consonants(next_letter[0]):
return string_utils.move_consonant_right(letters, [pos])
return letters |
java | private static boolean isPrimitive(Class<? extends Object> c) {
return (null == c) || (Class.class == c) || (String.class == c) || c.isPrimitive() || (Integer.class == c) || (Long.class == c)
|| (Short.class == c) || (Byte.class == c) || (Character.class == c) || (Float.class == c) || (Double.class == c)
|| (Void.class == c) || (Boolean.class == c);
} |
java | private ImmutableMultimap<Integer, TermType> collectProposedCastTypes(
Collection<CQIE> samePredicateRules, ImmutableMap<CQIE, ImmutableList<Optional<TermType>>> termTypeMap,
Map<Predicate, ImmutableList<TermType>> alreadyKnownCastTypes) {
ImmutableMultimap.Builder<Integer, TermType> indexedCastTypeBuilder = ImmutableMultimap.builder();
int arity = samePredicateRules.iterator().next().getHead().getTerms().size();
/*
* For each rule...
*/
samePredicateRules
.forEach(rule -> {
List<Term> headArguments = rule.getHead().getTerms();
ImmutableList<Optional<TermType>> termTypes = termTypeMap.get(rule);
IntStream.range(0, arity)
.forEach(i -> {
TermType type = termTypes.get(i)
/*
* If not defined, extracts the cast type of the variable by looking at its defining
* data atom (normally intensional)
*/
.orElseGet(() -> getCastTypeFromSubRule(
immutabilityTools.convertIntoImmutableTerm(headArguments.get(i)),
extractDataAtoms(rule.getBody()).collect(ImmutableCollectors.toList()),
alreadyKnownCastTypes));
indexedCastTypeBuilder.put(i, type);
});
});
return indexedCastTypeBuilder.build();
} |
java | public synchronized void stop() {
if (!running) {
return;
}
LOGGER.info("stopping raft agent");
raftAlgorithm.stop();
raftNetworkClient.stop();
serverBossPool.shutdown();
clientBossPool.shutdown();
workerPool.shutdown();
sharedWorkerPool.shutdown();
sharedWorkerPool.destroy();
ioExecutorService.shutdownNow();
nonIoExecutorService.shutdownNow();
timer.stop();
snapshotStore.teardown();
jdbcLog.teardown();
jdbcStore.teardown();
running = false;
initialized = false;
} |
java | protected ClassLoader getClassLoader(Set<Artifact> artifacts) throws Exception {
Set<URL> classpathURLs = new LinkedHashSet<URL>();
addCustomClasspaths(classpathURLs, true);
// add ourselves to top of classpath
URL mainClasses = new File(project.getBuild().getOutputDirectory()).toURI().toURL();
getLog().debug("Adding to classpath : " + mainClasses);
classpathURLs.add(mainClasses);
for (Artifact artifact : artifacts) {
File file = artifact.getFile();
if (file != null) {
classpathURLs.add(file.toURI().toURL());
}
}
addCustomClasspaths(classpathURLs, false);
if (logClasspath) {
getLog().info("Classpath (" + classpathURLs.size() + " entries):");
for (URL url : classpathURLs) {
getLog().info(" " + url.getFile().toString());
}
}
return new URLClassLoader(classpathURLs.toArray(new URL[classpathURLs.size()]));
} |
python | def get_scripts():
"""Get custom npm scripts."""
proc = Popen(['npm', 'run-script'], stdout=PIPE)
    should_yield = False
    for line in proc.stdout.readlines():
        line = line.decode()
        if 'available via `npm run-script`:' in line:
            should_yield = True
            continue
        if should_yield and re.match(r'^ [^ ]+', line):
yield line.strip().split(' ')[0] |
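A hedged usage sketch; it requires npm on PATH and a package.json with custom scripts:

```python
for script in get_scripts():
    print(script)  # one npm script name per line, e.g. 'build', 'test'
```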
python | def usearch61_denovo_cluster(seq_path,
percent_id=0.97,
rev=False,
save_intermediate_files=True,
minlen=64,
output_dir='.',
remove_usearch_logs=False,
verbose=False,
wordlength=8,
usearch_fast_cluster=False,
usearch61_sort_method='abundance',
otu_prefix="denovo",
usearch61_maxrejects=32,
usearch61_maxaccepts=1,
sizeorder=False,
threads=1.0,
HALT_EXEC=False,
file_prefix="denovo_"
):
""" Returns dictionary of cluster IDs:seq IDs
Overall function for de novo clustering with usearch61
seq_path: fasta filepath to be clustered with usearch61
percent_id: percentage id to cluster at
rev: enable reverse strand matching for clustering
save_intermediate_files: Saves intermediate files created during clustering
minlen: minimum sequence length
output_dir: directory to output log, OTU mapping, and intermediate files
remove_usearch_logs: Saves usearch log files
verbose: print current processing step to stdout
wordlength: word length to use for clustering
usearch_fast_cluster: Use usearch61 fast cluster option, not as memory
efficient as the default cluster_smallmem option, requires sorting by
length, and does not allow reverse strand matching.
usearch61_sort_method: Sort sequences by abundance or length by using
functionality provided by usearch61, or do not sort by using None option.
otu_prefix: label to place in front of OTU IDs, used to prevent duplicate
IDs from appearing with reference based OTU picking.
usearch61_maxrejects: Number of rejects allowed by usearch61
usearch61_maxaccepts: Number of accepts allowed by usearch61
sizeorder: used for clustering based upon abundance of seeds
threads: Specify number of threads used per core per CPU
HALT_EXEC: application controller option to halt execution.
"""
files_to_remove = []
# Need absolute paths to avoid potential problems with app controller
if output_dir:
output_dir = abspath(output_dir) + '/'
seq_path = abspath(seq_path)
try:
if verbose and usearch61_sort_method is not None and\
not usearch_fast_cluster:
print "Sorting sequences according to %s..." % usearch61_sort_method
# fast sorting option automatically performs length sorting
if usearch61_sort_method == 'abundance' and not usearch_fast_cluster:
intermediate_fasta, dereplicated_uc, app_result =\
sort_by_abundance_usearch61(seq_path, output_dir, rev,
minlen, remove_usearch_logs, HALT_EXEC,
output_fna_filepath=join(
output_dir,
file_prefix + 'abundance_sorted.fna'),
output_uc_filepath=join(output_dir,
file_prefix + 'abundance_sorted.uc'), threads=threads)
if not save_intermediate_files:
files_to_remove.append(intermediate_fasta)
files_to_remove.append(dereplicated_uc)
elif usearch61_sort_method == 'length' and not usearch_fast_cluster:
intermediate_fasta, app_result =\
sort_by_length_usearch61(seq_path, output_dir, minlen,
remove_usearch_logs, HALT_EXEC,
output_fna_filepath=join(output_dir,
file_prefix + 'length_sorted.fna'))
if not save_intermediate_files:
files_to_remove.append(intermediate_fasta)
else:
intermediate_fasta = seq_path
if verbose:
print "Clustering sequences de novo..."
if usearch_fast_cluster:
clusters_fp, app_result = usearch61_fast_cluster(
intermediate_fasta,
percent_id, minlen, output_dir, remove_usearch_logs, wordlength,
usearch61_maxrejects, usearch61_maxaccepts, HALT_EXEC,
output_uc_filepath=join(
output_dir,
file_prefix + 'fast_clustered.uc'), threads=threads)
if not save_intermediate_files:
files_to_remove.append(clusters_fp)
else:
clusters_fp, app_result =\
usearch61_smallmem_cluster(intermediate_fasta, percent_id,
minlen, rev, output_dir, remove_usearch_logs, wordlength,
usearch61_maxrejects, usearch61_maxaccepts, sizeorder, HALT_EXEC,
output_uc_filepath=join(output_dir,
file_prefix + 'smallmem_clustered.uc'))
if not save_intermediate_files:
files_to_remove.append(clusters_fp)
except ApplicationError:
        raise ApplicationError('Error running usearch61. Possible causes are '
                               'an unsupported version (the current supported '
                               'version is usearch v6.1.544) or an improperly '
                               'formatted input file.')
except ApplicationNotFoundError:
remove_files(files_to_remove)
raise ApplicationNotFoundError('usearch61 not found, is it properly ' +
'installed?')
if usearch61_sort_method == 'abundance' and not usearch_fast_cluster:
de_novo_clusters, failures =\
parse_usearch61_clusters(open(clusters_fp, "U"), otu_prefix)
dereplicated_clusters =\
parse_dereplicated_uc(open(dereplicated_uc, "U"))
clusters = merge_clusters_dereplicated_seqs(de_novo_clusters,
dereplicated_clusters)
else:
clusters, failures =\
parse_usearch61_clusters(open(clusters_fp, "U"), otu_prefix)
if not save_intermediate_files:
remove_files(files_to_remove)
return clusters |
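A hedged call sketch for the function above (assumes a working usearch61 installation; the FASTA path and output directory are placeholders):

# Sketch only: cluster a demultiplexed FASTA file at 97% identity.
clusters = usearch61_denovo_cluster('seqs.fna',
                                    percent_id=0.97,
                                    output_dir='/tmp/denovo_otus/',
                                    verbose=True)
for otu_id, seq_ids in clusters.items():
    print(otu_id, len(seq_ids))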
java | void updateRack(UpdateRackHeartbeat updateRack)
{
ClusterHeartbeat cluster = findCluster(updateRack.getClusterId());
if (cluster == null) {
return;
}
RackHeartbeat rack;
if (cluster != _serverSelf.getCluster()) {
rack = cluster.createRack("external");
ClusterTarget target = createClusterTarget(cluster);
}
else {
rack = cluster.findRack(updateRack.getId());
}
if (rack == null) {
return;
}
rack.updateRack(this, updateRack);
updateHeartbeats();
} |
python | def advance(self, myDateTime):
"""
Advances to the next value and returns an appropriate value for the given
time.
:param myDateTime: (datetime) when to fetch the value for
:return: (float|int) value for given time
"""
if self.getTime() == myDateTime:
out = self.next()
# Sometimes, the stream has no value for this field and returns None, in
# this case we'll use the last value as well.
if out is None:
out = self.last()
else:
out = self.last()
# If there's no more data, we must fetch more
    if len(self) == 0:
self._fetchNextData()
self._updateMinMax(out)
if isinstance(out, float):
self._dataType = "float"
# Convert to proper data type
if self._dataType is "float":
out = float(out)
else:
out = int(out)
return out |
java | public UriBuilder setPath(final String str) {
final String[] parts;
if (str.startsWith("/")) {
parts = new String[]{str};
} else {
final String base = getPath().toString();
parts = new String[]{base, base.endsWith("/") ? "" : "/", str};
}
return setPath(new GStringImpl(EMPTY, parts));
} |
python | def update_item(TableName=None, Key=None, AttributeUpdates=None, Expected=None, ConditionalOperator=None, ReturnValues=None, ReturnConsumedCapacity=None, ReturnItemCollectionMetrics=None, UpdateExpression=None, ConditionExpression=None, ExpressionAttributeNames=None, ExpressionAttributeValues=None):
"""
Edits an existing item's attributes, or adds a new item to the table if it does not already exist. You can put, delete, or add attribute values. You can also perform a conditional update on an existing item (insert a new attribute name-value pair if it doesn't exist, or replace an existing name-value pair if it has certain expected attribute values).
You can also return the item's attribute values in the same UpdateItem operation using the ReturnValues parameter.
See also: AWS API Documentation
Examples
This example updates an item in the Music table. It adds a new attribute (Year) and modifies the AlbumTitle attribute. All of the attributes in the item, as they appear after the update, are returned in the response.
Expected Output:
:example: response = client.update_item(
TableName='string',
Key={
'string': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
}
},
AttributeUpdates={
'string': {
'Value': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
},
'Action': 'ADD'|'PUT'|'DELETE'
}
},
Expected={
'string': {
'Value': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
},
'Exists': True|False,
'ComparisonOperator': 'EQ'|'NE'|'IN'|'LE'|'LT'|'GE'|'GT'|'BETWEEN'|'NOT_NULL'|'NULL'|'CONTAINS'|'NOT_CONTAINS'|'BEGINS_WITH',
'AttributeValueList': [
{
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
},
]
}
},
ConditionalOperator='AND'|'OR',
ReturnValues='NONE'|'ALL_OLD'|'UPDATED_OLD'|'ALL_NEW'|'UPDATED_NEW',
ReturnConsumedCapacity='INDEXES'|'TOTAL'|'NONE',
ReturnItemCollectionMetrics='SIZE'|'NONE',
UpdateExpression='string',
ConditionExpression='string',
ExpressionAttributeNames={
'string': 'string'
},
ExpressionAttributeValues={
'string': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
}
}
)
:type TableName: string
:param TableName: [REQUIRED]
The name of the table containing the item to update.
:type Key: dict
:param Key: [REQUIRED]
The primary key of the item to be updated. Each element consists of an attribute name and a value for that attribute.
For the primary key, you must provide all of the attributes. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide values for both the partition key and the sort key.
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
S (string) --An attribute of type String. For example:
'S': 'Hello'
N (string) --An attribute of type Number. For example:
'N': '123.45'
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
B (bytes) --An attribute of type Binary. For example:
'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'
SS (list) --An attribute of type String Set. For example:
'SS': ['Giraffe', 'Hippo' ,'Zebra']
(string) --
NS (list) --An attribute of type Number Set. For example:
'NS': ['42.2', '-19', '7.5', '3.14']
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
(string) --
BS (list) --An attribute of type Binary Set. For example:
'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']
(bytes) --
M (dict) --An attribute of type Map. For example:
'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
L (list) --An attribute of type List. For example:
'L': ['Cookies', 'Coffee', 3.14159]
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
NULL (boolean) --An attribute of type Null. For example:
'NULL': true
BOOL (boolean) --An attribute of type Boolean. For example:
'BOOL': true
:type AttributeUpdates: dict
:param AttributeUpdates: This is a legacy parameter. Use UpdateExpression instead. For more information, see AttributeUpdates in the Amazon DynamoDB Developer Guide .
(string) --
(dict) --For the UpdateItem operation, represents the attributes to be modified, the action to perform on each, and the new value for each.
Note
You cannot use UpdateItem to update any primary key attributes. Instead, you will need to delete the item, and then use PutItem to create a new item with new attributes.
Attribute values cannot be null; string and binary type attributes must have lengths greater than zero; and set type attributes must not be empty. Requests with empty values will be rejected with a ValidationException exception.
Value (dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
    For more information, see Data Types in the Amazon DynamoDB Developer Guide .
S (string) --An attribute of type String. For example:
'S': 'Hello'
N (string) --An attribute of type Number. For example:
'N': '123.45'
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
B (bytes) --An attribute of type Binary. For example:
'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'
SS (list) --An attribute of type String Set. For example:
'SS': ['Giraffe', 'Hippo' ,'Zebra']
(string) --
NS (list) --An attribute of type Number Set. For example:
'NS': ['42.2', '-19', '7.5', '3.14']
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
(string) --
BS (list) --An attribute of type Binary Set. For example:
'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']
(bytes) --
M (dict) --An attribute of type Map. For example:
'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
L (list) --An attribute of type List. For example:
'L': ['Cookies', 'Coffee', 3.14159]
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
NULL (boolean) --An attribute of type Null. For example:
'NULL': true
BOOL (boolean) --An attribute of type Boolean. For example:
'BOOL': true
Action (string) --Specifies how to perform the update. Valid values are PUT (default), DELETE , and ADD . The behavior depends on whether the specified primary key already exists in the table.
If an item with the specified *Key* is found in the table:
PUT - Adds the specified attribute to the item. If the attribute already exists, it is replaced by the new value.
DELETE - If no value is specified, the attribute and its value are removed from the item. The data type of the specified value must match the existing value's data type. If a set of values is specified, then those values are subtracted from the old set. For example, if the attribute value was the set [a,b,c] and the DELETE action specified [a,c] , then the final attribute value would be [b] . Specifying an empty set is an error.
ADD - If the attribute does not already exist, then the attribute and its values are added to the item. If the attribute does exist, then the behavior of ADD depends on the data type of the attribute:
If the existing attribute is a number, and if Value is also a number, then the Value is mathematically added to the existing attribute. If Value is a negative number, then it is subtracted from the existing attribute.
Note
If you use ADD to increment or decrement a number value for an item that doesn't exist before the update, DynamoDB uses 0 as the initial value. In addition, if you use ADD to update an existing item, and intend to increment or decrement an attribute value which does not yet exist, DynamoDB uses 0 as the initial value. For example, suppose that the item you want to update does not yet have an attribute named itemcount , but you decide to ADD the number 3 to this attribute anyway, even though it currently does not exist. DynamoDB will create the itemcount attribute, set its initial value to 0 , and finally add 3 to it. The result will be a new itemcount attribute in the item, with a value of 3 .
If the existing data type is a set, and if the Value is also a set, then the Value is added to the existing set. (This is a set operation, not mathematical addition.) For example, if the attribute value was the set [1,2] , and the ADD action specified [3] , then the final attribute value would be [1,2,3] . An error occurs if an Add action is specified for a set attribute and the attribute type specified does not match the existing set type. Both sets must have the same primitive data type. For example, if the existing data type is a set of strings, the Value must also be a set of strings. The same holds true for number sets and binary sets.
This action is only valid for an existing attribute whose data type is number or is a set. Do not use ADD for any other data types.
If no item with the specified *Key* is found:
PUT - DynamoDB creates a new item with the specified primary key, and then adds the attribute.
DELETE - Nothing happens; there is no attribute to delete.
ADD - DynamoDB creates an item with the supplied primary key and number (or set of numbers) for the attribute value. The only data types allowed are number and number set; no other data types can be specified.
:type Expected: dict
    :param Expected: This is a legacy parameter. Use ConditionExpression instead. For more information, see Expected in the Amazon DynamoDB Developer Guide .
(string) --
(dict) --Represents a condition to be compared with an attribute value. This condition can be used with DeleteItem , PutItem or UpdateItem operations; if the comparison evaluates to true, the operation succeeds; if not, the operation fails. You can use ExpectedAttributeValue in one of two different ways:
Use AttributeValueList to specify one or more values to compare against an attribute. Use ComparisonOperator to specify how you want to perform the comparison. If the comparison evaluates to true, then the conditional operation succeeds.
Use Value to specify a value that DynamoDB will compare against an attribute. If the values match, then ExpectedAttributeValue evaluates to true and the conditional operation succeeds. Optionally, you can also set Exists to false, indicating that you do not expect to find the attribute value in the table. In this case, the conditional operation succeeds only if the comparison evaluates to false.
Value and Exists are incompatible with AttributeValueList and ComparisonOperator . Note that if you use both sets of parameters at once, DynamoDB will return a ValidationException exception.
Value (dict) --Represents the data for the expected attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
S (string) --An attribute of type String. For example:
'S': 'Hello'
N (string) --An attribute of type Number. For example:
'N': '123.45'
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
B (bytes) --An attribute of type Binary. For example:
'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'
SS (list) --An attribute of type String Set. For example:
'SS': ['Giraffe', 'Hippo' ,'Zebra']
(string) --
NS (list) --An attribute of type Number Set. For example:
'NS': ['42.2', '-19', '7.5', '3.14']
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
(string) --
BS (list) --An attribute of type Binary Set. For example:
'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']
(bytes) --
M (dict) --An attribute of type Map. For example:
'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
L (list) --An attribute of type List. For example:
'L': ['Cookies', 'Coffee', 3.14159]
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
NULL (boolean) --An attribute of type Null. For example:
'NULL': true
BOOL (boolean) --An attribute of type Boolean. For example:
'BOOL': true
Exists (boolean) --Causes DynamoDB to evaluate the value before attempting a conditional operation:
If Exists is true , DynamoDB will check to see if that attribute value already exists in the table. If it is found, then the operation succeeds. If it is not found, the operation fails with a ConditionalCheckFailedException .
If Exists is false , DynamoDB assumes that the attribute value does not exist in the table. If in fact the value does not exist, then the assumption is valid and the operation succeeds. If the value is found, despite the assumption that it does not exist, the operation fails with a ConditionalCheckFailedException .
The default setting for Exists is true . If you supply a Value all by itself, DynamoDB assumes the attribute exists: You don't have to set Exists to true , because it is implied.
DynamoDB returns a ValidationException if:
Exists is true but there is no Value to check. (You expect a value to exist, but don't specify what that value is.)
Exists is false but you also provide a Value . (You cannot expect an attribute to have a value, while also expecting it not to exist.)
ComparisonOperator (string) --A comparator for evaluating attributes in the AttributeValueList . For example, equals, greater than, less than, etc.
The following comparison operators are available:
EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN
The following are descriptions of each comparison operator.
EQ : Equal. EQ is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue element of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} .
NE : Not equal. NE is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} .
LE : Less than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .
LT : Less than. AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .
GE : Greater than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .
GT : Greater than. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .
NOT_NULL : The attribute exists. NOT_NULL is supported for all data types, including lists and maps.
Note
This operator tests for the existence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NOT_NULL , the result is a Boolean true . This result is because the attribute 'a ' exists; its data type is not relevant to the NOT_NULL comparison operator.
NULL : The attribute does not exist. NULL is supported for all data types, including lists and maps.
Note
This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NULL , the result is a Boolean false . This is because the attribute 'a ' exists; its data type is not relevant to the NULL comparison operator.
CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then the operator checks for a substring match. If the target attribute of the comparison is of type Binary, then the operator looks for a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it finds an exact match with any member of the set. CONTAINS is supported for lists: When evaluating 'a CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list.
NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is a String, then the operator checks for the absence of a substring match. If the target attribute of the comparison is Binary, then the operator checks for the absence of a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it does not find an exact match with any member of the set. NOT_CONTAINS is supported for lists: When evaluating 'a NOT CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list.
BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type).
IN : Checks for matching elements in a list. AttributeValueList can contain one or more AttributeValue elements of type String, Number, or Binary. These attributes are compared against an existing attribute of an item. If any elements of the input are equal to the item attribute, the expression evaluates to true.
BETWEEN : Greater than or equal to the first value, and less than or equal to the second value. AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not compare to {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']}
AttributeValueList (list) --One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used.
For type Number, value comparisons are numeric.
String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A , and a is greater than B . For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters .
For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.
For information on specifying data types in JSON, see JSON Data Format in the Amazon DynamoDB Developer Guide .
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
S (string) --An attribute of type String. For example:
'S': 'Hello'
N (string) --An attribute of type Number. For example:
'N': '123.45'
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
B (bytes) --An attribute of type Binary. For example:
'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'
SS (list) --An attribute of type String Set. For example:
'SS': ['Giraffe', 'Hippo' ,'Zebra']
(string) --
NS (list) --An attribute of type Number Set. For example:
'NS': ['42.2', '-19', '7.5', '3.14']
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
(string) --
BS (list) --An attribute of type Binary Set. For example:
'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']
(bytes) --
M (dict) --An attribute of type Map. For example:
'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
L (list) --An attribute of type List. For example:
'L': ['Cookies', 'Coffee', 3.14159]
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
NULL (boolean) --An attribute of type Null. For example:
'NULL': true
BOOL (boolean) --An attribute of type Boolean. For example:
'BOOL': true
:type ConditionalOperator: string
:param ConditionalOperator: This is a legacy parameter. Use ConditionExpression instead. For more information, see ConditionalOperator in the Amazon DynamoDB Developer Guide .
:type ReturnValues: string
:param ReturnValues: Use ReturnValues if you want to get the item attributes as they appeared either before or after they were updated. For UpdateItem , the valid values are:
NONE - If ReturnValues is not specified, or if its value is NONE , then nothing is returned. (This setting is the default for ReturnValues .)
ALL_OLD - Returns all of the attributes of the item, as they appeared before the UpdateItem operation.
UPDATED_OLD - Returns only the updated attributes, as they appeared before the UpdateItem operation.
ALL_NEW - Returns all of the attributes of the item, as they appear after the UpdateItem operation.
UPDATED_NEW - Returns only the updated attributes, as they appear after the UpdateItem operation.
There is no additional cost associated with requesting a return value aside from the small network and processing overhead of receiving a larger response. No Read Capacity Units are consumed.
    The values returned are strongly consistent.
:type ReturnConsumedCapacity: string
:param ReturnConsumedCapacity: Determines the level of detail about provisioned throughput consumption that is returned in the response:
INDEXES - The response includes the aggregate ConsumedCapacity for the operation, together with ConsumedCapacity for each table and secondary index that was accessed. Note that some operations, such as GetItem and BatchGetItem , do not access any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity information for table(s).
TOTAL - The response includes only the aggregate ConsumedCapacity for the operation.
NONE - No ConsumedCapacity details are included in the response.
:type ReturnItemCollectionMetrics: string
    :param ReturnItemCollectionMetrics: Determines whether item collection metrics are returned. If set to SIZE , statistics about any item collections modified during the operation are returned in the response. If set to NONE (the default), no statistics are returned.
:type UpdateExpression: string
:param UpdateExpression: An expression that defines one or more attributes to be updated, the action to be performed on them, and new value(s) for them.
The following action values are available for UpdateExpression .
    SET - Adds one or more attributes and values to an item. If any of these attributes already exist, they are replaced by the new values. You can also use SET to add or subtract from an attribute that is of type Number. For example: SET myNum = myNum + :val SET supports the following functions:
if_not_exists (path, operand) - if the item does not contain an attribute at the specified path, then if_not_exists evaluates to operand; otherwise, it evaluates to path. You can use this function to avoid overwriting an attribute that may already be present in the item.
list_append (operand, operand) - evaluates to a list with a new element added to it. You can append the new element to the start or the end of the list by reversing the order of the operands.
These function names are case-sensitive.
REMOVE - Removes one or more attributes from an item.
ADD - Adds the specified value to the item, if the attribute does not already exist. If the attribute does exist, then the behavior of ADD depends on the data type of the attribute:
If the existing attribute is a number, and if Value is also a number, then Value is mathematically added to the existing attribute. If Value is a negative number, then it is subtracted from the existing attribute.
Note
If you use ADD to increment or decrement a number value for an item that doesn't exist before the update, DynamoDB uses 0 as the initial value. Similarly, if you use ADD for an existing item to increment or decrement an attribute value that doesn't exist before the update, DynamoDB uses 0 as the initial value. For example, suppose that the item you want to update doesn't have an attribute named itemcount , but you decide to ADD the number 3 to this attribute anyway. DynamoDB will create the itemcount attribute, set its initial value to 0 , and finally add 3 to it. The result will be a new itemcount attribute in the item, with a value of 3 .
If the existing data type is a set and if Value is also a set, then Value is added to the existing set. For example, if the attribute value is the set [1,2] , and the ADD action specified [3] , then the final attribute value is [1,2,3] . An error occurs if an ADD action is specified for a set attribute and the attribute type specified does not match the existing set type. Both sets must have the same primitive data type. For example, if the existing data type is a set of strings, the Value must also be a set of strings.
Warning
The ADD action only supports Number and set data types. In addition, ADD can only be used on top-level attributes, not nested attributes.
DELETE - Deletes an element from a set. If a set of values is specified, then those values are subtracted from the old set. For example, if the attribute value was the set [a,b,c] and the DELETE action specifies [a,c] , then the final attribute value is [b] . Specifying an empty set is an error.
Warning
The DELETE action only supports set data types. In addition, DELETE can only be used on top-level attributes, not nested attributes.
You can have many actions in a single expression, such as the following: SET a=:value1, b=:value2 DELETE :value3, :value4, :value5
For more information on update expressions, see Modifying Items and Attributes in the Amazon DynamoDB Developer Guide .
:type ConditionExpression: string
:param ConditionExpression: A condition that must be satisfied in order for a conditional update to succeed.
An expression can contain any of the following:
Functions: attribute_exists | attribute_not_exists | attribute_type | contains | begins_with | size These function names are case-sensitive.
    Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
Logical operators: AND | OR | NOT
For more information on condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer Guide .
:type ExpressionAttributeNames: dict
:param ExpressionAttributeNames: One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames :
To access an attribute whose name conflicts with a DynamoDB reserved word.
To create a placeholder for repeating occurrences of an attribute name in an expression.
To prevent special characters in an attribute name from being misinterpreted in an expression.
Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:
Percentile
The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide ). To work around this, you could specify the following for ExpressionAttributeNames :
{'#P':'Percentile'}
You could then use this substitution in an expression, as in this example:
#P = :val
Note
Tokens that begin with the : character are expression attribute values , which are placeholders for the actual value at runtime.
For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide .
(string) --
(string) --
:type ExpressionAttributeValues: dict
:param ExpressionAttributeValues: One or more values that can be substituted in an expression.
Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:
Available | Backordered | Discontinued
You would first need to specify ExpressionAttributeValues as follows:
{ ':avail':{'S':'Available'}, ':back':{'S':'Backordered'}, ':disc':{'S':'Discontinued'} }
You could then use these values in an expression, such as this:
ProductStatus IN (:avail, :back, :disc)
For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide .
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
S (string) --An attribute of type String. For example:
'S': 'Hello'
N (string) --An attribute of type Number. For example:
'N': '123.45'
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
B (bytes) --An attribute of type Binary. For example:
'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'
SS (list) --An attribute of type String Set. For example:
'SS': ['Giraffe', 'Hippo' ,'Zebra']
(string) --
NS (list) --An attribute of type Number Set. For example:
'NS': ['42.2', '-19', '7.5', '3.14']
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
(string) --
BS (list) --An attribute of type Binary Set. For example:
'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']
(bytes) --
M (dict) --An attribute of type Map. For example:
'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
L (list) --An attribute of type List. For example:
'L': ['Cookies', 'Coffee', 3.14159]
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
NULL (boolean) --An attribute of type Null. For example:
'NULL': true
BOOL (boolean) --An attribute of type Boolean. For example:
'BOOL': true
:rtype: dict
:return: {
'Attributes': {
'string': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
}
},
'ConsumedCapacity': {
'TableName': 'string',
'CapacityUnits': 123.0,
'Table': {
'CapacityUnits': 123.0
},
'LocalSecondaryIndexes': {
'string': {
'CapacityUnits': 123.0
}
},
'GlobalSecondaryIndexes': {
'string': {
'CapacityUnits': 123.0
}
}
},
'ItemCollectionMetrics': {
'ItemCollectionKey': {
'string': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
}
},
'SizeEstimateRangeGB': [
123.0,
]
}
}
:returns:
(string) --
"""
pass |
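A short, hedged boto3 usage sketch grounded in the parameters documented above (the table, key, and attribute names are made up):

import boto3

dynamodb = boto3.client('dynamodb')

# Replace an album title and increment a play counter, but only if the
# item already exists (hypothetical 'Music' table and attributes).
response = dynamodb.update_item(
    TableName='Music',
    Key={'Artist': {'S': 'Acme Band'},
         'SongTitle': {'S': 'Happy Day'}},
    UpdateExpression='SET AlbumTitle = :t ADD Plays :inc',
    ConditionExpression='attribute_exists(Artist)',
    ExpressionAttributeValues={':t': {'S': 'Songs About Life'},
                               ':inc': {'N': '1'}},
    ReturnValues='UPDATED_NEW',
)
print(response.get('Attributes'))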
python | def get_placeholder_data_view(self, request, object_id):
"""
Return the placeholder data as dictionary.
This is used in the client for the "copy" functionality.
"""
    language = 'en'  # request.POST['language']
with translation.override(language): # Use generic solution here, don't assume django-parler is used now.
obj = self.get_object(request, object_id)
if obj is None:
json = {'success': False, 'error': 'Page not found'}
status = 404
elif not self.has_change_permission(request, obj):
json = {'success': False, 'error': 'No access to page'}
status = 403
else:
# Fetch the forms that would be displayed,
# return the data as serialized form data.
status = 200
json = {
'success': True,
'object_id': object_id,
'language_code': language,
'formset_forms': self._get_object_formset_data(request, obj),
}
return JsonResponse(json, status=status) |
python | def calc_pvalue(self, study_count, study_n, pop_count, pop_n):
"""pvalues are calculated in derived classes."""
fnc_call = "calc_pvalue({SCNT}, {STOT}, {PCNT} {PTOT})".format(
SCNT=study_count, STOT=study_n, PCNT=pop_count, PTOT=pop_n)
raise Exception("NOT IMPLEMENTED: {FNC_CALL} using {FNC}.".format(
FNC_CALL=fnc_call, FNC=self.pval_fnc)) |
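A hedged sketch of one possible derived class, using SciPy's one-sided Fisher's exact test; the class name is an assumption, and the base class is omitted for brevity:

from scipy import stats  # assumed dependency

class FisherScipy(object):  # hypothetical subclass of the base above
    """calc_pvalue via a one-sided Fisher's exact test."""
    def calc_pvalue(self, study_count, study_n, pop_count, pop_n):
        # 2x2 table: study hits/misses vs. remaining-population hits/misses.
        table = [[study_count, study_n - study_count],
                 [pop_count - study_count,
                  pop_n - pop_count - (study_n - study_count)]]
        _, pvalue = stats.fisher_exact(table, alternative='greater')
        return pvalue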
java | private void updateUnknownStatus(Map<String, Map<String, String>> statusMap,
List<String> unreportedNn) {
if (unreportedNn == null || unreportedNn.isEmpty()) {
// no unreported namenodes
return;
}
for (Map.Entry<String, Map<String,String>> entry : statusMap.entrySet()) {
String dn = entry.getKey();
Map<String, String> nnStatus = entry.getValue();
for (String nn : unreportedNn) {
nnStatus.put(nn, DecommissionStates.UNKNOWN.toString());
}
statusMap.put(dn, nnStatus);
}
} |
java | public void setActualExpiryTime(com.google.api.ads.admanager.axis.v201902.DateTime actualExpiryTime) {
this.actualExpiryTime = actualExpiryTime;
} |
java | private int prepareConnectionForBatch(Connection conn) throws SQLException {
conn.setAutoCommit(false);
int initialIsolation = Connection.TRANSACTION_REPEATABLE_READ;
if (_isDB2)
{
try
{
initialIsolation = conn.getTransactionIsolation();
if (Connection.TRANSACTION_REPEATABLE_READ != initialIsolation && Connection.TRANSACTION_SERIALIZABLE != initialIsolation)
{
if (tc.isDebugEnabled())
Tr.debug(tc, "Transaction isolation level was " + initialIsolation + " , setting to TRANSACTION_REPEATABLE_READ");
conn.setTransactionIsolation(Connection.TRANSACTION_REPEATABLE_READ);
}
} catch (Exception e)
{
if (tc.isDebugEnabled())
Tr.debug(tc, "setTransactionIsolation to RR threw Exception. Transaction isolation level was " + initialIsolation + " ", e);
FFDCFilter.processException(e, "com.ibm.ws.recoverylog.spi.SQLMultiScopeRecoveryLog.prepareConnectionForBatch", "3668", this);
if (!isolationFailureReported)
{
isolationFailureReported = true;
Tr.warning(tc, "CWRLS0024_EXC_DURING_RECOVERY", e);
}
// returning RR will prevent closeConnectionAfterBatch resetting isolation level
initialIsolation = Connection.TRANSACTION_REPEATABLE_READ;
}
}
return initialIsolation;
} |
java | private <X> X executeLockingMethod(Supplier<X> method) {
try {
return method.get();
} catch (JanusGraphException e) {
if (e.isCausedBy(TemporaryLockingException.class) || e.isCausedBy(PermanentLockingException.class)) {
throw TemporaryWriteException.temporaryLock(e);
} else {
throw GraknServerException.unknown(e);
}
}
} |
java | public JSONObject accumulate( String key, double value ) {
return _accumulate( key, Double.valueOf( value ), new JsonConfig() );
} |
python | def doppler_width(transition, Temperature):
r"""Return the Doppler width of a transition at a given temperature
(in angular frequency).
    For example, the Doppler FWHM of the rubidium D2 line (in MHz):
>>> g = State("Rb", 87, 5, 0, 1/Integer(2), 2)
>>> e = State("Rb", 87, 5, 1, 3/Integer(2))
>>> t = Transition(e, g)
>>> omega = doppler_width(t, 273.15 + 22)
>>> "{:2.3f}".format(omega/2/np.pi*1e-6)
'522.477'
"""
atom = Atom(transition.e1.element, transition.e1.isotope)
m = atom.mass
omega = transition.omega
return omega*np.log(8*np.sqrt(2))*np.sqrt(k_B*Temperature/m/c**2) |
java | public float getWidthCorrected(float charSpacing, float wordSpacing)
{
if (image != null) {
return image.getScaledWidth() + charSpacing;
}
int numberOfSpaces = 0;
int idx = -1;
while ((idx = value.indexOf(' ', idx + 1)) >= 0)
++numberOfSpaces;
return width() + (value.length() * charSpacing + numberOfSpaces * wordSpacing);
} |
java | static int getAnimationResource(int gravity, boolean isInAnimation) {
if ((gravity & Gravity.TOP) == Gravity.TOP) {
return isInAnimation ? R.anim.slide_in_top : R.anim.slide_out_top;
}
if ((gravity & Gravity.BOTTOM) == Gravity.BOTTOM) {
return isInAnimation ? R.anim.slide_in_bottom : R.anim.slide_out_bottom;
}
if ((gravity & Gravity.CENTER) == Gravity.CENTER) {
return isInAnimation ? R.anim.fade_in_center : R.anim.fade_out_center;
}
return INVALID;
} |
python | def update_alarm(deployment_id, metric_name, data, api_key=None, profile="telemetry"):
'''
    Update a telemetry alarm. data is a dict of alert configuration data.
Returns (bool success, str message) tuple.
CLI Example:
salt myminion telemetry.update_alarm rs-ds033197 {} profile=telemetry
'''
auth = _auth(api_key, profile)
alert = get_alert_config(deployment_id, metric_name, api_key, profile)
if not alert:
return False, "No entity found matching deployment {0} and alarms {1}".format(deployment_id, metric_name)
request_uri = _get_telemetry_base(profile) + '/alerts/' + alert['_id']
# set the notification channels if not already set
post_body = {
"deployment": deployment_id,
"filter": data.get('filter'),
"notificationChannel": get_notification_channel_id(data.get('escalate_to')).split(),
"condition": {
"metric": metric_name,
"max": data.get('max'),
"min": data.get('min')
}
}
try:
response = requests.put(request_uri, data=salt.utils.json.dumps(post_body), headers=auth)
except requests.exceptions.RequestException as e:
log.error('Update failed: %s', e)
return False, six.text_type(e)
if response.status_code >= 200 and response.status_code < 300:
# Also update cache
log.debug('Updating cache for metric %s in deployment %s: %s',
metric_name, deployment_id, response.json())
_update_cache(deployment_id, metric_name, response.json())
log.info('Updated alarm on metric: %s in deployment: %s', metric_name, deployment_id)
return True, response.json()
err_msg = six.text_type( # future lint: disable=blacklisted-function
        'Failed to update alarm on metric: {0} in deployment: {1} '
'payload: {2}').format(
salt.utils.stringutils.to_unicode(metric_name),
salt.utils.stringutils.to_unicode(deployment_id),
salt.utils.json.dumps(post_body)
)
log.error(err_msg)
return False, err_msg |
python | def delete_events(environment, start_response, headers):
"""
Delete events
POST body should contain a JSON encoded version of:
{ namespace: namespace_name (optional),
stream : stream_name,
start_time : starting_time_as_kronos_time,
end_time : ending_time_as_kronos_time,
start_id : only_delete_events_with_id_gte_me,
}
Either start_time or start_id should be specified.
"""
request_json = environment['json']
try:
stream = request_json['stream']
validate_stream(stream)
    except Exception as e:
log.exception('delete_events: stream validation failed for `%s`.',
request_json.get('stream'))
start_response('400 Bad Request', headers)
return {ERRORS_FIELD: [repr(e)]}
namespace = request_json.get('namespace', settings.default_namespace)
backends = router.backends_to_mutate(namespace, stream)
statuses = {}
for backend, conf in backends.iteritems():
statuses[backend.name] = execute_greenlet_async(
backend.delete,
namespace,
stream,
long(request_json.get('start_time', 0)),
long(request_json['end_time']),
request_json.get('start_id'),
conf)
wait(statuses.values())
success = True
response = {}
for backend, status in statuses.iteritems():
try:
num_deleted, errors = status.get()
response[backend] = {'num_deleted': num_deleted}
if errors:
success = False
response[ERRORS_FIELD] = errors
        except Exception as e:
log.exception('delete_events: delete from backend `%s` failed.', backend)
success = False
response[backend] = {'num_deleted': -1,
ERRORS_FIELD: [repr(e)]}
response = {request_json['stream']: response,
SUCCESS_FIELD: success}
start_response('200 OK', headers)
return response |
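For reference, a hedged sketch of the JSON body this endpoint expects; the field names come from the docstring above, while every value is a placeholder:

# Illustrative request body (all values are placeholders):
example_body = {
    'namespace': 'my_namespace',        # optional
    'stream': 'my_stream',
    'start_time': 14195999990000000,    # kronos time (assumed units)
    'end_time': 14196000000000000,
}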
python | def changed(self):
"""Returns dict of fields that changed since save (with old values)"""
if not self.instance.pk:
return {}
saved = self.saved_data.items()
current = self.current()
return dict((k, v) for k, v in saved if v != current[k]) |
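A hedged usage sketch (assumes this method belongs to a field-tracker object exposed on a Django model instance, e.g. as a `tracker` attribute; the model and field names are made up):

article = Article.objects.get(pk=1)   # hypothetical model
article.title = 'New title'
print(article.tracker.changed())      # e.g. {'title': 'Old title'}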
java | public static void __gmpz_export(Pointer rop, Pointer countp, int order, int size, int endian,
int nails, mpz_t op) {
if (SIZE_T_CLASS == SizeT4.class) {
SizeT4.__gmpz_export(rop, countp, order, size, endian, nails, op);
} else {
SizeT8.__gmpz_export(rop, countp, order, size, endian, nails, op);
}
} |
python | def close(self, force=True):
'''This closes the connection with the child application. Note that
calling close() more than once is valid. This emulates standard Python
behavior with files. Set force to True if you want to make sure that
the child is terminated (SIGKILL is sent if the child ignores SIGHUP
and SIGINT). '''
if not self.closed:
self.flush()
self.fileobj.close() # Closes the file descriptor
# Give kernel time to update process status.
time.sleep(self.delayafterclose)
if self.isalive():
if not self.terminate(force):
raise PtyProcessError('Could not terminate the child.')
self.fd = -1
self.closed = True |
python | def as_dict(self) -> Dict[str, str]:
"""
Export color register as dict.
"""
items: Dict[str, str] = {}
for k, v in self.items():
if type(v) is str:
items.update({k: v})
return items |