language | func_code_string |
---|---|
python | def apply_with_summary(input_layer, operation, *op_args, **op_kwargs):
    """Applies the given operation to `input_layer` and creates a summary.

    Args:
      input_layer: The input layer for this op.
      operation: An operation that takes a tensor and the supplied args.
      *op_args: Extra arguments for operation.
      **op_kwargs: Keyword arguments for the operation.
    Returns:
      A new layer with operation applied.
    """
    return layers.apply_activation(input_layer.bookkeeper,
                                   input_layer.tensor,
                                   operation,
                                   activation_args=op_args,
                                   activation_kwargs=op_kwargs) |
java | public <T> T read(Class<T> clazz, Name dn) {
return ldapTemplate.findByDn(dn, clazz);
} |
java | void writeSection(RecoverableUnitSectionImpl target, int unwrittenDataSize) throws InternalLogException
{
if (tc.isEntryEnabled())
Tr.entry(tc, "writeSection", new java.lang.Object[] { this, target, new Integer(unwrittenDataSize) });
// If the parent recovery log instance has experienced a serious internal error then prevent
// this operation from executing.
if (_recLog.failed())
{
if (tc.isEntryEnabled())
Tr.exit(tc, "writeSection", this);
throw new InternalLogException(null);
}
// If the log was not open then throw an exception
if (_logHandle == null)
{
if (tc.isEntryEnabled())
Tr.exit(tc, "writeSection", "InternalLogException");
throw new InternalLogException(null);
}
_controlLock.getSharedLock(LOCK_REQUEST_ID_RUI_WRITESECTION);
try
{
if (tc.isDebugEnabled())
Tr.debug(tc, "Writing recoverable unit '" + target.identity() + "'");
if (tc.isDebugEnabled())
Tr.debug(tc, "Unwritten data size = " + unwrittenDataSize);
final int requiredRecordSize = _recordHeaderSize + unwrittenDataSize;
// Obtain a WriteableLogRecord that provides direct access to the underlying recovery log.
// The WriteableLogRecord will write the required log record header to the underlying
// recovery log.
final WriteableLogRecord logRecord = _logHandle.getWriteableLogRecord(requiredRecordSize);
// In some situations, there will not be enough space in the underlying recovery log to obtain a
// WriteableLogRecord of the required size. The recovery log will need to perform "housekeeping"
// to clean up the recovery log before this latest record can be written. In such
// situations, getWriteableLogRecord() will trigger a keypoint operation before returning.
// Given that the keypoint operation will actually cause all the information within this
// recoverable unit to be (re)written to disk, this method need take no further action. This
// condition is indicated by the return of a null log record.
if (logRecord != null)
{
// Write the records header to disk. This includes the recoverable unit's identity,
// the failure scope that the unit belongs to, and the record's type.
writeRecordHeader(logRecord, RECORDTYPENORMAL);
// Now direct the recoverable unit section to write its content. If the recoverable unit
// section has no data to write then this will be a no-op.
target.format(false, logRecord);
// Finally, write a negative recoverable unit section id to indicate that there are no
// more sections.
logRecord.putInt(END_OF_SECTIONS);
// Tell the WriteableLogRecord that we have finished adding recoverable unit sections. This
// will cause it to add the appropriate record tail to the underlying recovery log.
logRecord.close();
// Flag the fact at least part of this recoverable unit has now been written to the
// underlying recovery log.
_storedOnDisk = true;
_logHandle.writeLogRecord(logRecord);
}
} catch (IOException exc)
{
FFDCFilter.processException(exc, "com.ibm.ws.recoverylog.spi.RecoverableUnitImpl.writeSection", "755", this);
if (tc.isEventEnabled())
Tr.event(tc, "An unexpected error IO occurred whilst formatting the recovery log buffer", exc);
_recLog.markFailed(exc); /* @MD19484C */
try
{
_controlLock.releaseSharedLock(LOCK_REQUEST_ID_RUI_WRITESECTION);
} catch (Throwable exc2)
{
FFDCFilter.processException(exc2, "com.ibm.ws.recoverylog.spi.RecoverableUnitImpl.writeSection", "766", this);
if (tc.isEntryEnabled())
Tr.exit(tc, "writeSection", "InternalLogException");
throw new InternalLogException(exc2);
}
if (tc.isEntryEnabled())
Tr.exit(tc, "writeSection", "InternalLogException");
throw new InternalLogException(exc);
} catch (Throwable exc)
{
FFDCFilter.processException(exc, "com.ibm.ws.recoverylog.spi.RecoverableUnitImpl.writeSection", "776", this);
if (tc.isEventEnabled())
Tr.event(tc, "An unexpected error occurred whilst formatting the recovery log buffer", exc);
_recLog.markFailed(exc); /* @MD19484C */
try
{
_controlLock.releaseSharedLock(LOCK_REQUEST_ID_RUI_WRITESECTION);
} catch (Throwable exc2)
{
FFDCFilter.processException(exc2, "com.ibm.ws.recoverylog.spi.RecoverableUnitImpl.writeSection", "787", this);
if (tc.isEntryEnabled())
Tr.exit(tc, "writeSection", "InternalLogException");
throw new InternalLogException(exc2);
}
if (tc.isEntryEnabled())
Tr.exit(tc, "writeSection", "InternalLogException");
throw new InternalLogException(exc);
}
try
{
_controlLock.releaseSharedLock(LOCK_REQUEST_ID_RUI_WRITESECTION);
} catch (NoSharedLockException exc)
{
FFDCFilter.processException(exc, "com.ibm.ws.recoverylog.spi.RecoverableUnitImpl.writeSection", "802", this);
if (tc.isEntryEnabled())
Tr.exit(tc, "writeSection", "InternalLogException");
throw new InternalLogException(exc);
}
if (tc.isEntryEnabled())
Tr.exit(tc, "writeSection");
} |
java | private void setSelectPositionX(int posX, int width) {
m_markerStyle.setLeft(posX, Unit.PX);
m_markerStyle.setWidth(width, Unit.PX);
m_overlayLeftStyle.setWidth(posX, Unit.PX);
m_overlayTopStyle.setLeft(posX, Unit.PX);
m_overlayTopStyle.setWidth(width, Unit.PX);
m_overlayBottomStyle.setLeft(posX, Unit.PX);
m_overlayBottomStyle.setWidth(width, Unit.PX);
m_overlayRightStyle.setWidth(m_elementWidth - posX - width, Unit.PX);
m_currentSelection.setLeft(posX);
m_currentSelection.setWidth(width);
} |
java | @Override
@Deprecated
public void loadServerInstances(final String serverGroup, final AsyncCallback<List<ServerInstance>> callback) {
final List<ServerInstance> instancesOfGroup = new LinkedList<ServerInstance>();
loadHostsAndServerInstances(new SimpleCallback<List<HostInfo>>() {
@Override
public void onSuccess(final List<HostInfo> result) {
for (HostInfo host : result) {
List<ServerInstance> instances = host.getServerInstances();
for (ServerInstance instance : instances) {
if (serverGroup == null) {
instancesOfGroup.add(instance);
} else if (serverGroup.equals(instance.getGroup())) {
instancesOfGroup.add(instance);
}
}
}
callback.onSuccess(instancesOfGroup);
}
});
} |
python | def popitem(self):
    """Remove and return the `(key, value)` pair least recently used that
    has not already expired.
    """
    with self.__timer as time:
        self.expire(time)
        try:
            key = next(iter(self.__links))
        except StopIteration:
            raise KeyError('%s is empty' % self.__class__.__name__)
        else:
            return (key, self.pop(key)) |
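
This `popitem` matches the pattern of a TTL cache: expired entries are purged before the least recently used pair is evicted. A minimal usage sketch, assuming the snippet comes from something like `cachetools.TTLCache`:

```python
from cachetools import TTLCache  # assumption: this popitem is TTLCache's

cache = TTLCache(maxsize=2, ttl=60)  # at most 2 items, each valid for 60 s
cache['a'] = 1
cache['b'] = 2
# Expired entries are purged first; then the least recently used,
# still-valid pair is removed and returned.
print(cache.popitem())  # ('a', 1)
```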
java | public static void removeEntry(final File zip, final String path) {
operateInPlace(zip, new InPlaceAction() {
public boolean act(File tmpFile) {
removeEntry(zip, path, tmpFile);
return true;
}
});
} |
python | def open(cls, typename):
    """Create an OMAPI open message with given typename.

    @type typename: bytes
    @rtype: OmapiMessage
    """
    return cls(opcode=OMAPI_OP_OPEN, message=[(b"type", typename)], tid=-1) |
python | def _get_movie_raw_metadata():
    """
    Get raw lines of the genre file.
    """
    path = _get_movielens_path()
    if not os.path.isfile(path):
        _download_movielens(path)
    with zipfile.ZipFile(path) as datafile:
        return datafile.read('ml-100k/u.item').decode(errors='ignore').split('\n') |
python | def check_if_modified_since(self, dt, etag=None):
    """Validate If-Modified-Since with current request conditions."""
    dt = dt.replace(microsecond=0)
    if request.if_modified_since and dt <= request.if_modified_since:
        raise SameContentException(etag, last_modified=dt) |
java | protected int add_child(int idx, IonValueLite child)
{
_isNullValue(false); // if we add children we're not null anymore
child.setContext(this.getContextForIndex(child, idx));
if (_children == null || _child_count >= _children.length) {
int old_len = (_children == null) ? 0 : _children.length;
int new_len = this.nextSize(old_len, true);
assert(new_len > idx);
IonValueLite[] temp = new IonValueLite[new_len];
if (old_len > 0) {
System.arraycopy(_children, 0, temp, 0, old_len);
}
_children = temp;
}
if (idx < _child_count) {
System.arraycopy(_children, idx, _children, idx+1, _child_count-idx);
}
_child_count++;
_children[idx] = child;
structuralModificationCount++;
child._elementid(idx);
if (!_isSymbolIdPresent() && child._isSymbolIdPresent())
{
cascadeSIDPresentToContextRoot();
}
return idx;
} |
java | public ExtViewQuery image(String url, Callback callback) {
if (!TextUtils.isEmpty(url) && view instanceof ImageView) {
Picasso.with(context).load(url).into((ImageView) view, callback);
}
return self();
} |
python | def urlinfo(self, domain, response_group=URLINFO_RESPONSE_GROUPS):
    '''
    Provide information about the supplied domain, as specified by the response group.

    :param domain: Any valid URL
    :param response_group: Any valid urlinfo response group
    :return: Traffic and/or content data of the domain in XML format
    '''
    params = {
        'Action': "UrlInfo",
        'Url': domain,
        'ResponseGroup': response_group,
    }
    url, headers = self.create_v4_signature(params)
    return self.return_output(url, headers) |
java | private void readProjectProperties(Document cdp)
{
WorkspaceProperties props = cdp.getWorkspaceProperties();
ProjectProperties mpxjProps = m_projectFile.getProjectProperties();
mpxjProps.setSymbolPosition(props.getCurrencyPosition());
mpxjProps.setCurrencyDigits(props.getCurrencyDigits());
mpxjProps.setCurrencySymbol(props.getCurrencySymbol());
mpxjProps.setDaysPerMonth(props.getDaysPerMonth());
mpxjProps.setMinutesPerDay(props.getHoursPerDay());
mpxjProps.setMinutesPerWeek(props.getHoursPerWeek());
m_workHoursPerDay = mpxjProps.getMinutesPerDay().doubleValue() / 60.0;
} |
java | private void setHeadersComplete() {
if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
Tr.debug(tc, "completed headers have been received stream " + myID);
}
headersCompleted = true;
muxLink.setContinuationExpected(false);
} |
java | public String create(List<String> sortedIncludedHeaders, HashFunction hashFunction) {
// Add the method and uri
StringBuilder canonicalRequest = new StringBuilder();
canonicalRequest.append(method).append(NEW_LINE);
String canonicalUri = CANONICALIZE_PATH.apply(uri);
canonicalRequest.append(canonicalUri).append(NEW_LINE);
// Get the query args, replace whitespace and values that should be not encoded, sort and rejoin
String canonicalQuery = CANONICALIZE_QUERY.apply(queryString);
canonicalRequest.append(canonicalQuery).append(NEW_LINE);
// Normalize all the headers
Header[] normalizedHeaders = NORMALIZE_HEADERS.apply(headers);
Map<String, List<String>> combinedHeaders = COMBINE_HEADERS.apply(normalizedHeaders);
// Add the headers that we care about
for (String header : sortedIncludedHeaders) {
String lowercase = header.toLowerCase().trim();
if (combinedHeaders.containsKey(lowercase)) {
List<String> values = combinedHeaders.get(lowercase);
Collections.sort(values);
canonicalRequest.append(lowercase)
.append(":")
.append(Joiner.on(',').join(values))
.append(NEW_LINE);
}
}
canonicalRequest.append(NEW_LINE);
// Mark the headers that we care about
canonicalRequest.append(Joiner.on(";").join(sortedIncludedHeaders)).append(NEW_LINE);
// Hash and hex the request payload
if (!Strings.isNullOrEmpty(requestBody)) {
String hashedPayload = hashFunction.hashString(requestBody, Charsets.UTF_8).toString();
canonicalRequest.append(hashedPayload);
}
return canonicalRequest.toString();
} |
java | private static Iterable<String> getAssociationTables(EntityClass entityClass) {
Iterable<Settable> association = filter(entityClass.getElements(),
and(or(has(ManyToMany.class), has(ElementCollection.class)), has(JoinTable.class)));
return transform(association, new Function<Settable, String>() {
@Override
public String apply(Settable input) {
JoinTable annotation = input.getAnnotation(JoinTable.class);
return annotation.name();
}
});
} |
python | def level(self, name, no=None, color=None, icon=None):
    """Add, update or retrieve a logging level.

    Logging levels are defined by their ``name``, to which a severity ``no``, an ansi ``color``
    and an ``icon`` are associated and possibly modified at run-time. To |log| to a custom
    level, you must use its name; the severity number is not linked back to the level's
    name (this implies that several levels can share the same severity).

    To add a new level, all parameters should be passed so it can be properly configured.
    To update an existing level, pass its ``name`` with the parameters to be changed.
    To retrieve level information, the ``name`` solely suffices.

    Parameters
    ----------
    name : |str|
        The name of the logging level.
    no : |int|
        The severity of the level to be added or updated.
    color : |str|
        The color markup of the level to be added or updated.
    icon : |str|
        The icon of the level to be added or updated.

    Returns
    -------
    ``Level``
        A namedtuple containing information about the level.

    Examples
    --------
    >>> logger.level("ERROR")
    Level(no=40, color='<red><bold>', icon='❌')
    >>> logger.add(sys.stderr, format="{level.no} {icon} {message}")
    >>> logger.level("CUSTOM", no=15, color="<blue>", icon="@")
    >>> logger.log("CUSTOM", "Logging...")
    15 @ Logging...
    >>> logger.level("WARNING", icon=r"/!\\")
    >>> logger.warning("Updated!")
    30 /!\\ Updated!
    """
    if not isinstance(name, str):
        raise ValueError(
            "Invalid level name, it should be a string, not: '%s'" % type(name).__name__
        )
    if no is color is icon is None:
        try:
            return self._levels[name]
        except KeyError:
            raise ValueError("Level '%s' does not exist" % name)
    if name not in self._levels:
        if no is None:
            raise ValueError(
                "Level '%s' does not exist, you have to create it by specifying a level no"
                % name
            )
        else:
            old_no, old_color, old_icon = None, "", " "
    else:
        old_no, old_color, old_icon = self.level(name)
    if no is None:
        no = old_no
    if color is None:
        color = old_color
    if icon is None:
        icon = old_icon
    if not isinstance(no, int):
        raise ValueError(
            "Invalid level no, it should be an integer, not: '%s'" % type(no).__name__
        )
    if no < 0:
        raise ValueError("Invalid level no, it should be a positive integer, not: %d" % no)
    self._levels[name] = Level(no, color, icon)
    with self._lock:
        for handler in self._handlers.values():
            handler.update_format(color)
    return self.level(name) |
java | public Buffer readFrom(InputStream in) throws IOException {
readFrom(in, Long.MAX_VALUE, true);
return this;
} |
python | def is_newer_b(a, bfiles):
    """
    Check that all b files have been modified more recently than a.
    """
    if isinstance(bfiles, basestring):
        bfiles = [bfiles]
    if not op.exists(a):
        return False
    if not all(op.exists(b) for b in bfiles):
        return False
    atime = os.stat(a).st_mtime  # modification time
    for b in bfiles:
        # a has been modified since b
        if atime > os.stat(b).st_mtime:
            return False
    return True |
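
A quick usage sketch for the function above, with hypothetical file names (it relies on the module's `os` and `os.path as op` imports; `basestring` marks it as Python 2 code):

```python
# Hypothetical pipeline step: rerun only if outputs are missing or stale.
if not is_newer_b('reads.fastq', ['aligned.bam', 'aligned.bam.bai']):
    print('outputs missing or older than input; rerunning alignment')
```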
python | def make_entropy_col_consensus(bg_freqs):
    """Consensus according to maximal relative entropy term (MET).

    For a given column i, choose the residue j with the highest relative
    entropy term::

        f_ij ln(f_ij / b_j)

    where f_ij = column aa frequency, b_j = background aa frequency.
    Source: http://bioinformatics.oxfordjournals.org/content/24/18/1987.long
    """
    def col_consensus(col):
        col_freqs = sequtils.aa_frequencies(col)
        entroper = entropy_func(col_freqs, bg_freqs)
        return max(col_freqs.keys(), key=entroper)
    return col_consensus |
java | private void checkReferenceEquality(Node n, String typeName, String fileName) {
if (n.getToken() == Token.SHEQ
|| n.getToken() == Token.EQ
|| n.getToken() == Token.SHNE
|| n.getToken() == Token.NE) {
JSType firstJsType = n.getFirstChild().getJSType();
JSType lastJsType = n.getLastChild().getJSType();
boolean hasType = isType(firstJsType, fileName) || isType(lastJsType, fileName);
boolean hasNullType = isNullType(firstJsType) || isNullType(lastJsType);
if (hasType && !hasNullType) {
compiler.report(JSError.make(n, J2CL_REFERENCE_EQUALITY, typeName));
}
}
} |
java | public static IntPoint Divide(IntPoint point1, IntPoint point2) {
IntPoint result = new IntPoint(point1);
result.Divide(point2);
return result;
} |
java | protected void init(
String userAdmin,
String userGuest,
String userExport,
String userDeletedResource,
String groupAdministrators,
String groupUsers,
String groupGuests) {
// check if all required user and group names are not null or empty
if (CmsStringUtil.isEmptyOrWhitespaceOnly(userAdmin)
|| CmsStringUtil.isEmptyOrWhitespaceOnly(userGuest)
|| CmsStringUtil.isEmptyOrWhitespaceOnly(userExport)
|| CmsStringUtil.isEmptyOrWhitespaceOnly(groupAdministrators)
|| CmsStringUtil.isEmptyOrWhitespaceOnly(groupUsers)
|| CmsStringUtil.isEmptyOrWhitespaceOnly(groupGuests)) {
throw new CmsRuntimeException(Messages.get().container(Messages.ERR_USER_GROUP_NAMES_EMPTY_0));
}
// set members
m_userAdmin = userAdmin.trim();
m_userGuest = userGuest.trim();
m_userExport = userExport.trim();
if (CmsStringUtil.isEmptyOrWhitespaceOnly(userDeletedResource)) {
m_userDeletedResource = userAdmin;
} else {
m_userDeletedResource = userDeletedResource.trim();
}
m_groupAdministrators = groupAdministrators.trim();
m_groupUsers = groupUsers.trim();
m_groupGuests = groupGuests.trim();
} |
python | def populate(self, ticket=None):
    """
    Populate the database with types retrieved from the AFIP.

    If no ticket is provided, the most recent available one will be used.
    """
    ticket = ticket or AuthTicket.objects.get_any_active('wsfe')
    client = clients.get_client('wsfe', ticket.owner.is_sandboxed)
    service = getattr(client.service, self.__service_name)
    response_xml = service(serializers.serialize_ticket(ticket))
    check_response(response_xml)
    for result in getattr(response_xml.ResultGet, self.__type_name):
        self.get_or_create(
            code=result.Id,
            description=result.Desc,
            valid_from=parsers.parse_date(result.FchDesde),
            valid_to=parsers.parse_date(result.FchHasta),
        ) |
java | public Set<java.util.Map.Entry<String, T>> entrySet()
{
return lookupMap.entrySet();
} |
python | def cmvn(vec, variance_normalization=False):
    """Perform global cepstral mean and variance normalization (CMVN)
    on the input feature vector "vec". The code assumes that there is
    one observation per row.

    Args:
        vec (array): input feature matrix
            (size: (num_observation, num_features))
        variance_normalization (bool): If the variance
            normalization should be performed or not.
    Returns:
        array: The mean (or mean+variance) normalized feature vector.
    """
    eps = 2**-30
    rows, cols = vec.shape
    # Mean calculation
    norm = np.mean(vec, axis=0)
    norm_vec = np.tile(norm, (rows, 1))
    # Mean subtraction
    mean_subtracted = vec - norm_vec
    # Variance normalization
    if variance_normalization:
        stdev = np.std(mean_subtracted, axis=0)
        stdev_vec = np.tile(stdev, (rows, 1))
        output = mean_subtracted / (stdev_vec + eps)
    else:
        output = mean_subtracted
    return output |
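
A short, self-contained check of the function above (only `numpy` is needed; the feature values are made up):

```python
import numpy as np

# 100 frames x 13 made-up cepstral coefficients, deliberately off-center.
features = 5.0 * np.random.randn(100, 13) + 2.0
normalized = cmvn(features, variance_normalization=True)
print(np.allclose(normalized.mean(axis=0), 0.0, atol=1e-6))  # True
print(np.allclose(normalized.std(axis=0), 1.0, atol=1e-3))   # True (up to eps)
```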
python | def make_sqlite_url(filename: str) -> str:
    """
    Makes an SQLAlchemy URL for a SQLite database.
    """
    absfile = os.path.abspath(filename)
    return "sqlite://{host}/{path}".format(host="", path=absfile) |
python | def __refresh(self):
    """Update local knowledge of values (to be used to create new skeletal
    instances). MUST be called within lock."""
    raw_values = self.__get_values()
    if not raw_values:
        raise RefreshException('Point has no values')
    # individual templates
    templates = []
    # lookup tables by type and unit of value
    by_type = {}
    by_unit = {}
    for raw_value in raw_values:
        label = raw_value['label']
        if not valid_identifier(label) or label.startswith('__'):
            raise RefreshException('Value "%s" unsuitable for object wrapper' % label)
        value = Value(label, raw_value['type'], raw_value['unit'], raw_value['comment'])
        templates.append(value)
        try:
            by_type[value.type_].add(label)
        except KeyError:
            by_type[value.type_] = {label}
        if value.unit:
            try:
                by_unit[value.unit].add(label)
            except KeyError:
                by_unit[value.unit] = {label}
    self.__value_templates = templates
    self.__filter = _ValueFilter(by_type, by_unit) |
python | def current_revision(self):
    """
    :return: The current :class:`revision.data.Revision`.
    :rtype: :class:`revision.data.Revision`
    """
    if self.current_index is None:
        return None
    if len(self.revisions) > self.current_index:
        return self.revisions[self.current_index]
    return None |
python | def get_jsapi_signature(self, noncestr, ticket, timestamp, url):
    """
    Get the JSAPI signature.

    https://work.weixin.qq.com/api/doc#90001/90144/90539/签名算法/

    :param noncestr: nonce string
    :param ticket: JS-SDK ticket
    :param timestamp: timestamp
    :param url: URL
    :return: the signature
    """
    data = [
        'noncestr={noncestr}'.format(noncestr=noncestr),
        'jsapi_ticket={ticket}'.format(ticket=ticket),
        'timestamp={timestamp}'.format(timestamp=timestamp),
        'url={url}'.format(url=url),
    ]
    signer = WeChatSigner(delimiter=b'&')
    signer.add_data(*data)
    return signer.signature |
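
Assuming `WeChatSigner` sorts its inputs before joining and hashing (the documented WeChat JS-SDK algorithm sorts the fields by key in ASCII order and takes a SHA-1 of the joined string), an equivalent standard-library sketch looks like this:

```python
import hashlib

def jsapi_signature(noncestr, ticket, timestamp, url):
    # Fields sorted by key: jsapi_ticket < noncestr < timestamp < url.
    raw = '&'.join([
        'jsapi_ticket={0}'.format(ticket),
        'noncestr={0}'.format(noncestr),
        'timestamp={0}'.format(timestamp),
        'url={0}'.format(url),
    ])
    return hashlib.sha1(raw.encode('utf-8')).hexdigest()
```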
java | public void addMetaBeanProperty(MetaBeanProperty mp) {
MetaProperty staticProperty = establishStaticMetaProperty(mp);
if (staticProperty != null) {
staticPropertyIndex.put(mp.getName(), mp);
} else {
SingleKeyHashMap propertyMap = classPropertyIndex.getNotNull(theCachedClass);
//keep field
CachedField field;
MetaProperty old = (MetaProperty) propertyMap.get(mp.getName());
if (old != null) {
if (old instanceof MetaBeanProperty) {
field = ((MetaBeanProperty) old).getField();
} else if (old instanceof MultipleSetterProperty) {
field = ((MultipleSetterProperty)old).getField();
} else {
field = (CachedField) old;
}
mp.setField(field);
}
// put it in the list
// this will overwrite a possible field property
propertyMap.put(mp.getName(), mp);
}
} |
java | @Override
public double calculateExpectedDisagreement() {
ensureDistanceFunction();
if (coincidenceMatrix == null) {
coincidenceMatrix = CodingAnnotationStudy.countCategoryCoincidence(study);
}
if (study.getCategoryCount() <= 1) {
throw new InsufficientDataException("An annotation study needs at least two different categories; otherwise there is no decision for the raters to agree on.");
}
double n = 0.0;
Map<Object, Double> marginals = new HashMap<Object, Double>();
for (Entry<Object, Map<Object, Double>> cat1 : coincidenceMatrix.entrySet()) {
double n_c = 0.0;
for (Entry<Object, Double> cat2 : cat1.getValue().entrySet()) {
n_c += cat2.getValue();
}
marginals.put(cat1.getKey(), n_c);
n += n_c;
}
double result = 0.0;
for (Entry<Object, Double> cat1 : marginals.entrySet()) {
for (Entry<Object, Double> cat2 : marginals.entrySet()) {
result += cat1.getValue() * cat2.getValue()
* distanceFunction.measureDistance(study, cat1.getKey(), cat2.getKey());
}
}
result /= n * (n - 1.0);
return result;
} |
java | public static boolean nonEmptyIntersection(
Comparator<String> comparator, String[] first, String[] second) {
if (first == null || second == null || first.length == 0 || second.length == 0) {
return false;
}
for (String a : first) {
for (String b : second) {
if (comparator.compare(a, b) == 0) {
return true;
}
}
}
return false;
} |
python | def decompose(df, period=365, lo_frac=0.6, lo_delta=0.01):
    """Create a seasonal-trend (with Loess, aka "STL") decomposition of observed
    time series data.

    This implementation is modeled after the ``statsmodels.tsa.seasonal_decompose``
    method but substitutes a Lowess regression for a convolution in its trend
    estimation. This is an additive model: Y[t] = T[t] + S[t] + e[t]

    For more details on lo_frac and lo_delta, see:
    ``statsmodels.nonparametric.smoothers_lowess.lowess()``

    Args:
        df (pandas.DataFrame): Time series of observed counts. This DataFrame must
            be continuous (no gaps or missing data), and include a
            ``pandas.DatetimeIndex``.
        period (int, optional): Most significant periodicity in the observed time
            series, in units of 1 observation. Ex: to accommodate strong annual
            periodicity within years of daily observations, ``period=365``.
        lo_frac (float, optional): Fraction of data to use in fitting Lowess
            regression.
        lo_delta (float, optional): Fractional distance within which to use
            linear interpolation instead of weighted regression. Using a non-zero
            ``lo_delta`` significantly decreases computation time.

    Returns:
        ``statsmodels.tsa.seasonal.DecomposeResult``: An object with DataFrame
        attributes for the seasonal, trend, and residual components, as well as
        the average seasonal cycle.
    """
    # use some existing pieces of statsmodels
    lowess = sm.nonparametric.lowess
    _pandas_wrapper, _ = _maybe_get_pandas_wrapper_freq(df)
    # get plain np array
    observed = np.asanyarray(df).squeeze()
    # calc trend, remove from observation
    trend = lowess(observed, [x for x in range(len(observed))],
                   frac=lo_frac,
                   delta=lo_delta * len(observed),
                   return_sorted=False)
    detrended = observed - trend
    # period must not be larger than size of series to avoid introducing NaNs
    period = min(period, len(observed))
    # calc one-period seasonality, remove tiled array from detrended
    period_averages = np.array([pd_nanmean(detrended[i::period]) for i in range(period)])
    # 0-center the period avgs
    period_averages -= np.mean(period_averages)
    seasonal = np.tile(period_averages, len(observed) // period + 1)[:len(observed)]
    resid = detrended - seasonal
    # convert the arrays back to appropriate dataframes, stuff them back into
    # the statsmodel object
    results = list(map(_pandas_wrapper, [seasonal, trend, resid, observed]))
    dr = DecomposeResult(seasonal=results[0],
                         trend=results[1],
                         resid=results[2],
                         observed=results[3],
                         period_averages=period_averages)
    return dr |
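
A usage sketch for `decompose` on synthetic daily data (assumes the module's `numpy`/`pandas`/`statsmodels` imports are in place; the series itself is fabricated):

```python
import numpy as np
import pandas as pd

idx = pd.date_range('2015-01-01', periods=3 * 365, freq='D')
y = pd.DataFrame(
    {'count': np.linspace(0, 10, len(idx))                        # slow trend
              + 5 * np.sin(2 * np.pi * np.arange(len(idx)) / 365)  # annual cycle
              + np.random.randn(len(idx))},                        # noise
    index=idx,
)
result = decompose(y, period=365)
print(result.trend.head())
print(result.seasonal.head())
```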
java | public void valueUnbound( HttpSessionBindingEvent event )
{
if ( _log.isDebugEnabled() )
{
_log.debug( "The page flow stack is being unbound from the session." );
}
while ( ! isEmpty() )
{
PageFlowController jpf = pop( null ).getPageFlow();
// Note that this page flow may have been serialized/deserialized, which will cause its transient info
// to be lost. Rehydrate it.
HttpSession session = event.getSession();
if ( session != null ) jpf.reinitialize( null, null, session.getServletContext() );
if ( ! jpf.isLongLived() ) jpf.destroy( event.getSession() );
}
} |
java | private static double shapiroWilkW(double[] x) {
Arrays.sort(x);
int n = x.length;
if(n<3) {
throw new IllegalArgumentException("The provided collection must have more than 2 elements.");
}
if (n > 5000) {
throw new IllegalArgumentException("The provided collection must have less or equal to 5000 elements.");
}
int nn2 = n/2;
double[] a = new double[nn2+1]; /* 1-based */
/*
ALGORITHM AS R94 APPL. STATIST. (1995) vol.44, no.4, 547-551.
Calculates the Shapiro-Wilk W test and its significance level
*/
double small = 1e-19;
/* polynomial coefficients */
double g[] = { -2.273, 0.459 };
double c1[] = { 0.0, 0.221157, -0.147981, -2.07119, 4.434685, -2.706056 };
double c2[] = { 0.0, 0.042981, -0.293762, -1.752461, 5.682633, -3.582633 };
double c3[] = { 0.544, -0.39978, 0.025054, -6.714e-4 };
double c4[] = { 1.3822, -0.77857, 0.062767, -0.0020322 };
double c5[] = { -1.5861, -0.31082, -0.083751, 0.0038915 };
double c6[] = { -0.4803, -0.082676, 0.0030302 };
/* Local variables */
int i, j, i1;
double ssassx, summ2, ssumm2, gamma, range;
double a1, a2, an, m, s, sa, xi, sx, xx, y, w1;
double fac, asa, an25, ssa, sax, rsn, ssx, xsx;
double pw;
an = (double)n;
if (n == 3) {
a[1] = 0.70710678;/* = sqrt(1/2) */
}
else {
an25 = an + 0.25;
summ2 = 0.0;
for (i = 1; i <= nn2; i++) {
a[i] = ContinuousDistributions.normalQuantile((i - 0.375) / an25, 0, 1); // p(X <= x),
summ2 += a[i] * a[i];
}
summ2 *= 2.0;
ssumm2 = Math.sqrt(summ2);
rsn = 1.0 / Math.sqrt(an);
a1 = poly(c1, 6, rsn) - a[1] / ssumm2;
/* Normalize a[] */
if (n > 5) {
i1 = 3;
a2 = -a[2] / ssumm2 + poly(c2, 6, rsn);
fac = Math.sqrt((summ2 - 2.0 * (a[1] * a[1]) - 2.0 * (a[2] * a[2])) / (1.0 - 2.0 * (a1 * a1) - 2.0 * (a2 * a2)));
a[2] = a2;
}
else {
i1 = 2;
fac = Math.sqrt((summ2 - 2.0 * (a[1] * a[1])) / ( 1.0 - 2.0 * (a1 * a1)));
}
a[1] = a1;
for (i = i1; i <= nn2; i++) {
a[i] /= - fac;
}
}
/* Check for zero range */
range = x[n-1] - x[0];
if (range < small) {
throw new IllegalArgumentException("The range is too small.");
}
/* Check for correct sort order on range - scaled X */
xx = x[0] / range;
sx = xx;
sa = -a[1];
for (i = 1, j = n - 1; i < n; j--) {
xi = x[i] / range;
if (xx - xi > small) {
throw new IllegalArgumentException("The xx - xi is too big.");
}
sx += xi;
i++;
if (i != j) {
sa += sign(i - j) * a[Math.min(i, j)];
}
xx = xi;
}
/* Calculate W statistic as squared correlation
between data and coefficients */
sa /= n;
sx /= n;
ssa = ssx = sax = 0.;
for (i = 0, j = n - 1; i < n; i++, j--) {
if (i != j) {
asa = sign(i - j) * a[1 + Math.min(i, j)] - sa;
}
else {
asa = -sa;
}
xsx = x[i] / range - sx;
ssa += asa * asa;
ssx += xsx * xsx;
sax += asa * xsx;
}
/* W1 equals (1-W) calculated to avoid excessive rounding error
for W very near 1 (a potential problem in very large samples) */
ssassx = Math.sqrt(ssa * ssx);
w1 = (ssassx - sax) * (ssassx + sax) / (ssa * ssx);
double w = 1.0 - w1;
/* Calculate significance level for W */
if (n == 3) {/* exact P value : */
double pi6 = 6.0/Math.PI; /* 1.90985931710274 = 6/pi */
double stqr = Math.PI/3.0; /* 1.04719755119660 = asin(sqrt(3/4)) */
pw = pi6 * (Math.asin(Math.sqrt(w)) - stqr);
if (pw < 0.) {
pw = 0;
}
//return w;
return pw;
}
y = Math.log(w1);
xx = Math.log(an);
if (n <= 11) {
gamma = poly(g, 2, an);
if (y >= gamma) {
pw = 1e-99; /* an "obvious" value, was 'small' which was 1e-19f */
//return w;
return pw;
}
y = -Math.log(gamma - y);
m = poly(c3, 4, an);
s = Math.exp(poly(c4, 4, an));
}
else { /* n >= 12 */
m = poly(c5, 4, xx);
s = Math.exp(poly(c6, 3, xx));
}
// Oops, we don't have pnorm
// pw = pnorm(y, m, s, 0/* upper tail */, 0);
pw=ContinuousDistributions.gaussCdf((y-m)/s);
//return w;
return pw;
} |
java | public static <T> Parser<T> longest(Parser<? extends T>... parsers) {
if (parsers.length == 0) return never();
if (parsers.length == 1) return parsers[0].cast();
return new BestParser<T>(parsers, IntOrder.GT);
} |
java | protected String findPattern(String strPattern, String text, int grp) {
Pattern pattern = Pattern.compile(strPattern, Pattern.MULTILINE);
Matcher matcher = pattern.matcher(text);
if (matcher.find(0))
return matcher.group(grp);
return null;
} |
python | def _read_regpol_file(reg_pol_path):
    '''
    Helper function to read a reg policy file and return decoded data.
    '''
    returndata = None
    if os.path.exists(reg_pol_path):
        with salt.utils.files.fopen(reg_pol_path, 'rb') as pol_file:
            returndata = pol_file.read()
    return returndata |
python | def add_margins(df, vars, margins=True):
    """
    Add margins to a data frame.

    All margining variables will be converted to factors.

    Parameters
    ----------
    df : dataframe
        input data frame
    vars : list
        a list of two lists or tuples giving the
        variables in each dimension
    margins : bool | list
        variable names to compute margins for.
        True will compute all possible margins.
    """
    margin_vars = _margins(vars, margins)
    if not margin_vars:
        return df
    # create margin dataframes
    margin_dfs = [df]
    for vlst in margin_vars[1:]:
        dfx = df.copy()
        for v in vlst:
            dfx.loc[0:, v] = '(all)'
        margin_dfs.append(dfx)
    merged = pd.concat(margin_dfs, axis=0)
    merged.reset_index(drop=True, inplace=True)
    # All margin columns become categoricals. The margin indicator
    # (all) needs to be added as the last level of the categories.
    categories = {}
    for v in itertools.chain(*vars):
        col = df[v]
        if not pdtypes.is_categorical_dtype(df[v].dtype):
            col = pd.Categorical(df[v])
        categories[v] = col.categories
        if '(all)' not in categories[v]:
            categories[v] = categories[v].insert(
                len(categories[v]), '(all)')
    for v in merged.columns.intersection(set(categories)):
        merged[v] = merged[v].astype(
            pdtypes.CategoricalDtype(categories[v]))
    return merged |
java | public void startElement(
StylesheetHandler handler, String uri, String localName, String rawName, Attributes attributes)
throws org.xml.sax.SAXException
{
final String resultNS;
NamespaceAlias na = new NamespaceAlias(handler.nextUid());
setPropertiesFromAttributes(handler, rawName, attributes, na);
String prefix = na.getStylesheetPrefix();
if(prefix.equals("#default"))
{
prefix = "";
na.setStylesheetPrefix(prefix);
}
String stylesheetNS = handler.getNamespaceForPrefix(prefix);
na.setStylesheetNamespace(stylesheetNS);
prefix = na.getResultPrefix();
if(prefix.equals("#default"))
{
prefix = "";
na.setResultPrefix(prefix);
resultNS = handler.getNamespaceForPrefix(prefix);
if(null == resultNS)
handler.error(XSLTErrorResources.ER_INVALID_NAMESPACE_URI_VALUE_FOR_RESULT_PREFIX_FOR_DEFAULT, null, null);
}
else
{
resultNS = handler.getNamespaceForPrefix(prefix);
if(null == resultNS)
handler.error(XSLTErrorResources.ER_INVALID_NAMESPACE_URI_VALUE_FOR_RESULT_PREFIX, new Object[] {prefix}, null);
}
na.setResultNamespace(resultNS);
handler.getStylesheet().setNamespaceAlias(na);
handler.getStylesheet().appendChild(na);
} |
java | public static <INPUT extends Comparable<INPUT>> void executeLargeUpdates(Collection<INPUT> inputs, Consumer<List<INPUT>> consumer,
IntFunction<Integer> partitionSizeManipulations) {
Iterable<List<INPUT>> partitions = toUniqueAndSortedPartitions(inputs, partitionSizeManipulations);
for (List<INPUT> partition : partitions) {
consumer.accept(partition);
}
} |
java | protected void addViolation(MethodNode node, String message) {
addViolation((ASTNode) node, String.format(
"Violation in class %s. %s", node.getDeclaringClass().getNameWithoutPackage(), message
));
} |
java | @Override
public void afterCompletion(int status)
{
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
Tr.entry(tc, "afterCompletion : " + status + " : " + this);
// JPA 5.9.1 Container Responsibilities
// - After the JTA transaction has completed (either by transaction commit or rollback),
// The container closes the entity manager by calling EntityManager.close. [39]
//
// [39] The container may choose to pool EntityManagers and instead of creating and
// closing in each case acquire one from its pool and call clear() on it.
// Note : em may be null now, if it was non-transactional. d472866.1
if (ivEm != null)
{
ivJpaEm.closeTxEntityManager(ivEm, ivPoolEM); // d510184
}
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
Tr.exit(tc, "afterCompletion");
} |
python | def night_mode(self):
    """bool: The speaker's night mode.

    True if on, False if off, None if not supported.
    """
    if not self.is_soundbar:
        return None
    response = self.renderingControl.GetEQ([
        ('InstanceID', 0),
        ('EQType', 'NightMode')
    ])
    return bool(int(response['CurrentValue'])) |
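
This property looks like it belongs to a Sonos controller (the `renderingControl.GetEQ` call matches the SoCo library); a hedged usage sketch, assuming SoCo is installed and a soundbar is reachable at a made-up address:

```python
from soco import SoCo  # assumption: the property above is SoCo's

speaker = SoCo('192.168.1.50')  # hypothetical speaker IP
mode = speaker.night_mode
print(mode)  # True / False, or None if the device is not a soundbar
```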
java | private static void setValue(Object target, String field, Object value) {
// TODO: Should we do this for all numbers, not just '0'?
if ("0".equals(field)) {
if (!(target instanceof Collection)) {
throw new IllegalArgumentException(
"Cannot evaluate '0' on object " + target);
}
@SuppressWarnings("unchecked")
Collection<Object> collection = (Collection<Object>) target;
collection.add(value);
} else {
Method setter = findMethod(target, "set" + field, value.getClass());
try {
setter.invoke(target, value);
} catch (IllegalAccessException exception) {
throw new IllegalStateException(
"Unable to access setter method",
exception);
} catch(InvocationTargetException exception) {
if (exception.getCause() instanceof RuntimeException) {
throw (RuntimeException) exception.getCause();
}
throw new IllegalStateException(
"Checked exception thrown from setter method",
exception);
}
}
} |
python | def delete_item(TableName=None, Key=None, Expected=None, ConditionalOperator=None, ReturnValues=None, ReturnConsumedCapacity=None, ReturnItemCollectionMetrics=None, ConditionExpression=None, ExpressionAttributeNames=None, ExpressionAttributeValues=None):
"""
Deletes a single item in a table by primary key. You can perform a conditional delete operation that deletes the item if it exists, or if it has an expected attribute value.
In addition to deleting an item, you can also return the item's attribute values in the same operation, using the ReturnValues parameter.
Unless you specify conditions, the DeleteItem is an idempotent operation; running it multiple times on the same item or attribute does not result in an error response.
Conditional deletes are useful for deleting items only if specific conditions are met. If those conditions are met, DynamoDB performs the delete. Otherwise, the item is not deleted.
See also: AWS API Documentation
Examples
This example deletes an item from the Music table.
Expected Output:
:example: response = client.delete_item(
TableName='string',
Key={
'string': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
}
},
Expected={
'string': {
'Value': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
},
'Exists': True|False,
'ComparisonOperator': 'EQ'|'NE'|'IN'|'LE'|'LT'|'GE'|'GT'|'BETWEEN'|'NOT_NULL'|'NULL'|'CONTAINS'|'NOT_CONTAINS'|'BEGINS_WITH',
'AttributeValueList': [
{
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
},
]
}
},
ConditionalOperator='AND'|'OR',
ReturnValues='NONE'|'ALL_OLD'|'UPDATED_OLD'|'ALL_NEW'|'UPDATED_NEW',
ReturnConsumedCapacity='INDEXES'|'TOTAL'|'NONE',
ReturnItemCollectionMetrics='SIZE'|'NONE',
ConditionExpression='string',
ExpressionAttributeNames={
'string': 'string'
},
ExpressionAttributeValues={
'string': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
}
}
)
:type TableName: string
:param TableName: [REQUIRED]
The name of the table from which to delete the item.
:type Key: dict
:param Key: [REQUIRED]
A map of attribute names to AttributeValue objects, representing the primary key of the item to delete.
For the primary key, you must provide all of the attributes. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide values for both the partition key and the sort key.
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
S (string) --An attribute of type String. For example:
'S': 'Hello'
N (string) --An attribute of type Number. For example:
'N': '123.45'
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
B (bytes) --An attribute of type Binary. For example:
'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'
SS (list) --An attribute of type String Set. For example:
'SS': ['Giraffe', 'Hippo' ,'Zebra']
(string) --
NS (list) --An attribute of type Number Set. For example:
'NS': ['42.2', '-19', '7.5', '3.14']
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
(string) --
BS (list) --An attribute of type Binary Set. For example:
'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']
(bytes) --
M (dict) --An attribute of type Map. For example:
'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
L (list) --An attribute of type List. For example:
'L': ['Cookies', 'Coffee', 3.14159]
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
NULL (boolean) --An attribute of type Null. For example:
'NULL': true
BOOL (boolean) --An attribute of type Boolean. For example:
'BOOL': true
:type Expected: dict
:param Expected: This is a legacy parameter. Use ConditionExpression instead. For more information, see Expected in the Amazon DynamoDB Developer Guide .
(string) --
(dict) --Represents a condition to be compared with an attribute value. This condition can be used with DeleteItem , PutItem or UpdateItem operations; if the comparison evaluates to true, the operation succeeds; if not, the operation fails. You can use ExpectedAttributeValue in one of two different ways:
Use AttributeValueList to specify one or more values to compare against an attribute. Use ComparisonOperator to specify how you want to perform the comparison. If the comparison evaluates to true, then the conditional operation succeeds.
Use Value to specify a value that DynamoDB will compare against an attribute. If the values match, then ExpectedAttributeValue evaluates to true and the conditional operation succeeds. Optionally, you can also set Exists to false, indicating that you do not expect to find the attribute value in the table. In this case, the conditional operation succeeds only if the comparison evaluates to false.
Value and Exists are incompatible with AttributeValueList and ComparisonOperator . Note that if you use both sets of parameters at once, DynamoDB will return a ValidationException exception.
Value (dict) --Represents the data for the expected attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
S (string) --An attribute of type String. For example:
'S': 'Hello'
N (string) --An attribute of type Number. For example:
'N': '123.45'
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
B (bytes) --An attribute of type Binary. For example:
'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'
SS (list) --An attribute of type String Set. For example:
'SS': ['Giraffe', 'Hippo' ,'Zebra']
(string) --
NS (list) --An attribute of type Number Set. For example:
'NS': ['42.2', '-19', '7.5', '3.14']
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
(string) --
BS (list) --An attribute of type Binary Set. For example:
'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']
(bytes) --
M (dict) --An attribute of type Map. For example:
'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
L (list) --An attribute of type List. For example:
'L': ['Cookies', 'Coffee', 3.14159]
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
NULL (boolean) --An attribute of type Null. For example:
'NULL': true
BOOL (boolean) --An attribute of type Boolean. For example:
'BOOL': true
Exists (boolean) --Causes DynamoDB to evaluate the value before attempting a conditional operation:
If Exists is true , DynamoDB will check to see if that attribute value already exists in the table. If it is found, then the operation succeeds. If it is not found, the operation fails with a ConditionalCheckFailedException .
If Exists is false , DynamoDB assumes that the attribute value does not exist in the table. If in fact the value does not exist, then the assumption is valid and the operation succeeds. If the value is found, despite the assumption that it does not exist, the operation fails with a ConditionalCheckFailedException .
The default setting for Exists is true . If you supply a Value all by itself, DynamoDB assumes the attribute exists: You don't have to set Exists to true , because it is implied.
DynamoDB returns a ValidationException if:
Exists is true but there is no Value to check. (You expect a value to exist, but don't specify what that value is.)
Exists is false but you also provide a Value . (You cannot expect an attribute to have a value, while also expecting it not to exist.)
ComparisonOperator (string) --A comparator for evaluating attributes in the AttributeValueList . For example, equals, greater than, less than, etc.
The following comparison operators are available:
EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN
The following are descriptions of each comparison operator.
EQ : Equal. EQ is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue element of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} .
NE : Not equal. NE is supported for all data types, including lists and maps. AttributeValueList can contain only one AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not equal {'NS':['6', '2', '1']} .
LE : Less than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .
LT : Less than. AttributeValueList can contain only one AttributeValue of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .
GE : Greater than or equal. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .
GT : Greater than. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not equal {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']} .
NOT_NULL : The attribute exists. NOT_NULL is supported for all data types, including lists and maps.
Note
This operator tests for the existence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NOT_NULL , the result is a Boolean true . This result is because the attribute 'a ' exists; its data type is not relevant to the NOT_NULL comparison operator.
NULL : The attribute does not exist. NULL is supported for all data types, including lists and maps.
Note
This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute 'a ' is null, and you evaluate it using NULL , the result is a Boolean false . This is because the attribute 'a ' exists; its data type is not relevant to the NULL comparison operator.
CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then the operator checks for a substring match. If the target attribute of the comparison is of type Binary, then the operator looks for a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it finds an exact match with any member of the set. CONTAINS is supported for lists: When evaluating 'a CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list.
NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in a set. AttributeValueList can contain only one AttributeValue element of type String, Number, or Binary (not a set type). If the target attribute of the comparison is a String, then the operator checks for the absence of a substring match. If the target attribute of the comparison is Binary, then the operator checks for the absence of a subsequence of the target that matches the input. If the target attribute of the comparison is a set ('SS ', 'NS ', or 'BS '), then the operator evaluates to true if it does not find an exact match with any member of the set. NOT_CONTAINS is supported for lists: When evaluating 'a NOT CONTAINS b ', 'a ' can be a list; however, 'b ' cannot be a set, a map, or a list.
BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only one AttributeValue of type String or Binary (not a Number or a set type). The target attribute of the comparison must be of type String or Binary (not a Number or a set type).
IN : Checks for matching elements in a list. AttributeValueList can contain one or more AttributeValue elements of type String, Number, or Binary. These attributes are compared against an existing attribute of an item. If any elements of the input are equal to the item attribute, the expression evaluates to true.
BETWEEN : Greater than or equal to the first value, and less than or equal to the second value. AttributeValueList must contain two AttributeValue elements of the same type, either String, Number, or Binary (not a set type). A target attribute matches if the target value is greater than, or equal to, the first element and less than, or equal to, the second element. If an item contains an AttributeValue element of a different type than the one provided in the request, the value does not match. For example, {'S':'6'} does not compare to {'N':'6'} . Also, {'N':'6'} does not compare to {'NS':['6', '2', '1']}
AttributeValueList (list) --One or more values to evaluate against the supplied attribute. The number of values in the list depends on the ComparisonOperator being used.
For type Number, value comparisons are numeric.
String value comparisons for greater than, equals, or less than are based on ASCII character code values. For example, a is greater than A , and a is greater than B . For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters .
For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.
For information on specifying data types in JSON, see JSON Data Format in the Amazon DynamoDB Developer Guide .
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
S (string) --An attribute of type String. For example:
'S': 'Hello'
N (string) --An attribute of type Number. For example:
'N': '123.45'
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
B (bytes) --An attribute of type Binary. For example:
'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'
SS (list) --An attribute of type String Set. For example:
'SS': ['Giraffe', 'Hippo' ,'Zebra']
(string) --
NS (list) --An attribute of type Number Set. For example:
'NS': ['42.2', '-19', '7.5', '3.14']
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
(string) --
BS (list) --An attribute of type Binary Set. For example:
'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']
(bytes) --
M (dict) --An attribute of type Map. For example:
'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
L (list) --An attribute of type List. For example:
'L': ['Cookies', 'Coffee', 3.14159]
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
NULL (boolean) --An attribute of type Null. For example:
'NULL': true
BOOL (boolean) --An attribute of type Boolean. For example:
'BOOL': true
:type ConditionalOperator: string
:param ConditionalOperator: This is a legacy parameter. Use ConditionExpression instead. For more information, see ConditionalOperator in the Amazon DynamoDB Developer Guide .
:type ReturnValues: string
:param ReturnValues: Use ReturnValues if you want to get the item attributes as they appeared before they were deleted. For DeleteItem , the valid values are:
NONE - If ReturnValues is not specified, or if its value is NONE , then nothing is returned. (This setting is the default for ReturnValues .)
ALL_OLD - The content of the old item is returned.
Note
The ReturnValues parameter is used by several DynamoDB operations; however, DeleteItem does not recognize any values other than NONE or ALL_OLD .
:type ReturnConsumedCapacity: string
:param ReturnConsumedCapacity: Determines the level of detail about provisioned throughput consumption that is returned in the response:
INDEXES - The response includes the aggregate ConsumedCapacity for the operation, together with ConsumedCapacity for each table and secondary index that was accessed. Note that some operations, such as GetItem and BatchGetItem , do not access any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity information for table(s).
TOTAL - The response includes only the aggregate ConsumedCapacity for the operation.
NONE - No ConsumedCapacity details are included in the response.
:type ReturnItemCollectionMetrics: string
:param ReturnItemCollectionMetrics: Determines whether item collection metrics are returned. If set to SIZE , the response includes statistics about item collections, if any, that were modified during the operation are returned in the response. If set to NONE (the default), no statistics are returned.
:type ConditionExpression: string
:param ConditionExpression: A condition that must be satisfied in order for a conditional DeleteItem to succeed.
An expression can contain any of the following:
Functions: attribute_exists | attribute_not_exists | attribute_type | contains | begins_with | size These function names are case-sensitive.
Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
Logical operators: AND | OR | NOT
For more information on condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer Guide .
:type ExpressionAttributeNames: dict
:param ExpressionAttributeNames: One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames :
To access an attribute whose name conflicts with a DynamoDB reserved word.
To create a placeholder for repeating occurrences of an attribute name in an expression.
To prevent special characters in an attribute name from being misinterpreted in an expression.
Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:
Percentile
The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide ). To work around this, you could specify the following for ExpressionAttributeNames :
{'#P':'Percentile'}
You could then use this substitution in an expression, as in this example:
#P = :val
Note
Tokens that begin with the : character are expression attribute values , which are placeholders for the actual value at runtime.
For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide .
(string) --
(string) --
:type ExpressionAttributeValues: dict
:param ExpressionAttributeValues: One or more values that can be substituted in an expression.
Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following:
Available | Backordered | Discontinued
You would first need to specify ExpressionAttributeValues as follows:
{ ':avail':{'S':'Available'}, ':back':{'S':'Backordered'}, ':disc':{'S':'Discontinued'} }
You could then use these values in an expression, such as this:
ProductStatus IN (:avail, :back, :disc)
For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide .
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
S (string) --An attribute of type String. For example:
'S': 'Hello'
N (string) --An attribute of type Number. For example:
'N': '123.45'
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
B (bytes) --An attribute of type Binary. For example:
'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'
SS (list) --An attribute of type String Set. For example:
'SS': ['Giraffe', 'Hippo' ,'Zebra']
(string) --
NS (list) --An attribute of type Number Set. For example:
'NS': ['42.2', '-19', '7.5', '3.14']
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
(string) --
BS (list) --An attribute of type Binary Set. For example:
'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']
(bytes) --
M (dict) --An attribute of type Map. For example:
'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}
(string) --
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
L (list) --An attribute of type List. For example:
'L': ['Cookies', 'Coffee', 3.14159]
(dict) --Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
NULL (boolean) --An attribute of type Null. For example:
'NULL': true
BOOL (boolean) --An attribute of type Boolean. For example:
'BOOL': true
:rtype: dict
:return: {
'Attributes': {
'string': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
}
},
'ConsumedCapacity': {
'TableName': 'string',
'CapacityUnits': 123.0,
'Table': {
'CapacityUnits': 123.0
},
'LocalSecondaryIndexes': {
'string': {
'CapacityUnits': 123.0
}
},
'GlobalSecondaryIndexes': {
'string': {
'CapacityUnits': 123.0
}
}
},
'ItemCollectionMetrics': {
'ItemCollectionKey': {
'string': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
}
},
'SizeEstimateRangeGB': [
123.0,
]
}
}
:returns:
(string) --
"""
pass |
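python | # Hedged usage sketch for the delete_item call documented above: a conditional
# delete that works around a reserved attribute name with ExpressionAttributeNames
# and supplies a typed value via ExpressionAttributeValues. The table name, key
# schema, and attribute names here are illustrative assumptions, not from the source.
def delete_discontinued_product(client, product_id):
    # The delete only succeeds when #PS currently equals :disc.
    return client.delete_item(
        TableName='Products',
        Key={'Id': {'S': product_id}},
        ConditionExpression='#PS = :disc',
        ExpressionAttributeNames={'#PS': 'ProductStatus'},
        ExpressionAttributeValues={':disc': {'S': 'Discontinued'}},
        ReturnValues='ALL_OLD',  # return the old item's attributes
    ) |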
python | def __send_exc_clear(self, log_if_exc_set=None):
"""Clear send exception and time. If exception was previously was set, optionally log log_if_exc_set at INFO
level.
"""
if not (log_if_exc_set is None or self.__send_exc is None):
logger.info(log_if_exc_set)
self.__send_exc_time = None
self.__send_exc = None |
python | def hash(buf, encoding="utf-8"):
"""
Compute the fuzzy hash of a buffer
:param String|Bytes buf: The data to be fuzzy hashed
:param String encoding: Encoding used if buf is a text string (default "utf-8")
:return: The fuzzy hash
:rtype: String
:raises InternalError: If lib returns an internal error
:raises TypeError: If buf is not String or Bytes
"""
if isinstance(buf, six.text_type):
buf = buf.encode(encoding)
if not isinstance(buf, six.binary_type):
raise TypeError(
"Argument must be of string, unicode or bytes type not "
"'%r'" % type(buf)
)
# allocate memory for result
result = ffi.new("char[]", binding.lib.FUZZY_MAX_RESULT)
if binding.lib.fuzzy_hash_buf(buf, len(buf), result) != 0:
raise InternalError("Function returned an unexpected error code")
return ffi.string(result).decode("ascii") |
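python | # Minimal sketch exercising the hash() wrapper above. Because a text argument is
# encoded before hashing, str and bytes forms of the same data should produce the
# same fuzzy digest; the sample string is an illustrative assumption.
def demo_fuzzy_hash():
    data = "The quick brown fox jumps over the lazy dog. " * 100
    digest_from_text = hash(data)                    # encoded as UTF-8 internally
    digest_from_bytes = hash(data.encode("utf-8"))   # already bytes, hashed as-is
    assert digest_from_text == digest_from_bytes
    return digest_from_text |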
java | public boolean canTrackerBeUsed(
String taskTracker, String trackerHost, TaskInProgress tip) {
synchronized (lockObject) {
return !tip.hasFailedOnMachine(trackerHost);
}
} |
java | private double computeQuantile(double ratio, int totalCount) throws IllegalStateException, IllegalArgumentException {
if (ratio <= 0.0D || ratio >= 1.0D) {
throw new IllegalArgumentException("Expected a ratio strictly between 0 and 1: " + ratio);
}
final double expectedCount = ratio * totalCount;
// Search bucket corresponding to expected count
double lastCount = 0D, newCount;
int bucketIndex = 0;
for (int i = 0; i < buckets.length; i++) {
newCount = lastCount + buckets[i].getCount();
if (expectedCount >= lastCount && expectedCount < newCount) {
bucketIndex = i;
break;
}
lastCount = newCount;
}
// Check that bucket index is in bounds
if (bucketIndex == 0) {
throw new IllegalStateException("Quantile out of bounds: decrease min");
} else if (bucketIndex == bucketNb + 1) {
throw new IllegalStateException("Quantile out of bounds: increase max");
}
// Interpolation of value
final Bucket bucket = buckets[bucketIndex];
return estimateQuantile(bucket, expectedCount, lastCount);
} |
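python | # Standalone Python sketch of the same histogram-quantile technique as
# computeQuantile above: walk the buckets by cumulative count, then linearly
# interpolate inside the bucket that contains the expected count. The bucket
# edges and counts in the example are assumptions for illustration.
def histogram_quantile(edges, counts, ratio):
    if not 0.0 < ratio < 1.0:
        raise ValueError("Expected a ratio strictly between 0 and 1: %r" % ratio)
    expected = ratio * sum(counts)
    cumulative = 0.0
    for i, count in enumerate(counts):
        if count and cumulative + count > expected:
            # interpolate between this bucket's lower and upper edges
            fraction = (expected - cumulative) / count
            return edges[i] + fraction * (edges[i + 1] - edges[i])
        cumulative += count
    return edges[-1]

# histogram_quantile([0, 1, 2, 3, 4], [2, 3, 3, 2], 0.5) -> 2.0 (the median) |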
java | ContentValues toContentValues() {
ContentValues contentValues = new ContentValues();
mBuilder.fillContentValues(contentValues);
contentValues.put(JobStorage.COLUMN_NUM_FAILURES, mFailureCount);
contentValues.put(JobStorage.COLUMN_SCHEDULED_AT, mScheduledAt);
contentValues.put(JobStorage.COLUMN_STARTED, mStarted);
contentValues.put(JobStorage.COLUMN_FLEX_SUPPORT, mFlexSupport);
contentValues.put(JobStorage.COLUMN_LAST_RUN, mLastRun);
return contentValues;
} |
python | def retrieve_all_pages(api_endpoint, **kwargs):
"""
Some MTP apis are paginated using Django Rest Framework's LimitOffsetPagination paginator,
this method loads all pages into a single results list
:param api_endpoint: slumber callable, e.g. `[api_client].cashbook.transactions.locked.get`
:param kwargs: additional arguments to pass into api callable
"""
page_size = getattr(settings, 'REQUEST_PAGE_SIZE', 20)
loaded_results = []
offset = 0
while True:
response = api_endpoint(limit=page_size, offset=offset,
**kwargs)
count = response.get('count', 0)
loaded_results += response.get('results', [])
if len(loaded_results) >= count:
break
offset += page_size
return loaded_results |
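python | # Self-contained sketch of the limit/offset pattern used by retrieve_all_pages
# above; stub_endpoint stands in for the slumber callable, and its data set is an
# assumption for illustration only.
def stub_endpoint(limit=20, offset=0, **kwargs):
    data = list(range(57))
    return {'count': len(data), 'results': data[offset:offset + limit]}

def retrieve_all(endpoint, page_size=20):
    results, offset = [], 0
    while True:
        page = endpoint(limit=page_size, offset=offset)
        results += page.get('results', [])
        if len(results) >= page.get('count', 0):
            return results
        offset += page_size

# retrieve_all(stub_endpoint) collects all 57 items across three requests. |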
python | def create_review(self, commit=github.GithubObject.NotSet, body=None, event=github.GithubObject.NotSet, comments=github.GithubObject.NotSet):
"""
:calls: `POST /repos/:owner/:repo/pulls/:number/reviews <https://developer.github.com/v3/pulls/reviews/>`_
:param commit: github.Commit.Commit
:param body: string
:param event: string
:param comments: list
:rtype: :class:`github.PullRequestReview.PullRequestReview`
"""
assert commit is github.GithubObject.NotSet or isinstance(commit, github.Commit.Commit), commit
assert isinstance(body, str), body
assert event is github.GithubObject.NotSet or isinstance(event, str), event
assert comments is github.GithubObject.NotSet or isinstance(comments, list), comments
post_parameters = dict()
if commit is not github.GithubObject.NotSet:
post_parameters['commit_id'] = commit.sha
post_parameters['body'] = body
post_parameters['event'] = 'COMMENT' if event == github.GithubObject.NotSet else event
if comments is github.GithubObject.NotSet:
post_parameters['comments'] = []
else:
post_parameters['comments'] = comments
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/reviews",
input=post_parameters
)
self._useAttributes(data)
return github.PullRequestReview.PullRequestReview(self._requester, headers, data, completed=True) |
java | @Override
public <T> long deleteObject(String name, T obj) throws CpoException {
return getCurrentResource().deleteObject( name, obj);
} |
java | @Nonnull
public static <T1, T2> LToSrtBiFunction<T1, T2> toSrtBiFunctionFrom(Consumer<LToSrtBiFunctionBuilder<T1, T2>> buildingFunction) {
LToSrtBiFunctionBuilder builder = new LToSrtBiFunctionBuilder();
buildingFunction.accept(builder);
return builder.build();
} |
python | def std_hash(word, salt):
"""Generates a cryptographically strong (sha512) hash with this nodes
salt added."""
try:
password = word.encode('utf-8')
except UnicodeDecodeError:
password = word
word_hash = sha512(password)
word_hash.update(salt)
hex_hash = word_hash.hexdigest()
return hex_hash |
java | public static ConstraintViolationException instantiate(
final SerializationStreamReader streamReader) throws SerializationException {
final String message = streamReader.readString();
@SuppressWarnings("unchecked")
final Set<ConstraintViolation<?>> set = (Set<ConstraintViolation<?>>) streamReader.readObject();
return new ConstraintViolationException(message, set);
} |
python | def _add_to_graph(self, term, parents):
"""
Add a term and all its children to ``graph``.
``parents`` is the set of all the parents of ``term`` that we've added
so far. It is only used to detect dependency cycles.
"""
if self._frozen:
raise ValueError(
"Can't mutate %s after construction." % type(self).__name__
)
# If we've seen this node already as a parent of the current traversal,
# it means we have an unsatisfiable dependency. This should only be
# possible if the term's inputs are mutated after construction.
if term in parents:
raise CyclicDependency(term)
parents.add(term)
self.graph.add_node(term)
for dependency in term.dependencies:
self._add_to_graph(dependency, parents)
self.graph.add_edge(dependency, term)
parents.remove(term) |
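python | # Standalone sketch of the parent-set cycle check used by _add_to_graph above:
# seeing a term inside its own ancestor set means the dependencies form a cycle.
# The plain-dict graph is an assumption; the original stores edges in networkx.
def add_term(graph, term, dependencies, parents=None):
    parents = set() if parents is None else parents
    if term in parents:
        raise ValueError('cyclic dependency involving %r' % (term,))
    parents.add(term)
    graph.setdefault(term, set())
    for dep in dependencies.get(term, ()):
        add_term(graph, dep, dependencies, parents)
        graph.setdefault(dep, set()).add(term)  # edge: dependency -> dependent
    parents.remove(term)
    return graph

# add_term({}, 'a', {'a': ['b'], 'b': ['a']}) raises ValueError (a -> b -> a). |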
python | def cloud_cover_to_irradiance_clearsky_scaling(self, cloud_cover,
method='linear',
**kwargs):
"""
Estimates irradiance from cloud cover in the following steps:
1. Determine clear sky GHI using Ineichen model and
climatological turbidity.
2. Estimate cloudy sky GHI using a function of
cloud_cover e.g.
:py:meth:`~ForecastModel.cloud_cover_to_ghi_linear`
3. Estimate cloudy sky DNI using the DISC model.
4. Calculate DHI from GHI and DNI.
Parameters
----------
cloud_cover : Series
Cloud cover in %.
method : str, default 'linear'
Method for converting cloud cover to GHI.
'linear' is currently the only option.
**kwargs
Passed to the method that does the conversion
Returns
-------
irrads : DataFrame
Estimated GHI, DNI, and DHI.
"""
solpos = self.location.get_solarposition(cloud_cover.index)
cs = self.location.get_clearsky(cloud_cover.index, model='ineichen',
solar_position=solpos)
method = method.lower()
if method == 'linear':
ghi = self.cloud_cover_to_ghi_linear(cloud_cover, cs['ghi'],
**kwargs)
else:
raise ValueError('invalid method argument')
dni = disc(ghi, solpos['zenith'], cloud_cover.index)['dni']
dhi = ghi - dni * np.cos(np.radians(solpos['zenith']))
irrads = pd.DataFrame({'ghi': ghi, 'dni': dni, 'dhi': dhi}).fillna(0)
return irrads |
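python | # Hedged sketch of the linear conversion referenced in step 2 above: GHI shrinks
# linearly from the clear-sky value at 0% cloud cover toward offset * clear-sky at
# 100%. The 0.35 default offset follows pvlib's documented cloud_cover_to_ghi_linear
# behavior, but treat the exact constant as an assumption here.
import numpy as np

def cloud_cover_to_ghi_linear_sketch(cloud_cover_pct, ghi_clear, offset=0.35):
    cloud_fraction = np.asarray(cloud_cover_pct) / 100.0
    return (offset + (1.0 - offset) * (1.0 - cloud_fraction)) * np.asarray(ghi_clear)

# 0% cover returns ghi_clear unchanged; 100% cover returns 0.35 * ghi_clear. |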
python | def get_repo_info(repo_name, profile='github', ignore_cache=False):
'''
Return information for a given repo.
.. versionadded:: 2016.11.0
repo_name
The name of the repository.
profile
The name of the profile configuration to use. Defaults to ``github``.
CLI Example:
.. code-block:: bash
salt myminion github.get_repo_info salt
salt myminion github.get_repo_info salt profile='my-github-profile'
'''
org_name = _get_config_value(profile, 'org_name')
key = "github.{0}:{1}:repo_info".format(
_get_config_value(profile, 'org_name'),
repo_name.lower()
)
if key not in __context__ or ignore_cache:
client = _get_client(profile)
try:
repo = client.get_repo('/'.join([org_name, repo_name]))
if not repo:
return {}
# client.get_repo can return a github.Repository.Repository object,
# even if the repo is invalid. We need to catch the exception when
# we try to perform actions on the repo object, rather than above
# the if statement.
ret = _repo_to_dict(repo)
__context__[key] = ret
except github.UnknownObjectException:
raise CommandExecutionError(
'The \'{0}\' repository under the \'{1}\' organization could not '
'be found.'.format(
repo_name,
org_name
)
)
return __context__[key] |
java | private PublishDocumentResponse publishDocument(PublishDocumentRequest request) {
checkNotNull(request, "request should not be null.");
checkNotNull(request.getDocumentId(), "documentId should not be null.");
InternalRequest internalRequest = this.createRequest(HttpMethodName.PUT, request, DOC, request.getDocumentId());
internalRequest.addParameter("publish", null);
// Content-Length must be set explicitly here, otherwise auth will fail,
// because the HTTP layer only adds Content-Length automatically when the request is sent
internalRequest.addHeader(Headers.CONTENT_LENGTH, "0");
PublishDocumentResponse response;
try {
response = this.invokeHttpClient(internalRequest, PublishDocumentResponse.class);
} finally {
try {
internalRequest.getContent().close();
} catch (Exception e) {
// ignore exception
}
}
return response;
} |
python | def translate_poco_step(self, step):
"""
Handle a poco operation. Unlike airtest, the parameters consist of a
screenshot plus an action, which must be merged into a single step.
Parameters
----------
step : a complete operation, e.g. click
prev_step : the previous step, expected to be a screenshot
Returns
-------
"""
ret = {}
prev_step = self._steps[-1]
if prev_step:
ret.update(prev_step)
ret['type'] = step[1].get("name", "")
if step.get('trace'):
ret['trace'] = step['trace']
ret['traceback'] = step.get('traceback')
if ret['type'] == 'touch':
# extract the click position
if step[1]['args'] and len(step[1]['args'][0]) == 2:
pos = step[1]['args'][0]
ret['target_pos'] = [int(pos[0]), int(pos[1])]
ret['top'] = ret['target_pos'][1]
ret['left'] = ret['target_pos'][0]
elif ret['type'] == 'swipe':
if step[1]['args'] and len(step[1]['args'][0]) == 2:
pos = step[1]['args'][0]
ret['target_pos'] = [int(pos[0]), int(pos[1])]
ret['top'] = ret['target_pos'][1]
ret['left'] = ret['target_pos'][0]
# swipe needs a direction to display
vector = step[1]["kwargs"].get("vector")
if vector:
ret['swipe'] = self.dis_vector(vector)
ret['vector'] = vector
ret['desc'] = self.func_desc_poco(ret)
ret['title'] = self._translate_title(ret)
return ret |
python | def StripTypeInfo(rendered_data):
"""Strips type information from rendered data. Useful for debugging."""
if isinstance(rendered_data, (list, tuple)):
return [StripTypeInfo(d) for d in rendered_data]
elif isinstance(rendered_data, dict):
if "value" in rendered_data and "type" in rendered_data:
return StripTypeInfo(rendered_data["value"])
else:
result = {}
for k, v in iteritems(rendered_data):
result[k] = StripTypeInfo(v)
return result
else:
return rendered_data |
python | def init_menu():
"""Initialize menu before first request."""
# Register breadcrumb root
item = current_menu.submenu('breadcrumbs.settings')
item.register('', _('Account'))
item = current_menu.submenu('breadcrumbs.{0}'.format(
current_app.config['SECURITY_BLUEPRINT_NAME']))
if current_app.config.get('SECURITY_CHANGEABLE', True):
item.register('', _('Change password'))
# Register settings menu
item = current_menu.submenu('settings.change_password')
item.register(
"{0}.change_password".format(
current_app.config['SECURITY_BLUEPRINT_NAME']),
# NOTE: Menu item text (icon replaced by a user icon).
_('%(icon)s Change password',
icon='<i class="fa fa-key fa-fw"></i>'),
order=1)
# Register breadcrumb
item = current_menu.submenu('breadcrumbs.{0}.change_password'.format(
current_app.config['SECURITY_BLUEPRINT_NAME']))
item.register(
"{0}.change_password".format(
current_app.config['SECURITY_BLUEPRINT_NAME']),
_("Change password"),
order=0,
) |
python | def _W(self, mu, weights, y=None):
"""
Compute the PIRLS weights for model predictions.
TODO: let's verify the formula for this.
If we use the square root of mu with the stable optimizer, we get the same
results as when we use non-sqrt mu with the naive optimizer, which suggests
the two are equivalent. However, using non-sqrt mu with the stable optimizer
gives very small EDoFs even for lam=0.001, and the parameter variance is
huge, which seems strange.
Computed [V * d(link)/d(mu)]^(-1/2) by hand and the math checks out as hoped;
the square has since been moved to the naive PIRLS method to keep the code modular.
Parameters
---------
mu : array-like of shape (n_samples,)
expected value of the targets given the model and inputs
weights : array-like of shape (n_samples,)
containing sample weights
y : array-like of shape (n_samples,) or None, default None
useful for computing the asymmetric weight.
Returns
-------
weights : scipy.sparse array of shape (n_samples, n_samples)
"""
# asymmetric weight
asym = (y > mu) * self.expectile + (y <= mu) * (1 - self.expectile)
return sp.sparse.diags((self.link.gradient(mu, self.distribution)**2 *
self.distribution.V(mu=mu) *
weights ** -1)**-0.5 * asym**0.5) |
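python | # Numeric check of the weight formula in _W above for the canonical logit link,
# where dlink/dmu = 1 / (mu * (1 - mu)) and V(mu) = mu * (1 - mu): the expression
# collapses to sqrt(weights * mu * (1 - mu)). The test values are assumptions.
import numpy as np

def pirls_weights_logit(mu, weights):
    grad = 1.0 / (mu * (1.0 - mu))   # gradient of the logit link
    variance = mu * (1.0 - mu)       # binomial variance function
    return (grad ** 2 * variance * weights ** -1) ** -0.5

mu = np.array([0.2, 0.5, 0.8])
weights = np.ones_like(mu)
assert np.allclose(pirls_weights_logit(mu, weights),
                   np.sqrt(weights * mu * (1.0 - mu))) |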
python | def add_input_variable(self, var):
"""Adds the argument variable as one of the input variable"""
assert(isinstance(var, Variable))
self.input_variable_list.append(var) |
python | def load_obj(self, jref, getter=None, parser=None):
""" load a object(those in spec._version_.objects) from a JSON reference.
"""
obj = self.__resolver.resolve(jref, getter)
# get root document to check its swagger version.
tmp = {'_tmp_': {}}
version = utils.get_swagger_version(obj)
if version == '1.2':
# swagger 1.2
with ResourceListContext(tmp, '_tmp_') as ctx:
ctx.parse(obj, jref, self.__resolver, getter)
elif version == '2.0':
# swagger 2.0
with SwaggerContext(tmp, '_tmp_') as ctx:
ctx.parse(obj)
elif version is None and parser:
with parser(tmp, '_tmp_') as ctx:
ctx.parse(obj)
version = tmp['_tmp_'].__swagger_version__ if hasattr(tmp['_tmp_'], '__swagger_version__') else version
else:
raise NotImplementedError('Unsupported Swagger Version: {0} from {1}'.format(version, jref))
if not tmp['_tmp_']:
raise Exception('Unable to parse object from {0}'.format(jref))
logger.info('version: {0}'.format(version))
return tmp['_tmp_'], version |
python | def insert(self, value, index):
'''Accepts a :value: and :index: parameter and inserts
a new key, value member at the desired index.
Note: Inserting with a negative index will have the following behavior:
>>> l = [1, 2, 3, 4]
>>> l.insert(-1, 5)
>>> l
[1, 2, 3, 5, 4]
'''
if value in self._set:
self._set.discard(value)
self._keys.insert(index, value)
self._set.add(value) |
python | def _format_extname(self, ext):
"""Pretty print given extension name and number tuple."""
if ext is None:
outs = ext
else:
outs = '{0},{1}'.format(ext[0], ext[1])
return outs |
python | def get_scratch_predictions(self, path_to_scratch, results_dir, scratch_basename='scratch', num_cores=1,
exposed_buried_cutoff=25, custom_gene_mapping=None):
"""Run and parse ``SCRATCH`` results to predict secondary structure and solvent accessibility.
Annotations are stored in the protein's representative sequence at:
* ``.annotations``
* ``.letter_annotations``
Args:
path_to_scratch (str): Path to SCRATCH executable
results_dir (str): Path to SCRATCH results folder, which will have the files (scratch.ss, scratch.ss8,
scratch.acc, scratch.acc20)
scratch_basename (str): Basename of the SCRATCH results ('scratch' is default)
num_cores (int): Number of cores to use to parallelize SCRATCH run
exposed_buried_cutoff (int): Cutoff of exposed/buried for the acc20 predictions
custom_gene_mapping (dict): Default parsing of SCRATCH output files is to look for the model gene IDs. If
your output files contain IDs which differ from the model gene IDs, use this dictionary to map model
gene IDs to result file IDs. Dictionary keys must match model genes.
"""
if not self.genome_path:
# Write all sequences as one file
all_seqs = self.write_representative_sequences_file(outname=self.id)
# Runs SCRATCH or loads existing results in results_dir
scratch = SCRATCH(project_name=scratch_basename, seq_file=self.genome_path)
scratch.run_scratch(path_to_scratch=path_to_scratch, num_cores=num_cores, outdir=results_dir)
sspro_summary = scratch.sspro_summary()
sspro8_summary = scratch.sspro8_summary()
sspro_results = scratch.sspro_results()
sspro8_results = scratch.sspro8_results()
accpro_summary = scratch.accpro_summary()
accpro20_summary = scratch.accpro20_summary(exposed_buried_cutoff)
accpro_results = scratch.accpro_results()
accpro20_results = scratch.accpro20_results()
counter = 0
# Adding the scratch annotations to the representative_sequences letter_annotations
for g in tqdm(self.genes_with_a_representative_sequence):
if custom_gene_mapping:
g_id = custom_gene_mapping[g.id]
else:
g_id = g.id
if g_id in sspro_summary:
# Secondary structure
g.protein.representative_sequence.annotations.update(sspro_summary[g_id])
g.protein.representative_sequence.annotations.update(sspro8_summary[g_id])
try:
g.protein.representative_sequence.letter_annotations['SS-sspro'] = sspro_results[g_id]
g.protein.representative_sequence.letter_annotations['SS-sspro8'] = sspro8_results[g_id]
except TypeError:
log.error('Gene {}, SeqProp {}: sequence length mismatch between SCRATCH results and representative '
'sequence, unable to set letter annotation'.format(g_id, g.protein.representative_sequence.id))
# Solvent accessibility
g.protein.representative_sequence.annotations.update(accpro_summary[g_id])
g.protein.representative_sequence.annotations.update(accpro20_summary[g_id])
try:
g.protein.representative_sequence.letter_annotations['RSA-accpro'] = accpro_results[g_id]
g.protein.representative_sequence.letter_annotations['RSA-accpro20'] = accpro20_results[g_id]
except TypeError:
log.error('Gene {}, SeqProp {}: sequence length mismatch between SCRATCH results and representative '
'sequence, unable to set letter annotation'.format(g_id, g.protein.representative_sequence.id))
counter += 1
else:
log.error('{}: missing SCRATCH results'.format(g.id))
log.info('{}/{}: number of genes with SCRATCH predictions loaded'.format(counter, len(self.genes))) |
java | public static List<BitextRule> getBitextRules(Language source,
Language target, File externalBitextRuleFile) throws IOException, ParserConfigurationException, SAXException {
List<BitextRule> bRules = new ArrayList<>();
//try to load the bitext pattern rules for the language...
BitextPatternRuleLoader ruleLoader = new BitextPatternRuleLoader();
String name = "/" + target.getShortCode() + "/bitext.xml";
if (JLanguageTool.getDataBroker().ruleFileExists(name)) {
InputStream is = JLanguageTool.getDataBroker().getFromRulesDirAsStream(name);
if (is != null) {
bRules.addAll(ruleLoader.getRules(is, name));
}
}
if (externalBitextRuleFile != null) {
bRules.addAll(ruleLoader.getRules(new FileInputStream(externalBitextRuleFile), externalBitextRuleFile.getAbsolutePath()));
}
//load the false friend rules in the bitext mode:
FalseFriendsAsBitextLoader fRuleLoader = new FalseFriendsAsBitextLoader();
String falseFriendsFile = "/false-friends.xml";
List<BitextPatternRule> rules = fRuleLoader.getFalseFriendsAsBitext(falseFriendsFile, source, target);
bRules.addAll(rules);
//load Java bitext rules:
bRules.addAll(getAllBuiltinBitextRules(source, null));
return bRules;
} |
java | public static Date addDaysToCurrentDate(int numberOfDays) {
Date date = new Date();
Calendar instance = Calendar.getInstance();
instance.setTime(date);
instance.add(Calendar.DATE, numberOfDays);
return instance.getTime();
} |
java | public long distance(String a, String b)
{
Long[] itemA = get(a);
if (itemA == null) return Long.MAX_VALUE / 3;
Long[] itemB = get(b);
if (itemB == null) return Long.MAX_VALUE / 3;
return ArrayDistance.computeAverageDistance(itemA, itemB);
} |
java | @Override
public SortedSet<String> getAttributeSortedStringSet(String name) {
try {
TreeSet<String> attrSet = new TreeSet<String>();
LdapUtils.collectAttributeValues(originalAttrs, name, attrSet, String.class);
return attrSet;
}
catch (NoSuchAttributeException e) {
// The attribute does not exist - contract says to return null.
return null;
}
} |
java | public final Node getPropertyNode(String propertyName) {
Property p = getSlot(propertyName);
return p == null ? null : p.getNode();
} |
python | def _dcm_to_q(self, dcm):
"""
Create q from dcm
Reference:
- Shoemake, Quaternions,
http://www.cs.ucr.edu/~vbz/resources/quatut.pdf
:param dcm: 3x3 dcm array
returns: quaternion array
"""
assert(dcm.shape == (3, 3))
q = np.zeros(4)
tr = np.trace(dcm)
if tr > 0:
s = np.sqrt(tr + 1.0)
q[0] = s * 0.5
s = 0.5 / s
q[1] = (dcm[2][1] - dcm[1][2]) * s
q[2] = (dcm[0][2] - dcm[2][0]) * s
q[3] = (dcm[1][0] - dcm[0][1]) * s
else:
dcm_i = np.argmax(np.diag(dcm))
dcm_j = (dcm_i + 1) % 3
dcm_k = (dcm_i + 2) % 3
s = np.sqrt((dcm[dcm_i][dcm_i] - dcm[dcm_j][dcm_j] -
dcm[dcm_k][dcm_k]) + 1.0)
q[dcm_i + 1] = s * 0.5
s = 0.5 / s
q[dcm_j + 1] = (dcm[dcm_i][dcm_j] + dcm[dcm_j][dcm_i]) * s
q[dcm_k + 1] = (dcm[dcm_k][dcm_i] + dcm[dcm_i][dcm_k]) * s
q[0] = (dcm[dcm_k][dcm_j] - dcm[dcm_j][dcm_k]) * s
return q |
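python | # Quick numeric check of the positive-trace branch of _dcm_to_q above, using a
# 90-degree rotation about z; the expected scalar-first quaternion (matching the
# q[0] assignment in the method) is [cos(45deg), 0, 0, sin(45deg)].
import numpy as np

def dcm_to_q_positive_trace(dcm):
    # the tr > 0 branch of the Shoemake conversion shown above
    s = np.sqrt(np.trace(dcm) + 1.0)
    q = np.empty(4)
    q[0] = 0.5 * s
    s = 0.5 / s
    q[1] = (dcm[2][1] - dcm[1][2]) * s
    q[2] = (dcm[0][2] - dcm[2][0]) * s
    q[3] = (dcm[1][0] - dcm[0][1]) * s
    return q

dcm_z90 = np.array([[0.0, -1.0, 0.0],
                    [1.0,  0.0, 0.0],
                    [0.0,  0.0, 1.0]])
assert np.allclose(dcm_to_q_positive_trace(dcm_z90),
                   [np.sqrt(0.5), 0.0, 0.0, np.sqrt(0.5)]) |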
python | def make_epsilons(matrix, seed, correlation):
"""
Given a matrix N * R returns a matrix of the same shape N * R
obtained by applying the multivariate_normal distribution to
N points and R samples, by starting from the given seed and
correlation.
"""
if seed is not None:
numpy.random.seed(seed)
asset_count = len(matrix)
samples = len(matrix[0])
if not correlation: # avoid building the covariance matrix
return numpy.random.normal(size=(samples, asset_count)).transpose()
means_vector = numpy.zeros(asset_count)
covariance_matrix = (
numpy.ones((asset_count, asset_count)) * correlation +
numpy.diag(numpy.ones(asset_count)) * (1 - correlation))
return numpy.random.multivariate_normal(
means_vector, covariance_matrix, samples).transpose() |
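python | # Sanity-check sketch for make_epsilons above: with correlation=1.0 the covariance
# matrix is all ones, so every asset row receives (numerically) identical samples;
# the matrix shape and seed below are illustrative assumptions.
import numpy

matrix = numpy.zeros((3, 1000))  # 3 assets, 1000 samples; the values are unused
eps = make_epsilons(matrix, seed=42, correlation=1.0)
assert eps.shape == (3, 1000)
assert numpy.allclose(eps[0], eps[1]) and numpy.allclose(eps[1], eps[2]) |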
python | def regexp_extract(str, pattern, idx):
r"""Extract a specific group matched by a Java regex, from the specified string column.
If the regex did not match, or the specified group did not match, an empty string is returned.
>>> df = spark.createDataFrame([('100-200',)], ['str'])
>>> df.select(regexp_extract('str', r'(\d+)-(\d+)', 1).alias('d')).collect()
[Row(d=u'100')]
>>> df = spark.createDataFrame([('foo',)], ['str'])
>>> df.select(regexp_extract('str', r'(\d+)', 1).alias('d')).collect()
[Row(d=u'')]
>>> df = spark.createDataFrame([('aaaac',)], ['str'])
>>> df.select(regexp_extract('str', '(a+)(b)?(c)', 2).alias('d')).collect()
[Row(d=u'')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.regexp_extract(_to_java_column(str), pattern, idx)
return Column(jc) |
java | private void renderItemValue(MenuItem item) {
// only proceed if there's a label to be updated..
Label lblForVal = itemIdToLabel.get(item.getId());
if (lblForVal == null) return;
//
// First we use the visitor again to call the right method in the visitor based on its type.
// The visitor's setResult stores a string value that is to be used for rendering.
//
Optional<String> value = MenuItemHelper.visitWithResult(item, new AbstractMenuItemVisitor<>() {
/**
* Render an analog item into the label; note that it must apply the offset and divisor.
* @param item the item to display
*/
@Override
public void visit(AnalogMenuItem item) {
MenuState<Integer> state = menuTree.getMenuState(item);
if (state != null) {
double val = (double) (state.getValue() + item.getOffset());
val = val / ((double) item.getDivisor());
setResult(String.format("%.2f%s", val, item.getUnitName()));
}
}
/**
* Render a boolean item, taking into account the different types of boolean naming.
* @param item the item to render
*/
@Override
public void visit(BooleanMenuItem item) {
MenuState<Boolean> state = menuTree.getMenuState(item);
if (state != null) {
switch (item.getNaming()) {
case ON_OFF:
setResult(state.getValue() ? "ON" : "OFF");
break;
case YES_NO:
setResult(state.getValue() ? "YES" : "NO");
break;
case TRUE_FALSE:
default:
setResult(state.getValue() ? "TRUE" : "FALSE");
break;
}
}
}
/**
* Render an enumeration by displaying its printable name for the chosen index
* @param item the item to render
*/
@Override
public void visit(EnumMenuItem item) {
MenuState<Integer> state = menuTree.getMenuState(item);
if (state != null) {
setResult(item.getEnumEntries().get(state.getValue()));
}
}
/**
* Render a remote item by displaying the connection status contained in the object
* @param item the remote item to render
*/
@Override
public void visit(RemoteMenuItem item) {
MenuState<String> state = menuTree.getMenuState(item);
if(state != null) {
setResult(state.getValue());
}
}
/**
* Render a floating point item to the number of decimal places configured.
* @param item the floating point item to render.
*/
@Override
public void visit(FloatMenuItem item) {
MenuState<Float> state = menuTree.getMenuState(item);
if(state != null) {
NumberFormat fmt = NumberFormat.getInstance();
fmt.setGroupingUsed(false);
fmt.setMinimumFractionDigits(item.getNumDecimalPlaces());
fmt.setMaximumFractionDigits(item.getNumDecimalPlaces());
setResult(fmt.format(state.getValue()));
}
}
/**
* Render a text value simply by using its current value.
* @param item the item to render.
*/
@Override
public void visit(TextMenuItem item) {
MenuState<String> state = menuTree.getMenuState(item);
if (state != null) {
setResult(state.getValue());
}
}
/**
* For anything else, do nothing.
* @param item an item type we are not rendering.
*/
@Override
public void anyItem(MenuItem item) {
setResult("");
}
});
// And lastly set the text we just built into the label.
lblForVal.setText(value.orElse("Not Present"));
itemIdToChangeTicks.put(item.getId(), TICKS_HIGHLIGHT_ON_CHANGE);
} |
java | public boolean isBound(@NonNull BeaconConsumer consumer) {
synchronized(consumers) {
// Annotation doesn't guarantee we get a non-null, but raising an NPE here is excessive
//noinspection ConstantConditions
return consumer != null && consumers.get(consumer) != null &&
(mScheduledScanJobsEnabled || serviceMessenger != null);
}
} |
java | @Override
public int compareTo(Object arg0) {
Interval that = (Interval)arg0;
return this.bounds.compareTo(that.getBounds());
} |
java | public KnowledgeRuntimeManager newRuntimeManager(KnowledgeRuntimeManagerType type) {
RuntimeManager runtimeManager;
final String identifier = _identifierRoot + IDENTIFIER_COUNT.incrementAndGet();
final ClassLoader origTCCL = Classes.setTCCL(_classLoader);
try {
runtimeManager = _runtimeManagerBuilder.build(type, identifier);
} finally {
Classes.setTCCL(origTCCL);
}
return new KnowledgeRuntimeManager(_classLoader, type, _serviceDomainName, _serviceName, runtimeManager, _persistent, _channelBuilders, _loggerBuilders);
} |
java | @Override
public boolean add(T obj) {
if (obj.getDRIndex() != null)
throw new IllegalArgumentException("Cannot insert an object into a Store which is already in a store (drIndex=" + obj.getDRIndex() + ")");
obj.setDRIndex(items.size());
return items.add(obj);
} |
java | public static ImageIcon getHelpIcon() {
if (helpIcon == null) {
helpIcon = DisplayUtils.getScaledIcon(new ImageIcon(ExtensionHelp.class.getResource("/resource/icon/16/201.png")));
}
return helpIcon;
} |
python | def getContactUIDForUser(self):
"""Get the UID of the user associated with the authenticated user
"""
membership_tool = api.get_tool("portal_membership")
member = membership_tool.getAuthenticatedMember()
username = member.getUserName()
r = self.portal_catalog(
portal_type="Contact",
getUsername=username
)
if len(r) == 1:
return r[0].UID |
java | protected final PrcAccEntityWithSubaccCreate<RS, IHasId<Object>, Object>
createPutPrcAccEntityWithSubaccCreate(
final Map<String, Object> pAddParam) throws Exception {
PrcAccEntityWithSubaccCreate<RS, IHasId<Object>, Object> proc =
new PrcAccEntityWithSubaccCreate<RS, IHasId<Object>, Object>();
@SuppressWarnings("unchecked")
PrcEntityCreate<RS, IHasId<Object>, Object> procDlg =
(PrcEntityCreate<RS, IHasId<Object>, Object>)
this.fctBnEntitiesProcessors
.lazyGet(pAddParam, PrcEntityCreate.class.getSimpleName());
proc.setPrcAccEntityCreate(procDlg);
proc.setSrvTypeCode(getSrvTypeCode());
//assigning fully initialized object:
this.processorsMap
.put(PrcAccEntityWithSubaccCreate.class.getSimpleName(), proc);
return proc;
} |
python | def get_unicode_property(self, i):
"""Get Unicode property."""
index = i.index
prop = []
value = []
try:
c = next(i)
if c.upper() in _ASCII_LETTERS:
prop.append(c)
elif c != '{':
raise SyntaxError("Unicode property missing '{' at %d!" % (i.index - 1))
else:
c = next(i)
if c == '^':
prop.append(c)
c = next(i)
while c not in (':', '=', '}'):
if c not in _PROPERTY:
raise SyntaxError('Invalid Unicode property character at %d!' % (i.index - 1))
if c not in _PROPERTY_STRIP:
prop.append(c)
c = next(i)
if c in (':', '='):
c = next(i)
while c != '}':
if c not in _PROPERTY:
raise SyntaxError('Invalid Unicode property character at %d!' % (i.index - 1))
if c not in _PROPERTY_STRIP:
value.append(c)
c = next(i)
if not value:
raise SyntaxError('Invalid Unicode property!')
except StopIteration:
raise SyntaxError("Missing or unmatched '{' at %d!" % index)
return ''.join(prop).lower(), ''.join(value).lower() |
java | public boolean actionSupportsHttpMethod(String actionMethodName, HttpMethod httpMethod) {
if (restful()) {
return restfulActionSupportsHttpMethod(actionMethodName, httpMethod) || standardActionSupportsHttpMethod(actionMethodName, httpMethod);
} else {
return standardActionSupportsHttpMethod(actionMethodName, httpMethod);
}
} |
python | def parse_compound_list(path, compounds):
"""Parse a structured list of compounds as obtained from a YAML file
Yields CompoundEntries. Path can be given as a string or a context.
"""
context = FilePathContext(path)
for compound_def in compounds:
if 'include' in compound_def:
file_format = compound_def.get('format')
include_context = context.resolve(compound_def['include'])
for compound in parse_compound_file(include_context, file_format):
yield compound
else:
yield parse_compound(compound_def, context) |
python | def list_build_configurations_for_product_version(product_id, version_id, page_size=200, page_index=0, sort="", q=""):
"""
List all BuildConfigurations associated with the given ProductVersion
"""
data = list_build_configurations_for_project_raw(product_id, version_id, page_size, page_index, sort, q)
if data:
return utils.format_json_list(data) |
python | def transform_member(self, node, results):
"""Transform for imports of specific module elements. Replaces
the module to be imported from with the appropriate new
module.
"""
mod_member = results.get("mod_member")
pref = mod_member.prefix
member = results.get("member")
# Simple case with only a single member being imported
if member:
# this may be a list of length one, or just a node
if isinstance(member, list):
member = member[0]
new_name = None
for change in MAPPING[mod_member.value]:
if member.value in change[1]:
new_name = change[0]
break
if new_name:
mod_member.replace(Name(new_name, prefix=pref))
else:
self.cannot_convert(node, "This is an invalid module element")
# Multiple members being imported
else:
# a dictionary for replacements, order matters
modules = []
mod_dict = {}
members = results["members"]
for member in members:
# we only care about the actual members
if member.type == syms.import_as_name:
as_name = member.children[2].value
member_name = member.children[0].value
else:
member_name = member.value
as_name = None
if member_name != u",":
for change in MAPPING[mod_member.value]:
if member_name in change[1]:
if change[0] not in mod_dict:
modules.append(change[0])
mod_dict.setdefault(change[0], []).append(member)
new_nodes = []
indentation = find_indentation(node)
first = True
def handle_name(name, prefix):
if name.type == syms.import_as_name:
kids = [Name(name.children[0].value, prefix=prefix),
name.children[1].clone(),
name.children[2].clone()]
return [Node(syms.import_as_name, kids)]
return [Name(name.value, prefix=prefix)]
for module in modules:
elts = mod_dict[module]
names = []
for elt in elts[:-1]:
names.extend(handle_name(elt, pref))
names.append(Comma())
names.extend(handle_name(elts[-1], pref))
new = FromImport(module, names)
if not first or node.parent.prefix.endswith(indentation):
new.prefix = indentation
new_nodes.append(new)
first = False
if new_nodes:
nodes = []
for new_node in new_nodes[:-1]:
nodes.extend([new_node, Newline()])
nodes.append(new_nodes[-1])
node.replace(nodes)
else:
self.cannot_convert(node, "All module elements are invalid") |
python | def get_subs(subs_file='subreddits.txt', blacklist_file='blacklist.txt') -> List[str]:
"""
Get subs based on a file of subreddits and a file of blacklisted subreddits.
:param subs_file: List of subreddits. Each sub in a new line.
:param blacklist_file: List of blacklisted subreddits. Each sub in a new line.
:return: List of subreddits filtered with the blacklisted subs.
**Example files**::
sub0
sub1
sub2
...
"""
# Get subs and blacklisted subs
with open(subs_file) as subsf, open(blacklist_file) as blacklf:
    subs = [b.lower().replace('\n', '') for b in subsf.readlines()]
    blacklisted = [b.lower().replace('\n', '') for b in blacklf.readlines()]
# Filter blacklisted
subs_filtered = list(sorted(set(subs).difference(set(blacklisted))))
return subs_filtered |
python | def format_strings(self, **kwargs):
"""String substitution of name."""
return mutablerecords.CopyRecord(
self, name=util.format_string(self.name, kwargs)) |
python | def run(self, schedule_type, lookup_id, **kwargs):
"""
Loads Schedule linked to provided lookup
"""
log = self.get_logger(**kwargs)
log.info("Queuing <%s> <%s>" % (schedule_type, lookup_id))
task_run = QueueTaskRun()
task_run.task_id = self.request.id or uuid4()
task_run.started_at = now()
tr_qs = QueueTaskRun.objects
# Load the schedule active items
schedules = Schedule.objects.filter(enabled=True)
if schedule_type == "crontab":
schedules = schedules.filter(celery_cron_definition=lookup_id)
tr_qs = tr_qs.filter(celery_cron_definition=lookup_id)
scheduler_type = CrontabSchedule
task_run.celery_cron_definition_id = lookup_id
elif schedule_type == "interval":
schedules = schedules.filter(celery_interval_definition=lookup_id)
tr_qs = tr_qs.filter(celery_interval_definition=lookup_id)
scheduler_type = IntervalSchedule
task_run.celery_interval_definition_id = lookup_id
# Confirm that this task should run now based on last run time.
try:
last_task_run = tr_qs.latest("started_at")
except QueueTaskRun.DoesNotExist:
# No previous run so it is safe to continue.
pass
else:
# This basically replicates what celery beat is meant to do, but
# we can't trust celery beat and django-celery to always accurately
# update their own last run time.
sched = scheduler_type.objects.get(id=lookup_id)
due, due_next = sched.schedule.is_due(last_task_run.started_at)
if not due and due_next >= settings.DEFAULT_CLOCK_SKEW_SECONDS:
return (
"Aborted Queuing <%s> <%s> due to last task run (%s) "
"at %s"
% (
schedule_type,
lookup_id,
last_task_run.id,
last_task_run.started_at,
)
)
task_run.save()
# create tasks for each active schedule
queued = 0
schedules = schedules.values("id", "auth_token", "endpoint", "payload")
for schedule in schedules.iterator():
schedule["schedule_id"] = str(schedule.pop("id"))
DeliverTask.apply_async(kwargs=schedule)
queued += 1
task_run.completed_at = now()
task_run.save()
return "Queued <%s> Tasks" % (queued,) |
java | static byte [] asHostData(NavigableSet<ChannelSpec> specs)
throws JSONException, IllegalArgumentException {
JSONStringer js = new JSONStringer();
js.array();
for (ChannelSpec spec: specs) {
js.value(spec.asJSONValue());
}
js.endArray();
return js.toString().getBytes(StandardCharsets.UTF_8);
} |
java | public static Class<?> resolveArgument(Type genericType, Class<?> targetType) {
Class<?>[] arguments = resolveArguments(genericType, targetType);
if (arguments == null)
return Unknown.class;
if (arguments.length != 1)
throw new IllegalArgumentException("Expected 1 type argument on generic type "
+ targetType.getName() + " but found " + arguments.length);
return arguments[0];
} |
python | def get_form(self, id=None, *args, **kwargs):
"""Find form by ID, as well as standard BeautifulSoup arguments.
:param str id: Form ID
:return: BeautifulSoup tag if found, else None
"""
if id:
kwargs['id'] = id
form = self.find(_form_ptn, *args, **kwargs)
if form is not None:
return Form(form) |
python | def export(vault_client, opt):
"""Export contents of a Secretfile from the Vault server
into a specified directory."""
ctx = Context.load(get_secretfile(opt), opt) \
.fetch(vault_client)
for resource in ctx.resources():
resource.export(opt.directory) |