name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
open-banking-gateway_PsuAuthService_tryAuthenticateUser | /**
* Try to authenticate a PSU given login and password.
* @param login PSU login
* @param password PSU password
* @return PSU entity if user was successfully authenticated
* @throws PsuWrongCredentials Exception indicating user has provided wrong name or password.
*/
@Transactional
public Psu tryAuthenticateUser(String login, String password) throws PsuWrongCredentials {
Optional<Psu> psu = psuRepository.findByLogin(login);
if (!psu.isPresent()) {
throw new PsuDoesNotExist("User not found: " + login);
}
UserIDAuth idAuth = new UserIDAuth(psu.get().getId().toString(), password::toCharArray);
enableDatasafeAuthentication(idAuth);
return psu.get();
} | 3.68 |
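A minimal caller sketch for the method above. `PsuAuthService`, `Psu`, and `PsuWrongCredentials` come from the snippet; the wrapper method and the assumption that `PsuDoesNotExist` is a subtype of `PsuWrongCredentials` (which the snippet implies, since it is thrown from a method that declares only `PsuWrongCredentials`) are mine.

```java
// Hypothetical caller; catching PsuWrongCredentials is assumed to also cover PsuDoesNotExist.
boolean canLogIn(PsuAuthService psuAuthService, String login, String password) {
    try {
        Psu psu = psuAuthService.tryAuthenticateUser(login, password);
        return psu != null;
    } catch (PsuWrongCredentials e) {
        return false;   // wrong password or unknown login
    }
}
```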
hbase_AsyncRpcRetryingCallerFactory_start | /**
* Short cut for {@code build().start(HBaseRpcController, ScanResponse)}.
*/
public CompletableFuture<Boolean> start(HBaseRpcController controller,
ScanResponse respWhenOpen) {
return build().start(controller, respWhenOpen);
} | 3.68 |
dubbo_CompatibleTypeUtils_compatibleTypeConvert | /**
* Compatible type convert. Null value is allowed to pass in. If no conversion is needed, then the original value
* will be returned.
* <p>
* Supported compatible type conversions include (primitive types and corresponding wrappers are not listed):
* <ul>
* <li> String -> char, enum, Date
* <li> byte, short, int, long -> byte, short, int, long
* <li> float, double -> float, double
* </ul>
*/
@SuppressWarnings({"unchecked", "rawtypes"})
public static Object compatibleTypeConvert(Object value, Class<?> type) {
if (value == null || type == null || type.isAssignableFrom(value.getClass())) {
return value;
}
if (value instanceof String) {
String string = (String) value;
if (char.class.equals(type) || Character.class.equals(type)) {
if (string.length() != 1) {
throw new IllegalArgumentException(String.format(
"CAN NOT convert String(%s) to char!"
+ " When converting String to char, the String MUST contain only 1 char.",
string));
}
return string.charAt(0);
}
if (type.isEnum()) {
return Enum.valueOf((Class<Enum>) type, string);
}
if (type == BigInteger.class) {
return new BigInteger(string);
}
if (type == BigDecimal.class) {
return new BigDecimal(string);
}
if (type == Short.class || type == short.class) {
return Short.valueOf(string);
}
if (type == Integer.class || type == int.class) {
return Integer.valueOf(string);
}
if (type == Long.class || type == long.class) {
return Long.valueOf(string);
}
if (type == Double.class || type == double.class) {
return Double.valueOf(string);
}
if (type == Float.class || type == float.class) {
return Float.valueOf(string);
}
if (type == Byte.class || type == byte.class) {
return Byte.valueOf(string);
}
if (type == Boolean.class || type == boolean.class) {
return Boolean.valueOf(string);
}
if (type == Date.class
|| type == java.sql.Date.class
|| type == java.sql.Timestamp.class
|| type == java.sql.Time.class) {
try {
Date date = new SimpleDateFormat(DATE_FORMAT).parse(string);
if (type == java.sql.Date.class) {
return new java.sql.Date(date.getTime());
}
if (type == java.sql.Timestamp.class) {
return new java.sql.Timestamp(date.getTime());
}
if (type == java.sql.Time.class) {
return new java.sql.Time(date.getTime());
}
return date;
} catch (ParseException e) {
throw new IllegalStateException(
"Failed to parse date " + value + " by format " + DATE_FORMAT + ", cause: "
+ e.getMessage(),
e);
}
}
if (type == java.time.LocalDateTime.class) {
if (StringUtils.isEmpty(string)) {
return null;
}
return LocalDateTime.parse(string);
}
if (type == java.time.LocalDate.class) {
if (StringUtils.isEmpty(string)) {
return null;
}
return LocalDate.parse(string);
}
if (type == java.time.LocalTime.class) {
if (StringUtils.isEmpty(string)) {
return null;
}
if (string.length() >= ISO_LOCAL_DATE_TIME_MIN_LEN) {
return LocalDateTime.parse(string).toLocalTime();
} else {
return LocalTime.parse(string);
}
}
if (type == Class.class) {
try {
return ReflectUtils.name2class(string);
} catch (ClassNotFoundException e) {
throw new RuntimeException(e.getMessage(), e);
}
}
if (char[].class.equals(type)) {
// Process string to char array for generic invoke
// See
// - https://github.com/apache/dubbo/issues/2003
int len = string.length();
char[] chars = new char[len];
string.getChars(0, len, chars, 0);
return chars;
}
}
if (value instanceof Number) {
Number number = (Number) value;
if (type == byte.class || type == Byte.class) {
return number.byteValue();
}
if (type == short.class || type == Short.class) {
return number.shortValue();
}
if (type == int.class || type == Integer.class) {
return number.intValue();
}
if (type == long.class || type == Long.class) {
return number.longValue();
}
if (type == float.class || type == Float.class) {
return number.floatValue();
}
if (type == double.class || type == Double.class) {
return number.doubleValue();
}
if (type == BigInteger.class) {
return BigInteger.valueOf(number.longValue());
}
if (type == BigDecimal.class) {
return new BigDecimal(number.toString());
}
if (type == Date.class) {
return new Date(number.longValue());
}
if (type == boolean.class || type == Boolean.class) {
return 0 != number.intValue();
}
}
if (value instanceof Collection) {
Collection collection = (Collection) value;
if (type.isArray()) {
int length = collection.size();
Object array = Array.newInstance(type.getComponentType(), length);
int i = 0;
for (Object item : collection) {
Array.set(array, i++, item);
}
return array;
}
if (!type.isInterface()) {
try {
Collection result =
(Collection) type.getDeclaredConstructor().newInstance();
result.addAll(collection);
return result;
} catch (Throwable ignored) {
}
}
if (type == List.class) {
return new ArrayList<Object>(collection);
}
if (type == Set.class) {
return new HashSet<Object>(collection);
}
}
if (value.getClass().isArray() && Collection.class.isAssignableFrom(type)) {
int length = Array.getLength(value);
Collection collection;
if (!type.isInterface()) {
try {
collection = (Collection) type.getDeclaredConstructor().newInstance();
} catch (Exception e) {
collection = new ArrayList<Object>(length);
}
} else if (type == Set.class) {
collection = new HashSet<Object>(Math.max((int) (length / .75f) + 1, 16));
} else {
collection = new ArrayList<Object>(length);
}
for (int i = 0; i < length; i++) {
collection.add(Array.get(value, i));
}
return collection;
}
return value;
} | 3.68 |
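A few illustrative calls showing how the branches above behave. The import location is an assumption (the class usually lives under `org.apache.dubbo.common.utils`), and the expected results in the comments follow directly from the code above.

```java
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;
// assumed location of the snippet above
import org.apache.dubbo.common.utils.CompatibleTypeUtils;

public class CompatibleTypeConvertDemo {
    public static void main(String[] args) {
        // String -> enum
        Object unit = CompatibleTypeUtils.compatibleTypeConvert("SECONDS", TimeUnit.class);  // TimeUnit.SECONDS
        // String -> char (the string must contain exactly one character)
        Object ch = CompatibleTypeUtils.compatibleTypeConvert("a", char.class);              // 'a'
        // Number -> boolean (non-zero means true)
        Object flag = CompatibleTypeUtils.compatibleTypeConvert(7, boolean.class);           // Boolean.TRUE
        // Collection -> array with the requested component type
        List<Integer> list = Arrays.asList(1, 2, 3);
        Object array = CompatibleTypeUtils.compatibleTypeConvert(list, Integer[].class);     // Integer[]{1, 2, 3}
        System.out.println(unit + " " + ch + " " + flag + " " + Arrays.toString((Object[]) array));
    }
}
```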
framework_LegacyWindow_removeComponent | /**
* This implementation removes the component from the content container (
* {@link #getContent()}) instead of from the actual UI.
*
* This method should only be called when the content is a
* {@link ComponentContainer} (default {@link VerticalLayout} or explicitly
* set).
*/
public void removeComponent(Component component) {
getContent().removeComponent(component);
} | 3.68 |
framework_VAbstractOrderedLayout_getCaptionPositionFromElement | /**
* Deducts the caption position by examining the wrapping element.
* <p>
* For internal use only. May be removed or replaced in the future.
*
* @param captionWrap
* The wrapping element
*
* @return The caption position
* @since 7.2
*/
public CaptionPosition getCaptionPositionFromElement(Element captionWrap) {
return getCaptionPositionFromElement(DOM.asOld(captionWrap));
} | 3.68 |
querydsl_Fetchable_stream | /**
* Get the projection as a typed closeable Stream.
*
* @return closeable stream
*/
default Stream<T> stream() {
final CloseableIterator<T> iterator = iterate();
final Spliterator<T> spliterator = Spliterators.spliteratorUnknownSize(iterator, Spliterator.ORDERED);
return StreamSupport.stream(spliterator, false)
.onClose(iterator::close);
} | 3.68 |
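Because the returned stream wraps a `CloseableIterator`, callers should close it. A try-with-resources sketch; `queryFactory` and the generated `person` query type are assumptions, not part of the snippet.

```java
// person is an assumed Querydsl query type (e.g. QPerson.person).
try (Stream<String> names = queryFactory.select(person.name).from(person).stream()) {
    names.filter(n -> n.startsWith("A"))
         .forEach(System.out::println);
} // closing the stream runs onClose(), which closes the underlying CloseableIterator
```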
morf_AbstractSqlDialectTest_expectedSqlForMathOperations3 | /**
* @return expected SQL for math operation 3
*/
protected String expectedSqlForMathOperations3() {
return "a / (b + c)";
} | 3.68 |
morf_SchemaAdapter_getTable | /**
* @see org.alfasoftware.morf.metadata.Schema#getTable(java.lang.String)
*/
@Override
public Table getTable(String name) {
return delegate.getTable(name);
} | 3.68 |
flink_OneInputOperatorTransformation_keyBy | /**
* Partitions the operator state of a {@link OperatorTransformation} using field expressions. A
* field expression is either the name of a public field or a getter method with parentheses of
* the {@code OperatorTransformation}'s underlying type. A dot can be used to drill down into
* objects, as in {@code "field1.getInnerField2()" }.
*
* @param fields One or more field expressions on which the state of the {@link
* OperatorTransformation} operators will be partitioned.
* @return The {@code OperatorTransformation} with partitioned state (i.e. KeyedStream)
*/
public KeyedOperatorTransformation<Tuple, T> keyBy(String... fields) {
return keyBy(new Keys.ExpressionKeys<>(fields, dataSet.getType()));
} | 3.68 |
flink_TableDescriptor_toBuilder | /** Converts this immutable instance into a mutable {@link Builder}. */
public Builder toBuilder() {
return new Builder(this);
} | 3.68 |
morf_AbstractSelectStatementBuilder_alias | /**
* Sets the alias for this select statement. This is useful if you are
* including multiple select statements in a single select (not to be confused
* with a join) and wish to reference the select statement itself.
*
* @param alias the alias to set.
* @return this, for method chaining.
*/
public T alias(String alias) {
this.alias = alias;
return castToChild(this);
} | 3.68 |
flink_IntermediateResult_getConsumersParallelism | /**
* Currently, this method is only used to compute the maximum number of consumers. For dynamic
* graph, it should be called before adaptively deciding the downstream consumer parallelism.
*/
int getConsumersParallelism() {
List<JobEdge> consumers = intermediateDataSet.getConsumers();
checkState(!consumers.isEmpty());
InternalExecutionGraphAccessor graph = getProducer().getGraph();
int consumersParallelism =
graph.getJobVertex(consumers.get(0).getTarget().getID()).getParallelism();
if (consumers.size() == 1) {
return consumersParallelism;
}
// sanity check, all consumer vertices must have the same parallelism:
// 1. for vertices that are not assigned a parallelism initially (for example, dynamic
// graph), the parallelisms will all be -1 (parallelism not decided yet)
// 2. for vertices that are initially assigned a parallelism, the parallelisms must be the
// same, which is guaranteed at compilation phase
for (JobVertexID jobVertexID : consumerVertices) {
checkState(
consumersParallelism == graph.getJobVertex(jobVertexID).getParallelism(),
"Consumers must have the same parallelism.");
}
return consumersParallelism;
} | 3.68 |
framework_DragSourceExtension_setDragImage | /**
* Set a custom drag image for the current drag source.
*
* @param imageResource
* Resource of the image to be displayed as drag image.
*/
public void setDragImage(Resource imageResource) {
setResource(DragSourceState.RESOURCE_DRAG_IMAGE, imageResource);
} | 3.68 |
hbase_ZkSplitLogWorkerCoordination_nodeChildrenChanged | /**
* Override handler from {@link ZKListener}
*/
@Override
public void nodeChildrenChanged(String path) {
if (path.equals(watcher.getZNodePaths().splitLogZNode)) {
if (LOG.isTraceEnabled()) {
LOG.trace("tasks arrived or departed on " + path);
}
synchronized (taskReadySeq) {
this.taskReadySeq.incrementAndGet();
taskReadySeq.notify();
}
}
} | 3.68 |
framework_VAbstractCalendarPanel_renderCalendar | /**
* For internal use only. May be removed or replaced in the future.
*
* Updates the calendar and text field with the selected dates.
*
* @param updateDate
* The value false prevents setting the selected date of the
* calendar based on focusedDate. That can be used when only the
* resolution of the calendar is changed and no date has been
* selected.
*/
@SuppressWarnings("rawtypes")
public void renderCalendar(boolean updateDate) {
if (parent instanceof VAbstractPopupCalendar
&& !((VAbstractPopupCalendar) parent).popup.isShowing()) {
// a popup that isn't open cannot possibly need a focus change event
updateDate = false;
}
doRenderCalendar(updateDate);
initialRenderDone = true;
} | 3.68 |
graphhopper_DistanceCalcEarth_calcCircumference | /**
* Circumference of the earth at different latitudes.
*/
public double calcCircumference(double lat) {
return 2 * PI * R * cos(toRadians(lat));
} | 3.68 |
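A quick sanity check of the formula, assuming `R` is the mean earth radius in metres (roughly 6,371,000 m) as used by `DistanceCalcEarth`:

```java
DistanceCalcEarth calc = new DistanceCalcEarth();
double atEquator = calc.calcCircumference(0);   // ~4.0e7 m, i.e. roughly 40,000 km
double at60North = calc.calcCircumference(60);  // cos(60 deg) = 0.5, so roughly half: ~20,000 km
double atPole    = calc.calcCircumference(90);  // ~0 m: the parallels shrink to a point
```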
flink_Tuple6_setFields | /**
* Sets new values to all fields of the tuple.
*
* @param f0 The value for field 0
* @param f1 The value for field 1
* @param f2 The value for field 2
* @param f3 The value for field 3
* @param f4 The value for field 4
* @param f5 The value for field 5
*/
public void setFields(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5) {
this.f0 = f0;
this.f1 = f1;
this.f2 = f2;
this.f3 = f3;
this.f4 = f4;
this.f5 = f5;
} | 3.68 |
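A small usage sketch for the setter above; reusing one tuple instance and resetting all six fields at once is the typical reason it exists. The field values are made up.

```java
import org.apache.flink.api.java.tuple.Tuple6;

public class Tuple6Demo {
    public static void main(String[] args) {
        Tuple6<String, Integer, Long, Double, Boolean, String> t = new Tuple6<>();
        t.setFields("id-1", 42, 1_000L, 3.14, true, "ok");  // same as assigning t.f0 .. t.f5 one by one
        System.out.println(t);  // (id-1,42,1000,3.14,true,ok)
    }
}
```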
flink_DataStream_global | /**
* Sets the partitioning of the {@link DataStream} so that the output values all go to the first
* instance of the next processing operator. Use this setting with care since it might cause a
* serious performance bottleneck in the application.
*
* @return The DataStream with shuffle partitioning set.
*/
@PublicEvolving
public DataStream<T> global() {
return setConnectionType(new GlobalPartitioner<T>());
} | 3.68 |
flink_HiveDDLUtils_disableConstraint | // returns a constraint trait that doesn't require ENABLE
public static byte disableConstraint(byte trait) {
return (byte) (trait & (~HIVE_CONSTRAINT_ENABLE));
} | 3.68 |
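The trait value is a bit mask; assuming `HIVE_CONSTRAINT_ENABLE` is a single flag bit, the method simply clears that bit and leaves any other trait bits untouched. A worked illustration with a made-up flag layout:

```java
// Suppose HIVE_CONSTRAINT_ENABLE == 0b0001 and another trait occupies 0b0100.
byte trait  = 0b0101;                      // ENABLE set plus one other trait bit
byte result = (byte) (trait & (~0b0001));  // 0b0100: ENABLE cleared, other bit preserved
```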
hadoop_AbstractS3ACommitter_getTaskAttemptPath | /**
* Compute the path where the output of a task attempt is stored until
* that task is committed. This may be the normal Task attempt path
* or it may be a subdirectory.
* The default implementation returns the value of
* {@link #getBaseTaskAttemptPath(TaskAttemptContext)};
* subclasses may return different values.
* @param context the context of the task attempt.
* @return the path where a task attempt should be stored.
*/
public Path getTaskAttemptPath(TaskAttemptContext context) {
return getBaseTaskAttemptPath(context);
} | 3.68 |
hbase_CreateTableProcedure_addRegionsToMeta | /**
* Add the specified set of regions to the hbase:meta table.
*/
private static void addRegionsToMeta(final MasterProcedureEnv env,
final TableDescriptor tableDescriptor, final List<RegionInfo> regionInfos) throws IOException {
MetaTableAccessor.addRegionsToMeta(env.getMasterServices().getConnection(), regionInfos,
tableDescriptor.getRegionReplication());
} | 3.68 |
pulsar_FunctionApiResource_clientAuthData | /**
* @deprecated use {@link #authParams()} instead.
*/
@Deprecated
public AuthenticationDataSource clientAuthData() {
return (AuthenticationDataSource) httpRequest.getAttribute(AuthenticationFilter.AuthenticatedDataAttributeName);
} | 3.68 |
framework_AbstractComponent_getCustomAttributes | /**
* Returns a collection of attributes that should not be handled by the
* basic implementation of the {@link #readDesign(Element, DesignContext)}
* and {@link #writeDesign(Element, DesignContext)} methods. Typically these
* are handled in a custom way in the overridden versions of the above
* methods
*
* @since 7.4
*
* @return the collection of attributes that are not handled by the basic
* implementation
*/
protected Collection<String> getCustomAttributes() {
List<String> l = new ArrayList<>(Arrays.asList(CUSTOM_ATTRIBUTES));
if (this instanceof Focusable) {
l.add("tab-index");
l.add("tabindex");
}
return l;
} | 3.68 |
morf_H2Dialect_changePrimaryKeyColumns | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#changePrimaryKeyColumns(org.alfasoftware.morf.metadata.Table, java.util.List, java.util.List)
*/
@Override
public Collection<String> changePrimaryKeyColumns(Table table, List<String> oldPrimaryKeyColumns, List<String> newPrimaryKeyColumns) {
List<String> result = new ArrayList<>();
if (!oldPrimaryKeyColumns.isEmpty()) {
result.add(dropPrimaryKeyConstraintStatement(table));
}
if (!newPrimaryKeyColumns.isEmpty()) {
result.add(addPrimaryKeyConstraintStatement(table, newPrimaryKeyColumns));
}
return result;
} | 3.68 |
querydsl_SQLExpressions_percentileDisc | /**
* PERCENTILE_DISC is an inverse distribution function that assumes a discrete distribution model.
* It takes a percentile value and a sort specification and returns an element from the set.
* Nulls are ignored in the calculation.
*
* <p>This function takes as an argument any numeric datatype or any nonnumeric datatype that can be
* implicitly converted to a numeric datatype. The function returns the same datatype as the numeric
* datatype of the argument.</p>
*
* @param arg argument
* @return percentile_disc(arg)
*/
public static <T extends Number> WithinGroup<T> percentileDisc(Expression<T> arg) {
return new WithinGroup<T>(arg.getType(), SQLOps.PERCENTILEDISC, arg);
} | 3.68 |
flink_SingleInputGate_retriggerPartitionRequest | /** Retriggers a partition request. */
public void retriggerPartitionRequest(
IntermediateResultPartitionID partitionId, int subpartitionIndex) throws IOException {
synchronized (requestLock) {
if (!closeFuture.isDone()) {
final InputChannel ch =
inputChannels.get(new SubpartitionInfo(partitionId, subpartitionIndex));
checkNotNull(ch, "Unknown input channel with ID " + partitionId);
LOG.debug(
"{}: Retriggering partition request {}:{}.",
owningTaskName,
ch.partitionId,
ch.getConsumedSubpartitionIndex());
if (ch.getClass() == RemoteInputChannel.class) {
final RemoteInputChannel rch = (RemoteInputChannel) ch;
rch.retriggerSubpartitionRequest();
} else if (ch.getClass() == LocalInputChannel.class) {
final LocalInputChannel ich = (LocalInputChannel) ch;
if (retriggerLocalRequestTimer == null) {
retriggerLocalRequestTimer = new Timer(true);
}
ich.retriggerSubpartitionRequest(retriggerLocalRequestTimer);
} else {
throw new IllegalStateException(
"Unexpected type of channel to retrigger partition: " + ch.getClass());
}
}
}
} | 3.68 |
flink_ValueLiteralExpression_stringifyValue | /** Supports (nested) arrays and makes string values more explicit. */
private static String stringifyValue(Object value) {
if (value instanceof String[]) {
final String[] array = (String[]) value;
return Stream.of(array)
.map(ValueLiteralExpression::stringifyValue)
.collect(Collectors.joining(", ", "[", "]"));
} else if (value instanceof Object[]) {
final Object[] array = (Object[]) value;
return Stream.of(array)
.map(ValueLiteralExpression::stringifyValue)
.collect(Collectors.joining(", ", "[", "]"));
} else if (value instanceof String) {
return "'" + ((String) value).replace("'", "''") + "'";
}
return StringUtils.arrayAwareToString(value);
} | 3.68 |
flink_CheckpointProperties_isUnclaimed | /** Returns whether the checkpoint should be restored in a {@link RestoreMode#NO_CLAIM} mode. */
public boolean isUnclaimed() {
return unclaimed;
} | 3.68 |
Activiti_TreeMethodExpression_isDeferred | /**
* Answer <code>true</code> if this is a deferred expression (starting with <code>#{</code>)
*/
public boolean isDeferred() {
return deferred;
} | 3.68 |
dubbo_SslContexts_findSslProvider | /**
* Returns OpenSSL if available, otherwise returns the JDK provider.
*/
private static SslProvider findSslProvider() {
if (OpenSsl.isAvailable()) {
logger.debug("Using OPENSSL provider.");
return SslProvider.OPENSSL;
}
if (checkJdkProvider()) {
logger.debug("Using JDK provider.");
return SslProvider.JDK;
}
throw new IllegalStateException(
"Could not find any valid TLS provider, please check your dependency or deployment environment, "
+ "usually netty-tcnative, Conscrypt, or Jetty NPN/ALPN is needed.");
} | 3.68 |
flink_SharedResources_getOrAllocateSharedResource | /**
* Gets the shared memory resource for the given owner and registers a lease. If the resource
* does not yet exist, it will be created via the given initializer function.
*
* <p>The resource must be released when no longer used. That releases the lease. When all
* leases are released, the resource is disposed.
*/
public <T extends AutoCloseable> ResourceAndSize<T> getOrAllocateSharedResource(
String type,
Object leaseHolder,
LongFunctionWithException<T, Exception> initializer,
long sizeForInitialization)
throws Exception {
// We could be stuck on this lock for a while, in cases where another initialization is
// currently happening and the initialization is expensive.
// We lock interruptibly here to allow for faster exit in case of cancellation errors.
try {
lock.lockInterruptibly();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new MemoryAllocationException("Interrupted while acquiring memory");
}
try {
// we cannot use "computeIfAbsent()" here because the computing function may throw an
// exception.
@SuppressWarnings("unchecked")
LeasedResource<T> resource = (LeasedResource<T>) reservedResources.get(type);
if (resource == null) {
resource = createResource(initializer, sizeForInitialization);
reservedResources.put(type, resource);
}
resource.addLeaseHolder(leaseHolder);
return resource;
} finally {
lock.unlock();
}
} | 3.68 |
hbase_WALProcedureStore_initOldLog | /**
* Loads the given log file and its tracker.
*/
private ProcedureWALFile initOldLog(final FileStatus logFile, final Path walArchiveDir)
throws IOException {
final ProcedureWALFile log = new ProcedureWALFile(fs, logFile);
if (logFile.getLen() == 0) {
LOG.warn("Remove uninitialized log: {}", logFile);
log.removeFile(walArchiveDir);
return null;
}
LOG.debug("Opening Pv2 {}", logFile);
try {
log.open();
} catch (ProcedureWALFormat.InvalidWALDataException e) {
LOG.warn("Remove uninitialized log: {}", logFile, e);
log.removeFile(walArchiveDir);
return null;
} catch (IOException e) {
String msg = "Unable to read state log: " + logFile;
LOG.error(msg, e);
throw new IOException(msg, e);
}
try {
log.readTracker();
} catch (IOException e) {
log.getTracker().reset();
log.getTracker().setPartialFlag(true);
LOG.warn("Unable to read tracker for {}", log, e);
}
log.close();
return log;
} | 3.68 |
hadoop_IOStatisticsStoreImpl_getMeanStatistic | /**
* Get a mean statistic.
* @param key statistic name
* @return the reference
* @throws NullPointerException if there is no entry of that name
*/
@Override
public MeanStatistic getMeanStatistic(String key) {
return lookup(meanStatisticMap, key);
} | 3.68 |
hbase_MetaTableAccessor_addRegionStateToPut | /**
* Set the column value corresponding to this {@code replicaId}'s {@link RegionState} to the
* provided {@code state}. Mutates the provided {@link Put}.
*/
public static Put addRegionStateToPut(Put put, int replicaId, RegionState.State state)
throws IOException {
put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow())
.setFamily(HConstants.CATALOG_FAMILY)
.setQualifier(CatalogFamilyFormat.getRegionStateColumn(replicaId))
.setTimestamp(put.getTimestamp()).setType(Cell.Type.Put).setValue(Bytes.toBytes(state.name()))
.build());
return put;
} | 3.68 |
zxing_ECIStringBuilder_appendECI | /**
* Appends ECI value to output.
*
* @param value ECI value to append, as an int
* @throws FormatException on invalid ECI value
*/
public void appendECI(int value) throws FormatException {
encodeCurrentBytesIfAny();
CharacterSetECI characterSetECI = CharacterSetECI.getCharacterSetECIByValue(value);
if (characterSetECI == null) {
throw FormatException.getFormatInstance();
}
currentCharset = characterSetECI.getCharset();
} | 3.68 |
flink_RocksDBIncrementalCheckpointUtils_deleteRange | /**
* Delete the records that fall into [beginKeyBytes, endKeyBytes) of the db.
*
* @param db the target need to be clipped.
* @param columnFamilyHandles the column family need to be clipped.
* @param beginKeyBytes the begin key bytes
* @param endKeyBytes the end key bytes
*/
private static void deleteRange(
RocksDB db,
List<ColumnFamilyHandle> columnFamilyHandles,
byte[] beginKeyBytes,
byte[] endKeyBytes)
throws RocksDBException {
for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) {
// Using RocksDB's deleteRange will take advantage of delete
// tombstones, which mark the range as deleted.
//
// https://github.com/ververica/frocksdb/blob/FRocksDB-6.20.3/include/rocksdb/db.h#L363-L377
db.deleteRange(columnFamilyHandle, beginKeyBytes, endKeyBytes);
}
} | 3.68 |
AreaShop_AreaShop_getWorldGuard | /**
* Function to get the WorldGuard plugin.
* @return WorldGuardPlugin
*/
@Override
public WorldGuardPlugin getWorldGuard() {
return worldGuard;
} | 3.68 |
incubator-hugegraph-toolchain_DataTypeUtil_parseMultiValues | /**
* Collection format: "obj1,obj2,...,objn" or "[obj1,obj2,...,objn]", etc.
* TODO: After parsing to JSON, the order of the collection is changed
* in some cases (such as list<date>)
**/
private static Object parseMultiValues(String key, Object values,
DataType dataType,
Cardinality cardinality,
InputSource source) {
// JSON file should not parse again
if (values instanceof Collection &&
checkCollectionDataType(key, (Collection<?>) values, dataType)) {
return values;
}
E.checkState(values instanceof String,
"The value(key='%s') must be String type, " +
"but got '%s'(%s)", key, values, values.getClass());
String rawValue = (String) values;
List<Object> valueColl = split(key, rawValue, source);
Collection<Object> results = cardinality == Cardinality.LIST ?
InsertionOrderUtil.newList() :
InsertionOrderUtil.newSet();
valueColl.forEach(value -> {
results.add(parseSingleValue(key, value, dataType, source));
});
E.checkArgument(checkCollectionDataType(key, results, dataType),
"Not all collection elems %s match with data type %s",
results, dataType);
return results;
} | 3.68 |
pulsar_ClientCnxIdleState_tryMarkReleasing | /**
* Changes the idle state of the connection to {@link State#RELEASING}. This method only transitions the
* connection from the {@link State#IDLE} state to the {@link State#RELEASING} state.
* @return whether the transition to {@link State#RELEASING} succeeded.
*/
public boolean tryMarkReleasing() {
return compareAndSetIdleStat(State.IDLE, State.RELEASING);
} | 3.68 |
querydsl_ExpressionUtils_isNull | /**
* Create a {@code left is null} expression
*
* @param left operation argument
* @return left is null
*/
public static Predicate isNull(Expression<?> left) {
return predicate(Ops.IS_NULL, left);
} | 3.68 |
flink_NFACompiler_createMiddleStates | /**
* Creates all the states between Start and Final state.
*
* @param sinkState the state that last state should point to (always the Final state)
* @return the next state after Start in the resulting graph
*/
private State<T> createMiddleStates(final State<T> sinkState) {
State<T> lastSink = sinkState;
while (currentPattern.getPrevious() != null) {
if (currentPattern.getQuantifier().getConsumingStrategy()
== Quantifier.ConsumingStrategy.NOT_FOLLOW) {
// skip notFollow patterns, they are converted into edge conditions
if ((currentPattern.getWindowTime(WithinType.PREVIOUS_AND_CURRENT) != null
|| getWindowTime() > 0)
&& lastSink.isFinal()) {
final State<T> notFollow = createState(State.StateType.Pending, true);
final IterativeCondition<T> notCondition = getTakeCondition(currentPattern);
final State<T> stopState =
createStopState(notCondition, currentPattern.getName());
notFollow.addProceed(stopState, notCondition);
notFollow.addIgnore(new RichNotCondition<>(notCondition));
lastSink = notFollow;
}
} else if (currentPattern.getQuantifier().getConsumingStrategy()
== Quantifier.ConsumingStrategy.NOT_NEXT) {
final State<T> notNext = createState(State.StateType.Normal, true);
final IterativeCondition<T> notCondition = getTakeCondition(currentPattern);
final State<T> stopState =
createStopState(notCondition, currentPattern.getName());
if (lastSink.isFinal()) {
// so that the proceed to final is not fired
notNext.addIgnore(lastSink, new RichNotCondition<>(notCondition));
} else {
notNext.addProceed(lastSink, new RichNotCondition<>(notCondition));
}
notNext.addProceed(stopState, notCondition);
lastSink = notNext;
} else {
lastSink = convertPattern(lastSink);
}
// we traverse the pattern graph backwards
followingPattern = currentPattern;
currentPattern = currentPattern.getPrevious();
final Time currentWindowTime = currentPattern.getWindowTime();
if (currentWindowTime != null
&& currentWindowTime.toMilliseconds() < windowTime.orElse(Long.MAX_VALUE)) {
// the window time is the global minimum of all window times of each state
windowTime = Optional.of(currentWindowTime.toMilliseconds());
}
}
return lastSink;
} | 3.68 |
dubbo_RegistryBuilder_transport | /**
* @param transport
* @see #transporter(String)
* @deprecated
*/
@Deprecated
public RegistryBuilder transport(String transport) {
this.transporter = transport;
return getThis();
} | 3.68 |
hadoop_QuotaUsage_getTypeConsumed | /**
* Return storage type consumed.
*
* @param type storage type.
* @return type consumed.
*/
public long getTypeConsumed(StorageType type) {
return (typeConsumed != null) ? typeConsumed[type.ordinal()] : 0L;
} | 3.68 |
flink_TypeExtractor_createSubTypesInfo | /**
* Creates the TypeInformation for all elements of a type that expects a certain number of
* subtypes (e.g. TupleXX).
*
* @param originalType most concrete subclass
* @param definingType type that defines the number of subtypes (e.g. Tuple2 -> 2 subtypes)
* @param typeHierarchy necessary for type inference
* @param in1Type necessary for type inference
* @param in2Type necessary for type inference
* @param lenient decides whether exceptions should be thrown if a subtype can not be determined
* @return array containing TypeInformation of sub types or null if definingType contains more
* subtypes (fields) than defined
*/
private <IN1, IN2> TypeInformation<?>[] createSubTypesInfo(
Type originalType,
ParameterizedType definingType,
List<Type> typeHierarchy,
TypeInformation<IN1> in1Type,
TypeInformation<IN2> in2Type,
boolean lenient) {
Type[] subtypes = new Type[definingType.getActualTypeArguments().length];
// materialize possible type variables
for (int i = 0; i < subtypes.length; i++) {
final Type actualTypeArg = definingType.getActualTypeArguments()[i];
// materialize immediate TypeVariables
if (actualTypeArg instanceof TypeVariable<?>) {
subtypes[i] =
materializeTypeVariable(typeHierarchy, (TypeVariable<?>) actualTypeArg);
}
// class or parameterized type
else {
subtypes[i] = actualTypeArg;
}
}
TypeInformation<?>[] subTypesInfo = new TypeInformation<?>[subtypes.length];
for (int i = 0; i < subtypes.length; i++) {
final List<Type> subTypeHierarchy = new ArrayList<>(typeHierarchy);
subTypeHierarchy.add(subtypes[i]);
// sub type could not be determined with materializing
// try to derive the type info of the TypeVariable from the immediate base child input
// as a last attempt
if (subtypes[i] instanceof TypeVariable<?>) {
subTypesInfo[i] =
createTypeInfoFromInputs(
(TypeVariable<?>) subtypes[i], subTypeHierarchy, in1Type, in2Type);
// variable could not be determined
if (subTypesInfo[i] == null && !lenient) {
throw new InvalidTypesException(
"Type of TypeVariable '"
+ ((TypeVariable<?>) subtypes[i]).getName()
+ "' in '"
+ ((TypeVariable<?>) subtypes[i]).getGenericDeclaration()
+ "' could not be determined. This is most likely a type erasure problem. "
+ "The type extraction currently supports types with generic variables only in cases where "
+ "all variables in the return type can be deduced from the input type(s). "
+ "Otherwise the type has to be specified explicitly using type information.");
}
} else {
// create the type information of the subtype or null/exception
try {
subTypesInfo[i] =
createTypeInfoWithTypeHierarchy(
subTypeHierarchy, subtypes[i], in1Type, in2Type);
} catch (InvalidTypesException e) {
if (lenient) {
subTypesInfo[i] = null;
} else {
throw e;
}
}
}
}
// check that number of fields matches the number of subtypes
if (!lenient) {
Class<?> originalTypeAsClass = null;
if (isClassType(originalType)) {
originalTypeAsClass = typeToClass(originalType);
}
checkNotNull(originalTypeAsClass, "originalType has an unexpected type");
// check if the class we assumed to conform to the defining type so far is actually a
// pojo because the
// original type contains additional fields.
// check for additional fields.
int fieldCount = countFieldsInClass(originalTypeAsClass);
if (fieldCount > subTypesInfo.length) {
return null;
}
}
return subTypesInfo;
} | 3.68 |
hbase_StateMachineProcedure_addChildProcedure | /**
* Add a child procedure to execute
* @param subProcedure the child procedure
*/
protected <T extends Procedure<TEnvironment>> void
addChildProcedure(@SuppressWarnings("unchecked") T... subProcedure) {
if (subProcedure == null) {
return;
}
final int len = subProcedure.length;
if (len == 0) {
return;
}
if (subProcList == null) {
subProcList = new ArrayList<>(len);
}
for (int i = 0; i < len; ++i) {
Procedure<TEnvironment> proc = subProcedure[i];
if (!proc.hasOwner()) {
proc.setOwner(getOwner());
}
subProcList.add(proc);
}
} | 3.68 |
pulsar_ManagedLedgerImpl_isBkErrorNotRecoverable | /**
* Returns true for BK error codes that are considered unlikely to be recoverable.
*/
private static boolean isBkErrorNotRecoverable(int rc) {
switch (rc) {
case Code.NoSuchLedgerExistsException:
case Code.NoSuchLedgerExistsOnMetadataServerException:
case Code.NoSuchEntryException:
return true;
default:
return false;
}
} | 3.68 |
hbase_ClientTokenUtil_toToken | /**
* Converts a protobuf Token message back into a Token instance.
* @param proto the protobuf Token message
* @return the Token instance
*/
@InterfaceAudience.Private
static Token<AuthenticationTokenIdentifier> toToken(AuthenticationProtos.Token proto) {
return new Token<>(proto.hasIdentifier() ? proto.getIdentifier().toByteArray() : null,
proto.hasPassword() ? proto.getPassword().toByteArray() : null,
AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE,
proto.hasService() ? new Text(proto.getService().toStringUtf8()) : null);
} | 3.68 |
flink_Configuration_get | /**
* Please check the java doc of {@link #getRawValueFromOption(ConfigOption)}. If no keys are
* found in {@link Configuration}, the default value of the given option will be returned. Please make
* sure there will be at least one value available. Otherwise, an NPE will be thrown by Flink
* when the value is used.
*
* <p>NOTE: current logic is not able to get the default value of the fallback key's
* ConfigOption, in case the given ConfigOption has no default value. If you want to use
* fallback key, please make sure its value could be found in {@link Configuration} at runtime.
*
* @param option metadata of the option to read
* @return the value of the given option
*/
@Override
public <T> T get(ConfigOption<T> option) {
return getOptional(option).orElseGet(option::defaultValue);
} | 3.68 |
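A sketch of the default-value behaviour described above, using Flink's `ConfigOptions` builder; the key name is made up for illustration.

```java
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.Configuration;

public class ConfigOptionDemo {
    // Hypothetical option with a default value, so get() never has to return null.
    static final ConfigOption<Integer> RETRIES =
            ConfigOptions.key("example.retries").intType().defaultValue(3);

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        System.out.println(conf.get(RETRIES));  // key absent -> default value: 3
        conf.set(RETRIES, 5);
        System.out.println(conf.get(RETRIES));  // explicit value wins: 5
    }
}
```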
framework_Tree_getItemIdInto | /**
* If the event is on a node that can not have children (see
* {@link Tree#areChildrenAllowed(Object)}), this method returns the
* parent item id of the target item (see {@link #getItemIdOver()} ).
* The identifier of the parent node is also returned if the cursor is
* on the top part of node. Else this method returns the same as
* {@link #getItemIdOver()}.
* <p>
* In other words this method returns the identifier of the "folder"
* into which the drag operation is targeted.
* <p>
* If the method returns null, the current target is on a root node or
* on other undefined area over the tree component.
* <p>
* The default Tree implementation marks the targeted tree node with CSS
* classnames v-tree-node-dragfolder and v-tree-node-caption-dragfolder
* (for the caption element).
*
* @return the ID of the item that can receive the targeted drop
*/
public Object getItemIdInto() {
Object itemIdOver = getItemIdOver();
if (areChildrenAllowed(itemIdOver)
&& getDropLocation() == VerticalDropLocation.MIDDLE) {
return itemIdOver;
}
return getParent(itemIdOver);
} | 3.68 |
pulsar_Topics_deletePartitionedTopicAsync | /**
* @see Topics#deletePartitionedTopic(String, boolean, boolean)
*/
default CompletableFuture<Void> deletePartitionedTopicAsync(String topic, boolean force) {
return deletePartitionedTopicAsync(topic, force, true);
} | 3.68 |
hbase_RegionInfo_encodeRegionName | /** Returns the encodedName */
@InterfaceAudience.Private
static String encodeRegionName(final byte[] regionName) {
String encodedName;
if (hasEncodedName(regionName)) {
// region is in new format:
// <tableName>,<startKey>,<regionIdTimeStamp>/encodedName/
encodedName =
Bytes.toString(regionName, regionName.length - MD5_HEX_LENGTH - 1, MD5_HEX_LENGTH);
} else {
// old format region name. First hbase:meta region also
// uses this format. EncodedName is the JenkinsHash value.
HashKey<byte[]> key = new ByteArrayHashKey(regionName, 0, regionName.length);
int hashVal = Math.abs(JenkinsHash.getInstance().hash(key, 0));
encodedName = String.valueOf(hashVal);
}
return encodedName;
} | 3.68 |
hudi_BaseHoodieWriteClient_restoreToInstant | /**
* NOTE : This action requires all writers (ingest and compact) to a table to be stopped before proceeding. Revert
* the (inflight/committed) record changes for all commits after the provided instant time.
*
* @param savepointToRestoreTimestamp savepoint instant time to which restoration is requested
*/
public HoodieRestoreMetadata restoreToInstant(final String savepointToRestoreTimestamp, boolean initialMetadataTableIfNecessary) throws HoodieRestoreException {
LOG.info("Begin restore to instant " + savepointToRestoreTimestamp);
Timer.Context timerContext = metrics.getRollbackCtx();
try {
HoodieTable<T, I, K, O> table = initTable(WriteOperationType.UNKNOWN, Option.empty(), initialMetadataTableIfNecessary);
Pair<String, Option<HoodieRestorePlan>> timestampAndRestorePlan = scheduleAndGetRestorePlan(savepointToRestoreTimestamp, table);
final String restoreInstantTimestamp = timestampAndRestorePlan.getLeft();
Option<HoodieRestorePlan> restorePlanOption = timestampAndRestorePlan.getRight();
if (restorePlanOption.isPresent()) {
HoodieRestoreMetadata restoreMetadata = table.restore(context, restoreInstantTimestamp, savepointToRestoreTimestamp);
if (timerContext != null) {
final long durationInMs = metrics.getDurationInMs(timerContext.stop());
final long totalFilesDeleted = restoreMetadata.getHoodieRestoreMetadata().values().stream()
.flatMap(Collection::stream)
.mapToLong(HoodieRollbackMetadata::getTotalFilesDeleted)
.sum();
metrics.updateRollbackMetrics(durationInMs, totalFilesDeleted);
}
return restoreMetadata;
} else {
throw new HoodieRestoreException("Failed to restore " + config.getBasePath() + " to commit " + savepointToRestoreTimestamp);
}
} catch (Exception e) {
throw new HoodieRestoreException("Failed to restore to " + savepointToRestoreTimestamp, e);
}
} | 3.68 |
graphhopper_OSMReader_addEdge | /**
* This method is called for each segment an OSM way is split into during the second pass of {@link WaySegmentParser}.
*
* @param fromIndex a unique integer id for the first node of this segment
* @param toIndex a unique integer id for the last node of this segment
* @param pointList coordinates of this segment
* @param way the OSM way this segment was taken from
* @param nodeTags node tags of this segment. there is one map of tags for each point.
*/
protected void addEdge(int fromIndex, int toIndex, PointList pointList, ReaderWay way, List<Map<String, Object>> nodeTags) {
// sanity checks
if (fromIndex < 0 || toIndex < 0)
throw new AssertionError("to or from index is invalid for this edge " + fromIndex + "->" + toIndex + ", points:" + pointList);
if (pointList.getDimension() != nodeAccess.getDimension())
throw new AssertionError("Dimension does not match for pointList vs. nodeAccess " + pointList.getDimension() + " <-> " + nodeAccess.getDimension());
if (pointList.size() != nodeTags.size())
throw new AssertionError("there should be as many maps of node tags as there are points. node tags: " + nodeTags.size() + ", points: " + pointList.size());
// todo: in principle it should be possible to delay elevation calculation so we do not need to store
// elevations during import (saves memory in pillar info during import). also note that we already need
// to do some kind of elevation processing (bridge+tunnel interpolation in the GraphHopper class), maybe
// this can go together
if (pointList.is3D()) {
// sample points along long edges
if (config.getLongEdgeSamplingDistance() < Double.MAX_VALUE)
pointList = EdgeSampling.sample(pointList, config.getLongEdgeSamplingDistance(), distCalc, eleProvider);
// smooth the elevation before calculating the distance because the distance will be incorrect if calculated afterwards
if (config.getElevationSmoothing().equals("ramer"))
EdgeElevationSmoothingRamer.smooth(pointList, config.getElevationSmoothingRamerMax());
else if (config.getElevationSmoothing().equals("moving_average"))
EdgeElevationSmoothingMovingAverage.smooth(pointList, config.getSmoothElevationAverageWindowSize());
else if (!config.getElevationSmoothing().isEmpty())
throw new AssertionError("Unsupported elevation smoothing algorithm: '" + config.getElevationSmoothing() + "'");
}
if (config.getMaxWayPointDistance() > 0 && pointList.size() > 2)
simplifyAlgo.simplify(pointList);
double distance = distCalc.calcDistance(pointList);
if (distance < 0.001) {
// As investigation shows often two paths should have crossed via one identical point
// but end up in two very close points.
zeroCounter++;
distance = 0.001;
}
double maxDistance = (Integer.MAX_VALUE - 1) / 1000d;
if (Double.isNaN(distance)) {
LOGGER.warn("Bug in OSM or GraphHopper. Illegal tower node distance " + distance + " reset to 1m, osm way " + way.getId());
distance = 1;
}
if (Double.isInfinite(distance) || distance > maxDistance) {
// Too large is very rare and often the wrong tagging. See #435
// so we can avoid the complexity of splitting the way for now (new towernodes would be required, splitting up geometry etc)
// For example this happens here: https://www.openstreetmap.org/way/672506453 (Cape Town - Tristan da Cunha ferry)
LOGGER.warn("Bug in OSM or GraphHopper. Too big tower node distance " + distance + " reset to large value, osm way " + way.getId());
distance = maxDistance;
}
setArtificialWayTags(pointList, way, distance, nodeTags);
IntsRef relationFlags = getRelFlagsMap(way.getId());
EdgeIteratorState edge = baseGraph.edge(fromIndex, toIndex).setDistance(distance);
osmParsers.handleWayTags(edge.getEdge(), edgeIntAccess, way, relationFlags);
List<KVStorage.KeyValue> list = way.getTag("key_values", Collections.emptyList());
if (!list.isEmpty())
edge.setKeyValues(list);
// If the entire way is just the first and last point, do not waste space storing an empty way geometry
if (pointList.size() > 2) {
// the geometry consists only of pillar nodes, but we check that the first and last points of the pointList
// are equal to the tower node coordinates
checkCoordinates(fromIndex, pointList.get(0));
checkCoordinates(toIndex, pointList.get(pointList.size() - 1));
edge.setWayGeometry(pointList.shallowCopy(1, pointList.size() - 1, false));
}
checkDistance(edge);
restrictedWaysToEdgesMap.putIfReserved(way.getId(), edge.getEdge());
} | 3.68 |
flink_ForwardHashExchangeProcessor_updateOriginalEdgeInMultipleInput | /**
* Adds a new exchange node between the input node and the target node for the given edge, and
* reconnects the edges so that the transformations can be connected correctly.
*/
private void updateOriginalEdgeInMultipleInput(
BatchExecMultipleInput multipleInput, int edgeIdx, BatchExecExchange newExchange) {
ExecEdge originalEdge = multipleInput.getOriginalEdges().get(edgeIdx);
ExecNode<?> inputNode = originalEdge.getSource();
ExecNode<?> targetNode = originalEdge.getTarget();
int edgeIdxInTargetNode = targetNode.getInputEdges().indexOf(originalEdge);
checkArgument(edgeIdxInTargetNode >= 0);
List<ExecEdge> newEdges = new ArrayList<>(targetNode.getInputEdges());
// connect input node to new exchange node
ExecEdge newEdge1 =
new ExecEdge(
inputNode,
newExchange,
originalEdge.getShuffle(),
originalEdge.getExchangeMode());
newExchange.setInputEdges(Collections.singletonList(newEdge1));
// connect new exchange node to target node
ExecEdge newEdge2 =
new ExecEdge(
newExchange,
targetNode,
originalEdge.getShuffle(),
originalEdge.getExchangeMode());
newEdges.set(edgeIdxInTargetNode, newEdge2);
targetNode.setInputEdges(newEdges);
// update the originalEdge in MultipleInput, this is need for multiple operator fusion
// codegen
multipleInput.getOriginalEdges().set(edgeIdx, newEdge2);
} | 3.68 |
flink_CopyOnWriteSkipListStateMap_getNodeStateHelper | /** Return the state of the node. null will be returned if the node is removed. */
private S getNodeStateHelper(long node) {
Node nodeStorage = getNodeSegmentAndOffset(node);
MemorySegment segment = nodeStorage.nodeSegment;
int offsetInSegment = nodeStorage.nodeOffset;
long valuePointer = SkipListUtils.getValuePointer(segment, offsetInSegment);
return helpGetState(valuePointer);
} | 3.68 |
hadoop_ManifestSuccessData_getDescription | /**
* @return any description text.
*/
public String getDescription() {
return description;
} | 3.68 |
framework_AbstractDateField_setResolution | /**
* Sets the resolution of the DateField.
*
* The default resolution is {@link DateResolution#DAY} since Vaadin 7.0.
*
* @param resolution
* the resolution to set, not {@code null}
*/
public void setResolution(R resolution) {
if (!resolution.equals(this.resolution)) {
this.resolution = resolution;
setValue(adjustToResolution(getValue(), resolution));
updateResolutions();
}
} | 3.68 |
hbase_Client_head | /**
* Send a HEAD request
* @param cluster the cluster definition
* @param path the path or URI
* @param headers the HTTP headers to include in the request
* @return a Response object with response detail
*/
public Response head(Cluster cluster, String path, Header[] headers) throws IOException {
HttpHead method = new HttpHead(path);
try {
HttpResponse resp = execute(cluster, method, null, path);
return new Response(resp.getStatusLine().getStatusCode(), resp.getAllHeaders(), null);
} finally {
method.releaseConnection();
}
} | 3.68 |
hudi_OptionsResolver_isSchemaEvolutionEnabled | /**
* Returns whether comprehensive schema evolution is enabled.
*/
public static boolean isSchemaEvolutionEnabled(Configuration conf) {
return conf.getBoolean(HoodieCommonConfig.SCHEMA_EVOLUTION_ENABLE.key(), HoodieCommonConfig.SCHEMA_EVOLUTION_ENABLE.defaultValue());
} | 3.68 |
flink_AllWindowedStream_sideOutputLateData | /**
* Send late arriving data to the side output identified by the given {@link OutputTag}. Data is
* considered late after the watermark has passed the end of the window plus the allowed
* lateness set using {@link #allowedLateness(Time)}.
*
* <p>You can get the stream of late data using {@link
* SingleOutputStreamOperator#getSideOutput(OutputTag)} on the {@link
* SingleOutputStreamOperator} resulting from the windowed operation with the same {@link
* OutputTag}.
*/
@PublicEvolving
public AllWindowedStream<T, W> sideOutputLateData(OutputTag<T> outputTag) {
Preconditions.checkNotNull(outputTag, "Side output tag must not be null.");
this.lateDataOutputTag = input.getExecutionEnvironment().clean(outputTag);
return this;
} | 3.68 |
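A sketch of routing late elements to a side output and reading them back afterwards; the input stream, window size, and reduce function are assumptions for illustration.

```java
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.OutputTag;

class LateDataSketch {
    DataStream<String> lateData(DataStream<String> input) {
        // Anonymous subclass so the element type is captured for the side output.
        final OutputTag<String> lateTag = new OutputTag<String>("late-events") {};

        SingleOutputStreamOperator<String> windowed = input
                .windowAll(TumblingEventTimeWindows.of(Time.minutes(1)))
                .allowedLateness(Time.seconds(30))
                .sideOutputLateData(lateTag)
                .reduce((a, b) -> a + "," + b);

        return windowed.getSideOutput(lateTag);  // elements that arrived after window end + allowed lateness
    }
}
```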
hbase_HBaseFsckRepair_fixMetaHoleOnlineAndAddReplicas | /**
* Puts the specified RegionInfo into META with replica related columns
*/
public static void fixMetaHoleOnlineAndAddReplicas(Configuration conf, RegionInfo hri,
Collection<ServerName> servers, int numReplicas) throws IOException {
Connection conn = ConnectionFactory.createConnection(conf);
Table meta = conn.getTable(TableName.META_TABLE_NAME);
Put put = MetaTableAccessor.makePutFromRegionInfo(hri);
if (numReplicas > 1) {
Random rand = ThreadLocalRandom.current();
ServerName[] serversArr = servers.toArray(new ServerName[servers.size()]);
for (int i = 1; i < numReplicas; i++) {
ServerName sn = serversArr[rand.nextInt(serversArr.length)];
// the column added here is just to make sure the master is able to
// see the additional replicas when it is asked to assign. The
// final value of these columns will be different and will be updated
// by the actual regionservers that start hosting the respective replicas
MetaTableAccessor.addLocation(put, sn, sn.getStartcode(), i);
}
}
meta.put(put);
meta.close();
conn.close();
} | 3.68 |
flink_FieldSet_clone | /**
* Since instances of FieldSet are strictly immutable, this method does not actually clone, but
* it only returns the original instance.
*
* @return This objects reference, unmodified.
*/
public FieldSet clone() {
return this;
} | 3.68 |
hbase_HttpServer_addServletWithAuth | /**
* Internal method to add a servlet to the HTTP server. Developers should not call this method
* directly, but invoke it via {@link #addUnprivilegedServlet(String, ServletHolder)} or
* {@link #addPrivilegedServlet(String, ServletHolder)}.
*/
void addServletWithAuth(String pathSpec, ServletHolder holder, boolean requireAuthz) {
addInternalServlet(pathSpec, holder, requireAuthz);
addFilterPathMapping(pathSpec, webAppContext);
} | 3.68 |
pulsar_JsonRecordBuilderImpl_clear | /**
* Clears the value of the given field.
*
* @param field the field to clear.
* @return a reference to the RecordBuilder.
*/
@Override
public GenericRecordBuilder clear(Field field) {
clear(field.getName());
return this;
} | 3.68 |
cron-utils_Preconditions_checkState | /**
* Ensures the truth of an expression involving the state of the calling instance, but not
* involving any parameters to the calling method.
*
* @param expression a boolean expression
* @param errorMessageTemplate a template for the exception message should the check fail. The
* message is formed by replacing each {@code %s} placeholder in the template with an
* argument. These are matched by position - the first {@code %s} gets {@code
* errorMessageArgs[0]}, etc. Unmatched arguments will be appended to the formatted message
* in square braces. Unmatched placeholders will be left as-is.
* @param errorMessageArgs the arguments to be substituted into the message template. Arguments
* are converted to strings using {@link String#valueOf(Object)}.
* @throws IllegalStateException if {@code expression} is false
* @throws NullPointerException if the check fails and either {@code errorMessageTemplate} or
* {@code errorMessageArgs} is null (don't let this happen)
*/
public static void checkState(final boolean expression,
final String errorMessageTemplate,
final Object... errorMessageArgs) {
if (!expression) {
throw new IllegalStateException(format(errorMessageTemplate, errorMessageArgs));
}
} | 3.68 |
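A typical call site for the helper above; per the formatting rules in the javadoc, surplus arguments would be appended in square braces and unmatched placeholders left as-is.

```java
// Throws IllegalStateException("cannot start before init(); attempts so far: 3")
// when initialized == false and attempts == 3.
void start(boolean initialized, int attempts) {
    Preconditions.checkState(initialized,
            "cannot start before init(); attempts so far: %s", attempts);
    // ... proceed with startup
}
```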
framework_TreeGrid_addCollapseListener | /**
* Adds a CollapseListener to this TreeGrid.
*
* @see CollapseEvent
*
* @param listener
* the listener to add
* @return a registration for the listener
*/
public Registration addCollapseListener(CollapseListener<T> listener) {
return addListener(CollapseEvent.class, listener,
CollapseListener.COLLAPSE_METHOD);
} | 3.68 |
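A usage sketch; `Department` is a hypothetical item type, and the returned `Registration` is what lets the caller detach the listener again later.

```java
// Vaadin's TreeGrid, Notification and Registration types are assumed to be imported.
void wireCollapseLogging(TreeGrid<Department> grid) {
    Registration registration = grid.addCollapseListener(
            event -> Notification.show("Collapsed: " + event.getCollapsedItem()));
    // keep the registration around and call registration.remove() when no longer needed
}
```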
morf_SqlDialect_legacyFetchSizeForBulkSelectsAllowingConnectionUseDuringStreaming | /**
* @deprecated this method returns the legacy value and is primarily for backwards compatibility.
* Please use {@link SqlDialect#fetchSizeForBulkSelectsAllowingConnectionUseDuringStreaming()} for the new recommended default value.
* @see SqlDialect#fetchSizeForBulkSelectsAllowingConnectionUseDuringStreaming()
*
* @return The number of rows to try and fetch at a time (default) when
* performing bulk select operations.
*/
@Deprecated
public int legacyFetchSizeForBulkSelectsAllowingConnectionUseDuringStreaming() {
return legacyFetchSizeForBulkSelects();
} | 3.68 |
framework_Table_getVisibleColumns | /**
* Gets the array of visible column ids, including generated columns.
*
* <p>
* The columns are shown in the order of their appearance in this array.
* </p>
*
* @return an array of currently visible propertyIds and generated column
* ids.
*/
public Object[] getVisibleColumns() {
if (visibleColumns == null) {
return null;
}
return visibleColumns.toArray();
} | 3.68 |
rocketmq-connect_WrapperStatusListener_onFailure | /**
* Invoked if the task raises an error. No shutdown event will follow.
*
* @param id The id of the task
* @param cause The error raised by the task.
*/
@Override
public void onFailure(ConnectorTaskId id, Throwable cause) {
managementService.putSafe(new TaskStatus(id, TaskStatus.State.FAILED, workerId, generation(), trace(cause)));
} | 3.68 |
framework_Overlay_getVisualViewportHeight | /**
* Gets the visual viewport height, which is useful for e.g. iOS where the
* view can be zoomed in while keeping the layout viewport intact.
*
* Falls back to layout viewport; for those browsers/devices the difference
* is that the scrollbar width is included (if there is a scrollbar).
*
* @since 7.0.7
* @return
*/
private int getVisualViewportHeight() {
int h = (int) getSubpixelInnerHeight();
if (h < 0) {
return Window.getClientHeight();
} else {
return h;
}
} | 3.68 |
druid_MySqlStatementParser_parseDeclareHandler | /**
* Parses a handler declaration (defines an exception handler).
*
* @author zhujun [[email protected]]
* 2016-04-16
*/
public MySqlDeclareHandlerStatement parseDeclareHandler() {
//DECLARE handler_type HANDLER FOR condition_value[,...] sp_statement
//handler_type is one of CONTINUE | EXIT | UNDO
//condition_value is one of SQLWARNING | NOT FOUND | SQLEXCEPTION | SQLSTATE value (error code, e.g. 1062)
MySqlDeclareHandlerStatement stmt = new MySqlDeclareHandlerStatement();
accept(Token.DECLARE);
//String handlerType = exprParser.name().getSimpleName();
if (lexer.token() == Token.CONTINUE) {
stmt.setHandleType(MySqlHandlerType.CONTINUE);
} else if (lexer.token() == Token.EXIT) {
stmt.setHandleType(MySqlHandlerType.EXIT);
} else if (lexer.token() == Token.UNDO) {
stmt.setHandleType(MySqlHandlerType.UNDO);
} else {
throw new ParserException("unknown handle type. " + lexer.info());
}
lexer.nextToken();
acceptIdentifier("HANDLER");
accept(Token.FOR);
for (; ; ) {
String tokenName = lexer.stringVal();
ConditionValue condition = new ConditionValue();
if (tokenName.equalsIgnoreCase("NOT")) {
//for 'NOT FOUND'
lexer.nextToken();
acceptIdentifier("FOUND");
condition.setType(ConditionType.SYSTEM);
condition.setValue("NOT FOUND");
} else if (tokenName.equalsIgnoreCase("SQLSTATE")) {
//for SQLSTATE (SQLSTATE '10001')
condition.setType(ConditionType.SQLSTATE);
lexer.nextToken();
//condition.setValue(lexer.stringVal());
//lexer.nextToken();
condition.setValue(exprParser.name().toString());
} else if (lexer.identifierEquals("SQLEXCEPTION")) {
//for SQLEXCEPTION
condition.setType(ConditionType.SYSTEM);
condition.setValue(lexer.stringVal());
lexer.nextToken();
} else if (lexer.identifierEquals("SQLWARNING")) {
//for SQLWARNING
condition.setType(ConditionType.SYSTEM);
condition.setValue(lexer.stringVal());
lexer.nextToken();
} else { //for condition_name or mysql_error_code
if (lexer.token() == Token.LITERAL_INT) {
condition.setType(ConditionType.MYSQL_ERROR_CODE);
condition.setValue(lexer.integerValue().toString());
} else {
condition.setType(ConditionType.SELF);
condition.setValue(tokenName);
}
lexer.nextToken();
}
stmt.getConditionValues().add(condition);
if (lexer.token() == Token.COMMA) {
accept(Token.COMMA);
continue;
} else if (lexer.token() != Token.EOF) {
break;
} else {
throw new ParserException("declare handle not eof");
}
}
stmt.setSpStatement(parseSpStatement());
if (!(stmt.getSpStatement() instanceof SQLBlockStatement)) {
accept(Token.SEMI);
}
return stmt;
} | 3.68 |
pulsar_BrokersBase_persistDynamicConfigurationAsync | /**
* If a {@link ServiceConfiguration} field is allowed to be modified dynamically, update the configuration map in
* ZooKeeper so that all other brokers receive the watch, see the change, and take appropriate action on it.
*
* @param configName
* : configuration key
* @param configValue
* : configuration value
*/
private synchronized CompletableFuture<Void> persistDynamicConfigurationAsync(
String configName, String configValue) {
if (!pulsar().getBrokerService().validateDynamicConfiguration(configName, configValue)) {
return FutureUtil
.failedFuture(new RestException(Status.PRECONDITION_FAILED, " Invalid dynamic-config value"));
}
if (pulsar().getBrokerService().isDynamicConfiguration(configName)) {
return dynamicConfigurationResources().setDynamicConfigurationWithCreateAsync(old -> {
Map<String, String> configurationMap = old.orElseGet(Maps::newHashMap);
configurationMap.put(configName, configValue);
return configurationMap;
});
} else {
return FutureUtil.failedFuture(new RestException(Status.PRECONDITION_FAILED,
"Can't update non-dynamic configuration"));
}
} | 3.68 |
hadoop_IOStatisticsBinding_copyMap | /**
* Copy into the dest map all the source entries.
* The destination is cleared first.
* @param <E> entry type
* @param dest destination of the copy
* @param source source
* @param copyFn function to copy entries
* @return the destination.
*/
private static <E> Map<String, E> copyMap(
Map<String, E> dest,
Map<String, E> source,
Function<E, E> copyFn) {
// we have to clone the values so that they aren't
// bound to the original values
dest.clear();
source.entrySet()
.forEach(entry ->
dest.put(entry.getKey(), copyFn.apply(entry.getValue())));
return dest;
} | 3.68 |
dubbo_LoggerFactory_getErrorTypeAwareLogger | /**
* Get error type aware logger by a String key.
*
* @param key the returned logger will be named after key
* @return error type aware logger
*/
public static ErrorTypeAwareLogger getErrorTypeAwareLogger(String key) {
return ConcurrentHashMapUtils.computeIfAbsent(
ERROR_TYPE_AWARE_LOGGERS, key, k -> new FailsafeErrorTypeAwareLogger(loggerAdapter.getLogger(k)));
} | 3.68 |
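A typical call site, assuming Dubbo 3's error-code-aware warn(code, cause, extendedInformation, message) signature; the key and error code below are illustrative.

import org.apache.dubbo.common.logger.ErrorTypeAwareLogger;
import org.apache.dubbo.common.logger.LoggerFactory;

public class ErrorAwareLoggingExample {
    // Keyed lookup; repeated calls with the same key reuse the cached FailsafeErrorTypeAwareLogger.
    private static final ErrorTypeAwareLogger LOGGER =
            LoggerFactory.getErrorTypeAwareLogger("demo-component");

    public static void main(String[] args) {
        LOGGER.warn("0-1", "illustrative cause", "", "something looks off"); // signature assumed from Dubbo 3
    }
}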
flink_RuntimeConverter_create | /**
* Creates a new instance of {@link Context}.
*
* @param classLoader runtime classloader for loading user-defined classes.
*/
static Context create(ClassLoader classLoader) {
return new Context() {
@Override
public ClassLoader getClassLoader() {
return classLoader;
}
};
} | 3.68 |
hudi_SingleSparkJobExecutionStrategy_runClusteringForGroup | /**
* Submit job to execute clustering for the group.
*/
private Stream<WriteStatus> runClusteringForGroup(ClusteringGroupInfo clusteringOps, Map<String, String> strategyParams,
boolean preserveHoodieMetadata, SerializableSchema schema,
TaskContextSupplier taskContextSupplier, String instantTime) {
List<HoodieFileGroupId> inputFileIds = clusteringOps.getOperations().stream()
.map(op -> new HoodieFileGroupId(op.getPartitionPath(), op.getFileId()))
.collect(Collectors.toList());
Iterator<HoodieRecord<T>> inputRecords = readRecordsForGroupBaseFiles(clusteringOps.getOperations());
Iterator<List<WriteStatus>> writeStatuses = performClusteringWithRecordsIterator(inputRecords, clusteringOps.getNumOutputGroups(), instantTime,
strategyParams, schema.get(), inputFileIds, preserveHoodieMetadata, taskContextSupplier);
Iterable<List<WriteStatus>> writeStatusIterable = () -> writeStatuses;
return StreamSupport.stream(writeStatusIterable.spliterator(), false)
.flatMap(Collection::stream);
} | 3.68 |
dubbo_PortUnificationExchanger_getServers | // for test
public static ConcurrentMap<String, RemotingServer> getServers() {
return servers;
} | 3.68 |
flink_CreditBasedPartitionRequestClientHandler_checkError | /** Checks for an error and rethrows it if one was reported. */
@VisibleForTesting
void checkError() throws IOException {
final Throwable t = channelError.get();
if (t != null) {
if (t instanceof IOException) {
throw (IOException) t;
} else {
throw new IOException("There has been an error in the channel.", t);
}
}
} | 3.68 |
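The underlying pattern (remember the first asynchronous failure, rethrow it on the calling thread) can be sketched independently of Netty:

import java.io.IOException;
import java.util.concurrent.atomic.AtomicReference;

public class ChannelErrorSketch {
    private final AtomicReference<Throwable> channelError = new AtomicReference<>();

    void onAsyncFailure(Throwable cause) {
        channelError.compareAndSet(null, cause); // keep only the first reported error
    }

    void checkError() throws IOException {
        Throwable t = channelError.get();
        if (t == null) {
            return;
        }
        if (t instanceof IOException) {
            throw (IOException) t;
        }
        throw new IOException("There has been an error in the channel.", t);
    }
}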
open-banking-gateway_Xs2aFlowNameSelector_getNameForValidation | /**
* Sub-process name for current context (PSU/FinTech input) validation.
*/
public String getNameForValidation(Xs2aContext ctx) {
return actionName(ctx);
} | 3.68 |
flink_FromElementsFunction_getNumElementsEmitted | /**
* Gets the number of elements emitted so far.
*
* @return The number of elements emitted so far.
*/
public int getNumElementsEmitted() {
return numElementsEmitted;
} | 3.68 |
hadoop_RMContainerTokenSecretManager_rollMasterKey | /**
* Creates a new master-key and sets it as the primary.
*/
@Private
public void rollMasterKey() {
super.writeLock.lock();
try {
LOG.info("Rolling master-key for container-tokens");
if (this.currentMasterKey == null) { // Setting up for the first time.
this.currentMasterKey = createNewMasterKey();
} else {
this.nextMasterKey = createNewMasterKey();
LOG.info("Going to activate master-key with key-id "
+ this.nextMasterKey.getMasterKey().getKeyId() + " in "
+ this.activationDelay + "ms");
this.timer.schedule(new NextKeyActivator(), this.activationDelay);
}
} finally {
super.writeLock.unlock();
}
} | 3.68 |
hbase_TableMapReduceUtil_initCredentialsForCluster | /**
* Obtain an authentication token, for the specified cluster, on behalf of the current user and
* add it to the credentials for the given map reduce job.
* @param job The job that requires the permission.
* @param conf The configuration to use in connecting to the peer cluster
* @throws IOException When the authentication token cannot be obtained.
*/
public static void initCredentialsForCluster(Job job, Configuration conf) throws IOException {
UserProvider userProvider = UserProvider.instantiate(conf);
if (userProvider.isHBaseSecurityEnabled()) {
try {
Connection peerConn = ConnectionFactory.createConnection(conf);
try {
TokenUtil.addTokenForJob(peerConn, userProvider.getCurrent(), job);
} finally {
peerConn.close();
}
} catch (InterruptedException e) {
LOG.info("Interrupted obtaining user authentication token");
        Thread.currentThread().interrupt();
}
}
} | 3.68 |
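A hedged usage sketch for a job writing to a peer cluster; the peer cluster key is a placeholder and HBaseConfiguration.createClusterConf is assumed available for building the peer configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class PeerClusterCredentialsExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "copy-to-peer");
        // Peer cluster key: zk quorum, client port and znode parent (placeholder values).
        Configuration peerConf =
                HBaseConfiguration.createClusterConf(conf, "peer-zk1,peer-zk2:2181:/hbase");
        TableMapReduceUtil.initCredentials(job);                     // token for the local cluster
        TableMapReduceUtil.initCredentialsForCluster(job, peerConf); // token for the peer cluster
    }
}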
flink_StreamTask_advanceToEndOfEventTime | /**
* Emits the {@link org.apache.flink.streaming.api.watermark.Watermark#MAX_WATERMARK
* MAX_WATERMARK} so that all registered timers are fired.
*
* <p>This is used by the source task when the job is {@code TERMINATED}. In the case, we want
* all the timers registered throughout the pipeline to fire and the related state (e.g.
* windows) to be flushed.
*
* <p>For tasks other than the source task, this method does nothing.
*/
protected void advanceToEndOfEventTime() throws Exception {} | 3.68 |
morf_UpdateStatement_drive | /**
* @see org.alfasoftware.morf.util.ObjectTreeTraverser.Driver#drive(ObjectTreeTraverser)
*/
@Override
public void drive(ObjectTreeTraverser traverser) {
traverser
.dispatch(getTable())
.dispatch(getWhereCriterion())
.dispatch(getFields())
.dispatch(getHints());
} | 3.68 |
pulsar_ResourceGroupService_unRegisterNameSpace | /**
 * Unregisters a namespace from a resource group.
 *
 * @param resourceGroupName name of the resource group
 * @param fqNamespaceName fully qualified namespace name (i.e., in "tenant/namespace" format)
 * @throws PulsarAdminException if the RG does not exist, or if the NS does not reference the RG yet.
*/
public void unRegisterNameSpace(String resourceGroupName, NamespaceName fqNamespaceName)
throws PulsarAdminException {
ResourceGroup rg = checkResourceGroupExists(resourceGroupName);
ResourceGroupOpStatus status = rg.registerUsage(fqNamespaceName.toString(), ResourceGroupRefTypes.Namespaces,
false, this.resourceUsageTransportManagerMgr);
if (status == ResourceGroupOpStatus.DoesNotExist) {
String errMesg = String.format("Namespace %s does not yet reference resource group %s",
fqNamespaceName, resourceGroupName);
throw new PulsarAdminException(errMesg);
}
// Dissociate this NS-name from the RG.
this.namespaceToRGsMap.remove(fqNamespaceName, rg);
rgNamespaceUnRegisters.labels(resourceGroupName).inc();
} | 3.68 |
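An illustrative call, assuming access to the broker's ResourceGroupService; the group and namespace names are placeholders.

import org.apache.pulsar.broker.resourcegroup.ResourceGroupService;
import org.apache.pulsar.client.admin.PulsarAdminException;
import org.apache.pulsar.common.naming.NamespaceName;

public class ResourceGroupDetachExample {
    // rgService would come from the running broker; how it is obtained is out of scope here.
    static void detach(ResourceGroupService rgService) {
        NamespaceName ns = NamespaceName.get("my-tenant/my-namespace");
        try {
            rgService.unRegisterNameSpace("my-resource-group", ns);
        } catch (PulsarAdminException e) {
            // Raised when the RG is unknown or the namespace never referenced it.
            System.err.println("Could not detach namespace " + ns + ": " + e.getMessage());
        }
    }
}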
zxing_EAN13Reader_determineFirstDigit | /**
 * Based on the pattern of odd-even ('L' and 'G') patterns used to encode the explicitly-encoded
* digits in a barcode, determines the implicitly encoded first digit and adds it to the
* result string.
*
* @param resultString string to insert decoded first digit into
* @param lgPatternFound int whose bits indicates the pattern of odd/even L/G patterns used to
* encode digits
* @throws NotFoundException if first digit cannot be determined
*/
private static void determineFirstDigit(StringBuilder resultString, int lgPatternFound)
throws NotFoundException {
for (int d = 0; d < 10; d++) {
if (lgPatternFound == FIRST_DIGIT_ENCODINGS[d]) {
resultString.insert(0, (char) ('0' + d));
return;
}
}
throw NotFoundException.getNotFoundInstance();
} | 3.68 |
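A worked example of the lookup, assuming FIRST_DIGIT_ENCODINGS holds the standard EAN-13 parity table (digit 9 corresponds to parity LGGLGL, i.e. bits 011010 = 0x1A); the array below restates that table for illustration.

public class FirstDigitExample {
    // Assumed parity table: index = implicit first digit, value = bitmask over the six
    // left-hand digits with a bit set wherever a 'G' pattern was used.
    private static final int[] PARITY = {0x00, 0x0B, 0x0D, 0x0E, 0x13, 0x19, 0x1C, 0x15, 0x16, 0x1A};

    public static void main(String[] args) {
        int lgPatternFound = 0b011010; // parities L G G L G L observed while decoding
        for (int d = 0; d < 10; d++) {
            if (PARITY[d] == lgPatternFound) {
                System.out.println("implicit first digit = " + d); // prints 9
                return;
            }
        }
        System.out.println("no match -> the real reader throws NotFoundException");
    }
}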
hadoop_HeapSort_sort | /**
* Sort the given range of items using heap sort.
* {@inheritDoc}
*/
@Override
public void sort(IndexedSortable s, int p, int r) {
sort(s, p, r, null);
} | 3.68 |
hbase_Result_getRow | /**
* Method for retrieving the row key that corresponds to the row from which this Result was
* created.
*/
public byte[] getRow() {
if (this.row == null) {
this.row =
(this.cells == null || this.cells.length == 0) ? null : CellUtil.cloneRow(this.cells[0]);
}
return this.row;
} | 3.68 |
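Typical use while iterating scan results; the table and column family names are placeholders.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

public class RowKeyScanExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(TableName.valueOf("demo_table"));
             ResultScanner scanner = table.getScanner(new Scan().addFamily(Bytes.toBytes("cf")))) {
            for (Result result : scanner) {
                byte[] rowKey = result.getRow(); // lazily cloned from the first cell of the Result
                System.out.println(Bytes.toString(rowKey));
            }
        }
    }
}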
framework_Slot_getExpandRatio | /**
* Get the expand ratio for the slot. The expand ratio describes how the
* slot should be resized compared to other slots in the layout
*
* @return the expand ratio of the slot
*
* @see #setExpandRatio(double)
*/
public double getExpandRatio() {
return expandRatio;
} | 3.68 |
hbase_MultiByteBuff_getItemIndex | /*
* Returns in which sub ByteBuffer, the given element index will be available.
*/
private int getItemIndex(int elemIndex) {
if (elemIndex < 0) {
throw new IndexOutOfBoundsException();
}
int index = 1;
while (elemIndex >= this.itemBeginPos[index]) {
index++;
if (index == this.itemBeginPos.length) {
throw new IndexOutOfBoundsException();
}
}
return index - 1;
} | 3.68 |
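A worked example of the index walk, with made-up buffer boundaries.

public class ItemIndexWalk {
    public static void main(String[] args) {
        // Hypothetical layout: three ByteBuffers of sizes 4, 6 and 5 -> itemBeginPos = {0, 4, 10, 15}.
        int[] itemBeginPos = {0, 4, 10, 15};
        int elemIndex = 7;                        // global position to locate
        int index = 1;
        while (elemIndex >= itemBeginPos[index]) {
            index++;                              // the real method throws IndexOutOfBoundsException past the end
        }
        System.out.println(index - 1);            // 1 -> second buffer, local offset 7 - itemBeginPos[1] = 3
    }
}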
hbase_HRegion_logFatLineOnFlush | /**
* Utility method broken out of internalPrepareFlushCache so that method is smaller.
*/
private void logFatLineOnFlush(Collection<HStore> storesToFlush, long sequenceId) {
if (!LOG.isInfoEnabled()) {
return;
}
// Log a fat line detailing what is being flushed.
StringBuilder perCfExtras = null;
if (!isAllFamilies(storesToFlush)) {
perCfExtras = new StringBuilder();
for (HStore store : storesToFlush) {
MemStoreSize mss = store.getFlushableSize();
perCfExtras.append("; ").append(store.getColumnFamilyName());
perCfExtras.append("={dataSize=").append(StringUtils.byteDesc(mss.getDataSize()));
perCfExtras.append(", heapSize=").append(StringUtils.byteDesc(mss.getHeapSize()));
perCfExtras.append(", offHeapSize=").append(StringUtils.byteDesc(mss.getOffHeapSize()));
perCfExtras.append("}");
}
}
MemStoreSize mss = this.memStoreSizing.getMemStoreSize();
LOG.info("Flushing " + this.getRegionInfo().getEncodedName() + " " + storesToFlush.size() + "/"
+ stores.size() + " column families," + " dataSize=" + StringUtils.byteDesc(mss.getDataSize())
+ " heapSize=" + StringUtils.byteDesc(mss.getHeapSize())
+ ((perCfExtras != null && perCfExtras.length() > 0) ? perCfExtras.toString() : "")
+ ((wal != null) ? "" : "; WAL is null, using passed sequenceid=" + sequenceId));
} | 3.68 |
framework_VAbstractDropHandler_dragLeave | /**
* Default implementation does nothing. Implementors should clean possible
* emphasis or drag icons here.
*/
@Override
public void dragLeave(VDragEvent drag) {
} | 3.68 |
hudi_HoodieMetaSyncOperations_updateTableComments | /**
 * Updates the field comments for the table in the metastore, using the ones from storage.
 *
 * @param tableName name of the table
 * @param fromMetastore field schemas currently in the metastore
 * @param fromStorage field schemas derived from storage
 * @return true if the table comments were updated, false otherwise
*/
default boolean updateTableComments(String tableName, List<FieldSchema> fromMetastore, List<FieldSchema> fromStorage) {
return false;
} | 3.68 |
aws-saas-boost_UpdateWorkflow_updateCloudFormationStack | // TODO all CloudFormation activities (reading params, updating stacks)
// should be extracted to a class for easier mocking/testing
private void updateCloudFormationStack(String stackName, Map<String, String> paramsMap, String yamlFile) {
List<Parameter> templateParameters = paramsMap.entrySet().stream()
.map(entry -> Parameter.builder().parameterKey(entry.getKey()).parameterValue(entry.getValue()).build())
.collect(Collectors.toList());
CloudFormationClient cfn = clientBuilderFactory.cloudFormationBuilder().build();
LOGGER.info("Executing CloudFormation update stack for " + stackName);
try {
UpdateStackResponse updateStackResponse = cfn.updateStack(UpdateStackRequest.builder()
.stackName(stackName)
.capabilitiesWithStrings("CAPABILITY_NAMED_IAM", "CAPABILITY_AUTO_EXPAND")
.templateURL(environment.getArtifactsBucket().getBucketUrl() + yamlFile)
.parameters(templateParameters)
.build()
);
String stackId = updateStackResponse.stackId();
LOGGER.info("Waiting for update stack to complete for " + stackId);
long sleepTime = 1L;
final long timeoutMinutes = 60L;
final long timeout = (timeoutMinutes * 60 * 1000) + System.currentTimeMillis();
while (true) {
                if (System.currentTimeMillis() > timeout) {
                    outputMessage("CloudFormation update of stack: " + stackName + " timed out. "
                            + "Check the events in the AWS CloudFormation console.");
                    // Abort polling once the timeout elapses instead of looping forever.
                    throw new RuntimeException("CloudFormation update of stack " + stackName + " timed out");
                }
DescribeStacksResponse response = cfn.describeStacks(request -> request.stackName(stackId));
Stack stack = response.stacks().get(0);
StackStatus stackStatus = stack.stackStatus();
EnumSet<StackStatus> failureStatuses = EnumSet.of(
StackStatus.UPDATE_ROLLBACK_COMPLETE,
StackStatus.UPDATE_FAILED,
StackStatus.UPDATE_ROLLBACK_FAILED);
if (stackStatus == StackStatus.UPDATE_COMPLETE) {
outputMessage("CloudFormation stack: " + stackName + " updated successfully.");
break;
} else if (failureStatuses.contains(stackStatus)) {
outputMessage("CloudFormation stack: " + stackName + " update failed.");
throw new RuntimeException("Error with CloudFormation stack " + stackName
+ ". Check the events in the AWS CloudFormation Console");
} else {
// TODO should we set an upper bound on this loop?
outputMessage("Awaiting Update of CloudFormation Stack " + stackName
+ " to complete. Sleep " + sleepTime + " minute(s)...");
try {
Thread.sleep(sleepTime * 60 * 1000);
} catch (Exception e) {
LOGGER.error("Error pausing thread", e);
}
                    sleepTime = 1L; // keep polling once a minute
}
}
} catch (SdkServiceException cfnError) {
if (cfnError.getMessage().contains("No updates are to be performed")) {
outputMessage("No Updates to be performed for Stack: " + stackName);
} else {
LOGGER.error("updateCloudFormationStack::update stack failed {}", cfnError.getMessage());
LOGGER.error(Utils.getFullStackTrace(cfnError));
throw cfnError;
}
}
} | 3.68 |
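The hand-rolled polling loop could also lean on the SDK's built-in waiter; a sketch, assuming the AWS SDK for Java v2 CloudFormation waiter API.

import software.amazon.awssdk.core.waiters.WaiterResponse;
import software.amazon.awssdk.services.cloudformation.CloudFormationClient;
import software.amazon.awssdk.services.cloudformation.model.DescribeStacksResponse;

public class StackUpdateWaiterSketch {
    static void waitForUpdate(CloudFormationClient cfn, String stackId) {
        // Polls DescribeStacks until UPDATE_COMPLETE, failing fast on rollback/failed states.
        WaiterResponse<DescribeStacksResponse> waited = cfn.waiter()
                .waitUntilStackUpdateComplete(request -> request.stackName(stackId));
        waited.matched().response().ifPresent(r ->
                System.out.println("Stack status: " + r.stacks().get(0).stackStatusAsString()));
    }
}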
querydsl_SimpleExpression_isNotNull | /**
* Create a {@code this is not null} expression
*
* @return this is not null
*/
public BooleanExpression isNotNull() {
if (isnotnull == null) {
isnotnull = Expressions.booleanOperation(Ops.IS_NOT_NULL, mixin);
}
return isnotnull;
} | 3.68 |
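Typical use in a predicate; the path below is built with Expressions as a stand-in for a generated Q-type field.

import com.querydsl.core.types.dsl.BooleanExpression;
import com.querydsl.core.types.dsl.Expressions;
import com.querydsl.core.types.dsl.StringPath;

public class IsNotNullExample {
    public static void main(String[] args) {
        StringPath email = Expressions.stringPath("email");  // stand-in for a generated Q-type field
        BooleanExpression predicate = email.isNotNull();     // cached on the expression after the first call
        System.out.println(predicate);                       // roughly: email is not null
    }
}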
hbase_Result_value | /**
* Returns the value of the first column in the Result.
* @return value of the first column
*/
public byte[] value() {
if (isEmpty()) {
return null;
}
return CellUtil.cloneValue(cells[0]);
} | 3.68 |
framework_GridRowDragger_setTargetDataProviderUpdater | /**
* Sets the target data provider updater, which handles adding the dropped
* items to the target grid.
* <p>
* By default, items are added to the index where they were dropped on for
* any {@link ListDataProvider}. If another type of data provider is used,
* this updater should be set to handle updating instead.
*
* @param targetDataProviderUpdater
* the target drop handler to set, or {@code null} to remove
*/
public void setTargetDataProviderUpdater(
TargetDataProviderUpdater<T> targetDataProviderUpdater) {
this.targetDataProviderUpdater = targetDataProviderUpdater;
} | 3.68 |
querydsl_ExpressionUtils_count | /**
* Create a {@code count(source)} expression
*
* @param source source
* @return count(source)
*/
public static Expression<Long> count(Expression<?> source) {
return operation(Long.class, Ops.AggOps.COUNT_AGG, source);
} | 3.68 |
hadoop_AbstractConfigurableFederationPolicy_setPolicyInfo | /**
* Setter method for the configuration weights.
*
* @param policyInfo the {@link WeightedPolicyInfo} representing the policy
* configuration.
*/
public void setPolicyInfo(WeightedPolicyInfo policyInfo) {
this.policyInfo = policyInfo;
} | 3.68 |
framework_VAbsoluteLayout_setWidgetWrapperStyleNames | /**
* Sets style names for the wrapper wrapping the widget in the layout. The
* style names will be prefixed with v-absolutelayout-wrapper.
*
* @param widget
* The widget which wrapper we want to add the stylenames to
* @param stylenames
* The style names that should be added to the wrapper
*/
public void setWidgetWrapperStyleNames(Widget widget,
String... stylenames) {
AbsoluteWrapper wrapper = getChildWrapper(widget);
if (wrapper == null) {
throw new IllegalArgumentException(
"No wrapper for widget found, has the widget been added to the layout?");
}
wrapper.setWrapperStyleNames(stylenames);
} | 3.68 |