name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68) |
---|---|---|
hudi_ParquetSchemaConverter_fromParquetType | /**
 * Converts a Parquet schema to Flink type information.
*
* @param type Parquet schema
* @return Flink type information
*/
public static TypeInformation<?> fromParquetType(MessageType type) {
return convertFields(type.getFields());
} | 3.68 |
dubbo_Utf8Utils_isThreeBytes | /**
 * Returns whether this is a three-byte codepoint with the lead byte form '1110XXXX'.
*/
private static boolean isThreeBytes(byte b) {
return b < (byte) 0xF0;
} | 3.68 |
framework_AbstractSelect_addItem | /**
 * Creates a new item in the container. The created item is returned and is
 * ready for setting property values. If the creation fails, or if the
 * container already contains the item, null is returned.
*
* This functionality is optional. If the function is unsupported, it always
* returns null.
*
* @param itemId
* the Identification of the item to be created.
 * @return the created item with the given id, or null in case of failure.
* @see Container#addItem(java.lang.Object)
*/
@Override
public Item addItem(Object itemId) throws UnsupportedOperationException {
final Item retval = items.addItem(itemId);
if (retval != null
&& !(items instanceof Container.ItemSetChangeNotifier)) {
fireItemSetChange();
}
return retval;
} | 3.68 |
rocketmq-connect_ChangeCaseConfig_from | /**
 * Returns the source case format.
 *
 * @return the {@code CaseFormat} to convert from
*/
public CaseFormat from() {
return this.from;
} | 3.68 |
hudi_BaseHoodieWriteClient_renameColumn | /**
 * Renames a column of a Hudi table.
 *
 * @param colName name of the column to be renamed. To rename a column within a nested field, specify its full name.
 * @param newName new name for the column; there is no need to specify the full name.
*/
public void renameColumn(String colName, String newName) {
Pair<InternalSchema, HoodieTableMetaClient> pair = getInternalSchemaAndMetaClient();
InternalSchema newSchema = new InternalSchemaChangeApplier(pair.getLeft()).applyRenameChange(colName, newName);
commitTableChange(newSchema, pair.getRight());
} | 3.68 |
flink_Tuple19_of | /**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
public static <
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18>
Tuple19<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18>
of(
T0 f0,
T1 f1,
T2 f2,
T3 f3,
T4 f4,
T5 f5,
T6 f6,
T7 f7,
T8 f8,
T9 f9,
T10 f10,
T11 f11,
T12 f12,
T13 f13,
T14 f14,
T15 f15,
T16 f16,
T17 f17,
T18 f18) {
return new Tuple19<>(
f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16, f17,
f18);
} | 3.68 |
hbase_RequestConverter_buildDisableTableRequest | /**
* Creates a protocol buffer DisableTableRequest
* @return a DisableTableRequest
*/
public static DisableTableRequest buildDisableTableRequest(final TableName tableName,
final long nonceGroup, final long nonce) {
DisableTableRequest.Builder builder = DisableTableRequest.newBuilder();
builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
builder.setNonceGroup(nonceGroup);
builder.setNonce(nonce);
return builder.build();
} | 3.68 |
hadoop_AbfsPermission_valueOf | /**
 * Creates an AbfsPermission from an ABFS symbolic permission string.
* @param abfsSymbolicPermission e.g. "rw-rw-rw-+" / "rw-rw-rw-"
* @return a permission object for the provided string representation
*/
public static AbfsPermission valueOf(final String abfsSymbolicPermission) {
if (abfsSymbolicPermission == null) {
return null;
}
final boolean isExtendedAcl = abfsSymbolicPermission.charAt(abfsSymbolicPermission.length() - 1) == '+';
final String abfsRawSymbolicPermission = isExtendedAcl ? abfsSymbolicPermission.substring(0, abfsSymbolicPermission.length() - 1)
: abfsSymbolicPermission;
int n = 0;
for (int i = 0; i < abfsRawSymbolicPermission.length(); i++) {
n = n << 1;
char c = abfsRawSymbolicPermission.charAt(i);
n += (c == '-' || c == 'T' || c == 'S') ? 0: 1;
}
// Add sticky bit value if set
if (abfsRawSymbolicPermission.charAt(abfsRawSymbolicPermission.length() - 1) == 't'
|| abfsRawSymbolicPermission.charAt(abfsRawSymbolicPermission.length() - 1) == 'T') {
n += STICKY_BIT_OCTAL_VALUE;
}
return new AbfsPermission((short) n, isExtendedAcl);
} | 3.68 |
flink_FileSystemJobResultStore_constructCleanPath | /**
* Given a job ID, construct the path for a clean entry corresponding to it in the job result
* store.
*
* @param jobId The job ID to construct a clean entry path from.
 * @return A path for a clean entry for the given job ID.
*/
private Path constructCleanPath(JobID jobId) {
return constructEntryPath(jobId.toString() + FILE_EXTENSION);
} | 3.68 |
hbase_HBaseTestingUtility_waitTableDisabled | /**
 * Waits for a table to be marked as 'disabled'.
* @param table Table to wait on.
* @param timeoutMillis Time to wait on it being marked disabled.
*/
public void waitTableDisabled(byte[] table, long timeoutMillis)
throws InterruptedException, IOException {
waitTableDisabled(TableName.valueOf(table), timeoutMillis);
} | 3.68 |
flink_CheckpointConfig_getAlignmentTimeout | /**
* @return value of alignment timeout, as configured via {@link #setAlignmentTimeout(Duration)}
* or {@link ExecutionCheckpointingOptions#ALIGNMENT_TIMEOUT}.
 * @deprecated Use {@link #getAlignedCheckpointTimeout()} instead.
*/
@Deprecated
@PublicEvolving
public Duration getAlignmentTimeout() {
return getAlignedCheckpointTimeout();
} | 3.68 |
flink_ActiveResourceManager_clearStateForWorker | /**
 * Clears state for a terminated worker.
*
* @param resourceId Identifier of the worker
* @return True if the worker is known and states are cleared; false if the worker is unknown
* (duplicate call to already cleared worker)
*/
private boolean clearStateForWorker(ResourceID resourceId) {
WorkerType worker = workerNodeMap.remove(resourceId);
if (worker == null) {
log.debug("Ignore unrecognized worker {}.", resourceId.getStringWithMetadata());
return false;
}
WorkerResourceSpec workerResourceSpec = workerResourceSpecs.remove(resourceId);
tryRemovePreviousPendingRecoveryTaskManager(resourceId);
if (workerResourceSpec != null) {
totalWorkerCounter.decreaseAndGet(workerResourceSpec);
if (currentAttemptUnregisteredWorkers.remove(resourceId)) {
final int count = pendingWorkerCounter.decreaseAndGet(workerResourceSpec);
log.info(
"Worker {} with resource spec {} was requested in current attempt and has not registered."
+ " Current pending count after removing: {}.",
resourceId.getStringWithMetadata(),
workerResourceSpec,
count);
}
}
return true;
} | 3.68 |
graphhopper_MMapDataAccess_clean | /**
 * Cleans up MappedByteBuffers. Be sure to bring the segments list into a consistent state
* afterwards.
* <p>
*
* @param from inclusive
* @param to exclusive
*/
private void clean(int from, int to) {
for (int i = from; i < to; i++) {
ByteBuffer bb = segments.get(i);
cleanMappedByteBuffer(bb);
segments.set(i, null);
}
} | 3.68 |
framework_InMemoryDataProvider_addSortComparator | /**
* Adds a comparator to the default sorting for this data provider. If no
* default sorting has been defined, then the provided comparator will be
* used as the default sorting. If a default sorting has been defined, then
* the provided comparator will be used to determine the ordering of items
* that are considered equal by the previously defined default sorting.
* <p>
* The default sorting is used if the query defines no sorting. The default
* sorting is also used to determine the ordering of items that are
* considered equal by the sorting defined in the query.
*
* @see #setSortComparator(SerializableComparator)
* @see #addSortOrder(ValueProvider, SortDirection)
*
* @param comparator
* a comparator to add, not <code>null</code>
*/
public default void addSortComparator(
SerializableComparator<T> comparator) {
Objects.requireNonNull(comparator, "Comparator to add cannot be null");
SerializableComparator<T> originalComparator = getSortComparator();
if (originalComparator == null) {
setSortComparator(comparator);
} else {
setSortComparator((a, b) -> {
int result = originalComparator.compare(a, b);
if (result == 0) {
result = comparator.compare(a, b);
}
return result;
});
}
} | 3.68 |
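The tie-breaking behaviour described above follows the same pattern as composing plain java.util.Comparator instances; a minimal, Vaadin-independent sketch of the idea:
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class ComparatorCompositionSketch {
    public static void main(String[] args) {
        Comparator<String> byLength = Comparator.comparingInt(String::length);
        // The added comparator only decides the order of items the first one considers equal.
        Comparator<String> byLengthThenAlpha = (a, b) -> {
            int result = byLength.compare(a, b);
            return result == 0 ? a.compareTo(b) : result;
        };
        List<String> items = Arrays.asList("pear", "fig", "plum", "kiwi");
        items.sort(byLengthThenAlpha);
        System.out.println(items); // [fig, kiwi, pear, plum]
    }
}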
framework_Escalator_getBody | /**
* Returns the row container for the body in this Escalator.
*
* @return the body. Never <code>null</code>
*/
public BodyRowContainer getBody() {
return body;
} | 3.68 |
flink_MapValue_values | /*
* (non-Javadoc)
* @see java.util.Map#values()
*/
@Override
public Collection<V> values() {
return this.map.values();
} | 3.68 |
flink_HiveParserUtils_makeOver | /**
* Proxy to {@link RexBuilder#makeOver(RelDataType, SqlAggFunction, List, List,
* com.google.common.collect.ImmutableList, RexWindowBound, RexWindowBound, boolean, boolean,
* boolean, boolean, boolean)}.
*/
public static RexNode makeOver(
RexBuilder rexBuilder,
RelDataType type,
SqlAggFunction operator,
List<RexNode> exprs,
List<RexNode> partitionKeys,
List<RexFieldCollation> orderKeys,
RexWindowBound lowerBound,
RexWindowBound upperBound,
boolean physical,
boolean allowPartial,
boolean nullWhenCountZero,
boolean distinct,
boolean ignoreNulls) {
Preconditions.checkState(
immutableListClz != null || shadedImmutableListClz != null,
"Neither original nor shaded guava class can be found");
Method method = null;
final String methodName = "makeOver";
final int orderKeysIndex = 4;
Class[] argTypes =
new Class[] {
RelDataType.class,
SqlAggFunction.class,
List.class,
List.class,
null,
RexWindowBound.class,
RexWindowBound.class,
boolean.class,
boolean.class,
boolean.class,
boolean.class,
boolean.class
};
if (immutableListClz != null) {
argTypes[orderKeysIndex] = immutableListClz;
method = HiveReflectionUtils.tryGetMethod(rexBuilder.getClass(), methodName, argTypes);
}
if (method == null) {
Preconditions.checkState(
shadedImmutableListClz != null,
String.format(
"Shaded guava class not found, but method %s takes shaded parameter",
methodName));
argTypes[orderKeysIndex] = shadedImmutableListClz;
method = HiveReflectionUtils.tryGetMethod(rexBuilder.getClass(), methodName, argTypes);
}
Preconditions.checkState(method != null, "Neither original nor shaded method can be found");
Object orderKeysArg = toImmutableList(orderKeys);
Object[] args =
new Object[] {
type,
operator,
exprs,
partitionKeys,
orderKeysArg,
lowerBound,
upperBound,
physical,
allowPartial,
nullWhenCountZero,
distinct,
ignoreNulls
};
try {
return (RexNode) method.invoke(rexBuilder, args);
} catch (InvocationTargetException | IllegalAccessException e) {
throw new RuntimeException("Failed to invoke " + methodName, e);
}
} | 3.68 |
hudi_LSMTimeline_getMaxInstantTime | /**
* Parse the maximum instant time from the file name.
*/
public static String getMaxInstantTime(String fileName) {
Matcher fileMatcher = ARCHIVE_FILE_PATTERN.matcher(fileName);
if (fileMatcher.matches()) {
return fileMatcher.group(2);
} else {
throw new HoodieException("Unexpected archival file name: " + fileName);
}
} | 3.68 |
hbase_BalancerClusterState_getRegionCacheRatioOnRegionServer | /**
* Returns the amount by which a region is cached on a given region server. If the region is not
* currently hosted on the given region server, then find out if it was previously hosted there
* and return the old cache ratio.
*/
protected float getRegionCacheRatioOnRegionServer(int region, int regionServerIndex) {
float regionCacheRatio = 0.0f;
// Get the current region cache ratio if the region is hosted on the server regionServerIndex
for (int regionIndex : regionsPerServer[regionServerIndex]) {
if (region != regionIndex) {
continue;
}
Deque<BalancerRegionLoad> regionLoadList = regionLoads[regionIndex];
// The region is currently hosted on this region server. Get the region cache ratio for this
// region on this server
regionCacheRatio =
regionLoadList == null ? 0.0f : regionLoadList.getLast().getCurrentRegionCacheRatio();
return regionCacheRatio;
}
// Region is not currently hosted on this server. Check if the region was cached on this
// server earlier. This can happen when the server was shutdown and the cache was persisted.
// Search using the region name and server name and not the index id and server id as these ids
// may change when a server is marked as dead or a new server is added.
String regionEncodedName = regions[region].getEncodedName();
ServerName serverName = servers[regionServerIndex];
if (
regionCacheRatioOnOldServerMap != null
&& regionCacheRatioOnOldServerMap.containsKey(regionEncodedName)
) {
Pair<ServerName, Float> cacheRatioOfRegionOnServer =
regionCacheRatioOnOldServerMap.get(regionEncodedName);
if (ServerName.isSameAddress(cacheRatioOfRegionOnServer.getFirst(), serverName)) {
regionCacheRatio = cacheRatioOfRegionOnServer.getSecond();
if (LOG.isDebugEnabled()) {
LOG.debug("Old cache ratio found for region {} on server {}: {}", regionEncodedName,
serverName, regionCacheRatio);
}
}
}
return regionCacheRatio;
} | 3.68 |
hadoop_FutureDataInputStreamBuilderImpl_getStatus | /**
* Get any status set in {@link #withFileStatus(FileStatus)}.
* @return a status value or null.
*/
protected FileStatus getStatus() {
return status;
} | 3.68 |
querydsl_TimeExpression_hour | /**
 * Creates an hours expression (range 0-23)
*
* @return hour
*/
public NumberExpression<Integer> hour() {
if (hours == null) {
hours = Expressions.numberOperation(Integer.class, Ops.DateTimeOps.HOUR, mixin);
}
return hours;
} | 3.68 |
hibernate-validator_ModCheckBase_extractDigit | /**
* Returns the numeric {@code int} value of a {@code char}
*
* @param value the input {@code char} to be parsed
*
* @return the numeric {@code int} value represented by the character.
*
* @throws NumberFormatException in case character is not a digit
*/
protected int extractDigit(char value) throws NumberFormatException {
if ( Character.isDigit( value ) ) {
return Character.digit( value, DEC_RADIX );
}
else {
throw LOG.getCharacterIsNotADigitException( value );
}
} | 3.68 |
hbase_RegionLocations_isEmpty | /**
 * Returns whether the list contains no non-null elements
 * @return true if there are no non-null elements in the list
*/
public boolean isEmpty() {
return numNonNullElements == 0;
} | 3.68 |
hbase_SnapshotInfo_printInfo | /**
* Dump the {@link SnapshotDescription}
*/
private void printInfo() {
SnapshotProtos.SnapshotDescription snapshotDesc = snapshotManifest.getSnapshotDescription();
SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
System.out.println("Snapshot Info");
System.out.println("----------------------------------------");
System.out.println(" Name: " + snapshotDesc.getName());
System.out.println(" Type: " + snapshotDesc.getType());
System.out.println(" Table: " + snapshotDesc.getTable());
System.out.println(" Format: " + snapshotDesc.getVersion());
System.out.println("Created: " + df.format(new Date(snapshotDesc.getCreationTime())));
System.out.println(" Ttl: " + snapshotDesc.getTtl());
System.out.println(" Owner: " + snapshotDesc.getOwner());
System.out.println();
} | 3.68 |
hadoop_NativeTaskOutputFiles_removeAll | /** Removes all of the files related to a task. */
public void removeAll() throws IOException {
conf.deleteLocalFiles(TASKTRACKER_OUTPUT);
} | 3.68 |
flink_StreamProjection_projectTuple1 | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0> SingleOutputStreamOperator<Tuple1<T0>> projectTuple1() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple1<T0>> tType = new TupleTypeInfo<Tuple1<T0>>(fTypes);
return dataStream.transform(
"Projection",
tType,
new StreamProject<IN, Tuple1<T0>>(
fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.68 |
framework_ColorPickerGrid_setPosition | /**
* Sets the position.
*
 * @param x
 *            the x-coordinate
 * @param y
 *            the y-coordinate
*/
public void setPosition(int x, int y) {
if (x >= 0 && x < getColumns() && y >= 0 && y < getRows()) {
this.x = x;
this.y = y;
}
} | 3.68 |
hibernate-validator_AbstractConfigurationImpl_run | /**
* Runs the given privileged action, using a privileged block if required.
* <p>
* <b>NOTE:</b> This must never be changed into a publicly available method to avoid execution of arbitrary
* privileged actions within HV's protection domain.
*/
@IgnoreForbiddenApisErrors(reason = "SecurityManager is deprecated in JDK17")
private static <T> T run(PrivilegedAction<T> action) {
return System.getSecurityManager() != null ? AccessController.doPrivileged( action ) : action.run();
} | 3.68 |
morf_NamedParameterPreparedStatement_execute | /**
* @see PreparedStatement#execute()
* <P>
* Executes the SQL statement in this <code>PreparedStatement</code> object,
* which may be any kind of SQL statement.
* Some prepared statements return multiple results; the <code>execute</code>
* method handles these complex statements as well as the simpler
* form of statements handled by the methods <code>executeQuery</code>
* and <code>executeUpdate</code>.
* </P><P>
* The <code>execute</code> method returns a <code>boolean</code> to
* indicate the form of the first result. You must call either the method
* <code>getResultSet</code> or <code>getUpdateCount</code>
* to retrieve the result; you must call <code>getMoreResults</code> to
* move to any subsequent result(s).</P>
*
* @return <code>true</code> if the first result is a <code>ResultSet</code>
* object; <code>false</code> if the first result is an update
* count or there is no result
* @exception SQLException if a database access error occurs;
* this method is called on a closed <code>PreparedStatement</code>
* or an argument is supplied to this method
* @throws SQLTimeoutException when the driver has determined that the
* timeout value that was specified by the {@code setQueryTimeout}
* method has been exceeded and has at least attempted to cancel
* the currently running {@code Statement}
*/
public boolean execute() throws SQLException {
return statement.execute();
} | 3.68 |
hbase_ReversedStoreScanner_seekAsDirection | /**
 * Does a backward seek in a reversed StoreScanner (scans backward)
*/
@Override
protected boolean seekAsDirection(Cell kv) throws IOException {
return backwardSeek(kv);
} | 3.68 |
flink_InputSelection_from | /**
* Returns a {@code Builder} that uses the input mask of the specified {@code selection} as
* the initial mask.
*/
public static Builder from(InputSelection selection) {
Builder builder = new Builder();
builder.inputMask = selection.inputMask;
return builder;
} | 3.68 |
hbase_ColumnPrefixFilter_areSerializedFieldsEqual | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof ColumnPrefixFilter)) {
return false;
}
ColumnPrefixFilter other = (ColumnPrefixFilter) o;
return Bytes.equals(this.getPrefix(), other.getPrefix());
} | 3.68 |
hadoop_HAState_getServiceState | /**
* @return the generic service state
*/
public HAServiceState getServiceState() {
return state;
} | 3.68 |
hbase_FamilyFilter_areSerializedFieldsEqual | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof FamilyFilter)) {
return false;
}
FamilyFilter other = (FamilyFilter) o;
return super.areSerializedFieldsEqual(other);
} | 3.68 |
flink_DefaultLookupCache_expireAfterWrite | /**
* Specifies the duration after an entry is created that it should be automatically removed.
*/
public Builder expireAfterWrite(Duration duration) {
expireAfterWriteDuration = duration;
return this;
} | 3.68 |
hadoop_ManifestSuccessData_toJson | /**
* To JSON.
* @return json string value.
* @throws IOException failure
*/
public String toJson() throws IOException {
return serializer().toJson(this);
} | 3.68 |
hibernate-validator_ValidationInterceptor_validateMethodInvocation | /**
* Validates the Bean Validation constraints specified at the parameters and/or return value of the intercepted method.
*
* @param ctx The context of the intercepted method invocation.
*
* @return The result of the method invocation.
*
* @throws Exception Any exception caused by the intercepted method invocation. A {@link ConstraintViolationException}
* in case at least one constraint violation occurred either during parameter or return value validation.
*/
@AroundInvoke
public Object validateMethodInvocation(InvocationContext ctx) throws Exception {
ExecutableValidator executableValidator = validator.forExecutables();
Set<ConstraintViolation<Object>> violations = executableValidator.validateParameters(
ctx.getTarget(),
ctx.getMethod(),
ctx.getParameters()
);
if ( !violations.isEmpty() ) {
throw new ConstraintViolationException(
getMessage( ctx.getMethod(), ctx.getParameters(), violations ),
violations
);
}
Object result = ctx.proceed();
violations = executableValidator.validateReturnValue(
ctx.getTarget(),
ctx.getMethod(),
result
);
if ( !violations.isEmpty() ) {
throw new ConstraintViolationException(
getMessage( ctx.getMethod(), ctx.getParameters(), violations ),
violations
);
}
return result;
} | 3.68 |
flink_LongSumAggregator_aggregate | /**
* Adds the given value to the current aggregate.
*
* @param value The value to add to the aggregate.
*/
public void aggregate(long value) {
sum += value;
} | 3.68 |
hadoop_DiskBalancerWorkStatus_getDestPath | /**
 * Gets the destination path.
 *
 * @return the destination path
*/
public String getDestPath() {
return destPath;
} | 3.68 |
hadoop_XMLUtils_transform | /**
* Transform input xml given a stylesheet.
*
* @param styleSheet the style-sheet
* @param xml input xml data
* @param out output
 * @throws TransformerConfigurationException if a {@code Transformer} cannot be
 * created from the given stylesheet.
 * @throws TransformerException if an error occurs during the transformation.
*/
public static void transform(
InputStream styleSheet, InputStream xml, Writer out
)
throws TransformerConfigurationException, TransformerException {
// Instantiate a TransformerFactory
TransformerFactory tFactory = newSecureTransformerFactory();
// Use the TransformerFactory to process the
// stylesheet and generate a Transformer
Transformer transformer = tFactory.newTransformer(
new StreamSource(styleSheet)
);
// Use the Transformer to transform an XML Source
// and send the output to a Result object.
transformer.transform(new StreamSource(xml), new StreamResult(out));
} | 3.68 |
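A possible usage sketch of the method above, feeding it in-memory streams; the XML and stylesheet literals are illustrative, and the import assumes the class lives in Hadoop's org.apache.hadoop.util package:
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.io.StringWriter;
import java.nio.charset.StandardCharsets;

public class TransformUsageSketch {
    public static void main(String[] args) throws Exception {
        String xml = "<root><item>42</item></root>";
        String xsl = "<xsl:stylesheet version=\"1.0\""
                + " xmlns:xsl=\"http://www.w3.org/1999/XSL/Transform\">"
                + "<xsl:output method=\"text\"/>"
                + "<xsl:template match=\"/\"><xsl:value-of select=\"root/item\"/></xsl:template>"
                + "</xsl:stylesheet>";
        InputStream styleSheet = new ByteArrayInputStream(xsl.getBytes(StandardCharsets.UTF_8));
        InputStream input = new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8));
        StringWriter out = new StringWriter();
        // Calls the transform method shown above (package assumed).
        org.apache.hadoop.util.XMLUtils.transform(styleSheet, input, out);
        System.out.println(out); // prints 42
    }
}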
hadoop_Utils_getMajor | /**
* Get the major version.
*
* @return Major version.
*/
public int getMajor() {
return major;
} | 3.68 |
flink_SpanningWrapper_transferLeftOverTo | /** Copies the leftover data and transfers the "ownership" (i.e. clears this wrapper). */
void transferLeftOverTo(NonSpanningWrapper nonSpanningWrapper) {
nonSpanningWrapper.clear();
if (leftOverData != null) {
nonSpanningWrapper.initializeFromMemorySegment(
leftOverData, leftOverStart, leftOverLimit);
}
clear();
} | 3.68 |
flink_CatalogManager_createCatalog | /**
* Creates a catalog under the given name. The catalog name must be unique.
*
* @param catalogName the given catalog name under which to create the given catalog
* @param catalogDescriptor catalog descriptor for creating catalog
* @throws CatalogException If the catalog already exists in the catalog store or initialized
* catalogs, or if an error occurs while creating the catalog or storing the {@link
* CatalogDescriptor}
*/
public void createCatalog(String catalogName, CatalogDescriptor catalogDescriptor)
throws CatalogException {
checkArgument(
!StringUtils.isNullOrWhitespaceOnly(catalogName),
"Catalog name cannot be null or empty.");
checkNotNull(catalogDescriptor, "Catalog descriptor cannot be null");
if (catalogStoreHolder.catalogStore().contains(catalogName)) {
throw new CatalogException(
format("Catalog %s already exists in catalog store.", catalogName));
}
if (catalogs.containsKey(catalogName)) {
throw new CatalogException(
format("Catalog %s already exists in initialized catalogs.", catalogName));
}
Catalog catalog = initCatalog(catalogName, catalogDescriptor);
catalog.open();
catalogs.put(catalogName, catalog);
catalogStoreHolder.catalogStore().storeCatalog(catalogName, catalogDescriptor);
} | 3.68 |
flink_KubernetesUtils_getTaskManagerSelectors | /**
* Get task manager selectors for the current Flink cluster. They could be used to watch the
* pods status.
*
* @return Task manager labels.
*/
public static Map<String, String> getTaskManagerSelectors(String clusterId) {
final Map<String, String> labels = getCommonLabels(clusterId);
labels.put(Constants.LABEL_COMPONENT_KEY, Constants.LABEL_COMPONENT_TASK_MANAGER);
return Collections.unmodifiableMap(labels);
} | 3.68 |
flink_CheckpointStatsHistory_addInProgressCheckpoint | /**
* Adds an in progress checkpoint to the checkpoint history.
*
* @param pending In progress checkpoint to add.
*/
void addInProgressCheckpoint(PendingCheckpointStats pending) {
if (readOnly) {
throw new UnsupportedOperationException(
"Can't create a snapshot of a read-only history.");
}
if (maxSize == 0) {
return;
}
checkNotNull(pending, "Pending checkpoint");
// Grow the array if required. This happens only for the first entries
// and makes the iterator logic easier, because we don't have any
// null elements with the growing array.
if (checkpointsArray.length < maxSize) {
checkpointsArray = Arrays.copyOf(checkpointsArray, checkpointsArray.length + 1);
}
// Wrap around if we are at the end. The next pos is the least recently
// added checkpoint.
if (nextPos == checkpointsArray.length) {
nextPos = 0;
}
checkpointsArray[nextPos++] = pending;
recentCheckpoints.put(pending.checkpointId, pending);
} | 3.68 |
flink_OrcShimV200_computeProjectionMask | /**
 * Computes the ORC projection mask of the fields to include from the selected fields.
*
* @return The ORC projection mask.
*/
public static boolean[] computeProjectionMask(TypeDescription schema, int[] selectedFields) {
// mask with all fields of the schema
boolean[] projectionMask = new boolean[schema.getMaximumId() + 1];
// for each selected field
for (int inIdx : selectedFields) {
// set all nested fields of a selected field to true
TypeDescription fieldSchema = schema.getChildren().get(inIdx);
for (int i = fieldSchema.getId(); i <= fieldSchema.getMaximumId(); i++) {
projectionMask[i] = true;
}
}
return projectionMask;
} | 3.68 |
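A worked example of the mask shape, replicating the loop above on a made-up schema so it only depends on the ORC TypeDescription API:
import java.util.Arrays;
import org.apache.orc.TypeDescription;

public class ProjectionMaskSketch {
    public static void main(String[] args) {
        // Hypothetical schema: column 0 is a primitive, column 1 is a nested struct.
        TypeDescription schema =
                TypeDescription.fromString("struct<a:int,b:struct<c:string,d:double>>");
        int[] selectedFields = {1}; // project only column "b"
        // Same logic as computeProjectionMask above: mark the selected field and all its children.
        boolean[] mask = new boolean[schema.getMaximumId() + 1];
        for (int inIdx : selectedFields) {
            TypeDescription fieldSchema = schema.getChildren().get(inIdx);
            for (int i = fieldSchema.getId(); i <= fieldSchema.getMaximumId(); i++) {
                mask[i] = true;
            }
        }
        System.out.println(Arrays.toString(mask)); // [false, false, true, true, true]
    }
}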
hudi_FlinkCompactionConfig_toFlinkConfig | /**
* Transforms a {@code HoodieFlinkCompaction.config} into {@code Configuration}.
* The latter is more suitable for the table APIs. It reads all the properties
* in the properties file (set by `--props` option) and cmd line options
* (set by `--hoodie-conf` option).
*/
public static org.apache.flink.configuration.Configuration toFlinkConfig(FlinkCompactionConfig config) {
Map<String, String> propsMap = new HashMap<String, String>((Map) getProps(config));
org.apache.flink.configuration.Configuration conf = fromMap(propsMap);
conf.setString(FlinkOptions.PATH, config.path);
conf.setString(FlinkOptions.COMPACTION_TRIGGER_STRATEGY, config.compactionTriggerStrategy);
conf.setInteger(FlinkOptions.ARCHIVE_MAX_COMMITS, config.archiveMaxCommits);
conf.setInteger(FlinkOptions.ARCHIVE_MIN_COMMITS, config.archiveMinCommits);
conf.setString(FlinkOptions.CLEAN_POLICY, config.cleanPolicy);
conf.setInteger(FlinkOptions.CLEAN_RETAIN_COMMITS, config.cleanRetainCommits);
conf.setInteger(FlinkOptions.CLEAN_RETAIN_HOURS, config.cleanRetainHours);
conf.setInteger(FlinkOptions.CLEAN_RETAIN_FILE_VERSIONS, config.cleanRetainFileVersions);
conf.setInteger(FlinkOptions.COMPACTION_DELTA_COMMITS, config.compactionDeltaCommits);
conf.setInteger(FlinkOptions.COMPACTION_DELTA_SECONDS, config.compactionDeltaSeconds);
conf.setInteger(FlinkOptions.COMPACTION_MAX_MEMORY, config.compactionMaxMemory);
conf.setLong(FlinkOptions.COMPACTION_TARGET_IO, config.compactionTargetIo);
conf.setInteger(FlinkOptions.COMPACTION_TASKS, config.compactionTasks);
conf.setBoolean(FlinkOptions.CLEAN_ASYNC_ENABLED, config.cleanAsyncEnable);
// use synchronous compaction always
conf.setBoolean(FlinkOptions.COMPACTION_ASYNC_ENABLED, false);
conf.setBoolean(FlinkOptions.COMPACTION_SCHEDULE_ENABLED, config.schedule);
// Map memory
conf.setString(HoodieMemoryConfig.SPILLABLE_MAP_BASE_PATH.key(), config.spillableMapPath);
return conf;
} | 3.68 |
framework_VRichTextArea_selectAll | /** For internal use only. May be removed or replaced in the future. */
public void selectAll() {
/*
* There is a timing issue if trying to select all immediately on first
* render. Simple deferred command is not enough. Using Timer with
* moderated timeout. If this appears to fail on many (most likely slow)
* environments, consider increasing the timeout.
*
* FF seems to require the most time to stabilize its RTA. On Vaadin
* tiergarden test machines, 200ms was not enough always (about 50%
* success rate) - 300 ms was 100% successful. This however was not
* enough on a sluggish old non-virtualized XP test machine. A bullet
* proof solution would be nice, GWT 2.1 might however solve these. At
* least setFocus has a workaround for this kind of issue.
*/
new Timer() {
@Override
public void run() {
rta.getFormatter().selectAll();
}
}.schedule(320);
} | 3.68 |
AreaShop_GeneralRegion_destroy | /**
* Deregister everything.
*/
public void destroy() {
for(RegionFeature feature : features.values()) {
feature.shutdown();
}
} | 3.68 |
framework_SQLContainer_setReferencedItem | /**
* Sets the referenced item. The referencing column of the item in this
* container is updated accordingly.
*
* @param itemId
* Item Id of the reference source (from this container)
* @param refdItemId
* Item Id of the reference target (from referenced container)
* @param refdCont
* Target SQLContainer of the reference
* @return true if the referenced item was successfully set, false on
* failure
*/
public boolean setReferencedItem(Object itemId, Object refdItemId,
SQLContainer refdCont) {
if (refdCont == null) {
throw new IllegalArgumentException(
"Referenced SQLContainer can not be null.");
}
Reference r = references.get(refdCont);
if (r == null) {
throw new IllegalArgumentException(
"Reference to the given SQLContainer not defined.");
}
try {
getContainerProperty(itemId, r.getReferencingColumn())
.setValue(refdCont.getContainerProperty(refdItemId,
r.getReferencedColumn()));
return true;
} catch (Exception e) {
getLogger().log(Level.WARNING, "Setting referenced item failed.",
e);
return false;
}
} | 3.68 |
hbase_HFileOutputFormat2_writePartitions | /**
* Write out a {@link SequenceFile} that can be read by {@link TotalOrderPartitioner} that
* contains the split points in startKeys.
*/
@SuppressWarnings("deprecation")
private static void writePartitions(Configuration conf, Path partitionsPath,
List<ImmutableBytesWritable> startKeys, boolean writeMultipleTables) throws IOException {
LOG.info("Writing partition information to " + partitionsPath);
if (startKeys.isEmpty()) {
throw new IllegalArgumentException("No regions passed");
}
// We're generating a list of split points, and we don't ever
// have keys < the first region (which has an empty start key)
// so we need to remove it. Otherwise we would end up with an
// empty reducer with index 0
TreeSet<ImmutableBytesWritable> sorted = new TreeSet<>(startKeys);
ImmutableBytesWritable first = sorted.first();
if (writeMultipleTables) {
first =
new ImmutableBytesWritable(MultiTableHFileOutputFormat.getSuffix(sorted.first().get()));
}
if (!first.equals(HConstants.EMPTY_BYTE_ARRAY)) {
throw new IllegalArgumentException(
"First region of table should have empty start key. Instead has: "
+ Bytes.toStringBinary(first.get()));
}
sorted.remove(sorted.first());
// Write the actual file
FileSystem fs = partitionsPath.getFileSystem(conf);
SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, partitionsPath,
ImmutableBytesWritable.class, NullWritable.class);
try {
for (ImmutableBytesWritable startKey : sorted) {
writer.append(startKey, NullWritable.get());
}
} finally {
writer.close();
}
} | 3.68 |
morf_AddColumn_reverse | /**
* @see org.alfasoftware.morf.upgrade.SchemaChange#reverse(org.alfasoftware.morf.metadata.Schema)
 * @return a new {@link Schema} which results from the removal of the relevant column from the table in <var>schema</var>
*/
@Override
public Schema reverse(Schema metadata) {
Table original = metadata.getTable(tableName);
List<String> columns = new ArrayList<>();
boolean found = false;
for (Column column : original.columns()) {
if (column.getName().equalsIgnoreCase(newColumnDefinition.getName())) {
found = true;
} else {
columns.add(column.getName());
}
}
// Remove the column we are filtering
if (!found) {
String columnsInTargetTable = original.columns().toString();
throw new IllegalStateException(
"Column [" + newColumnDefinition + "] not found in table [" + tableName + "] so it could not be removed.\n" +
"Columns in target table [" + tableName + "]:\n" +
columnsInTargetTable.replace(",", ",\n")
);
}
return new TableOverrideSchema(metadata, new AlteredTable(original, columns));
} | 3.68 |
hbase_Procedure_getRootProcedureId | /**
 * Helper to look up the root procedure ID given a specified procedure.
*/
protected static <T> Long getRootProcedureId(Map<Long, Procedure<T>> procedures,
Procedure<T> proc) {
while (proc.hasParent()) {
proc = procedures.get(proc.getParentProcId());
if (proc == null) {
return null;
}
}
return proc.getProcId();
} | 3.68 |
morf_SqlUtils_asType | /**
* Returns a SQL DSL expression to return the field CASTed to
* the specified type, length and scale.
*
* @param type The target type.
* @param length The target length.
* @param scale The target scale.
* @return The SQL DSL expression applying the cast function.
*/
public Cast asType(DataType type, int length, int scale) {
return new Cast(field, type, length, scale);
} | 3.68 |
flink_EncodingUtils_loadClass | /**
* @deprecated Use {@link #loadClass(String, ClassLoader)} instead, in order to explicitly
* provide the correct classloader.
*/
@Deprecated
public static Class<?> loadClass(String qualifiedName) {
return loadClass(qualifiedName, Thread.currentThread().getContextClassLoader());
} | 3.68 |
dubbo_ArrayUtils_isEmpty | /**
 * <p>Checks if the array is null or empty.</p>
 *
 * @param array the array to check
 * @return {@code true} if the array is null or empty.
*/
public static boolean isEmpty(final Object[] array) {
return array == null || array.length == 0;
} | 3.68 |
hbase_PrivateCellUtil_writeFlatKey | /**
 * Writes the Cell's key part as it would have been serialized in a KeyValue. The format is:
 * <2 bytes rk len><rk><1 byte cf len><cf><qualifier><8 bytes timestamp><1 byte type>
*/
public static void writeFlatKey(Cell cell, DataOutput out) throws IOException {
short rowLen = cell.getRowLength();
byte fLen = cell.getFamilyLength();
int qLen = cell.getQualifierLength();
// Using just one if/else loop instead of every time checking before writing every
// component of cell
if (cell instanceof ByteBufferExtendedCell) {
out.writeShort(rowLen);
ByteBufferUtils.copyBufferToStream(out, ((ByteBufferExtendedCell) cell).getRowByteBuffer(),
((ByteBufferExtendedCell) cell).getRowPosition(), rowLen);
out.writeByte(fLen);
ByteBufferUtils.copyBufferToStream(out, ((ByteBufferExtendedCell) cell).getFamilyByteBuffer(),
((ByteBufferExtendedCell) cell).getFamilyPosition(), fLen);
ByteBufferUtils.copyBufferToStream(out,
((ByteBufferExtendedCell) cell).getQualifierByteBuffer(),
((ByteBufferExtendedCell) cell).getQualifierPosition(), qLen);
} else {
out.writeShort(rowLen);
out.write(cell.getRowArray(), cell.getRowOffset(), rowLen);
out.writeByte(fLen);
out.write(cell.getFamilyArray(), cell.getFamilyOffset(), fLen);
out.write(cell.getQualifierArray(), cell.getQualifierOffset(), qLen);
}
out.writeLong(cell.getTimestamp());
out.writeByte(cell.getTypeByte());
} | 3.68 |
flink_MemorySegment_isFreed | /**
* Checks whether the memory segment was freed.
*
* @return <tt>true</tt>, if the memory segment has been freed, <tt>false</tt> otherwise.
*/
@VisibleForTesting
public boolean isFreed() {
return address > addressLimit;
} | 3.68 |
zxing_ContactEncoder_trim | /**
* @return null if s is null or empty, or result of s.trim() otherwise
*/
static String trim(String s) {
if (s == null) {
return null;
}
String result = s.trim();
return result.isEmpty() ? null : result;
} | 3.68 |
morf_SqlServerDialect_getSqlForDaysBetween | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForDaysBetween(org.alfasoftware.morf.sql.element.AliasedField, org.alfasoftware.morf.sql.element.AliasedField)
*/
@Override
protected String getSqlForDaysBetween(AliasedField toDate, AliasedField fromDate) {
return String.format("DATEDIFF(DAY, %s, %s)", getSqlFrom(fromDate), getSqlFrom(toDate));
} | 3.68 |
flink_Costs_getNetworkCost | /**
* Gets the network cost.
*
* @return The network cost, in bytes to be transferred.
*/
public double getNetworkCost() {
return networkCost;
} | 3.68 |
flink_MultiStateKeyIterator_remove | /** Removes the current key from <b>ALL</b> known states in the state backend. */
@Override
public void remove() {
if (currentKey == null) {
return;
}
for (StateDescriptor<?, ?> descriptor : descriptors) {
try {
State state =
backend.getPartitionedState(
VoidNamespace.INSTANCE,
VoidNamespaceSerializer.INSTANCE,
descriptor);
state.clear();
} catch (Exception e) {
throw new RuntimeException(
"Failed to drop partitioned state from state backend", e);
}
}
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectFirstOrderByNullsLastDescendingScript | /**
 * Tests a select with an "order by" clause with nulls last and descending direction.
*/
@Test
public void testSelectFirstOrderByNullsLastDescendingScript() {
FieldReference fieldReference = new FieldReference(STRING_FIELD);
SelectFirstStatement stmt = selectFirst(fieldReference)
.from(new TableReference(ALTERNATE_TABLE))
.orderBy(fieldReference.desc().nullsLast());
assertEquals("Select with descending order by", expectedSelectFirstOrderByNullsLastDesc(), testDialect.convertStatementToSQL(stmt));
} | 3.68 |
framework_BackEndDataProvider_setSortOrder | /**
* Sets a single sort order to use as the default sorting for this data
* provider. This overrides the sorting set by any other method that
* manipulates the default sorting of this data provider.
* <p>
* The default sorting is used if the query defines no sorting. The default
* sorting is also used to determine the ordering of items that are
* considered equal by the sorting defined in the query.
*
* @see #setSortOrders(List)
*
* @param sortOrder
* a sort order to set, or <code>null</code> to clear any
* previously set sort orders
*/
default void setSortOrder(QuerySortOrder sortOrder) {
if (sortOrder == null) {
setSortOrders(Collections.emptyList());
} else {
setSortOrders(Collections.singletonList(sortOrder));
}
} | 3.68 |
framework_VTooltip_setQuickOpenTimeout | /**
* Sets the time (in ms) that determines when {@link #getQuickOpenDelay()}
* should be used instead of {@link #getOpenDelay()}. The quick open delay
* is used when the tooltip has very recently been shown, is currently
* hidden but about to be shown again.
*
* @param quickOpenTimeout
* The quick open timeout (in ms)
*/
public void setQuickOpenTimeout(int quickOpenTimeout) {
this.quickOpenTimeout = quickOpenTimeout;
} | 3.68 |
flink_StatePathExtractor_getStateFilePathFromStreamStateHandle | /**
* This method recursively looks for the contained {@link FileStateHandle}s in a given {@link
* StreamStateHandle}.
*
* @param handle the {@code StreamStateHandle} to check for a contained {@code FileStateHandle}
* @return the file path if the given {@code StreamStateHandle} contains a {@code
* FileStateHandle} object, null otherwise
*/
private @Nullable Path getStateFilePathFromStreamStateHandle(StreamStateHandle handle) {
if (handle instanceof FileStateHandle) {
return ((FileStateHandle) handle).getFilePath();
} else if (handle instanceof OperatorStateHandle) {
return getStateFilePathFromStreamStateHandle(
((OperatorStateHandle) handle).getDelegateStateHandle());
} else if (handle instanceof KeyedStateHandle) {
if (handle instanceof KeyGroupsStateHandle) {
return getStateFilePathFromStreamStateHandle(
((KeyGroupsStateHandle) handle).getDelegateStateHandle());
}
// other KeyedStateHandles either do not contains FileStateHandle, or are not part of a
// savepoint
}
return null;
} | 3.68 |
flink_JobVertex_getMaxParallelism | /**
* Gets the maximum parallelism for the task.
*
* @return The maximum parallelism for the task.
*/
public int getMaxParallelism() {
return maxParallelism;
} | 3.68 |
flink_MapValue_putAll | /*
* (non-Javadoc)
* @see java.util.Map#putAll(java.util.Map)
*/
@Override
public void putAll(final Map<? extends K, ? extends V> m) {
this.map.putAll(m);
} | 3.68 |
flink_CatalogManager_schemaExists | /**
* Checks if there is a database with given name in a given catalog or is there a temporary
* object registered within a given catalog and database.
*
* <p><b>NOTE:</b>It is primarily used for interacting with Calcite's schema.
*
* @param catalogName filter for the catalog part of the schema
* @param databaseName filter for the database part of the schema
* @return true if a subschema exists
*/
public boolean schemaExists(String catalogName, String databaseName) {
return temporaryDatabaseExists(catalogName, databaseName)
|| permanentDatabaseExists(catalogName, databaseName);
} | 3.68 |
hbase_StoreFileInfo_isLink | /** Returns True if the store file is a link */
public boolean isLink() {
return this.link != null && this.reference == null;
} | 3.68 |
hbase_HRegionServer_createNewReplicationInstance | //
// Main program and support routines
//
/**
* Load the replication executorService objects, if any
*/
private static void createNewReplicationInstance(Configuration conf, HRegionServer server,
FileSystem walFs, Path walDir, Path oldWALDir, WALFactory walFactory) throws IOException {
// read in the name of the source replication class from the config file.
String sourceClassname = conf.get(HConstants.REPLICATION_SOURCE_SERVICE_CLASSNAME,
HConstants.REPLICATION_SERVICE_CLASSNAME_DEFAULT);
// read in the name of the sink replication class from the config file.
String sinkClassname = conf.get(HConstants.REPLICATION_SINK_SERVICE_CLASSNAME,
HConstants.REPLICATION_SINK_SERVICE_CLASSNAME_DEFAULT);
// If both the sink and the source class names are the same, then instantiate
// only one object.
if (sourceClassname.equals(sinkClassname)) {
server.replicationSourceHandler = newReplicationInstance(sourceClassname,
ReplicationSourceService.class, conf, server, walFs, walDir, oldWALDir, walFactory);
server.replicationSinkHandler = (ReplicationSinkService) server.replicationSourceHandler;
server.sameReplicationSourceAndSink = true;
} else {
server.replicationSourceHandler = newReplicationInstance(sourceClassname,
ReplicationSourceService.class, conf, server, walFs, walDir, oldWALDir, walFactory);
server.replicationSinkHandler = newReplicationInstance(sinkClassname,
ReplicationSinkService.class, conf, server, walFs, walDir, oldWALDir, walFactory);
server.sameReplicationSourceAndSink = false;
}
} | 3.68 |
pulsar_GeoPersistentReplicator_getProducerName | /**
* @return Producer name format : replicatorPrefix.localCluster-->remoteCluster
*/
@Override
protected String getProducerName() {
return getReplicatorName(replicatorPrefix, localCluster) + REPL_PRODUCER_NAME_DELIMITER + remoteCluster;
} | 3.68 |
Activiti_BpmnActivityBehavior_performIgnoreConditionsOutgoingBehavior | /**
* Performs the default outgoing BPMN 2.0 behavior (@see {@link #performDefaultOutgoingBehavior(ExecutionEntity)}), but without checking the conditions on the outgoing sequence flow.
* <p>
* This means that every outgoing sequence flow is selected for continuing the process instance, regardless of having a condition or not. In case of multiple outgoing sequence flow, multiple
* parallel paths of executions will be created.
*/
public void performIgnoreConditionsOutgoingBehavior(ExecutionEntity activityExecution) {
performOutgoingBehavior(activityExecution,
false,
false);
} | 3.68 |
flink_VarCharType_ofEmptyLiteral | /**
 * The SQL standard defines that character string literals are allowed to be zero-length strings
 * (i.e., to contain no characters) even though it is not permitted to declare a type of length
 * zero. This also has implications for variable-length character strings during type inference
 * because any fixed-length character string should be convertible to a variable-length one.
*
* <p>This method enables this special kind of character string.
*
* <p>Zero-length character strings have no serializable string representation.
*/
public static VarCharType ofEmptyLiteral() {
return new VarCharType(EMPTY_LITERAL_LENGTH, false);
} | 3.68 |
flink_ShuffleDescriptor_isUnknown | /**
* Returns whether the partition is known and registered with the {@link ShuffleMaster}
* implementation.
*
* <p>When a partition consumer is being scheduled, it can happen that the producer of the
* partition (consumer input channel) has not been scheduled and its location and other relevant
* data is yet to be defined. To proceed with the consumer deployment, currently unknown input
* channels have to be marked with placeholders. The placeholder is a special implementation of
* the shuffle descriptor: {@link UnknownShuffleDescriptor}.
*
* <p>Note: this method is not supposed to be overridden in concrete shuffle implementation. The
* only class where it returns {@code true} is {@link UnknownShuffleDescriptor}.
*
* @return whether the partition producer has been ever deployed and the corresponding shuffle
* descriptor is obtained from the {@link ShuffleMaster} implementation.
*/
default boolean isUnknown() {
return false;
} | 3.68 |
querydsl_BooleanBuilder_or | /**
* Create the union of this and the given predicate
*
* @param right right hand side of {@code or} operation
* @return the current object
*/
public BooleanBuilder or(@Nullable Predicate right) {
if (right != null) {
if (predicate == null) {
predicate = right;
} else {
predicate = ExpressionUtils.or(predicate, right);
}
}
return this;
} | 3.68 |
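A short usage sketch of accumulating a union with the method above; the string path stands in for a property of a generated query type and is purely illustrative:
import com.querydsl.core.BooleanBuilder;
import com.querydsl.core.types.Predicate;
import com.querydsl.core.types.dsl.Expressions;
import com.querydsl.core.types.dsl.StringPath;

public class BooleanBuilderOrSketch {

    /** Builds name = n1 OR name = n2 OR ...; the result is null when no names are given. */
    static Predicate anyNameMatches(Iterable<String> names) {
        StringPath name = Expressions.stringPath("name"); // hypothetical property path
        BooleanBuilder builder = new BooleanBuilder();
        for (String candidate : names) {
            builder.or(name.eq(candidate)); // each call widens the union
        }
        return builder.getValue();
    }
}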
framework_ComboBox_setPageLength | /**
* Sets the page length for the suggestion popup. Setting the page length to
* 0 will disable suggestion popup paging (all items visible).
*
* @param pageLength
* the pageLength to set
*/
public void setPageLength(int pageLength) {
this.pageLength = pageLength;
markAsDirty();
} | 3.68 |
hadoop_NMContainerTokenSecretManager_isValidStartContainerRequest | /**
* Container will be remembered based on expiration time of the container
* token used for starting the container. It is safe to use expiration time
* as there is one to many mapping between expiration time and containerId.
* @return true if the current token identifier is not present in cache.
*/
public synchronized boolean isValidStartContainerRequest(
ContainerTokenIdentifier containerTokenIdentifier) {
removeAnyContainerTokenIfExpired();
Long expTime = containerTokenIdentifier.getExpiryTimeStamp();
List<ContainerId> containers =
this.recentlyStartedContainerTracker.get(expTime);
if (containers == null
|| !containers.contains(containerTokenIdentifier.getContainerID())) {
return true;
} else {
return false;
}
} | 3.68 |
hbase_MultithreadedTableMapper_getMapperClass | /**
* Get the application's mapper class.
* @param <K2> the map's output key type
* @param <V2> the map's output value type
* @param job the job
* @return the mapper class to run
*/
@SuppressWarnings("unchecked")
public static <K2, V2> Class<Mapper<ImmutableBytesWritable, Result, K2, V2>>
getMapperClass(JobContext job) {
return (Class<Mapper<ImmutableBytesWritable, Result, K2, V2>>) job.getConfiguration()
.getClass(MAPPER_CLASS, Mapper.class);
} | 3.68 |
hudi_SourceCommitCallback_onCommit | /**
* Performs some action on successful Hudi commit like committing offsets to Kafka.
*
* @param lastCkptStr last checkpoint string.
*/
default void onCommit(String lastCkptStr) {
} | 3.68 |
framework_AbstractOrderedLayout_getComponent | /**
* Returns the component at the given position.
*
* @param index
* The position of the component.
* @return The component at the given index.
* @throws IndexOutOfBoundsException
* If the index is out of range.
*/
public Component getComponent(int index) throws IndexOutOfBoundsException {
return components.get(index);
} | 3.68 |
framework_CalendarMonthDropHandler_drop | /*
* (non-Javadoc)
*
* @see
* com.vaadin.terminal.gwt.client.ui.dd.VAbstractDropHandler#drop(com.vaadin
* .terminal.gwt.client.ui.dd.VDragEvent)
*/
@Override
public boolean drop(VDragEvent drag) {
if (isLocationValid(drag.getElementOver())) {
updateDropDetails(drag);
deEmphasis();
return super.drop(drag);
} else {
deEmphasis();
return false;
}
} | 3.68 |
hbase_Procedure_getTimeout | /** Returns the timeout in msec */
public int getTimeout() {
return timeout;
} | 3.68 |
framework_ListDataSource_sort | /**
* Sort entire container according to a {@link Comparator}.
*
* @param comparator
* a comparator object, which compares two data source entries
* (beans/pojos)
*/
public void sort(Comparator<T> comparator) {
Collections.sort(ds, comparator);
getHandlers().forEach(dch -> dch.dataUpdated(0, ds.size()));
} | 3.68 |
cron-utils_CronDefinitionBuilder_cron4j | /**
* Creates CronDefinition instance matching cron4j specification.
*
* @return CronDefinition instance, never null;
*/
private static CronDefinition cron4j() {
return CronDefinitionBuilder.defineCron()
.withMinutes().withValidRange(0, 59).withStrictRange().and()
.withHours().withValidRange(0, 23).withStrictRange().and()
.withDayOfMonth().withValidRange(0, 31).supportsL().withStrictRange().and()
.withMonth().withValidRange(1, 12).withStrictRange().and()
.withDayOfWeek().withValidRange(0, 6).withMondayDoWValue(1).withStrictRange().and()
.matchDayOfWeekAndDayOfMonth()
.instance();
} | 3.68 |
hibernate-validator_DefaultGetterPropertySelectionStrategy_isGetter | /**
* Checks whether the given executable is a valid JavaBean getter method, which
* is the case if
* <ul>
* <li>its name starts with "get" and it has a return type but no parameter or</li>
* <li>its name starts with "is", it has no parameter and is returning
* {@code boolean} or</li>
* <li>its name starts with "has", it has no parameter and is returning
* {@code boolean} (HV-specific, not mandated by the JavaBeans spec).</li>
* </ul>
*
* @param executable The executable of interest.
*
* @return {@code true}, if the given executable is a JavaBean getter method,
* {@code false} otherwise.
*/
private static boolean isGetter(ConstrainableExecutable executable) {
if ( executable.getParameterTypes().length != 0 ) {
return false;
}
String methodName = executable.getName();
//<PropertyType> get<PropertyName>()
if ( methodName.startsWith( GETTER_PREFIX_GET ) && executable.getReturnType() != void.class ) {
return true;
}
//boolean is<PropertyName>()
else if ( methodName.startsWith( GETTER_PREFIX_IS ) && executable.getReturnType() == boolean.class ) {
return true;
}
//boolean has<PropertyName>()
else if ( methodName.startsWith( GETTER_PREFIX_HAS ) && executable.getReturnType() == boolean.class ) {
return true;
}
return false;
} | 3.68 |
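For reference, a few illustrative signatures checked against the rules above; the methods are hypothetical and only serve to mark which ones the strategy would treat as getters:
public class GetterExamples {
    private String name;
    private boolean active;

    public String getName() { return name; }          // getter: "get" prefix, non-void return, no parameters
    public boolean isActive() { return active; }      // getter: "is" prefix, boolean return, no parameters
    public boolean hasName() { return name != null; } // getter: "has" prefix, boolean return (HV-specific)
    public String getName(String fallback) {          // not a getter: takes a parameter
        return name != null ? name : fallback;
    }
    public void getNothing() { }                       // not a getter: void return type
}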
hadoop_ConfigurationWithLogging_get | /**
* See {@link Configuration#get(String, String)}.
*/
@Override
public String get(String name, String defaultValue) {
String value = super.get(name, defaultValue);
log.info("Got {} = '{}' (default '{}')", name,
redactor.redact(name, value), redactor.redact(name, defaultValue));
return value;
} | 3.68 |
hadoop_StageConfig_getProgressable | /**
* Get optional progress callback.
* @return callback or null
*/
public Progressable getProgressable() {
return progressable;
} | 3.68 |
hmily_GsonUtils_toObjectMap | /**
 * Converts a JSON string to a map of objects keyed by property name.
 *
 * @param json the JSON string
 * @return the resulting map
*/
public Map<String, Object> toObjectMap(final String json) {
return GSON_MAP.fromJson(json, new TypeToken<LinkedHashMap<String, Object>>() {
}.getType());
} | 3.68 |
flink_RocksDBResourceContainer_getReadOptions | /** Gets the RocksDB {@link ReadOptions} to be used for read operations. */
public ReadOptions getReadOptions() {
ReadOptions opt = new ReadOptions();
handlesToClose.add(opt);
// add user-defined options factory, if specified
if (optionsFactory != null) {
opt = optionsFactory.createReadOptions(opt, handlesToClose);
}
return opt;
} | 3.68 |
flink_TypeSerializerSnapshotSerializationUtil_readSerializerSnapshot | /**
* Reads from a data input view a {@link TypeSerializerSnapshot} that was previously written
 * using {@link #writeSerializerSnapshot(DataOutputView, TypeSerializerSnapshot)}.
*
* @param in the data input view
* @param userCodeClassLoader the user code class loader to use
* @return the read serializer configuration snapshot
*/
public static <T> TypeSerializerSnapshot<T> readSerializerSnapshot(
DataInputView in, ClassLoader userCodeClassLoader) throws IOException {
final TypeSerializerSnapshotSerializationProxy<T> proxy =
new TypeSerializerSnapshotSerializationProxy<>(userCodeClassLoader);
proxy.read(in);
return proxy.getSerializerSnapshot();
} | 3.68 |
framework_BrowserWindowOpener_getParameter | /**
* Gets the value of a parameter set using
* {@link #setParameter(String, String)}. If there is no parameter with the
* given name, <code>null</code> is returned.
*
* @param name
* the name of the parameter to get, not <code>null</code>
 * @return the value of the parameter, or <code>null</code> if there is no
 *         parameter
*
* @see #setParameter(String, String)
* @see #getParameter(String)
*/
public String getParameter(String name) {
if (name == null) {
throw new IllegalArgumentException("Null not allowed");
}
return getState(false).parameters.get(name);
} | 3.68 |
hbase_SnapshotFileCache_getFiles | /** Returns the hfiles in the snapshot when <tt>this</tt> was made. */
public Collection<String> getFiles() {
return this.files;
} | 3.68 |
flink_ConfigOptions_defaultValues | /**
* Creates a ConfigOption with the given default value.
*
* @param values The list of default values for the config option
* @return The config option with the default value.
*/
@SafeVarargs
public final ConfigOption<List<E>> defaultValues(E... values) {
return new ConfigOption<>(
key, clazz, ConfigOption.EMPTY_DESCRIPTION, Arrays.asList(values), true);
} | 3.68 |
hadoop_SQLDelegationTokenSecretManager_getCurrentKeyId | /**
* Obtains the value of the last delegation key id.
* @return Last delegation key id.
*/
@Override
public int getCurrentKeyId() {
try {
return selectKeyId();
} catch (SQLException e) {
throw new RuntimeException(
"Failed to get delegation key id in SQL secret manager", e);
}
} | 3.68 |
hudi_HoodieRecordGlobalLocation_fromLocal | /**
* Returns the global record location from local.
*/
public static HoodieRecordGlobalLocation fromLocal(String partitionPath, HoodieRecordLocation localLoc) {
return new HoodieRecordGlobalLocation(partitionPath, localLoc.getInstantTime(), localLoc.getFileId());
} | 3.68 |
hadoop_WasbFsck_setMockFileSystemForTesting | /**
* For testing purposes, set the file system to use here instead of relying on
* getting it from the FileSystem class based on the URI.
*
* @param fileSystem
* The file system to use.
*/
@VisibleForTesting
public void setMockFileSystemForTesting(FileSystem fileSystem) {
this.mockFileSystemForTesting = fileSystem;
} | 3.68 |
flink_HiveParserDefaultGraphWalker_dispatch | // Dispatch the current operator.
public void dispatch(Node nd, Stack<Node> ndStack) throws SemanticException {
dispatchAndReturn(nd, ndStack);
} | 3.68 |
hudi_HoodieCopyOnWriteTableInputFormat_listStatusForNonHoodiePaths | /**
 * Returns the file statuses for the non-Hudi paths.
 *
 * @param job the job configuration
 * @return the matching file statuses
 * @throws IOException if listing the paths fails
*/
public FileStatus[] listStatusForNonHoodiePaths(JobConf job) throws IOException {
return doListStatus(job);
} | 3.68 |
hmily_HmilyAutoConfiguration_hmilyTransactionAspect | /**
 * Creates the Spring Boot Hmily transaction aspect bean.
 *
 * @return the Spring Boot Hmily transaction aspect
*/
@Bean
public SpringHmilyTransactionAspect hmilyTransactionAspect() {
return new SpringHmilyTransactionAspect();
} | 3.68 |