name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
shardingsphere-elasticjob_JobConfiguration_addExtraConfigurations | /**
* Add extra configurations.
*
* @param extraConfig job extra configuration
* @return job configuration builder
*/
public Builder addExtraConfigurations(final JobExtraConfiguration extraConfig) {
extraConfigurations.add(extraConfig);
return this;
} | 3.68 |
framework_RpcDataProviderExtension_updateRowData | /**
* Informs the client side that data of a row has been modified in the data
* source.
*
* @param itemId
the item Id of the row that was updated
*/
public void updateRowData(Object itemId) {
if (updatedItemIds == null) {
updatedItemIds = new LinkedHashSet<Object>();
}
if (updatedItemIds.isEmpty()) {
// At least one new item will be updated. Mark as dirty to actually
// update before response to client.
markAsDirty();
}
updatedItemIds.add(itemId);
} | 3.68 |
hbase_CachedClusterId_setClusterId | /**
* Succeeds only once, when setting to a non-null value. Overwrites are not allowed.
*/
private void setClusterId(ClusterId id) {
if (id == null || isClusterIdSet.get()) {
return;
}
clusterId = id;
isClusterIdSet.set(true);
} | 3.68 |
flink_HiveStatsUtil_getPartitionColumnStats | /**
* Get statistics for a specific partition column.
*
* @param client the Hive metastore client wrapper
* @param hiveTable the Hive table
* @param logicalType the specific partition column's logical type
* @param partitionValue the partition value for the specific partition column
* @param partitionColIndex the index of the specific partition column
* @param defaultPartitionName the default partition name for null value
* @return the column statistics, or null if the column type is not supported
*/
private static CatalogColumnStatisticsDataBase getPartitionColumnStats(
HiveMetastoreClientWrapper client,
Table hiveTable,
LogicalType logicalType,
Object partitionValue,
int partitionColIndex,
String defaultPartitionName) {
switch (logicalType.getTypeRoot()) {
case CHAR:
case VARCHAR:
{
Long maxLength = null;
Double avgLength = null;
Long nullCount = 0L;
if (partitionValue == null) {
nullCount =
getPartitionColumnNullCount(
client, hiveTable, partitionColIndex, defaultPartitionName);
} else {
long valLength = ((String) partitionValue).length();
maxLength = valLength;
avgLength = (double) valLength;
}
return new CatalogColumnStatisticsDataString(
maxLength, avgLength, 1L, nullCount);
}
case BOOLEAN:
{
long trueCount = 0L;
long falseCount = 0L;
Long nullCount = 0L;
if (partitionValue == null) {
nullCount =
getPartitionColumnNullCount(
client, hiveTable, partitionColIndex, defaultPartitionName);
} else {
Boolean boolVal = (Boolean) partitionValue;
if (boolVal) {
trueCount = 1L;
} else {
falseCount = 1L;
}
}
return new CatalogColumnStatisticsDataBoolean(trueCount, falseCount, nullCount);
}
case TINYINT:
case SMALLINT:
case INTEGER:
case BIGINT:
{
Long min = null;
Long max = null;
Long nullCount = 0L;
if (partitionValue == null) {
nullCount =
getPartitionColumnNullCount(
client, hiveTable, partitionColIndex, defaultPartitionName);
} else {
min = ((Number) partitionValue).longValue();
max = min;
}
return new CatalogColumnStatisticsDataLong(min, max, 1L, nullCount);
}
case FLOAT:
case DOUBLE:
case DECIMAL:
{
Double min = null;
Double max = null;
Long nullCount = 0L;
if (partitionValue == null) {
nullCount =
getPartitionColumnNullCount(
client, hiveTable, partitionColIndex, defaultPartitionName);
} else {
min = ((Number) partitionValue).doubleValue();
max = min;
}
return new CatalogColumnStatisticsDataDouble(min, max, 1L, nullCount);
}
case DATE:
case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
{
Date min = null;
Date max = null;
Long nullCount = 0L;
if (partitionValue == null) {
nullCount =
getPartitionColumnNullCount(
client, hiveTable, partitionColIndex, defaultPartitionName);
} else {
if (partitionValue instanceof LocalDate) {
min = new Date(((LocalDate) partitionValue).toEpochDay());
} else if (partitionValue instanceof LocalDateTime) {
min =
new Date(
((LocalDateTime) partitionValue)
.toLocalDate()
.toEpochDay());
}
max = min;
}
return new CatalogColumnStatisticsDataDate(min, max, 1L, nullCount);
}
default:
return null;
}
} | 3.68 |
morf_SchemaUtils_defaultValue | /**
* @see org.alfasoftware.morf.metadata.SchemaUtils.ColumnBuilder#defaultValue(java.lang.String)
*/
@Override
public ColumnBuilder defaultValue(String value) {
return new ColumnBuilderImpl(this, isNullable(), value, isPrimaryKey(), isAutoNumbered(), getAutoNumberStart());
} | 3.68 |
MagicPlugin_BaseSpell_isBypassRegionPermission | /**
* @return Whether or not this spell can bypass region permissions such as custom world-guard flags.
*/
public boolean isBypassRegionPermission() {
return bypassRegionPermission;
} | 3.68 |
hbase_MetricsSource_getEditsFiltered | /**
* Gets the number of edits in this source's queue logs so far that were not eligible for replication.
* @return logEditsFiltered the count of non-replicable edits filtered from this queue's logs.
*/
public long getEditsFiltered() {
return this.singleSourceSource.getEditsFiltered();
} | 3.68 |
hadoop_BCFile_getCurrentPos | /**
* Get the current position in file.
*
* @return The current byte offset in underlying file.
* @throws IOException
*/
long getCurrentPos() throws IOException {
return fsOut.getPos() + fsBufferedOutput.size();
} | 3.68 |
hadoop_TypedBytesInput_readDouble | /**
* Reads the double following a <code>Type.DOUBLE</code> code.
* @return the obtained double
* @throws IOException
*/
public double readDouble() throws IOException {
return in.readDouble();
} | 3.68 |
Activiti_ObjectValueExpression_getType | /**
* Answer <code>null</code>.
*/
@Override
public Class<?> getType(ELContext context) {
return null;
} | 3.68 |
hbase_SnapshotQuotaObserverChore_pruneTableSnapshots | /**
* Removes the snapshot entries that are present in Quota table but not in snapshotsToComputeSize
* @param snapshotsToComputeSize list of snapshots to be persisted
*/
void pruneTableSnapshots(Multimap<TableName, String> snapshotsToComputeSize) throws IOException {
Multimap<TableName, String> existingSnapshotEntries = QuotaTableUtil.getTableSnapshots(conn);
Multimap<TableName, String> snapshotEntriesToRemove = HashMultimap.create();
for (Entry<TableName, Collection<String>> entry : existingSnapshotEntries.asMap().entrySet()) {
TableName tn = entry.getKey();
Set<String> setOfSnapshots = new HashSet<>(entry.getValue());
for (String snapshot : snapshotsToComputeSize.get(tn)) {
setOfSnapshots.remove(snapshot);
}
for (String snapshot : setOfSnapshots) {
snapshotEntriesToRemove.put(tn, snapshot);
}
}
removeExistingTableSnapshotSizes(snapshotEntriesToRemove);
} | 3.68 |
hbase_HRegionServer_reportForDuty | /*
* Let the master know we're here. Run initialization using parameters passed to us by the master.
* @return A Map of key/value configurations we got from the Master else null if we failed to
* register.
*/
private RegionServerStartupResponse reportForDuty() throws IOException {
if (this.masterless) {
return RegionServerStartupResponse.getDefaultInstance();
}
ServerName masterServerName = createRegionServerStatusStub(true);
RegionServerStatusService.BlockingInterface rss = rssStub;
if (masterServerName == null || rss == null) {
return null;
}
RegionServerStartupResponse result = null;
try {
rpcServices.requestCount.reset();
rpcServices.rpcGetRequestCount.reset();
rpcServices.rpcScanRequestCount.reset();
rpcServices.rpcFullScanRequestCount.reset();
rpcServices.rpcMultiRequestCount.reset();
rpcServices.rpcMutateRequestCount.reset();
LOG.info("reportForDuty to master=" + masterServerName + " with port="
+ rpcServices.getSocketAddress().getPort() + ", startcode=" + this.startcode);
long now = EnvironmentEdgeManager.currentTime();
int port = rpcServices.getSocketAddress().getPort();
RegionServerStartupRequest.Builder request = RegionServerStartupRequest.newBuilder();
if (!StringUtils.isBlank(useThisHostnameInstead)) {
request.setUseThisHostnameInstead(useThisHostnameInstead);
}
request.setPort(port);
request.setServerStartCode(this.startcode);
request.setServerCurrentTime(now);
result = rss.regionServerStartup(null, request.build());
} catch (ServiceException se) {
IOException ioe = ProtobufUtil.getRemoteException(se);
if (ioe instanceof ClockOutOfSyncException) {
LOG.error(HBaseMarkers.FATAL, "Master rejected startup because clock is out of sync", ioe);
// Re-throw IOE will cause RS to abort
throw ioe;
} else if (ioe instanceof ServerNotRunningYetException) {
LOG.debug("Master is not running yet");
} else {
LOG.warn("error telling master we are up", se);
}
rssStub = null;
}
return result;
} | 3.68 |
hmily_HmilyLoadBalanceUtils_doSelect | /**
* Do select referer.
*
* @param <T> the type parameter
* @param defaultReferer the default referer
* @param refererList the referer list
* @return the referer
*/
public static <T> Referer<T> doSelect(final Referer<T> defaultReferer, final List<Referer<T>> refererList) {
final HmilyTransactionContext hmilyTransactionContext = HmilyContextHolder.get();
if (Objects.isNull(hmilyTransactionContext)) {
return defaultReferer;
}
//if try
String key = defaultReferer.getInterface().getName();
if (hmilyTransactionContext.getAction() == HmilyActionEnum.TRYING.getCode()) {
URL_MAP.put(key, defaultReferer.getUrl());
return defaultReferer;
}
final URL orlUrl = URL_MAP.get(key);
URL_MAP.remove(key);
if (Objects.nonNull(orlUrl)) {
for (Referer<T> inv : refererList) {
if (Objects.equals(inv.getUrl(), orlUrl)) {
return inv;
}
}
}
return defaultReferer;
} | 3.68 |
flink_Schema_columnByExpression | /**
* Declares a computed column that is appended to this schema.
*
* <p>See {@link #columnByExpression(String, Expression)} for a detailed explanation.
*
* <p>This method uses a SQL expression that can be easily persisted in a durable catalog.
*
* <p>Example: {@code .columnByExpression("ts", "CAST(json_obj.ts AS TIMESTAMP(3))")}
*
* @param columnName column name
* @param sqlExpression computation of the column using SQL
*/
public Builder columnByExpression(String columnName, String sqlExpression) {
return columnByExpression(columnName, new SqlCallExpression(sqlExpression));
} | 3.68 |
morf_AbstractSqlDialectTest_shouldGenerateCorrectSqlForMathOperations2 | /**
* Test for proper SQL mathematics operation generation from DSL expressions.
* <p>
* Since it is a chain of operations, and all of the operations takes a field
* or a literal as a second operand, there should be no brackets in the
* generated SQL.
* </p>
*/
@Test
public void shouldGenerateCorrectSqlForMathOperations2() {
String result = testDialect.getSqlFrom(field("a").divideBy(field("b")).plus(literal(100)));
assertEquals(expectedSqlForMathOperations2(), result);
} | 3.68 |
hbase_MultiByteBuff_getShort | /**
* Returns the short value at the current position. Also advances the position by the size of
* short
* @return the short value at the current position
*/
@Override
public short getShort() {
checkRefCount();
int remaining = this.curItem.remaining();
if (remaining >= Bytes.SIZEOF_SHORT) {
return this.curItem.getShort();
}
short n = 0;
n = (short) (n ^ (get() & 0xFF));
n = (short) (n << 8);
n = (short) (n ^ (get() & 0xFF));
return n;
} | 3.68 |
hbase_Mutation_extraHeapSize | /**
* Subclasses should override this method to add the heap size of their own fields.
* @return the heap size to add (will be aligned).
*/
protected long extraHeapSize() {
return 0L;
} | 3.68 |
morf_MergeStatement_getSelectStatement | /**
* Gets the select statement that selects the values to merge
* into the table.
*
* @return the select statement.
*/
public SelectStatement getSelectStatement() {
return selectStatement;
} | 3.68 |
shardingsphere-elasticjob_JobConfiguration_monitorExecution | /**
* Set enable or disable monitor execution.
*
* <p>
* For a short-interval job, it is better to disable monitor execution to improve performance.
* If monitor execution is disabled, repeated data fetches cannot be prevented and failover is unavailable, so please keep the job idempotent.
* For a long-interval job, it is better to enable monitor execution to guarantee that data is fetched exactly once.
* </p>
*
* @param monitorExecution monitor job execution status
* @return ElasticJob configuration builder
*/
public Builder monitorExecution(final boolean monitorExecution) {
this.monitorExecution = monitorExecution;
return this;
} | 3.68 |
framework_BeanUtil_getPropertyType | /**
* Returns the type of the property with the given name and declaring class.
* The property name may refer to a nested property, e.g.
* "property.subProperty" or "property.subProperty1.subProperty2". The
* property must have a public read method (or a chain of read methods in
* case of a nested property).
*
* @param beanType
* the type declaring the property
* @param propertyName
* the name of the property
* @return the property type
* @throws IntrospectionException
* if the introspection fails
*/
public static Class<?> getPropertyType(Class<?> beanType,
String propertyName) throws IntrospectionException {
PropertyDescriptor descriptor = getPropertyDescriptor(beanType,
propertyName);
if (descriptor != null) {
return descriptor.getPropertyType();
} else {
return null;
}
} | 3.68 |
hbase_HBaseFsckRepair_removeParentInMeta | /*
* Remove parent
*/
public static void removeParentInMeta(Configuration conf, RegionInfo hri) throws IOException {
throw new UnsupportedOperationException("HBCK1 is read-only now, use HBCK2 instead");
} | 3.68 |
flink_Time_minutes | /** Creates a new {@link Time} that represents the given number of minutes. */
public static Time minutes(long minutes) {
return of(minutes, TimeUnit.MINUTES);
} | 3.68 |
hadoop_RegistryTypeUtils_validateServiceRecord | /**
* Validate the record by checking for null fields and other invalid
* conditions
* @param path path for exceptions
* @param record record to validate. May be null
* @throws InvalidRecordException on invalid entries
*/
public static void validateServiceRecord(String path, ServiceRecord record)
throws InvalidRecordException {
if (record == null) {
throw new InvalidRecordException(path, "Null record");
}
if (!ServiceRecord.RECORD_TYPE.equals(record.type)) {
throw new InvalidRecordException(path,
"invalid record type field: \"" + record.type + "\"");
}
if (record.external != null) {
for (Endpoint endpoint : record.external) {
validateEndpoint(path, endpoint);
}
}
if (record.internal != null) {
for (Endpoint endpoint : record.internal) {
validateEndpoint(path, endpoint);
}
}
} | 3.68 |
aws-saas-boost_KeycloakKeyProvider_jwksUrl | // https://www.keycloak.org/docs/latest/securing_apps/index.html#_certificate_endpoint
protected static URL jwksUrl() {
URL url = null;
try {
url = new URL(KEYCLOAK_HOST + "/realms/" + KEYCLOAK_REALM + "/protocol/openid-connect/certs");
} catch (MalformedURLException e) {
LOGGER.error(Utils.getFullStackTrace(e));
}
return url;
} | 3.68 |
rocketmq-connect_AbstractConnectController_taskConfigs | /**
* Get the task configs of a connector.
*
* @param connName the connector name
* @return the list of task configurations for the connector
*/
public List<TaskInfo> taskConfigs(final String connName) {
ClusterConfigState configState = configManagementService.snapshot();
List<TaskInfo> result = new ArrayList<>();
for (int i = 0; i < configState.taskCount(connName); i++) {
ConnectorTaskId id = new ConnectorTaskId(connName, i);
result.add(new TaskInfo(id, configState.rawTaskConfig(id)));
}
return result;
} | 3.68 |
flink_FileInputFormat_extractFileExtension | /**
* Returns the extension of a file name (!= a path).
*
* @return the extension of the file name or {@code null} if there is no extension.
*/
protected static String extractFileExtension(String fileName) {
checkNotNull(fileName);
int lastPeriodIndex = fileName.lastIndexOf('.');
if (lastPeriodIndex < 0) {
return null;
} else {
return fileName.substring(lastPeriodIndex + 1);
}
} | 3.68 |
framework_CustomizedSystemMessages_setSessionExpiredMessage | /**
* Sets the message of the notification. Set to null for no message. If both
* caption and message are null, the client automatically forwards to
* sessionExpiredUrl after the timeout timer expires. The timer uses the value
* read from HTTPSession.getMaxInactiveInterval().
*
* @param sessionExpiredMessage
* the message
*/
public void setSessionExpiredMessage(String sessionExpiredMessage) {
this.sessionExpiredMessage = sessionExpiredMessage;
} | 3.68 |
flink_InPlaceMutableHashTable_rebuild | /** Same as above, but the number of bucket segments of the new table can be specified. */
private void rebuild(long newNumBucketSegments) throws IOException {
// Get new bucket segments
releaseBucketSegments();
allocateBucketSegments((int) newNumBucketSegments);
T record = buildSideSerializer.createInstance();
try {
EntryIterator iter = getEntryIterator();
recordArea.resetAppendPosition();
recordArea.setWritePosition(0);
while ((record = iter.next(record)) != null && !closed) {
final int hashCode = MathUtils.jenkinsHash(buildSideComparator.hash(record));
final int bucket = hashCode & numBucketsMask;
final int bucketSegmentIndex =
bucket >>> numBucketsPerSegmentBits; // which segment contains the bucket
final MemorySegment bucketSegment = bucketSegments[bucketSegmentIndex];
final int bucketOffset =
(bucket & numBucketsPerSegmentMask)
<< bucketSizeBits; // offset of the bucket in the segment
final long firstPointer = bucketSegment.getLong(bucketOffset);
long ptrToAppended = recordArea.noSeekAppendPointerAndRecord(firstPointer, record);
bucketSegment.putLong(bucketOffset, ptrToAppended);
}
recordArea.freeSegmentsAfterAppendPosition();
holes = 0;
} catch (EOFException ex) {
throw new RuntimeException(
"Bug in InPlaceMutableHashTable: we shouldn't get out of memory during a rebuild, "
+ "because we aren't allocating any new memory.");
}
} | 3.68 |
hbase_MobFile_getScanner | /**
* Internal use only. This is used by the sweeper.
* @return The store file scanner.
*/
public StoreFileScanner getScanner() throws IOException {
List<HStoreFile> sfs = new ArrayList<>();
sfs.add(sf);
List<StoreFileScanner> sfScanners = StoreFileScanner.getScannersForStoreFiles(sfs, false, true,
false, false, sf.getMaxMemStoreTS());
return sfScanners.get(0);
} | 3.68 |
hadoop_DirectoryDiffListFactory_randomLevel | /**
* Returns the level of a skip list node.
* @return A value in the range 0 to maxLevels.
*/
public static int randomLevel() {
final Random r = ThreadLocalRandom.current();
for (int level = 0; level < maxLevels; level++) {
// skip to the next level with probability 1/skipInterval
if (r.nextInt(skipInterval) > 0) {
return level;
}
}
return maxLevels;
} | 3.68 |
flink_JoinOperator_projectFirst | /**
* Continues a ProjectJoin transformation and adds fields of the first join input.
*
* <p>If the first join input is a {@link Tuple} {@link DataSet}, fields can be selected by
* their index. If the first join input is not a Tuple DataSet, no parameters should be
* passed.
*
* <p>Fields of the first and second input can be added by chaining the method calls of
* {@link
* org.apache.flink.api.java.operators.JoinOperator.JoinProjection#projectFirst(int...)} and
* {@link
* org.apache.flink.api.java.operators.JoinOperator.JoinProjection#projectSecond(int...)}.
*
* @param firstFieldIndexes If the first input is a Tuple DataSet, the indexes of the
* selected fields. For a non-Tuple DataSet, do not provide parameters. The order of
fields in the output tuple is defined by the order of the field indexes.
* @return An extended JoinProjection.
* @see Tuple
* @see DataSet
*/
protected JoinProjection<I1, I2> projectFirst(int... firstFieldIndexes) {
boolean isFirstTuple;
isFirstTuple = ds1.getType() instanceof TupleTypeInfo && firstFieldIndexes.length > 0;
if (!isFirstTuple && firstFieldIndexes.length != 0) {
// field index provided for non-Tuple input
throw new IllegalArgumentException(
"Input is not a Tuple. Call projectFirst() without arguments to include it.");
} else if (firstFieldIndexes.length > (22 - this.fieldIndexes.length)) {
// too many field indexes provided
throw new IllegalArgumentException(
"You may select only up to twenty-two (22) fields in total.");
}
int offset = this.fieldIndexes.length;
if (isFirstTuple) {
// extend index and flag arrays
this.fieldIndexes =
Arrays.copyOf(
this.fieldIndexes,
this.fieldIndexes.length + firstFieldIndexes.length);
this.isFieldInFirst =
Arrays.copyOf(
this.isFieldInFirst,
this.isFieldInFirst.length + firstFieldIndexes.length);
// copy field indexes
int maxFieldIndex = numFieldsDs1;
for (int i = 0; i < firstFieldIndexes.length; i++) {
// check if indexes in range
Preconditions.checkElementIndex(firstFieldIndexes[i], maxFieldIndex);
this.isFieldInFirst[offset + i] = true;
this.fieldIndexes[offset + i] = firstFieldIndexes[i];
}
} else {
// extend index and flag arrays
this.fieldIndexes = Arrays.copyOf(this.fieldIndexes, this.fieldIndexes.length + 1);
this.isFieldInFirst =
Arrays.copyOf(this.isFieldInFirst, this.isFieldInFirst.length + 1);
// add input object to output tuple
this.isFieldInFirst[offset] = true;
this.fieldIndexes[offset] = -1;
}
return this;
} | 3.68 |
flink_CopyOnWriteSkipListStateMap_updateStat | /** Update some statistics. */
private void updateStat() {
requestCount++;
} | 3.68 |
hbase_KeyValue_codeToType | /**
* Cannot rely on enum ordinals. They change if an item is removed or moved, so we define our own codes.
* @param b the kv serialized byte[] to process
* @return Type associated with passed code.
*/
public static Type codeToType(final byte b) {
Type t = codeArray[b & 0xff];
if (t != null) {
return t;
}
throw new RuntimeException("Unknown code " + b);
} | 3.68 |
morf_SpreadsheetDataSetConsumer_close | /**
* @see org.alfasoftware.morf.dataset.DataSetConsumer#close(CloseState)
*/
@Override
public void close(CloseState closeState) {
try {
// Create the index
createIndex();
workbook.write();
workbook.close();
} catch (Exception e) {
throw new RuntimeException("Error closing writable workbook", e);
}
} | 3.68 |
pulsar_ManagedLedgerImpl_getFirstPositionAndCounter | /**
* Get the first position written in the managed ledger, along with the associated counter.
*/
Pair<PositionImpl, Long> getFirstPositionAndCounter() {
PositionImpl pos;
long count;
Pair<PositionImpl, Long> lastPositionAndCounter;
do {
pos = getFirstPosition();
lastPositionAndCounter = getLastPositionAndCounter();
count = lastPositionAndCounter.getRight()
- getNumberOfEntries(Range.openClosed(pos, lastPositionAndCounter.getLeft()));
} while (pos.compareTo(getFirstPosition()) != 0
|| lastPositionAndCounter.getLeft().compareTo(getLastPosition()) != 0);
return Pair.of(pos, count);
} | 3.68 |
incubator-hugegraph-toolchain_MetricsManager_all | /**
* The nesting level is too deep; the server may need to be optimized first.
*/
public Map<String, Map<String, Object>> all() {
return this.metricsAPI.all();
} | 3.68 |
flink_ExceptionUtils_assertThrowable | /**
* The same as {@link #findThrowable(Throwable, Predicate)}, but rethrows original exception if
* the expected exception was not found.
*/
public static <T extends Throwable> void assertThrowable(
T throwable, Predicate<Throwable> predicate) throws T {
if (!findThrowable(throwable, predicate).isPresent()) {
throw (T) throwable;
}
} | 3.68 |
hbase_HRegion_prepareDelete | /**
* Prepare a delete for a row mutation processor
* @param delete The passed delete is modified by this method. WARNING!
*/
private void prepareDelete(Delete delete) throws IOException {
// Check to see if this is a deleteRow insert
if (delete.getFamilyCellMap().isEmpty()) {
for (byte[] family : this.htableDescriptor.getColumnFamilyNames()) {
// Don't eat the timestamp
delete.addFamily(family, delete.getTimestamp());
}
} else {
for (byte[] family : delete.getFamilyCellMap().keySet()) {
if (family == null) {
throw new NoSuchColumnFamilyException("Empty family is invalid");
}
checkFamily(family, delete.getDurability());
}
}
} | 3.68 |
framework_DesignContext_readDesign | /**
*
* Reads the given design node and populates the given component with the
* corresponding component tree.
* <p>
* Additionally registers the component id, local id and caption of the
* given component and all its children in the context
*
* @param componentDesign
* The design element containing the description of the component
* to be created
* @param component
* The component which corresponds to the design element
*/
public void readDesign(Element componentDesign, Component component) {
component.readDesign(componentDesign, this);
// Get the ids and the caption of the component and store them in the
// maps of this design context.
org.jsoup.nodes.Attributes attributes = componentDesign.attributes();
// global id: only update the mapping, the id has already been set for
// the component
String id = component.getId();
if (id != null && !id.isEmpty()) {
boolean mappingExists = mapId(id, component);
if (mappingExists) {
throw new DesignException(
"The following global id is not unique: " + id);
}
}
// local id: this is not a property of a component, so need to fetch it
// from the attributes of componentDesign
if (attributes.hasKey(LOCAL_ID_ATTRIBUTE)) {
String localId = attributes.get(LOCAL_ID_ATTRIBUTE);
boolean mappingExists = setComponentLocalId(component, localId);
if (mappingExists) {
throw new DesignException(
"the following local id is not unique: " + localId);
}
}
// caption: a property of a component, possibly not unique
String caption = component.getCaption();
if (caption != null) {
mapCaption(caption, component);
}
} | 3.68 |
shardingsphere-elasticjob_JobNodeStorage_fillEphemeralJobNode | /**
* Fill ephemeral job node.
*
* @param node node
* @param value data of job node
*/
public void fillEphemeralJobNode(final String node, final Object value) {
regCenter.persistEphemeral(jobNodePath.getFullPath(node), value.toString());
} | 3.68 |
hibernate-validator_JavaBeanField_getAccessible | /**
* Returns an accessible copy of the given member.
*/
@IgnoreForbiddenApisErrors(reason = "SecurityManager is deprecated in JDK17")
private static Field getAccessible(Field original) {
SecurityManager sm = System.getSecurityManager();
if ( sm != null ) {
sm.checkPermission( HibernateValidatorPermission.ACCESS_PRIVATE_MEMBERS );
}
Class<?> clazz = original.getDeclaringClass();
return run( GetDeclaredField.andMakeAccessible( clazz, original.getName() ) );
} | 3.68 |
graphhopper_ResponsePath_getAscend | /**
* This method returns the total elevation change (going upwards) in meter.
* <p>
*
* @return ascend in meter
*/
public double getAscend() {
return ascend;
} | 3.68 |
hadoop_RouterFedBalance_getSrcPath | /**
* Get src uri from Router.
*/
private Path getSrcPath(String fedPath) throws IOException {
String address = getConf().getTrimmed(
RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY,
RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_DEFAULT);
InetSocketAddress routerSocket = NetUtils.createSocketAddr(address);
RouterClient rClient = new RouterClient(routerSocket, getConf());
try {
MountTableManager mountTable = rClient.getMountTableManager();
MountTable entry = MountTableProcedure.getMountEntry(fedPath, mountTable);
if (entry == null) {
throw new IllegalArgumentException(
"The mount point doesn't exist. path=" + fedPath);
} else if (entry.getDestinations().size() > 1) {
throw new IllegalArgumentException(
"The mount point has more than one destination. path=" + fedPath);
} else {
String ns = entry.getDestinations().get(0).getNameserviceId();
String path = entry.getDestinations().get(0).getDest();
return new Path("hdfs://" + ns + path);
}
} finally {
rClient.close();
}
} | 3.68 |
flink_FileSource_forRecordStreamFormat | /**
* Builds a new {@code FileSource} using a {@link StreamFormat} to read record-by-record from a
* file stream.
*
* <p>When possible, stream-based formats are generally easier to use than (and preferable to)
* file-based formats, because they support better default behavior around I/O batching or progress
* tracking (checkpoints).
*
* <p>Stream formats also automatically de-compress files based on the file extension. This
* supports files ending in ".deflate" (Deflate), ".xz" (XZ), ".bz2" (BZip2), ".gz", ".gzip"
* (GZip).
*/
public static <T> FileSourceBuilder<T> forRecordStreamFormat(
final StreamFormat<T> streamFormat, final Path... paths) {
return forBulkFileFormat(new StreamFormatAdapter<>(streamFormat), paths);
} | 3.68 |
hbase_RegionStateStore_deleteMergeQualifiers | /**
* Deletes merge qualifiers for the specified merge region.
* @param connection connection we're using
* @param mergeRegion the merged region
*/
public void deleteMergeQualifiers(RegionInfo mergeRegion) throws IOException {
// NOTE: We are doing a new hbase:meta read here.
Cell[] cells = getRegionCatalogResult(mergeRegion).rawCells();
if (cells == null || cells.length == 0) {
return;
}
Delete delete = new Delete(mergeRegion.getRegionName());
List<byte[]> qualifiers = new ArrayList<>();
for (Cell cell : cells) {
if (!CatalogFamilyFormat.isMergeQualifierPrefix(cell)) {
continue;
}
byte[] qualifier = CellUtil.cloneQualifier(cell);
qualifiers.add(qualifier);
delete.addColumns(HConstants.CATALOG_FAMILY, qualifier, HConstants.LATEST_TIMESTAMP);
}
// There will be race condition that a GCMultipleMergedRegionsProcedure is scheduled while
// the previous GCMultipleMergedRegionsProcedure is still going on, in this case, the second
// GCMultipleMergedRegionsProcedure could delete the merged region by accident!
if (qualifiers.isEmpty()) {
LOG.info("No merged qualifiers for region " + mergeRegion.getRegionNameAsString()
+ " in meta table, they are cleaned up already, Skip.");
return;
}
try (Table table = master.getConnection().getTable(TableName.META_TABLE_NAME)) {
table.delete(delete);
}
LOG.info(
"Deleted merge references in " + mergeRegion.getRegionNameAsString() + ", deleted qualifiers "
+ qualifiers.stream().map(Bytes::toStringBinary).collect(Collectors.joining(", ")));
} | 3.68 |
pulsar_PulsarClientImplementationBindingImpl_jsonifySchemaInfoWithVersion | /**
* Jsonify the schema info with version.
*
* @param schemaInfoWithVersion the schema info with version
* @return the jsonified schema info with version
*/
public String jsonifySchemaInfoWithVersion(SchemaInfoWithVersion schemaInfoWithVersion) {
return SchemaUtils.jsonifySchemaInfoWithVersion(schemaInfoWithVersion);
} | 3.68 |
hbase_ConnectionOverAsyncConnection_closePool | // will be called from AsyncConnection, to avoid infinite loop as in the above method we will call
// AsyncConnection.close.
synchronized void closePool() {
ExecutorService batchPool = this.batchPool;
if (batchPool != null) {
ConnectionUtils.shutdownPool(batchPool);
this.batchPool = null;
}
} | 3.68 |
framework_PropertyValueGenerator_modifyFilter | /**
* Return an updated filter that should be compatible with the underlying
* container.
* <p>
* This function is called when setting a filter for this generated
* property. Returning null from this function causes
* GeneratedPropertyContainer to discard the filter and not use it.
* <p>
* By default this function throws UnsupportedFilterException.
*
* @param filter
* original filter for this property
* @return modified filter that is compatible with the underlying container
* @throws UnsupportedFilterException
* if the implementation doesn't support modifying the provided
* filter
*/
public Filter modifyFilter(Filter filter)
throws UnsupportedFilterException {
throw new UnsupportedFilterException(
"Filter" + filter + " is not supported");
} | 3.68 |
framework_CssLayoutConnector_updateCaption | /*
* (non-Javadoc)
*
* @see com.vaadin.client.HasComponentsConnector#updateCaption(com.vaadin
* .client.ComponentConnector)
*/
@Override
public void updateCaption(ComponentConnector child) {
Widget childWidget = child.getWidget();
int widgetPosition = getWidget().getWidgetIndex(childWidget);
String childId = child.getConnectorId();
VCaption caption = childIdToCaption.get(childId);
if (VCaption.isNeeded(child)) {
if (caption == null) {
caption = new VCaption(child, getConnection());
childIdToCaption.put(childId, caption);
}
if (!caption.isAttached()) {
// Insert caption at widget index == before widget
getWidget().insert(caption, widgetPosition);
}
caption.updateCaption();
} else if (caption != null) {
childIdToCaption.remove(childId);
getWidget().remove(caption);
}
} | 3.68 |
framework_VTabsheet_scrollRight | /**
* Finds a plausible scroll position to the closest tab on the right
* that hasn't been set hidden on the server. If a suitable tab is
* found, also sets the previous leftmost tab hidden and removes the
* first visible styles. Does not update the scroller index or set the
* new first visible style, in case there are multiple calls in a row.
* Does not update any visibilities or styles if a suitable tab is not
* found.
*
* @param currentFirstVisible
* the index of the current first visible tab
* @return the index of the closest visible tab to the right from the
* starting point, or {@code -1} if not found
*/
public int scrollRight(int currentFirstVisible) {
int nextVisible = getNextVisibleTab(currentFirstVisible);
if (nextVisible < 0) {
return -1;
}
Tab currentFirst = getTab(currentFirstVisible);
currentFirst.setVisible(false);
currentFirst.setStyleNames(
currentFirstVisible == tabsheet.activeTabIndex, false);
currentFirst.recalculateCaptionWidth();
return nextVisible;
} | 3.68 |
framework_AbstractConnector_getState | /**
* Returns the shared state object for this connector.
*
* Override this method to define the shared state type for your connector.
*
* @return the current shared state (never null)
*/
@Override
public SharedState getState() {
if (state == null) {
Profiler.enter("AbstractConnector.createState()");
state = createState();
Profiler.leave("AbstractConnector.createState()");
}
return state;
} | 3.68 |
flink_JobExecutionResult_getAllAccumulatorResults | /**
* Gets all accumulators produced by the job. The map contains the accumulators as mappings from
* the accumulator name to the accumulator value.
*
* @return A map containing all accumulators produced by the job.
*/
public Map<String, Object> getAllAccumulatorResults() {
return accumulatorResults.entrySet().stream()
.collect(
Collectors.toMap(
Map.Entry::getKey, entry -> entry.getValue().getUnchecked()));
} | 3.68 |
flink_TypeExtractionUtils_validateLambdaType | /**
* Checks whether the given type has the generic parameters declared in the class definition.
*
* @param t type to be validated
*/
public static void validateLambdaType(Class<?> baseClass, Type t) {
if (!(t instanceof Class)) {
return;
}
final Class<?> clazz = (Class<?>) t;
if (clazz.getTypeParameters().length > 0) {
throw new InvalidTypesException(
"The generic type parameters of '"
+ clazz.getSimpleName()
+ "' are missing. "
+ "In many cases lambda methods don't provide enough information for automatic type extraction when Java generics are involved. "
+ "An easy workaround is to use an (anonymous) class instead that implements the '"
+ baseClass.getName()
+ "' interface. "
+ "Otherwise the type has to be specified explicitly using type information.");
}
} | 3.68 |
dubbo_ConfigurationUtils_getSubIds | /**
* Search props and extract config ids
* <pre>
* # properties
* dubbo.registries.registry1.address=xxx
* dubbo.registries.registry2.port=xxx
*
* # extract ids
* Set configIds = getSubIds("dubbo.registries.")
*
* # result
* configIds: ["registry1", "registry2"]
* </pre>
*
* @param configMaps collection of configuration maps to search
* @param prefix the key prefix to match (a trailing dot is appended if missing)
* @return the set of extracted config ids
*/
public static <V extends Object> Set<String> getSubIds(Collection<Map<String, V>> configMaps, String prefix) {
if (!prefix.endsWith(".")) {
prefix += ".";
}
Set<String> ids = new LinkedHashSet<>();
for (Map<String, V> configMap : configMaps) {
Map<String, V> copy;
synchronized (configMap) {
copy = new HashMap<>(configMap);
}
for (Map.Entry<String, V> entry : copy.entrySet()) {
String key = entry.getKey();
V val = entry.getValue();
if (StringUtils.startsWithIgnoreCase(key, prefix)
&& key.length() > prefix.length()
&& !ConfigurationUtils.isEmptyValue(val)) {
String k = key.substring(prefix.length());
int endIndex = k.indexOf(".");
if (endIndex > 0) {
String id = k.substring(0, endIndex);
ids.add(id);
}
}
}
}
return ids;
} | 3.68 |
hudi_TableSchemaResolver_hasOperationField | /**
* NOTE: This method should only be used in tests
*
* @VisibleForTesting
*/
public boolean hasOperationField() {
try {
Schema tableAvroSchema = getTableAvroSchemaFromDataFile();
return tableAvroSchema.getField(HoodieRecord.OPERATION_METADATA_FIELD) != null;
} catch (Exception e) {
LOG.info(String.format("Failed to read operation field from avro schema (%s)", e.getMessage()));
return false;
}
} | 3.68 |
MagicPlugin_HeroesSpellSkill_getSkillName | /**
* This code is redundant, but unfortunately it needs to be, since we need to know the
* skill name for the super() constructor call.
*/
private static String getSkillName(Heroes heroes, String spellKey) {
Plugin magicPlugin = heroes.getServer().getPluginManager().getPlugin("Magic");
if (magicPlugin == null || (!(magicPlugin instanceof MagicAPI) && !magicPlugin.isEnabled())) {
heroes.getLogger().warning("MagicHeroes skills require the Magic plugin");
throw new RuntimeException("MagicHeroes skills require the Magic plugin");
}
MagicAPI api = (MagicAPI) magicPlugin;
MageController controller = api.getController();
// This is unfortunately needed because Heroes tries to load these skills before
// Magic has loaded spell configs
((MagicController)controller).checkPostStartupLoad();
SpellTemplate spellTemplate = controller.getSpellTemplate(spellKey);
if (spellTemplate == null) {
controller.getLogger().warning("Failed to load Magic skill spell: " + spellKey);
throw new RuntimeException("Failed to load Magic skill spell: " + spellKey);
}
String baseName = ChatColor.stripColor(spellTemplate.getName().replace(" ", ""));
return controller.getHeroesSkillPrefix() + baseName;
} | 3.68 |
hmily_TransactionImpl_getXid | /**
* Gets xid.
*
* @return the xid
*/
public XidImpl getXid() {
return xid;
} | 3.68 |
flink_InputChannel_checkError | /**
* Checks for an error and rethrows it if one was reported.
*
* <p>Note: Any {@link PartitionException} instances should not be transformed and make sure
* they are always visible in task failure cause.
*/
protected void checkError() throws IOException {
final Throwable t = cause.get();
if (t != null) {
if (t instanceof CancelTaskException) {
throw (CancelTaskException) t;
}
if (t instanceof IOException) {
throw (IOException) t;
} else {
throw new IOException(t);
}
}
} | 3.68 |
hudi_HoodieCombineHiveInputFormat_getOffset | /**
* Returns the start offset of the i<sup>th</sup> Path.
*/
@Override
public long getOffset(int i) {
return inputSplitShim.getOffset(i);
} | 3.68 |
hadoop_DecayRpcSchedulerDetailedMetrics_init | /**
* Initialize the metrics for JMX with priority levels.
* @param numLevels the number of priority levels.
*/
public void init(int numLevels) {
LOG.info("Initializing RPC stats for {} priority levels", numLevels);
queueNamesForLevels = new String[numLevels];
processingNamesForLevels = new String[numLevels];
for (int i = 0; i < numLevels; i++) {
queueNamesForLevels[i] = getQueueName(i+1);
processingNamesForLevels[i] = getProcessingName(i+1);
}
rpcQueueRates.init(queueNamesForLevels);
rpcProcessingRates.init(processingNamesForLevels);
} | 3.68 |
pulsar_PulsarRegistrationClient_updatedBookies | /**
* This method will receive metadata store notifications and then update the
* local cache in background sequentially.
*/
private void updatedBookies(Notification n) {
// make the notification callback run sequential in background.
final String path = n.getPath();
if (!path.startsWith(bookieReadonlyRegistrationPath) && !path.startsWith(bookieRegistrationPath)) {
// ignore unknown path
return;
}
if (path.equals(bookieReadonlyRegistrationPath) || path.equals(bookieRegistrationPath)) {
// ignore root path
return;
}
final BookieId bookieId = stripBookieIdFromPath(n.getPath());
sequencer.sequential(() -> {
switch (n.getType()) {
case Created:
log.info("Bookie {} created. path: {}", bookieId, n.getPath());
if (path.startsWith(bookieReadonlyRegistrationPath)) {
return getReadOnlyBookies().thenAccept(bookies ->
readOnlyBookiesWatchers.forEach(w ->
executor.execute(() -> w.onBookiesChanged(bookies))));
}
return getWritableBookies().thenAccept(bookies ->
writableBookiesWatchers.forEach(w ->
executor.execute(() -> w.onBookiesChanged(bookies))));
case Modified:
if (bookieId == null) {
return completedFuture(null);
}
log.info("Bookie {} modified. path: {}", bookieId, n.getPath());
if (path.startsWith(bookieReadonlyRegistrationPath)) {
return readBookieInfoAsReadonlyBookie(bookieId).thenApply(__ -> null);
}
return readBookieInfoAsWritableBookie(bookieId).thenApply(__ -> null);
case Deleted:
if (bookieId == null) {
return completedFuture(null);
}
log.info("Bookie {} deleted. path: {}", bookieId, n.getPath());
if (path.startsWith(bookieReadonlyRegistrationPath)) {
readOnlyBookieInfo.remove(bookieId);
return getReadOnlyBookies().thenAccept(bookies -> {
readOnlyBookiesWatchers.forEach(w ->
executor.execute(() -> w.onBookiesChanged(bookies)));
});
}
if (path.startsWith(bookieRegistrationPath)) {
writableBookieInfo.remove(bookieId);
return getWritableBookies().thenAccept(bookies -> {
writableBookiesWatchers.forEach(w ->
executor.execute(() -> w.onBookiesChanged(bookies)));
});
}
return completedFuture(null);
default:
return completedFuture(null);
}
});
} | 3.68 |
hbase_AbstractFSWALProvider_getWALArchiveDirectoryName | /**
* Construct the directory name for all old WALs on a given server. The default old WALs dir looks
* like: <code>hbase/oldWALs</code>. If you config hbase.separate.oldlogdir.by.regionserver to
* true, it looks like <code>hbase//oldWALs/kalashnikov.att.net,61634,1486865297088</code>.
* @param serverName Server name formatted as described in {@link ServerName}
* @return the relative WAL directory name
*/
public static String getWALArchiveDirectoryName(Configuration conf, final String serverName) {
StringBuilder dirName = new StringBuilder(HConstants.HREGION_OLDLOGDIR_NAME);
if (conf.getBoolean(SEPARATE_OLDLOGDIR, DEFAULT_SEPARATE_OLDLOGDIR)) {
dirName.append(Path.SEPARATOR);
dirName.append(serverName);
}
return dirName.toString();
} | 3.68 |
hadoop_AbfsInputStreamStatisticsImpl_seekBackwards | /**
* Seek backwards, incrementing the seek and backward seek counters.
*
* @param negativeOffset how far was the seek?
* This is expected to be negative.
*/
@Override
public void seekBackwards(long negativeOffset) {
seekOps.incrementAndGet();
ioStatisticsStore.incrementCounter(StreamStatisticNames.STREAM_READ_SEEK_BACKWARD_OPERATIONS);
ioStatisticsStore.incrementCounter(StreamStatisticNames.STREAM_READ_SEEK_BYTES_BACKWARDS, negativeOffset);
} | 3.68 |
framework_Embedded_setMimeType | /**
* Sets the mimeType, the MIME-Type of the object.
*
* @param mimeType
* the mimeType to set.
*/
public void setMimeType(String mimeType) {
String oldMimeType = getMimeType();
if (mimeType != oldMimeType
|| (mimeType != null && !mimeType.equals(oldMimeType))) {
getState().mimeType = mimeType;
if ("application/x-shockwave-flash".equals(mimeType)) {
/*
* Automatically add wmode transparent as we use lots of
* floating layers in Vaadin. If developers need better flash
* performance, they can override this value programmatically
* back to "window" (the default).
*/
if (getParameter("wmode") == null) {
setParameter("wmode", "transparent");
}
}
}
} | 3.68 |
framework_VAbstractCalendarPanel_setParentField | /**
* Sets the parent date field widget.
*
* @param parent
* the parent widget
*/
public void setParentField(VDateField<R> parent) {
this.parent = parent;
} | 3.68 |
hbase_CompactingMemStore_preFlushSeqIDEstimation | /**
* This method is called before the flush is executed.
* @return an estimation (lower bound) of the unflushed sequence id in memstore after the flush is
* executed. if memstore will be cleared returns {@code HConstants.NO_SEQNUM}.
*/
@Override
public long preFlushSeqIDEstimation() {
if (compositeSnapshot) {
return HConstants.NO_SEQNUM;
}
Segment segment = getLastSegment();
if (segment == null) {
return HConstants.NO_SEQNUM;
}
return segment.getMinSequenceId();
} | 3.68 |
flink_TableFunction_collect | /**
* Emits an (implicit or explicit) output row.
*
* <p>If null is emitted as an explicit row, it will be skipped by the runtime. For implicit
* rows, the row's field will be null.
*
* @param row the output row
*/
protected final void collect(T row) {
collector.collect(row);
} | 3.68 |
flink_NFACompiler_checkPatternWindowTimes | /** Check pattern window times between events. */
private void checkPatternWindowTimes() {
windowTime.ifPresent(
windowTime -> {
if (windowTimes.values().stream().anyMatch(time -> time > windowTime)) {
throw new MalformedPatternException(
"The window length between the previous and current event cannot be larger than the window length between the first and last event for a Pattern.");
}
});
} | 3.68 |
dubbo_AwaitingNonWebApplicationListener_releaseOnExit | /**
* @param applicationContext
* @since 2.7.8
*/
private void releaseOnExit(ConfigurableApplicationContext applicationContext) {
ApplicationModel applicationModel = DubboBeanUtils.getApplicationModel(applicationContext);
if (applicationModel == null) {
return;
}
ShutdownHookCallbacks shutdownHookCallbacks =
applicationModel.getBeanFactory().getBean(ShutdownHookCallbacks.class);
if (shutdownHookCallbacks != null) {
shutdownHookCallbacks.addCallback(this::release);
}
} | 3.68 |
morf_HumanReadableStatementProducer_getUpgradeStepVersion | /**
* Gets the version the upgrade {@code step} belongs in.
* First attempts to pull the version from the {@code @Version}
* annotation, otherwise from the package name.
*
* @param step the upgrade step.
* @return the version the upgrade step belongs in.
*/
private String getUpgradeStepVersion(UpgradeStep step) {
Version versionAnnotation = step.getClass().getAnnotation(Version.class);
if (versionAnnotation!=null) {
return "v".concat(versionAnnotation.value());
}
String version = step.getClass().getPackage().getName();
version = version.substring(version.lastIndexOf('.') + 1);
return version.replace('_', '.');
} | 3.68 |
hadoop_ActiveAuditManagerS3A_activeSpan | /**
* Get the active span.
* This is the wrapped span, not the inner one, and it is
* of that type.
* @return the active WrappingAuditSpan
*/
private WrappingAuditSpan activeSpan() {
return activeSpanMap.getForCurrentThread();
} | 3.68 |
flink_Tuple1_toString | /**
* Creates a string representation of the tuple in the form (f0), where the individual fields
* are the value returned by calling {@link Object#toString} on that field.
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
return "(" + StringUtils.arrayAwareToString(this.f0) + ")";
} | 3.68 |
framework_VAbstractCalendarPanel_setShowISOWeekNumbers | /**
* Sets whether ISO 8601 week numbers should be shown in the value selector
* or not. ISO 8601 defines that a week always starts with a Monday so the
* week numbers are only shown if this is the case.
*
* @param showISOWeekNumbers
* {@code true} if week number should be shown, {@code false}
* otherwise
*/
public void setShowISOWeekNumbers(boolean showISOWeekNumbers) {
this.showISOWeekNumbers = showISOWeekNumbers;
if (initialRenderDone && isBelowMonth(resolution)) {
clearCalendarBody(false);
buildCalendarBody();
}
} | 3.68 |
pulsar_NamespacesBase_internalGetBacklogQuotaMap | /**
* Base method for getBackLogQuotaMap v1 and v2.
* Note: don't re-use this logic.
*/
protected void internalGetBacklogQuotaMap(AsyncResponse asyncResponse) {
validateNamespacePolicyOperationAsync(namespaceName, PolicyName.BACKLOG, PolicyOperation.READ)
.thenCompose(__ -> namespaceResources().getPoliciesAsync(namespaceName))
.thenAccept(policiesOpt -> {
Map<BacklogQuotaType, BacklogQuota> backlogQuotaMap = policiesOpt.orElseThrow(() ->
new RestException(Response.Status.NOT_FOUND, "Namespace does not exist"))
.backlog_quota_map;
asyncResponse.resume(backlogQuotaMap);
})
.exceptionally(ex -> {
resumeAsyncResponseExceptionally(asyncResponse, ex);
log.error("[{}] Failed to get backlog quota map on namespace {}", clientAppId(), namespaceName, ex);
return null;
});
} | 3.68 |
hadoop_SampleQuantiles_query | /**
* Get the estimated value at the specified quantile.
*
* @param quantile Queried quantile, e.g. 0.50 or 0.99.
* @return Estimated value at that quantile.
*/
private long query(double quantile) {
Preconditions.checkState(!samples.isEmpty(), "no data in estimator");
int rankMin = 0;
int desired = (int) (quantile * count);
ListIterator<SampleItem> it = samples.listIterator();
SampleItem prev = null;
SampleItem cur = it.next();
for (int i = 1; i < samples.size(); i++) {
prev = cur;
cur = it.next();
rankMin += prev.g;
if (rankMin + cur.g + cur.delta > desired + (allowableError(i) / 2)) {
return prev.value;
}
}
// edge case of wanting max value
return samples.get(samples.size() - 1).value;
} | 3.68 |
hbase_MultiRowRangeFilter_isExclusive | /**
* Returns true if the current range's key is exclusive
*/
public boolean isExclusive() {
return exclusive;
} | 3.68 |
hadoop_WordList_setWords | /**
* Sets the words.
*
* Note: this API is only for Jackson JSON deserialization.
*/
public void setWords(Map<String, Integer> list) {
this.list = list;
} | 3.68 |
hbase_Scan_getAllowPartialResults | /**
* Returns true when the constructor of this scan understands that the results they will see may
* only represent a partial portion of a row. The entire row would be retrieved by subsequent
* calls to {@link ResultScanner#next()}
*/
public boolean getAllowPartialResults() {
return allowPartialResults;
} | 3.68 |
hmily_HmilyTacTransactionManager_begin | /**
* Begin hmily transaction.
*
* @param point proceeding join point
* @return the hmily transaction
*/
public HmilyTransaction begin(final ProceedingJoinPoint point) {
//Create the global transaction and create a participant
HmilyTransaction globalHmilyTransaction = createHmilyTransaction();
HmilyRepositoryStorage.createHmilyTransaction(globalHmilyTransaction);
final HmilyParticipant hmilyParticipant = buildHmilyParticipant(globalHmilyTransaction.getTransId());
HmilyRepositoryStorage.createHmilyParticipant(hmilyParticipant);
globalHmilyTransaction.registerParticipant(hmilyParticipant);
//save tacTransaction in threadLocal
HmilyTransactionHolder.getInstance().set(globalHmilyTransaction);
//set TacTransactionContext this context transfer remote
HmilyTransactionContext context = new HmilyTransactionContext();
//set action is try
context.setAction(HmilyActionEnum.TRYING.getCode());
context.setTransId(globalHmilyTransaction.getTransId());
context.setRole(HmilyRoleEnum.START.getCode());
context.setTransType(TransTypeEnum.TAC.name());
// set transaction isolation level
MethodSignature signature = (MethodSignature) point.getSignature();
Method method = signature.getMethod();
final HmilyTAC hmilyTAC = method.getAnnotation(HmilyTAC.class);
context.setIsolationLevel(hmilyTAC.isolationLevel().getValue());
context.setLockRetryInterval(hmilyTAC.lockRetryInterval());
context.setLockRetryTimes(hmilyTAC.lockRetryTimes());
context.setParticipantId(hmilyParticipant.getParticipantId());
HmilyContextHolder.set(context);
log.debug("TAC-tm-begin ::: {}", globalHmilyTransaction);
return globalHmilyTransaction;
} | 3.68 |
zxing_UPCEANExtension2Support_parseExtensionString | /**
* @param raw raw content of extension
* @return formatted interpretation of raw content as a {@link Map} mapping
* one {@link ResultMetadataType} to appropriate value, or {@code null} if not known
*/
private static Map<ResultMetadataType,Object> parseExtensionString(String raw) {
if (raw.length() != 2) {
return null;
}
Map<ResultMetadataType,Object> result = new EnumMap<>(ResultMetadataType.class);
result.put(ResultMetadataType.ISSUE_NUMBER, Integer.valueOf(raw));
return result;
} | 3.68 |
morf_AlteredTable_getName | /**
* @see org.alfasoftware.morf.metadata.Table#getName()
*/
@Override
public String getName() {
return baseTable.getName();
} | 3.68 |
flink_HiveParserDDLSemanticAnalyzer_convertDescribeTable | /**
* A query like this will generate a tree as follows "describe formatted default.maptable
* partition (b=100) id;" TOK_TABTYPE TOK_TABNAME --> root for tablename, 2 child nodes mean DB
* specified default maptable TOK_PARTSPEC --> root node for partition spec. else columnName
* TOK_PARTVAL b 100 id --> root node for columnName formatted
*/
private Operation convertDescribeTable(HiveParserASTNode ast) {
HiveParserASTNode tableTypeExpr = (HiveParserASTNode) ast.getChild(0);
String dbName = null;
String tableName;
String colPath;
Map<String, String> partSpec;
HiveParserASTNode tableNode;
// process the first node to extract tablename
// tablename is either TABLENAME or DBNAME.TABLENAME if db is given
if (tableTypeExpr.getChild(0).getType() == HiveASTParser.TOK_TABNAME) {
tableNode = (HiveParserASTNode) tableTypeExpr.getChild(0);
if (tableNode.getChildCount() == 1) {
tableName = tableNode.getChild(0).getText();
} else if (tableNode.getChildCount() == 2) {
dbName = tableNode.getChild(0).getText();
tableName = dbName + "." + tableNode.getChild(1).getText();
} else {
// tablemname is CATALOGNAME.DBNAME.TABLENAME, which is not supported yet.
// todo: fix it in FLINK-29343
throw new ValidationException(
"Describe a table in specific catalog is not supported in HiveDialect,"
+ " please switch to Flink default dialect.");
}
} else {
throw new ValidationException(
tableTypeExpr.getChild(0).getText() + " is not an expected token type");
}
// process the second child,if exists, node to get partition spec(s)
partSpec = QualifiedNameUtil.getPartitionSpec(tableTypeExpr);
// process the third child node,if exists, to get partition spec(s)
colPath = QualifiedNameUtil.getColPath(tableTypeExpr, dbName, tableName, partSpec);
if (partSpec != null) {
handleUnsupportedOperation("DESCRIBE PARTITION is not supported");
}
if (!colPath.equals(tableName)) {
handleUnsupportedOperation("DESCRIBE COLUMNS is not supported");
}
boolean isExt = false;
boolean isFormatted = false;
if (ast.getChildCount() == 2) {
int descOptions = ast.getChild(1).getType();
isExt = descOptions == HiveASTParser.KW_EXTENDED;
isFormatted = descOptions == HiveASTParser.KW_FORMATTED;
if (descOptions == HiveASTParser.KW_PRETTY) {
handleUnsupportedOperation("DESCRIBE PRETTY is not supported.");
}
}
ObjectIdentifier tableIdentifier = parseObjectIdentifier(tableName);
return new HiveExecutableOperation(
new DescribeTableOperation(tableIdentifier, isExt || isFormatted));
} | 3.68 |
flink_JsonRowSerializationSchema_builder | /** Creates a builder for {@link JsonRowSerializationSchema.Builder}. */
public static Builder builder() {
return new Builder();
} | 3.68 |
framework_GridDropTarget_isDropAllowedOnRowsWhenSorted | /**
* Gets whether drops are allowed on rows as target, when the user has
* sorted the grid.
*
* @return whether drops are allowed on the grid's rows when the user has sorted
* the grid
* @since 8.2
*/
public boolean isDropAllowedOnRowsWhenSorted() {
return dropAllowedOnRowsWhenSorted;
} | 3.68 |
hudi_HoodieAvroUtils_getRecordColumnValues | /**
* Gets record column values into one object.
*
* @param record Hoodie record.
* @param columns Names of the columns to get values.
* @param schema {@link SerializableSchema} instance.
* @return Column value if a single column, or concatenated String values by comma.
*/
public static Object getRecordColumnValues(HoodieRecord record,
String[] columns,
SerializableSchema schema, boolean consistentLogicalTimestampEnabled) {
return getRecordColumnValues(record, columns, schema.get(), consistentLogicalTimestampEnabled);
} | 3.68 |
hibernate-validator_ConstraintTree_validateSingleConstraint | /**
* @return an {@link Optional#empty()} if there is no violation or a corresponding {@link ConstraintValidatorContextImpl}
* otherwise.
*/
protected final <V> Optional<ConstraintValidatorContextImpl> validateSingleConstraint(
ValueContext<?, ?> valueContext,
ConstraintValidatorContextImpl constraintValidatorContext,
ConstraintValidator<A, V> validator) {
boolean isValid;
try {
@SuppressWarnings("unchecked")
V validatedValue = (V) valueContext.getCurrentValidatedValue();
isValid = validator.isValid( validatedValue, constraintValidatorContext );
}
catch (RuntimeException e) {
if ( e instanceof ConstraintDeclarationException ) {
throw e;
}
throw LOG.getExceptionDuringIsValidCallException( e );
}
if ( !isValid ) {
//We do not add these violations yet, since we don't know how they are
//going to influence the final boolean evaluation
return Optional.of( constraintValidatorContext );
}
return Optional.empty();
} | 3.68 |
rocketmq-connect_ReporterManagerUtil_createWorkerErrorRecordReporter | /**
* create worker error record reporter
*
* @param connConfig connector configuration
* @param retryWithToleranceOperator retry-with-tolerance operator
* @param converter record converter
* @return the reporter, or null if the errant record reporter is not enabled
*/
public static WorkerErrorRecordReporter createWorkerErrorRecordReporter(
ConnectKeyValue connConfig,
RetryWithToleranceOperator retryWithToleranceOperator,
RecordConverter converter) {
DeadLetterQueueConfig deadLetterQueueConfig = new DeadLetterQueueConfig(connConfig);
if (deadLetterQueueConfig.enableErrantRecordReporter()) {
return new WorkerErrorRecordReporter(retryWithToleranceOperator, converter);
}
return null;
} | 3.68 |
hmily_ConsistentHashSelector_md5 | /**
* Computes the MD5 digest of the given string.
*
* @param value the value
* @return the MD5 digest bytes
*/
private byte[] md5(final String value) {
MessageDigest md5;
try {
md5 = MessageDigest.getInstance("MD5");
} catch (NoSuchAlgorithmException e) {
throw new IllegalStateException(e.getMessage(), e);
}
md5.reset();
byte[] bytes;
bytes = value.getBytes(StandardCharsets.UTF_8);
md5.update(bytes);
return md5.digest();
} | 3.68 |
flink_LongValue_setValue | /**
* Sets the value of the encapsulated long to the specified value.
*
* @param value The new value of the encapsulated long.
*/
public void setValue(final long value) {
this.value = value;
} | 3.68 |
framework_VCalendarPanel_adjustDateToFitInsideRange | /**
* Adjusts a date to fit inside the range, but only if it falls outside it.
*
* @param date the date to adjust
* @return the adjusted date
*/
private Date adjustDateToFitInsideRange(Date date) {
if (rangeStart != null && rangeStart.after(date)) {
date = (Date) rangeStart.clone();
} else if (rangeEnd != null && rangeEnd.before(date)) {
date = (Date) rangeEnd.clone();
}
return date;
} | 3.68 |
hbase_CheckAndMutate_getCompareOp | /** Returns the comparison operator */
public CompareOperator getCompareOp() {
return op;
} | 3.68 |
graphhopper_MaxHeight_create | /**
* Currently allows storing values from 0.1 up to max=0.1*2⁷ m, plus infinity. If a value is between the maximum and infinity
* it is assumed to use the maximum value.
*/
public static DecimalEncodedValue create() {
return new DecimalEncodedValueImpl(KEY, 7, 0, 0.1, false, false, true);
} | 3.68 |
framework_AbstractComponentConnector_getIcon | /**
* Gets the icon set for this component.
*
* @return the icon, or <code>null</code> if no icon has been defined.
*/
protected Icon getIcon() {
return getConnection().getIcon(getIconUri());
} | 3.68 |
hbase_HMaster_getMasterProcedureManagerHost | /** Returns the underlying MasterProcedureManagerHost */
@Override
public MasterProcedureManagerHost getMasterProcedureManagerHost() {
return mpmHost;
} | 3.68 |
flink_KeyedStateTransformation_window | /**
* Windows this transformation into a {@code WindowedOperatorTransformation}, which bootstraps
* state that can be restored by a {@code WindowOperator}. Elements are put into windows by a
* {@link WindowAssigner}. The grouping of elements is done both by key and by window.
*
* <p>A {@link org.apache.flink.streaming.api.windowing.triggers.Trigger} can be defined to
* specify when windows are evaluated. However, {@code WindowAssigners} have a default {@code
* Trigger} that is used if a {@code Trigger} is not specified.
*
* @param assigner The {@code WindowAssigner} that assigns elements to windows.
*/
public <W extends Window> WindowedStateTransformation<T, K, W> window(
WindowAssigner<? super T, W> assigner) {
return new WindowedStateTransformation<>(
stream, operatorMaxParallelism, keySelector, keyType, assigner);
} | 3.68 |
hadoop_StorageStatisticsFromIOStatistics_toLongStatistic | /**
* Convert a counter/gauge entry to a long statistics.
* @param e entry
* @return statistic
*/
private LongStatistic toLongStatistic(final Map.Entry<String, Long> e) {
return new LongStatistic(e.getKey(), e.getValue());
} | 3.68 |
framework_VCheckBoxGroup_getItem | /**
* Returns the JsonObject used to populate the CheckBox widget that contains
* given Element.
*
* @since 8.2
* @param element
* the element to search for
* @return the related JsonObject; {@code null} if not found
*/
public JsonObject getItem(Element element) {
return optionsToItems.entrySet().stream()
.filter(entry -> entry.getKey().getElement()
.isOrHasChild(element))
.map(entry -> entry.getValue()).findFirst().orElse(null);
} | 3.68 |
hibernate-validator_ConstraintDescriptorImpl_getMatchingConstraintValidatorDescriptors | /**
* Return all constraint validator descriptors (either generic or cross-parameter) which are registered for the
* constraint of this descriptor.
*
* @return The constraint validator descriptors applying to type of this constraint.
*/
public List<ConstraintValidatorDescriptor<T>> getMatchingConstraintValidatorDescriptors() {
return matchingConstraintValidatorDescriptors;
} | 3.68 |
flink_Pattern_where | /**
* Adds a condition that has to be satisfied by an event in order to be considered a match. If
* another condition has already been set, the new one is going to be combined with the previous
* with a logical {@code AND}. Otherwise, this is going to be the only condition.
*
* @param condition The condition as an {@link IterativeCondition}.
* @return The pattern with the new condition is set.
*/
public Pattern<T, F> where(IterativeCondition<F> condition) {
Preconditions.checkNotNull(condition, "The condition cannot be null.");
ClosureCleaner.clean(condition, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);
if (this.condition == null) {
this.condition = condition;
} else {
this.condition = new RichAndCondition<>(this.condition, condition);
}
return this;
} | 3.68 |
hadoop_ExternalSPSBlockMoveTaskHandler_cleanUp | /**
* Cleanup the resources.
*/
void cleanUp() {
blkMovementTracker.stopTracking();
if (movementTrackerThread != null) {
movementTrackerThread.interrupt();
}
} | 3.68 |