name | code_snippet | score |
---|---|---|
hadoop_NamenodeStatusReport_getNumStaleDatanodes | /**
* Get the number of stale nodes.
*
* @return The number of stale nodes.
*/
public int getNumStaleDatanodes() {
return this.staleDatanodes;
} | 3.68 |
flink_StreamConfig_setManagedMemoryFractionOperatorOfUseCase | /** Fraction of managed memory reserved for the given use case that this operator should use. */
public void setManagedMemoryFractionOperatorOfUseCase(
ManagedMemoryUseCase managedMemoryUseCase, double fraction) {
final ConfigOption<Double> configOption =
getManagedMemoryFractionConfigOption(managedMemoryUseCase);
checkArgument(
fraction >= 0.0 && fraction <= 1.0,
String.format(
"%s should be in range [0.0, 1.0], but was: %s",
configOption.key(), fraction));
config.setDouble(configOption, fraction);
} | 3.68 |
hbase_MasterProcedureManager_execProcedureWithRet | /**
* Execute a distributed procedure on cluster with return data.
* @param desc Procedure description
* @return data returned from the procedure execution, null if no data
*/
public byte[] execProcedureWithRet(ProcedureDescription desc) throws IOException {
return null;
} | 3.68 |
hadoop_IOStatisticsBinding_trackDurationConsumer | /**
* Given an IOException raising Consumer,
* return a new one which wraps the inner and tracks
* the duration of the operation, including whether
* it passes/fails.
* @param factory factory of duration trackers
* @param statistic statistic key
* @param input input consumer.
* @param <B> type of the consumer's argument.
* @return a new consumer which tracks duration and failure.
*/
public static <B> ConsumerRaisingIOE<B> trackDurationConsumer(
@Nullable DurationTrackerFactory factory,
String statistic,
ConsumerRaisingIOE<B> input) {
return (B t) -> {
// create the tracker outside try-with-resources so
// that failures can be set in the catcher.
DurationTracker tracker = createTracker(factory, statistic);
try {
// exec the input function and return its value
input.accept(t);
} catch (IOException | RuntimeException e) {
// input function failed: note it
tracker.failed();
// and rethrow
throw e;
} finally {
// update the tracker.
// this is called after the catch() call would have
// set the failed flag.
tracker.close();
}
};
} | 3.68 |
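The try/catch/finally wrapping above is a general pattern: create the tracker before the try so the catch block can mark failure, and close it in finally. A minimal standalone sketch of the same idea using a hypothetical tracker type (not Hadoop's real DurationTracker interface):

```java
import java.util.function.Consumer;

public class DurationTrackingDemo {
    // Hypothetical stand-in for a duration tracker: records elapsed time and failures.
    static final class SimpleTracker implements AutoCloseable {
        private final long startNanos = System.nanoTime();
        private boolean failed;
        void failed() { this.failed = true; }
        @Override public void close() {
            long elapsedMs = (System.nanoTime() - startNanos) / 1_000_000;
            System.out.println("duration=" + elapsedMs + "ms failed=" + failed);
        }
    }

    // Wrap a consumer so every invocation is timed and failures are recorded.
    static <T> Consumer<T> trackDuration(Consumer<T> input) {
        return t -> {
            SimpleTracker tracker = new SimpleTracker();
            try {
                input.accept(t);
            } catch (RuntimeException e) {
                tracker.failed();   // note the failure before rethrowing
                throw e;
            } finally {
                tracker.close();    // always update the tracker
            }
        };
    }

    public static void main(String[] args) {
        trackDuration((String s) -> System.out.println("processing " + s)).accept("hello");
    }
}
```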
flink_TwoInputTransformation_getOperatorFactory | /** Returns the {@code StreamOperatorFactory} of this Transformation. */
public StreamOperatorFactory<OUT> getOperatorFactory() {
return operatorFactory;
} | 3.68 |
hudi_AvroSchemaCompatibility_lookupWriterField | /**
* Identifies the writer field that corresponds to the specified reader field.
*
* <p>
* Matching includes reader name aliases.
* </p>
*
* @param writerSchema Schema of the record where to look for the writer field.
* @param readerField Reader field to identify the corresponding writer field
* of.
* @return the writer field, if any does correspond, or null if none does.
*/
public static Field lookupWriterField(final Schema writerSchema, final Field readerField) {
assert (writerSchema.getType() == Type.RECORD);
final List<Field> writerFields = new ArrayList<>();
final Field direct = writerSchema.getField(readerField.name());
if (direct != null) {
writerFields.add(direct);
}
for (final String readerFieldAliasName : readerField.aliases()) {
final Field writerField = writerSchema.getField(readerFieldAliasName);
if (writerField != null) {
writerFields.add(writerField);
}
}
switch (writerFields.size()) {
case 0:
return null;
case 1:
return writerFields.get(0);
default: {
throw new AvroRuntimeException(String.format(
"Reader record field %s matches multiple fields in writer record schema %s", readerField, writerSchema));
}
}
} | 3.68 |
hudi_CompactionUtils_getAllPendingLogCompactionOperations | /**
* Get all partition + file Ids with pending Log Compaction operations and their target log compaction instant time.
*/
public static Map<HoodieFileGroupId, Pair<String, HoodieCompactionOperation>> getAllPendingLogCompactionOperations(
HoodieTableMetaClient metaClient) {
List<Pair<HoodieInstant, HoodieCompactionPlan>> pendingLogCompactionPlanWithInstants =
getAllPendingLogCompactionPlans(metaClient);
return getAllPendingCompactionOperationsInPendingCompactionPlans(pendingLogCompactionPlanWithInstants);
} | 3.68 |
rocketmq-connect_RocketMQScheduledReporter_reportTimers | /**
* report timers
*
* @param timers timers to report, keyed by metric name
*/
private void reportTimers(SortedMap<MetricName, Timer> timers) {
timers.forEach((name, timer) -> {
send(name, timer.getMeanRate());
});
} | 3.68 |
hadoop_SampleQuantiles_getCount | /**
* Returns the number of items that the estimator has processed
*
* @return count total number of items processed
*/
synchronized public long getCount() {
return count;
} | 3.68 |
hbase_FailedServers_isFailedServer | /**
* Check if the server should be considered as bad. Clean the old entries of the list.
* @return true if the server is in the failed servers list
*/
public synchronized boolean isFailedServer(final Address address) {
if (failedServers.isEmpty()) {
return false;
}
final long now = EnvironmentEdgeManager.currentTime();
if (now > this.latestExpiry) {
failedServers.clear();
return false;
}
Long expiry = this.failedServers.get(address);
if (expiry == null) {
return false;
}
if (expiry >= now) {
return true;
} else {
this.failedServers.remove(address);
}
return false;
} | 3.68 |
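The method combines a global fast path (clear everything once latestExpiry has passed) with lazy per-address expiry. A minimal sketch of the same pattern, with plain String addresses and System.currentTimeMillis() standing in for HBase's Address and EnvironmentEdgeManager, so this is an illustration rather than the real class:

```java
import java.util.HashMap;
import java.util.Map;

public class ExpiringFailureList {
    private final Map<String, Long> failedServers = new HashMap<>();
    private final long failureWindowMs;
    private long latestExpiry = 0;

    public ExpiringFailureList(long failureWindowMs) {
        this.failureWindowMs = failureWindowMs;
    }

    public synchronized void addToFailedServers(String address) {
        long expiry = System.currentTimeMillis() + failureWindowMs;
        failedServers.put(address, expiry);
        latestExpiry = Math.max(latestExpiry, expiry);
    }

    public synchronized boolean isFailedServer(String address) {
        if (failedServers.isEmpty()) {
            return false;
        }
        long now = System.currentTimeMillis();
        if (now > latestExpiry) {          // everything has expired: clear in one shot
            failedServers.clear();
            return false;
        }
        Long expiry = failedServers.get(address);
        if (expiry == null) {
            return false;
        }
        if (expiry >= now) {
            return true;
        }
        failedServers.remove(address);     // lazily drop the stale entry
        return false;
    }
}
```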
flink_TypeStrategies_varyingString | /**
* A type strategy that ensures that the result type is either {@link LogicalTypeRoot#VARCHAR}
* or {@link LogicalTypeRoot#VARBINARY} from their corresponding non-varying roots.
*/
public static TypeStrategy varyingString(TypeStrategy initialStrategy) {
return new VaryingStringTypeStrategy(initialStrategy);
} | 3.68 |
framework_VAbstractCalendarPanel_selectFocused | /**
* Updates year, month, day from focusedDate to value
*/
@SuppressWarnings("deprecation")
private void selectFocused() {
if (focusedDate != null
&& isDateInsideRange(focusedDate, getResolution())) {
if (value == null) {
// No previously selected value (set to null on server side).
// Create a new date using current date and time
value = new Date();
}
/*
* #5594 set Date (day) to 1 in order to prevent any kind of
* wrapping of months when later setting the month. (e.g. 31 ->
* month with 30 days -> wraps to the 1st of the following month,
* e.g. 31st of May -> 31st of April = 1st of May)
*/
value.setDate(1);
if (value.getYear() != focusedDate.getYear()) {
value.setYear(focusedDate.getYear());
}
if (value.getMonth() != focusedDate.getMonth()) {
value.setMonth(focusedDate.getMonth());
}
// We always need to set the date, even if it hasn't changed, since
// it was forced to 1 above.
value.setDate(focusedDate.getDate());
selectDate(focusedDate);
} else {
getLogger()
.info("Trying to select the focused date which is NULL!");
}
} | 3.68 |
hadoop_FileMetadata_getKey | /**
* Returns the Azure storage key for the file. Used internally by the framework.
*
* @return The key for the file.
*/
public String getKey() {
return key;
} | 3.68 |
pulsar_AuthorizationService_allowTenantOperationAsync | /**
* Check whether the given role is allowed to perform the specified operation on a tenant.
*
* @param tenantName tenant name
* @param operation tenant operation
* @param role role name
* @param authData
* additional authdata in json for targeted authorization provider
* @return a future completing with true if the role is allowed to perform the operation
* (always true when authorization is disabled)
*/
public CompletableFuture<Boolean> allowTenantOperationAsync(String tenantName,
TenantOperation operation,
String role,
AuthenticationDataSource authData) {
if (!this.conf.isAuthorizationEnabled()) {
return CompletableFuture.completedFuture(true);
}
return provider.allowTenantOperationAsync(tenantName, role, operation, authData);
} | 3.68 |
hbase_MetricsAssignmentManager_getCloseProcMetrics | /** Returns Set of common metrics for CloseRegionProcedure */
public ProcedureMetrics getCloseProcMetrics() {
return closeProcMetrics;
} | 3.68 |
morf_AbstractSelectStatementBuilder_getWhereCriterion | /**
* Gets the where criterion.
*
* @return the where criterion
*/
Criterion getWhereCriterion() {
return whereCriterion;
} | 3.68 |
flink_DataSetUtils_zipWithIndex | /**
* Method that assigns a unique {@link Long} value to all elements in the input data set. The
* generated values are consecutive.
*
* @param input the input data set
* @return a data set of tuple 2 consisting of consecutive ids and initial values.
*/
public static <T> DataSet<Tuple2<Long, T>> zipWithIndex(DataSet<T> input) {
DataSet<Tuple2<Integer, Long>> elementCount = countElementsPerPartition(input);
return input.mapPartition(
        new RichMapPartitionFunction<T, Tuple2<Long, T>>() {

            long start = 0;

            @Override
            public void open(OpenContext openContext) throws Exception {
                super.open(openContext);
                List<Tuple2<Integer, Long>> offsets =
                        getRuntimeContext()
                                .getBroadcastVariableWithInitializer(
                                        "counts",
                                        new BroadcastVariableInitializer<
                                                Tuple2<Integer, Long>,
                                                List<Tuple2<Integer, Long>>>() {
                                            @Override
                                            public List<Tuple2<Integer, Long>> initializeBroadcastVariable(
                                                    Iterable<Tuple2<Integer, Long>> data) {
                                                // sort the list by task id to calculate the correct offset
                                                List<Tuple2<Integer, Long>> sortedData = new ArrayList<>();
                                                for (Tuple2<Integer, Long> datum : data) {
                                                    sortedData.add(datum);
                                                }
                                                Collections.sort(
                                                        sortedData,
                                                        new Comparator<Tuple2<Integer, Long>>() {
                                                            @Override
                                                            public int compare(
                                                                    Tuple2<Integer, Long> o1,
                                                                    Tuple2<Integer, Long> o2) {
                                                                return o1.f0.compareTo(o2.f0);
                                                            }
                                                        });
                                                return sortedData;
                                            }
                                        });

                // compute the offset for each partition
                for (int i = 0; i < getRuntimeContext().getIndexOfThisSubtask(); i++) {
                    start += offsets.get(i).f1;
                }
            }

            @Override
            public void mapPartition(Iterable<T> values, Collector<Tuple2<Long, T>> out)
                    throws Exception {
                for (T value : values) {
                    out.collect(new Tuple2<>(start++, value));
                }
            }
        })
        .withBroadcastSet(elementCount, "counts");
} | 3.68 |
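The essential step is converting the broadcast per-subtask element counts into a starting id for each subtask: subtask i starts after the elements of subtasks 0..i-1. A small standalone sketch of that offset computation with made-up counts:

```java
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class ZipWithIndexOffsetDemo {
    public static void main(String[] args) {
        // (subtaskIndex, elementCount) pairs as they might arrive from the broadcast variable.
        List<long[]> counts = Arrays.asList(new long[] {2, 5}, new long[] {0, 3}, new long[] {1, 4});
        counts.sort(Comparator.comparingLong(c -> c[0]));   // sort by subtask index first

        // Subtask i starts numbering after all elements of subtasks 0..i-1.
        for (int subtask = 0; subtask < counts.size(); subtask++) {
            long start = 0;
            for (int i = 0; i < subtask; i++) {
                start += counts.get(i)[1];
            }
            System.out.println("subtask " + subtask + " assigns ids starting at " + start);
        }
        // prints: subtask 0 -> 0, subtask 1 -> 3, subtask 2 -> 7
    }
}
```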
flink_VertexThreadInfoTrackerBuilder_setCoordinator | /**
* Sets {@code coordinator}.
*
* @param coordinator Coordinator for thread info stats request.
* @return Builder.
*/
public VertexThreadInfoTrackerBuilder setCoordinator(ThreadInfoRequestCoordinator coordinator) {
this.coordinator = coordinator;
return this;
} | 3.68 |
pulsar_DispatchRateLimiter_getDispatchRateOnMsg | /**
* Get the configured msg dispatch-throttling rate.
*
* @return the configured rate, or -1 if not configured
*/
public long getDispatchRateOnMsg() {
return dispatchRateLimiterOnMessage != null ? dispatchRateLimiterOnMessage.getRate() : -1;
} | 3.68 |
hadoop_ExcessRedundancyMap_size | /**
* @return the number of redundancies in this map.
*/
long size() {
return size.get();
} | 3.68 |
flink_TimestampData_fromLocalDateTime | /**
* Creates an instance of {@link TimestampData} from an instance of {@link LocalDateTime}.
*
* @param dateTime an instance of {@link LocalDateTime}
*/
public static TimestampData fromLocalDateTime(LocalDateTime dateTime) {
long epochDay = dateTime.toLocalDate().toEpochDay();
long nanoOfDay = dateTime.toLocalTime().toNanoOfDay();
long millisecond = epochDay * MILLIS_PER_DAY + nanoOfDay / 1_000_000;
int nanoOfMillisecond = (int) (nanoOfDay % 1_000_000);
return new TimestampData(millisecond, nanoOfMillisecond);
} | 3.68 |
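The conversion splits the timestamp into whole milliseconds since epoch plus the leftover nanoseconds within that millisecond. A quick standalone check of that arithmetic using only JDK types (no Flink dependency), cross-checked against Instant:

```java
import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneOffset;

public class TimestampSplitDemo {
    private static final long MILLIS_PER_DAY = 24 * 60 * 60 * 1000L;

    public static void main(String[] args) {
        LocalDateTime dateTime = LocalDateTime.of(2024, 1, 2, 3, 4, 5, 123_456_789);

        long epochDay = dateTime.toLocalDate().toEpochDay();
        long nanoOfDay = dateTime.toLocalTime().toNanoOfDay();
        long millisecond = epochDay * MILLIS_PER_DAY + nanoOfDay / 1_000_000;
        int nanoOfMillisecond = (int) (nanoOfDay % 1_000_000);

        // Cross-check: reassembling the two parts must give back the original instant (in UTC).
        Instant expected = dateTime.toInstant(ZoneOffset.UTC);
        System.out.println(millisecond == expected.toEpochMilli());                  // true
        System.out.println(millisecond * 1_000_000 + nanoOfMillisecond
                == expected.getEpochSecond() * 1_000_000_000L + expected.getNano()); // true
    }
}
```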
flink_FactoryUtil_validateFactoryOptions | /**
* Validates the required options and optional options.
*
* <p>Note: It does not check for left-over options.
*/
public static void validateFactoryOptions(
Set<ConfigOption<?>> requiredOptions,
Set<ConfigOption<?>> optionalOptions,
ReadableConfig options) {
// currently Flink's options have no validation feature which is why we access them eagerly
// to provoke a parsing error
final List<String> missingRequiredOptions =
requiredOptions.stream()
// Templated options will never appear with their template key, so we need
// to ignore them as required properties here
.filter(
option ->
allKeys(option)
.noneMatch(k -> k.contains(PLACEHOLDER_SYMBOL)))
.filter(option -> readOption(options, option) == null)
.map(ConfigOption::key)
.sorted()
.collect(Collectors.toList());
if (!missingRequiredOptions.isEmpty()) {
throw new ValidationException(
String.format(
"One or more required options are missing.\n\n"
+ "Missing required options are:\n\n"
+ "%s",
String.join("\n", missingRequiredOptions)));
}
optionalOptions.forEach(option -> readOption(options, option));
} | 3.68 |
hbase_HBaseTestingUtility_expireSession | /**
* Expire a ZooKeeper session as recommended in ZooKeeper documentation
* http://hbase.apache.org/book.html#trouble.zookeeper There are issues when doing this: [1]
* http://www.mail-archive.com/[email protected]/msg01942.html [2]
* https://issues.apache.org/jira/browse/ZOOKEEPER-1105
* @param nodeZK - the ZK watcher to expire
* @param checkStatus - true to check if we can create a Table with the current configuration.
*/
public void expireSession(ZKWatcher nodeZK, boolean checkStatus) throws Exception {
Configuration c = new Configuration(this.conf);
String quorumServers = ZKConfig.getZKQuorumServersString(c);
ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
byte[] password = zk.getSessionPasswd();
long sessionID = zk.getSessionId();
// Expiry seems to be asynchronous (see comment from P. Hunt in [1]),
// so we create a first watcher to be sure that the
// event was sent. We expect that if our watcher receives the event
// other watchers on the same machine will get is as well.
// When we ask to close the connection, ZK does not close it before
// we receive all the events, so don't have to capture the event, just
// closing the connection should be enough.
ZooKeeper monitor = new ZooKeeper(quorumServers, 1000, new org.apache.zookeeper.Watcher() {
@Override
public void process(WatchedEvent watchedEvent) {
LOG.info("Monitor ZKW received event=" + watchedEvent);
}
}, sessionID, password);
// Making it expire
ZooKeeper newZK =
new ZooKeeper(quorumServers, 1000, EmptyWatcher.instance, sessionID, password);
// ensure that we have connection to the server before closing down, otherwise
// the close session event will be eaten out before we start CONNECTING state
long start = EnvironmentEdgeManager.currentTime();
while (
newZK.getState() != States.CONNECTED && EnvironmentEdgeManager.currentTime() - start < 1000
) {
Thread.sleep(1);
}
newZK.close();
LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));
// Now closing & waiting to be sure that the clients get it.
monitor.close();
if (checkStatus) {
getConnection().getTable(TableName.META_TABLE_NAME).close();
}
} | 3.68 |
hibernate-validator_ISBNValidator_checkChecksumISBN13 | /**
* Check the digits for ISBN 13 using algorithm from
* <a href="https://en.wikipedia.org/wiki/International_Standard_Book_Number#ISBN-13_check_digit_calculation">Wikipedia</a>.
*/
private static boolean checkChecksumISBN13(String isbn) {
int sum = 0;
for ( int i = 0; i < isbn.length(); i++ ) {
sum += ( isbn.charAt( i ) - '0' ) * ( i % 2 == 0 ? 1 : 3 );
}
return ( sum % 10 ) == 0;
} | 3.68 |
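A self-contained sketch of the same weighted-sum rule, with a worked example (this assumes, as the validator above does, that the input is already a 13-digit string without separators):

```java
public class Isbn13CheckDemo {
    // Re-implementation of the weighted-sum check above, for illustration only.
    static boolean isValidIsbn13(String isbn) {
        int sum = 0;
        for (int i = 0; i < isbn.length(); i++) {
            sum += (isbn.charAt(i) - '0') * (i % 2 == 0 ? 1 : 3);
        }
        return sum % 10 == 0;
    }

    public static void main(String[] args) {
        System.out.println(isValidIsbn13("9780306406157")); // true: weighted sum is 100
        System.out.println(isValidIsbn13("9780306406158")); // false: weighted sum is 101
    }
}
```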
flink_OptimizerNode_haveAllOutputConnectionInterestingProperties | /**
* Checks, if all outgoing connections have their interesting properties set from their target
* nodes.
*
* @return True, if on all outgoing connections, the interesting properties are set. False
* otherwise.
*/
public boolean haveAllOutputConnectionInterestingProperties() {
for (DagConnection conn : getOutgoingConnections()) {
if (conn.getInterestingProperties() == null) {
return false;
}
}
return true;
} | 3.68 |
framework_Table_setColumnOrder | /*
* Arranges visible columns according to given columnOrder. Silently ignores
* columnIds that are not visible columns, and keeps the internal order of
* visible columns left out of the ordering (trailing). Silently does
* nothing if columnReordering is not allowed.
*/
private void setColumnOrder(Object[] columnOrder) {
if (columnOrder == null || !isColumnReorderingAllowed()) {
return;
}
final LinkedList<Object> newOrder = new LinkedList<Object>();
for (Object id : columnOrder) {
if (id != null && visibleColumns.contains(id)) {
visibleColumns.remove(id);
newOrder.add(id);
}
}
for (final Object columnId : visibleColumns) {
if (!newOrder.contains(columnId)) {
newOrder.add(columnId);
}
}
visibleColumns = newOrder;
// Assure visual refresh
refreshRowCache();
} | 3.68 |
pulsar_ElasticSearchSink_extractIdAndDocument | /**
* Extract ES _id and _source using the Schema if available.
*
* @param record
* @return A pair for _id and _source
*/
public Pair<String, String> extractIdAndDocument(Record<GenericObject> record) throws JsonProcessingException {
if (elasticSearchConfig.isSchemaEnable()) {
Object key = null;
GenericObject value = null;
Schema<?> keySchema = null;
Schema<?> valueSchema = null;
if (record.getSchema() != null && record.getSchema() instanceof KeyValueSchema) {
KeyValueSchema<GenericObject, GenericObject> keyValueSchema = (KeyValueSchema) record.getSchema();
keySchema = keyValueSchema.getKeySchema();
valueSchema = keyValueSchema.getValueSchema();
KeyValue<GenericObject, GenericObject> keyValue =
(KeyValue<GenericObject, GenericObject>) record.getValue().getNativeObject();
key = keyValue.getKey();
value = keyValue.getValue();
} else {
key = record.getKey().orElse(null);
valueSchema = record.getSchema();
value = getGenericObjectFromRecord(record);
}
String id = null;
if (!elasticSearchConfig.isKeyIgnore() && key != null) {
if (keySchema != null){
id = stringifyKey(keySchema, key);
} else {
id = key.toString();
}
}
String doc = null;
if (value != null) {
if (valueSchema != null) {
if (elasticSearchConfig.isCopyKeyFields()
&& (keySchema.getSchemaInfo().getType().equals(SchemaType.AVRO)
|| keySchema.getSchemaInfo().getType().equals(SchemaType.JSON))) {
JsonNode keyNode = extractJsonNode(keySchema, key);
JsonNode valueNode = extractJsonNode(valueSchema, value);
doc = stringify(JsonConverter.topLevelMerge(keyNode, valueNode));
} else {
doc = stringifyValue(valueSchema, value);
}
} else {
if (value.getNativeObject() instanceof byte[]) {
// for BWC with the ES-Sink
doc = new String((byte[]) value.getNativeObject(), StandardCharsets.UTF_8);
} else {
doc = value.getNativeObject().toString();
}
}
}
if (doc != null && primaryFields != null) {
try {
// extract the PK from the JSON document
JsonNode jsonNode = objectMapper.readTree(doc);
id = stringifyKey(jsonNode, primaryFields);
} catch (JsonProcessingException e) {
log.error("Failed to read JSON", e);
throw e;
}
}
final ElasticSearchConfig.IdHashingAlgorithm idHashingAlgorithm =
elasticSearchConfig.getIdHashingAlgorithm();
if (id != null
&& idHashingAlgorithm != null
&& idHashingAlgorithm != ElasticSearchConfig.IdHashingAlgorithm.NONE) {
final byte[] idBytes = id.getBytes(StandardCharsets.UTF_8);
boolean performHashing = true;
if (elasticSearchConfig.isConditionalIdHashing() && idBytes.length <= 512) {
performHashing = false;
}
if (performHashing) {
Hasher hasher;
switch (idHashingAlgorithm) {
case SHA256:
hasher = Hashing.sha256().newHasher();
break;
case SHA512:
hasher = Hashing.sha512().newHasher();
break;
default:
throw new UnsupportedOperationException("Unsupported IdHashingAlgorithm: "
+ idHashingAlgorithm);
}
hasher.putBytes(idBytes);
id = base64Encoder.encodeToString(hasher.hash().asBytes());
}
}
if (log.isDebugEnabled()) {
SchemaType schemaType = null;
if (record.getSchema() != null && record.getSchema().getSchemaInfo() != null) {
schemaType = record.getSchema().getSchemaInfo().getType();
}
log.debug("recordType={} schemaType={} id={} doc={}",
record.getClass().getName(),
schemaType,
id,
doc);
}
doc = sanitizeValue(doc);
return Pair.of(id, doc);
} else {
Message message = record.getMessage().orElse(null);
final String rawData;
if (message != null) {
rawData = new String(message.getData(), StandardCharsets.UTF_8);
} else {
GenericObject recordObject = getGenericObjectFromRecord(record);
rawData = stringifyValue(record.getSchema(), recordObject);
}
if (rawData == null || rawData.length() == 0){
throw new IllegalArgumentException("Record does not carry message information.");
}
String key = elasticSearchConfig.isKeyIgnore() ? null : record.getKey().map(Object::toString).orElse(null);
return Pair.of(key, sanitizeValue(rawData));
}
} | 3.68 |
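The optional id hashing near the end replaces a long document id with a base64-encoded digest. A small sketch of the SHA-256 branch using only the JDK (MessageDigest instead of the Guava Hasher used by the sink), for illustration:

```java
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Base64;

public class IdHashingDemo {
    // Hash the id with SHA-256 and base64-encode it, mirroring the SHA256 branch above.
    static String hashId(String id) throws NoSuchAlgorithmException {
        byte[] idBytes = id.getBytes(StandardCharsets.UTF_8);
        byte[] digest = MessageDigest.getInstance("SHA-256").digest(idBytes);
        return Base64.getEncoder().encodeToString(digest);
    }

    public static void main(String[] args) throws NoSuchAlgorithmException {
        String longKey = "tenant/namespace/topic-with-a-very-long-compound-key";
        System.out.println(hashId(longKey));   // fixed-length, index-friendly id
    }
}
```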
flink_MetricStore_retainJobs | /**
* Remove inactive jobs.
*
* @param activeJobs to retain.
*/
synchronized void retainJobs(List<String> activeJobs) {
jobs.keySet().retainAll(activeJobs);
representativeAttempts.keySet().retainAll(activeJobs);
} | 3.68 |
hudi_BootstrapExecutor_execute | /**
* Executes Bootstrap.
*/
public void execute() throws IOException {
initializeTable();
try (SparkRDDWriteClient bootstrapClient = new SparkRDDWriteClient(new HoodieSparkEngineContext(jssc), bootstrapConfig)) {
HashMap<String, String> checkpointCommitMetadata = new HashMap<>();
checkpointCommitMetadata.put(HoodieStreamer.CHECKPOINT_KEY, cfg.checkpoint);
if (cfg.checkpoint != null) {
checkpointCommitMetadata.put(HoodieStreamer.CHECKPOINT_RESET_KEY, cfg.checkpoint);
}
bootstrapClient.bootstrap(Option.of(checkpointCommitMetadata));
syncHive();
}
} | 3.68 |
framework_VSlider_updateStyleNames | /**
* Updates the style names for this widget and the child elements.
*
* @param styleName
* the new style name
* @param isPrimaryStyleName
* {@code true} if the new style name is primary, {@code false}
* otherwise
*/
protected void updateStyleNames(String styleName,
boolean isPrimaryStyleName) {
feedbackPopup.removeStyleName(getStylePrimaryName() + "-feedback");
removeStyleName(getStylePrimaryName() + "-vertical");
if (isPrimaryStyleName) {
super.setStylePrimaryName(styleName);
} else {
super.setStyleName(styleName);
}
feedbackPopup.addStyleName(getStylePrimaryName() + "-feedback");
base.setClassName(getStylePrimaryName() + "-base");
handle.setClassName(getStylePrimaryName() + "-handle");
smaller.setClassName(getStylePrimaryName() + "-smaller");
bigger.setClassName(getStylePrimaryName() + "-bigger");
if (isVertical()) {
addStyleName(getStylePrimaryName() + "-vertical");
}
} | 3.68 |
hmily_OriginTrackedPropertiesLoader_read | /**
* Read boolean.
*
* @param wrappedLine the wrapped line
* @return the boolean
* @throws IOException the io exception
*/
public boolean read(final boolean wrappedLine) throws IOException {
this.escaped = false;
this.character = this.reader.read();
this.columnNumber++;
if (this.columnNumber == 0) {
skipLeadingWhitespace();
if (!wrappedLine) {
skipComment();
}
}
if (this.character == '\\') {
this.escaped = true;
readEscaped();
} else if (this.character == '\n') {
this.columnNumber = -1;
}
return !isEndOfFile();
} | 3.68 |
framework_ComplexRenderer_onBrowserEvent | /**
* Called whenever a registered event is triggered in the column the
* renderer renders.
* <p>
* The events that triggers this needs to be returned by the
* {@link #getConsumedEvents()} method.
* <p>
* Returns boolean telling if the event has been completely handled and
* should not cause any other actions.
*
* @param cell
* Object containing information about the cell the event was
* triggered on.
*
* @param event
* The original DOM event
* @return true if event should not be handled by grid
*/
public boolean onBrowserEvent(CellReference<?> cell, NativeEvent event) {
return false;
} | 3.68 |
hadoop_FindOptions_setMinDepth | /**
* Sets the minimum depth for applying expressions.
*
* @param minDepth minimum depth
*/
public void setMinDepth(int minDepth) {
this.minDepth = minDepth;
} | 3.68 |
hbase_NamespaceAuditor_checkQuotaToUpdateRegion | /**
* Check and update region count quota for an existing table.
* @param tName - table name for which region count to be updated.
* @param regions - Number of regions that will be added.
* @throws IOException Signals that an I/O exception has occurred.
*/
public void checkQuotaToUpdateRegion(TableName tName, int regions) throws IOException {
if (stateManager.isInitialized()) {
stateManager.checkAndUpdateNamespaceRegionCount(tName, regions);
} else {
checkTableTypeAndThrowException(tName);
}
} | 3.68 |
morf_AbstractSqlDialectTest_shouldGenerateCorrectSqlForMathOperations9 | /**
* Test for proper SQL mathematics operation generation from DSL expressions.
* <p>
* Bracket should be generated for subexpression "c/d". Even without explicit
* {@link org.alfasoftware.morf.sql.SqlUtils#bracket(MathsField)} call.
* </p>
*/
@Test
public void shouldGenerateCorrectSqlForMathOperations9() {
AliasedField dDivByE = field("c").divideBy(field("d"));
String result = testDialect.getSqlFrom(field("a").plus(field("b")).plus(dDivByE).plus(field("e")).plus(literal(100))
.plus(field("f")).divideBy(literal(5)));
assertEquals(expectedSqlForMathOperations9(), result);
} | 3.68 |
framework_Slot_attachListeners | /**
* Attaches resize listeners to the widget, caption and spacing elements
*/
private void attachListeners() {
if (getWidget() != null && layout.getLayoutManager() != null) {
LayoutManager lm = layout.getLayoutManager();
if (getCaptionElement() != null && captionResizeListener != null) {
lm.addElementResizeListener(getCaptionElement(),
captionResizeListener);
}
if (widgetResizeListener != null) {
lm.addElementResizeListener(getWidget().getElement(),
widgetResizeListener);
}
if (getSpacingElement() != null && spacingResizeListener != null) {
lm.addElementResizeListener(getSpacingElement(),
spacingResizeListener);
}
}
} | 3.68 |
hadoop_RawErasureDecoder_release | /**
* Should be called when release this coder. Good chance to release encoding
* or decoding buffers
*/
public void release() {
// Nothing to do here.
} | 3.68 |
hadoop_PerGpuTemperature_getCurrentGpuTemp | /**
* Get the current GPU temperature in degrees Celsius.
* @return temperature in degrees Celsius
*/
@XmlJavaTypeAdapter(PerGpuDeviceInformation.StrToFloatBeforeSpaceAdapter.class)
@XmlElement(name = "gpu_temp")
public Float getCurrentGpuTemp() {
return currentGpuTemp;
} | 3.68 |
pulsar_Authentication_authenticationStage | /**
* An authentication stage.
* When authentication completes, the passed-in authFuture will contain the authentication-related HTTP request headers.
*/
default void authenticationStage(String requestUrl,
AuthenticationDataProvider authData,
Map<String, String> previousResHeaders,
CompletableFuture<Map<String, String>> authFuture) {
authFuture.complete(null);
} | 3.68 |
flink_MutableHashTable_ensureNumBuffersReturned | /**
* This method makes sure that at least a certain number of memory segments is in the list of
* free segments. Free memory can be in the list of free segments, or in the return-queue where
* segments used to write behind are put. The number of segments that are in that return-queue,
* but are actually reclaimable is tracked. This method makes sure at least a certain number of
* buffers is reclaimed.
*
* @param minRequiredAvailable The minimum number of buffers that needs to be reclaimed.
*/
final void ensureNumBuffersReturned(final int minRequiredAvailable) {
if (minRequiredAvailable > this.availableMemory.size() + this.writeBehindBuffersAvailable) {
throw new IllegalArgumentException(
"More buffers requested available than totally available.");
}
try {
while (this.availableMemory.size() < minRequiredAvailable) {
this.availableMemory.add(this.writeBehindBuffers.take());
this.writeBehindBuffersAvailable--;
}
} catch (InterruptedException iex) {
throw new RuntimeException("Hash Join was interrupted.");
}
} | 3.68 |
flink_KubernetesUtils_getResourceRequirements | /**
* Get resource requirements from memory and cpu.
*
* @param resourceRequirements resource requirements in pod template
* @param mem Memory in mb.
* @param memoryLimitFactor limit factor for the memory, used to set the limit resources.
* @param cpu cpu.
* @param cpuLimitFactor limit factor for the cpu, used to set the limit resources.
* @param externalResources external resources
* @param externalResourceConfigKeys config keys of external resources
* @return KubernetesResource requirements.
*/
public static ResourceRequirements getResourceRequirements(
ResourceRequirements resourceRequirements,
int mem,
double memoryLimitFactor,
double cpu,
double cpuLimitFactor,
Map<String, ExternalResource> externalResources,
Map<String, String> externalResourceConfigKeys) {
final Quantity cpuQuantity = new Quantity(String.valueOf(cpu));
final Quantity cpuLimitQuantity = new Quantity(String.valueOf(cpu * cpuLimitFactor));
final Quantity memQuantity = new Quantity(mem + Constants.RESOURCE_UNIT_MB);
final Quantity memQuantityLimit =
new Quantity(((int) (mem * memoryLimitFactor)) + Constants.RESOURCE_UNIT_MB);
ResourceRequirementsBuilder resourceRequirementsBuilder =
new ResourceRequirementsBuilder(resourceRequirements)
.addToRequests(Constants.RESOURCE_NAME_MEMORY, memQuantity)
.addToRequests(Constants.RESOURCE_NAME_CPU, cpuQuantity)
.addToLimits(Constants.RESOURCE_NAME_MEMORY, memQuantityLimit)
.addToLimits(Constants.RESOURCE_NAME_CPU, cpuLimitQuantity);
// Add the external resources to resource requirement.
for (Map.Entry<String, ExternalResource> externalResource : externalResources.entrySet()) {
final String configKey = externalResourceConfigKeys.get(externalResource.getKey());
if (!StringUtils.isNullOrWhitespaceOnly(configKey)) {
final Quantity resourceQuantity =
new Quantity(
String.valueOf(externalResource.getValue().getValue().longValue()));
resourceRequirementsBuilder
.addToRequests(configKey, resourceQuantity)
.addToLimits(configKey, resourceQuantity);
LOG.info(
"Request external resource {} with config key {}.",
resourceQuantity.getAmount(),
configKey);
}
}
return resourceRequirementsBuilder.build();
} | 3.68 |
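The request/limit split is plain arithmetic: the request uses the configured value, the limit multiplies it by a factor, with the memory limit truncated to whole megabytes. A small sketch of that computation without the fabric8 builder types (the "Mi" suffix here is an assumption standing in for Constants.RESOURCE_UNIT_MB):

```java
public class ResourceLimitDemo {
    public static void main(String[] args) {
        int memMb = 1024;
        double memoryLimitFactor = 1.5;
        double cpu = 1.0;
        double cpuLimitFactor = 2.0;

        String memRequest = memMb + "Mi";
        String memLimit = ((int) (memMb * memoryLimitFactor)) + "Mi";   // truncated to whole MB
        String cpuRequest = String.valueOf(cpu);
        String cpuLimit = String.valueOf(cpu * cpuLimitFactor);

        System.out.println("memory: request=" + memRequest + " limit=" + memLimit); // 1024Mi / 1536Mi
        System.out.println("cpu:    request=" + cpuRequest + " limit=" + cpuLimit); // 1.0 / 2.0
    }
}
```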
hbase_TimeRange_compare | /**
* Compare the timestamp to timerange.
* @return -1 if timestamp is less than timerange, 0 if timestamp is within timerange, 1 if
* timestamp is greater than timerange
*/
public int compare(long timestamp) {
assert timestamp >= 0;
if (this.allTime) {
return 0;
}
if (timestamp < minStamp) {
return -1;
}
return timestamp >= maxStamp ? 1 : 0;
} | 3.68 |
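A minimal standalone version of the same comparison, just to make the half-open [minStamp, maxStamp) boundary behavior explicit:

```java
public class TimeRangeCompareDemo {
    // Returns -1 below the range, 0 inside [minStamp, maxStamp), 1 at or above maxStamp.
    static int compare(long timestamp, long minStamp, long maxStamp) {
        if (timestamp < minStamp) {
            return -1;
        }
        return timestamp >= maxStamp ? 1 : 0;
    }

    public static void main(String[] args) {
        long min = 100, max = 200;
        System.out.println(compare(99, min, max));   // -1: before the range
        System.out.println(compare(100, min, max));  //  0: lower bound is inclusive
        System.out.println(compare(199, min, max));  //  0: still inside
        System.out.println(compare(200, min, max));  //  1: upper bound is exclusive
    }
}
```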
shardingsphere-elasticjob_TimeService_getCurrentMillis | /**
* Get current millis.
*
* @return current millis
*/
public long getCurrentMillis() {
return System.currentTimeMillis();
} | 3.68 |
framework_ValidationException_getValidationErrors | /**
* Gets both field and bean level validation errors.
*
* @return a list of all validation errors
*/
public List<ValidationResult> getValidationErrors() {
List<ValidationResult> errors = getFieldValidationErrors().stream()
.map(s -> s.getResult().get()).collect(Collectors.toList());
errors.addAll(getBeanValidationErrors());
return errors;
} | 3.68 |
flink_MapValue_equals | /*
* (non-Javadoc)
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (this.getClass() != obj.getClass()) {
return false;
}
final MapValue<?, ?> other = (MapValue<?, ?>) obj;
if (this.map == null) {
if (other.map != null) {
return false;
}
} else if (!this.map.equals(other.map)) {
return false;
}
return true;
} | 3.68 |
hadoop_DefaultAuditLogger_setCallerContextEnabled | /**
* Enable or disable CallerContext.
*
* @param value true, enable CallerContext, otherwise false to disable it.
*/
void setCallerContextEnabled(final boolean value) {
isCallerContextEnabled = value;
} | 3.68 |
hbase_UserProvider_shouldLoginFromKeytab | /**
* In a secure environment, if a user specified a keytab and principal, the hbase client will try to
* log in with them. Otherwise, the hbase client will try to obtain a ticket (through kinit) from the system.
*/
public boolean shouldLoginFromKeytab() {
return User.shouldLoginFromKeytab(this.getConf());
} | 3.68 |
hadoop_AMRMClientAsyncImpl_getAvailableResources | /**
* Get the currently available resources in the cluster.
* A valid value is available after a call to allocate has been made
* @return Currently available resources
*/
public Resource getAvailableResources() {
return client.getAvailableResources();
} | 3.68 |
hadoop_OBSCommonUtils_innerMkdirs | /**
* Make the given path and all non-existent parents into directories.
*
* @param owner the owner OBSFileSystem instance
* @param path path to create
* @return true if a directory was created
* @throws FileAlreadyExistsException there is a file at the path specified
* @throws IOException other IO problems
* @throws ObsException on failures inside the OBS SDK
*/
static boolean innerMkdirs(final OBSFileSystem owner, final Path path)
throws IOException, FileAlreadyExistsException, ObsException {
LOG.debug("Making directory: {}", path);
FileStatus fileStatus;
try {
fileStatus = owner.getFileStatus(path);
if (fileStatus.isDirectory()) {
return true;
} else {
throw new FileAlreadyExistsException("Path is a file: " + path);
}
} catch (FileNotFoundException e) {
Path fPart = path.getParent();
do {
try {
fileStatus = owner.getFileStatus(fPart);
if (fileStatus.isDirectory()) {
break;
}
if (fileStatus.isFile()) {
throw new FileAlreadyExistsException(
String.format("Can't make directory for path '%s'"
+ " since it is a file.", fPart));
}
} catch (FileNotFoundException fnfe) {
LOG.debug("file {} not found, ignoring it.", fPart);
}
fPart = fPart.getParent();
} while (fPart != null);
String key = pathToKey(owner, path);
if (owner.isFsBucket()) {
OBSPosixBucketUtils.fsCreateFolder(owner, key);
} else {
OBSObjectBucketUtils.createFakeDirectory(owner, key);
}
return true;
}
} | 3.68 |
flink_Pattern_followedBy | /**
* Appends a new group pattern to the existing one. The new pattern enforces non-strict temporal
* contiguity. This means that a matching event of this pattern and the preceding matching event
* might be interleaved with other events which are ignored.
*
* @param group the pattern to append
* @return A new pattern which is appended to this one
*/
public GroupPattern<T, F> followedBy(Pattern<T, F> group) {
return new GroupPattern<>(
this, group, ConsumingStrategy.SKIP_TILL_NEXT, afterMatchSkipStrategy);
} | 3.68 |
hadoop_GetContainersResponsePBImpl_initLocalContainerList | // Once this is called. containerList will never be null - until a getProto
// is called.
private void initLocalContainerList() {
if (this.containerList != null) {
return;
}
GetContainersResponseProtoOrBuilder p = viaProto ? proto : builder;
List<ContainerReportProto> list = p.getContainersList();
containerList = new ArrayList<ContainerReport>();
for (ContainerReportProto c : list) {
containerList.add(convertFromProtoFormat(c));
}
} | 3.68 |
framework_DragAndDropWrapper_getAbsoluteLeft | /**
* @return the absolute position of wrapper on the page
*/
public Integer getAbsoluteLeft() {
return (Integer) getData("absoluteLeft");
} | 3.68 |
hbase_StoreFileInfo_getFileStatus | /** Returns The {@link FileStatus} of the file */
public FileStatus getFileStatus() throws IOException {
return getReferencedFileStatus(fs);
} | 3.68 |
hadoop_ResourceSkyline_getJobFinishTime | /**
* Get the job's finish time.
*
* @return job's finish time.
*/
public final long getJobFinishTime() {
return jobFinishTime;
} | 3.68 |
flink_BufferCompressor_compress | /**
* Compresses the given {@link Buffer} into the intermediate buffer and returns the compressed
* data size.
*/
private int compress(Buffer buffer) {
checkArgument(buffer != null, "The input buffer must not be null.");
checkArgument(buffer.isBuffer(), "Event can not be compressed.");
checkArgument(!buffer.isCompressed(), "Buffer already compressed.");
checkArgument(buffer.getReaderIndex() == 0, "Reader index of the input buffer must be 0.");
checkArgument(buffer.readableBytes() > 0, "No data to be compressed.");
checkState(
internalBuffer.refCnt() == 1,
"Illegal reference count, buffer need to be released.");
try {
int compressedLen;
int length = buffer.getSize();
MemorySegment memorySegment = buffer.getMemorySegment();
// If buffer is on-heap, manipulate the underlying array directly. There are two main
// reasons why NIO buffer is not directly used here: One is that some compression
// libraries will use the underlying array for heap buffer, but our input buffer may be
// a read-only ByteBuffer, and it is illegal to access internal array. Another reason
// is that for the on-heap buffer, directly operating the underlying array can reduce
// additional overhead compared to generating a NIO buffer.
if (!memorySegment.isOffHeap()) {
compressedLen =
blockCompressor.compress(
memorySegment.getArray(),
buffer.getMemorySegmentOffset(),
length,
internalBufferArray,
0);
} else {
// compress the given buffer into the internal heap buffer
compressedLen =
blockCompressor.compress(
buffer.getNioBuffer(0, length),
0,
length,
internalBuffer.getNioBuffer(0, internalBuffer.capacity()),
0);
}
return compressedLen < length ? compressedLen : 0;
} catch (Throwable throwable) {
// return the original buffer if failed to compress
return 0;
}
} | 3.68 |
zxing_OneDimensionalCodeWriter_appendPattern | /**
* @param target encode black/white pattern into this array
* @param pos position to start encoding at in {@code target}
* @param pattern lengths of black/white runs to encode
* @param startColor starting color - false for white, true for black
* @return the number of elements added to target.
*/
protected static int appendPattern(boolean[] target, int pos, int[] pattern, boolean startColor) {
boolean color = startColor;
int numAdded = 0;
for (int len : pattern) {
for (int j = 0; j < len; j++) {
target[pos++] = color;
}
numAdded += len;
color = !color; // flip color after each segment
}
return numAdded;
} | 3.68 |
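A quick standalone run of the same run-length expansion, showing how a pattern of run lengths becomes alternating black/white modules (the helper below simply reproduces the method above so the demo compiles on its own):

```java
import java.util.Arrays;

public class AppendPatternDemo {
    // Same logic as appendPattern above, reproduced here so the demo is self-contained.
    static int appendPattern(boolean[] target, int pos, int[] pattern, boolean startColor) {
        boolean color = startColor;
        int numAdded = 0;
        for (int len : pattern) {
            for (int j = 0; j < len; j++) {
                target[pos++] = color;
            }
            numAdded += len;
            color = !color;   // alternate black/white after each run
        }
        return numAdded;
    }

    public static void main(String[] args) {
        boolean[] target = new boolean[7];
        int added = appendPattern(target, 0, new int[] {1, 2, 3, 1}, true);
        System.out.println(added);                   // 7
        System.out.println(Arrays.toString(target)); // [true, false, false, true, true, true, false]
    }
}
```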
flink_TemplateUtils_extractLocalFunctionTemplates | /** Retrieve local templates from function method. */
static Set<FunctionTemplate> extractLocalFunctionTemplates(
DataTypeFactory typeFactory, Method method) {
return asFunctionTemplates(
typeFactory, collectAnnotationsOfMethod(FunctionHint.class, method));
} | 3.68 |
framework_CustomLayout_setTemplateContents | /**
* Set the contents of the template used to draw the custom layout.
*
* Note: setTemplateContents can be applied only before CustomLayout
* instance has been attached.
*
* @param templateContents
*/
public void setTemplateContents(String templateContents) {
getState().templateContents = templateContents;
getState().templateName = null;
} | 3.68 |
framework_Highlight_showOnly | /**
* Highlight the {@link Widget} for the given connector if it is a
* {@link ComponentConnector}. Hide any other highlight.
* <p>
* Pass the returned {@link Element} to {@link #hide(Element)} to remove
* this particular highlight.
* </p>
*
* @since 7.1
*
* @param connector
* the server connector to highlight
* @return Highlight element, or <code>null</code> if the connector isn't a
* component
*/
static Element showOnly(ServerConnector connector) {
hideAll();
if (connector instanceof ComponentConnector) {
return show((ComponentConnector) connector);
} else {
return null;
}
} | 3.68 |
hadoop_StagingCommitter_getConfictModeOption | /**
* Get the conflict mode option string.
* @param context context with the config
* @param fsConf filesystem config
* @param defVal default value.
* @return the trimmed configuration option, upper case.
*/
public static String getConfictModeOption(JobContext context,
Configuration fsConf, String defVal) {
return getConfigurationOption(context,
fsConf,
FS_S3A_COMMITTER_STAGING_CONFLICT_MODE,
defVal).toUpperCase(Locale.ENGLISH);
} | 3.68 |
flink_HiveParserDDLSemanticAnalyzer_getPartitionSpecs | // Get the partition specs from the tree
private List<Map<String, String>> getPartitionSpecs(CommonTree ast) {
List<Map<String, String>> partSpecs = new ArrayList<>();
// get partition metadata if partition specified
for (int childIndex = 0; childIndex < ast.getChildCount(); childIndex++) {
HiveParserASTNode partSpecNode = (HiveParserASTNode) ast.getChild(childIndex);
// sanity check
if (partSpecNode.getType() == HiveASTParser.TOK_PARTSPEC) {
Map<String, String> partSpec = getPartSpec(partSpecNode);
partSpecs.add(partSpec);
}
}
return partSpecs;
} | 3.68 |
hadoop_PathFinder_prependPathComponent | /**
* Prepends the specified component to the path list.
*/
public void prependPathComponent(String str) {
pathenv = str + pathSep + pathenv;
} | 3.68 |
framework_VComboBox_hasNextPage | /**
* Does the Select have more pages?
*
* @return true if a next page exists, else false if the current page is the
* last page
*/
public boolean hasNextPage() {
return pageLength > 0
&& getTotalSuggestionsIncludingNullSelectionItem() > (currentPage
+ 1) * pageLength;
} | 3.68 |
flink_StateBackendLoader_stateBackendFromApplicationOrConfigOrDefaultUseManagedMemory | /**
* Checks whether state backend uses managed memory, without having to deserialize or load the
* state backend.
*
* @param config Cluster configuration.
* @param stateBackendFromApplicationUsesManagedMemory Whether the application-defined backend
* uses Flink's managed memory. Empty if application has not defined a backend.
* @param classLoader User code classloader.
* @return Whether the state backend uses managed memory.
*/
public static boolean stateBackendFromApplicationOrConfigOrDefaultUseManagedMemory(
Configuration config,
Optional<Boolean> stateBackendFromApplicationUsesManagedMemory,
ClassLoader classLoader) {
checkNotNull(config, "config");
// (1) the application defined state backend has precedence
if (stateBackendFromApplicationUsesManagedMemory.isPresent()) {
return stateBackendFromApplicationUsesManagedMemory.get();
}
// (2) check if the config defines a state backend
try {
final StateBackend fromConfig = loadStateBackendFromConfig(config, classLoader, LOG);
if (fromConfig != null) {
return fromConfig.useManagedMemory();
}
} catch (IllegalConfigurationException | DynamicCodeLoadingException | IOException e) {
LOG.warn(
"Cannot decide whether state backend uses managed memory. Will reserve managed memory by default.",
e);
return true;
}
// (3) use the default MemoryStateBackend
return false;
} | 3.68 |
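The method is a three-level precedence chain: the application-defined value wins, then the configuration, then a built-in default. A generic sketch of that pattern with hypothetical names (not the Flink API; the error handling of the real method is omitted):

```java
import java.util.Optional;
import java.util.function.Supplier;

public class PrecedenceDemo {
    // Resolve a boolean flag: explicit application value wins, then a config lookup, then the default.
    static boolean resolveUseManagedMemory(
            Optional<Boolean> fromApplication, Supplier<Optional<Boolean>> fromConfig, boolean defaultValue) {
        if (fromApplication.isPresent()) {   // (1) application-defined value has precedence
            return fromApplication.get();
        }
        return fromConfig.get()              // (2) fall back to the configuration
                .orElse(defaultValue);       // (3) finally, the built-in default
    }

    public static void main(String[] args) {
        System.out.println(resolveUseManagedMemory(Optional.of(true), Optional::empty, false));        // true
        System.out.println(resolveUseManagedMemory(Optional.empty(), () -> Optional.of(false), true)); // false
        System.out.println(resolveUseManagedMemory(Optional.empty(), Optional::empty, false));         // false
    }
}
```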
hadoop_AzureNativeFileSystemStore_isOkContainerState | // Determines whether we have to pull the container information again
// or we can work based off what we already have.
private boolean isOkContainerState(ContainerAccessType accessType) {
switch (currentKnownContainerState) {
case Unknown:
// When using SAS, we can't discover container attributes
// so just live with Unknown state and fail later if it
// doesn't exist.
return connectingUsingSAS;
case DoesntExist:
return false; // the container could have been created
case ExistsAtRightVersion:
return true; // fine to optimize
case ExistsAtWrongVersion:
return false;
case ExistsNoVersion:
// If there's no version, it's OK if we don't need to stamp the version
// or we can't anyway even if we wanted to.
return !needToStampVersion(accessType);
default:
throw new AssertionError("Unknown access type: " + accessType);
}
} | 3.68 |
hadoop_OBSBlockOutputStream_hasActiveBlock | /**
* Predicate to query whether or not there is an active block.
*
* @return true if there is an active block.
*/
private synchronized boolean hasActiveBlock() {
return activeBlock != null;
} | 3.68 |
framework_ConnectorTracker_markAllConnectorsClean | /**
* Mark all connectors in this uI as clean.
*/
public void markAllConnectorsClean() {
dirtyConnectors.clear();
if (fineLogging) {
getLogger().fine("All connectors are now clean");
}
} | 3.68 |
hbase_Cipher_getProvider | /**
* Return the provider for this Cipher
*/
public CipherProvider getProvider() {
return provider;
} | 3.68 |
dubbo_MergerFactory_getMerger | /**
* Find the merger according to the returnType class. The merger will
* merge an array of returnType into one.
*
* @param returnType the merger will return this type
* @return the merger which merges an array of returnType into one, return null if not exist
* @throws IllegalArgumentException if returnType is null
*/
public <T> Merger<T> getMerger(Class<T> returnType) {
if (returnType == null) {
throw new IllegalArgumentException("returnType is null");
}
if (CollectionUtils.isEmptyMap(MERGER_CACHE)) {
loadMergers();
}
Merger merger = MERGER_CACHE.get(returnType);
if (merger == null && returnType.isArray()) {
merger = ArrayMerger.INSTANCE;
}
return merger;
} | 3.68 |
pulsar_DeviationShedder_findBundlesForUnloading | /**
* Recommend that all of the returned bundles be unloaded based on observing excessive standard deviations according
* to some metric.
*
* @param loadData
* The load data to used to make the unloading decision.
* @param conf
* The service configuration.
* @return A map from all selected bundles to the brokers on which they reside.
*/
@Override
public Multimap<String, String> findBundlesForUnloading(final LoadData loadData, final ServiceConfiguration conf) {
final Multimap<String, String> result = ArrayListMultimap.create();
bundleTreeSetCache.clear();
metricTreeSetCache.clear();
double sum = 0;
double squareSum = 0;
final Map<String, BrokerData> brokerDataMap = loadData.getBrokerData();
// Treating each broker as a data point, calculate the sum and squared
// sum of the evaluated broker metrics.
// These may be used to calculate the standard deviation.
for (Map.Entry<String, BrokerData> entry : brokerDataMap.entrySet()) {
final double value = brokerValue(entry.getValue(), conf);
sum += value;
squareSum += value * value;
metricTreeSetCache.add(new ImmutablePair<>(value, entry.getKey()));
}
// Mean cannot change by just moving around bundles.
final double mean = sum / brokerDataMap.size();
double standardDeviation = Math.sqrt(squareSum / brokerDataMap.size() - mean * mean);
final double deviationThreshold = getDeviationThreshold(conf);
String lastMostOverloaded = null;
// While the most loaded broker is above the standard deviation
// threshold, continue to move bundles.
while ((metricTreeSetCache.last().getKey() - mean) / standardDeviation > deviationThreshold) {
final Pair<Double, String> mostLoadedPair = metricTreeSetCache.last();
final double highestValue = mostLoadedPair.getKey();
final String mostLoaded = mostLoadedPair.getValue();
final Pair<Double, String> leastLoadedPair = metricTreeSetCache.first();
final double leastValue = leastLoadedPair.getKey();
final String leastLoaded = metricTreeSetCache.first().getValue();
if (!mostLoaded.equals(lastMostOverloaded)) {
// Reset the bundle tree set now that a different broker is
// being considered.
bundleTreeSetCache.clear();
for (String bundle : brokerDataMap.get(mostLoaded).getLocalData().getBundles()) {
if (!result.containsKey(bundle)) {
// Don't consider bundles that are already going to be
// moved.
bundleTreeSetCache.add(
new ImmutablePair<>(bundleValue(bundle, brokerDataMap.get(mostLoaded), conf), bundle));
}
}
lastMostOverloaded = mostLoaded;
}
boolean selected = false;
while (!(bundleTreeSetCache.isEmpty() || selected)) {
Pair<Double, String> mostExpensivePair = bundleTreeSetCache.pollLast();
double loadIncurred = mostExpensivePair.getKey();
// When the bundle is moved, we want the now least loaded server
// to still have a lower overall load than the most loaded server.
// Thus, we will only consider moving the bundle if this condition
// holds, and otherwise we will try the next bundle.
if (loadIncurred + leastValue < highestValue) {
// Update the standard deviation and replace the old load
// values in the broker tree set with the
// load values assuming this move took place.
final String bundleToMove = mostExpensivePair.getValue();
result.put(bundleToMove, mostLoaded);
metricTreeSetCache.remove(mostLoadedPair);
metricTreeSetCache.remove(leastLoadedPair);
final double newHighLoad = highestValue - loadIncurred;
final double newLowLoad = leastValue + loadIncurred;
squareSum -= highestValue * highestValue + leastValue * leastValue;
squareSum += newHighLoad * newHighLoad + newLowLoad * newLowLoad;
standardDeviation = Math.sqrt(squareSum / brokerDataMap.size() - mean * mean);
metricTreeSetCache.add(new ImmutablePair<>(newLowLoad, leastLoaded));
metricTreeSetCache.add(new ImmutablePair<>(newHighLoad, mostLoaded));
selected = true;
}
}
if (!selected) {
// Move on to the next broker if no bundle could be moved.
metricTreeSetCache.pollLast();
}
}
return result;
} | 3.68 |
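The loop maintains the standard deviation incrementally: moving a bundle changes only two broker loads, so only those two terms of the squared sum are replaced, and the mean stays fixed because total load is conserved. A small numeric sketch of that update:

```java
public class IncrementalStdDevDemo {
    public static void main(String[] args) {
        double[] loads = {10.0, 20.0, 60.0};          // three brokers; broker 2 is overloaded
        double sum = 0, squareSum = 0;
        for (double l : loads) { sum += l; squareSum += l * l; }
        double mean = sum / loads.length;              // 30.0, unchanged by moving load around
        double stdDev = Math.sqrt(squareSum / loads.length - mean * mean);
        System.out.println("before: stdDev=" + stdDev);   // ~21.6

        // Move a bundle worth 25.0 from the most loaded broker (60) to the least loaded (10).
        double moved = 25.0;
        double newHigh = 60.0 - moved;                 // 35.0
        double newLow = 10.0 + moved;                  // 35.0
        squareSum -= 60.0 * 60.0 + 10.0 * 10.0;
        squareSum += newHigh * newHigh + newLow * newLow;
        stdDev = Math.sqrt(squareSum / loads.length - mean * mean);
        System.out.println("after:  stdDev=" + stdDev);   // ~7.07
    }
}
```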
dubbo_ReferenceConfig_meshModeHandleUrl | /**
* if enable mesh mode, handle url.
*
* @param referenceParameters referenceParameters
*/
private void meshModeHandleUrl(Map<String, String> referenceParameters) {
if (!checkMeshConfig(referenceParameters)) {
return;
}
if (StringUtils.isNotEmpty(url)) {
// user specified URL, could be peer-to-peer address, or register center's address.
if (logger.isInfoEnabled()) {
logger.info("The url already exists, mesh no longer processes url: " + url);
}
return;
}
// get provider namespace if (@DubboReference, <reference provider-namespace="xx"/>) present
String podNamespace = referenceParameters.get(RegistryConstants.PROVIDER_NAMESPACE);
// get pod namespace from env if annotation not present the provider namespace
if (StringUtils.isEmpty(podNamespace)) {
if (StringUtils.isEmpty(System.getenv("POD_NAMESPACE"))) {
if (logger.isWarnEnabled()) {
logger.warn(
CONFIG_FAILED_LOAD_ENV_VARIABLE,
"",
"",
"Can not get env variable: POD_NAMESPACE, it may not be running in the K8S environment , "
+ "finally use 'default' replace.");
}
podNamespace = "default";
} else {
podNamespace = System.getenv("POD_NAMESPACE");
}
}
// In mesh mode, providedBy equals K8S Service name.
String providedBy = referenceParameters.get(PROVIDED_BY);
// cluster_domain defaults to 'cluster.local', generally unchanged.
String clusterDomain =
Optional.ofNullable(System.getenv("CLUSTER_DOMAIN")).orElse(DEFAULT_CLUSTER_DOMAIN);
// By VirtualService and DestinationRule, envoy will generate a new route rule,such as
// 'demo.default.svc.cluster.local:80',the default port is 80.
Integer meshPort = Optional.ofNullable(getProviderPort()).orElse(DEFAULT_MESH_PORT);
// DubboReference default is -1, process it.
meshPort = meshPort > -1 ? meshPort : DEFAULT_MESH_PORT;
// get mesh url.
url = TRIPLE + "://" + providedBy + "." + podNamespace + SVC + clusterDomain + ":" + meshPort;
} | 3.68 |
hadoop_INodeSymlink_asSymlink | /** @return this object. */
@Override
public INodeSymlink asSymlink() {
return this;
} | 3.68 |
framework_VTree_getSubPartElement | /*
* (non-Javadoc)
*
* @see com.vaadin.client.ui.SubPartAware#getSubPartElement(java
* .lang.String)
*/
@Override
public com.google.gwt.user.client.Element getSubPartElement(
String subPart) {
if ("fe".equals(subPart)) {
if (BrowserInfo.get().isOpera() && focusedNode != null) {
return focusedNode.getElement();
}
return getFocusElement();
}
if (subPart.startsWith(SUBPART_NODE_PREFIX + "[")) {
boolean expandCollapse = false;
// Node
String[] nodes = subPart.split("/");
TreeNode treeNode = null;
try {
for (String node : nodes) {
if (node.startsWith(SUBPART_NODE_PREFIX)) {
// skip SUBPART_NODE_PREFIX"["
node = node.substring(SUBPART_NODE_PREFIX.length() + 1);
// skip "]"
node = node.substring(0, node.length() - 1);
int position = Integer.parseInt(node);
if (treeNode == null) {
treeNode = getRootNodes().get(position);
} else {
treeNode = treeNode.getChildren().get(position);
}
} else if (node.startsWith(EXPAND_IDENTIFIER)) {
expandCollapse = true;
}
}
if (expandCollapse) {
return treeNode.getElement();
} else {
return DOM.asOld(treeNode.nodeCaptionSpan);
}
} catch (Exception e) {
// Invalid locator string or node could not be found
return null;
}
}
return null;
} | 3.68 |
framework_PropertysetItem_getItemProperty | /**
* Gets the Property corresponding to the given Property ID stored in the
* Item. If the Item does not contain the Property, <code>null</code> is
* returned.
*
* @param id
* the identifier of the Property to get.
* @return the Property with the given ID or <code>null</code>
*/
@Override
public Property getItemProperty(Object id) {
return map.get(id);
} | 3.68 |
hibernate-validator_LoadClass_loadClassInValidatorNameSpace | // HV-363 - library internal classes are loaded via Class.forName first
private Class<?> loadClassInValidatorNameSpace() {
final ClassLoader loader = HibernateValidator.class.getClassLoader();
Exception exception;
try {
return Class.forName( className, true, HibernateValidator.class.getClassLoader() );
}
catch (ClassNotFoundException e) {
exception = e;
}
catch (RuntimeException e) {
exception = e;
}
if ( fallbackOnTCCL ) {
ClassLoader contextClassLoader = initialThreadContextClassLoader != null
? initialThreadContextClassLoader
: Thread.currentThread().getContextClassLoader();
if ( contextClassLoader != null ) {
try {
return Class.forName( className, false, contextClassLoader );
}
catch (ClassNotFoundException e) {
throw LOG.getUnableToLoadClassException( className, contextClassLoader, e );
}
}
else {
throw LOG.getUnableToLoadClassException( className, loader, exception );
}
}
else {
throw LOG.getUnableToLoadClassException( className, loader, exception );
}
} | 3.68 |
hbase_MasterObserver_postAbortProcedure | /**
* Called after an abortProcedure request has been processed.
* @param ctx the environment to interact with the framework and master
*/
default void postAbortProcedure(ObserverContext<MasterCoprocessorEnvironment> ctx)
throws IOException {
} | 3.68 |
pulsar_MessageDeduplication_isDuplicate | /**
* Assess whether the message was already stored in the topic.
*
* @return true if the message should be published or false if it was recognized as a duplicate
*/
public MessageDupStatus isDuplicate(PublishContext publishContext, ByteBuf headersAndPayload) {
if (!isEnabled() || publishContext.isMarkerMessage()) {
return MessageDupStatus.NotDup;
}
String producerName = publishContext.getProducerName();
long sequenceId = publishContext.getSequenceId();
long highestSequenceId = Math.max(publishContext.getHighestSequenceId(), sequenceId);
MessageMetadata md = null;
if (producerName.startsWith(replicatorPrefix)) {
// Message is coming from replication, we need to use the original producer name and sequence id
// for the purpose of deduplication and not rely on the "replicator" name.
int readerIndex = headersAndPayload.readerIndex();
md = Commands.parseMessageMetadata(headersAndPayload);
producerName = md.getProducerName();
sequenceId = md.getSequenceId();
highestSequenceId = Math.max(md.getHighestSequenceId(), sequenceId);
publishContext.setOriginalProducerName(producerName);
publishContext.setOriginalSequenceId(sequenceId);
publishContext.setOriginalHighestSequenceId(highestSequenceId);
headersAndPayload.readerIndex(readerIndex);
}
long chunkID = -1;
long totalChunk = -1;
if (publishContext.isChunked()) {
if (md == null) {
int readerIndex = headersAndPayload.readerIndex();
md = Commands.parseMessageMetadata(headersAndPayload);
headersAndPayload.readerIndex(readerIndex);
}
chunkID = md.getChunkId();
totalChunk = md.getNumChunksFromMsg();
}
// All chunks of a message use the same message metadata and sequence ID,
// so we only need to check the sequence ID for the last chunk in a chunk message.
if (chunkID != -1 && chunkID != totalChunk - 1) {
publishContext.setProperty(IS_LAST_CHUNK, Boolean.FALSE);
return MessageDupStatus.NotDup;
}
// Synchronize the get() and subsequent put() on the map. This would only be relevant if the producer
// disconnects and re-connects very quickly. At that point the call can be coming from a different thread
synchronized (highestSequencedPushed) {
Long lastSequenceIdPushed = highestSequencedPushed.get(producerName);
if (lastSequenceIdPushed != null && sequenceId <= lastSequenceIdPushed) {
if (log.isDebugEnabled()) {
log.debug("[{}] Message identified as duplicated producer={} seq-id={} -- highest-seq-id={}",
topic.getName(), producerName, sequenceId, lastSequenceIdPushed);
}
// Also need to check sequence ids that have been persisted.
// If the current message's seq id is smaller than or equal to the
// lastSequenceIdPersisted, then it is definitely a dup.
// If the current message's seq id is between lastSequenceIdPersisted and
// lastSequenceIdPushed, then we cannot be sure whether the message is a dup or not.
// We should return an error to the producer for the latter case so that it can retry at a future time.
Long lastSequenceIdPersisted = highestSequencedPersisted.get(producerName);
if (lastSequenceIdPersisted != null && sequenceId <= lastSequenceIdPersisted) {
return MessageDupStatus.Dup;
} else {
return MessageDupStatus.Unknown;
}
}
highestSequencedPushed.put(producerName, highestSequenceId);
}
// Only put sequence ID into highestSequencedPushed and
// highestSequencedPersisted until receive and persistent the last chunk.
if (chunkID != -1 && chunkID == totalChunk - 1) {
publishContext.setProperty(IS_LAST_CHUNK, Boolean.TRUE);
}
return MessageDupStatus.NotDup;
} | 3.68 |
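Stripped of the replication and chunking handling, the core check is a per-producer highest-sequence-id map, with a second map of persisted ids to distinguish a definite duplicate from an in-flight one. A minimal sketch of that logic (plain Java with a hypothetical status enum, not Pulsar's API):

```java
import java.util.HashMap;
import java.util.Map;

public class SequenceIdDeduplicationDemo {
    enum DupStatus { NOT_DUP, DUP, UNKNOWN }

    private final Map<String, Long> highestSequencedPushed = new HashMap<>();
    private final Map<String, Long> highestSequencedPersisted = new HashMap<>();

    synchronized DupStatus isDuplicate(String producerName, long sequenceId) {
        Long lastPushed = highestSequencedPushed.get(producerName);
        if (lastPushed != null && sequenceId <= lastPushed) {
            Long lastPersisted = highestSequencedPersisted.get(producerName);
            if (lastPersisted != null && sequenceId <= lastPersisted) {
                return DupStatus.DUP;       // definitely seen and persisted before
            }
            return DupStatus.UNKNOWN;       // pushed but not yet persisted: caller should retry later
        }
        highestSequencedPushed.put(producerName, sequenceId);
        return DupStatus.NOT_DUP;
    }

    synchronized void markPersisted(String producerName, long sequenceId) {
        highestSequencedPersisted.merge(producerName, sequenceId, Math::max);
    }

    public static void main(String[] args) {
        SequenceIdDeduplicationDemo dedup = new SequenceIdDeduplicationDemo();
        System.out.println(dedup.isDuplicate("producer-1", 1));  // NOT_DUP
        dedup.markPersisted("producer-1", 1);
        System.out.println(dedup.isDuplicate("producer-1", 2));  // NOT_DUP
        System.out.println(dedup.isDuplicate("producer-1", 1));  // DUP (already persisted)
        System.out.println(dedup.isDuplicate("producer-1", 2));  // UNKNOWN (pushed but not persisted)
    }
}
```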
hadoop_BaseService_getServer | /**
* Returns the server owning the service.
*
* @return the server owning the service.
*/
protected Server getServer() {
return server;
} | 3.68 |
hudi_HoodieCreateHandle_doWrite | /**
* Perform the actual writing of the given record into the backing file.
*/
@Override
protected void doWrite(HoodieRecord record, Schema schema, TypedProperties props) {
Option<Map<String, String>> recordMetadata = record.getMetadata();
try {
if (!HoodieOperation.isDelete(record.getOperation()) && !record.isDelete(schema, config.getProps())) {
if (record.shouldIgnore(schema, config.getProps())) {
return;
}
MetadataValues metadataValues = new MetadataValues().setFileName(path.getName());
HoodieRecord populatedRecord =
record.prependMetaFields(schema, writeSchemaWithMetaFields, metadataValues, config.getProps());
if (preserveMetadata) {
fileWriter.write(record.getRecordKey(), populatedRecord, writeSchemaWithMetaFields);
} else {
fileWriter.writeWithMetadata(record.getKey(), populatedRecord, writeSchemaWithMetaFields);
}
// Update the new location of record, so we know where to find it next
record.unseal();
record.setNewLocation(newRecordLocation);
record.seal();
recordsWritten++;
insertRecordsWritten++;
} else {
recordsDeleted++;
}
writeStatus.markSuccess(record, recordMetadata);
      // Deflate the record payload after recording success. This will help users access the
      // payload as part of marking the record successful.
record.deflate();
} catch (Throwable t) {
// Not throwing exception from here, since we don't want to fail the entire job
// for a single record
writeStatus.markFailure(record, t, recordMetadata);
LOG.error("Error writing record " + record, t);
}
} | 3.68 |
hbase_HttpServerUtil_constrainHttpMethods | /**
* Add constraints to a Jetty Context to disallow undesirable Http methods.
* @param ctxHandler The context to modify
* @param allowOptionsMethod if true then OPTIONS method will not be set in constraint mapping
*/
public static void constrainHttpMethods(ServletContextHandler ctxHandler,
boolean allowOptionsMethod) {
Constraint c = new Constraint();
c.setAuthenticate(true);
ConstraintMapping cmt = new ConstraintMapping();
cmt.setConstraint(c);
cmt.setMethod("TRACE");
cmt.setPathSpec("/*");
ConstraintSecurityHandler securityHandler = new ConstraintSecurityHandler();
if (!allowOptionsMethod) {
ConstraintMapping cmo = new ConstraintMapping();
cmo.setConstraint(c);
cmo.setMethod("OPTIONS");
cmo.setPathSpec("/*");
securityHandler.setConstraintMappings(new ConstraintMapping[] { cmt, cmo });
} else {
securityHandler.setConstraintMappings(new ConstraintMapping[] { cmt });
}
ctxHandler.setSecurityHandler(securityHandler);
} | 3.68 |
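A short usage sketch (the Jetty context set-up here is an assumption, not taken from the snippet): the helper is applied to the context before the server starts, so TRACE, and optionally OPTIONS, requests are rejected on every path.
    // Sketch only: context wiring is illustrative.
    ServletContextHandler context = new ServletContextHandler(ServletContextHandler.SESSIONS);
    context.setContextPath("/");
    // Disallow TRACE and OPTIONS on all paths.
    HttpServerUtil.constrainHttpMethods(context, /* allowOptionsMethod */ false);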
hibernate-validator_SizeValidatorForArraysOfLong_isValid | /**
* Checks the number of entries in an array.
*
* @param array The array to validate.
* @param constraintValidatorContext context in which the constraint is evaluated.
*
* @return Returns {@code true} if the array is {@code null} or the number of entries in
* {@code array} is between the specified {@code min} and {@code max} values (inclusive),
* {@code false} otherwise.
*/
@Override
public boolean isValid(long[] array, ConstraintValidatorContext constraintValidatorContext) {
if ( array == null ) {
return true;
}
return array.length >= min && array.length <= max;
} | 3.68 |
framework_VAbstractOrderedLayout_setMargin | /**
* Set the margin of the layout.
*
* @param marginInfo
* The margin information
*/
public void setMargin(MarginInfo marginInfo) {
if (marginInfo != null) {
setStyleName("v-margin-top", marginInfo.hasTop());
setStyleName("v-margin-right", marginInfo.hasRight());
setStyleName("v-margin-bottom", marginInfo.hasBottom());
setStyleName("v-margin-left", marginInfo.hasLeft());
}
} | 3.68 |
flink_JobClient_stopWithSavepoint | /**
* Stops the associated job on Flink cluster.
*
 * <p>Stopping works only for streaming programs. Be aware that the job might continue to run
 * for a while after the stop command has been sent, because all operators still need to finish
 * processing after the sources have stopped emitting data.
*
* @param advanceToEndOfEventTime flag indicating if the source should inject a {@code
* MAX_WATERMARK} in the pipeline
* @param savepointDirectory directory the savepoint should be written to
* @return a {@link CompletableFuture} containing the path where the savepoint is located
* @deprecated pass the format explicitly
*/
@Deprecated
default CompletableFuture<String> stopWithSavepoint(
boolean advanceToEndOfEventTime, @Nullable String savepointDirectory) {
return stopWithSavepoint(
advanceToEndOfEventTime, savepointDirectory, SavepointFormatType.DEFAULT);
} | 3.68 |
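A brief usage sketch (the savepoint directory is a placeholder): the returned future completes with the savepoint path once the job has stopped.
    // Sketch: the target directory is an assumption for illustration.
    static String stopJob(JobClient client) throws Exception {
        CompletableFuture<String> savepoint =
                client.stopWithSavepoint(false, "file:///tmp/savepoints");
        return savepoint.get(); // blocks until the savepoint has been written
    }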
hadoop_DynamicIOStatistics_addMinimumFunction | /**
 * Add a mapping of a key to a minimum function.
* @param key the key
* @param eval the evaluator
*/
void addMinimumFunction(String key, Function<String, Long> eval) {
minimums.addFunction(key, eval);
} | 3.68 |
hibernate-validator_AnnotationApiHelper_determineUnwrapMode | /**
* Checks the annotation's payload for unwrapping option ({@code jakarta.validation.valueextraction.Unwrapping.Unwrap},
* {@code jakarta.validation.valueextraction.Unwrapping.Skip}) of constraint.
*
* @param annotationMirror constraint annotation mirror under check
* @return unwrapping option, if one is present in the annotation payload, {@link UnwrapMode#NONE} otherwise
*/
public UnwrapMode determineUnwrapMode(AnnotationMirror annotationMirror) {
return getAnnotationArrayValue( annotationMirror, "payload" ).stream()
.map( AnnotationValue::getValue )
.map( type -> (TypeMirror) type )
.map( typeUtils::asElement )
.map( elem -> ( (TypeElement) elem ).getQualifiedName() )
.filter( name -> name.toString().startsWith( "jakarta.validation.valueextraction.Unwrapping." ) )
.map( UnwrapMode::of )
.findAny().orElse( UnwrapMode.NONE );
} | 3.68 |
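For context, a hedged sketch of the kind of declarations this method inspects (the class and fields are hypothetical): the unwrapping option travels in the constraint's payload attribute.
import jakarta.validation.constraints.Min;
import jakarta.validation.constraints.NotNull;
import jakarta.validation.valueextraction.Unwrapping;
import java.util.OptionalInt;

public class Order {
    // Validate the wrapped int value rather than the OptionalInt itself.
    @Min(value = 1, payload = Unwrapping.Unwrap.class)
    private OptionalInt quantity;

    // Validate the OptionalInt wrapper itself, skipping value extraction.
    @NotNull(payload = Unwrapping.Skip.class)
    private OptionalInt discount;
}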
framework_AbstractListing_readItem | /**
* Reads an Item from a design and inserts it into the data source.
* <p>
* Doesn't care about selection/value (if any).
*
* @param child
* a child element representing the item
* @param context
* the DesignContext instance used in parsing
* @return the item id of the new item
*
* @throws DesignException
* if the tag name of the {@code child} element is not
* {@code option}.
*/
@SuppressWarnings({ "rawtypes", "unchecked" })
protected T readItem(Element child, DesignContext context) {
if (!"option".equals(child.tagName())) {
throw new DesignException("Unrecognized child element in "
+ getClass().getSimpleName() + ": " + child.tagName());
}
String serializedItem = "";
String caption = DesignFormatter.decodeFromTextNode(child.html());
if (child.hasAttr("item")) {
serializedItem = child.attr("item");
}
T item = deserializeDeclarativeRepresentation(serializedItem);
ItemCaptionGenerator<T> captionGenerator = getItemCaptionGenerator();
if (captionGenerator instanceof DeclarativeCaptionGenerator) {
((DeclarativeCaptionGenerator) captionGenerator).setCaption(item,
caption);
} else {
throw new IllegalStateException(String.format("Don't know how "
+ "to set caption using current caption generator '%s'",
captionGenerator.getClass().getName()));
}
IconGenerator<T> iconGenerator = getItemIconGenerator();
if (child.hasAttr("icon")) {
if (iconGenerator instanceof DeclarativeIconGenerator) {
((DeclarativeIconGenerator) iconGenerator).setIcon(item,
DesignAttributeHandler.readAttribute("icon",
child.attributes(), Resource.class));
} else {
throw new IllegalStateException(String.format("Don't know how "
+ "to set icon using current caption generator '%s'",
iconGenerator.getClass().getName()));
}
}
return item;
} | 3.68 |
druid_DruidAbstractDataSource_getConnectTimeout | /**
* @since 1.2.12
*/
public int getConnectTimeout() {
return connectTimeout;
} | 3.68 |
hbase_Procedure_setTimeoutFailure | /**
* Called by the ProcedureExecutor when the timeout set by setTimeout() is expired.
* <p/>
* Another usage for this method is to implement retrying. A procedure can set the state to
* {@code WAITING_TIMEOUT} by calling {@code setState} method, and throw a
 * {@link ProcedureSuspendedException} to halt the execution of the procedure, and do not forget to
 * call the {@link #setTimeout(int)} method to set the timeout. You should also override this
 * method to wake up the procedure, and return false to tell the ProcedureExecutor that the
 * timeout event has been handled.
* @return true to let the framework handle the timeout as abort, false in case the procedure
* handled the timeout itself.
*/
protected synchronized boolean setTimeoutFailure(TEnvironment env) {
if (state == ProcedureState.WAITING_TIMEOUT) {
long timeDiff = EnvironmentEdgeManager.currentTime() - lastUpdate;
setFailure("ProcedureExecutor",
new TimeoutIOException("Operation timed out after " + StringUtils.humanTimeDiff(timeDiff)));
return true;
}
return false;
} | 3.68 |
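A minimal retry sketch following the pattern described in the javadoc (the environment type, helper method and scheduler call are assumptions): execute() parks the procedure with a timeout, and the overridden setTimeoutFailure() wakes it up instead of aborting.
    // Hypothetical subclass of Procedure<MyEnv> illustrating the retry pattern.
    @Override
    protected Procedure<MyEnv>[] execute(MyEnv env) throws ProcedureSuspendedException {
        if (!resourceReady(env)) {                      // assumed helper
            setTimeout(5000);                           // retry after 5 seconds
            setState(ProcedureState.WAITING_TIMEOUT);   // park the procedure
            throw new ProcedureSuspendedException();    // halt execution for now
        }
        return null; // work is done
    }

    @Override
    protected synchronized boolean setTimeoutFailure(MyEnv env) {
        setState(ProcedureState.RUNNABLE);              // wake the procedure up
        env.getProcedureScheduler().addFront(this);     // re-schedule it (assumed env API)
        return false;                                   // timeout handled; do not abort
    }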
hudi_HoodieMetaSyncOperations_dropPartitions | /**
* Drop partitions from the table in metastore.
*/
default void dropPartitions(String tableName, List<String> partitionsToDrop) {
} | 3.68 |
shardingsphere-elasticjob_QueryParameterMap_get | /**
* Get values by parameter name.
*
* @param parameterName parameter name
* @return values
*/
public List<String> get(final String parameterName) {
return queryMap.get(parameterName);
} | 3.68 |
hbase_SimplePositionedMutableByteRange_setOffset | /**
* Update the beginning of this range. {@code offset + length} may not be greater than
 * {@code bytes.length}. Resets {@code position} to 0.
 * @param offset the new start of this range.
* @return this.
*/
@Override
public PositionedByteRange setOffset(int offset) {
this.position = 0;
super.setOffset(offset);
return this;
} | 3.68 |
flink_CachingAsyncLookupFunction_updateLatestLoadTime | // --------------------------------- Helper functions ----------------------------
private synchronized void updateLatestLoadTime(long loadTime) {
if (latestLoadTime == UNINITIALIZED) {
cacheMetricGroup.latestLoadTimeGauge(() -> latestLoadTime);
}
latestLoadTime = loadTime;
} | 3.68 |
flink_TestUtils_readCsvResultFiles | /** Read all files under the specified path. */
public static List<String> readCsvResultFiles(Path path) throws IOException {
File filePath = path.toFile();
// list all the non-hidden files
File[] csvFiles = filePath.listFiles((dir, name) -> !name.startsWith("."));
List<String> result = new ArrayList<>();
if (csvFiles != null) {
for (File file : csvFiles) {
result.addAll(Files.readAllLines(file.toPath()));
}
}
return result;
} | 3.68 |
framework_DesignContext_createElement | /**
* Creates an html tree node corresponding to the given element. Also
* initializes its attributes by calling writeDesign. As a result of the
* writeDesign() call, this method creates the entire subtree rooted at the
* returned Node.
*
* @param childComponent
* The component with state that is written in to the node
* @return An html tree node corresponding to the given component. The tag
* name of the created node is derived from the class name of
* childComponent.
*/
public Element createElement(Component childComponent) {
ComponentMapper componentMapper = Design.getComponentMapper();
String tagName = componentMapper.componentToTag(childComponent, this);
Element newElement = doc.createElement(tagName);
childComponent.writeDesign(newElement, this);
// Handle the local id. Global id and caption should have been taken
// care of by writeDesign.
String localId = componentToLocalId.get(childComponent);
if (localId != null) {
newElement.attr(LOCAL_ID_ATTRIBUTE, localId);
}
return newElement;
} | 3.68 |
hbase_RootProcedureState_unsetRollback | /**
* Called by the ProcedureExecutor to mark rollback execution
*/
protected synchronized void unsetRollback() {
assert state == State.ROLLINGBACK;
state = State.FAILED;
} | 3.68 |
flink_DefaultPackagedProgramRetriever_create | /**
* Creates a {@code PackageProgramRetrieverImpl} with the given parameters.
*
* @param userLibDir The user library directory that is used for generating the user classpath
* if specified. The system classpath is used if not specified.
* @param jarFile The jar archive expected to contain the job class included; {@code null} if
* the job class is on the system classpath.
* @param jobClassName The job class to use; if {@code null} the user classpath (or, if not set,
* the system classpath) will be scanned for possible main class.
* @param programArgs The program arguments.
* @param configuration The Flink configuration for the given job.
* @return The {@code PackageProgramRetrieverImpl} that can be used to create a {@link
* PackagedProgram} instance.
* @throws FlinkException If something goes wrong during instantiation.
*/
public static DefaultPackagedProgramRetriever create(
@Nullable File userLibDir,
@Nullable File jarFile,
@Nullable String jobClassName,
String[] programArgs,
Configuration configuration)
throws FlinkException {
List<URL> userClasspaths;
try {
final List<URL> classpathsFromUserLibDir = getClasspathsFromUserLibDir(userLibDir);
final List<URL> classpathsFromConfiguration =
getClasspathsFromConfiguration(configuration);
final List<URL> classpaths = new ArrayList<>();
classpaths.addAll(classpathsFromUserLibDir);
classpaths.addAll(classpathsFromConfiguration);
userClasspaths = Collections.unmodifiableList(classpaths);
} catch (IOException e) {
throw new FlinkException("An error occurred while extracting the user classpath.", e);
}
final EntryClassInformationProvider entryClassInformationProvider =
createEntryClassInformationProvider(
userLibDir == null ? null : userClasspaths,
jarFile,
jobClassName,
programArgs);
return new DefaultPackagedProgramRetriever(
entryClassInformationProvider, programArgs, userClasspaths, configuration);
} | 3.68 |
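A usage sketch with placeholder paths and class name (every value here is an assumption): building a retriever for a job jar plus a user library directory and extracting the packaged program from it.
    // Sketch: paths, main class and program arguments are illustrative only.
    Configuration flinkConfig = new Configuration();
    DefaultPackagedProgramRetriever retriever =
            DefaultPackagedProgramRetriever.create(
                    new File("/opt/flink/usrlib"),       // user library directory
                    new File("/opt/flink/job/job.jar"),  // job jar
                    "com.example.MyStreamingJob",        // main class, or null to auto-detect
                    new String[] {"--input", "s3://bucket/in"},
                    flinkConfig);
    PackagedProgram program = retriever.getPackagedProgram();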
hbase_CacheConfig_shouldCacheBlockOnRead | /**
* Should we cache a block of a particular category? We always cache important blocks such as
* index blocks, as long as the block cache is available.
*/
public boolean shouldCacheBlockOnRead(BlockCategory category) {
return cacheDataOnRead || category == BlockCategory.INDEX || category == BlockCategory.BLOOM
|| (prefetchOnOpen && (category != BlockCategory.META && category != BlockCategory.UNKNOWN));
} | 3.68 |
framework_ComboBoxElement_getPopupSuggestions | /**
* Gets the text representation of all suggestions on the current page.
*
* @return List of suggestion texts
*/
public List<String> getPopupSuggestions() {
List<String> suggestionsTexts = new ArrayList<>();
List<WebElement> suggestions = getPopupSuggestionElements();
for (WebElement suggestion : suggestions) {
String text = suggestion.getText();
if (!text.isEmpty()) {
suggestionsTexts.add(text);
}
}
return suggestionsTexts;
} | 3.68 |
hbase_RegionMover_ack | /**
* Set ack/noAck mode.
* <p>
   * In ack mode regions are acknowledged before and after moving, and the move is retried
   * hbase.move.retries.max times; if unsuccessful we quit with exit code 1. No Ack mode is a
   * best-effort mode: each region movement is tried once. This can be used during graceful
   * shutdown as even if we have a stuck region, upon shutdown it'll be reassigned anyway.
* <p>
* @return RegionMoverBuilder object
*/
public RegionMoverBuilder ack(boolean ack) {
this.ack = ack;
return this;
} | 3.68 |
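A hedged builder sketch (the host name, constructor arguments and builder options are assumptions): ack mode enabled, so every region move is verified and retried up to the configured maximum before giving up.
    // Sketch: constructor arguments and options are assumptions for illustration.
    static void drain() throws Exception {
        RegionMover.RegionMoverBuilder builder =
                new RegionMover.RegionMoverBuilder("regionserver-1.example.com:16020")
                        .ack(true)       // verify each move and retry on failure
                        .maxthreads(8);  // assumed option for parallel moves
        try (RegionMover mover = builder.build()) {
            mover.unload();              // drain regions off the server
        }
    }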
dubbo_ConfigUtils_checkFileNameExist | /**
     * Check whether the given fileName can be found in the filesystem.
     *
     * @param fileName the file name (path) to check
     * @return true if the file exists, false otherwise
*/
private static boolean checkFileNameExist(String fileName) {
File file = new File(fileName);
return file.exists();
} | 3.68 |