name (string, length 12 to 178) | code_snippet (string, length 8 to 36.5k) | score (float64, 3.26 to 3.68) |
---|---|---|
hbase_ReplicationSourceWALReader_addEntryToBatch | // returns true if we reach the size limit for the batch, i.e., we need to finish the batch and return.
protected final boolean addEntryToBatch(WALEntryBatch batch, Entry entry) {
WALEdit edit = entry.getEdit();
if (edit == null || edit.isEmpty()) {
LOG.trace("Edit null or empty for entry {} ", entry);
return false;
}
LOG.trace("updating TimeStampOfLastAttempted to {}, from entry {}, for source queue: {}",
entry.getKey().getWriteTime(), entry.getKey(), this.source.getQueueId());
updateReplicationMarkerEdit(entry, batch.getLastWalPosition());
long entrySize = getEntrySizeIncludeBulkLoad(entry);
batch.addEntry(entry, entrySize);
updateBatchStats(batch, entry, entrySize);
boolean totalBufferTooLarge = this.getSourceManager().acquireWALEntryBufferQuota(batch, entry);
// Stop if too many entries or too big
return totalBufferTooLarge || batch.getHeapSize() >= replicationBatchSizeCapacity
|| batch.getNbEntries() >= replicationBatchCountCapacity;
} | 3.68 |
flink_ExtractionUtils_collectAnnotationsOfMethod | /**
* Collects all annotations of the given type defined in the given method. Duplicates are
* ignored.
*/
static <T extends Annotation> Set<T> collectAnnotationsOfMethod(
Class<T> annotation, Method annotatedMethod) {
return new LinkedHashSet<>(Arrays.asList(annotatedMethod.getAnnotationsByType(annotation)));
} | 3.68 |
hadoop_IOStatisticsStoreImpl_getCounterReference | /**
* Get a reference to the atomic instance providing the
* value for a specific counter. This is useful if
* the value is passed around.
* @param key statistic name
* @return the reference
* @throws NullPointerException if there is no entry of that name
*/
@Override
public AtomicLong getCounterReference(String key) {
return lookup(counterMap, key);
} | 3.68 |
AreaShop_BuyRegion_getFormattedMoneyBackAmount | /**
 * Get the formatted string of the moneyBack amount.
* @return String with currency symbols and proper fractional part
*/
public String getFormattedMoneyBackAmount() {
return Utils.formatCurrency(getMoneyBackAmount());
} | 3.68 |
hudi_TableChanges_renameColumn | /**
* Rename a column in the schema.
*
* @param name name of the column to rename
* @param newName new name for the column
* @return this
* @throws IllegalArgumentException
*/
public ColumnUpdateChange renameColumn(String name, String newName) {
checkColModifyIsLegal(name);
Types.Field field = internalSchema.findField(name);
if (field == null) {
throw new IllegalArgumentException(String.format("cannot update a missing column: %s", name));
}
if (newName == null || newName.isEmpty()) {
throw new IllegalArgumentException(String.format("cannot rename column: %s to empty", name));
}
if (internalSchema.hasColumn(newName, caseSensitive)) {
      throw new IllegalArgumentException(String.format("cannot rename column: %s to an existing name", name));
}
// save update info
Types.Field update = updates.get(field.fieldId());
if (update == null) {
updates.put(field.fieldId(), Types.Field.get(field.fieldId(), field.isOptional(), newName, field.type(), field.doc()));
} else {
updates.put(field.fieldId(), Types.Field.get(field.fieldId(), update.isOptional(), newName, update.type(), update.doc()));
}
return this;
} | 3.68 |
hbase_CompactionTool_doClient | /**
   * Execute compaction, from this client, one path at a time.
*/
private int doClient(final FileSystem fs, final Set<Path> toCompactDirs,
final boolean compactOnce, final boolean major) throws IOException {
CompactionWorker worker = new CompactionWorker(fs, getConf());
for (Path path : toCompactDirs) {
worker.compact(path, compactOnce, major);
}
return 0;
} | 3.68 |
flink_ClearJoinHintsWithInvalidPropagationShuttle_getInvalidJoinHint | /**
* Get the invalid join hint in this node.
*
     * <p>The invalid join hint meets the following requirements:
     *
     * <p>1. Its hint name is the same as that of the join hint that needs to be removed.
     *
     * <p>2. Its length is the same as the length of propagating this removed join hint.
     *
     * <p>3. Its inherited path matches the inherited path of this removed join hint.
*
* @param hints all hints
     * @return the invalid join hint if it exists, otherwise empty
*/
private Optional<RelHint> getInvalidJoinHint(List<RelHint> hints) {
for (RelHint hint : hints) {
if (hint.hintName.equals(joinHintNeedRemove.hintName)
&& isMatchInvalidInheritPath(
new ArrayList<>(currentInheritPath), hint.inheritPath)) {
return Optional.of(hint);
}
}
return Optional.empty();
} | 3.68 |
flink_DateTimeUtils_toSQLTimestamp | /**
* Converts the internal representation of a SQL TIMESTAMP (long) to the Java type used for UDF
* parameters ({@link java.sql.Timestamp}).
*/
public static java.sql.Timestamp toSQLTimestamp(long v) {
return new java.sql.Timestamp(v - LOCAL_TZ.getOffset(v));
} | 3.68 |
flink_OperatorSnapshotFutures_cancel | /** @return discarded state size (if available). */
public Tuple2<Long, Long> cancel() throws Exception {
List<Tuple2<Future<? extends StateObject>, String>> pairs = new ArrayList<>();
pairs.add(new Tuple2<>(getKeyedStateManagedFuture(), "managed keyed"));
        pairs.add(new Tuple2<>(getKeyedStateRawFuture(), "raw keyed"));
        pairs.add(new Tuple2<>(getOperatorStateManagedFuture(), "managed operator"));
pairs.add(new Tuple2<>(getOperatorStateRawFuture(), "raw operator"));
pairs.add(new Tuple2<>(getInputChannelStateFuture(), "input channel"));
pairs.add(new Tuple2<>(getResultSubpartitionStateFuture(), "result subpartition"));
final long[] sizeTuple = new long[2];
try (Closer closer = Closer.create()) {
for (Tuple2<Future<? extends StateObject>, String> pair : pairs) {
closer.register(
() -> {
try {
Tuple2<Long, Long> tuple = discardStateFuture(pair.f0);
sizeTuple[0] += tuple.f0;
sizeTuple[1] += tuple.f1;
} catch (Exception e) {
throw new RuntimeException(
String.format(
"Could not properly cancel %s state future",
pair.f1),
e);
}
});
}
}
return Tuple2.of(sizeTuple[0], sizeTuple[1]);
} | 3.68 |
hbase_QuotaSettings_buildSetQuotaRequestProto | /**
* Convert a QuotaSettings to a protocol buffer SetQuotaRequest. This is used internally by the
* Admin client to serialize the quota settings and send them to the master.
*/
@InterfaceAudience.Private
public static SetQuotaRequest buildSetQuotaRequestProto(final QuotaSettings settings) {
SetQuotaRequest.Builder builder = SetQuotaRequest.newBuilder();
if (settings.getUserName() != null) {
builder.setUserName(settings.getUserName());
}
if (settings.getTableName() != null) {
builder.setTableName(ProtobufUtil.toProtoTableName(settings.getTableName()));
}
if (settings.getNamespace() != null) {
builder.setNamespace(settings.getNamespace());
}
if (settings.getRegionServer() != null) {
builder.setRegionServer(settings.getRegionServer());
}
settings.setupSetQuotaRequest(builder);
return builder.build();
} | 3.68 |
graphhopper_LandmarkStorage_getMinimumNodes | /**
* @see #setMinimumNodes(int)
*/
public int getMinimumNodes() {
return minimumNodes;
} | 3.68 |
flink_BatchShuffleReadBufferPool_initialize | /** Initializes this buffer pool which allocates all the buffers. */
public void initialize() {
synchronized (buffers) {
checkState(!destroyed, "Buffer pool is already destroyed.");
if (initialized) {
return;
}
initialized = true;
try {
for (int i = 0; i < numTotalBuffers; ++i) {
buffers.add(MemorySegmentFactory.allocateUnpooledOffHeapMemory(bufferSize));
}
} catch (OutOfMemoryError outOfMemoryError) {
int allocated = buffers.size();
buffers.forEach(MemorySegment::free);
buffers.clear();
throw new OutOfMemoryError(
String.format(
"Can't allocate enough direct buffer for batch shuffle read buffer "
+ "pool (bytes allocated: %d, bytes still needed: %d). To "
+ "avoid the exception, you need to do one of the following"
+ " adjustments: 1) If you have ever decreased %s, you need"
+ " to undo the decrement; 2) If you ever increased %s, you"
+ " should also increase %s; 3) If neither the above cases,"
+ " it usually means some other parts of your application "
+ "have consumed too many direct memory and the value of %s"
+ " should be increased.",
allocated * bufferSize,
(numTotalBuffers - allocated) * bufferSize,
TaskManagerOptions.FRAMEWORK_OFF_HEAP_MEMORY.key(),
TaskManagerOptions.NETWORK_BATCH_SHUFFLE_READ_MEMORY.key(),
TaskManagerOptions.FRAMEWORK_OFF_HEAP_MEMORY.key(),
TaskManagerOptions.TASK_OFF_HEAP_MEMORY.key()));
}
}
LOG.info(
"Batch shuffle IO buffer pool initialized: numBuffers={}, bufferSize={}.",
numTotalBuffers,
bufferSize);
} | 3.68 |
streampipes_DataSinkBuilder_create | /**
   * Creates a new data sink using the builder pattern. If no label and description are given
* for an element,
* {@link org.apache.streampipes.sdk.builder.AbstractProcessingElementBuilder#withLocales(Locales...)}
* must be called.
*
* @param id A unique identifier of the new element, e.g., com.mycompany.sink.mynewdatasink
*/
public static DataSinkBuilder create(String id) {
return new DataSinkBuilder(id);
} | 3.68 |
hadoop_AbstractSchedulerPlanFollower_moveAppsInQueueSync | /**
* Move all apps in the set of queues to the parent plan queue's default
* reservation queue in a synchronous fashion
*/
private void moveAppsInQueueSync(String expiredReservation,
String defReservationQueue) {
List<ApplicationAttemptId> activeApps =
scheduler.getAppsInQueue(expiredReservation);
if (activeApps.isEmpty()) {
return;
}
for (ApplicationAttemptId app : activeApps) {
// fallback to parent's default queue
try {
scheduler.moveApplication(app.getApplicationId(), defReservationQueue);
} catch (YarnException e) {
LOG.warn(
"Encountered unexpected error during migration of application: {}"
+ " from reservation: {}",
app, expiredReservation, e);
}
}
} | 3.68 |
framework_TreeTable_addCollapseListener | /**
* Adds a collapse listener.
*
* @param listener
* the Listener to be added.
*/
public void addCollapseListener(CollapseListener listener) {
addListener(CollapseEvent.class, listener,
CollapseListener.COLLAPSE_METHOD);
} | 3.68 |
hbase_ExportSnapshot_createOutputPath | /**
* Create the output folder and optionally set ownership.
*/
private void createOutputPath(final Path path) throws IOException {
if (filesUser == null && filesGroup == null) {
outputFs.mkdirs(path);
} else {
Path parent = path.getParent();
if (!outputFs.exists(parent) && !parent.isRoot()) {
createOutputPath(parent);
}
outputFs.mkdirs(path);
if (filesUser != null || filesGroup != null) {
// override the owner when non-null user/group is specified
outputFs.setOwner(path, filesUser, filesGroup);
}
if (filesMode > 0) {
outputFs.setPermission(path, new FsPermission(filesMode));
}
}
} | 3.68 |
hudi_Key_write | /**
* Serialize the fields of this object to <code>out</code>.
*
   * @param out <code>DataOutput</code> to serialize this object into.
* @throws IOException
*/
public void write(DataOutput out) throws IOException {
out.writeInt(bytes.length);
out.write(bytes);
out.writeDouble(weight);
} | 3.68 |
hbase_PreemptiveFastFailException_getLastAttemptAt | /** Returns time of the latest attempt */
public long getLastAttemptAt() {
return timeOfLatestAttemptMilliSec;
} | 3.68 |
querydsl_JPAExpressions_selectDistinct | /**
* Create a new detached JPQLQuery instance with the given projection
*
* @param exprs projection
* @return select(distinct expr)
*/
public static JPQLQuery<Tuple> selectDistinct(Expression<?>... exprs) {
return new JPASubQuery<Void>().select(exprs).distinct();
} | 3.68 |
pulsar_AbstractAwsConnector_createCredentialProviderWithPlugin | /**
     * Creates an instance of a credential provider which can return {@link AWSCredentials} or {@link BasicAWSCredentials}
* based on IAM user/roles.
*
* @param pluginFQClassName
* @param param
* @return
* @throws IllegalArgumentException
*/
public static AwsCredentialProviderPlugin createCredentialProviderWithPlugin(String pluginFQClassName, String param)
throws IllegalArgumentException {
try {
Class<?> clazz = Class.forName(pluginFQClassName);
Constructor<?> ctor = clazz.getConstructor();
final AwsCredentialProviderPlugin plugin = (AwsCredentialProviderPlugin) ctor.newInstance(new Object[] {});
plugin.init(param);
return plugin;
} catch (Exception e) {
log.error("Failed to initialize AwsCredentialProviderPlugin {}", pluginFQClassName, e);
throw new IllegalArgumentException(
String.format("invalid authplugin name %s, failed to init %s", pluginFQClassName, e.getMessage()));
}
} | 3.68 |
streampipes_MqttUtils_extractQoSFromString | // remove non-digits
public static QoS extractQoSFromString(String s) {
int qos = Integer.parseInt(s.replaceAll("\\D+", ""));
switch (qos) {
case 0:
return QoS.AT_MOST_ONCE;
case 1:
return QoS.AT_LEAST_ONCE;
case 2:
return QoS.EXACTLY_ONCE;
}
throw new SpRuntimeException("Could not retrieve QoS level: QoS " + qos);
} | 3.68 |
hadoop_CSQueueStore_clear | /**
* Clears the store, removes all queue references.
*/
public void clear() {
try {
modificationLock.writeLock().lock();
fullNameQueues.clear();
shortNameToLongNames.clear();
getMap.clear();
} finally {
modificationLock.writeLock().unlock();
}
} | 3.68 |
querydsl_JTSGeometryExpressions_xmin | /**
* Returns X minima of a bounding box 2d or 3d or a geometry.
*
* @param expr geometry
* @return x minima
*/
public static NumberExpression<Double> xmin(JTSGeometryExpression<?> expr) {
return Expressions.numberOperation(Double.class, SpatialOps.XMIN, expr);
} | 3.68 |
hadoop_SelectBinding_xopt | /**
* Get an option with backslash arguments transformed.
* These are not trimmed, so whitespace is significant.
* @param selectOpts options in the select call
* @param fsConf filesystem conf
* @param base base option name
* @param defVal default value
* @return the transformed value
*/
static String xopt(Configuration selectOpts,
Configuration fsConf,
String base,
String defVal) {
return expandBackslashChars(
opt(selectOpts, fsConf, base, defVal, false));
} | 3.68 |
MagicPlugin_Base64Coder_encode | /**
* Encodes a byte array into Base64 format.
* No blanks or line breaks are inserted in the output.
*
* @param in An array containing the data bytes to be encoded.
* @param iOff Offset of the first byte in <code>in</code> to be processed.
* @param iLen Number of bytes to process in <code>in</code>, starting at <code>iOff</code>.
* @return A character array containing the Base64 encoded data.
*/
public static char[] encode(byte[] in, int iOff, int iLen) {
int oDataLen = (iLen * 4 + 2) / 3; // output length without padding
int oLen = ((iLen + 2) / 3) * 4; // output length including padding
char[] out = new char[oLen];
int ip = iOff;
int iEnd = iOff + iLen;
int op = 0;
while (ip < iEnd) {
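            // Pack up to three input bytes into four 6-bit values; missing bytes are treated as 0 and their output positions become '=' padding.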
int i0 = in[ip++] & 0xff;
int i1 = ip < iEnd ? in[ip++] & 0xff : 0;
int i2 = ip < iEnd ? in[ip++] & 0xff : 0;
int o0 = i0 >>> 2;
int o1 = ((i0 & 3) << 4) | (i1 >>> 4);
int o2 = ((i1 & 0xf) << 2) | (i2 >>> 6);
int o3 = i2 & 0x3F;
out[op++] = map1[o0];
out[op++] = map1[o1];
out[op] = op < oDataLen ? map1[o2] : '=';
op++;
out[op] = op < oDataLen ? map1[o3] : '=';
op++;
}
return out;
} | 3.68 |
hbase_FamilyFilter_parseFrom | /**
* Parse the serialized representation of {@link FamilyFilter}
* @param pbBytes A pb serialized {@link FamilyFilter} instance
* @return An instance of {@link FamilyFilter} made from <code>bytes</code>
* @throws DeserializationException if an error occurred
* @see #toByteArray
*/
public static FamilyFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.FamilyFilter proto;
try {
proto = FilterProtos.FamilyFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
final CompareOperator valueCompareOp =
CompareOperator.valueOf(proto.getCompareFilter().getCompareOp().name());
ByteArrayComparable valueComparator = null;
try {
if (proto.getCompareFilter().hasComparator()) {
valueComparator = ProtobufUtil.toComparator(proto.getCompareFilter().getComparator());
}
} catch (IOException ioe) {
throw new DeserializationException(ioe);
}
return new FamilyFilter(valueCompareOp, valueComparator);
} | 3.68 |
hudi_AvroSchemaCompatibility_getType | /**
* Gets the type of this result.
*
* @return the type of this result.
*/
public SchemaCompatibilityType getType() {
return mResult.getCompatibility();
} | 3.68 |
hadoop_SnappyDecompressor_getRemaining | /**
* Returns <code>0</code>.
*
* @return <code>0</code>.
*/
@Override
public int getRemaining() {
// Never use this function in BlockDecompressorStream.
return 0;
} | 3.68 |
zxing_OneDReader_patternMatchVariance | /**
* Determines how closely a set of observed counts of runs of black/white values matches a given
* target pattern. This is reported as the ratio of the total variance from the expected pattern
* proportions across all pattern elements, to the length of the pattern.
*
* @param counters observed counters
* @param pattern expected pattern
* @param maxIndividualVariance The most any counter can differ before we give up
* @return ratio of total variance between counters and pattern compared to total pattern size
*/
protected static float patternMatchVariance(int[] counters,
int[] pattern,
float maxIndividualVariance) {
int numCounters = counters.length;
int total = 0;
int patternLength = 0;
for (int i = 0; i < numCounters; i++) {
total += counters[i];
patternLength += pattern[i];
}
if (total < patternLength) {
// If we don't even have one pixel per unit of bar width, assume this is too small
// to reliably match, so fail:
return Float.POSITIVE_INFINITY;
}
float unitBarWidth = (float) total / patternLength;
maxIndividualVariance *= unitBarWidth;
float totalVariance = 0.0f;
for (int x = 0; x < numCounters; x++) {
int counter = counters[x];
float scaledPattern = pattern[x] * unitBarWidth;
float variance = counter > scaledPattern ? counter - scaledPattern : scaledPattern - counter;
if (variance > maxIndividualVariance) {
return Float.POSITIVE_INFINITY;
}
totalVariance += variance;
}
return totalVariance / total;
} | 3.68 |
flink_CrossOperator_projectTuple10 | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>
ProjectCross<I1, I2, Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>>
projectTuple10() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>> tType =
new TupleTypeInfo<Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>>(fTypes);
return new ProjectCross<I1, I2, Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>>(
this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.68 |
flink_DynamicSourceUtils_pushMetadataProjection | /**
* Creates a projection that reorders physical and metadata columns according to the given
* schema. It casts metadata columns into the expected data type to be accessed by computed
* columns in the next step. Computed columns are ignored here.
*
* @see SupportsReadingMetadata
*/
private static void pushMetadataProjection(FlinkRelBuilder relBuilder, ResolvedSchema schema) {
final RexBuilder rexBuilder = relBuilder.getRexBuilder();
final List<String> fieldNames =
schema.getColumns().stream()
.filter(c -> !(c instanceof ComputedColumn))
.map(Column::getName)
.collect(Collectors.toList());
final List<RexNode> fieldNodes =
schema.getColumns().stream()
.filter(c -> !(c instanceof ComputedColumn))
.map(
c -> {
final RelDataType relDataType =
relBuilder
.getTypeFactory()
.createFieldTypeFromLogicalType(
c.getDataType().getLogicalType());
if (c instanceof MetadataColumn) {
final MetadataColumn metadataColumn = (MetadataColumn) c;
String columnName = metadataColumn.getName();
return rexBuilder.makeAbstractCast(
relDataType, relBuilder.field(columnName));
} else {
return relBuilder.field(c.getName());
}
})
.collect(Collectors.toList());
relBuilder.projectNamed(fieldNodes, fieldNames, true);
} | 3.68 |
querydsl_AbstractMongodbQuery_join | /**
* Define a join
*
* @param ref reference
* @param target join target
* @return join builder
*/
public <T> JoinBuilder<Q, K,T> join(CollectionPathBase<?,T,?> ref, Path<T> target) {
return new JoinBuilder<Q, K,T>(queryMixin, ref, target);
} | 3.68 |
hbase_WALEntryStream_close | /**
* {@inheritDoc}
*/
@Override
public void close() {
closeReader();
} | 3.68 |
pulsar_StructuredEventLog_get | /**
* Create a new logger object, from which root events can be created.
*/
static StructuredEventLog get() {
return Initializer.get();
} | 3.68 |
framework_HierarchyMapper_getIndexOf | /**
* Finds the current index of given object. This is based on a search in
* flattened version of the hierarchy.
*
* @param target
* the target object to find
* @return optional index of given object
*/
public Optional<Integer> getIndexOf(T target) {
if (target == null) {
return Optional.empty();
}
final List<Object> collect = getHierarchy(null).map(provider::getId)
.collect(Collectors.toList());
int index = collect.indexOf(getDataProvider().getId(target));
return Optional.ofNullable(index < 0 ? null : index);
} | 3.68 |
hadoop_FederationMembershipStateStoreInputValidator_checkSubClusterInfo | /**
* Validate if all the required fields on {@link SubClusterInfo} are present
* or not. {@code Capability} will be empty as the corresponding
* {@code ResourceManager} is in the process of initialization during
* registration.
*
* @param subClusterInfo the information of the subcluster to be verified
* @throws FederationStateStoreInvalidInputException if the SubCluster Info
* are invalid
*/
public static void checkSubClusterInfo(SubClusterInfo subClusterInfo)
throws FederationStateStoreInvalidInputException {
if (subClusterInfo == null) {
String message = "Missing SubCluster Information."
+ " Please try again by specifying SubCluster Information.";
LOG.warn(message);
throw new FederationStateStoreInvalidInputException(message);
}
// validate subcluster id
checkSubClusterId(subClusterInfo.getSubClusterId());
// validate AMRM Service address
checkAddress(subClusterInfo.getAMRMServiceAddress());
// validate ClientRM Service address
checkAddress(subClusterInfo.getClientRMServiceAddress());
// validate RMClient Service address
checkAddress(subClusterInfo.getRMAdminServiceAddress());
// validate RMWeb Service address
checkAddress(subClusterInfo.getRMWebServiceAddress());
// validate last heartbeat timestamp
checkTimestamp(subClusterInfo.getLastHeartBeat());
// validate last start timestamp
checkTimestamp(subClusterInfo.getLastStartTime());
// validate subcluster state
checkSubClusterState(subClusterInfo.getState());
} | 3.68 |
morf_FieldLiteral_fromObject | /**
* Constructs a {@linkplain FieldLiteral} from a specified object.
*
* @param object the object to construct the {@linkplain FieldLiteral} from
* @return the new {@linkplain FieldLiteral}
*/
public static FieldLiteral fromObject(Object object) {
if (object instanceof String) {
return new FieldLiteral((String) object);
}
if (object instanceof Double) {
return new FieldLiteral((Double) object);
}
if (object instanceof Integer) {
return new FieldLiteral((Integer) object);
}
if (object instanceof Character) {
return new FieldLiteral((Character) object);
}
return new FieldLiteral(object.toString());
} | 3.68 |
zxing_EmailDoCoMoResultParser_isBasicallyValidEmailAddress | /**
* This implements only the most basic checking for an email address's validity -- that it contains
* an '@' and contains no characters disallowed by RFC 2822. This is an overly lenient definition of
* validity. We want to generally be lenient here since this class is only intended to encapsulate what's
* in a barcode, not "judge" it.
*/
static boolean isBasicallyValidEmailAddress(String email) {
return email != null && ATEXT_ALPHANUMERIC.matcher(email).matches() && email.indexOf('@') >= 0;
} | 3.68 |
hadoop_AzureFileSystemInstrumentation_blockUploaded | /**
* Indicate that we just uploaded a block and record its latency.
* @param latency The latency in milliseconds.
*/
public void blockUploaded(long latency) {
currentBlockUploadLatency.addPoint(latency);
} | 3.68 |
framework_DragSourceExtension_setPayload | /**
* Sets payload for this drag source to use with acceptance criterion. The
* payload is transferred as data type in the data transfer object in the
* following format: {@code "v-item:double:key:value"}. The given value is
* compared to the criterion value when the drag source is dragged on top of
* a drop target that has the suitable criterion.
* <p>
* Note that setting payload in Internet Explorer 11 is not possible due to
* the browser's limitations.
*
* @param key
* key of the payload to be transferred
* @param value
* value of the payload to be transferred
* @see DropTargetExtension#setDropCriterion(String,
* com.vaadin.shared.ui.dnd.criteria.ComparisonOperator, double)
* DropTargetExtension#setDropCriterion(String, ComparisonOperator,
* double)
*/
public void setPayload(String key, double value) {
setPayload(key, String.valueOf(value), Payload.ValueType.DOUBLE);
} | 3.68 |
graphhopper_GHLongLongBTree_getCapacity | /**
* @return used bytes
*/
long getCapacity() {
long cap = keys.length * (8 + 4) + 3 * 12 + 4 + 1;
if (!isLeaf) {
cap += children.length * 4;
for (int i = 0; i < children.length; i++) {
if (children[i] != null) {
cap += children[i].getCapacity();
}
}
}
return cap;
} | 3.68 |
framework_VScrollTable_getMaxIndent | /**
* This method exists for the needs of {@link VTreeTable} only. May be
* removed or replaced in the future.<br>
* <br>
     * Returns the maximum indent of the hierarchy column, if applicable.
*
* @see VScrollTable#getHierarchyColumnIndex()
*
* @return maximum indent in pixels
*/
protected int getMaxIndent() {
return 0;
} | 3.68 |
framework_LegacyCommunicationManager_getDependencies | /**
* @deprecated As of 7.1. See #11413.
*/
@Deprecated
public Map<String, Class<?>> getDependencies() {
return publishedFileContexts;
} | 3.68 |
dubbo_SingleRouterChain_addRouters | /**
     * If we use the route:// protocol in versions before 2.7.0, each URL will generate a Router instance, so we should
     * keep the routers up to date; that is, each time the router URLs change, we should update the routers list, only
* keep the builtinRouters which are available all the time and the latest notified routers which are generated
* from URLs.
*
* @param routers routers from 'router://' rules in 2.6.x or before.
*/
public void addRouters(List<Router> routers) {
List<Router> newRouters = new LinkedList<>();
newRouters.addAll(builtinRouters);
newRouters.addAll(routers);
CollectionUtils.sort(newRouters);
this.routers = newRouters;
} | 3.68 |
querydsl_SQLExpressions_firstValue | /**
     * Returns the value evaluated at the row that is the first row of the window frame.
*
* @param expr argument
* @return first_value(expr)
*/
public static <T> WindowOver<T> firstValue(Expression<T> expr) {
return new WindowOver<T>(expr.getType(), SQLOps.FIRSTVALUE, expr);
} | 3.68 |
AreaShop_GeneralRegion_getLimitingFactor | /**
* Get the type of the factor that is limiting the action, assuming actionAllowed() is false.
* @return The type of the limiting factor
*/
public LimitType getLimitingFactor() {
return limitingFactor;
} | 3.68 |
hudi_HoodieAvroUtils_addCommitMetadataToRecord | /**
* Adds the Hoodie commit metadata into the provided Generic Record.
*/
public static GenericRecord addCommitMetadataToRecord(GenericRecord record, String instantTime, String commitSeqno) {
record.put(HoodieRecord.COMMIT_TIME_METADATA_FIELD, instantTime);
record.put(HoodieRecord.COMMIT_SEQNO_METADATA_FIELD, commitSeqno);
return record;
} | 3.68 |
hadoop_WrappedFailoverProxyProvider_useLogicalURI | /**
* Assume logical URI is used for old proxy provider implementations.
*/
@Override
public boolean useLogicalURI() {
return true;
} | 3.68 |
framework_FocusableFlowPanel_addKeyDownHandler | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.event.dom.client.HasKeyDownHandlers#addKeyDownHandler(
* com.google.gwt.event.dom.client.KeyDownHandler)
*/
@Override
public HandlerRegistration addKeyDownHandler(KeyDownHandler handler) {
return addDomHandler(handler, KeyDownEvent.getType());
} | 3.68 |
pulsar_Topics_createNonPartitionedTopic | /**
* Create a non-partitioned topic.
* <p/>
* Create a non-partitioned topic.
* <p/>
*
* @param topic Topic name
* @throws PulsarAdminException
*/
default void createNonPartitionedTopic(String topic) throws PulsarAdminException {
createNonPartitionedTopic(topic, null);
} | 3.68 |
flink_ResultPartitionMetrics_refreshAndGetMax | /**
* Iterates over all sub-partitions and collects the maximum number of queued buffers in a
* sub-partition in a best-effort way.
*
* @return maximum number of queued buffers per sub-partition
*/
int refreshAndGetMax() {
int max = 0;
int numSubpartitions = partition.getNumberOfSubpartitions();
for (int targetSubpartition = 0;
targetSubpartition < numSubpartitions;
++targetSubpartition) {
int size = partition.getNumberOfQueuedBuffers(targetSubpartition);
max = Math.max(max, size);
}
return max;
} | 3.68 |
flink_DynamicPartitionPruningUtils_isSuitableFilter | /**
     * Not all filter conditions are suitable for filtering partitions by dynamic partition
     * pruning rules. For example, NOT NULL can only filter the one default partition, which has a
     * small impact on filtering data.
*/
private static boolean isSuitableFilter(RexNode filterCondition) {
switch (filterCondition.getKind()) {
case AND:
List<RexNode> conjunctions = RelOptUtil.conjunctions(filterCondition);
return isSuitableFilter(conjunctions.get(0))
|| isSuitableFilter(conjunctions.get(1));
case OR:
List<RexNode> disjunctions = RelOptUtil.disjunctions(filterCondition);
return isSuitableFilter(disjunctions.get(0))
&& isSuitableFilter(disjunctions.get(1));
case NOT:
return isSuitableFilter(((RexCall) filterCondition).operands.get(0));
case EQUALS:
case GREATER_THAN:
case GREATER_THAN_OR_EQUAL:
case LESS_THAN:
case LESS_THAN_OR_EQUAL:
case NOT_EQUALS:
case IN:
case LIKE:
case CONTAINS:
case SEARCH:
case IS_FALSE:
case IS_NOT_FALSE:
case IS_NOT_TRUE:
case IS_TRUE:
// TODO adding more suitable filters which can filter enough partitions after
// using this filter in dynamic partition pruning.
return true;
default:
return false;
}
} | 3.68 |
flink_SplitsAssignment_assignment | /** @return A mapping from each subtask ID to its split assignment. */
public Map<Integer, List<SplitT>> assignment() {
return assignment;
} | 3.68 |
framework_ContainerOrderedWrapper_containsId | /*
* Does the container contain the specified Item? Don't add a JavaDoc
* comment here, we use the default documentation from implemented
* interface.
*/
@Override
public boolean containsId(Object itemId) {
return container.containsId(itemId);
} | 3.68 |
framework_VaadinPortletRequest_getCurrentPortletRequest | /**
* Gets the currently processed portlet request. The current portlet request
* is automatically defined when the request is started. The current portlet
* request can not be used in e.g. background threads because of the way
* server implementations reuse request instances.
*
* @return the current portlet request instance if available, otherwise
* <code>null</code>
* @since 7.3
*/
public static PortletRequest getCurrentPortletRequest() {
return VaadinPortletService.getCurrentPortletRequest();
} | 3.68 |
querydsl_Expressions_numberTemplate | /**
* Create a new Template expression
*
* @param cl type of expression
* @param template template
* @param args template parameters
* @return template expression
*/
public static <T extends Number & Comparable<?>> NumberTemplate<T> numberTemplate(Class<? extends T> cl, Template template, List<?> args) {
return new NumberTemplate<T>(cl, template, args);
} | 3.68 |
flink_BufferManager_takeBuffer | /**
* Takes the floating buffer first in order to make full use of floating buffers reasonably.
*
* @return An available floating or exclusive buffer, may be null if the channel is
* released.
*/
@Nullable
Buffer takeBuffer() {
if (floatingBuffers.size() > 0) {
return floatingBuffers.poll();
} else {
return exclusiveBuffers.poll();
}
} | 3.68 |
hadoop_OBSWriteOperationHelper_newUploadPartRequest | /**
* Create request for uploading one part of a multipart task.
*
* @param destKey destination object key
* @param uploadId upload id
* @param partNumber part number
* @param size data size
* @param uploadStream upload stream for the part
* @return part upload request
*/
UploadPartRequest newUploadPartRequest(
final String destKey,
final String uploadId,
final int partNumber,
final int size,
final InputStream uploadStream) {
Preconditions.checkNotNull(uploadId);
Preconditions.checkArgument(uploadStream != null, "Data source");
Preconditions.checkArgument(size > 0, "Invalid partition size %s",
size);
Preconditions.checkArgument(
partNumber > 0 && partNumber <= PART_NUMBER);
LOG.debug("Creating part upload request for {} #{} size {}", uploadId,
partNumber, size);
UploadPartRequest request = new UploadPartRequest();
request.setUploadId(uploadId);
request.setBucketName(bucket);
request.setObjectKey(destKey);
request.setPartSize((long) size);
request.setPartNumber(partNumber);
request.setInput(uploadStream);
if (owner.getSse().isSseCEnable()) {
request.setSseCHeader(owner.getSse().getSseCHeader());
}
return request;
} | 3.68 |
framework_VComboBox_onClick | /**
* Listener for popupopener.
*/
@Override
public void onClick(ClickEvent event) {
debug("VComboBox: onClick()");
if (textInputEnabled && event.getNativeEvent().getEventTarget()
.cast() == tb.getElement()) {
// Don't process clicks on the text field if text input is enabled
return;
}
if (enabled && !readonly) {
// ask suggestionPopup if it was just closed, we are using GWT
// Popup's auto close feature
if (!suggestionPopup.isJustClosed()) {
filterOptions(-1, "");
dataReceivedHandler.popupOpenerClicked();
}
DOM.eventPreventDefault(DOM.eventGetCurrentEvent());
focus();
tb.selectAll();
}
} | 3.68 |
hbase_RpcClientFactory_createClient | /**
* Creates a new RpcClient by the class defined in the configuration or falls back to
* RpcClientImpl
* @param conf configuration
* @param clusterId the cluster id
* @param localAddr client socket bind address.
* @param metrics the connection metrics
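   * @param connectionAttributes attributes to associate with the connection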
* @return newly created RpcClient
*/
public static RpcClient createClient(Configuration conf, String clusterId,
SocketAddress localAddr, MetricsConnection metrics, Map<String, byte[]> connectionAttributes) {
String rpcClientClass = getRpcClientClass(conf);
return ReflectionUtils.instantiateWithCustomCtor(
rpcClientClass, new Class[] { Configuration.class, String.class, SocketAddress.class,
MetricsConnection.class, Map.class },
new Object[] { conf, clusterId, localAddr, metrics, connectionAttributes });
} | 3.68 |
hibernate-validator_AnnotationApiHelper_getAnnotationValue | /**
* Returns the annotation value of the given annotation mirror with the
* given name.
*
* @param annotationMirror An annotation mirror.
* @param name The name of the annotation value of interest.
*
* @return The annotation value with the given name or null, if one of the
* input values is null or if no value with the given name exists
* within the given annotation mirror.
*/
public AnnotationValue getAnnotationValue(AnnotationMirror annotationMirror, String name) {
if ( annotationMirror == null || name == null ) {
return null;
}
Map<? extends ExecutableElement, ? extends AnnotationValue> elementValues = annotationMirror.getElementValues();
for ( Entry<? extends ExecutableElement, ? extends AnnotationValue> oneElementValue : elementValues.entrySet() ) {
if ( oneElementValue.getKey().getSimpleName().contentEquals( name ) ) {
return oneElementValue.getValue();
}
}
return null;
} | 3.68 |
hadoop_BlockData_setState | /**
* Sets the state of the given block to the given value.
* @param blockNumber the id of the given block.
* @param blockState the target state.
* @throws IllegalArgumentException if blockNumber is invalid.
*/
public void setState(int blockNumber, State blockState) {
throwIfInvalidBlockNumber(blockNumber);
state[blockNumber] = blockState;
} | 3.68 |
hudi_HoodieRowCreateHandle_canWrite | /**
   * Returns {@code true} if this handle can take in more writes, else {@code false}.
*/
public boolean canWrite() {
return fileWriter.canWrite();
} | 3.68 |
framework_ObjectProperty_getValue | /**
* Gets the value stored in the Property.
*
* @return the value stored in the Property
*/
@Override
public T getValue() {
return value;
} | 3.68 |
hudi_HoodieMetaSyncOperations_updatePartitionsToTable | /**
* Update partitions to the table in metastore.
*/
default void updatePartitionsToTable(String tableName, List<String> changedPartitions) {
} | 3.68 |
rocketmq-connect_ExpressionBuilder_appendTo | /**
* Append this object to the specified builder.
*
* @param builder the builder to use; may not be null
* @param useQuotes whether quotes should be used for this object
*/
default void appendTo(
ExpressionBuilder builder,
QuoteMethod useQuotes
) {
switch (useQuotes) {
case ALWAYS:
appendTo(builder, true);
break;
case NEVER:
default:
// do nothing
break;
}
} | 3.68 |
hbase_RemoteProcedureDispatcher_storeInDispatchedQueue | /**
   * Whether to store this remote procedure in the dispatched queue. Only OpenRegionProcedure and
   * CloseRegionProcedure return false, since they are not fully controlled by the dispatcher.
*/
default boolean storeInDispatchedQueue() {
return true;
} | 3.68 |
hbase_ProcedureExecutor_restoreLocks | // Restore the locks for all the procedures.
// Notice that we need to restore the locks starting from the root proc, otherwise there will be
// problem that a sub procedure may hold the exclusive lock first and then we are stuck when
// calling the acquireLock method for the parent procedure.
// The algorithm is straight-forward:
// 1. Use a set to record the procedures which locks have already been restored.
// 2. Use a stack to store the hierarchy of the procedures
// 3. For all the procedure, we will first try to find its parent and push it into the stack,
// unless
// a. We have no parent, i.e, we are the root procedure
// b. The lock has already been restored(by checking the set introduced in #1)
// then we start to pop the stack and call acquireLock for each procedure.
// Notice that this should be done for all procedures, not only the ones in runnableList.
private void restoreLocks() {
Set<Long> restored = new HashSet<>();
Deque<Procedure<TEnvironment>> stack = new ArrayDeque<>();
procedures.values().forEach(proc -> {
for (;;) {
if (restored.contains(proc.getProcId())) {
restoreLocks(stack, restored);
return;
}
if (!proc.hasParent()) {
restoreLock(proc, restored);
restoreLocks(stack, restored);
return;
}
stack.push(proc);
proc = procedures.get(proc.getParentProcId());
}
});
} | 3.68 |
morf_AbstractSelectStatement_leftOuterJoin | /**
   * Specifies a left outer join to a subselect:
*
* <blockquote><pre>
* TableReference sale = tableRef("Sale");
* TableReference customer = tableRef("Customer");
*
* // Define the subselect - a group by showing total sales by age in the
* // previous month.
* SelectStatement amountsByAgeLastMonth = select(field("age"), sum(field("amount")))
* .from(sale)
* .innerJoin(customer, sale.field("customerId").eq(customer.field("id")))
* .where(sale.field("month").eq(5))
* .groupBy(customer.field("age")
* .alias("amountByAge");
*
* // The outer select, showing each sale this month as a percentage of the sales
* // to that age the previous month
* SelectStatement outer = select(
* sale.field("id"),
* sale.field("amount")
* // May cause division by zero (!)
* .divideBy(isNull(amountsByAgeLastMonth.asTable().field("amount"), 0))
* .multiplyBy(literal(100))
* )
* .from(sale)
* .innerJoin(customer, sale.field("customerId").eq(customer.field("id")))
* .leftOuterJoin(amountsByAgeLastMonth, amountsByAgeLastMonth.asTable().field("age").eq(customer.field("age")));
* </pre></blockquote>
*
* @param subSelect the sub select statement to join on to
* @param criterion the criteria on which to join the tables
* @return a new select statement with the change applied.
*/
public T leftOuterJoin(SelectStatement subSelect, Criterion criterion) {
return copyOnWriteOrMutate(
b -> b.leftOuterJoin(subSelect, criterion),
() -> joins.add(new Join(JoinType.LEFT_OUTER_JOIN, subSelect, criterion))
);
} | 3.68 |
flink_BaseHybridHashTable_freeCurrent | /** Free the memory not used. */
public void freeCurrent() {
internalPool.cleanCache();
} | 3.68 |
hbase_LruAdaptiveBlockCache_asReferencedHeapBlock | /**
   * The block cached in LruAdaptiveBlockCache will always be a heap block: on the one hand, heap
   * access is faster than off-heap, so the small index or meta blocks cached in
   * CombinedBlockCache will benefit a lot. On the other hand, the LruAdaptiveBlockCache size is always
   * calculated based on the total heap size, so caching an off-heap block in LruAdaptiveBlockCache
   * would mess up the heap size accounting. Here we will clone the block into a heap block if it's an
   * off-heap block, otherwise just use the original block. The key point is to maintain the refCnt of
* the block (HBASE-22127): <br>
   * 1. if we cache the cloned heap block, its refCnt is a totally new one, it's easy to handle; <br>
   * 2. if we cache the original heap block, we're sure that it won't be tracked in ByteBuffAllocator's
* reservoir, if both RPC and LruAdaptiveBlockCache release the block, then it can be garbage
* collected by JVM, so need a retain here.
* @param buf the original block
* @return an block with an heap memory backend.
*/
private Cacheable asReferencedHeapBlock(Cacheable buf) {
if (buf instanceof HFileBlock) {
HFileBlock blk = ((HFileBlock) buf);
if (blk.isSharedMem()) {
return HFileBlock.deepCloneOnHeap(blk);
}
}
// The block will be referenced by this LruAdaptiveBlockCache,
// so should increase its refCnt here.
return buf.retain();
} | 3.68 |
hbase_HFileInfo_parseWritable | /**
* Now parse the old Writable format. It was a list of Map entries. Each map entry was a key and a
* value of a byte []. The old map format had a byte before each entry that held a code which was
   * short for the key or value type. We know it was a byte [] so below we just read and dump it.
*/
void parseWritable(final DataInputStream in) throws IOException {
// First clear the map.
// Otherwise we will just accumulate entries every time this method is called.
this.map.clear();
// Read the number of entries in the map
int entries = in.readInt();
// Then read each key/value pair
for (int i = 0; i < entries; i++) {
byte[] key = Bytes.readByteArray(in);
// We used to read a byte that encoded the class type.
// Read and ignore it because it is always byte [] in hfile
in.readByte();
byte[] value = Bytes.readByteArray(in);
this.map.put(key, value);
}
} | 3.68 |
flink_MurmurHashUtils_fmix | // Finalization mix - force all bits of a hash block to avalanche
private static int fmix(int h1, int length) {
h1 ^= length;
return fmix(h1);
} | 3.68 |
pulsar_ResourceGroupService_resourceGroupDelete | /**
* Delete RG.
*
     * @throws PulsarAdminException if the RG with that name does not exist, or if the RG exists but is still in use.
*/
public void resourceGroupDelete(String name) throws PulsarAdminException {
ResourceGroup rg = this.getResourceGroupInternal(name);
if (rg == null) {
throw new PulsarAdminException("Resource group does not exist: " + name);
}
long tenantRefCount = rg.getResourceGroupNumOfTenantRefs();
long nsRefCount = rg.getResourceGroupNumOfNSRefs();
if ((tenantRefCount + nsRefCount) > 0) {
String errMesg = "Resource group " + name + " still has " + tenantRefCount + " tenant refs";
errMesg += " and " + nsRefCount + " namespace refs on it";
throw new PulsarAdminException(errMesg);
}
rg.resourceGroupPublishLimiter.close();
rg.resourceGroupPublishLimiter = null;
resourceGroupsMap.remove(name);
} | 3.68 |
flink_EvictingWindowSavepointReader_reduce | /**
* Reads window state generated using a {@link ReduceFunction}.
*
* @param uid The uid of the operator.
* @param function The reduce function used to create the window.
* @param readerFunction The window reader function.
* @param keyType The key type of the window.
* @param reduceType The type information of the reduce function.
* @param outputType The output type of the reader function.
* @param <K> The type of the key.
* @param <T> The type of the reduce function.
* @param <OUT> The output type of the reduce function.
* @return A {@code DataStream} of objects read from keyed state.
* @throws IOException If savepoint does not contain the specified uid.
*/
public <K, T, OUT> DataStream<OUT> reduce(
String uid,
ReduceFunction<T> function,
WindowReaderFunction<T, OUT, K, W> readerFunction,
TypeInformation<K> keyType,
TypeInformation<T> reduceType,
TypeInformation<OUT> outputType)
throws IOException {
WindowReaderOperator<?, K, StreamRecord<T>, W, OUT> operator =
WindowReaderOperator.evictingWindow(
new ReduceEvictingWindowReaderFunction<>(readerFunction, function),
keyType,
windowSerializer,
reduceType,
env.getConfig());
return readWindowOperator(uid, outputType, operator);
} | 3.68 |
framework_AbstractRemoteDataSource_pinHandle | /**
* Pins a row with given handle. This function can be overridden to do
* specific logic related to pinning rows.
*
* @param handle
* row handle to pin
*/
protected void pinHandle(RowHandleImpl handle) {
Object key = handle.key;
Integer count = pinnedCounts.get(key);
if (count == null) {
count = Integer.valueOf(0);
pinnedRows.put(key, handle);
}
pinnedCounts.put(key, Integer.valueOf(count.intValue() + 1));
} | 3.68 |
flink_ParquetProtoWriters_forType | /**
* Creates a {@link ParquetWriterFactory} for the given type. The type should represent a
* Protobuf message.
*
* @param type The class of the type to write.
*/
public static <T extends Message> ParquetWriterFactory<T> forType(Class<T> type) {
ParquetBuilder<T> builder = (out) -> new ParquetProtoWriterBuilder<>(out, type).build();
return new ParquetWriterFactory<>(builder);
} | 3.68 |
flink_StateUtil_getStateSize | /**
* Returns the size of a state object.
*
* @param handle The handle to the retrieved state
*/
public static long getStateSize(StateObject handle) {
return handle == null ? 0 : handle.getStateSize();
} | 3.68 |
hbase_NamespaceTableAndRegionInfo_getName | /**
* Gets the name of the namespace.
* @return name of the namespace.
*/
String getName() {
return name;
} | 3.68 |
pulsar_ClientConfiguration_setIoThreads | /**
* Set the number of threads to be used for handling connections to brokers <i>(default: 1 thread)</i>.
*
* @param numIoThreads
*/
public void setIoThreads(int numIoThreads) {
checkArgument(numIoThreads > 0);
confData.setNumIoThreads(numIoThreads);
} | 3.68 |
flink_DecodingFormat_applyReadableMetadata | /**
* Provides a list of metadata keys that the produced row must contain as appended metadata
* columns. By default, this method throws an exception if metadata keys are defined.
*
* <p>See {@link SupportsReadingMetadata} for more information.
*
* <p>Note: This method is only used if the outer {@link DynamicTableSource} implements {@link
* SupportsReadingMetadata} and calls this method in {@link
* SupportsReadingMetadata#applyReadableMetadata(List, DataType)}.
*/
@SuppressWarnings("unused")
default void applyReadableMetadata(List<String> metadataKeys) {
throw new UnsupportedOperationException(
"A decoding format must override this method to apply metadata keys.");
} | 3.68 |
hbase_TableState_getTableName | /**
* Table name for state
*/
public TableName getTableName() {
return tableName;
} | 3.68 |
pulsar_BrokerInterceptorUtils_searchForInterceptors | /**
* Search and load the available broker interceptors.
*
* @param interceptorsDirectory the directory where all the broker interceptors are stored
* @return a collection of broker interceptors
     * @throws IOException if the available broker interceptors cannot be loaded from the provided directory.
*/
public BrokerInterceptorDefinitions searchForInterceptors(String interceptorsDirectory,
String narExtractionDirectory) throws IOException {
Path path = Paths.get(interceptorsDirectory).toAbsolutePath();
log.info("Searching for broker interceptors in {}", path);
BrokerInterceptorDefinitions interceptors = new BrokerInterceptorDefinitions();
if (!path.toFile().exists()) {
log.warn("Pulsar broker interceptors directory not found");
return interceptors;
}
try (DirectoryStream<Path> stream = Files.newDirectoryStream(path, "*.nar")) {
for (Path archive : stream) {
try {
BrokerInterceptorDefinition def =
BrokerInterceptorUtils.getBrokerInterceptorDefinition(archive.toString(),
narExtractionDirectory);
log.info("Found broker interceptors from {} : {}", archive, def);
checkArgument(StringUtils.isNotBlank(def.getName()));
checkArgument(StringUtils.isNotBlank(def.getInterceptorClass()));
BrokerInterceptorMetadata metadata = new BrokerInterceptorMetadata();
metadata.setDefinition(def);
metadata.setArchivePath(archive);
interceptors.interceptors().put(def.getName(), metadata);
} catch (Throwable t) {
log.warn("Failed to load broker interceptor from {}."
+ " It is OK however if you want to use this broker interceptor,"
+ " please make sure you put the correct broker interceptor NAR"
+ " package in the broker interceptors directory.", archive, t);
}
}
}
return interceptors;
} | 3.68 |
dubbo_DubboRelaxedBinding2AutoConfiguration_dubboBasePackages | /**
* The bean is used to scan the packages of Dubbo Service classes
*
* @param environment {@link Environment} instance
* @return non-null {@link Set}
* @since 2.7.8
*/
@ConditionalOnMissingBean(name = BASE_PACKAGES_BEAN_NAME)
@Bean(name = BASE_PACKAGES_BEAN_NAME)
public Set<String> dubboBasePackages(ConfigurableEnvironment environment) {
PropertyResolver propertyResolver = dubboScanBasePackagesPropertyResolver(environment);
return propertyResolver.getProperty(BASE_PACKAGES_PROPERTY_NAME, Set.class, emptySet());
} | 3.68 |
hbase_EventHandler_compareTo | /**
* Default prioritized runnable comparator which implements a FIFO ordering.
* <p>
* Subclasses should not override this. Instead, if they want to implement priority beyond FIFO,
* they should override {@link #getPriority()}.
*/
@Override
public int compareTo(EventHandler o) {
if (o == null) {
return 1;
}
if (getPriority() != o.getPriority()) {
return (getPriority() < o.getPriority()) ? -1 : 1;
}
return (this.seqid < o.seqid) ? -1 : 1;
} | 3.68 |
flink_StreamRecord_getValue | /** Returns the value wrapped in this stream value. */
public T getValue() {
return value;
} | 3.68 |
morf_SchemaUtils_schema | /**
* Build a {@link Schema} from a list of schema. The resulting schema is the
* superset of all elements.
*
* @param schema Schema to combine.
* @return A single schema representing all of {@code schema}.
*/
public static Schema schema(Schema... schema) {
return new CompositeSchema(schema);
} | 3.68 |
flink_JobVertex_getID | /**
* Returns the ID of this job vertex.
*
* @return The ID of this job vertex
*/
public JobVertexID getID() {
return this.id;
} | 3.68 |
hadoop_IOStatisticsLogging_demandStringifyIOStatistics | /**
* On demand stringifier of an IOStatistics instance.
* <p>
* Whenever this object's toString() method is called, it evaluates the
* statistics.
* <p>
   * This is for use in log statements where the cost of creation
* of this entry is low; it is affordable to use in log statements.
* @param statistics statistics to stringify -may be null.
* @return an object whose toString() operation returns the current values.
*/
public static Object demandStringifyIOStatistics(
@Nullable IOStatistics statistics) {
return new StatisticsToString(statistics);
} | 3.68 |
hadoop_ClientGSIContext_updateRequestState | /**
* Client side implementation for providing state alignment info in requests.
*/
@Override
public synchronized void updateRequestState(RpcRequestHeaderProto.Builder header) {
if (lastSeenStateId.get() != Long.MIN_VALUE) {
header.setStateId(lastSeenStateId.get());
}
if (routerFederatedState != null) {
header.setRouterFederatedState(routerFederatedState);
}
} | 3.68 |
hbase_MetaTableAccessor_getRegionResult | /**
* Gets the result in hbase:meta for the specified region.
* @param connection connection we're using
* @param regionInfo region we're looking for
* @return result of the specified region
*/
public static Result getRegionResult(Connection connection, RegionInfo regionInfo)
throws IOException {
Get get = new Get(CatalogFamilyFormat.getMetaKeyForRegion(regionInfo));
get.addFamily(HConstants.CATALOG_FAMILY);
try (Table t = getMetaHTable(connection)) {
return t.get(get);
}
} | 3.68 |
morf_SpreadsheetDataSetProducer_getTranslationsColumnIndex | /**
* Determines if a worksheet contains something that looks like translations.
* This is done by looking for a gap in the column headings followed by an
* actual heading, e.g.
*
* <pre>
* Heading 1 | Heading 2 | | Translation
* -------------------------------------------------
* Value 1 | Value 2 | | Bonjour
* </pre>
*
* @param sheet sheet to look for translations in
* @param headingRow the index of the heading row
* @return the index of the translation column, -1 otherwise
*/
private int getTranslationsColumnIndex(Sheet sheet, int headingRow) {
boolean hasBlank = false;
int i = 0;
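    // Find the first blank heading, i.e. the gap that precedes the translations column.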
for (; i < sheet.getRow(headingRow).length; i++) {
if (sheet.getCell(i, headingRow).getContents().length() == 0) {
hasBlank = true;
break;
}
}
if (!hasBlank) {
return -1;
}
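    // The translations column is the first non-empty heading after the gap.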
for (; i < sheet.getRow(headingRow).length; i++) {
if (sheet.getCell(i, headingRow).getContents().length() > 0) {
return i;
}
}
return -1;
} | 3.68 |
morf_SqlServerDialect_containsPrimaryKeyConstraint | /**
* @param statements the statements to check for the primary key constraint
* @param tableName the table name which is expected to make up the pk constraint name
* @return true if the statements contain tableName_PK
*/
private boolean containsPrimaryKeyConstraint(Collection<String> statements, String tableName) {
for (String s : statements) {
if (s.contains(tableName + "_PK")) {
return true;
}
}
return false;
} | 3.68 |
zxing_FinderPatternFinder_handlePossibleCenter | /**
* <p>This is called when a horizontal scan finds a possible alignment pattern. It will
* cross check with a vertical scan, and if successful, will, ah, cross-cross-check
* with another horizontal scan. This is needed primarily to locate the real horizontal
* center of the pattern in cases of extreme skew.
* And then we cross-cross-cross check with another diagonal scan.</p>
*
* <p>If that succeeds the finder pattern location is added to a list that tracks
* the number of times each location has been nearly-matched as a finder pattern.
* Each additional find is more evidence that the location is in fact a finder
* pattern center
*
* @param stateCount reading state module counts from horizontal scan
* @param i row where finder pattern may be found
* @param j end of possible finder pattern in row
* @return true if a finder pattern candidate was found this time
*/
protected final boolean handlePossibleCenter(int[] stateCount, int i, int j) {
int stateCountTotal = stateCount[0] + stateCount[1] + stateCount[2] + stateCount[3] +
stateCount[4];
float centerJ = centerFromEnd(stateCount, j);
float centerI = crossCheckVertical(i, (int) centerJ, stateCount[2], stateCountTotal);
if (!Float.isNaN(centerI)) {
// Re-cross check
centerJ = crossCheckHorizontal((int) centerJ, (int) centerI, stateCount[2], stateCountTotal);
if (!Float.isNaN(centerJ) && crossCheckDiagonal((int) centerI, (int) centerJ)) {
float estimatedModuleSize = stateCountTotal / 7.0f;
boolean found = false;
for (int index = 0; index < possibleCenters.size(); index++) {
FinderPattern center = possibleCenters.get(index);
// Look for about the same center and module size:
if (center.aboutEquals(estimatedModuleSize, centerI, centerJ)) {
possibleCenters.set(index, center.combineEstimate(centerI, centerJ, estimatedModuleSize));
found = true;
break;
}
}
if (!found) {
FinderPattern point = new FinderPattern(centerJ, centerI, estimatedModuleSize);
possibleCenters.add(point);
if (resultPointCallback != null) {
resultPointCallback.foundPossibleResultPoint(point);
}
}
return true;
}
}
return false;
} | 3.68 |
framework_ValidationResult_ok | /**
* Returns a successful result.
*
* @return the successful result
*/
public static ValidationResult ok() {
return new SimpleValidationResult(null, null);
} | 3.68 |
hbase_Table_batch | /**
* Method that does a batch call on Deletes, Gets, Puts, Increments, Appends, RowMutations. The
* ordering of execution of the actions is not defined. Meaning if you do a Put and a Get in the
* same {@link #batch} call, you will not necessarily be guaranteed that the Get returns what the
* Put had put.
* @param actions list of Get, Put, Delete, Increment, Append, RowMutations.
* @param results Empty Object[], same size as actions. Provides access to partial results, in
* case an exception is thrown. A null in the result array means that the call for
* that action failed, even after retries. The order of the objects in the results
* array corresponds to the order of actions in the request list.
* @since 0.90.0
*/
default void batch(final List<? extends Row> actions, final Object[] results)
throws IOException, InterruptedException {
throw new NotImplementedException("Add an implementation!");
} | 3.68 |
rocketmq-connect_SourceOffsetCompute_sourcePartitions | /**
   * Build the source partition map for a table.
   *
   * @param prefix topic prefix to prepend to the table name when not empty
   * @param tableId the table identifier
   * @param offsetSuffix suffix used when building the table-name offset key
   * @return the source partition map
*/
public static Map<String, String> sourcePartitions(String prefix, TableId tableId, String offsetSuffix) {
String fqn = ExpressionBuilder.create().append(tableId, QuoteMethod.NEVER).toString();
Map<String, String> partition = new HashMap<>();
partition.put(JdbcSourceConfigConstants.TABLE_NAME_KEY(offsetSuffix), fqn);
if (StringUtils.isNotEmpty(prefix)) {
partition.put(TOPIC, prefix.concat(tableId.tableName()));
} else {
partition.put(TOPIC, tableId.tableName());
}
return partition;
} | 3.68 |
flink_JobStatusPollingUtils_getJobResult | /**
* Polls the {@link JobStatus} of a job periodically and when the job has reached a terminal
* state, it requests its {@link JobResult}.
*
* @param dispatcherGateway the {@link DispatcherGateway} to be used for requesting the details
* of the job.
* @param jobId the id of the job
* @param scheduledExecutor the executor to be used to periodically request the status of the
* job
* @param rpcTimeout the timeout of the rpc
* @param retryPeriod the interval between two consecutive job status requests
* @return a future that will contain the job's {@link JobResult}.
*/
static CompletableFuture<JobResult> getJobResult(
final DispatcherGateway dispatcherGateway,
final JobID jobId,
final ScheduledExecutor scheduledExecutor,
final Time rpcTimeout,
final Time retryPeriod) {
return pollJobResultAsync(
() -> dispatcherGateway.requestJobStatus(jobId, rpcTimeout),
() -> dispatcherGateway.requestJobResult(jobId, rpcTimeout),
scheduledExecutor,
retryPeriod.toMilliseconds());
} | 3.68 |
flink_AsyncWaitOperator_outputCompletedElement | /**
* Outputs one completed element. Watermarks are always completed if it's their turn to be
* processed.
*
* <p>This method will be called from {@link #processWatermark(Watermark)} and from a mail
* processing the result of an async function call.
*/
private void outputCompletedElement() {
if (queue.hasCompletedElements()) {
// emit only one element to not block the mailbox thread unnecessarily
queue.emitCompletedElement(timestampedCollector);
// if there are more completed elements, emit them with subsequent mails
if (queue.hasCompletedElements()) {
try {
mailboxExecutor.execute(
this::outputCompletedElement,
"AsyncWaitOperator#outputCompletedElement");
} catch (RejectedExecutionException mailboxClosedException) {
// This exception can only happen if the operator is cancelled which means all
// pending records can be safely ignored since they will be processed one more
// time after recovery.
LOG.debug(
"Attempt to complete element is ignored since the mailbox rejected the execution.",
mailboxClosedException);
}
}
}
} | 3.68 |
dubbo_CallbackServiceCodec_referOrDestroyCallbackService | /**
* refer or destroy callback service on server side
*
* @param url
*/
@SuppressWarnings("unchecked")
private Object referOrDestroyCallbackService(
Channel channel, URL url, Class<?> clazz, Invocation inv, int instid, boolean isRefer) {
Object proxy;
String invokerCacheKey = getServerSideCallbackInvokerCacheKey(channel, clazz.getName(), instid);
String proxyCacheKey = getServerSideCallbackServiceCacheKey(channel, clazz.getName(), instid);
proxy = channel.getAttribute(proxyCacheKey);
String countkey = getServerSideCountKey(channel, clazz.getName());
if (isRefer) {
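            // refer: build a channel-backed invoker and proxy for the callback service (if not already cached) and register them on the channel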
if (proxy == null) {
URL referurl = URL.valueOf("callback://" + url.getAddress() + "/" + clazz.getName() + "?"
+ INTERFACE_KEY + "=" + clazz.getName());
referurl = referurl.addParametersIfAbsent(url.getParameters())
.removeParameter(METHODS_KEY)
.addParameter(SIDE_KEY, CONSUMER_SIDE);
if (!isInstancesOverLimit(channel, referurl, clazz.getName(), instid, true)) {
url.getOrDefaultApplicationModel()
.getDefaultModule()
.getServiceRepository()
.registerService(clazz);
@SuppressWarnings("rawtypes")
Invoker<?> invoker = new ChannelWrappedInvoker(clazz, channel, referurl, String.valueOf(instid));
FilterChainBuilder builder = getFilterChainBuilder(url);
invoker = builder.buildInvokerChain(invoker, REFERENCE_FILTER_KEY, CommonConstants.CONSUMER);
invoker = builder.buildInvokerChain(invoker, REFERENCE_FILTER_KEY, CommonConstants.CALLBACK);
proxy = proxyFactory.getProxy(invoker);
channel.setAttribute(proxyCacheKey, proxy);
channel.setAttribute(invokerCacheKey, invoker);
increaseInstanceCount(channel, countkey);
// convert error fail fast .
// ignore concurrent problem.
Set<Invoker<?>> callbackInvokers = (Set<Invoker<?>>) channel.getAttribute(CHANNEL_CALLBACK_KEY);
if (callbackInvokers == null) {
callbackInvokers = new ConcurrentHashSet<>(1);
channel.setAttribute(CHANNEL_CALLBACK_KEY, callbackInvokers);
}
callbackInvokers.add(invoker);
logger.info("method " + RpcUtils.getMethodName(inv) + " include a callback service :"
+ invoker.getUrl() + ", a proxy :" + invoker + " has been created.");
}
}
} else {
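            // destroy: remove the cached proxy and invoker for this callback instance and destroy the invoker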
if (proxy != null) {
Invoker<?> invoker = (Invoker<?>) channel.getAttribute(invokerCacheKey);
try {
Set<Invoker<?>> callbackInvokers = (Set<Invoker<?>>) channel.getAttribute(CHANNEL_CALLBACK_KEY);
if (callbackInvokers != null) {
callbackInvokers.remove(invoker);
}
invoker.destroy();
} catch (Exception e) {
logger.error(PROTOCOL_FAILED_DESTROY_INVOKER, "", "", e.getMessage(), e);
}
// cancel refer, directly remove from the map
channel.removeAttribute(proxyCacheKey);
channel.removeAttribute(invokerCacheKey);
decreaseInstanceCount(channel, countkey);
}
}
return proxy;
} | 3.68 |