name | code_snippet | score |
---|---|---|
hadoop_TaskInfo_getTaskMemory | /**
* @return Memory used by the task, less than or equal to the heap size.
*/
public long getTaskMemory() {
return maxMemory;
} | 3.68 |
pulsar_RateLimiter_setRate | /**
* Resets the rate with new permits and rate-time.
*
* @param permits
* @param rateTime
* @param timeUnit
* @param permitUpdaterByte
*/
public synchronized void setRate(long permits, long rateTime, TimeUnit timeUnit, Supplier<Long> permitUpdaterByte) {
if (renewTask != null) {
renewTask.cancel(false);
}
this.permits = permits;
this.rateTime = rateTime;
this.timeUnit = timeUnit;
this.permitUpdater = permitUpdaterByte;
this.renewTask = createTask();
} | 3.68 |
hbase_InternalScan_checkOnlyStoreFiles | /**
* MemStore will not be scanned. Only StoreFiles will be scanned.
*/
public void checkOnlyStoreFiles() {
memOnly = false;
filesOnly = true;
} | 3.68 |
hadoop_DataNodeFaultInjector_delayBlockReader | /**
* Used as a hook to inject latency when reading a block
* in the erasure coding reconstruction process.
*/
public void delayBlockReader() {} | 3.68 |
hadoop_AltKerberosAuthenticationHandler_isBrowser | /**
* This method parses the User-Agent String and returns whether or not it
* refers to a browser. If it's not a browser, then Kerberos authentication
* will be used; if it is a browser, alternateAuthenticate from the subclass
* will be used.
* <p>
* A User-Agent String is considered to be a browser if it does not contain
* any of the values from alt-kerberos.non-browser.user-agents; the default
* behavior is to consider everything a browser unless it contains one of:
* "java", "curl", "wget", or "perl". Subclasses can optionally override
* this method to use different behavior.
*
* @param userAgent The User-Agent String, or null if there isn't one
* @return true if the User-Agent String refers to a browser, false if not
*/
protected boolean isBrowser(String userAgent) {
if (userAgent == null) {
return false;
}
userAgent = userAgent.toLowerCase(Locale.ENGLISH);
boolean isBrowser = true;
for (String nonBrowserUserAgent : nonBrowserUserAgents) {
if (userAgent.contains(nonBrowserUserAgent)) {
isBrowser = false;
break;
}
}
return isBrowser;
} | 3.68 |
hbase_ProcedureTree_checkReady | // First we check whether the given root procedure and all its sub procedures are valid, by walking
// the procedure stack. We also remove all of these procedures from the remainingProcMap, so any
// procedures still left in the map at the end are orphan procedures.
private void checkReady(Entry rootEntry, Map<Long, Entry> remainingProcMap) {
if (ProcedureUtil.isFinished(rootEntry.proc)) {
if (!rootEntry.subProcs.isEmpty()) {
LOG.error("unexpected active children for root-procedure: {}", rootEntry);
rootEntry.subProcs.forEach(e -> LOG.error("unexpected active children: {}", e));
addAllToCorruptedAndRemoveFromProcMap(rootEntry, remainingProcMap);
} else {
addAllToValidAndRemoveFromProcMap(rootEntry, remainingProcMap);
}
return;
}
Map<Integer, List<Entry>> stackId2Proc = new HashMap<>();
MutableInt maxStackId = new MutableInt(Integer.MIN_VALUE);
collectStackId(rootEntry, stackId2Proc, maxStackId);
// the stack ids should start from 0 and increase by one every time
boolean valid = true;
for (int i = 0; i <= maxStackId.intValue(); i++) {
List<Entry> entries = stackId2Proc.get(i);
if (entries == null) {
LOG.error("Missing stack id {}, max stack id is {}, root procedure is {}", i, maxStackId,
rootEntry);
valid = false;
} else if (entries.size() > 1) {
LOG.error("Multiple procedures {} have the same stack id {}, max stack id is {},"
+ " root procedure is {}", entries, i, maxStackId, rootEntry);
valid = false;
}
}
if (valid) {
addAllToValidAndRemoveFromProcMap(rootEntry, remainingProcMap);
} else {
addAllToCorruptedAndRemoveFromProcMap(rootEntry, remainingProcMap);
}
} | 3.68 |
framework_VCalendar_setFirstHourOfTheDay | /**
* Set the first hour of the day.
*
* @param hour
* The first hour of the day
*/
public void setFirstHourOfTheDay(int hour) {
assert (hour >= 0 && hour <= 23);
firstHour = hour;
} | 3.68 |
hadoop_BlockManagerParameters_withFuturePool | /**
* Sets the executor service future pool that is later used to perform
* async prefetch tasks.
*
* @param pool The future pool.
* @return The builder.
*/
public BlockManagerParameters withFuturePool(
final ExecutorServiceFuturePool pool) {
this.futurePool = pool;
return this;
} | 3.68 |
hudi_SanitizationUtils_transformMap | /**
* Parse map for sanitizing. If we have a string in the map, and it is an avro field name key, then we sanitize the name.
* Otherwise, we keep recursively going through the schema.
* @param src - deserialized schema
* @param invalidCharMask - mask to replace invalid characters with
*/
private static Map<String, Object> transformMap(Map<String, Object> src, String invalidCharMask) {
return src.entrySet().stream()
.map(kv -> {
if (kv.getValue() instanceof List) {
return Pair.of(kv.getKey(), transformList((List<Object>) kv.getValue(), invalidCharMask));
} else if (kv.getValue() instanceof Map) {
return Pair.of(kv.getKey(), transformMap((Map<String, Object>) kv.getValue(), invalidCharMask));
} else if (kv.getValue() instanceof String) {
String currentStrValue = (String) kv.getValue();
if (kv.getKey().equals(AVRO_FIELD_NAME_KEY)) {
return Pair.of(kv.getKey(), HoodieAvroUtils.sanitizeName(currentStrValue, invalidCharMask));
}
return Pair.of(kv.getKey(), currentStrValue);
} else {
return Pair.of(kv.getKey(), kv.getValue());
}
}).collect(Collectors.toMap(Pair::getLeft, Pair::getRight));
} | 3.68 |
zilla_HpackHuffman_decode | /*
* Based on "Fast Prefix Code Processing (by Renato Pajarola)" paper. It
* precomputes all 256 possible bit sequences or node transitions at
* every node. This allows it to jump efficiently from any node to another
* in the code tree by processing bytes simultaneously instead of single
* bits.
*
* https://pdfs.semanticscholar.org/3697/8e4715a7bf21426877132f5b2e9c3d280287.pdf
*
* @return length of decoded string
* -1 if there is an error
*/
public static int decode(DirectBuffer src, MutableDirectBuffer dst)
{
Node current = ROOT;
int offset = 0;
int limit = dst.capacity();
for (int i = 0; i < src.capacity() && offset < limit; i++)
{
int b = src.getByte(i) & 0xff;
Node next = current.transitions[b];
if (next == null)
{
return -1;
}
if (current.symbols[b] != null)
{
dst.putByte(offset++, (byte) current.symbols[b].charAt(0));
if (current.symbols[b].length() == 2 && offset < limit)
{
dst.putByte(offset++, (byte) current.symbols[b].charAt(1));
}
}
current = next;
}
return current.accept && offset < limit ? offset : -1;
} | 3.68 |
hadoop_GangliaSink31_emitMetric | /**
* The method sends metrics to Ganglia servers. The method has been taken from
* org.apache.hadoop.metrics.ganglia.GangliaContext31 with minimal changes in
* order to keep it in sync.
* @param groupName The group name of the metric
* @param name The metric name
* @param type The type of the metric
* @param value The value of the metric
* @param gConf The GangliaConf for this metric
* @param gSlope The slope for this metric
* @throws IOException raised on errors performing I/O.
*/
@Override
protected void emitMetric(String groupName, String name, String type,
String value, GangliaConf gConf, GangliaSlope gSlope)
throws IOException {
if (name == null) {
LOG.warn("Metric was emitted with no name.");
return;
} else if (value == null) {
LOG.warn("Metric name " + name +" was emitted with a null value.");
return;
} else if (type == null) {
LOG.warn("Metric name " + name + ", value " + value + " has no type.");
return;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Emitting metric " + name + ", type " + type + ", value " + value
+ ", slope " + gSlope.name()+ " from hostname " + getHostName());
}
// The following XDR recipe was done through a careful reading of
// gm_protocol.x in Ganglia 3.1 and carefully examining the output of
// the gmetric utility with strace.
// First we send out a metadata message
xdr_int(128); // metric_id = metadata_msg
xdr_string(getHostName()); // hostname
xdr_string(name); // metric name
xdr_int(0); // spoof = False
xdr_string(type); // metric type
xdr_string(name); // metric name
xdr_string(gConf.getUnits()); // units
xdr_int(gSlope.ordinal()); // slope
xdr_int(gConf.getTmax()); // tmax, the maximum time between metrics
xdr_int(gConf.getDmax()); // dmax, the maximum data value
xdr_int(1); /*Num of the entries in extra_value field for
Ganglia 3.1.x*/
xdr_string("GROUP"); /*Group attribute*/
xdr_string(groupName); /*Group value*/
// send the metric to Ganglia hosts
emitToGangliaHosts();
// Now we send out a message with the actual value.
// Technically, we only need to send out the metadata message once for
// each metric, but I don't want to have to record which metrics we did and
// did not send.
xdr_int(133); // we are sending a string value
xdr_string(getHostName()); // hostName
xdr_string(name); // metric name
xdr_int(0); // spoof = False
xdr_string("%s"); // format field
xdr_string(value); // metric value
// send the metric to Ganglia hosts
emitToGangliaHosts();
} | 3.68 |
pulsar_Subscription_isCumulativeAckMode | // Subscription utils
static boolean isCumulativeAckMode(SubType subType) {
return SubType.Exclusive.equals(subType) || SubType.Failover.equals(subType);
} | 3.68 |
flink_DynamicSourceUtils_convertSourceToRel | /**
* Converts a given {@link DynamicTableSource} to a {@link RelNode}. It adds helper projections
* if necessary.
*/
public static RelNode convertSourceToRel(
boolean isBatchMode,
ReadableConfig config,
FlinkRelBuilder relBuilder,
ContextResolvedTable contextResolvedTable,
FlinkStatistic statistic,
List<RelHint> hints,
DynamicTableSource tableSource) {
final String tableDebugName = contextResolvedTable.getIdentifier().asSummaryString();
final ResolvedCatalogTable resolvedCatalogTable = contextResolvedTable.getResolvedTable();
final List<SourceAbilitySpec> sourceAbilities = new ArrayList<>();
// 1. prepare table source
prepareDynamicSource(
tableDebugName,
resolvedCatalogTable,
tableSource,
isBatchMode,
config,
sourceAbilities);
// 2. push table scan
pushTableScan(
isBatchMode,
relBuilder,
contextResolvedTable,
statistic,
hints,
tableSource,
sourceAbilities);
// 3. push project for non-physical columns
final ResolvedSchema schema = contextResolvedTable.getResolvedSchema();
if (!schema.getColumns().stream().allMatch(Column::isPhysical)) {
pushMetadataProjection(relBuilder, schema);
pushGeneratedProjection(relBuilder, schema);
}
// 4. push watermark assigner
if (!isBatchMode && !schema.getWatermarkSpecs().isEmpty()) {
pushWatermarkAssigner(relBuilder, schema);
}
return relBuilder.build();
} | 3.68 |
flink_SubtaskGatewayImpl_markForCheckpoint | /**
* Marks the gateway for the next checkpoint. This remembers the checkpoint ID and will only
* allow closing the gateway for this specific checkpoint.
*
* <p>This is the gateway's mechanism to detect situations where multiple coordinator
* checkpoints would be attempted overlapping, which is currently not supported (the gateway
* doesn't keep a list of events blocked per checkpoint). It also helps to identify situations
* where the checkpoint was aborted even before the gateway was closed (by finding out that the
* {@code currentCheckpointId} was already reset to {@code NO_CHECKPOINT}).
*/
void markForCheckpoint(long checkpointId) {
checkRunsInMainThread();
if (checkpointId > latestAttemptedCheckpointId) {
currentMarkedCheckpointIds.add(checkpointId);
latestAttemptedCheckpointId = checkpointId;
} else {
throw new IllegalStateException(
String.format(
"Regressing checkpoint IDs. Previous checkpointId = %d, new checkpointId = %d",
latestAttemptedCheckpointId, checkpointId));
}
} | 3.68 |
flink_DynamicEventTimeSessionWindows_mergeWindows | /** Merge overlapping {@link TimeWindow}s. */
@Override
public void mergeWindows(Collection<TimeWindow> windows, MergeCallback<TimeWindow> c) {
TimeWindow.mergeWindows(windows, c);
} | 3.68 |
hadoop_RequestHedgingRMFailoverProxyProvider_invoke | /**
* Creates an Executor and invokes all proxies concurrently.
*/
@Override
public Object invoke(Object proxy, final Method method, final Object[] args)
throws Throwable {
if (successfulProxy != null) {
return invokeMethod(nonRetriableProxy.get(successfulProxy), method,
args);
}
LOG.info("Looking for the active RM in " + Arrays.toString(rmServiceIds)
+ "...");
ExecutorService executor = null;
CompletionService<Object> completionService;
try {
Map<Future<Object>, ProxyInfo<T>> proxyMap = new HashMap<>();
executor = HadoopExecutors.newFixedThreadPool(allProxies.size());
completionService = new ExecutorCompletionService<>(executor);
for (final ProxyInfo<T> pInfo : allProxies.values()) {
Callable<Object> c = new Callable<Object>() {
@Override
public Object call() throws Exception {
return method.invoke(pInfo.proxy, args);
}
};
proxyMap.put(completionService.submit(c), pInfo);
}
Future<Object> callResultFuture = completionService.take();
String pInfo = proxyMap.get(callResultFuture).proxyInfo;
successfulProxy = pInfo;
Object retVal;
try {
retVal = callResultFuture.get();
LOG.info("Found active RM [" + pInfo + "]");
return retVal;
} catch (Exception ex) {
// Throw exception from first responding RM so that clients can handle
// appropriately
Throwable rootCause = extraRootException(ex);
LOG.warn("Invocation returned exception: " + rootCause.toString()
+ " on " + "[" + pInfo + "], so propagating back to caller.");
throw rootCause;
}
} finally {
if (executor != null) {
executor.shutdownNow();
}
}
} | 3.68 |
flink_TaskEventDispatcher_registerPartition | /**
* Registers the given partition for incoming task events allowing calls to {@link
* #subscribeToEvent(ResultPartitionID, EventListener, Class)}.
*
* @param partitionId the partition ID
*/
public void registerPartition(ResultPartitionID partitionId) {
checkNotNull(partitionId);
synchronized (registeredHandlers) {
LOG.debug("registering {}", partitionId);
if (registeredHandlers.put(partitionId, new TaskEventHandler()) != null) {
throw new IllegalStateException(
"Partition "
+ partitionId
+ " already registered at task event dispatcher.");
}
}
} | 3.68 |
hbase_BigDecimalComparator_parseFrom | /**
* Parse a serialized representation of {@link BigDecimalComparator}
* @param pbBytes A pb serialized {@link BigDecimalComparator} instance
* @return An instance of {@link BigDecimalComparator} made from <code>bytes</code>
* @throws DeserializationException if an error occurred
* @see #toByteArray
*/
public static BigDecimalComparator parseFrom(final byte[] pbBytes)
throws DeserializationException {
ComparatorProtos.BigDecimalComparator proto;
try {
proto = ComparatorProtos.BigDecimalComparator.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
return new BigDecimalComparator(
Bytes.toBigDecimal(proto.getComparable().getValue().toByteArray()));
} | 3.68 |
streampipes_SupportedFormats_thriftFormat | /**
* Defines that a pipeline element (data processor or data sink) supports processing messages arriving in Thrift
* format
*
* @return The resulting {@link org.apache.streampipes.model.grounding.TransportFormat}.
*/
public static TransportFormat thriftFormat() {
return new TransportFormat(MessageFormat.THRIFT);
} | 3.68 |
hbase_SnapshotDescriptionUtils_readSnapshotInfo | /**
* Read in the {@link SnapshotDescription} stored for the snapshot in the passed directory
* @param fs filesystem where the snapshot was taken
* @param snapshotDir directory where the snapshot was stored
* @return the stored snapshot description
* @throws CorruptedSnapshotException if the snapshot cannot be read
*/
public static SnapshotDescription readSnapshotInfo(FileSystem fs, Path snapshotDir)
throws CorruptedSnapshotException {
Path snapshotInfo = new Path(snapshotDir, SNAPSHOTINFO_FILE);
try (FSDataInputStream in = fs.open(snapshotInfo)) {
return SnapshotDescription.parseFrom(in);
} catch (IOException e) {
throw new CorruptedSnapshotException("Couldn't read snapshot info from:" + snapshotInfo, e);
}
} | 3.68 |
morf_DataSetAdapter_table | /**
* @see org.alfasoftware.morf.dataset.DataSetConsumer#table(org.alfasoftware.morf.metadata.Table, java.lang.Iterable)
*/
@Override
public void table(Table table, Iterable<Record> records) {
consumer.table(table, records);
} | 3.68 |
flink_HiveCatalogLock_createFactory | /** Create a hive lock factory. */
public static CatalogLock.Factory createFactory(HiveConf hiveConf) {
return new HiveCatalogLockFactory(hiveConf);
} | 3.68 |
framework_StaticSection_getHtml | /**
* Returns the HTML content displayed in this cell.
*
* @return the html
*
*/
public String getHtml() {
if (cellState.type != GridStaticCellType.HTML) {
throw new IllegalStateException(
"Cannot fetch HTML from a cell with type "
+ cellState.type);
}
return cellState.html;
} | 3.68 |
hadoop_ApplicationColumn_getColumnQualifier | /**
* @return the column name value
*/
private String getColumnQualifier() {
return columnQualifier;
} | 3.68 |
hadoop_AzureNativeFileSystemStore_connectUsingAnonymousCredentials | /**
* Connect to Azure storage using anonymous credentials.
*
* @param uri
* - URI to target blob (R/O access to public blob)
*
* @throws StorageException
* raised on errors communicating with Azure storage.
* @throws IOException
* raised on errors performing I/O or setting up the session.
* @throws URISyntaxException
* raised on creating mal-formed URI's.
*/
private void connectUsingAnonymousCredentials(final URI uri)
throws StorageException, IOException, URISyntaxException {
// Use an HTTP scheme since the URI specifies a publicly accessible
// container. Explicitly create a storage URI corresponding to the URI
// parameter for use in creating the service client.
String accountName = getAccountFromAuthority(uri);
URI storageUri = new URI(getHTTPScheme() + ":" + PATH_DELIMITER
+ PATH_DELIMITER + accountName);
// Create the service client with anonymous credentials.
String containerName = getContainerFromAuthority(uri);
storageInteractionLayer.createBlobClient(storageUri);
suppressRetryPolicyInClientIfNeeded();
// Capture the container reference.
container = storageInteractionLayer.getContainerReference(containerName);
rootDirectory = container.getDirectoryReference("");
// Check for container existence, and our ability to access it.
boolean canAccess;
try {
canAccess = container.exists(getInstrumentedContext());
} catch (StorageException ex) {
LOG.error("Service returned StorageException when checking existence "
+ "of container {} in account {}", containerName, accountName, ex);
canAccess = false;
}
if (!canAccess) {
throw new AzureException(String.format(NO_ACCESS_TO_CONTAINER_MSG,
accountName, containerName));
}
// Accessing the storage server unauthenticated using
// anonymous credentials.
isAnonymousCredentials = true;
} | 3.68 |
hadoop_AuxServiceRecord_configuration | /**
* Config properties of a service. Configurations provided at the
* service/global level are available to all the components. Specific
* properties can be overridden at the component level.
**/
public AuxServiceRecord configuration(AuxServiceConfiguration conf) {
this.configuration = conf;
return this;
} | 3.68 |
graphhopper_VectorTile_setStringValue | /**
* <pre>
* Exactly one of these values must be present in a valid message
* </pre>
*
* <code>optional string string_value = 1;</code>
*/
public Builder setStringValue(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
stringValue_ = value;
onChanged();
return this;
} | 3.68 |
querydsl_MathExpressions_acos | /**
* Create a {@code acos(num)} expression
*
* <p>Returns the principal value of the arc cosine of num, expressed in radians.</p>
*
* @param num numeric expression
* @return acos(num)
*/
public static <A extends Number & Comparable<?>> NumberExpression<Double> acos(Expression<A> num) {
return Expressions.numberOperation(Double.class, Ops.MathOps.ACOS, num);
} | 3.68 |
hbase_RequestConverter_buildRunCleanerChoreRequest | /**
* Creates a request for running cleaner chore
* @return A {@link RunCleanerChoreRequest}
*/
public static RunCleanerChoreRequest buildRunCleanerChoreRequest() {
return RunCleanerChoreRequest.getDefaultInstance();
} | 3.68 |
zxing_PDF417ScanningDecoder_decode | // TODO don't pass in minCodewordWidth and maxCodewordWidth, pass in barcode columns for start and stop pattern
// columns. That way the width can be deduced from the pattern column.
// This approach also allows detecting more details about the barcode, e.g. if a bar type (white or black) is wider
// than it should be. This can happen if the scanner used a bad blackpoint.
public static DecoderResult decode(BitMatrix image,
ResultPoint imageTopLeft,
ResultPoint imageBottomLeft,
ResultPoint imageTopRight,
ResultPoint imageBottomRight,
int minCodewordWidth,
int maxCodewordWidth)
throws NotFoundException, FormatException, ChecksumException {
BoundingBox boundingBox = new BoundingBox(image, imageTopLeft, imageBottomLeft, imageTopRight, imageBottomRight);
DetectionResultRowIndicatorColumn leftRowIndicatorColumn = null;
DetectionResultRowIndicatorColumn rightRowIndicatorColumn = null;
DetectionResult detectionResult;
for (boolean firstPass = true; ; firstPass = false) {
if (imageTopLeft != null) {
leftRowIndicatorColumn = getRowIndicatorColumn(image, boundingBox, imageTopLeft, true, minCodewordWidth,
maxCodewordWidth);
}
if (imageTopRight != null) {
rightRowIndicatorColumn = getRowIndicatorColumn(image, boundingBox, imageTopRight, false, minCodewordWidth,
maxCodewordWidth);
}
detectionResult = merge(leftRowIndicatorColumn, rightRowIndicatorColumn);
if (detectionResult == null) {
throw NotFoundException.getNotFoundInstance();
}
BoundingBox resultBox = detectionResult.getBoundingBox();
if (firstPass && resultBox != null &&
(resultBox.getMinY() < boundingBox.getMinY() || resultBox.getMaxY() > boundingBox.getMaxY())) {
boundingBox = resultBox;
} else {
break;
}
}
detectionResult.setBoundingBox(boundingBox);
int maxBarcodeColumn = detectionResult.getBarcodeColumnCount() + 1;
detectionResult.setDetectionResultColumn(0, leftRowIndicatorColumn);
detectionResult.setDetectionResultColumn(maxBarcodeColumn, rightRowIndicatorColumn);
boolean leftToRight = leftRowIndicatorColumn != null;
for (int barcodeColumnCount = 1; barcodeColumnCount <= maxBarcodeColumn; barcodeColumnCount++) {
int barcodeColumn = leftToRight ? barcodeColumnCount : maxBarcodeColumn - barcodeColumnCount;
if (detectionResult.getDetectionResultColumn(barcodeColumn) != null) {
// This will be the case for the opposite row indicator column, which doesn't need to be decoded again.
continue;
}
DetectionResultColumn detectionResultColumn;
if (barcodeColumn == 0 || barcodeColumn == maxBarcodeColumn) {
detectionResultColumn = new DetectionResultRowIndicatorColumn(boundingBox, barcodeColumn == 0);
} else {
detectionResultColumn = new DetectionResultColumn(boundingBox);
}
detectionResult.setDetectionResultColumn(barcodeColumn, detectionResultColumn);
int startColumn = -1;
int previousStartColumn = startColumn;
// TODO start at a row for which we know the start position, then detect upwards and downwards from there.
for (int imageRow = boundingBox.getMinY(); imageRow <= boundingBox.getMaxY(); imageRow++) {
startColumn = getStartColumn(detectionResult, barcodeColumn, imageRow, leftToRight);
if (startColumn < 0 || startColumn > boundingBox.getMaxX()) {
if (previousStartColumn == -1) {
continue;
}
startColumn = previousStartColumn;
}
Codeword codeword = detectCodeword(image, boundingBox.getMinX(), boundingBox.getMaxX(), leftToRight,
startColumn, imageRow, minCodewordWidth, maxCodewordWidth);
if (codeword != null) {
detectionResultColumn.setCodeword(imageRow, codeword);
previousStartColumn = startColumn;
minCodewordWidth = Math.min(minCodewordWidth, codeword.getWidth());
maxCodewordWidth = Math.max(maxCodewordWidth, codeword.getWidth());
}
}
}
return createDecoderResult(detectionResult);
} | 3.68 |
morf_TableReference_deepCopy | /**
* Create a deep copy of this table.
*
* @return TableReference a deep copy for this table
*/
public TableReference deepCopy() {
return new TableReference(this, TableReference.this.alias);
} | 3.68 |
hadoop_SQLDelegationTokenSecretManager_getDelegationKey | /**
* Obtains the DelegationKey from the SQL database.
* @param keyId KeyId of the DelegationKey to obtain.
* @return DelegationKey that matches the given keyId or null
* if it doesn't exist in the database.
*/
@Override
protected DelegationKey getDelegationKey(int keyId) {
// Look for delegation key in local cache
DelegationKey delegationKey = super.getDelegationKey(keyId);
if (delegationKey == null) {
try {
// Look for delegation key in SQL database
byte[] delegationKeyBytes = selectDelegationKey(keyId);
if (delegationKeyBytes != null) {
delegationKey = new DelegationKey();
try (ByteArrayInputStream bis = new ByteArrayInputStream(delegationKeyBytes)) {
try (DataInputStream dis = new DataInputStream(bis)) {
delegationKey.readFields(dis);
}
}
// Update delegation key in local cache
allKeys.put(keyId, delegationKey);
}
} catch (IOException | SQLException e) {
LOG.error("Failed to get delegation key in SQL secret manager", e);
}
}
return delegationKey;
} | 3.68 |
dubbo_SlidingWindow_calculatePaneStart | /**
* Calculate the pane start corresponding to the specified timestamp.
*
* @param timeMillis the specified timestamp.
* @return the pane start corresponding to the specified timestamp.
*/
protected long calculatePaneStart(long timeMillis) {
return timeMillis - timeMillis % paneIntervalInMs;
} | 3.68 |
flink_TwoPhaseCommitSinkFunction_recoverAndCommit | /**
* Invoked on recovered transactions after a failure. User implementation must ensure that this
* call will eventually succeed. If it fails, the Flink application will be restarted and it will be
* invoked again. If it does not eventually succeed, data loss will occur. Transactions will
* be recovered in the order in which they were created.
*/
protected void recoverAndCommit(TXN transaction) {
commit(transaction);
} | 3.68 |
hadoop_AbstractConfigurableFederationPolicy_setPolicyContext | /**
* Setter method for the {@link FederationPolicyInitializationContext}.
*
* @param policyContext the context to assign to this policy.
*/
public void setPolicyContext(
FederationPolicyInitializationContext policyContext) {
this.policyContext = policyContext;
} | 3.68 |
framework_Tree_isRoot | /**
* Tests if the Item specified with <code>itemId</code> is a root Item.
*
* @see Container.Hierarchical#isRoot(Object)
*/
@Override
public boolean isRoot(Object itemId) {
return ((Container.Hierarchical) items).isRoot(itemId);
} | 3.68 |
framework_AbstractComponentContainer_removeComponentDetachListener | /* documented in interface */
@Override
@Deprecated
public void removeComponentDetachListener(
ComponentDetachListener listener) {
removeListener(ComponentDetachEvent.class, listener,
ComponentDetachListener.detachMethod);
} | 3.68 |
flink_InternalWindowProcessFunction_open | /** Initialization method for the function. It is called before the actual working methods. */
public void open(Context<K, W> ctx) throws Exception {
this.ctx = ctx;
this.windowAssigner.open(ctx);
} | 3.68 |
hmily_TransactionManagerImpl_markTransactionRollback | /**
* Marks the transaction as rolled back.
*
* @param transId the transaction id.
*/
public void markTransactionRollback(final String transId) {
hmilyXaTransactionManager.markTransactionRollback(transId);
} | 3.68 |
pulsar_GenericRecord_getSchemaType | /**
* Return the schema type.
*
* @return the schema type
* @throws UnsupportedOperationException if this feature is not implemented
* @see SchemaType#AVRO
* @see SchemaType#PROTOBUF_NATIVE
* @see SchemaType#JSON
*/
@Override
default SchemaType getSchemaType() {
throw new UnsupportedOperationException();
} | 3.68 |
framework_VaadinPortletService_getCurrentRequest | /**
* Gets the currently processed Vaadin portlet request. The current request
* is automatically defined when the request is started. The current request
* can not be used in e.g. background threads because of the way server
* implementations reuse request instances.
*
* @return the current Vaadin portlet request instance if available,
* otherwise <code>null</code>
*
*/
public static VaadinPortletRequest getCurrentRequest() {
return (VaadinPortletRequest) VaadinService.getCurrentRequest();
} | 3.68 |
hbase_MultiRowRangeFilter_isStopRowInclusive | /** Returns if stop row is inclusive. */
public boolean isStopRowInclusive() {
return stopRowInclusive;
} | 3.68 |
pulsar_FunctionRuntimeManager_stopAllOwnedFunctions | /**
* It stops all function instances owned by the current worker.
* @throws Exception
*/
public void stopAllOwnedFunctions() {
if (runtimeFactory.externallyManaged()) {
log.warn("Will not stop any functions since they are externally managed");
return;
}
final String workerId = this.workerConfig.getWorkerId();
Map<String, Assignment> assignments = workerIdToAssignments.get(workerId);
if (assignments != null) {
// Take a copy of the map since the stopFunction will modify the same map
// and invalidate the iterator
Map<String, Assignment> copiedAssignments = new TreeMap<>(assignments);
copiedAssignments.values().forEach(assignment -> {
String fullyQualifiedInstanceId = FunctionCommon.getFullyQualifiedInstanceId(assignment.getInstance());
try {
stopFunction(fullyQualifiedInstanceId, false);
} catch (Exception e) {
log.warn("Failed to stop function {} - {}", fullyQualifiedInstanceId, e.getMessage());
}
});
}
} | 3.68 |
hbase_ServerManager_removeRegions | /**
* Called by delete table and similar to notify the ServerManager that a region was removed.
*/
public void removeRegions(final List<RegionInfo> regions) {
for (RegionInfo hri : regions) {
removeRegion(hri);
}
} | 3.68 |
flink_BinaryStringData_toUpperCase | /**
* Converts all of the characters in this {@code BinaryStringData} to upper case.
*
* @return the {@code BinaryStringData}, converted to uppercase.
*/
public BinaryStringData toUpperCase() {
if (javaObject != null) {
return javaToUpperCase();
}
if (binarySection.sizeInBytes == 0) {
return EMPTY_UTF8;
}
int size = binarySection.segments[0].size();
BinaryStringData.SegmentAndOffset segmentAndOffset = startSegmentAndOffset(size);
byte[] bytes = new byte[binarySection.sizeInBytes];
bytes[0] = (byte) Character.toTitleCase(segmentAndOffset.value());
for (int i = 0; i < binarySection.sizeInBytes; i++) {
byte b = segmentAndOffset.value();
if (numBytesForFirstByte(b) != 1) {
// fallback
return javaToUpperCase();
}
int upper = Character.toUpperCase((int) b);
if (upper > 127) {
// fallback
return javaToUpperCase();
}
bytes[i] = (byte) upper;
segmentAndOffset.nextByte(size);
}
return fromBytes(bytes);
} | 3.68 |
flink_HiveParserUtils_toImmutableList | // converts a collection to guava ImmutableList
private static Object toImmutableList(Collection collection) {
try {
Class clz = useShadedImmutableList ? shadedImmutableListClz : immutableListClz;
return HiveReflectionUtils.invokeMethod(
clz, null, "copyOf", new Class[] {Collection.class}, new Object[] {collection});
} catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) {
throw new FlinkHiveException("Failed to create immutable list", e);
}
} | 3.68 |
flink_SlotSharingGroup_build | /** Build the SlotSharingGroup. */
public SlotSharingGroup build() {
if (cpuCores != null && taskHeapMemory != null) {
taskOffHeapMemory = Optional.ofNullable(taskOffHeapMemory).orElse(MemorySize.ZERO);
managedMemory = Optional.ofNullable(managedMemory).orElse(MemorySize.ZERO);
return new SlotSharingGroup(
name,
cpuCores,
taskHeapMemory,
taskOffHeapMemory,
managedMemory,
externalResources);
} else if (cpuCores != null
|| taskHeapMemory != null
|| taskOffHeapMemory != null
|| managedMemory != null
|| !externalResources.isEmpty()) {
throw new IllegalArgumentException(
"The cpu cores and task heap memory are required when specifying the resource of a slot sharing group. "
+ "You need to explicitly configure them with positive value.");
} else {
return new SlotSharingGroup(name);
}
} | 3.68 |
flink_AggregatingStateDescriptor_getAggregateFunction | /** Returns the aggregate function to be used for the state. */
public AggregateFunction<IN, ACC, OUT> getAggregateFunction() {
return aggFunction;
} | 3.68 |
flink_StreamProjection_projectTuple25 | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22,
T23,
T24>
SingleOutputStreamOperator<
Tuple25<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22,
T23,
T24>>
projectTuple25() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType());
TupleTypeInfo<
Tuple25<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22,
T23,
T24>>
tType =
new TupleTypeInfo<
Tuple25<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22,
T23,
T24>>(fTypes);
return dataStream.transform(
"Projection",
tType,
new StreamProject<
IN,
Tuple25<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22,
T23,
T24>>(
fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.68 |
framework_ApplicationConnection_showSessionExpiredError | /**
* Shows the session expiration notification.
*
* @param details
* Optional details.
*/
public void showSessionExpiredError(String details) {
getLogger().severe("Session expired: " + details);
showError(details, configuration.getSessionExpiredError());
} | 3.68 |
hudi_HoodieLogFormatWriter_addShutDownHook | /**
* Close the output stream when the JVM exits.
*/
private void addShutDownHook() {
shutdownThread = new Thread() {
public void run() {
try {
if (output != null) {
close();
}
} catch (Exception e) {
LOG.warn("unable to close output stream for log file " + logFile, e);
// fail silently for any sort of exception
}
}
};
Runtime.getRuntime().addShutdownHook(shutdownThread);
} | 3.68 |
hadoop_BaseRecord_hasOtherFields | /**
* If the record has fields other than the primary keys. This is used by
* TestStateStoreDriverBase to skip the modification check.
*
* @return If the record has more fields.
*/
@VisibleForTesting
public boolean hasOtherFields() {
return true;
} | 3.68 |
hbase_HBaseTestingUtility_waitLabelAvailable | /**
* Wait until labels are ready in the VisibilityLabelsCache.
*/
public void waitLabelAvailable(long timeoutMillis, final String... labels) {
final VisibilityLabelsCache labelsCache = VisibilityLabelsCache.get();
waitFor(timeoutMillis, new Waiter.ExplainingPredicate<RuntimeException>() {
@Override
public boolean evaluate() {
for (String label : labels) {
if (labelsCache.getLabelOrdinal(label) == 0) {
return false;
}
}
return true;
}
@Override
public String explainFailure() {
for (String label : labels) {
if (labelsCache.getLabelOrdinal(label) == 0) {
return label + " is not available yet";
}
}
return "";
}
});
} | 3.68 |
rocketmq-connect_JdbcSinkConnector_validate | /**
* Should be invoked before starting the connector.
*
* @param config
* @return error message
*/
@Override
public void validate(KeyValue config) {
// do validate config
} | 3.68 |
hadoop_IdentifierResolver_getInputWriterClass | /**
* Returns the resolved {@link InputWriter} class.
*/
public Class<? extends InputWriter> getInputWriterClass() {
return inputWriterClass;
} | 3.68 |
pulsar_SingleSnapshotAbortedTxnProcessorImpl_trimExpiredAbortedTxns | // In this implementation we clear the invalid aborted txn IDs one by one.
@Override
public void trimExpiredAbortedTxns() {
while (!aborts.isEmpty() && !((ManagedLedgerImpl) topic.getManagedLedger())
.ledgerExists(aborts.get(aborts.firstKey()).getLedgerId())) {
if (log.isDebugEnabled()) {
log.debug("[{}] Topic transaction buffer clear aborted transaction, TxnId : {}, Position : {}",
topic.getName(), aborts.firstKey(), aborts.get(aborts.firstKey()));
}
aborts.remove(aborts.firstKey());
}
} | 3.68 |
flink_BaseMappingExtractor_createParameterSignatureExtraction | /**
* Extraction that uses the method parameters for producing a {@link FunctionSignatureTemplate}.
*/
static SignatureExtraction createParameterSignatureExtraction(int offset) {
return (extractor, method) -> {
final List<FunctionArgumentTemplate> parameterTypes =
extractArgumentTemplates(
extractor.typeFactory, extractor.getFunctionClass(), method, offset);
final String[] argumentNames = extractArgumentNames(method, offset);
return FunctionSignatureTemplate.of(parameterTypes, method.isVarArgs(), argumentNames);
};
} | 3.68 |
rocketmq-connect_WorkerDirectTask_initializeAndStart | /**
* Initialize and start.
*/
@Override
protected void initializeAndStart() {
starkSinkTask();
startSourceTask();
log.info("Direct task start, config:{}", JSON.toJSONString(taskConfig));
} | 3.68 |
flink_SSLUtils_createInternalSSLContext | /**
* Creates the SSL Context for internal SSL, if internal SSL is configured. For internal SSL,
* the client and server side configuration are identical, because of mutual authentication.
*/
@Nullable
private static SSLContext createInternalSSLContext(Configuration config, boolean clientMode)
throws Exception {
JdkSslContext nettySSLContext =
(JdkSslContext) createInternalNettySSLContext(config, clientMode, JDK);
if (nettySSLContext != null) {
return nettySSLContext.context();
} else {
return null;
}
} | 3.68 |
flink_BinaryHashPartition_addHashBloomFilter | /** Add new hash to bloomFilter when insert a record to spilled partition. */
void addHashBloomFilter(int hash) {
if (bloomFilter != null) {
// check if too full.
if (!bloomFilter.addHash(hash)) {
freeBloomFilter();
}
}
} | 3.68 |
framework_ComputedStyle_getMarginWidth | /**
* Returns the sum of the left and right margin.
*
* @since 7.5.6
* @return the sum of the left and right margin
*/
public double getMarginWidth() {
double marginWidth = getDoubleProperty("marginLeft");
marginWidth += getDoubleProperty("marginRight");
return marginWidth;
} | 3.68 |
framework_AbstractProperty_removeReadOnlyStatusChangeListener | /**
* Removes a previously registered read-only status change listener.
*
* @param listener
* the listener to be removed.
*/
@Override
public void removeReadOnlyStatusChangeListener(
Property.ReadOnlyStatusChangeListener listener) {
if (readOnlyStatusChangeListeners != null) {
readOnlyStatusChangeListeners.remove(listener);
}
} | 3.68 |
hudi_MarkerDirState_deleteAllMarkers | /**
* Deletes markers in the directory.
*
* @return {@code true} if successful; {@code false} otherwise.
*/
public boolean deleteAllMarkers() {
boolean result = FSUtils.deleteDir(hoodieEngineContext, fileSystem, new Path(markerDirPath), parallelism);
allMarkers.clear();
fileMarkersMap.clear();
return result;
} | 3.68 |
morf_ChangeIndex_isApplied | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.upgrade.SchemaChange#isApplied(Schema, ConnectionResources)
*/
@Override
public boolean isApplied(Schema schema, ConnectionResources database) {
if (!schema.tableExists(tableName)) {
return false;
}
Table table = schema.getTable(tableName);
SchemaHomology homology = new SchemaHomology();
for (Index index : table.indexes()) {
if (homology.indexesMatch(index, toIndex)) {
return true;
}
}
return false;
} | 3.68 |
framework_HierarchicalContainer_addFilteredChildrenRecursively | /**
* Recursively adds all items in the includedItems list to the
* filteredChildren map in the same order as they are in the children map.
* Starts from parentItemId and recurses down as long as child items that
* should be included are found.
*
* @param parentItemId
* The item id to start recurse from. Not added to a
* filteredChildren list
* @param includedItems
* Set containing the item ids for the items that should be
* included in the filteredChildren map
*/
private void addFilteredChildrenRecursively(Object parentItemId,
HashSet<Object> includedItems) {
LinkedList<Object> childList = children.get(parentItemId);
if (childList == null) {
return;
}
for (Object childItemId : childList) {
if (includedItems.contains(childItemId)) {
addFilteredChild(parentItemId, childItemId);
addFilteredChildrenRecursively(childItemId, includedItems);
}
}
} | 3.68 |
hadoop_StagingCommitter_getJobAttemptPath | /**
* For a job attempt path, the staging committer returns that of the
* wrapped committer.
* @param context the context of the job.
* @return a path in HDFS.
*/
@Override
public Path getJobAttemptPath(JobContext context) {
return wrappedCommitter.getJobAttemptPath(context);
} | 3.68 |
pulsar_FunctionRuntimeManager_initialize | /**
* Initializes the FunctionRuntimeManager. Does the following:
* 1. Consume all existing assignments to establish existing/latest set of assignments
* 2. After current assignments are read, assignments belonging to this worker will be processed
*
* @return the message id of the message processed during init phase
*/
public MessageId initialize() {
try (Reader<byte[]> reader = WorkerUtils.createReader(
workerService.getClient().newReader(),
workerConfig.getWorkerId() + "-function-assignment-initialize",
workerConfig.getFunctionAssignmentTopic(),
MessageId.earliest)) {
// start init phase
this.isInitializePhase = true;
// keep track of the last message read
MessageId lastMessageRead = MessageId.earliest;
// read all existing messages
while (reader.hasMessageAvailable()) {
Message<byte[]> message = reader.readNext();
lastMessageRead = message.getMessageId();
processAssignmentMessage(message);
}
// init phase is done
this.isInitializePhase = false;
// realize existing assignments
Map<String, Assignment> assignmentMap = workerIdToAssignments.get(this.workerConfig.getWorkerId());
if (assignmentMap != null) {
for (Assignment assignment : assignmentMap.values()) {
if (needsStart(assignment)) {
startFunctionInstance(assignment);
}
}
}
// complete future to indicate initialization is complete
isInitialized.complete(null);
return lastMessageRead;
} catch (Exception e) {
log.error("Failed to initialize function runtime manager: {}", e.getMessage(), e);
throw new RuntimeException(e);
}
} | 3.68 |
flink_MessageSerializer_getRequestId | /**
* De-serializes and returns the request id.
*
* <pre>
* <b>The buffer is expected to be at the request id position.</b>
* </pre>
*
* @param buf The {@link ByteBuf} containing the serialized request id.
* @return The request id.
*/
public static long getRequestId(final ByteBuf buf) {
return buf.readLong();
} | 3.68 |
hadoop_FederationStateStoreFacade_getInstanceInternal | /**
* Returns the singleton instance of the FederationStateStoreFacade object.
*
* @param conf configuration.
* @return the singleton {@link FederationStateStoreFacade} instance
*/
private static FederationStateStoreFacade getInstanceInternal(Configuration conf){
if (facade != null) {
return facade;
}
generateStateStoreFacade(conf);
return facade;
} | 3.68 |
framework_VaadinService_addServiceDestroyListener | /**
* Adds a service destroy listener that gets notified when this service is
* destroyed.
* <p>
* The listeners may be invoked in a non-deterministic order. In particular,
* it is not guaranteed that listeners will be invoked in the order they
* were added.
*
* @since 8.0
* @param listener
* the service destroy listener to add
*
* @see #destroy()
* @see #removeServiceDestroyListener(ServiceDestroyListener)
* @see ServiceDestroyListener
* @return a registration object for removing the listener
*/
public Registration addServiceDestroyListener(
ServiceDestroyListener listener) {
serviceDestroyListeners.add(listener);
return () -> serviceDestroyListeners.remove(listener);
} | 3.68 |
hadoop_DeletionTaskRecoveryInfo_getSuccessorTaskIds | /**
* Return all of the dependent DeletionTasks.
*
* @return the dependent DeletionTasks.
*/
public List<Integer> getSuccessorTaskIds() {
return successorTaskIds;
} | 3.68 |
graphhopper_VectorTile_setDoubleValue | /**
* <code>optional double double_value = 3;</code>
*/
public Builder setDoubleValue(double value) {
bitField0_ |= 0x00000004;
doubleValue_ = value;
onChanged();
return this;
} | 3.68 |
hbase_MasterObserver_postTransitReplicationPeerSyncReplicationState | /**
* Called after transiting the current cluster state for the specified synchronous replication peer
* @param ctx the environment to interact with the framework and master
* @param peerId a short name that identifies the peer
* @param from the old state
* @param to the new state
*/
default void postTransitReplicationPeerSyncReplicationState(
final ObserverContext<MasterCoprocessorEnvironment> ctx, String peerId,
SyncReplicationState from, SyncReplicationState to) throws IOException {
} | 3.68 |
rocketmq-connect_Worker_startConnectors | /**
* assign connector
* <p>
* Start a collection of connectors with the given configs. If a connector is already started with the same configs,
* it will not start again. If a connector is already started but not contained in the new configs, it will stop.
*
* @param connectorConfigs
* @param connectController
* @throws Exception
*/
public synchronized void startConnectors(Map<String, ConnectKeyValue> connectorConfigs,
AbstractConnectController connectController) throws Exception {
// Step 1: Check and stop connectors
checkAndStopConnectors(connectorConfigs.keySet());
// Step 2: Check config update
checkAndReconfigureConnectors(connectorConfigs);
// Step 3: check new
Map<String, ConnectKeyValue> newConnectors = checkAndNewConnectors(connectorConfigs);
//Step 4: start connectors
for (String connectorName : newConnectors.keySet()) {
ClassLoader savedLoader = plugin.currentThreadLoader();
try {
ConnectKeyValue keyValue = newConnectors.get(connectorName);
String connectorClass = keyValue.getString(ConnectorConfig.CONNECTOR_CLASS);
ClassLoader connectorLoader = plugin.delegatingLoader().pluginClassLoader(connectorClass);
savedLoader = Plugin.compareAndSwapLoaders(connectorLoader);
// instance connector
final Connector connector = plugin.newConnector(connectorClass);
WorkerConnector workerConnector = new WorkerConnector(
connectorName,
connector,
connectorConfigs.get(connectorName),
new DefaultConnectorContext(connectorName, connectController),
statusListener,
savedLoader
);
// initial target state
executor.submit(workerConnector);
workerConnector.transitionTo(keyValue.getTargetState(), new Callback<TargetState>() {
@Override
public void onCompletion(Throwable error, TargetState result) {
if (error != null) {
log.error(error.getMessage());
} else {
log.info("Start connector {} and set target state {} successed!!", connectorName, result);
}
}
});
log.info("Connector {} start", workerConnector.getConnectorName());
Plugin.compareAndSwapLoaders(savedLoader);
this.connectors.put(connectorName, workerConnector);
} catch (Exception e) {
Plugin.compareAndSwapLoaders(savedLoader);
log.error("worker connector start exception. workerName: " + connectorName, e);
} finally {
// compare and swap
Plugin.compareAndSwapLoaders(savedLoader);
}
}
// Step 5: check and transition to connectors
checkAndTransitionToConnectors(connectorConfigs);
} | 3.68 |
hadoop_MetricsFilter_accepts | /**
* Whether to accept the record
* @param record to filter on
* @return true to accept; false otherwise.
*/
public boolean accepts(MetricsRecord record) {
return accepts(record.name()) && accepts(record.tags());
} | 3.68 |
morf_SqlDialect_dropStatements | /**
* Creates SQL to drop the named view.
*
* @param view The view to drop
* @return The SQL statements as strings.
*/
public Collection<String> dropStatements(View view) {
return ImmutableList.of("DROP VIEW " + schemaNamePrefix() + view.getName() + " IF EXISTS CASCADE");
} | 3.68 |
framework_AbstractComponent_isResponsive | /**
* Returns true if the component is responsive.
*
* @since 7.5.0
* @return true if the component is responsive
*/
public boolean isResponsive() {
for (Extension e : getExtensions()) {
if (e instanceof Responsive) {
return true;
}
}
return false;
} | 3.68 |
framework_VAbsoluteLayout_getChildWrapper | /**
* Get the wrapper for a widget.
*
* @param child
* The child to get the wrapper for
* @return
*/
protected AbsoluteWrapper getChildWrapper(Widget child) {
for (Widget w : getChildren()) {
if (w instanceof AbsoluteWrapper) {
AbsoluteWrapper wrapper = (AbsoluteWrapper) w;
if (wrapper.getWidget() == child) {
return wrapper;
}
}
}
return null;
} | 3.68 |
pulsar_NonPersistentReplicator_getProducerName | /**
* @return Producer name format : replicatorPrefix.localCluster-->remoteCluster
*/
@Override
protected String getProducerName() {
return getReplicatorName(replicatorPrefix, localCluster) + REPL_PRODUCER_NAME_DELIMITER + remoteCluster;
} | 3.68 |
hadoop_DoubleValueSum_addNextValue | /**
* add a value to the aggregator
*
* @param val
* a double value.
*
*/
public void addNextValue(double val) {
this.sum += val;
} | 3.68 |
hbase_HRegionServer_getOnlineTables | /**
* Gets the online tables in this RS. This method looks at the in-memory onlineRegions.
* @return all the online tables in this RS
*/
public Set<TableName> getOnlineTables() {
Set<TableName> tables = new HashSet<>();
synchronized (this.onlineRegions) {
for (Region region : this.onlineRegions.values()) {
tables.add(region.getTableDescriptor().getTableName());
}
}
return tables;
} | 3.68 |
framework_FlyweightRow_getCells | /**
* Returns a subrange of flyweight cells for the client code to render. The
* cells get their associated {@link FlyweightCell#getElement() elements}
* from the row element.
* <p>
* Precondition: each cell has a corresponding element in the row
*
* @param offset
* the index of the first cell to return
* @param numberOfCells
* the number of cells to return
* @return an iterable of flyweight cells
*/
public Iterable<FlyweightCell> getCells(final int offset,
final int numberOfCells) {
assertSetup();
assert offset >= 0 && offset + numberOfCells <= cells
.size() : "Invalid range of cells";
return () -> CellIterator
.attached(cells.subList(offset, offset + numberOfCells));
} | 3.68 |
framework_AbstractConnector_getResourceUrl | /**
* Gets the URL for a resource that has been added by the server-side
* connector using
* {@link com.vaadin.terminal.AbstractClientConnector#setResource(String, com.vaadin.terminal.Resource)}
* with the same key. {@code null} is returned if no corresponding resource
* is found.
*
* @param key
* a string identifying the resource.
* @return the resource URL as a string, or {@code null} if no corresponding
* resource is found.
*/
public String getResourceUrl(String key) {
URLReference urlReference = getState().resources.get(key);
if (urlReference == null) {
return null;
}
return urlReference.getURL();
} | 3.68 |
framework_Alignment_isTop | /**
* Checks if component is aligned to the top of the available space.
*
* @return true if aligned top
*/
public boolean isTop() {
return (bitMask & Bits.ALIGNMENT_TOP) == Bits.ALIGNMENT_TOP;
} | 3.68 |
framework_ColorPickerPopup_setHistoryVisible | /**
* Sets the visibility of the History.
*
* @param visible
* {@code true} to show the history, {@code false} to hide it
*/
public void setHistoryVisible(boolean visible) {
historyContainer.setVisible(visible);
resize.setVisible(visible);
} | 3.68 |
hmily_TransactionImpl_createSubTransaction | /**
* Creates a sub-transaction.
*
* @return the transaction
*/
public TransactionImpl createSubTransaction() {
return new TransactionImpl(this);
} | 3.68 |
framework_InfoSection_refresh | /**
* Updates the information for a single running application
*
* @since 7.1
*/
private void refresh(ApplicationConnection connection) {
clear();
ApplicationConfiguration configuration = connection.getConfiguration();
addVersionInfo(configuration);
addRow("Widget set", GWT.getModuleName());
addRow("Theme", connection.getUIConnector().getActiveTheme());
String communicationMethodInfo = connection.getMessageSender()
.getCommunicationMethodName();
int pollInterval = connection.getUIConnector().getState().pollInterval;
if (pollInterval > 0) {
communicationMethodInfo += " (poll interval " + pollInterval
+ "ms)";
}
addRow("Communication method", communicationMethodInfo);
String heartBeatInfo;
if (configuration.getHeartbeatInterval() < 0) {
heartBeatInfo = "Disabled";
} else {
heartBeatInfo = configuration.getHeartbeatInterval() + "s";
}
addRow("Heartbeat", heartBeatInfo);
} | 3.68 |
flink_TypeExtractor_countTypeInHierarchy | /** @return number of items with equal type or same raw type */
private static int countTypeInHierarchy(List<Type> typeHierarchy, Type type) {
int count = 0;
for (Type t : typeHierarchy) {
if (t == type
|| (isClassType(type) && t == typeToClass(type))
|| (isClassType(t) && typeToClass(t) == type)) {
count++;
}
}
return count;
} | 3.68 |
flink_CliFrontend_getJobJarAndDependencies | /** Get all provided libraries needed to run the program from the ProgramOptions. */
private List<URL> getJobJarAndDependencies(ProgramOptions programOptions)
throws CliArgsException {
String entryPointClass = programOptions.getEntryPointClassName();
String jarFilePath = programOptions.getJarFilePath();
try {
File jarFile = jarFilePath != null ? getJarFile(jarFilePath) : null;
return PackagedProgram.getJobJarAndDependencies(jarFile, entryPointClass);
} catch (FileNotFoundException | ProgramInvocationException e) {
throw new CliArgsException(
"Could not get job jar and dependencies from JAR file: " + e.getMessage(), e);
}
} | 3.68 |
hadoop_SuccessData_getDate | /** @return timestamp as date; no expectation of parseability. */
public String getDate() {
return date;
} | 3.68 |
flink_TableConfigUtils_getLocalTimeZone | /**
* Similar to {@link TableConfig#getLocalTimeZone()} but extracting it from a generic {@link
* ReadableConfig}.
*
* @see TableConfig#getLocalTimeZone()
*/
public static ZoneId getLocalTimeZone(ReadableConfig tableConfig) {
final String zone = tableConfig.get(TableConfigOptions.LOCAL_TIME_ZONE);
if (TableConfigOptions.LOCAL_TIME_ZONE.defaultValue().equals(zone)) {
return ZoneId.systemDefault();
}
validateTimeZone(zone);
return ZoneId.of(zone);
} | 3.68 |
hbase_AbstractProtobufWALReader_getWriterClsNames | /**
* Returns names of the accepted writer classes
*/
public List<String> getWriterClsNames() {
return WRITER_CLS_NAMES;
} | 3.68 |
hudi_ImmutablePair_getLeft | /**
* {@inheritDoc}
*/
@Override
public L getLeft() {
return left;
} | 3.68 |
zxing_DecodeWorker_dumpBlackPoint | /**
* Writes out a single PNG which is three times the width of the input image, containing from left
* to right: the original image, the row sampling monochrome version, and the 2D sampling
* monochrome version.
*/
private static void dumpBlackPoint(URI uri, BufferedImage image, BinaryBitmap bitmap) throws IOException {
int width = bitmap.getWidth();
int height = bitmap.getHeight();
int stride = width * 3;
int[] pixels = new int[stride * height];
// The original image
int[] argb = new int[width];
for (int y = 0; y < height; y++) {
image.getRGB(0, y, width, 1, argb, 0, width);
System.arraycopy(argb, 0, pixels, y * stride, width);
}
// Row sampling
BitArray row = new BitArray(width);
for (int y = 0; y < height; y++) {
try {
row = bitmap.getBlackRow(y, row);
} catch (NotFoundException nfe) {
// If fetching the row failed, draw a red line and keep going.
int offset = y * stride + width;
Arrays.fill(pixels, offset, offset + width, RED);
continue;
}
int offset = y * stride + width;
for (int x = 0; x < width; x++) {
pixels[offset + x] = row.get(x) ? BLACK : WHITE;
}
}
// 2D sampling
try {
for (int y = 0; y < height; y++) {
BitMatrix matrix = bitmap.getBlackMatrix();
int offset = y * stride + width * 2;
for (int x = 0; x < width; x++) {
pixels[offset + x] = matrix.get(x, y) ? BLACK : WHITE;
}
}
} catch (NotFoundException ignored) {
// continue
}
writeResultImage(stride, height, pixels, uri, ".mono.png");
} | 3.68 |
AreaShop_Utils_applyColors | /**
* Convert color and formatting codes to bukkit values.
* @param input Start string with color and formatting codes in it
* @return String with the color and formatting codes in the bukkit format
*/
public static String applyColors(String input) {
String result = null;
if(input != null) {
result = ChatColor.translateAlternateColorCodes('&', input);
}
return result;
} | 3.68 |
hbase_ProcedureExecutor_createNonceKey | // ==========================================================================
// Nonce Procedure helpers
// ==========================================================================
/**
* Create a NonceKey from the specified nonceGroup and nonce.
* @param nonceGroup the group to use for the {@link NonceKey}
* @param nonce the nonce to use in the {@link NonceKey}
* @return the generated NonceKey
*/
public NonceKey createNonceKey(final long nonceGroup, final long nonce) {
return (nonce == HConstants.NO_NONCE) ? null : new NonceKey(nonceGroup, nonce);
} | 3.68 |
zxing_CalendarParsedResult_getEnd | /**
* @return event end {@link Date}, or {@code null} if event has no duration
* @deprecated use {@link #getEndTimestamp()}
*/
@Deprecated
public Date getEnd() {
return end < 0L ? null : new Date(end);
} | 3.68 |
framework_AbstractSplitPanel_setSplitPositionLimits | /**
* Sets the maximum and minimum position of the splitter. If the split
* position is reversed, maximum and minimum are also reversed.
*
* @param minPos
* the new minimum position
* @param minPosUnit
* the unit (from {@link Sizeable}) in which the minimum position
* is given.
* @param maxPos
* the new maximum position
* @param maxPosUnit
* the unit (from {@link Sizeable}) in which the maximum position
* is given.
*/
private void setSplitPositionLimits(float minPos, Unit minPosUnit,
float maxPos, Unit maxPosUnit) {
if ((minPosUnit != Unit.PERCENTAGE && minPosUnit != Unit.PIXELS)
|| (maxPosUnit != Unit.PERCENTAGE
&& maxPosUnit != Unit.PIXELS)) {
throw new IllegalArgumentException(
"Only percentage and pixel units are allowed");
}
SplitterState state = getSplitterState();
state.minPosition = minPos;
state.minPositionUnit = minPosUnit.getSymbol();
posMinUnit = minPosUnit;
state.maxPosition = maxPos;
state.maxPositionUnit = maxPosUnit.getSymbol();
posMaxUnit = maxPosUnit;
} | 3.68 |
hmily_JavaBeanBinder_addSetter | /**
* Add setter.
*
* @param setter the setter
*/
void addSetter(final Method setter) {
if (this.setter == null) {
this.setter = setter;
}
} | 3.68 |
hudi_HiveSchemaUtil_parquetSchemaToMapSchema | /**
* Returns schema in Map<String,String> form read from a parquet file.
*
* @param messageType : parquet Schema
* @param supportTimestamp
* @param doFormat : This option controls whether the schema will have spaces in the value part of the schema map. This is required because spaces in a complex schema trip up the HMS create table calls.
* This value will be false for HMS but true for QueryBasedDDLExecutors
* @return : Intermediate schema in the form of Map<String, String>
*/
public static LinkedHashMap<String, String> parquetSchemaToMapSchema(MessageType messageType, boolean supportTimestamp, boolean doFormat) throws IOException {
LinkedHashMap<String, String> schema = new LinkedHashMap<>();
List<Type> parquetFields = messageType.getFields();
for (Type parquetType : parquetFields) {
StringBuilder result = new StringBuilder();
String key = parquetType.getName();
if (parquetType.isRepetition(Type.Repetition.REPEATED)) {
result.append(createHiveArray(parquetType, "", supportTimestamp, doFormat));
} else {
result.append(convertField(parquetType, supportTimestamp, doFormat));
}
schema.put(key, result.toString());
}
return schema;
} | 3.68 |