name | code_snippet | score |
---|---|---|
morf_Deployment_deploy | /**
* Creates deployment statements using the supplied source meta data.
*
* @param upgradeSteps All available upgrade steps.
* @param targetSchema Schema that is to be deployed.
*/
public void deploy(Schema targetSchema, Collection<Class<? extends UpgradeStep>> upgradeSteps) {
UpgradePath path = getPath(targetSchema, upgradeSteps);
sqlScriptExecutorProvider.get().execute(path.getSql());
} | 3.68 |
flink_SqlTimeParser_parseField | /**
* Static utility to parse a field of type Time from a byte sequence that represents text
* characters (such as when read from a file stream).
*
* @param bytes The bytes containing the text data that should be parsed.
* @param startPos The offset to start the parsing.
* @param length The length of the byte sequence (counting from the offset).
* @param delimiter The delimiter that terminates the field.
* @return The parsed value.
 * @throws IllegalArgumentException Thrown when the value cannot be parsed because the text
 *     does not represent a correct number.
*/
public static final Time parseField(byte[] bytes, int startPos, int length, char delimiter) {
final int limitedLen = nextStringLength(bytes, startPos, length, delimiter);
if (limitedLen > 0
&& (Character.isWhitespace(bytes[startPos])
|| Character.isWhitespace(bytes[startPos + limitedLen - 1]))) {
throw new NumberFormatException(
"There is leading or trailing whitespace in the numeric field.");
}
final String str = new String(bytes, startPos, limitedLen, ConfigConstants.DEFAULT_CHARSET);
return Time.valueOf(str);
} | 3.68 |
zxing_MaskUtil_applyMaskPenaltyRule3 | /**
* Apply mask penalty rule 3 and return the penalty. Find consecutive runs of 1:1:3:1:1:4
* starting with black, or 4:1:1:3:1:1 starting with white, and give penalty to them. If we
* find patterns like 000010111010000, we give penalty once.
*/
static int applyMaskPenaltyRule3(ByteMatrix matrix) {
int numPenalties = 0;
byte[][] array = matrix.getArray();
int width = matrix.getWidth();
int height = matrix.getHeight();
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
byte[] arrayY = array[y]; // We can at least optimize this access
if (x + 6 < width &&
arrayY[x] == 1 &&
arrayY[x + 1] == 0 &&
arrayY[x + 2] == 1 &&
arrayY[x + 3] == 1 &&
arrayY[x + 4] == 1 &&
arrayY[x + 5] == 0 &&
arrayY[x + 6] == 1 &&
(isWhiteHorizontal(arrayY, x - 4, x) || isWhiteHorizontal(arrayY, x + 7, x + 11))) {
numPenalties++;
}
if (y + 6 < height &&
array[y][x] == 1 &&
array[y + 1][x] == 0 &&
array[y + 2][x] == 1 &&
array[y + 3][x] == 1 &&
array[y + 4][x] == 1 &&
array[y + 5][x] == 0 &&
array[y + 6][x] == 1 &&
(isWhiteVertical(array, x, y - 4, y) || isWhiteVertical(array, x, y + 7, y + 11))) {
numPenalties++;
}
}
}
return numPenalties * N3;
} | 3.68 |
MagicPlugin_BlockSpell_goRight | /**
 * A helper function to change a given direction to the direction "to the right".
*
* <p>There's probably some better matrix-y, math-y way to do this.
* It'd be nice if this was in BlockFace.
*
* @param direction The current direction
* @return The direction to the right
*/
public static BlockFace goRight(BlockFace direction)
{
switch (direction)
{
case EAST:
return BlockFace.SOUTH;
case SOUTH:
return BlockFace.WEST;
case WEST:
return BlockFace.NORTH;
case NORTH:
return BlockFace.EAST;
default:
return direction;
}
} | 3.68 |
graphhopper_RouterConfig_setSimplifyResponse | /**
* This method specifies if the returned path should be simplified or not, via Ramer-Douglas-Peucker
* or similar algorithm.
*/
public void setSimplifyResponse(boolean simplifyResponse) {
this.simplifyResponse = simplifyResponse;
} | 3.68 |
hadoop_LongKeyConverter_encode | /*
* (non-Javadoc)
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter
* #encode(java.lang.Object)
*/
@Override
public byte[] encode(Long key) {
try {
// IOException will not be thrown here as we are explicitly passing
// Long.
return longConverter.encodeValue(key);
} catch (IOException e) {
return null;
}
} | 3.68 |
flink_MemoryLogger_getMemoryUsageStatsAsString | /**
* Gets the memory footprint of the JVM in a string representation.
*
* @return A string describing how much heap memory and direct memory are allocated and used.
*/
public static String getMemoryUsageStatsAsString(MemoryMXBean memoryMXBean) {
MemoryUsage heap = memoryMXBean.getHeapMemoryUsage();
MemoryUsage nonHeap = memoryMXBean.getNonHeapMemoryUsage();
long heapUsed = heap.getUsed() >> 20;
long heapCommitted = heap.getCommitted() >> 20;
long heapMax = heap.getMax() >> 20;
long nonHeapUsed = nonHeap.getUsed() >> 20;
long nonHeapCommitted = nonHeap.getCommitted() >> 20;
long nonHeapMax = nonHeap.getMax() >> 20;
return String.format(
"Memory usage stats: [HEAP: %d/%d/%d MB, "
+ "NON HEAP: %d/%d/%d MB (used/committed/max)]",
heapUsed, heapCommitted, heapMax, nonHeapUsed, nonHeapCommitted, nonHeapMax);
} | 3.68 |
flink_CatalogManager_resolveCompactManagedTableOptions | /**
 * Resolve dynamic options for a compact operation on a Flink managed table.
*
* @param origin The resolved managed table with enriched options.
* @param tableIdentifier The fully qualified path of the managed table.
* @param partitionSpec User-specified unresolved partition spec.
* @return dynamic options which describe the metadata of compaction
*/
public Map<String, String> resolveCompactManagedTableOptions(
ResolvedCatalogTable origin,
ObjectIdentifier tableIdentifier,
CatalogPartitionSpec partitionSpec) {
return managedTableListener.notifyTableCompaction(
catalogs.getOrDefault(tableIdentifier.getCatalogName(), null),
tableIdentifier,
origin,
partitionSpec,
false);
} | 3.68 |
framework_VRadioButtonGroup_buildOptions | /**
* Build all the options.
*
* @param items
* the list of options
*/
public void buildOptions(List<JsonObject> items) {
Roles.getRadiogroupRole().set(getElement());
int i = 0;
int widgetsToRemove = getWidget().getWidgetCount() - items.size();
if (widgetsToRemove < 0) {
widgetsToRemove = 0;
}
List<Widget> remove = new ArrayList<>(widgetsToRemove);
for (Widget widget : getWidget()) {
if (i < items.size()) {
updateItem((RadioButton) widget, items.get(i), false);
i++;
} else {
remove.add(widget);
}
}
remove.stream().forEach(this::remove);
while (i < items.size()) {
updateItem(new RadioButton(groupId), items.get(i), true);
i++;
}
} | 3.68 |
pulsar_TopicName_fromPersistenceNamingEncoding | /**
 * Get the topic full name from the managedLedgerName.
*
* @return the topic full name, format -> domain://tenant/namespace/topic
*/
public static String fromPersistenceNamingEncoding(String mlName) {
// The managedLedgerName convention is: tenant/namespace/domain/topic
// We want to transform to topic full name in the order: domain://tenant/namespace/topic
if (mlName == null || mlName.length() == 0) {
return mlName;
}
List<String> parts = Splitter.on("/").splitToList(mlName);
String tenant;
String cluster;
String namespacePortion;
String domain;
String localName;
if (parts.size() == 4) {
tenant = parts.get(0);
cluster = null;
namespacePortion = parts.get(1);
domain = parts.get(2);
localName = parts.get(3);
return String.format("%s://%s/%s/%s", domain, tenant, namespacePortion, localName);
} else if (parts.size() == 5) {
tenant = parts.get(0);
cluster = parts.get(1);
namespacePortion = parts.get(2);
domain = parts.get(3);
localName = parts.get(4);
return String.format("%s://%s/%s/%s/%s", domain, tenant, cluster, namespacePortion, localName);
} else {
throw new IllegalArgumentException("Invalid managedLedger name: " + mlName);
}
} | 3.68 |
hmily_CollectionUtils_create | /**
* Create collection.
*
* @param <E> the type parameter
* @param collectionType the collection type
* @param elementType the element type
* @param capacity the capacity
* @return the collection
*/
@SuppressWarnings("unchecked")
public <E> Collection<E> create(final Class<?> collectionType, final Class<?> elementType, final int capacity) {
if (collectionType.isInterface()) {
if (Set.class == collectionType || Collection.class == collectionType) {
return new LinkedHashSet<>(capacity);
} else if (List.class == collectionType) {
return new ArrayList<>(capacity);
} else if (SortedSet.class == collectionType || NavigableSet.class == collectionType) {
return new TreeSet<>();
} else {
throw new IllegalArgumentException("Unsupported Collection interface: " + collectionType.getName());
}
} else if (EnumSet.class == collectionType) {
// Cast is necessary for compilation in Eclipse 4.4.1.
return (Collection<E>) EnumSet.noneOf(asEnumType(elementType));
} else {
if (!Collection.class.isAssignableFrom(collectionType)) {
throw new IllegalArgumentException("Unsupported Collection type: " + collectionType.getName());
}
try {
return (Collection<E>) collectionType.newInstance();
} catch (Throwable ex) {
throw new IllegalArgumentException(
"Could not instantiate Collection type: " + collectionType.getName(), ex);
}
}
} | 3.68 |
flink_AsyncSinkWriter_completeRequest | /**
* Marks an in-flight request as completed and prepends failed requestEntries back to the
* internal requestEntry buffer for later retry.
*
* @param failedRequestEntries requestEntries that need to be retried
*/
private void completeRequest(
List<RequestEntryT> failedRequestEntries, int batchSize, long requestStartTime)
throws InterruptedException {
lastSendTimestamp = requestStartTime;
ackTime = System.currentTimeMillis();
inFlightRequestsCount--;
rateLimitingStrategy.registerCompletedRequest(
new BasicResultInfo(failedRequestEntries.size(), batchSize));
ListIterator<RequestEntryT> iterator =
failedRequestEntries.listIterator(failedRequestEntries.size());
while (iterator.hasPrevious()) {
addEntryToBuffer(iterator.previous(), true);
}
nonBlockingFlush();
} | 3.68 |
framework_AbstractListing_getData | /**
* Gets a data object based on its client-side identifier key.
*
* @param key
* key for data object
* @return the data object
*/
protected T getData(String key) {
return getParent().getDataCommunicator().getKeyMapper().get(key);
} | 3.68 |
flink_CompletedCheckpointStats_discard | /** Marks the checkpoint as discarded. */
void discard() {
discarded = true;
} | 3.68 |
framework_ExtraSuperTextAreaConnector_getState | // @DelegateToWidget will not work with overridden state
@Override
public ExtraSuperTextAreaState getState() {
return (ExtraSuperTextAreaState) super.getState();
} | 3.68 |
framework_ClassResource_getCacheTime | /**
* Gets the length of cache expiration time.
*
* <p>
 * This gives the adapter the possibility to cache streams sent to the client.
* The caching may be made in adapter or at the client if the client
* supports caching. Default is {@link DownloadStream#DEFAULT_CACHETIME}.
* </p>
*
* @return Cache time in milliseconds
*/
public long getCacheTime() {
return cacheTime;
} | 3.68 |
hbase_DynamicMetricsRegistry_getTag | /**
* Get a tag by name
* @param name of the tag
* @return the tag object
*/
public MetricsTag getTag(String name) {
return tagsMap.get(name);
} | 3.68 |
hudi_BufferedRandomAccessFile_seek | /**
* If the new seek position is in the buffer, adjust the currentPosition.
* If the new seek position is outside of the buffer, flush the contents to
* the file and reload the buffer corresponding to the position.
*
 * We logically view the file as a group of blocks, where each block will perfectly
* fit into the buffer (except for the last block). Given a position to seek,
* we identify the block to be loaded using BUFFER_BOUNDARY_MASK.
*
* When dealing with the last block, we will have extra space between validLastPosition
* and endPosition of the buffer.
*
* @param pos - position in the file to be loaded to the buffer.
* @throws IOException
*/
@Override
public void seek(long pos) throws IOException {
if (pos >= this.validLastPosition || pos < this.startPosition) {
// seeking outside of current buffer -- flush and read
this.flushBuffer();
this.startPosition = pos & BUFFER_BOUNDARY_MASK; // start at BuffSz boundary
alignDiskPositionToBufferStartIfNeeded();
int n = this.fillBuffer();
this.validLastPosition = this.startPosition + (long) n;
} else {
// seeking inside current buffer -- no read required
if (pos < this.currentPosition) {
// if seeking backwards, flush buffer.
this.flushBuffer();
}
}
this.currentPosition = pos;
} | 3.68 |
dubbo_MetricsApplicationListener_onErrorEventBuild | /**
* Similar to onFinishEventBuild
*/
public static AbstractMetricsKeyListener onErrorEventBuild(
MetricsKey metricsKey, MetricsPlaceValue placeType, CombMetricsCollector<?> collector) {
return AbstractMetricsKeyListener.onError(metricsKey, event -> {
collector.increment(metricsKey);
collector.addApplicationRt(placeType.getType(), event.getTimePair().calc());
});
} | 3.68 |
hadoop_HdfsFileStatus_group | /**
* Set the group for this entity (default = null).
* @param group Group
* @return This Builder instance
*/
public Builder group(String group) {
this.group = group;
return this;
} | 3.68 |
framework_VScrollTable_getEventTargetTdOrTr | /**
* Finds the TD that the event interacts with. Returns null if the
* target of the event should not be handled. If the event target is
* the row directly this method returns the TR element instead of
* the TD.
*
* @param event
* @return TD or TR element that the event targets (the actual event
* target is this element or a child of it)
*/
private Element getEventTargetTdOrTr(Event event) {
final Element eventTarget = event.getEventTarget().cast();
return getElementTdOrTr(eventTarget);
} | 3.68 |
hadoop_DiskBalancerWorkItem_setMaxDiskErrors | /**
* Sets maximum disk errors to tolerate before we fail this copy step.
*
* @param maxDiskErrors long
*/
public void setMaxDiskErrors(long maxDiskErrors) {
this.maxDiskErrors = maxDiskErrors;
} | 3.68 |
morf_ResolvedTables_addReadTable | /**
 * Store information about a read of the given table.
*
* @param tableName read table
*/
public void addReadTable(String tableName) {
if (!modifiedTables.contains(tableName.toUpperCase())) {
readTables.add(tableName.toUpperCase());
}
} | 3.68 |
flink_ConfigurationUtils_parseMap | /**
* Parses a string as a map of strings. The expected format of the map is:
*
* <pre>
* key1:value1,key2:value2
* </pre>
*
* <p>Parts of the string can be escaped by wrapping with single or double quotes.
*
* @param stringSerializedMap a string to parse
* @return parsed map
*/
public static Map<String, String> parseMap(String stringSerializedMap) {
return StructuredOptionsSplitter.splitEscaped(stringSerializedMap, ',').stream()
.map(p -> StructuredOptionsSplitter.splitEscaped(p, ':'))
.collect(
Collectors.toMap(
arr -> arr.get(0), // key name
arr -> arr.get(1) // value
));
} | 3.68 |
hadoop_SchedulerHealth_getPreemptionCount | /**
* Get the count of preemption from the latest scheduler health report.
*
* @return preemption count
*/
public Long getPreemptionCount() {
return getOperationCount(Operation.PREEMPTION);
} | 3.68 |
graphhopper_OSMValueExtractor_conditionalWeightToTons | /**
* This parses the weight for a conditional value like "delivery @ (weight > 7.5)"
*/
public static double conditionalWeightToTons(String value) {
try {
int index = value.indexOf("weight>"); // maxweight or weight
if (index < 0) {
index = value.indexOf("weight >");
if (index > 0) index += "weight >".length();
} else {
index += "weight>".length();
}
if (index > 0) {
int lastIndex = value.indexOf(')', index); // (value) or value
if (lastIndex < 0) lastIndex = value.length() - 1;
if (lastIndex > index)
return OSMValueExtractor.stringToTons(value.substring(index, lastIndex));
}
return Double.NaN;
} catch (Exception ex) {
throw new RuntimeException("value " + value, ex);
}
} | 3.68 |
querydsl_JTSGeometryExpression_convexHull | /**
* Returns a geometric object that represents the convex hull of this geometric object.
* Convex hulls, being dependent on straight lines, can be accurately represented in linear
* interpolations for any geometry restricted to linear interpolations.
*
* @return convex hull
*/
public JTSGeometryExpression<Geometry> convexHull() {
if (convexHull == null) {
convexHull = JTSGeometryExpressions.geometryOperation(SpatialOps.CONVEXHULL, mixin);
}
return convexHull;
} | 3.68 |
flink_SkipListUtils_helpSetNextNode | /**
* Set the next node of the given node at the given level.
*
* @param node the node.
* @param nextNode the next node to set.
* @param level the level to find the next node.
* @param levelIndexHeader the header of the level index.
* @param spaceAllocator the space allocator.
*/
static void helpSetNextNode(
long node,
long nextNode,
int level,
LevelIndexHeader levelIndexHeader,
Allocator spaceAllocator) {
if (node == HEAD_NODE) {
levelIndexHeader.updateNextNode(level, nextNode);
return;
}
Chunk chunk = spaceAllocator.getChunkById(SpaceUtils.getChunkIdByAddress(node));
int offsetInChunk = SpaceUtils.getChunkOffsetByAddress(node);
MemorySegment segment = chunk.getMemorySegment(offsetInChunk);
int offsetInByteBuffer = chunk.getOffsetInSegment(offsetInChunk);
if (level == 0) {
putNextKeyPointer(segment, offsetInByteBuffer, nextNode);
} else {
putNextIndexNode(segment, offsetInByteBuffer, level, nextNode);
}
} | 3.68 |
dubbo_LruCacheFactory_createCache | /**
 * Takes url as a method argument and returns a new instance of a cache store implemented by LruCache.
* @param url url of the method
 * @return LruCache instance of cache
*/
@Override
protected Cache createCache(URL url) {
return new LruCache(url);
} | 3.68 |
hudi_HoodieDataSourceHelpers_latestCommit | /**
* Returns the last successful write operation's instant time.
*/
@PublicAPIMethod(maturity = ApiMaturityLevel.STABLE)
public static String latestCommit(FileSystem fs, String basePath) {
HoodieTimeline timeline = allCompletedCommitsCompactions(fs, basePath);
return timeline.lastInstant().get().getTimestamp();
} | 3.68 |
flink_ChainedStateHandle_isEmpty | /**
 * Check if there are any state handles present. Notice that this can be true even if {@link
* #getLength()} is greater than zero, because state handles can be null.
*
* @return true if there are no state handles for any operator.
*/
public boolean isEmpty() {
for (T state : operatorStateHandles) {
if (state != null) {
return false;
}
}
return true;
} | 3.68 |
hbase_MetricsSink_getTimestampOfLastAppliedOp | /**
 * Get the TimestampOfLastAppliedOp. If no replication Op has been applied yet, the value is the
 * timestamp at which the hbase instance starts
* @return timeStampsOfLastAppliedOp;
*/
public long getTimestampOfLastAppliedOp() {
return this.lastTimestampForAge;
} | 3.68 |
hbase_Bytes_toString | /**
* This method will convert utf8 encoded bytes into a string. If the given byte array is null,
* this method will return null.
* @param b Presumed UTF-8 encoded byte array.
* @param off offset into array
* @param len length of utf-8 sequence
* @return String made from <code>b</code> or null
*/
public static String toString(final byte[] b, int off, int len) {
if (b == null) {
return null;
}
if (len == 0) {
return "";
}
try {
return new String(b, off, len, UTF8_CSN);
} catch (UnsupportedEncodingException e) {
// should never happen!
throw new IllegalArgumentException("UTF8 encoding is not supported", e);
}
} | 3.68 |
hadoop_ListResultSchema_paths | /**
 * Get the paths value.
*
* @return the paths value
*/
public List<ListResultEntrySchema> paths() {
return this.paths;
} | 3.68 |
hudi_HoodieCreateHandle_setupWriteStatus | /**
* Set up the write status.
*
* @throws IOException if error occurs
*/
protected void setupWriteStatus() throws IOException {
HoodieWriteStat stat = writeStatus.getStat();
stat.setPartitionPath(writeStatus.getPartitionPath());
stat.setNumWrites(recordsWritten);
stat.setNumDeletes(recordsDeleted);
stat.setNumInserts(insertRecordsWritten);
stat.setPrevCommit(HoodieWriteStat.NULL_COMMIT);
stat.setFileId(writeStatus.getFileId());
stat.setPath(new Path(config.getBasePath()), path);
stat.setTotalWriteErrors(writeStatus.getTotalErrorRecords());
long fileSize = FSUtils.getFileSize(fs, path);
stat.setTotalWriteBytes(fileSize);
stat.setFileSizeInBytes(fileSize);
RuntimeStats runtimeStats = new RuntimeStats();
runtimeStats.setTotalCreateTime(timer.endTimer());
stat.setRuntimeStats(runtimeStats);
} | 3.68 |
MagicPlugin_PreLoadEvent_registerCurrency | /**
* Register a custom currency, which can be used in shops, spell worth/earns and casting costs.
*
* @param currency A currency instance to register
*/
public void registerCurrency(Currency currency) {
currencies.add(currency);
} | 3.68 |
flink_SqlFunctionUtils_replace | /** Replaces all the old strings with the replacement string. */
public static String replace(String str, String oldStr, String replacement) {
return str.replace(oldStr, replacement);
} | 3.68 |
morf_AbstractSqlDialectTest_shouldGenerateCorrectSqlForMathOperationsForExistingDataFix4 | /**
* Regression test that checks if the DSL with Math expressions produces expected SQL.
*
* Calling:
*
* <pre>
* field("vatRate / (vatRate + 100)")
* </pre>
*
* is actually a hack that was used as a workaround in order to create the
* expected SQL below. Since
* {@link org.alfasoftware.morf.sql.SqlUtils#bracket(MathsField)} is
 * available, it should be used to achieve the SQL bracketing.
*/
@Test
public void shouldGenerateCorrectSqlForMathOperationsForExistingDataFix4() {
AliasedField dsl = field("invoiceLineReceived").multiplyBy(field("vatRate / (vatRate + 100)"));
String sql = testDialect.getSqlFrom(dsl);
assertEquals(expectedSqlForMathOperationsForExistingDataFix4(), sql);
} | 3.68 |
hbase_PBType_inputStreamFromByteRange | /**
* Create a {@link CodedInputStream} from a {@link PositionedByteRange}. Be sure to update
* {@code src}'s position after consuming from the stream.
* <p/>
* For example:
*
* <pre>
* Foo.Builder builder = ...
* CodedInputStream is = inputStreamFromByteRange(src);
* Foo ret = builder.mergeFrom(is).build();
* src.setPosition(src.getPosition() + is.getTotalBytesRead());
* </pre>
*/
public static CodedInputStream inputStreamFromByteRange(PositionedByteRange src) {
return CodedInputStream.newInstance(src.getBytes(), src.getOffset() + src.getPosition(),
src.getRemaining());
} | 3.68 |
hbase_LeaseManager_getName | /** Returns name of lease */
public String getName() {
return this.leaseName;
} | 3.68 |
flink_MetricListener_getGauge | /**
* Get registered {@link Gauge} with identifier relative to the root metric group.
*
* @param identifier identifier relative to the root metric group
* @return Optional registered gauge
*/
@SuppressWarnings("unchecked")
public <T> Optional<Gauge<T>> getGauge(String... identifier) {
if (!metrics.containsKey(getActualIdentifier(identifier))) {
return Optional.empty();
} else {
return Optional.of((Gauge<T>) metrics.get(getActualIdentifier(identifier)));
}
} | 3.68 |
hbase_RandomRowFilter_toByteArray | /** Returns The filter serialized using pb */
@Override
public byte[] toByteArray() {
FilterProtos.RandomRowFilter.Builder builder = FilterProtos.RandomRowFilter.newBuilder();
builder.setChance(this.chance);
return builder.build().toByteArray();
} | 3.68 |
open-banking-gateway_Xs2aRestorePreValidationContext_lastRedirectionTarget | // FIXME SerializerUtil does not support nestedness
private LastRedirectionTarget lastRedirectionTarget(BaseContext current) {
if (null == current.getLastRedirection()) {
return null;
}
LastRedirectionTarget target = current.getLastRedirection();
target.setRequestScoped(current.getRequestScoped());
return target;
} | 3.68 |
flink_ExternalPythonKeyedProcessOperator_processTimer | /**
 * It is responsible for sending timer data to the Python worker when a registered timer is fired.
 * The input data is a Row containing 4 fields: TimerFlag 0 for proc time, 1 for event time;
* Timestamp of the fired timer; Current watermark and the key of the timer.
*
* @param timeDomain The type of the timer.
* @param timer The internal timer.
* @throws Exception The runnerInputSerializer might throw exception.
*/
private void processTimer(TimeDomain timeDomain, InternalTimer<Row, Object> timer)
throws Exception {
Object namespace = timer.getNamespace();
byte[] encodedNamespace;
if (VoidNamespace.INSTANCE.equals(namespace)) {
encodedNamespace = null;
} else {
namespaceSerializer.serialize(namespace, baosWrapper);
encodedNamespace = baos.toByteArray();
baos.reset();
}
Row timerData =
timerHandler.buildTimerData(
timeDomain,
internalTimerService.currentWatermark(),
timer.getTimestamp(),
timer.getKey(),
encodedNamespace);
timerDataSerializer.serialize(timerData, baosWrapper);
pythonFunctionRunner.processTimer(baos.toByteArray());
baos.reset();
elementCount++;
checkInvokeFinishBundleByCount();
emitResults();
} | 3.68 |
pulsar_BacklogQuotaManager_dropBacklogForSizeLimit | /**
* Drop the backlog on the topic.
*
* @param persistentTopic
* The topic from which backlog should be dropped
* @param quota
* Backlog quota set for the topic
*/
private void dropBacklogForSizeLimit(PersistentTopic persistentTopic, BacklogQuota quota) {
// Set the reduction factor to 90%. The aim is to drop down the backlog to 90% of the quota limit.
double reductionFactor = 0.9;
double targetSize = reductionFactor * quota.getLimitSize();
// Get estimated unconsumed size for the managed ledger associated with this topic. Estimated size is more
// useful than the actual storage size. Actual storage size gets updated only when managed ledger is trimmed.
ManagedLedgerImpl mLedger = (ManagedLedgerImpl) persistentTopic.getManagedLedger();
long backlogSize = mLedger.getEstimatedBacklogSize();
if (log.isDebugEnabled()) {
log.debug("[{}] target size is [{}] for quota limit [{}], backlog size is [{}]", persistentTopic.getName(),
targetSize, targetSize / reductionFactor, backlogSize);
}
ManagedCursor previousSlowestConsumer = null;
while (backlogSize > targetSize) {
// Get the slowest consumer for this managed ledger and save the ledger id of the marked delete position of
// slowest consumer. Calculate the factor which is used in calculating number of messages to be skipped.
ManagedCursor slowestConsumer = mLedger.getSlowestConsumer();
if (slowestConsumer == null) {
if (log.isDebugEnabled()) {
log.debug("[{}] slowest consumer null.", persistentTopic.getName());
}
break;
}
double messageSkipFactor = ((backlogSize - targetSize) / backlogSize);
if (slowestConsumer == previousSlowestConsumer) {
log.info("[{}] Cursors not progressing, target size is [{}] for quota limit [{}], backlog size is [{}]",
persistentTopic.getName(), targetSize, targetSize / reductionFactor, backlogSize);
break;
}
// Calculate number of messages to be skipped using the current backlog and the skip factor.
long entriesInBacklog = slowestConsumer.getNumberOfEntriesInBacklog(false);
int messagesToSkip = (int) (messageSkipFactor * entriesInBacklog);
try {
// If there are no messages to skip, break out of the loop
if (messagesToSkip == 0) {
if (log.isDebugEnabled()) {
log.debug("no messages to skip for [{}]", slowestConsumer);
}
break;
}
// Skip messages on the slowest consumer
if (log.isDebugEnabled()) {
log.debug("[{}] Skipping [{}] messages on slowest consumer [{}] having backlog entries : [{}]",
persistentTopic.getName(), messagesToSkip, slowestConsumer.getName(), entriesInBacklog);
}
slowestConsumer.skipEntries(messagesToSkip, IndividualDeletedEntries.Include);
} catch (Exception e) {
log.error("[{}] Error skipping [{}] messages from slowest consumer [{}]", persistentTopic.getName(),
messagesToSkip, slowestConsumer.getName(), e);
}
// Make sure that unconsumed size is updated every time when we skip the messages.
backlogSize = mLedger.getEstimatedBacklogSize();
previousSlowestConsumer = slowestConsumer;
if (log.isDebugEnabled()) {
log.debug("[{}] Updated unconsumed size = [{}]. skipFactor: [{}]", persistentTopic.getName(),
backlogSize, messageSkipFactor);
}
}
} | 3.68 |
hadoop_SubApplicationRowKey_parseRowKeyFromString | /**
* Given the encoded row key as string, returns the row key as an object.
*
* @param encodedRowKey String representation of row key.
* @return A <cite>SubApplicationRowKey</cite> object.
*/
public static SubApplicationRowKey parseRowKeyFromString(
String encodedRowKey) {
return new SubApplicationRowKeyConverter().decodeFromString(encodedRowKey);
} | 3.68 |
hadoop_WrappedMapper_getMapContext | /**
* Get a wrapped {@link Mapper.Context} for custom implementations.
* @param mapContext <code>MapContext</code> to be wrapped
* @return a wrapped <code>Mapper.Context</code> for custom implementations
*/
public Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context
getMapContext(MapContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> mapContext) {
return new Context(mapContext);
} | 3.68 |
hbase_MetaTableAccessor_deleteTableState | /**
* Remove state for table from meta
* @param connection to use for deletion
* @param table to delete state for
*/
public static void deleteTableState(Connection connection, TableName table) throws IOException {
long time = EnvironmentEdgeManager.currentTime();
Delete delete = new Delete(table.getName());
delete.addColumns(HConstants.TABLE_FAMILY, HConstants.TABLE_STATE_QUALIFIER, time);
deleteFromMetaTable(connection, delete);
LOG.info("Deleted table " + table + " state from META");
} | 3.68 |
hudi_HoodieAvroHFileReader_filterRowKeys | /**
* Filter keys by availability.
* <p>
 * Note: This method is performant when the caller passes in sorted candidate keys.
*
* @param candidateRowKeys - Keys to check for the availability
* @return Subset of candidate keys that are available
*/
@Override
public Set<Pair<String, Long>> filterRowKeys(Set<String> candidateRowKeys) {
// candidateRowKeys must be sorted
SortedSet<String> sortedCandidateRowKeys = new TreeSet<>(candidateRowKeys);
synchronized (sharedLock) {
if (!sharedScanner.isPresent()) {
// For shared scanner, which is primarily used for point-lookups, we're caching blocks
// by default, to minimize amount of traffic to the underlying storage
sharedScanner = Option.of(getHFileScanner(getSharedHFileReader(), true));
}
return sortedCandidateRowKeys.stream()
.filter(k -> {
try {
return isKeyAvailable(k, sharedScanner.get());
} catch (IOException e) {
LOG.error("Failed to check key availability: " + k);
return false;
}
})
// Record position is not supported for HFile
.map(key -> Pair.of(key, HoodieRecordLocation.INVALID_POSITION))
.collect(Collectors.toSet());
}
} | 3.68 |
flink_AbstractOrcNoHiveVector_createHiveVectorFromConstant | /**
 * Create an orc vector from a partition spec value. See hive {@code
* VectorizedRowBatchCtx#addPartitionColsToBatch}.
*/
private static ColumnVector createHiveVectorFromConstant(
LogicalType type, Object value, int batchSize) {
switch (type.getTypeRoot()) {
case CHAR:
case VARCHAR:
case BINARY:
case VARBINARY:
return createBytesVector(batchSize, value);
case BOOLEAN:
return createLongVector(batchSize, (Boolean) value ? 1 : 0);
case TINYINT:
case SMALLINT:
case INTEGER:
case BIGINT:
return createLongVector(batchSize, value);
case DECIMAL:
DecimalType decimalType = (DecimalType) type;
return createDecimalVector(
batchSize, decimalType.getPrecision(), decimalType.getScale(), value);
case FLOAT:
case DOUBLE:
return createDoubleVector(batchSize, value);
case DATE:
if (value instanceof LocalDate) {
value = Date.valueOf((LocalDate) value);
}
return createLongVector(batchSize, toInternal((Date) value));
case TIMESTAMP_WITHOUT_TIME_ZONE:
return createTimestampVector(batchSize, value);
default:
throw new UnsupportedOperationException("Unsupported type: " + type);
}
} | 3.68 |
hadoop_AbstractMultipartUploader_abortUploadsUnderPath | /**
* {@inheritDoc}.
* @param path path to abort uploads under.
* @return a future to -1.
* @throws IOException raised on errors performing I/O.
*/
public CompletableFuture<Integer> abortUploadsUnderPath(Path path)
throws IOException {
checkPath(path);
CompletableFuture<Integer> f = new CompletableFuture<>();
f.complete(-1);
return f;
} | 3.68 |
hbase_JMXJsonServlet_doGet | /**
 * Process a GET request for the specified resource.
 *
 * @param request The servlet request we are processing
 * @param response The servlet response we are creating
*/
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException {
try {
if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), request, response)) {
return;
}
String jsonpcb = null;
PrintWriter writer = null;
JSONBean.Writer beanWriter = null;
try {
jsonpcb = checkCallbackName(request.getParameter(CALLBACK_PARAM));
writer = response.getWriter();
// "callback" parameter implies JSONP outpout
if (jsonpcb != null) {
response.setContentType("application/javascript; charset=utf8");
writer.write(jsonpcb + "(");
} else {
response.setContentType("application/json; charset=utf8");
}
beanWriter = this.jsonBeanWriter.open(writer);
// Should we output description on each attribute and bean?
boolean description = "true".equals(request.getParameter(INCLUDE_DESCRIPTION));
// query per mbean attribute
String getmethod = request.getParameter("get");
if (getmethod != null) {
List<String> splitStrings = Splitter.onPattern("\\:\\:").splitToList(getmethod);
if (splitStrings.size() != 2) {
beanWriter.write("result", "ERROR");
beanWriter.write("message", "query format is not as expected.");
beanWriter.flush();
response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
return;
}
Iterator<String> i = splitStrings.iterator();
if (
beanWriter.write(this.mBeanServer, new ObjectName(i.next()), i.next(), description) != 0
) {
beanWriter.flush();
response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
}
return;
}
// query per mbean
String qry = request.getParameter("qry");
if (qry == null) {
qry = "*:*";
}
String excl = request.getParameter("excl");
ObjectName excluded = excl == null ? null : new ObjectName(excl);
if (
beanWriter.write(this.mBeanServer, new ObjectName(qry), null, description, excluded) != 0
) {
beanWriter.flush();
response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
}
} finally {
if (beanWriter != null) {
beanWriter.close();
}
if (jsonpcb != null) {
writer.write(");");
}
if (writer != null) {
writer.close();
}
}
} catch (IOException e) {
LOG.error("Caught an exception while processing JMX request", e);
response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
} catch (MalformedObjectNameException e) {
LOG.error("Caught an exception while processing JMX request", e);
response.sendError(HttpServletResponse.SC_BAD_REQUEST);
}
} | 3.68 |
pulsar_AuthenticationMetrics_authenticateSuccess | /**
* Log authenticate success event to the authentication metrics.
* @param providerName The short class name of the provider
* @param authMethod Authentication method name
*/
public static void authenticateSuccess(String providerName, String authMethod) {
authSuccessMetrics.labels(providerName, authMethod).inc();
} | 3.68 |
framework_MenuBar_doItemAction | /**
* Performs the action associated with the given menu item. If the item has
* a popup associated with it, the popup will be shown. If it has a command
* associated with it, and 'fireCommand' is true, then the command will be
* fired. Popups associated with other items will be hidden.
*
* @param item
* the item whose popup is to be shown.
* @param fireCommand
* <code>true</code> if the item's command should be fired,
* <code>false</code> otherwise.
*/
protected void doItemAction(final MenuItem item, boolean fireCommand) {
// If the given item is already showing its menu, we're done.
if ((shownChildMenu != null) && (item.getSubMenu() == shownChildMenu)) {
return;
}
// If another item is showing its menu, then hide it.
if (shownChildMenu != null) {
shownChildMenu.onHide();
popup.hide();
}
// If the item has no popup, optionally fire its command.
if (item.getSubMenu() == null) {
if (fireCommand) {
// Close this menu and all of its parents.
closeAllParents();
// Fire the item's command.
final Command cmd = item.getCommand();
if (cmd != null) {
Scheduler.get().scheduleDeferred(cmd);
}
}
return;
}
// Ensure that the item is selected.
selectItem(item);
// Create a new popup for this item, and position it next to
// the item (below if this is a horizontal menu bar, to the
// right if it's a vertical bar).
popup = new VOverlay(true) {
{
setWidget(item.getSubMenu());
item.getSubMenu().onShow();
setOwner(MenuBar.this);
}
@Override
public boolean onEventPreview(Event event) {
// Hook the popup panel's event preview. We use this to keep it
// from auto-hiding when the parent menu is clicked.
switch (DOM.eventGetType(event)) {
case Event.ONCLICK:
// If the event target is part of the parent menu, suppress
// the event altogether.
final Element target = DOM.eventGetTarget(event);
final Element parentMenuElement = item.getParentMenu()
.getElement();
if (DOM.isOrHasChild(parentMenuElement, target)) {
return false;
}
break;
default:
// NOP
break;
}
return super.onEventPreview(event);
}
};
popup.addPopupListener(this);
if (vertical) {
popup.setPopupPosition(
item.getAbsoluteLeft() + item.getOffsetWidth(),
item.getAbsoluteTop());
} else {
popup.setPopupPosition(item.getAbsoluteLeft(),
item.getAbsoluteTop() + item.getOffsetHeight());
}
shownChildMenu = item.getSubMenu();
item.getSubMenu().parentMenu = this;
// Show the popup, ensuring that the menubar's event preview remains on
// top of the popup's.
popup.show();
} | 3.68 |
flink_BinarySegmentUtils_bitGet | /**
 * Read a bit from the segments.
*
* @param segments target segments.
* @param baseOffset bits base offset.
* @param index bit index from base offset.
*/
public static boolean bitGet(MemorySegment[] segments, int baseOffset, int index) {
int offset = baseOffset + byteIndex(index);
byte current = getByte(segments, offset);
return (current & (1 << (index & BIT_BYTE_INDEX_MASK))) != 0;
} | 3.68 |
framework_SQLContainer_updateCount | /**
* Fetches new count of rows from the data source, if needed.
*/
private void updateCount() {
if (!sizeDirty && new Date().getTime() < sizeUpdated.getTime()
+ sizeValidMilliSeconds) {
return;
}
try {
try {
queryDelegate.setFilters(filters);
} catch (UnsupportedOperationException e) {
getLogger().log(Level.FINE,
"The query delegate doesn't support filtering", e);
}
try {
queryDelegate.setOrderBy(sorters);
} catch (UnsupportedOperationException e) {
getLogger().log(Level.FINE,
"The query delegate doesn't support sorting", e);
}
int newSize = queryDelegate.getCount();
sizeUpdated = new Date();
sizeDirty = false;
if (newSize != size) {
size = newSize;
// Size is up to date so don't set it back to dirty in refresh()
refresh(false);
}
getLogger().log(Level.FINER, "Updated row count. New count is: {0}",
size);
} catch (SQLException e) {
throw new RuntimeException("Failed to update item set size.", e);
}
} | 3.68 |
hbase_RegionCoprocessorHost_preScannerClose | /**
* Supports Coprocessor 'bypass'.
* @param s the scanner
* @return true if default behavior should be bypassed, false otherwise
* @exception IOException Exception
*/
// Should this be bypassable?
public boolean preScannerClose(final InternalScanner s) throws IOException {
return execOperation(
coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult(true) {
@Override
public void call(RegionObserver observer) throws IOException {
observer.preScannerClose(this, s);
}
});
} | 3.68 |
hadoop_GetClusterNodeAttributesRequest_newInstance | /**
* Create new instance of GetClusterNodeAttributesRequest.
*
* @return GetClusterNodeAttributesRequest is returned.
*/
public static GetClusterNodeAttributesRequest newInstance() {
return Records.newRecord(GetClusterNodeAttributesRequest.class);
} | 3.68 |
flink_JobStatus_isTerminalState | /**
* Checks whether this state is <i>locally terminal</i>. Locally terminal refers to the state of
* a job's execution graph within an executing JobManager. If the execution graph is locally
* terminal, the JobManager will not continue executing or recovering the job.
*
* <p>The only state that is locally terminal, but not globally terminal is {@link #SUSPENDED},
* which is typically entered when the executing JobManager loses its leader status.
*
* @return True, if this job status is terminal, false otherwise.
*/
public boolean isTerminalState() {
return terminalState != TerminalState.NON_TERMINAL;
} | 3.68 |
framework_VaadinFinderLocatorStrategy_connectorMatchesPathFragment | /**
* Determines whether a connector matches a path fragment. This is done by
* comparing the path fragment to the name of the widget type of the
* connector.
*
* @param connector
* The connector to compare.
* @param widgetName
* The name of the widget class.
* @return true if the widget type of the connector equals the widget type
* identified by the path fragment.
*/
private boolean connectorMatchesPathFragment(ComponentConnector connector,
String widgetName) {
List<String> ids = getIDsForConnector(connector);
String exactClass = connector.getConnection().getConfiguration()
.getServerSideClassNameForTag(connector.getTag());
if (!ids.contains(exactClass)) {
ids.add(exactClass);
}
List<Integer> widgetTags = new ArrayList<>();
widgetTags.addAll(getTags(widgetName));
if (widgetTags.isEmpty()) {
widgetTags.addAll(getTags("com.vaadin.ui." + widgetName));
}
for (int i = 0, l = ids.size(); i < l; ++i) {
// Fuzz the connector name, so that the client can provide (for
// example: /Button, /Button.class, /com.vaadin.ui.Button,
// /com.vaadin.ui.Button.class, etc)
String name = ids.get(i);
final String simpleName = getSimpleClassName(name);
final String fullName = getFullClassName(name);
if (!widgetTags.isEmpty()) {
Integer[] foundTags = client.getConfiguration()
.getTagsForServerSideClassName(fullName);
for (int tag : foundTags) {
if (tagsMatch(widgetTags, tag)) {
return true;
}
}
}
// Fallback if something failed before.
if (widgetName.equals(fullName + ".class")
|| widgetName.equals(fullName)
|| widgetName.equals(simpleName + ".class")
|| widgetName.equals(simpleName)
|| widgetName.equals(name)) {
return true;
}
}
// If the server-side class name didn't match, fall back to testing for
// the explicit widget name
String widget = Util.getSimpleName(connector.getWidget());
return widgetName.equals(widget)
|| widgetName.equals(widget + ".class");
} | 3.68 |
framework_ComponentTestCase_createActions | /**
* Method that creates the "actions" shown in the upper part of the screen.
* Override this only if you do not want the default actions. Custom actions
* can be added through #createCustomActions();
*
* @return A List with actions to which more actions can be added.
*/
protected List<Component> createActions() {
List<Component> actions = new ArrayList<>();
actions.add(createEnabledAction(true));
actions.add(createReadonlyAction(false));
actions.add(createErrorIndicatorAction(false));
if (HasValue.class.isAssignableFrom(getTestClass())
|| Field.class.isAssignableFrom(getTestClass())) {
actions.add(createRequiredAction(false));
}
createCustomActions(actions);
return actions;
} | 3.68 |
hbase_RestoreSnapshotHelper_cloneRegion | /**
* Clone region directory content from the snapshot info. Each region is encoded with the table
* name, so the cloned region will have a different region name. Instead of copying the hfiles a
* HFileLink is created.
* @param region {@link HRegion} cloned
*/
private void cloneRegion(final HRegion region, final RegionInfo snapshotRegionInfo,
final SnapshotRegionManifest manifest) throws IOException {
cloneRegion(region.getRegionInfo(), new Path(tableDir, region.getRegionInfo().getEncodedName()),
snapshotRegionInfo, manifest);
} | 3.68 |
flink_Tuple16_equals | /**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple16)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple16 tuple = (Tuple16) o;
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
return false;
}
if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
return false;
}
if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
return false;
}
if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {
return false;
}
if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) {
return false;
}
if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) {
return false;
}
if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) {
return false;
}
if (f8 != null ? !f8.equals(tuple.f8) : tuple.f8 != null) {
return false;
}
if (f9 != null ? !f9.equals(tuple.f9) : tuple.f9 != null) {
return false;
}
if (f10 != null ? !f10.equals(tuple.f10) : tuple.f10 != null) {
return false;
}
if (f11 != null ? !f11.equals(tuple.f11) : tuple.f11 != null) {
return false;
}
if (f12 != null ? !f12.equals(tuple.f12) : tuple.f12 != null) {
return false;
}
if (f13 != null ? !f13.equals(tuple.f13) : tuple.f13 != null) {
return false;
}
if (f14 != null ? !f14.equals(tuple.f14) : tuple.f14 != null) {
return false;
}
if (f15 != null ? !f15.equals(tuple.f15) : tuple.f15 != null) {
return false;
}
return true;
} | 3.68 |
framework_DataSource_addDataChangeHandler | /**
* Sets a simple data change handler for a widget without lazy loading.
* Refresh method should reset all the data in the widget.
*
* @param refreshMethod
* a method to refresh all data in the widget
*
* @return registration for removing the handler
*/
public default Registration addDataChangeHandler(
Consumer<Range> refreshMethod) {
return addDataChangeHandler(
new SimpleDataChangeHandler(this, refreshMethod));
} | 3.68 |
dubbo_ConfigurableMetadataServiceExporter_generateMethodConfig | /**
* Generate Method Config for Service Discovery Metadata <p/>
* <p>
* Make {@link MetadataService} support argument callback,
* used to notify {@link org.apache.dubbo.registry.client.ServiceInstance}'s
* metadata change event
*
* @since 3.0
*/
private List<MethodConfig> generateMethodConfig() {
MethodConfig methodConfig = new MethodConfig();
methodConfig.setName("getAndListenInstanceMetadata");
ArgumentConfig argumentConfig = new ArgumentConfig();
argumentConfig.setIndex(1);
argumentConfig.setCallback(true);
methodConfig.setArguments(Collections.singletonList(argumentConfig));
return Collections.singletonList(methodConfig);
} | 3.68 |
shardingsphere-elasticjob_FailoverService_clearFailoveringItem | /**
* Clear failovering item.
*
* @param item item
*/
public void clearFailoveringItem(final int item) {
jobNodeStorage.removeJobNodeIfExisted(FailoverNode.getExecutingFailoverNode(item));
} | 3.68 |
AreaShop_RegionFeature_listen | /**
* Start listening to events.
*/
public void listen() {
plugin.getServer().getPluginManager().registerEvents(this, plugin);
} | 3.68 |
hadoop_Paths_getLocalTaskAttemptTempDir | /**
* Get the task attempt temporary directory in the local filesystem.
* This must be unique to all tasks on all jobs running on all processes
* on this host.
* It's constructed as uuid+task-attempt-ID, relying on UUID to be unique
* for each job.
* @param conf configuration
* @param uuid some UUID, such as a job UUID
* @param attemptID attempt ID
* @return a local task attempt directory.
* @throws IOException IO problem.
*/
public static Path getLocalTaskAttemptTempDir(final Configuration conf,
final String uuid,
final TaskAttemptID attemptID)
throws IOException {
try {
final LocalDirAllocator allocator =
new LocalDirAllocator(Constants.BUFFER_DIR);
String name = uuid + "-" + attemptID;
return tempFolders.get(name,
() -> {
return FileSystem.getLocal(conf).makeQualified(
allocator.getLocalPathForWrite(name, conf));
});
} catch (ExecutionException | UncheckedExecutionException e) {
Throwable cause = e.getCause();
if (cause instanceof RuntimeException) {
throw (RuntimeException) cause;
}
if (cause instanceof IOException) {
throw (IOException) cause;
}
throw new IOException(e);
}
} | 3.68 |
flink_FlinkRelMdCollation_match | /** Helper method to determine a {@link org.apache.calcite.rel.core.Match}'s collation. */
public static List<RelCollation> match(
RelMetadataQuery mq,
RelNode input,
RelDataType rowType,
RexNode pattern,
boolean strictStart,
boolean strictEnd,
Map<String, RexNode> patternDefinitions,
Map<String, RexNode> measures,
RexNode after,
Map<String, ? extends SortedSet<String>> subsets,
boolean allRows,
ImmutableBitSet partitionKeys,
RelCollation orderKeys,
RexNode interval) {
return mq.collations(input);
} | 3.68 |
framework_AbstractClickEventHandler_getRelativeToElement | /**
* Click event calculates and returns coordinates relative to the element
* returned by this method. Default implementation uses the root element of
* the widget. Override to provide a different relative element.
*
* @return The Element used for calculating relative coordinates for a click
* or null if no relative coordinates can be calculated.
*/
protected com.google.gwt.user.client.Element getRelativeToElement() {
return connector.getWidget().getElement();
} | 3.68 |
pulsar_LinuxInfoUtils_getCpuUsageForEntireHost | /**
* Reads first line of /proc/stat to get total cpu usage.
*
* <pre>
* cpu user nice system idle iowait irq softirq steal guest guest_nice
* cpu 317808 128 58637 2503692 7634 0 13472 0 0 0
* </pre>
* <p>
 * Line is split in "words", filtering the first. The sum of all numbers gives the amount of cpu cycles used this
 * far. Real CPU usage should equal the sum minus the idle cycles (that is, idle+iowait); this would include
* cpu, user, nice, system, irq, softirq, steal, guest and guest_nice.
*/
public static ResourceUsage getCpuUsageForEntireHost() {
try (Stream<String> stream = Files.lines(Paths.get(PROC_STAT_PATH))) {
Optional<String> first = stream.findFirst();
if (!first.isPresent()) {
log.error("[LinuxInfo] Failed to read CPU usage from /proc/stat, because of empty values.");
return ResourceUsage.empty();
}
String[] words = first.get().split("\\s+");
long total = Arrays.stream(words)
.filter(s -> !s.contains("cpu"))
.mapToLong(Long::parseLong)
.sum();
long idle = Long.parseLong(words[4]) + Long.parseLong(words[5]);
return ResourceUsage.builder()
.usage(total - idle)
.idle(idle)
.total(total).build();
} catch (IOException e) {
log.error("[LinuxInfo] Failed to read CPU usage from /proc/stat", e);
return ResourceUsage.empty();
}
} | 3.68 |
flink_BoundedFIFOQueue_add | /**
* Adds an element to the end of the queue. An element will be removed from the head of the
* queue if the queue would exceed its maximum size by adding the new element.
*
* @param element The element that should be added to the end of the queue.
* @throws NullPointerException If {@code null} is passed as an element.
*/
public void add(T element) {
Preconditions.checkNotNull(element);
if (elements.add(element) && elements.size() > maxSize) {
elements.poll();
}
} | 3.68 |
framework_UIConnector_injectCSS | /**
* Reads CSS strings and resources injected by {@link Styles#inject} from
* the UIDL stream.
*
* @param uidl
* The uidl which contains "css-resource" and "css-string" tags
*/
private void injectCSS(UIDL uidl) {
/*
* Search the UIDL stream for CSS resources and strings to be injected.
*/
for (Object child : uidl) {
UIDL cssInjectionsUidl = (UIDL) child;
// Check if we have resources to inject
if (cssInjectionsUidl.getTag().equals("css-resource")) {
String url = getWidget().connection.translateVaadinUri(
cssInjectionsUidl.getStringAttribute("url"));
LinkElement link = LinkElement
.as(DOM.createElement(LinkElement.TAG));
link.setRel("stylesheet");
link.setHref(url);
link.setType("text/css");
getHead().appendChild(link);
// Check if we have CSS string to inject
} else if (cssInjectionsUidl.getTag().equals("css-string")) {
for (Object c : cssInjectionsUidl) {
StyleInjector.injectAtEnd((String) c);
StyleInjector.flush();
}
}
}
} | 3.68 |
flink_FileInputFormat_setFilePath | /**
* Sets a single path of a file to be read.
*
* @param filePath The path of the file to read.
*/
public void setFilePath(Path filePath) {
if (filePath == null) {
throw new IllegalArgumentException("File path must not be null.");
}
setFilePaths(filePath);
} | 3.68 |
querydsl_ComparableExpression_ltAny | /**
* Create a {@code this < any right} expression
*
* @param right rhs of the comparison
* @return this < any right
*/
public BooleanExpression ltAny(SubQueryExpression<? extends T> right) {
return lt(ExpressionUtils.any(right));
} | 3.68 |
framework_IndexedContainer_addItemAfter | /**
* {@inheritDoc}
* <p>
* The item ID is generated from a sequence of Integers. The id of the first
* added item is 1.
*/
@Override
public Object addItemAfter(Object previousItemId) {
// Creates a new id
final Object id = generateId();
if (addItemAfter(previousItemId, id) != null) {
return id;
} else {
return null;
}
} | 3.68 |
framework_ContainerEventProvider_setEndDateProperty | /**
* Set the property which provides the ending date and time of the event.
*/
public void setEndDateProperty(Object endDateProperty) {
this.endDateProperty = endDateProperty;
} | 3.68 |
dubbo_HttpHeaderUtil_parseRequest | /**
* parse request
* @param rpcInvocation
* @param request
*/
public static void parseRequest(RpcInvocation rpcInvocation, RequestFacade request) {
parseRequestHeader(rpcInvocation, request);
parseRequestAttribute(rpcInvocation, request);
} | 3.68 |
morf_AbstractSqlDialectTest_expectedBooleanLiteral | /**
* @param value the boolean value to translate.
* @return The expected boolean literal.
*/
protected String expectedBooleanLiteral(boolean value) {
return value ? "1" : "0";
} | 3.68 |
hadoop_UpdateContainerSchedulerEvent_getUpdatedToken | /**
* Update Container Token.
*
* @return Container Token.
*/
public ContainerTokenIdentifier getUpdatedToken() {
return containerEvent.getUpdatedToken();
} | 3.68 |
hadoop_ReadBufferManager_testResetReadBufferManager | /**
* Reset readAhead buffer to needed readAhead block size and
* thresholdAgeMilliseconds.
* @param readAheadBlockSize
* @param thresholdAgeMilliseconds
*/
@VisibleForTesting
void testResetReadBufferManager(int readAheadBlockSize, int thresholdAgeMilliseconds) {
setBlockSize(readAheadBlockSize);
setThresholdAgeMilliseconds(thresholdAgeMilliseconds);
testResetReadBufferManager();
} | 3.68 |
hudi_OptionsResolver_isPartitionedTable | /**
* Returns whether the table is partitioned.
*/
public static boolean isPartitionedTable(Configuration conf) {
return FilePathUtils.extractPartitionKeys(conf).length > 0;
} | 3.68 |
hadoop_ResourceEstimatorUtil_createProviderInstance | /**
* Helper method to create instances of Object using the class name specified
* in the configuration object.
*
* @param conf the yarn configuration
* @param configuredClassName the configuration provider key
* @param defaultValue the default implementation class
* @param type the required interface/base class
* @param <T> The type of the instance to create
* @return the instances created
* @throws ResourceEstimatorException if the provider initialization fails.
*/
@SuppressWarnings("unchecked") public static <T> T createProviderInstance(
Configuration conf, String configuredClassName, String defaultValue,
Class<T> type) throws ResourceEstimatorException {
String className = conf.get(configuredClassName);
if (className == null) {
className = defaultValue;
}
try {
Class<?> concreteClass = Class.forName(className);
if (type.isAssignableFrom(concreteClass)) {
Constructor<T> meth =
(Constructor<T>) concreteClass.getDeclaredConstructor(EMPTY_ARRAY);
meth.setAccessible(true);
return meth.newInstance();
} else {
StringBuilder errMsg = new StringBuilder();
errMsg.append("Class: ").append(className).append(" not instance of ")
.append(type.getCanonicalName());
throw new ResourceEstimatorException(errMsg.toString());
}
} catch (ClassNotFoundException e) {
StringBuilder errMsg = new StringBuilder();
errMsg.append("Could not instantiate : ").append(className)
.append(" due to exception: ").append(e.getCause());
throw new ResourceEstimatorException(errMsg.toString());
} catch (ReflectiveOperationException e) {
StringBuilder errMsg = new StringBuilder();
errMsg.append("Could not instantiate : ").append(className)
.append(" due to exception: ").append(e.getCause());
throw new ResourceEstimatorException(errMsg.toString());
}
} | 3.68 |
hbase_ClientZKSyncer_upsertQueue | /**
* Update the value of the single element in queue if any, or else insert.
* <p/>
* We only need to synchronize the latest znode value to client ZK rather than synchronize each
* time
* @param data the data to write to queue
*/
private void upsertQueue(String node, byte[] data) {
ZKData zkData = queues.get(node);
if (zkData != null) {
zkData.set(data);
}
} | 3.68 |
hadoop_RouterHeartbeatService_getStateStoreVersion | /**
* Get the version of the data in the State Store.
*
* @param clazz Class in the State Store.
* @return Version of the data.
*/
private <R extends BaseRecord, S extends RecordStore<R>>
long getStateStoreVersion(final Class<S> clazz) {
long version = -1;
try {
StateStoreService stateStore = router.getStateStore();
S recordStore = stateStore.getRegisteredRecordStore(clazz);
if (recordStore != null) {
if (recordStore instanceof CachedRecordStore) {
CachedRecordStore<R> cachedRecordStore =
(CachedRecordStore<R>) recordStore;
List<R> records = cachedRecordStore.getCachedRecords();
for (BaseRecord record : records) {
if (record.getDateModified() > version) {
version = record.getDateModified();
}
}
}
}
} catch (Exception e) {
LOG.error("Cannot get version for {}", clazz, e);
}
return version;
} | 3.68 |
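The loop above is a plain "latest modification time wins" scan; a self-contained restatement with the Stream API, where the Timestamped interface is an illustrative stand-in for BaseRecord.

import java.util.List;

interface Timestamped {
  long getDateModified();
}

class VersionSketch {
  static long latestVersion(List<? extends Timestamped> records) {
    return records.stream()
        .mapToLong(Timestamped::getDateModified)
        .max()
        .orElse(-1L); // -1 mirrors the "no version found" default above
  }
}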
AreaShop_Utils_getRegionsInSelection | /**
* Get all AreaShop regions intersecting with a WorldEdit selection.
* @param selection The selection to check
* @return A list with all the AreaShop regions intersecting with the selection
*/
public static List<GeneralRegion> getRegionsInSelection(WorldEditSelection selection) {
ArrayList<GeneralRegion> result = new ArrayList<>();
for(ProtectedRegion region : getWorldEditRegionsInSelection(selection)) {
GeneralRegion asRegion = AreaShop.getInstance().getFileManager().getRegion(region.getId());
if(asRegion != null) {
result.add(asRegion);
}
}
return result;
} | 3.68 |
hbase_AsyncAdmin_modifyTable | /**
* Modify an existing table, more IRB friendly version.
* @param desc modified description of the table
*/
default CompletableFuture<Void> modifyTable(TableDescriptor desc) {
return modifyTable(desc, true);
} | 3.68 |
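A hedged usage sketch of the asynchronous flow: fetch the current descriptor, derive a modified one, then chain into modifyTable. The table name and the durability tweak are illustrative, and this assumes the standard HBase 2.x AsyncAdmin and TableDescriptorBuilder APIs.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

class ModifyTableSketch {
  static void bumpDurability(AsyncAdmin admin) {
    admin.getDescriptor(TableName.valueOf("demo"))               // current descriptor
        .thenApply(desc -> TableDescriptorBuilder.newBuilder(desc)
            .setDurability(Durability.ASYNC_WAL)                 // the illustrative change
            .build())
        .thenCompose(admin::modifyTable)                         // the method shown above
        .join();                                                 // block only for the example
  }
}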
querydsl_NumberExpression_round | /**
* Create a {@code round(this)} expression
*
* <p>Returns the closest {@code int} to this.</p>
*
* @return round(this)
* @see java.lang.Math#round(double)
* @see java.lang.Math#round(float)
*/
public NumberExpression<T> round() {
if (round == null) {
round = Expressions.numberOperation(getType(), MathOps.ROUND, mixin);
}
return round;
} | 3.68 |
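The result is cached lazily in the round field, so repeated calls reuse the same expression node. A hedged usage sketch with Querydsl's Expressions factory; the "salary" path name is illustrative.

import com.querydsl.core.types.dsl.Expressions;
import com.querydsl.core.types.dsl.NumberExpression;
import com.querydsl.core.types.dsl.NumberPath;

class RoundUsageSketch {
  NumberExpression<Double> roundedSalary() {
    NumberPath<Double> salary = Expressions.numberPath(Double.class, "salary");
    return salary.round(); // typically rendered as round(salary) by the query backend
  }
}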
framework_ServletPortletHelper_findLocale | /**
 * Helper to find the most suitable Locale. These potential sources are
* checked in order until a Locale is found:
* <ol>
* <li>The passed component (or UI) if not null</li>
* <li>{@link UI#getCurrent()} if defined</li>
* <li>The passed session if not null</li>
* <li>{@link VaadinSession#getCurrent()} if defined</li>
* <li>The passed request if not null</li>
* <li>{@link VaadinService#getCurrentRequest()} if defined</li>
* <li>{@link Locale#getDefault()}</li>
* </ol>
*/
public static Locale findLocale(Component component, VaadinSession session,
VaadinRequest request) {
if (component == null) {
component = UI.getCurrent();
}
if (component != null) {
Locale locale = component.getLocale();
if (locale != null) {
return locale;
}
}
if (session == null) {
session = VaadinSession.getCurrent();
}
if (session != null) {
Locale locale = session.getLocale();
if (locale != null) {
return locale;
}
}
if (request == null) {
request = VaadinService.getCurrentRequest();
}
if (request != null) {
Locale locale = request.getLocale();
if (locale != null) {
return locale;
}
}
return Locale.getDefault();
} | 3.68 |
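The method is a straight fallback chain; a self-contained restatement of that pattern, with the concrete Vaadin sources replaced by generic suppliers (illustrative, not part of the framework API).

import java.util.Locale;
import java.util.Objects;
import java.util.function.Supplier;
import java.util.stream.Stream;

class LocaleFallbackSketch {
  @SafeVarargs
  static Locale firstNonNullLocale(Supplier<Locale>... sources) {
    return Stream.of(sources)
        .map(Supplier::get)               // ask each source in order
        .filter(Objects::nonNull)
        .findFirst()
        .orElse(Locale.getDefault());     // final fallback, as in the method above
  }
}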
hadoop_BlockMovementAttemptFinished_getTargetDatanode | /**
* @return the target datanode where it moved the block.
*/
public DatanodeInfo getTargetDatanode() {
return target;
} | 3.68 |
hadoop_StripedBlockReader_closeBlockReader | // close block reader
void closeBlockReader() {
IOUtils.closeStream(blockReader);
blockReader = null;
} | 3.68 |
hadoop_Lz4Codec_getConf | /**
* Return the configuration used by this object.
*
 * @return the configuration object used by this object.
*/
@Override
public Configuration getConf() {
return conf;
} | 3.68 |
hadoop_AuditingIntegration_enterStage | /**
* Callback on stage entry.
* Sets the activeStage and updates the
* common context.
* @param stage new stage
*/
public static void enterStage(String stage) {
currentAuditContext().put(CONTEXT_ATTR_STAGE, stage);
} | 3.68 |
framework_VComboBox_setSelectedItemIcon | /**
* Sets the icon URI of the selected item. The icon is shown on the left
* side of the item caption text. Set the URI to null to remove the icon.
*
* @param iconUri
* The URI of the icon, or null to remove icon
*/
public void setSelectedItemIcon(String iconUri) {
if (selectedItemIcon != null) {
panel.remove(selectedItemIcon);
}
if (iconUri == null || iconUri.isEmpty()) {
if (selectedItemIcon != null) {
selectedItemIcon = null;
afterSelectedItemIconChange();
}
} else {
selectedItemIcon = new IconWidget(
connector.getConnection().getIcon(iconUri));
selectedItemIcon.addDomHandler(VComboBox.this,
ClickEvent.getType());
selectedItemIcon.addDomHandler(VComboBox.this,
MouseDownEvent.getType());
selectedItemIcon.addDomHandler(
event -> afterSelectedItemIconChange(),
LoadEvent.getType());
panel.insert(selectedItemIcon, 0);
afterSelectedItemIconChange();
}
} | 3.68 |
hadoop_LoadManifestsStage_processAllManifests | /**
* Load and process all the manifests.
* @param manifestFiles list of manifest files.
* @throws IOException failure to load/parse/queue
*/
private void processAllManifests(
final RemoteIterator<FileStatus> manifestFiles) throws IOException {
trackDurationOfInvocation(getIOStatistics(), OP_LOAD_ALL_MANIFESTS, () ->
TaskPool.foreach(manifestFiles)
.executeWith(getIOProcessors())
.stopOnFailure()
.run(this::processOneManifest));
} | 3.68 |
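A self-contained approximation of the parallel load using only the JDK; TaskPool and the IOStatistics duration tracking are Hadoop-internal, and unlike stopOnFailure() this version lets already-submitted tasks finish before the failure is rethrown.

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

class ParallelLoadSketch {
  static void processAll(List<String> manifestPaths) {
    ExecutorService pool = Executors.newFixedThreadPool(8);
    try {
      CompletableFuture<?>[] tasks = manifestPaths.stream()
          .map(p -> CompletableFuture.runAsync(() -> processOne(p), pool))
          .toArray(CompletableFuture[]::new);
      CompletableFuture.allOf(tasks).join(); // throws after completion if any task failed
    } finally {
      pool.shutdown();
    }
  }

  static void processOne(String path) {
    // placeholder for loading, parsing, and queueing a single manifest
  }
}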
flink_ChainedStateHandle_getLength | /**
* Returns the length of the operator chain. This can be different from the number of operator
 * state handles, because some operators in the chain can have no state and thus their state
 * handles can be null.
*
* @return length of the operator chain
*/
public int getLength() {
return operatorStateHandles.size();
} | 3.68 |
hadoop_StagingCommitter_getJobAttemptFileSystem | /**
* Get the filesystem for the job attempt.
* @param context the context of the job. This is used to get the
* application attempt ID.
* @return the FS to store job attempt data.
* @throws IOException failure to create the FS.
*/
public FileSystem getJobAttemptFileSystem(JobContext context)
throws IOException {
Path p = getJobAttemptPath(context);
return p.getFileSystem(context.getConfiguration());
} | 3.68 |
hadoop_BlockManager_getBlockData | /**
* Gets block data information.
*
* @return instance of {@code BlockData}.
*/
public BlockData getBlockData() {
return blockData;
} | 3.68 |
hadoop_AbfsOutputStream_close | /**
* Force all data in the output stream to be written to Azure storage.
* Wait to return until this is complete. Close the access to the stream and
 * shut down the upload thread pool.
* If the blob was created, its lease will be released.
* Any error encountered caught in threads and stored will be rethrown here
* after cleanup.
*/
@Override
public synchronized void close() throws IOException {
if (closed) {
return;
}
try {
// Check if Executor Service got shutdown before the writes could be
// completed.
if (hasActiveBlockDataToUpload() && executorService.isShutdown()) {
throw new PathIOException(path, "Executor Service closed before "
+ "writes could be completed.");
}
flushInternal(true);
} catch (IOException e) {
// Problems surface in try-with-resources clauses if
// the exception thrown in a close == the one already thrown
// -so we wrap any exception with a new one.
// See HADOOP-16785
throw wrapException(path, e.getMessage(), e);
} finally {
if (hasLease()) {
lease.free();
lease = null;
}
lastError = new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
buffer = null;
bufferIndex = 0;
closed = true;
writeOperations.clear();
if (hasActiveBlock()) {
clearActiveBlock();
}
}
LOG.debug("Closing AbfsOutputStream : {}", this);
} | 3.68 |
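A hedged usage sketch from the caller's side: the stream is normally obtained through the Hadoop FileSystem API and closed with try-with-resources, which is exactly the pattern the HADOOP-16785 exception wrapping protects; the abfs URI and path are illustrative.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class AbfsWriteSketch {
  static void writeExample(Configuration conf) throws Exception {
    Path path = new Path("abfs://container@account.dfs.core.windows.net/data/out.txt");
    try (FileSystem fs = FileSystem.get(path.toUri(), conf);
         FSDataOutputStream out = fs.create(path, true)) {
      out.write("hello".getBytes(StandardCharsets.UTF_8));
    } // close() runs here: remaining data is flushed and the lease, if any, is released
  }
}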
AreaShop_SignsFeature_getAllSigns | /**
* Get the map with all signs.
* @return Map with all signs: locationString -> RegionSign
*/
public static Map<String, RegionSign> getAllSigns() {
return allSigns;
} | 3.68 |