name (string, length 12–178) | code_snippet (string, length 8–36.5k) | score (float64, 3.26–3.68) |
---|---|---|
hbase_TimeRangeTracker_includesTimeRange | /**
* Check if the range has ANY overlap with TimeRange
* @param tr TimeRange, it expects [minStamp, maxStamp)
* @return True if there is overlap, false otherwise
*/
public boolean includesTimeRange(final TimeRange tr) {
return (getMin() < tr.getMax() && getMax() >= tr.getMin());
} | 3.68 |
morf_Function_drive | /**
* @see org.alfasoftware.morf.util.ObjectTreeTraverser.Driver#drive(ObjectTreeTraverser)
*/
@Override
public void drive(ObjectTreeTraverser traverser) {
traverser.dispatch(getArguments());
} | 3.68 |
flink_RpcSystem_load | /**
* Loads the RpcSystem.
*
* @param config Flink configuration
* @return loaded RpcSystem
*/
static RpcSystem load(Configuration config) {
final PriorityQueue<RpcSystemLoader> rpcSystemLoaders =
new PriorityQueue<>(Comparator.comparingInt(RpcSystemLoader::getLoadPriority));
ServiceLoader.load(RpcSystemLoader.class).forEach(rpcSystemLoaders::add);
final Iterator<RpcSystemLoader> iterator = rpcSystemLoaders.iterator();
Exception loadError = null;
while (iterator.hasNext()) {
final RpcSystemLoader next = iterator.next();
try {
return next.loadRpcSystem(config);
} catch (Exception e) {
loadError = ExceptionUtils.firstOrSuppressed(e, loadError);
}
}
throw new RpcLoaderException("Could not load RpcSystem.", loadError);
} | 3.68 |
hadoop_AMRMProxyService_createRequestInterceptorChain | /**
* This method creates and returns a reference to the first interceptor in the
* chain of request interceptor instances.
*
* @return the reference of the first interceptor in the chain
*/
protected RequestInterceptor createRequestInterceptorChain() {
Configuration conf = getConfig();
List<String> interceptorClassNames = getInterceptorClassNames(conf);
RequestInterceptor pipeline = null;
RequestInterceptor current = null;
for (String interceptorClassName : interceptorClassNames) {
try {
Class<?> interceptorClass =
conf.getClassByName(interceptorClassName);
if (RequestInterceptor.class.isAssignableFrom(interceptorClass)) {
RequestInterceptor interceptorInstance =
(RequestInterceptor) ReflectionUtils.newInstance(
interceptorClass, conf);
if (pipeline == null) {
pipeline = interceptorInstance;
current = interceptorInstance;
continue;
} else {
current.setNextInterceptor(interceptorInstance);
current = interceptorInstance;
}
} else {
throw new YarnRuntimeException("Class: " + interceptorClassName
+ " not instance of "
+ RequestInterceptor.class.getCanonicalName());
}
} catch (ClassNotFoundException e) {
throw new YarnRuntimeException(
"Could not instantiate ApplicationMasterRequestInterceptor: "
+ interceptorClassName, e);
}
}
if (pipeline == null) {
throw new YarnRuntimeException(
"RequestInterceptor pipeline is not configured in the system");
}
return pipeline;
} | 3.68 |
flink_Plan_getExecutionConfig | /**
* Gets the execution config object.
*
* @return The execution config object.
*/
public ExecutionConfig getExecutionConfig() {
if (executionConfig == null) {
throw new RuntimeException("Execution config has not been set properly for this plan");
}
return executionConfig;
} | 3.68 |
framework_ContainerHierarchicalWrapper_removeItem | /**
* Removes an Item specified by the itemId from the underlying container and
* from the hierarchy.
*
* @param itemId
* the ID of the Item to be removed.
* @return <code>true</code> if the operation succeeded, <code>false</code>
* if not
* @throws UnsupportedOperationException
* if the removeItem is not supported.
*/
@Override
public boolean removeItem(Object itemId)
throws UnsupportedOperationException {
final boolean success = container.removeItem(itemId);
if (!hierarchical && success) {
removeFromHierarchyWrapper(itemId);
}
return success;
} | 3.68 |
hbase_HStoreFile_getReader | /**
* @return Current reader. Must call initReader first else returns null.
* @see #initReader()
*/
public StoreFileReader getReader() {
return this.initialReader;
} | 3.68 |
flink_BatchTask_closeChainedTasks | /**
* Closes all chained tasks, in the order in which they are stored in the list. The closing process
* creates a standardized log info message.
*
* @param tasks The tasks to be closed.
* @param parent The parent task, used to obtain parameters to include in the log message.
* @throws Exception Thrown, if the closing encounters an exception.
*/
public static void closeChainedTasks(List<ChainedDriver<?, ?>> tasks, AbstractInvokable parent)
throws Exception {
for (ChainedDriver<?, ?> task : tasks) {
task.closeTask();
if (LOG.isDebugEnabled()) {
LOG.debug(constructLogString("Finished task code", task.getTaskName(), parent));
}
}
} | 3.68 |
framework_DataCommunicatorConnector_updateRowData | /**
* Updates row data based on row key.
*
* @param rowData
* new row object
*/
protected void updateRowData(JsonObject rowData) {
int index = indexOfKey(getRowKey(rowData));
if (index >= 0) {
JsonObject oldRowData = getRow(index);
onRowDataUpdate(rowData, oldRowData);
setRowData(index, Collections.singletonList(rowData));
}
} | 3.68 |
flink_FlinkRelMdCollation_mergeJoin | /**
* Helper method to determine a {@link Join}'s collation assuming that it uses a merge-join
* algorithm.
*
* <p>If the inputs are sorted on other keys <em>in addition to</em> the join key, the result
* preserves those collations too.
*/
public static List<RelCollation> mergeJoin(
RelMetadataQuery mq,
RelNode left,
RelNode right,
ImmutableIntList leftKeys,
ImmutableIntList rightKeys) {
final com.google.common.collect.ImmutableList.Builder<RelCollation> builder =
com.google.common.collect.ImmutableList.builder();
final com.google.common.collect.ImmutableList<RelCollation> leftCollations =
mq.collations(left);
assert RelCollations.contains(leftCollations, leftKeys)
: "cannot merge join: left input is not sorted on left keys";
builder.addAll(leftCollations);
final com.google.common.collect.ImmutableList<RelCollation> rightCollations =
mq.collations(right);
assert RelCollations.contains(rightCollations, rightKeys)
: "cannot merge join: right input is not sorted on right keys";
final int leftFieldCount = left.getRowType().getFieldCount();
for (RelCollation collation : rightCollations) {
builder.add(RelCollations.shift(collation, leftFieldCount));
}
return builder.build();
} | 3.68 |
hbase_RawByte_decodeByte | /**
* Read a {@code byte} value from the buffer {@code buff}.
*/
public byte decodeByte(byte[] buff, int offset) {
return buff[offset];
} | 3.68 |
hadoop_ArrayFile_next | /**
* Read and return the next value in the file.
*
* @param value value.
* @throws IOException raised on errors performing I/O.
* @return Writable.
*/
public synchronized Writable next(Writable value) throws IOException {
return next(key, value) ? value : null;
} | 3.68 |
flink_ResolvedSchema_getWatermarkSpecs | /**
* Returns a list of watermark specifications each consisting of a rowtime attribute and
* watermark strategy expression.
*
* <p>Note: Currently, there is at most one {@link WatermarkSpec} in the list, because we don't
* support multiple watermark definitions yet.
*/
public List<WatermarkSpec> getWatermarkSpecs() {
return watermarkSpecs;
} | 3.68 |
flink_CatalogBaseTable_getSchema | /**
* @deprecated This method returns the deprecated {@link TableSchema} class. The old class was a
* hybrid of resolved and unresolved schema information. It has been replaced by the new
* {@link Schema} which is always unresolved and will be resolved by the framework later.
*/
@Deprecated
default TableSchema getSchema() {
return null;
} | 3.68 |
flink_JoinOperator_with | /**
* Finalizes a Join transformation by applying a {@link
* org.apache.flink.api.common.functions.RichFlatJoinFunction} to each pair of joined
* elements.
*
* <p>Each JoinFunction call returns exactly one element.
*
* @param function The JoinFunction that is called for each pair of joined elements.
* @return An EquiJoin that represents the joined result DataSet
* @see org.apache.flink.api.common.functions.RichFlatJoinFunction
* @see org.apache.flink.api.java.operators.JoinOperator.EquiJoin
* @see DataSet
*/
public <R> EquiJoin<I1, I2, R> with(FlatJoinFunction<I1, I2, R> function) {
if (function == null) {
throw new NullPointerException("Join function must not be null.");
}
TypeInformation<R> returnType =
TypeExtractor.getFlatJoinReturnTypes(
function,
getInput1Type(),
getInput2Type(),
Utils.getCallLocationName(),
true);
return new EquiJoin<>(
getInput1(),
getInput2(),
getKeys1(),
getKeys2(),
clean(function),
returnType,
getJoinHint(),
Utils.getCallLocationName(),
joinType);
} | 3.68 |
framework_VRadioButtonGroup_addSelectionChangeHandler | /**
* Adds the given selection change handler to this widget.
*
* @param selectionChanged
* the handler that should be triggered when selection changes
* @return the registration object for removing the given handler when no
* longer needed
*/
public Registration addSelectionChangeHandler(
Consumer<JsonObject> selectionChanged) {
selectionChangeListeners.add(selectionChanged);
return (Registration) () -> selectionChangeListeners
.remove(selectionChanged);
} | 3.68 |
hbase_Compressor_main | /**
* Command line tool to compress and uncompress WALs.
*/
public static void main(String[] args) throws IOException {
if (args.length != 2 || args[0].equals("--help") || args[0].equals("-h")) {
printHelp();
System.exit(-1);
}
Path inputPath = new Path(args[0]);
Path outputPath = new Path(args[1]);
transformFile(inputPath, outputPath);
} | 3.68 |
framework_Escalator_getSpacersAfterPx | /**
* Get all spacers from one pixel point onwards.
* <p>
*
* In this method, the {@link SpacerInclusionStrategy} has the following
* meaning when a spacer lies in the middle of either pixel argument:
* <dl>
* <dt>{@link SpacerInclusionStrategy#COMPLETE COMPLETE}
* <dd>include the spacer
* <dt>{@link SpacerInclusionStrategy#PARTIAL PARTIAL}
* <dd>include the spacer
* <dt>{@link SpacerInclusionStrategy#NONE NONE}
* <dd>ignore the spacer
* </dl>
*
* @param px
* the pixel point after which to return all spacers
* @param strategy
* the inclusion strategy regarding the {@code px}
* @return a collection of the spacers that exist after {@code px}
*/
public Collection<SpacerImpl> getSpacersAfterPx(final double px,
final SpacerInclusionStrategy strategy) {
List<SpacerImpl> spacers = new ArrayList<SpacerImpl>(
rowIndexToSpacer.values());
for (int i = 0; i < spacers.size(); i++) {
SpacerImpl spacer = spacers.get(i);
double top = spacer.getTop();
double bottom = top + spacer.getHeight();
if (top > px) {
return spacers.subList(i, spacers.size());
} else if (bottom > px) {
if (strategy == SpacerInclusionStrategy.NONE) {
return spacers.subList(i + 1, spacers.size());
} else {
return spacers.subList(i, spacers.size());
}
}
}
return Collections.emptySet();
} | 3.68 |
framework_VScrollTable_sendColumnWidthUpdates | /**
* Non-immediate variable update of column widths for a collection of
* columns.
*
* @param columns
* the columns to trigger the events for.
*/
private void sendColumnWidthUpdates(Collection<HeaderCell> columns) {
String[] newSizes = new String[columns.size()];
int ix = 0;
for (HeaderCell cell : columns) {
newSizes[ix++] = cell.getColKey() + ":" + cell.getWidth();
}
client.updateVariable(paintableId, "columnWidthUpdates", newSizes,
false);
} | 3.68 |
hadoop_BlockStorageMovementNeeded_addAll | /**
* Adds the itemInfo list to the tracking list for which storage movement is
* expected, if necessary.
*
* @param startPath
* - start path
* @param itemInfoList
* - list of children in the directory
* @param scanCompleted
* - indicates whether the start id directory has no more elements to
* scan.
*/
@VisibleForTesting
public synchronized void addAll(long startPath, List<ItemInfo> itemInfoList,
boolean scanCompleted) {
storageMovementNeeded.addAll(itemInfoList);
updatePendingDirScanStats(startPath, itemInfoList.size(), scanCompleted);
} | 3.68 |
hbase_TableSplit_write | /**
* Writes the field values to the output.
* @param out The output to write to.
* @throws IOException When writing the values to the output fails.
*/
@Override
public void write(DataOutput out) throws IOException {
WritableUtils.writeVInt(out, VERSION.code);
Bytes.writeByteArray(out, tableName.getName());
Bytes.writeByteArray(out, startRow);
Bytes.writeByteArray(out, endRow);
Bytes.writeByteArray(out, Bytes.toBytes(regionLocation));
Bytes.writeByteArray(out, Bytes.toBytes(scan));
WritableUtils.writeVLong(out, length);
Bytes.writeByteArray(out, Bytes.toBytes(encodedRegionName));
} | 3.68 |
flink_ScriptProcessBuilder_blackListed | /**
* Checks whether a given configuration name is blacklisted and should not be converted to an
* environment variable.
*/
private boolean blackListed(Configuration conf, String name) {
if (blackListedConfEntries == null) {
blackListedConfEntries = new HashSet<>();
if (conf != null) {
String bl =
conf.get(
HiveConf.ConfVars.HIVESCRIPT_ENV_BLACKLIST.toString(),
HiveConf.ConfVars.HIVESCRIPT_ENV_BLACKLIST.getDefaultValue());
if (bl != null && !bl.isEmpty()) {
String[] bls = bl.split(",");
Collections.addAll(blackListedConfEntries, bls);
}
}
}
return blackListedConfEntries.contains(name);
} | 3.68 |
hmily_MongoEntityConvert_create | /**
* Converts a hmily lock to a mongo entity.
* @param lock hmily entity.
* @return mongo entity.
*/
public LockMongoEntity create(final HmilyLock lock) {
LockMongoEntity entity = new LockMongoEntity();
entity.setLockId(lock.getLockId());
entity.setTargetTableName(lock.getTargetTableName());
entity.setTargetTablePk(lock.getTargetTablePk());
entity.setResourceId(lock.getResourceId());
entity.setParticipantId(lock.getParticipantId());
entity.setTransId(lock.getTransId());
return entity;
} | 3.68 |
framework_InMemoryDataProvider_addFilter | /**
* Adds a filter for an item property. The filter will be used in addition
* to any filter that has been set or added previously.
*
* @see #addFilter(SerializablePredicate)
* @see #addFilterByValue(ValueProvider, Object)
* @see #setFilter(ValueProvider, SerializablePredicate)
*
* @param valueProvider
* value provider that gets the property value, not
* <code>null</code>
* @param valueFilter
* filter for testing the property value, not <code>null</code>
*/
public default <V> void addFilter(ValueProvider<T, V> valueProvider,
SerializablePredicate<V> valueFilter) {
Objects.requireNonNull(valueProvider, "Value provider cannot be null");
Objects.requireNonNull(valueFilter, "Value filter cannot be null");
addFilter(InMemoryDataProviderHelpers
.createValueProviderFilter(valueProvider, valueFilter));
} | 3.68 |
dubbo_Bytes_long2bytes | /**
* to byte array.
*
* @param v value.
* @param b byte array.
* @param off array offset.
*/
public static void long2bytes(long v, byte[] b, int off) {
b[off + 7] = (byte) v;
b[off + 6] = (byte) (v >>> 8);
b[off + 5] = (byte) (v >>> 16);
b[off + 4] = (byte) (v >>> 24);
b[off + 3] = (byte) (v >>> 32);
b[off + 2] = (byte) (v >>> 40);
b[off + 1] = (byte) (v >>> 48);
b[off + 0] = (byte) (v >>> 56);
} | 3.68 |
hmily_HmilyParticipantCacheManager_get | /**
* Acquires the cached hmily participants.
*
* @param participantId the guava cache key.
* @return {@linkplain HmilyParticipant} list
*/
public List<HmilyParticipant> get(final Long participantId) {
try {
return LOADING_CACHE.get(participantId);
} catch (ExecutionException e) {
return Collections.emptyList();
}
} | 3.68 |
hbase_HMobStore_getTempDir | /**
* Gets the temp directory.
* @return The temp directory.
*/
private Path getTempDir() {
return new Path(homePath, MobConstants.TEMP_DIR_NAME);
} | 3.68 |
flink_BaseHybridHashTable_releaseMemoryCacheForSMJ | /**
* Because adaptive hash join is introduced, the cached memory segments should be released to
* {@link MemoryManager} before switching to sort merge join. Otherwise, opening the sort merge
* join operator may fail because of insufficient memory.
*
* <p>Note: this method should only be invoked for sort merge join.
*/
public void releaseMemoryCacheForSMJ() {
// return build spill buffer memory first
returnSpillBuffers();
freeCurrent();
} | 3.68 |
flink_BinarySegmentUtils_getFloat | /**
* get float from segments.
*
* @param segments target segments.
* @param offset value offset.
*/
public static float getFloat(MemorySegment[] segments, int offset) {
if (inFirstSegment(segments, offset, 4)) {
return segments[0].getFloat(offset);
} else {
return getFloatMultiSegments(segments, offset);
}
} | 3.68 |
flink_Pattern_notNext | /**
* Appends a new pattern to the existing one. The new pattern enforces that there is no event
* matching this pattern right after the preceding matched event.
*
* @param name Name of the new pattern
* @return A new pattern which is appended to this one
*/
public Pattern<T, T> notNext(final String name) {
if (quantifier.hasProperty(Quantifier.QuantifierProperty.OPTIONAL)) {
throw new UnsupportedOperationException(
"Specifying a pattern with an optional path to NOT condition is not supported yet. "
+ "You can simulate such pattern with two independent patterns, one with and the other without "
+ "the optional part.");
}
return new Pattern<>(name, this, ConsumingStrategy.NOT_NEXT, afterMatchSkipStrategy);
} | 3.68 |
hbase_RSGroupInfo_addServer | /** Adds the given server to the group. */
public void addServer(Address hostPort) {
servers.add(hostPort);
} | 3.68 |
dubbo_MeshRuleRouter_getRemoteAppName | /**
* For unit tests only.
*/
@Deprecated
public Set<String> getRemoteAppName() {
return remoteAppName;
} | 3.68 |
pulsar_ConsumerInterceptors_onNegativeAcksSend | /**
* This is called when a redelivery from a negative acknowledge occurs.
* <p>
* This method calls {@link ConsumerInterceptor#onNegativeAcksSend(Consumer, Set)
* onNegativeAcksSend(Consumer, Set<MessageId>)} method for each interceptor.
* <p>
* This method does not throw exceptions. Exceptions thrown by any of interceptors in the chain are logged, but not
* propagated.
*
* @param consumer the consumer which contains the interceptors.
* @param messageIds set of message IDs being redelivered due to a negative acknowledge.
*/
public void onNegativeAcksSend(Consumer<T> consumer, Set<MessageId> messageIds) {
for (int i = 0, interceptorsSize = interceptors.size(); i < interceptorsSize; i++) {
try {
interceptors.get(i).onNegativeAcksSend(consumer, messageIds);
} catch (Throwable e) {
log.warn("Error executing interceptor onNegativeAcksSend callback", e);
}
}
} | 3.68 |
flink_AbstractPythonFunctionOperator_checkInvokeFinishBundleByTime | /** Checks whether to invoke finishBundle by timeout. */
private void checkInvokeFinishBundleByTime() throws Exception {
long now = getProcessingTimeService().getCurrentProcessingTime();
if (now - lastFinishBundleTime >= maxBundleTimeMills) {
invokeFinishBundle();
}
} | 3.68 |
hbase_Procedure_setProcId | /**
* Called by the ProcedureExecutor to assign the ID to the newly created procedure.
*/
protected void setProcId(long procId) {
this.procId = procId;
this.submittedTime = EnvironmentEdgeManager.currentTime();
setState(ProcedureState.RUNNABLE);
} | 3.68 |
hbase_HRegionServer_getFlushRequester | /** Returns reference to FlushRequester */
@Override
public FlushRequester getFlushRequester() {
return this.cacheFlusher;
} | 3.68 |
framework_VScrollTable_focusRowFromBody | /** For internal use only. May be removed or replaced in the future. */
public void focusRowFromBody() {
if (selectedRowKeys.size() == 1) {
// try to focus a row currently selected and in viewport
String selectedRowKey = selectedRowKeys.iterator().next();
if (selectedRowKey != null) {
VScrollTableRow renderedRow = getRenderedRowByKey(
selectedRowKey);
if (renderedRow == null || !renderedRow.isInViewPort()) {
setRowFocus(
scrollBody.getRowByRowIndex(firstRowInViewPort));
} else {
setRowFocus(renderedRow);
}
}
} else {
// multiselect mode
setRowFocus(scrollBody.getRowByRowIndex(firstRowInViewPort));
}
} | 3.68 |
dubbo_AbstractJSONImpl_getNumberAsLong | /**
* Gets a number from an object for the given key, cast to a long. If the key is not
* present, this returns null. If the value does not represent a long integer, throws an
* exception.
*/
@Override
public Long getNumberAsLong(Map<String, ?> obj, String key) {
assert obj != null;
assert key != null;
if (!obj.containsKey(key)) {
return null;
}
Object value = obj.get(key);
if (value instanceof Double) {
Double d = (Double) value;
long l = d.longValue();
if (l != d) {
throw new ClassCastException("Number expected to be long: " + d);
}
return l;
}
if (value instanceof String) {
try {
return Long.parseLong((String) value);
} catch (NumberFormatException e) {
throw new IllegalArgumentException(
String.format("value '%s' for key '%s' is not a long integer", value, key));
}
}
throw new IllegalArgumentException(String.format("value '%s' for key '%s' is not a long integer", value, key));
} | 3.68 |
hudi_ByteBufferBackedInputStream_seek | /**
* Seeks to a position w/in the stream
*
* NOTE: Position is relative to the start of the stream (ie its absolute w/in this stream),
* with following invariant being assumed:
* <p>0 <= pos <= length (of the stream)</p>
*
* This method is NOT thread-safe
*
* @param pos target position to seek to w/in the holding buffer
*/
public void seek(long pos) {
buffer.reset(); // to mark
int offset = buffer.position();
// NOTE: That the new pos is still relative to buffer's offset
int newPos = offset + (int) pos;
if (newPos > buffer.limit() || newPos < offset) {
throw new IllegalArgumentException(
String.format("Can't seek past the backing buffer (limit %d, offset %d, new %d)", buffer.limit(), offset, newPos)
);
}
buffer.position(newPos);
} | 3.68 |
querydsl_AbstractHibernateQuery_scroll | /**
* Return the query results as {@code ScrollableResults}. The
* scrollability of the returned results depends upon JDBC driver
* support for scrollable {@code ResultSet}s.<br>
*
* @param mode scroll mode
* @return scrollable results
*/
public ScrollableResults scroll(ScrollMode mode) {
try {
return createQuery().scroll(mode);
} finally {
reset();
}
} | 3.68 |
hadoop_HttpFSServerWebServer_deprecateEnv | /**
* Load the deprecated environment variable into the configuration.
*
* @param varName the environment variable name
* @param conf the configuration
* @param propName the configuration property name
* @param confFile the configuration file name
*/
private static void deprecateEnv(String varName, Configuration conf,
String propName, String confFile) {
String value = System.getenv(varName);
if (value == null) {
return;
}
LOG.warn("Environment variable {} is deprecated and overriding"
+ " property {}', please set the property in {} instead.",
varName, propName, confFile);
conf.set(propName, value, "environment variable " + varName);
} | 3.68 |
flink_DistinctOperator_setCombineHint | /**
* Sets the strategy to use for the combine phase of the reduce.
*
* <p>If this method is not called, then the default hint will be used. ({@link
* org.apache.flink.api.common.operators.base.ReduceOperatorBase.CombineHint#OPTIMIZER_CHOOSES})
*
* @param strategy The hint to use.
* @return The DistinctOperator object, for function call chaining.
*/
@PublicEvolving
public DistinctOperator<T> setCombineHint(CombineHint strategy) {
this.hint = strategy;
return this;
} | 3.68 |
flink_HistoryServerStaticFileServerHandler_respondWithFile | /** Response when running with leading JobManager. */
private void respondWithFile(ChannelHandlerContext ctx, HttpRequest request, String requestPath)
throws IOException, ParseException, RestHandlerException {
// make sure we request the "index.html" in case there is a directory request
if (requestPath.endsWith("/")) {
requestPath = requestPath + "index.html";
}
if (!requestPath.contains(".")) { // we assume that the path ends in either .html or .js
requestPath = requestPath + ".json";
}
// convert to absolute path
final File file = new File(rootPath, requestPath);
if (!file.exists()) {
// file does not exist. Try to load it with the classloader
ClassLoader cl = HistoryServerStaticFileServerHandler.class.getClassLoader();
try (InputStream resourceStream = cl.getResourceAsStream("web" + requestPath)) {
boolean success = false;
try {
if (resourceStream != null) {
URL root = cl.getResource("web");
URL requested = cl.getResource("web" + requestPath);
if (root != null && requested != null) {
URI rootURI = new URI(root.getPath()).normalize();
URI requestedURI = new URI(requested.getPath()).normalize();
// Check that we don't load anything from outside of the
// expected scope.
if (!rootURI.relativize(requestedURI).equals(requestedURI)) {
LOG.debug("Loading missing file from classloader: {}", requestPath);
// ensure that directory to file exists.
file.getParentFile().mkdirs();
Files.copy(resourceStream, file.toPath());
success = true;
}
}
}
} catch (Throwable t) {
LOG.error("error while responding", t);
} finally {
if (!success) {
LOG.debug("Unable to load requested file {} from classloader", requestPath);
throw new NotFoundException("File not found.");
}
}
}
}
StaticFileServerHandler.checkFileValidity(file, rootPath, LOG);
// cache validation
final String ifModifiedSince = request.headers().get(IF_MODIFIED_SINCE);
if (ifModifiedSince != null && !ifModifiedSince.isEmpty()) {
SimpleDateFormat dateFormatter =
new SimpleDateFormat(StaticFileServerHandler.HTTP_DATE_FORMAT, Locale.US);
Date ifModifiedSinceDate = dateFormatter.parse(ifModifiedSince);
// Only compare up to the second because the datetime format we send to the client
// does not have milliseconds
long ifModifiedSinceDateSeconds = ifModifiedSinceDate.getTime() / 1000;
long fileLastModifiedSeconds = file.lastModified() / 1000;
if (ifModifiedSinceDateSeconds == fileLastModifiedSeconds) {
if (LOG.isDebugEnabled()) {
LOG.debug(
"Responding 'NOT MODIFIED' for file '" + file.getAbsolutePath() + '\'');
}
StaticFileServerHandler.sendNotModified(ctx);
return;
}
}
if (LOG.isDebugEnabled()) {
LOG.debug("Responding with file '" + file.getAbsolutePath() + '\'');
}
// Don't need to close this manually. Netty's DefaultFileRegion will take care of it.
final RandomAccessFile raf;
try {
raf = new RandomAccessFile(file, "r");
} catch (FileNotFoundException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("Could not find file {}.", file.getAbsolutePath());
}
HandlerUtils.sendErrorResponse(
ctx,
request,
new ErrorResponseBody("File not found."),
NOT_FOUND,
Collections.emptyMap());
return;
}
try {
long fileLength = raf.length();
HttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK);
StaticFileServerHandler.setContentTypeHeader(response, file);
// the job overview should be updated as soon as possible
if (!requestPath.equals("/joboverview.json")) {
StaticFileServerHandler.setDateAndCacheHeaders(response, file);
}
if (HttpUtil.isKeepAlive(request)) {
response.headers().set(CONNECTION, HttpHeaderValues.KEEP_ALIVE);
}
HttpUtil.setContentLength(response, fileLength);
// write the initial line and the header.
ctx.write(response);
// write the content.
ChannelFuture lastContentFuture;
if (ctx.pipeline().get(SslHandler.class) == null) {
ctx.write(
new DefaultFileRegion(raf.getChannel(), 0, fileLength),
ctx.newProgressivePromise());
lastContentFuture = ctx.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT);
} else {
lastContentFuture =
ctx.writeAndFlush(
new HttpChunkedInput(new ChunkedFile(raf, 0, fileLength, 8192)),
ctx.newProgressivePromise());
// HttpChunkedInput will write the end marker (LastHttpContent) for us.
}
// close the connection, if no keep-alive is needed
if (!HttpUtil.isKeepAlive(request)) {
lastContentFuture.addListener(ChannelFutureListener.CLOSE);
}
} catch (Exception e) {
raf.close();
LOG.error("Failed to serve file.", e);
throw new RestHandlerException("Internal server error.", INTERNAL_SERVER_ERROR);
}
} | 3.68 |
framework_SelectorPredicate_getName | /**
* @return the name
*/
public String getName() {
return name;
} | 3.68 |
hbase_HRegionFileSystem_getStoreFileInfo | /**
* Return the store file information of the specified family/file.
* @param familyName Column Family Name
* @param fileName File Name
* @return The {@link StoreFileInfo} for the specified family/file
*/
StoreFileInfo getStoreFileInfo(final String familyName, final String fileName)
throws IOException {
Path familyDir = getStoreDir(familyName);
return ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, regionInfo, regionInfoForFs,
familyName, new Path(familyDir, fileName));
} | 3.68 |
framework_WrappedPortletSession_removeAttribute | /**
* Removes the object bound with the specified name and the given scope from
* this session. If the session does not have an object bound with the
* specified name, this method does nothing.
*
* @param name
* the name of the object to be removed from this session
* @param scope
* session scope of this attribute
*
* @exception java.lang.IllegalStateException
* if this method is called on a session which has been
* invalidated
* @exception java.lang.IllegalArgumentException
* if name is <code>null</code>.
* @see PortletSession#removeAttribute(String, int)
* @see PortletSession#PORTLET_SCOPE
* @see PortletSession#APPLICATION_SCOPE
*
* @since 7.6
*/
public void removeAttribute(String name, int scope) {
session.removeAttribute(name, scope);
} | 3.68 |
hadoop_OSSListResult_logAtDebug | /**
* Dump the result at debug level.
* @param log log to use
*/
public void logAtDebug(Logger log) {
Collection<String> prefixes = getCommonPrefixes();
Collection<OSSObjectSummary> summaries = getObjectSummaries();
log.debug("Prefix count = {}; object count={}",
prefixes.size(), summaries.size());
for (OSSObjectSummary summary : summaries) {
log.debug("Summary: {} {}", summary.getKey(), summary.getSize());
}
for (String prefix : prefixes) {
log.debug("Prefix: {}", prefix);
}
} | 3.68 |
hbase_ResponseConverter_buildEnableCatalogJanitorResponse | /**
* Creates a response for the catalog scan request
* @return A EnableCatalogJanitorResponse
*/
public static EnableCatalogJanitorResponse buildEnableCatalogJanitorResponse(boolean prevValue) {
return EnableCatalogJanitorResponse.newBuilder().setPrevValue(prevValue).build();
} | 3.68 |
flink_ResolvedSchema_getPrimaryKey | /** Returns the primary key if it has been defined. */
public Optional<UniqueConstraint> getPrimaryKey() {
return Optional.ofNullable(primaryKey);
} | 3.68 |
hbase_Table_batchCallback | /**
* Same as {@link #batch(List, Object[])}, but with a callback.
* @since 0.96.0
* @deprecated since 3.0.0, will be removed in 4.0.0. Please use the batch related methods in
* {@link AsyncTable} directly if you want to use callback. We reuse the callback for
* coprocessor here, and the problem is that for batch operation, the
* {@link AsyncTable} does not tell us the region, so in this method we need an extra
* locating after we get the result, which is not good.
*/
@Deprecated
default <R> void batchCallback(final List<? extends Row> actions, final Object[] results,
final Batch.Callback<R> callback) throws IOException, InterruptedException {
throw new NotImplementedException("Add an implementation!");
} | 3.68 |
pulsar_AbstractDispatcherMultipleConsumers_getConsumerFromHigherPriority | /**
* Finds the index of the first available consumer which has a higher priority than the given targetPriority.
*
* @param targetPriority
* @return -1 if couldn't find any available consumer
*/
private int getConsumerFromHigherPriority(int targetPriority) {
for (int i = 0; i < currentConsumerRoundRobinIndex; i++) {
Consumer consumer = consumerList.get(i);
if (consumer.getPriorityLevel() < targetPriority) {
if (isConsumerAvailable(consumerList.get(i))) {
return i;
}
} else {
break;
}
}
return -1;
} | 3.68 |
flink_HybridShuffleConfiguration_getFullStrategyReleaseBufferRatio | /** The proportion of buffers to be released. Used by {@link HsFullSpillingStrategy}. */
public float getFullStrategyReleaseBufferRatio() {
return fullStrategyReleaseBufferRatio;
} | 3.68 |
flink_StreamProjection_projectTuple4 | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2, T3> SingleOutputStreamOperator<Tuple4<T0, T1, T2, T3>> projectTuple4() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple4<T0, T1, T2, T3>> tType =
new TupleTypeInfo<Tuple4<T0, T1, T2, T3>>(fTypes);
return dataStream.transform(
"Projection",
tType,
new StreamProject<IN, Tuple4<T0, T1, T2, T3>>(
fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.68 |
hadoop_NMStateStoreService_releaseAssignedResources | /**
* Delete the assigned resources of a container of specific resourceType.
* @param containerId Container Id
* @param resourceType resource Type
* @throws IOException while releasing resources
*/
public void releaseAssignedResources(ContainerId containerId, String resourceType)
throws IOException {} | 3.68 |
flink_VertexThreadInfoTrackerBuilder_build | /**
* Constructs a new {@link VertexThreadInfoTracker}.
*
* @return a new {@link VertexThreadInfoTracker} instance.
*/
public VertexThreadInfoTracker build() {
if (jobVertexStatsCache == null) {
jobVertexStatsCache = defaultCache();
}
if (executionVertexStatsCache == null) {
executionVertexStatsCache = defaultCache();
}
return new VertexThreadInfoTracker(
coordinator,
resourceManagerGatewayRetriever,
executor,
cleanUpInterval,
numSamples,
statsRefreshInterval,
delayBetweenSamples,
maxThreadInfoDepth,
restTimeout,
jobVertexStatsCache,
executionVertexStatsCache);
} | 3.68 |
pulsar_OverloadShedder_findBundlesForUnloading | /**
* Attempt to shed some bundles off every broker which is overloaded.
*
* @param loadData
* The load data to used to make the unloading decision.
* @param conf
* The service configuration.
* @return A map from bundles to unload to the brokers on which they are loaded.
*/
@Override
public Multimap<String, String> findBundlesForUnloading(final LoadData loadData, final ServiceConfiguration conf) {
selectedBundlesCache.clear();
final double overloadThreshold = conf.getLoadBalancerBrokerOverloadedThresholdPercentage() / 100.0;
final Map<String, Long> recentlyUnloadedBundles = loadData.getRecentlyUnloadedBundles();
// Check every broker and select
loadData.getBrokerData().forEach((broker, brokerData) -> {
final LocalBrokerData localData = brokerData.getLocalData();
final double currentUsage = localData.getMaxResourceUsage();
if (currentUsage < overloadThreshold) {
if (log.isDebugEnabled()) {
log.debug("[{}] Broker is not overloaded, ignoring at this point ({})", broker,
localData.printResourceUsage());
}
return;
}
// We want to offload enough traffic such that this broker will go below the overload threshold
// Also, add a small margin so that this broker won't be very close to the threshold edge.
double percentOfTrafficToOffload = currentUsage - overloadThreshold + ADDITIONAL_THRESHOLD_PERCENT_MARGIN;
double brokerCurrentThroughput = localData.getMsgThroughputIn() + localData.getMsgThroughputOut();
double minimumThroughputToOffload = brokerCurrentThroughput * percentOfTrafficToOffload;
log.info(
"Attempting to shed load on {}, which has resource usage {}% above threshold {}%"
+ " -- Offloading at least {} MByte/s of traffic ({})",
broker, 100 * currentUsage, 100 * overloadThreshold, minimumThroughputToOffload / 1024 / 1024,
localData.printResourceUsage());
MutableDouble trafficMarkedToOffload = new MutableDouble(0);
MutableBoolean atLeastOneBundleSelected = new MutableBoolean(false);
if (localData.getBundles().size() > 1) {
// Sort bundles by throughput, then pick the biggest N which combined
// make up for at least the minimum throughput to offload
loadData.getBundleDataForLoadShedding().entrySet().stream()
.filter(e -> localData.getBundles().contains(e.getKey()))
.map((e) -> {
// Map to throughput value
// Consider short-term byte rate to address system resource burden
String bundle = e.getKey();
BundleData bundleData = e.getValue();
TimeAverageMessageData shortTermData = bundleData.getShortTermData();
double throughput = shortTermData.getMsgThroughputIn() + shortTermData
.getMsgThroughputOut();
return Pair.of(bundle, throughput);
}).filter(e -> {
// Only consider bundles that were not already unloaded recently
return !recentlyUnloadedBundles.containsKey(e.getLeft());
}).sorted((e1, e2) -> {
// Sort by throughput in reverse order
return Double.compare(e2.getRight(), e1.getRight());
}).forEach(e -> {
if (trafficMarkedToOffload.doubleValue() < minimumThroughputToOffload
|| atLeastOneBundleSelected.isFalse()) {
selectedBundlesCache.put(broker, e.getLeft());
trafficMarkedToOffload.add(e.getRight());
atLeastOneBundleSelected.setTrue();
}
});
} else if (localData.getBundles().size() == 1) {
log.warn(
"HIGH USAGE WARNING : Sole namespace bundle {} is overloading broker {}. "
+ "No Load Shedding will be done on this broker",
localData.getBundles().iterator().next(), broker);
} else {
log.warn("Broker {} is overloaded despite having no bundles", broker);
}
});
return selectedBundlesCache;
} | 3.68 |
morf_InsertStatement_getFields | /**
* Gets the list of fields to insert
*
* @return the fields being inserted into
*/
public List<AliasedField> getFields() {
return fields;
} | 3.68 |
framework_SelectorPredicate_readPredicatesFromString | /**
* Splits the predicate string into a list of predicate strings.
*
* @param predicateStr
* Comma separated predicate strings
* @return List of predicate strings
*/
private static List<String> readPredicatesFromString(String predicateStr) {
List<String> predicates = new ArrayList<>();
int prevIdx = 0;
int idx = LocatorUtil.indexOfIgnoringQuoted(predicateStr, ',', prevIdx);
while (idx > -1) {
predicates.add(predicateStr.substring(prevIdx, idx));
prevIdx = idx + 1;
idx = LocatorUtil.indexOfIgnoringQuoted(predicateStr, ',', prevIdx);
}
predicates.add(predicateStr.substring(prevIdx));
return predicates;
} | 3.68 |
pulsar_KerberosName_toString | /**
* Put the name back together from the parts.
*/
@Override
public String toString() {
StringBuilder result = new StringBuilder();
result.append(serviceName);
if (hostName != null) {
result.append('/');
result.append(hostName);
}
if (realm != null) {
result.append('@');
result.append(realm);
}
return result.toString();
} | 3.68 |
querydsl_StringExpression_likeIgnoreCase | /**
* Create a {@code this like str} expression ignoring case
*
* @param str string
* @param escape escape character
* @return this like string
*/
public BooleanExpression likeIgnoreCase(Expression<String> str, char escape) {
return Expressions.booleanOperation(Ops.LIKE_ESCAPE_IC, mixin, str, ConstantImpl.create(escape));
} | 3.68 |
flink_BufferManager_notifyBufferAvailable | /**
* The buffer pool notifies this listener of an available floating buffer. If the listener is
* released or currently does not need extra buffers, the buffer should be returned to the
* buffer pool. Otherwise, the buffer will be added into the <tt>bufferQueue</tt>.
*
* @param buffer Buffer that becomes available in buffer pool.
* @return true if the buffer is accepted by this listener.
*/
@Override
public boolean notifyBufferAvailable(Buffer buffer) {
// Assuming two remote channels with respective buffer managers as listeners inside
// LocalBufferPool.
// While canceler thread calling ch1#releaseAllResources, it might trigger
// bm2#notifyBufferAvailable.
// Concurrently if task thread is recycling exclusive buffer, it might trigger
// bm1#notifyBufferAvailable.
// Then these two threads will both occupy the respective bufferQueue lock and wait for
// other side's
// bufferQueue lock to cause deadlock. So we check the isReleased state out of synchronized
// to resolve it.
if (inputChannel.isReleased()) {
return false;
}
int numBuffers = 0;
boolean isBufferUsed = false;
try {
synchronized (bufferQueue) {
checkState(
isWaitingForFloatingBuffers,
"This channel should be waiting for floating buffers.");
isWaitingForFloatingBuffers = false;
// Important: make sure that we never add a buffer after releaseAllResources()
// released all buffers. Following scenarios exist:
// 1) releaseAllBuffers() already released buffers inside bufferQueue
// -> while isReleased is set correctly in InputChannel
// 2) releaseAllBuffers() did not yet release buffers from bufferQueue
// -> we may or may not have set isReleased yet but will always wait for the
// lock on bufferQueue to release buffers
if (inputChannel.isReleased()
|| bufferQueue.getAvailableBufferSize() >= numRequiredBuffers) {
return false;
}
bufferQueue.addFloatingBuffer(buffer);
isBufferUsed = true;
numBuffers += 1 + tryRequestBuffers();
bufferQueue.notifyAll();
}
inputChannel.notifyBufferAvailable(numBuffers);
} catch (Throwable t) {
inputChannel.setError(t);
}
return isBufferUsed;
} | 3.68 |
querydsl_ComparableExpression_goeAll | /**
* Create a {@code this >= all right} expression
*
* @param right rhs of the comparison
* @return this >= all right
*/
public BooleanExpression goeAll(SubQueryExpression<? extends T> right) {
return goe(ExpressionUtils.all(right));
} | 3.68 |
framework_AbstractClientConnector_updateDiffstate | /**
* Sets the expected value of a state property so that changes can be
* properly sent to the client. This needs to be done in cases where a state
* change originates from the client, since otherwise the server-side would
* fail to recognize if the value is changed back to its previous value.
*
* @param propertyName
* the name of the shared state property to update
* @param newValue
* the new diffstate reference value
*/
protected void updateDiffstate(String propertyName, JsonValue newValue) {
if (!isAttached()) {
return;
}
JsonObject diffState = getUI().getConnectorTracker().getDiffState(this);
if (diffState == null) {
return;
}
assert diffState.hasKey(propertyName) : "Diffstate for "
+ getClass().getName() + " has no property named "
+ propertyName;
diffState.put(propertyName, newValue);
} | 3.68 |
hbase_TableDescriptorBuilder_isMergeEnabled | /**
* Check if the region merge enable flag of the table is true. If flag is false then no merge
* will be done.
* @return true if table region merge enabled
*/
@Override
public boolean isMergeEnabled() {
return getOrDefault(MERGE_ENABLED_KEY, Boolean::valueOf, DEFAULT_MERGE_ENABLED);
} | 3.68 |
hadoop_AMWebServices_getTaskAttemptFromTaskAttemptString | /**
* convert a task attempt id string to an actual task attempt and handle all
* the error checking.
*/
public static TaskAttempt getTaskAttemptFromTaskAttemptString(String attId, Task task)
throws NotFoundException {
TaskAttemptId attemptId;
TaskAttempt ta;
try {
attemptId = MRApps.toTaskAttemptID(attId);
} catch (YarnRuntimeException e) {
// TODO: after MAPREDUCE-2793 YarnRuntimeException is probably not expected here
// anymore but keeping it for now just in case other stuff starts failing.
// Also, the webservice should ideally return BadRequest (HTTP:400) when
// the id is malformed instead of NotFound (HTTP:404). The webserver on
// top of which AMWebServices is built seems to automatically do that for
// unhandled exceptions
throw new NotFoundException(e.getMessage());
} catch (NumberFormatException ne) {
throw new NotFoundException(ne.getMessage());
} catch (IllegalArgumentException e) {
throw new NotFoundException(e.getMessage());
}
if (attemptId == null) {
throw new NotFoundException("task attempt id " + attId
+ " not found or invalid");
}
ta = task.getAttempt(attemptId);
if (ta == null) {
throw new NotFoundException("Error getting info on task attempt id "
+ attId);
}
return ta;
} | 3.68 |
flink_TaskExecutorMemoryConfiguration_getNetworkMemory | /** Returns the configured maximum network memory. */
public Long getNetworkMemory() {
return networkMemory;
} | 3.68 |
rocketmq-connect_TableDefinitions_refresh | /**
* Refresh the cached {@link TableDefinition} for the given table.
*
* @param connection the JDBC connection to use; may not be null
* @param tableId the table identifier; may not be null
* @return the refreshed {@link TableDefinition}, or null if there is no such table
* @throws SQLException if there is any problem using the connection
*/
public TableDefinition refresh(
Connection connection,
TableId tableId
) throws SQLException {
TableDefinition dbTable = dialect.describeTable(connection, tableId);
if (dbTable != null) {
log.info("Refreshing metadata for table {} to {}", tableId, dbTable);
cache.put(dbTable.id(), dbTable);
} else {
log.warn("Failed to refresh metadata for table {}", tableId);
}
return dbTable;
} | 3.68 |
hbase_HBaseCluster_waitForNamenodeAvailable | /**
* Wait for the namenode.
*/
public void waitForNamenodeAvailable() throws InterruptedException {
} | 3.68 |
morf_AbstractSqlDialectTest_testRound | /**
* Test that Round functionality behaves as expected.
*/
@Test
public void testRound() {
// Given
Function round = round(new FieldReference("field1"), new FieldLiteral(2));
SelectStatement stmt = new SelectStatement(round).from(new TableReference("schedule"));
// When
String result = testDialect.convertStatementToSQL(stmt);
// Then
assertEquals("Round script should match expected", expectedRound(), result);
} | 3.68 |
pulsar_NonPersistentSubscription_deleteForcefully | /**
* Forcefully closes all consumers and deletes the subscription.
* @return
*/
@Override
public CompletableFuture<Void> deleteForcefully() {
return delete(true);
} | 3.68 |
hadoop_OBSFileSystem_toString | /**
* Return a string that describes this filesystem instance.
*
* @return the string
*/
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("OBSFileSystem{");
sb.append("uri=").append(uri);
sb.append(", workingDir=").append(workingDir);
sb.append(", partSize=").append(partSize);
sb.append(", enableMultiObjectsDelete=")
.append(enableMultiObjectDelete);
sb.append(", maxKeys=").append(maxKeys);
if (cannedACL != null) {
sb.append(", cannedACL=").append(cannedACL.toString());
}
sb.append(", readAheadRange=").append(readAheadRange);
sb.append(", blockSize=").append(getDefaultBlockSize());
if (blockFactory != null) {
sb.append(", blockFactory=").append(blockFactory);
}
sb.append(", boundedMultipartUploadThreadPool=")
.append(boundedMultipartUploadThreadPool);
sb.append(", statistics {").append(statistics).append("}");
sb.append(", metrics {").append("}");
sb.append('}');
return sb.toString();
} | 3.68 |
hbase_ColumnFamilyDescriptorBuilder_setCacheIndexesOnWrite | /**
* Set the setCacheIndexesOnWrite flag
* @param value true if we should cache index blocks on write
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setCacheIndexesOnWrite(boolean value) {
return setValue(CACHE_INDEX_ON_WRITE_BYTES, Boolean.toString(value));
} | 3.68 |
graphhopper_GraphHopper_process | /**
* Creates the graph from OSM data.
*/
protected void process(boolean closeEarly) {
GHDirectory directory = new GHDirectory(ghLocation, dataAccessDefaultType);
directory.configure(dataAccessConfig);
boolean withUrbanDensity = urbanDensityCalculationThreads > 0;
boolean withMaxSpeedEstimation = maxSpeedCalculator != null;
Map<String, String> vehiclesByName = getVehiclesByName(vehiclesString, profilesByName.values());
List<String> encodedValueStrings = getEncodedValueStrings(encodedValuesString);
encodingManager = buildEncodingManager(vehiclesByName, encodedValueStrings, withUrbanDensity,
withMaxSpeedEstimation, profilesByName.values());
osmParsers = buildOSMParsers(vehiclesByName, encodedValueStrings, osmReaderConfig.getIgnoredHighways(), dateRangeParserString);
baseGraph = new BaseGraph.Builder(getEncodingManager())
.setDir(directory)
.set3D(hasElevation())
.withTurnCosts(encodingManager.needsTurnCostsSupport())
.setSegmentSize(defaultSegmentSize)
.build();
properties = new StorableProperties(directory);
checkProfilesConsistency();
GHLock lock = null;
try {
if (directory.getDefaultType().isStoring()) {
lockFactory.setLockDir(new File(ghLocation));
lock = lockFactory.create(fileLockName, true);
if (!lock.tryLock())
throw new RuntimeException("To avoid multiple writers we need to obtain a write lock but it failed. In " + ghLocation, lock.getObtainFailedReason());
}
ensureWriteAccess();
importOSM();
cleanUp();
postImport();
postProcessing(closeEarly);
flush();
} finally {
if (lock != null)
lock.release();
}
} | 3.68 |
hbase_MetricsSource_getUncleanlyClosedWALs | /**
* Get the value of uncleanlyClosedWAL counter
*/
public long getUncleanlyClosedWALs() {
return singleSourceSource.getUncleanlyClosedWALs();
} | 3.68 |
hudi_ClusteringPlanStrategy_buildMetrics | /**
* Generate metrics for the data to be clustered.
*/
protected Map<String, Double> buildMetrics(List<FileSlice> fileSlices) {
Map<String, Double> metrics = new HashMap<>();
FileSliceMetricUtils.addFileSliceCommonMetrics(fileSlices, metrics, getWriteConfig().getParquetMaxFileSize());
return metrics;
} | 3.68 |
flink_JMXService_startInstance | /**
* Start the JMV-wide singleton JMX server.
*
* <p>If JMXServer static instance is already started, it will not be started again. Instead a
* warning will be logged indicating which port the existing JMXServer static instance is
* exposing.
*
* @param portsConfig port configuration of the JMX server.
*/
public static synchronized void startInstance(String portsConfig) {
if (jmxServer == null) {
if (portsConfig != null) {
Iterator<Integer> ports = NetUtils.getPortRangeFromString(portsConfig);
if (ports.hasNext()) {
jmxServer = startJMXServerWithPortRanges(ports);
}
if (jmxServer == null) {
LOG.error(
"Could not start JMX server on any configured port(s) in: "
+ portsConfig);
}
}
} else {
LOG.warn("JVM-wide JMXServer already started at port: " + jmxServer.getPort());
}
} | 3.68 |
hbase_ResponseConverter_buildGetLastFlushedSequenceIdResponse | /**
* Creates a response for the last flushed sequence Id request
* @return A GetLastFlushedSequenceIdResponse
*/
public static GetLastFlushedSequenceIdResponse
buildGetLastFlushedSequenceIdResponse(RegionStoreSequenceIds ids) {
return GetLastFlushedSequenceIdResponse.newBuilder()
.setLastFlushedSequenceId(ids.getLastFlushedSequenceId())
.addAllStoreLastFlushedSequenceId(ids.getStoreSequenceIdList()).build();
} | 3.68 |
hbase_ScannerModel_getColumns | /** Returns list of columns of interest in column:qualifier format, or empty for all */
@XmlElement(name = "column")
public List<byte[]> getColumns() {
return columns;
} | 3.68 |
flink_PartitionCommitPolicy_partitionSpec | /** Partition spec in the form of a map from partition keys to values. */
default LinkedHashMap<String, String> partitionSpec() {
LinkedHashMap<String, String> res = new LinkedHashMap<>();
for (int i = 0; i < partitionKeys().size(); i++) {
res.put(partitionKeys().get(i), partitionValues().get(i));
}
return res;
} | 3.68 |
dubbo_AbstractConfig_getMetaData | /**
* <p>
* <b>The new instance of the AbstractConfig subclass should return empty metadata.</b>
* The purpose is to get the attributes set by the user instead of the default value when the {@link #refresh()} method handles attribute overrides.
* </p>
*
* <p><b>The default value of the field should be set in the {@link #checkDefault()} method</b>,
* which will be called at the end of {@link #refresh()}, so that it will not affect the behavior of attribute overrides.</p>
*
* <p></p>
* Should be called after Config was fully initialized.
* <p>
* Notice! This method should include all properties in the returning map, treat @Parameter differently compared to appendParameters?
* </p>
* // FIXME: this method should be completely replaced by appendParameters?
* // -- Url parameter may use key, but props override only use property name. So replace it with appendAttributes().
*
* @see AbstractConfig#checkDefault()
* @see AbstractConfig#appendParameters(Map, Object, String)
*/
@Transient
public Map<String, String> getMetaData() {
return getMetaData(null);
} | 3.68 |
hibernate-validator_AnnotationApiHelper_getAnnotationArrayValue | /**
* Returns the given annotation mirror's array-typed annotation value with
* the given name.
*
* @param annotationMirror An annotation mirror.
* @param name The name of the annotation value of interest.
*
* @return The annotation value with the given name or an empty list, if no
* such value exists within the given annotation mirror or such a
* value exists but is not an array-typed one.
*/
public List<? extends AnnotationValue> getAnnotationArrayValue(AnnotationMirror annotationMirror, String name) {
AnnotationValue annotationValue = getAnnotationValue( annotationMirror, name );
if ( annotationValue == null ) {
return Collections.<AnnotationValue>emptyList();
}
List<? extends AnnotationValue> theValue = annotationValue.accept(
new SimpleAnnotationValueVisitor8<List<? extends AnnotationValue>, Void>() {
@Override
public List<? extends AnnotationValue> visitArray(List<? extends AnnotationValue> values, Void p) {
return values;
}
}, null
);
return theValue != null ? theValue : Collections
.<AnnotationValue>emptyList();
} | 3.68 |
flink_AndCondition_getLeft | /** @return One of the {@link IterativeCondition conditions} combined in this condition. */
public IterativeCondition<T> getLeft() {
return left;
} | 3.68 |
flink_PlanNode_setBroadcastInputs | /** Sets a list of all broadcast inputs attached to this node. */
public void setBroadcastInputs(List<NamedChannel> broadcastInputs) {
if (broadcastInputs != null) {
this.broadcastInputs = broadcastInputs;
// update the branch map
for (NamedChannel nc : broadcastInputs) {
PlanNode source = nc.getSource();
mergeBranchPlanMaps(branchPlan, source.branchPlan);
}
}
// do a sanity check that if we are branching, we have now candidates for each branch point
if (this.template.hasUnclosedBranches()) {
if (this.branchPlan == null) {
throw new CompilerException(
"Branching and rejoining logic did not find a candidate for the branching point.");
}
for (UnclosedBranchDescriptor uc : this.template.getOpenBranches()) {
OptimizerNode brancher = uc.getBranchingNode();
if (this.branchPlan.get(brancher) == null) {
throw new CompilerException(
"Branching and rejoining logic did not find a candidate for the branching point.");
}
}
}
} | 3.68 |
framework_WebBrowser_isChromeFrameCapable | /**
* Tests whether the user's browser is Chrome Frame capable.
*
* @return true if the user can use Chrome Frame, false if the user can not
* or if no information on the browser is present
*/
public boolean isChromeFrameCapable() {
if (browserDetails == null) {
return false;
}
return browserDetails.isChromeFrameCapable();
} | 3.68 |
morf_InsertStatementBuilder_from | /**
* Specifies the table to source the data from
*
* @param sourceTable the table to source the data from
* @return this, for method chaining.
*/
public InsertStatementBuilder from(TableReference sourceTable) {
if (selectStatement != null) {
throw new UnsupportedOperationException("Cannot specify both a source table and a source SelectStatement");
}
if (!fields.isEmpty()) {
throw new UnsupportedOperationException("Cannot specify both a source table and a list of fields");
}
if (!values.isEmpty()) {
throw new UnsupportedOperationException("Cannot specify both a source table and a set of literal field values.");
}
this.fromTable = sourceTable;
return this;
} | 3.68 |
hadoop_WasbTokenRenewer_cancel | /**
* Cancel the delegation token.
* @param token token to cancel.
* @param conf configuration object.
* @throws IOException thrown when trying get current user.
* @throws InterruptedException thrown when thread is interrupted.
*/
@Override
public void cancel(final Token<?> token, Configuration conf)
throws IOException, InterruptedException {
LOG.debug("Cancelling the delegation token");
getInstance(conf).cancelDelegationToken(token);
} | 3.68 |
hadoop_AzureBlobFileSystemStore_getPrimaryGroup | /**
* @return primary group that user belongs to.
* */
public String getPrimaryGroup() {
return this.primaryUserGroup;
} | 3.68 |
framework_Table_isColumnCollapsed | /**
* Checks if the specified column is collapsed.
*
* @param propertyId
* the propertyID identifying the column.
* @return true if the column is collapsed; false otherwise;
*/
public boolean isColumnCollapsed(Object propertyId) {
return collapsedColumns != null
&& collapsedColumns.contains(propertyId);
} | 3.68 |
flink_FlinkRelMetadataQuery_getRelWindowProperties | /**
* Returns the {@link RelWindowProperties} statistic.
*
* @param rel the relational expression
* @return the window properties for the corresponding RelNode
*/
public RelWindowProperties getRelWindowProperties(RelNode rel) {
for (; ; ) {
try {
return windowPropertiesHandler.getWindowProperties(rel, this);
} catch (JaninoRelMetadataProvider.NoHandler e) {
windowPropertiesHandler = revise(e.relClass, FlinkMetadata.WindowProperties.DEF);
}
}
} | 3.68 |
framework_ServerRpcManager_getImplementation | /**
* Returns the RPC interface implementation for the RPC target.
*
* @return RPC interface implementation
*/
protected T getImplementation() {
return implementation;
} | 3.68 |
hadoop_ApplicationRowKey_parseRowKey | /**
* Given the raw row key as bytes, returns the row key as an object.
*
* @param rowKey Byte representation of row key.
* @return An <cite>ApplicationRowKey</cite> object.
*/
public static ApplicationRowKey parseRowKey(byte[] rowKey) {
return new ApplicationRowKeyConverter().decode(rowKey);
} | 3.68 |
querydsl_Expressions_comparablePath | /**
* Create a new Path expression
*
* @param type type of expression
* @param metadata path metadata
* @param <T> type of expression
* @return path expression
*/
public static <T extends Comparable<?>> ComparablePath<T> comparablePath(Class<? extends T> type,
PathMetadata metadata) {
return new ComparablePath<T>(type, metadata);
} | 3.68 |
flink_TaskStateSnapshot_getOutputRescalingDescriptor | /**
* Returns the output channel mapping for rescaling with in-flight data or {@link
* InflightDataRescalingDescriptor#NO_RESCALE}.
*/
public InflightDataRescalingDescriptor getOutputRescalingDescriptor() {
return getMapping(OperatorSubtaskState::getOutputRescalingDescriptor);
} | 3.68 |
pulsar_NarUnpacker_unpack | /**
* Unpacks the NAR to the specified directory.
*
* @param workingDirectory
* the root directory to which the NAR should be unpacked.
* @throws IOException
* if the NAR could not be unpacked.
*/
private static void unpack(final File nar, final File workingDirectory) throws IOException {
try (JarFile jarFile = new JarFile(nar)) {
Enumeration<JarEntry> jarEntries = jarFile.entries();
while (jarEntries.hasMoreElements()) {
JarEntry jarEntry = jarEntries.nextElement();
String name = jarEntry.getName();
File f = new File(workingDirectory, name);
if (jarEntry.isDirectory()) {
FileUtils.ensureDirectoryExistAndCanReadAndWrite(f);
} else {
// The directory entry might appear after the file entry
FileUtils.ensureDirectoryExistAndCanReadAndWrite(f.getParentFile());
makeFile(jarFile.getInputStream(jarEntry), f);
}
}
}
} | 3.68 |
hudi_FlatLists_of | /**
* Creates a memory-, CPU- and cache-efficient immutable list from an
* existing list. The list is always copied.
*
* @param t Array of members of list
* @param <T> Element type
* @return List containing the given members
*/
public static <T> List<T> of(List<T> t) {
return of_(t);
} | 3.68 |
hbase_SaslServerAuthenticationProviders_selectProvider | /**
* Selects the appropriate SaslServerAuthenticationProvider from those available. If there is no
* matching provider for the given {@code authByte}, this method will return null.
*/
public SaslServerAuthenticationProvider selectProvider(byte authByte) {
return providers.get(Byte.valueOf(authByte));
} | 3.68 |
pulsar_NamespaceIsolationPolicies_getBrokerAssignment | /**
* Get the broker assignment based on the namespace name.
*
* @param nsPolicy
* The namespace isolation policy
* @param brokerAddress
* The broker address is the format of host:port
* @return The broker assignment: {primary, secondary, shared}
*/
private BrokerAssignment getBrokerAssignment(NamespaceIsolationPolicy nsPolicy, String brokerAddress) {
if (nsPolicy != null) {
if (nsPolicy.isPrimaryBroker(brokerAddress)) {
return BrokerAssignment.primary;
} else if (nsPolicy.isSecondaryBroker(brokerAddress)) {
return BrokerAssignment.secondary;
}
throw new IllegalArgumentException("The broker " + brokerAddress
+ " is not among the assigned broker pools for the controlled namespace.");
}
// Only uncontrolled namespace will be assigned to the shared pool
if (!this.isSharedBroker(brokerAddress)) {
throw new IllegalArgumentException("The broker " + brokerAddress
+ " is not among the shared broker pools for the uncontrolled namespace.");
}
return BrokerAssignment.shared;
} | 3.68 |
hbase_MemStoreLABImpl_copyToChunkCell | /**
* Clone the passed cell by copying its data into the passed buf and create a cell with a chunkid
* out of it
* @see #copyBBECToChunkCell(ByteBufferExtendedCell, ByteBuffer, int, int)
*/
private static Cell copyToChunkCell(Cell cell, ByteBuffer buf, int offset, int len) {
int tagsLen = cell.getTagsLength();
if (cell instanceof ExtendedCell) {
((ExtendedCell) cell).write(buf, offset);
} else {
// Normally all Cell impls within Server will be of type ExtendedCell. Just considering the
// other case also. The data fragments within Cell is copied into buf as in KeyValue
// serialization format only.
KeyValueUtil.appendTo(cell, buf, offset, true);
}
return createChunkCell(buf, offset, len, tagsLen, cell.getSequenceId());
} | 3.68 |
hadoop_HttpReferrerAuditHeader_set | /**
* Set an attribute. If the value is non-null/empty,
* it will be used as a query parameter.
*
* @param key key to set
* @param value value.
*/
public void set(final String key, final String value) {
addAttribute(requireNonNull(key), value);
} | 3.68 |
morf_SqlDialect_withPrefix | /**
* Use this to create a temporary or non-temporary {@link IdTable} which is
* guaranteed to have a legal name for the dialect. The non-temporary idTables
* are necessary to enable access from many sessions/connections in case of some
* dialects.
*
* @param dialect {@link SqlDialect} that knows what temp table names are
* allowed.
* @param prefix prefix for the unique name generated.
* @param isTemporary if set to true the table will be created as a temporary
* table specific for the dialect.
* @return {@link IdTable}
*/
public static IdTable withPrefix(SqlDialect dialect, String prefix, boolean isTemporary) {
return new IdTable(dialect.decorateTemporaryTableName(prefix + RandomStringUtils.randomAlphabetic(5)), isTemporary);
} | 3.68 |