name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, 3.26–3.68) |
---|---|---|
hadoop_ApplicationACLsManager_checkAccess | /**
* If authorization is enabled, checks whether the user (in the callerUGI) is
* authorized to perform the access specified by 'applicationAccessType' on
* the application by checking if the user is applicationOwner or part of
* application ACL for the specific access-type.
* <ul>
* <li>The owner of the application can have all access-types on the
* application</li>
* <li>For all other users/groups application-acls are checked</li>
* </ul>
*
* @param callerUGI UserGroupInformation for the user.
* @param applicationAccessType Application Access Type.
* @param applicationOwner Application Owner.
* @param applicationId ApplicationId.
* @return true if the user has permission, false otherwise.
*/
public boolean checkAccess(UserGroupInformation callerUGI,
ApplicationAccessType applicationAccessType, String applicationOwner,
ApplicationId applicationId) {
LOG.debug("Verifying access-type {} for {} on application {} owned by {}",
applicationAccessType, callerUGI, applicationId, applicationOwner);
String user = callerUGI.getShortUserName();
if (!areACLsEnabled()) {
return true;
}
AccessControlList applicationACL = DEFAULT_YARN_APP_ACL;
Map<ApplicationAccessType, AccessControlList> acls = this.applicationACLS
.get(applicationId);
if (acls == null) {
LOG.debug("ACL not found for application {} owned by {}."
+ " Using default [{}]", applicationId, applicationOwner,
YarnConfiguration.DEFAULT_YARN_APP_ACL);
} else {
AccessControlList applicationACLInMap = acls.get(applicationAccessType);
if (applicationACLInMap != null) {
applicationACL = applicationACLInMap;
} else {
LOG.debug("ACL not found for access-type {} for application {}"
+ " owned by {}. Using default [{}]", applicationAccessType,
applicationId, applicationOwner,
YarnConfiguration.DEFAULT_YARN_APP_ACL);
}
}
// Allow application-owner for any type of access on the application
if (this.adminAclsManager.isAdmin(callerUGI)
|| user.equals(applicationOwner)
|| applicationACL.isUserAllowed(callerUGI)) {
return true;
}
return false;
} | 3.68 |
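A minimal usage sketch for the row above, assuming an already-configured ApplicationACLsManager (`aclsManager`) and a known ApplicationId (`appId`); the owner name and access type are hypothetical and checked exceptions are omitted:

    // Illustrative only: aclsManager and appId are assumed to exist in the calling code.
    UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser();
    boolean canView = aclsManager.checkAccess(
        callerUGI,
        ApplicationAccessType.VIEW_APP, // access type being checked
        "alice",                        // application owner (hypothetical)
        appId);
    if (!canView) {
      throw new AccessControlException("User " + callerUGI.getShortUserName()
          + " cannot view application " + appId);
    }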
zxing_C40Encoder_handleEOD | /**
* Handle "end of data" situations
*
* @param context the encoder context
* @param buffer the buffer with the remaining encoded characters
*/
void handleEOD(EncoderContext context, StringBuilder buffer) {
int unwritten = (buffer.length() / 3) * 2;
int rest = buffer.length() % 3;
int curCodewordCount = context.getCodewordCount() + unwritten;
context.updateSymbolInfo(curCodewordCount);
int available = context.getSymbolInfo().getDataCapacity() - curCodewordCount;
if (rest == 2) {
buffer.append('\0'); //Shift 1
while (buffer.length() >= 3) {
writeNextTriplet(context, buffer);
}
if (context.hasMoreCharacters()) {
context.writeCodeword(HighLevelEncoder.C40_UNLATCH);
}
} else if (available == 1 && rest == 1) {
while (buffer.length() >= 3) {
writeNextTriplet(context, buffer);
}
if (context.hasMoreCharacters()) {
context.writeCodeword(HighLevelEncoder.C40_UNLATCH);
}
// else no unlatch
context.pos--;
} else if (rest == 0) {
while (buffer.length() >= 3) {
writeNextTriplet(context, buffer);
}
if (available > 0 || context.hasMoreCharacters()) {
context.writeCodeword(HighLevelEncoder.C40_UNLATCH);
}
} else {
throw new IllegalStateException("Unexpected case. Please report!");
}
context.signalEncoderChange(HighLevelEncoder.ASCII_ENCODATION);
} | 3.68 |
zxing_EmailAddressParsedResult_getEmailAddress | /**
* @return first element of {@link #getTos()} or {@code null} if none
* @deprecated use {@link #getTos()}
*/
@Deprecated
public String getEmailAddress() {
return tos == null || tos.length == 0 ? null : tos[0];
} | 3.68 |
open-banking-gateway_ValidatedExecution_execute | /**
* Entrypoint for Flowable BPMN to call the service.
*/
@Override
@Transactional(noRollbackFor = BpmnError.class)
public void execute(DelegateExecution execution) {
@SuppressWarnings("unchecked")
T context = (T) ContextUtil.getContext(execution, BaseContext.class);
logResolver.log("execute: execution ({}) with context ({})", execution, context);
doUpdateXRequestId(execution, context);
doPrepareContext(execution, context);
doValidate(execution, context);
logResolver.log("execution contextMode ({})", context.getMode());
if (ContextMode.MOCK_REAL_CALLS == context.getMode()) {
doMockedExecution(execution, context);
} else {
doRealExecution(execution, context);
}
doAfterCall(execution, context);
logResolver.log("done execution ({}) with context ({})", execution, context);
} | 3.68 |
Activiti_ProcessEngines_retry | /**
* retries to initialize a process engine that previously failed.
*/
public static ProcessEngineInfo retry(String resourceUrl) {
log.debug("retying initializing of resource {}", resourceUrl);
try {
return initProcessEngineFromResource(new URL(resourceUrl));
} catch (MalformedURLException e) {
throw new ActivitiIllegalArgumentException("invalid url: " + resourceUrl, e);
}
} | 3.68 |
cron-utils_SingleCron_equivalent | /**
* Provides means to compare if two cron expressions are equivalent.
* Assumes same cron definition.
*
* @param cron - any cron instance, never null
* @return boolean - true if equivalent; false otherwise.
*/
public boolean equivalent(final Cron cron) {
return asString().equals(cron.asString());
} | 3.68 |
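A hedged sketch of how equivalent() is typically used with the cron-utils parser; the Unix definition and the expression are illustrative:

    // Illustrative: both Cron instances come from the same definition, as the javadoc assumes.
    CronDefinition unixDefinition = CronDefinitionBuilder.instanceDefinitionFor(CronType.UNIX);
    CronParser parser = new CronParser(unixDefinition);
    Cron a = parser.parse("0 12 * * MON");
    Cron b = parser.parse("0 12 * * MON");
    boolean same = a.equivalent(b); // true: identical canonical string representation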
hadoop_StringValueMax_reset | /**
* reset the aggregator
*/
public void reset() {
maxVal = null;
} | 3.68 |
flink_SharedBufferAccessor_advanceTime | /**
* Notifies the shared buffer that there will be no events with timestamp <= the given value.
* It allows clearing internal counters for the number of events seen so far per timestamp.
*
* @param timestamp watermark, no earlier events will arrive
* @throws Exception Thrown if the system cannot access the state.
*/
public void advanceTime(long timestamp) throws Exception {
sharedBuffer.advanceTime(timestamp);
} | 3.68 |
graphhopper_Instruction_setUseRawName | /**
* This method does not perform translation or combination with the sign - it just uses the
* provided name as instruction.
*/
public void setUseRawName() {
rawName = true;
} | 3.68 |
hadoop_AbstractTask_write | /**
* Write Task.
* @param out : data output object.
* @throws IOException : Throws IO exception if any error occurs.
*/
@Override
public final void write(final DataOutput out) throws IOException {
taskID.write(out);
int environmentSize = 0;
if (environment == null) {
environmentSize = 0;
} else {
environmentSize = environment.size();
}
new IntWritable(environmentSize).write(out);
if (environmentSize != 0) {
for (Entry<String, String> envEntry : environment.entrySet()) {
new Text(envEntry.getKey()).write(out);
new Text(envEntry.getValue()).write(out);
}
}
Text taskCmdText;
if (taskCmd == null) {
taskCmdText = new Text("");
} else {
taskCmdText = new Text(taskCmd);
}
taskCmdText.write(out);
WritableUtils.writeEnum(out, taskType);
WritableUtils.writeVLong(out, timeout);
} | 3.68 |
hadoop_TaskManifest_addFileToCommit | /**
* Add a file to the list of files to commit.
* @param entry entry to add
*/
public void addFileToCommit(FileEntry entry) {
filesToCommit.add(entry);
} | 3.68 |
framework_PropertyFilterDefinition_getMaxNestingDepth | /**
* Returns the maximum amount of nesting levels for sub-properties.
*
* @return maximum nesting depth
*/
public int getMaxNestingDepth() {
return maxNestingDepth;
} | 3.68 |
framework_VLoadingIndicator_ensureTriggered | /**
* Triggers displaying of this loading indicator unless it's already visible
* or scheduled to be shown after a delay.
*
* @since 7.4
*/
public void ensureTriggered() {
if (!isVisible() && !firstTimer.isRunning()) {
trigger();
}
} | 3.68 |
flink_OptimizerNode_readStubAnnotations | /**
* Reads all stub annotations, i.e. which fields remain constant, what cardinality bounds the
* functions have, which fields remain unique.
*/
protected void readStubAnnotations() {
readUniqueFieldsAnnotation();
} | 3.68 |
hbase_TableSchemaModel_getColumnFamily | /**
* Retrieve the column family at the given index from the table descriptor
* @param index the index
* @return the column family model
*/
public ColumnSchemaModel getColumnFamily(int index) {
return columns.get(index);
} | 3.68 |
hbase_HRegion_getWALRegionDir | /**
* @return the Region directory under WALRootDirectory
* @throws IOException if there is an error getting WALRootDir
*/
public Path getWALRegionDir() throws IOException {
if (regionWalDir == null) {
regionWalDir = CommonFSUtils.getWALRegionDir(conf, getRegionInfo().getTable(),
getRegionInfo().getEncodedName());
}
return regionWalDir;
} | 3.68 |
hbase_ParseFilter_isQuoteUnescaped | /**
* Returns a boolean indicating whether the quote was escaped or not
* <p>
* @param array byte array in which the quote was found
* @param quoteIndex index of the single quote
* @return returns true if the quote was unescaped
*/
public static boolean isQuoteUnescaped(byte[] array, int quoteIndex) {
if (array == null) {
throw new IllegalArgumentException("isQuoteUnescaped called with a null array");
}
if (quoteIndex == array.length - 1 || array[quoteIndex + 1] != ParseConstants.SINGLE_QUOTE) {
return true;
} else {
return false;
}
} | 3.68 |
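A small sketch of the escaping rule the method encodes: in HBase filter strings an escaped quote is a doubled single quote, so a quote counts as "unescaped" only when it is not immediately followed by another quote (the sample strings are illustrative):

    byte[] escaped   = Bytes.toBytes("it''s");  // the quote at index 2 is followed by another quote
    byte[] unescaped = Bytes.toBytes("it's");   // the quote at index 2 ends the literal
    boolean a = ParseFilter.isQuoteUnescaped(escaped, 2);   // false
    boolean b = ParseFilter.isQuoteUnescaped(unescaped, 2); // true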
hudi_JavaExecutionStrategy_getPartitioner | /**
* Create {@link BulkInsertPartitioner} based on strategy params.
*
* @param strategyParams Strategy parameters containing columns to sort the data by when clustering.
* @param schema Schema of the data including metadata fields.
* @return partitioner for the java engine
*/
protected BulkInsertPartitioner<List<HoodieRecord<T>>> getPartitioner(Map<String, String> strategyParams, Schema schema) {
if (strategyParams.containsKey(PLAN_STRATEGY_SORT_COLUMNS.key())) {
return new JavaCustomColumnsSortPartitioner(
strategyParams.get(PLAN_STRATEGY_SORT_COLUMNS.key()).split(","),
HoodieAvroUtils.addMetadataFields(schema), getWriteConfig());
} else {
return JavaBulkInsertInternalPartitionerFactory.get(getWriteConfig().getBulkInsertSortMode());
}
} | 3.68 |
flink_ConnectedStreams_map | /**
* Applies a CoMap transformation on a {@link ConnectedStreams} and maps the output to a common
* type. The transformation calls a {@link CoMapFunction#map1} for each element of the first
* input and {@link CoMapFunction#map2} for each element of the second input. Each CoMapFunction
* call returns exactly one element.
*
* @param coMapper The CoMapFunction used to jointly transform the two input DataStreams
* @param outputType {@link TypeInformation} for the result type of the function.
* @return The transformed {@link DataStream}
*/
public <R> SingleOutputStreamOperator<R> map(
CoMapFunction<IN1, IN2, R> coMapper, TypeInformation<R> outputType) {
return transform("Co-Map", outputType, new CoStreamMap<>(inputStream1.clean(coMapper)));
} | 3.68 |
hadoop_OBSDataBlocks_validateWriteArgs | /**
* Validate args to a write command. These are the same validation checks
* expected for any implementation of {@code OutputStream.write()}.
*
* @param b byte array containing data
* @param off offset in array where to start
* @param len number of bytes to be written
* @throws NullPointerException for a null buffer
* @throws IndexOutOfBoundsException if indices are out of range
*/
static void validateWriteArgs(final byte[] b, final int off,
final int len) {
Preconditions.checkNotNull(b);
if (off < 0 || off > b.length || len < 0 || off + len > b.length
|| off + len < 0) {
throw new IndexOutOfBoundsException(
"write (b[" + b.length + "], " + off + ", " + len + ')');
}
} | 3.68 |
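A hedged sketch of the bounds this check enforces; the method is package-private, so the calls below assume same-package visibility:

    byte[] data = new byte[8];
    OBSDataBlocks.validateWriteArgs(data, 0, 8); // ok: whole buffer
    OBSDataBlocks.validateWriteArgs(data, 4, 4); // ok: tail of the buffer
    // OBSDataBlocks.validateWriteArgs(data, 6, 4); // would throw IndexOutOfBoundsException
    // OBSDataBlocks.validateWriteArgs(null, 0, 0); // would throw NullPointerException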
framework_AbstractSelect_size | /**
* Gets the number of items in the container.
*
* @return the number of items in the container.
*
* @see Container#size()
*/
@Override
public int size() {
int size = items.size();
assert size >= 0;
return size;
} | 3.68 |
hbase_IncrementingEnvironmentEdge_incrementTime | /**
* Increment the time by the given amount
*/
public synchronized long incrementTime(long amount) {
timeIncrement += amount;
return timeIncrement;
} | 3.68 |
druid_ListDG_BFS | /*
* Breadth-first search (similar to level-order traversal of a tree).
*/
public void BFS() {
int head = 0;
int rear = 0;
int[] queue = new int[mVexs.size()]; // auxiliary queue
boolean[] visited = new boolean[mVexs.size()]; // vertex visited flags
for (int i = 0; i < mVexs.size(); i++) {
visited[i] = false;
}
for (int i = 0; i < mVexs.size(); i++) {
if (!visited[i]) {
visited[i] = true;
System.out.printf("%c ", mVexs.get(i).data);
queue[rear++] = i; // enqueue
}
while (head != rear) {
int j = queue[head++]; // dequeue
ENode node = mVexs.get(j).firstEdge;
while (node != null) {
int k = node.ivex;
if (!visited[k]) {
visited[k] = true;
System.out.printf("%c ", mVexs.get(k).data);
queue[rear++] = k;
}
node = node.nextEdge;
}
}
}
} | 3.68 |
hadoop_GetGroupsBase_getUgmProtocol | /**
* Get a client of the {@link GetUserMappingsProtocol}.
* @return A {@link GetUserMappingsProtocol} client proxy.
* @throws IOException raised on errors performing I/O.
*/
protected GetUserMappingsProtocol getUgmProtocol() throws IOException {
GetUserMappingsProtocol userGroupMappingProtocol =
RPC.getProxy(GetUserMappingsProtocol.class,
GetUserMappingsProtocol.versionID,
getProtocolAddress(getConf()), UserGroupInformation.getCurrentUser(),
getConf(), NetUtils.getSocketFactory(getConf(),
GetUserMappingsProtocol.class));
return userGroupMappingProtocol;
} | 3.68 |
hbase_StoreFileInfo_isMobFile | /**
* Checks if the file is a MOB file
* @param path path to a file
* @return true if the file is a MOB file, false otherwise
*/
public static boolean isMobFile(final Path path) {
String fileName = path.getName();
String[] parts = fileName.split(MobUtils.SEP);
if (parts.length != 2) {
return false;
}
Matcher m = HFILE_NAME_PATTERN.matcher(parts[0]);
Matcher mm = HFILE_NAME_PATTERN.matcher(parts[1]);
return m.matches() && mm.matches();
} | 3.68 |
framework_VTwinColSelect_getOptionsCaption | /**
* Gets the options caption HTML Widget.
*
* @return the options caption widget
*/
protected HTML getOptionsCaption() {
if (optionsCaption == null) {
optionsCaption = new HTML();
optionsCaption.setStyleName(CLASSNAME + "-caption-left");
optionsCaption.getElement().getStyle()
.setFloat(com.google.gwt.dom.client.Style.Float.LEFT);
captionWrapper.add(optionsCaption);
}
return optionsCaption;
} | 3.68 |
flink_NormalizedKeySorter_write | /**
* Writes a given record to this sort buffer. The written record will be appended and take the
* last logical position.
*
* @param record The record to be written.
* @return True, if the record was successfully written, false, if the sort buffer was full.
* @throws IOException Thrown, if an error occurred while serializing the record into the
* buffers.
*/
@Override
public boolean write(T record) throws IOException {
// check whether we need a new memory segment for the sort index
if (this.currentSortIndexOffset > this.lastIndexEntryOffset) {
if (memoryAvailable()) {
this.currentSortIndexSegment = nextMemorySegment();
this.sortIndex.add(this.currentSortIndexSegment);
this.currentSortIndexOffset = 0;
this.sortIndexBytes += this.segmentSize;
} else {
return false;
}
}
// serialize the record into the data buffers
try {
this.serializer.serialize(record, this.recordCollector);
} catch (EOFException e) {
return false;
}
final long newOffset = this.recordCollector.getCurrentOffset();
final boolean shortRecord =
newOffset - this.currentDataBufferOffset < LARGE_RECORD_THRESHOLD;
if (!shortRecord && LOG.isDebugEnabled()) {
LOG.debug("Put a large record ( >" + LARGE_RECORD_THRESHOLD + " into the sort buffer");
}
// add the pointer and the normalized key
this.currentSortIndexSegment.putLong(
this.currentSortIndexOffset,
shortRecord
? this.currentDataBufferOffset
: (this.currentDataBufferOffset | LARGE_RECORD_TAG));
if (this.numKeyBytes != 0) {
this.comparator.putNormalizedKey(
record,
this.currentSortIndexSegment,
this.currentSortIndexOffset + OFFSET_LEN,
this.numKeyBytes);
}
this.currentSortIndexOffset += this.indexEntrySize;
this.currentDataBufferOffset = newOffset;
this.numRecords++;
return true;
} | 3.68 |
hadoop_OBSPosixBucketUtils_fsRenameToNewFolder | /**
* Used to rename a source folder to a destination folder that does not exist
* before the rename.
*
* @param owner OBS File System instance
* @param src source folder key
* @param dst destination folder key that not existed before rename
* @throws IOException any io exception
* @throws ObsException any obs operation exception
*/
static void fsRenameToNewFolder(final OBSFileSystem owner, final String src,
final String dst)
throws IOException, ObsException {
LOG.debug("RenameFolder path {} to {}", src, dst);
try {
RenameRequest renameObjectRequest = new RenameRequest();
renameObjectRequest.setBucketName(owner.getBucket());
renameObjectRequest.setObjectKey(src);
renameObjectRequest.setNewObjectKey(dst);
owner.getObsClient().renameFolder(renameObjectRequest);
owner.getSchemeStatistics().incrementWriteOps(1);
} catch (ObsException e) {
throw OBSCommonUtils.translateException(
"renameFile(" + src + ", " + dst + ")", src, e);
}
} | 3.68 |
hbase_AsyncTableRegionLocator_getRegionLocation | /**
* Finds the region with the given <code>replicaId</code> on which the given row is being served.
* <p/>
* Returns the location of the region with the given <code>replicaId</code> to which the row
* belongs.
* @param row Row to find.
* @param replicaId the replica id of the region
*/
default CompletableFuture<HRegionLocation> getRegionLocation(byte[] row, int replicaId) {
return getRegionLocation(row, replicaId, false);
} | 3.68 |
flink_HiveParserDDLSemanticAnalyzer_getPartitionSpec | // get partition metadata
public static Map<String, String> getPartitionSpec(HiveParserASTNode ast) {
HiveParserASTNode partNode = null;
// if this ast has only one child, then no partition spec specified.
if (ast.getChildCount() == 1) {
return null;
}
// if ast has two children
// the 2nd child could be partition spec or columnName
// if the ast has 3 children, the second *has to* be partition spec
if (ast.getChildCount() > 2
&& (ast.getChild(1).getType() != HiveASTParser.TOK_PARTSPEC)) {
throw new ValidationException(
ast.getChild(1).getType() + " is not a partition specification");
}
if (ast.getChild(1).getType() == HiveASTParser.TOK_PARTSPEC) {
partNode = (HiveParserASTNode) ast.getChild(1);
}
if (partNode != null) {
return getPartSpec(partNode);
}
return null;
} | 3.68 |
morf_DataValueLookupMetadata_equals | /**
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
return this == obj; // Fully interned
} | 3.68 |
flink_Tuple5_copy | /**
* Shallow tuple copy.
*
* @return A new Tuple with the same fields as this.
*/
@Override
@SuppressWarnings("unchecked")
public Tuple5<T0, T1, T2, T3, T4> copy() {
return new Tuple5<>(this.f0, this.f1, this.f2, this.f3, this.f4);
} | 3.68 |
hadoop_AzureFileSystemInstrumentation_webResponse | /**
* Indicate that we just got a web response from Azure Storage. This should
* be called for every web request/response we do (to get accurate metrics
* of how we're hitting the storage service).
*/
public void webResponse() {
numberOfWebResponses.incr();
inMemoryNumberOfWebResponses.incrementAndGet();
} | 3.68 |
hudi_HoodieTableMetaClient_scanFiles | /**
* Helper method to scan all hoodie-instant metafiles.
*
* @param fs The file system implementation for this table
* @param metaPath The meta path where meta files are stored
* @param nameFilter The name filter to filter meta files
* @return An array of meta FileStatus
* @throws IOException In case of failure
*/
public static FileStatus[] scanFiles(FileSystem fs, Path metaPath, PathFilter nameFilter) throws IOException {
return fs.listStatus(metaPath, nameFilter);
} | 3.68 |
hbase_BlockCache_getBlock | /**
* Fetch block from cache.
* @param cacheKey Block to fetch.
* @param caching Whether this request has caching enabled (used for stats)
* @param repeat Whether this is a repeat lookup for the same block (used to avoid
* double counting cache misses when doing double-check locking)
* @param updateCacheMetrics Whether to update cache metrics or not
* @param blockType BlockType
* @return Block or null if the block is not in the cache.
*/
default Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat,
boolean updateCacheMetrics, BlockType blockType) {
return getBlock(cacheKey, caching, repeat, updateCacheMetrics);
} | 3.68 |
pulsar_ContextImpl_tryGetConsumer | // returns null if consumer not found
private Consumer<?> tryGetConsumer(String topic, int partition) {
if (partition == 0) {
// maybe a non-partitioned topic
Consumer<?> consumer = topicConsumers.get(TopicName.get(topic));
if (consumer != null) {
return consumer;
}
}
// maybe partitioned topic
return topicConsumers.get(TopicName.get(topic).getPartition(partition));
} | 3.68 |
pulsar_PulsarJsonRowDecoder_decodeRow | /**
* decode ByteBuf by {@link org.apache.pulsar.client.api.schema.GenericSchema}.
* @param byteBuf
* @return
*/
@Override
public Optional<Map<DecoderColumnHandle, FieldValueProvider>> decodeRow(ByteBuf byteBuf) {
GenericJsonRecord record = (GenericJsonRecord) genericJsonSchema.decode(byteBuf);
JsonNode tree = record.getJsonNode();
Map<DecoderColumnHandle, FieldValueProvider> decodedRow = new HashMap<>();
for (Map.Entry<DecoderColumnHandle, JsonFieldDecoder> entry : fieldDecoders.entrySet()) {
DecoderColumnHandle columnHandle = entry.getKey();
JsonFieldDecoder decoder = entry.getValue();
JsonNode node = locateNode(tree, columnHandle);
decodedRow.put(columnHandle, decoder.decode(node));
}
return Optional.of(decodedRow);
} | 3.68 |
framework_DateCell_shouldDisplay | /**
*
* @param event
* @return
*
* This method is not necessary in the long run; there could be various
* types of implementations here.
*/
// Date methods not deprecated in GWT
@SuppressWarnings("deprecation")
private boolean shouldDisplay(CalendarEvent event) {
boolean display = true;
if (event.isTimeOnDifferentDays()) {
display = true;
} else {
// only in case of one-day event we are able not to display
// event which is placed in unpublished parts on calendar
Date eventStart = event.getStartTime();
Date eventEnd = event.getEndTime();
int eventStartHours = eventStart.getHours();
int eventEndHours = eventEnd.getHours();
/*
* Special case (#14737): if event end time is 00:00 of the
* following day then isTimeOnDifferentDays() returns false
* (according to logic of this method), so this case should be
* handled here
*/
if (!event.getStart().equals(event.getEnd())
&& (event.getEndTime().getHours() == 0
&& event.getEndTime().getMinutes() == 0)) {
eventEndHours = 23;
}
display = !(eventEndHours < firstHour
|| eventStartHours > lastHour);
}
return display;
} | 3.68 |
framework_Escalator_setScrollTop | /**
* Sets the vertical scroll offset. Note that this will not necessarily
* become the same as the {@code scrollTop} attribute in the DOM.
*
* @param scrollTop
* the number of pixels to scroll vertically
*/
public void setScrollTop(final double scrollTop) {
verticalScrollbar.setScrollPos(scrollTop);
} | 3.68 |
dubbo_ProtobufTypeBuilder_generateListFieldName | /**
* Get the list property name from a getter method.<br/>
* ex: getXXXList()<br/>
*
* @param methodName
* @return
*/
private String generateListFieldName(String methodName) {
return toCamelCase(methodName.substring(3, methodName.length() - 4));
} | 3.68 |
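A worked example of the string arithmetic above, assuming toCamelCase lower-cases the leading character (the method itself is private, so this is illustrative only):

    // methodName           = "getPhoneNumberList"   (length 18)
    // substring(3, 18 - 4) = "PhoneNumber"          (strip the "get" prefix and "List" suffix)
    // toCamelCase(...)     = "phoneNumber"          (assumed lower-camel-case helper)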
framework_DDEventHandleStrategy_handleMouseOut | /**
* Called to handle {@link Event#ONMOUSEOUT} event.
*
* @param target
* target element over which DnD event has happened
* @param event
* ONMOUSEOUT GWT event for active DnD operation
* @param mediator
* VDragAndDropManager data accessor
*/
protected void handleMouseOut(Element target, NativePreviewEvent event,
DDManagerMediator mediator) {
VDragAndDropManager manager = mediator.getManager();
Element relatedTarget = Element
.as(event.getNativeEvent().getRelatedEventTarget());
VDropHandler newDragHanler = findDragTarget(relatedTarget, mediator);
if (manager.getDragElement() != null
&& manager.getDragElement().isOrHasChild(relatedTarget)) {
// ApplicationConnection.getConsole().log(
// "Mouse out of dragImage, ignored");
return;
}
if (manager.getCurrentDropHandler() != newDragHanler) {
handleDragLeave(mediator, true);
manager.setCurrentDropHandler(null);
}
} | 3.68 |
hibernate-validator_ReflectionHelper_getClassFromType | /**
* Converts the given {@code Type} to a {@code Class}.
*
* @param type the type to convert
* @return the class corresponding to the type
*/
public static Class<?> getClassFromType(Type type) {
if ( type instanceof Class ) {
return (Class<?>) type;
}
if ( type instanceof ParameterizedType ) {
return getClassFromType( ( (ParameterizedType) type ).getRawType() );
}
if ( type instanceof GenericArrayType ) {
return Object[].class;
}
throw LOG.getUnableToConvertTypeToClassException( type );
} | 3.68 |
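A brief sketch of two of the branches; checked reflection exceptions are omitted and the emptyList example is illustrative:

    Class<?> plain = ReflectionHelper.getClassFromType(String.class);               // -> String.class
    java.lang.reflect.Method m = java.util.Collections.class.getMethod("emptyList");
    java.lang.reflect.Type generic = m.getGenericReturnType();                      // List<T>, a ParameterizedType
    Class<?> raw = ReflectionHelper.getClassFromType(generic);                      // -> java.util.List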
flink_FailureHandlingResult_getVerticesToRestart | /**
* Returns the tasks to restart.
*
* @return the tasks to restart
*/
public Set<ExecutionVertexID> getVerticesToRestart() {
if (canRestart()) {
return verticesToRestart;
} else {
throw new IllegalStateException(
"Cannot get vertices to restart when the restarting is suppressed.");
}
} | 3.68 |
graphhopper_LocationIndexTree_calculateRMin | /**
* Calculates the distance to the nearest tile border, where the tile border is the rectangular
* region with dimension 2*paddingTiles + 1 and where the center tile contains the given lat/lon
* coordinate
*/
final double calculateRMin(double lat, double lon, int paddingTiles) {
int x = indexStructureInfo.getKeyAlgo().x(lon);
int y = indexStructureInfo.getKeyAlgo().y(lat);
double minLat = graph.getBounds().minLat + (y - paddingTiles) * indexStructureInfo.getDeltaLat();
double maxLat = graph.getBounds().minLat + (y + paddingTiles + 1) * indexStructureInfo.getDeltaLat();
double minLon = graph.getBounds().minLon + (x - paddingTiles) * indexStructureInfo.getDeltaLon();
double maxLon = graph.getBounds().minLon + (x + paddingTiles + 1) * indexStructureInfo.getDeltaLon();
double dSouthernLat = lat - minLat;
double dNorthernLat = maxLat - lat;
double dWesternLon = lon - minLon;
double dEasternLon = maxLon - lon;
// convert degree deltas into a radius in meter
double dMinLat, dMinLon;
if (dSouthernLat < dNorthernLat) {
dMinLat = DIST_PLANE.calcDist(lat, lon, minLat, lon);
} else {
dMinLat = DIST_PLANE.calcDist(lat, lon, maxLat, lon);
}
if (dWesternLon < dEasternLon) {
dMinLon = DIST_PLANE.calcDist(lat, lon, lat, minLon);
} else {
dMinLon = DIST_PLANE.calcDist(lat, lon, lat, maxLon);
}
return Math.min(dMinLat, dMinLon);
} | 3.68 |
framework_FocusOnSelectedItem_setup | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server.
* VaadinRequest)
*/
@Override
protected void setup(VaadinRequest request) {
final Table table = new Table();
table.setSelectable(true);
table.setImmediate(true);
table.addContainerProperty("Property", String.class, null);
for (int i = 0; i < 200; i++) {
table.addItem(new String[] { "Item " + i }, "Item " + i);
}
addComponent(table);
addButton("Select", event -> {
table.setValue("Item 198");
table.setCurrentPageFirstItemId("Item 198");
table.focus();
});
} | 3.68 |
hmily_RejectedPolicyTypeEnum_fromString | /**
* From string rejected policy type enum.
*
* @param value the value
* @return the rejected policy type enum
*/
public static RejectedPolicyTypeEnum fromString(final String value) {
Optional<RejectedPolicyTypeEnum> rejectedPolicyTypeEnum =
Arrays.stream(RejectedPolicyTypeEnum.values())
.filter(v -> Objects.equals(v.getValue(), value))
.findFirst();
return rejectedPolicyTypeEnum.orElse(RejectedPolicyTypeEnum.ABORT_POLICY);
} | 3.68 |
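A hedged usage sketch; the exact value strings are defined elsewhere in the enum, so the literals below are placeholders that only demonstrate the fallback behaviour:

    RejectedPolicyTypeEnum known    = RejectedPolicyTypeEnum.fromString("Abort");        // placeholder value string
    RejectedPolicyTypeEnum fallback = RejectedPolicyTypeEnum.fromString("NoSuchPolicy"); // -> ABORT_POLICY (default)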
hudi_CompactionUtils_getAllPendingCompactionOperationsInPendingCompactionPlans | /**
* Get all partition + file Ids with pending Log Compaction operations and their target log compaction instant time.
*/
public static Map<HoodieFileGroupId, Pair<String, HoodieCompactionOperation>> getAllPendingCompactionOperationsInPendingCompactionPlans(
List<Pair<HoodieInstant, HoodieCompactionPlan>> pendingLogCompactionPlanWithInstants) {
Map<HoodieFileGroupId, Pair<String, HoodieCompactionOperation>> fgIdToPendingCompactionsWithInstantMap = new HashMap<>();
pendingLogCompactionPlanWithInstants.stream().flatMap(instantPlanPair ->
getPendingCompactionOperations(instantPlanPair.getKey(), instantPlanPair.getValue())).forEach(pair -> {
// Defensive check to ensure a single-fileId does not have more than one pending log compaction with different
// file slices. If we find a full duplicate we assume it is caused by eventual nature of the move operation
// on some DFSs.
if (fgIdToPendingCompactionsWithInstantMap.containsKey(pair.getKey())) {
HoodieCompactionOperation operation = pair.getValue().getValue();
HoodieCompactionOperation anotherOperation = fgIdToPendingCompactionsWithInstantMap.get(pair.getKey()).getValue();
if (!operation.equals(anotherOperation)) {
String msg = "Hudi File Id (" + pair.getKey() + ") has more than 1 pending operation. Instants: "
+ pair.getValue() + ", " + fgIdToPendingCompactionsWithInstantMap.get(pair.getKey());
throw new IllegalStateException(msg);
}
}
fgIdToPendingCompactionsWithInstantMap.put(pair.getKey(), pair.getValue());
});
return fgIdToPendingCompactionsWithInstantMap;
} | 3.68 |
hadoop_FederationPolicyInitializationContext_setHomeSubcluster | /**
* Sets in the context the home sub-cluster. Useful for default policy
* behaviors.
*
* @param homeSubcluster value to set.
*/
public void setHomeSubcluster(SubClusterId homeSubcluster) {
this.homeSubcluster = homeSubcluster;
} | 3.68 |
flink_FutureCompletingBlockingQueue_wakeUpPuttingThread | /**
* Gracefully wakes up the thread with the given {@code threadIndex} if it is blocked in adding
* an element to the queue. If the thread is blocked in {@link #put(int, Object)} it will
* immediately return from the method with a return value of false.
*
* <p>If this method is called, the next time the thread with the given index is about to be
* blocked in adding an element, it may immediately wake up and return.
*
* @param threadIndex The number identifying the thread.
*/
public void wakeUpPuttingThread(int threadIndex) {
lock.lock();
try {
maybeCreateCondition(threadIndex);
ConditionAndFlag caf = putConditionAndFlags[threadIndex];
if (caf != null) {
caf.setWakeUp(true);
caf.condition().signal();
}
} finally {
lock.unlock();
}
} | 3.68 |
hbase_ByteBufferInputStream_skip | /**
* Skips <code>n</code> bytes of input from this input stream. Fewer bytes might be skipped if the
* end of the input stream is reached. The actual number <code>k</code> of bytes to be skipped is
* equal to the smaller of <code>n</code> and remaining bytes in the stream.
* @param n the number of bytes to be skipped.
* @return the actual number of bytes skipped.
*/
@Override
public long skip(long n) {
long k = Math.min(n, available());
if (k < 0) {
k = 0;
}
this.buf.position((int) (this.buf.position() + k));
return k;
} | 3.68 |
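A short sketch of the capping behaviour, assuming the stream wraps a 10-byte buffer (the constructor shape is an assumption):

    ByteBuffer buf = ByteBuffer.wrap(new byte[10]);
    ByteBufferInputStream in = new ByteBufferInputStream(buf);
    long s1 = in.skip(4);   // 4 : plenty of bytes remain
    long s2 = in.skip(100); // 6 : capped at the 6 bytes left
    long s3 = in.skip(-1);  // 0 : negative requests skip nothing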
dubbo_WrappedChannelHandler_getPreferredExecutorService | /**
* Currently, this method is mainly customized to facilitate the thread model on consumer side.
* 1. Use ThreadlessExecutor, aka., delegate callback directly to the thread initiating the call.
* 2. Use shared executor to execute the callback.
*
* @param msg
* @return
*/
public ExecutorService getPreferredExecutorService(Object msg) {
if (msg instanceof Response) {
Response response = (Response) msg;
DefaultFuture responseFuture = DefaultFuture.getFuture(response.getId());
// a typical scenario is the response returned after timeout, the timeout response may have completed the
// future
if (responseFuture == null) {
return getSharedExecutorService();
} else {
ExecutorService executor = responseFuture.getExecutor();
if (executor == null || executor.isShutdown()) {
executor = getSharedExecutorService(msg);
}
return executor;
}
} else {
return getSharedExecutorService(msg);
}
} | 3.68 |
hbase_TableSplit_getEncodedRegionName | /**
* Returns the region's encoded name.
* @return The region's encoded name.
*/
public String getEncodedRegionName() {
return encodedRegionName;
} | 3.68 |
framework_AtmospherePushConnection_onConnect | /**
* Called whenever a server push connection is established (or
* re-established).
*
* @param response
*
* @since 7.2
*/
protected void onConnect(AtmosphereResponse response) {
transport = response.getTransport();
switch (state) {
case CONNECT_PENDING:
state = State.CONNECTED;
getConnectionStateHandler().pushOk(this);
break;
case DISCONNECT_PENDING:
// Set state to connected to make disconnect close the connection
state = State.CONNECTED;
assert pendingDisconnectCommand != null;
disconnect(pendingDisconnectCommand);
break;
case CONNECTED:
// IE likes to open the same connection multiple times, just ignore
break;
default:
throw new IllegalStateException(
"Got onOpen event when conncetion state is " + state
+ ". This should never happen.");
}
} | 3.68 |
hudi_SparkHoodieHBaseIndex_close | /**
* Ensure that any resources used for indexing are released here.
*/
@Override
public void close() {
LOG.info("No resources to release from Hbase index");
} | 3.68 |
hudi_HoodieTableMetadataUtil_getRollbackedCommits | /**
* Returns a list of commits which were rolled back as part of a Rollback or Restore operation.
*
* @param instant The Rollback operation to read
* @param timeline instant of timeline from dataset.
*/
private static List<String> getRollbackedCommits(HoodieInstant instant, HoodieActiveTimeline timeline) {
try {
List<String> commitsToRollback;
if (instant.getAction().equals(HoodieTimeline.ROLLBACK_ACTION)) {
try {
HoodieRollbackMetadata rollbackMetadata = TimelineMetadataUtils.deserializeHoodieRollbackMetadata(
timeline.getInstantDetails(instant).get());
commitsToRollback = rollbackMetadata.getCommitsRollback();
} catch (IOException e) {
// if file is empty, fetch the commits to rollback from rollback.requested file
HoodieRollbackPlan rollbackPlan = TimelineMetadataUtils.deserializeAvroMetadata(
timeline.readRollbackInfoAsBytes(new HoodieInstant(HoodieInstant.State.REQUESTED, HoodieTimeline.ROLLBACK_ACTION,
instant.getTimestamp())).get(), HoodieRollbackPlan.class);
commitsToRollback = Collections.singletonList(rollbackPlan.getInstantToRollback().getCommitTime());
LOG.warn("Had to fetch rollback info from requested instant since completed file is empty " + instant.toString());
}
return commitsToRollback;
}
List<String> rollbackedCommits = new LinkedList<>();
if (instant.getAction().equals(HoodieTimeline.RESTORE_ACTION)) {
// Restore is made up of several rollbacks
HoodieRestoreMetadata restoreMetadata = TimelineMetadataUtils.deserializeHoodieRestoreMetadata(
timeline.getInstantDetails(instant).get());
restoreMetadata.getHoodieRestoreMetadata().values().forEach(rms -> {
rms.forEach(rm -> rollbackedCommits.addAll(rm.getCommitsRollback()));
});
}
return rollbackedCommits;
} catch (IOException e) {
throw new HoodieMetadataException("Error retrieving rollback commits for instant " + instant, e);
}
} | 3.68 |
framework_FieldGroup_getPropertyId | /**
* Returns the property id that is bound to the given field.
*
* @param field
* The field to use to lookup the property id
* @return The property id that is bound to the field or null if the field
* is not bound to any property id by this FieldBinder
*/
public Object getPropertyId(Field<?> field) {
return fieldToPropertyId.get(field);
} | 3.68 |
pulsar_FunctionMetaDataManager_initialize | /**
* Initializes the FunctionMetaDataManager.
* We create a new reader
*/
public synchronized void initialize() {
try (Reader reader = FunctionMetaDataTopicTailer.createReader(
workerConfig, pulsarClient.newReader(), MessageId.earliest)) {
// read all existing messages
while (reader.hasMessageAvailable()) {
processMetaDataTopicMessage(reader.readNext());
}
this.isInitialized.complete(null);
} catch (Exception e) {
log.error("Failed to initialize meta data store", e);
throw new RuntimeException("Failed to initialize Metadata Manager", e);
}
log.info("FunctionMetaData Manager initialization complete");
} | 3.68 |
flink_HiveParserExpressionWalker_walk | /** walk the current operator and its descendants. */
protected void walk(Node nd) throws SemanticException {
// Push the node in the stack
opStack.push(nd);
// While there are still nodes to dispatch...
while (!opStack.empty()) {
Node node = opStack.peek();
if (node.getChildren() == null || getDispatchedList().containsAll(node.getChildren())) {
// Dispatch current node
if (!getDispatchedList().contains(node)) {
dispatch(node, opStack);
opQueue.add(node);
}
opStack.pop();
continue;
}
// Add a single child and restart the loop
for (Node childNode : node.getChildren()) {
if (!getDispatchedList().contains(childNode)) {
if (shouldByPass(childNode, node)) {
retMap.put(childNode, null);
} else {
opStack.push(childNode);
}
break;
}
}
} // end while
} | 3.68 |
hbase_KeyStoreFileType_fromFilename | /**
* Detects the type of KeyStore / TrustStore file from the file extension. If the file name ends
* with ".jks", returns <code>StoreFileType.JKS</code>. If the file name ends with ".pem", returns
* <code>StoreFileType.PEM</code>. If the file name ends with ".p12", returns
* <code>StoreFileType.PKCS12</code>. If the file name ends with ".bckfs", returns
* <code>StoreFileType.BCKFS</code>. Otherwise, throws an IllegalArgumentException.
* @param filename the filename of the key store or trust store file.
* @return a KeyStoreFileType.
* @throws IllegalArgumentException if the filename does not end with ".jks", ".pem", ".p12" or
* ".bcfks".
*/
public static KeyStoreFileType fromFilename(String filename) {
int i = filename.lastIndexOf('.');
if (i >= 0) {
String extension = filename.substring(i);
for (KeyStoreFileType storeFileType : KeyStoreFileType.values()) {
if (storeFileType.getDefaultFileExtension().equals(extension)) {
return storeFileType;
}
}
}
throw new IllegalArgumentException(
"Unable to auto-detect store file type from file name: " + filename);
} | 3.68 |
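A small sketch of the extension-driven detection; the file names are illustrative:

    KeyStoreFileType jks = KeyStoreFileType.fromFilename("/etc/hbase/server.jks"); // -> JKS
    KeyStoreFileType pem = KeyStoreFileType.fromFilename("trust-chain.pem");       // -> PEM
    // KeyStoreFileType.fromFilename("keystore.txt"); // would throw IllegalArgumentException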
framework_TouchScrollDelegate_getTimeStamp | /**
* Long calculations are not very efficient in GWT, so this helper method
* returns the timestamp as a double.
*
* @return
*/
public static double getTimeStamp() {
return Duration.currentTimeMillis();
} | 3.68 |
hbase_AggregateImplementation_getStd | /**
* Gives a Pair with first object a List containing Sum and sum of squares, and the second object
* as row count. It is computed for a given combination of column qualifier and column family in
* the given row range as defined in the Scan object. In its current implementation, it takes one
* column family and one column qualifier (if provided). The idea is to get the variance
* first (the average of the squares less the square of the average); the standard deviation
* is the square root of the variance.
*/
@Override
public void getStd(RpcController controller, AggregateRequest request,
RpcCallback<AggregateResponse> done) {
InternalScanner scanner = null;
AggregateResponse response = null;
try {
ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
S sumVal = null, sumSqVal = null, tempVal = null;
long rowCountVal = 0L;
Scan scan = ProtobufUtil.toScan(request.getScan());
scanner = env.getRegion().getScanner(scan);
byte[] colFamily = scan.getFamilies()[0];
NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
byte[] qualifier = null;
if (qualifiers != null && !qualifiers.isEmpty()) {
qualifier = qualifiers.pollFirst();
}
List<Cell> results = new ArrayList<>();
boolean hasMoreRows = false;
do {
tempVal = null;
hasMoreRows = scanner.next(results);
int listSize = results.size();
for (int i = 0; i < listSize; i++) {
tempVal =
ci.add(tempVal, ci.castToReturnType(ci.getValue(colFamily, qualifier, results.get(i))));
}
results.clear();
sumVal = ci.add(sumVal, tempVal);
sumSqVal = ci.add(sumSqVal, ci.multiply(tempVal, tempVal));
rowCountVal++;
} while (hasMoreRows);
if (sumVal != null) {
ByteString first_sumVal = ci.getProtoForPromotedType(sumVal).toByteString();
ByteString first_sumSqVal = ci.getProtoForPromotedType(sumSqVal).toByteString();
AggregateResponse.Builder pair = AggregateResponse.newBuilder();
pair.addFirstPart(first_sumVal);
pair.addFirstPart(first_sumSqVal);
ByteBuffer bb = ByteBuffer.allocate(8).putLong(rowCountVal);
bb.rewind();
pair.setSecondPart(ByteString.copyFrom(bb));
response = pair.build();
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
} finally {
if (scanner != null) {
IOUtils.closeQuietly(scanner);
}
}
done.run(response);
} | 3.68 |
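The final step the javadoc describes (variance = mean of squares minus square of the mean; std = sqrt(variance)) is left to the caller of the coprocessor; a hedged numeric sketch for the values {2, 4, 6}:

    double sum = 12.0, sumSq = 56.0, count = 3.0;  // aggregates carried in the response
    double avg      = sum / count;                 // 4.0
    double avgOfSq  = sumSq / count;               // 18.666...
    double variance = avgOfSq - avg * avg;         // 2.666...
    double std      = Math.sqrt(variance);         // ~1.633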
flink_CopyOnWriteSkipListStateMap_helpSetNodeStatus | /** Set node status to the given new status, and return old status. */
private NodeStatus helpSetNodeStatus(long node, NodeStatus newStatus) {
Node nodeStorage = getNodeSegmentAndOffset(node);
MemorySegment segment = nodeStorage.nodeSegment;
int offsetInSegment = nodeStorage.nodeOffset;
NodeStatus oldStatus = SkipListUtils.getNodeStatus(segment, offsetInSegment);
if (oldStatus != newStatus) {
int level = SkipListUtils.getLevel(segment, offsetInSegment);
SkipListUtils.putLevelAndNodeStatus(segment, offsetInSegment, level, newStatus);
}
return oldStatus;
} | 3.68 |
querydsl_PropertyAccessInvocationHandler_intercept | //CHECKSTYLE:OFF
@Override
public Object intercept(Object proxy, Method method, Object[] args, MethodProxy methodProxy) throws Throwable {
//CHECKSTYLE:ON
Object rv = null;
MethodType methodType = MethodType.get(method);
if (methodType == MethodType.GETTER) {
String ptyName = propertyNameForGetter(method);
Class<?> ptyClass = method.getReturnType();
Type genericType = method.getGenericReturnType();
if (propToObj.containsKey(ptyName)) {
rv = propToObj.get(ptyName);
} else {
PathMetadata pm = createPropertyPath((Path<?>) hostExpression, ptyName);
rv = newInstance(ptyClass, genericType, proxy, ptyName, pm);
}
aliasFactory.setCurrent(propToExpr.get(ptyName));
} else if (methodType == MethodType.SCALA_GETTER) {
String ptyName = method.getName();
Class<?> ptyClass = method.getReturnType();
Type genericType = method.getGenericReturnType();
if (propToObj.containsKey(ptyName)) {
rv = propToObj.get(ptyName);
} else {
PathMetadata pm = createPropertyPath((Path<?>) hostExpression, ptyName);
rv = newInstance(ptyClass, genericType, proxy, ptyName, pm);
}
aliasFactory.setCurrent(propToExpr.get(ptyName));
} else if (methodType == MethodType.LIST_ACCESS || methodType == MethodType.SCALA_LIST_ACCESS) {
// TODO : manage cases where the argument is based on a property invocation
Object propKey = Arrays.asList(MethodType.LIST_ACCESS, args[0]);
if (propToObj.containsKey(propKey)) {
rv = propToObj.get(propKey);
} else {
PathMetadata pm = createListAccessPath((Path<?>) hostExpression, (Integer) args[0]);
Class<?> elementType = ((ParameterizedExpression<?>) hostExpression).getParameter(0);
rv = newInstance(elementType, elementType, proxy, propKey, pm);
}
aliasFactory.setCurrent(propToExpr.get(propKey));
} else if (methodType == MethodType.MAP_ACCESS || methodType == MethodType.SCALA_MAP_ACCESS) {
Object propKey = Arrays.asList(MethodType.MAP_ACCESS, args[0]);
if (propToObj.containsKey(propKey)) {
rv = propToObj.get(propKey);
} else {
PathMetadata pm = createMapAccessPath((Path<?>) hostExpression, args[0]);
Class<?> valueType = ((ParameterizedExpression<?>) hostExpression).getParameter(1);
rv = newInstance(valueType, valueType, proxy, propKey, pm);
}
aliasFactory.setCurrent(propToExpr.get(propKey));
} else if (methodType == MethodType.TO_STRING) {
rv = hostExpression.toString();
} else if (methodType == MethodType.HASH_CODE) {
rv = hostExpression.hashCode();
} else if (methodType == MethodType.GET_MAPPED_PATH) {
rv = hostExpression;
} else {
throw new IllegalArgumentException(
"Invocation of " + method.getName() +
" with types " + Arrays.asList(method.getParameterTypes()) + " not supported");
}
return rv;
} | 3.68 |
flink_ExecutionConfig_setClosureCleanerLevel | /**
* Configures the closure cleaner. Please see {@link ClosureCleanerLevel} for details on the
* different settings.
*/
public ExecutionConfig setClosureCleanerLevel(ClosureCleanerLevel level) {
configuration.set(PipelineOptions.CLOSURE_CLEANER_LEVEL, level);
return this;
} | 3.68 |
hbase_BalanceResponse_setMovesCalculated | /**
* Set how many moves were calculated by the balancer. This will be zero if the cluster is
* already balanced.
* @param movesCalculated moves calculated by the balance run
*/
public Builder setMovesCalculated(int movesCalculated) {
this.movesCalculated = movesCalculated;
return this;
} | 3.68 |
hbase_Log4jUtils_enableDebug | /**
* Switches the logger for the given class to DEBUG level.
* @param clazz The class for which to switch to debug logging.
*/
public static void enableDebug(Class<?> clazz) {
setLogLevel(clazz.getName(), "DEBUG");
} | 3.68 |
hadoop_ArrayFile_append | /**
* Append a value to the file.
* @param value value.
* @throws IOException raised on errors performing I/O.
*/
public synchronized void append(Writable value) throws IOException {
super.append(count, value); // add to map
count.set(count.get()+1); // increment count
} | 3.68 |
hudi_HiveSyncTool_syncPartitions | /**
* Syncs added, updated, and dropped partitions to the metastore.
*
* @param tableName The table name in the metastore.
* @param partitionEventList The partition change event list.
* @return {@code true} if one or more partition(s) are changed in the metastore;
* {@code false} otherwise.
*/
private boolean syncPartitions(String tableName, List<PartitionEvent> partitionEventList) {
List<String> newPartitions = filterPartitions(partitionEventList, PartitionEventType.ADD);
if (!newPartitions.isEmpty()) {
LOG.info("New Partitions " + newPartitions);
syncClient.addPartitionsToTable(tableName, newPartitions);
}
List<String> updatePartitions = filterPartitions(partitionEventList, PartitionEventType.UPDATE);
if (!updatePartitions.isEmpty()) {
LOG.info("Changed Partitions " + updatePartitions);
syncClient.updatePartitionsToTable(tableName, updatePartitions);
}
List<String> dropPartitions = filterPartitions(partitionEventList, PartitionEventType.DROP);
if (!dropPartitions.isEmpty()) {
LOG.info("Drop Partitions " + dropPartitions);
syncClient.dropPartitions(tableName, dropPartitions);
}
return !updatePartitions.isEmpty() || !newPartitions.isEmpty() || !dropPartitions.isEmpty();
} | 3.68 |
streampipes_TextDocument_setTitle | /**
* Updates the "main" title for this document.
*
* @param title
*/
public void setTitle(final String title) {
this.title = title;
} | 3.68 |
hbase_ServerRegionReplicaUtil_getStoreFileInfo | /**
* Returns a StoreFileInfo from the given FileStatus. Secondary replicas refer to the files of the
* primary region, so an HFileLink is used to construct the StoreFileInfo. This way ensures that
* the secondary will be able to continue reading the store files even if they are moved to
* archive after compaction
*/
public static StoreFileInfo getStoreFileInfo(Configuration conf, FileSystem fs,
RegionInfo regionInfo, RegionInfo regionInfoForFs, String familyName, Path path)
throws IOException {
// if this is a primary region, just return the StoreFileInfo constructed from path
if (RegionInfo.COMPARATOR.compare(regionInfo, regionInfoForFs) == 0) {
return new StoreFileInfo(conf, fs, path, true);
}
// else create a store file link. The link file does not exist on the filesystem though.
if (HFileLink.isHFileLink(path) || StoreFileInfo.isHFile(path)) {
HFileLink link = HFileLink.build(conf, regionInfoForFs.getTable(),
regionInfoForFs.getEncodedName(), familyName, path.getName());
return new StoreFileInfo(conf, fs, link.getFileStatus(fs), link);
} else if (StoreFileInfo.isReference(path)) {
Reference reference = Reference.read(fs, path);
Path referencePath = StoreFileInfo.getReferredToFile(path);
if (HFileLink.isHFileLink(referencePath)) {
// HFileLink Reference
HFileLink link = HFileLink.buildFromHFileLinkPattern(conf, referencePath);
return new StoreFileInfo(conf, fs, link.getFileStatus(fs), reference, link);
} else {
// Reference
HFileLink link = HFileLink.build(conf, regionInfoForFs.getTable(),
regionInfoForFs.getEncodedName(), familyName, path.getName());
return new StoreFileInfo(conf, fs, link.getFileStatus(fs), reference);
}
} else {
throw new IOException("path=" + path + " doesn't look like a valid StoreFile");
}
} | 3.68 |
flink_OptimizedPlan_getJobName | /**
* Returns the name of the program.
*
* @return The name of the program.
*/
public String getJobName() {
return this.jobName;
} | 3.68 |
hadoop_SystemErasureCodingPolicies_getPolicies | /**
* Get system defined policies.
* @return system policies
*/
public static List<ErasureCodingPolicy> getPolicies() {
return SYS_POLICIES;
} | 3.68 |
graphhopper_Path_calcNodes | /**
* @return the uncached node indices of the tower nodes in this path.
*/
public IntIndexedContainer calcNodes() {
final IntArrayList nodes = new IntArrayList(edgeIds.size() + 1);
if (edgeIds.isEmpty()) {
if (isFound()) {
nodes.add(endNode);
}
return nodes;
}
int tmpNode = getFromNode();
nodes.add(tmpNode);
forEveryEdge(new EdgeVisitor() {
@Override
public void next(EdgeIteratorState eb, int index, int prevEdgeId) {
nodes.add(eb.getAdjNode());
}
@Override
public void finish() {
}
});
return nodes;
} | 3.68 |
framework_VaadinPortletRequest_getPortletRequest | /**
* Gets the original, unwrapped portlet request.
*
* @return the unwrapped portlet request
*/
public PortletRequest getPortletRequest() {
return getRequest();
} | 3.68 |
hbase_HBaseRpcController_hasRegionInfo | /** Returns True if this Controller is carrying the RPC target Region's RegionInfo. */
default boolean hasRegionInfo() {
return false;
} | 3.68 |
hmily_XaResourcePool_removeAll | /**
* Remove all.
*
* @param globalId the global id
*/
public void removeAll(final String globalId) {
Set<Xid> xids = this.xids.get(globalId);
if (xids != null) {
for (final Xid xid : xids) {
removeResource(xid);
}
this.xids.remove(globalId);
}
} | 3.68 |
framework_EventCellReference_set | /**
* Configures this CellReference and its internal RowReference to point to
* the given Cell.
*
* @param targetCell
* the cell to point to
* @param section
* the section the cell belongs to
*/
public void set(Cell targetCell, Section section) {
Grid<T> grid = getGrid();
int columnIndexDOM = targetCell.getColumn();
Column<?, T> column = null;
if (columnIndexDOM >= 0
&& columnIndexDOM < grid.getVisibleColumns().size()) {
column = grid.getVisibleColumns().get(columnIndexDOM);
}
int row = targetCell.getRow();
// Row objects only make sense for body section of Grid.
T rowObject;
if (section == Section.BODY && row >= 0
&& row < grid.getDataSource().size()) {
rowObject = grid.getDataSource().getRow(row);
} else {
rowObject = null;
}
// At least for now we don't need to have the actual TableRowElement
// available.
getRowReference().set(row, rowObject, null);
int columnIndex = grid.getColumns().indexOf(column);
set(columnIndexDOM, columnIndex, column);
this.element = targetCell.getElement();
this.section = section;
} | 3.68 |
flink_CsvReader_lineDelimiter | /**
* Configures the delimiter that separates the lines/rows. The linebreak character ({@code
* '\n'}) is used by default.
*
* @param delimiter The delimiter that separates the rows.
* @return The CSV reader instance itself, to allow for fluent function chaining.
*/
public CsvReader lineDelimiter(String delimiter) {
if (delimiter == null || delimiter.length() == 0) {
throw new IllegalArgumentException("The delimiter must not be null or an empty string");
}
this.lineDelimiter = delimiter;
return this;
} | 3.68 |
hmily_HmilyTacParticipantCoordinator_rollbackParticipant | /**
* Rollback participant.
*
* @param hmilyParticipantList the hmily participant list
* @param selfParticipantId the self participant id
*/
public void rollbackParticipant(final List<HmilyParticipant> hmilyParticipantList, final Long selfParticipantId) {
if (CollectionUtils.isEmpty(hmilyParticipantList)) {
return;
}
log.debug("TAC-participate-rollback ::: {}", hmilyParticipantList);
for (HmilyParticipant participant : hmilyParticipantList) {
try {
if (participant.getParticipantId().equals(selfParticipantId)) {
HmilyTacLocalParticipantExecutor.cancel(participant);
} else {
HmilyReflector.executor(HmilyActionEnum.CANCELING, ExecutorTypeEnum.RPC, participant);
}
} catch (Throwable e) {
log.error("HmilyParticipant rollback exception :{} ", participant.toString());
throw new HmilyRuntimeException(" hmilyParticipant execute rollback exception:" + participant.toString());
} finally {
// FIXME why remove context after first participator handled
HmilyContextHolder.remove();
}
}
} | 3.68 |
framework_Navigator_setErrorProvider | /**
* Registers a view provider that is queried for a view when no other view
* matches the navigation state. An error view provider should match any
* navigation state, but could return different views for different states.
* Its <code>getViewName(String navigationState)</code> should return
* <code>navigationState</code>.
*
* @param provider
*/
public void setErrorProvider(ViewProvider provider) {
errorProvider = provider;
} | 3.68 |
hadoop_S3ClientFactory_withHeader | /**
* Add a custom header.
* @param header header name
* @param value new value
* @return the builder
*/
public S3ClientCreationParameters withHeader(
String header, String value) {
headers.put(header, value);
return this;
} | 3.68 |
framework_DateField_getType | /*
* Gets the edited property's type. Don't add a JavaDoc comment here, we use
* the default documentation from implemented interface.
*/
@Override
public Class<Date> getType() {
return Date.class;
} | 3.68 |
hmily_HmilyXaRecoveryImpl_convert | /**
* Convert hmily xa recovery.
*
* @param <T> the type parameter
* @param t the t
* @return the hmily xa recovery
*/
public static <T extends HmilyXaRecovery> HmilyXaRecovery convert(final T t) {
HmilyXaRecoveryImpl impl = new HmilyXaRecoveryImpl();
impl.setCreateTime(t.getCreateTime());
impl.setUpdateTime(t.getUpdateTime());
impl.setBranchId(t.getBranchId());
impl.setEndBxid(t.getEndBxid());
impl.setEndXid(t.getEndXid());
impl.setGlobalId(t.getGlobalId());
impl.setIsCoordinator(t.getIsCoordinator());
impl.setState(t.getState());
impl.setSuperId(t.getSuperId());
impl.setTmUnique(t.getTmUnique());
impl.setUrl(t.getUrl());
impl.setVersion(t.getVersion());
return impl;
} | 3.68 |
zxing_ModulusPoly_getCoefficient | /**
* @return coefficient of x^degree term in this polynomial
*/
int getCoefficient(int degree) {
return coefficients[coefficients.length - 1 - degree];
} | 3.68 |
framework_VCssLayout_addOrMove | /**
* For internal use only. May be removed or replaced in the future.
*/
public void addOrMove(Widget child, int index) {
Profiler.enter("VCssLayout.addOrMove");
if (child.getParent() == this) {
Profiler.enter("VCssLayout.addOrMove getWidgetIndex");
int currentIndex = getWidgetIndex(child);
Profiler.leave("VCssLayout.addOrMove getWidgetIndex");
if (index == currentIndex) {
Profiler.leave("VCssLayout.addOrMove");
return;
}
} else if (index == getWidgetCount()) {
// optimized path for appending components - faster especially for
// initial rendering
Profiler.enter("VCssLayout.addOrMove add");
add(child);
Profiler.leave("VCssLayout.addOrMove add");
Profiler.leave("VCssLayout.addOrMove");
return;
}
Profiler.enter("VCssLayout.addOrMove insert");
insert(child, index);
Profiler.leave("VCssLayout.addOrMove insert");
Profiler.leave("VCssLayout.addOrMove");
} | 3.68 |
dubbo_HashedWheelTimer_start | /**
* Starts the background thread explicitly. The background thread will
* start automatically on demand even if you did not call this method.
*
* @throws IllegalStateException if this timer has been
* {@linkplain #stop() stopped} already
*/
public void start() {
switch (WORKER_STATE_UPDATER.get(this)) {
case WORKER_STATE_INIT:
if (WORKER_STATE_UPDATER.compareAndSet(this, WORKER_STATE_INIT, WORKER_STATE_STARTED)) {
workerThread.start();
}
break;
case WORKER_STATE_STARTED:
break;
case WORKER_STATE_SHUTDOWN:
throw new IllegalStateException("cannot be started once stopped");
default:
throw new Error("Invalid WorkerState");
}
// Wait until the startTime is initialized by the worker.
while (startTime == 0) {
try {
startTimeInitialized.await();
} catch (InterruptedException ignore) {
// Ignore - it will be ready very soon.
}
}
} | 3.68 |
AreaShop_FileManager_getRegions | /**
* Get all regions.
* @return List of all regions (it is safe to modify the list)
*/
public List<GeneralRegion> getRegions() {
return new ArrayList<>(regions.values());
} | 3.68 |
flink_StreamNonDeterministicUpdatePlanVisitor_extractSourceMapping | /** Extracts the output-to-source field index mapping of the given projects. */
private Map<Integer, List<Integer>> extractSourceMapping(final List<RexNode> projects) {
Map<Integer, List<Integer>> mapOutFromInPos = new HashMap<>();
for (int index = 0; index < projects.size(); index++) {
RexNode expr = projects.get(index);
mapOutFromInPos.put(
index,
FlinkRexUtil.findAllInputRefs(expr).stream()
.mapToInt(RexSlot::getIndex)
.boxed()
.collect(Collectors.toList()));
}
return mapOutFromInPos;
} | 3.68 |
hadoop_WriteOperationHelper_abortMultipartCommit | /**
* Abort a multipart commit operation.
* @param destKey destination key of ongoing operation
* @param uploadId multipart operation Id
* @throws IOException on problems.
* @throws FileNotFoundException if the abort ID is unknown
*/
@Override
@Retries.RetryTranslated
public void abortMultipartCommit(String destKey, String uploadId)
throws IOException {
abortMultipartUpload(destKey, uploadId, true, invoker.getRetryCallback());
} | 3.68 |
flink_HiveParserExpressionWalker_shouldByPass | /**
* We should bypass the subquery since we have already processed and created a logical plan (in
* genLogicalPlan) for the subquery at this point. SubQueryExprProcessor will use the generated
* plan and create the appropriate ExprNodeSubQueryDesc.
*/
private boolean shouldByPass(Node childNode, Node parentNode) {
if (parentNode instanceof HiveParserASTNode
&& ((HiveParserASTNode) parentNode).getType() == HiveASTParser.TOK_SUBQUERY_EXPR) {
HiveParserASTNode parentOp = (HiveParserASTNode) parentNode;
// subquery either in WHERE <LHS> IN <SUBQUERY> form OR WHERE EXISTS <SUBQUERY> form
// in first case LHS should not be bypassed
assert (parentOp.getChildCount() == 2 || parentOp.getChildCount() == 3);
return parentOp.getChildCount() != 3 || childNode != parentOp.getChild(2);
}
return false;
} | 3.68 |
pulsar_TxnBatchedPositionImpl_compareTo | /**
* It's exactly the same as {@link PositionImpl}, to make sure that when compared to the "markDeletePosition", it
* looks like {@link PositionImpl}. {@link #batchSize} and {@link #batchIndex} should not be involved in the
* calculation, just like {@link PositionImpl#ackSet} is not involved in the calculation.
* Note: In {@link java.util.concurrent.ConcurrentSkipListMap}, it use the {@link Comparable#compareTo(Object)} to
* determine whether the keys are the same. In {@link java.util.HashMap}, it use the
* {@link Object#hashCode()} & {@link Object#equals(Object)} to determine whether the keys are the same.
*/
public int compareTo(PositionImpl that) {
return super.compareTo(that);
} | 3.68 |
hadoop_PlacementConstraintManagerService_getValidSourceTag | /**
* This method will return a single allocation tag. It should be called after
* validating the tags by calling {@link #validateSourceTags}.
*
* @param sourceTags the source allocation tags
* @return the single source tag
*/
protected String getValidSourceTag(Set<String> sourceTags) {
return sourceTags.iterator().next();
} | 3.68 |
framework_Upload_removeStartedListener | /**
* Removes the upload started event listener.
*
* @param listener
* the Listener to be removed.
*/
@Deprecated
public void removeStartedListener(StartedListener listener) {
removeListener(StartedEvent.class, listener, UPLOAD_STARTED_METHOD);
} | 3.68 |
zxing_BitMatrix_get | /**
* <p>Gets the requested bit, where true means black.</p>
*
* @param x The horizontal component (i.e. which column)
* @param y The vertical component (i.e. which row)
* @return value of given bit in matrix
*/
public boolean get(int x, int y) {
int offset = y * rowSize + (x / 32);
return ((bits[offset] >>> (x & 0x1f)) & 1) != 0;
} | 3.68 |
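A worked example of the offset arithmetic, assuming rowSize = 4 (i.e. a width of at most 128 columns):

    // get(37, 2):
    //   offset = 2 * 4 + (37 / 32) = 9   -> word index in bits[]
    //   37 & 0x1f = 5                    -> bit position within bits[9]
    // so the call tests bit 5 of bits[9] and returns true when that bit is set (black).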
flink_TableFunctionProvider_of | /** Helper method for creating a static provider. */
static <T> TableFunctionProvider<T> of(TableFunction<T> tableFunction) {
return () -> tableFunction;
} | 3.68 |
flink_FieldParser_getParserForType | /**
* Gets the parser for the type specified by the given class. Returns null, if no parser for
* that class is known.
*
* @param type The class of the type to get the parser for.
* @return The parser for the given type, or null, if no such parser exists.
*/
public static <T> Class<FieldParser<T>> getParserForType(Class<T> type) {
Class<? extends FieldParser<?>> parser = PARSERS.get(type);
if (parser == null) {
return null;
} else {
@SuppressWarnings("unchecked")
Class<FieldParser<T>> typedParser = (Class<FieldParser<T>>) parser;
return typedParser;
}
} | 3.68 |
flink_StateTable_size | /**
* Returns the total number of entries in this {@link StateTable}. This is the sum of both
* sub-tables.
*
* @return the number of entries in this {@link StateTable}.
*/
public int size() {
int count = 0;
for (StateMap<K, N, S> stateMap : keyGroupedStateMaps) {
count += stateMap.size();
}
return count;
} | 3.68 |
zxing_BitArray_reverse | /**
* Reverses all bits in the array.
*/
public void reverse() {
int[] newBits = new int[bits.length];
// reverse all int's first
int len = (size - 1) / 32;
int oldBitsLen = len + 1;
for (int i = 0; i < oldBitsLen; i++) {
newBits[len - i] = Integer.reverse(bits[i]);
}
// now correct the int's if the bit size isn't a multiple of 32
if (size != oldBitsLen * 32) {
int leftOffset = oldBitsLen * 32 - size;
int currentInt = newBits[0] >>> leftOffset;
for (int i = 1; i < oldBitsLen; i++) {
int nextInt = newBits[i];
currentInt |= nextInt << (32 - leftOffset);
newBits[i - 1] = currentInt;
currentInt = nextInt >>> leftOffset;
}
newBits[oldBitsLen - 1] = currentInt;
}
bits = newBits;
} | 3.68 |
hadoop_FederationStateStoreFacade_getReservationHomeSubCluster | /**
* Returns the home {@link SubClusterId} for the specified {@link ReservationId}.
*
* @param reservationId the identifier of the reservation
* @return the home subCluster identifier
* @throws YarnException if the call to the state store is unsuccessful
*/
public SubClusterId getReservationHomeSubCluster(ReservationId reservationId)
throws YarnException {
GetReservationHomeSubClusterResponse response = stateStore.getReservationHomeSubCluster(
GetReservationHomeSubClusterRequest.newInstance(reservationId));
return response.getReservationHomeSubCluster().getHomeSubCluster();
} | 3.68 |
rocketmq-connect_IdentifierRules_leadingQuoteString | /**
* Get the string used as a leading quote.
*
* @return the leading quote string; never null
*/
public String leadingQuoteString() {
return leadingQuoteString;
} | 3.68 |