name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
flink_Predicates_arePublicStaticFinalOfType | /**
* Tests that the field is {@code public static final} and has the fully qualified type name of
* {@code fqClassName}.
*/
public static DescribedPredicate<JavaField> arePublicStaticFinalOfType(String fqClassName) {
return arePublicStaticOfType(fqClassName).and(isFinal());
} | 3.68 |
hadoop_RLESparseResourceAllocation_toIntervalMap | /**
* Returns the representation of the current resources allocated over time as
* an interval map (in the defined non-null range).
*
* @return the representation of the current resources allocated over time as
* an interval map.
*/
public Map<ReservationInterval, Resource> toIntervalMap() {
readLock.lock();
try {
Map<ReservationInterval, Resource> allocations =
new TreeMap<ReservationInterval, Resource>();
// Empty
if (isEmpty()) {
return allocations;
}
Map.Entry<Long, Resource> lastEntry = null;
for (Map.Entry<Long, Resource> entry : cumulativeCapacity.entrySet()) {
if (lastEntry != null && entry.getValue() != null) {
ReservationInterval interval =
new ReservationInterval(lastEntry.getKey(), entry.getKey());
Resource resource = lastEntry.getValue();
allocations.put(interval, resource);
}
lastEntry = entry;
}
return allocations;
} finally {
readLock.unlock();
}
} | 3.68 |
framework_Margins_setMarginTop | /**
* Sets the height of the top margin.
*
* @param marginTop
* the top margin to set (in pixels)
*/
public void setMarginTop(int marginTop) {
this.marginTop = marginTop;
updateVertical();
} | 3.68 |
flink_HiveParserSemanticAnalyzer_doPhase1GetAllAggregations | // DFS-scan the expressionTree to find all aggregation subtrees and put them in aggregations.
private void doPhase1GetAllAggregations(
HiveParserASTNode expressionTree,
HashMap<String, HiveParserASTNode> aggregations,
List<HiveParserASTNode> wdwFns)
throws SemanticException {
int exprTokenType = expressionTree.getToken().getType();
if (exprTokenType == HiveASTParser.TOK_SUBQUERY_EXPR) {
// since we now have scalar subqueries, a subquery expression can appear in HAVING;
// we don't want to include aggregates from within the subquery
return;
}
if (exprTokenType == HiveASTParser.TOK_FUNCTION
|| exprTokenType == HiveASTParser.TOK_FUNCTIONDI
|| exprTokenType == HiveASTParser.TOK_FUNCTIONSTAR) {
assert (expressionTree.getChildCount() != 0);
if (expressionTree.getChild(expressionTree.getChildCount() - 1).getType()
== HiveASTParser.TOK_WINDOWSPEC) {
// If it is a windowing spec, we include it in the list
// Further, we will examine its children AST nodes to check whether there are
// aggregation functions within
wdwFns.add(expressionTree);
doPhase1GetAllAggregations(
(HiveParserASTNode)
expressionTree.getChild(expressionTree.getChildCount() - 1),
aggregations,
wdwFns);
return;
}
if (expressionTree.getChild(0).getType() == HiveASTParser.Identifier) {
String functionName = unescapeIdentifier(expressionTree.getChild(0).getText());
SqlOperator sqlOperator =
HiveParserUtils.getAnySqlOperator(
functionName, frameworkConfig.getOperatorTable());
if (sqlOperator == null) {
throw new SemanticException(ErrorMsg.INVALID_FUNCTION.getMsg(functionName));
}
if (FunctionRegistry.impliesOrder(functionName)) {
throw new SemanticException(ErrorMsg.MISSING_OVER_CLAUSE.getMsg(functionName));
}
if (HiveParserUtils.isUDAF(sqlOperator)) {
if (containsLeadLagUDF(expressionTree)) {
throw new SemanticException(
ErrorMsg.MISSING_OVER_CLAUSE.getMsg(functionName));
}
aggregations.put(expressionTree.toStringTree(), expressionTree);
if (!HiveParserUtils.isNative(sqlOperator)) {
unparseTranslator.addIdentifierTranslation(
(HiveParserASTNode) expressionTree.getChild(0));
}
return;
}
}
}
for (int i = 0; i < expressionTree.getChildCount(); i++) {
doPhase1GetAllAggregations(
(HiveParserASTNode) expressionTree.getChild(i), aggregations, wdwFns);
}
} | 3.68 |
dubbo_ServiceInstanceMetadataUtils_getEndpoint | /**
* Get the endpoint for the specified protocol from {@link ServiceInstance#getMetadata() the metadata of
* service instance}.
*
* @param serviceInstance {@link ServiceInstance service instance}
* @param protocol the name of the protocol, e.g. dubbo, rest, and so on
* @return the matching endpoint, or <code>null</code> if not found
*/
public static Endpoint getEndpoint(ServiceInstance serviceInstance, String protocol) {
List<Endpoint> endpoints = ((DefaultServiceInstance) serviceInstance).getEndpoints();
if (endpoints != null) {
for (Endpoint endpoint : endpoints) {
if (endpoint.getProtocol().equals(protocol)) {
return endpoint;
}
}
}
return null;
} | 3.68 |
morf_DatabaseExceptionHelper_isCausedByTimeoutException | /**
* <p>Checks if the throwable was caused by timeout exception.</p>
* <b>This method has been tested for Oracle and MySQL only and might not work
* for other DB engines.</b>
*
* @param throwable to check
* @return true if the throwable is caused by a timeout, false otherwise
*/
public boolean isCausedByTimeoutException(Throwable throwable) {
// Valid test for Oracle timeout exception and some (not all!) MySQL
// exceptions.
if (ExceptionUtils.indexOfType(throwable, SQLTimeoutException.class) != -1) {
return true;
}
// MySQL database has two timeout exceptions in two packages. One of them
// doesn't extend SQLTimeoutException but only SQLException. It is therefore
// necessary to do an ugly name check...
for (Throwable causeThrowable : ExceptionUtils.getThrowables(throwable)) {
if (MYSQL_TIMEOUT_EXCEPTION_NAME.equals(causeThrowable.getClass().getSimpleName())) {
return true;
}
}
return false;
} | 3.68 |
zxing_CalendarParsedResult_isStartAllDay | /**
* @return true if start time was specified as a whole day
*/
public boolean isStartAllDay() {
return startAllDay;
} | 3.68 |
flink_FromJarEntryClassInformationProvider_getJobClassName | /**
* Returns the job class name, if one was explicitly specified for the corresponding {@code
* jarFile}. It can return an empty {@code Optional} if no class name was specified, e.g.
* because the job class is the entry class of the jar.
*
* @return the job class name to use with the respective {@code jarFile}, or an empty {@code
*     Optional} if none was specified (e.g. when the job class is the entry class of the jar)
* @see #getJarFile()
*/
@Override
public Optional<String> getJobClassName() {
return Optional.ofNullable(jobClassName);
} | 3.68 |
hbase_UnsafeAccess_putInt | /**
* Put an int value out to the specified ByteBuffer offset in big-endian format.
* @param buf the ByteBuffer to write to
* @param offset offset in the ByteBuffer
* @param val int to write out
* @return incremented offset
*/
public static int putInt(ByteBuffer buf, int offset, int val) {
if (LITTLE_ENDIAN) {
val = Integer.reverseBytes(val);
}
if (buf.isDirect()) {
HBasePlatformDependent.putInt(directBufferAddress(buf) + offset, val);
} else {
HBasePlatformDependent.putInt(buf.array(),
offset + buf.arrayOffset() + BYTE_ARRAY_BASE_OFFSET, val);
}
return offset + Bytes.SIZEOF_INT;
} | 3.68 |
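A small usage sketch for the snippet above (buffer size and values are illustrative; java.nio.ByteBuffer and org.apache.hadoop.hbase.util.Bytes are assumed on the classpath), showing how the returned offset chains sequential writes:

ByteBuffer buf = ByteBuffer.allocate(2 * Bytes.SIZEOF_INT);
int offset = 0;
offset = UnsafeAccess.putInt(buf, offset, 42); // writes bytes 0..3 in big-endian, returns 4
offset = UnsafeAccess.putInt(buf, offset, 7);  // writes bytes 4..7, returns 8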
hbase_RegionStates_addToOfflineRegions | // ==========================================================================
// Region offline helpers
// ==========================================================================
// TODO: Populated when we read meta but regions never make it out of here.
public void addToOfflineRegions(final RegionStateNode regionNode) {
LOG.info("Added to offline, CURRENTLY NEVER CLEARED!!! " + regionNode);
regionOffline.put(regionNode.getRegionInfo(), regionNode);
} | 3.68 |
framework_VaadinSession_removeRequestHandler | /**
* Removes a request handler from the session.
*
* @param handler
* the request handler to remove
*
* @since 7.0
*/
public void removeRequestHandler(RequestHandler handler) {
assert hasLock();
requestHandlers.remove(handler);
} | 3.68 |
hadoop_SinglePendingCommit_putExtraData | /**
* Set/Update an extra data entry.
* @param key key
* @param value value
*/
public void putExtraData(String key, String value) {
extraData.put(key, value);
} | 3.68 |
hbase_ServerManager_removeRegion | /**
* Called by delete table and similar to notify the ServerManager that a region was removed.
*/
public void removeRegion(final RegionInfo regionInfo) {
final byte[] encodedName = regionInfo.getEncodedNameAsBytes();
storeFlushedSequenceIdsByRegion.remove(encodedName);
flushedSequenceIdByRegion.remove(encodedName);
} | 3.68 |
framework_TouchScrollDelegate_readPositionAndSpeed | /**
* Records the position and timestamp of the given touch move event for
* speed calculation.
*
* @param event
*            the native touch event to read the position from
* @return {@code true} if the touch has moved significantly
*/
private boolean readPositionAndSpeed(NativeEvent event) {
Touch touch = event.getChangedTouches().get(0);
lastClientY = touch.getClientY();
int eventIndx = nextEvent++;
eventIndx = eventIndx % EVENTS_FOR_SPEED_CALC;
eventTimeStamps[eventIndx] = getTimeStamp();
yPositions[eventIndx] = lastClientY;
return isMovedSignificantly();
} | 3.68 |
flink_CompactingHashTable_assignPartition | /**
* Assigns a partition to a bucket.
*
* @param bucket bucket index
* @param numPartitions number of partitions
* @return the partition the bucket is assigned to
*/
private static byte assignPartition(int bucket, byte numPartitions) {
return (byte) (bucket % numPartitions);
} | 3.68 |
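A quick trace of the modulo rule above (the bucket counts are illustrative; the method is private, so this only illustrates the mapping):

// assignPartition(10, (byte) 4) -> (byte) (10 % 4) = 2
// assignPartition(3,  (byte) 4) -> (byte) (3 % 4)  = 3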
hadoop_KMSMDCFilter_setContext | /**
* Sets the context with the given parameters.
* @param ugi the {@link UserGroupInformation} for the current request.
* @param method the http method
* @param requestURL the requested URL.
* @param remoteAddr the remote address of the client.
*/
@VisibleForTesting
public static void setContext(UserGroupInformation ugi,
String method, String requestURL, String remoteAddr) {
DATA_TL.set(new Data(ugi, method, requestURL, remoteAddr));
} | 3.68 |
hbase_ProcedureExecutor_bypassProcedure | /**
* Bypass a procedure. If a procedure is set to bypass, all the logic in its execute/rollback
* will be ignored and it will return success regardless. It is used to recover buggy stuck
* procedures, releasing their lock resources and letting other procedures run. Bypassing one
* procedure (its ancestors will be bypassed automatically) may leave the cluster in an
* intermediate state, e.g. a region not assigned, or some hdfs files left behind. After getting
* rid of those stuck procedures, the operators may have to do some clean up on hdfs or schedule
* some assign procedures to bring the regions back online. DO AT YOUR OWN RISK.
* <p>
* A procedure can be bypassed only if 1. The procedure is in state RUNNABLE, WAITING,
* WAITING_TIMEOUT or it is a root procedure without any child. 2. No other worker thread is
* executing it. 3. No child procedure has been submitted.
* <p>
* If all the requirements are met, the procedure and its ancestors will be bypassed and
* persisted to WAL.
* <p>
* If the procedure is in WAITING state, it will be set to RUNNABLE and added to the run queue.
* TODO: What about WAITING_TIMEOUT?
* @param pids the procedure ids
* @param lockWait time to wait for the lock
* @param force if force is set to true, we will bypass the procedure even if it is executing.
*              This is for procedures which can't break out during execution (due to a bug,
*              mostly). In this case, bypassing the procedure alone is not enough, since it is
*              already stuck there. We need to restart the master after bypassing, letting the
*              problematic procedure execute with bypass=true, so that under that condition the
*              procedure can be successfully bypassed.
* @param recursive We will do an expensive search for children of each pid. EXPENSIVE!
* @return one result per pid, true if the bypass succeeded
* @throws IOException IOException
*/
public List<Boolean> bypassProcedure(List<Long> pids, long lockWait, boolean force,
boolean recursive) throws IOException {
List<Boolean> result = new ArrayList<Boolean>(pids.size());
for (long pid : pids) {
result.add(bypassProcedure(pid, lockWait, force, recursive));
}
return result;
} | 3.68 |
hbase_HRegionFileSystem_getTempDir | // ===========================================================================
// Temp Helpers
// ===========================================================================
/** Returns {@link Path} to the region's temp directory, used for file creations */
public Path getTempDir() {
return new Path(getRegionDir(), REGION_TEMP_DIR);
} | 3.68 |
hadoop_ResourceUsage_getReserved | /*
* Reserved
*/
public Resource getReserved() {
return getReserved(NL);
} | 3.68 |
pulsar_ConsumerConfiguration_getAckTimeoutMillis | /**
* @return the configured timeout in milliseconds for unacked messages.
*/
public long getAckTimeoutMillis() {
return conf.getAckTimeoutMillis();
} | 3.68 |
hbase_StoreFileListFile_listFiles | // file sequence id to path
private NavigableMap<Long, List<Path>> listFiles() throws IOException {
FileSystem fs = ctx.getRegionFileSystem().getFileSystem();
FileStatus[] statuses;
try {
statuses = fs.listStatus(trackFileDir);
} catch (FileNotFoundException e) {
LOG.debug("Track file directory {} does not exist", trackFileDir, e);
return Collections.emptyNavigableMap();
}
if (statuses == null || statuses.length == 0) {
return Collections.emptyNavigableMap();
}
TreeMap<Long, List<Path>> map = new TreeMap<>((l1, l2) -> l2.compareTo(l1));
for (FileStatus status : statuses) {
Path file = status.getPath();
if (!status.isFile()) {
LOG.warn("Found invalid track file {}, which is not a file", file);
continue;
}
if (!TRACK_FILE_PATTERN.matcher(file.getName()).matches()) {
LOG.warn("Found invalid track file {}, skip", file);
continue;
}
List<String> parts = Splitter.on(TRACK_FILE_SEPARATOR).splitToList(file.getName());
map.computeIfAbsent(Long.parseLong(parts.get(1)), k -> new ArrayList<>()).add(file);
}
return map;
} | 3.68 |
framework_Tree_removeAllActionHandlers | /**
* Removes all action handlers.
*/
public void removeAllActionHandlers() {
actionHandlers = null;
actionMapper = null;
markAsDirty();
} | 3.68 |
druid_StatViewServlet_process | /**
* First checks whether a JMX connection address exists. If it does not, the local druid service
* is called directly; if it does, the remote JMX service is called. Before communicating over
* JMX, it first checks whether the JMX connection has already been established successfully; if
* so, it communicates directly, otherwise it tries to re-establish the connection once.
*
* @param url the service address to call
* @return the JSON string returned by the service call
*/
protected String process(String url) {
String resp = null;
if (jmxUrl == null) {
resp = statService.service(url);
} else {
if (conn == null) {
// The connection failed to be created during initialization
try {
// Try to reconnect
initJmxConn();
} catch (IOException e) {
LOG.error("init jmx connection error", e);
resp = DruidStatService.returnJSONResult(DruidStatService.RESULT_CODE_ERROR,
"init jmx connection error" + e.getMessage());
}
if (conn != null) {
// Connection established
try {
resp = getJmxResult(conn, url);
} catch (Exception e) {
LOG.error("get jmx data error", e);
resp = DruidStatService.returnJSONResult(DruidStatService.RESULT_CODE_ERROR, "get data error:"
+ e.getMessage());
}
}
} else {
// Connection established
try {
resp = getJmxResult(conn, url);
} catch (Exception e) {
LOG.error("get jmx data error", e);
resp = DruidStatService.returnJSONResult(DruidStatService.RESULT_CODE_ERROR,
"get data error" + e.getMessage());
}
}
}
return resp;
} | 3.68 |
framework_Table_setColumnReorderingAllowed | /**
* Sets whether column reordering is allowed or not.
*
* @param columnReorderingAllowed
* specifies whether column reordering is allowed.
*/
public void setColumnReorderingAllowed(boolean columnReorderingAllowed) {
if (columnReorderingAllowed != this.columnReorderingAllowed) {
this.columnReorderingAllowed = columnReorderingAllowed;
markAsDirty();
}
} | 3.68 |
hadoop_Trash_moveToTrash | /** Move a file or directory to the current trash directory.
*
* @param path the path.
* @return false if the item is already in the trash or trash is disabled
* @throws IOException raised on errors performing I/O.
*/
public boolean moveToTrash(Path path) throws IOException {
return trashPolicy.moveToTrash(path);
} | 3.68 |
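A minimal usage sketch, assuming trash is enabled via fs.trash.interval and that the classic Trash(FileSystem, Configuration) constructor is available; the path is made up:

Configuration conf = new Configuration();
conf.set("fs.trash.interval", "60"); // minutes; trash must be enabled for moveToTrash to act
FileSystem fs = FileSystem.get(conf);
Trash trash = new Trash(fs, conf);
boolean moved = trash.moveToTrash(new Path("/tmp/obsolete-data")); // false if already in trash or trash disabled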
flink_InputGateDeploymentDescriptor_getConsumedSubpartitionIndexRange | /** Return the index range of the consumed subpartitions. */
public IndexRange getConsumedSubpartitionIndexRange() {
return consumedSubpartitionIndexRange;
} | 3.68 |
flink_ProjectOperator_projectTuple11 | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>
ProjectOperator<T, Tuple11<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>>
projectTuple11() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
TupleTypeInfo<Tuple11<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>> tType =
new TupleTypeInfo<Tuple11<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>>(fTypes);
return new ProjectOperator<T, Tuple11<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>>(
this.ds, this.fieldIndexes, tType);
} | 3.68 |
framework_AbstractDateField_getDateStyles | /**
* Returns a map from dates to custom style names in each date's calendar
* cell.
*
* @return unmodifiable map from dates to custom style names in each date's
* calendar cell
*
* @see #setDateStyle(LocalDate, String)
* @since 8.3
*/
public Map<LocalDate, String> getDateStyles() {
HashMap<LocalDate, String> hashMap = new HashMap<>();
for (Entry<String, String> entry : getState(false).dateStyles
.entrySet()) {
hashMap.put(LocalDate.parse(entry.getKey()), entry.getValue());
}
return Collections.unmodifiableMap(hashMap);
} | 3.68 |
hadoop_CommonAuditContext_get | /**
* Get a context entry.
* @param key key
* @return value or null
*/
public String get(String key) {
Supplier<String> supplier = evaluatedEntries.get(key);
return supplier != null
? supplier.get()
: null;
} | 3.68 |
flink_StringColumnSummary_getEmptyCount | /** Number of empty strings e.g. java.lang.String.isEmpty(). */
public long getEmptyCount() {
return emptyCount;
} | 3.68 |
flink_DebeziumJsonFormatFactory_validateDecodingFormatOptions | /** Validator for debezium decoding format. */
private static void validateDecodingFormatOptions(ReadableConfig tableOptions) {
JsonFormatOptionsUtil.validateDecodingFormatOptions(tableOptions);
} | 3.68 |
hmily_HmilyTacParticipantCoordinator_buildHmilyParticipant | // TODO: needs review together with rpc.build-participant
private HmilyParticipant buildHmilyParticipant(final ProceedingJoinPoint point, final Long participantId, final Long participantRefId, final Long transId) {
HmilyParticipant hmilyParticipant = new HmilyParticipant();
if (null == participantId) {
hmilyParticipant.setParticipantId(IdWorkerUtils.getInstance().createUUID());
} else {
hmilyParticipant.setParticipantId(participantId);
}
if (null != participantRefId) {
hmilyParticipant.setParticipantRefId(participantRefId);
MethodSignature signature = (MethodSignature) point.getSignature();
Method method = signature.getMethod();
Class<?> clazz = point.getTarget().getClass();
Object[] args = point.getArgs();
HmilyInvocation hmilyInvocation = new HmilyInvocation(clazz.getInterfaces()[0], method.getName(), method.getParameterTypes(), args);
hmilyParticipant.setConfirmHmilyInvocation(hmilyInvocation);
}
hmilyParticipant.setTransId(transId);
hmilyParticipant.setTransType(TransTypeEnum.TAC.name());
hmilyParticipant.setStatus(HmilyActionEnum.PRE_TRY.getCode());
hmilyParticipant.setRole(HmilyRoleEnum.PARTICIPANT.getCode());
return hmilyParticipant;
} | 3.68 |
nifi-maven_NarDuplicateDependenciesMojo_execute | /*
* @see org.apache.maven.plugin.Mojo#execute()
*/
@Override
public void execute() throws MojoExecutionException, MojoFailureException {
try {
NarDependencyUtils.ensureSingleNarDependencyExists(project);
// build the project for the nar artifact
final ProjectBuildingRequest narRequest = new DefaultProjectBuildingRequest();
narRequest.setRepositorySession(repoSession);
narRequest.setSystemProperties(System.getProperties());
artifactHandlerManager.addHandlers(NarDependencyUtils.createNarHandlerMap(narRequest, project, projectBuilder));
// get the dependency tree
final DependencyNode root = dependencyCollectorBuilder.collectDependencyGraph(narRequest, null);
DependencyNode narParent = root.getChildren()
.stream()
.filter(child -> NarDependencyUtils.NAR.equals(child.getArtifact().getType()))
.findFirst()
.orElseThrow(() -> new MojoExecutionException("Project does not have any NAR dependencies."));
getLog().info("Analyzing dependencies of " + narRequest.getProject().getFile().getPath());
// all compiled dependencies except inherited from parent
Map<String, List<Artifact>> directDependencies = new HashMap<>();
root.accept(new DependencyNodeVisitor() {
final Stack<Artifact> hierarchy = new Stack<>();
@Override
public boolean visit(DependencyNode node) {
if (node == root) {
return true;
}
Artifact artifact = node.getArtifact();
hierarchy.push(artifact);
if (NarDependencyUtils.COMPILE_STRING.equals(artifact.getScope()) && !NarDependencyUtils.NAR.equals(artifact.getType())) {
directDependencies.put(artifact.toString(), new ArrayList<>(hierarchy));
return true;
}
return false;
}
@Override
public boolean endVisit(DependencyNode node) {
if (node != root) {
hierarchy.pop();
}
return true;
}
});
Map<String, List<String>> errors = new HashMap<>();
narParent.accept(new DependencyNodeVisitor() {
final Stack<Artifact> hierarchy = new Stack<>();
@Override
public boolean visit(DependencyNode node) {
Artifact artifact = node.getArtifact();
hierarchy.push(artifact);
if (NarDependencyUtils.COMPILE_STRING.equals(artifact.getScope()) && directDependencies.containsKey(artifact.toString())) {
StringBuilder sb = new StringBuilder().append(root.getArtifact()).append(" (this nar)").append(System.lineSeparator());
List<Artifact> otherHierarchy = directDependencies.get(artifact.toString());
// print other hierarchy
for (int i = 0; i < otherHierarchy.size(); i++) {
sb.append(indent(i)).append(otherHierarchy.get(i));
// print the last artifact in the hierarchy
if (i == otherHierarchy.size() - 1) {
sb.append(" (duplicate)");
}
sb.append(System.lineSeparator());
}
// print this hierarchy
for (int i = 0; i < hierarchy.size(); i++) {
sb.append(indent(i)).append(hierarchy.get(i));
// print the last artifact in the hierarchy
if (i == hierarchy.size() - 1) {
sb.append(" (already included here)");
}
sb.append(System.lineSeparator());
}
errors.computeIfAbsent(artifact.toString(), k -> new ArrayList<>()).add(sb.toString());
}
return true;
}
@Override
public boolean endVisit(DependencyNode node) {
hierarchy.pop();
return true;
}
});
for (Map.Entry<String, List<String>> entry : errors.entrySet()) {
StringBuilder sb = new StringBuilder().append(entry.getKey()).append(" is already included in the nar");
if (entry.getValue().size() > 1) {
sb.append(" multiple times");
}
sb.append(":");
for (String error : entry.getValue()) {
sb.append(System.lineSeparator()).append(error);
}
getLog().error(sb.toString());
}
if (!errors.isEmpty()) {
getLog().info("Consider changing the scope from \"compile\" to \"provided\" or exclude it in case it's a transitive dependency.");
throw new MojoFailureException("Found duplicate dependencies");
}
} catch (ProjectBuildingException | DependencyCollectorBuilderException e) {
throw new MojoExecutionException("Cannot build project dependency tree", e);
}
} | 3.68 |
pulsar_BlobStoreBackedInputStreamImpl_refillBufferIfNeeded | /**
* Refill the buffered input if it is empty.
* @return true if there are bytes to read, false otherwise
*/
private boolean refillBufferIfNeeded() throws IOException {
if (buffer.readableBytes() == 0) {
if (cursor >= objectLen) {
return false;
}
long startRange = cursor;
long endRange = Math.min(cursor + bufferSize - 1,
objectLen - 1);
if (log.isDebugEnabled()) {
log.info("refillBufferIfNeeded {} - {} ({} bytes to fill)",
startRange, endRange, (endRange - startRange));
}
try {
long startReadTime = System.nanoTime();
Blob blob = blobStore.getBlob(bucket, key, new GetOptions().range(startRange, endRange));
versionCheck.check(key, blob);
try (InputStream stream = blob.getPayload().openStream()) {
buffer.clear();
bufferOffsetStart = startRange;
bufferOffsetEnd = endRange;
long bytesRead = endRange - startRange + 1;
int bytesToCopy = (int) bytesRead;
while (bytesToCopy > 0) {
bytesToCopy -= buffer.writeBytes(stream, bytesToCopy);
}
cursor += buffer.readableBytes();
}
// here we can get the metrics
// because JClouds streams the content
// and actually the HTTP call finishes when the stream is fully read
if (this.offloaderStats != null) {
this.offloaderStats.recordReadOffloadDataLatency(topicName,
System.nanoTime() - startReadTime, TimeUnit.NANOSECONDS);
this.offloaderStats.recordReadOffloadBytes(topicName, endRange - startRange + 1);
}
} catch (Throwable e) {
if (null != this.offloaderStats) {
this.offloaderStats.recordReadOffloadError(this.topicName);
}
throw new IOException("Error reading from BlobStore", e);
}
}
return true;
} | 3.68 |
framework_VMenuBar_getSelected | /**
* Returns the currently selected item of this menu, or null if nothing is
* selected.
*
* @return the currently selected item, or {@code null} if nothing is selected
*/
public CustomMenuItem getSelected() {
return selected;
} | 3.68 |
hadoop_Endpoint_clone | /**
* Shallow clone: the lists of addresses are shared
* @return a cloned instance
* @throws CloneNotSupportedException
*/
@Override
public Object clone() throws CloneNotSupportedException {
return super.clone();
} | 3.68 |
framework_FieldGroup_setItemDataSource | /**
* Updates the item that is used by this FieldBinder. Rebinds all fields to
* the properties in the new item.
*
* @param itemDataSource
* The new item to use
*/
public void setItemDataSource(Item itemDataSource) {
this.itemDataSource = itemDataSource;
bindFields();
} | 3.68 |
hadoop_AssumedRoleCredentialProvider_close | /**
* Propagate the close() call to the inner stsProvider.
*/
@Override
public void close() {
S3AUtils.closeAutocloseables(LOG, stsProvider, credentialsToSTS, stsClient);
} | 3.68 |
hbase_SequenceIdAccounting_update | /**
* We've been passed a new sequenceid for the region. Set it as the highest seen for this region
* and, if we are to record the oldest (i.e. lowest) sequenceids, save it as the oldest seen if
* nothing currently recorded is older.
* @param lowest Whether to keep running account of oldest sequence id.
*/
void update(byte[] encodedRegionName, Set<byte[]> families, long sequenceid,
final boolean lowest) {
Long l = Long.valueOf(sequenceid);
this.highestSequenceIds.put(encodedRegionName, l);
if (lowest) {
ConcurrentMap<ImmutableByteArray, Long> m = getOrCreateLowestSequenceIds(encodedRegionName);
for (byte[] familyName : families) {
m.putIfAbsent(ImmutableByteArray.wrap(familyName), l);
}
}
} | 3.68 |
hadoop_EditLogInputStream_readOp | /**
* Read an operation from the stream
* @return an operation from the stream or null if at end of stream
* @throws IOException if there is an error reading from the stream
*/
public FSEditLogOp readOp() throws IOException {
FSEditLogOp ret;
if (cachedOp != null) {
ret = cachedOp;
cachedOp = null;
return ret;
}
return nextOp();
} | 3.68 |
zxing_EmailAddressParsedResult_getMailtoURI | /**
* @return "mailto:"
* @deprecated without replacement
*/
@Deprecated
public String getMailtoURI() {
return "mailto:";
} | 3.68 |
hbase_ScheduledChore_isValidTime | /** Return true if the time is positive and earlier than or equal to the current time */
private synchronized boolean isValidTime(final long time) {
return time > 0 && time <= EnvironmentEdgeManager.currentTime();
} | 3.68 |
hadoop_DeviceMappingManager_pickAndDoSchedule | /**
* If the device plugin has its own scheduler, then use it.
* Otherwise, pick our default scheduler to do the scheduling.
*/
private void pickAndDoSchedule(Set<Device> allowed,
Map<Device, ContainerId> used, Set<Device> assigned,
Container c, int count, String resourceName,
DevicePluginScheduler dps)
throws ResourceHandlerException {
ContainerId containerId = c.getContainerId();
Map<String, String> env = c.getLaunchContext().getEnvironment();
if (null == dps) {
LOG.debug("Customized device plugin scheduler is preferred "
+ "but not implemented, use default logic");
defaultScheduleAction(allowed, used,
assigned, containerId, count);
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Customized device plugin implemented,"
+ "use customized logic");
// Use customized device scheduler
LOG.debug("Try to schedule " + count
+ "(" + resourceName + ") using " + dps.getClass());
}
// Pass in unmodifiable set
Set<Device> dpsAllocated = dps.allocateDevices(
Sets.differenceInTreeSets(allowed, used.keySet()),
count,
ImmutableMap.copyOf(env));
if (dpsAllocated.size() != count) {
throw new ResourceHandlerException(dps.getClass()
+ " should allocate " + count
+ " of " + resourceName + ", but actual: "
+ dpsAllocated.size());
}
// copy
assigned.addAll(dpsAllocated);
// Store assigned devices into usedDevices
for (Device device : assigned) {
used.put(device, containerId);
}
}
} | 3.68 |
hadoop_AclUtil_getMinimalAcl | /**
* Translates the given permission bits to the equivalent minimal ACL.
*
* @param perm FsPermission to translate
* @return List<AclEntry> containing exactly 3 entries representing the
* owner, group and other permissions
*/
public static List<AclEntry> getMinimalAcl(FsPermission perm) {
return Lists.newArrayList(
new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.USER)
.setPermission(perm.getUserAction())
.build(),
new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.GROUP)
.setPermission(perm.getGroupAction())
.build(),
new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.OTHER)
.setPermission(perm.getOtherAction())
.build());
} | 3.68 |
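A short sketch of the translation for mode 0644 (the octal literal and variable names are illustrative; org.apache.hadoop.fs.permission imports assumed):

FsPermission perm = new FsPermission((short) 0644); // rw-r--r--
List<AclEntry> minimal = AclUtil.getMinimalAcl(perm);
// minimal.get(0): ACCESS/USER  entry with rw-
// minimal.get(1): ACCESS/GROUP entry with r--
// minimal.get(2): ACCESS/OTHER entry with r--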
framework_ComplexRenderer_getConsumedEvents | /**
* Returns the events that the renderer should consume. These are also the
* events that the Grid will pass to
* {@link #onBrowserEvent(Cell, NativeEvent)} when they occur.
*
* @return a list of consumed events
*
* @see com.google.gwt.dom.client.BrowserEvents
*/
public Collection<String> getConsumedEvents() {
return Collections.emptyList();
} | 3.68 |
shardingsphere-elasticjob_TaskContext_from | /**
* Get task meta data info via string.
*
* @param value task meta data info string
* @return task meta data info
*/
public static MetaInfo from(final String value) {
String[] result = value.split(DELIMITER);
Preconditions.checkState(1 == result.length || 2 == result.length || 5 == result.length);
return new MetaInfo(result[0], 1 == result.length || "".equals(result[1])
? Collections.emptyList()
: Splitter.on(",").splitToList(result[1]).stream().map(Integer::parseInt).collect(Collectors.toList()));
} | 3.68 |
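A hedged parsing sketch; the "@-@" delimiter and the id layout are assumptions about DELIMITER, not taken from this snippet:

// Assumed layout: <jobName><DELIMITER><shardingItems>, with DELIMITER assumed to be "@-@"
TaskContext.MetaInfo metaInfo = TaskContext.MetaInfo.from("myJob@-@0,1");
// result[0] = "myJob" becomes the job name; "0,1" is split into sharding items [0, 1]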
graphhopper_CustomModelParser_getVariableDeclaration | /**
* For the methods getSpeed and getPriority we declare variables that contain the encoded value of the current edge
* or if an area contains the current edge.
*/
private static String getVariableDeclaration(EncodedValueLookup lookup, final String arg) {
if (lookup.hasEncodedValue(arg)) {
EncodedValue enc = lookup.getEncodedValue(arg, EncodedValue.class);
return getReturnType(enc) + " " + arg + " = (" + getReturnType(enc) + ") (reverse ? " +
"edge.getReverse((" + getInterface(enc) + ") this." + arg + "_enc) : " +
"edge.get((" + getInterface(enc) + ") this." + arg + "_enc));\n";
} else if (arg.startsWith(BACKWARD_PREFIX)) {
final String argSubstr = arg.substring(BACKWARD_PREFIX.length());
if (lookup.hasEncodedValue(argSubstr)) {
EncodedValue enc = lookup.getEncodedValue(argSubstr, EncodedValue.class);
return getReturnType(enc) + " " + arg + " = (" + getReturnType(enc) + ") (reverse ? " +
"edge.get((" + getInterface(enc) + ") this." + argSubstr + "_enc) : " +
"edge.getReverse((" + getInterface(enc) + ") this." + argSubstr + "_enc));\n";
} else {
throw new IllegalArgumentException("Not supported for backward: " + argSubstr);
}
} else if (arg.startsWith(IN_AREA_PREFIX)) {
return "";
} else {
throw new IllegalArgumentException("Not supported " + arg);
}
} | 3.68 |
framework_VTwinColSelect_isReadOnly | /**
* Returns {@code true} if this twin column select is in read only mode,
* {@code false} if not.
*
* @return {@code true} for read only, {@code false} for not read only
*/
public boolean isReadOnly() {
return readOnly;
} | 3.68 |
framework_AbstractTestUI_setContent | /**
* This method is inherited from the super class, but it should generally
* not be used. If you want to just add components to your test, use e.g.
* {@link #addComponent(Component)} instead to add the component to the
* layout used by this UI. If you don't want to use the top-level layout
* used by this class, you instead inherit directly from UI.
*
* @deprecated Use {@link #addComponent(Component)} or inherit from UI
* instead.
*/
@Override
@Deprecated
public void setContent(Component content) {
// Overridden just to deprecate
super.setContent(content);
} | 3.68 |
querydsl_BooleanExpression_orAllOf | /**
* Create a {@code this or all(predicates)} expression
*
* <p>Return a union of this and the intersection of the given predicates</p>
*
* @param predicates intersection of predicates
* @return this or all(predicates)
*/
public BooleanExpression orAllOf(Predicate... predicates) {
return or(ExpressionUtils.allOf(predicates));
} | 3.68 |
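An illustrative sketch using ad-hoc boolean paths (the path names are made up; com.querydsl.core.types.dsl.Expressions assumed):

BooleanExpression active = Expressions.booleanPath("active");
BooleanExpression adult = Expressions.booleanPath("adult");
BooleanExpression verified = Expressions.booleanPath("verified");
// equivalent to: active or (adult and verified)
BooleanExpression result = active.orAllOf(adult, verified);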
flink_Expander_substitute | /**
* Returns the SQL string with identifiers replaced according to the given unparse function.
*/
public String substitute(Function<SqlNode, String> fn) {
final SqlShuttle shuttle =
new SqlShuttle() {
@Override
public SqlNode visit(SqlCall call) {
SqlOperator operator = call.getOperator();
if (operator instanceof SqlUnresolvedFunction) {
final SqlUnresolvedFunction unresolvedFunction =
(SqlUnresolvedFunction) operator;
final SqlIdentifier functionID =
unresolvedFunction.getSqlIdentifier();
if (functionID.isSimple()
&& funcNameToId.containsKey(functionID.getSimple())) {
SqlUnresolvedFunction newFunc =
new SqlUnresolvedFunction(
funcNameToId.get(functionID.getSimple()),
unresolvedFunction.getReturnTypeInference(),
unresolvedFunction.getOperandTypeInference(),
unresolvedFunction.getOperandTypeChecker(),
unresolvedFunction.getParamTypes(),
unresolvedFunction.getFunctionType());
return newFunc.createCall(
call.getFunctionQuantifier(),
call.getParserPosition(),
call.getOperandList().toArray(new SqlNode[0]));
}
}
return super.visit(call);
}
@Override
public SqlNode visit(SqlIdentifier id) {
if (id.isStar()) {
return id;
}
final SqlIdentifier toReplace =
identifiersMap.get(id.getParserPosition());
if (toReplace == null || id.names.size() >= toReplace.names.size()) {
return id;
}
return toReplace;
}
};
final SqlNode substituted = this.oriNode.accept(shuttle);
return fn.apply(substituted);
} | 3.68 |
flink_Conditions_haveLeafTypes | /**
* Tests leaf types of a method against the given predicate.
*
* <p>Given some {@link JavaType}, "leaf" types are recursively determined as described below.
* Leaf types are taken from argument, return, and (declared) exception types.
*
* <ul>
* <li>If the type is an array type, check its base component type.
* <li>If the type is a generic type, check the type itself and all of its type arguments.
* <li>Otherwise, check just the type itself.
* </ul>
*/
public static ArchCondition<JavaMethod> haveLeafTypes(
DescribedPredicate<JavaClass> typePredicate) {
return haveLeafReturnTypes(typePredicate)
.and(haveLeafArgumentTypes(typePredicate))
.and(haveLeafExceptionTypes(typePredicate));
} | 3.68 |
morf_ObjectTreeTraverser_forCallback | /**
* Creates a new traverser for a callback.
* @param callback The callback
* @return the resulting traverser
*/
public static ObjectTreeTraverser forCallback(Callback callback) {
return new ObjectTreeTraverser(callback);
} | 3.68 |
hbase_PrivateCellUtil_getTags | /**
* Return tags in the given Cell as a List
* @param cell The Cell
* @return Tags in the given Cell as a List
*/
public static List<Tag> getTags(Cell cell) {
List<Tag> tags = new ArrayList<>();
Iterator<Tag> tagsItr = tagsIterator(cell);
while (tagsItr.hasNext()) {
tags.add(tagsItr.next());
}
return tags;
} | 3.68 |
dubbo_LoggerFactory_getAvailableAdapter | /**
* Get the available adapter names
*
* @return available adapter names
*/
public static List<String> getAvailableAdapter() {
Map<Class<? extends LoggerAdapter>, String> candidates = new HashMap<>();
candidates.put(Log4jLoggerAdapter.class, "log4j");
candidates.put(Slf4jLoggerAdapter.class, "slf4j");
candidates.put(Log4j2LoggerAdapter.class, "log4j2");
candidates.put(JclLoggerAdapter.class, "jcl");
candidates.put(JdkLoggerAdapter.class, "jdk");
List<String> result = new LinkedList<>();
for (Map.Entry<Class<? extends LoggerAdapter>, String> entry : candidates.entrySet()) {
try {
LoggerAdapter loggerAdapter =
entry.getKey().getDeclaredConstructor().newInstance();
loggerAdapter.getLogger(LoggerFactory.class);
result.add(entry.getValue());
} catch (Exception ignored) {
// ignored
}
}
return result;
} | 3.68 |
framework_AbstractComponentContainer_addComponent | /**
* This only implements the events and component parent calls. The extending
* classes must implement component list maintenance and call this method
* after component list maintenance.
*
* @see com.vaadin.ui.ComponentContainer#addComponent(Component)
*/
@Override
public void addComponent(Component c) {
// Make sure we're not adding the component inside its own content
if (isOrHasAncestor(c)) {
throw new IllegalArgumentException(
"Component cannot be added inside it's own content");
}
if (c.getParent() != null) {
// If the component already has a parent, try to remove it
AbstractSingleComponentContainer.removeFromParent(c);
}
c.setParent(this);
fireComponentAttachEvent(c);
markAsDirty();
} | 3.68 |
flink_ResourceGuard_close | /**
* Closed the resource guard. This method will block until all calls to {@link
* #acquireResource()} have seen their matching call to {@link #releaseResource()}.
*/
@Override
public void close() {
closeUninterruptibly();
} | 3.68 |
framework_VAbstractSplitPanel_getSecondContainer | /**
* Gets the second region's container element.
*
* @since 7.5.1
* @return the container element
*/
protected Element getSecondContainer() {
return secondContainer;
} | 3.68 |
flink_CreditBasedPartitionRequestClientHandler_exceptionCaught | /**
* Called on exceptions in the client handler pipeline.
*
* <p>Remote exceptions are received as regular payload.
*/
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
if (cause instanceof TransportException) {
notifyAllChannelsOfErrorAndClose(cause);
} else {
final SocketAddress remoteAddr = ctx.channel().remoteAddress();
final TransportException tex;
// Improve on the connection reset by peer error message
if (cause.getMessage() != null
&& cause.getMessage().contains("Connection reset by peer")) {
tex =
new RemoteTransportException(
"Lost connection to task manager '"
+ remoteAddr
+ " [ "
+ connectionID.getResourceID().getStringWithMetadata()
+ " ] "
+ "'. "
+ "This indicates that the remote task manager was lost.",
remoteAddr,
cause);
} else {
final SocketAddress localAddr = ctx.channel().localAddress();
tex =
new LocalTransportException(
String.format(
"%s (connection to '%s [%s]')",
cause.getMessage(),
remoteAddr,
connectionID.getResourceID().getStringWithMetadata()),
localAddr,
cause);
}
notifyAllChannelsOfErrorAndClose(tex);
}
} | 3.68 |
hbase_StoreFileReader_passesBloomFilter | /**
* Checks whether the given scan passes the Bloom filter (if present). Only checks Bloom filters
* for single-row or single-row-column scans. Bloom filter checking for multi-gets is implemented
* as part of the store scanner system (see {@link StoreFileScanner#seek(Cell)} and uses the
* lower-level API {@link #passesGeneralRowBloomFilter(byte[], int, int)} and
* {@link #passesGeneralRowColBloomFilter(Cell)}.
* @param scan the scan specification. Used to determine the row, and to check whether this is
* a single-row ("get") scan.
* @param columns the set of columns. Only used for row-column Bloom filters.
* @return true if the scan with the given column set passes the Bloom filter, or if the Bloom
* filter is not applicable for the scan. False if the Bloom filter is applicable and the
* scan fails it.
*/
boolean passesBloomFilter(Scan scan, final SortedSet<byte[]> columns) {
byte[] row = scan.getStartRow();
switch (this.bloomFilterType) {
case ROW:
if (!scan.isGetScan()) {
return true;
}
return passesGeneralRowBloomFilter(row, 0, row.length);
case ROWCOL:
if (!scan.isGetScan()) {
return true;
}
if (columns != null && columns.size() == 1) {
byte[] column = columns.first();
// create the required fake key
Cell kvKey = PrivateCellUtil.createFirstOnRow(row, HConstants.EMPTY_BYTE_ARRAY, column);
return passesGeneralRowColBloomFilter(kvKey);
}
// For multi-column queries the Bloom filter is checked from the
// seekExact operation.
return true;
case ROWPREFIX_FIXED_LENGTH:
return passesGeneralRowPrefixBloomFilter(scan);
default:
if (scan.isGetScan()) {
bloomFilterMetrics.incrementEligible();
}
return true;
}
} | 3.68 |
hadoop_RBFMetrics_getDateString | /**
* Get time as a date string.
* @param time Milliseconds since 1970.
* @return String representing the date.
*/
@VisibleForTesting
static String getDateString(long time) {
if (time <= 0) {
return "-";
}
Date date = new Date(time);
SimpleDateFormat sdf = new SimpleDateFormat(DATE_FORMAT);
return sdf.format(date);
} | 3.68 |
morf_MergeStatementBuilder_set | /**
* Adds a merge expression to be used when updating existing records.
*
* @param updateExpression the merge expression, aliased as the target field name.
* @return this, for method chaining.
*/
public UpdateValuesOverrider set(AliasedFieldBuilder updateExpression) {
expressions.add(updateExpression.build());
return this;
} | 3.68 |
pulsar_AuthorizationService_canLookupAsync | /**
* Check whether the specified role can perform a lookup for the specified topic.
*
* For that the caller needs to have producer or consumer permission.
*
* @param topicName the topic to look up
* @param role the role performing the lookup
* @param authenticationData the authentication data of the caller
* @return a future completed with {@code true} if the lookup is allowed
*/
public CompletableFuture<Boolean> canLookupAsync(TopicName topicName, String role,
AuthenticationDataSource authenticationData) {
if (!this.conf.isAuthorizationEnabled()) {
return CompletableFuture.completedFuture(true);
}
return provider.isSuperUser(role, authenticationData, conf).thenComposeAsync(isSuperUser -> {
if (isSuperUser) {
return CompletableFuture.completedFuture(true);
} else {
return provider.canLookupAsync(topicName, role, authenticationData);
}
});
} | 3.68 |
hbase_MobFileName_getStartKey | /**
* Gets the hex string of the md5 for a start key.
* @return The hex string of the md5 for a start key.
*/
public String getStartKey() {
return startKey;
} | 3.68 |
flink_DoubleZeroConvergence_isConverged | /**
* Returns true, if the aggregator value is zero, false otherwise.
*
* @param iteration The number of the iteration superstep. Ignored in this case.
* @param value The aggregator value, which is compared to zero.
* @return True, if the aggregator value is zero, false otherwise.
*/
@Override
public boolean isConverged(int iteration, DoubleValue value) {
return value.getValue() == 0;
} | 3.68 |
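A quick standalone check of the criterion (the superstep numbers and values are illustrative; the superstep is ignored, per the Javadoc):

DoubleZeroConvergence criterion = new DoubleZeroConvergence();
boolean done = criterion.isConverged(3, new DoubleValue(0.0)); // true: aggregate reached zero
boolean more = criterion.isConverged(3, new DoubleValue(0.5)); // false: keep iterating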
hadoop_CommitContext_buildSubmitters | /**
* Build the submitters and thread pools if the number of committerThreads
* is greater than zero.
* This should only be called in constructors; it is synchronized to keep
* SpotBugs happy.
*/
private synchronized void buildSubmitters() {
if (committerThreads != 0) {
outerSubmitter = new PoolSubmitter(buildThreadPool(committerThreads));
}
} | 3.68 |
dubbo_ApplicationModel_setConfigManager | /**
* @deprecated only for ut
*/
@Deprecated
public void setConfigManager(ConfigManager configManager) {
this.configManager = configManager;
} | 3.68 |
hbase_VersionInfo_getUser | /**
* The user that compiled hbase.
* @return the username of the user
*/
public static String getUser() {
return Version.user;
} | 3.68 |
morf_ChangeIndex_accept | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.upgrade.SchemaChange#accept(org.alfasoftware.morf.upgrade.SchemaChangeVisitor)
*/
@Override
public void accept(SchemaChangeVisitor visitor) {
visitor.visit(this);
} | 3.68 |
flink_JobSubmissionResult_getJobID | /**
* Returns the JobID assigned to the job by the Flink runtime.
*
* @return jobID, or null if the job has been executed on a runtime without JobIDs or if the
* execution failed.
*/
public JobID getJobID() {
return jobID;
} | 3.68 |
hadoop_AzureFileSystemInstrumentation_rawBytesDownloaded | /**
* Indicate that we just downloaded some data from Azure storage.
* @param numberOfBytes The raw number of bytes downloaded (including overhead).
*/
public void rawBytesDownloaded(long numberOfBytes) {
rawBytesDownloaded.incr(numberOfBytes);
} | 3.68 |
AreaShop_CommandManager_showHelp | /**
* Shows the help page for the CommandSender.
* @param target The CommandSender to show the help to
*/
public void showHelp(CommandSender target) {
if(!target.hasPermission("areashop.help")) {
plugin.message(target, "help-noPermission");
return;
}
// Add all messages to a list
ArrayList<String> messages = new ArrayList<>();
plugin.message(target, "help-header");
plugin.message(target, "help-alias");
for(CommandAreaShop command : commands) {
String help = command.getHelp(target);
if(help != null && !help.isEmpty()) {
messages.add(help);
}
}
// Send the messages to the target
for(String message : messages) {
plugin.messageNoPrefix(target, message);
}
} | 3.68 |
hadoop_AbstractS3ACommitter_initiateJobOperation | /**
* Start the final job commit/abort commit operations.
* If configured to collect statistics,
* The IO StatisticsContext is reset.
* @param context job context
* @return a commit context through which the operations can be invoked.
* @throws IOException failure.
*/
protected CommitContext initiateJobOperation(
final JobContext context)
throws IOException {
IOStatisticsContext ioStatisticsContext =
IOStatisticsContext.getCurrentIOStatisticsContext();
CommitContext commitContext = getCommitOperations().createCommitContext(
context,
getOutputPath(),
getJobCommitThreadCount(context),
ioStatisticsContext);
commitContext.maybeResetIOStatisticsContext();
return commitContext;
} | 3.68 |
hbase_AuthMethod_getMechanismName | /** Return the SASL mechanism name */
public String getMechanismName() {
return mechanismName;
} | 3.68 |
flink_ProducerMergedPartitionFileWriter_calculateSizeAndFlushBuffers | /**
* Compute buffer's file offset and create buffers to be flushed.
*
* @param toWrite all buffers to write to create {@link
* ProducerMergedPartitionFileIndex.FlushedBuffer}s
* @param buffers receive the created {@link ProducerMergedPartitionFileIndex.FlushedBuffer}
*/
private void calculateSizeAndFlushBuffers(
List<SubpartitionBufferContext> toWrite,
List<ProducerMergedPartitionFileIndex.FlushedBuffer> buffers)
throws IOException {
List<Tuple2<Buffer, Integer>> buffersToFlush = new ArrayList<>();
long expectedBytes = 0;
for (SubpartitionBufferContext subpartitionBufferContext : toWrite) {
int subpartitionId = subpartitionBufferContext.getSubpartitionId();
for (SegmentBufferContext segmentBufferContext :
subpartitionBufferContext.getSegmentBufferContexts()) {
List<Tuple2<Buffer, Integer>> bufferAndIndexes =
segmentBufferContext.getBufferAndIndexes();
buffersToFlush.addAll(bufferAndIndexes);
for (Tuple2<Buffer, Integer> bufferWithIndex :
segmentBufferContext.getBufferAndIndexes()) {
Buffer buffer = bufferWithIndex.f0;
buffers.add(
new ProducerMergedPartitionFileIndex.FlushedBuffer(
subpartitionId,
bufferWithIndex.f1,
totalBytesWritten + expectedBytes,
buffer.readableBytes() + BufferReaderWriterUtil.HEADER_LENGTH));
expectedBytes += buffer.readableBytes() + BufferReaderWriterUtil.HEADER_LENGTH;
}
}
}
flushBuffers(buffersToFlush, expectedBytes);
buffersToFlush.forEach(bufferWithIndex -> bufferWithIndex.f0.recycleBuffer());
} | 3.68 |
hbase_StorageClusterStatusModel_setStores | /**
* @param stores the number of stores
*/
public void setStores(int stores) {
this.stores = stores;
} | 3.68 |
graphhopper_VectorTile_getKeysBytes | /**
* <pre>
* Dictionary encoding for keys
* </pre>
*
* <code>repeated string keys = 3;</code>
*/
public com.google.protobuf.ByteString
getKeysBytes(int index) {
return keys_.getByteString(index);
} | 3.68 |
AreaShop_BuyRegion_getBuyer | /**
* Get the UUID of the owner of this region.
* @return The UUID of the owner of this region
*/
public UUID getBuyer() {
String buyer = config.getString("buy.buyer");
if(buyer != null) {
try {
return UUID.fromString(buyer);
} catch(IllegalArgumentException e) {
// Incorrect UUID
}
}
return null;
} | 3.68 |
flink_NormalizedKeySorter_getIterator | /**
* Gets an iterator over all records in this buffer in their logical order.
*
* @return An iterator returning the records in their logical order.
*/
@Override
public final MutableObjectIterator<T> getIterator() {
return new MutableObjectIterator<T>() {
private final int size = size();
private int current = 0;
private int currentSegment = 0;
private int currentOffset = 0;
private MemorySegment currentIndexSegment = sortIndex.get(0);
@Override
public T next(T target) {
if (this.current < this.size) {
this.current++;
if (this.currentOffset > lastIndexEntryOffset) {
this.currentOffset = 0;
this.currentIndexSegment = sortIndex.get(++this.currentSegment);
}
long pointer =
this.currentIndexSegment.getLong(this.currentOffset) & POINTER_MASK;
this.currentOffset += indexEntrySize;
try {
return getRecordFromBuffer(target, pointer);
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
} else {
return null;
}
}
@Override
public T next() {
if (this.current < this.size) {
this.current++;
if (this.currentOffset > lastIndexEntryOffset) {
this.currentOffset = 0;
this.currentIndexSegment = sortIndex.get(++this.currentSegment);
}
long pointer = this.currentIndexSegment.getLong(this.currentOffset);
this.currentOffset += indexEntrySize;
try {
return getRecordFromBuffer(pointer);
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
} else {
return null;
}
}
};
} | 3.68 |
framework_VCalendar_setDayNames | /**
* Set the names of the week days.
*
* @param names
* The names of the days (Monday, Tuesday, ...)
*/
public void setDayNames(String[] names) {
assert (names.length == 7);
dayNames = names;
} | 3.68 |
MagicPlugin_Mage_setVelocity | // This does not actually apply velocity!
public void setVelocity(Vector velocity) {
this.velocity = velocity;
} | 3.68 |
hudi_HoodieInMemoryHashIndex_isImplicitWithStorage | /**
* Index needs to be explicitly updated after storage write.
*/
@Override
public boolean isImplicitWithStorage() {
return false;
} | 3.68 |
hadoop_StagingCommitter_taskAttemptWorkingPath | /**
* Get the work path for a task.
* @param context job/task context
* @param uuid UUID
* @return a path
* @throws IOException failure to build the path
*/
private static Path taskAttemptWorkingPath(TaskAttemptContext context,
String uuid) throws IOException {
return getTaskAttemptPath(context,
Paths.getLocalTaskAttemptTempDir(
context.getConfiguration(),
uuid,
context.getTaskAttemptID()));
} | 3.68 |
framework_GridRefreshWithGetId_equals | /**
* The class intentionally has a strange {@code hashCode()} and
* {@code equals()} implementation to verify that {@code Grid} relies on
* the bean id rather than on the bean's hashCode/equals for identification.
*
* @see Object#equals(Object)
*/
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
TestObject myObject = (TestObject) o;
if (id != myObject.id)
return false;
return name != null ? name.equals(myObject.name)
: myObject.name == null;
} | 3.68 |
hmily_HmilyMySQLUpdateStatement_getLimit | /**
* Get limit segment.
*
* @return limit segment
*/
public Optional<HmilyLimitSegment> getLimit() {
return Optional.ofNullable(limit);
} | 3.68 |
open-banking-gateway_ValidationIssue_toString | /**
* @return JSON representation of current object.
*/
@Override
public String toString() {
return "{"
+ "\"type\":\"" + type + "\""
+ ", \"scope\":\"" + scope + "\""
+ ", \"code\":\"" + code + "\""
+ ", \"captionMessage\":\"" + captionMessage + "\""
+ "}";
} | 3.68 |
streampipes_PipelineManager_getAllPipelines | /**
* Returns all pipelines
*
* @return all pipelines
*/
public static List<Pipeline> getAllPipelines() {
return StorageDispatcher.INSTANCE.getNoSqlStore().getPipelineStorageAPI().getAllPipelines();
} | 3.68 |
flink_MemoryManager_availableMemory | /**
* Returns the available amount of memory handled by this memory manager.
*
* @return The available amount of memory.
*/
public long availableMemory() {
return memoryBudget.getAvailableMemorySize();
} | 3.68 |
querydsl_AbstractEvaluatorFactory_createEvaluator | /**
* Create a new Evaluator instance
*
* @param <T>
* projection type
* @param source
* expression in Java source code form
* @param projection
* type of the source expression
* @param names
* names of the arguments
* @param types
* types of the arguments
* @param constants
*            named constant values made available to the expression
* @return a compiled Evaluator for the given source
*/
@SuppressWarnings("unchecked")
@Override
public synchronized <T> Evaluator<T> createEvaluator(String source, ClassType projection, String[] names,
Type[] types, Class<?>[] classes, Map<String, Object> constants) {
try {
final String id = toId(source, projection.getJavaClass(), types, constants.values());
Method method = cache.get(id);
if (method == null) {
Class<?> clazz;
try {
clazz = loader.loadClass(id);
} catch (ClassNotFoundException e) {
compile(source, projection, names, types, id, constants);
// reload
clazz = loader.loadClass(id);
}
method = findEvalMethod(clazz);
cache.put(id, method);
}
return new MethodEvaluator<T>(method, constants, (Class) projection.getJavaClass());
} catch (ClassNotFoundException e) {
throw new CodegenException(e);
} catch (SecurityException e) {
throw new CodegenException(e);
} catch (IOException e) {
throw new CodegenException(e);
}
} | 3.68 |
flink_ModifyKindSet_newBuilder | /** Builder for configuring and creating instances of {@link ModifyKindSet}. */
public static Builder newBuilder() {
return new Builder();
} | 3.68 |
hmily_HmilyConsistentHashLoadBalance_select | /**
* Use load balancing to select invoker.
*
* @param invocation invocation
* @return Invoker
* @throws NoInvokerException NoInvokerException
*/
@Override
public Invoker<T> select(final InvokeContext invocation) throws NoInvokerException {
long consistentHash = Math.abs(StringUtils.convertLong(invocation.getAttachment(Constants.TARS_CONSISTENT_HASH), 0));
consistentHash = consistentHash & 0xFFFFFFFFL;
ConcurrentSkipListMap<Long, Invoker<T>> conHashInvokers = conHashInvokersCache;
if (conHashInvokers != null && !conHashInvokers.isEmpty()) {
if (!conHashInvokers.containsKey(consistentHash)) {
SortedMap<Long, Invoker<T>> tailMap = conHashInvokers.tailMap(consistentHash);
if (tailMap.isEmpty()) {
consistentHash = conHashInvokers.firstKey();
} else {
consistentHash = tailMap.firstKey();
}
}
Invoker<T> invoker = conHashInvokers.get(consistentHash);
if (invoker.isAvailable()) {
return invoker;
}
ServantInvokerAliveStat stat = ServantInvokerAliveChecker.get(invoker.getUrl());
if (stat.isAlive() || (stat.getLastRetryTime() + (config.getTryTimeInterval() * 1000)) < System.currentTimeMillis()) {
LOGGER.info("try to use inactive invoker|" + invoker.getUrl().toIdentityString());
stat.setLastRetryTime(System.currentTimeMillis());
return invoker;
}
}
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(config.getSimpleObjectName() + " can't find active invoker using consistent hash loadbalance. try to use normal hash");
}
List<Invoker<T>> sortedInvokers = sortedInvokersCache;
if (sortedInvokers == null || sortedInvokers.isEmpty()) {
throw new NoInvokerException("no such active connection invoker");
}
List<Invoker<T>> list = new ArrayList<Invoker<T>>();
for (Invoker<T> invoker : sortedInvokers) {
if (!invoker.isAvailable()) {
// Invoker is shielded (unavailable); only retry it once the retry interval has elapsed
ServantInvokerAliveStat stat = ServantInvokerAliveChecker.get(invoker.getUrl());
if (stat.isAlive() || (stat.getLastRetryTime() + (config.getTryTimeInterval() * 1000)) < System.currentTimeMillis()) {
list.add(invoker);
}
} else {
list.add(invoker);
}
}
// TODO: when none of the invokers are available, consider randomly picking one
if (list.isEmpty()) {
throw new NoInvokerException(config.getSimpleObjectName() + " try to select active invoker, size=" + sortedInvokers.size() + ", no such active connection invoker");
}
Invoker<T> invoker = list.get((int) (consistentHash % list.size()));
if (!invoker.isAvailable()) {
LOGGER.info("try to use inactive invoker|" + invoker.getUrl().toIdentityString());
ServantInvokerAliveChecker.get(invoker.getUrl()).setLastRetryTime(System.currentTimeMillis());
}
return HmilyLoadBalanceUtils.doSelect(invoker, sortedInvokersCache);
} | 3.68 |
framework_VScrollTable_getNavigationPageUpKey | /**
* Get the key that moves the selection one page up in the table. By default
* this is the Page Up key but by overriding this you can change the key to
* whatever you want.
*
* @return the key code that moves the selection one page up
*/
protected int getNavigationPageUpKey() {
return KeyCodes.KEY_PAGEUP;
} | 3.68 |
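A hedged sketch of the override the Javadoc suggests (the subclass name and the chosen replacement key are illustrative):

public class MyScrollTable extends VScrollTable {
    @Override
    protected int getNavigationPageUpKey() {
        // remap "page up" navigation to the 'U' key instead of Page Up
        return KeyCodes.KEY_U;
    }
}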
AreaShop_GeneralRegion_getSignsFeature | /**
* Get the signs feature to manipulate and update signs.
* @return The SignsFeature of this region
*/
public SignsFeature getSignsFeature() {
return getFeature(SignsFeature.class);
} | 3.68 |
dubbo_URLParam_getRawParam | /**
* get raw string like parameters
*
* @return raw string like parameters
*/
public String getRawParam() {
if (StringUtils.isNotEmpty(rawParam)) {
return rawParam;
} else {
// empty if parameters have been modified or init by Map
return toString();
}
} | 3.68 |
hadoop_StagingCommitter_getPendingTaskAttemptsPath | /**
* Compute the path where the output of pending task attempts are stored.
* @param context the context of the job with pending tasks.
* @return the path where the output of pending task attempts are stored.
*/
private static Path getPendingTaskAttemptsPath(JobContext context, Path out) {
return new Path(getJobAttemptPath(context, out), TEMPORARY);
} | 3.68 |
framework_VaadinSession_getSession | /**
* Gets the underlying session to which this service session is currently
* associated.
*
* @return the wrapped session for this context
*/
public WrappedSession getSession() {
/*
* This is used to fetch the underlying session and there is no need for
* having a lock when doing this. On the contrary this is sometimes done
* to be able to lock the session.
*/
return session;
} | 3.68 |
hadoop_ParentQueue_addDynamicChildQueue | // New method to add child queue
private CSQueue addDynamicChildQueue(String childQueuePath, boolean isLeaf)
throws SchedulerDynamicEditException {
writeLock.lock();
try {
// Check if queue exists, if queue exists, write a warning message (this
// should not happen, since it will be handled before calling this method)
// , but we will move on.
CSQueue queue =
queueContext.getQueueManager().getQueueByFullName(
childQueuePath);
if (queue != null) {
LOG.warn(
"This should not happen, trying to create queue=" + childQueuePath
+ ", however the queue already exists");
return queue;
}
// Check if the max queue limit is exceeded.
int maxQueues = queueContext.getConfiguration().
getAutoCreatedQueuesV2MaxChildQueuesLimit(getQueuePath());
if (childQueues.size() >= maxQueues) {
throw new SchedulerDynamicEditException(
"Cannot auto create queue " + childQueuePath + ". Max Child "
+ "Queue limit exceeded which is configured as: " + maxQueues
+ " and number of child queues is: " + childQueues.size());
}
// First, check if we allow creation or not
boolean weightsAreUsed = false;
try {
weightsAreUsed = getCapacityConfigurationTypeForQueues(childQueues)
== QueueCapacityType.WEIGHT;
} catch (IOException e) {
LOG.warn("Caught Exception during auto queue creation", e);
}
if (!weightsAreUsed && queueContext.getConfiguration().isLegacyQueueMode()) {
throw new SchedulerDynamicEditException(
"Trying to create new queue=" + childQueuePath
+ " but not all the queues under parent=" + this.getQueuePath()
+ " are using weight-based capacity. Failed to created queue");
}
CSQueue newQueue = createNewQueue(childQueuePath, isLeaf);
this.childQueues.add(newQueue);
updateLastSubmittedTimeStamp();
// Call updateClusterResource.
// Which will deal with all effectiveMin/MaxResource
// Calculation
this.updateClusterResource(queueContext.getClusterResource(),
new ResourceLimits(queueContext.getClusterResource()));
return newQueue;
} finally {
writeLock.unlock();
}
} | 3.68 |
framework_LegacyLocatorStrategy_getDOMPathForElement | /**
* Generates a String locator using domChild[x] parts for the element
* relative to the baseElement.
*
* @param element
* The target element
* @param baseElement
* The starting point for the locator. The generated path is
* relative to this element.
* @return A String locator that can be used to locate the target element
* using {@link #getElementByDOMPath(Element, String)} or null if
* the locator String cannot be created.
*/
private String getDOMPathForElement(Element element, Element baseElement) {
Element e = element;
String path = "";
while (true) {
int childIndex = -1;
Element siblingIterator = e;
while (siblingIterator != null) {
childIndex++;
siblingIterator = siblingIterator.getPreviousSiblingElement()
.cast();
}
path = PARENTCHILD_SEPARATOR + "domChild[" + childIndex + "]"
+ path;
JavaScriptObject parent = e.getParentElement();
if (parent == null) {
return null;
}
// The parent check is a work around for Firefox 15 which fails to
// compare elements properly (#9534)
if (parent == baseElement) {
break;
}
e = parent.cast();
}
return path;
} | 3.68 |
hbase_BulkLoadHFilesTool_tableExists | /**
* @throws TableNotFoundException if table does not exist.
*/
private void tableExists(AsyncClusterConnection conn, TableName tableName) throws IOException {
if (!FutureUtils.get(conn.getAdmin().tableExists(tableName))) {
throwAndLogTableNotFoundException(tableName);
}
} | 3.68 |
flink_TSetClientInfoReq_isSetSessionHandle | /**
* Returns true if field sessionHandle is set (has been assigned a value) and false otherwise
*/
public boolean isSetSessionHandle() {
return this.sessionHandle != null;
} | 3.68 |