name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68)
---|---|---|
morf_AbstractSqlDialectTest_expectedSqlForMathOperations8 | /**
* @return expected SQL for math operation 8
*/
protected String expectedSqlForMathOperations8() {
return "a + b + c + d + e";
} | 3.68 |
flink_StatusWatermarkValve_adjustAlignedChannelStatuses | /**
* Adjust the {@link #alignedChannelStatuses} when an element({@link InputChannelStatus}) in it
* was modified. The {@link #alignedChannelStatuses} is a priority queue, when an element in it
* was modified, we need to adjust the element's position to ensure its priority order.
*
* @param inputChannelStatus the modified input channel status
*/
private void adjustAlignedChannelStatuses(InputChannelStatus inputChannelStatus) {
alignedChannelStatuses.adjustModifiedElement(inputChannelStatus);
} | 3.68 |
hadoop_JWTRedirectAuthenticationHandler_validateAudiences | /**
* Validate whether any of the accepted audience claims is present in the
* issued token claims list for audience. Override this method in subclasses
* in order to customize the audience validation behavior.
*
* @param jwtToken
* the JWT token where the allowed audiences will be found
* @return true if an expected audience is present, otherwise false
*/
protected boolean validateAudiences(SignedJWT jwtToken) {
boolean valid = false;
try {
List<String> tokenAudienceList = jwtToken.getJWTClaimsSet()
.getAudience();
// if there were no expected audiences configured then just
// consider any audience acceptable
if (audiences == null) {
valid = true;
} else {
// if any of the configured audiences is found then consider it
// acceptable
for (String aud : tokenAudienceList) {
if (audiences.contains(aud)) {
LOG.debug("JWT token audience has been successfully validated");
valid = true;
break;
}
}
if (!valid) {
LOG.warn("JWT audience validation failed.");
}
}
} catch (ParseException pe) {
LOG.warn("Unable to parse the JWT token.", pe);
}
return valid;
} | 3.68 |
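The Javadoc above invites subclasses to customize audience validation. A minimal sketch of such an override follows, assuming `JWTRedirectAuthenticationHandler` and the Nimbus `SignedJWT` class are on the classpath; the subclass name and import paths are illustrative assumptions, not part of the original snippet:

```java
import java.text.ParseException;
import java.util.List;

import com.nimbusds.jwt.SignedJWT;
import org.apache.hadoop.security.authentication.server.JWTRedirectAuthenticationHandler;

// Hypothetical subclass: rejects tokens that carry no audience claim at all,
// instead of accepting them when no expected audiences are configured.
public class StrictAudienceAuthenticationHandler extends JWTRedirectAuthenticationHandler {
  @Override
  protected boolean validateAudiences(SignedJWT jwtToken) {
    try {
      List<String> tokenAudiences = jwtToken.getJWTClaimsSet().getAudience();
      if (tokenAudiences == null || tokenAudiences.isEmpty()) {
        return false; // stricter than the default behaviour shown above
      }
    } catch (ParseException pe) {
      return false;
    }
    // Delegate the actual audience matching to the default implementation.
    return super.validateAudiences(jwtToken);
  }
}
```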
flink_PropertiesUtil_getLong | /**
 * Get a long from the properties. This method only logs a warning if the value is not a valid long.
*
* @param config Properties
* @param key key in Properties
* @param defaultValue default value if value is not set
* @return default or value of key
*/
public static long getLong(Properties config, String key, long defaultValue, Logger logger) {
try {
return getLong(config, key, defaultValue);
} catch (IllegalArgumentException iae) {
logger.warn(iae.getMessage());
return defaultValue;
}
} | 3.68 |
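For illustration, a small usage sketch of this overload; the import paths are assumed and the `fetch.size` key is made up:

```java
import java.util.Properties;

import org.apache.flink.util.PropertiesUtil; // package assumed
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class GetLongSketch {
  private static final Logger LOG = LoggerFactory.getLogger(GetLongSketch.class);

  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("fetch.size", "not-a-number");
    // Falls back to the default (1024) and only logs a warning instead of failing.
    long fetchSize = PropertiesUtil.getLong(props, "fetch.size", 1024L, LOG);
    System.out.println(fetchSize); // 1024
  }
}
```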
flink_FlinkExtendedParser_parseFlinkExtendedCommand | /**
 * Converts a statement that matches one of Flink's extended commands to an {@link Operation}.
 *
 * @return the operation for Flink's extended command, or empty if the statement does not match
 *     any extended command.
*/
public static Optional<Operation> parseFlinkExtendedCommand(String statement) {
for (ExtendedParseStrategy strategy : PARSE_STRATEGIES) {
if (strategy.match(statement)) {
return Optional.of(strategy.convert(statement));
}
}
return Optional.empty();
} | 3.68 |
querydsl_AbstractFetchableMongodbQuery_fetchResults | /**
 * Fetch results with the specified fields
*
* @param paths fields to return
* @return results
*/
public QueryResults<K> fetchResults(Path<?>... paths) {
getQueryMixin().setProjection(paths);
return fetchResults();
} | 3.68 |
querydsl_BeanPath_createArray | /**
* Create a new array path
*
* @param <A>
* @param property property name
* @param type property type
* @return property path
*/
protected <A, E> ArrayPath<A, E> createArray(String property, Class<? super A> type) {
return add(new ArrayPath<A, E>(type, forProperty(property)));
} | 3.68 |
hbase_ReplicationMarkerChore_getRowKey | /**
* Creates a rowkey with region server name and timestamp.
* @param serverName region server name
 * @param timestamp timestamp
 * @return the row key as bytes
*/
public static byte[] getRowKey(String serverName, long timestamp) {
// Converting to string since this helps to see the timestamp in a readable format when using
// hbase shell commands.
String timestampStr = String.valueOf(timestamp);
final String rowKeyStr = serverName + DELIMITER + timestampStr;
return Bytes.toBytes(rowKeyStr);
} | 3.68 |
hmily_TransactionContext_setOneFinally | /**
* Sets one finally.
*
* @param oneFinally the one finally
*/
public void setOneFinally(final Finally oneFinally) {
this.oneFinally = oneFinally;
} | 3.68 |
flink_AsyncSinkWriter_nonBlockingFlush | /**
* Determines if a call to flush will be non-blocking (i.e. {@code inFlightRequestsCount} is
* strictly smaller than {@code maxInFlightRequests}). Also requires one of the following
 * conditions to be met:
*
* <ul>
* <li>The number of elements buffered is greater than or equal to the {@code maxBatchSize}
* <li>The sum of the size in bytes of all records in the buffer is greater than or equal to
* {@code maxBatchSizeInBytes}
* </ul>
*/
private void nonBlockingFlush() throws InterruptedException {
while (!rateLimitingStrategy.shouldBlock(createRequestInfo())
&& (bufferedRequestEntries.size() >= getNextBatchSizeLimit()
|| bufferedRequestEntriesTotalSizeInBytes >= maxBatchSizeInBytes)) {
flush();
}
} | 3.68 |
hadoop_AbstractS3ACommitter_getText | /**
* Source for messages.
* @return text
*/
public String getText() {
return text;
} | 3.68 |
hadoop_DiskBalancerWorkItem_setErrorCount | /**
* Sets the Error counts for this step.
*
* @param errorCount long.
*/
public void setErrorCount(long errorCount) {
this.errorCount = errorCount;
} | 3.68 |
hbase_ConnectionUtils_calcPriority | /**
* Select the priority for the rpc call.
* <p/>
* The rules are:
* <ol>
* <li>If user set a priority explicitly, then just use it.</li>
* <li>For system table, use {@link HConstants#SYSTEMTABLE_QOS}.</li>
* <li>For other tables, use {@link HConstants#NORMAL_QOS}.</li>
* </ol>
* @param priority the priority set by user, can be {@link HConstants#PRIORITY_UNSET}.
* @param tableName the table we operate on
*/
static int calcPriority(int priority, TableName tableName) {
if (priority != HConstants.PRIORITY_UNSET) {
return priority;
} else {
return getPriority(tableName);
}
} | 3.68 |
flink_HiveTableUtil_checkAcidTable | /**
* Check whether to read or write on the hive ACID table.
*
* @param tableOptions Hive table options.
* @param tablePath Identifier table path.
* @throws FlinkHiveException Thrown, if the source or sink table is transactional.
*/
public static void checkAcidTable(Map<String, String> tableOptions, ObjectPath tablePath) {
String tableIsTransactional = tableOptions.get("transactional");
if (tableIsTransactional == null) {
tableIsTransactional = tableOptions.get("transactional".toUpperCase());
}
if (tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true")) {
throw new FlinkHiveException(
String.format("Reading or writing ACID table %s is not supported.", tablePath));
}
} | 3.68 |
pulsar_AuthenticationSasl_setAuthParams | // Use the passed-in parameters to configure and get the jaasCredentialsContainer.
private void setAuthParams(Map<String, String> authParams) throws PulsarClientException {
this.configuration = authParams;
// read section from config files of kerberos
this.loginContextName = authParams
.getOrDefault(JAAS_CLIENT_SECTION_NAME, JAAS_DEFAULT_CLIENT_SECTION_NAME);
this.serverType = authParams
.getOrDefault(SASL_SERVER_TYPE, SASL_BROKER_PROTOCOL);
// Initialize the static jaasCredentialsContainer that is shared among clients.
if (!initializedJAAS) {
synchronized (this) {
if (jaasCredentialsContainer == null) {
log.info("JAAS loginContext is: {}.", loginContextName);
try {
jaasCredentialsContainer = new JAASCredentialsContainer(
loginContextName,
new ClientCallbackHandler(),
configuration);
initializedJAAS = true;
} catch (LoginException e) {
log.error("JAAS login in client failed", e);
throw new PulsarClientException(e);
}
}
}
}
} | 3.68 |
graphhopper_StorableProperties_put | /**
 * Converts the value to a string before saving it.
*/
public synchronized StorableProperties put(String key, Object val) {
if (!key.equals(toLowerCase(key)))
throw new IllegalArgumentException("Do not use upper case keys (" + key + ") for StorableProperties since 0.7");
map.put(key, val.toString());
return this;
} | 3.68 |
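A brief usage sketch of the lower-case-key rule described above, assuming an already-constructed `StorableProperties` instance; the keys used and the import path are illustrative:

```java
import com.graphhopper.storage.StorableProperties; // package assumed

public class StorablePropertiesSketch {
  static void example(StorableProperties props) {
    // Stored via toString(), i.e. as the string "123456789".
    props.put("datareader.data.date", 123456789L);

    // Upper-case characters in the key are rejected since 0.7:
    // props.put("DataReader.Data.Date", "x"); // would throw IllegalArgumentException
  }
}
```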
hadoop_PathHandle_toByteArray | /**
* @return Serialized form in bytes.
*/
default byte[] toByteArray() {
ByteBuffer bb = bytes();
byte[] ret = new byte[bb.remaining()];
bb.get(ret);
return ret;
} | 3.68 |
hadoop_Chain_addMapper | /**
* Adds a Mapper class to the chain job.
*
* <p>
* The configuration properties of the chain job have precedence over the
* configuration properties of the Mapper.
*
* @param isMap
* indicates if the Chain is for a Mapper or for a Reducer.
* @param job
* chain job.
* @param klass
* the Mapper class to add.
* @param inputKeyClass
* mapper input key class.
* @param inputValueClass
* mapper input value class.
* @param outputKeyClass
* mapper output key class.
* @param outputValueClass
* mapper output value class.
* @param mapperConf
* a configuration for the Mapper class. It is recommended to use a
* Configuration without default values using the
* <code>Configuration(boolean loadDefaults)</code> constructor with
* FALSE.
*/
@SuppressWarnings("unchecked")
protected static void addMapper(boolean isMap, Job job,
Class<? extends Mapper> klass, Class<?> inputKeyClass,
Class<?> inputValueClass, Class<?> outputKeyClass,
Class<?> outputValueClass, Configuration mapperConf) {
String prefix = getPrefix(isMap);
Configuration jobConf = job.getConfiguration();
// if a reducer chain check the Reducer has been already set
checkReducerAlreadySet(isMap, jobConf, prefix, true);
// set the mapper class
int index = getIndex(jobConf, prefix);
jobConf.setClass(prefix + CHAIN_MAPPER_CLASS + index, klass, Mapper.class);
validateKeyValueTypes(isMap, jobConf, inputKeyClass, inputValueClass,
outputKeyClass, outputValueClass, index, prefix);
setMapperConf(isMap, jobConf, inputKeyClass, inputValueClass,
outputKeyClass, outputValueClass, mapperConf, index, prefix);
} | 3.68 |
querydsl_Expressions_dateTemplate | /**
* Create a new Template expression
*
* @param cl type of expression
* @param template template
* @param args template parameters
* @return template expression
*/
public static <T extends Comparable<?>> DateTemplate<T> dateTemplate(Class<? extends T> cl, Template template, List<?> args) {
return new DateTemplate<T>(cl, template, args);
} | 3.68 |
hbase_Bytes_zero | /**
* Fill given array with zeros at the specified position.
*/
public static void zero(byte[] b, int offset, int length) {
checkPositionIndex(offset, b.length, "offset");
checkArgument(length > 0, "length must be greater than 0");
checkPositionIndex(offset + length, b.length, "offset + length");
Arrays.fill(b, offset, offset + length, (byte) 0);
} | 3.68 |
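A minimal usage sketch of the range checks above, assuming the usual `org.apache.hadoop.hbase.util` package:

```java
import java.util.Arrays;

import org.apache.hadoop.hbase.util.Bytes; // package assumed

public class BytesZeroSketch {
  public static void main(String[] args) {
    byte[] buf = {1, 2, 3, 4, 5, 6, 7, 8};
    Bytes.zero(buf, 2, 4); // zero 4 bytes starting at offset 2
    System.out.println(Arrays.toString(buf)); // [1, 2, 0, 0, 0, 0, 7, 8]
    // Bytes.zero(buf, 6, 4) would fail the "offset + length" position check,
    // and a non-positive length fails the length check.
  }
}
```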
graphhopper_ElevationProvider_getEle | /**
* @param node Node to read
 * @return the height in meters, or Double.NaN if invalid
*/
default double getEle(ReaderNode node) {
return getEle(node.getLat(), node.getLon());
} | 3.68 |
hadoop_AMRMProxyTokenSecretManager_retrievePassword | /**
* Retrieve the password for the given {@link AMRMTokenIdentifier}. Used by
* RPC layer to validate a remote {@link AMRMTokenIdentifier}.
*/
@Override
public byte[] retrievePassword(AMRMTokenIdentifier identifier)
throws InvalidToken {
this.readLock.lock();
try {
ApplicationAttemptId applicationAttemptId =
identifier.getApplicationAttemptId();
LOG.debug("Trying to retrieve password for {}", applicationAttemptId);
if (!appAttemptSet.contains(applicationAttemptId)) {
throw new InvalidToken(applicationAttemptId
+ " not found in AMRMProxyTokenSecretManager.");
}
if (identifier.getKeyId() == this.currentMasterKey.getMasterKey()
.getKeyId()) {
return createPassword(identifier.getBytes(),
this.currentMasterKey.getSecretKey());
} else if (nextMasterKey != null
&& identifier.getKeyId() == this.nextMasterKey.getMasterKey()
.getKeyId()) {
return createPassword(identifier.getBytes(),
this.nextMasterKey.getSecretKey());
}
throw new InvalidToken("Invalid AMRMToken from "
+ applicationAttemptId);
} finally {
this.readLock.unlock();
}
} | 3.68 |
rocketmq-connect_WorkerSinkTask_commitOffsets | /**
 * Commit offsets.
 *
 * @param now     current time in milliseconds
 * @param closing whether the task is being closed
*/
private void commitOffsets(long now, boolean closing) {
commitOffsets(now, closing, messageQueues);
} | 3.68 |
framework_VaadinService_reinitializeSession | /**
* Discards the current session and creates a new session with the same
* contents. The purpose of this is to introduce a new session key in order
* to avoid session fixation attacks.
* <p>
* Please note that this method makes certain assumptions about how data is
* stored in the underlying session and may thus not be compatible with some
* environments.
*
* @param request
* The Vaadin request for which the session should be
* reinitialized
*/
public static void reinitializeSession(VaadinRequest request) {
WrappedSession oldSession = request.getWrappedSession();
// Stores all attributes (security key, reference to this context
// instance) so they can be added to the new session
Set<String> attributeNames = oldSession.getAttributeNames();
Map<String, Object> attrs = new HashMap<>(attributeNames.size() * 2);
for (String name : attributeNames) {
Object value = oldSession.getAttribute(name);
if (value instanceof VaadinSession) {
// set flag to avoid cleanup
VaadinSession serviceSession = (VaadinSession) value;
serviceSession.lock();
try {
serviceSession.setAttribute(
PRESERVE_UNBOUND_SESSION_ATTRIBUTE, Boolean.TRUE);
} finally {
serviceSession.unlock();
}
}
attrs.put(name, value);
}
// Invalidate the current session
oldSession.invalidate();
// Create a new session
WrappedSession newSession = request.getWrappedSession();
// Restores all attributes (security key, reference to this context
// instance)
for (String name : attrs.keySet()) {
Object value = attrs.get(name);
newSession.setAttribute(name, value);
// Ensure VaadinServiceSession knows where it's stored
if (value instanceof VaadinSession) {
VaadinSession serviceSession = (VaadinSession) value;
VaadinService service = serviceSession.getService();
// Use the same lock instance in the new session
service.setSessionLock(newSession,
serviceSession.getLockInstance());
service.storeSession(serviceSession, newSession);
serviceSession.lock();
try {
serviceSession.setAttribute(
PRESERVE_UNBOUND_SESSION_ATTRIBUTE, null);
} finally {
serviceSession.unlock();
}
}
}
} | 3.68 |
flink_DataType_getConversionClass | /**
* Returns the corresponding conversion class for representing values. If no conversion class
* was defined manually, the default conversion defined by the logical type is used.
*
* @see LogicalType#getDefaultConversion()
* @return the expected conversion class
*/
public Class<?> getConversionClass() {
return conversionClass;
} | 3.68 |
hudi_ClusteringCommitSink_commitIfNecessary | /**
* Condition to commit: the commit buffer has equal size with the clustering plan operations
* and all the clustering commit event {@link ClusteringCommitEvent} has the same clustering instant time.
*
* @param instant Clustering commit instant time
* @param events Commit events ever received for the instant
*/
private void commitIfNecessary(String instant, Collection<ClusteringCommitEvent> events) {
HoodieClusteringPlan clusteringPlan = clusteringPlanCache.computeIfAbsent(instant, k -> {
try {
Option<Pair<HoodieInstant, HoodieClusteringPlan>> clusteringPlanOption = ClusteringUtils.getClusteringPlan(
this.writeClient.getHoodieTable().getMetaClient(), HoodieTimeline.getReplaceCommitInflightInstant(instant));
return clusteringPlanOption.get().getRight();
} catch (Exception e) {
throw new HoodieException(e);
}
});
boolean isReady = clusteringPlan.getInputGroups().size() == events.size();
if (!isReady) {
return;
}
if (events.stream().anyMatch(ClusteringCommitEvent::isFailed)) {
try {
// handle failure case
ClusteringUtil.rollbackClustering(table, writeClient, instant);
} finally {
// remove commitBuffer to avoid obsolete metadata commit
reset(instant);
}
return;
}
try {
doCommit(instant, clusteringPlan, events);
} catch (Throwable throwable) {
// make it fail-safe
LOG.error("Error while committing clustering instant: " + instant, throwable);
} finally {
// reset the status
reset(instant);
}
} | 3.68 |
framework_HierarchyMapper_getFilter | /**
* Gets the current filter.
*
* @return the filter
*/
public F getFilter() {
return filter;
} | 3.68 |
hbase_AsyncRpcRetryingCallerFactory_scanSingleRegion | /**
* Create retry caller for scanning a region.
*/
public ScanSingleRegionCallerBuilder scanSingleRegion() {
return new ScanSingleRegionCallerBuilder();
} | 3.68 |
hbase_MetricsRegionServer_incrementRegionSizeReportingChoreTime | /**
* @see MetricsRegionServerQuotaSource#incrementRegionSizeReportingChoreTime(long)
*/
public void incrementRegionSizeReportingChoreTime(long time) {
quotaSource.incrementRegionSizeReportingChoreTime(time);
} | 3.68 |
hbase_TableHFileArchiveTracker_addAndReWatchTable | /**
 * Add this table to the tracker and then set a watch on that node.
 * <p>
 * Handles the situation where the table is deleted between the update and resetting the watch
 * by stopping tracking of the table via {@link #safeStopTrackingTable(String)}
* @param tableZnode full zookeeper path to the table to be added
* @throws KeeperException if an unexpected zk exception occurs
*/
private void addAndReWatchTable(String tableZnode) throws KeeperException {
getMonitor().addTable(ZKUtil.getNodeName(tableZnode));
// re-add a watch to the table created
// and check to make sure it wasn't deleted
if (!ZKUtil.watchAndCheckExists(watcher, tableZnode)) {
safeStopTrackingTable(tableZnode);
}
} | 3.68 |
flink_CommittableCollector_ofLegacy | /**
* Creates a {@link CommittableCollector} for a list of committables. This method is mainly used
* to create a collector from the state of Sink V1.
*
* @param committables list of committables
* @param metricGroup storing the committable metrics
* @param <CommT> type of committables
* @return {@link CommittableCollector}
*/
static <CommT> CommittableCollector<CommT> ofLegacy(
List<CommT> committables, SinkCommitterMetricGroup metricGroup) {
CommittableCollector<CommT> committableCollector =
new CommittableCollector<>(0, 1, metricGroup);
// add a checkpoint with the lowest checkpoint id, this will be merged into the next
// checkpoint data, subtask id is arbitrary
CommittableSummary<CommT> summary =
new CommittableSummary<>(
0,
1,
InitContext.INITIAL_CHECKPOINT_ID,
committables.size(),
committables.size(),
0);
committableCollector.addSummary(summary);
committables.forEach(
c -> {
final CommittableWithLineage<CommT> committableWithLineage =
new CommittableWithLineage<>(c, InitContext.INITIAL_CHECKPOINT_ID, 0);
committableCollector.addCommittable(committableWithLineage);
});
return committableCollector;
} | 3.68 |
hudi_BaseHoodieWriteClient_logCompact | /**
 * Ensures the compaction instant is in the expected state and performs log compaction for the workload stored in the instant time.
 *
 * @param logCompactionInstantTime Log compaction instant time
 * @param shouldComplete Whether to complete the commit once log compaction finishes
 * @return Collection of write statuses
*/
protected HoodieWriteMetadata<O> logCompact(String logCompactionInstantTime, boolean shouldComplete) {
HoodieTable table = createTable(config, context.getHadoopConf().get());
preWrite(logCompactionInstantTime, WriteOperationType.LOG_COMPACT, table.getMetaClient());
return tableServiceClient.logCompact(logCompactionInstantTime, shouldComplete);
} | 3.68 |
hbase_HttpServer_isInstrumentationAccessAllowed | /**
 * Checks whether the user has privileges to access the instrumentation servlets.
* <p>
* If <code>hadoop.security.instrumentation.requires.admin</code> is set to FALSE (default value)
* it always returns TRUE.
* </p>
* <p>
* If <code>hadoop.security.instrumentation.requires.admin</code> is set to TRUE it will check
* that if the current user is in the admin ACLS. If the user is in the admin ACLs it returns
* TRUE, otherwise it returns FALSE.
* </p>
* @param servletContext the servlet context.
* @param request the servlet request.
* @param response the servlet response.
 * @return TRUE/FALSE based on the logic described above.
*/
public static boolean isInstrumentationAccessAllowed(ServletContext servletContext,
HttpServletRequest request, HttpServletResponse response) throws IOException {
Configuration conf = (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);
boolean access = true;
boolean adminAccess = conf
.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, false);
if (adminAccess) {
access = hasAdministratorAccess(servletContext, request, response);
}
return access;
} | 3.68 |
hbase_RawCell_checkForTagsLength | /**
* Check the length of tags. If it is invalid, throw IllegalArgumentException
* @param tagsLength the given length of tags
 * @throws IllegalArgumentException if tagsLength is invalid
*/
public static void checkForTagsLength(int tagsLength) {
if (tagsLength > MAX_TAGS_LENGTH) {
throw new IllegalArgumentException("tagslength " + tagsLength + " > " + MAX_TAGS_LENGTH);
}
} | 3.68 |
querydsl_GenericExporter_setSerializerConfig | /**
* Set the serializer configuration to use
*
 * @param serializerConfig the serializer configuration to use
*/
public void setSerializerConfig(SerializerConfig serializerConfig) {
this.serializerConfig = serializerConfig;
} | 3.68 |
hbase_ByteBufferUtils_readCompressedInt | /**
* Read integer from buffer coded in 7 bits and increment position.
* @return Read integer.
*/
public static int readCompressedInt(ByteBuffer buffer) {
byte b = buffer.get();
if ((b & NEXT_BIT_MASK) != 0) {
return (b & VALUE_MASK) + (readCompressedInt(buffer) << NEXT_BIT_SHIFT);
}
return b & VALUE_MASK;
} | 3.68 |
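A worked example of the decoding, assuming the usual 7-bit varint constants (VALUE_MASK = 0x7F, NEXT_BIT_MASK = 0x80, NEXT_BIT_SHIFT = 7), which are not shown in the snippet:

```java
import java.nio.ByteBuffer;

import org.apache.hadoop.hbase.util.ByteBufferUtils; // package assumed

public class ReadCompressedIntSketch {
  public static void main(String[] args) {
    // 300 = 0b10_0101100: low 7 bits are 0x2C with the continuation bit set, then 0x02.
    ByteBuffer buffer = ByteBuffer.wrap(new byte[] {(byte) 0xAC, 0x02});
    int value = ByteBufferUtils.readCompressedInt(buffer); // (0xAC & 0x7F) + (2 << 7) = 300
    System.out.println(value); // 300, and the buffer position has advanced by 2
  }
}
```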
hbase_AsyncRegionLocationCache_removeForServer | /**
* Removes serverName from all locations in the cache, fully removing any RegionLocations which
* are empty after removing the server from it.
* @param serverName server to remove from locations
*/
public synchronized void removeForServer(ServerName serverName) {
for (Map.Entry<byte[], RegionLocations> entry : cache.entrySet()) {
byte[] regionName = entry.getKey();
RegionLocations locs = entry.getValue();
RegionLocations newLocs = locs.removeByServer(serverName);
if (locs == newLocs) {
continue;
}
if (newLocs.isEmpty()) {
cache.remove(regionName, locs);
} else {
cache.put(regionName, newLocs);
}
}
} | 3.68 |
hadoop_AbstractManifestData_marshallPath | /**
* Convert a path to a string which can be included in the JSON.
* @param path path
* @return a string value, or, if path==null, null.
*/
public static String marshallPath(@Nullable Path path) {
return path != null
? path.toUri().toString()
: null;
} | 3.68 |
flink_DefaultContainerizedExternalSystem_builder | /**
* Get a builder for {@link DefaultContainerizedExternalSystem}.
*
* @param <C> Type of underlying container
* @return An instance of builder
*/
public static <C extends GenericContainer<C>> Builder<C> builder() {
return new Builder<>();
} | 3.68 |
flink_StreamRecord_getTimestamp | /** Returns the timestamp associated with this stream value in milliseconds. */
public long getTimestamp() {
if (hasTimestamp) {
return timestamp;
} else {
return Long.MIN_VALUE;
            // throw new IllegalStateException(
            //         "Record has no timestamp. Is the time characteristic set to 'ProcessingTime', or " +
            //         "did you forget to call 'DataStream.assignTimestampsAndWatermarks(...)'?");
}
} | 3.68 |
morf_AbstractSqlDialectTest_expectedHints4b | /**
 * @return The expected SQL for the {@link InsertStatement#useParallelDml()} directive.
*/
protected String expectedHints4b() {
return "INSERT INTO " + tableName("Foo") + " SELECT a, b FROM " + tableName("Foo_1");
} | 3.68 |
hbase_ColumnRangeFilter_getMaxColumn | /** Returns the max column range for the filter */
public byte[] getMaxColumn() {
return this.maxColumn;
} | 3.68 |
hbase_AsyncRegionLocationCache_getAll | /**
* Returns all cached RegionLocations
*/
public Collection<RegionLocations> getAll() {
return Collections.unmodifiableCollection(cache.values());
} | 3.68 |
framework_GridElement_getHeaderCells | /**
* Gets list of header cell elements on given row.
*
* @param rowIndex
* Row index
* @return Header cell elements on given row.
*/
public List<GridCellElement> getHeaderCells(int rowIndex) {
List<GridCellElement> headers = new ArrayList<GridCellElement>();
for (TestBenchElement e : TestBenchElement.wrapElements(
getSubPart("#header[" + rowIndex + "]").findElements(
By.xpath("./th")),
getCommandExecutor())) {
headers.add(e.wrap(GridCellElement.class));
}
return headers;
} | 3.68 |
rocketmq-connect_WrapperStatusListener_onDeletion | /**
* Invoked after the task has been deleted. Can be called if the
* connector tasks have been reduced, or if the connector itself has
* been deleted.
*
* @param id The id of the task
*/
@Override
public void onDeletion(ConnectorTaskId id) {
managementService.put(new TaskStatus(id, TaskStatus.State.DESTROYED, workerId, generation()));
} | 3.68 |
morf_AbstractSqlDialectTest_expectedBlobLiteral | /**
* @param value the blob value to translate.
* @return The expected blob literal.
*/
protected String expectedBlobLiteral(String value) {
return String.format("'%s'", value);
} | 3.68 |
hadoop_ReadaheadPool_readaheadStream | /**
* Issue a request to readahead on the given file descriptor.
*
* @param identifier a textual identifier that will be used in error
* messages (e.g. the file name)
* @param fd the file descriptor to read ahead
* @param curPos the current offset at which reads are being issued
* @param readaheadLength the configured length to read ahead
* @param maxOffsetToRead the maximum offset that will be readahead
* (useful if, for example, only some segment of the file is
* requested by the user). Pass {@link Long#MAX_VALUE} to allow
* readahead to the end of the file.
* @param lastReadahead the result returned by the previous invocation
* of this function on this file descriptor, or null if this is
* the first call
* @return an object representing this outstanding request, or null
* if no readahead was performed
*/
public ReadaheadRequest readaheadStream(
String identifier,
FileDescriptor fd,
long curPos,
long readaheadLength,
long maxOffsetToRead,
ReadaheadRequest lastReadahead) {
Preconditions.checkArgument(curPos <= maxOffsetToRead,
"Readahead position %s higher than maxOffsetToRead %s",
curPos, maxOffsetToRead);
if (readaheadLength <= 0) {
return null;
}
long lastOffset = Long.MIN_VALUE;
if (lastReadahead != null) {
lastOffset = lastReadahead.getOffset();
}
// trigger each readahead when we have reached the halfway mark
// in the previous readahead. This gives the system time
// to satisfy the readahead before we start reading the data.
long nextOffset = lastOffset + readaheadLength / 2;
if (curPos >= nextOffset) {
// cancel any currently pending readahead, to avoid
// piling things up in the queue. Each reader should have at most
// one outstanding request in the queue.
if (lastReadahead != null) {
lastReadahead.cancel();
lastReadahead = null;
}
long length = Math.min(readaheadLength,
maxOffsetToRead - curPos);
if (length <= 0) {
// we've reached the end of the stream
return null;
}
return submitReadahead(identifier, fd, curPos, length);
} else {
return lastReadahead;
}
} | 3.68 |
hbase_QuotaObserverChore_getTableQuotaTables | /**
* Returns an unmodifiable view of all tables with table quotas.
*/
public Set<TableName> getTableQuotaTables() {
return Collections.unmodifiableSet(tablesWithTableQuotas);
} | 3.68 |
flink_ModifyKindSet_intersect | /** Returns a new ModifyKindSet with all kinds set in both this set and in another set. */
public ModifyKindSet intersect(ModifyKindSet other) {
Builder builder = new Builder();
for (ModifyKind kind : other.getContainedKinds()) {
if (this.contains(kind)) {
builder.addContainedKind(kind);
}
}
return builder.build();
} | 3.68 |
framework_Table_extractGeneratedValue | /**
* Extracts cell value from generated row
*
* @param generatedRow generated row
* @param index column index
* @param firstVisibleColumn whether the column is first visible column in the table (i.e. previous columns are hidden)
* @return cell value
*/
private Object extractGeneratedValue(GeneratedRow generatedRow, int index, boolean firstVisibleColumn) {
Object value = generatedRow.getValue();
String[] text = generatedRow.getText();
if (generatedRow.isSpanColumns()) {
if (firstVisibleColumn) {
if (value instanceof Component) {
return value;
}
if (text != null && text.length > 0) {
return text[0];
}
}
return null;
}
if (text != null && text.length > index) {
return text[index];
}
return null;
} | 3.68 |
flink_SerdeContext_get | /** Retrieve context from {@link SerializerProvider} and {@link DeserializationContext}. */
public static SerdeContext get(DatabindContext databindContext) {
final SerdeContext serdeContext =
(SerdeContext) databindContext.getAttribute(SERDE_CONTEXT_KEY);
assert serdeContext != null;
return serdeContext;
} | 3.68 |
flink_TypeMappingUtils_computePhysicalIndicesOrTimeAttributeMarkers | /**
* Computes indices of physical fields corresponding to the selected logical fields of a {@link
* TableSchema}.
*
* <p>It puts markers (idx < 0) for time attributes extracted from {@link
* DefinedProctimeAttribute} and {@link DefinedRowtimeAttributes}
*
* <p>{@link TypeMappingUtils#computePhysicalIndices(List, DataType, Function)} should be
* preferred. The time attribute markers should not be used anymore.
*
* @param tableSource Used to extract {@link DefinedRowtimeAttributes}, {@link
* DefinedProctimeAttribute} and {@link TableSource#getProducedDataType()}.
* @param logicalColumns Logical columns that describe the physical type.
* @param streamMarkers If true puts stream markers otherwise puts batch markers.
* @param nameRemapping Additional remapping of a logical to a physical field name.
* TimestampExtractor works with logical names, but accesses physical fields
* @return Physical indices of logical fields selected with {@code projectedLogicalFields} mask.
*/
public static int[] computePhysicalIndicesOrTimeAttributeMarkers(
TableSource<?> tableSource,
List<TableColumn> logicalColumns,
boolean streamMarkers,
Function<String, String> nameRemapping) {
Optional<String> proctimeAttribute = getProctimeAttribute(tableSource);
List<String> rowtimeAttributes = getRowtimeAttributes(tableSource);
List<TableColumn> columnsWithoutTimeAttributes =
logicalColumns.stream()
.filter(
col ->
!rowtimeAttributes.contains(col.getName())
&& proctimeAttribute
.map(attr -> !attr.equals(col.getName()))
.orElse(true))
.collect(Collectors.toList());
Map<TableColumn, Integer> columnsToPhysicalIndices =
TypeMappingUtils.computePhysicalIndices(
columnsWithoutTimeAttributes.stream(),
tableSource.getProducedDataType(),
nameRemapping);
return logicalColumns.stream()
.mapToInt(
logicalColumn -> {
if (proctimeAttribute
.map(attr -> attr.equals(logicalColumn.getName()))
.orElse(false)) {
verifyTimeAttributeType(logicalColumn, "Proctime");
if (streamMarkers) {
return TimeIndicatorTypeInfo.PROCTIME_STREAM_MARKER;
} else {
return TimeIndicatorTypeInfo.PROCTIME_BATCH_MARKER;
}
} else if (rowtimeAttributes.contains(logicalColumn.getName())) {
verifyTimeAttributeType(logicalColumn, "Rowtime");
if (streamMarkers) {
return TimeIndicatorTypeInfo.ROWTIME_STREAM_MARKER;
} else {
return TimeIndicatorTypeInfo.ROWTIME_BATCH_MARKER;
}
} else {
return columnsToPhysicalIndices.get(logicalColumn);
}
})
.toArray();
} | 3.68 |
hbase_AbstractFSWAL_isHsync | // Find all the sync futures between these two txids to see if we need to issue an hsync; if there
// are no sync futures, just use the default.
private boolean isHsync(long beginTxid, long endTxid) {
SortedSet<SyncFuture> futures = syncFutures.subSet(new SyncFuture().reset(beginTxid, false),
new SyncFuture().reset(endTxid + 1, false));
if (futures.isEmpty()) {
return useHsync;
}
for (SyncFuture future : futures) {
if (future.isForceSync()) {
return true;
}
}
return false;
} | 3.68 |
framework_VTabsheet_getSpacerWidth | /**
* Returns the width of the spacer cell. Valo theme has the element hidden
 * by default, in which case this method returns zero.
*
* @return the width of the spacer cell in pixels
*/
private int getSpacerWidth() {
return tb.spacerTd.getOffsetWidth();
} | 3.68 |
pulsar_WatermarkTimeTriggerPolicy_getNextAlignedWindowTs | /**
* Computes the next window by scanning the events in the window and
* finds the next aligned window between the startTs and endTs. Return the end ts
* of the next aligned window, i.e. the ts when the window should fire.
*
 * @param startTs the start timestamp (exclusive)
 * @param endTs the end timestamp (inclusive)
* @return the aligned window end ts for the next window or Long.MAX_VALUE if there
* are no more events to be processed.
*/
private long getNextAlignedWindowTs(long startTs, long endTs) {
long nextTs = windowManager.getEarliestEventTs(startTs, endTs);
if (nextTs == Long.MAX_VALUE || (nextTs % slidingIntervalMs == 0)) {
return nextTs;
}
return nextTs + (slidingIntervalMs - (nextTs % slidingIntervalMs));
} | 3.68 |
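For example, with slidingIntervalMs = 10 and an earliest event timestamp of 23 (not aligned, since 23 % 10 != 0), the method returns 23 + (10 - 3) = 30, i.e. the end of the next aligned window.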
flink_ResourceSpec_subtract | /**
* Subtracts another resource spec from this one.
*
* @param other The other resource spec to subtract.
* @return The subtracted resource spec.
*/
public ResourceSpec subtract(final ResourceSpec other) {
checkNotNull(other, "Cannot subtract null resources");
if (this.equals(UNKNOWN) || other.equals(UNKNOWN)) {
return UNKNOWN;
}
checkArgument(
other.lessThanOrEqual(this),
"Cannot subtract a larger ResourceSpec from this one.");
Map<String, ExternalResource> resultExtendedResources = new HashMap<>(extendedResources);
for (ExternalResource resource : other.extendedResources.values()) {
resultExtendedResources.merge(
resource.getName(), resource, (v1, v2) -> v1.subtract(v2));
}
return new ResourceSpec(
this.cpuCores.subtract(other.cpuCores),
this.taskHeapMemory.subtract(other.taskHeapMemory),
this.taskOffHeapMemory.subtract(other.taskOffHeapMemory),
this.managedMemory.subtract(other.managedMemory),
resultExtendedResources);
} | 3.68 |
hbase_Scan_readAllVersions | /**
* Get all available versions.
*/
public Scan readAllVersions() {
this.maxVersions = Integer.MAX_VALUE;
return this;
} | 3.68 |
hadoop_FederationProtocolPBTranslator_getBuilder | /**
* Create or return the cached protobuf builder for this translator.
*
* @return cached Builder instance
*/
@SuppressWarnings("unchecked")
public B getBuilder() {
if (this.builder == null) {
try {
Method method = protoClass.getMethod("newBuilder");
this.builder = (B) method.invoke(null);
if (this.proto != null) {
// Merge in existing immutable proto
this.builder.mergeFrom(this.proto);
}
} catch (ReflectiveOperationException e) {
this.builder = null;
}
}
return this.builder;
} | 3.68 |
flink_NetworkBufferPool_requestPooledMemorySegment | /**
* Different from {@link #requestUnpooledMemorySegments} for unpooled segments allocation. This
* method and the below {@link #requestPooledMemorySegmentsBlocking} method are designed to be
* used from {@link LocalBufferPool} for pooled memory segments allocation. Note that these
* methods for pooled memory segments requesting and recycling are prohibited from acquiring the
* factoryLock to avoid deadlock.
*/
@Nullable
public MemorySegment requestPooledMemorySegment() {
synchronized (availableMemorySegments) {
return internalRequestMemorySegment();
}
} | 3.68 |
pulsar_ClientCnxIdleState_isReleased | /**
* @return Whether this connection has already been released.
*/
public boolean isReleased() {
return getIdleStat() == State.RELEASED;
} | 3.68 |
framework_GeneratedPropertyContainer_addGeneratedProperty | /**
* Add a new PropertyValueGenerator with given property id. This will
* override any existing properties with the same property id. Fires a
* PropertySetChangeEvent.
*
* @param propertyId
* property id
* @param generator
* a property value generator
*/
public void addGeneratedProperty(Object propertyId,
PropertyValueGenerator<?> generator) {
propertyGenerators.put(propertyId, generator);
fireContainerPropertySetChange();
} | 3.68 |
hbase_HRegion_hasMultipleColumnFamilies | /**
 * Determines whether multiple column families are present. Precondition: familyPaths is not null
* @param familyPaths List of (column family, hfilePath)
*/
private static boolean hasMultipleColumnFamilies(Collection<Pair<byte[], String>> familyPaths) {
boolean multipleFamilies = false;
byte[] family = null;
for (Pair<byte[], String> pair : familyPaths) {
byte[] fam = pair.getFirst();
if (family == null) {
family = fam;
} else if (!Bytes.equals(family, fam)) {
multipleFamilies = true;
break;
}
}
return multipleFamilies;
} | 3.68 |
hadoop_ECPolicyLoader_loadPolicy | /**
 * Load an EC policy from a policy element in the XML configuration file.
* @param element EC policy element
* @param schemas all valid schemas of the EC policy file
* @return EC policy
*/
private ErasureCodingPolicy loadPolicy(Element element,
Map<String, ECSchema> schemas) {
NodeList fields = element.getChildNodes();
ECSchema schema = null;
int cellSize = 0;
for (int i = 0; i < fields.getLength(); i++) {
Node fieldNode = fields.item(i);
if (fieldNode instanceof Element) {
Element field = (Element) fieldNode;
String tagName = field.getTagName();
// Get the nonnull text value.
Text text = (Text) field.getFirstChild();
if (text != null) {
if (!text.isElementContentWhitespace()) {
String value = text.getData().trim();
if ("schema".equals(tagName)) {
schema = schemas.get(value);
} else if ("cellsize".equals(tagName)) {
try {
cellSize = Integer.parseInt(value);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Bad EC policy cellsize"
+ " value " + value + " is found. It should be an integer");
}
} else {
LOG.warn("Invalid tagName: " + tagName);
}
}
} else {
throw new IllegalArgumentException("Value of <" + tagName
+ "> is null");
}
}
}
if (schema != null && cellSize > 0) {
return new ErasureCodingPolicy(schema, cellSize);
} else {
throw new RuntimeException("Bad policy is found in"
+ " EC policy configuration file");
}
} | 3.68 |
hudi_HoodieLogFormatWriter_withOutputStream | /**
* Overrides the output stream, only for test purpose.
*/
@VisibleForTesting
public void withOutputStream(FSDataOutputStream output) {
this.output = output;
} | 3.68 |
pulsar_KubernetesSecretsProviderConfigurator_configureKubernetesRuntimeSecretsProvider | // Kubernetes secrets can be exposed as volume mounts or as
// environment variables in the pods. We are currently using the
// environment variables way. Essentially the secretName/secretPath
// is attached as secretRef to the environment variables
// of a pod, and Kubernetes makes the secret pointed to by this combination available as an env variable.
@Override
public void configureKubernetesRuntimeSecretsProvider(V1PodSpec podSpec, String functionsContainerName,
Function.FunctionDetails functionDetails) {
V1Container container = null;
for (V1Container v1Container : podSpec.getContainers()) {
if (v1Container.getName().equals(functionsContainerName)) {
container = v1Container;
break;
}
}
if (container == null) {
throw new RuntimeException("No FunctionContainer found");
}
if (!StringUtils.isEmpty(functionDetails.getSecretsMap())) {
Type type = new TypeToken<Map<String, Object>>() {
}.getType();
Map<String, Object> secretsMap = new Gson().fromJson(functionDetails.getSecretsMap(), type);
for (Map.Entry<String, Object> entry : secretsMap.entrySet()) {
final V1EnvVar secretEnv = new V1EnvVar();
Map<String, String> kv = (Map<String, String>) entry.getValue();
secretEnv.name(entry.getKey())
.valueFrom(new V1EnvVarSource()
.secretKeyRef(new V1SecretKeySelector()
.name(kv.get(idKey))
.key(kv.get(keyKey))));
container.addEnvItem(secretEnv);
}
}
} | 3.68 |
hbase_AsyncTable_getRequestAttributes | /**
* Get the map of request attributes
* @return a map of request attributes supplied by the client
*/
default Map<String, byte[]> getRequestAttributes() {
throw new NotImplementedException("Add an implementation!");
} | 3.68 |
hadoop_AbstractDelegationTokenBinding_deploy | /**
* Deploy, returning the binding information.
 * The base implementation calls {@link #deployUnbonded()} if no identifier was retrieved,
 * otherwise {@link #bindToTokenIdentifier(AbstractS3ATokenIdentifier)}.
 *
 * @param retrievedIdentifier any identifier; null if deployed unbonded.
* @return binding information
* @throws IOException any failure.
*/
public DelegationBindingInfo deploy(AbstractS3ATokenIdentifier retrievedIdentifier)
throws IOException {
requireServiceStarted();
AWSCredentialProviderList credentialProviders =
retrievedIdentifier == null
? deployUnbonded()
: bindToTokenIdentifier(retrievedIdentifier);
return new DelegationBindingInfo()
.withCredentialProviders(credentialProviders);
} | 3.68 |
flink_TSetClientInfoReq_isSet | /**
* Returns true if field corresponding to fieldID is set (has been assigned a value) and false
* otherwise
*/
public boolean isSet(_Fields field) {
if (field == null) {
throw new java.lang.IllegalArgumentException();
}
switch (field) {
case SESSION_HANDLE:
return isSetSessionHandle();
case CONFIGURATION:
return isSetConfiguration();
}
throw new java.lang.IllegalStateException();
} | 3.68 |
hmily_HmilyXaTransactionManager_setTxTotr | /**
 * Push the transaction onto the thread-local stack.
*/
private void setTxTotr(final Transaction transaction) {
synchronized (tms) {
Stack<Transaction> stack = tms.get();
if (stack == null) {
stack = new Stack<>();
tms.set(stack);
}
stack.push(transaction);
}
} | 3.68 |
flink_SinkTestSuiteBase_checkGetEnoughRecordsWithSemantic | /**
* Check whether the polling should stop.
*
 * @param expected The expected list which helps to decide when to stop polling
* @param result The records that have been read
* @param semantic The semantic
* @return Whether the polling should stop
*/
private boolean checkGetEnoughRecordsWithSemantic(
List<T> expected, List<T> result, CheckpointingMode semantic) {
checkNotNull(expected);
checkNotNull(result);
if (EXACTLY_ONCE.equals(semantic)) {
return expected.size() <= result.size();
} else if (AT_LEAST_ONCE.equals(semantic)) {
Set<Integer> matchedIndex = new HashSet<>();
for (T record : expected) {
int before = matchedIndex.size();
for (int i = 0; i < result.size(); i++) {
if (matchedIndex.contains(i)) {
continue;
}
if (record.equals(result.get(i))) {
matchedIndex.add(i);
break;
}
}
// if not find the record in the result
if (before == matchedIndex.size()) {
return false;
}
}
return true;
}
throw new IllegalStateException(
String.format("%s delivery guarantee doesn't support test.", semantic.name()));
} | 3.68 |
graphhopper_AlternativeRoute_calcAlternatives | /**
* @return the information necessary to handle alternative paths. Note that the paths are
* not yet extracted.
*/
public List<AlternativeInfo> calcAlternatives(final Path bestPath, final int maxPaths,
double maxWeightFactor, final double weightInfluence,
final double maxShareFactor, final double shareInfluence,
final double minPlateauFactor, final double plateauInfluence) {
final double maxWeight = maxWeightFactor * bestWeight;
final GHIntObjectHashMap<IntSet> traversalIdMap = new GHIntObjectHashMap<>();
final AtomicInteger startTID = addToMap(traversalIdMap, bestPath);
// find all 'good' alternatives from forward-SPT matching the backward-SPT and optimize by
// small total weight (1), small share and big plateau (3a+b) and do these expensive calculations
// only for plateau start candidates (2)
final List<AlternativeInfo> alternatives = new ArrayList<>(maxPaths);
double bestPlateau = bestWeight;
double bestShare = 0;
double sortBy = calcSortBy(weightInfluence, bestWeight,
shareInfluence, bestShare,
plateauInfluence, bestPlateau);
final AlternativeInfo bestAlt = new AlternativeInfo(sortBy, bestPath,
bestFwdEntry, bestBwdEntry, bestShare, getAltNames(graph, bestFwdEntry));
alternatives.add(bestAlt);
AtomicReference<SPTEntry> bestEntry = new AtomicReference<>();
bestWeightMapFrom.forEach(new IntObjectPredicate<SPTEntry>() {
@Override
public boolean apply(final int traversalId, final SPTEntry fromSPTEntry) {
SPTEntry toSPTEntry = bestWeightMapTo.get(traversalId);
if (toSPTEntry == null)
return true;
// Using the parent is required to avoid duplicate edge in Path.
// TODO we miss the turn cost weight (but at least we not duplicate the current edge weight)
if (traversalMode.isEdgeBased() && toSPTEntry.parent != null)
toSPTEntry = toSPTEntry.parent;
// The alternative path is suboptimal if U-turn (after fromSPTEntry)
if (fromSPTEntry.edge == toSPTEntry.edge)
return true;
// (1) skip too long paths
final double weight = fromSPTEntry.getWeightOfVisitedPath() + toSPTEntry.getWeightOfVisitedPath();
if (weight > maxWeight)
return true;
if (isBestPath(fromSPTEntry))
return true;
// For edge based traversal we need the next entry to find out the plateau start
SPTEntry tmpFromEntry = traversalMode.isEdgeBased() ? fromSPTEntry.parent : fromSPTEntry;
if (tmpFromEntry == null || tmpFromEntry.parent == null) {
// we can be here only if edge based and only if entry is not part of the best path
// e.g. when starting point has two edges and one is part of the best path the other edge is path of an alternative
assert traversalMode.isEdgeBased();
} else {
int nextToTraversalId = traversalMode.createTraversalId(graph.getEdgeIteratorState(tmpFromEntry.edge, tmpFromEntry.parent.adjNode), true);
SPTEntry correspondingToEntry = bestWeightMapTo.get(nextToTraversalId);
if (correspondingToEntry != null) {
if (traversalMode.isEdgeBased())
correspondingToEntry = correspondingToEntry.parent;
if (correspondingToEntry.edge == fromSPTEntry.edge)
return true;
}
}
// (3a) calculate plateau, we know we are at the beginning of the 'from'-side of
// the plateau A-B-C and go further to B
// where B is the next-'from' of A and B is also the previous-'to' of A.
//
            //            *<-A-B-C->*
            //           /           \
            //       start            end
//
// extend plateau in only one direction necessary (A to B to ...) as we know
// that the from-SPTEntry is the start of the plateau or there is no plateau at all
//
double plateauWeight = 0;
SPTEntry prevToSPTEntry = toSPTEntry, prevFrom = fromSPTEntry;
while (prevToSPTEntry.parent != null) {
int nextFromTraversalId = traversalMode.createTraversalId(graph.getEdgeIteratorState(prevToSPTEntry.edge, prevToSPTEntry.parent.adjNode), false);
SPTEntry otherFromEntry = bestWeightMapFrom.get(nextFromTraversalId);
// end of a plateau
if (otherFromEntry == null ||
otherFromEntry.parent != prevFrom ||
otherFromEntry.edge != prevToSPTEntry.edge)
break;
prevFrom = otherFromEntry;
plateauWeight += (prevToSPTEntry.getWeightOfVisitedPath() - prevToSPTEntry.parent.getWeightOfVisitedPath());
prevToSPTEntry = prevToSPTEntry.parent;
}
if (plateauWeight <= 0 || plateauWeight / weight < minPlateauFactor)
return true;
if (fromSPTEntry.parent == null)
throw new IllegalStateException("not implemented yet. in case of an edge based traversal the parent of fromSPTEntry could be null");
// (3b) calculate share
SPTEntry fromEE = getFirstShareEE(fromSPTEntry.parent, true);
SPTEntry toEE = getFirstShareEE(toSPTEntry.parent, false);
double shareWeight = fromEE.getWeightOfVisitedPath() + toEE.getWeightOfVisitedPath();
boolean smallShare = shareWeight / bestWeight < maxShareFactor;
if (smallShare) {
List<String> altNames = getAltNames(graph, fromSPTEntry);
double sortBy = calcSortBy(weightInfluence, weight, shareInfluence, shareWeight, plateauInfluence, plateauWeight);
double worstSortBy = getWorstSortBy();
// plateaus.add(new PlateauInfo(altName, plateauEdges));
if (sortBy < worstSortBy || alternatives.size() < maxPaths) {
Path path = DefaultBidirPathExtractor.extractPath(graph, weighting, fromSPTEntry, toSPTEntry, weight);
// for now do not add alternatives to set, if we do we need to remove then on alternatives.clear too (see below)
// AtomicInteger tid = addToMap(traversalIDMap, path);
// int tid = traversalMode.createTraversalId(path.calcEdges().get(0), false);
alternatives.add(new AlternativeInfo(sortBy, path, fromEE, toEE, shareWeight, altNames));
Collections.sort(alternatives, ALT_COMPARATOR);
if (alternatives.get(0) != bestAlt)
throw new IllegalStateException("best path should be always first entry");
if (alternatives.size() > maxPaths)
alternatives.subList(maxPaths, alternatives.size()).clear();
}
}
return true;
}
/**
* Extract path until we stumble over an existing traversal id
*/
SPTEntry getFirstShareEE(SPTEntry startEE, boolean reverse) {
while (startEE.parent != null) {
// TODO we could make use of traversal ID directly if stored in SPTEntry
int tid = traversalMode.createTraversalId(graph.getEdgeIteratorState(startEE.edge, startEE.parent.adjNode), reverse);
if (isAlreadyExisting(tid))
return startEE;
startEE = startEE.parent;
}
return startEE;
}
/**
* This method returns true if the specified tid is already existent in the
* traversalIDMap
*/
boolean isAlreadyExisting(final int tid) {
final AtomicBoolean exists = new AtomicBoolean(false);
traversalIdMap.forEach(new IntObjectPredicate<IntSet>() {
@Override
public boolean apply(int key, IntSet set) {
if (set.contains(tid)) {
exists.set(true);
return false;
}
return true;
}
});
return exists.get();
}
/**
* Return the current worst weight for all alternatives
*/
double getWorstSortBy() {
if (alternatives.isEmpty())
throw new IllegalStateException("Empty alternative list cannot happen");
return alternatives.get(alternatives.size() - 1).sortBy;
}
// returns true if fromSPTEntry is identical to the specified best path
boolean isBestPath(SPTEntry fromSPTEntry) {
if (traversalMode.isEdgeBased()) {
if (GHUtility.getEdgeFromEdgeKey(startTID.get()) == fromSPTEntry.edge) {
if (fromSPTEntry.parent == null)
throw new IllegalStateException("best path must have no parent but was non-null: " + fromSPTEntry);
if (bestEntry.get() != null && bestEntry.get().edge != fromSPTEntry.edge)
throw new IllegalStateException("there can be only one best entry but was " + fromSPTEntry + " vs old: " + bestEntry.get()
+ " " + graph.getEdgeIteratorState(fromSPTEntry.edge, fromSPTEntry.adjNode).fetchWayGeometry(FetchMode.ALL));
bestEntry.set(fromSPTEntry);
return true;
}
} else if (fromSPTEntry.parent == null) {
if (startTID.get() != fromSPTEntry.adjNode)
throw new IllegalStateException("Start traversal ID has to be identical to root edge entry "
+ "which is the plateau start of the best path but was: " + startTID + " vs. adjNode: " + fromSPTEntry.adjNode);
if (bestEntry.get() != null)
throw new IllegalStateException("there can be only one best entry but was " + fromSPTEntry + " vs old: " + bestEntry.get()
+ " " + graph.getEdgeIteratorState(fromSPTEntry.edge, fromSPTEntry.adjNode).fetchWayGeometry(FetchMode.ALL));
bestEntry.set(fromSPTEntry);
return true;
}
return false;
}
});
return alternatives;
} | 3.68 |
hbase_ColumnFamilyDescriptorBuilder_setMobCompactPartitionPolicy | /**
* Set the mob compact partition policy for the family.
* @param policy policy type
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor
setMobCompactPartitionPolicy(MobCompactPartitionPolicy policy) {
return setValue(MOB_COMPACT_PARTITION_POLICY_BYTES, policy.name());
} | 3.68 |
framework_BrowserInfo_get | /**
* Singleton method to get BrowserInfo object.
*
* @return instance of BrowserInfo object
*/
public static BrowserInfo get() {
if (instance == null) {
instance = new BrowserInfo();
}
return instance;
} | 3.68 |
pulsar_ProxyConnection_spliceNIC2NIC | /**
 * Use splice for zero-copy transfer from NIC to NIC.
 * @param inboundChannel input channel
 * @param outboundChannel output channel
 * @param spliceLength number of bytes to splice
 */
protected static ChannelPromise spliceNIC2NIC(EpollSocketChannel inboundChannel,
EpollSocketChannel outboundChannel, int spliceLength) {
ChannelPromise promise = inboundChannel.newPromise();
inboundChannel.spliceTo(outboundChannel, spliceLength, promise);
promise.addListener((ChannelFutureListener) future -> {
if (!future.isSuccess() && !(future.cause() instanceof ClosedChannelException)) {
future.channel().pipeline().fireExceptionCaught(future.cause());
}
});
return promise;
} | 3.68 |
hbase_IndexBlockEncoding_getNameFromId | /**
* Find and return the name of data block encoder for the given id.
* @param encoderId id of data block encoder
* @return name, same as used in options in column family
*/
public static String getNameFromId(short encoderId) {
return getEncodingById(encoderId).toString();
} | 3.68 |
flink_CliFrontend_buildProgram | /**
* Creates a Packaged program from the given command line options and the
* effectiveConfiguration.
*
* @return A PackagedProgram (upon success)
*/
PackagedProgram buildProgram(final ProgramOptions runOptions, final Configuration configuration)
throws FileNotFoundException, ProgramInvocationException, CliArgsException {
runOptions.validate();
String[] programArgs = runOptions.getProgramArgs();
String jarFilePath = runOptions.getJarFilePath();
List<URL> classpaths = runOptions.getClasspaths();
// Get assembler class
String entryPointClass = runOptions.getEntryPointClassName();
File jarFile = jarFilePath != null ? getJarFile(jarFilePath) : null;
return PackagedProgram.newBuilder()
.setJarFile(jarFile)
.setUserClassPaths(classpaths)
.setEntryPointClassName(entryPointClass)
.setConfiguration(configuration)
.setSavepointRestoreSettings(runOptions.getSavepointRestoreSettings())
.setArguments(programArgs)
.build();
} | 3.68 |
framework_ContainerHierarchicalWrapper_hasChildren | /*
* Is the Item corresponding to the given ID a leaf node? Don't add a
* JavaDoc comment here, we use the default documentation from implemented
* interface.
*/
@Override
public boolean hasChildren(Object itemId) {
// If the wrapped container implements the method directly, use it
if (hierarchical) {
return ((Container.Hierarchical) container).hasChildren(itemId);
}
LinkedList<Object> list = children.get(itemId);
return (list != null && !list.isEmpty());
} | 3.68 |
hbase_ReplicationSourceWALReader_setReaderRunning | /**
* @param readerRunning the readerRunning to set
*/
public void setReaderRunning(boolean readerRunning) {
this.isReaderRunning = readerRunning;
} | 3.68 |
hudi_HoodieBackedTableMetadataWriter_preWrite | /**
* Allows the implementation to perform any pre-commit operations like transitioning a commit to inflight if required.
*
* @param instantTime time of commit
*/
protected void preWrite(String instantTime) {
// Default is No-Op
} | 3.68 |
dubbo_SimpleReferenceCache_getCache | /**
 * Get the cache using the specified {@link KeyGenerator}.
 * Create the cache if it does not exist yet.
*/
public static SimpleReferenceCache getCache(String name, KeyGenerator keyGenerator) {
return ConcurrentHashMapUtils.computeIfAbsent(
CACHE_HOLDER, name, k -> new SimpleReferenceCache(k, keyGenerator));
} | 3.68 |
morf_SqlScriptExecutor_reclassifiedRuntimeException | /**
 * Reclassify an exception if it is dialect-specific and wrap it in a runtime exception.
*/
private RuntimeException reclassifiedRuntimeException(Exception e, String message) {
Exception reclassifiedException = sqlDialect.getDatabaseType().reclassifyException(e);
return reclassifiedException instanceof SQLException ? new RuntimeSqlException(message, (SQLException) reclassifiedException) :
new RuntimeException(message, reclassifiedException);
} | 3.68 |
querydsl_DateExpression_nullif | /**
* Create a {@code nullif(this, other)} expression
*
* @param other
* @return nullif(this, other)
*/
@Override
public DateExpression<T> nullif(T other) {
return nullif(ConstantImpl.create(other));
} | 3.68 |
hadoop_DeregisterSubClusterRequest_newInstance | /**
* Initialize DeregisterSubClusterRequest according to subClusterId.
*
* @param subClusterId subClusterId.
* @return DeregisterSubClusterRequest.
*/
@Private
@Unstable
public static DeregisterSubClusterRequest newInstance(String subClusterId) {
DeregisterSubClusterRequest request = Records.newRecord(DeregisterSubClusterRequest.class);
request.setSubClusterId(subClusterId);
return request;
} | 3.68 |
hadoop_AzureADAuthenticator_getTokenUsingRefreshToken | /**
* Gets Azure Active Directory token using refresh token.
*
* @param authEndpoint the OAuth 2.0 token endpoint associated
* with the user's directory (obtain from
* Active Directory configuration)
* @param clientId the client ID (GUID) of the client web app obtained from Azure Active Directory configuration
* @param refreshToken the refresh token
* @return {@link AzureADToken} obtained using the refresh token
* @throws IOException throws IOException if there is a failure in connecting to Azure AD
*/
public static AzureADToken getTokenUsingRefreshToken(
final String authEndpoint, final String clientId,
final String refreshToken) throws IOException {
QueryParams qp = new QueryParams();
qp.add("grant_type", "refresh_token");
qp.add("refresh_token", refreshToken);
if (clientId != null) {
qp.add("client_id", clientId);
}
LOG.debug("AADToken: starting to fetch token using refresh token for client ID " + clientId);
return getTokenCall(authEndpoint, qp.serialize(), null, null);
} | 3.68 |
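An illustrative call of this helper; the endpoint, client ID and refresh token below are placeholders, and the import path is assumed:

```java
import java.io.IOException;

import org.apache.hadoop.fs.azurebfs.oauth2.AzureADAuthenticator; // package assumed
import org.apache.hadoop.fs.azurebfs.oauth2.AzureADToken;

public class RefreshTokenSketch {
  public static void main(String[] args) throws IOException {
    String authEndpoint = "https://login.microsoftonline.com/<tenant-id>/oauth2/token";
    String clientId = "00000000-0000-0000-0000-000000000000"; // placeholder GUID
    String refreshToken = "<refresh-token>";                   // placeholder

    // Exchanges the refresh token for a fresh access token; clientId may be null.
    AzureADToken token = AzureADAuthenticator.getTokenUsingRefreshToken(
        authEndpoint, clientId, refreshToken);
    System.out.println("obtained token: " + token);
  }
}
```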
hbase_LeaseManager_closeAfterLeasesExpire | /**
 * Shuts down this lease instance when all outstanding leases expire. Like {@link #close()}, but
 * rather than violently ending all leases, it first waits for extant leases to finish. Use this
 * method if the lease holders could lose data, leak locks, etc. Presumes the client has shut down
 * allocation of new leases.
*/
public void closeAfterLeasesExpire() {
this.stopRequested = true;
} | 3.68 |
morf_AbstractSqlDialectTest_testInsertFromSelectWithTargetInDifferentSchema | /**
* Tests that an insert from a select works when the target table is in a different schema.
*/
@Test
public void testInsertFromSelectWithTargetInDifferentSchema() {
SelectStatement sourceStmt = new SelectStatement(new FieldReference("id"),
new FieldReference("version"),
new FieldReference(STRING_FIELD),
new FieldReference(INT_FIELD),
new FieldReference(FLOAT_FIELD))
.from(new TableReference(TEST_TABLE));
InsertStatement stmt = new InsertStatement().into(new TableReference("MYSCHEMA", OTHER_TABLE))
.fields(new FieldReference("id"),
new FieldReference("version"),
new FieldReference(STRING_FIELD),
new FieldReference(INT_FIELD),
new FieldReference(FLOAT_FIELD))
.from(sourceStmt);
String expectedSql = "INSERT INTO " + differentSchemaTableName(OTHER_TABLE) + " (id, version, stringField, intField, floatField) SELECT id, version, stringField, intField, floatField FROM " + tableName(TEST_TABLE);
List<String> sql = testDialect.convertStatementToSQL(stmt, metadata, SqlDialect.IdTable.withDeterministicName(ID_VALUES_TABLE));
assertEquals("Insert with explicit field lists", ImmutableList.of(expectedSql), sql);
} | 3.68 |
flink_DateTimeUtils_convertTz | /**
 * Converts a datetime string from one time zone to another time zone.
*
* @param dateStr the date time string
* @param tzFrom the original time zone
 * @param tzTo the target time zone
 * @return the converted datetime string, or null if the input cannot be parsed
 */
public static String convertTz(String dateStr, String tzFrom, String tzTo) {
try {
return formatTimestampTz(parseTimestampTz(dateStr, tzFrom), tzTo);
} catch (ParseException e) {
return null;
}
} | 3.68 |
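An example call of convertTz, assuming the usual "yyyy-MM-dd HH:mm:ss" timestamp pattern; a parse failure returns null.

// Converts a UTC timestamp to Shanghai time; returns null if the string cannot be parsed.
String local = DateTimeUtils.convertTz("2021-06-01 12:00:00", "UTC", "Asia/Shanghai");
// Expected result under the assumed format: "2021-06-01 20:00:00"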
dubbo_ClassUtils_isGenericClass | /**
 * Determines whether the given type is a generic class.
 *
 * @param type the target type
 * @return <code>true</code> if the target type is neither <code>null</code>, <code>void</code> nor <code>Void.class</code>; <code>false</code> otherwise
* @since 2.7.6
*/
public static boolean isGenericClass(Class<?> type) {
return type != null && !void.class.equals(type) && !Void.class.equals(type);
} | 3.68 |
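The return values below follow directly from the check shown above.

ClassUtils.isGenericClass(String.class); // true
ClassUtils.isGenericClass(void.class);   // false
ClassUtils.isGenericClass(Void.class);   // false
ClassUtils.isGenericClass(null);         // false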
hadoop_AzureBlobFileSystemStore_initializeClient | /**
 * A one-off operation to initialize AbfsClient for AzureBlobFileSystem
* Operations.
*
* @param uri Uniform resource identifier for Abfs.
* @param fileSystemName Name of the fileSystem being used.
* @param accountName Name of the account being used to access Azure
* data store.
 * @param isSecure Whether https (true) or http (false) is being used.
 * @throws IOException if the endpoint URL is malformed or the credentials/token provider cannot be obtained.
*/
private void initializeClient(URI uri, String fileSystemName,
String accountName, boolean isSecure)
throws IOException {
if (this.client != null) {
return;
}
final URIBuilder uriBuilder = getURIBuilder(accountName, isSecure);
final String url = uriBuilder.toString() + AbfsHttpConstants.FORWARD_SLASH + fileSystemName;
URL baseUrl;
try {
baseUrl = new URL(url);
} catch (MalformedURLException e) {
throw new InvalidUriException(uri.toString());
}
SharedKeyCredentials creds = null;
AccessTokenProvider tokenProvider = null;
SASTokenProvider sasTokenProvider = null;
if (authType == AuthType.OAuth) {
AzureADAuthenticator.init(abfsConfiguration);
}
if (authType == AuthType.SharedKey) {
LOG.trace("Fetching SharedKey credentials");
int dotIndex = accountName.indexOf(AbfsHttpConstants.DOT);
if (dotIndex <= 0) {
throw new InvalidUriException(
uri.toString() + " - account name is not fully qualified.");
}
creds = new SharedKeyCredentials(accountName.substring(0, dotIndex),
abfsConfiguration.getStorageAccountKey());
} else if (authType == AuthType.SAS) {
LOG.trace("Fetching SAS token provider");
sasTokenProvider = abfsConfiguration.getSASTokenProvider();
} else {
LOG.trace("Fetching token provider");
tokenProvider = abfsConfiguration.getTokenProvider();
ExtensionHelper.bind(tokenProvider, uri,
abfsConfiguration.getRawConfiguration());
}
LOG.trace("Initializing AbfsClient for {}", baseUrl);
if (tokenProvider != null) {
this.client = new AbfsClient(baseUrl, creds, abfsConfiguration,
tokenProvider,
populateAbfsClientContext());
} else {
this.client = new AbfsClient(baseUrl, creds, abfsConfiguration,
sasTokenProvider,
populateAbfsClientContext());
}
LOG.trace("AbfsClient init complete");
} | 3.68 |
hadoop_OBSCommonUtils_extractException | /**
* Extract an exception from a failed future, and convert to an IOE.
*
* @param operation operation which failed
* @param path path operated on (may be null)
* @param ee execution exception
* @return an IOE which can be thrown
*/
static IOException extractException(final String operation,
final String path, final ExecutionException ee) {
IOException ioe;
Throwable cause = ee.getCause();
if (cause instanceof ObsException) {
ioe = translateException(operation, path, (ObsException) cause);
} else if (cause instanceof IOException) {
ioe = (IOException) cause;
} else {
ioe = new IOException(operation + " failed: " + cause, cause);
}
return ioe;
} | 3.68 |
hbase_AsyncMetaRegionLocator_getRegionLocationInCache | // only used for testing whether we have cached the location for a region.
RegionLocations getRegionLocationInCache() {
return metaRegionLocations.get();
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectMaximumWithExpression | /**
 * Tests a select statement where the maximum function is applied to an expression rather than a simple field.
*/
@Test
public void testSelectMaximumWithExpression() {
SelectStatement stmt = select(max(field(INT_FIELD).plus(literal(1)))).from(tableRef(TEST_TABLE));
assertEquals("Select scripts are not the same", expectedSelectMaximumWithExpression(), testDialect.convertStatementToSQL(stmt));
} | 3.68 |
framework_AbstractConnector_getRpcProxy | /**
* Returns an RPC proxy object which can be used to invoke the RPC method on
* the server.
*
* @param <T>
* The type of the ServerRpc interface
* @param rpcInterface
* The ServerRpc interface to retrieve a proxy object for
* @return A proxy object which can be used to invoke the RPC method on the
* server.
*/
@SuppressWarnings("unchecked")
protected <T extends ServerRpc> T getRpcProxy(Class<T> rpcInterface) {
String name = rpcInterface.getName();
if (!rpcProxyMap.containsKey(name)) {
rpcProxyMap.put(name, RpcProxy.create(rpcInterface, this));
}
return (T) rpcProxyMap.get(name);
} | 3.68 |
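A sketch of typical use inside a connector subclass; ButtonServerRpc and its click() method are hypothetical and stand in for a real ServerRpc interface.

// Hypothetical ServerRpc interface, shown only to illustrate the call pattern.
public interface ButtonServerRpc extends ServerRpc {
    void click();
}

// Inside a connector subclass; the proxy is created once and then cached per interface name.
getRpcProxy(ButtonServerRpc.class).click();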
hbase_VisibilityController_addLabels | /******************************
* VisibilityEndpoint service related methods
******************************/
@Override
public synchronized void addLabels(RpcController controller, VisibilityLabelsRequest request,
RpcCallback<VisibilityLabelsResponse> done) {
VisibilityLabelsResponse.Builder response = VisibilityLabelsResponse.newBuilder();
List<VisibilityLabel> visLabels = request.getVisLabelList();
if (!initialized) {
setExceptionResults(visLabels.size(),
new VisibilityControllerNotReadyException("VisibilityController not yet initialized!"),
response);
} else {
List<byte[]> labels = new ArrayList<>(visLabels.size());
try {
if (authorizationEnabled) {
checkCallingUserAuth();
}
RegionActionResult successResult = RegionActionResult.newBuilder().build();
for (VisibilityLabel visLabel : visLabels) {
byte[] label = visLabel.getLabel().toByteArray();
labels.add(label);
response.addResult(successResult); // Just mark as success. Later it will get reset
// based on the result from
// visibilityLabelService.addLabels ()
}
if (!labels.isEmpty()) {
OperationStatus[] opStatus = this.visibilityLabelService.addLabels(labels);
logResult(true, "addLabels", "Adding labels allowed", null, labels, null);
int i = 0;
for (OperationStatus status : opStatus) {
while (!Objects.equals(response.getResult(i), successResult)) {
i++;
}
if (status.getOperationStatusCode() != SUCCESS) {
RegionActionResult.Builder failureResultBuilder = RegionActionResult.newBuilder();
failureResultBuilder
.setException(buildException(new DoNotRetryIOException(status.getExceptionMsg())));
response.setResult(i, failureResultBuilder.build());
}
i++;
}
}
} catch (AccessDeniedException e) {
logResult(false, "addLabels", e.getMessage(), null, labels, null);
LOG.error("User is not having required permissions to add labels", e);
setExceptionResults(visLabels.size(), e, response);
} catch (IOException e) {
LOG.error(e.toString(), e);
setExceptionResults(visLabels.size(), e, response);
}
}
done.run(response.build());
} | 3.68 |
pulsar_AbstractHierarchicalLedgerManager_process | /**
* Process list of items.
*
* @param data
* List of data to process
* @param processor
* Callback to process element of list when success
* @param finalCb
* Final callback to be called after all elements in the list are processed
* @param context
* Context of final callback
* @param successRc
* RC passed to final callback on success
* @param failureRc
* RC passed to final callback on failure
*/
public void process(final List<T> data, final BookkeeperInternalCallbacks.Processor<T> processor,
final AsyncCallback.VoidCallback finalCb, final Object context,
final int successRc, final int failureRc) {
if (data == null || data.size() == 0) {
finalCb.processResult(successRc, null, context);
return;
}
final int size = data.size();
final AtomicInteger current = new AtomicInteger(0);
T firstElement = data.get(0);
processor.process(firstElement, new AsyncCallback.VoidCallback() {
@Override
public void processResult(int rc, String path, Object ctx) {
if (rc != successRc) {
// terminate immediately
finalCb.processResult(failureRc, null, context);
return;
}
// process next element
int next = current.incrementAndGet();
if (next >= size) { // reach the end of list
finalCb.processResult(successRc, null, context);
return;
}
final T dataToProcess = data.get(next);
final AsyncCallback.VoidCallback stub = this;
scheduler.execute(() -> processor.process(dataToProcess, stub));
}
});
} | 3.68 |
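A hedged sketch of driving the helper above; the manager instance, the per-item work and the numeric return codes are placeholders, not real BookKeeper constants.

// manager is assumed to be an instance of a concrete subclass; 0 and -1 stand in for real RC constants.
List<Long> ledgerIds = Arrays.asList(1L, 2L, 3L);
manager.process(ledgerIds,
    (ledgerId, cb) -> {
        // per-ledger work would go here
        cb.processResult(0, null, null);
    },
    (rc, path, ctx) -> System.out.println("all items processed, rc=" + rc),
    null, 0, -1);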
framework_TreeData_addItem | /**
* Adds a data item as a child of {@code parent}. Call with {@code null} as
* parent to add a root level item. The given parent item must already exist
* in this structure, and an item can only be added to this structure once.
*
* @param parent
* the parent item for which the items are added as children
* @param item
* the item to add
* @return this
*
* @throws IllegalArgumentException
* if parent is not null and not already added to this structure
* @throws IllegalArgumentException
* if the item has already been added to this structure
* @throws NullPointerException
* if item is null
*/
public TreeData<T> addItem(T parent, T item) {
Objects.requireNonNull(item, "Item cannot be null");
if (parent != null && !contains(parent)) {
throw new IllegalArgumentException(
"Parent needs to be added before children. "
+ "To add root items, call with parent as null");
}
if (contains(item)) {
throw new IllegalArgumentException(
"Cannot add the same item multiple times: " + item);
}
putItem(item, parent);
return this;
} | 3.68 |
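A small usage sketch; it assumes TreeData's public no-argument constructor.

TreeData<String> data = new TreeData<>();
data.addItem(null, "root");      // root-level item
data.addItem("root", "child-1"); // child of "root"
// data.addItem("root", "child-1"); // would throw IllegalArgumentException: item already added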
flink_HiveParserIntervalDayTime_normalizeSecondsAndNanos | // Ensures that the seconds and nanoseconds fields have consistent sign
protected void normalizeSecondsAndNanos() {
if (totalSeconds > 0 && nanos < 0) {
--totalSeconds;
nanos += HiveParserIntervalUtils.NANOS_PER_SEC;
} else if (totalSeconds < 0 && nanos > 0) {
++totalSeconds;
nanos -= HiveParserIntervalUtils.NANOS_PER_SEC;
}
} | 3.68 |
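Illustrative before/after values for the normalization above, taking NANOS_PER_SEC as 1,000,000,000.

// totalSeconds =  5, nanos = -200_000_000  ->  totalSeconds =  4, nanos =  800_000_000
// totalSeconds = -5, nanos =  300_000_000  ->  totalSeconds = -4, nanos = -700_000_000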
flink_Slide_over | /**
* Creates a sliding window. Sliding windows have a fixed size and slide by a specified slide
* interval. If the slide interval is smaller than the window size, sliding windows are
* overlapping. Thus, an element can be assigned to multiple windows.
*
* <p>For example, a sliding window of size 15 minutes with 5 minutes sliding interval groups
* elements of 15 minutes and evaluates every five minutes. Each element is contained in three
 * consecutive window evaluations.
*
* @param size the size of the window as time or row-count interval
* @return a partially specified sliding window
*/
public static SlideWithSize over(Expression size) {
return new SlideWithSize(size);
} | 3.68 |
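A hedged Table API sketch of the chain that starts with over(...); it assumes the standard Expressions.lit and Expressions.$ helpers and an event-time attribute named "rowtime".

// 15-minute windows sliding every 5 minutes over the assumed "rowtime" attribute.
Slide.over(lit(15).minutes())
     .every(lit(5).minutes())
     .on($("rowtime"))
     .as("w");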
flink_StreamArrowPythonGroupWindowAggregateFunctionOperator_cleanupTime | /**
* Returns the cleanup time for a window, which is {@code window.maxTimestamp +
 * allowedLateness}. In case this leads to a value greater than {@link Long#MAX_VALUE}, then a
* cleanup time of {@link Long#MAX_VALUE} is returned.
*
* @param window the window whose cleanup time we are computing.
*/
private long cleanupTime(W window) {
if (windowAssigner.isEventTime()) {
long cleanupTime = window.maxTimestamp() + allowedLateness;
return cleanupTime >= window.maxTimestamp() ? cleanupTime : Long.MAX_VALUE;
} else {
return window.maxTimestamp();
}
} | 3.68 |
framework_DateUtil_formatClientSideTime | /**
* @param date
* the date to format
* @return given Date as String, for communicating to server-side
*/
public static String formatClientSideTime(Date date) {
DateTimeFormat dateformatDate = DateTimeFormat
.getFormat(DateConstants.CLIENT_TIME_FORMAT);
return dateformatDate.format(date);
} | 3.68 |
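Calling the helper above is straightforward; it simply formats the given java.util.Date with the client-side time format constant.

// Formats the current time for transmission to the server side.
String clientTime = DateUtil.formatClientSideTime(new Date());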