name | code_snippet | score
---|---|---|
pulsar_ConsumerConfiguration_setReadCompacted | /**
* If enabled, the consumer will read messages from the compacted topic rather than reading the full message backlog
* of the topic. This means that, if the topic has been compacted, the consumer will only see the latest value for
* each key in the topic, up until the point in the topic message backlog that has been compacted. Beyond that
* point, the messages will be sent as normal.
*
 * readCompacted can only be enabled on subscriptions to persistent topics that have a single active consumer (i.e.
 * failover or exclusive subscriptions). Attempting to enable it on subscriptions to non-persistent topics or on a
 * shared subscription will lead to the subscription call throwing a PulsarClientException.
*
* @param readCompacted
* whether to read from the compacted topic
*/
public ConsumerConfiguration setReadCompacted(boolean readCompacted) {
conf.setReadCompacted(readCompacted);
return this;
} | 3.68 |
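A minimal usage sketch for the setter above, assuming the older ConsumerConfiguration-based Pulsar client API; the topic and subscription names are hypothetical and client stands for an already-created PulsarClient:

ConsumerConfiguration conf = new ConsumerConfiguration();
conf.setSubscriptionType(SubscriptionType.Exclusive); // readCompacted requires a failover or exclusive subscription
conf.setReadCompacted(true);
Consumer consumer = client.subscribe("persistent://tenant/ns/my-topic", "my-subscription", conf);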
zxing_ReedSolomonDecoder_decodeWithECCount | /**
* <p>Decodes given set of received codewords, which include both data and error-correction
* codewords. Really, this means it uses Reed-Solomon to detect and correct errors, in-place,
* in the input.</p>
*
* @param received data and error-correction codewords
* @param twoS number of error-correction codewords available
* @return the number of errors corrected
* @throws ReedSolomonException if decoding fails for any reason
*/
public int decodeWithECCount(int[] received, int twoS) throws ReedSolomonException {
GenericGFPoly poly = new GenericGFPoly(field, received);
int[] syndromeCoefficients = new int[twoS];
boolean noError = true;
for (int i = 0; i < twoS; i++) {
int eval = poly.evaluateAt(field.exp(i + field.getGeneratorBase()));
syndromeCoefficients[syndromeCoefficients.length - 1 - i] = eval;
if (eval != 0) {
noError = false;
}
}
if (noError) {
return 0;
}
GenericGFPoly syndrome = new GenericGFPoly(field, syndromeCoefficients);
GenericGFPoly[] sigmaOmega =
runEuclideanAlgorithm(field.buildMonomial(twoS, 1), syndrome, twoS);
GenericGFPoly sigma = sigmaOmega[0];
GenericGFPoly omega = sigmaOmega[1];
int[] errorLocations = findErrorLocations(sigma);
int[] errorMagnitudes = findErrorMagnitudes(omega, errorLocations);
for (int i = 0; i < errorLocations.length; i++) {
int position = received.length - 1 - field.log(errorLocations[i]);
if (position < 0) {
throw new ReedSolomonException("Bad error location");
}
received[position] = GenericGF.addOrSubtract(received[position], errorMagnitudes[i]);
}
return errorLocations.length;
} | 3.68 |
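A hedged usage sketch of the decoder; it assumes GenericGF.QR_CODE_FIELD_256 and the ReedSolomonEncoder from the same ZXing package, and the codeword layout is illustrative only:

int[] codewords = new int[15];           // 5 data codewords followed by 10 EC codewords
codewords[0] = 42; codewords[1] = 7;     // illustrative data
new ReedSolomonEncoder(GenericGF.QR_CODE_FIELD_256).encode(codewords, 10);
codewords[3] ^= 0x55;                    // corrupt one codeword "in transit"
ReedSolomonDecoder decoder = new ReedSolomonDecoder(GenericGF.QR_CODE_FIELD_256);
try {
  int corrected = decoder.decodeWithECCount(codewords, 10); // repairs in place, returns 1 here
} catch (ReedSolomonException e) {
  // too many errors to correct
}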
framework_DateCellDayEvent_isTimeRangeTooSmall | /**
 * Checks whether the given time range is long enough for events to be rendered.
 *
 * @param start range start in milliseconds
 * @param end range end in milliseconds
 * @return {@code true} if the range is at least the minimum time range, {@code false} otherwise
*/
private boolean isTimeRangeTooSmall(long start, long end) {
return (end - start) >= getMinTimeRange();
} | 3.68 |
hbase_RestoreSnapshotHelper_restoreHdfsMobRegions | /**
* Restore specified mob regions by restoring content to the snapshot state.
*/
private void restoreHdfsMobRegions(final ThreadPoolExecutor exec,
final Map<String, SnapshotRegionManifest> regionManifests, final List<RegionInfo> regions)
throws IOException {
if (regions == null || regions.isEmpty()) return;
ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() {
@Override
public void editRegion(final RegionInfo hri) throws IOException {
restoreMobRegion(hri, regionManifests.get(hri.getEncodedName()));
}
});
} | 3.68 |
flink_FileSourceSplit_length | /** Returns the number of bytes in the file region described by this source split. */
public long length() {
return length;
} | 3.68 |
morf_SqlDialect_getSqlForAverage | /**
* Converts the average function into SQL.
*
* @param function the function details
* @return a string representation of the SQL
*/
protected String getSqlForAverage(Function function) {
return "AVG(" + getSqlFrom(function.getArguments().get(0)) + ")";
} | 3.68 |
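A short illustration of what this method produces; the column name is hypothetical and its rendering is delegated to getSqlFrom:

// For an AVERAGE function whose single argument renders as "salary",
// getSqlForAverage(...) returns the string:
//   AVG(salary)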
framework_FilesystemContainer_getSize | /**
* Gets the size of this file.
*
 * @return the file size in bytes, or 0 if this item is a directory
*/
public long getSize() {
if (file.isDirectory()) {
return 0;
}
return file.length();
} | 3.68 |
incubator-hugegraph-toolchain_PropertyIndexService_list | /**
 * The sorted result looks like the following when the search content is 'name':
* --------------+------------------------+---------------------------------
* base_value | index label name | fields
* --------------+------------------------+---------------------------------
* xxxname | xxxByName | name
* --------------+------------------------+---------------------------------
* | personByName | name
* person +------------------------+---------------------------------
* | personByAgeAndName | age name
* --------------+------------------------+---------------------------------
* | softwareByName | name
* software +------------------------+---------------------------------
 * | softwareByPriceAndName | price name
* --------------+------------------------+---------------------------------
*/
public IPage<PropertyIndex> list(int connId, HugeType type, String content,
int pageNo, int pageSize) {
HugeClient client = this.client(connId);
List<IndexLabel> indexLabels = client.schema().getIndexLabels();
Map<String, List<PropertyIndex>> matchedResults = new HashMap<>();
Map<String, List<PropertyIndex>> unMatchResults = new HashMap<>();
for (IndexLabel indexLabel : indexLabels) {
if (!indexLabel.baseType().equals(type)) {
continue;
}
String baseValue = indexLabel.baseValue();
List<PropertyIndex> groupedIndexes;
// Collect index labels whose base value contains the content
boolean match = baseValue.contains(content);
if (match) {
groupedIndexes = matchedResults.computeIfAbsent(baseValue,
k -> new ArrayList<>());
} else {
groupedIndexes = unMatchResults.computeIfAbsent(baseValue,
k -> new ArrayList<>());
}
match = match || indexLabel.name().contains(content) ||
indexLabel.indexFields().stream()
.anyMatch(f -> f.contains(content));
if (match) {
groupedIndexes.add(convert(indexLabel));
}
}
// Sort matched results by relevance
if (!StringUtils.isEmpty(content)) {
for (Map.Entry<String, List<PropertyIndex>> entry :
matchedResults.entrySet()) {
List<PropertyIndex> groupedIndexes = entry.getValue();
groupedIndexes.sort(new Comparator<PropertyIndex>() {
final int highScore = 2;
final int lowScore = 1;
@Override
public int compare(PropertyIndex o1, PropertyIndex o2) {
int o1Score = 0;
if (o1.getName().contains(content)) {
o1Score += highScore;
}
if (o1.getFields().stream()
.anyMatch(field -> field.contains(content))) {
o1Score += lowScore;
}
int o2Score = 0;
if (o2.getName().contains(content)) {
o2Score += highScore;
}
if (o2.getFields().stream()
.anyMatch(field -> field.contains(content))) {
o2Score += lowScore;
}
return o2Score - o1Score;
}
});
}
}
List<PropertyIndex> all = new ArrayList<>();
matchedResults.values().forEach(all::addAll);
unMatchResults.values().forEach(all::addAll);
return PageUtil.page(all, pageNo, pageSize);
} | 3.68 |
hadoop_TypedBytesInput_read | /**
* Reads a typed bytes sequence and converts it to a Java object. The first
* byte is interpreted as a type code, and then the right number of
* subsequent bytes are read depending on the obtained type.
* @return the obtained object or null when the end of the file is reached
* @throws IOException
*/
public Object read() throws IOException {
int code = 1;
try {
code = in.readUnsignedByte();
} catch (EOFException eof) {
return null;
}
if (code == Type.BYTES.code) {
return new Buffer(readBytes());
} else if (code == Type.BYTE.code) {
return readByte();
} else if (code == Type.BOOL.code) {
return readBool();
} else if (code == Type.INT.code) {
return readInt();
} else if (code == Type.LONG.code) {
return readLong();
} else if (code == Type.FLOAT.code) {
return readFloat();
} else if (code == Type.DOUBLE.code) {
return readDouble();
} else if (code == Type.STRING.code) {
return readString();
} else if (code == Type.VECTOR.code) {
return readVector();
} else if (code == Type.LIST.code) {
return readList();
} else if (code == Type.MAP.code) {
return readMap();
} else if (code == Type.MARKER.code) {
return null;
} else if (50 <= code && code <= 200) { // application-specific typecodes
return new Buffer(readBytes());
} else {
throw new RuntimeException("unknown type");
}
} | 3.68 |
framework_HierarchyMapper_expand | /**
* Expands the given item.
*
* @param item
* the item to expand
* @param position
* the index of the item
* @return range of rows added by expanding the item
*/
public Range expand(T item, Integer position) {
if (doExpand(item) && position != null) {
return Range.withLength(position + 1,
(int) getHierarchy(item, false).count());
}
return Range.emptyRange();
} | 3.68 |
hadoop_ManifestStoreOperations_getEtag | /**
* Extract an etag from a status if the conditions are met.
* If the conditions are not met, return null or ""; they will
* both be treated as "no etags available"
* <pre>
* 1. The status is of a type which the implementation recognizes
* as containing an etag.
* 2. After casting the etag field can be retrieved
* 3. and that value is non-null/non-empty.
* </pre>
 * @param status status, which may be null or any subclass of FileStatus.
* @return either a valid etag, or null or "".
*/
public String getEtag(FileStatus status) {
return ManifestCommitterSupport.getEtag(status);
} | 3.68 |
hbase_AbstractFSWALProvider_isMetaFile | /** Returns True if String ends in {@link #META_WAL_PROVIDER_ID} */
public static boolean isMetaFile(String p) {
return p != null && p.endsWith(META_WAL_PROVIDER_ID);
} | 3.68 |
hbase_KeyValue_getDelimiterInReverse | /**
* Find index of passed delimiter walking from end of buffer backwards.
* @param b the kv serialized byte[] to process
* @param offset the offset in the byte[]
* @param length the length in the byte[]
 * @param delimiter input delimiter to fetch index from end
* @return Index of delimiter
*/
public static int getDelimiterInReverse(final byte[] b, final int offset, final int length,
final int delimiter) {
if (b == null) {
throw new IllegalArgumentException("Passed buffer is null");
}
int result = -1;
for (int i = (offset + length) - 1; i >= offset; i--) {
if (b[i] == delimiter) {
result = i;
break;
}
}
return result;
} | 3.68 |
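A small worked example (a sketch using plain JDK classes) of the reverse search:

byte[] b = "a,b,c".getBytes(java.nio.charset.StandardCharsets.UTF_8);
// Walks backwards from index 4 ('c'); the last ',' sits at index 3.
int idx = KeyValue.getDelimiterInReverse(b, 0, b.length, ',');
// idx == 3; if the delimiter never occurs in the range, -1 is returned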
shardingsphere-elasticjob_JobRegistry_registerJob | /**
* Register job.
*
* @param jobName job name
* @param jobScheduleController job schedule controller
*/
public void registerJob(final String jobName, final JobScheduleController jobScheduleController) {
schedulerMap.put(jobName, jobScheduleController);
} | 3.68 |
morf_AbstractSelectStatement_innerJoin | /**
* @param subSelect the sub select statement to join on to
* @return a new select statement with the change applied.
*
* @deprecated Use {@link #crossJoin(SelectStatement)} to do a cross join;
* or add join conditions for {@link #innerJoin(SelectStatement, Criterion)}
* to make this an inner join.
*/
@Deprecated
@SuppressWarnings("deprecation")
public T innerJoin(SelectStatement subSelect) {
return copyOnWriteOrMutate(
b -> b.innerJoin(subSelect),
() -> joins.add(new Join(JoinType.INNER_JOIN, subSelect))
);
} | 3.68 |
flink_AbstractReader_handleEvent | /**
* Handles the event and returns whether the reader reached an end-of-stream event (either the
 * end of the whole stream or the end of a superstep).
*/
protected boolean handleEvent(AbstractEvent event) throws IOException {
final Class<?> eventType = event.getClass();
try {
// ------------------------------------------------------------
// Runtime events
// ------------------------------------------------------------
// This event is also checked at the (single) input gate to release the respective
// channel, at which it was received.
if (eventType == EndOfPartitionEvent.class) {
return true;
} else if (eventType == EndOfSuperstepEvent.class) {
return incrementEndOfSuperstepEventAndCheck();
}
// ------------------------------------------------------------
// Task events (user)
// ------------------------------------------------------------
else if (event instanceof TaskEvent) {
taskEventHandler.publish((TaskEvent) event);
return false;
} else {
throw new IllegalStateException(
"Received unexpected event of type " + eventType + " at reader.");
}
} catch (Throwable t) {
throw new IOException(
"Error while handling event of type " + eventType + ": " + t.getMessage(), t);
}
} | 3.68 |
hadoop_HSAuditLogger_logFailure | /**
* Create a readable and parseable audit log string for a failed event.
*
* @param user
* User who made the service request.
* @param operation
* Operation requested by the user.
* @param perm
* Target permissions.
* @param target
* The target on which the operation is being performed.
* @param description
* Some additional information as to why the operation failed.
*
* <br>
* <br>
* Note that the {@link HSAuditLogger} uses tabs ('\t') as a key-val
 * delimiter and hence the value fields should not contain tabs
* ('\t').
*/
public static void logFailure(String user, String operation, String perm,
String target, String description) {
if (LOG.isWarnEnabled()) {
LOG.warn(createFailureLog(user, operation, perm, target, description));
}
} | 3.68 |
hudi_HoodieWriteStat_setPath | /**
 * Set the path relative to the given basePath.
*/
public void setPath(Path basePath, Path path) {
this.path = path.toString().replace(basePath + "/", "");
} | 3.68 |
framework_FileDownloader_handleConnectorRequest | /**
* {@inheritDoc}
*
* @throws IOException
* if something goes wrong with the download or the user
* cancelled the file download process.
*/
@Override
public boolean handleConnectorRequest(VaadinRequest request,
VaadinResponse response, String path) throws IOException {
if (!path.matches("dl(/.*)?")) {
// Ignore if it isn't for us
return false;
}
VaadinSession session = getSession();
session.lock();
DownloadStream stream;
try {
Resource resource = getFileDownloadResource();
if (!(resource instanceof ConnectorResource)) {
return false;
}
stream = ((ConnectorResource) resource).getStream();
String contentDisposition = stream
.getParameter(DownloadStream.CONTENT_DISPOSITION);
if (contentDisposition == null) {
contentDisposition = "attachment; " + DownloadStream
.getContentDispositionFilename(stream.getFileName());
}
stream.setParameter(DownloadStream.CONTENT_DISPOSITION,
contentDisposition);
// Content-Type to block eager browser plug-ins from hijacking
// the file
if (isOverrideContentType()) {
stream.setContentType("application/octet-stream;charset=UTF-8");
}
} finally {
session.unlock();
}
stream.writeResponse(request, response);
return true;
} | 3.68 |
hbase_RegionMover_includeExcludeRegionServers | /**
* Designates or excludes the servername whose hostname and port portion matches the list given in
* the file. Example:<br>
 * If you want to designate RSs, suppose designatedFile has RS1, regionServers has RS1, RS2 and
* RS3. When we call includeExcludeRegionServers(designatedFile, regionServers, true), RS2 and RS3
* are removed from regionServers list so that regions can move to only RS1. If you want to
* exclude RSs, suppose excludeFile has RS1, regionServers has RS1, RS2 and RS3. When we call
* includeExcludeRegionServers(excludeFile, servers, false), RS1 is removed from regionServers
* list so that regions can move to only RS2 and RS3.
*/
private void includeExcludeRegionServers(String fileName, List<ServerName> regionServers,
boolean isInclude) throws IOException {
if (fileName != null) {
List<String> servers = readServersFromFile(fileName);
if (servers.isEmpty()) {
LOG.warn("No servers provided in the file: {}." + fileName);
return;
}
Iterator<ServerName> i = regionServers.iterator();
while (i.hasNext()) {
String rs = i.next().getServerName();
String rsPort = rs.split(ServerName.SERVERNAME_SEPARATOR)[0].toLowerCase() + ":"
+ rs.split(ServerName.SERVERNAME_SEPARATOR)[1];
if (isInclude != servers.contains(rsPort)) {
i.remove();
}
}
}
} | 3.68 |
hadoop_MountTableStoreImpl_checkMountTablePermission | /**
 * Check parent path permissions recursively. It needs WRITE permission
 * on the nearest parent entry and EXECUTE permission on the other ancestors.
* @param src mount entry being checked
* @throws AccessControlException if mount table cannot be accessed
*/
private void checkMountTablePermission(final String src) throws IOException {
String parent = src.substring(0, src.lastIndexOf(Path.SEPARATOR));
checkMountTableEntryPermission(parent, FsAction.WRITE);
while (!parent.isEmpty()) {
parent = parent.substring(0, parent.lastIndexOf(Path.SEPARATOR));
checkMountTableEntryPermission(parent, FsAction.EXECUTE);
}
} | 3.68 |
framework_LayoutDependencyTree_hasHorizontalConnectorToLayout | /**
* Returns whether there are any managed layouts waiting for horizontal
* layouting.
*
* @return {@code true} if horizontal layouting queue is not empty,
* {@code false} otherwise
*/
public boolean hasHorizontalConnectorToLayout() {
return !getLayoutQueue(HORIZONTAL).isEmpty();
} | 3.68 |
rocketmq-connect_Base64Util_base64Encode | /**
 * Base64-encode the given bytes.
 *
 * @param in the bytes to encode
 * @return the Base64-encoded string, or null if the input is null
 */
public static String base64Encode(byte[] in) {
if (in == null) {
return null;
}
return Base64.getEncoder().encodeToString(in);
} | 3.68 |
hbase_ThriftUtilities_rowMutationsFromThrift | /**
* Creates a {@link RowMutations} (HBase) from a {@link TRowMutations} (Thrift)
* @param in the <code>TRowMutations</code> to convert
* @return converted <code>RowMutations</code>
*/
public static RowMutations rowMutationsFromThrift(TRowMutations in) throws IOException {
List<TMutation> mutations = in.getMutations();
RowMutations out = new RowMutations(in.getRow(), mutations.size());
for (TMutation mutation : mutations) {
if (mutation.isSetPut()) {
out.add(putFromThrift(mutation.getPut()));
}
if (mutation.isSetDeleteSingle()) {
out.add(deleteFromThrift(mutation.getDeleteSingle()));
}
}
return out;
} | 3.68 |
hadoop_DynamicIOStatisticsBuilder_withAtomicIntegerMinimum | /**
* Add a minimum statistic to dynamically return the
* latest value of the source.
* @param key key of this statistic
* @param source atomic int minimum
* @return the builder.
*/
public DynamicIOStatisticsBuilder withAtomicIntegerMinimum(String key,
AtomicInteger source) {
withLongFunctionMinimum(key, s -> source.get());
return this;
} | 3.68 |
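A hedged usage sketch of the builder method above; it assumes the builder is obtained from IOStatisticsBinding.dynamicIOStatistics() and finished with build(), which may differ between Hadoop versions:

AtomicInteger minLatency = new AtomicInteger(Integer.MAX_VALUE);
IOStatistics stats = IOStatisticsBinding.dynamicIOStatistics()
    .withAtomicIntegerMinimum("request.min.latency", minLatency)
    .build();
// Every read of "request.min.latency" reflects the latest value of minLatency.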
hbase_ColumnCountGetFilter_parseFrom | /**
* Parse a serialized representation of {@link ColumnCountGetFilter}
* @param pbBytes A pb serialized {@link ColumnCountGetFilter} instance
* @return An instance of {@link ColumnCountGetFilter} made from <code>bytes</code>
* @throws DeserializationException if an error occurred
* @see #toByteArray
*/
public static ColumnCountGetFilter parseFrom(final byte[] pbBytes)
throws DeserializationException {
FilterProtos.ColumnCountGetFilter proto;
try {
proto = FilterProtos.ColumnCountGetFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
return new ColumnCountGetFilter(proto.getLimit());
} | 3.68 |
hbase_WALEntryStream_getPosition | /** Returns the position of the last Entry returned by next() */
public long getPosition() {
return currentPositionOfEntry;
} | 3.68 |
framework_OptionGroupElement_getValue | /**
* Return value of the selected option in the option group.
*
* @return value of the selected option in the option group
*/
public String getValue() {
List<WebElement> options = findElements(bySelectOption);
for (WebElement option : options) {
WebElement checkedItem;
checkedItem = option.findElement(By.tagName("input"));
String checked = checkedItem.getAttribute("checked");
if (checked != null
&& checkedItem.getAttribute("checked").equals("true")) {
return option.findElement(By.tagName("label")).getText();
}
}
return null;
} | 3.68 |
hudi_HoodieMetaSyncOperations_createDatabase | /**
* Create a database in the metastore.
*/
default void createDatabase(String databaseName) {
} | 3.68 |
hadoop_ClasspathConstructor_localJVMClasspath | /**
* Get the local JVM classpath split up
* @return the list of entries on the JVM classpath env var
*/
public Collection<String> localJVMClasspath() {
return splitClasspath(System.getProperty("java.class.path"));
} | 3.68 |
dubbo_NettyHttpRestServer_getNettyServer | /**
 * For the triple protocol to override.
 *
 * @return the netty server instance
 */
protected NettyServer getNettyServer() {
return new NettyServer();
} | 3.68 |
hadoop_IOStatisticsLogging_logIOStatisticsAtDebug | /**
* Extract any statistics from the source and log to
* this class's log at debug, if
* the log is set to log at debug.
* No-op if logging is not at debug or the source is null/of
* the wrong type/doesn't provide statistics.
* @param message message for log -this must contain "{}" for the
* statistics report to actually get logged.
* @param source source object
*/
public static void logIOStatisticsAtDebug(
String message,
Object source) {
logIOStatisticsAtDebug(LOG, message, source);
} | 3.68 |
hbase_BulkLoadHFilesTool_prepareHFileQueue | /**
* Prepare a collection of {@code LoadQueueItem} from list of source hfiles contained in the
* passed directory and validates whether the prepared queue has all the valid table column
* families in it.
* @param hfilesDir directory containing list of hfiles to be loaded into the table
* @param queue queue which needs to be loaded into the table
* @param validateHFile if true hfiles will be validated for its format
* @param silence true to ignore unmatched column families
* @throws IOException If any I/O or network error occurred
*/
public static void prepareHFileQueue(Configuration conf, AsyncClusterConnection conn,
TableName tableName, Path hfilesDir, Deque<LoadQueueItem> queue, boolean validateHFile,
boolean silence) throws IOException {
discoverLoadQueue(conf, queue, hfilesDir, validateHFile);
validateFamiliesInHFiles(FutureUtils.get(conn.getAdmin().getDescriptor(tableName)), queue,
silence);
} | 3.68 |
flink_ProcTimeMiniBatchAssignerOperator_processWatermark | /**
* Override the base implementation to completely ignore watermarks propagated from upstream (we
* rely only on the {@link AssignerWithPeriodicWatermarks} to emit watermarks from here).
*/
@Override
public void processWatermark(Watermark mark) throws Exception {
// if we receive a Long.MAX_VALUE watermark we forward it since it is used
// to signal the end of input and to not block watermark progress downstream
if (mark.getTimestamp() == Long.MAX_VALUE && currentWatermark != Long.MAX_VALUE) {
currentWatermark = Long.MAX_VALUE;
output.emitWatermark(mark);
}
} | 3.68 |
flink_AnswerFormatter_format | /**
 * The TPC-DS answer set comes in three kinds of formats; recognize them and convert to a unified format.
 *
 * @param originFile origin answer set file from TPC-DS.
 * @param destFile file to save the formatted answer set.
 * @throws Exception if reading or writing the answer set files fails
*/
private static void format(File originFile, File destFile) throws Exception {
BufferedReader reader = new BufferedReader(new FileReader(originFile));
BufferedWriter writer = new BufferedWriter(new FileWriter(destFile));
String line;
List<Integer> colLengthList;
List<String> content = new ArrayList<>();
while ((line = reader.readLine()) != null) {
content.add(line);
}
if (isFormat1(content)) {
colLengthList =
Arrays.stream(content.get(1).split(REGEX_SPLIT_BAR))
.map(col -> col.length())
.collect(Collectors.toList());
writeContent(writer, content, colLengthList);
} else if (isFormat2(content)) {
colLengthList =
Arrays.stream(content.get(1).split(RESULT_HEAD_STRING_SPACE))
.map(col -> col.length())
.collect(Collectors.toList());
writeContent(writer, content, colLengthList);
} else {
writeContent(writer, content, null);
}
reader.close();
writer.close();
} | 3.68 |
flink_SingleOutputStreamOperator_cache | /**
 * Cache the intermediate result of the transformation. Only bounded streams are supported, and
 * currently only blocking mode is supported. The cache is generated lazily the first time the
 * intermediate result is computed. The cache is cleared when {@link
 * CachedDataStream#invalidate()} is called or when the {@link StreamExecutionEnvironment} is closed.
 *
 * @return CachedDataStream that can be used in later jobs to reuse the cached intermediate result.
*/
@PublicEvolving
public CachedDataStream<T> cache() {
if (!(this.transformation instanceof PhysicalTransformation)) {
throw new IllegalStateException(
"Cache can only be called with physical transformation or side output transformation");
}
return new CachedDataStream<>(this.environment, this.transformation);
} | 3.68 |
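A hedged sketch of how the cached stream is meant to be used, based on the contract in the Javadoc above; the pipeline contents are illustrative only:

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setRuntimeMode(RuntimeExecutionMode.BATCH);          // only bounded streams are supported
CachedDataStream<Integer> cached = env.fromElements(1, 2, 3)
        .map(x -> x * x)
        .cache();                                        // materialized lazily on first use
cached.print();
env.execute();                                           // first job computes and caches the result
cached.invalidate();                                     // drop the cached intermediate result when done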
hadoop_RouterResolver_getMembershipStore | /**
* Get the Membership store.
*
* @return Membership store.
*/
protected MembershipStore getMembershipStore() {
StateStoreService stateStore = router.getStateStore();
if (stateStore == null) {
return null;
}
return stateStore.getRegisteredRecordStore(MembershipStore.class);
} | 3.68 |
hudi_TypedProperties_putAll | /**
* This method is introduced to get rid of the scala compile error:
* <pre>
* <code>
* ambiguous reference to overloaded definition,
* both method putAll in class Properties of type (x$1: java.util.Map[_, _])Unit
* and method putAll in class Hashtable of type (x$1: java.util.Map[_ <: Object, _ <: Object])Unit
* match argument types (java.util.HashMap[Nothing,Nothing])
* properties.putAll(new java.util.HashMap())
* </code>
* </pre>
*
* @param props The properties
* @param items The new items to put
*/
public static void putAll(TypedProperties props, Map<?, ?> items) {
props.putAll(items);
} | 3.68 |
flink_AbstractFsCheckpointStorageAccess_initializeLocationForSavepoint | /**
* Creates a file system based storage location for a savepoint.
*
 * <p>This method implements the logic that decides which location to use (given optional
* parameters for a configured location and a location passed for this specific savepoint) and
* how to name and initialize the savepoint directory.
*
* @param externalLocationPointer The target location pointer for the savepoint. Must be a valid
* URI. Null, if not supplied.
* @param checkpointId The checkpoint ID of the savepoint.
* @return The checkpoint storage location for the savepoint.
* @throws IOException Thrown if the target directory could not be created.
*/
@Override
public CheckpointStorageLocation initializeLocationForSavepoint(
@SuppressWarnings("unused") long checkpointId, @Nullable String externalLocationPointer)
throws IOException {
// determine where to write the savepoint to
final Path savepointBasePath;
if (externalLocationPointer != null) {
savepointBasePath = new Path(externalLocationPointer);
} else if (defaultSavepointDirectory != null) {
savepointBasePath = defaultSavepointDirectory;
} else {
throw new IllegalArgumentException(
"No savepoint location given and no default location configured.");
}
// generate the savepoint directory
final FileSystem fs = savepointBasePath.getFileSystem();
final String prefix = "savepoint-" + jobId.toString().substring(0, 6) + '-';
Exception latestException = null;
for (int attempt = 0; attempt < 10; attempt++) {
final Path path = new Path(savepointBasePath, FileUtils.getRandomFilename(prefix));
try {
if (fs.mkdirs(path)) {
// we make the path qualified, to make it independent of default schemes and
// authorities
final Path qp = path.makeQualified(fs);
return createSavepointLocation(fs, qp);
}
} catch (Exception e) {
latestException = e;
}
}
throw new IOException(
"Failed to create savepoint directory at " + savepointBasePath, latestException);
} | 3.68 |
flink_FlinkContainersSettings_baseImage | /**
* Sets the {@code baseImage} and returns a reference to this Builder enabling method
* chaining.
*
* @param baseImage The {@code baseImage} to set.
* @return A reference to this Builder.
*/
public Builder baseImage(String baseImage) {
this.baseImage = baseImage;
this.buildFromFlinkDist = false;
return this;
} | 3.68 |
flink_PartitionedFile_getIndexEntryOffset | /**
* Returns the index entry offset of the target region and subpartition in the index file. Both
* region index and subpartition index start from 0.
*/
private long getIndexEntryOffset(int region, int subpartition) {
checkArgument(region >= 0 && region < getNumRegions(), "Illegal target region.");
checkArgument(
subpartition >= 0 && subpartition < numSubpartitions,
"Subpartition index out of bound.");
return (((long) region) * numSubpartitions + subpartition) * INDEX_ENTRY_SIZE;
} | 3.68 |
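A worked example of the offset arithmetic, assuming (for illustration only) numSubpartitions = 4 and INDEX_ENTRY_SIZE = 16 bytes:

// region 2, subpartition 3 -> entry number 2 * 4 + 3 = 11
long offset = (((long) 2) * 4 + 3) * 16; // 11 * 16 = 176 bytes into the index file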
hbase_Writables_getWritable | /**
* Set bytes into the passed Writable by calling its
* {@link Writable#readFields(java.io.DataInput)}.
* @param bytes serialized bytes
* @param offset offset into array
* @param length length of data
* @param w An empty Writable (usually made by calling the null-arg constructor).
* @return The passed Writable after its readFields has been called fed by the passed
* <code>bytes</code> array or IllegalArgumentException if passed null or an empty
* <code>bytes</code> array.
* @throws IOException e
*/
public static Writable getWritable(final byte[] bytes, final int offset, final int length,
final Writable w) throws IOException {
if (bytes == null || length <= 0) {
throw new IllegalArgumentException("Can't build a writable with empty " + "bytes array");
}
if (w == null) {
throw new IllegalArgumentException("Writable cannot be null");
}
DataInputBuffer in = new DataInputBuffer();
try {
in.reset(bytes, offset, length);
w.readFields(in);
return w;
} finally {
in.close();
}
} | 3.68 |
flink_GroupReduceNode_getOperator | /**
* Gets the operator represented by this optimizer node.
*
* @return The operator represented by this optimizer node.
*/
@Override
public GroupReduceOperatorBase<?, ?, ?> getOperator() {
return (GroupReduceOperatorBase<?, ?, ?>) super.getOperator();
} | 3.68 |
streampipes_ResetManagement_reset | /**
* Remove all configurations for this user. This includes:
* [pipeline assembly cache, pipelines, adapters, files]
*
 * @param username the user whose configurations are removed
*/
public static void reset(String username) {
logger.info("Start resetting the system");
// Hide the tutorial for the user
UserResourceManager.setHideTutorial(username, true);
// Clear pipeline assembly Cache
PipelineCacheManager.removeCachedPipeline(username);
PipelineCanvasMetadataCacheManager.removeCanvasMetadataFromCache(username);
// Stop and delete all pipelines
List<Pipeline> allPipelines = PipelineManager.getAllPipelines();
allPipelines.forEach(pipeline -> {
PipelineManager.stopPipeline(pipeline.getPipelineId(), true);
PipelineManager.deletePipeline(pipeline.getPipelineId());
});
// Stop and delete all adapters
AdapterMasterManagement adapterMasterManagement = new AdapterMasterManagement();
try {
List<AdapterDescription> allAdapters = adapterMasterManagement.getAllAdapterInstances();
allAdapters.forEach(adapterDescription -> {
try {
adapterMasterManagement.deleteAdapter(adapterDescription.getElementId());
} catch (AdapterException e) {
logger.error("Failed to delete adapter with id: " + adapterDescription.getElementId(), e);
}
});
} catch (AdapterException e) {
logger.error("Failed to load all adapter descriptions", e);
}
// Stop and delete all files
List<FileMetadata> allFiles = FileManager.getAllFiles();
allFiles.forEach(fileMetadata -> {
FileManager.deleteFile(fileMetadata.getFileId());
});
// Remove all data in data lake
IDataExplorerSchemaManagement dataLakeMeasureManagement = new DataExplorerSchemaManagement();
DataExplorerQueryManagement dataExplorerQueryManagement =
new DataExplorerQueryManagement(dataLakeMeasureManagement);
List<DataLakeMeasure> allMeasurements = dataLakeMeasureManagement.getAllMeasurements();
allMeasurements.forEach(measurement -> {
boolean isSuccessDataLake = dataExplorerQueryManagement.deleteData(measurement.getMeasureName());
if (isSuccessDataLake) {
dataLakeMeasureManagement.deleteMeasurementByName(measurement.getMeasureName());
}
});
// Remove all data views widgets
IDataExplorerWidgetStorage widgetStorage =
StorageDispatcher.INSTANCE.getNoSqlStore().getDataExplorerWidgetStorage();
widgetStorage.getAllDataExplorerWidgets().forEach(widget -> {
widgetStorage.deleteDataExplorerWidget(widget.getId());
});
// Remove all data views
IDashboardStorage dataLakeDashboardStorage =
StorageDispatcher.INSTANCE.getNoSqlStore().getDataExplorerDashboardStorage();
dataLakeDashboardStorage.getAllDashboards().forEach(dashboard -> {
dataLakeDashboardStorage.deleteDashboard(dashboard.getCouchDbId());
});
// Remove all dashboard widgets
IDashboardWidgetStorage dashboardWidgetStorage =
StorageDispatcher.INSTANCE.getNoSqlStore().getDashboardWidgetStorage();
dashboardWidgetStorage.getAllDashboardWidgets().forEach(widget -> {
dashboardWidgetStorage.deleteDashboardWidget(widget.getId());
});
// Remove all dashboards
IDashboardStorage dashboardStorage = StorageDispatcher.INSTANCE.getNoSqlStore().getDashboardStorage();
dashboardStorage.getAllDashboards().forEach(dashboard -> {
dashboardStorage.deleteDashboard(dashboard.getCouchDbId());
});
logger.info("Resetting the system was completed");
} | 3.68 |
framework_LayoutDependencyTree_hasConnectorsToMeasure | /**
* Returns whether there are any components waiting for either horizontal or
* vertical measuring.
*
* @return {@code true} if either measure queue contains anything,
* {@code false} otherwise
*/
public boolean hasConnectorsToMeasure() {
return !measureQueueInDirection[HORIZONTAL].isEmpty()
|| !measureQueueInDirection[VERTICAL].isEmpty();
} | 3.68 |
graphhopper_MiniPerfTest_getSum | /**
* @return time for all calls accumulated, in ms
*/
public double getSum() {
return fullTime / NS_PER_MS;
} | 3.68 |
framework_ValueProvider_identity | /**
* Returns a value provider that always returns its input argument.
*
* @param <T>
* the type of the input and output objects to the function
* @return a function that always returns its input argument
*/
public static <T> ValueProvider<T, T> identity() {
return t -> t;
} | 3.68 |
hbase_SplitTableRegionProcedure_postRollBackSplitRegion | /**
* Action after rollback a split table region action.
* @param env MasterProcedureEnv
*/
private void postRollBackSplitRegion(final MasterProcedureEnv env) throws IOException {
final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
if (cpHost != null) {
cpHost.postRollBackSplitRegionAction(getUser());
}
} | 3.68 |
flink_RpcEndpoint_getSelfGateway | /**
* Returns a self gateway of the specified type which can be used to issue asynchronous calls
* against the RpcEndpoint.
*
* <p>IMPORTANT: The self gateway type must be implemented by the RpcEndpoint. Otherwise the
* method will fail.
*
* @param selfGatewayType class of the self gateway type
* @param <C> type of the self gateway to create
* @return Self gateway of the specified type which can be used to issue asynchronous rpcs
*/
public <C extends RpcGateway> C getSelfGateway(Class<C> selfGatewayType) {
return rpcService.getSelfGateway(selfGatewayType, rpcServer);
} | 3.68 |
hbase_TokenProvider_isAllowedDelegationTokenOp | /**
* @param ugi A user group information.
* @return true if delegation token operation is allowed
*/
private boolean isAllowedDelegationTokenOp(UserGroupInformation ugi) throws IOException {
AuthenticationMethod authMethod = ugi.getAuthenticationMethod();
if (authMethod == AuthenticationMethod.PROXY) {
authMethod = ugi.getRealUser().getAuthenticationMethod();
}
if (
authMethod != AuthenticationMethod.KERBEROS && authMethod != AuthenticationMethod.KERBEROS_SSL
&& authMethod != AuthenticationMethod.CERTIFICATE
) {
return false;
}
return true;
} | 3.68 |
flink_WebLogAnalysis_coGroup | /**
* If the visit iterator is empty, all pairs of the rank iterator are emitted. Otherwise, no
* pair is emitted.
*
* <p>Output Format: 0: RANK 1: URL 2: AVG_DURATION
*/
@Override
public void coGroup(
Iterable<Tuple3<Integer, String, Integer>> ranks,
Iterable<Tuple1<String>> visits,
Collector<Tuple3<Integer, String, Integer>> out) {
// Check if there is an entry in the visits relation
if (!visits.iterator().hasNext()) {
for (Tuple3<Integer, String, Integer> next : ranks) {
// Emit all rank pairs
out.collect(next);
}
}
} | 3.68 |
flink_SlidingProcessingTimeWindows_of | /**
* Creates a new {@code SlidingProcessingTimeWindows} {@link WindowAssigner} that assigns
* elements to time windows based on the element timestamp and offset.
*
 * <p>For example, if you want one-hour windows that begin at the 15th minute of each hour, you can use
 * {@code of(Time.hours(1), Time.minutes(30), Time.minutes(15))}; you will then get time windows that
 * start at 0:15:00, 1:15:00, 2:15:00, etc.
 *
 * <p>Alternatively, if you live in a time zone that does not use UTC±00:00, such as China, which uses
 * UTC+08:00, and you want a time window with a size of one day that begins at every 00:00:00 of local
 * time, you may use {@code of(Time.days(1), Time.days(1), Time.hours(-8))}. The offset parameter is
 * {@code Time.hours(-8)} since UTC+08:00 is 8 hours ahead of UTC.
*
* @param size The size of the generated windows.
* @param slide The slide interval of the generated windows.
* @param offset The offset which window start would be shifted by.
* @return The time policy.
*/
public static SlidingProcessingTimeWindows of(Time size, Time slide, Time offset) {
return new SlidingProcessingTimeWindows(
size.toMilliseconds(), slide.toMilliseconds(), offset.toMilliseconds());
} | 3.68 |
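A hedged sketch of the three-argument variant in a keyed pipeline; the event type and its accessors are hypothetical:

// One-hour windows sliding every 10 minutes, aligned to hh:15 boundaries.
events.keyBy(e -> e.getUserId())
      .window(SlidingProcessingTimeWindows.of(Time.hours(1), Time.minutes(10), Time.minutes(15)))
      .reduce((a, b) -> a.merge(b));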
flink_DoubleValue_getValue | /**
* Returns the value of the encapsulated primitive double.
*
* @return the value of the encapsulated primitive double.
*/
public double getValue() {
return this.value;
} | 3.68 |
hbase_ThriftUtilities_getFromThrift | /**
* Creates a {@link Get} (HBase) from a {@link TGet} (Thrift). This ignores any timestamps set on
* {@link TColumn} objects.
* @param in the <code>TGet</code> to convert
* @return <code>Get</code> object
* @throws IOException if an invalid time range or max version parameter is given
*/
public static Get getFromThrift(TGet in) throws IOException {
Get out = new Get(in.getRow());
// Timestamp overwrites time range if both are set
if (in.isSetTimestamp()) {
out.setTimestamp(in.getTimestamp());
} else if (in.isSetTimeRange()) {
out.setTimeRange(in.getTimeRange().getMinStamp(), in.getTimeRange().getMaxStamp());
}
if (in.isSetMaxVersions()) {
out.readVersions(in.getMaxVersions());
}
if (in.isSetFilterString()) {
ParseFilter parseFilter = new ParseFilter();
out.setFilter(parseFilter.parseFilterString(in.getFilterString()));
}
if (in.isSetAttributes()) {
addAttributes(out, in.getAttributes());
}
if (in.isSetAuthorizations()) {
out.setAuthorizations(new Authorizations(in.getAuthorizations().getLabels()));
}
if (in.isSetConsistency()) {
out.setConsistency(consistencyFromThrift(in.getConsistency()));
}
if (in.isSetTargetReplicaId()) {
out.setReplicaId(in.getTargetReplicaId());
}
if (in.isSetCacheBlocks()) {
out.setCacheBlocks(in.isCacheBlocks());
}
if (in.isSetStoreLimit()) {
out.setMaxResultsPerColumnFamily(in.getStoreLimit());
}
if (in.isSetStoreOffset()) {
out.setRowOffsetPerColumnFamily(in.getStoreOffset());
}
if (in.isSetExistence_only()) {
out.setCheckExistenceOnly(in.isExistence_only());
}
if (in.isSetColumns()) {
for (TColumn column : in.getColumns()) {
if (column.isSetQualifier()) {
out.addColumn(column.getFamily(), column.getQualifier());
} else {
out.addFamily(column.getFamily());
}
}
}
if (in.isSetFilterBytes()) {
out.setFilter(filterFromThrift(in.getFilterBytes()));
}
return out;
} | 3.68 |
hadoop_WorkRequest_getRetry | /**
* @return Number of previous attempts to process this work request.
*/
public int getRetry() {
return retry;
} | 3.68 |
framework_VAbstractSplitPanel_setEnabled | /**
* Sets this split panel enabled.
*
* @param enabled
* {@code true} if enabled, {@code false} if disabled
*/
public void setEnabled(boolean enabled) {
this.enabled = enabled;
} | 3.68 |
flink_FunctionIdentifier_normalizeName | /** Normalize a function name. */
public static String normalizeName(String name) {
return name.toLowerCase();
} | 3.68 |
pulsar_ManagedLedgerConfig_setLedgerOffloader | /**
* Set ledger offloader to use for offloading ledgers to longterm storage.
*
* @param offloader the ledger offloader to use
*/
public ManagedLedgerConfig setLedgerOffloader(LedgerOffloader offloader) {
this.ledgerOffloader = offloader;
return this;
} | 3.68 |
hadoop_DynamicIOStatisticsBuilder_withLongFunctionMinimum | /**
* Add a new evaluator to the minimum statistics.
* @param key key of this statistic
* @param eval evaluator for the statistic
* @return the builder.
*/
public DynamicIOStatisticsBuilder withLongFunctionMinimum(String key,
ToLongFunction<String> eval) {
activeInstance().addMinimumFunction(key, eval::applyAsLong);
return this;
} | 3.68 |
shardingsphere-elasticjob_JobConfigurationPOJO_fromJobConfiguration | /**
* Convert from job configuration.
*
* @param jobConfig job configuration
* @return job configuration POJO
*/
@SuppressWarnings("unchecked")
public static JobConfigurationPOJO fromJobConfiguration(final JobConfiguration jobConfig) {
JobConfigurationPOJO result = new JobConfigurationPOJO();
result.setJobName(jobConfig.getJobName());
result.setCron(jobConfig.getCron());
result.setTimeZone(jobConfig.getTimeZone());
result.setShardingTotalCount(jobConfig.getShardingTotalCount());
result.setShardingItemParameters(jobConfig.getShardingItemParameters());
result.setJobParameter(jobConfig.getJobParameter());
result.setMonitorExecution(jobConfig.isMonitorExecution());
result.setFailover(jobConfig.isFailover());
result.setMisfire(jobConfig.isMisfire());
result.setMaxTimeDiffSeconds(jobConfig.getMaxTimeDiffSeconds());
result.setReconcileIntervalMinutes(jobConfig.getReconcileIntervalMinutes());
result.setJobShardingStrategyType(jobConfig.getJobShardingStrategyType());
result.setJobExecutorThreadPoolSizeProviderType(jobConfig.getJobExecutorThreadPoolSizeProviderType());
result.setJobErrorHandlerType(jobConfig.getJobErrorHandlerType());
result.setJobListenerTypes(jobConfig.getJobListenerTypes());
jobConfig.getExtraConfigurations().stream()
.map(each -> TypedSPILoader.getService(YamlConfigurationConverter.class, each.getClass()).convertToYamlConfiguration(each)).forEach(result.getJobExtraConfigurations()::add);
result.setDescription(jobConfig.getDescription());
result.setProps(jobConfig.getProps());
result.setDisabled(jobConfig.isDisabled());
result.setOverwrite(jobConfig.isOverwrite());
result.setLabel(jobConfig.getLabel());
result.setStaticSharding(jobConfig.isStaticSharding());
return result;
} | 3.68 |
framework_WebBrowser_updateClientSideDetails | /**
* For internal use by VaadinServlet/VaadinPortlet only. Updates all
* properties in the class according to the given information.
*
* @param sw
* Screen width
* @param sh
* Screen height
* @param tzo
* TimeZone offset in minutes from GMT
* @param rtzo
* raw TimeZone offset in minutes from GMT (w/o DST adjustment)
* @param dstSavings
* the difference between the raw TimeZone and DST in minutes
* @param dstInEffect
* is DST currently active in the region or not?
* @param curDate
* the current date in milliseconds since the epoch
 * @param touchDevice whether the client is detected as a touch device
*/
void updateClientSideDetails(String sw, String sh, String tzo, String rtzo,
String dstSavings, String dstInEffect, String tzId, String curDate,
boolean touchDevice) {
if (sw != null) {
try {
screenHeight = Integer.parseInt(sh);
screenWidth = Integer.parseInt(sw);
} catch (final NumberFormatException e) {
screenHeight = screenWidth = -1;
}
}
if (tzo != null) {
try {
// browser->java conversion: min->ms, reverse sign
timezoneOffset = -Integer.parseInt(tzo) * 60 * 1000;
} catch (final NumberFormatException e) {
timezoneOffset = 0; // default gmt+0
}
}
if (rtzo != null) {
try {
// browser->java conversion: min->ms, reverse sign
rawTimezoneOffset = -Integer.parseInt(rtzo) * 60 * 1000;
} catch (final NumberFormatException e) {
rawTimezoneOffset = 0; // default gmt+0
}
}
if (dstSavings != null) {
try {
// browser->java conversion: min->ms
this.dstSavings = Integer.parseInt(dstSavings) * 60 * 1000;
} catch (final NumberFormatException e) {
this.dstSavings = 0; // default no savings
}
}
if (dstInEffect != null) {
this.dstInEffect = Boolean.parseBoolean(dstInEffect);
}
if (tzId == null || "undefined".equals(tzId)) {
timeZoneId = null;
} else {
timeZoneId = tzId;
}
if (curDate != null) {
try {
long curTime = Long.parseLong(curDate);
clientServerTimeDelta = curTime - new Date().getTime();
} catch (final NumberFormatException e) {
clientServerTimeDelta = 0;
}
}
this.touchDevice = touchDevice;
} | 3.68 |
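A worked example of the browser-to-Java conversion performed for the timezone fields (minutes with reversed sign become milliseconds):

// A browser in UTC+02:00 reports getTimezoneOffset() == -120 minutes.
String tzo = "-120";
int timezoneOffset = -Integer.parseInt(tzo) * 60 * 1000; // +7_200_000 ms, i.e. GMT+2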
flink_RequestedLocalProperties_parameterizeChannel | /**
* Parametrizes the local strategy fields of a channel such that the channel produces the
* desired local properties.
*
* @param channel The channel to parametrize.
*/
public void parameterizeChannel(Channel channel) {
LocalProperties current = channel.getLocalProperties();
if (isMetBy(current)) {
// we are met, all is good
channel.setLocalStrategy(LocalStrategy.NONE);
} else if (this.ordering != null) {
channel.setLocalStrategy(
LocalStrategy.SORT,
this.ordering.getInvolvedIndexes(),
this.ordering.getFieldSortDirections());
} else if (this.groupedFields != null) {
boolean[] dirs = new boolean[this.groupedFields.size()];
Arrays.fill(dirs, true);
channel.setLocalStrategy(
LocalStrategy.SORT, Utils.createOrderedFromSet(this.groupedFields), dirs);
} else {
channel.setLocalStrategy(LocalStrategy.NONE);
}
} | 3.68 |
framework_HasValue_getEmptyValue | /**
* Returns the value that represents an empty value.
* <p>
* By default {@link HasValue} is expected to support {@code null} as empty
* values. Specific implementations might not support this.
*
* @return empty value
* @see Binder#bind(HasValue, ValueProvider, com.vaadin.server.Setter)
* Binder#bind(HasValue, ValueProvider, Setter)
*/
public default V getEmptyValue() {
return null;
} | 3.68 |
hbase_StreamUtils_readByte | /**
* Read a byte from the given stream using the read method, and throw EOFException if it returns
* -1, like the implementation in {@code DataInputStream}.
* <p/>
* This is useful because casting the return value of read method into byte directly will make us
* lose the ability to check whether there is a byte and its value is -1 or we reach EOF, as
* casting int -1 to byte also returns -1.
*/
public static byte readByte(InputStream in) throws IOException {
int r = in.read();
if (r < 0) {
throw new EOFException();
}
return (byte) r;
} | 3.68 |
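A small sketch (plain JDK stream) of the difference this helper makes:

InputStream in = new ByteArrayInputStream(new byte[] { (byte) 0xFF });
byte b = StreamUtils.readByte(in);   // (byte) 0xFF, a real data byte
// A naive cast would be ambiguous: (byte) in.read() at EOF is also -1 == 0xFF.
// StreamUtils.readByte instead throws EOFException on the next call:
// StreamUtils.readByte(in);         // -> EOFException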
hadoop_LocalityMulticastAMRMProxyPolicy_addLocalizedNodeRR | /**
* Add to the answer a localized node request, and keeps track of statistics
 * on a per-allocation-id and per-subcluster basis.
*/
private void addLocalizedNodeRR(SubClusterId targetId, ResourceRequest rr) {
Preconditions
.checkArgument(!ResourceRequest.isAnyLocation(rr.getResourceName()));
if (rr.getNumContainers() > 0) {
if (!countContainersPerRM.containsKey(rr.getAllocationRequestId())) {
countContainersPerRM.put(rr.getAllocationRequestId(),
new HashMap<>());
}
if (!countContainersPerRM.get(rr.getAllocationRequestId())
.containsKey(targetId)) {
countContainersPerRM.get(rr.getAllocationRequestId()).put(targetId,
new AtomicLong(0));
}
countContainersPerRM.get(rr.getAllocationRequestId()).get(targetId)
.addAndGet(rr.getNumContainers());
if (!totNumLocalizedContainers
.containsKey(rr.getAllocationRequestId())) {
totNumLocalizedContainers.put(rr.getAllocationRequestId(),
new AtomicLong(0));
}
totNumLocalizedContainers.get(rr.getAllocationRequestId())
.addAndGet(rr.getNumContainers());
}
internalAddToAnswer(targetId, rr, false);
} | 3.68 |
hbase_HBaseTestingUtility_cleanupDataTestDirOnTestFS | /**
* Cleans a subdirectory under the test data directory on the test filesystem.
 * @return True if we removed the child
*/
public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
Path cpath = getDataTestDirOnTestFS(subdirName);
return getTestFileSystem().delete(cpath, true);
} | 3.68 |
streampipes_BoilerpipeHTMLContentHandler_toTextDocument | /**
* Returns a {@link TextDocument} containing the extracted {@link TextBlock} s. NOTE: Only call
* this after parsing.
*
* @return The {@link TextDocument}
*/
public TextDocument toTextDocument() {
// just to be sure
flushBlock();
return new TextDocument(getTitle(), getTextBlocks());
} | 3.68 |
hbase_TableDescriptors_update | /**
* Add or update descriptor. Just call {@link #update(TableDescriptor, boolean)} with
* {@code cacheOnly} as {@code false}.
*/
default void update(TableDescriptor htd) throws IOException {
update(htd, false);
} | 3.68 |
dubbo_CallableSafeInitializer_get | /**
* Get (and initialize, if not initialized yet) the required object
*
 * @return the lazily initialized object
*/
// @Override
public final T get() {
T result;
while ((result = reference.get()) == null) {
if (factory.compareAndSet(null, this)) {
reference.set(initialize());
}
}
return result;
} | 3.68 |
querydsl_AbstractOracleQuery_orderSiblingsBy | /**
* ORDER SIBLINGS BY preserves any ordering specified in the hierarchical query clause and then
* applies the order_by_clause to the siblings of the hierarchy.
*
* @param path path
* @return the current object
*/
public C orderSiblingsBy(Expression<?> path) {
return addFlag(Position.BEFORE_ORDER, ORDER_SIBLINGS_BY, path);
} | 3.68 |
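A hedged sketch of a hierarchical query using this flag; the query object and the employee metamodel are hypothetical, and connectByPrior is assumed to come from the same Oracle query class:

query.from(employee)
     .connectByPrior(employee.managerId.eq(employee.id))
     .orderSiblingsBy(employee.lastName)
     .select(employee.id)
     .fetch();
// Emits roughly: ... CONNECT BY PRIOR manager_id = id ORDER SIBLINGS BY last_name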
framework_SharedUtil_camelCaseToHumanFriendly | /**
* Converts a camelCaseString to a human friendly format (Camel case
* string).
* <p>
* In general splits words when the casing changes but also handles special
* cases such as consecutive upper case characters. Examples:
* <p>
* {@literal MyBeanContainer} becomes {@literal My Bean Container}
* {@literal AwesomeURLFactory} becomes {@literal Awesome URL Factory}
* {@literal SomeUriAction} becomes {@literal Some Uri Action}
*
* @since 7.4
* @param camelCaseString
* The input string in camelCase format
* @return A human friendly version of the input
*/
public static String camelCaseToHumanFriendly(String camelCaseString) {
String[] parts = splitCamelCase(camelCaseString);
for (int i = 0; i < parts.length; i++) {
parts[i] = capitalize(parts[i]);
}
return join(parts, " ");
} | 3.68 |
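The examples from the Javadoc, written out as calls:

SharedUtil.camelCaseToHumanFriendly("MyBeanContainer");   // "My Bean Container"
SharedUtil.camelCaseToHumanFriendly("AwesomeURLFactory"); // "Awesome URL Factory"
SharedUtil.camelCaseToHumanFriendly("SomeUriAction");     // "Some Uri Action"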
hbase_BlockCacheUtil_isFull | /**
* @return True if full; i.e. there are more items in the cache but we only loaded up the
* maximum set in configuration <code>hbase.ui.blockcache.by.file.max</code> (Default:
* DEFAULT_MAX).
*/
public boolean isFull() {
return this.count >= this.max;
} | 3.68 |
hadoop_S3ARemoteInputStream_setReadahead | /**
* Sets the number of bytes to read ahead each time.
*
 * @param readahead the number of bytes to read ahead each time.
*/
@Override
public synchronized void setReadahead(Long readahead) {
// We support read ahead via prefetching, therefore we ignore the supplied value.
if (readahead != null) {
Validate.checkNotNegative(readahead, "readahead");
}
} | 3.68 |
querydsl_ExpressionUtils_as | /**
* Create an alias expression with the given source and alias
*
* @param <D> type of expression
* @param source source
* @param alias alias
* @return source as alias
*/
public static <D> Expression<D> as(Expression<D> source, String alias) {
return as(source, path(source.getType(), alias));
} | 3.68 |
hbase_MetaTableAccessor_getClosestRegionInfo | /** Returns Get closest metatable region row to passed <code>row</code> */
@NonNull
private static RegionInfo getClosestRegionInfo(Connection connection,
@NonNull final TableName tableName, @NonNull final byte[] row) throws IOException {
byte[] searchRow = RegionInfo.createRegionName(tableName, row, HConstants.NINES, false);
Scan scan = getMetaScan(connection.getConfiguration(), 1);
scan.setReversed(true);
scan.withStartRow(searchRow);
try (ResultScanner resultScanner = getMetaHTable(connection).getScanner(scan)) {
Result result = resultScanner.next();
if (result == null) {
throw new TableNotFoundException("Cannot find row in META " + " for table: " + tableName
+ ", row=" + Bytes.toStringBinary(row));
}
RegionInfo regionInfo = CatalogFamilyFormat.getRegionInfo(result);
if (regionInfo == null) {
throw new IOException("RegionInfo was null or empty in Meta for " + tableName + ", row="
+ Bytes.toStringBinary(row));
}
return regionInfo;
}
} | 3.68 |
framework_CompositeValidator_setMode | /**
* Sets the mode of the validator. The valid modes are:
* <ul>
* <li>{@link CombinationMode#AND} (default)
* <li>{@link CombinationMode#OR}
* </ul>
*
* @param mode
* the mode to set.
*/
public void setMode(CombinationMode mode) {
if (mode == null) {
throw new IllegalArgumentException(
"The combination mode can't be set to null");
}
this.mode = mode;
} | 3.68 |
zxing_FinderPatternFinder_crossCheckVertical | /**
* <p>After a horizontal scan finds a potential finder pattern, this method
* "cross-checks" by scanning down vertically through the center of the possible
* finder pattern to see if the same proportion is detected.</p>
*
* @param startI row where a finder pattern was detected
* @param centerJ center of the section that appears to cross a finder pattern
* @param maxCount maximum reasonable number of modules that should be
* observed in any reading state, based on the results of the horizontal scan
* @return vertical center of finder pattern, or {@link Float#NaN} if not found
*/
private float crossCheckVertical(int startI, int centerJ, int maxCount,
int originalStateCountTotal) {
BitMatrix image = this.image;
int maxI = image.getHeight();
int[] stateCount = getCrossCheckStateCount();
// Start counting up from center
int i = startI;
while (i >= 0 && image.get(centerJ, i)) {
stateCount[2]++;
i--;
}
if (i < 0) {
return Float.NaN;
}
while (i >= 0 && !image.get(centerJ, i) && stateCount[1] <= maxCount) {
stateCount[1]++;
i--;
}
// If already too many modules in this state or ran off the edge:
if (i < 0 || stateCount[1] > maxCount) {
return Float.NaN;
}
while (i >= 0 && image.get(centerJ, i) && stateCount[0] <= maxCount) {
stateCount[0]++;
i--;
}
if (stateCount[0] > maxCount) {
return Float.NaN;
}
// Now also count down from center
i = startI + 1;
while (i < maxI && image.get(centerJ, i)) {
stateCount[2]++;
i++;
}
if (i == maxI) {
return Float.NaN;
}
while (i < maxI && !image.get(centerJ, i) && stateCount[3] < maxCount) {
stateCount[3]++;
i++;
}
if (i == maxI || stateCount[3] >= maxCount) {
return Float.NaN;
}
while (i < maxI && image.get(centerJ, i) && stateCount[4] < maxCount) {
stateCount[4]++;
i++;
}
if (stateCount[4] >= maxCount) {
return Float.NaN;
}
// If we found a finder-pattern-like section, but its size is more than 40% different than
// the original, assume it's a false positive
int stateCountTotal = stateCount[0] + stateCount[1] + stateCount[2] + stateCount[3] +
stateCount[4];
if (5 * Math.abs(stateCountTotal - originalStateCountTotal) >= 2 * originalStateCountTotal) {
return Float.NaN;
}
return foundPatternCross(stateCount) ? centerFromEnd(stateCount, i) : Float.NaN;
} | 3.68 |
pulsar_PulsarConnectorConfig_getNarExtractionDirectory | // --- Nar extraction config
public String getNarExtractionDirectory() {
return narExtractionDirectory;
} | 3.68 |
hbase_HDFSBlocksDistribution_addWeight | /**
* add weight
* @param weight the weight
* @param weightForSsd the weight for ssd
*/
public void addWeight(long weight, long weightForSsd) {
this.weight += weight;
this.weightForSsd += weightForSsd;
} | 3.68 |
flink_HiveParserUnparseTranslator_addCopyTranslation | /**
* Register a "copy" translation in which a node will be translated into whatever the
* translation turns out to be for another node (after previously registered translations have
* already been performed). Deferred translations are performed in the order they are
* registered, and follow the same rules regarding overlap as non-copy translations.
*
* @param targetNode node whose subtree is to be replaced
* @param sourceNode the node providing the replacement text
*/
public void addCopyTranslation(HiveParserASTNode targetNode, HiveParserASTNode sourceNode) {
if (!enabled) {
return;
}
if (targetNode.getOrigin() != null) {
return;
}
CopyTranslation copyTranslation = new CopyTranslation();
copyTranslation.targetNode = targetNode;
copyTranslation.sourceNode = sourceNode;
copyTranslations.add(copyTranslation);
} | 3.68 |
hudi_KeyRangeLookupTree_getMatchingIndexFiles | /**
* Fetches all the matching index files where the key could possibly be present.
*
* @param root refers to the current root of the look up tree
* @param lookupKey the key to be searched for
*/
private void getMatchingIndexFiles(KeyRangeNode root, String lookupKey, Set<String> matchingFileNameSet) {
if (root == null) {
return;
}
if (root.getMinRecordKey().compareTo(lookupKey) <= 0 && lookupKey.compareTo(root.getMaxRecordKey()) <= 0) {
matchingFileNameSet.addAll(root.getFileNameList());
}
if (root.getLeftSubTreeMax() != null && root.getLeftSubTreeMin().compareTo(lookupKey) <= 0
&& lookupKey.compareTo(root.getLeftSubTreeMax()) <= 0) {
getMatchingIndexFiles(root.getLeft(), lookupKey, matchingFileNameSet);
}
if (root.getRightSubTreeMax() != null && root.getRightSubTreeMin().compareTo(lookupKey) <= 0
&& lookupKey.compareTo(root.getRightSubTreeMax()) <= 0) {
getMatchingIndexFiles(root.getRight(), lookupKey, matchingFileNameSet);
}
} | 3.68 |
morf_MySqlDialect_getSqlForAddDays | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForAddDays(org.alfasoftware.morf.sql.element.Function)
*/
@Override
protected String getSqlForAddDays(Function function) {
return String.format(
"DATE_ADD(%s, INTERVAL %s DAY)",
getSqlFrom(function.getArguments().get(0)),
getSqlFrom(function.getArguments().get(1))
);
} | 3.68 |
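Applied to example operands (the column name and day count are made up), the template yields a MySQL DATE_ADD expression:

// Illustrative only: the same format string applied to example operands.
String sql = String.format("DATE_ADD(%s, INTERVAL %s DAY)", "invoiceDate", "5");
// sql -> "DATE_ADD(invoiceDate, INTERVAL 5 DAY)"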
flink_AbstractBytesMultiMap_appendRecord | /** The key does not exist yet. Add the key and its first value to the key area. */
@Override
public int appendRecord(LookupInfo<K, Iterator<RowData>> lookupInfo, BinaryRowData value)
throws IOException {
int lastPosition = (int) keyOutView.getCurrentOffset();
// write key to keyOutView
int skip = keySerializer.serializeToPages(lookupInfo.key, keyOutView);
int keyOffset = lastPosition + skip;
// skip the pointer to the tail value.
endPtrOffset = skipPointer(keyOutView);
// write a value entry: a next-pointer and value data
long pointerOfEndValue = writePointer(keyOutView, -1);
// write first value to keyOutView.
valueSerializer.serializeToPages(value, keyOutView);
if (pointerOfEndValue > Integer.MAX_VALUE) {
LOG.warn(
"We can't handle key area with more than Integer.MAX_VALUE bytes,"
+ " because the pointer is a integer.");
throw new EOFException();
}
endPtr = (int) pointerOfEndValue;
// update pointer to the tail value
updateValuePointerInKeyArea(endPtr, endPtrOffset);
return keyOffset;
} | 3.68 |
hudi_JdbcSource_incrementalFetch | /**
   * Does an incremental scan with a predicate-pushdown (PPD) query prepared on the basis of the previous checkpoint.
*
* @param lastCheckpoint Last checkpoint.
* Note that the records fetched will be exclusive of the last checkpoint (i.e. incremental column value > lastCheckpoint).
* @return The {@link Dataset} after incremental fetch from RDBMS.
*/
private Dataset<Row> incrementalFetch(Option<String> lastCheckpoint, long sourceLimit) {
try {
final String ppdQuery = "(%s) rdbms_table";
final SqlQueryBuilder queryBuilder = SqlQueryBuilder.select("*")
.from(getStringWithAltKeys(props, JdbcSourceConfig.RDBMS_TABLE_NAME))
.where(String.format(" %s > '%s'", getStringWithAltKeys(props, JdbcSourceConfig.INCREMENTAL_COLUMN), lastCheckpoint.get()));
if (sourceLimit > 0) {
URI jdbcURI = URI.create(getStringWithAltKeys(props, JdbcSourceConfig.URL).substring(URI_JDBC_PREFIX.length()));
if (DB_LIMIT_CLAUSE.contains(jdbcURI.getScheme())) {
queryBuilder.orderBy(getStringWithAltKeys(props, JdbcSourceConfig.INCREMENTAL_COLUMN)).limit(sourceLimit);
}
}
String query = String.format(ppdQuery, queryBuilder.toString());
LOG.info("PPD QUERY: " + query);
LOG.info(String.format("Referenced last checkpoint and prepared new predicate pushdown query for jdbc pull %s", query));
return validatePropsAndGetDataFrameReader(sparkSession, props).option(Config.RDBMS_TABLE_PROP, query).load();
} catch (Exception e) {
LOG.error("Error while performing an incremental fetch. Not all database support the PPD query we generate to do an incremental scan", e);
if (containsConfigProperty(props, JdbcSourceConfig.FALLBACK_TO_FULL_FETCH)
&& getBooleanWithAltKeys(props, JdbcSourceConfig.FALLBACK_TO_FULL_FETCH)) {
LOG.warn("Falling back to full scan.");
return fullFetch(sourceLimit);
}
throw e;
}
} | 3.68 |
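With hypothetical values (table, column, checkpoint and limit below are made up), the pushed-down query assembled above takes roughly this shape; the exact text comes from Hudi's SqlQueryBuilder and may differ in casing or spacing:

// Hypothetical illustration of the incremental PPD query shape, built with plain formatting.
String table = "orders", column = "updated_at", checkpoint = "2023-01-01 00:00:00";
long sourceLimit = 100;
String inner = String.format("SELECT * FROM %s WHERE %s > '%s' ORDER BY %s LIMIT %d",
    table, column, checkpoint, column, sourceLimit);
String ppdQuery = String.format("(%s) rdbms_table", inner);
// -> "(SELECT * FROM orders WHERE updated_at > '2023-01-01 00:00:00' ORDER BY updated_at LIMIT 100) rdbms_table"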
flink_MailboxProcessor_prepareClose | /** Lifecycle method to close the mailbox for action submission. */
public void prepareClose() {
mailbox.quiesce();
} | 3.68 |
hbase_RotateFile_write | /**
* Writes the given data to the next file in the rotation, with a timestamp calculated based on
* the previous timestamp and the current time to make sure it is greater than the previous
* timestamp. The method also deletes the previous file, which is no longer needed.
* <p/>
* Notice that, for a newly created {@link RotateFile} instance, you need to call {@link #read()}
* first to initialize the nextFile index, before calling this method.
* @param data the data to be written to the file
* @throws IOException if an I/O error occurs while writing the data to the file
*/
public void write(byte[] data) throws IOException {
if (data.length > maxFileSize) {
throw new IOException(
"Data size " + data.length + " is greater than max allowed size " + maxFileSize);
}
long timestamp = Math.max(prevTimestamp + 1, EnvironmentEdgeManager.currentTime());
write(fs, files[nextFile], timestamp, data);
prevTimestamp = timestamp;
nextFile = 1 - nextFile;
try {
fs.delete(files[nextFile], false);
} catch (IOException e) {
      // we will create the new file with overwrite = true, so this is not a big deal; deleting is
      // only to speed up loading, as we do not need to read this file when loading
LOG.debug("Failed to delete old file {}, ignoring the exception", files[nextFile], e);
}
} | 3.68 |
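The rotation bookkeeping on its own, stripped of the file I/O, can be sketched as follows (illustrative class, not HBase's RotateFile):

// Minimal sketch: timestamps are forced to be strictly increasing and writes alternate
// between two slots; writing to and deleting the actual files is omitted.
class RotationState {
    private long prevTimestamp = -1;
    private int nextFile = 0; // flips between 0 and 1

    int nextWriteIndex() {
        long timestamp = Math.max(prevTimestamp + 1, System.currentTimeMillis());
        int fileToWrite = nextFile;   // the data and `timestamp` go into files[fileToWrite]
        prevTimestamp = timestamp;
        nextFile = 1 - nextFile;      // files[nextFile] is now the stale copy and can be deleted
        return fileToWrite;
    }
}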
flink_OperationExecutor_runClusterAction | /**
* Retrieves the {@link ClusterClient} from the session and runs the given {@link ClusterAction}
* against it.
*
* @param configuration the combined configuration of {@code sessionConf} and {@code
* executionConfig}.
* @param handle the specified operation handle
* @param clusterAction the cluster action to run against the retrieved {@link ClusterClient}.
* @param <ClusterID> type of the cluster id
     * @param <Result> type of the result
* @throws SqlExecutionException if something goes wrong
*/
private <ClusterID, Result> Result runClusterAction(
Configuration configuration,
OperationHandle handle,
ClusterAction<ClusterID, Result> clusterAction)
throws SqlExecutionException {
final ClusterClientFactory<ClusterID> clusterClientFactory =
clusterClientServiceLoader.getClusterClientFactory(configuration);
final ClusterID clusterId = clusterClientFactory.getClusterId(configuration);
Preconditions.checkNotNull(clusterId, "No cluster ID found for operation " + handle);
try (final ClusterDescriptor<ClusterID> clusterDescriptor =
clusterClientFactory.createClusterDescriptor(configuration);
final ClusterClient<ClusterID> clusterClient =
clusterDescriptor.retrieve(clusterId).getClusterClient()) {
return clusterAction.runAction(clusterClient);
} catch (FlinkException e) {
throw new SqlExecutionException("Failed to run cluster action.", e);
}
} | 3.68 |
hbase_ByteBufferUtils_copyFromStreamToBuffer | /**
* Copy the given number of bytes from the given stream and put it at the current position of the
* given buffer, updating the position in the buffer.
* @param out the buffer to write data to
* @param in the stream to read data from
* @param length the number of bytes to read/write
*/
public static void copyFromStreamToBuffer(ByteBuffer out, DataInputStream in, int length)
throws IOException {
if (out.hasArray()) {
in.readFully(out.array(), out.position() + out.arrayOffset(), length);
skip(out, length);
} else {
for (int i = 0; i < length; ++i) {
out.put(in.readByte());
}
}
} | 3.68 |
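A hedged usage sketch: reading a length-prefixed block into a heap buffer, which takes the array-backed fast path above (the length-prefix framing and helper name are assumptions, not an HBase convention):

// Illustrative helper, not part of HBase.
static ByteBuffer readBlock(DataInputStream in) throws IOException {
    int length = in.readInt();                                // hypothetical length prefix
    ByteBuffer out = ByteBuffer.allocate(length);             // heap buffer -> hasArray() is true
    ByteBufferUtils.copyFromStreamToBuffer(out, in, length);  // advances out.position() by `length`
    out.flip();                                               // make the copied bytes readable
    return out;
}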
MagicPlugin_BaseSpell_isOkToStandIn | /*
* Ground / location search and test functions
*/
@Deprecated // Material
public boolean isOkToStandIn(Material mat)
{
if (isHalfBlock(mat)) {
return false;
}
return passthroughMaterials.testMaterial(mat) && !unsafeMaterials.testMaterial(mat);
} | 3.68 |
hbase_HRegionServer_waitOnAllRegionsToClose | /**
   * Wait on regions to close.
*/
private void waitOnAllRegionsToClose(final boolean abort) {
// Wait till all regions are closed before going out.
int lastCount = -1;
long previousLogTime = 0;
Set<String> closedRegions = new HashSet<>();
boolean interrupted = false;
try {
while (!onlineRegions.isEmpty()) {
int count = getNumberOfOnlineRegions();
// Only print a message if the count of regions has changed.
if (count != lastCount) {
// Log every second at most
if (EnvironmentEdgeManager.currentTime() > (previousLogTime + 1000)) {
previousLogTime = EnvironmentEdgeManager.currentTime();
lastCount = count;
LOG.info("Waiting on " + count + " regions to close");
// Only print out regions still closing if a small number else will
// swamp the log.
if (count < 10 && LOG.isDebugEnabled()) {
LOG.debug("Online Regions=" + this.onlineRegions);
}
}
}
// Ensure all user regions have been sent a close. Use this to
// protect against the case where an open comes in after we start the
// iterator of onlineRegions to close all user regions.
for (Map.Entry<String, HRegion> e : this.onlineRegions.entrySet()) {
RegionInfo hri = e.getValue().getRegionInfo();
if (
!this.regionsInTransitionInRS.containsKey(hri.getEncodedNameAsBytes())
&& !closedRegions.contains(hri.getEncodedName())
) {
closedRegions.add(hri.getEncodedName());
// Don't update zk with this close transition; pass false.
closeRegionIgnoreErrors(hri, abort);
}
}
// No regions in RIT, we could stop waiting now.
if (this.regionsInTransitionInRS.isEmpty()) {
if (!onlineRegions.isEmpty()) {
LOG.info("We were exiting though online regions are not empty,"
+ " because some regions failed closing");
}
break;
} else {
LOG.debug("Waiting on {}", this.regionsInTransitionInRS.keySet().stream()
.map(e -> Bytes.toString(e)).collect(Collectors.joining(", ")));
}
if (sleepInterrupted(200)) {
interrupted = true;
}
}
} finally {
if (interrupted) {
Thread.currentThread().interrupt();
}
}
} | 3.68 |
pulsar_LedgerOffloader_streamingOffload | /**
 * Begin offloading the passed-in ledgers to long-term storage; the offload finishes
 * when a segment reaches its size or time limit.
* Should only be called once for a LedgerOffloader instance.
* Metadata passed in is for inspection purposes only and should be stored
* alongside the segment data.
*
* When the returned OffloaderHandle.getOffloadResultAsync completes, the corresponding
 * ledgers have been persisted to
 * long-term storage, so it is safe to delete the original copy in BookKeeper.
*
* The uid is used to identify an attempt to offload. The implementation should
* use this to deterministically generate a unique name for the offloaded object.
* This uid will be stored in the managed ledger metadata before attempting the
* call to streamingOffload(). If a subsequent or concurrent call to streamingOffload() finds
* a uid in the metadata, it will attempt to cleanup this attempt with a call
* to #deleteOffloaded(ReadHandle,UUID). Once the offload attempt completes,
* the managed ledger will update its metadata again, to record the completion,
* ensuring that subsequent calls will not attempt to offload the same ledger
* again.
*
 * @return an OffloaderHandle which, when `completeFuture()` completes, denotes that the offload has been
 * successful.
*/
default CompletableFuture<OffloadHandle> streamingOffload(ManagedLedger ml, UUID uid, long beginLedger,
long beginEntry,
Map<String, String> driverMetadata) {
throw new UnsupportedOperationException();
} | 3.68 |
hudi_HoodieBackedTableMetadata_close | /**
* Close the file reader and the record scanner for the given file slice.
*
* @param partitionFileSlicePair - Partition and FileSlice
*/
private synchronized void close(Pair<String, String> partitionFileSlicePair) {
Pair<HoodieSeekingFileReader<?>, HoodieMetadataLogRecordReader> readers =
partitionReaders.get().remove(partitionFileSlicePair);
closeReader(readers);
} | 3.68 |
flink_ProjectOperator_projectTuple22 | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21>
ProjectOperator<
T,
Tuple22<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21>>
projectTuple22() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
TupleTypeInfo<
Tuple22<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21>>
tType =
new TupleTypeInfo<
Tuple22<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21>>(fTypes);
return new ProjectOperator<
T,
Tuple22<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21>>(this.ds, this.fieldIndexes, tType);
} | 3.68 |
flink_ExtendedParser_getCompletionHints | /**
* Returns completion hints for the given statement at the given cursor position. The completion
* happens case insensitively.
*
* @param statement Partial or slightly incorrect SQL statement
* @param cursor cursor position
* @return completion hints that fit at the current cursor position
*/
public String[] getCompletionHints(String statement, int cursor) {
String normalizedStatement = statement.trim().toUpperCase();
List<String> hints = new ArrayList<>();
for (ExtendedParseStrategy strategy : PARSE_STRATEGIES) {
for (String hint : strategy.getHints()) {
if (hint.startsWith(normalizedStatement) && cursor < hint.length()) {
hints.add(getCompletionHint(normalizedStatement, hint));
}
}
}
return hints.toArray(new String[0]);
} | 3.68 |
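The matching rule is a case-insensitive prefix test against each registered hint. A standalone sketch of that rule with a made-up hint value:

// Standalone illustration of the matching condition used above; "QUIT;" stands in for a registered hint.
String statement = "qui";
int cursor = 3;
String normalized = statement.trim().toUpperCase();                      // "QUI"
String hint = "QUIT;";
boolean matches = hint.startsWith(normalized) && cursor < hint.length(); // true -> the hint is offered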
hbase_StorageClusterStatusModel_setName | /**
* @param name the region server's hostname
*/
public void setName(String name) {
this.name = name;
} | 3.68 |
framework_VAbstractSplitPanel_isEnabled | /**
* Returns whether this split panel is enabled or not.
*
* @return {@code true} if enabled, {@code false} if disabled
*/
public boolean isEnabled() {
return enabled;
} | 3.68 |
pulsar_ModularLoadManagerStrategy_create | /**
* Create a placement strategy using the configuration.
*
* @param conf ServiceConfiguration to use.
* @return A placement strategy from the given configurations.
*/
static ModularLoadManagerStrategy create(final ServiceConfiguration conf) {
try {
return Reflections.createInstance(conf.getLoadBalancerLoadPlacementStrategy(),
ModularLoadManagerStrategy.class, Thread.currentThread().getContextClassLoader());
} catch (Exception e) {
throw new RuntimeException(
"Could not load LoadBalancerLoadPlacementStrategy:" + conf.getLoadBalancerLoadPlacementStrategy(),
e);
}
} | 3.68 |
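The factory boils down to a context-classloader lookup plus no-arg construction. A generic sketch of that core idea (not Pulsar's Reflections utility, which performs additional validation):

// Generic reflective instantiation sketch; the class name comes from configuration.
static ModularLoadManagerStrategy load(String className) {
    try {
        Class<?> clazz = Class.forName(className, true, Thread.currentThread().getContextClassLoader());
        return clazz.asSubclass(ModularLoadManagerStrategy.class).getDeclaredConstructor().newInstance();
    } catch (ReflectiveOperationException e) {
        throw new RuntimeException("Could not load LoadBalancerLoadPlacementStrategy: " + className, e);
    }
}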
flink_StreamTableEnvironment_create | /**
* Creates a table environment that is the entry point and central context for creating Table
* and SQL API programs that integrate with the Java-specific {@link DataStream} API.
*
* <p>It is unified for bounded and unbounded data processing.
*
* <p>A stream table environment is responsible for:
*
* <ul>
* <li>Convert a {@link DataStream} into {@link Table} and vice-versa.
* <li>Connecting to external systems.
* <li>Registering and retrieving {@link Table}s and other meta objects from a catalog.
* <li>Executing SQL statements.
* <li>Offering further configuration options.
* </ul>
*
* <p>Note: If you don't intend to use the {@link DataStream} API, {@link TableEnvironment} is
* meant for pure table programs.
*
* @param executionEnvironment The Java {@link StreamExecutionEnvironment} of the {@link
* TableEnvironment}.
* @param settings The environment settings used to instantiate the {@link TableEnvironment}.
*/
static StreamTableEnvironment create(
StreamExecutionEnvironment executionEnvironment, EnvironmentSettings settings) {
return StreamTableEnvironmentImpl.create(executionEnvironment, settings);
} | 3.68 |
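A typical call site, assuming the standard flink-table-api-java-bridge imports:

// Create a streaming TableEnvironment on top of a DataStream execution environment.
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
EnvironmentSettings settings = EnvironmentSettings.newInstance().inStreamingMode().build();
StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, settings);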
hadoop_OBSLoginHelper_canonicalizeUri | /**
* Canonicalize the given URI.
*
* <p>This strips out login information.
*
* @param uri the URI to canonicalize
* @param defaultPort default port to use in canonicalized URI if the input
* URI has no port and this value is greater than 0
* @return a new, canonicalized URI.
*/
public static URI canonicalizeUri(final URI uri, final int defaultPort) {
URI newUri = uri;
if (uri.getPort() == -1 && defaultPort > 0) {
// reconstruct the uri with the default port set
try {
newUri =
new URI(
newUri.getScheme(),
null,
newUri.getHost(),
defaultPort,
newUri.getPath(),
newUri.getQuery(),
newUri.getFragment());
} catch (URISyntaxException e) {
// Should never happen!
throw new AssertionError(
"Valid URI became unparseable: " + newUri);
}
}
return newUri;
} | 3.68 |
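For example, a URI with no explicit port is rebuilt with the supplied default (the bucket, path and port below are made up):

// Illustrative: the input has no port, so the default port is filled in.
URI raw = URI.create("obs://my-bucket/warehouse/t1");
URI canonical = OBSLoginHelper.canonicalizeUri(raw, 443);
// canonical.toString() -> "obs://my-bucket:443/warehouse/t1"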
framework_Page_setLocation | /**
* Navigates this page to the given URI. The contents of this page in the
* browser is replaced with whatever is returned for the given URI.
* <p>
* This method should not be used to start downloads, as the client side
* will assume the browser will navigate away when opening the URI. Use one
* of the {@code Page.open} methods or {@code FileDownloader} instead.
*
* @see #open(String, String)
* @see FileDownloader
*
* @param uri
* the URI to show
*/
public void setLocation(URI uri) {
setLocation(uri.toString());
} | 3.68 |
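A minimal usage sketch (the URL is made up):

// Navigate the current page to an example URI.
Page.getCurrent().setLocation(URI.create("https://example.com/reports/latest"));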