name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hbase_KeyValue_oswrite | /**
* Write out a KeyValue in the manner in which we used to when KeyValue was a Writable, but without
* requiring a {@link DataOutput}; just take a plain {@link OutputStream}. Named <code>oswrite</code>
* so it does not clash with {@link #write(KeyValue, DataOutput)}.
* @param kv the KeyValue on which write is being requested
* @param out OutputStream to write keyValue to
* @param withTags boolean value indicating write is with Tags or not
* @return Length written on stream
* @throws IOException if any IO error happens
* @see #create(DataInput) for the inverse function
* @see #write(KeyValue, DataOutput)
* @see KeyValueUtil#oswrite(Cell, OutputStream, boolean)
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Instead use
* {@link #write(OutputStream, boolean)}
*/
@Deprecated
public static long oswrite(final KeyValue kv, final OutputStream out, final boolean withTags)
throws IOException {
ByteBufferUtils.putInt(out, kv.getSerializedSize(withTags));
return (long) kv.write(out, withTags) + Bytes.SIZEOF_INT;
} | 3.68 |
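The method above length-prefixes the serialized cell with a 4-byte int before the payload. A minimal sketch of that same length-prefixed pattern using only java.io (generic byte payload, no HBase classes; the class and method names are illustrative):

```java
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class LengthPrefixedWrite {
    // Writes a 4-byte length followed by the payload and returns the total
    // number of bytes written, mirroring the "int length + body" contract above.
    static long writePrefixed(DataOutputStream out, byte[] payload) throws IOException {
        out.writeInt(payload.length);
        out.write(payload);
        return Integer.BYTES + (long) payload.length;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        long written = writePrefixed(new DataOutputStream(bos), new byte[] {1, 2, 3});
        System.out.println(written + " bytes written, buffer holds " + bos.size()); // 7 and 7
    }
}
```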
framework_Escalator_getSpacerHeightsSumUntilPx | /**
* Gets the amount of pixels occupied by spacers from the top until a
* certain spot from the top of the body.
*
* @param px
* pixels counted from the top
* @return the pixels occupied by spacers up until {@code px}
*/
public double getSpacerHeightsSumUntilPx(double px) {
return getSpacerHeightsSumBetweenPx(0,
SpacerInclusionStrategy.PARTIAL, px,
SpacerInclusionStrategy.PARTIAL);
} | 3.68 |
rocketmq-connect_RocketMqAdminUtil_createTopic | /**
* Create a RocketMQ topic.
*
* @param config the RocketMQ connection config
* @param topicConfig the topic configuration to create or update
*/
public static void createTopic(RocketMqConfig config, TopicConfig topicConfig) {
DefaultMQAdminExt defaultMQAdminExt = null;
try {
defaultMQAdminExt = startMQAdminTool(config);
ClusterInfo clusterInfo = defaultMQAdminExt.examineBrokerClusterInfo();
HashMap<String, Set<String>> clusterAddrTable = clusterInfo.getClusterAddrTable();
Set<String> clusterNameSet = clusterAddrTable.keySet();
for (String clusterName : clusterNameSet) {
Set<String> masterSet = CommandUtil.fetchMasterAddrByClusterName(defaultMQAdminExt, clusterName);
for (String addr : masterSet) {
defaultMQAdminExt.createAndUpdateTopicConfig(addr, topicConfig);
}
}
} catch (Exception e) {
throw new RuntimeException("RocketMq create schema history topic: " + topicConfig.getTopicName() + " " +
" failed", e);
} finally {
if (defaultMQAdminExt != null) {
defaultMQAdminExt.shutdown();
}
}
} | 3.68 |
hbase_WALSplitUtil_getRegionSplitEditsPath | /**
* Path to a file under RECOVERED_EDITS_DIR directory of the region found in <code>logEntry</code>
* named for the sequenceid in the passed <code>logEntry</code>: e.g.
* /hbase/some_table/2323432434/recovered.edits/2332. This method also ensures the existence of
* RECOVERED_EDITS_DIR under the region, creating it if necessary, and sets the storage policy for
* RECOVERED_EDITS_DIR if WAL_STORAGE_POLICY is configured.
* @param tableName the table name
* @param encodedRegionName the encoded region name
* @param seqId the sequence id used to generate the file name
* @param fileNameBeingSplit the file being split currently. Used to generate tmp file name.
* @param tmpDirName name of the directory used to sideline old recovered edits files
* @param conf configuration
* @return Path to file into which to dump split log edits.
*/
@SuppressWarnings("deprecation")
static Path getRegionSplitEditsPath(TableName tableName, byte[] encodedRegionName, long seqId,
String fileNameBeingSplit, String tmpDirName, Configuration conf) throws IOException {
FileSystem walFS = CommonFSUtils.getWALFileSystem(conf);
Path tableDir = CommonFSUtils.getWALTableDir(conf, tableName);
String encodedRegionNameStr = Bytes.toString(encodedRegionName);
Path regionDir = HRegion.getRegionDir(tableDir, encodedRegionNameStr);
Path dir = getRegionDirRecoveredEditsDir(regionDir);
if (walFS.exists(dir) && walFS.isFile(dir)) {
Path tmp = new Path(tmpDirName);
if (!walFS.exists(tmp)) {
walFS.mkdirs(tmp);
}
tmp = new Path(tmp, HConstants.RECOVERED_EDITS_DIR + "_" + encodedRegionNameStr);
LOG.warn("Found existing old file: {}. It could be some "
+ "leftover of an old installation. It should be a folder instead. " + "So moving it to {}",
dir, tmp);
if (!walFS.rename(dir, tmp)) {
LOG.warn("Failed to sideline old file {}", dir);
}
}
if (!walFS.exists(dir) && !walFS.mkdirs(dir)) {
LOG.warn("mkdir failed on {}", dir);
} else {
String storagePolicy =
conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY);
CommonFSUtils.setStoragePolicy(walFS, dir, storagePolicy);
}
// Append fileBeingSplit to prevent name conflict since we may have duplicate wal entries now.
// Append file name ends with RECOVERED_LOG_TMPFILE_SUFFIX to ensure
// region's replayRecoveredEdits will not delete it
String fileName = formatRecoveredEditsFileName(seqId);
fileName = getTmpRecoveredEditsFileName(fileName + "-" + fileNameBeingSplit);
return new Path(dir, fileName);
} | 3.68 |
morf_CompositeSchema_getView | /**
* @see org.alfasoftware.morf.metadata.Schema#getView(java.lang.String)
*/
@Override
public View getView(String name) {
for (Schema schema : delegates)
if (schema.viewExists(name))
return schema.getView(name);
throw new IllegalArgumentException("Unknown table [" + name + "]");
} | 3.68 |
dubbo_StringUtils_convertToSplitName | /**
* Convert camelCase or snake_case/SNAKE_CASE to a split-separated name, e.g. kebab-case when the split is "-".
*
* @param str the input string in camelCase or snake_case/SNAKE_CASE
* @param split the separator to place between words
* @return the converted name
*/
public static String convertToSplitName(String str, String split) {
if (isSnakeCase(str)) {
return snakeToSplitName(str, split);
} else {
return camelToSplitName(str, split);
}
} | 3.68 |
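The helpers snakeToSplitName and camelToSplitName are not shown in this row. A hypothetical sketch of what the camelCase branch could look like (a stand-in for illustration, not Dubbo's actual implementation):

```java
public class SplitNameSketch {
    // Inserts the separator before each upper-case letter and lower-cases it,
    // e.g. "maxActive" with "-" becomes "max-active".
    static String camelToSplitName(String str, String split) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < str.length(); i++) {
            char c = str.charAt(i);
            if (Character.isUpperCase(c)) {
                if (i > 0) {
                    sb.append(split);
                }
                sb.append(Character.toLowerCase(c));
            } else {
                sb.append(c);
            }
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        System.out.println(camelToSplitName("maxActive", "-")); // max-active
    }
}
```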
flink_RetractableTopNFunction_retractRecordWithoutRowNumber | /**
* Retract the input record and emit updated records. This works for outputting without
* row_number.
*
* @return true if the input record has been removed from {@link #dataState}.
*/
private boolean retractRecordWithoutRowNumber(
SortedMap<RowData, Long> sortedMap,
RowData sortKey,
RowData inputRow,
Collector<RowData> out)
throws Exception {
Iterator<Map.Entry<RowData, Long>> iterator = sortedMap.entrySet().iterator();
long nextRank = 1L; // the next rank number, should be in the rank range
boolean findsSortKey = false;
while (iterator.hasNext() && isInRankEnd(nextRank)) {
Map.Entry<RowData, Long> entry = iterator.next();
RowData key = entry.getKey();
if (!findsSortKey && key.equals(sortKey)) {
List<RowData> inputs = dataState.get(key);
if (inputs == null) {
processStateStaled(iterator);
} else {
Iterator<RowData> inputIter = inputs.iterator();
while (inputIter.hasNext() && isInRankEnd(nextRank)) {
RowData prevRow = inputIter.next();
if (!findsSortKey && equaliser.equals(prevRow, inputRow)) {
collectDelete(out, prevRow, nextRank);
nextRank -= 1;
findsSortKey = true;
inputIter.remove();
} else if (findsSortKey) {
if (nextRank == rankEnd) {
collectInsert(out, prevRow, nextRank);
}
}
nextRank += 1;
}
if (inputs.isEmpty()) {
dataState.remove(key);
} else {
dataState.put(key, inputs);
}
}
} else if (findsSortKey) {
long count = entry.getValue();
// gets the rank of last record with same sortKey
long rankOfLastRecord = nextRank + count - 1;
if (rankOfLastRecord < rankEnd) {
nextRank = rankOfLastRecord + 1;
} else {
// sends the record if a record has recently been upgraded into the Top-N
int index = Long.valueOf(rankEnd - nextRank).intValue();
List<RowData> inputs = dataState.get(key);
if (inputs == null) {
processStateStaled(iterator);
} else {
RowData toAdd = inputs.get(index);
collectInsert(out, toAdd);
break;
}
}
} else {
nextRank += entry.getValue();
}
}
return findsSortKey;
} | 3.68 |
framework_ComboBoxScrollingWithArrows_getTicketNumber | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber()
*/
@Override
protected Integer getTicketNumber() {
return 11333;
} | 3.68 |
flink_SinkProvider_of | /** Helper method for creating a Sink provider with a provided sink parallelism. */
static SinkProvider of(Sink<RowData, ?, ?, ?> sink, @Nullable Integer sinkParallelism) {
return new SinkProvider() {
@Override
public Sink<RowData, ?, ?, ?> createSink() {
return sink;
}
@Override
public Optional<Integer> getParallelism() {
return Optional.ofNullable(sinkParallelism);
}
};
} | 3.68 |
hbase_ReplicationSink_getStats | /**
* Get a string representation of this sink's metrics
* @return string with the total replicated edits count and the date of the last edit that was
* applied
*/
public String getStats() {
long total = this.totalReplicatedEdits.get();
return total == 0
? ""
: "Sink: " + "age in ms of last applied edit: " + this.metrics.refreshAgeOfLastAppliedOp()
+ ", total replicated edits: " + total;
} | 3.68 |
hbase_HFileCorruptionChecker_checkTableDir | /**
* Check all the region dirs under the specified tableDir (the path to a table).
*/
void checkTableDir(Path tableDir) throws IOException {
List<FileStatus> rds =
FSUtils.listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs));
if (rds == null) {
if (!fs.exists(tableDir)) {
LOG.warn("Table Directory " + tableDir
+ " does not exist. Likely due to concurrent delete. Skipping.");
missing.add(tableDir);
}
return;
}
LOG.info("Checking Table Directory {}. Number of entries (including mob) = {}", tableDir,
rds.size() + 1);
// Parallelize check at the region dir level
List<RegionDirChecker> rdcs = new ArrayList<>(rds.size() + 1);
List<Future<Void>> rdFutures;
for (FileStatus rdFs : rds) {
Path rdDir = rdFs.getPath();
RegionDirChecker work = new RegionDirChecker(rdDir);
rdcs.add(work);
}
// add mob region
rdcs.add(createMobRegionDirChecker(tableDir));
// Submit and wait for completion
try {
rdFutures = executor.invokeAll(rdcs);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
LOG.warn("Region dirs checking interrupted!", ie);
return;
}
for (int i = 0; i < rdFutures.size(); i++) {
Future<Void> f = rdFutures.get(i);
try {
f.get();
} catch (ExecutionException e) {
LOG.warn("Failed to quarantine an HFile in regiondir " + rdcs.get(i).regionDir,
e.getCause());
// rethrow IOExceptions
if (e.getCause() instanceof IOException) {
throw (IOException) e.getCause();
}
// rethrow RuntimeExceptions
if (e.getCause() instanceof RuntimeException) {
throw (RuntimeException) e.getCause();
}
// this should never happen
LOG.error("Unexpected exception encountered", e);
return; // bailing out.
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
LOG.warn("Region dirs check interrupted!", ie);
// bailing out
return;
}
}
} | 3.68 |
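The table check above fans region work out with ExecutorService.invokeAll and then unwraps ExecutionException causes, rethrowing IOExceptions and RuntimeExceptions in their original form. A self-contained sketch of that submit-then-unwrap pattern with generic Callables (no HBase types):

```java
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class InvokeAllUnwrap {
    static void runAll(ExecutorService executor, List<Callable<Void>> tasks)
            throws IOException, InterruptedException {
        // invokeAll blocks until every task has completed or failed.
        for (Future<Void> f : executor.invokeAll(tasks)) {
            try {
                f.get();
            } catch (ExecutionException e) {
                // Rethrow checked/unchecked causes in their original form, as checkTableDir does.
                if (e.getCause() instanceof IOException) {
                    throw (IOException) e.getCause();
                }
                if (e.getCause() instanceof RuntimeException) {
                    throw (RuntimeException) e.getCause();
                }
                throw new IllegalStateException("Unexpected failure", e);
            }
        }
    }

    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        List<Callable<Void>> tasks = Arrays.asList(
                () -> null,                                    // succeeds
                () -> { throw new IOException("bad file"); }); // fails
        try {
            runAll(executor, tasks);
        } catch (IOException e) {
            System.out.println("Unwrapped cause: " + e.getMessage()); // bad file
        } finally {
            executor.shutdown();
        }
    }
}
```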
hadoop_CacheStats_releaseRoundDown | /**
* Release some bytes that we're using rounded down to the page size.
*
* @param count
* The number of bytes to release. We will round this down to the
* page size.
*
* @return The new number of usedBytes.
*/
long releaseRoundDown(long count) {
return usedBytesCount.releaseRoundDown(count);
} | 3.68 |
framework_ConnectorTracker_unregisterConnector | /**
* Unregister the given connector.
*
* <p>
* The lookup method {@link #getConnector(String)} only returns registered
* connectors.
* </p>
*
* @param connector
* The connector to unregister
*/
public void unregisterConnector(ClientConnector connector) {
String connectorId = connector.getConnectorId();
if (!connectorIdToConnector.containsKey(connectorId)) {
getLogger().log(Level.WARNING,
"Tried to unregister {0} ({1}) which is not registered",
new Object[] { connector.getClass().getSimpleName(),
connectorId });
return;
}
if (connectorIdToConnector.get(connectorId) != connector) {
throw new RuntimeException("The given connector with id "
+ connectorId
+ " is not the one that was registered for that id");
}
dirtyConnectors.remove(connector);
if (!isClientSideInitialized(connector)) {
// Client side has never known about this connector so there is no
// point in tracking it
removeUnregisteredConnector(connector,
uI.getSession().getGlobalResourceHandler(false));
} else if (unregisteredConnectors.add(connector)) {
// Client side knows about the connector, track it for a while if it
// becomes reattached
if (fineLogging) {
getLogger().log(Level.FINE, "Unregistered {0} ({1})",
new Object[] { connector.getClass().getSimpleName(),
connectorId });
}
} else {
getLogger().log(Level.WARNING,
"Unregistered {0} ({1}) that was already unregistered.",
new Object[] { connector.getClass().getSimpleName(),
connectorId });
}
} | 3.68 |
hadoop_RequestFactoryImpl_getStorageClass | /**
* Get the object storage class, return null if none.
* @return storage class
*/
@Override
public StorageClass getStorageClass() {
return storageClass;
} | 3.68 |
flink_EnvironmentSettings_withClassLoader | /**
* Specifies the classloader to use in the planner for operations related to code
* generation, UDF loading, operations requiring reflections on user classes, discovery of
* factories.
*
* <p>By default, this is configured using {@code
* Thread.currentThread().getContextClassLoader()}.
*
* <p>Modify the {@link ClassLoader} only if you know what you're doing.
*/
public Builder withClassLoader(ClassLoader classLoader) {
this.classLoader = classLoader;
return this;
} | 3.68 |
pulsar_SessionEvent_isConnected | /**
* Check whether the state represents a connected or not-connected state.
*/
public boolean isConnected() {
switch (this) {
case Reconnected:
case SessionReestablished:
return true;
case ConnectionLost:
case SessionLost:
default:
return false;
}
} | 3.68 |
flink_HiveParserTypeCheckProcFactory_getStrExprProcessor | /** Factory method to get StrExprProcessor. */
public HiveParserTypeCheckProcFactory.StrExprProcessor getStrExprProcessor() {
return new HiveParserTypeCheckProcFactory.StrExprProcessor();
} | 3.68 |
dubbo_BasicJsonWriter_writeArray | /**
* Write an array with the specified items. Each item in the
* list is written either as a nested object or as an attribute
* depending on its type.
*
* @param items the items to write
* @see #writeObject(Map)
*/
public void writeArray(List<?> items) {
writeArray(items, true);
} | 3.68 |
graphhopper_BBox_parseBBoxString | /**
* This method creates a BBox out of a string in format lon1,lon2,lat1,lat2
*/
public static BBox parseBBoxString(String objectAsString) {
String[] splittedObject = objectAsString.split(",");
if (splittedObject.length != 4)
throw new IllegalArgumentException("BBox should have 4 parts but was " + objectAsString);
double minLon = Double.parseDouble(splittedObject[0]);
double maxLon = Double.parseDouble(splittedObject[1]);
double minLat = Double.parseDouble(splittedObject[2]);
double maxLat = Double.parseDouble(splittedObject[3]);
return new BBox(minLon, maxLon, minLat, maxLat);
} | 3.68 |
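A quick stand-alone sketch of the lon1,lon2,lat1,lat2 parsing convention above, with made-up coordinates (plain JDK parsing; the GraphHopper BBox class itself is not used here):

```java
public class BBoxStringParse {
    public static void main(String[] args) {
        // Order is lon1,lon2,lat1,lat2, exactly as the javadoc above states.
        String s = "13.35,13.55,52.45,52.65"; // illustrative values
        String[] parts = s.split(",");
        if (parts.length != 4) {
            throw new IllegalArgumentException("BBox should have 4 parts but was " + s);
        }
        double minLon = Double.parseDouble(parts[0]);
        double maxLon = Double.parseDouble(parts[1]);
        double minLat = Double.parseDouble(parts[2]);
        double maxLat = Double.parseDouble(parts[3]);
        System.out.printf("lon [%s, %s], lat [%s, %s]%n", minLon, maxLon, minLat, maxLat);
    }
}
```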
framework_VTabsheet_blur | /**
* Removes focus from the active tab.
*
* @deprecated This method is not called by the framework code anymore.
*/
@Deprecated
public void blur() {
getActiveTab().blur();
} | 3.68 |
framework_VaadinService_preserveUIOnRefresh | /**
* Check if the given UI should be associated with the
* <code>window.name</code> so that it can be re-used if the browser window
* is reloaded. This is typically determined by the UI provider, which
* checks the @{@link PreserveOnRefresh} annotation, but UI providers and
* ultimately VaadinService implementations may choose to override the
* defaults.
*
* @param provider
* the UI provider responsible for the UI
* @param event
* the UI create event with details about the UI
*
* @return <code>true</code> if the UI should be preserved on refresh;
* <code>false</code> if a new UI instance should be initialized on
* refresh.
*/
public boolean preserveUIOnRefresh(UIProvider provider,
UICreateEvent event) {
return provider.isPreservedOnRefresh(event);
} | 3.68 |
hadoop_TFile_prepareAppendKey | /**
* Obtain an output stream for writing a key into TFile. This may only be
* called when there is no active Key appending stream or value appending
* stream.
*
* @param length
* The expected length of the key. If length of the key is not
* known, set length = -1. Otherwise, the application must write
* exactly as many bytes as specified here before calling close on
* the returned output stream.
* @return The key appending output stream.
* @throws IOException raised on errors performing I/O.
*
*/
public DataOutputStream prepareAppendKey(int length) throws IOException {
if (state != State.READY) {
throw new IllegalStateException("Incorrect state to start a new key: "
+ state.name());
}
initDataBlock();
DataOutputStream ret = new KeyRegister(length);
state = State.IN_KEY;
return ret;
} | 3.68 |
flink_SpecificInputTypeStrategies_windowTimeIndicator | /** See {@link WindowTimeIndictorInputTypeStrategy}. */
public static InputTypeStrategy windowTimeIndicator() {
return new WindowTimeIndictorInputTypeStrategy(null);
} | 3.68 |
hadoop_CommitTaskStage_getTaskManifest | /**
* Get the manifest.
* @return The manifest.
*/
public TaskManifest getTaskManifest() {
return taskManifest;
} | 3.68 |
hbase_ColumnFamilyDescriptorBuilder_setConfiguration | /**
* Setter for storing a configuration setting in {@link #configuration} map.
* @param key Config key. Same as XML config key e.g. hbase.something.or.other.
* @param value String value. If null, removes the configuration.
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setConfiguration(String key, String value) {
if (value == null || value.length() == 0) {
configuration.remove(key);
} else {
configuration.put(key, value);
}
return this;
} | 3.68 |
flink_SegmentsUtil_allocateReuseBytes | /**
* Allocate bytes that are only for temporary usage; they should not be stored anywhere else.
* Use a {@link ThreadLocal} to reuse bytes and avoid the overhead of byte[] allocation and GC.
*
* <p>If there are methods that can only accept a byte[], instead of a MemorySegment[]
* parameter, we can allocate a reuse bytes and copy the MemorySegment data to byte[], then call
* the method. Such as String deserialization.
*/
public static byte[] allocateReuseBytes(int length) {
byte[] bytes = BYTES_LOCAL.get();
if (bytes == null) {
if (length <= MAX_BYTES_LENGTH) {
bytes = new byte[MAX_BYTES_LENGTH];
BYTES_LOCAL.set(bytes);
} else {
bytes = new byte[length];
}
} else if (bytes.length < length) {
bytes = new byte[length];
}
return bytes;
} | 3.68 |
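A stand-alone sketch of the same per-thread buffer-reuse idea, runnable without Flink (the size cap here is an assumed illustrative constant, not Flink's actual MAX_BYTES_LENGTH):

```java
public class ReusableBytes {
    private static final int MAX_BYTES_LENGTH = 64 * 1024; // assumed cap for the cached buffer
    private static final ThreadLocal<byte[]> BYTES_LOCAL = new ThreadLocal<>();

    // Returns a byte[] of at least `length` bytes; small requests share one
    // cached per-thread buffer, large requests get a throwaway allocation.
    static byte[] allocateReuseBytes(int length) {
        byte[] bytes = BYTES_LOCAL.get();
        if (bytes == null) {
            if (length <= MAX_BYTES_LENGTH) {
                bytes = new byte[MAX_BYTES_LENGTH];
                BYTES_LOCAL.set(bytes);
            } else {
                bytes = new byte[length];
            }
        } else if (bytes.length < length) {
            bytes = new byte[length];
        }
        return bytes;
    }

    public static void main(String[] args) {
        byte[] a = allocateReuseBytes(100);
        byte[] b = allocateReuseBytes(200);
        System.out.println(a == b); // true: both requests fit in the cached buffer
    }
}
```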
flink_HiveTypeUtil_toHiveTypeInfo | /**
* Convert Flink LogicalType to Hive TypeInfo. For types with a precision parameter, e.g.
* timestamp, the supported precisions in Hive and Flink can be different. Therefore the
* conversion will fail for those types if the precision is not supported by Hive and
* checkPrecision is true.
*
* @param logicalType a Flink LogicalType
* @param checkPrecision whether to fail the conversion if the precision of the LogicalType is
* not supported by Hive
* @return the corresponding Hive data type
*/
public static TypeInfo toHiveTypeInfo(LogicalType logicalType, boolean checkPrecision) {
checkNotNull(logicalType, "type cannot be null");
return logicalType.accept(new TypeInfoLogicalTypeVisitor(logicalType, checkPrecision));
} | 3.68 |
flink_ReaderInfo_getLocation | /** @return the location of the subtask that runs this source reader. */
public String getLocation() {
return location;
} | 3.68 |
framework_DesignContext_setShouldWriteDefaultValues | /**
* Set whether default attribute values should be written by the
* {@code DesignAttributeHandler#writeAttribute(String, Attributes, Object, Object, Class, DesignContext)}
* method. Default is {@code false}.
*
* @since 8.0
* @param value
* {@code true} to write default values of attributes,
* {@code false} to disable writing of default values
*/
public void setShouldWriteDefaultValues(boolean value) {
shouldWriteDefaultValues = value;
} | 3.68 |
hadoop_VersionInfoMojo_getSCMBranch | /**
* Parses SCM output and returns branch of SCM.
*
* @param scm SCM in use for this build
* @return String branch of SCM
*/
private String getSCMBranch(SCM scm) {
String branch = "Unknown";
switch (scm) {
case GIT:
for (String s : scmOut) {
if (s.startsWith("*")) {
branch = s.substring("*".length());
break;
}
}
break;
}
return branch.trim();
} | 3.68 |
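A small stand-alone sketch of the branch-parsing loop above, fed with hypothetical `git branch` output where only the checked-out branch line starts with an asterisk:

```java
import java.util.Arrays;
import java.util.List;

public class CurrentBranchParse {
    public static void main(String[] args) {
        // Hypothetical output of `git branch`; only the current branch is starred.
        List<String> scmOut = Arrays.asList("  feature/x", "* trunk", "  release-1.0");
        String branch = "Unknown";
        for (String s : scmOut) {
            if (s.startsWith("*")) {
                branch = s.substring("*".length());
                break;
            }
        }
        System.out.println(branch.trim()); // trunk
    }
}
```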
flink_FlinkRelMetadataQuery_getColumnOriginNullCount | /**
* Returns origin null count of the given column.
*
* @param rel the relational expression
* @param index the index of the given column
* @return the null count of the given column if it can be estimated, otherwise null.
*/
public Double getColumnOriginNullCount(RelNode rel, int index) {
for (; ; ) {
try {
return columnOriginNullCountHandler.getColumnOriginNullCount(rel, this, index);
} catch (JaninoRelMetadataProvider.NoHandler e) {
columnOriginNullCountHandler =
revise(e.relClass, FlinkMetadata.ColumnOriginNullCount.DEF);
}
}
} | 3.68 |
flink_PrioritizedOperatorSubtaskState_getPrioritizedRawOperatorState | /**
* Returns an immutable list with all alternative snapshots to restore the raw operator state,
* in the order in which we should attempt to restore.
*/
@Nonnull
public List<StateObjectCollection<OperatorStateHandle>> getPrioritizedRawOperatorState() {
return prioritizedRawOperatorState;
} | 3.68 |
flink_DiskCacheManager_close | /** Close this {@link DiskCacheManager}, it means no data can append to memory. */
void close() {
forceFlushCachedBuffers();
} | 3.68 |
hbase_RestoreSnapshotHelper_getRegionsToAdd | /**
* Returns the list of new regions added during the on-disk restore. The caller is responsible
* for adding the regions to META, e.g. MetaTableAccessor.addRegionsToMeta(...)
* @return the list of regions to add to META
*/
public List<RegionInfo> getRegionsToAdd() {
return this.regionsToAdd;
} | 3.68 |
hbase_CheckAndMutate_ifMatches | /**
* Check for match
* @param filter filter to check
* @return the CheckAndMutate object
*/
public Builder ifMatches(Filter filter) {
this.filter = Preconditions.checkNotNull(filter, "filter is null");
return this;
} | 3.68 |
flink_HybridShuffleConfiguration_getFullStrategyReleaseThreshold | /**
* When the number of buffers that have been requested exceeds this threshold, trigger the
* release operation. Used by {@link HsFullSpillingStrategy}.
*/
public float getFullStrategyReleaseThreshold() {
return fullStrategyReleaseThreshold;
} | 3.68 |
framework_VaadinService_createCriticalNotificationJSON | /**
* Creates a JSON message which, when sent to client as-is, will cause a
* critical error to be shown with the given details.
*
* @param caption
* The caption of the error or null to omit
* @param message
* The error message or null to omit
* @param details
* Additional error details or null to omit
* @param url
* A url to redirect to. If no other details are given then the
* user will be immediately redirected to this URL. Otherwise the
* message will be shown and the browser will redirect to the
* given URL only after the user acknowledges the message. If
* null then the browser will refresh the current page.
* @return A JSON string to be sent to the client
*/
public static String createCriticalNotificationJSON(String caption,
String message, String details, String url) {
String returnString = "";
try {
JsonObject appError = Json.createObject();
putValueOrJsonNull(appError, "caption", caption);
putValueOrJsonNull(appError, "url", url);
putValueOrJsonNull(appError, "message", message);
putValueOrJsonNull(appError, "details", details);
JsonObject meta = Json.createObject();
meta.put("appError", appError);
JsonObject json = Json.createObject();
json.put("changes", Json.createObject());
json.put("resources", Json.createObject());
json.put("locales", Json.createObject());
json.put("meta", meta);
json.put(ApplicationConstants.SERVER_SYNC_ID, -1);
returnString = JsonUtil.stringify(json);
} catch (JsonException e) {
getLogger().log(Level.WARNING,
"Error creating critical notification JSON message", e);
}
return "for(;;);[" + returnString + "]";
} | 3.68 |
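The returned payload is wrapped as for(;;);[...] so that the response is not directly parseable as executable JSON/JavaScript (a common anti-JSON-hijacking convention); a client has to strip that prefix before parsing. A minimal sketch of the unwrap step with plain string handling (no Vaadin classes; the payload is an illustrative example in the same shape as the method's output):

```java
public class CriticalNotificationUnwrap {
    public static void main(String[] args) {
        String payload = "for(;;);[{\"meta\":{\"appError\":{\"caption\":\"Oops\"}}}]";
        String prefix = "for(;;);";
        if (payload.startsWith(prefix)) {
            String json = payload.substring(prefix.length());
            System.out.println(json); // [{"meta":{"appError":{"caption":"Oops"}}}]
        }
    }
}
```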
MagicPlugin_MagicController_getLogger | /*
* Get the log, if you need to debug or log errors.
*/
@Override
public MagicLogger getLogger() {
return logger;
} | 3.68 |
hadoop_ResourceInformation_getValue | /**
* Integer value of the resource.
*
* @return value
**/
@ApiModelProperty(value = "Integer value of the resource.")
@JsonProperty("value")
public Long getValue() {
return value;
} | 3.68 |
flink_ConnectedStreams_getFirstInput | /**
* Returns the first {@link DataStream}.
*
* @return The first DataStream.
*/
public DataStream<IN1> getFirstInput() {
return inputStream1;
} | 3.68 |
pulsar_AuthenticationService_authenticateHttpRequest | /**
* This method is deprecated; it is recommended to implement the variant that takes an
* AuthenticationDataSource argument instead.
* @deprecated use {@link #authenticateHttpRequest(HttpServletRequest, HttpServletResponse)}.
*/
@Deprecated
public String authenticateHttpRequest(HttpServletRequest request) throws AuthenticationException {
return authenticateHttpRequest(request, (AuthenticationDataSource) null);
} | 3.68 |
morf_SelectStatement_drive | /**
* @see org.alfasoftware.morf.util.ObjectTreeTraverser.Driver#drive(ObjectTreeTraverser)
*/
@Override
public void drive(ObjectTreeTraverser traverser) {
super.drive(traverser);
traverser
.dispatch(groupBys)
.dispatch(having)
.dispatch(setOperators)
.dispatch(hints);
} | 3.68 |
hbase_InternalScanner_next | /**
* Grab the next row's worth of values.
* @param result return output array
* @return true if more rows exist after this one, false if scanner is done
* @throws IOException e
*/
default boolean next(List<Cell> result) throws IOException {
return next(result, NoLimitScannerContext.getInstance());
} | 3.68 |
hibernate-validator_MethodConfigurationRule_isDefinedOnParallelType | /**
* Whether {@code otherExecutable} is defined on a parallel of the declaring
* type of {@code executable} or not.
*
* @param executable the executable to check against
* @param otherExecutable the executable to check
*
* @return {@code true} if {@code otherExecutable} is defined on a parallel of the declaring type of
* {@code executable}, {@code false} otherwise
*/
protected boolean isDefinedOnParallelType(ConstrainedExecutable executable, ConstrainedExecutable otherExecutable) {
Class<?> clazz = executable.getCallable().getDeclaringClass();
Class<?> otherClazz = otherExecutable.getCallable().getDeclaringClass();
return !( clazz.isAssignableFrom( otherClazz ) || otherClazz.isAssignableFrom( clazz ) );
} | 3.68 |
flink_FunctionDefinitionFactory_createFunctionDefinition | /**
* Creates a {@link FunctionDefinition} from given {@link CatalogFunction} with the given {@link
* Context} containing the class loader of the current session, which is useful when a class
* needs to be loaded from its class name.
*
* <p>The default implementation will call {@link #createFunctionDefinition(String,
* CatalogFunction)} directly.
*
* @param name name of the {@link CatalogFunction}
* @param catalogFunction the catalog function
* @param context the {@link Context} for creating function definition
* @return a {@link FunctionDefinition}
*/
default FunctionDefinition createFunctionDefinition(
String name, CatalogFunction catalogFunction, Context context) {
try (TemporaryClassLoaderContext ignored =
TemporaryClassLoaderContext.of(context.getClassLoader())) {
return createFunctionDefinition(name, catalogFunction);
}
} | 3.68 |
querydsl_JTSGeometryExpressions_xmax | /**
* Returns X maxima of a bounding box 2d or 3d or a geometry.
*
* @param expr geometry
* @return x maxima
*/
public static NumberExpression<Double> xmax(JTSGeometryExpression<?> expr) {
return Expressions.numberOperation(Double.class, SpatialOps.XMAX, expr);
} | 3.68 |
hadoop_FutureIO_propagateOptions | /**
* Propagate options to any builder, converting everything with the
* prefix to an option where, if there were 2+ dot-separated elements,
* it is converted to a schema.
* <pre>
* fs.example.s3a.option becomes "s3a.option"
* fs.example.fs.io.policy becomes "fs.io.policy"
* fs.example.something becomes "something"
* </pre>
* @param builder builder to modify
* @param conf configuration to read
* @param prefix prefix to scan/strip
* @param mandatory are the options to be mandatory or optional?
*/
public static void propagateOptions(
final FSBuilder<?, ?> builder,
final Configuration conf,
final String prefix,
final boolean mandatory) {
final String p = prefix.endsWith(".") ? prefix : (prefix + ".");
final Map<String, String> propsWithPrefix = conf.getPropsWithPrefix(p);
for (Map.Entry<String, String> entry : propsWithPrefix.entrySet()) {
// change the schema off each entry
String key = entry.getKey();
String val = entry.getValue();
if (mandatory) {
builder.must(key, val);
} else {
builder.opt(key, val);
}
}
} | 3.68 |
hadoop_PatternValidator_matches | /**
* Query to see if the pattern matches
* @param name name to validate
* @return true if the string matches the pattern
*/
public boolean matches(String name) {
return valid.matcher(name).matches();
} | 3.68 |
framework_VTabsheetBase_setConnector | /**
* Sets the connector that should be notified of events etc.
*
* For internal use only. This method may be removed or replaced in the
* future.
*
* @since 7.2
* @param connector
* the connector of this widget
*/
public void setConnector(AbstractComponentConnector connector) {
this.connector = connector;
} | 3.68 |
framework_TreeGrid_getItemCollapseAllowedProvider | /**
* Gets the item collapse allowed provider.
*
* @return the item collapse allowed provider
*/
public ItemCollapseAllowedProvider<T> getItemCollapseAllowedProvider() {
return getDataCommunicator().getItemCollapseAllowedProvider();
} | 3.68 |
flink_RocksDBHandle_restoreInstanceDirectoryFromPath | /**
* This recreates the new working directory of the recovered RocksDB instance and links/copies
* the contents from a local state.
*/
private void restoreInstanceDirectoryFromPath(Path source) throws IOException {
final Path instanceRocksDBDirectory = Paths.get(dbPath);
final Path[] files = FileUtils.listDirectory(source);
if (!new File(dbPath).mkdirs()) {
String errMsg = "Could not create RocksDB data directory: " + dbPath;
logger.error(errMsg);
throw new IOException(errMsg);
}
for (Path file : files) {
final String fileName = file.getFileName().toString();
final Path targetFile = instanceRocksDBDirectory.resolve(fileName);
if (fileName.endsWith(SST_FILE_SUFFIX)) {
try {
// hardlink'ing the immutable sst-files.
Files.createLink(targetFile, file);
continue;
} catch (IOException ioe) {
final String logMessage =
String.format(
"Could not hard link sst file %s. Trying to copy it over. This might "
+ "increase the recovery time. In order to avoid this, configure "
+ "RocksDB's working directory and the local state directory to be on the same volume.",
fileName);
if (logger.isDebugEnabled()) {
logger.debug(logMessage, ioe);
} else {
logger.info(logMessage);
}
}
}
// true copy for all other files and files that could not be hard linked.
Files.copy(file, targetFile, StandardCopyOption.REPLACE_EXISTING);
}
} | 3.68 |
hadoop_IncrementalBlockReportManager_removeAll | /** @return all the blocks removed from this IBR. */
ReceivedDeletedBlockInfo[] removeAll() {
final int size = blocks.size();
if (size == 0) {
return null;
}
final ReceivedDeletedBlockInfo[] rdbis = blocks.values().toArray(
new ReceivedDeletedBlockInfo[size]);
blocks.clear();
return rdbis;
} | 3.68 |
hbase_SimpleRegionNormalizer_computeSplitNormalizationPlans | /**
* Computes the split plans that should be executed for this table to converge average region size
* towards target average or target region count. <br />
* If a region is more than 2 times larger than the average, we split it. Splitting is a
* higher-priority normalization action than merging.
*/
private List<NormalizationPlan> computeSplitNormalizationPlans(final NormalizeContext ctx) {
final double avgRegionSize = ctx.getAverageRegionSizeMb();
LOG.debug("Table {}, average region size: {} MB", ctx.getTableName(),
String.format("%.3f", avgRegionSize));
final List<NormalizationPlan> plans = new ArrayList<>();
for (final RegionInfo hri : ctx.getTableRegions()) {
if (skipForSplit(ctx.getRegionStates().getRegionState(hri), hri)) {
continue;
}
final long regionSizeMb = getRegionSizeMB(hri);
if (regionSizeMb > 2 * avgRegionSize) {
LOG.info(
"Table {}, large region {} has size {} MB, more than twice avg size {} MB, "
+ "splitting",
ctx.getTableName(), hri.getRegionNameAsString(), regionSizeMb,
String.format("%.3f", avgRegionSize));
plans.add(new SplitNormalizationPlan(hri, regionSizeMb));
}
}
return plans;
} | 3.68 |
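A tiny worked sketch of the "split when more than twice the average" rule above, with made-up region sizes: for sizes {10, 20, 120} MB the average is 50 MB, so only the 120 MB region exceeds 2 × 50 = 100 MB and gets a split plan.

```java
import java.util.Arrays;

public class SplitThresholdExample {
    public static void main(String[] args) {
        long[] regionSizesMb = {10, 20, 120}; // hypothetical region sizes
        double avg = Arrays.stream(regionSizesMb).average().orElse(0); // 50.0
        for (long size : regionSizesMb) {
            // Same predicate as computeSplitNormalizationPlans: size > 2 * avg.
            boolean split = size > 2 * avg;
            System.out.println(size + " MB -> split? " + split);
        }
        // Only the 120 MB region is marked for splitting.
    }
}
```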
framework_LayoutsCssTest_createPanelWith | /**
* Helper to create panels for different theme variants...
*/
private Panel createPanelWith(String caption, String styleName) {
VerticalLayout panelLayout = new VerticalLayout();
panelLayout.setMargin(true);
Panel panel = new Panel(caption, panelLayout);
panelLayout.addComponent(new Label("Some content"));
panel.setIcon(new ThemeResource(TestSampler.ICON_URL));
panel.setComponentError(new UserError("A error message..."));
panel.setId("layout" + debugIdCounter++);
if (styleName != null) {
panel.addStyleName(styleName);
}
return panel;
} | 3.68 |
flink_BlobServer_putBuffer | /**
* Uploads the data of the given byte array for the given job to the BLOB server.
*
* @param jobId the ID of the job the BLOB belongs to
* @param value the buffer to upload
* @param blobType whether to make the data permanent or transient
* @return the computed BLOB key identifying the BLOB on the server
* @throws IOException thrown if an I/O error occurs while writing it to a local file, or
* uploading it to the HA store
*/
private BlobKey putBuffer(@Nullable JobID jobId, byte[] value, BlobKey.BlobType blobType)
throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Received PUT call for BLOB of job {}.", jobId);
}
File incomingFile = createTemporaryFilename();
MessageDigest md = BlobUtils.createMessageDigest();
BlobKey blobKey = null;
try (FileOutputStream fos = new FileOutputStream(incomingFile)) {
md.update(value);
fos.write(value);
} catch (IOException ioe) {
// delete incomingFile from a failed download
if (!incomingFile.delete() && incomingFile.exists()) {
LOG.warn("Could not delete the staging file {} for job {}.", incomingFile, jobId);
}
throw ioe;
}
try {
// persist file
blobKey = moveTempFileToStore(incomingFile, jobId, md.digest(), blobType);
return blobKey;
} finally {
// delete incomingFile from a failed download
if (!incomingFile.delete() && incomingFile.exists()) {
LOG.warn(
"Could not delete the staging file {} for blob key {} and job {}.",
incomingFile,
blobKey,
jobId);
}
}
} | 3.68 |
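The PUT path above hashes the payload with a MessageDigest while writing it to a staging file, then cleans the staging file up. A compact sketch of that write-and-digest step with plain JDK classes (the file name and the SHA-256 algorithm are illustrative assumptions; the snippet itself obtains its digest from BlobUtils.createMessageDigest):

```java
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public class DigestWhileWriting {
    public static void main(String[] args) throws IOException, NoSuchAlgorithmException {
        byte[] value = "hello blob".getBytes(StandardCharsets.UTF_8);
        File staging = File.createTempFile("blob-staging", ".tmp");
        MessageDigest md = MessageDigest.getInstance("SHA-256"); // assumed algorithm
        try (FileOutputStream fos = new FileOutputStream(staging)) {
            md.update(value); // feed the digest...
            fos.write(value); // ...and persist the same bytes
        } finally {
            // Always clean up the staging file, as putBuffer does on both paths.
            if (!staging.delete() && staging.exists()) {
                System.err.println("Could not delete staging file " + staging);
            }
        }
        System.out.println("digest length: " + md.digest().length + " bytes"); // 32 for SHA-256
    }
}
```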
hibernate-validator_PredefinedScopeConstraintValidatorManagerImpl_getInitializedValidator | /**
* @param validatedValueType the type of the value to be validated. Cannot be {@code null}.
* @param descriptor the constraint descriptor for which to get an initialized constraint validator. Cannot be {@code null}
* @param constraintValidatorFactory constraint factory used to instantiate the constraint validator. Cannot be {@code null}.
* @param initializationContext context used on constraint validator initialization
* @param <A> the annotation type
*
* @return an initialized constraint validator for the given type and annotation of the value to be validated.
* {@code null} is returned if no matching constraint validator could be found.
*/
@Override
public <A extends Annotation> ConstraintValidator<A, ?> getInitializedValidator(
Type validatedValueType,
ConstraintDescriptorImpl<A> descriptor,
ConstraintValidatorFactory constraintValidatorFactory,
HibernateConstraintValidatorInitializationContext initializationContext) {
Contracts.assertNotNull( validatedValueType );
Contracts.assertNotNull( descriptor );
Contracts.assertNotNull( constraintValidatorFactory );
Contracts.assertNotNull( initializationContext );
return createAndInitializeValidator( validatedValueType, descriptor, constraintValidatorFactory, initializationContext );
} | 3.68 |
hadoop_RegistryTypeUtils_retrieveAddressURLs | /**
* Get the address URLs. Guaranteed to return at least one address.
* @param epr endpoint
* @return the address as a URL
* @throws InvalidRecordException if the type is wrong, there are no addresses,
* or the payload is ill-formatted
* @throws MalformedURLException address can't be turned into a URL
*/
public static List<URL> retrieveAddressURLs(Endpoint epr)
throws InvalidRecordException, MalformedURLException {
if (epr == null) {
throw new InvalidRecordException("", "Null endpoint");
}
List<String> addresses = retrieveAddressesUriType(epr);
List<URL> results = new ArrayList<URL>(addresses.size());
for (String address : addresses) {
results.add(new URL(address));
}
return results;
} | 3.68 |
hadoop_ManifestSuccessData_joinMap | /**
* Join any map of string to value into a string, sorting the keys first.
* @param map map to join
* @param prefix prefix before every entry
* @param middle string between key and value
* @param suffix suffix to each entry
* @return a string for reporting.
*/
protected static String joinMap(Map<String, ?> map,
String prefix,
String middle, String suffix) {
if (map == null) {
return "";
}
List<String> list = new ArrayList<>(map.keySet());
Collections.sort(list);
StringBuilder sb = new StringBuilder(list.size() * 32);
for (String k : list) {
sb.append(prefix)
.append(k)
.append(middle)
.append(map.get(k))
.append(suffix);
}
return sb.toString();
} | 3.68 |
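A short usage sketch showing the output shape of the joinMap helper above for a two-entry map (the prefix/middle/suffix strings here are arbitrary examples):

```java
import java.util.Map;
import java.util.TreeMap;

public class JoinMapExample {
    public static void main(String[] args) {
        Map<String, Object> map = new TreeMap<>();
        map.put("files", 3);
        map.put("bytes", 1024);
        // Equivalent of joinMap(map, "\n  ", " = ", ";"): keys sorted, then
        // prefix + key + middle + value + suffix for every entry.
        StringBuilder sb = new StringBuilder();
        for (Map.Entry<String, Object> e : map.entrySet()) { // TreeMap iterates in key order
            sb.append("\n  ").append(e.getKey()).append(" = ").append(e.getValue()).append(";");
        }
        System.out.println(sb);
        // prints:
        //   bytes = 1024;
        //   files = 3;
    }
}
```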
framework_VAbstractPopupCalendar_setStyleName | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.user.client.ui.UIObject#setStyleName(java.lang.String)
*/
@Override
public void setStyleName(String style) {
super.setStyleName(style);
updateStyleNames();
} | 3.68 |
flink_TGetQueryIdReq_isSet | /**
* Returns true if field corresponding to fieldID is set (has been assigned a value) and false
* otherwise
*/
public boolean isSet(_Fields field) {
if (field == null) {
throw new java.lang.IllegalArgumentException();
}
switch (field) {
case OPERATION_HANDLE:
return isSetOperationHandle();
}
throw new java.lang.IllegalStateException();
} | 3.68 |
hadoop_AbfsClient_renameIdempotencyCheckOp | /**
* Check if the rename request failure is post a retry and if earlier rename
* request might have succeeded at back-end.
*
* If a source etag was passed in, and the error was 404, get the
* etag of any file at the destination.
* If it matches the source etag, then the rename is considered
* a success.
* Exceptions raised in the probe of the destination are swallowed,
* so that they do not interfere with the original rename failures.
* @param source source path
* @param op Rename request REST operation response with non-null HTTP response
* @param destination rename destination path
* @param sourceEtag etag of source file. may be null or empty
* @param tracingContext Tracks identifiers for request header
* @return true if the file was successfully copied
*/
public boolean renameIdempotencyCheckOp(
final String source,
final String sourceEtag,
final AbfsRestOperation op,
final String destination,
TracingContext tracingContext) {
Preconditions.checkArgument(op.hasResult(), "Operations has null HTTP response");
// removing isDir from debug logs as it can be misleading
LOG.debug("rename({}, {}) failure {}; retry={} etag {}",
source, destination, op.getResult().getStatusCode(), op.isARetriedRequest(), sourceEtag);
if (!(op.isARetriedRequest()
&& (op.getResult().getStatusCode() == HttpURLConnection.HTTP_NOT_FOUND))) {
// only attempt recovery if the failure was a 404 on a retried rename request.
return false;
}
if (isNotEmpty(sourceEtag)) {
// Server has returned HTTP 404, we have an etag, so see
// if the rename has actually taken place,
LOG.info("rename {} to {} failed, checking etag of destination",
source, destination);
try {
final AbfsRestOperation destStatusOp = getPathStatus(destination, false, tracingContext);
final AbfsHttpOperation result = destStatusOp.getResult();
final boolean recovered = result.getStatusCode() == HttpURLConnection.HTTP_OK
&& sourceEtag.equals(extractEtagHeader(result));
LOG.info("File rename has taken place: recovery {}",
recovered ? "succeeded" : "failed");
return recovered;
} catch (AzureBlobFileSystemException ex) {
// GetFileStatus on the destination failed, the rename did not take place
// or some other failure. log and swallow.
LOG.debug("Failed to get status of path {}", destination, ex);
}
} else {
LOG.debug("No source etag; unable to probe for the operation's success");
}
return false;
} | 3.68 |
framework_VaadinPortletRequest_getCurrent | /**
* Gets the currently processed Vaadin portlet request. The current request
* is automatically defined when the request is started. The current request
* can not be used in e.g. background threads because of the way server
* implementations reuse request instances.
*
* @return the current Vaadin portlet request instance if available,
* otherwise <code>null</code>
* @since 7.3
*/
public static VaadinPortletRequest getCurrent() {
return VaadinPortletService.getCurrentRequest();
} | 3.68 |
hbase_Addressing_isLocalAddress | /**
* Given an InetAddress, checks to see if the address is a local address, by comparing the address
* with all the interfaces on the node.
* @param addr address to check if it is local node's address
* @return true if the address corresponds to the local node
*/
public static boolean isLocalAddress(InetAddress addr) {
// Check if the address is any local or loop back
boolean local = addr.isAnyLocalAddress() || addr.isLoopbackAddress();
// Check if the address is defined on any interface
if (!local) {
try {
local = NetworkInterface.getByInetAddress(addr) != null;
} catch (SocketException e) {
local = false;
}
}
return local;
} | 3.68 |
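A stand-alone usage sketch of the loopback/interface check above (the method body is repeated so the example runs without HBase on the classpath; the second address is a documentation-range IP used purely for illustration):

```java
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.SocketException;
import java.net.UnknownHostException;

public class LocalAddressCheck {
    static boolean isLocalAddress(InetAddress addr) {
        // Any-local or loopback addresses are local by definition.
        boolean local = addr.isAnyLocalAddress() || addr.isLoopbackAddress();
        // Otherwise, local only if some interface on this node owns the address.
        if (!local) {
            try {
                local = NetworkInterface.getByInetAddress(addr) != null;
            } catch (SocketException e) {
                local = false;
            }
        }
        return local;
    }

    public static void main(String[] args) throws UnknownHostException {
        System.out.println(isLocalAddress(InetAddress.getByName("127.0.0.1"))); // true (loopback)
        System.out.println(isLocalAddress(InetAddress.getByName("192.0.2.1"))); // usually false (TEST-NET-1)
    }
}
```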
hbase_DoubleArrayCost_getMinSkew | /**
* Return the min skew of the distribution.
* @param total the total number of regions
* @param numServers the number of servers
*/
public static double getMinSkew(double total, double numServers) {
if (numServers == 0) {
return 0;
}
double mean = total / numServers;
// It's possible that there aren't enough regions to go around
double min;
if (numServers > total) {
min = ((numServers - total) * mean * mean + (1 - mean) * (1 - mean) * total);
} else {
// Some will have 1 more than everything else.
int numHigh = (int) (total - (Math.floor(mean) * numServers));
int numLow = (int) (numServers - numHigh);
min = numHigh * (Math.ceil(mean) - mean) * (Math.ceil(mean) - mean)
+ numLow * (mean - Math.floor(mean)) * (mean - Math.floor(mean));
}
return Math.sqrt(min);
} | 3.68 |
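A worked example of the min-skew formula above: for total = 10 regions on numServers = 3, the best possible layout is {4, 3, 3}, so the minimum skew is sqrt(1·(4 − 10/3)² + 2·(10/3 − 3)²) ≈ 0.816. The same arithmetic in code:

```java
public class MinSkewExample {
    public static void main(String[] args) {
        double total = 10, numServers = 3;
        double mean = total / numServers;                            // 3.333...
        int numHigh = (int) (total - Math.floor(mean) * numServers); // 1 server holds ceil(mean) = 4
        int numLow = (int) (numServers - numHigh);                   // 2 servers hold floor(mean) = 3
        double min = numHigh * Math.pow(Math.ceil(mean) - mean, 2)
                + numLow * Math.pow(mean - Math.floor(mean), 2);
        System.out.println(Math.sqrt(min)); // ~0.816, the smallest achievable skew
    }
}
```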
morf_UpgradePathFinder_getOnlyWithUuid | /**
* Tells the UUID referenced by the candidate via the {@link OnlyWith}.
*/
public java.util.UUID getOnlyWithUuid() {
return onlyWithUuid;
}
/**
* Decides whether this step is applicable based on list of already applied steps and other conditions.
*
* @param stepsAlreadyApplied List of already applied steps.
* @param candidateSteps List of all potential candidates.
* @return true if the candidate is to be applied, false otherwise.
* @throws IllegalStateException if the {@link OnlyWith} | 3.68 |
morf_Criterion_toString | /**
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
if (selectStatement != null) {
return operator.toString() + " " + selectStatement;
}
if (criteria.isEmpty()) {
switch(operator) {
case ISNULL:
case ISNOTNULL:
return String.format("%s %s", field, operator);
default:
return String.format("%s %s %s", field, operator, value);
}
}
StringBuilder result = new StringBuilder();
boolean only = criteria.size() == 1;
boolean first = criteria.size() > 1;
for (Criterion criterion : criteria) {
if (!only && !first) result.append(" ");
if (!first) result.append(operator).append(" ");
result.append("(").append(criterion).append(")");
first = false;
}
return result.toString();
} | 3.68 |
hmily_RepositorySupportEnum_acquire | /**
* Acquire the repository support enum matching the given support name.
*
* @param support the repository support name
* @return the matching repository support enum, defaulting to MYSQL if no match is found
*/
public static RepositorySupportEnum acquire(final String support) {
Optional<RepositorySupportEnum> repositorySupportEnum =
Arrays.stream(RepositorySupportEnum.values())
.filter(v -> Objects.equals(v.getSupport(), support))
.findFirst();
return repositorySupportEnum.orElse(RepositorySupportEnum.MYSQL);
} | 3.68 |
hudi_ExceptionUtil_getRootCause | /**
* Fetches inner-most cause of the provided {@link Throwable}
*/
@Nonnull
public static Throwable getRootCause(@Nonnull Throwable t) {
Throwable cause = t;
while (cause.getCause() != null) {
cause = cause.getCause();
}
return cause;
} | 3.68 |
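A quick usage sketch of the cause-chain walk above (the helper is repeated so the example runs without Hudi on the classpath):

```java
public class RootCauseExample {
    // Walk getCause() until the inner-most throwable is reached.
    static Throwable getRootCause(Throwable t) {
        Throwable cause = t;
        while (cause.getCause() != null) {
            cause = cause.getCause();
        }
        return cause;
    }

    public static void main(String[] args) {
        Exception root = new IllegalArgumentException("bad input");
        Exception wrapped =
                new RuntimeException("stage failed", new IllegalStateException("task failed", root));
        System.out.println(getRootCause(wrapped).getMessage()); // bad input
    }
}
```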
morf_SqlUtils_literal | /**
* Constructs a new {@link FieldLiteral} with a Character source.
*
* @param value the literal value to use
* @return {@link FieldLiteral}
*/
public static FieldLiteral literal(Character value) {
return new FieldLiteral(value);
} | 3.68 |
morf_ViewChanges_getViewsToDeploy | /**
* @return sorted list of the views to deploy in the order they should be deployed.
*/
public List<View> getViewsToDeploy() {
// Sort the list into creation order based on toposort result
List<String> sortedViewNamesToDeploy = newArrayList(deploySet);
Collections.sort(sortedViewNamesToDeploy, Ordering.explicit(viewCreationOrder));
// Transform the sorted list back into a set of sorted views and return
return Lists.transform(sortedViewNamesToDeploy, nameToView());
} | 3.68 |
AreaShop_SoldRegionEvent_getRefundedMoney | /**
* Get the amount that is paid back to the player.
* @return The amount of money paid back to the player
*/
public double getRefundedMoney() {
return refundedMoney;
} | 3.68 |
flink_SqlClient_openCli | /**
* Opens the CLI client for executing SQL statements.
*
* @param executor executor
*/
private void openCli(Executor executor) {
Path historyFilePath;
if (options.getHistoryFilePath() != null) {
historyFilePath = Paths.get(options.getHistoryFilePath());
} else {
historyFilePath =
Paths.get(
System.getProperty("user.home"),
SystemUtils.IS_OS_WINDOWS ? "flink-sql-history" : ".flink-sql-history");
}
boolean hasSqlFile = options.getSqlFile() != null;
boolean hasUpdateStatement = options.getUpdateStatement() != null;
if (hasSqlFile && hasUpdateStatement) {
throw new IllegalArgumentException(
String.format(
"Please use either option %s or %s. The option %s is deprecated and it's suggested to use %s instead.",
CliOptionsParser.OPTION_FILE,
CliOptionsParser.OPTION_UPDATE,
CliOptionsParser.OPTION_UPDATE.getOpt(),
CliOptionsParser.OPTION_FILE.getOpt()));
}
try (CliClient cli = new CliClient(terminalFactory, executor, historyFilePath)) {
if (options.getInitFile() != null) {
boolean success = cli.executeInitialization(readFromURL(options.getInitFile()));
if (!success) {
System.out.println(
String.format(
"Failed to initialize from sql script: %s. Please refer to the LOG for detailed error messages.",
options.getInitFile()));
return;
} else {
System.out.println(
String.format(
"Successfully initialized from sql script: %s",
options.getInitFile()));
}
}
if (!hasSqlFile && !hasUpdateStatement) {
cli.executeInInteractiveMode();
} else {
cli.executeInNonInteractiveMode(readExecutionContent());
}
}
} | 3.68 |
hmily_GsonUtils_getGson | /**
* Gets gson instance.
*
* @return the instance
*/
public static Gson getGson() {
return GsonUtils.GSON;
} | 3.68 |
hbase_ReplicationPeerConfigBuilder_putAllConfiguration | /**
* Adds all of the provided "raw" configuration entries to {@code this}.
* @param configuration A collection of raw configuration entries
* @return {@code this}
*/
@InterfaceAudience.Private
default ReplicationPeerConfigBuilder putAllConfiguration(Map<String, String> configuration) {
configuration.forEach(this::putConfiguration);
return this;
} | 3.68 |
hmily_HmilyParticipantUndoCacheManager_get | /**
* Acquire the cached participant undo list.
*
* @param participantId the participant id, used as the cache key
* @return the list of {@linkplain HmilyParticipantUndo}, or an empty list if loading fails
*/
public List<HmilyParticipantUndo> get(final Long participantId) {
try {
return loadingCache.get(participantId);
} catch (ExecutionException e) {
return Collections.emptyList();
}
} | 3.68 |
dubbo_DubboBootstrap_registry | /**
* Add an instance of {@link RegistryConfig}
*
* @param registryConfig an instance of {@link RegistryConfig}
* @return current {@link DubboBootstrap} instance
*/
public DubboBootstrap registry(RegistryConfig registryConfig) {
registryConfig.setScopeModel(applicationModel);
configManager.addRegistry(registryConfig);
return this;
} | 3.68 |
framework_CurrentInstance_setCurrent | /**
* Sets current instances for the {@link VaadinSession} and all related
* classes. The previously defined values can be restored by passing the
* returned map to {@link #restoreInstances(Map)}.
*
* @since 7.1
*
* @param session
* The VaadinSession
* @return A map containing the old values of the instances this method
* updated.
*/
public static Map<Class<?>, CurrentInstance> setCurrent(
VaadinSession session) {
Map<Class<?>, CurrentInstance> old = new HashMap<>();
old.put(VaadinSession.class, set(VaadinSession.class, session));
VaadinService service = null;
if (session != null) {
service = session.getService();
}
old.put(VaadinService.class, set(VaadinService.class, service));
return old;
} | 3.68 |
hbase_QuotaSettingsFactory_throttleTable | /**
* Throttle the specified table.
* @param tableName the table to throttle
* @param type the type of throttling
* @param limit the allowed number of request/data per timeUnit
* @param timeUnit the limit time unit
* @param scope the scope of throttling
* @return the quota settings
*/
public static QuotaSettings throttleTable(final TableName tableName, final ThrottleType type,
final long limit, final TimeUnit timeUnit, QuotaScope scope) {
return throttle(null, tableName, null, null, type, limit, timeUnit, scope);
} | 3.68 |
querydsl_ExpressionUtils_isNotNull | /**
* Create a {@code left is not null} expression
*
* @param left operation argument
* @return left is not null
*/
public static Predicate isNotNull(Expression<?> left) {
return predicate(Ops.IS_NOT_NULL, left);
} | 3.68 |
flink_BootstrapTransformation_writeOperatorState | /**
* @param operatorID The operator id for the stream operator.
* @param stateBackend The state backend for the job.
* @param config Additional configurations applied to the bootstrap stream tasks.
* @param globalMaxParallelism Global max parallelism set for the savepoint.
* @param savepointPath The path where the savepoint will be written.
* @return The operator subtask states for this bootstrap transformation.
*/
DataSet<OperatorState> writeOperatorState(
OperatorID operatorID,
@Nullable StateBackend stateBackend,
Configuration config,
int globalMaxParallelism,
Path savepointPath) {
int localMaxParallelism = getMaxParallelism(globalMaxParallelism);
return writeOperatorSubtaskStates(
operatorID, stateBackend, config, savepointPath, localMaxParallelism)
.reduceGroup(new OperatorSubtaskStateReducer(operatorID, localMaxParallelism))
.name("reduce(OperatorSubtaskState)");
} | 3.68 |
hadoop_TimelineMetricOperation_aggregate | /**
* Perform the aggregation operation.
*
* @param incoming Incoming metric
* @param aggregate Base aggregation metric
* @param state Operation state
* @return Result metric for this aggregation operation
*/
public TimelineMetric aggregate(TimelineMetric incoming,
TimelineMetric aggregate, Map<Object, Object> state) {
return exec(incoming, aggregate, state);
} | 3.68 |
hadoop_HdfsUtils_isHealthy | /**
* Is the HDFS healthy?
* HDFS is considered as healthy if it is up and not in safemode.
*
* @param uri the HDFS URI. Note that the URI path is ignored.
* @return true if HDFS is healthy; false, otherwise.
*/
@SuppressWarnings("deprecation")
public static boolean isHealthy(URI uri) {
//check scheme
final String scheme = uri.getScheme();
if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(scheme)) {
throw new IllegalArgumentException("The scheme is not "
+ HdfsConstants.HDFS_URI_SCHEME + ", uri=" + uri);
}
final Configuration conf = new Configuration();
//disable FileSystem cache
conf.setBoolean(String.format("fs.%s.impl.disable.cache", scheme), true);
//disable client retry for rpc connection and rpc calls
conf.setBoolean(HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY, false);
conf.setInt(
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
try (DistributedFileSystem fs =
(DistributedFileSystem) FileSystem.get(uri, conf)) {
final boolean safemode = fs.setSafeMode(SafeModeAction.SAFEMODE_GET);
if (LOG.isDebugEnabled()) {
LOG.debug("Is namenode in safemode? {}; uri={}", safemode, uri);
}
return !safemode;
} catch (IOException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("Got an exception for uri={}", uri, e);
}
return false;
}
} | 3.68 |
hadoop_AbstractDNSToSwitchMapping_isSingleSwitch | /**
* Predicate that indicates that the switch mapping is known to be
* single-switch. The base class returns false: it assumes all mappings are
* multi-rack. Subclasses may override this with methods that are more aware
* of their topologies.
*
* <p>
*
* This method is used when parts of Hadoop need know whether to apply
* single rack vs multi-rack policies, such as during block placement.
* Such algorithms behave differently if they are on multi-switch systems.
* </p>
*
* @return true if the mapping thinks that it is on a single switch
*/
public boolean isSingleSwitch() {
return false;
} | 3.68 |
hadoop_BulkDeleteRetryHandler_bulkDeleteRetried | /**
* Handler for failure of bulk delete requests.
* @param deleteRequest request which was retried.
* @param ex exception
*/
public void bulkDeleteRetried(
DeleteObjectsRequest deleteRequest,
Exception ex) {
LOG.debug("Retrying on error during bulk delete", ex);
if (isThrottleException(ex)) {
onDeleteThrottled(deleteRequest);
} else if (isSymptomOfBrokenConnection(ex)) {
// this is one which surfaces when an HTTPS connection is broken while
// the service is reading the result.
// it is treated as a throttle event for statistics
LOG.warn("Bulk delete operation interrupted: {}", ex.getMessage());
onDeleteThrottled(deleteRequest);
} else {
incrementStatistic(IGNORED_ERRORS);
}
} | 3.68 |
framework_Table_getTableFieldFactory | /**
* Gets the TableFieldFactory that is used to create editors for table cells.
*
* The FieldFactory is only used if the Table is editable.
*
* @return TableFieldFactory used to create the Field instances.
* @see #isEditable
*/
public TableFieldFactory getTableFieldFactory() {
return fieldFactory;
} | 3.68 |
hadoop_ApplicationACLsManager_isAdmin | /**
* Check if the given user in an admin.
*
* @param calledUGI
* UserGroupInformation for the user
* @return true if the user is an admin, false otherwise
*/
public final boolean isAdmin(final UserGroupInformation calledUGI) {
return this.adminAclsManager.isAdmin(calledUGI);
} | 3.68 |
framework_AbstractComponentConnector_updateComponentSize | /**
* Updates the component size, invoking the {@link LayoutManager layout
* manager} if necessary.
*
* @param newWidth
* The new width as a CSS string. Cannot be null.
* @param newHeight
* The new height as a CSS string. Cannot be null.
*/
protected void updateComponentSize(String newWidth, String newHeight) {
Profiler.enter("AbstractComponentConnector.updateComponentSize");
// Parent should be updated if either dimension changed between relative
// and non-relative
if (newWidth.endsWith("%") != lastKnownWidth.endsWith("%")) {
Connector parent = getParent();
if (parent instanceof ManagedLayout) {
getLayoutManager()
.setNeedsHorizontalLayout((ManagedLayout) parent);
}
}
if (newHeight.endsWith("%") != lastKnownHeight.endsWith("%")) {
Connector parent = getParent();
if (parent instanceof ManagedLayout) {
getLayoutManager()
.setNeedsVerticalLayout((ManagedLayout) parent);
}
}
lastKnownWidth = newWidth;
lastKnownHeight = newHeight;
// Set defined sizes
Widget widget = getWidget();
Profiler.enter(
"AbstractComponentConnector.updateComponentSize update styleNames");
widget.setStyleName("v-has-width", !isUndefinedWidth());
widget.setStyleName("v-has-height", !isUndefinedHeight());
Profiler.leave(
"AbstractComponentConnector.updateComponentSize update styleNames");
Profiler.enter(
"AbstractComponentConnector.updateComponentSize update DOM");
updateWidgetSize(newWidth, newHeight);
Profiler.leave(
"AbstractComponentConnector.updateComponentSize update DOM");
Profiler.leave("AbstractComponentConnector.updateComponentSize");
} | 3.68 |
framework_VTooltip_getOpenDelay | /**
* Returns the time (in ms) that should elapse after an event triggering
* tooltip showing has occurred (e.g. mouse over) before the tooltip is
* shown. If a tooltip has recently been shown, then
* {@link #getQuickOpenDelay()} is used instead of this.
*
* @return The open delay (in ms)
*/
public int getOpenDelay() {
return openDelay;
} | 3.68 |
framework_DataCommunicator_getActiveData | /**
* Returns all currently active data mapped by their id from
* DataProvider.
*
* @return map of ids to active data objects
*/
public Map<Object, T> getActiveData() {
Function<T, Object> getId = getDataProvider()::getId;
return activeData.stream().map(getKeyMapper()::get)
.collect(Collectors.toMap(getId, i -> i));
} | 3.68 |
shardingsphere-elasticjob_JobAPIFactory_createServerStatisticsAPI | /**
* Create server statistics API.
*
* @param connectString registry center connect string
* @param namespace registry center namespace
* @param digest registry center digest
* @return job server statistics API
*/
public static ServerStatisticsAPI createServerStatisticsAPI(final String connectString, final String namespace, final String digest) {
return new ServerStatisticsAPIImpl(RegistryCenterFactory.createCoordinatorRegistryCenter(connectString, namespace, digest));
} | 3.68 |
framework_DataGenerator_destroyData | /**
* Informs the {@code DataGenerator} that the given data item has been
* dropped and is no longer needed. This method should clean up any unneeded
* information stored for this item.
*
* @param item
* the dropped data item
*/
public default void destroyData(T item) {
} | 3.68 |
hadoop_HeaderProcessing_maybeSetHeader | /**
* Set a header if the value is non null.
*
* @param headers header map
* @param name header name
* @param value value to encode.
*/
private void maybeSetHeader(
final Map<String, byte[]> headers,
final String name,
final Object value) {
if (value != null) {
headers.put(name, encodeBytes(value));
}
} | 3.68 |
framework_DragAndDropService_isEnabled | /**
* <p>
* Tests if the variable owner is enabled or not. The terminal should not
* send any variable changes to disabled variable owners.
* </p>
* Implementation detail: this method is originally from the VariableOwner
* class, which has been removed in Vaadin 8.
*
* @return <code>true</code> if the variable owner is enabled,
* <code>false</code> if not
*/
@Override
public boolean isEnabled() {
return isConnectorEnabled();
} | 3.68 |
hbase_ZKSplitLogManagerCoordination_needAbandonRetries | /**
* Helper function to check whether to abandon retries in ZooKeeper AsyncCallback functions
* @param statusCode integer value of a ZooKeeper exception code
* @param action description message about the retried action
* @return true when need to abandon retries otherwise false
*/
private boolean needAbandonRetries(int statusCode, String action) {
if (statusCode == KeeperException.Code.SESSIONEXPIRED.intValue()) {
LOG.error("ZK session expired. Master is expected to shut down. Abandoning retries for "
+ "action=" + action);
return true;
}
return false;
} | 3.68 |
dubbo_ConfigManager_setApplication | /**
* Set the application config.
*
* @param application the application config to set
*/
@DisableInject
public void setApplication(ApplicationConfig application) {
addConfig(application);
} | 3.68 |
flink_DatabaseMetaDataUtils_createCatalogsResultSet | /**
* Create result set for catalogs. The schema columns are:
*
* <ul>
* <li>TABLE_CAT String => catalog name.
* </ul>
*
* <p>The results are ordered by catalog name.
*
* @param statement The statement for database meta data
* @param result The result for catalogs
* @return a ResultSet object in which each row has a single String column that is a catalog
* name
*/
public static FlinkResultSet createCatalogsResultSet(
Statement statement, StatementResult result) {
List<RowData> catalogs = new ArrayList<>();
result.forEachRemaining(catalogs::add);
catalogs.sort(Comparator.comparing(v -> v.getString(0)));
return new FlinkResultSet(
statement,
new CollectionResultIterator(catalogs.iterator()),
ResolvedSchema.of(TABLE_CAT_COLUMN));
} | 3.68 |
framework_AbstractSelect_removeItemSetChangeListener | /**
* Removes the Item set change listener from the object.
*
* @see Container.ItemSetChangeNotifier#removeListener(Container.ItemSetChangeListener)
*/
@Override
public void removeItemSetChangeListener(
Container.ItemSetChangeListener listener) {
if (itemSetEventListeners != null) {
itemSetEventListeners.remove(listener);
if (itemSetEventListeners.isEmpty()) {
itemSetEventListeners = null;
}
}
} | 3.68 |
morf_DataValue_defaultEquals | /**
* Default equals implementation for instances.
*
* @param obj1 this
* @param obj2 the other
* @return true if equivalent.
*/
public static boolean defaultEquals(DataValue obj1, Object obj2) {
if (obj1 == obj2) return true;
if (obj2 == null) return false;
if (!(obj2 instanceof DataValue)) return false;
DataValue other = (DataValue) obj2;
if (!obj1.getName().equals(other.getName())) return false;
if (obj1.getObject() == null) {
if (other.getObject() != null) return false;
} else if (obj1.getObject().getClass().isArray()) {
if (other.getObject() == null || !other.getObject().getClass().isArray()) return false;
return Arrays.equals((byte[]) obj1.getObject(), (byte[]) other.getObject());
} else {
if (!obj1.getObject().equals(other.getObject())) return false;
}
return true;
} | 3.68 |
cron-utils_Preconditions_checkNotNullNorEmpty | /**
* Ensures that a collection reference passed as a parameter to the calling method is not null
* nor empty.
*
* @param reference a collection reference
* @param errorMessage the exception message to use if the check fails; will be converted to a
* string using {@link String#valueOf(Object)}
* @return the non-null reference that was validated
* @throws NullPointerException if {@code reference} is null
* @throws IllegalArgumentException if {@code reference} is empty
*/
public static <T extends Collection<?>> T checkNotNullNorEmpty(final T reference, final Object errorMessage) {
if (reference == null) {
throw new NullPointerException(String.valueOf(errorMessage));
}
if (reference.isEmpty()) {
throw new IllegalArgumentException(String.valueOf(errorMessage));
}
return reference;
} | 3.68 |
flink_BinarySegmentUtils_readTimestampData | /**
* Gets an instance of {@link TimestampData} from underlying {@link MemorySegment}.
*
* @param segments the underlying MemorySegments
* @param baseOffset the base offset of current instance of {@code TimestampData}
* @param offsetAndNanos the offset of milli-seconds part and nanoseconds
* @return an instance of {@link TimestampData}
*/
public static TimestampData readTimestampData(
MemorySegment[] segments, int baseOffset, long offsetAndNanos) {
final int nanoOfMillisecond = (int) offsetAndNanos;
final int subOffset = (int) (offsetAndNanos >> 32);
final long millisecond = getLong(segments, baseOffset + subOffset);
return TimestampData.fromEpochMillis(millisecond, nanoOfMillisecond);
} | 3.68 |
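The decode above splits a packed long into a 32-bit sub-offset (high half) and a nanosecond component (low half). A tiny sketch of how such a pack/unpack round-trips, using illustrative values and no Flink classes:

```java
public class OffsetAndNanosPacking {
    public static void main(String[] args) {
        int subOffset = 128;        // where the millisecond long lives, relative to baseOffset
        int nanoOfMillisecond = 999_999;
        // Pack: offset in the high 32 bits, nanos in the low 32 bits.
        long offsetAndNanos = ((long) subOffset << 32) | (nanoOfMillisecond & 0xFFFFFFFFL);
        // Unpack exactly as readTimestampData does.
        int decodedNanos = (int) offsetAndNanos;
        int decodedOffset = (int) (offsetAndNanos >> 32);
        System.out.println(decodedOffset + " " + decodedNanos); // 128 999999
    }
}
```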