name | code_snippet | score |
---|---|---|
hadoop_WrappedFailoverProxyProvider_close | /**
* Close the proxy.
*/
@Override
public synchronized void close() throws IOException {
proxyProvider.close();
} | 3.68 |
flink_FromClasspathEntryClassInformationProvider_create | /**
* Creates a {@code FromClasspathEntryClassInformationProvider} based on the passed job class
* and classpath.
*
* @param jobClassName The job's class name.
* @param classpath The classpath the job class should be part of.
* @return The {@code FromClasspathEntryClassInformationProvider} instance collecting the
* necessary information.
* @throws IOException If some Jar listed on the classpath wasn't accessible.
* @throws FlinkException If the passed job class is not present on the passed classpath.
*/
public static FromClasspathEntryClassInformationProvider create(
String jobClassName, Iterable<URL> classpath) throws IOException, FlinkException {
Preconditions.checkNotNull(jobClassName, "No job class name passed.");
Preconditions.checkNotNull(classpath, "No classpath passed.");
return new FromClasspathEntryClassInformationProvider(jobClassName);
} | 3.68 |
hbase_Scan_isRaw | /** Returns True if this Scan is in "raw" mode. */
public boolean isRaw() {
byte[] attr = getAttribute(RAW_ATTR);
return attr == null ? false : Bytes.toBoolean(attr);
} | 3.68 |
framework_AbstractEmbedded_setAlternateText | /**
* Sets this component's alternate text that can be presented instead of the
* component's normal content for accessibility purposes.
*
* @param altText
* A short, human-readable description of this component's
* content.
*/
public void setAlternateText(String altText) {
getState().alternateText = altText;
} | 3.68 |
hudi_EmbeddedTimelineService_getRemoteFileSystemViewConfig | /**
* Retrieves proper view storage configs for remote clients to access this service.
*/
public FileSystemViewStorageConfig getRemoteFileSystemViewConfig() {
FileSystemViewStorageType viewStorageType = writeConfig.getClientSpecifiedViewStorageConfig()
.shouldEnableBackupForRemoteFileSystemView()
? FileSystemViewStorageType.REMOTE_FIRST : FileSystemViewStorageType.REMOTE_ONLY;
return FileSystemViewStorageConfig.newBuilder()
.withStorageType(viewStorageType)
.withRemoteServerHost(hostAddr)
.withRemoteServerPort(serverPort)
.withRemoteTimelineClientTimeoutSecs(writeConfig.getClientSpecifiedViewStorageConfig().getRemoteTimelineClientTimeoutSecs())
.withRemoteTimelineClientRetry(writeConfig.getClientSpecifiedViewStorageConfig().isRemoteTimelineClientRetryEnabled())
.withRemoteTimelineClientMaxRetryNumbers(writeConfig.getClientSpecifiedViewStorageConfig().getRemoteTimelineClientMaxRetryNumbers())
.withRemoteTimelineInitialRetryIntervalMs(writeConfig.getClientSpecifiedViewStorageConfig().getRemoteTimelineInitialRetryIntervalMs())
.withRemoteTimelineClientMaxRetryIntervalMs(writeConfig.getClientSpecifiedViewStorageConfig().getRemoteTimelineClientMaxRetryIntervalMs())
.withRemoteTimelineClientRetryExceptions(writeConfig.getClientSpecifiedViewStorageConfig().getRemoteTimelineClientRetryExceptions())
.build();
} | 3.68 |
hadoop_CDFPiecewiseLinearRandomGenerator_valueAt | /**
* TODO This code assumes that the empirical minimum resp. maximum is the
* epistemological minimum resp. maximum. This is probably okay for the
* minimum, because that likely represents a task where everything went well,
* but for the maximum we may want to develop a way of extrapolating past the
* maximum.
*/
@Override
public long valueAt(double probability) {
int rangeFloor = floorIndex(probability);
double segmentProbMin = getRankingAt(rangeFloor);
double segmentProbMax = getRankingAt(rangeFloor + 1);
long segmentMinValue = getDatumAt(rangeFloor);
long segmentMaxValue = getDatumAt(rangeFloor + 1);
// If this is zero, this object is based on an ill-formed cdf
double segmentProbRange = segmentProbMax - segmentProbMin;
long segmentDatumRange = segmentMaxValue - segmentMinValue;
long result = (long) ((probability - segmentProbMin) / segmentProbRange * segmentDatumRange)
+ segmentMinValue;
return result;
} | 3.68 |
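The interpolation above is a straight linear blend within one CDF segment; a minimal, self-contained sketch of the same arithmetic with made-up segment boundaries (not taken from any real trace):

public class CdfInterpolationExample {
  public static void main(String[] args) {
    double segmentProbMin = 0.2, segmentProbMax = 0.6;    // CDF rankings bracketing the probe
    long segmentMinValue = 100L, segmentMaxValue = 500L;  // datum values at those rankings
    double probability = 0.4;                             // the probe probability
    double segmentProbRange = segmentProbMax - segmentProbMin;   // 0.4
    long segmentDatumRange = segmentMaxValue - segmentMinValue;  // 400
    long result = (long) ((probability - segmentProbMin) / segmentProbRange * segmentDatumRange)
        + segmentMinValue;
    System.out.println(result); // 300, i.e. halfway along the segment
  }
}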
hadoop_DatanodeAdminProperties_getHostName | /**
* Return the host name of the datanode.
* @return the host name of the datanode.
*/
public String getHostName() {
return hostName;
} | 3.68 |
morf_DatabaseUpgradeTableContribution_upgradeAuditTable | /**
* @return The Table descriptor of UpgradeAudit
*/
public static Table upgradeAuditTable() {
return table(UPGRADE_AUDIT_NAME)
.columns(
column("upgradeUUID", DataType.STRING, 100).primaryKey(),
column("description", DataType.STRING, 200).nullable(),
column("appliedTime", DataType.DECIMAL, 14).nullable()
);
} | 3.68 |
hudi_HoodieCatalogUtil_createHiveConf | /**
* Returns a new {@code HiveConf}.
*
* @param hiveConfDir Hive conf directory path.
* @param flinkConf Flink configuration used to derive the Hadoop configuration.
* @return A HiveConf instance.
*/
public static HiveConf createHiveConf(@Nullable String hiveConfDir, org.apache.flink.configuration.Configuration flinkConf) {
// create HiveConf from hadoop configuration with hadoop conf directory configured.
Configuration hadoopConf = HadoopConfigurations.getHadoopConf(flinkConf);
// ignore all the static conf file URLs that HiveConf may have set
HiveConf.setHiveSiteLocation(null);
HiveConf.setLoadMetastoreConfig(false);
HiveConf.setLoadHiveServer2Config(false);
HiveConf hiveConf = new HiveConf(hadoopConf, HiveConf.class);
LOG.info("Setting hive conf dir as {}", hiveConfDir);
if (hiveConfDir != null) {
Path hiveSite = new Path(hiveConfDir, HIVE_SITE_FILE);
if (!hiveSite.toUri().isAbsolute()) {
// treat relative URI as local file to be compatible with previous behavior
hiveSite = new Path(new File(hiveSite.toString()).toURI());
}
try (InputStream inputStream = hiveSite.getFileSystem(hadoopConf).open(hiveSite)) {
hiveConf.addResource(inputStream, hiveSite.toString());
// trigger a read from the conf so that the input stream is read
isEmbeddedMetastore(hiveConf);
} catch (IOException e) {
throw new CatalogException(
"Failed to load hive-site.xml from specified path:" + hiveSite, e);
}
} else {
// user doesn't provide hive conf dir, we try to find it in classpath
URL hiveSite =
Thread.currentThread().getContextClassLoader().getResource(HIVE_SITE_FILE);
if (hiveSite != null) {
LOG.info("Found {} in classpath: {}", HIVE_SITE_FILE, hiveSite);
hiveConf.addResource(hiveSite);
}
}
return hiveConf;
} | 3.68 |
rocketmq-connect_CountDownLatch2_await | /**
* Causes the current thread to wait until the latch has counted down to zero, unless the thread is {@linkplain
* Thread#interrupt interrupted}, or the specified waiting time elapses.
*
* <p>If the current count is zero then this method returns immediately
* with the value {@code true}.
*
* <p>If the current count is greater than zero then the current
* thread becomes disabled for thread scheduling purposes and lies dormant until one of three things happen:
* <ul>
* <li>The count reaches zero due to invocations of the
* {@link #countDown} method; or
* <li>Some other thread {@linkplain Thread#interrupt interrupts}
* the current thread; or
* <li>The specified waiting time elapses.
* </ul>
*
* <p>If the count reaches zero then the method returns with the
* value {@code true}.
*
* <p>If the current thread:
* <ul>
* <li>has its interrupted status set on entry to this method; or
* <li>is {@linkplain Thread#interrupt interrupted} while waiting,
* </ul>
* then {@link InterruptedException} is thrown and the current thread's interrupted status is cleared.
*
* <p>If the specified waiting time elapses then the value {@code false}
* is returned. If the time is less than or equal to zero, the method will not wait at all.
*
* @param timeout the maximum time to wait
* @param unit the time unit of the {@code timeout} argument
* @return {@code true} if the count reached zero and {@code false} if the waiting time elapsed before the count
* reached zero
* @throws InterruptedException if the current thread is interrupted while waiting
*/
public boolean await(long timeout, TimeUnit unit)
throws InterruptedException {
return sync.tryAcquireSharedNanos(1, unit.toNanos(timeout));
} | 3.68 |
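The contract documented above matches java.util.concurrent.CountDownLatch#await(long, TimeUnit); a minimal usage sketch against the plain JDK latch, assuming CountDownLatch2 (RocketMQ's resettable variant) behaves the same for this call:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class AwaitExample {
  public static void main(String[] args) throws InterruptedException {
    CountDownLatch latch = new CountDownLatch(1);
    // Another thread signals completion by counting down.
    new Thread(latch::countDown).start();
    // Wait up to 5 seconds; true means the count reached zero before the timeout.
    boolean completed = latch.await(5, TimeUnit.SECONDS);
    System.out.println("completed = " + completed);
  }
}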
zxing_FinderPatternFinder_selectBestPatterns | /**
* @return the 3 best {@link FinderPattern}s from our list of candidates. The "best" are
* those that have similar module size and form a shape closer to an isosceles right triangle.
* @throws NotFoundException if 3 such finder patterns do not exist
*/
private FinderPattern[] selectBestPatterns() throws NotFoundException {
int startSize = possibleCenters.size();
if (startSize < 3) {
// Couldn't find enough finder patterns
throw NotFoundException.getNotFoundInstance();
}
for (Iterator<FinderPattern> it = possibleCenters.iterator(); it.hasNext();) {
if (it.next().getCount() < CENTER_QUORUM) {
it.remove();
}
}
// A more up-to-date version would be "possibleCenters.sort(moduleComparator);"
// But we need this old syntax for android API 23 (Marshmallow) and below
// cf. https://github.com/zxing/zxing/issues/1358
Collections.sort(possibleCenters, moduleComparator);
double distortion = Double.MAX_VALUE;
FinderPattern[] bestPatterns = new FinderPattern[3];
for (int i = 0; i < possibleCenters.size() - 2; i++) {
FinderPattern fpi = possibleCenters.get(i);
float minModuleSize = fpi.getEstimatedModuleSize();
for (int j = i + 1; j < possibleCenters.size() - 1; j++) {
FinderPattern fpj = possibleCenters.get(j);
double squares0 = squaredDistance(fpi, fpj);
for (int k = j + 1; k < possibleCenters.size(); k++) {
FinderPattern fpk = possibleCenters.get(k);
float maxModuleSize = fpk.getEstimatedModuleSize();
if (maxModuleSize > minModuleSize * 1.4f) {
// module size is not similar
continue;
}
double a = squares0;
double b = squaredDistance(fpj, fpk);
double c = squaredDistance(fpi, fpk);
// sorts ascending - inlined
if (a < b) {
if (b > c) {
if (a < c) {
double temp = b;
b = c;
c = temp;
} else {
double temp = a;
a = c;
c = b;
b = temp;
}
}
} else {
if (b < c) {
if (a < c) {
double temp = a;
a = b;
b = temp;
} else {
double temp = a;
a = b;
b = c;
c = temp;
}
} else {
double temp = a;
a = c;
c = temp;
}
}
// a^2 + b^2 = c^2 (Pythagorean theorem), and a = b (isosceles triangle).
// Since any right triangle satisfies the formula c^2 - b^2 - a^2 = 0,
// we need to check both of the two supposedly equal sides separately.
// The value of |c^2 - 2 * b^2| + |c^2 - 2 * a^2| increases as the shape
// deviates from an isosceles right triangle.
double d = Math.abs(c - 2 * b) + Math.abs(c - 2 * a);
if (d < distortion) {
distortion = d;
bestPatterns[0] = fpi;
bestPatterns[1] = fpj;
bestPatterns[2] = fpk;
}
}
}
}
if (distortion == Double.MAX_VALUE) {
throw NotFoundException.getNotFoundInstance();
}
return bestPatterns;
} | 3.68 |
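A worked example of the distortion metric, on hypothetical squared side lengths a <= b <= c (not from a real scan):

public class DistortionExample {
  public static void main(String[] args) {
    double a = 100, b = 100, c = 200;  // ideal isosceles right triangle: c = 2a = 2b
    System.out.println(Math.abs(c - 2 * b) + Math.abs(c - 2 * a)); // 0.0
    a = 80;  // skew one leg; the metric grows as the shape deviates from the ideal
    System.out.println(Math.abs(c - 2 * b) + Math.abs(c - 2 * a)); // 40.0
  }
}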
open-banking-gateway_Xs2aConsentInfo_isOauth2AuthenticationPreStep | /**
* Is the current consent in OAUTH-Pre-step (authentication) mode.
*/
public boolean isOauth2AuthenticationPreStep(Xs2aContext ctx) {
return ctx.isOauth2PreStepNeeded() || ctx.isEmbeddedPreAuthNeeded();
} | 3.68 |
rocketmq-connect_PluginUtils_isAliasUnique | /**
* Verify whether a given plugin's alias matches another alias in a collection of plugins.
*
* @param alias the plugin descriptor to test for alias matching.
* @param plugins the collection of plugins to test against.
* @param <U> the plugin type.
* @return false if a match was found in the collection, otherwise true.
*/
public static <U> boolean isAliasUnique(
PluginWrapper<U> alias,
Collection<PluginWrapper<U>> plugins
) {
boolean matched = false;
for (PluginWrapper<U> plugin : plugins) {
if (simpleName(alias).equals(simpleName(plugin))
|| prunedName(alias).equals(prunedName(plugin))) {
if (matched) {
return false;
}
matched = true;
}
}
return true;
} | 3.68 |
hudi_SparkInternalSchemaConverter_convertAndPruneStructTypeToInternalSchema | /**
* Convert Spark schema to Hudi internal schema, and prune fields.
* Fields without IDs are kept and assigned fallback IDs.
*
* @param sparkSchema a pruned Spark schema
* @param originSchema an internal schema for the Hudi table
* @return a pruned internal schema for the provided spark schema
*/
public static InternalSchema convertAndPruneStructTypeToInternalSchema(StructType sparkSchema, InternalSchema originSchema) {
List<String> pruneNames = collectColNamesFromSparkStruct(sparkSchema);
return InternalSchemaUtils.pruneInternalSchema(originSchema, pruneNames);
} | 3.68 |
hadoop_FSDirSatisfyStoragePolicyOp_satisfyStoragePolicy | /**
* Satisfy storage policy function which will add the entry to the SPS call queue
* and perform satisfaction asynchronously.
*
* @param fsd
* fs directory
* @param bm
* block manager
* @param src
* source path
* @param logRetryCache
* whether to record RPC ids in editlog for retry cache rebuilding
* @return file status info
* @throws IOException
*/
static FileStatus satisfyStoragePolicy(FSDirectory fsd, BlockManager bm,
String src, boolean logRetryCache) throws IOException {
assert fsd.getFSNamesystem().hasWriteLock();
FSPermissionChecker pc = fsd.getPermissionChecker();
INodesInPath iip;
fsd.writeLock();
try {
// check operation permission.
iip = fsd.resolvePath(pc, src, DirOp.WRITE);
if (fsd.isPermissionEnabled()) {
fsd.checkPathAccess(pc, iip, FsAction.WRITE);
}
INode inode = FSDirectory.resolveLastINode(iip);
if (inode.isFile() && inode.asFile().numBlocks() == 0) {
if (NameNode.LOG.isInfoEnabled()) {
NameNode.LOG.info(
"Skipping satisfy storage policy on path:{} as "
+ "this file doesn't have any blocks!",
inode.getFullPathName());
}
} else if (inodeHasSatisfyXAttr(inode)) {
NameNode.LOG
.warn("Cannot request to call satisfy storage policy on path: "
+ inode.getFullPathName()
+ ", as this file/dir was already called for satisfying "
+ "storage policy.");
} else {
XAttr satisfyXAttr = XAttrHelper
.buildXAttr(XATTR_SATISFY_STORAGE_POLICY);
List<XAttr> xAttrs = Arrays.asList(satisfyXAttr);
List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
List<XAttr> newXAttrs = FSDirXAttrOp.setINodeXAttrs(fsd, existingXAttrs,
xAttrs, EnumSet.of(XAttrSetFlag.CREATE));
XAttrStorage.updateINodeXAttrs(inode, newXAttrs,
iip.getLatestSnapshotId());
fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
// Adding directory in the pending queue, so FileInodeIdCollector
// can process directory children in batches and recursively
StoragePolicySatisfyManager spsManager =
fsd.getBlockManager().getSPSManager();
if (spsManager != null) {
spsManager.addPathId(inode.getId());
}
}
} finally {
fsd.writeUnlock();
}
return fsd.getAuditFileInfo(iip);
} | 3.68 |
hbase_ServerManager_countOfRegionServers | /** Returns the count of active regionservers */
public int countOfRegionServers() {
// Presumes onlineServers is a concurrent map
return this.onlineServers.size();
} | 3.68 |
flink_LocatableInputSplitAssigner_addInputSplit | /**
* Adds a single input split.
*
* @param split The input split to add
*/
public void addInputSplit(LocatableInputSplitWithCount split) {
int localCount = split.getLocalCount();
if (minLocalCount == -1) {
// first split to add
this.minLocalCount = localCount;
this.elementCycleCount = 1;
this.splits.offerFirst(split);
} else if (localCount < minLocalCount) {
// split with new min local count
this.nextMinLocalCount = this.minLocalCount;
this.minLocalCount = localCount;
// all other splits have more local hosts than this one
this.elementCycleCount = 1;
splits.offerFirst(split);
} else if (localCount == minLocalCount) {
this.elementCycleCount++;
this.splits.offerFirst(split);
} else {
if (localCount < nextMinLocalCount) {
nextMinLocalCount = localCount;
}
splits.offerLast(split);
}
} | 3.68 |
framework_DefaultConnectionStateHandler_showDialog | /**
* Called when the reconnect dialog should be shown. This is typically when
* N seconds have passed since a problem with the connection has been
* detected
*/
protected void showDialog() {
reconnectDialog.setReconnecting(true);
reconnectDialog.show(connection);
// We never want to show loading indicator and reconnect dialog at the
// same time
connection.getLoadingIndicator().hide();
} | 3.68 |
framework_VaadinService_setSessionLock | /**
* Associates the given lock with this service and the given wrapped
* session. This method should not be called more than once when the lock is
* initialized for the session.
*
* @see #getSessionLock(WrappedSession)
* @param wrappedSession
* The wrapped session the lock is associated with
* @param lock
* The lock object
*/
protected void setSessionLock(WrappedSession wrappedSession, Lock lock) {
if (wrappedSession == null) {
throw new IllegalArgumentException(
"Can't set a lock for a null session");
}
Object currentSessionLock = wrappedSession
.getAttribute(getLockAttributeName());
assert (currentSessionLock == null
|| currentSessionLock == lock) : "Changing the lock for a session is not allowed";
wrappedSession.setAttribute(getLockAttributeName(), lock);
} | 3.68 |
framework_VCalendarPanel_onKeyDown | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.event.dom.client.KeyDownHandler#onKeyDown(com.google.gwt
* .event.dom.client.KeyDownEvent)
*/
@Override
public void onKeyDown(KeyDownEvent event) {
handleKeyPress(event);
} | 3.68 |
hadoop_ResourceRequest_relaxLocality | /**
* Set the <code>relaxLocality</code> of the request.
* @see ResourceRequest#setRelaxLocality(boolean)
* @param relaxLocality <code>relaxLocality</code> of the request
* @return {@link ResourceRequestBuilder}
*/
@Public
@Stable
public ResourceRequestBuilder relaxLocality(boolean relaxLocality) {
resourceRequest.setRelaxLocality(relaxLocality);
return this;
} | 3.68 |
flink_LongHashPartition_finalizeBuildPhase | /**
* After build phase.
*
* @return the build spill return buffer count; if the partition has spilled, the current write
*     buffer is returned, because it was in use for the whole build phase and can only be
*     returned at this time.
*/
int finalizeBuildPhase(IOManager ioAccess, FileIOChannel.Enumerator probeChannelEnumerator)
throws IOException {
this.finalBufferLimit = this.buildSideWriteBuffer.getCurrentPositionInSegment();
this.partitionBuffers = this.buildSideWriteBuffer.close();
if (!isInMemory()) {
// close the channel.
this.buildSideChannel.close();
this.probeSideBuffer =
FileChannelUtil.createOutputView(
ioAccess,
probeChannelEnumerator.next(),
longTable.compressionEnabled(),
longTable.compressionCodecFactory(),
longTable.compressionBlockSize(),
segmentSize);
return 1;
} else {
return 0;
}
} | 3.68 |
hadoop_DiskBalancerWorkStatus_getResult | /**
* Returns the result.
*
* @return the result
*/
public Result getResult() {
return result;
} | 3.68 |
hbase_ParseFilter_reduce | /**
* This function is called while parsing the filterString and an operator is parsed
* <p>
* @param operatorStack the stack containing the operators and parenthesis
* @param filterStack the stack containing the filters
* @param operator the operator found while parsing the filterString
*/
public void reduce(Stack<ByteBuffer> operatorStack, Stack<Filter> filterStack,
ByteBuffer operator) {
while (
!operatorStack.empty() && !ParseConstants.LPAREN_BUFFER.equals(operatorStack.peek())
&& hasHigherPriority(operatorStack.peek(), operator)
) {
filterStack.push(popArguments(operatorStack, filterStack));
}
} | 3.68 |
Activiti_DelegateInvocation_getInvocationParameters | /**
* @return an array of invocation parameters (null if the invocation takes no parameters)
*/
public Object[] getInvocationParameters() {
return invocationParameters;
} | 3.68 |
pulsar_ConsumerImpl_notifyPendingReceivedCallback | /**
* Notify a waiting asyncReceive request with the received message.
*
* @param message the received message
* @param exception the exception to propagate to the pending receive, if any
*/
void notifyPendingReceivedCallback(final Message<T> message, Exception exception) {
if (pendingReceives.isEmpty()) {
return;
}
// fetch receivedCallback from queue
final CompletableFuture<Message<T>> receivedFuture = nextPendingReceive();
if (receivedFuture == null) {
return;
}
if (exception != null) {
internalPinnedExecutor.execute(() -> receivedFuture.completeExceptionally(exception));
return;
}
if (message == null) {
IllegalStateException e = new IllegalStateException("received message can't be null");
internalPinnedExecutor.execute(() -> receivedFuture.completeExceptionally(e));
return;
}
if (getCurrentReceiverQueueSize() == 0) {
// call interceptor and complete received callback
trackMessage(message);
interceptAndComplete(message, receivedFuture);
return;
}
// increase permits for available message-queue
messageProcessed(message);
// call interceptor and complete received callback
interceptAndComplete(message, receivedFuture);
} | 3.68 |
hbase_ScannerContext_mayHaveMoreCellsInRow | /**
* @return true when we have more cells for the current row. This is usually because we have
* reached a limit in the middle of a row
*/
boolean mayHaveMoreCellsInRow() {
return scannerState == NextState.SIZE_LIMIT_REACHED_MID_ROW
|| scannerState == NextState.TIME_LIMIT_REACHED_MID_ROW
|| scannerState == NextState.BATCH_LIMIT_REACHED;
} | 3.68 |
pulsar_MetadataStore_getDefaultMetadataCacheConfig | /**
* Returns the default metadata cache config.
*
* @return default metadata cache config
*/
default MetadataCacheConfig getDefaultMetadataCacheConfig() {
return MetadataCacheConfig.builder().build();
} | 3.68 |
flink_LogicalTypeMerging_findAvgAggType | /** Finds the result type of a decimal average aggregation. */
public static LogicalType findAvgAggType(LogicalType argType) {
final LogicalType resultType;
if (argType.is(DECIMAL)) {
// a hack to make legacy types possible until we drop them
if (argType instanceof LegacyTypeInformationType) {
return argType;
}
// adopted from
// https://docs.microsoft.com/en-us/sql/t-sql/functions/avg-transact-sql
// however, we count by BIGINT, therefore divide by DECIMAL(20,0),
// but the end result is actually the same, which is DECIMAL(38, MAX(6, s)).
resultType = LogicalTypeMerging.findDivisionDecimalType(38, getScale(argType), 20, 0);
} else {
resultType = argType;
}
return resultType.copy(argType.isNullable());
} | 3.68 |
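Following the comment above, the decimal branch is expected to collapse to DECIMAL(38, max(6, s)); a tiny illustration of that rule with a hypothetical DECIMAL(10, 2) input (plain arithmetic, no Flink API):

public class AvgDecimalTypeExample {
  public static void main(String[] args) {
    int precision = 10, scale = 2;      // hypothetical input column type DECIMAL(10, 2)
    int resultPrecision = 38;           // per the comment: result is DECIMAL(38, max(6, s))
    int resultScale = Math.max(6, scale);
    System.out.println("AVG(DECIMAL(" + precision + ", " + scale + ")) -> DECIMAL("
        + resultPrecision + ", " + resultScale + ")"); // DECIMAL(38, 6)
  }
}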
AreaShop_GithubUpdateCheck_isChecking | /**
* Check if an update check is running.
* @return true if an update check is running
*/
public boolean isChecking() {
return checking;
} | 3.68 |
flink_MutableHashTable_buildBloomFilterForBucket | /**
* Use all the bucket memory except the bucket header as the bit set of the bloom filter, and
* use the hash codes of the build records to build the bloom filter.
*/
final void buildBloomFilterForBucket(
int bucketInSegmentPos, MemorySegment bucket, HashPartition<BT, PT> p) {
final int count = bucket.getShort(bucketInSegmentPos + HEADER_COUNT_OFFSET);
if (count <= 0) {
return;
}
int[] hashCodes = new int[count];
// As the hash codes and the bloom filter occupy the same bytes, we first read all hash codes
// out and then write them back into the bloom filter.
for (int i = 0; i < count; i++) {
hashCodes[i] =
bucket.getInt(bucketInSegmentPos + BUCKET_HEADER_LENGTH + i * HASH_CODE_LEN);
}
this.bloomFilter.setBitsLocation(bucket, bucketInSegmentPos + BUCKET_HEADER_LENGTH);
for (int hashCode : hashCodes) {
this.bloomFilter.addHash(hashCode);
}
buildBloomFilterForExtraOverflowSegments(bucketInSegmentPos, bucket, p);
} | 3.68 |
dubbo_ConfigurableMetadataServiceExporter_setMetadataService | // for unit test
public void setMetadataService(MetadataServiceDelegation metadataService) {
this.metadataService = metadataService;
} | 3.68 |
framework_AbstractComponent_setCaptionAsHtml | /**
* Sets whether the caption is rendered as HTML.
* <p>
* If set to true, the captions are rendered in the browser as HTML and the
* developer is responsible for ensuring no harmful HTML is used. If set to
* false, the caption is rendered in the browser as plain text.
* <p>
* The default is false, i.e. to render that caption as plain text.
*
* @param captionAsHtml
* true if the captions are rendered as HTML, false if rendered
* as plain text
*/
public void setCaptionAsHtml(boolean captionAsHtml) {
getState().captionAsHtml = captionAsHtml;
} | 3.68 |
hbase_AvlUtil_get | /**
* Return the node that matches the specified key or null in case of node not found.
* @param root the current root of the tree
* @param key the key for the node we are trying to find
* @param keyComparator the comparator to use to match node and key
* @return the node that matches the specified key or null in case of node not found.
*/
public static <TNode extends AvlNode> TNode get(TNode root, final Object key,
final AvlKeyComparator<TNode> keyComparator) {
while (root != null) {
int cmp = keyComparator.compareKey(root, key);
if (cmp > 0) {
root = (TNode) root.avlLeft;
} else if (cmp < 0) {
root = (TNode) root.avlRight;
} else {
return (TNode) root;
}
}
return null;
} | 3.68 |
AreaShop_RentRegion_isRenter | /**
* Check if a player is the renter of this region.
* @param player Player to check
* @return true if this player rents this region, otherwise false
*/
public boolean isRenter(Player player) {
return player != null && isRenter(player.getUniqueId());
} | 3.68 |
streampipes_SpOpcUaClient_connect | /***
* Establishes appropriate connection to OPC UA endpoint depending on the {@link SpOpcUaClient} instance
*
* @throws UaException An exception occurring during OPC connection
*/
public void connect()
throws UaException, ExecutionException, InterruptedException, SpConfigurationException, URISyntaxException {
OpcUaClientConfig clientConfig = new MiloOpcUaConfigurationProvider().makeClientConfig(spOpcConfig);
this.client = OpcUaClient.create(clientConfig);
client.connect().get();
} | 3.68 |
hadoop_HsController_singleTaskCounter | /*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#singleTaskCounter()
*/
@Override
public void singleTaskCounter() throws IOException{
super.singleTaskCounter();
} | 3.68 |
hudi_HoodieMergeHandle_initializeIncomingRecordsMap | /**
* Initialize a spillable map for incoming records.
*/
protected void initializeIncomingRecordsMap() {
try {
// Load the new records in a map
long memoryForMerge = IOUtils.getMaxMemoryPerPartitionMerge(taskContextSupplier, config);
LOG.info("MaxMemoryPerPartitionMerge => " + memoryForMerge);
this.keyToNewRecords = new ExternalSpillableMap<>(memoryForMerge, config.getSpillableMapBasePath(),
new DefaultSizeEstimator(), new HoodieRecordSizeEstimator(writeSchema),
config.getCommonConfig().getSpillableDiskMapType(),
config.getCommonConfig().isBitCaskDiskMapCompressionEnabled());
} catch (IOException io) {
throw new HoodieIOException("Cannot instantiate an ExternalSpillableMap", io);
}
} | 3.68 |
framework_VaadinService_writeUncachedStringResponse | /**
* Writes the given string as a response, using the given content type and
* headers that prevent caching.
*
* @param response
* The response reference
* @param contentType
* The content type of the response
* @param responseString
* The actual response
* @throws IOException
* If an error occurred while writing the response
* @since 8.3.2
*/
public void writeUncachedStringResponse(VaadinResponse response,
String contentType, String responseString) throws IOException {
// Response might contain sensitive information, so prevent all forms of
// caching
response.setNoCacheHeaders();
writeStringResponse(response, contentType, responseString);
} | 3.68 |
hadoop_LongLong_set | /** Set the values. */
LongLong set(long d0, long d1) {
this.d0 = d0;
this.d1 = d1;
return this;
} | 3.68 |
framework_AbstractOrderedLayout_getCustomAttributes | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.AbstractComponent#getCustomAttributes()
*/
@Override
protected Collection<String> getCustomAttributes() {
Collection<String> customAttributes = super.getCustomAttributes();
customAttributes.add("margin");
customAttributes.add("margin-left");
customAttributes.add("margin-right");
customAttributes.add("margin-top");
customAttributes.add("margin-bottom");
return customAttributes;
} | 3.68 |
hadoop_DiffList_unmodifiableList | /**
* Returns an unmodifiable diffList.
* @param diffs DiffList
* @param <T> Type of the object in the diffList
* @return Unmodifiable diffList
*/
static <T extends Comparable<Integer>> DiffList<T> unmodifiableList(
DiffList<T> diffs) {
return new DiffList<T>() {
@Override
public T get(int i) {
return diffs.get(i);
}
@Override
public boolean isEmpty() {
return diffs.isEmpty();
}
@Override
public int size() {
return diffs.size();
}
@Override
public T remove(int i) {
throw new UnsupportedOperationException("This list is unmodifiable.");
}
@Override
public boolean addLast(T t) {
throw new UnsupportedOperationException("This list is unmodifiable.");
}
@Override
public void addFirst(T t) {
throw new UnsupportedOperationException("This list is unmodifiable.");
}
@Override
public int binarySearch(int i) {
return diffs.binarySearch(i);
}
@Override
public Iterator<T> iterator() {
return diffs.iterator();
}
@Override
public List<T> getMinListForRange(int startIndex, int endIndex,
INodeDirectory dir) {
return diffs.getMinListForRange(startIndex, endIndex, dir);
}
};
} | 3.68 |
flink_LargeRecordHandler_close | /**
* Closes all structures and deletes all temporary files. Even in the presence of failures, this
* method will try and continue closing files and deleting temporary files.
*
* @throws IOException Thrown if an error occurred while closing/deleting the files.
*/
public void close() throws IOException {
// we go on closing and deleting files in the presence of failures.
// we remember the first exception to occur and re-throw it later
Throwable ex = null;
synchronized (this) {
if (closed) {
return;
}
closed = true;
// close the writers
if (recordsOutFile != null) {
try {
recordsOutFile.close();
recordsOutFile = null;
} catch (Throwable t) {
LOG.error("Cannot close the large records spill file.", t);
ex = t;
}
}
if (keysOutFile != null) {
try {
keysOutFile.close();
keysOutFile = null;
} catch (Throwable t) {
LOG.error("Cannot close the large records key spill file.", t);
ex = ex == null ? t : ex;
}
}
// close the readers
if (recordsReader != null) {
try {
recordsReader.close();
recordsReader = null;
} catch (Throwable t) {
LOG.error("Cannot close the large records reader.", t);
ex = ex == null ? t : ex;
}
}
if (keysReader != null) {
try {
keysReader.close();
keysReader = null;
} catch (Throwable t) {
LOG.error("Cannot close the large records key reader.", t);
ex = ex == null ? t : ex;
}
}
// delete the spill files
if (recordsChannel != null) {
try {
ioManager.deleteChannel(recordsChannel);
recordsChannel = null;
} catch (Throwable t) {
LOG.error("Cannot delete the large records spill file.", t);
ex = ex == null ? t : ex;
}
}
if (keysChannel != null) {
try {
ioManager.deleteChannel(keysChannel);
keysChannel = null;
} catch (Throwable t) {
LOG.error("Cannot delete the large records key spill file.", t);
ex = ex == null ? t : ex;
}
}
// close the key sorter
if (keySorter != null) {
try {
keySorter.close();
keySorter = null;
} catch (Throwable t) {
LOG.error(
"Cannot properly dispose the key sorter and clean up its temporary files.",
t);
ex = ex == null ? t : ex;
}
}
memManager.release(memory);
recordCounter = 0;
}
// re-throw the exception, if necessary
if (ex != null) {
throw new IOException(
"An error occurred cleaning up spill files in the large record handler.", ex);
}
} | 3.68 |
hadoop_ContainerStatus_getCapability | /**
* Get the <code>Resource</code> allocated to the container.
* @return <code>Resource</code> allocated to the container
*/
@Public
@Unstable
public Resource getCapability() {
throw new UnsupportedOperationException(
"subclass must implement this method");
} | 3.68 |
morf_DeleteStatement_copyOnWriteOrMutate | /**
* Either shallow copies and mutates the result, returning it,
* or mutates the statement directly, depending on
* {@link AliasedField#immutableDslEnabled()}.
*
* TODO for removal along with mutable behaviour.
*
* @param transform A transform which modifies the shallow copy builder.
* @param mutator Code which applies the local changes instead.
* @return The result (which may be {@code this}).
*/
private DeleteStatement copyOnWriteOrMutate(Function<DeleteStatementBuilder, DeleteStatementBuilder> transform, Runnable mutator) {
if (AliasedField.immutableDslEnabled()) {
return transform.apply(shallowCopy()).build();
} else {
mutator.run();
return this;
}
} | 3.68 |
hbase_HRegion_getReadPoint | /** Returns readpoint considering given IsolationLevel. Pass {@code null} for default */
public long getReadPoint(IsolationLevel isolationLevel) {
if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
// This scan can read even uncommitted transactions
return Long.MAX_VALUE;
}
return mvcc.getReadPoint();
} | 3.68 |
flink_PatternStream_inEventTime | /** Sets the time characteristic to event time. */
public PatternStream<T> inEventTime() {
return new PatternStream<>(builder.inEventTime());
} | 3.68 |
hadoop_CoderUtil_getNullIndexes | /**
* Get an array of the indexes of items marked as null, either erased or
* not to be read.
* @return indexes array
*/
static <T> int[] getNullIndexes(T[] inputs) {
int[] nullIndexes = new int[inputs.length];
int idx = 0;
for (int i = 0; i < inputs.length; i++) {
if (inputs[i] == null) {
nullIndexes[idx++] = i;
}
}
return Arrays.copyOf(nullIndexes, idx);
} | 3.68 |
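A minimal usage sketch of the same scan-and-truncate pattern, with hypothetical inputs:

import java.util.Arrays;

public class NullIndexesExample {
  public static void main(String[] args) {
    // Positions 1 and 3 stand in for erased or not-to-read units.
    byte[][] inputs = {new byte[]{1}, null, new byte[]{2}, null};
    int[] nullIndexes = new int[inputs.length];
    int idx = 0;
    for (int i = 0; i < inputs.length; i++) {
      if (inputs[i] == null) {
        nullIndexes[idx++] = i;
      }
    }
    System.out.println(Arrays.toString(Arrays.copyOf(nullIndexes, idx))); // [1, 3]
  }
}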
framework_GridDropEvent_getDropLocation | /**
* Get the location of the drop within the row.
* <p>
* <em>NOTE: the location will be {@link DropLocation#EMPTY} if:
* <ul>
* <li>dropped on an empty grid</li>
* <li>dropping on rows was not possible because of
* {@link DropMode#ON_GRID } was used</li>
* <li>{@link DropMode#ON_TOP} is used and the drop happened on empty space
* after last row or on top of the header / footer</li>
* </ul>
* </em>
*
* @return location of the drop in relative to the
* {@link #getDropTargetRow()} or {@link DropLocation#EMPTY} if no
* target row present
* @see GridDropTarget#setDropMode(DropMode)
*/
public DropLocation getDropLocation() {
return dropLocation;
} | 3.68 |
hbase_CellUtil_matchingRowColumn | /** Compares the row and column of two keyvalues for equality */
public static boolean matchingRowColumn(final Cell left, final Cell right) {
short lrowlength = left.getRowLength();
short rrowlength = right.getRowLength();
// match length
if (lrowlength != rrowlength) {
return false;
}
byte lfamlength = left.getFamilyLength();
byte rfamlength = right.getFamilyLength();
if (lfamlength != rfamlength) {
return false;
}
int lqlength = left.getQualifierLength();
int rqlength = right.getQualifierLength();
if (lqlength != rqlength) {
return false;
}
if (!matchingRows(left, lrowlength, right, rrowlength)) {
return false;
}
return matchingColumn(left, lfamlength, lqlength, right, rfamlength, rqlength);
} | 3.68 |
framework_VaadinService_getSystemMessages | /**
* Gets the system message to use for a specific locale. This method may
* also be implemented to use information from current instances of various
* objects, which means that this method might return different values for
* the same locale under different circumstances.
*
* @param locale
* the desired locale for the system messages
* @param request
* @return the system messages to use
*/
public SystemMessages getSystemMessages(Locale locale,
VaadinRequest request) {
SystemMessagesInfo systemMessagesInfo = new SystemMessagesInfo();
systemMessagesInfo.setLocale(locale);
systemMessagesInfo.setService(this);
systemMessagesInfo.setRequest(request);
return getSystemMessagesProvider()
.getSystemMessages(systemMessagesInfo);
} | 3.68 |
flink_CheckpointStatsCounts_incrementRestoredCheckpoints | /** Increments the number of restored checkpoints. */
void incrementRestoredCheckpoints() {
numRestoredCheckpoints++;
} | 3.68 |
flink_CheckpointProperties_forceCheckpoint | /**
* Returns whether the checkpoint should be forced.
*
* <p>Forced checkpoints ignore the configured maximum number of concurrent checkpoints and
* minimum time between checkpoints. Furthermore, they are not subsumed by more recent
* checkpoints as long as they are pending.
*
* @return <code>true</code> if the checkpoint should be forced; <code>false</code> otherwise.
* @see CheckpointCoordinator
* @see PendingCheckpoint
*/
boolean forceCheckpoint() {
return forced;
} | 3.68 |
flink_CommonExecTableSourceScan_createSourceFunctionTransformation | /**
* Adopted from {@link StreamExecutionEnvironment#addSource(SourceFunction, String,
* TypeInformation)} but with custom {@link Boundedness}.
*
* @deprecated This method relies on the {@link
* org.apache.flink.streaming.api.functions.source.SourceFunction} API, which is due to be
* removed.
*/
@Deprecated
protected Transformation<RowData> createSourceFunctionTransformation(
StreamExecutionEnvironment env,
SourceFunction<RowData> function,
boolean isBounded,
String operatorName,
TypeInformation<RowData> outputTypeInfo) {
env.clean(function);
final int parallelism;
boolean parallelismConfigured = false;
if (function instanceof ParallelSourceFunction) {
parallelism = env.getParallelism();
} else {
parallelism = 1;
parallelismConfigured = true;
}
final Boundedness boundedness;
if (isBounded) {
boundedness = Boundedness.BOUNDED;
} else {
boundedness = Boundedness.CONTINUOUS_UNBOUNDED;
}
final StreamSource<RowData, ?> sourceOperator = new StreamSource<>(function, !isBounded);
return new LegacySourceTransformation<>(
operatorName,
sourceOperator,
outputTypeInfo,
parallelism,
boundedness,
parallelismConfigured);
} | 3.68 |
hudi_HoodieRowDataCreateHandle_canWrite | /**
* Returns {@code true} if this handle can take in more writes, else {@code false}.
*/
public boolean canWrite() {
return fileWriter.canWrite();
} | 3.68 |
graphhopper_BitUtil_toUnsignedLong | /**
* This method handles the specified (potentially negative) int as unsigned bit representation
* and returns the positive converted long.
*/
public static long toUnsignedLong(int x) {
return ((long) x) & 0xFFFF_FFFFL;
} | 3.68 |
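A quick check of the mask trick with plain JDK arithmetic:

public class UnsignedLongExample {
  public static void main(String[] args) {
    // -1 has the 32-bit pattern 0xFFFFFFFF; the mask keeps it as a positive long.
    System.out.println(((long) -1) & 0xFFFF_FFFFL);  // 4294967295
    // The JDK ships an equivalent helper since Java 8.
    System.out.println(Integer.toUnsignedLong(-1));  // 4294967295
  }
}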
hadoop_LocalJobOutputFiles_getSpillIndexFileForWrite | /**
* Create a local map spill index file name.
*
* @param spillNumber the number
* @param size the size of the file
* @return a local path for writing the spill index file
*/
public Path getSpillIndexFileForWrite(int spillNumber, long size) throws IOException {
String path = String
.format(SPILL_INDEX_FILE_FORMAT_STRING, TASKTRACKER_OUTPUT, spillNumber);
return lDirAlloc.getLocalPathForWrite(path, size, conf);
} | 3.68 |
morf_OracleMetaDataProvider_getTable | /**
* {@inheritDoc}
*
* <p>The {@link Table} implementation returned may contain {@link Column} implementations
* which evaluate the metadata elements ({@link Column#getType()}, {@link Column#getWidth()}
* etc.) lazily. If the database column type is not supported, this may throw an
* {@link UnexpectedDataTypeException} when evaluated. This allows tables with unsupported
* data types to be enumerated (and thus co-exist in the database schema) but not be supported
* by the application.</p>
*
* @see org.alfasoftware.morf.metadata.Schema#getTable(java.lang.String)
*/
@Override
public Table getTable(String name) {
return tableMap().get(name.toUpperCase());
} | 3.68 |
pulsar_FunctionMetaDataManager_getAllFunctionMetaData | /**
* Get a list of the metadata for every function.
* @return list of function metadata
*/
public synchronized List<FunctionMetaData> getAllFunctionMetaData() {
List<FunctionMetaData> ret = new LinkedList<>();
for (Map<String, Map<String, FunctionMetaData>> i : this.functionMetaDataMap.values()) {
for (Map<String, FunctionMetaData> j : i.values()) {
ret.addAll(j.values());
}
}
return ret;
} | 3.68 |
streampipes_StreamPipesClient_create | /**
* Create a new StreamPipes API client with custom port and HTTPS settings
*
* @param streamPipesHost The hostname of the StreamPipes instance without scheme
* @param streamPipesPort The port of the StreamPipes instance
* @param credentials The credentials object
* @param httpsDisabled Set true if the instance is not served over HTTPS
*/
public static StreamPipesClient create(String streamPipesHost,
Integer streamPipesPort,
CredentialsProvider credentials,
boolean httpsDisabled) {
return new StreamPipesClient(streamPipesHost, streamPipesPort, credentials, httpsDisabled);
} | 3.68 |
zilla_DefaultBufferPool_release | /**
* Releases a slot so it may be used by other streams
* @param slot - Id of a previously acquired slot
*/
@Override
public void release(
int slot)
{
assert used.get(slot);
used.clear(slot);
availableSlots.value++;
poolBuffer.putLongOrdered(usedIndex + (slot << 3), 0L);
} | 3.68 |
flink_LogicalFile_advanceLastCheckpointId | /**
* A logical file may be shared across checkpoints (especially for shared state). When this logical
* file is used/reused by a checkpoint, update the last checkpoint id that uses this logical
* file.
*
* @param checkpointId the checkpoint that uses this logical file.
*/
public void advanceLastCheckpointId(long checkpointId) {
if (checkpointId > lastUsedCheckpointID) {
this.lastUsedCheckpointID = checkpointId;
}
} | 3.68 |
hbase_Addressing_createInetSocketAddressFromHostAndPortStr | /**
* Create a socket address.
* @param hostAndPort Formatted as <code>&lt;hostname&gt; ':' &lt;port&gt;</code>
* @return An InetSocketAddress instance
*/
public static InetSocketAddress
createInetSocketAddressFromHostAndPortStr(final String hostAndPort) {
return new InetSocketAddress(parseHostname(hostAndPort), parsePort(hostAndPort));
} | 3.68 |
flink_SubtaskStateStats_getPersistedData | /** @return the total number of persisted bytes during the checkpoint. */
public long getPersistedData() {
return persistedData;
} | 3.68 |
hadoop_WritableFactories_getFactory | /**
* Get the factory previously defined for a class.
* @param c the class to look up.
* @return the factory for the class, or null if none is defined.
*/
public static WritableFactory getFactory(Class c) {
return CLASS_TO_FACTORY.get(c);
} | 3.68 |
hadoop_ExternalCall_run | // invoked by ipc handler
@Override
public final Void run() throws IOException {
try {
result = action.run();
sendResponse();
} catch (Throwable t) {
abortResponse(t);
}
return null;
} | 3.68 |
hadoop_CommonAuditContext_createInstance | /**
* Invoked on demand to create the instance for this thread.
* @return an instance.
*/
private static CommonAuditContext createInstance() {
CommonAuditContext context = new CommonAuditContext();
context.init();
return context;
} | 3.68 |
hbase_HRegion_flush | /**
* Flush the cache.
* <p>
* When this method is called the cache will be flushed unless:
* <ol>
* <li>the cache is empty</li>
* <li>the region is closed.</li>
* <li>a flush is already in progress</li>
* <li>writes are disabled</li>
* </ol>
* <p>
* This method may block for some time, so it should not be called from a time-sensitive thread.
* @param flushAllStores whether we want to force a flush of all stores
* @return FlushResult indicating whether the flush was successful or not and if the region needs
* compacting
* @throws IOException general io exceptions because a snapshot was not properly persisted.
*/
// TODO HBASE-18905. We might have to expose a requestFlush API for CPs
public FlushResult flush(boolean flushAllStores) throws IOException {
return flushcache(flushAllStores, false, FlushLifeCycleTracker.DUMMY);
} | 3.68 |
hadoop_SliderFileSystem_deleteComponentDir | /**
* Deletes the component directory.
*
* @param serviceVersion the service version
* @param compName the component name
* @throws IOException if the directories cannot be deleted
*/
public void deleteComponentDir(String serviceVersion, String compName)
throws IOException {
Path path = getComponentDir(serviceVersion, compName);
if (fileSystem.exists(path)) {
fileSystem.delete(path, true);
LOG.debug("deleted dir {}", path);
}
Path publicResourceDir = getComponentPublicResourceDir(serviceVersion,
compName);
if (fileSystem.exists(publicResourceDir)) {
fileSystem.delete(publicResourceDir, true);
LOG.debug("deleted public resource dir {}", publicResourceDir);
}
} | 3.68 |
zxing_GridSampler_setGridSampler | /**
* Sets the implementation of GridSampler used by the library. One global
* instance is stored, which may sound problematic. But, the implementation provided
* ought to be appropriate for the entire platform, and all uses of this library
* in the whole lifetime of the JVM. For instance, an Android activity can swap in
* an implementation that takes advantage of native platform libraries.
*
* @param newGridSampler The platform-specific object to install.
*/
public static void setGridSampler(GridSampler newGridSampler) {
gridSampler = newGridSampler;
} | 3.68 |
flink_PekkoUtils_createDefaultActorSystem | /**
* Creates an actor system with the default config and listening on a random port of the
* localhost.
*
* @return default actor system listening on a random port of the localhost
*/
@VisibleForTesting
public static ActorSystem createDefaultActorSystem() {
return createActorSystem(getDefaultConfig());
} | 3.68 |
flink_BlobServer_getServerSocket | /** Access to the server socket, for testing. */
ServerSocket getServerSocket() {
return this.serverSocket;
} | 3.68 |
hmily_ThreadLocalHmilyContext_remove | /**
* Clean the thread local so it can be garbage collected.
*/
public void remove() {
CURRENT_LOCAL.remove();
} | 3.68 |
hbase_GroupingTableMapper_map | /**
* Extract the grouping columns from value to construct a new key. Pass the new key and value to
* reduce. If any of the grouping columns are not found in the value, the record is skipped.
* @param key The current key.
* @param value The current value.
* @param context The current context.
* @throws IOException When writing the record fails.
* @throws InterruptedException When the job is aborted.
*/
@Override
public void map(ImmutableBytesWritable key, Result value, Context context)
throws IOException, InterruptedException {
byte[][] keyVals = extractKeyValues(value);
if (keyVals != null) {
ImmutableBytesWritable tKey = createGroupKey(keyVals);
context.write(tKey, value);
}
} | 3.68 |
shardingsphere-elasticjob_SensitiveInfoUtils_filterSensitiveIps | /**
* Filter sensitive IP addresses.
*
* @param target IP addresses to be filtered
* @return filtered IP addresses
*/
public static List<String> filterSensitiveIps(final List<String> target) {
final Map<String, String> fakeIpMap = new HashMap<>();
final AtomicInteger step = new AtomicInteger();
return target.stream().map(input -> {
Matcher matcher = IP_PATTERN.matcher(input);
String result = input;
while (matcher.find()) {
String realIp = matcher.group();
String fakeIp;
if (fakeIpMap.containsKey(realIp)) {
fakeIp = fakeIpMap.get(realIp);
} else {
fakeIp = FAKE_IP_SAMPLE + step.incrementAndGet();
fakeIpMap.put(realIp, fakeIp);
}
result = result.replace(realIp, fakeIp);
}
return result;
}).collect(Collectors.toList());
} | 3.68 |
flink_FileCatalogStore_storeCatalog | /**
* Stores the specified catalog in the catalog store.
*
* @param catalogName the name of the catalog
* @param catalog the catalog descriptor to store
* @throws CatalogException if the catalog store is not open or if there is an error storing the
* catalog
*/
@Override
public void storeCatalog(String catalogName, CatalogDescriptor catalog)
throws CatalogException {
checkOpenState();
Path catalogPath = getCatalogPath(catalogName);
try {
FileSystem fs = catalogPath.getFileSystem();
if (fs.exists(catalogPath)) {
throw new CatalogException(
String.format(
"Catalog %s's store file %s is already exist.",
catalogName, catalogPath));
}
try (FSDataOutputStream os = fs.create(catalogPath, WriteMode.NO_OVERWRITE)) {
YAML_MAPPER.writeValue(os, catalog.getConfiguration().toMap());
}
LOG.info("Catalog {}'s configuration saved to file {}", catalogName, catalogPath);
} catch (CatalogException e) {
throw e;
} catch (Exception e) {
throw new CatalogException(
String.format(
"Failed to store catalog %s's configuration to file %s.",
catalogName, catalogPath),
e);
}
} | 3.68 |
dubbo_AbstractServiceRestMetadataResolver_findRestCapableMethod | /**
* Find the REST-capable method from the specified service method and its override method
*
* @param processingEnv {@link ProcessingEnvironment}
* @param serviceType
* @param serviceInterfaceType
* @param serviceMethod
* @return <code>null</code> if it can't be found
*/
private ExecutableElement findRestCapableMethod(
ProcessingEnvironment processingEnv,
TypeElement serviceType,
TypeElement serviceInterfaceType,
ExecutableElement serviceMethod) {
// try to judge the override first
ExecutableElement overrideMethod = getOverrideMethod(processingEnv, serviceType, serviceMethod);
if (supports(processingEnv, serviceType, serviceInterfaceType, overrideMethod)) {
return overrideMethod;
}
// or, try to judge the declared method
return supports(processingEnv, serviceType, serviceInterfaceType, serviceMethod) ? serviceMethod : null;
} | 3.68 |
dubbo_BasicJsonWriter_print | /**
* Write the specified text.
*
* @param string the content to write
*/
public IndentingWriter print(String string) {
write(string.toCharArray(), 0, string.length());
return this;
} | 3.68 |
hbase_Bytes_toByteArrays | /**
* Create a byte[][] where first and only entry is <code>column</code>
* @param column operand
* @return A byte array of a byte array where first and only entry is <code>column</code>
*/
public static byte[][] toByteArrays(final byte[] column) {
byte[][] result = new byte[1][];
result[0] = column;
return result;
} | 3.68 |
flink_DelegationTokenProvider_serviceConfigPrefix | /** Config prefix of the service. */
default String serviceConfigPrefix() {
return String.format("%s.%s", CONFIG_PREFIX, serviceName());
} | 3.68 |
hadoop_HAState_prepareToEnterState | /**
* Method to be overridden by subclasses to prepare to enter a state.
* This method is called <em>without</em> the context being locked,
* and after {@link #prepareToExitState(HAContext)} has been called
* for the previous state, but before {@link #exitState(HAContext)}
* has been called for the previous state.
* @param context HA context
* @throws ServiceFailedException on precondition failure
*/
public void prepareToEnterState(final HAContext context)
throws ServiceFailedException {} | 3.68 |
framework_StringToIntegerConverter_convertToModel | /*
* (non-Javadoc)
*
* @see
* com.vaadin.data.util.converter.Converter#convertToModel(java.lang.Object,
* java.lang.Class, java.util.Locale)
*/
@Override
public Integer convertToModel(String value,
Class<? extends Integer> targetType, Locale locale)
throws ConversionException {
Number n = convertToNumber(value, targetType, locale);
if (n == null) {
return null;
}
int intValue = n.intValue();
if (intValue == n.longValue()) {
// If the value of n is outside the range of long, the return value
// of longValue() is either Long.MIN_VALUE or Long.MAX_VALUE. The
// above comparison promotes int to long and thus does not need to
// consider wrap-around.
return intValue;
}
throw new ConversionException("Could not convert '" + value + "' to "
+ Integer.class.getName() + ": value out of range");
} | 3.68 |
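The range check relies on intValue() and longValue() disagreeing exactly when the parsed number does not fit in an int; a small illustration with plain JDK numbers (no Vaadin API involved):

public class IntRangeCheckExample {
  public static void main(String[] args) {
    Number inRange = 42L;
    Number outOfRange = 2_147_483_648L; // Integer.MAX_VALUE + 1
    // In range: both views agree.
    System.out.println(inRange.intValue() == inRange.longValue());       // true
    // Out of range: intValue() wraps to -2147483648, so the comparison fails.
    System.out.println(outOfRange.intValue() == outOfRange.longValue()); // false
  }
}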
AreaShop_GeneralRegion_setup | /**
* Shared setup of all constructors.
*/
public void setup() {
features = new HashMap<>();
} | 3.68 |
hbase_HMaster_shutdown | /**
* Shutdown the cluster. Master runs a coordinated stop of all RegionServers and then itself.
*/
public void shutdown() throws IOException {
TraceUtil.trace(() -> {
if (cpHost != null) {
cpHost.preShutdown();
}
// Tell the servermanager cluster shutdown has been called. This makes it so when Master is
// last running server, it'll stop itself. Next, we broadcast the cluster shutdown by setting
// the cluster status as down. RegionServers will notice this change in state and will start
// shutting themselves down. When last has exited, Master can go down.
if (this.serverManager != null) {
this.serverManager.shutdownCluster();
}
if (this.clusterStatusTracker != null) {
try {
this.clusterStatusTracker.setClusterDown();
} catch (KeeperException e) {
LOG.error("ZooKeeper exception trying to set cluster as down in ZK", e);
}
}
// Stop the procedure executor. Will stop any ongoing assign, unassign, server crash etc.,
// processing so we can go down.
if (this.procedureExecutor != null) {
this.procedureExecutor.stop();
}
// Shutdown our cluster connection. This will kill any hosted RPCs that might be going on;
// this is what we want especially if the Master is in startup phase doing call outs to
// hbase:meta, etc. when cluster is down. Without ths connection close, we'd have to wait on
// the rpc to timeout.
if (this.asyncClusterConnection != null) {
this.asyncClusterConnection.close();
}
}, "HMaster.shutdown");
} | 3.68 |
hbase_AsyncAdmin_restoreSnapshot | /**
* Restore the specified snapshot on the original table. (The table must be disabled) If
* 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken before
* executing the restore operation. In case of restore failure, the failsafe snapshot will be
* restored. If the restore completes without problem the failsafe snapshot is deleted. The
* failsafe snapshot name is configurable by using the property
* "hbase.snapshot.restore.failsafe.name".
* @param snapshotName name of the snapshot to restore
* @param takeFailSafeSnapshot true if the failsafe snapshot should be taken
*/
default CompletableFuture<Void> restoreSnapshot(String snapshotName,
boolean takeFailSafeSnapshot) {
return restoreSnapshot(snapshotName, takeFailSafeSnapshot, false);
} | 3.68 |
hadoop_AbstractConfigurableFederationPolicy_validate | /**
* Overridable validation step for the policy configuration.
*
* @param newPolicyInfo the configuration to test.
*
* @throws FederationPolicyInitializationException if the configuration is not
* valid.
*/
public void validate(WeightedPolicyInfo newPolicyInfo)
throws FederationPolicyInitializationException {
if (newPolicyInfo == null) {
throw new FederationPolicyInitializationException(
"The policy to " + "validate should not be null.");
}
} | 3.68 |
flink_Schema_primaryKey | /**
* Declares a primary key constraint for a set of given columns. A primary key uniquely
* identifies a row in a table. None of the columns in a primary key can be nullable. The primary
* key is informational only. It will not be enforced. It can be used for optimizations. It
* is the data owner's responsibility to ensure uniqueness of the data.
*
* <p>The primary key will be assigned a generated name in the format {@code PK_col1_col2}.
*
* @param columnNames columns that form a unique primary key
*/
public Builder primaryKey(List<String> columnNames) {
Preconditions.checkNotNull(columnNames, "Primary key column names must not be null.");
final String generatedConstraintName =
columnNames.stream().collect(Collectors.joining("_", "PK_", ""));
return primaryKeyNamed(generatedConstraintName, columnNames);
} | 3.68 |
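The generated constraint name comes from the Collectors.joining call above; the naming can be reproduced with plain JDK streams (the column names here are hypothetical):

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class PrimaryKeyNameExample {
  public static void main(String[] args) {
    List<String> columnNames = Arrays.asList("user_id", "region");
    String generatedConstraintName =
        columnNames.stream().collect(Collectors.joining("_", "PK_", ""));
    System.out.println(generatedConstraintName); // PK_user_id_region
  }
}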
flink_RowData_createFieldGetter | /**
* Creates an accessor for getting elements in an internal row data structure at the given
* position.
*
* @param fieldType the element type of the row
* @param fieldPos the element position of the row
*/
static FieldGetter createFieldGetter(LogicalType fieldType, int fieldPos) {
final FieldGetter fieldGetter;
// ordered by type root definition
switch (fieldType.getTypeRoot()) {
case CHAR:
case VARCHAR:
fieldGetter = row -> row.getString(fieldPos);
break;
case BOOLEAN:
fieldGetter = row -> row.getBoolean(fieldPos);
break;
case BINARY:
case VARBINARY:
fieldGetter = row -> row.getBinary(fieldPos);
break;
case DECIMAL:
final int decimalPrecision = getPrecision(fieldType);
final int decimalScale = getScale(fieldType);
fieldGetter = row -> row.getDecimal(fieldPos, decimalPrecision, decimalScale);
break;
case TINYINT:
fieldGetter = row -> row.getByte(fieldPos);
break;
case SMALLINT:
fieldGetter = row -> row.getShort(fieldPos);
break;
case INTEGER:
case DATE:
case TIME_WITHOUT_TIME_ZONE:
case INTERVAL_YEAR_MONTH:
fieldGetter = row -> row.getInt(fieldPos);
break;
case BIGINT:
case INTERVAL_DAY_TIME:
fieldGetter = row -> row.getLong(fieldPos);
break;
case FLOAT:
fieldGetter = row -> row.getFloat(fieldPos);
break;
case DOUBLE:
fieldGetter = row -> row.getDouble(fieldPos);
break;
case TIMESTAMP_WITHOUT_TIME_ZONE:
case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
final int timestampPrecision = getPrecision(fieldType);
fieldGetter = row -> row.getTimestamp(fieldPos, timestampPrecision);
break;
case TIMESTAMP_WITH_TIME_ZONE:
throw new UnsupportedOperationException();
case ARRAY:
fieldGetter = row -> row.getArray(fieldPos);
break;
case MULTISET:
case MAP:
fieldGetter = row -> row.getMap(fieldPos);
break;
case ROW:
case STRUCTURED_TYPE:
final int rowFieldCount = getFieldCount(fieldType);
fieldGetter = row -> row.getRow(fieldPos, rowFieldCount);
break;
case DISTINCT_TYPE:
fieldGetter =
createFieldGetter(((DistinctType) fieldType).getSourceType(), fieldPos);
break;
case RAW:
fieldGetter = row -> row.getRawValue(fieldPos);
break;
case NULL:
case SYMBOL:
case UNRESOLVED:
default:
throw new IllegalArgumentException();
}
if (!fieldType.isNullable()) {
return fieldGetter;
}
return row -> {
if (row.isNullAt(fieldPos)) {
return null;
}
return fieldGetter.getFieldOrNull(row);
};
} | 3.68 |
hadoop_TFile_getLocationNear | /**
* Get the location pointing to the beginning of the first key-value pair in
* a compressed block whose byte offset in the TFile is greater than or
* equal to the specified offset.
*
* @param offset
* the user supplied offset.
* @return the location to the corresponding entry; or end() if no such
* entry exists.
*/
Location getLocationNear(long offset) {
int blockIndex = readerBCF.getBlockIndexNear(offset);
if (blockIndex == -1) return end;
return new Location(blockIndex, 0);
} | 3.68 |
flink_MemorySegmentFactory_allocateUnpooledOffHeapMemory | /**
* Allocates some unpooled off-heap memory and creates a new memory segment that represents that
* memory.
*
* @param size The size of the off-heap memory segment to allocate.
* @param owner The owner to associate with the off-heap memory segment.
* @return A new memory segment, backed by unpooled off-heap memory.
*/
public static MemorySegment allocateUnpooledOffHeapMemory(int size, Object owner) {
ByteBuffer memory = allocateDirectMemory(size);
return new MemorySegment(memory, owner);
} | 3.68 |
hadoop_AbfsThrottlingInterceptFactory_getInstance | /**
* Returns an instance of AbfsThrottlingIntercept.
*
* @param accountName The account for which we need an instance of the throttling intercept.
* @param abfsConfiguration The AbfsConfiguration object.
* @return Instance of AbfsThrottlingIntercept.
*/
static synchronized AbfsThrottlingIntercept getInstance(String accountName,
AbfsConfiguration abfsConfiguration) {
abfsConfig = abfsConfiguration;
AbfsThrottlingIntercept intercept;
if (!abfsConfiguration.isAutoThrottlingEnabled()) {
return AbfsNoOpThrottlingIntercept.INSTANCE;
}
// If singleton is enabled use a static instance of the intercept class for all accounts
if (!abfsConfiguration.accountThrottlingEnabled()) {
intercept = AbfsClientThrottlingIntercept.initializeSingleton(
abfsConfiguration);
} else {
// Return the instance from the map
intercept = interceptMap.get(accountName);
if (intercept == null) {
intercept = new AbfsClientThrottlingIntercept(accountName,
abfsConfiguration);
interceptMap.put(accountName, intercept);
}
}
return intercept;
} | 3.68 |
framework_ColorPickerPopup_setSwatchesTabVisible | /**
* Sets the visibility of the Swatches tab.
*
* @param visible
* The visibility of the Swatches tab
*/
public void setSwatchesTabVisible(boolean visible) {
if (visible && !isTabVisible(swatchesTab)) {
tabs.addTab(swatchesTab, "Swatches", null);
checkIfTabsNeeded();
} else if (!visible && isTabVisible(swatchesTab)) {
tabs.removeComponent(swatchesTab);
checkIfTabsNeeded();
}
} | 3.68 |
morf_MySqlDialect_dropPrimaryKey | /**
* ALTER TABLE `XYZ` DROP PRIMARY KEY
*/
private String dropPrimaryKey(String tableName) {
return "ALTER TABLE `" + tableName + "` DROP PRIMARY KEY";
} | 3.68 |
incubator-hugegraph-toolchain_FileMappingController_loadParameter | /**
* TODO: All file mappings share one load parameter now; they should be
* separated eventually
*/
@PostMapping("load-parameter")
public void loadParameter(@RequestBody LoadParameter newEntity) {
this.checkLoadParameter(newEntity);
List<FileMapping> mappings = this.service.listAll();
for (FileMapping mapping : mappings) {
LoadParameter oldEntity = mapping.getLoadParameter();
LoadParameter entity = this.mergeEntity(oldEntity, newEntity);
mapping.setLoadParameter(entity);
this.service.update(mapping);
}
} | 3.68 |
flink_GenericInMemoryCatalog_isFullPartitionSpec | /** Check if the given partitionSpec is full partition spec for the given table. */
private boolean isFullPartitionSpec(ObjectPath tablePath, CatalogPartitionSpec partitionSpec)
throws TableNotExistException {
CatalogBaseTable baseTable = getTable(tablePath);
if (!(baseTable instanceof CatalogTable)) {
return false;
}
CatalogTable table = (CatalogTable) baseTable;
List<String> partitionKeys = table.getPartitionKeys();
Map<String, String> spec = partitionSpec.getPartitionSpec();
// The size of partition spec should not exceed the size of partition keys
return partitionKeys.size() == spec.size() && spec.keySet().containsAll(partitionKeys);
} | 3.68 |
hadoop_FilePool_refresh | /**
* (Re)generate cache of input FileStatus objects.
*/
public void refresh() throws IOException {
updateLock.writeLock().lock();
try {
root = new InnerDesc(fs, fs.getFileStatus(path),
new MinFileFilter(conf.getLong(GRIDMIX_MIN_FILE, 128 * 1024 * 1024),
conf.getLong(GRIDMIX_MAX_TOTAL, 100L * (1L << 40))));
if (0 == root.getSize()) {
throw new IOException("Found no satisfactory file in " + path);
}
} finally {
updateLock.writeLock().unlock();
}
} | 3.68 |
hbase_CatalogFamilyFormat_getStartCodeColumn | /**
* Returns the column qualifier for server start code column for replicaId
* @param replicaId the replicaId of the region
* @return a byte[] for server start code column qualifier
*/
public static byte[] getStartCodeColumn(int replicaId) {
return replicaId == 0
? HConstants.STARTCODE_QUALIFIER
: Bytes.toBytes(HConstants.STARTCODE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
+ String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId));
} | 3.68 |
hadoop_NativeS3FileSystem_initialize | /**
* Always fail to initialize.
* @throws IOException always.
*/
@Override
public void initialize(URI uri, Configuration conf) throws IOException {
super.initialize(uri, conf);
throw new IOException(UNSUPPORTED);
} | 3.68 |
flink_QueryableStateConfiguration_getProxyPortRange | /**
* Returns the port range where the queryable state client proxy can listen. See {@link
* org.apache.flink.configuration.QueryableStateOptions#PROXY_PORT_RANGE
* QueryableStateOptions.PROXY_PORT_RANGE}.
*/
public Iterator<Integer> getProxyPortRange() {
return proxyPortRange;
} | 3.68 |
framework_Payload_getValue | /**
* Gets the value of this payload.
*
* @return value of this payload
*/
public String getValue() {
return value;
} | 3.68 |