name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hbase_FavoredNodesPlan_getFavoredNodes | /**
 * Returns the list of favored region servers for this region based on the plan
*/
public List<ServerName> getFavoredNodes(RegionInfo region) {
return favoredNodesMap.get(region.getRegionNameAsString());
} | 3.68 |
flink_EmbeddedRocksDBStateBackend_setPriorityQueueStateType | /**
 * Sets the type of the priority queue state. It will fall back to the default value if it is
 * not explicitly set.
*/
public void setPriorityQueueStateType(PriorityQueueStateType priorityQueueStateType) {
this.priorityQueueConfig.setPriorityQueueStateType(priorityQueueStateType);
} | 3.68 |
hbase_PermissionStorage_loadAll | /**
* Load all permissions from the region server holding {@code _acl_}, primarily intended for
* testing purposes.
*/
static Map<byte[], ListMultimap<String, UserPermission>> loadAll(Configuration conf)
throws IOException {
Map<byte[], ListMultimap<String, UserPermission>> allPerms =
new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
// do a full scan of _acl_, filtering on only first table region rows
Scan scan = new Scan();
scan.addFamily(ACL_LIST_FAMILY);
ResultScanner scanner = null;
// TODO: Pass in a Connection rather than create one each time.
try (Connection connection = ConnectionFactory.createConnection(conf)) {
try (Table table = connection.getTable(ACL_TABLE_NAME)) {
scanner = table.getScanner(scan);
try {
for (Result row : scanner) {
ListMultimap<String, UserPermission> resultPerms =
parsePermissions(row.getRow(), row, null, null, null, false);
allPerms.put(row.getRow(), resultPerms);
}
} finally {
if (scanner != null) {
scanner.close();
}
}
}
}
return allPerms;
} | 3.68 |
hbase_RegionPlacementMaintainer_invert | /**
* Copy a given matrix into a new matrix, transforming each row index and each column index
* according to the inverse of the randomization scheme that was created at construction time.
* @param matrix the cost matrix to be inverted
* @return a new matrix with row and column indices inverted
*/
public float[][] invert(float[][] matrix) {
float[][] result = new float[rows][cols];
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
result[rowInverse[i]][colInverse[j]] = matrix[i][j];
}
}
return result;
} | 3.68 |
hadoop_AbstractS3ACommitter_getJobContext | /**
* Get the job/task context this committer was instantiated with.
* @return the context.
*/
public final JobContext getJobContext() {
return jobContext;
} | 3.68 |
pulsar_MessagePayload_release | /**
* Release the resources if necessary.
*
* NOTE: For a MessagePayload object that is created from {@link MessagePayloadFactory#DEFAULT}, this method must be
* called to avoid memory leak.
*/
default void release() {
// No ops
} | 3.68 |
hbase_MetricsREST_incrementSucessfulGetRequests | /**
* @param inc How much to add to sucessfulGetCount.
*/
public void incrementSucessfulGetRequests(final int inc) {
source.incrementSucessfulGetRequests(inc);
} | 3.68 |
hbase_ZKWatcher_registerListener | /**
* Register the specified listener to receive ZooKeeper events.
* @param listener the listener to register
*/
public void registerListener(ZKListener listener) {
listeners.add(listener);
} | 3.68 |
hbase_ScannerModel_getEndTime | /** Returns the upper bound on timestamps of items of interest */
@XmlAttribute
public long getEndTime() {
return endTime;
} | 3.68 |
hadoop_NodePlan_parseJson | /**
* Parses a Json string and converts to NodePlan.
*
* @param json - Json String
* @return NodePlan
* @throws IOException
*/
public static NodePlan parseJson(String json) throws IOException {
return READER.readValue(json);
} | 3.68 |
framework_LegacyWindow_removeListener | /**
* Removes a {@link BrowserWindowResizeListener} from this UI. The listener
* will no longer be notified when the browser window is resized.
*
* @param resizeListener
* the listener to remove
* @deprecated As of 7.0, use the similarly named api in Page instead
*/
@Deprecated
public void removeListener(BrowserWindowResizeListener resizeListener) {
getPage().removeBrowserWindowResizeListener(resizeListener);
} | 3.68 |
hbase_MonitoredRPCHandlerImpl_getRPCStartTime | /**
* Accesses the start time for the currently running RPC on the monitored Handler.
* @return the start timestamp or -1 if there is no RPC currently running.
*/
@Override
public long getRPCStartTime() {
if (getState() != State.RUNNING) {
return -1;
}
return rpcStartTime;
} | 3.68 |
hadoop_S3ClientFactory_withPathStyleAccess | /**
* Set path access option.
* @param value new value
* @return the builder
*/
public S3ClientCreationParameters withPathStyleAccess(
final boolean value) {
pathStyleAccess = value;
return this;
} | 3.68 |
AreaShop_TeleportFeature_cannotSpawnBeside | /**
 * Check if a player cannot safely spawn next to the given material.
 * @param material Material to check (assumed that this is somewhere around the player)
 * @return true when it is NOT safe to spawn next to this material, otherwise false
*/
private static boolean cannotSpawnBeside(Material material) {
String name = material.name();
return name.contains("LAVA")
|| name.contains("CACTUS")
|| name.equals("FIRE")
|| name.contains("MAGMA");
} | 3.68 |
flink_ExecNodeGraphJsonSerializer_validate | /** Check whether the given {@link ExecNodeGraph} is completely legal. */
private static void validate(ExecNodeGraph execGraph) {
ExecNodeVisitor visitor = new ExecNodeGraphValidator();
execGraph.getRootNodes().forEach(visitor::visit);
} | 3.68 |
hbase_MasterObserver_postDisableTable | /**
* Called after the disableTable operation has been requested. Called as part of disable table RPC
* call.
* @param ctx the environment to interact with the framework and master
* @param tableName the name of the table
*/
default void postDisableTable(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName) throws IOException {
} | 3.68 |
streampipes_SupportedFormats_cborFormat | /**
 * Defines that a pipeline element (data processor or data sink) supports processing messages
 * arriving in CBOR format.
*
* @return The resulting {@link org.apache.streampipes.model.grounding.TransportFormat}.
*/
public static TransportFormat cborFormat() {
return new TransportFormat(MessageFormat.CBOR);
} | 3.68 |
flink_KMeans_open | /** Reads the centroid values from a broadcast variable into a collection. */
@Override
public void open(OpenContext openContext) throws Exception {
this.centroids = getRuntimeContext().getBroadcastVariable("centroids");
} | 3.68 |
zxing_MaskUtil_getDataMaskBit | /**
* Return the mask bit for "getMaskPattern" at "x" and "y". See 8.8 of JISX0510:2004 for mask
* pattern conditions.
*/
static boolean getDataMaskBit(int maskPattern, int x, int y) {
int intermediate;
int temp;
switch (maskPattern) {
case 0:
intermediate = (y + x) & 0x1;
break;
case 1:
intermediate = y & 0x1;
break;
case 2:
intermediate = x % 3;
break;
case 3:
intermediate = (y + x) % 3;
break;
case 4:
intermediate = ((y / 2) + (x / 3)) & 0x1;
break;
case 5:
temp = y * x;
intermediate = (temp & 0x1) + (temp % 3);
break;
case 6:
temp = y * x;
intermediate = ((temp & 0x1) + (temp % 3)) & 0x1;
break;
case 7:
temp = y * x;
intermediate = ((temp % 3) + ((y + x) & 0x1)) & 0x1;
break;
default:
throw new IllegalArgumentException("Invalid mask pattern: " + maskPattern);
}
return intermediate == 0;
} | 3.68 |
morf_SchemaUtils_index | /**
* Build an index.
* <p>
* Use the methods on {@link IndexBuilder} to provide columns and optional
* properties.
* </p>
*
* @param name The name of the index.
* @return An {@link IndexBuilder} for the index.
*/
public static IndexBuilder index(String name) {
return new IndexBuilderImpl(name);
} | 3.68 |
framework_LayoutManager_getOuterHeight | /**
* Gets the outer height (including margins, paddings and borders) of the
* given element, provided that it has been measured. These elements are
* guaranteed to be measured:
* <ul>
* <li>ManagedLayouts and their child Connectors
* <li>Elements for which there is at least one ElementResizeListener
* <li>Elements for which at least one ManagedLayout has registered a
* dependency
* </ul>
*
* -1 is returned if the element has not been measured. If 0 is returned, it
* might indicate that the element is not attached to the DOM.
* <p>
 * The value returned by this method is always rounded up. To get the exact
 * outer height, use {@link #getOuterHeightDouble(Element)}.
*
* @param element
* the element to get the measured size for
* @return the measured outer height (including margins, paddings and
* borders) of the element in pixels.
*/
public final int getOuterHeight(Element element) {
assert needsMeasure(
element) : "Getting measurement for element that is not measured";
return (int) Math
.ceil(getMeasuredSize(element, nullSize).getOuterHeight());
} | 3.68 |
flink_CatalogManager_getSchemaResolver | /** Returns a {@link SchemaResolver} for creating {@link ResolvedSchema} from {@link Schema}. */
public SchemaResolver getSchemaResolver() {
return schemaResolver;
} | 3.68 |
flink_InputGateMetrics_refreshAndGetMin | /**
* Iterates over all input channels and collects the minimum number of queued buffers in a
* channel in a best-effort way.
*
* @return minimum number of queued buffers per channel (<tt>0</tt> if no channels exist)
*/
int refreshAndGetMin() {
int min = Integer.MAX_VALUE;
Collection<InputChannel> channels = inputGate.getInputChannels().values();
for (InputChannel channel : channels) {
if (channel instanceof RemoteInputChannel) {
RemoteInputChannel rc = (RemoteInputChannel) channel;
int size = rc.unsynchronizedGetNumberOfQueuedBuffers();
min = Math.min(min, size);
}
}
if (min == Integer.MAX_VALUE) { // in case all channels are local, or the channel collection
// was empty
return 0;
}
return min;
} | 3.68 |
hudi_HoodieTableMetaClient_getArchivePath | /**
* @return path where archived timeline is stored
*/
public String getArchivePath() {
String archiveFolder = tableConfig.getArchivelogFolder();
return getMetaPath() + Path.SEPARATOR + archiveFolder;
} | 3.68 |
hadoop_SinglePendingCommit_getEtags | /** @return ordered list of etags. */
public List<String> getEtags() {
return etags;
} | 3.68 |
hadoop_GetClusterNodeAttributesResponse_newInstance | /**
* Create instance of GetClusterNodeAttributesResponse.
*
 * @param attributes Set of node attribute information.
* @return GetClusterNodeAttributesResponse.
*/
public static GetClusterNodeAttributesResponse newInstance(
Set<NodeAttributeInfo> attributes) {
GetClusterNodeAttributesResponse response =
Records.newRecord(GetClusterNodeAttributesResponse.class);
response.setNodeAttributes(attributes);
return response;
} | 3.68 |
hudi_StreamSync_getHoodieClientConfig | /**
* Helper to construct Write Client config.
*
* @param schema Schema
*/
private HoodieWriteConfig getHoodieClientConfig(Schema schema) {
final boolean combineBeforeUpsert = true;
final boolean autoCommit = false;
// NOTE: Provided that we're injecting combined properties
// (from {@code props}, including CLI overrides), there's no
// need to explicitly set up some configuration aspects that
// are based on these (for ex Clustering configuration)
HoodieWriteConfig.Builder builder =
HoodieWriteConfig.newBuilder()
.withPath(cfg.targetBasePath)
.combineInput(cfg.filterDupes, combineBeforeUpsert)
.withCompactionConfig(
HoodieCompactionConfig.newBuilder()
.withInlineCompaction(cfg.isInlineCompactionEnabled())
.build()
)
.withPayloadConfig(
HoodiePayloadConfig.newBuilder()
.withPayloadClass(cfg.payloadClassName)
.withPayloadOrderingField(cfg.sourceOrderingField)
.build())
.forTable(cfg.targetTableName)
.withAutoCommit(autoCommit)
.withProps(props);
if (schema != null) {
builder.withSchema(getSchemaForWriteConfig(schema).toString());
}
HoodieWriteConfig config = builder.build();
if (config.writeCommitCallbackOn()) {
// set default value for {@link HoodieWriteCommitKafkaCallbackConfig} if needed.
if (HoodieWriteCommitKafkaCallback.class.getName().equals(config.getCallbackClass())) {
HoodieWriteCommitKafkaCallbackConfig.setCallbackKafkaConfigIfNeeded(config);
}
// set default value for {@link HoodieWriteCommitPulsarCallbackConfig} if needed.
if (HoodieWriteCommitPulsarCallback.class.getName().equals(config.getCallbackClass())) {
HoodieWriteCommitPulsarCallbackConfig.setCallbackPulsarConfigIfNeeded(config);
}
}
HoodieClusteringConfig clusteringConfig = HoodieClusteringConfig.from(props);
// Validate what deltastreamer assumes of write-config to be really safe
ValidationUtils.checkArgument(config.inlineCompactionEnabled() == cfg.isInlineCompactionEnabled(),
String.format("%s should be set to %s", INLINE_COMPACT.key(), cfg.isInlineCompactionEnabled()));
ValidationUtils.checkArgument(config.inlineClusteringEnabled() == clusteringConfig.isInlineClusteringEnabled(),
String.format("%s should be set to %s", INLINE_CLUSTERING.key(), clusteringConfig.isInlineClusteringEnabled()));
ValidationUtils.checkArgument(config.isAsyncClusteringEnabled() == clusteringConfig.isAsyncClusteringEnabled(),
String.format("%s should be set to %s", ASYNC_CLUSTERING_ENABLE.key(), clusteringConfig.isAsyncClusteringEnabled()));
ValidationUtils.checkArgument(!config.shouldAutoCommit(),
String.format("%s should be set to %s", AUTO_COMMIT_ENABLE.key(), autoCommit));
ValidationUtils.checkArgument(config.shouldCombineBeforeInsert() == cfg.filterDupes,
String.format("%s should be set to %s", COMBINE_BEFORE_INSERT.key(), cfg.filterDupes));
ValidationUtils.checkArgument(config.shouldCombineBeforeUpsert(),
String.format("%s should be set to %s", COMBINE_BEFORE_UPSERT.key(), combineBeforeUpsert));
return config;
} | 3.68 |
flink_BinaryInMemorySortBuffer_write | /**
* Writes a given record to this sort buffer. The written record will be appended and take the
* last logical position.
*
* @param record The record to be written.
* @return True, if the record was successfully written, false, if the sort buffer was full.
* @throws IOException Thrown, if an error occurred while serializing the record into the
* buffers.
*/
public boolean write(RowData record) throws IOException {
// check whether we need a new memory segment for the sort index
if (!checkNextIndexOffset()) {
return false;
}
// serialize the record into the data buffers
int skip;
try {
skip = this.inputSerializer.serializeToPages(record, this.recordCollector);
} catch (EOFException e) {
return false;
}
final long newOffset = this.recordCollector.getCurrentOffset();
long currOffset = currentDataBufferOffset + skip;
writeIndexAndNormalizedKey(record, currOffset);
this.currentDataBufferOffset = newOffset;
return true;
} | 3.68 |
hadoop_AbfsClient_deleteIdempotencyCheckOp | /**
* Check if the delete request failure is post a retry and if delete failure
* qualifies to be a success response assuming idempotency.
*
 * There are scenarios below where a delete could be incorrectly deduced as
 * a success after a request retry:
 * 1. The target did not originally exist and the initial delete request had to be
 * retried.
 * 2. A parallel delete was issued from another store interface rather than
 * from this filesystem instance.
 * These are a few corner cases, and usually returning success at this stage
 * should help the job continue.
* @param op Delete request REST operation response with non-null HTTP response
* @return REST operation response post idempotency check
*/
public AbfsRestOperation deleteIdempotencyCheckOp(final AbfsRestOperation op) {
Preconditions.checkArgument(op.hasResult(), "Operations has null HTTP response");
if ((op.isARetriedRequest())
&& (op.getResult().getStatusCode() == HttpURLConnection.HTTP_NOT_FOUND)
&& DEFAULT_DELETE_CONSIDERED_IDEMPOTENT) {
// Server has returned HTTP 404, which means path no longer
// exists. Assuming delete result to be idempotent, return success.
final AbfsRestOperation successOp = getAbfsRestOperation(
AbfsRestOperationType.DeletePath,
HTTP_METHOD_DELETE,
op.getUrl(),
op.getRequestHeaders());
successOp.hardSetResult(HttpURLConnection.HTTP_OK);
LOG.debug("Returning success response from delete idempotency logic");
return successOp;
}
return op;
} | 3.68 |
querydsl_MetaDataExporter_setSourceEncoding | /**
* Set the source encoding
*
* @param sourceEncoding
*/
public void setSourceEncoding(String sourceEncoding) {
this.sourceEncoding = sourceEncoding;
} | 3.68 |
hbase_HRegion_waitForFlushes | /**
* Wait for all current flushes of the region to complete
*/
public void waitForFlushes() {
waitForFlushes(0);// Unbound wait
} | 3.68 |
hbase_ProcedureStore_forceUpdate | /**
 * Suggest that the upper layer should update the state of some procedures. Ignoring this call
 * will not affect correctness, but may affect performance.
* <p/>
* For a WAL based ProcedureStore implementation, if all the procedures stored in a WAL file
* have been deleted, or updated later in another WAL file, then we can delete the WAL file. If
 * there are old procedures in a WAL file which are never deleted or updated, then we cannot
 * delete the WAL file, and this will cause us to hold lots of WAL files and slow down master
 * restarts. So here we introduce this method to ask the upper layer to update the
 * states of these procedures so that we can delete the old WAL file.
* @param procIds the id for the procedures
*/
default void forceUpdate(long[] procIds) {
} | 3.68 |
hadoop_OBSFileSystem_getBoundedCopyPartThreadPool | /**
* Return bounded thread pool for copy part.
*
* @return the bounded thread pool for copy part
*/
ThreadPoolExecutor getBoundedCopyPartThreadPool() {
return boundedCopyPartThreadPool;
} | 3.68 |
hadoop_ServiceShutdownHook_register | /**
* Register the service for shutdown with Hadoop's
* {@link ShutdownHookManager}.
* @param priority shutdown hook priority
*/
public synchronized void register(int priority) {
unregister();
ShutdownHookManager.get().addShutdownHook(this, priority);
} | 3.68 |
AreaShop_BuyRegion_setBuyer | /**
* Set the buyer of this region.
* @param buyer The UUID of the player that should be set as buyer
*/
public void setBuyer(UUID buyer) {
if(buyer == null) {
setSetting("buy.buyer", null);
setSetting("buy.buyerName", null);
} else {
setSetting("buy.buyer", buyer.toString());
setSetting("buy.buyerName", Utils.toName(buyer));
}
} | 3.68 |
flink_ClusterEntrypoint_closeClusterComponent | /**
* Close cluster components and deregister the Flink application from the resource management
* system by signalling the {@link ResourceManager}.
*
* @param applicationStatus to terminate the application with
* @param shutdownBehaviour shutdown behaviour
* @param diagnostics additional information about the shut down, can be {@code null}
 * @return Future which is completed once the shut down is complete
*/
private CompletableFuture<Void> closeClusterComponent(
ApplicationStatus applicationStatus,
ShutdownBehaviour shutdownBehaviour,
@Nullable String diagnostics) {
synchronized (lock) {
if (clusterComponent != null) {
switch (shutdownBehaviour) {
case GRACEFUL_SHUTDOWN:
return clusterComponent.stopApplication(applicationStatus, diagnostics);
case PROCESS_FAILURE:
default:
return clusterComponent.stopProcess();
}
} else {
return CompletableFuture.completedFuture(null);
}
}
} | 3.68 |
pulsar_TopicsBase_completeLookup | // Release lookup semaphore and add result to redirectAddresses if current broker doesn't own the topic.
private synchronized void completeLookup(Pair<List<String>, Boolean> result, List<String> redirectAddresses,
CompletableFuture<Void> future) {
pulsar().getBrokerService().getLookupRequestSemaphore().release();
// Left is the lookup result (secure/insecure addresses) if the lookup succeeded; Right indicates whether the
// address is the owner's address or an address to redirect the lookup to.
if (!result.getLeft().isEmpty()) {
if (result.getRight()) {
// If the address is for the owner of the topic partition, add it to the head so it has higher priority
// compared to brokers used for lookup redirects.
redirectAddresses.add(0, isRequestHttps() ? result.getLeft().get(1) : result.getLeft().get(0));
} else {
redirectAddresses.add(redirectAddresses.size(), isRequestHttps()
? result.getLeft().get(1) : result.getLeft().get(0));
}
}
future.complete(null);
} | 3.68 |
hadoop_LocalJobOutputFiles_getSpillIndexFile | /**
* Return a local map spill index file created earlier
*
* @param spillNumber the number
*/
public Path getSpillIndexFile(int spillNumber) throws IOException {
String path = String
.format(SPILL_INDEX_FILE_FORMAT_STRING, TASKTRACKER_OUTPUT, spillNumber);
return lDirAlloc.getLocalPathToRead(path, conf);
} | 3.68 |
hadoop_Cluster_getChildQueues | /**
* Returns immediate children of queueName.
* @param queueName
* @return array of JobQueueInfo which are children of queueName
* @throws IOException
*/
public QueueInfo[] getChildQueues(String queueName)
throws IOException, InterruptedException {
return client.getChildQueues(queueName);
} | 3.68 |
flink_DataStream_rebalance | /**
* Sets the partitioning of the {@link DataStream} so that the output elements are distributed
* evenly to instances of the next operation in a round-robin fashion.
*
* @return The DataStream with rebalance partitioning set.
*/
public DataStream<T> rebalance() {
return setConnectionType(new RebalancePartitioner<T>());
} | 3.68 |
hbase_Bytes_getLength | /** Returns the number of valid bytes in the buffer */
public int getLength() {
if (this.bytes == null) {
throw new IllegalStateException(
"Uninitialiized. Null constructor " + "called w/o accompaying readFields invocation");
}
return this.length;
} | 3.68 |
hudi_HDFSParquetImporterUtils_load | /**
* Imports records to Hoodie table.
*
* @param client Hoodie Client
* @param instantTime Instant Time
* @param hoodieRecords Hoodie Records
* @param <T> Type
*/
public <T extends HoodieRecordPayload> JavaRDD<WriteStatus> load(SparkRDDWriteClient<T> client, String instantTime,
JavaRDD<HoodieRecord<T>> hoodieRecords) {
switch (this.command.toLowerCase()) {
case "upsert": {
return client.upsert(hoodieRecords, instantTime);
}
case "bulkinsert": {
return client.bulkInsert(hoodieRecords, instantTime);
}
default: {
return client.insert(hoodieRecords, instantTime);
}
}
} | 3.68 |
morf_JdbcUrlElements_getHostName | /**
* @return the server host name.
*/
public String getHostName() {
return hostName;
} | 3.68 |
hadoop_BatchedRequests_getApplicationId | /**
* Get Application Id.
* @return Application Id.
*/
public ApplicationId getApplicationId() {
return applicationId;
} | 3.68 |
flink_TieredStorageConfiguration_getTieredStorageBufferSize | /**
* Get the buffer size in tiered storage.
*
* @return the buffer size.
*/
public int getTieredStorageBufferSize() {
return tieredStorageBufferSize;
} | 3.68 |
druid_IPRange_getIPSubnetMask | /**
* Return the encapsulated subnet mask
*
* @return The IP range's subnet mask.
*/
public final IPAddress getIPSubnetMask() {
return ipSubnetMask;
} | 3.68 |
hadoop_StageConfig_getIOStatistics | /**
* IOStatistics to update.
*/
public IOStatisticsStore getIOStatistics() {
return iostatistics;
} | 3.68 |
framework_VAbsoluteLayout_add | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.user.client.ui.Panel#add(com.google.gwt.user.client.ui
* .Widget)
*/
@Override
public void add(Widget child) {
AbsoluteWrapper wrapper = new AbsoluteWrapper(child);
wrapper.updateStyleNames();
super.add(wrapper, canvas);
} | 3.68 |
graphhopper_JaroWinkler_distance | /**
* Return 1 - similarity.
*/
public final double distance(final String s1, final String s2) {
return 1.0 - similarity(s1, s2);
} | 3.68 |
zilla_ManyToOneRingBuffer_write | /**
* {@inheritDoc}
*/
public boolean write(final int msgTypeId, final DirectBuffer srcBuffer, final int srcIndex, final int length)
{
checkTypeId(msgTypeId);
checkMsgLength(length);
boolean isSuccessful = false;
final AtomicBuffer buffer = this.buffer;
final int recordLength = length + HEADER_LENGTH;
final int recordIndex = claimCapacity(buffer, recordLength);
if (INSUFFICIENT_CAPACITY != recordIndex)
{
buffer.putInt(typeOffset(recordIndex), msgTypeId);
buffer.putBytes(encodedMsgOffset(recordIndex), srcBuffer, srcIndex, length);
buffer.putIntOrdered(lengthOffset(recordIndex), recordLength);
isSuccessful = true;
}
return isSuccessful;
} | 3.68 |
hadoop_S3APrefetchingInputStream_hasCapability | /**
* Indicates whether the given {@code capability} is supported by this stream.
*
* @param capability the capability to check.
* @return true if the given {@code capability} is supported by this stream, false otherwise.
*/
@Override
public boolean hasCapability(String capability) {
if (!isClosed()) {
return inputStream.hasCapability(capability);
}
return false;
} | 3.68 |
framework_VCalendar_isZeroLengthMidnightEvent | /**
 * Checks whether the calendar event is zero seconds long and occurs at midnight.
*
* @param event
* The event to check
 * @return true if the event is zero-length and occurs at midnight, false otherwise
*/
public static boolean isZeroLengthMidnightEvent(CalendarEvent event) {
return areDatesEqualToSecond(event.getStartTime(), event.getEndTime())
&& isMidnight(event.getEndTime());
} | 3.68 |
hbase_Table_get | /**
* Extracts specified cells from the given rows, as a batch.
* @param gets The objects that specify what data to fetch and from which rows.
* @return The data coming from the specified rows, if it exists. If the row specified doesn't
* exist, the {@link Result} instance returned won't contain any
* {@link org.apache.hadoop.hbase.Cell}s, as indicated by {@link Result#isEmpty()}. If
* there are any failures even after retries, there will be a <code>null</code> in the
* results' array for those Gets, AND an exception will be thrown. The ordering of the
* Result array corresponds to the order of the list of passed in Gets.
* @throws IOException if a remote or network exception occurs.
* @since 0.90.0
 * @apiNote {@link #put(List)} runs pre-flight validations on the input list on the client.
 * {@link #get(List)} currently doesn't run any validations on the client side as there is
 * no need, but this may change in the future, in which case an
 * {@link IllegalArgumentException} will be thrown.
*/
default Result[] get(List<Get> gets) throws IOException {
throw new NotImplementedException("Add an implementation!");
} | 3.68 |
framework_CalendarWeekDropHandler_drop | /*
* (non-Javadoc)
*
* @see
* com.vaadin.terminal.gwt.client.ui.dd.VAbstractDropHandler#drop(com.vaadin
* .terminal.gwt.client.ui.dd.VDragEvent)
*/
@Override
public boolean drop(VDragEvent drag) {
if (isLocationValid(drag.getElementOver())) {
updateDropDetails(drag);
deEmphasis();
return super.drop(drag);
} else {
deEmphasis();
return false;
}
} | 3.68 |
flink_GenericDataSourceBase_setStatisticsKey | /**
* Sets the key under which statistics about this data source may be obtained from the
* statistics cache. Useful for testing purposes, when providing mock statistics.
*
* @param statisticsKey The key for the statistics object.
*/
public void setStatisticsKey(String statisticsKey) {
this.statisticsKey = statisticsKey;
} | 3.68 |
flink_DeltaIterationBase_getInitialSolutionSet | /**
* Returns the initial solution set input, or null, if none is set.
*
* @return The iteration's initial solution set input.
*/
public Operator getInitialSolutionSet() {
return getFirstInput();
} | 3.68 |
hbase_MasterObserver_postRollBackMergeRegionsAction | /**
* This will be called after the roll back of the regions merge.
* @param ctx the environment to interact with the framework and master
*/
default void postRollBackMergeRegionsAction(
final ObserverContext<MasterCoprocessorEnvironment> ctx, final RegionInfo[] regionsToMerge)
throws IOException {
} | 3.68 |
framework_AutoScroller_stop | /**
* Stops the automatic scrolling.
*/
public void stop() {
if (handlerRegistration != null) {
handlerRegistration.removeHandler();
handlerRegistration = null;
}
if (autoScroller != null) {
autoScroller.stop();
autoScroller = null;
}
removeNativeHandler();
} | 3.68 |
hadoop_ActiveAuditManagerS3A_set | /**
* Forward to the inner span.
* {@inheritDoc}
*/
@Override
public void set(final String key, final String value) {
span.set(key, value);
} | 3.68 |
hadoop_SelectBinding_buildCSVOutput | /**
* Build CSV output format for a request.
* @param ownerConf FS owner configuration
* @param builderOptions options on the specific request
* @return the output format
* @throws IllegalArgumentException argument failure
* @throws IOException validation failure
*/
public OutputSerialization buildCSVOutput(
final Configuration ownerConf,
final Configuration builderOptions)
throws IllegalArgumentException, IOException {
String fieldDelimiter = xopt(builderOptions,
ownerConf,
CSV_OUTPUT_FIELD_DELIMITER,
CSV_OUTPUT_FIELD_DELIMITER_DEFAULT);
String recordDelimiter = xopt(builderOptions,
ownerConf,
CSV_OUTPUT_RECORD_DELIMITER,
CSV_OUTPUT_RECORD_DELIMITER_DEFAULT);
String quoteCharacter = xopt(builderOptions,
ownerConf,
CSV_OUTPUT_QUOTE_CHARACTER,
CSV_OUTPUT_QUOTE_CHARACTER_DEFAULT);
String quoteEscapeCharacter = xopt(builderOptions,
ownerConf,
CSV_OUTPUT_QUOTE_ESCAPE_CHARACTER,
CSV_OUTPUT_QUOTE_ESCAPE_CHARACTER_DEFAULT);
String quoteFields = xopt(builderOptions,
ownerConf,
CSV_OUTPUT_QUOTE_FIELDS,
CSV_OUTPUT_QUOTE_FIELDS_ALWAYS).toUpperCase(Locale.ENGLISH);
CSVOutput.Builder csvOutputBuilder = CSVOutput.builder()
.quoteCharacter(quoteCharacter)
.quoteFields(QuoteFields.fromValue(quoteFields))
.fieldDelimiter(fieldDelimiter)
.recordDelimiter(recordDelimiter);
if (!quoteEscapeCharacter.isEmpty()) {
csvOutputBuilder.quoteEscapeCharacter(quoteEscapeCharacter);
}
// output is CSV, always
return OutputSerialization.builder()
.csv(csvOutputBuilder.build())
.build();
} | 3.68 |
framework_FilesystemContainer_getType | /**
* Gets the specified property's data type. "Name" is a <code>String</code>,
* "Size" is a <code>Long</code>, "Last Modified" is a <code>Date</code>. If
* propertyId is not one of those, <code>null</code> is returned.
*
* @param propertyId
* the ID of the property whose type is requested.
* @return data type of the requested property, or <code>null</code>
*/
@Override
public Class<?> getType(Object propertyId) {
if (propertyId.equals(PROPERTY_NAME)) {
return String.class;
}
if (propertyId.equals(PROPERTY_ICON)) {
return Resource.class;
}
if (propertyId.equals(PROPERTY_SIZE)) {
return Long.class;
}
if (propertyId.equals(PROPERTY_LASTMODIFIED)) {
return Date.class;
}
return null;
} | 3.68 |
hadoop_StateStoreSerializableImpl_serialize | /**
* Serialize a record using the serializer.
*
* @param record Record to serialize.
* @param <T> Type of the state store record.
* @return Byte array with the serialization of the record.
*/
protected <T extends BaseRecord> byte[] serialize(T record) {
return serializer.serialize(record);
} | 3.68 |
flink_DataStatistics_cacheBaseStatistics | /**
* Caches the given statistics. They are later retrievable under the given identifier.
*
* @param statistics The statistics to cache.
* @param identifier The identifier which may be later used to retrieve the statistics.
*/
public void cacheBaseStatistics(BaseStatistics statistics, String identifier) {
synchronized (this.baseStatisticsCache) {
this.baseStatisticsCache.put(identifier, statistics);
}
} | 3.68 |
hbase_ChecksumUtil_numBytes | /**
* Returns the number of bytes needed to store the checksums for a specified data size
* @param datasize number of bytes of data
* @param bytesPerChecksum number of bytes in a checksum chunk
* @return The number of bytes needed to store the checksum values
*/
static long numBytes(long datasize, int bytesPerChecksum) {
return numChunks(datasize, bytesPerChecksum) * HFileBlock.CHECKSUM_SIZE;
} | 3.68 |
hadoop_SingleFilePerBlockCache_put | /**
* Puts the given block in this cache.
*
* @param blockNumber the block number, used as a key for blocks map.
* @param buffer buffer contents of the given block to be added to this cache.
* @param conf the configuration.
* @param localDirAllocator the local dir allocator instance.
* @throws IOException if either local dir allocator fails to allocate file or if IO error
* occurs while writing the buffer content to the file.
* @throws IllegalArgumentException if buffer is null, or if buffer.limit() is zero or negative.
*/
@Override
public void put(int blockNumber, ByteBuffer buffer, Configuration conf,
LocalDirAllocator localDirAllocator) throws IOException {
if (closed.get()) {
return;
}
checkNotNull(buffer, "buffer");
if (blocks.containsKey(blockNumber)) {
Entry entry = blocks.get(blockNumber);
entry.takeLock(Entry.LockType.READ);
try {
validateEntry(entry, buffer);
} finally {
entry.releaseLock(Entry.LockType.READ);
}
addToLinkedListHead(entry);
return;
}
Validate.checkPositiveInteger(buffer.limit(), "buffer.limit()");
Path blockFilePath = getCacheFilePath(conf, localDirAllocator);
long size = Files.size(blockFilePath);
if (size != 0) {
String message =
String.format("[%d] temp file already has data. %s (%d)",
blockNumber, blockFilePath, size);
throw new IllegalStateException(message);
}
writeFile(blockFilePath, buffer);
long checksum = BufferData.getChecksum(buffer);
Entry entry = new Entry(blockNumber, blockFilePath, buffer.limit(), checksum);
blocks.put(blockNumber, entry);
// Update stream_read_blocks_in_cache stats only after blocks map is updated with new file
// entry to avoid any discrepancy related to the value of stream_read_blocks_in_cache.
// If stream_read_blocks_in_cache is updated before updating the blocks map here, closing of
// the input stream can lead to the removal of the cache file even before blocks is added
// with the new cache file, leading to incorrect value of stream_read_blocks_in_cache.
prefetchingStatistics.blockAddedToFileCache();
addToLinkedListAndEvictIfRequired(entry);
} | 3.68 |
flink_RawFormatFactory_checkFieldType | /** Checks the given field type is supported. */
private static void checkFieldType(LogicalType fieldType) {
if (!supportedTypes.contains(fieldType.getTypeRoot())) {
throw new ValidationException(
String.format(
"The 'raw' format doesn't supports '%s' as column type.",
fieldType.asSummaryString()));
}
} | 3.68 |
querydsl_StringExpression_min | /**
* Create a {@code min(this)} expression
*
* <p>Get the minimum value of this expression (aggregation)</p>
*
* @return min(this)
*/
@Override
public StringExpression min() {
if (min == null) {
min = Expressions.stringOperation(Ops.AggOps.MIN_AGG, mixin);
}
return min;
} | 3.68 |
framework_WebBrowser_isIOS | /**
* Tests if the browser is run in iOS.
*
 * @return true if run in iOS, false if the user is not using iOS or if no
* information on the browser is present
*/
public boolean isIOS() {
return browserDetails.isIOS();
} | 3.68 |
flink_CopyOnWriteStateMap_stateSnapshot | /**
* Creates a snapshot of this {@link CopyOnWriteStateMap}, to be written in checkpointing. The
* snapshot integrity is protected through copy-on-write from the {@link CopyOnWriteStateMap}.
* Users should call {@link #releaseSnapshot(StateMapSnapshot)} after using the returned object.
*
* @return a snapshot from this {@link CopyOnWriteStateMap}, for checkpointing.
*/
@Nonnull
@Override
public CopyOnWriteStateMapSnapshot<K, N, S> stateSnapshot() {
return new CopyOnWriteStateMapSnapshot<>(this);
} | 3.68 |
flink_DefaultExecutionGraph_deserializeAccumulators | /**
* Deserializes accumulators from a task state update.
*
* <p>This method never throws an exception!
*
* @param state The task execution state from which to deserialize the accumulators.
 * @return The deserialized accumulators, or null, if there are no accumulators or an error
* occurred.
*/
private Map<String, Accumulator<?, ?>> deserializeAccumulators(
TaskExecutionStateTransition state) {
AccumulatorSnapshot serializedAccumulators = state.getAccumulators();
if (serializedAccumulators != null) {
try {
return serializedAccumulators.deserializeUserAccumulators(userClassLoader);
} catch (Throwable t) {
// we catch Throwable here to include all form of linking errors that may
// occur if user classes are missing in the classpath
LOG.error("Failed to deserialize final accumulator results.", t);
}
}
return null;
} | 3.68 |
zxing_CameraManager_setTorch | /**
* Convenience method for {@link com.google.zxing.client.android.CaptureActivity}
*
* @param newSetting if {@code true}, light should be turned on if currently off. And vice versa.
*/
public synchronized void setTorch(boolean newSetting) {
OpenCamera theCamera = camera;
if (theCamera != null && newSetting != configManager.getTorchState(theCamera.getCamera())) {
boolean wasAutoFocusManager = autoFocusManager != null;
if (wasAutoFocusManager) {
autoFocusManager.stop();
autoFocusManager = null;
}
configManager.setTorch(theCamera.getCamera(), newSetting);
if (wasAutoFocusManager) {
autoFocusManager = new AutoFocusManager(context, theCamera.getCamera());
autoFocusManager.start();
}
}
} | 3.68 |
framework_VComboBox_filterOptions | /**
* Filters the options at certain page using the given filter.
*
* @param page
* The page to filter
* @param filter
* The filter to apply to the components
*/
public void filterOptions(int page, String filter) {
debug("VComboBox: filterOptions(" + page + ", " + filter + ")");
if (filter.equals(lastFilter) && currentPage == page
&& suggestionPopup.isAttached()) {
// already have the page
dataReceivedHandler.dataReceived();
return;
}
if (!filter.equals(lastFilter)) {
// when filtering, let the server decide the page unless we've
// set the filter to empty and explicitly said that we want to see
// the results starting from page 0.
if (filter.isEmpty() && page != 0) {
// let server decide
page = -1;
} else {
page = 0;
}
}
dataReceivedHandler.startWaitingForFilteringResponse();
connector.requestPage(page, filter);
lastFilter = filter;
// If the data was updated from cache, the page has been updated too, if
// not, update
if (dataReceivedHandler.isWaitingForFilteringResponse()) {
currentPage = page;
}
} | 3.68 |
hmily_HmilyTransaction_registerParticipant | /**
* registerParticipant.
*
* @param hmilyParticipant {@linkplain HmilyParticipant}
*/
public void registerParticipant(final HmilyParticipant hmilyParticipant) {
if (Objects.nonNull(hmilyParticipant)) {
hmilyParticipants.add(hmilyParticipant);
}
} | 3.68 |
flink_QueryableStateClient_getKvState | /**
* Returns a future holding the serialized request result.
*
* @param jobId JobID of the job the queryable state belongs to
* @param queryableStateName Name under which the state is queryable
* @param keyHashCode Integer hash code of the key (result of a call to {@link
* Object#hashCode()}
* @param serializedKeyAndNamespace Serialized key and namespace to query KvState instance with
* @return Future holding the serialized result
*/
private CompletableFuture<KvStateResponse> getKvState(
final JobID jobId,
final String queryableStateName,
final int keyHashCode,
final byte[] serializedKeyAndNamespace) {
LOG.debug("Sending State Request to {}.", remoteAddress);
try {
KvStateRequest request =
new KvStateRequest(
jobId, queryableStateName, keyHashCode, serializedKeyAndNamespace);
return client.sendRequest(remoteAddress, request);
} catch (Exception e) {
LOG.error("Unable to send KVStateRequest: ", e);
return FutureUtils.completedExceptionally(e);
}
} | 3.68 |
flink_FeedbackTransformation_getFeedbackEdges | /** Returns the list of feedback {@code Transformations}. */
public List<Transformation<T>> getFeedbackEdges() {
return feedbackEdges;
} | 3.68 |
hbase_MemStoreCompactorSegmentsIterator_refillKVS | /*
 * Refill the key-value set (should be invoked only when KVS is empty). Returns true if KVS is
 * non-empty.
*/
private boolean refillKVS() {
// if there is nothing expected next in compactingScanner
if (!hasMore) {
return false;
}
// clear previous KVS, first initiated in the constructor
kvs.clear();
for (;;) {
try {
hasMore = compactingScanner.next(kvs, scannerContext);
} catch (IOException e) {
// should not happen as all data are in memory
throw new IllegalStateException(e);
}
if (!kvs.isEmpty()) {
kvsIterator = kvs.iterator();
return true;
} else if (!hasMore) {
return false;
}
}
} | 3.68 |
hudi_SparkRDDWriteClient_initializeMetadataTable | /**
* Initialize the metadata table if needed. Creating the metadata table writer
* will trigger the initial bootstrapping from the data table.
*
* @param inFlightInstantTimestamp - The in-flight action responsible for the metadata table initialization
*/
private void initializeMetadataTable(Option<String> inFlightInstantTimestamp) {
if (!config.isMetadataTableEnabled()) {
return;
}
try (HoodieTableMetadataWriter writer = SparkHoodieBackedTableMetadataWriter.create(context.getHadoopConf().get(), config,
context, inFlightInstantTimestamp)) {
if (writer.isInitialized()) {
writer.performTableServices(inFlightInstantTimestamp);
}
} catch (Exception e) {
throw new HoodieException("Failed to instantiate Metadata table ", e);
}
} | 3.68 |
hadoop_S3ACachingBlockManager_read | /**
* Reads into the given {@code buffer} {@code size} bytes from the underlying file
* starting at {@code startOffset}.
*
* @param buffer the buffer to read data in to.
* @param startOffset the offset at which reading starts.
 * @param size the number of bytes to read.
* @return number of bytes read.
*/
@Override
public int read(ByteBuffer buffer, long startOffset, int size)
throws IOException {
return this.reader.read(buffer, startOffset, size);
} | 3.68 |
hudi_BucketStreamWriteFunction_bootstrapIndexIfNeed | /**
* Get partition_bucket -> fileID mapping from the existing hudi table.
* This is a required operation for each restart to avoid having duplicate file ids for one bucket.
*/
private void bootstrapIndexIfNeed(String partition) {
if (OptionsResolver.isInsertOverwrite(config)) {
// skips the index loading for insert overwrite operation.
return;
}
if (bucketIndex.containsKey(partition)) {
return;
}
LOG.info(String.format("Loading Hoodie Table %s, with path %s", this.metaClient.getTableConfig().getTableName(),
this.metaClient.getBasePath() + "/" + partition));
// Load existing fileID belongs to this task
Map<Integer, String> bucketToFileIDMap = new HashMap<>();
this.writeClient.getHoodieTable().getHoodieView().getLatestFileSlices(partition).forEach(fileSlice -> {
String fileId = fileSlice.getFileId();
int bucketNumber = BucketIdentifier.bucketIdFromFileId(fileId);
if (isBucketToLoad(bucketNumber, partition)) {
LOG.info(String.format("Should load this partition bucket %s with fileId %s", bucketNumber, fileId));
// Validate that one bucketId has only ONE fileId
if (bucketToFileIDMap.containsKey(bucketNumber)) {
throw new RuntimeException(String.format("Duplicate fileId %s from bucket %s of partition %s found "
+ "during the BucketStreamWriteFunction index bootstrap.", fileId, bucketNumber, partition));
} else {
LOG.info(String.format("Adding fileId %s to the bucket %s of partition %s.", fileId, bucketNumber, partition));
bucketToFileIDMap.put(bucketNumber, fileId);
}
}
});
bucketIndex.put(partition, bucketToFileIDMap);
} | 3.68 |
flink_RowDataUtil_isAccumulateMsg | /**
* Returns true if the message is either {@link RowKind#INSERT} or {@link RowKind#UPDATE_AFTER},
* which refers to an accumulate operation of aggregation.
*/
public static boolean isAccumulateMsg(RowData row) {
RowKind kind = row.getRowKind();
return kind == RowKind.INSERT || kind == RowKind.UPDATE_AFTER;
} | 3.68 |
hudi_MetadataTableUtils_shouldUseBatchLookup | /**
* Whether to use batch lookup for listing the latest base files in metadata table.
* <p>
* Note that metadata table has to be enabled, and the storage type of the file system view
* cannot be EMBEDDED_KV_STORE or SPILLABLE_DISK (these two types are not integrated with
* metadata table, see HUDI-5612).
*
* @param config Write configs.
* @return {@code true} if using batch lookup; {@code false} otherwise.
*/
public static boolean shouldUseBatchLookup(HoodieTableConfig tableConfig, HoodieWriteConfig config) {
FileSystemViewStorageType storageType =
config.getClientSpecifiedViewStorageConfig().getStorageType();
return tableConfig.isMetadataTableAvailable()
&& !FileSystemViewStorageType.EMBEDDED_KV_STORE.equals(storageType)
&& !FileSystemViewStorageType.SPILLABLE_DISK.equals(storageType);
} | 3.68 |
hudi_SparkInternalSchemaConverter_convertDecimalType | /**
 * Convert a decimal type to another type.
 * Currently only supports Decimal -> Decimal/String.
* TODO: support more types
*/
private static boolean convertDecimalType(WritableColumnVector oldV, WritableColumnVector newV, DataType newType, int len) {
DataType oldType = oldV.dataType();
if (newType instanceof DecimalType || newType instanceof StringType) {
for (int i = 0; i < len; i++) {
if (oldV.isNullAt(i)) {
newV.putNull(i);
continue;
}
Decimal oldDecimal = oldV.getDecimal(i, ((DecimalType) oldType).precision(), ((DecimalType) oldType).scale());
if (newType instanceof DecimalType) {
oldDecimal.changePrecision(((DecimalType) newType).precision(), ((DecimalType) newType).scale());
newV.putDecimal(i, oldDecimal, ((DecimalType) newType).precision());
} else if (newType instanceof StringType) {
newV.putByteArray(i, getUTF8Bytes(oldDecimal.toString()));
}
}
return true;
}
return false;
} | 3.68 |
dubbo_RegistryDirectory_mergeUrl | /**
 * Merge URL parameters. The order is: override > -D > Consumer > Provider
*
* @param providerUrl
* @return
*/
private URL mergeUrl(URL providerUrl) {
if (providerUrl instanceof ServiceAddressURL) {
providerUrl = overrideWithConfigurator(providerUrl);
} else {
providerUrl = moduleModel
.getApplicationModel()
.getBeanFactory()
.getBean(ClusterUtils.class)
.mergeUrl(providerUrl, queryMap); // Merge the consumer side parameters
providerUrl = overrideWithConfigurator(providerUrl);
providerUrl = providerUrl.addParameter(
Constants.CHECK_KEY,
String.valueOf(
false)); // Do not check whether the connection is successful or not, always create Invoker!
}
// FIXME, kept for mock
if (providerUrl.hasParameter(MOCK_KEY) || providerUrl.getAnyMethodParameter(MOCK_KEY) != null) {
providerUrl = providerUrl.removeParameter(MOCK_KEY);
}
if ((providerUrl.getPath() == null || providerUrl.getPath().length() == 0)
&& DUBBO_PROTOCOL.equals(providerUrl.getProtocol())) { // Compatible version 1.0
// fix by tony.chenl DUBBO-44
String path = directoryUrl.getServiceInterface();
if (path != null) {
int i = path.indexOf('/');
if (i >= 0) {
path = path.substring(i + 1);
}
i = path.lastIndexOf(':');
if (i >= 0) {
path = path.substring(0, i);
}
providerUrl = providerUrl.setPath(path);
}
}
return providerUrl;
} | 3.68 |
hbase_KeyValue_compare | /**
* Compares the Key of a cell -- with fields being more significant in this order: rowkey,
* colfam/qual, timestamp, type, mvcc
*/
@Override
public int compare(final Cell left, final Cell right) {
int compare = CellComparatorImpl.COMPARATOR.compare(left, right);
return compare;
} | 3.68 |
hbase_MetaTableLocator_getMetaRegionState | /**
* Load the meta region state from the meta region server ZNode.
* @param zkw reference to the {@link ZKWatcher} which also contains configuration and
* operation
* @param replicaId the ID of the replica
* @throws KeeperException if a ZooKeeper operation fails
*/
public static RegionState getMetaRegionState(ZKWatcher zkw, int replicaId)
throws KeeperException {
RegionState regionState = null;
try {
byte[] data = ZKUtil.getData(zkw, zkw.getZNodePaths().getZNodeForReplica(replicaId));
regionState = ProtobufUtil.parseMetaRegionStateFrom(data, replicaId);
} catch (DeserializationException e) {
throw ZKUtil.convert(e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
return regionState;
} | 3.68 |
dubbo_AbstractMetadataReport_publishAll | /**
 * Not private, just for unit testing.
*/
void publishAll() {
logger.info("start to publish all metadata.");
this.doHandleMetadataCollection(allMetadataReports);
} | 3.68 |
pulsar_MetadataStoreExtended_getMetadataEventSynchronizer | /**
* Get {@link MetadataEventSynchronizer} to notify and synchronize metadata events.
*
 * @return the {@link MetadataEventSynchronizer} if available, otherwise an empty {@link Optional}
*/
default Optional<MetadataEventSynchronizer> getMetadataEventSynchronizer() {
return Optional.empty();
} | 3.68 |
flink_ExpressionResolver_getAllResolverRules | /** List of rules that will be applied during expression resolution. */
public static List<ResolverRule> getAllResolverRules() {
return Arrays.asList(
ResolverRules.UNWRAP_API_EXPRESSION,
ResolverRules.LOOKUP_CALL_BY_NAME,
ResolverRules.FLATTEN_STAR_REFERENCE,
ResolverRules.EXPAND_COLUMN_FUNCTIONS,
ResolverRules.OVER_WINDOWS,
ResolverRules.FIELD_RESOLVE,
ResolverRules.QUALIFY_BUILT_IN_FUNCTIONS,
ResolverRules.RESOLVE_SQL_CALL,
ResolverRules.RESOLVE_CALL_BY_ARGUMENTS);
} | 3.68 |
hbase_NettyFutureUtils_safeClose | /**
 * Close the channel and consume the returned future, logging the error if the future completes
 * with an error.
*/
public static void safeClose(ChannelOutboundInvoker channel) {
consume(channel.close());
} | 3.68 |
flink_RestServerEndpoint_createUploadDir | /** Creates the upload dir if needed. */
static void createUploadDir(
final Path uploadDir, final Logger log, final boolean initialCreation)
throws IOException {
if (!Files.exists(uploadDir)) {
if (initialCreation) {
log.info("Upload directory {} does not exist. ", uploadDir);
} else {
log.warn(
"Upload directory {} has been deleted externally. "
+ "Previously uploaded files are no longer available.",
uploadDir);
}
checkAndCreateUploadDir(uploadDir, log);
}
} | 3.68 |
hbase_AbstractRpcClient_getCompressor | /**
* Encapsulate the ugly casting and RuntimeException conversion in private method.
* @param conf configuration
* @return The compressor to use on this client.
*/
private static CompressionCodec getCompressor(final Configuration conf) {
String className = conf.get("hbase.client.rpc.compressor", null);
if (className == null || className.isEmpty()) {
return null;
}
try {
return Class.forName(className).asSubclass(CompressionCodec.class).getDeclaredConstructor()
.newInstance();
} catch (Exception e) {
throw new RuntimeException("Failed getting compressor " + className, e);
}
} | 3.68 |
MagicPlugin_PreLoadEvent_registerPVPManager | /**
* Register a PVPManager, for controlling whether or not players can harm other players with magic.
*
* @param manager The manager to add.
*/
public void registerPVPManager(PVPManager manager) {
pvpManagers.add(manager);
} | 3.68 |
flink_DefaultCheckpointPlanCalculator_hasActiveUpstreamVertex | /**
* Every task must have active upstream tasks if
*
* <ol>
* <li>ALL_TO_ALL connection and some predecessors are still running.
* <li>POINTWISE connection and all predecessors are still running.
* </ol>
*
* @param distribution The distribution pattern between the upstream vertex and the current
* vertex.
* @param upstreamRunningTasks The running tasks of the upstream vertex.
* @return Whether every task of the current vertex is connected to some active predecessors.
*/
private boolean hasActiveUpstreamVertex(
DistributionPattern distribution, BitSet upstreamRunningTasks) {
return (distribution == DistributionPattern.ALL_TO_ALL
&& upstreamRunningTasks.cardinality() > 0)
|| (distribution == DistributionPattern.POINTWISE
&& upstreamRunningTasks.cardinality() == upstreamRunningTasks.size());
} | 3.68 |
hadoop_StageConfig_withName | /**
* Set name of task/job.
* @param value new value
* @return the builder
*/
public StageConfig withName(String value) {
name = value;
return this;
} | 3.68 |
cron-utils_Preconditions_checkArgument | /**
* Ensures the truth of an expression involving one or more parameters to the calling method.
*
* @param expression a boolean expression
* @param errorMessageTemplate a template for the exception message should the check fail. The
* message is formed by replacing each {@code %s} placeholder in the template with an
* argument. These are matched by position - the first {@code %s} gets {@code
* errorMessageArgs[0]}, etc. Unmatched arguments will be appended to the formatted message
* in square braces. Unmatched placeholders will be left as-is.
* @param errorMessageArgs the arguments to be substituted into the message template. Arguments
* are converted to strings using {@link String#valueOf(Object)}.
* @throws IllegalArgumentException if {@code expression} is false
* @throws NullPointerException if the check fails and either {@code errorMessageTemplate} or
* {@code errorMessageArgs} is null (don't let this happen)
*/
public static void checkArgument(final boolean expression,
final String errorMessageTemplate,
final Object... errorMessageArgs) {
if (!expression) {
throw new IllegalArgumentException(format(errorMessageTemplate, errorMessageArgs));
}
} | 3.68 |
morf_SchemaChangeSequence_addIndex | /**
* @see org.alfasoftware.morf.upgrade.SchemaEditor#addIndex(java.lang.String, org.alfasoftware.morf.metadata.Index)
*/
@Override
public void addIndex(String tableName, Index index) {
AddIndex addIndex = new AddIndex(tableName, index);
visitor.visit(addIndex);
schemaAndDataChangeVisitor.visit(addIndex);
} | 3.68 |
flink_ExtractionUtils_extractMethodParameterNames | /** Extracts the parameter names of a method if possible. */
static @Nullable List<String> extractMethodParameterNames(Method method) {
return extractExecutableNames(method);
} | 3.68 |
hbase_CompactionTool_doMapReduce | /**
* Execute compaction, using a Map-Reduce job.
*/
private int doMapReduce(final FileSystem fs, final Set<Path> toCompactDirs,
final boolean compactOnce, final boolean major) throws Exception {
Configuration conf = getConf();
conf.setBoolean(CONF_COMPACT_ONCE, compactOnce);
conf.setBoolean(CONF_COMPACT_MAJOR, major);
Job job = new Job(conf);
job.setJobName("CompactionTool");
job.setJarByClass(CompactionTool.class);
job.setMapperClass(CompactionMapper.class);
job.setInputFormatClass(CompactionInputFormat.class);
job.setOutputFormatClass(NullOutputFormat.class);
job.setMapSpeculativeExecution(false);
job.setNumReduceTasks(0);
// add dependencies (including HBase ones)
TableMapReduceUtil.addDependencyJars(job);
Path stagingDir = JobUtil.getQualifiedStagingDir(conf);
FileSystem stagingFs = stagingDir.getFileSystem(conf);
try {
// Create input file with the store dirs
Path inputPath = new Path(stagingDir, "compact-" + EnvironmentEdgeManager.currentTime());
List<Path> storeDirs =
CompactionInputFormat.createInputFile(fs, stagingFs, inputPath, toCompactDirs);
CompactionInputFormat.addInputPath(job, inputPath);
// Initialize credential for secure cluster
TableMapReduceUtil.initCredentials(job);
// Despite the method name this will get delegation token for the filesystem
TokenCache.obtainTokensForNamenodes(job.getCredentials(), storeDirs.toArray(new Path[0]),
conf);
// Start the MR Job and wait
return job.waitForCompletion(true) ? 0 : 1;
} finally {
fs.delete(stagingDir, true);
}
} | 3.68 |
flink_TypeSerializerSchemaCompatibility_incompatible | /**
* Returns a result that indicates there is no possible way for the new serializer to be
* use-able. This normally indicates that there is no common Java class between what the
* previous bytes can be deserialized into and what can be written by the new serializer.
*
* <p>In this case, there is no possible way for the new serializer to continue to be used, even
* with migration. Recovery of the Flink job will fail.
*
* @return a result that indicates incompatibility between the new and previous serializer.
*/
public static <T> TypeSerializerSchemaCompatibility<T> incompatible() {
return new TypeSerializerSchemaCompatibility<>(Type.INCOMPATIBLE, null);
} | 3.68 |
framework_Image_removeClickListener | /**
* Remove a click listener from the component. The listener should earlier
* have been added using {@link #addClickListener(ClickListener)}.
*
* @param listener
* The listener to remove
*
* @deprecated As of 8.0, replaced by {@link Registration#remove()} in the
* registration object returned from
* {@link #addClickListener(ClickListener)}.
*/
@Deprecated
public void removeClickListener(ClickListener listener) {
removeListener(EventId.CLICK_EVENT_IDENTIFIER, ClickEvent.class,
listener);
} | 3.68 |