name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
hadoop_TimelineWriteResponse_getEntityType | /**
* Get the entity type.
*
* @return the entity type
*/
@XmlElement(name = "entitytype")
public String getEntityType() {
return entityType;
} | 3.68 |
hbase_HRegion_applyToMemStore | /**
* @param delta If we are doing delta changes -- e.g. increment/append -- then this flag will be
* set; when set we will run operations that make sense in the increment/append
* scenario but that do not make sense otherwise.
*/
private void applyToMemStore(HStore store, List<Cell> cells, boolean delta,
MemStoreSizing memstoreAccounting) {
// Any change in how we update Store/MemStore needs to also be done in other applyToMemStore!!!!
boolean upsert = delta && store.getColumnFamilyDescriptor().getMaxVersions() == 1;
if (upsert) {
store.upsert(cells, getSmallestReadPoint(), memstoreAccounting);
} else {
store.add(cells, memstoreAccounting);
}
} | 3.68 |
hudi_AvroSchemaCompatibility_toString | /**
* {@inheritDoc}
*/
@Override
public String toString() {
return String.format("SchemaPairCompatibility{result:%s, readerSchema:%s, writerSchema:%s, description:%s}",
mResult, mReader, mWriter, mDescription);
} | 3.68 |
framework_VAccordion_updateTabStyleName | /**
* Updates the stack item's style name from the TabState.
*
* @param newStyleName
* the new style name
*/
private void updateTabStyleName(String newStyleName) {
if (newStyleName != null && !newStyleName.isEmpty()) {
if (!newStyleName.equals(styleName)) {
// If we have a new style name
if (styleName != null && !styleName.isEmpty()) {
// Remove old style name if present
removeStyleDependentName(styleName);
}
// Set new style name
addStyleDependentName(newStyleName);
styleName = newStyleName;
}
} else if (styleName != null) {
// Remove the set stylename if no stylename is present in the
// uidl
removeStyleDependentName(styleName);
styleName = null;
}
} | 3.68 |
hadoop_JobID_getJobIDsPattern | /**
 * Returns a regex pattern which matches job IDs. Arguments can
 * be given null, in which case that part of the regex will be generic.
 * For example, to obtain a regex matching <i>any job</i>
 * run on the jobtracker started at <i>200707121733</i>, we would use:
 * <pre>
 * JobID.getJobIDsPattern("200707121733", null);
* </pre>
* which will return :
* <pre> "job_200707121733_[0-9]*" </pre>
* @param jtIdentifier jobTracker identifier, or null
* @param jobId job number, or null
* @return a regex pattern matching JobIDs
*/
@Deprecated
public static String getJobIDsPattern(String jtIdentifier, Integer jobId) {
StringBuilder builder = new StringBuilder(JOB).append(SEPARATOR);
builder.append(getJobIDsPatternWOPrefix(jtIdentifier, jobId));
return builder.toString();
} | 3.68 |
rocketmq-connect_MemoryStateManagementServiceImpl_getAll | /**
* Get the states of all tasks for the given connector.
*
* @param connector the connector name
* @return a map from task ids to their respective status
*/
@Override
public synchronized Collection<TaskStatus> getAll(String connector) {
return new HashSet<>(tasks.row(connector).values());
} | 3.68 |
flink_EnvironmentInformation_getOpenFileHandlesLimit | /**
* Tries to retrieve the maximum number of open file handles. This method will only work on
* UNIX-based operating systems with Sun/Oracle Java versions.
*
* <p>If the number of max open file handles cannot be determined, this method returns {@code
* -1}.
*
* @return The limit of open file handles, or {@code -1}, if the limit could not be determined.
*/
public static long getOpenFileHandlesLimit() {
if (OperatingSystem
.isWindows()) { // getMaxFileDescriptorCount method is not available on Windows
return -1L;
}
Class<?> sunBeanClass;
try {
sunBeanClass = Class.forName("com.sun.management.UnixOperatingSystemMXBean");
} catch (ClassNotFoundException e) {
return -1L;
}
try {
Method fhLimitMethod = sunBeanClass.getMethod("getMaxFileDescriptorCount");
Object result = fhLimitMethod.invoke(ManagementFactory.getOperatingSystemMXBean());
return (Long) result;
} catch (Throwable t) {
LOG.warn("Unexpected error when accessing file handle limit", t);
return -1L;
}
} | 3.68 |
hadoop_OBSCommonUtils_rejectRootDirectoryDelete | /**
* Implements the specific logic to reject root directory deletion. The caller
* must return the result of this call, rather than attempt to continue with
* the delete operation: deleting root directories is never allowed. This
* method simply implements the policy of when to return an exit code versus
* raise an exception.
*
* @param bucket bucket name
* @param isEmptyDir flag indicating if the directory is empty
* @param recursive recursive flag from command
* @return a return code for the operation
* @throws PathIOException if the operation was explicitly rejected.
*/
static boolean rejectRootDirectoryDelete(final String bucket,
final boolean isEmptyDir,
final boolean recursive)
throws IOException {
LOG.info("obs delete the {} root directory of {}", bucket, recursive);
if (isEmptyDir) {
return true;
}
if (recursive) {
return false;
} else {
// reject
throw new PathIOException(bucket, "Cannot delete root path");
}
} | 3.68 |
hadoop_GroupMappingServiceProvider_getGroupsSet | /**
* Get all various group memberships of a given user.
* Returns EMPTY set in case of non-existing user
* @param user User's name
* @return set of group memberships of user
* @throws IOException raised on errors performing I/O.
*/
default Set<String> getGroupsSet(String user) throws IOException {
//Override to form the set directly to avoid another conversion
return new LinkedHashSet<>(getGroups(user));
} | 3.68 |
querydsl_Expressions_constantAs | /**
* Create a {@code source as alias} expression
*
* @param source source
* @param alias alias
* @return source as alias
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
public static <D> SimpleExpression<D> constantAs(D source, Path<D> alias) {
if (source == null) {
return as((Expression) nullExpression(), alias);
} else {
return as(ConstantImpl.create(source), alias);
}
} | 3.68 |
hadoop_AllocateResponse_updatedNodes | /**
* Set the <code>updatedNodes</code> of the response.
* @see AllocateResponse#setUpdatedNodes(List)
* @param updatedNodes <code>updatedNodes</code> of the response
* @return {@link AllocateResponseBuilder}
*/
@Private
@Unstable
public AllocateResponseBuilder updatedNodes(
List<NodeReport> updatedNodes) {
allocateResponse.setUpdatedNodes(updatedNodes);
return this;
} | 3.68 |
hudi_LSMTimelineWriter_getCandidateFiles | /**
 * Returns at most {@code filesBatch} source files,
 * restricted to a gross file size of 1GB.
*/
private List<String> getCandidateFiles(List<HoodieLSMTimelineManifest.LSMFileEntry> files, int filesBatch) throws IOException {
List<String> candidates = new ArrayList<>();
long totalFileLen = 0L;
for (int i = 0; i < filesBatch; i++) {
HoodieLSMTimelineManifest.LSMFileEntry fileEntry = files.get(i);
if (totalFileLen > MAX_FILE_SIZE_IN_BYTES) {
return candidates;
}
      // we may also need to consider a single file that is very close to the threshold in size,
      // to avoid write amplification,
      // e.g., two 800MB files compact into a 1.6GB file.
totalFileLen += fileEntry.getFileLen();
candidates.add(fileEntry.getFileName());
}
return candidates;
} | 3.68 |
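The batching rule above is a simple greedy cut-off: files are taken in manifest order until either {@code filesBatch} entries have been taken or the running size total has already crossed the cap, so the batch may overshoot the cap by at most one file. A small hedged sketch of that rule with plain longs instead of Hudi's manifest entries (class and method names here are illustrative):

```java
import java.util.ArrayList;
import java.util.List;

public class CandidateFilesDemo {
    static final long MAX_BATCH_BYTES = 1024L * 1024 * 1024;   // the 1GB cap mentioned in the javadoc

    /** fileLens are the candidate file sizes in their manifest order; returns the picked indices. */
    static List<Integer> pickCandidates(long[] fileLens, int filesBatch) {
        List<Integer> picked = new ArrayList<>();
        long total = 0;
        for (int i = 0; i < Math.min(filesBatch, fileLens.length); i++) {
            if (total > MAX_BATCH_BYTES) {
                break;                      // cap already exceeded by the previous files, stop here
            }
            total += fileLens[i];           // the current file is still taken, as in the original loop
            picked.add(i);
        }
        return picked;
    }

    public static void main(String[] args) {
        long[] mb800 = {800L << 20, 800L << 20, 800L << 20, 800L << 20};
        // The first two files already exceed 1GB together, so the check at i = 2 stops the loop.
        System.out.println(pickCandidates(mb800, 10));   // [0, 1]
    }
}
```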
hbase_ProcedureExecutor_startWorkers | /**
* Start the workers.
*/
public void startWorkers() throws IOException {
if (!running.compareAndSet(false, true)) {
LOG.warn("Already running");
return;
}
// Start the executors. Here we must have the lastProcId set.
LOG.trace("Start workers {}", workerThreads.size());
timeoutExecutor.start();
workerMonitorExecutor.start();
for (WorkerThread worker : workerThreads) {
worker.start();
}
// Internal chores
workerMonitorExecutor.add(new WorkerMonitor());
// Add completed cleaner chore
addChore(new CompletedProcedureCleaner<>(conf, store, procExecutionLock, completed,
nonceKeysToProcIdsMap));
} | 3.68 |
querydsl_JTSGeometryExpression_dimension | /**
* The inherent dimension of this geometric object, which must be less than or equal
* to the coordinate dimension. In non-homogeneous collections, this will return the largest topological
* dimension of the contained objects.
*
* @return dimension
*/
public NumberExpression<Integer> dimension() {
if (dimension == null) {
dimension = Expressions.numberOperation(Integer.class, SpatialOps.DIMENSION, mixin);
}
return dimension;
} | 3.68 |
flink_PythonTableFunctionOperator_isFinishResult | /** The received udtf execution result is a finish message when it is a byte with value 0x00. */
private boolean isFinishResult(byte[] rawUdtfResult, int length) {
return length == 1 && rawUdtfResult[0] == 0x00;
} | 3.68 |
hudi_WaitStrategyFactory_build | /**
* Build WaitStrategy for disruptor
*/
public static WaitStrategy build(String name) {
DisruptorWaitStrategyType strategyType = DisruptorWaitStrategyType.valueOf(name);
switch (strategyType) {
case BLOCKING_WAIT:
return new BlockingWaitStrategy();
case SLEEPING_WAIT:
return new SleepingWaitStrategy();
case YIELDING_WAIT:
return new YieldingWaitStrategy();
case BUSY_SPIN_WAIT:
return new BusySpinWaitStrategy();
default:
throw new HoodieException("Unsupported Executor Type " + name);
}
} | 3.68 |
framework_Escalator_getCalculatedWidth | /**
* Returns the actual width in the DOM.
*
* @return the width in pixels in the DOM. Returns -1 if the column
* needs measuring, but has not been yet measured
*/
public double getCalculatedWidth() {
/*
* This might return an untrue value (e.g. during init/onload),
* since we haven't had a proper chance to actually calculate
* widths yet.
*
* This is fixed during Escalator.onLoad, by the call to
* "measureAndSetWidthIfNeeded", which fixes "everything".
*/
if (!measuringRequested) {
return calculatedWidth;
} else {
return -1;
}
} | 3.68 |
hadoop_LocalJobOutputFiles_getInputFile | /**
* Return a local reduce input file created earlier
*
* @param mapId a map task id
*/
public Path getInputFile(int mapId) throws IOException {
return lDirAlloc.getLocalPathToRead(
String.format(REDUCE_INPUT_FILE_FORMAT_STRING, TASKTRACKER_OUTPUT, Integer.valueOf(mapId)),
conf);
} | 3.68 |
hbase_Timer_updateNanos | /**
* Update the timer with the given duration in nanoseconds
* @param durationNanos the duration of the event in ns
*/
default void updateNanos(long durationNanos) {
update(durationNanos, TimeUnit.NANOSECONDS);
} | 3.68 |
flink_OptimizableHashSet_arraySize | /**
* Returns the least power of two smaller than or equal to 2<sup>30</sup> and larger than or
* equal to <code>Math.ceil( expected / f )</code>.
*
* @param expected the expected number of elements in a hash table.
* @param f the load factor.
* @return the minimum possible size for a backing array.
* @throws IllegalArgumentException if the necessary size is larger than 2<sup>30</sup>.
*/
public static int arraySize(int expected, float f) {
long s = Math.max(2L, nextPowerOfTwo((long) Math.ceil((double) ((float) expected / f))));
if (s > (Integer.MAX_VALUE / 2 + 1)) {
throw new IllegalArgumentException(
"Too large (" + expected + " expected elements with load factor " + f + ")");
} else {
return (int) s;
}
} | 3.68 |
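A quick worked example of the sizing rule above, as a self-contained sketch. The `nextPowerOfTwo` helper below is an assumed equivalent of Flink's private helper, not the actual implementation:

```java
public class ArraySizeDemo {
    // Assumed equivalent of Flink's nextPowerOfTwo helper.
    static long nextPowerOfTwo(long x) {
        return x <= 1 ? 1 : Long.highestOneBit(x - 1) << 1;
    }

    static int arraySize(int expected, float f) {
        long s = Math.max(2L, nextPowerOfTwo((long) Math.ceil(expected / (double) f)));
        if (s > (Integer.MAX_VALUE / 2 + 1)) {
            throw new IllegalArgumentException(
                    "Too large (" + expected + " expected elements with load factor " + f + ")");
        }
        return (int) s;
    }

    public static void main(String[] args) {
        // ceil(1000 / 0.75) = 1334, and the least power of two >= 1334 is 2048.
        System.out.println(arraySize(1000, 0.75f)); // prints 2048
    }
}
```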
hmily_HmilyAggregationType_isAggregationType | /**
* Is aggregation type.
* @param aggregationType aggregation type
* @return is aggregation type or not
*/
public static boolean isAggregationType(final String aggregationType) {
return Arrays.stream(values()).anyMatch(each -> aggregationType.equalsIgnoreCase(each.name()));
} | 3.68 |
hbase_LruAdaptiveBlockCache_cacheBlock | /**
* Cache the block with the specified name and buffer.
* <p>
 * TODO after HBASE-22005, we may cache a block which was allocated off-heap, but our LRU cache
 * sizing is based on heap size, so we should handle this in HBASE-22127. It will introduce a
 * switch for whether to keep the LRU on-heap or not; if so we may need to copy the memory to
 * on-heap, otherwise the caching size is based on off-heap.
* @param cacheKey block's cache key
* @param buf block buffer
*/
@Override
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
cacheBlock(cacheKey, buf, false);
} | 3.68 |
hbase_KeyValueScanner_getScannerOrder | /**
* Get the order of this KeyValueScanner. This is only relevant for StoreFileScanners. This is
* required for comparing multiple files to find out which one has the latest data.
* StoreFileScanners are ordered from 0 (oldest) to newest in increasing order.
*/
default long getScannerOrder() {
return 0;
} | 3.68 |
hadoop_RmSingleLineParser_aggregateSkyline | /**
* Aggregates different jobs' {@link ResourceSkyline}s within the same
* pipeline together.
*
* @param resourceSkyline newly extracted {@link ResourceSkyline}.
* @param recurrenceId the {@link RecurrenceId} which the resourceSkyline
* belongs to.
* @param skylineRecords a {@link Map} which stores the
* {@link ResourceSkyline}s for all pipelines during this parsing.
*/
private void aggregateSkyline(final ResourceSkyline resourceSkyline,
final RecurrenceId recurrenceId,
final Map<RecurrenceId, List<ResourceSkyline>> skylineRecords) {
List<ResourceSkyline> resourceSkylines = skylineRecords.get(recurrenceId);
if (resourceSkylines == null) {
resourceSkylines = new ArrayList<ResourceSkyline>();
skylineRecords.put(recurrenceId, resourceSkylines);
}
resourceSkylines.add(resourceSkyline);
} | 3.68 |
hbase_MasterObserver_postSetRegionServerQuota | /**
* Called after the quota for the region server is stored.
* @param ctx the environment to interact with the framework and master
* @param regionServer the name of the region server
* @param quotas the resulting quota for the region server
*/
default void postSetRegionServerQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String regionServer, final GlobalQuotaSettings quotas) throws IOException {
} | 3.68 |
framework_BasicEventProvider_eventChange | /*
* (non-Javadoc)
*
* @see
* com.vaadin.addon.calendar.ui.CalendarComponentEvents.EventChangeListener
* #eventChange
* (com.vaadin.addon.calendar.ui.CalendarComponentEvents.EventSetChange)
*/
@Override
public void eventChange(EventChangeEvent changeEvent) {
// naive implementation
fireEventSetChange();
} | 3.68 |
flink_SkipListUtils_getLevel | /**
* Returns the level of the node.
*
* @param memorySegment memory segment for key space.
* @param offset offset of key space in the memory segment.
*/
public static int getLevel(MemorySegment memorySegment, int offset) {
return memorySegment.getInt(offset + KEY_META_OFFSET) & BYTE_MASK;
} | 3.68 |
hudi_HoodieAvroUtils_rewriteRecords | /**
* Converts list of {@link GenericRecord} provided into the {@link GenericRecord} adhering to the
* provided {@code newSchema}.
* <p>
* To better understand conversion rules please check {@link #rewriteRecord(GenericRecord, Schema)}
*/
public static List<GenericRecord> rewriteRecords(List<GenericRecord> records, Schema newSchema) {
return records.stream().map(r -> rewriteRecord(r, newSchema)).collect(Collectors.toList());
} | 3.68 |
graphhopper_NodeBasedWitnessPathSearcher_findUpperBound | /**
* Runs or continues a Dijkstra search starting at the startNode and ignoring the ignoreNode given in init().
* If the shortest path is found we return its weight. However, this method also returns early if any path was
* found for which the weight is below or equal to the given acceptedWeight, or the given maximum number of settled
* nodes is exceeded. In these cases the returned weight can be larger than the actual weight of the shortest path.
* In any case we get an upper bound for the real shortest path weight.
*
* @param targetNode the target of the search. if this node is settled we return the weight of the shortest path
* @param acceptedWeight once we find a path with weight smaller than or equal to this we return the weight. the
* returned weight might be larger than the weight of the real shortest path. if there is
* no path with weight smaller than or equal to this we stop the search and return the best
* path we found.
* @param maxSettledNodes once the number of settled nodes exceeds this number we return the currently found best
* weight path. in this case we might not have found a path at all.
* @return the weight of the found path or {@link Double#POSITIVE_INFINITY} if no path was found
*/
public double findUpperBound(int targetNode, double acceptedWeight, int maxSettledNodes) {
// todo: for historic reasons we count the number of settled nodes for each call of this method
// *not* the total number of settled nodes since starting the search (which corresponds
// to the size of the settled part of the shortest path tree). it's probably worthwhile
// to change this in the future.
while (!heap.isEmpty() && settledNodes < maxSettledNodes && heap.peekKey() <= acceptedWeight) {
if (weights[targetNode] <= acceptedWeight)
// we found *a* path to the target node (not necessarily the shortest), and the weight is acceptable, so we stop
return weights[targetNode];
int node = heap.poll();
PrepareGraphEdgeIterator iter = outEdgeExplorer.setBaseNode(node);
while (iter.next()) {
int adjNode = iter.getAdjNode();
if (adjNode == ignoreNode)
continue;
double weight = weights[node] + iter.getWeight();
if (Double.isInfinite(weight))
continue;
double adjWeight = weights[adjNode];
if (adjWeight == Double.POSITIVE_INFINITY) {
weights[adjNode] = weight;
heap.insert(weight, adjNode);
changedNodes.add(adjNode);
} else if (weight < adjWeight) {
weights[adjNode] = weight;
heap.update(weight, adjNode);
}
}
settledNodes++;
if (node == targetNode)
// we have settled the target node, we now know the exact weight of the shortest path and return
return weights[node];
}
return weights[targetNode];
} | 3.68 |
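The early-exit rules described above (acceptedWeight and maxSettledNodes) are easier to see in a plain Dijkstra. Below is a minimal sketch using a lazy-deletion priority queue over a simple adjacency list; the graph representation and names are illustrative and not GraphHopper's API:

```java
import java.util.*;

class UpperBoundDijkstraSketch {
    /** graph.get(u) holds {v, weight} edges; returns an upper bound on the source->target weight. */
    static double findUpperBound(List<List<double[]>> graph, int source, int target,
                                 double acceptedWeight, int maxSettledNodes) {
        double[] weights = new double[graph.size()];
        Arrays.fill(weights, Double.POSITIVE_INFINITY);
        weights[source] = 0;
        PriorityQueue<double[]> heap =
                new PriorityQueue<>(Comparator.comparingDouble((double[] e) -> e[0]));
        heap.add(new double[] {0, source});
        int settled = 0;
        while (!heap.isEmpty() && settled < maxSettledNodes && heap.peek()[0] <= acceptedWeight) {
            if (weights[target] <= acceptedWeight) {
                return weights[target];       // some acceptable (not necessarily shortest) path found
            }
            double[] entry = heap.poll();
            int node = (int) entry[1];
            if (entry[0] > weights[node]) {
                continue;                     // stale queue entry (lazy deletion instead of decrease-key)
            }
            for (double[] edge : graph.get(node)) {
                int adj = (int) edge[0];
                double w = weights[node] + edge[1];
                if (w < weights[adj]) {
                    weights[adj] = w;
                    heap.add(new double[] {w, adj});
                }
            }
            settled++;
            if (node == target) {
                return weights[node];         // target settled: exact shortest-path weight
            }
        }
        return weights[target];               // best bound found so far, possibly +Infinity
    }
}
```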
querydsl_BeanPath_createNumber | /**
* Create a new Number path
*
* @param <A>
* @param property property name
* @param type property type
* @return property path
*/
@SuppressWarnings("unchecked")
protected <A extends Number & Comparable<?>> NumberPath<A> createNumber(String property, Class<? super A> type) {
return add(new NumberPath<A>((Class) type, forProperty(property)));
} | 3.68 |
hadoop_ApplicationRowKey_decode | /*
* (non-Javadoc)
*
* Decodes an application row key of the form
* clusterId!userName!flowName!flowRunId!appId represented in byte format
 * and converts it into an ApplicationRowKey object. The flowRunId is inverted
* while decoding as it was inverted while encoding.
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.common
* .KeyConverter#decode(byte[])
*/
@Override
public ApplicationRowKey decode(byte[] rowKey) {
byte[][] rowKeyComponents =
Separator.QUALIFIERS.split(rowKey, SEGMENT_SIZES);
if (rowKeyComponents.length != 5) {
throw new IllegalArgumentException("the row key is not valid for "
+ "an application");
}
String clusterId =
Separator.decode(Bytes.toString(rowKeyComponents[0]),
Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
String userId =
Separator.decode(Bytes.toString(rowKeyComponents[1]),
Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
String flowName =
Separator.decode(Bytes.toString(rowKeyComponents[2]),
Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
Long flowRunId =
LongConverter.invertLong(Bytes.toLong(rowKeyComponents[3]));
String appId = appIDKeyConverter.decode(rowKeyComponents[4]);
return new ApplicationRowKey(clusterId, userId, flowName, flowRunId,
appId);
} | 3.68 |
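The flowRunId inversion mentioned above is what makes newer runs sort first in the row-key order. A hedged sketch of the idea, assuming LongConverter.invertLong is the usual Long.MAX_VALUE - value trick (the helper name and values below are illustrative):

```java
public class InvertLongDemo {
    // Assumed behaviour of LongConverter.invertLong: a self-inverse mapping that
    // reverses the natural ordering, so larger (newer) run ids produce smaller keys.
    static long invertLong(long value) {
        return Long.MAX_VALUE - value;
    }

    public static void main(String[] args) {
        long olderRun = 1_600_000_000_000L;
        long newerRun = 1_700_000_000_000L;
        // In the encoded row key the newer run compares as "smaller", i.e. it sorts first.
        System.out.println(invertLong(newerRun) < invertLong(olderRun)); // true
        // Decoding applies the same function again and recovers the original value.
        System.out.println(invertLong(invertLong(newerRun)) == newerRun); // true
    }
}
```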
graphhopper_BaseGraph_copyProperties | /**
* This method copies the properties of one {@link EdgeIteratorState} to another.
*
 * @return the updated iterator the properties were copied to.
*/
EdgeIteratorState copyProperties(EdgeIteratorState from, EdgeIteratorStateImpl to) {
long edgePointer = store.toEdgePointer(to.getEdge());
store.writeFlags(edgePointer, from.getFlags());
// copy the rest with higher level API
to.setDistance(from.getDistance()).
setKeyValues(from.getKeyValues()).
setWayGeometry(from.fetchWayGeometry(FetchMode.PILLAR_ONLY));
return to;
} | 3.68 |
morf_SqlDialect_getSqlForSome | /**
 * Converts the SOME function into SQL.
*
* @param function the function details
* @return a string representation of the SQL
*/
protected String getSqlForSome(Function function) {
return getSqlForMax(function);
} | 3.68 |
hudi_HoodieConsistentHashingMetadata_getTimestampFromFile | /**
* Get instant time from the hashing metadata filename
* Pattern of the filename: <instant>.HASHING_METADATA_FILE_SUFFIX
*/
public static String getTimestampFromFile(String filename) {
return filename.split("\\.")[0];
} | 3.68 |
hbase_ZKWatcher_interruptedException | /**
* Handles InterruptedExceptions in client calls.
* @param ie the InterruptedException instance thrown
* @throws KeeperException the exception to throw, transformed from the InterruptedException
*/
public void interruptedException(InterruptedException ie) throws KeeperException {
interruptedExceptionNoThrow(ie, true);
// Throw a system error exception to let upper level handle it
KeeperException keeperException = new KeeperException.SystemErrorException();
keeperException.initCause(ie);
throw keeperException;
} | 3.68 |
framework_InfoSection_meta | /*
* (non-Javadoc)
*
* @see com.vaadin.client.debug.internal.Section#meta(com.vaadin.client.
* ApplicationConnection, com.vaadin.client.ValueMap)
*/
@Override
public void meta(ApplicationConnection ac, ValueMap meta) {
} | 3.68 |
framework_GridMultiSelect_setSelectAllCheckBoxVisibility | /**
* Sets the select all checkbox visibility mode.
* <p>
* The default value is {@link SelectAllCheckBoxVisibility#DEFAULT}, which
 * means that the checkbox is only visible if the grid's data provider is
 * in-memory.
*
* @param visibility
 * the visibility mode to use
* @see SelectAllCheckBoxVisibility
*/
public void setSelectAllCheckBoxVisibility(
SelectAllCheckBoxVisibility visibility) {
model.setSelectAllCheckBoxVisibility(visibility);
} | 3.68 |
pulsar_CmdConsume_run | /**
* Run the consume command.
*
* @return 0 for success, < 0 otherwise
*/
public int run() throws PulsarClientException, IOException {
if (mainOptions.size() != 1) {
throw (new ParameterException("Please provide one and only one topic name."));
}
if (this.subscriptionName == null || this.subscriptionName.isEmpty()) {
throw (new ParameterException("Subscription name is not provided."));
}
if (this.numMessagesToConsume < 0) {
throw (new ParameterException("Number of messages should be zero or positive."));
}
String topic = this.mainOptions.get(0);
if (this.serviceURL.startsWith("ws")) {
return consumeFromWebSocket(topic);
} else {
return consume(topic);
}
} | 3.68 |
framework_VaadinFinderLocatorStrategy_getElementByPathStartingAt | /**
* {@inheritDoc}
*/
@Override
public Element getElementByPathStartingAt(String path, Element root) {
List<Element> elements = getElementsByPathStartingAt(path, root);
if (elements.isEmpty()) {
return null;
}
return elements.get(0);
} | 3.68 |
querydsl_ComparableExpression_loeAny | /**
* Create a {@code this <= any right} expression
*
* @param right rhs of the comparison
* @return this <= any right
*/
public BooleanExpression loeAny(SubQueryExpression<? extends T> right) {
return loe(ExpressionUtils.any(right));
} | 3.68 |
flink_CliFrontend_triggerSavepoint | /** Sends a SavepointTriggerMessage to the job manager. */
private void triggerSavepoint(
ClusterClient<?> clusterClient,
JobID jobId,
String savepointDirectory,
SavepointFormatType formatType,
Duration clientTimeout)
throws FlinkException {
logAndSysout("Triggering savepoint for job " + jobId + '.');
CompletableFuture<String> savepointPathFuture =
clusterClient.triggerSavepoint(jobId, savepointDirectory, formatType);
logAndSysout("Waiting for response...");
try {
final String savepointPath =
savepointPathFuture.get(clientTimeout.toMillis(), TimeUnit.MILLISECONDS);
logAndSysout("Savepoint completed. Path: " + savepointPath);
logAndSysout("You can resume your program from this savepoint with the run command.");
} catch (Exception e) {
Throwable cause = ExceptionUtils.stripExecutionException(e);
throw new FlinkException(
"Triggering a savepoint for the job " + jobId + " failed.", cause);
}
} | 3.68 |
hbase_MasterObserver_preLockHeartbeat | /**
* Called before heartbeat to a lock.
* @param ctx the environment to interact with the framework and master
*/
default void preLockHeartbeat(ObserverContext<MasterCoprocessorEnvironment> ctx, TableName tn,
String description) throws IOException {
} | 3.68 |
hadoop_StageConfig_withConfiguration | /**
* Set configuration.
* @param value new value
* @return the builder
*/
public StageConfig withConfiguration(Configuration value) {
conf = value;
return this;
} | 3.68 |
hbase_MiniHBaseCluster_getLiveRegionServerThreads | /** Returns List of live region server threads (skips the aborted and the killed) */
public List<JVMClusterUtil.RegionServerThread> getLiveRegionServerThreads() {
return this.hbaseCluster.getLiveRegionServers();
} | 3.68 |
flink_FsJobArchivist_archiveJob | /**
* Writes the given {@link AccessExecutionGraph} to the {@link FileSystem} pointed to by {@link
* JobManagerOptions#ARCHIVE_DIR}.
*
* @param rootPath directory to which the archive should be written to
* @param jobId job id
* @param jsonToArchive collection of json-path pairs to that should be archived
* @return path to where the archive was written, or null if no archive was created
* @throws IOException
*/
public static Path archiveJob(
Path rootPath, JobID jobId, Collection<ArchivedJson> jsonToArchive) throws IOException {
try {
FileSystem fs = rootPath.getFileSystem();
Path path = new Path(rootPath, jobId.toString());
OutputStream out = fs.create(path, FileSystem.WriteMode.NO_OVERWRITE);
try (JsonGenerator gen = jacksonFactory.createGenerator(out, JsonEncoding.UTF8)) {
gen.writeStartObject();
gen.writeArrayFieldStart(ARCHIVE);
for (ArchivedJson archive : jsonToArchive) {
gen.writeStartObject();
gen.writeStringField(PATH, archive.getPath());
gen.writeStringField(JSON, archive.getJson());
gen.writeEndObject();
}
gen.writeEndArray();
gen.writeEndObject();
} catch (Exception e) {
fs.delete(path, false);
throw e;
}
LOG.info("Job {} has been archived at {}.", jobId, path);
return path;
} catch (IOException e) {
LOG.error("Failed to archive job.", e);
throw e;
}
} | 3.68 |
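For reference, the archive written above is a single JSON object of the form {"archive": [{"path": ..., "json": ...}, ...]}, assuming the ARCHIVE/PATH/JSON constants used in the snippet are the lowercase field names "archive", "path" and "json". A small hedged sketch of reading such a file back with Jackson (the file location is hypothetical, and this is not Flink's own archive reader):

```java
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

import java.nio.file.Files;
import java.nio.file.Paths;

public class ReadArchiveSketch {
    public static void main(String[] args) throws Exception {
        // Path to an archive previously written by archiveJob (hypothetical location and job id).
        byte[] bytes = Files.readAllBytes(Paths.get("/tmp/flink-archive/abcdef0123456789"));
        JsonNode root = new ObjectMapper().readTree(bytes);
        for (JsonNode entry : root.get("archive")) {
            // Each entry pairs a REST path with the JSON payload that was served under it.
            System.out.println(entry.get("path").asText() + " -> "
                    + entry.get("json").asText().length() + " chars of JSON");
        }
    }
}
```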
framework_SQLContainer_isModified | /**
* Returns modify state of the container.
*
* @return true if contents of this container have been modified
*/
public boolean isModified() {
return !removedItems.isEmpty() || !addedItems.isEmpty()
|| !modifiedItems.isEmpty();
} | 3.68 |
Activiti_ProcessEngines_getProcessEngineInfo | /**
   * Get initialization results. Only info will be available for process engines which were added in {@link ProcessEngines#init()}. No {@link ProcessEngineInfo} is available for engines which were
   * registered programmatically.
*/
public static ProcessEngineInfo getProcessEngineInfo(String processEngineName) {
return processEngineInfosByName.get(processEngineName);
} | 3.68 |
flink_SinkFunction_writeWatermark | /**
* Writes the given watermark to the sink. This function is called for every watermark.
*
* <p>This method is intended for advanced sinks that propagate watermarks.
*
* @param watermark The watermark.
* @throws Exception This method may throw exceptions. Throwing an exception will cause the
* operation to fail and may trigger recovery.
*/
default void writeWatermark(Watermark watermark) throws Exception {} | 3.68 |
hbase_BlockCache_isMetaBlock | /**
* Check if block type is meta or index block
* @param blockType block type of a given HFile block
* @return true if block type is non-data block
*/
default boolean isMetaBlock(BlockType blockType) {
return blockType != null && blockType.getCategory() != BlockType.BlockCategory.DATA;
} | 3.68 |
morf_ConcatenatedField_drive | /**
* @see org.alfasoftware.morf.util.ObjectTreeTraverser.Driver#drive(ObjectTreeTraverser)
*/
@Override
public void drive(ObjectTreeTraverser traverser) {
traverser.dispatch(getConcatenationFields());
} | 3.68 |
open-banking-gateway_DatasafeMetadataStorage_getIdValue | /**
* Converts String id value into Entity id
* @param id Entity id
*/
protected Long getIdValue(String id) {
return Long.valueOf(id);
} | 3.68 |
flink_Configuration_removeKey | /**
* Removes given key from the configuration.
*
* @param key key of a config option to remove
* @return true is config has been removed, false otherwise
*/
public boolean removeKey(String key) {
synchronized (this.confData) {
boolean removed = this.confData.remove(key) != null;
removed |= removePrefixMap(confData, key);
return removed;
}
} | 3.68 |
dubbo_AbstractServiceNameMapping_setApplicationModel | // just for test
public void setApplicationModel(ApplicationModel applicationModel) {
this.applicationModel = applicationModel;
} | 3.68 |
pulsar_ResourceUnitRanking_estimateLoadPercentage | /**
* Estimate the load percentage which is the max percentage of all resource usages.
*/
private void estimateLoadPercentage() {
double cpuUsed = this.systemResourceUsage.cpu.usage;
double cpuAllocated = cpuUsageByMsgRate
* (this.allocatedQuota.getMsgRateIn() + this.allocatedQuota.getMsgRateOut());
double cpuPreAllocated = cpuUsageByMsgRate
* (this.preAllocatedQuota.getMsgRateIn() + this.preAllocatedQuota.getMsgRateOut());
this.allocatedLoadPercentageCPU = (this.systemResourceUsage.cpu.limit <= 0) ? 0
: Math.min(100, 100 * cpuAllocated / this.systemResourceUsage.cpu.limit);
this.estimatedLoadPercentageCPU = (this.systemResourceUsage.cpu.limit <= 0) ? 0
: Math.min(100,
100 * (Math.max(cpuUsed, cpuAllocated) + cpuPreAllocated) / this.systemResourceUsage.cpu.limit);
double memUsed = this.systemResourceUsage.memory.usage;
double memAllocated = this.allocatedQuota.getMemory();
double memPreAllocated = this.preAllocatedQuota.getMemory();
this.allocatedLoadPercentageMemory = (this.systemResourceUsage.memory.limit <= 0) ? 0
: Math.min(100, 100 * memAllocated / this.systemResourceUsage.memory.limit);
this.estimatedLoadPercentageMemory = (this.systemResourceUsage.memory.limit <= 0) ? 0
: Math.min(100, 100 * (Math.max(memUsed, memAllocated) + memPreAllocated)
/ this.systemResourceUsage.memory.limit);
double bandwidthInUsed = this.systemResourceUsage.bandwidthIn.usage;
double bandwidthInAllocated = this.allocatedQuota.getBandwidthIn() / KBITS_TO_BYTES;
double bandwidthInPreAllocated = this.preAllocatedQuota.getBandwidthIn() / KBITS_TO_BYTES;
this.allocatedLoadPercentageBandwidthIn = (this.systemResourceUsage.bandwidthIn.limit <= 0) ? 0
: Math.min(100, 100 * bandwidthInAllocated / this.systemResourceUsage.bandwidthIn.limit);
this.estimatedLoadPercentageBandwidthIn = (this.systemResourceUsage.bandwidthIn.limit <= 0) ? 0
: Math.min(100, 100 * (Math.max(bandwidthInUsed, bandwidthInAllocated) + bandwidthInPreAllocated)
/ this.systemResourceUsage.bandwidthIn.limit);
double bandwidthOutUsed = this.systemResourceUsage.bandwidthOut.usage;
double bandwidthOutAllocated = this.allocatedQuota.getBandwidthOut() / KBITS_TO_BYTES;
double bandwidthOutPreAllocated = this.preAllocatedQuota.getBandwidthOut() / KBITS_TO_BYTES;
this.allocatedLoadPercentageBandwidthOut = (this.systemResourceUsage.bandwidthOut.limit <= 0) ? 0
: Math.min(100, 100 * bandwidthOutAllocated / this.systemResourceUsage.bandwidthOut.limit);
this.estimatedLoadPercentageBandwidthOut = (this.systemResourceUsage.bandwidthOut.limit <= 0) ? 0
: Math.min(100, 100 * (Math.max(bandwidthOutUsed, bandwidthOutAllocated) + bandwidthOutPreAllocated)
/ this.systemResourceUsage.bandwidthOut.limit);
double directMemoryUsed = this.systemResourceUsage.directMemory.usage;
this.estimatedLoadPercentageDirectMemory = (this.systemResourceUsage.directMemory.limit <= 0) ? 0
: Math.min(100, 100 * directMemoryUsed / this.systemResourceUsage.directMemory.limit);
this.estimatedLoadPercentage = Math.max(this.estimatedLoadPercentageCPU,
Math.max(this.estimatedLoadPercentageMemory, Math.max(this.estimatedLoadPercentageDirectMemory,
Math.max(this.estimatedLoadPercentageBandwidthIn, this.estimatedLoadPercentageBandwidthOut))));
this.estimatedMessageRate = this.allocatedQuota.getMsgRateIn() + this.allocatedQuota.getMsgRateOut()
+ this.preAllocatedQuota.getMsgRateIn() + this.preAllocatedQuota.getMsgRateOut();
} | 3.68 |
framework_RichTextAreaElement_setValue | /**
* Set value of the field element.
*
* @param chars
* new value of the field
* @since 8.4
*/
public void setValue(CharSequence chars) throws ReadOnlyException {
if (isReadOnly()) {
throw new ReadOnlyException();
}
JavascriptExecutor executor = (JavascriptExecutor) getDriver();
executor.executeScript("var bodyE=arguments[0].contentDocument.body;\n"
+ "bodyE.innerHTML=arguments[1]; \n"
+ "var ev = document.createEvent('HTMLEvents');\n"
+ "ev.initEvent('change', true, false); \n"
+ "bodyE.dispatchEvent(ev);", getEditorIframe(), chars);
} | 3.68 |
framework_VDebugWindow_getTimingTooltip | /**
* Gets a nicely formatted string with timing information suitable for
* display in tooltips.
*
* @param sinceStart
* @param sinceReset
* @return
*/
static String getTimingTooltip(int sinceStart, int sinceReset) {
String title = formatDuration(sinceStart) + " since start";
title += ", " + formatDuration(sinceReset) + " since timer reset";
title += " @ "
+ DateTimeFormat.getFormat("HH:mm:ss.SSS").format(new Date());
return title;
} | 3.68 |
flink_NFACompiler_getTrueFunction | /** @return A true function extended with a stop (until) condition if necessary. */
@SuppressWarnings("unchecked")
private IterativeCondition<T> getTrueFunction() {
IterativeCondition<T> trueCondition = BooleanConditions.trueFunction();
if (currentGroupPattern != null && currentGroupPattern.getUntilCondition() != null) {
trueCondition =
extendWithUntilCondition(
trueCondition,
(IterativeCondition<T>) currentGroupPattern.getUntilCondition(),
true);
}
return trueCondition;
} | 3.68 |
hadoop_AbfsOutputStream_hflush | /** Flush out the data in client's user buffer. After the return of
* this call, new readers will see the data.
* @throws IOException if any error occurs
*/
@Override
public void hflush() throws IOException {
if (supportFlush) {
flushInternal(false);
}
} | 3.68 |
morf_SelectStatementBuilder_useIndex | /**
* If supported by the dialect, hints to the database that a particular index should be used
* in the query, but places no obligation on the database to do so.
*
* <p>In general, as with all query plan modification, <strong>do not use this unless you know
* exactly what you are doing</strong>.</p>
*
* <p>As for all query plan modification (see also {@link #optimiseForRowCount(int)}
* and {@link #useImplicitJoinOrder()}): where supported on the target database, these directives
* applied in the SQL in the order they are called on {@link SelectStatement}. This usually
* affects their precedence or relative importance, depending on the platform.</p>
*
* @param table The table whose index to use.
* @param indexName The name of the index to use.
* @return this, for method chaining.
*/
public SelectStatementBuilder useIndex(TableReference table, String indexName) {
this.hints.add(new UseIndex(table, indexName));
return this;
} | 3.68 |
framework_NativeButtonIconAndText_getTicketNumber | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber()
*/
@Override
protected Integer getTicketNumber() {
return 12780;
} | 3.68 |
framework_ColorPickerTestUI_updateDisplay | // This is called whenever a colorpicker popup is closed
/**
* Update display.
*
* @param fg
* the fg
* @param bg
* the bg
*/
public void updateDisplay(Color fg, Color bg) {
java.awt.Color awtFg = new java.awt.Color(fg.getRed(), fg.getGreen(),
fg.getBlue());
java.awt.Color awtBg = new java.awt.Color(bg.getRed(), bg.getGreen(),
bg.getBlue());
StreamResource.StreamSource imagesource = new MyImageSource(awtFg,
awtBg);
Date now = new Date();
SimpleDateFormat format = new SimpleDateFormat("hhmmss");
StreamResource imageresource = new StreamResource(imagesource,
"myimage" + format.format(now) + ".png");
imageresource.setCacheTime(0);
display.setSource(imageresource);
} | 3.68 |
flink_TaskManagerLocation_getFQDNHostname | /**
* Returns the fully-qualified domain name of the TaskManager provided by {@link
* #hostNameSupplier}.
*
* @return The fully-qualified domain name of the TaskManager.
*/
public String getFQDNHostname() {
return hostNameSupplier.getFqdnHostName();
} | 3.68 |
hbase_MiniHBaseCluster_countServedRegions | /**
 * Counts the total number of regions being served by the currently online region servers by
* asking each how many regions they have. Does not look at hbase:meta at all. Count includes
* catalog tables.
* @return number of regions being served by all region servers
*/
public long countServedRegions() {
long count = 0;
for (JVMClusterUtil.RegionServerThread rst : getLiveRegionServerThreads()) {
count += rst.getRegionServer().getNumberOfOnlineRegions();
}
return count;
} | 3.68 |
flink_CustomSinkOperatorUidHashes_setWriterUidHash | /**
* Sets the uid hash of the writer operator used to recover state.
*
* @param writerUidHash uid hash denoting writer operator
* @return {@link SinkOperatorUidHashesBuilder}
*/
public SinkOperatorUidHashesBuilder setWriterUidHash(String writerUidHash) {
this.writerUidHash = writerUidHash;
return this;
} | 3.68 |
framework_Navigator_parseStateParameterMap | /**
* Parses the state parameter to a map using the given separator string.
*
* @param separator
* the string (typically one character) used to separate values
* from each other
* @return The navigation state as Map<String, String>.
* @since 8.1
*/
protected Map<String, String> parseStateParameterMap(String separator) {
if (getState() == null || getState().isEmpty()) {
return Collections.emptyMap();
}
String state = getState();
int viewSeparatorLocation = state.indexOf(DEFAULT_VIEW_SEPARATOR);
String parameterString;
if (viewSeparatorLocation == -1) {
parameterString = "";
} else {
parameterString = state.substring(viewSeparatorLocation + 1,
state.length());
}
return parseParameterStringToMap(parameterString, separator);
} | 3.68 |
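A rough illustration of the splitting logic above, assuming DEFAULT_VIEW_SEPARATOR is "/" and that parseParameterStringToMap splits first on the given separator and then on "=" (both assumptions; the real Vaadin helpers may differ in edge cases):

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class StateParameterDemo {
    static Map<String, String> parseParameterStringToMap(String parameterString, String separator) {
        Map<String, String> map = new LinkedHashMap<>();
        if (parameterString.isEmpty()) {
            return map;
        }
        for (String pair : parameterString.split(separator)) {
            int eq = pair.indexOf('=');
            // A value-less parameter is stored with an empty string value.
            map.put(eq < 0 ? pair : pair.substring(0, eq), eq < 0 ? "" : pair.substring(eq + 1));
        }
        return map;
    }

    public static void main(String[] args) {
        String state = "orders/id=42&mode=edit";          // navigation state: view name + parameters
        String parameterString = state.substring(state.indexOf('/') + 1);
        System.out.println(parseParameterStringToMap(parameterString, "&"));
        // {id=42, mode=edit}
    }
}
```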
hbase_ChecksumUtil_verifyChunkedSums | /**
* Like the hadoop's {@link DataChecksum#verifyChunkedSums(ByteBuffer, ByteBuffer, String, long)},
* this method will also verify checksum of each chunk in data. the difference is: this method can
* accept {@link ByteBuff} as arguments, we can not add it in hadoop-common so defined here.
* @param dataChecksum to calculate the checksum.
* @param data as the input
* @param checksums to compare
* @param pathName indicate that the data is read from which file.
* @return a flag indicate the checksum match or mismatch.
* @see org.apache.hadoop.util.DataChecksum#verifyChunkedSums(ByteBuffer, ByteBuffer, String,
* long)
*/
private static boolean verifyChunkedSums(DataChecksum dataChecksum, ByteBuff data,
ByteBuff checksums, String pathName) {
    // Almost all of the HFile blocks are about 64KB, and each would be a SingleByteBuff, so use
// Hadoop's verify checksum directly, because it'll use the native checksum, which has no extra
// byte[] allocation or copying. (HBASE-21917)
if (data instanceof SingleByteBuff && checksums instanceof SingleByteBuff) {
      // the checksums ByteBuff must also be a SingleByteBuff because it's duplicated from data.
ByteBuffer dataBB = (ByteBuffer) (data.nioByteBuffers()[0]).duplicate()
.position(data.position()).limit(data.limit());
ByteBuffer checksumBB = (ByteBuffer) (checksums.nioByteBuffers()[0]).duplicate()
.position(checksums.position()).limit(checksums.limit());
try {
dataChecksum.verifyChunkedSums(dataBB, checksumBB, pathName, 0);
return true;
} catch (ChecksumException e) {
return false;
}
}
    // If the block is a MultiByteBuff, we use a small byte[] to update the checksum many times for
    // reducing GC pressure. It's a rare case.
int checksumTypeSize = dataChecksum.getChecksumType().size;
if (checksumTypeSize == 0) {
return true;
}
// we have 5 checksum type now: NULL,DEFAULT,MIXED,CRC32,CRC32C. the former three need 0 byte,
// and the other two need 4 bytes.
assert checksumTypeSize == 4;
int bytesPerChecksum = dataChecksum.getBytesPerChecksum();
int startDataPos = data.position();
data.mark();
checksums.mark();
try {
      // allocate a small buffer for reducing young GC (HBASE-21917), and copy 256 bytes from the
      // ByteBuff to update the checksum each time. If we upgrade to a future JDK and hadoop
      // version which support DataChecksum#update(ByteBuffer), we won't need to update the checksum
      // multiple times.
byte[] buf = new byte[CHECKSUM_BUF_SIZE];
byte[] sum = new byte[checksumTypeSize];
while (data.remaining() > 0) {
int n = Math.min(data.remaining(), bytesPerChecksum);
checksums.get(sum);
dataChecksum.reset();
for (int remain = n, len; remain > 0; remain -= len) {
// Copy 256 bytes from ByteBuff to update the checksum each time, if the remaining
// bytes is less than 256, then just update the remaining bytes.
len = Math.min(CHECKSUM_BUF_SIZE, remain);
data.get(buf, 0, len);
dataChecksum.update(buf, 0, len);
}
int calculated = (int) dataChecksum.getValue();
int stored = (sum[0] << 24 & 0xff000000) | (sum[1] << 16 & 0xff0000)
| (sum[2] << 8 & 0xff00) | (sum[3] & 0xff);
if (calculated != stored) {
if (LOG.isTraceEnabled()) {
long errPos = data.position() - startDataPos - n;
LOG.trace("Checksum error: {} at {} expected: {} got: {}", pathName, errPos, stored,
calculated);
}
return false;
}
}
} finally {
data.reset();
checksums.reset();
}
return true;
} | 3.68 |
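The chunked-checksum idea itself is simple: one small checksum per bytesPerChecksum-sized slice of the data, stored back to back. A minimal sketch with java.util.zip.CRC32, using plain arrays instead of ByteBuff and CRC32 standing in for whatever checksum type is configured:

```java
import java.nio.ByteBuffer;
import java.util.zip.CRC32;

public class ChunkedChecksumSketch {
    static byte[] computeChunkedSums(byte[] data, int bytesPerChecksum) {
        int chunks = (data.length + bytesPerChecksum - 1) / bytesPerChecksum;
        ByteBuffer sums = ByteBuffer.allocate(chunks * 4);       // 4 bytes per CRC32 value
        CRC32 crc = new CRC32();
        for (int off = 0; off < data.length; off += bytesPerChecksum) {
            int len = Math.min(bytesPerChecksum, data.length - off);
            crc.reset();
            crc.update(data, off, len);
            sums.putInt((int) crc.getValue());
        }
        return sums.array();
    }

    static boolean verifyChunkedSums(byte[] data, byte[] checksums, int bytesPerChecksum) {
        ByteBuffer stored = ByteBuffer.wrap(checksums);
        CRC32 crc = new CRC32();
        for (int off = 0; off < data.length; off += bytesPerChecksum) {
            int len = Math.min(bytesPerChecksum, data.length - off);
            crc.reset();
            crc.update(data, off, len);
            if ((int) crc.getValue() != stored.getInt()) {
                return false;                                    // checksum mismatch for this chunk
            }
        }
        return true;
    }

    public static void main(String[] args) {
        byte[] block = "some block payload that spans more than one chunk".getBytes();
        byte[] sums = computeChunkedSums(block, 16);
        System.out.println(verifyChunkedSums(block, sums, 16));  // true
        block[3] ^= 1;                                           // corrupt one byte
        System.out.println(verifyChunkedSums(block, sums, 16));  // false
    }
}
```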
framework_BrowserWindowOpener_setResource | /**
* Sets the provided {@code resource} for this instance. The
* {@code resource} will be opened in a new browser window/tab when the
* extended component is clicked.
*
* @since 7.4
*
* @param resource
* resource to open
*/
public void setResource(Resource resource) {
setResource(BrowserWindowOpenerState.locationResource, resource);
} | 3.68 |
hbase_MiniBatchOperationInProgress_getOperation | /** Returns the operation (Mutation) at the specified position. */
public T getOperation(int index) {
return operations[getAbsoluteIndex(index)];
} | 3.68 |
hadoop_MultipleInputs_getInputFormatMap | /**
* Retrieves a map of {@link Path}s to the {@link InputFormat} class
* that should be used for them.
*
   * @param conf The configuration of the job
* @see #addInputPath(JobConf, Path, Class)
* @return A map of paths to inputformats for the job
*/
static Map<Path, InputFormat> getInputFormatMap(JobConf conf) {
Map<Path, InputFormat> m = new HashMap<Path, InputFormat>();
String[] pathMappings = conf.get("mapreduce.input.multipleinputs.dir.formats").split(",");
for (String pathMapping : pathMappings) {
String[] split = pathMapping.split(";");
InputFormat inputFormat;
try {
inputFormat = (InputFormat) ReflectionUtils.newInstance(conf
.getClassByName(split[1]), conf);
} catch (ClassNotFoundException e) {
throw new RuntimeException(e);
}
m.put(new Path(split[0]), inputFormat);
}
return m;
} | 3.68 |
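The string being parsed above is the comma-separated list written by addInputPath, where each entry has the shape "path;inputFormatClassName". A tiny hedged demo of that parsing with plain strings (paths and property value are illustrative; no Hadoop classes involved, whereas the real code reflectively instantiates the class name):

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class MultipleInputsFormatDemo {
    public static void main(String[] args) {
        // Example value of "mapreduce.input.multipleinputs.dir.formats".
        String conf = "hdfs:/data/text;org.apache.hadoop.mapred.TextInputFormat,"
                + "hdfs:/data/seq;org.apache.hadoop.mapred.SequenceFileInputFormat";
        Map<String, String> pathToFormatClass = new LinkedHashMap<>();
        for (String pathMapping : conf.split(",")) {
            String[] split = pathMapping.split(";");
            pathToFormatClass.put(split[0], split[1]);   // real code: ReflectionUtils.newInstance(split[1])
        }
        pathToFormatClass.forEach((p, f) -> System.out.println(p + " -> " + f));
    }
}
```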
hadoop_RoleModel_resource | /**
* Given a path, return the S3 resource to it.
* If {@code isDirectory} is true, a "/" is added to the path.
* This is critical when adding wildcard permissions under
* a directory, and also needed when locking down dir-as-file
* and dir-as-directory-marker access.
* @param path a path
* @param isDirectory is this a directory?
* @param addWildcard add a * to the tail of the key?
* @return a resource for a statement.
*/
public static String resource(Path path,
final boolean isDirectory,
boolean addWildcard) {
String key = pathToKey(path);
if (isDirectory && !key.isEmpty()) {
key = key + "/";
}
return resource(path.toUri().getHost(), key, addWildcard);
} | 3.68 |
framework_XhrConnection_setPayload | /**
* Sets the payload which was sent to the server.
*
* @param payload
* the payload which was sent to the server
*/
public void setPayload(JsonObject payload) {
this.payload = payload;
} | 3.68 |
flink_TypeInferenceUtil_adaptArguments | /**
* Adapts the call's argument if necessary.
*
* <p>This includes casts that need to be inserted, reordering of arguments (*), or insertion of
* default values (*) where (*) is future work.
*/
public static CallContext adaptArguments(
TypeInference typeInference, CallContext callContext, @Nullable DataType outputType) {
return adaptArguments(typeInference, callContext, outputType, true);
} | 3.68 |
flink_SortOperationFactory_createSort | /**
* Creates a valid {@link SortQueryOperation}.
*
* <p><b>NOTE:</b> If the collation is not explicitly specified for an expression, the
* expression is wrapped in a default ascending order. If no expression is specified, the result
* is not sorted but only limited.
*
* @param orders expressions describing order
* @param child relational expression on top of which to apply the sort operation
* @param postResolverFactory factory for creating resolved expressions
* @return valid sort operation
*/
QueryOperation createSort(
List<ResolvedExpression> orders,
QueryOperation child,
PostResolverFactory postResolverFactory) {
final OrderWrapper orderWrapper = new OrderWrapper(postResolverFactory);
List<ResolvedExpression> convertedOrders =
orders.stream().map(f -> f.accept(orderWrapper)).collect(Collectors.toList());
return new SortQueryOperation(convertedOrders, child);
} | 3.68 |
flink_HybridShuffleConfiguration_getNumRetainedInMemoryRegionsMax | /** Max number of hybrid retained regions in memory. */
public long getNumRetainedInMemoryRegionsMax() {
return numRetainedInMemoryRegionsMax;
} | 3.68 |
morf_H2Dialect_getColumnRepresentation | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getColumnRepresentation(org.alfasoftware.morf.metadata.DataType,
* int, int)
*/
@Override
protected String getColumnRepresentation(DataType dataType, int width, int scale) {
switch (dataType) {
case STRING:
return width == 0 ? "VARCHAR" : String.format("VARCHAR(%d)", width);
case DECIMAL:
return width == 0 ? "DECIMAL" : String.format("DECIMAL(%d,%d)", width, scale);
case DATE:
return "DATE";
case BOOLEAN:
return "BIT";
case BIG_INTEGER:
return "BIGINT";
case INTEGER:
return "INTEGER";
case BLOB:
return "LONGVARBINARY";
case CLOB:
return "NCLOB";
default:
throw new UnsupportedOperationException("Cannot map column with type [" + dataType + "]");
}
} | 3.68 |
hbase_Client_executeURI | /**
* Execute a transaction method given a complete URI.
* @param method the transaction method
* @param headers HTTP header values to send
* @param uri a properly urlencoded URI
   * @return the HTTP response
*/
public HttpResponse executeURI(HttpUriRequest method, Header[] headers, String uri)
throws IOException {
// method.setURI(new URI(uri, true));
for (Map.Entry<String, String> e : extraHeaders.entrySet()) {
method.addHeader(e.getKey(), e.getValue());
}
if (headers != null) {
for (Header header : headers) {
method.addHeader(header);
}
}
long startTime = EnvironmentEdgeManager.currentTime();
if (resp != null) EntityUtils.consumeQuietly(resp.getEntity());
resp = httpClient.execute(method);
if (resp.getStatusLine().getStatusCode() == HttpStatus.SC_UNAUTHORIZED) {
// Authentication error
LOG.debug("Performing negotiation with the server.");
negotiate(method, uri);
resp = httpClient.execute(method);
}
long endTime = EnvironmentEdgeManager.currentTime();
if (LOG.isTraceEnabled()) {
LOG.trace(method.getMethod() + " " + uri + " " + resp.getStatusLine().getStatusCode() + " "
+ resp.getStatusLine().getReasonPhrase() + " in " + (endTime - startTime) + " ms");
}
return resp;
} | 3.68 |
hbase_ProcedureExecutor_executeRollback | /**
* Execute the rollback of the procedure step. It updates the store with the new state (stack
   * index) or will completely remove the procedure in case it is a child.
*/
private LockState executeRollback(Procedure<TEnvironment> proc) {
try {
proc.doRollback(getEnvironment());
} catch (IOException e) {
LOG.debug("Roll back attempt failed for {}", proc, e);
return LockState.LOCK_YIELD_WAIT;
} catch (InterruptedException e) {
handleInterruptedException(proc, e);
return LockState.LOCK_YIELD_WAIT;
} catch (Throwable e) {
// Catch NullPointerExceptions or similar errors...
LOG.error(HBaseMarkers.FATAL, "CODE-BUG: Uncaught runtime exception for " + proc, e);
}
// allows to kill the executor before something is stored to the wal.
// useful to test the procedure recovery.
if (testing != null && testing.shouldKillBeforeStoreUpdate()) {
String msg = "TESTING: Kill before store update";
LOG.debug(msg);
stop();
throw new RuntimeException(msg);
}
cleanupAfterRollbackOneStep(proc);
return LockState.LOCK_ACQUIRED;
} | 3.68 |
hbase_MasterFileSystem_checkRootDir | /**
   * Check the rootdir. Make sure it is wholesome and exists before returning, bootstrapping it if
   * needed by populating the directory with the necessary bootup files.
*/
private void checkRootDir(final Path rd, final Configuration c, final FileSystem fs)
throws IOException {
int threadWakeFrequency = c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000);
// If FS is in safe mode wait till out of it.
FSUtils.waitOnSafeMode(c, threadWakeFrequency);
// Filesystem is good. Go ahead and check for hbase.rootdir.
FileStatus status;
try {
status = fs.getFileStatus(rd);
} catch (FileNotFoundException e) {
status = null;
}
int versionFileWriteAttempts = c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
try {
if (status == null) {
if (!fs.mkdirs(rd)) {
throw new IOException("Can not create configured '" + HConstants.HBASE_DIR + "' " + rd);
}
// DFS leaves safe mode with 0 DNs when there are 0 blocks.
// We used to handle this by checking the current DN count and waiting until
// it is nonzero. With security, the check for datanode count doesn't work --
// it is a privileged op. So instead we adopt the strategy of the jobtracker
// and simply retry file creation during bootstrap indefinitely. As soon as
// there is one datanode it will succeed. Permission problems should have
// already been caught by mkdirs above.
FSUtils.setVersion(fs, rd, threadWakeFrequency, versionFileWriteAttempts);
} else {
if (!status.isDirectory()) {
throw new IllegalArgumentException(
"Configured '" + HConstants.HBASE_DIR + "' " + rd + " is not a directory.");
}
// as above
FSUtils.checkVersion(fs, rd, true, threadWakeFrequency, versionFileWriteAttempts);
}
} catch (DeserializationException de) {
LOG.error(HBaseMarkers.FATAL, "Please fix invalid configuration for '{}' {}",
HConstants.HBASE_DIR, rd, de);
throw new IOException(de);
} catch (IllegalArgumentException iae) {
LOG.error(HBaseMarkers.FATAL, "Please fix invalid configuration for '{}' {}",
HConstants.HBASE_DIR, rd, iae);
throw iae;
}
// Make sure cluster ID exists
if (!FSUtils.checkClusterIdExists(fs, rd, threadWakeFrequency)) {
FSUtils.setClusterId(fs, rd, new ClusterId(), threadWakeFrequency);
}
clusterId = FSUtils.getClusterId(fs, rd);
} | 3.68 |
pulsar_ManagedCursorImpl_trySetStateToClosing | /**
     * Try to set {@link #state} to {@link State#Closing}.
* @return false if the {@link #state} already is {@link State#Closing} or {@link State#Closed}.
*/
private boolean trySetStateToClosing() {
final AtomicBoolean notClosing = new AtomicBoolean(false);
STATE_UPDATER.updateAndGet(this, state -> {
switch (state){
case Closing:
case Closed: {
notClosing.set(false);
return state;
}
default: {
notClosing.set(true);
return State.Closing;
}
}
});
return notClosing.get();
} | 3.68 |
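The pattern above, an atomic updateAndGet plus a flag captured from inside the update function, can be sketched independently of the cursor class. A minimal stand-alone version with AtomicReference and a plain enum (names are illustrative):

```java
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

public class ClosingStateDemo {
    enum State { Open, Closing, Closed }

    private final AtomicReference<State> state = new AtomicReference<>(State.Open);

    /** Returns false if the state already was Closing or Closed. */
    boolean trySetStateToClosing() {
        AtomicBoolean transitioned = new AtomicBoolean(false);
        // The update function may run more than once under contention; that is fine here
        // because it only overwrites the flag with the value matching the final outcome.
        state.updateAndGet(current -> {
            switch (current) {
                case Closing:
                case Closed:
                    transitioned.set(false);
                    return current;            // keep the closing/terminal state unchanged
                default:
                    transitioned.set(true);
                    return State.Closing;      // atomically move Open -> Closing
            }
        });
        return transitioned.get();
    }

    public static void main(String[] args) {
        ClosingStateDemo demo = new ClosingStateDemo();
        System.out.println(demo.trySetStateToClosing()); // true  (Open -> Closing)
        System.out.println(demo.trySetStateToClosing()); // false (already Closing)
    }
}
```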
dubbo_AbstractConfig_computeAttributedMethods | /**
     * Compute attributed getter methods; subclasses can override this method to add/remove attributed methods.
*
* @return
*/
protected List<Method> computeAttributedMethods() {
Class<? extends AbstractConfig> cls = this.getClass();
BeanInfo beanInfo = getBeanInfo(cls);
List<Method> methods = new ArrayList<>(beanInfo.getMethodDescriptors().length);
for (MethodDescriptor methodDescriptor : beanInfo.getMethodDescriptors()) {
Method method = methodDescriptor.getMethod();
if (MethodUtils.isGetter(method) || isParametersGetter(method)) {
// filter non attribute
Parameter parameter = method.getAnnotation(Parameter.class);
if (parameter != null && !parameter.attribute()) {
continue;
}
String propertyName = calculateAttributeFromGetter(method.getName());
// filter non-writable property, exclude non property methods, fix #4225
if (!isWritableProperty(beanInfo, propertyName)) {
continue;
}
methods.add(method);
}
}
return methods;
} | 3.68 |
hbase_MetricsConnection_incrMetaCacheHit | /** Increment the number of meta cache hits. */
public void incrMetaCacheHit() {
metaCacheHits.inc();
} | 3.68 |
flink_IOUtils_copyBytes | /**
* Copies from one stream to another.
*
* @param in InputStream to read from
* @param out OutputStream to write to
* @param close whether or not close the InputStream and OutputStream at the end. The streams
* are closed in the finally clause.
* @throws IOException thrown if an I/O error occurs while copying
*/
public static void copyBytes(final InputStream in, final OutputStream out, final boolean close)
throws IOException {
copyBytes(in, out, BLOCKSIZE, close);
} | 3.68 |
flink_MetricGroup_histogram | /**
* Registers a new {@link Histogram} with Flink.
*
* @param name name of the histogram
* @param histogram histogram to register
* @param <H> histogram type
* @return the registered histogram
*/
default <H extends Histogram> H histogram(int name, H histogram) {
return histogram(String.valueOf(name), histogram);
} | 3.68 |
hadoop_FederationStateStoreFacade_getPolicyConfiguration | /**
* Returns the {@link SubClusterPolicyConfiguration} for the specified queue.
*
* @param queue the queue whose policy is required
* @return the corresponding configured policy, or {@code null} if there is no
* mapping for the queue
* @throws YarnException if the call to the state store is unsuccessful
*/
public SubClusterPolicyConfiguration getPolicyConfiguration(final String queue)
throws YarnException {
if (federationCache.isCachingEnabled()) {
return getPoliciesConfigurations().get(queue);
} else {
GetSubClusterPolicyConfigurationRequest request =
GetSubClusterPolicyConfigurationRequest.newInstance(queue);
GetSubClusterPolicyConfigurationResponse response =
stateStore.getPolicyConfiguration(request);
if (response == null) {
return null;
} else {
return response.getPolicyConfiguration();
}
}
} | 3.68 |
morf_OracleDialect_getSqlForOrderByFieldNullValueHandling | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForOrderByFieldNullValueHandling(org.alfasoftware.morf.sql.element.FieldReference)
*/
@Override
protected String getSqlForOrderByFieldNullValueHandling(FieldReference orderByField) {
if (orderByField.getNullValueHandling().isPresent()) {
switch (orderByField.getNullValueHandling().get()) {
case FIRST:
return " " + NULLS_FIRST;
case LAST:
return " " + NULLS_LAST;
case NONE:
default:
return "";
}
} else if (orderByField.getDirection() != null) {
return ASCENDING.equals(orderByField.getDirection()) ? " " + NULLS_FIRST : " " + NULLS_LAST;
} else {
return " " + defaultNullOrder();
}
} | 3.68 |
framework_AbstractRemoteDataSource_onDropFromCache | /**
* A hook that can be overridden to do something whenever a row has been
* dropped from the cache. DataSource no longer has anything in the given
* index.
*
* @since 7.6
* @param rowIndex
* the index of the dropped row
* @param removed
* the removed row object
*/
protected void onDropFromCache(int rowIndex, T removed) {
// Call old version as a fallback (someone might have used it)
onDropFromCache(rowIndex);
} | 3.68 |
zxing_BitMatrix_toString | /**
* @param setString representation of a set bit
* @param unsetString representation of an unset bit
* @param lineSeparator newline character in string representation
* @return string representation of entire matrix utilizing given strings and line separator
* @deprecated call {@link #toString(String,String)} only, which uses \n line separator always
*/
@Deprecated
public String toString(String setString, String unsetString, String lineSeparator) {
return buildToString(setString, unsetString, lineSeparator);
} | 3.68 |
hmily_HmilyTransactionHolder_remove | /**
* clean threadLocal help gc.
*/
public void remove() {
CURRENT.remove();
} | 3.68 |
hbase_Query_getAuthorizations | /** Returns the authorizations this Query is associated with. */
public Authorizations getAuthorizations() throws DeserializationException {
byte[] authorizationsBytes = this.getAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY);
if (authorizationsBytes == null) return null;
return ProtobufUtil.toAuthorizations(authorizationsBytes);
} | 3.68 |
morf_Cast_getDataType | /**
* @return the dataType
*/
public DataType getDataType() {
return dataType;
} | 3.68 |
hudi_HoodieFlinkWriteClient_initMetadataTable | /**
* Initialized the metadata table on start up, should only be called once on driver.
*/
public void initMetadataTable() {
((HoodieFlinkTableServiceClient<T>) tableServiceClient).initMetadataTable();
} | 3.68 |
framework_Table_removeColumnResizeListener | /**
* Removes a column resize listener from the Table.
*
* @param listener
* The listener to remove
*/
public void removeColumnResizeListener(ColumnResizeListener listener) {
removeListener(TableConstants.COLUMN_RESIZE_EVENT_ID,
ColumnResizeEvent.class, listener);
} | 3.68 |
rocketmq-connect_AbstractConfigManagementService_resumeConnector | /**
* resume connector
*
* @param connectorName
*/
@Override
public void resumeConnector(String connectorName) {
if (!connectorKeyValueStore.containsKey(connectorName)) {
throw new ConnectException("Connector [" + connectorName + "] does not exist");
}
Struct connectTargetState = new Struct(TARGET_STATE_V0);
connectTargetState.put(FIELD_STATE, TargetState.STARTED.name());
connectTargetState.put(FIELD_EPOCH, System.currentTimeMillis());
byte[] serializedTargetState = converter.fromConnectData(topic, TARGET_STATE_V0, connectTargetState);
log.debug("Writing target state {} for connector {}", TargetState.STARTED.name(), connectorName);
notify(TARGET_STATE_KEY(connectorName), serializedTargetState);
} | 3.68 |
flink_MetricListener_getMetricGroup | /**
* Get the root metric group of this listener. Note that only metrics and groups registered
* under this group will be listened.
*
* @return Root metric group
*/
public MetricGroup getMetricGroup() {
return this.rootMetricGroup;
} | 3.68 |
flink_MultipleParameterTool_getMultiParameter | /**
* Returns the Collection of String values for the given key. If the key does not exist it will
* return null.
*/
public Collection<String> getMultiParameter(String key) {
addToDefaults(key, null);
unrequestedParameters.remove(key);
return data.getOrDefault(key, null);
} | 3.68 |
hadoop_ResourceRequest_numContainers | /**
* Set the <code>numContainers</code> of the request.
* @see ResourceRequest#setNumContainers(int)
* @param numContainers <code>numContainers</code> of the request
* @return {@link ResourceRequestBuilder}
*/
@Public
@Stable
public ResourceRequestBuilder numContainers(int numContainers) {
resourceRequest.setNumContainers(numContainers);
return this;
} | 3.68 |
morf_RemoveIndex_isApplied | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.upgrade.SchemaChange#isApplied(Schema, ConnectionResources)
*/
@Override
public boolean isApplied(Schema schema, ConnectionResources database) {
if (!schema.tableExists(tableName))
return false;
Table table = schema.getTable(tableName);
SchemaHomology homology = new SchemaHomology();
for (Index index : table.indexes()) {
if (homology.indexesMatch(index, indexToBeRemoved)) {
return false;
}
}
return true;
} | 3.68 |
hmily_PropertyName_getLastElement | /**
* Gets last element.
*
* @return the last element
*/
public String getLastElement() {
int elementSize = getElementSize();
return elementSize != 0 ? getElement(elementSize - 1) : "";
} | 3.68 |
hudi_SparkRecordMergingUtils_getCachedFieldIdToFieldMapping | /**
* @param avroSchema Avro schema.
* @return The field ID to {@link StructField} instance mapping.
*/
public static Map<Integer, StructField> getCachedFieldIdToFieldMapping(Schema avroSchema) {
return FIELD_ID_TO_FIELD_MAPPING_CACHE.computeIfAbsent(avroSchema, schema -> {
StructType structType = HoodieInternalRowUtils.getCachedSchema(schema);
Map<Integer, StructField> schemaFieldIdMapping = new HashMap<>();
int fieldId = 0;
for (StructField field : structType.fields()) {
schemaFieldIdMapping.put(fieldId, field);
fieldId++;
}
return schemaFieldIdMapping;
});
} | 3.68 |
hbase_EventHandler_handleException | /**
* Event exception handler, may be overridden
* @param t Throwable object
*/
protected void handleException(Throwable t) {
String msg = "Caught throwable while processing event " + eventType;
LOG.error(msg, t);
if (server != null && (t instanceof Error || t instanceof RuntimeException)) {
server.abort(msg, t);
}
} | 3.68 |