name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
hbase_Log4jUtils_disableZkAndClientLoggers | /**
* Disables Zk- and HBase client logging
*/
public static void disableZkAndClientLoggers() {
// disable ZooKeeper logging so it does not clutter command output
setLogLevel("org.apache.zookeeper", "OFF");
// disable HBase ZooKeeper tool logging so it does not clutter command output
setLogLevel("org.apache.hadoop.hbase.zookeeper", "OFF");
// disable HBase client logging so it does not clutter command output
setLogLevel("org.apache.hadoop.hbase.client", "OFF");
} | 3.68 |
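The Log4jUtils.setLogLevel helper itself is not part of this snippet. As a rough sketch, assuming Log4j 2 is the backing framework, a helper with that shape could be built on Configurator.setLevel; this is an illustration, not the HBase implementation:

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

// Hypothetical stand-in for the setLogLevel helper used above; not HBase's actual code.
final class LoggerSilencer {
    static void setLogLevel(String loggerName, String levelName) {
        // Level.toLevel maps "OFF" to Level.OFF (and falls back to DEBUG for unknown names).
        Configurator.setLevel(loggerName, Level.toLevel(levelName));
    }
}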
morf_SelectStatement_optimiseForRowCount | /**
* If supported by the dialect, requests that the database arranges the query plan such that the
* first <code>rowCount</code> rows are returned as fast as possible, even if the statistics
* indicate that this means the full result set will take longer to be returned.
*
* <p>Note that this is <em>not</em> to be confused with directives which limit the number of
* rows to be returned. It does not limit the number of rows, only optimises the query plan
* to target returning that number as fast as possible.</p>
*
* <p>In general, as with all query plan modification, <strong>do not use this unless you know
* exactly what you are doing</strong>. This is designed to support the case where the
* {@link SelectStatement} is actually a driver query, and for each record, other processing
* is being performed. Since the processing as a whole may involve not just the database
* but also the application server cluster, secondary databases or external systems, we are
* making the educated guess that by trying to get processing running in parallel across
* all these systems as soon as possible, we will achieve a higher overall throughput than
* having these other systems waiting around until the database has finished doing
* preparatory work to try and optimise its own throughput.</p>
*
* <p>As for all query plan modification (see also {@link #useIndex(TableReference, String)}
* and {@link #useImplicitJoinOrder()}): where supported on the target database, these directives
* are applied in the SQL in the order they are called on {@link SelectStatement}. This usually
* affects their precedence or relative importance, depending on the platform.</p>
*
* @param rowCount The number of rows for which to optimise the query plan.
* @return a new select statement with the change applied.
*/
public SelectStatement optimiseForRowCount(int rowCount) {
return copyOnWriteOrMutate(
(SelectStatementBuilder b) -> b.optimiseForRowCount(rowCount),
() -> this.hints.add(new OptimiseForRowCount(rowCount))
);
} | 3.68 |
hadoop_User_getName | /**
* Get the full name of the user.
*/
@Override
public String getName() {
return fullName;
} | 3.68 |
flink_RouteResult_param | /**
* Extracts the param in {@code pathParams} first, then falls back to the first matching param
* in {@code queryParams}.
*
* @return {@code null} if there's no match
*/
public String param(String name) {
String pathValue = pathParams.get(name);
return (pathValue == null) ? queryParam(name) : pathValue;
} | 3.68 |
hadoop_BufferedIOStatisticsOutputStream_getIOStatistics | /**
* Ask the inner stream for its IOStatistics.
* @return any IOStatistics offered by the inner stream.
*/
@Override
public IOStatistics getIOStatistics() {
return retrieveIOStatistics(out);
} | 3.68 |
hbase_GroupingTableMapper_setConf | /**
* Sets the configuration. This is used to set up the grouping details.
* @param configuration The configuration to set.
* @see org.apache.hadoop.conf.Configurable#setConf(org.apache.hadoop.conf.Configuration)
*/
@Override
public void setConf(Configuration configuration) {
this.conf = configuration;
String[] cols = conf.get(GROUP_COLUMNS, "").split(" ");
columns = new byte[cols.length][];
for (int i = 0; i < cols.length; i++) {
columns[i] = Bytes.toBytes(cols[i]);
}
} | 3.68 |
hadoop_LoadManifestsStage_executeStage | /**
* Load the manifests.
* @param arguments stage arguments
* @return the summary and a list of manifests.
* @throws IOException IO failure.
*/
@Override
protected LoadManifestsStage.Result executeStage(
final LoadManifestsStage.Arguments arguments) throws IOException {
EntryFileIO entryFileIO = new EntryFileIO(getStageConfig().getConf());
final Path manifestDir = getTaskManifestDir();
LOG.info("{}: Executing Manifest Job Commit with manifests in {}",
getName(),
manifestDir);
final Path entrySequenceData = arguments.getEntrySequenceData();
// the entry writer for queuing data.
entryWriter = entryFileIO.launchEntryWriter(
entryFileIO.createWriter(entrySequenceData),
arguments.queueCapacity);
try {
// sync fs before the list
msync(manifestDir);
// build a list of all task manifests successfully committed,
// which will break out if the writing is stopped (due to any failure)
final RemoteIterator<FileStatus> manifestFiles =
haltableRemoteIterator(listManifests(),
() -> entryWriter.isActive());
processAllManifests(manifestFiles);
maybeAddIOStatistics(getIOStatistics(), manifestFiles);
LOG.info("{}: Summary of {} manifests loaded in {}: {}",
getName(),
summaryInfo.manifestCount,
manifestDir,
summaryInfo);
// close cleanly
entryWriter.close();
// if anything failed, raise it.
entryWriter.maybeRaiseWriteException();
// collect any stats
} catch (EntryWriteException e) {
// something went wrong while writing.
// raise anything on the write thread,
entryWriter.maybeRaiseWriteException();
// falling back to that from the worker thread
throw e;
} finally {
// close which is a no-op if the clean close was invoked;
// it is not a no-op if something went wrong with reading/parsing/processing
// the manifests.
entryWriter.close();
}
final LoadedManifestData loadedManifestData = new LoadedManifestData(
new ArrayList<>(directories.values()), // new array to free up the map
entrySequenceData,
entryWriter.getCount());
return new LoadManifestsStage.Result(summaryInfo, loadedManifestData);
} | 3.68 |
pulsar_DateFormatter_format | /**
* @return a String representing a particular time instant
*/
public static String format(Instant instant) {
return DATE_FORMAT.format(instant);
} | 3.68 |
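The DATE_FORMAT constant is not included in the snippet. A minimal sketch of a formatter with the right shape is shown below; the exact pattern and time zone are assumptions, the only firm point being that a DateTimeFormatter must be bound to a zone before it can format an Instant:

import java.time.Instant;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;

// Assumed shape of the missing DATE_FORMAT constant: an ISO-8601-style pattern bound to the system zone.
final class DateFormatterSketch {
    private static final DateTimeFormatter DATE_FORMAT =
            DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSXXX").withZone(ZoneId.systemDefault());

    static String format(Instant instant) {
        return DATE_FORMAT.format(instant);
    }
}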
hadoop_MoveStep_setDestinationVolume | /**
* Sets destination volume.
*
* @param destinationVolume - volume
*/
public void setDestinationVolume(DiskBalancerVolume destinationVolume) {
this.destinationVolume = destinationVolume;
} | 3.68 |
flink_FileOutputFormat_initDefaultsFromConfiguration | /**
* Initialize defaults for output format. Needs to be a static method because it is configured
* for local cluster execution.
*
* @param configuration The configuration to load defaults from
*/
public static void initDefaultsFromConfiguration(Configuration configuration) {
final boolean overwrite = configuration.getBoolean(CoreOptions.FILESYTEM_DEFAULT_OVERRIDE);
DEFAULT_WRITE_MODE = overwrite ? WriteMode.OVERWRITE : WriteMode.NO_OVERWRITE;
final boolean alwaysCreateDirectory =
configuration.getBoolean(CoreOptions.FILESYSTEM_OUTPUT_ALWAYS_CREATE_DIRECTORY);
DEFAULT_OUTPUT_DIRECTORY_MODE =
alwaysCreateDirectory ? OutputDirectoryMode.ALWAYS : OutputDirectoryMode.PARONLY;
} | 3.68 |
hadoop_SchedulerHealth_getAggregateReleaseCount | /**
* Get the aggregate of all the release count.
*
* @return aggregate release count
*/
public Long getAggregateReleaseCount() {
return getAggregateOperationCount(Operation.RELEASE);
} | 3.68 |
framework_DateTimeFieldElement_setDateTime | /**
* Sets the value to the given date and time.
*
* @param value
* the date and time to set.
*/
public void setDateTime(LocalDateTime value) {
setISOValue(value.format(getISOFormatter()));
} | 3.68 |
framework_Table_resetVariablesAndPageBuffer | /**
* Resets and paints "to be painted next" variables. Also resets the pageBuffer.
*/
private void resetVariablesAndPageBuffer(PaintTarget target)
throws PaintException {
reqFirstRowToPaint = -1;
reqRowsToPaint = -1;
containerChangeToBeRendered = false;
target.addVariable(this, "reqrows", reqRowsToPaint);
target.addVariable(this, "reqfirstrow", reqFirstRowToPaint);
} | 3.68 |
hbase_AbstractFSWALProvider_recoverLease | // For HBASE-15019
public static void recoverLease(Configuration conf, Path path) {
try {
final FileSystem dfs = CommonFSUtils.getCurrentFileSystem(conf);
RecoverLeaseFSUtils.recoverFileLease(dfs, path, conf, new CancelableProgressable() {
@Override
public boolean progress() {
LOG.debug("Still trying to recover WAL lease: " + path);
return true;
}
});
} catch (IOException e) {
LOG.warn("unable to recover lease for WAL: " + path, e);
}
} | 3.68 |
hadoop_BCFile_getDefaultCompressionName | /**
* Get the name of the default compression algorithm.
*
* @return the name of the default compression algorithm.
*/
public String getDefaultCompressionName() {
return dataIndex.getDefaultCompressionAlgorithm().getName();
} | 3.68 |
flink_BaseHybridHashTable_findSmallerPrime | /** Let prime number be the numBuckets, to avoid partition hash and bucket hash congruences. */
private static int findSmallerPrime(int num) {
for (; num > 1; num--) {
if (isPrimeNumber(num)) {
return num;
}
}
return num;
} | 3.68 |
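isPrimeNumber is referenced but not included in the snippet; a standard trial-division check along the lines below would back the loop above (an illustrative assumption, not Flink's actual helper):

// Illustrative trial-division primality check; Flink's real isPrimeNumber helper is not shown in the snippet.
final class PrimeCheck {
    static boolean isPrimeNumber(int num) {
        if (num < 2) {
            return false;
        }
        if (num % 2 == 0) {
            return num == 2;
        }
        for (int i = 3; (long) i * i <= num; i += 2) {
            if (num % i == 0) {
                return false;
            }
        }
        return true;
    }
}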
dubbo_ConfigurationCache_computeIfAbsent | /**
* Get Cached Value
*
* @param key key
* @param function function to produce value, should not return `null`
* @return value
*/
public String computeIfAbsent(String key, Function<String, String> function) {
String value = cache.get(key);
// value might be empty here!
// empty value from config center will be cached here
if (value == null) {
// lock free, tolerate repeat apply, will return previous value
cache.putIfAbsent(key, function.apply(key));
value = cache.get(key);
}
return value;
} | 3.68 |
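The lock-free pattern above (putIfAbsent followed by a re-read, so concurrent callers may both apply the function but all observe the same stored value) can be reproduced with a plain ConcurrentHashMap. The sketch below is a self-contained illustration of that idea, not Dubbo's class; unlike ConcurrentMap.computeIfAbsent, it never blocks other readers while the function runs, at the cost of occasionally computing the value more than once:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;

// Self-contained illustration of the lock-free "putIfAbsent then re-read" caching pattern.
final class SimpleValueCache {
    private final ConcurrentMap<String, String> cache = new ConcurrentHashMap<>();

    String computeIfAbsent(String key, Function<String, String> function) {
        String value = cache.get(key);
        if (value == null) {
            // Tolerates duplicate computation under contention; the first stored value wins.
            cache.putIfAbsent(key, function.apply(key));
            value = cache.get(key);
        }
        return value;
    }
}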
hbase_AssignmentVerificationReport_getNumRegionsOnFavoredNodeByPosition | /**
* Return the number of regions that are assigned to their favored nodes at the given position
* (primary/secondary/tertiary).
* @return the number of regions
*/
int getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position position) {
return favoredNodes[position.ordinal()];
} | 3.68 |
framework_Form_getItemProperty | /**
* The property identified by the property id.
*
* <p>
* The property data source of the field specified with property id is
* returned. If there is a field (with the specified property id) that has no data
* source, the field itself is returned instead of the data source.
* </p>
*
* @see Item#getItemProperty(Object)
*/
@Override
public Property getItemProperty(Object id) {
final Field<?> field = fields.get(id);
if (field == null) {
// field does not exist or it is not (yet) created for this property
return ownProperties.get(id);
}
final Property<?> property = field.getPropertyDataSource();
if (property != null) {
return property;
} else {
return field;
}
} | 3.68 |
framework_AbstractInMemoryContainer_registerNewItem | /**
* Registers a new item as having been added to the container. This can
* involve storing the item or any relevant information about it in internal
* container-specific collections if necessary, as well as registering
* listeners etc.
*
* The full identifier list in {@link AbstractInMemoryContainer} has already
* been updated to reflect the new item when this method is called.
*
* @param position
* @param itemId
* @param item
*/
protected void registerNewItem(int position, ITEMIDTYPE itemId,
ITEMCLASS item) {
} | 3.68 |
hadoop_MutableStat_lastStat | /**
* Return a SampleStat object that supports
* calls like StdDev and Mean.
* @return SampleStat
*/
public SampleStat lastStat() {
return changed() ? intervalStat : prevStat;
} | 3.68 |
graphhopper_GraphHopper_initLocationIndex | /**
* Initializes the location index after the import is done.
*/
protected void initLocationIndex() {
if (locationIndex != null)
throw new IllegalStateException("Cannot initialize locationIndex twice!");
locationIndex = createLocationIndex(baseGraph.getDirectory());
} | 3.68 |
hbase_ObjectPool_purge | /**
* Removes stale references of shared objects from the pool. References newly becoming stale may
* still remain.
* <p/>
* The implementation of this method is expected to be lightweight when there is no stale
* reference with the Oracle (Sun) implementation of {@code ReferenceQueue}, because
* {@code ReferenceQueue.poll} just checks a volatile instance variable in {@code ReferenceQueue}.
*/
public void purge() {
if (purgeLock.tryLock()) {// no parallel purge
try {
while (true) {
@SuppressWarnings("unchecked")
Reference<V> ref = (Reference<V>) staleRefQueue.poll();
if (ref == null) {
break;
}
referenceCache.remove(getReferenceKey(ref), ref);
}
} finally {
purgeLock.unlock();
}
}
} | 3.68 |
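The purge loop drains a ReferenceQueue and removes the corresponding map entries. The self-contained sketch below shows the same drain-and-remove pattern with weak values over a plain ConcurrentHashMap; it illustrates the technique only and is not HBase's ObjectPool:

import java.lang.ref.Reference;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.WeakReference;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Minimal weak-value cache showing the ReferenceQueue drain pattern used by purge().
final class WeakValueCache<K, V> {

    // Weak reference that remembers its key so the map entry can be removed after collection.
    private static final class KeyedRef<K, V> extends WeakReference<V> {
        final K key;
        KeyedRef(K key, V value, ReferenceQueue<V> queue) {
            super(value, queue);
            this.key = key;
        }
    }

    private final ConcurrentMap<K, KeyedRef<K, V>> cache = new ConcurrentHashMap<>();
    private final ReferenceQueue<V> staleRefQueue = new ReferenceQueue<>();

    void put(K key, V value) {
        cache.put(key, new KeyedRef<>(key, value, staleRefQueue));
    }

    V get(K key) {
        KeyedRef<K, V> ref = cache.get(key);
        return ref == null ? null : ref.get();
    }

    // Drain the queue; references newly becoming stale may still remain, exactly as noted above.
    void purge() {
        Reference<? extends V> ref;
        while ((ref = staleRefQueue.poll()) != null) {
            @SuppressWarnings("unchecked")
            KeyedRef<K, V> keyed = (KeyedRef<K, V>) ref;
            cache.remove(keyed.key, keyed);
        }
    }
}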
framework_Label_addListener | /**
* @deprecated As of 7.0, replaced by
* {@link #addValueChangeListener(Property.ValueChangeListener)}
*/
@Override
@Deprecated
public void addListener(Property.ValueChangeListener listener) {
addValueChangeListener(listener);
} | 3.68 |
morf_RemoveColumn_getTableName | /**
* Gets the name of the table.
*
* @return the table's name
*/
public String getTableName() {
return tableName;
} | 3.68 |
pulsar_PulsarConnectorConfig_setZookeeperUri | /**
* @deprecated use {@link #setMetadataUrl(String)}
*/
@Deprecated
@Config("pulsar.zookeeper-uri")
public PulsarConnectorConfig setZookeeperUri(String zookeeperUri) {
if (hasMetadataUrl) {
return this;
}
this.metadataUrl = zookeeperUri;
return this;
} | 3.68 |
hadoop_BlockStorageMovementNeeded_clearQueuesWithNotification | /**
* Clean all the movements in spsDirsToBeTraveresed/storageMovementNeeded
* and notify to clean up required resources.
*/
public synchronized void clearQueuesWithNotification() {
// Remove xAttr from directories
Long trackId;
while ((trackId = ctxt.getNextSPSPath()) != null) {
try {
// Remove xAttr for file
ctxt.removeSPSHint(trackId);
} catch (IOException ie) {
LOG.warn("Failed to remove SPS xattr for track id " + trackId, ie);
}
}
// Files directly added to storageMovementNeeded, so try to remove
// the xAttr for each file
ItemInfo itemInfo;
while ((itemInfo = get()) != null) {
try {
// Remove xAttr for file
if (!itemInfo.isDir()) {
ctxt.removeSPSHint(itemInfo.getFile());
}
} catch (IOException ie) {
LOG.warn(
"Failed to remove SPS xattr for track id "
+ itemInfo.getFile(), ie);
}
}
this.clearAll();
} | 3.68 |
framework_Escalator_hasColumnAndRowData | /**
* Check whether there are both columns and any row data (for either
* headers, body or footer).
*
* @return <code>true</code> if header, body or footer has rows and there
* are columns
*/
private boolean hasColumnAndRowData() {
return (header.getRowCount() > 0 || body.getRowCount() > 0
|| footer.getRowCount() > 0)
&& columnConfiguration.getColumnCount() > 0;
} | 3.68 |
framework_ShortcutAction_getModifiers | /**
* Get the {@link ModifierKey}s required for the shortcut to react.
*
* @return modifier keys for this shortcut
*/
public int[] getModifiers() {
return modifiers;
} | 3.68 |
framework_VAbstractCalendarPanel_setAssistiveLabelNextMonth | /**
* Set assistive label for the next month element.
*
* @param label
* the label to set
* @since 8.4
*/
public void setAssistiveLabelNextMonth(String label) {
nextMonthAssistiveLabel = label;
} | 3.68 |
hbase_CellArrayImmutableSegment_reinitializeCellSet | /*------------------------------------------------------------------------*/
// Create CellSet based on CellChunkMap from current ConcurrentSkipListMap based CellSet
// (without compacting iterator)
// We do not consider cells bigger than chunks!
private void reinitializeCellSet(int numOfCells, KeyValueScanner segmentScanner,
CellSet oldCellSet, MemStoreCompactionStrategy.Action action) {
Cell[] cells = new Cell[numOfCells]; // build the Cell Array
Cell curCell;
int idx = 0;
int numUniqueKeys = 0;
Cell prev = null;
try {
while ((curCell = segmentScanner.next()) != null) {
cells[idx++] = curCell;
if (action == MemStoreCompactionStrategy.Action.FLATTEN_COUNT_UNIQUE_KEYS) {
// counting number of unique keys
if (prev != null) {
if (!CellUtil.matchingRowColumn(prev, curCell)) {
numUniqueKeys++;
}
} else {
numUniqueKeys++;
}
}
prev = curCell;
}
} catch (IOException ie) {
throw new IllegalStateException(ie);
} finally {
segmentScanner.close();
}
if (action != MemStoreCompactionStrategy.Action.FLATTEN_COUNT_UNIQUE_KEYS) {
numUniqueKeys = CellSet.UNKNOWN_NUM_UNIQUES;
}
// build the immutable CellSet
CellArrayMap cam = new CellArrayMap(getComparator(), cells, 0, idx, false);
// update the CellSet of this Segment
this.setCellSet(oldCellSet, new CellSet(cam, numUniqueKeys));
} | 3.68 |
framework_Form_removeAllProperties | /**
* Removes all properties and fields from the form.
*
* @return the success of the operation. Removal of all fields succeeded if
* (and only if) the return value is <code>true</code>.
*/
public boolean removeAllProperties() {
boolean success = true;
for (Object property : propertyIds.toArray()) {
if (!removeItemProperty(property)) {
success = false;
}
}
return success;
} | 3.68 |
pulsar_JdbcUtils_getTableId | /**
* Get the {@link TableId} for the given tableName.
*/
public static TableId getTableId(Connection connection, String tableName) throws Exception {
DatabaseMetaData metadata = connection.getMetaData();
try (ResultSet rs = metadata.getTables(null, null, tableName, new String[]{"TABLE", "PARTITIONED TABLE"})) {
if (rs.next()) {
String catalogName = rs.getString(1);
String schemaName = rs.getString(2);
String gotTableName = rs.getString(3);
checkState(tableName.equals(gotTableName),
"TableName not match: " + tableName + " Got: " + gotTableName);
if (log.isDebugEnabled()) {
log.debug("Get Table: {}, {}, {}", catalogName, schemaName, tableName);
}
return TableId.of(catalogName, schemaName, tableName);
} else {
throw new Exception("Not able to find table: " + tableName);
}
}
} | 3.68 |
hbase_AccessChecker_initGroupService | /*
* Initialize the group service.
*/
private void initGroupService(Configuration conf) {
if (groupService == null) {
if (conf.getBoolean(User.TestingGroups.TEST_CONF, false)) {
UserProvider.setGroups(new User.TestingGroups(UserProvider.getGroups()));
groupService = UserProvider.getGroups();
} else {
groupService = Groups.getUserToGroupsMappingService(conf);
}
}
} | 3.68 |
hadoop_DockerCommandExecutor_isRemovable | /**
* Is the container in a removable state?
*
* @param containerStatus the container's {@link DockerContainerStatus}.
* @return is the container in a removable state.
*/
public static boolean isRemovable(DockerContainerStatus containerStatus) {
return !containerStatus.equals(DockerContainerStatus.NONEXISTENT)
&& !containerStatus.equals(DockerContainerStatus.UNKNOWN)
&& !containerStatus.equals(DockerContainerStatus.REMOVING)
&& !containerStatus.equals(DockerContainerStatus.RUNNING);
} | 3.68 |
flink_StateAssignmentOperation_reAssignSubKeyedStates | // TODO rewrite based on operator id
private Tuple2<List<KeyedStateHandle>, List<KeyedStateHandle>> reAssignSubKeyedStates(
OperatorState operatorState,
List<KeyGroupRange> keyGroupPartitions,
int subTaskIndex,
int newParallelism,
int oldParallelism) {
List<KeyedStateHandle> subManagedKeyedState;
List<KeyedStateHandle> subRawKeyedState;
if (newParallelism == oldParallelism) {
if (operatorState.getState(subTaskIndex) != null) {
subManagedKeyedState =
operatorState.getState(subTaskIndex).getManagedKeyedState().asList();
subRawKeyedState = operatorState.getState(subTaskIndex).getRawKeyedState().asList();
} else {
subManagedKeyedState = emptyList();
subRawKeyedState = emptyList();
}
} else {
subManagedKeyedState =
getManagedKeyedStateHandles(
operatorState, keyGroupPartitions.get(subTaskIndex));
subRawKeyedState =
getRawKeyedStateHandles(operatorState, keyGroupPartitions.get(subTaskIndex));
}
if (subManagedKeyedState.isEmpty() && subRawKeyedState.isEmpty()) {
return new Tuple2<>(emptyList(), emptyList());
} else {
return new Tuple2<>(subManagedKeyedState, subRawKeyedState);
}
} | 3.68 |
framework_SystemMessagesInfo_getLocale | /**
* The locale of the UI related to the {@link SystemMessages} request.
*
* @return The Locale or null if the locale is not known
*/
public Locale getLocale() {
return locale;
} | 3.68 |
streampipes_DataLakeMeasure_getTimestampFieldName | /**
* This can be used to get the name of the timestamp property without the stream prefix
*
* @return the name of the timestamp property
*/
@TsIgnore
@JsonIgnore
public String getTimestampFieldName() {
return timestampField.split(STREAM_PREFIX_DELIMITER)[1];
} | 3.68 |
open-banking-gateway_Xs2aConsentInfo_isEmbedded | /**
* Is the current consent authorization in EMBEDDED mode.
*/
public boolean isEmbedded(Xs2aContext ctx) {
return EMBEDDED.name().equalsIgnoreCase(ctx.getAspspScaApproach());
} | 3.68 |
pulsar_ConcurrentOpenHashMap_keys | /**
* @return a new list of all keys (makes a copy)
*/
public List<K> keys() {
List<K> keys = new ArrayList<>((int) size());
forEach((key, value) -> keys.add(key));
return keys;
} | 3.68 |
hmily_HmilyParticipantCacheManager_removeByKey | /**
* remove guava cache by key.
*
* @param participantId guava cache key.
*/
public void removeByKey(final Long participantId) {
if (Objects.nonNull(participantId)) {
LOADING_CACHE.invalidate(participantId);
}
} | 3.68 |
pulsar_AuthenticationProvider_authenticateHttpRequestAsync | /**
* Validate the authentication for the given credentials with the specified authentication data.
*
* <p>Implementations of this method MUST modify the request by adding the {@link AuthenticatedRoleAttributeName}
* and the {@link AuthenticatedDataAttributeName} attributes.</p>
*
* <p>Warning: the calling thread is an IO thread. Any implementations that rely on blocking behavior
* must ensure that the execution is completed using a separate thread pool so that IO threads
* are never blocked.</p>
*
* @return the response is set according to the passed-in request, and the returned future indicates whether the following chain.doFilter should run.
* @throws Exception when authentication fails
*/
default CompletableFuture<Boolean> authenticateHttpRequestAsync(HttpServletRequest request,
HttpServletResponse response) {
try {
return CompletableFuture.completedFuture(this.authenticateHttpRequest(request, response));
} catch (Exception e) {
return FutureUtil.failedFuture(e);
}
} | 3.68 |
flink_TimestampStringUtils_fromLocalDateTime | /** Convert a {@link LocalDateTime} to a calcite's {@link TimestampString}. */
public static TimestampString fromLocalDateTime(LocalDateTime ldt) {
return new TimestampString(
ldt.getYear(),
ldt.getMonthValue(),
ldt.getDayOfMonth(),
ldt.getHour(),
ldt.getMinute(),
ldt.getSecond())
.withNanos(ldt.getNano());
} | 3.68 |
pulsar_WindowManager_onTrigger | /**
* The callback invoked by the trigger policy.
*/
@Override
public boolean onTrigger() {
List<Event<T>> windowEvents = null;
List<Event<T>> expired = null;
lock.lock();
try {
/*
* scan the entire window to handle out of order events in
* the case of time based windows.
*/
windowEvents = scanEvents(true);
expired = new ArrayList<>(expiredEvents);
expiredEvents.clear();
} finally {
lock.unlock();
}
List<Event<T>> events = new ArrayList<>();
List<Event<T>> newEvents = new ArrayList<>();
for (Event<T> event : windowEvents) {
events.add(event);
if (!prevWindowEvents.contains(event)) {
newEvents.add(event);
}
}
prevWindowEvents.clear();
if (!events.isEmpty()) {
prevWindowEvents.addAll(windowEvents);
if (log.isDebugEnabled()) {
log.debug("invoking windowLifecycleListener onActivation, [{}] events in window.", events.size());
}
windowLifecycleListener.onActivation(events, newEvents, expired,
evictionPolicy.getContext().getReferenceTime());
} else {
log.debug("No events in the window, skipping onActivation");
}
triggerPolicy.reset();
return !events.isEmpty();
} | 3.68 |
graphhopper_NodeBasedNodeContractor_findAndHandleShortcuts | /**
* Searches for shortcuts and calls the given handler on each shortcut that is found. The graph is not directly
* changed by this method.
* Returns the 'degree' of the given node (disregarding edges from/to already contracted nodes).
* Note that here the degree is not the total number of adjacent edges, but only the number of incoming edges
*/
private long findAndHandleShortcuts(int node, PrepareShortcutHandler handler, int maxVisitedNodes) {
long degree = 0;
PrepareGraphEdgeIterator incomingEdges = inEdgeExplorer.setBaseNode(node);
// collect outgoing nodes (goal-nodes) only once
while (incomingEdges.next()) {
int fromNode = incomingEdges.getAdjNode();
if (fromNode == node)
throw new IllegalStateException("Unexpected loop-edge at node: " + node);
final double incomingEdgeWeight = incomingEdges.getWeight();
// this check is important to prevent calling calcMillis on inaccessible edges and also allows early exit
if (Double.isInfinite(incomingEdgeWeight)) {
continue;
}
// collect outgoing nodes (goal-nodes) only once
PrepareGraphEdgeIterator outgoingEdges = outEdgeExplorer.setBaseNode(node);
witnessPathSearcher.init(fromNode, node);
degree++;
while (outgoingEdges.next()) {
int toNode = outgoingEdges.getAdjNode();
// no need to search for witnesses going from a node back to itself
if (fromNode == toNode)
continue;
// Limit weight as ferries or forbidden edges can increase local search too much.
// If we decrease the correct weight we only explore less and introduce more shortcuts.
// I.e. no change to accuracy is made.
double existingDirectWeight = incomingEdgeWeight + outgoingEdges.getWeight();
if (Double.isInfinite(existingDirectWeight))
continue;
dijkstraSW.start();
dijkstraCount++;
double maxWeight = witnessPathSearcher.findUpperBound(toNode, existingDirectWeight, maxVisitedNodes);
dijkstraSW.stop();
if (maxWeight <= existingDirectWeight)
// FOUND witness path, so do not add shortcut
continue;
handler.handleShortcut(fromNode, toNode, existingDirectWeight,
outgoingEdges.getPrepareEdge(), outgoingEdges.getOrigEdgeCount(),
incomingEdges.getPrepareEdge(), incomingEdges.getOrigEdgeCount());
}
}
return degree;
} | 3.68 |
hudi_BaseHoodieWriteClient_addColumn | /**
* Adds a column to the table.
*
* @param colName name of the column to be added. If the column should be added to a nested field, the full name must be specified
* @param schema type of the column to be added.
* @param doc doc of the column to be added.
* @param position position of the column to be added
* @param positionType column position change type. Currently supports three change types: first/after/before
*/
public void addColumn(String colName, Schema schema, String doc, String position, TableChange.ColumnPositionChange.ColumnPositionType positionType) {
Pair<InternalSchema, HoodieTableMetaClient> pair = getInternalSchemaAndMetaClient();
InternalSchema newSchema = new InternalSchemaChangeApplier(pair.getLeft())
.applyAddChange(colName, AvroInternalSchemaConverter.convertToField(schema), doc, position, positionType);
commitTableChange(newSchema, pair.getRight());
} | 3.68 |
morf_ChangePrimaryKeyColumns_getNewPrimaryKeyColumns | /**
* @return the new primary key column names
*/
public List<String> getNewPrimaryKeyColumns() {
return newPrimaryKeyColumns;
} | 3.68 |
morf_SqlDialect_getSqlForSum | /**
* Converts the sum function into SQL.
*
* @param function the function details
* @return a string representation of the SQL
*/
protected String getSqlForSum(Function function) {
return "SUM(" + getSqlFrom(function.getArguments().get(0)) + ")";
} | 3.68 |
hbase_HFileBlockIndex_add | /**
* The same as {@link #add(byte[], long, int, long)} but does not take the key/value into
* account. Used for single-level indexes.
* @see #add(byte[], long, int, long)
*/
@Override
public void add(byte[] firstKey, long blockOffset, int onDiskDataSize) {
add(firstKey, blockOffset, onDiskDataSize, -1);
} | 3.68 |
pulsar_Schema_PROTOBUF_NATIVE | /**
* Create a Protobuf-Native schema type with schema definition.
*
* @param schemaDefinition schemaDefinition the definition of the schema
* @return a Schema instance
*/
static <T extends com.google.protobuf.GeneratedMessageV3> Schema<T> PROTOBUF_NATIVE(
SchemaDefinition<T> schemaDefinition) {
return DefaultImplementation.getDefaultImplementation().newProtobufNativeSchema(schemaDefinition);
} | 3.68 |
hadoop_GroupsService_getGroups | /**
* @deprecated use {@link #getGroupsSet(String user)}
*/
@Deprecated
@Override
public List<String> getGroups(String user) throws IOException {
return hGroups.getGroups(user);
} | 3.68 |
hudi_HoodieMetadataWriteUtils_createMetadataWriteConfig | /**
* Create a {@code HoodieWriteConfig} to use for the Metadata Table. This is used by async
* indexer only.
*
* @param writeConfig {@code HoodieWriteConfig} of the main dataset writer
* @param failedWritesCleaningPolicy Cleaning policy on failed writes
*/
public static HoodieWriteConfig createMetadataWriteConfig(
HoodieWriteConfig writeConfig, HoodieFailedWritesCleaningPolicy failedWritesCleaningPolicy) {
String tableName = writeConfig.getTableName() + METADATA_TABLE_NAME_SUFFIX;
final long maxLogFileSizeBytes = writeConfig.getMetadataConfig().getMaxLogFileSize();
// Create the write config for the metadata table by borrowing options from the main write config.
HoodieWriteConfig.Builder builder = HoodieWriteConfig.newBuilder()
.withEngineType(writeConfig.getEngineType())
.withTimelineLayoutVersion(TimelineLayoutVersion.CURR_VERSION)
.withConsistencyGuardConfig(ConsistencyGuardConfig.newBuilder()
.withConsistencyCheckEnabled(writeConfig.getConsistencyGuardConfig().isConsistencyCheckEnabled())
.withInitialConsistencyCheckIntervalMs(writeConfig.getConsistencyGuardConfig().getInitialConsistencyCheckIntervalMs())
.withMaxConsistencyCheckIntervalMs(writeConfig.getConsistencyGuardConfig().getMaxConsistencyCheckIntervalMs())
.withMaxConsistencyChecks(writeConfig.getConsistencyGuardConfig().getMaxConsistencyChecks())
.build())
.withWriteConcurrencyMode(WriteConcurrencyMode.SINGLE_WRITER)
.withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(false).withFileListingParallelism(writeConfig.getFileListingParallelism()).build())
.withAutoCommit(true)
.withAvroSchemaValidate(false)
.withEmbeddedTimelineServerEnabled(false)
.withMarkersType(MarkerType.DIRECT.name())
.withRollbackUsingMarkers(false)
.withPath(HoodieTableMetadata.getMetadataTableBasePath(writeConfig.getBasePath()))
.withSchema(HoodieMetadataRecord.getClassSchema().toString())
.forTable(tableName)
// we will trigger cleaning manually, to control the instant times
.withCleanConfig(HoodieCleanConfig.newBuilder()
.withAsyncClean(DEFAULT_METADATA_ASYNC_CLEAN)
.withAutoClean(false)
.withCleanerParallelism(MDT_DEFAULT_PARALLELISM)
.withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS)
.withFailedWritesCleaningPolicy(failedWritesCleaningPolicy)
.retainCommits(DEFAULT_METADATA_CLEANER_COMMITS_RETAINED)
.build())
// we will trigger archive manually, to ensure only regular writer invokes it
.withArchivalConfig(HoodieArchivalConfig.newBuilder()
.archiveCommitsWith(
writeConfig.getMinCommitsToKeep() + 1, writeConfig.getMaxCommitsToKeep() + 1)
.withAutoArchive(false)
.build())
// we will trigger compaction manually, to control the instant times
.withCompactionConfig(HoodieCompactionConfig.newBuilder()
.withInlineCompaction(false)
.withMaxNumDeltaCommitsBeforeCompaction(writeConfig.getMetadataCompactDeltaCommitMax())
.withEnableOptimizedLogBlocksScan(String.valueOf(writeConfig.enableOptimizedLogBlocksScan()))
// Compaction on metadata table is used as a barrier for archiving on main dataset and for validating the
// deltacommits having corresponding completed commits. Therefore, we need to compact all fileslices of all
// partitions together requiring UnBoundedCompactionStrategy.
.withCompactionStrategy(new UnBoundedCompactionStrategy())
// Check if log compaction is enabled, this is needed for tables with a lot of records.
.withLogCompactionEnabled(writeConfig.isLogCompactionEnabledOnMetadata())
// Below config is only used if isLogCompactionEnabled is set.
.withLogCompactionBlocksThreshold(writeConfig.getMetadataLogCompactBlocksThreshold())
.build())
.withStorageConfig(HoodieStorageConfig.newBuilder().hfileMaxFileSize(MDT_MAX_HFILE_SIZE_BYTES)
.logFileMaxSize(maxLogFileSizeBytes)
// Keeping the log blocks as large as the log files themselves reduces the number of HFile blocks to be checked for
// presence of keys
.logFileDataBlockMaxSize(maxLogFileSizeBytes).build())
.withRollbackParallelism(MDT_DEFAULT_PARALLELISM)
.withFinalizeWriteParallelism(MDT_DEFAULT_PARALLELISM)
.withKeyGenerator(HoodieTableMetadataKeyGenerator.class.getCanonicalName())
.withPopulateMetaFields(DEFAULT_METADATA_POPULATE_META_FIELDS)
.withWriteStatusClass(FailOnFirstErrorWriteStatus.class)
.withReleaseResourceEnabled(writeConfig.areReleaseResourceEnabled());
// RecordKey properties are needed for the metadata table records
final Properties properties = new Properties();
properties.put(HoodieTableConfig.RECORDKEY_FIELDS.key(), RECORD_KEY_FIELD_NAME);
properties.put("hoodie.datasource.write.recordkey.field", RECORD_KEY_FIELD_NAME);
builder.withProperties(properties);
if (writeConfig.isMetricsOn()) {
// Table Name is needed for metric reporters prefix
Properties commonProperties = new Properties();
commonProperties.put(HoodieWriteConfig.TBL_NAME.key(), tableName);
builder.withMetricsConfig(HoodieMetricsConfig.newBuilder()
.fromProperties(commonProperties)
.withReporterType(writeConfig.getMetricsReporterType().toString())
.withExecutorMetrics(writeConfig.isExecutorMetricsEnabled())
.on(true).build());
switch (writeConfig.getMetricsReporterType()) {
case GRAPHITE:
builder.withMetricsGraphiteConfig(HoodieMetricsGraphiteConfig.newBuilder()
.onGraphitePort(writeConfig.getGraphiteServerPort())
.toGraphiteHost(writeConfig.getGraphiteServerHost())
.usePrefix(writeConfig.getGraphiteMetricPrefix()).build());
break;
case JMX:
builder.withMetricsJmxConfig(HoodieMetricsJmxConfig.newBuilder()
.onJmxPort(writeConfig.getJmxPort())
.toJmxHost(writeConfig.getJmxHost())
.build());
break;
case PROMETHEUS_PUSHGATEWAY:
HoodieMetricsPrometheusConfig prometheusConfig = HoodieMetricsPrometheusConfig.newBuilder()
.withPushgatewayJobname(writeConfig.getPushGatewayJobName())
.withPushgatewayRandomJobnameSuffix(writeConfig.getPushGatewayRandomJobNameSuffix())
.withPushgatewayLabels(writeConfig.getPushGatewayLabels())
.withPushgatewayReportPeriodInSeconds(String.valueOf(writeConfig.getPushGatewayReportPeriodSeconds()))
.withPushgatewayHostName(writeConfig.getPushGatewayHost())
.withPushgatewayPortNum(writeConfig.getPushGatewayPort()).build();
builder.withProperties(prometheusConfig.getProps());
break;
case DATADOG:
case PROMETHEUS:
case CONSOLE:
case INMEMORY:
case CLOUDWATCH:
break;
default:
throw new HoodieMetadataException("Unsupported Metrics Reporter type " + writeConfig.getMetricsReporterType());
}
}
HoodieWriteConfig metadataWriteConfig = builder.build();
// Inline compaction and auto clean must stay disabled: this table is not exposed outside, and cleaning/compaction are triggered internally
ValidationUtils.checkArgument(!metadataWriteConfig.isAutoClean(), "Cleaning is controlled internally for Metadata table.");
ValidationUtils.checkArgument(!metadataWriteConfig.inlineCompactionEnabled(), "Compaction is controlled internally for metadata table.");
// Auto commit is required
ValidationUtils.checkArgument(metadataWriteConfig.shouldAutoCommit(), "Auto commit is required for Metadata Table");
ValidationUtils.checkArgument(metadataWriteConfig.getWriteStatusClassName().equals(FailOnFirstErrorWriteStatus.class.getName()),
"MDT should use " + FailOnFirstErrorWriteStatus.class.getName());
// Metadata Table cannot have metadata listing turned on. (infinite loop, much?)
ValidationUtils.checkArgument(!metadataWriteConfig.isMetadataTableEnabled(), "File listing cannot be used for Metadata Table");
return metadataWriteConfig;
} | 3.68 |
hadoop_AzureBlobFileSystem_setPermission | /**
* Set permission of a path.
*
* @param path The path
* @param permission Access permission
*/
@Override
public void setPermission(final Path path, final FsPermission permission)
throws IOException {
LOG.debug("AzureBlobFileSystem.setPermission path: {}", path);
TracingContext tracingContext = new TracingContext(clientCorrelationId,
fileSystemId, FSOperationType.SET_PERMISSION, true, tracingHeaderFormat, listener);
if (!getIsNamespaceEnabled(tracingContext)) {
super.setPermission(path, permission);
return;
}
if (permission == null) {
throw new IllegalArgumentException("The permission can't be null");
}
Path qualifiedPath = makeQualified(path);
try {
abfsStore.setPermission(qualifiedPath, permission, tracingContext);
} catch (AzureBlobFileSystemException ex) {
checkException(path, ex);
}
} | 3.68 |
dubbo_LoggerFactory_setLevel | /**
* Set the current logging level
*
* @param level logging level
*/
public static void setLevel(Level level) {
loggerAdapter.setLevel(level);
} | 3.68 |
hudi_TimestampBasedAvroKeyGenerator_initIfNeeded | /**
* The function takes care of lazily initialising dateTimeFormatter variables only once.
*/
private void initIfNeeded() {
if (this.inputFormatter == null) {
this.inputFormatter = parser.getInputFormatter();
}
if (this.partitionFormatter == null) {
this.partitionFormatter = DateTimeFormat.forPattern(outputDateFormat);
if (this.outputDateTimeZone != null) {
partitionFormatter = partitionFormatter.withZone(outputDateTimeZone);
}
}
} | 3.68 |
hbase_RegionStateStore_deleteRegion | // ============================================================================================
// Delete Region State helpers
// ============================================================================================
/**
* Deletes the specified region.
*/
public void deleteRegion(final RegionInfo regionInfo) throws IOException {
deleteRegions(Collections.singletonList(regionInfo));
} | 3.68 |
morf_DatabaseMetaDataProvider_setColumnAutonumbered | /**
* Sets column being autonumbered from a result set.
*
* @param tableName Name of the table.
* @param column Column builder to set to.
* @param columnResultSet Result set to be read.
* @return Resulting column builder.
* @throws SQLException Upon errors.
*/
@SuppressWarnings("unused")
protected ColumnBuilder setColumnAutonumbered(RealName tableName, ColumnBuilder column, ResultSet columnResultSet) throws SQLException {
boolean autoNumbered = "YES".equals(columnResultSet.getString(COLUMN_IS_AUTOINCREMENT));
return autoNumbered ? column.autoNumbered(-1) : column;
} | 3.68 |
graphhopper_MatrixResponse_getWeight | /**
* Returns the weight for the specific entry (from -> to) in arbitrary units ('costs'), or
* {@link Double#MAX_VALUE} in case no connection was found (and {@link GHMRequest#setFailFast(boolean)} was set
* to true).
*/
public double getWeight(int from, int to) {
if (hasErrors()) {
throw new IllegalStateException("Cannot return weight (" + from + "," + to + ") if errors occurred " + getErrors());
}
if (from >= weights.length) {
throw new IllegalStateException("Cannot get 'from' " + from + " from weights with size " + weights.length);
} else if (to >= weights[from].length) {
throw new IllegalStateException("Cannot get 'to' " + to + " from weights with size " + weights[from].length);
}
return weights[from][to];
} | 3.68 |
dubbo_ScopeClusterInvoker_destroyInjvmInvoker | /**
* Destroy the existing InjvmInvoker.
*/
private void destroyInjvmInvoker() {
if (injvmInvoker != null) {
injvmInvoker.destroy();
injvmInvoker = null;
}
} | 3.68 |
flink_SubtaskStateStats_getEndToEndDuration | /**
* Computes the duration since the given trigger timestamp.
*
* <p>If the trigger timestamp is greater than the ACK timestamp, this returns <code>0</code>.
*
* @param triggerTimestamp Trigger timestamp of the checkpoint.
* @return Duration since the given trigger timestamp.
*/
public long getEndToEndDuration(long triggerTimestamp) {
return Math.max(0, ackTimestamp - triggerTimestamp);
} | 3.68 |
flink_RequestedLocalProperties_setOrdering | /**
* Sets the order for these interesting local properties.
*
* @param ordering The order to set.
*/
public void setOrdering(Ordering ordering) {
this.ordering = ordering;
} | 3.68 |
flink_InPlaceMutableHashTable_appendPointerAndRecord | /**
* Appends a pointer and a record.
*
* @param pointer The pointer to write (Note: this is NOT the position to write to!)
* @param record The record to write
* @return A pointer to the written data
* @throws IOException (EOFException specifically, if memory ran out)
*/
public long appendPointerAndRecord(long pointer, T record) throws IOException {
setWritePosition(appendPosition);
return noSeekAppendPointerAndRecord(pointer, record);
} | 3.68 |
hadoop_FsStatus_getRemaining | /**
* Return the number of remaining bytes on the file system.
* @return remaining.
*/
public long getRemaining() {
return remaining;
} | 3.68 |
hbase_ThriftHttpServlet_getAuthHeader | /**
* Returns the base64 encoded auth header payload
* @throws HttpAuthenticationException if a remote or network exception occurs
*/
private String getAuthHeader(HttpServletRequest request) throws HttpAuthenticationException {
String authHeader = request.getHeader(HttpHeaders.AUTHORIZATION);
// Each http request must have an Authorization header
if (authHeader == null || authHeader.isEmpty()) {
throw new HttpAuthenticationException(
"Authorization header received " + "from the client is empty.");
}
String authHeaderBase64String;
int beginIndex = (NEGOTIATE + " ").length();
authHeaderBase64String = authHeader.substring(beginIndex);
// Authorization header must have a payload
if (authHeaderBase64String.isEmpty()) {
throw new HttpAuthenticationException(
"Authorization header received " + "from the client does not contain any data.");
}
return authHeaderBase64String;
} | 3.68 |
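The payload returned above is base64 text; decoding it needs nothing beyond the JDK. A minimal usage sketch, assuming a SPNEGO-style "Negotiate <token>" header (the helper name here is hypothetical):

import java.util.Base64;

// Hypothetical helper showing how the extracted base64 payload would be decoded into a GSS token.
final class AuthHeaderDecoder {
    static byte[] decodeNegotiateToken(String authHeader) {
        String prefix = "Negotiate ";
        if (authHeader == null || !authHeader.startsWith(prefix)) {
            throw new IllegalArgumentException("Not a Negotiate Authorization header");
        }
        return Base64.getDecoder().decode(authHeader.substring(prefix.length()));
    }
}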
framework_GwtRpcButtonConnector_doRPC | /*
* Make an RPC to test our bug.
*/
private void doRPC() {
log("GwtRpcButtonTestConnector onClick");
GwtRpcServiceTestAsync service = GWT.create(GwtRpcServiceTest.class);
service.giveMeThat("honey", "sugar", new AsyncCallback<String>() {
@Override
public void onSuccess(String result) {
showResult(result, SUCCESS_LABEL_ID);
}
@Override
public void onFailure(Throwable caught) {
showResult(caught.getMessage(), FAIL_LABEL_ID);
}
/*
* Show the result box.
*/
private void showResult(String result, String labelID) {
DialogBox box = new DialogBox(true);
Label label = new Label(result);
label.getElement().setId(labelID);
box.add(label);
box.center();
box.show();
}
});
} | 3.68 |
hadoop_ResourceCalculator_compare | /**
* On a cluster with capacity {@code clusterResource}, compare {@code lhs}
* and {@code rhs} considering all resources.
*
* @param clusterResource cluster capacity
* @param lhs First {@link Resource} to compare
* @param rhs Second {@link Resource} to compare
* @return -1 if {@code lhs} is smaller, 0 if equal and 1 if it is larger
*/
public int compare(Resource clusterResource, Resource lhs, Resource rhs) {
return compare(clusterResource, lhs, rhs, false);
} | 3.68 |
hadoop_ExternalSPSFilePathCollector_processPath | /**
* Recursively scan the given path and add the file info to SPS service for
* processing.
*/
private long processPath(Long startID, String childPath) {
long pendingWorkCount = 0; // to be satisfied file counter
for (byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;;) {
final DirectoryListing children;
try {
children = dfs.getClient().listPaths(childPath,
lastReturnedName, false);
} catch (IOException e) {
LOG.warn("Failed to list directory " + childPath
+ ". Ignore the directory and continue.", e);
return pendingWorkCount;
}
if (children == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("The scanning start dir/sub dir " + childPath
+ " does not have childrens.");
}
return pendingWorkCount;
}
for (HdfsFileStatus child : children.getPartialListing()) {
if (child.isFile()) {
service.addFileToProcess(new ItemInfo(startID, child.getFileId()),
false);
checkProcessingQueuesFree();
pendingWorkCount++; // increment to be satisfied file count
} else {
String childFullPathName = child.getFullName(childPath);
if (child.isDirectory()) {
if (!childFullPathName.endsWith(Path.SEPARATOR)) {
childFullPathName = childFullPathName + Path.SEPARATOR;
}
pendingWorkCount += processPath(startID, childFullPathName);
}
}
}
if (children.hasMore()) {
lastReturnedName = children.getLastName();
} else {
return pendingWorkCount;
}
}
} | 3.68 |
hadoop_TimelinePutResponse_setEntityId | /**
* Set the entity Id
*
* @param entityId
* the entity Id
*/
public void setEntityId(String entityId) {
this.entityId = entityId;
} | 3.68 |
flink_InFlightRequestTracker_registerRequest | /**
* Registers an in-flight request.
*
* @return {@code true} if the request could be registered; {@code false} if the tracker has
* already been terminated.
*/
public boolean registerRequest() {
return phaser.register() >= 0;
} | 3.68 |
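The check works because Phaser.register() returns a negative phase once the phaser has terminated. A small self-contained tracker along the same lines (an illustration, not Flink's full class):

import java.util.concurrent.Phaser;

// Minimal in-flight request tracker built on Phaser, mirroring the register()-returns-negative check.
final class SimpleInFlightTracker {
    // One pre-registered party represents the tracker itself.
    private final Phaser phaser = new Phaser(1);

    boolean registerRequest() {
        return phaser.register() >= 0;
    }

    void deregisterRequest() {
        phaser.arriveAndDeregister();
    }

    // Deregisters the tracker's own party and waits until every in-flight request has deregistered;
    // the phaser then terminates, so further registerRequest() calls return false.
    void awaitCompletion() {
        int phase = phaser.arriveAndDeregister();
        phaser.awaitAdvance(phase);
    }
}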
hbase_HRegionFileSystem_getFileSystem | /** Returns the underlying {@link FileSystem} */
public FileSystem getFileSystem() {
return this.fs;
} | 3.68 |
hadoop_AuxServiceConfiguration_files | /**
* List of files that need to be created and made available as
* volumes in the service component containers.
**/
public AuxServiceConfiguration files(List<AuxServiceFile> fileList) {
this.files = fileList;
return this;
} | 3.68 |
hadoop_FedBalanceContext_setForceCloseOpenFiles | /**
* Force close open files.
* @param value true if force close all the open files.
* @return the builder.
*/
public Builder setForceCloseOpenFiles(boolean value) {
this.forceCloseOpenFiles = value;
return this;
} | 3.68 |
flink_Plan_setJobId | /**
* Sets the ID of the job that the dataflow plan belongs to. If this ID is set to {@code null},
* then the dataflow represents its own independent job.
*
* @param jobId The ID of the job that the dataflow plan belongs to.
*/
public void setJobId(JobID jobId) {
this.jobId = jobId;
} | 3.68 |
flink_ResourceCounter_getTotalResource | /**
* Computes the total resources in this counter.
*
* @return the total resources in this counter
*/
public ResourceProfile getTotalResource() {
return resources.entrySet().stream()
.map(entry -> entry.getKey().multiply(entry.getValue()))
.reduce(ResourceProfile.ZERO, ResourceProfile::merge);
} | 3.68 |
rocketmq-connect_MemoryConfigManagementServiceImpl_pauseConnector | /**
* Pause the connector.
*
* @param connectorName name of the connector to pause
*/
@Override
public void pauseConnector(String connectorName) {
if (!connectorKeyValueStore.containsKey(connectorName)) {
throw new ConnectException("Connector [" + connectorName + "] does not exist");
}
ConnectKeyValue config = connectorKeyValueStore.get(connectorName);
config.setTargetState(TargetState.PAUSED);
connectorKeyValueStore.put(connectorName, config.nextGeneration());
triggerListener();
} | 3.68 |
flink_GenericDataSinkBase_setLocalOrder | /**
* Sets the order in which the sink must write its data within each fragment in the distributed
* file system. For any value other then <tt>NONE</tt>, this will cause the system to perform a
* local sort, or try to reuse an order from a previous operation.
*
* @param localOrder The local order to write the data in.
*/
public void setLocalOrder(Ordering localOrder) {
this.localOrdering = localOrder;
} | 3.68 |
hadoop_LocalSASKeyGeneratorImpl_getStorageAccountInstance | /**
* Helper method that creates CloudStorageAccount Instance using the
* storage account key.
* @param accountName Name of the storage account
* @param accountKey Storage Account key
* @return CloudStorageAccount instance for the storage account.
* @throws SASKeyGenerationException
*/
private CloudStorageAccount getStorageAccountInstance(String accountName,
String accountKey) throws SASKeyGenerationException {
if (!storageAccountMap.containsKey(accountName)) {
if (accountKey == null || accountKey.isEmpty()) {
throw new SASKeyGenerationException(
"No key for Storage account " + accountName);
}
CloudStorageAccount account = null;
try {
account =
new CloudStorageAccount(new StorageCredentialsAccountAndKey(
accountName, accountKey));
} catch (URISyntaxException uriSyntaxEx) {
throw new SASKeyGenerationException("Encountered URISyntaxException "
+ "for account " + accountName, uriSyntaxEx);
}
storageAccountMap.put(accountName, account);
}
return storageAccountMap.get(accountName);
} | 3.68 |
hadoop_Find_buildDescription | /** Build the description used by the help command. */
private static String buildDescription(ExpressionFactory factory) {
ArrayList<Expression> operators = new ArrayList<Expression>();
ArrayList<Expression> primaries = new ArrayList<Expression>();
for (Class<? extends Expression> exprClass : EXPRESSIONS) {
Expression expr = factory.createExpression(exprClass, null);
if (expr.isOperator()) {
operators.add(expr);
} else {
primaries.add(expr);
}
}
Collections.sort(operators, new Comparator<Expression>() {
@Override
public int compare(Expression arg0, Expression arg1) {
return arg0.getClass().getName().compareTo(arg1.getClass().getName());
}
});
Collections.sort(primaries, new Comparator<Expression>() {
@Override
public int compare(Expression arg0, Expression arg1) {
return arg0.getClass().getName().compareTo(arg1.getClass().getName());
}
});
StringBuilder sb = new StringBuilder();
for (String line : HELP) {
sb.append(line).append("\n");
}
sb.append("\n")
.append("The following primary expressions are recognised:\n");
for (Expression expr : primaries) {
for (String line : expr.getUsage()) {
sb.append(" ").append(line).append("\n");
}
for (String line : expr.getHelp()) {
sb.append(" ").append(line).append("\n");
}
sb.append("\n");
}
sb.append("The following operators are recognised:\n");
for (Expression expr : operators) {
for (String line : expr.getUsage()) {
sb.append(" ").append(line).append("\n");
}
for (String line : expr.getHelp()) {
sb.append(" ").append(line).append("\n");
}
sb.append("\n");
}
return sb.toString();
} | 3.68 |
hadoop_AppToFlowColumn_getColumnQualifier | /**
* @return the column name value
*/
private String getColumnQualifier() {
return columnQualifier;
} | 3.68 |
hbase_ConnectionUtils_setServerSideHConnectionRetriesConfig | /**
* Changes the configuration to set the number of retries needed when using Connection internally,
* e.g. for updating catalog tables, etc. Call this method before we create any Connections.
* @param c The Configuration instance to set the retries into.
* @param log Used to log what we set in here.
*/
public static void setServerSideHConnectionRetriesConfig(final Configuration c, final String sn,
final Logger log) {
// TODO: Fix this. Not all connections from server side should have 10 times the retries.
int hcRetries = c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
// Go big. Multiply by 10. If we can't get to meta after this many retries
// then something seriously wrong.
int serversideMultiplier = c.getInt(HConstants.HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER,
HConstants.DEFAULT_HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER);
int retries = hcRetries * serversideMultiplier;
c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
log.info(sn + " server-side Connection retries=" + retries);
} | 3.68 |
hmily_CuratorZookeeperExceptionHandler_handleException | /**
* Handle exception.
*
* <p>Ignore interrupt and connection invalid exception.</p>
*
* @param cause to be handled exception
*/
public static void handleException(final Exception cause) {
if (null == cause) {
return;
}
if (isIgnoredException(cause) || null != cause.getCause() && isIgnoredException(cause.getCause())) {
log.debug("Ignored exception for: {}", cause.getMessage());
} else if (cause instanceof InterruptedException) {
Thread.currentThread().interrupt();
} else {
throw new ConfigException(cause);
}
} | 3.68 |
graphhopper_VirtualEdgeIteratorState_setUnfavored | /**
* This method sets edge to unfavored status for routing from the start or to the stop location.
*/
public void setUnfavored(boolean unfavored) {
this.unfavored = unfavored;
} | 3.68 |
rocketmq-connect_ProcessingContext_report | /**
* Report the current error context to all configured reporters.
*/
public void report() {
if (reporters.size() == 1) {
// single-reporter fast path; return so the reporter is not invoked a second time below
reporters.iterator().next().report(this);
return;
}
reporters.stream().forEach(r -> r.report(this));
} | 3.68 |
hbase_AsyncAdmin_cloneSnapshot | /**
* Create a new table by cloning the snapshot content.
* @param snapshotName name of the snapshot to be cloned
* @param tableName name of the table where the snapshot will be restored
* @param restoreAcl <code>true</code> to restore acl of snapshot
*/
default CompletableFuture<Void> cloneSnapshot(String snapshotName, TableName tableName,
boolean restoreAcl) {
return cloneSnapshot(snapshotName, tableName, restoreAcl, null);
} | 3.68 |
hmily_HmilyXaTransactionManager_initialized | /**
* Initialized hmily xa transaction manager.
*
* @return the hmily xa transaction manager
*/
public static HmilyXaTransactionManager initialized() {
return new HmilyXaTransactionManager();
} | 3.68 |
framework_AbstractOrderedLayoutConnector_init | /*
* (non-Javadoc)
*
* @see com.vaadin.client.ui.AbstractComponentConnector#init()
*/
@Override
public void init() {
super.init();
getWidget().setLayoutManager(getLayoutManager());
} | 3.68 |
hibernate-validator_PathImpl_isValidJavaIdentifier | /**
* Validate that the given identifier is a valid Java identifier according to the Java Language Specification,
* <a href="http://docs.oracle.com/javase/specs/jls/se8/html/jls-3.html#jls-3.8">chapter 3.8</a>
*
* @param identifier string identifier to validate
*
* @return true if the given identifier is a valid Java Identifier
*
* @throws IllegalArgumentException if the given identifier is {@code null}
*/
private static boolean isValidJavaIdentifier(String identifier) {
Contracts.assertNotNull( identifier, "identifier param cannot be null" );
if ( identifier.length() == 0 || !Character.isJavaIdentifierStart( (int) identifier.charAt( 0 ) ) ) {
return false;
}
for ( int i = 1; i < identifier.length(); i++ ) {
if ( !Character.isJavaIdentifierPart( (int) identifier.charAt( i ) ) ) {
return false;
}
}
return true;
} | 3.68 |
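A short standalone demonstration of the JDK checks the method relies on (a copy of the logic for illustration, not Hibernate Validator's PathImpl):

// Standalone demo of Character.isJavaIdentifierStart/isJavaIdentifierPart as used above.
public final class IdentifierCheckDemo {
    static boolean isValidJavaIdentifier(String s) {
        if (s == null || s.isEmpty() || !Character.isJavaIdentifierStart(s.charAt(0))) {
            return false;
        }
        for (int i = 1; i < s.length(); i++) {
            if (!Character.isJavaIdentifierPart(s.charAt(i))) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        System.out.println(isValidJavaIdentifier("myField"));  // true
        System.out.println(isValidJavaIdentifier("_value1"));  // true: '_' may start an identifier
        System.out.println(isValidJavaIdentifier("1stValue")); // false: cannot start with a digit
        System.out.println(isValidJavaIdentifier("my-field")); // false: '-' is not an identifier part
    }
}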
zxing_PDF417HighLevelEncoder_encodeHighLevel | /**
* Performs high-level encoding of a PDF417 message using the algorithm described in annex P
* of ISO/IEC 15438:2001(E). If byte compaction has been selected, then only byte compaction
* is used.
*
* @param msg the message
* @param compaction compaction mode to use
* @param encoding character encoding used to encode in default or byte compaction
* or {@code null} for default / not applicable
* @param autoECI encode input minimally using multiple ECIs if needed
* If autoECI encoding is specified and additionally {@code encoding} is specified, then the encoder
* will use the specified {@link Charset} for any character that can be encoded by it, regardless
* if a different encoding would lead to a more compact encoding. When no {@code encoding} is specified
* then charsets will be chosen so that the byte representation is minimal.
* @return the encoded message (the char values range from 0 to 928)
*/
static String encodeHighLevel(String msg, Compaction compaction, Charset encoding, boolean autoECI)
throws WriterException {
if (msg.isEmpty()) {
throw new WriterException("Empty message not allowed");
}
if (encoding == null && !autoECI) {
for (int i = 0; i < msg.length(); i++) {
if (msg.charAt(i) > 255) {
throw new WriterException("Non-encodable character detected: " + msg.charAt(i) + " (Unicode: " +
(int) msg.charAt(i) +
"). Consider specifying EncodeHintType.PDF417_AUTO_ECI and/or EncodeTypeHint.CHARACTER_SET.");
}
}
}
//the codewords 0..928 are encoded as Unicode characters
StringBuilder sb = new StringBuilder(msg.length());
ECIInput input;
if (autoECI) {
input = new MinimalECIInput(msg, encoding, -1);
} else {
input = new NoECIInput(msg);
if (encoding == null) {
encoding = DEFAULT_ENCODING;
} else if (!DEFAULT_ENCODING.equals(encoding)) {
CharacterSetECI eci = CharacterSetECI.getCharacterSetECI(encoding);
if (eci != null) {
encodingECI(eci.getValue(), sb);
}
}
}
int len = input.length();
int p = 0;
int textSubMode = SUBMODE_ALPHA;
// User selected encoding mode
switch (compaction) {
case TEXT:
encodeText(input, p, len, sb, textSubMode);
break;
case BYTE:
if (autoECI) {
encodeMultiECIBinary(input, 0, input.length(), TEXT_COMPACTION, sb);
} else {
byte[] msgBytes = input.toString().getBytes(encoding);
encodeBinary(msgBytes, p, msgBytes.length, BYTE_COMPACTION, sb);
}
break;
case NUMERIC:
sb.append((char) LATCH_TO_NUMERIC);
encodeNumeric(input, p, len, sb);
break;
default:
int encodingMode = TEXT_COMPACTION; //Default mode, see 4.4.2.1
while (p < len) {
while (p < len && input.isECI(p)) {
encodingECI(input.getECIValue(p), sb);
p++;
}
if (p >= len) {
break;
}
int n = determineConsecutiveDigitCount(input, p);
if (n >= 13) {
sb.append((char) LATCH_TO_NUMERIC);
encodingMode = NUMERIC_COMPACTION;
textSubMode = SUBMODE_ALPHA; //Reset after latch
encodeNumeric(input, p, n, sb);
p += n;
} else {
int t = determineConsecutiveTextCount(input, p);
if (t >= 5 || n == len) {
if (encodingMode != TEXT_COMPACTION) {
sb.append((char) LATCH_TO_TEXT);
encodingMode = TEXT_COMPACTION;
textSubMode = SUBMODE_ALPHA; //start with submode alpha after latch
}
textSubMode = encodeText(input, p, t, sb, textSubMode);
p += t;
} else {
int b = determineConsecutiveBinaryCount(input, p, autoECI ? null : encoding);
if (b == 0) {
b = 1;
}
byte[] bytes = autoECI ? null : input.subSequence(p, p + b).toString().getBytes(encoding);
if (((bytes == null && b == 1) || (bytes != null && bytes.length == 1))
&& encodingMode == TEXT_COMPACTION) {
//Switch for one byte (instead of latch)
if (autoECI) {
encodeMultiECIBinary(input, p, 1, TEXT_COMPACTION, sb);
} else {
encodeBinary(bytes, 0, 1, TEXT_COMPACTION, sb);
}
} else {
//Mode latch performed by encodeBinary()
if (autoECI) {
encodeMultiECIBinary(input, p, p + b, encodingMode, sb);
} else {
encodeBinary(bytes, 0, bytes.length, encodingMode, sb);
}
encodingMode = BYTE_COMPACTION;
textSubMode = SUBMODE_ALPHA; //Reset after latch
}
p += b;
}
}
}
break;
}
return sb.toString();
} | 3.68 |
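A hedged usage sketch through zxing's public writer API, exercising the two hints named in the error message above; the content string and dimensions are arbitrary, and EncodeHintType.PDF417_AUTO_ECI is assumed to be available in the zxing version in use:

import com.google.zxing.BarcodeFormat;
import com.google.zxing.EncodeHintType;
import com.google.zxing.MultiFormatWriter;
import com.google.zxing.WriterException;
import com.google.zxing.common.BitMatrix;

import java.util.EnumMap;
import java.util.Map;

public class Pdf417EncodeExample {
    public static void main(String[] args) throws WriterException {
        Map<EncodeHintType, Object> hints = new EnumMap<>(EncodeHintType.class);
        hints.put(EncodeHintType.CHARACTER_SET, "UTF-8");        // charset used for byte compaction
        hints.put(EncodeHintType.PDF417_AUTO_ECI, Boolean.TRUE); // let the encoder pick minimal ECIs
        BitMatrix matrix = new MultiFormatWriter()
            .encode("Grüße 1234567890123", BarcodeFormat.PDF_417, 300, 120, hints);
        System.out.println(matrix.getWidth() + "x" + matrix.getHeight());
    }
}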
zilla_HpackContext_staticIndex18 | // Index in static table for the given name of length 18
private static int staticIndex18(DirectBuffer name)
{
return (name.getByte(17) == 'e' && STATIC_TABLE[48].name.equals(name)) ? 48 : -1; // proxy-authenticate
} | 3.68 |
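The helper dispatches by name length and peeks at one discriminating byte before paying for a full comparison; index 48 (proxy-authenticate) is the only 18-byte name in the HPACK static table (RFC 7541, Appendix A). An illustrative stand-alone version of the same trick using plain byte arrays rather than Agrona buffers:

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class StaticIndexSketch {

    private static final byte[] PROXY_AUTHENTICATE =
        "proxy-authenticate".getBytes(StandardCharsets.US_ASCII);

    // Cheap last-byte check first, full equality only when it matches.
    static int staticIndex18(byte[] name) {
        return name.length == 18 && name[17] == 'e'
            && Arrays.equals(name, PROXY_AUTHENTICATE) ? 48 : -1;
    }

    public static void main(String[] args) {
        System.out.println(staticIndex18("proxy-authenticate".getBytes(StandardCharsets.US_ASCII))); // 48
        System.out.println(staticIndex18("x-custom-header-ae".getBytes(StandardCharsets.US_ASCII))); // -1
    }
}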
AreaShop_RentRegion_getPlayerName | /**
 * Get the name of the player renting this region.
 * @return Name of the player renting this region; if it cannot be resolved from the UUID the old cached name is returned, and if that is also unavailable <UNKNOWN> is returned
*/
public String getPlayerName() {
String result = Utils.toName(getRenter());
if(result == null || result.isEmpty()) {
result = config.getString("rent.renterName");
if(result == null || result.isEmpty()) {
result = "<UNKNOWN>";
}
}
return result;
} | 3.68 |
framework_FlyweightRow_addCells | /**
* Adds cell representations (i.e. new columns) for the indicated cell range
* and updates the subsequent indexing.
*
* @param index
* start index of the range
* @param numberOfColumns
* length of the range
*/
public void addCells(final int index, final int numberOfColumns) {
for (int i = 0; i < numberOfColumns; i++) {
final int col = index + i;
cells.add(col, new FlyweightCell(this, col));
}
updateRestOfCells(index + numberOfColumns);
} | 3.68 |
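An illustrative sketch of the same insert-and-reindex pattern using a plain list of column indices; FlyweightCell and updateRestOfCells are not reproduced, the list below simply stores each cell's column index:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class AddCellsSketch {
    public static void main(String[] args) {
        List<Integer> cellColumns = new ArrayList<>(Arrays.asList(0, 1, 2, 3));
        int index = 1;            // start of the new column range
        int numberOfColumns = 2;  // length of the new column range

        // insert the new cells with their own column indices
        for (int i = 0; i < numberOfColumns; i++) {
            cellColumns.add(index + i, index + i);
        }
        // re-index every cell pushed to the right (the job of updateRestOfCells)
        for (int i = index + numberOfColumns; i < cellColumns.size(); i++) {
            cellColumns.set(i, i);
        }
        System.out.println(cellColumns); // [0, 1, 2, 3, 4, 5]
    }
}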
querydsl_ComparableExpression_coalesce | /**
* Create a {@code coalesce(this, args...)} expression
*
* @param args additional arguments
* @return coalesce
*/
@Override
@SuppressWarnings({"unchecked"})
public ComparableExpression<T> coalesce(T... args) {
Coalesce<T> coalesce = new Coalesce<T>(getType(), mixin);
for (T arg : args) {
coalesce.add(arg);
}
return coalesce.getValue();
} | 3.68 |
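A minimal compilable sketch using only querydsl-core types; the path name "nickname" is arbitrary and no JPA/SQL backend is involved, the point is just the fluent coalesce call:

import com.querydsl.core.types.dsl.ComparableExpression;
import com.querydsl.core.types.dsl.Expressions;
import com.querydsl.core.types.dsl.StringPath;

public class CoalesceSketch {
    public static void main(String[] args) {
        StringPath nickname = Expressions.stringPath("nickname");
        // renders as coalesce(nickname, 'unknown') in the generated query
        ComparableExpression<String> displayName = nickname.coalesce("unknown");
        System.out.println(displayName);
    }
}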
hbase_MonitoredRPCHandlerImpl_setRPCPacket | /**
* Gives this instance a reference to the protobuf received by the RPC, so that it can later
* compute its size if asked for it.
* @param param The protobuf received by the RPC for this call
*/
@Override
public void setRPCPacket(Message param) {
this.packet = param;
} | 3.68 |
framework_Tree_collapseItemsRecursively | /**
* Collapses the items recursively.
*
* Collapse all the children recursively starting from an item. Operation
* succeeds only if all expandable items are collapsed.
*
* @param startItemId
* ID of the initial item
* @return True if the collapse operation succeeded
*/
public boolean collapseItemsRecursively(Object startItemId) {
boolean result = true;
// Initial stack
final Stack<Object> todo = new Stack<Object>();
todo.add(startItemId);
// Collapse recursively
while (!todo.isEmpty()) {
final Object id = todo.pop();
if (areChildrenAllowed(id) && !collapseItem(id)) {
result = false;
}
if (hasChildren(id)) {
todo.addAll(getChildren(id));
}
}
return result;
} | 3.68 |
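A hedged usage fragment with the server-side Vaadin (7/8) Tree API; the item ids are plain strings and the component is assumed to be attached to a UI elsewhere:

Tree tree = new Tree("Departments");
tree.addItem("Engineering");
tree.addItem("Backend");
tree.addItem("Frontend");
tree.setParent("Backend", "Engineering");
tree.setParent("Frontend", "Engineering");
tree.setChildrenAllowed("Backend", false);
tree.setChildrenAllowed("Frontend", false);

tree.expandItemsRecursively("Engineering");
// collapses "Engineering" and every expandable descendant; true only if all collapses succeeded
boolean allCollapsed = tree.collapseItemsRecursively("Engineering");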
framework_DateTimeField_setAssistiveText | /**
* Set a description that explains the usage of the Widget for users of
* assistive devices.
*
* @param description
* String with the description
*/
public void setAssistiveText(String description) {
getState().descriptionForAssistiveDevices = description;
} | 3.68 |
hadoop_CRC64_compute | /**
   * @param input the input byte array.
   * @return the long value of the CRC-64 checksum of the data.
   */
public long compute(byte[] input) {
init();
for (int i = 0; i < input.length; i++) {
value = TABLE[(input[i] ^ (int) value) & 0xFF] ^ (value >>> 8);
}
return ~value;
} | 3.68 |
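The snippet relies on a 256-entry lookup table and an initial value set by init(), neither of which is shown; presumably init() resets value to -1 so the final bitwise NOT applies the customary final XOR. A hedged sketch of how such a table is typically precomputed, assuming the reflected ECMA-182 polynomial (the class may use a different one):

public class Crc64TableSketch {
    // POLY is an assumption (reflected ECMA-182), not taken from the snippet above.
    private static final long POLY = 0xC96C5795D7870F42L;
    private static final long[] TABLE = new long[256];

    static {
        for (int n = 0; n < 256; n++) {
            long crc = n;
            for (int k = 0; k < 8; k++) {
                // shift right one bit, folding the polynomial in when the low bit is set
                crc = ((crc & 1) == 1) ? (crc >>> 1) ^ POLY : crc >>> 1;
            }
            TABLE[n] = crc;
        }
    }

    public static void main(String[] args) {
        System.out.printf("TABLE[1] = 0x%016X%n", TABLE[1]);
    }
}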
hudi_CollectionUtils_diff | /**
   * Returns the difference between {@code one} {@link List} of elements and {@code another}.
   *
   * NOTE: This is a less optimal counterpart to {@link #diff(Collection, Collection)}, accepting a {@link List}
   * as the holding collection to support use cases with duplicate elements
*/
public static <E> List<E> diff(Collection<E> one, Collection<E> another) {
List<E> diff = new ArrayList<>(one);
diff.removeAll(another);
return diff;
} | 3.68 |
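A short usage-style sketch of the semantics; removeAll drops every occurrence of a matching element, while duplicates that have no match in the second collection are preserved:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class DiffSketch {
    public static void main(String[] args) {
        List<Integer> one = Arrays.asList(1, 1, 2, 3);
        List<Integer> another = Collections.singletonList(2);

        // same logic as diff(one, another)
        List<Integer> diff = new ArrayList<>(one);
        diff.removeAll(another);
        System.out.println(diff); // [1, 1, 3] — the duplicate 1s survive
    }
}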
hbase_Client_negotiate | /**
* Initiate client side Kerberos negotiation with the server.
* @param method method to inject the authentication token into.
* @param uri the String to parse as a URL.
* @throws IOException if unknown protocol is found.
*/
private void negotiate(HttpUriRequest method, String uri) throws IOException {
try {
AuthenticatedURL.Token token = new AuthenticatedURL.Token();
KerberosAuthenticator authenticator = new KerberosAuthenticator();
authenticator.authenticate(new URL(uri), token);
// Inject the obtained negotiated token in the method cookie
injectToken(method, token);
} catch (AuthenticationException e) {
LOG.error("Failed to negotiate with the server.", e);
throw new IOException(e);
}
} | 3.68 |
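A hedged sketch of the same SPNEGO handshake using Hadoop's AuthenticatedURL convenience wrapper instead of the raw KerberosAuthenticator; the endpoint URL is hypothetical and a valid Kerberos ticket is assumed to be in the caller's credential cache:

import java.net.HttpURLConnection;
import java.net.URL;

import org.apache.hadoop.security.authentication.client.AuthenticatedURL;

public class SpnegoExample {
    public static void main(String[] args) throws Exception {
        AuthenticatedURL.Token token = new AuthenticatedURL.Token();
        URL url = new URL("http://rest-gateway.example.com:8080/version/cluster");
        // performs the Kerberos/SPNEGO negotiation and stores the signed auth cookie in 'token'
        HttpURLConnection conn = new AuthenticatedURL().openConnection(url, token);
        System.out.println("HTTP " + conn.getResponseCode() + ", token set: " + token.isSet());
    }
}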
hbase_MutableRegionInfo_containsRange | /**
* Returns true if the given inclusive range of rows is fully contained by this region. For
* example, if the region is foo,a,g and this is passed ["b","c"] or ["a","c"] it will return
* true, but if this is passed ["b","z"] it will return false.
 * @throws IllegalArgumentException if the range passed is invalid (i.e. end < start)
*/
@Override
public boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey) {
CellComparator cellComparator = CellComparatorImpl.getCellComparator(tableName);
if (cellComparator.compareRows(rangeStartKey, rangeEndKey) > 0) {
throw new IllegalArgumentException("Invalid range: " + Bytes.toStringBinary(rangeStartKey)
+ " > " + Bytes.toStringBinary(rangeEndKey));
}
boolean firstKeyInRange = cellComparator.compareRows(rangeStartKey, startKey) >= 0;
boolean lastKeyInRange = cellComparator.compareRows(rangeEndKey, endKey) < 0
|| Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY);
return firstKeyInRange && lastKeyInRange;
} | 3.68 |
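A hedged usage fragment illustrating the documented boundary semantics, assuming a RegionInfo named region for a region whose start key is "a" and end key is "g" (Bytes is org.apache.hadoop.hbase.util.Bytes):

byte[] a = Bytes.toBytes("a");
byte[] b = Bytes.toBytes("b");
byte[] c = Bytes.toBytes("c");
byte[] z = Bytes.toBytes("z");

region.containsRange(b, c); // true  — fully inside [a, g)
region.containsRange(a, c); // true  — the region's start key is inclusive
region.containsRange(b, z); // false — "z" is not before the end key "g"
region.containsRange(c, b); // throws IllegalArgumentException — end before start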
hbase_MobUtils_cleanExpiredMobFiles | /**
* Cleans the expired mob files. Cleans the files whose creation date is older than (current -
* columnFamily.ttl), and the minVersions of that column family is 0.
* @param fs The current file system.
* @param conf The current configuration.
* @param tableName The current table name.
* @param columnDescriptor The descriptor of the current column family.
* @param cacheConfig The cacheConfig that disables the block cache.
* @param current The current time.
*/
public static void cleanExpiredMobFiles(FileSystem fs, Configuration conf, TableName tableName,
ColumnFamilyDescriptor columnDescriptor, CacheConfig cacheConfig, long current)
throws IOException {
long timeToLive = columnDescriptor.getTimeToLive();
if (Integer.MAX_VALUE == timeToLive) {
// no need to clean, because the TTL is not set.
return;
}
Calendar calendar = Calendar.getInstance();
calendar.setTimeInMillis(current - timeToLive * 1000);
calendar.set(Calendar.HOUR_OF_DAY, 0);
calendar.set(Calendar.MINUTE, 0);
calendar.set(Calendar.SECOND, 0);
Date expireDate = calendar.getTime();
LOG.info("MOB HFiles older than " + expireDate.toGMTString() + " will be deleted!");
FileStatus[] stats = null;
Path mobTableDir = CommonFSUtils.getTableDir(getMobHome(conf), tableName);
Path path = getMobFamilyPath(conf, tableName, columnDescriptor.getNameAsString());
try {
stats = fs.listStatus(path);
} catch (FileNotFoundException e) {
LOG.warn("Failed to find the mob file " + path, e);
}
if (null == stats) {
// no file found
return;
}
List<HStoreFile> filesToClean = new ArrayList<>();
int deletedFileCount = 0;
for (FileStatus file : stats) {
String fileName = file.getPath().getName();
try {
if (HFileLink.isHFileLink(file.getPath())) {
HFileLink hfileLink = HFileLink.buildFromHFileLinkPattern(conf, file.getPath());
fileName = hfileLink.getOriginPath().getName();
}
Date fileDate = parseDate(MobFileName.getDateFromName(fileName));
if (LOG.isDebugEnabled()) {
LOG.debug("Checking file {}", fileName);
}
if (fileDate.getTime() < expireDate.getTime()) {
if (LOG.isDebugEnabled()) {
LOG.debug("{} is an expired file", fileName);
}
filesToClean
.add(new HStoreFile(fs, file.getPath(), conf, cacheConfig, BloomType.NONE, true));
if (
filesToClean.size() >= conf.getInt(MOB_CLEANER_BATCH_SIZE_UPPER_BOUND,
DEFAULT_MOB_CLEANER_BATCH_SIZE_UPPER_BOUND)
) {
if (
removeMobFiles(conf, fs, tableName, mobTableDir, columnDescriptor.getName(),
filesToClean)
) {
deletedFileCount += filesToClean.size();
}
filesToClean.clear();
}
}
} catch (Exception e) {
LOG.error("Cannot parse the fileName " + fileName, e);
}
}
if (
!filesToClean.isEmpty() && removeMobFiles(conf, fs, tableName, mobTableDir,
columnDescriptor.getName(), filesToClean)
) {
deletedFileCount += filesToClean.size();
}
LOG.info("Table {} {} expired mob files in total are deleted", tableName, deletedFileCount);
} | 3.68 |
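A small stand-alone sketch of the cut-off computation used above: "now minus TTL", truncated to the start of that day, so all MOB files written on a given day expire together (the 7-day TTL below is hypothetical):

import java.util.Calendar;
import java.util.Date;

public class MobExpireCutoff {
    public static void main(String[] args) {
        long ttlSeconds = 7L * 24 * 3600; // hypothetical column family TTL of 7 days
        Calendar calendar = Calendar.getInstance();
        calendar.setTimeInMillis(System.currentTimeMillis() - ttlSeconds * 1000);
        calendar.set(Calendar.HOUR_OF_DAY, 0);
        calendar.set(Calendar.MINUTE, 0);
        calendar.set(Calendar.SECOND, 0);
        Date expireDate = calendar.getTime();
        System.out.println("MOB files dated before " + expireDate + " would be deleted");
    }
}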