name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_Find_parseExpression | /**
* Parse a list of arguments to extract the {@link Expression} elements.
* The input Deque will be modified to remove the used elements.
*
* @param args arguments to be parsed
* @return list of {@link Expression} elements applicable to this command
* @throws IOException if list can not be parsed
*/
private Expression parseExpression(Deque<String> args) throws IOException {
Deque<Expression> primaries = new LinkedList<Expression>();
Deque<Expression> operators = new LinkedList<Expression>();
Expression prevExpr = getExpression(And.class);
while (!args.isEmpty()) {
String arg = args.pop();
if ("(".equals(arg)) {
Expression expr = parseExpression(args);
primaries.add(expr);
prevExpr = new BaseExpression() {
@Override
public Result apply(PathData item, int depth) throws IOException {
return Result.PASS;
}
}; // stub the previous expression to be a non-op
} else if (")".equals(arg)) {
break;
} else if (isExpression(arg)) {
Expression expr = getExpression(arg);
expr.addArguments(args);
if (expr.isOperator()) {
while (!operators.isEmpty()) {
if (operators.peek().getPrecedence() >= expr.getPrecedence()) {
Expression op = operators.pop();
op.addChildren(primaries);
primaries.push(op);
} else {
break;
}
}
operators.push(expr);
} else {
if (!prevExpr.isOperator()) {
Expression and = getExpression(And.class);
while (!operators.isEmpty()) {
if (operators.peek().getPrecedence() >= and.getPrecedence()) {
Expression op = operators.pop();
op.addChildren(primaries);
primaries.push(op);
} else {
break;
}
}
operators.push(and);
}
primaries.push(expr);
}
prevExpr = expr;
} else {
throw new IOException("Unexpected argument: " + arg);
}
}
while (!operators.isEmpty()) {
Expression operator = operators.pop();
operator.addChildren(primaries);
primaries.push(operator);
}
return primaries.isEmpty() ? getExpression(Print.class) : primaries.pop();
} | 3.68 |
hudi_HiveQueryDDLExecutor_getTableSchema | //TODO Duplicating it here from HMSDLExecutor as HiveQueryQL has no way of doing it on its own currently. Need to refactor it
@Override
public Map<String, String> getTableSchema(String tableName) {
try {
// HiveMetastoreClient returns partition keys separate from Columns, hence get both and merge to
// get the Schema of the table.
final long start = System.currentTimeMillis();
Table table = metaStoreClient.getTable(databaseName, tableName);
Map<String, String> partitionKeysMap =
table.getPartitionKeys().stream().collect(Collectors.toMap(FieldSchema::getName, f -> f.getType().toUpperCase()));
Map<String, String> columnsMap =
table.getSd().getCols().stream().collect(Collectors.toMap(FieldSchema::getName, f -> f.getType().toUpperCase()));
Map<String, String> schema = new HashMap<>();
schema.putAll(columnsMap);
schema.putAll(partitionKeysMap);
final long end = System.currentTimeMillis();
LOG.info(String.format("Time taken to getTableSchema: %s ms", (end - start)));
return schema;
} catch (Exception e) {
throw new HoodieHiveSyncException("Failed to get table schema for : " + tableName, e);
}
} | 3.68 |
hudi_ColumnStatsIndices_getColStatsTargetPos | // the column schema:
// |- file_name: string
// |- min_val: row
// |- max_val: row
// |- null_cnt: long
// |- val_cnt: long
// |- column_name: string
private static int[] getColStatsTargetPos() {
RowType colStatsRowType = (RowType) COL_STATS_DATA_TYPE.getLogicalType();
return Stream.of(
HoodieMetadataPayload.COLUMN_STATS_FIELD_FILE_NAME,
HoodieMetadataPayload.COLUMN_STATS_FIELD_MIN_VALUE,
HoodieMetadataPayload.COLUMN_STATS_FIELD_MAX_VALUE,
HoodieMetadataPayload.COLUMN_STATS_FIELD_NULL_COUNT,
HoodieMetadataPayload.COLUMN_STATS_FIELD_VALUE_COUNT,
HoodieMetadataPayload.COLUMN_STATS_FIELD_COLUMN_NAME)
.mapToInt(colStatsRowType::getFieldIndex)
.toArray();
} | 3.68 |
flink_ConfigOptions_durationType | /** Defines that the value of the option should be of {@link Duration} type. */
public TypedConfigOptionBuilder<Duration> durationType() {
return new TypedConfigOptionBuilder<>(key, Duration.class);
} | 3.68 |
hudi_HoodieFileGroup_getLatestFileSliceBeforeOrOn | /**
* Obtain the latest file slice, up to an instant time, i.e. <= maxInstantTime.
*/
public Option<FileSlice> getLatestFileSliceBeforeOrOn(String maxInstantTime) {
return Option.fromJavaOptional(getAllFileSlices().filter(slice -> compareTimestamps(slice.getBaseInstantTime(), LESSER_THAN_OR_EQUALS, maxInstantTime)).findFirst());
} | 3.68 |
framework_GridRowDragger_getTargetDataProviderUpdater | /**
* Returns the target grid data provider updater.
*
* @return the target grid data provider updater
*/
public TargetDataProviderUpdater<T> getTargetDataProviderUpdater() {
return targetDataProviderUpdater;
} | 3.68 |
AreaShop_BuyRegion_getFormattedResellPrice | /**
* Get the formatted string of the resellprice (includes prefix and suffix).
* @return The formatted string of the resellprice
*/
public String getFormattedResellPrice() {
return Utils.formatCurrency(getResellPrice());
} | 3.68 |
hbase_SpaceViolationPolicyEnforcementFactory_createWithoutViolation | /**
* Creates the "default" {@link SpaceViolationPolicyEnforcement} for a table that isn't in
* violation. This is used to have uniform policy checking for tables in and not in quotas. This
* policy will still verify that new bulk loads do not exceed the configured quota limit.
* @param rss RegionServerServices instance the policy enforcement should use.
* @param tableName The target HBase table.
* @param snapshot The current quota snapshot for the {@code tableName}, can be null.
*/
public SpaceViolationPolicyEnforcement createWithoutViolation(RegionServerServices rss,
TableName tableName, SpaceQuotaSnapshot snapshot) {
if (snapshot == null) {
// If we have no snapshot, this is equivalent to no quota for this table.
// We should use the (singleton instance) of this policy to do nothing.
return MissingSnapshotViolationPolicyEnforcement.getInstance();
}
// We have a snapshot which means that there is a quota set on this table, but it's not in
// violation of that quota. We need to construct a policy for this table.
SpaceQuotaStatus status = snapshot.getQuotaStatus();
if (status.isInViolation()) {
throw new IllegalArgumentException(
tableName + " is in violation. Logic error. Snapshot=" + snapshot);
}
// We have a unique size snapshot to use. Create an instance for this tablename + snapshot.
DefaultViolationPolicyEnforcement enforcement = new DefaultViolationPolicyEnforcement();
enforcement.initialize(rss, tableName, snapshot);
return enforcement;
} | 3.68 |
morf_H2Dialect_getSqlForAddDays | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForAddDays(org.alfasoftware.morf.sql.element.Function)
*/
@Override
protected String getSqlForAddDays(Function function) {
return String.format(
"DATEADD('DAY', %s, %s)",
getSqlFrom(function.getArguments().get(1)),
getSqlFrom(function.getArguments().get(0))
);
} | 3.68 |
framework_AbstractOrderedLayout_getDefaultComponentAlignment | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.Layout.AlignmentHandler#getDefaultComponentAlignment()
*/
@Override
public Alignment getDefaultComponentAlignment() {
return defaultComponentAlignment;
} | 3.68 |
hbase_OrderedBytes_skipVaruint64 | /**
* Skip {@code src} over the encoded varuint64.
* @param src source buffer
* @param cmp if true, parse the complement of the value.
* @return the number of bytes skipped.
*/
static int skipVaruint64(PositionedByteRange src, boolean cmp) {
final int len = lengthVaruint64(src, cmp);
src.setPosition(src.getPosition() + len);
return len;
} | 3.68 |
flink_DelimitedInputFormat_reachedEnd | /**
* Checks whether the current split is at its end.
*
* @return True, if the split is at its end, false otherwise.
*/
@Override
public boolean reachedEnd() {
return this.end;
} | 3.68 |
rocketmq-connect_Worker_checkAndReconfigureConnectors | /**
* check and reconfigure connectors
*
* @param assigns
*/
private void checkAndReconfigureConnectors(Map<String, ConnectKeyValue> assigns) {
if (assigns == null || assigns.isEmpty()) {
return;
}
for (String connectName : assigns.keySet()) {
if (!connectors.containsKey(connectName)) {
// new
continue;
}
WorkerConnector connector = connectors.get(connectName);
ConnectKeyValue oldConfig = connector.getKeyValue();
ConnectKeyValue newConfig = assigns.get(connectName);
if (!oldConfig.equals(newConfig)) {
connector.reconfigure(newConfig);
}
}
} | 3.68 |
zxing_ReedSolomonDecoder_decode | /**
* <p>Decodes given set of received codewords, which include both data and error-correction
* codewords. Really, this means it uses Reed-Solomon to detect and correct errors, in-place,
* in the input.</p>
*
* @param received data and error-correction codewords
* @param twoS number of error-correction codewords available
* @throws ReedSolomonException if decoding fails for any reason
*/
public void decode(int[] received, int twoS) throws ReedSolomonException {
decodeWithECCount(received, twoS);
} | 3.68 |
flink_CheckpointStatsTracker_reportFailedCheckpoint | /**
* Callback when a checkpoint fails.
*
* @param failed The failed checkpoint stats.
*/
void reportFailedCheckpoint(FailedCheckpointStats failed) {
statsReadWriteLock.lock();
try {
counts.incrementFailedCheckpoints();
history.replacePendingCheckpointById(failed);
dirty = true;
logCheckpointStatistics(failed);
} finally {
statsReadWriteLock.unlock();
}
} | 3.68 |
hbase_ServerManager_getMinToStart | /**
* Calculate min necessary to start. This is not an absolute. It is just a friction that will
cause us to hang around a bit longer waiting on RegionServers to check in.
*/
private int getMinToStart() {
if (master.isInMaintenanceMode()) {
// If in maintenance mode, then in process region server hosting meta will be the only server
// available
return 1;
}
int minimumRequired = 1;
int minToStart = this.master.getConfiguration().getInt(WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
// Ensure we are never less than minimumRequired else stuff won't work.
return Math.max(minToStart, minimumRequired);
} | 3.68 |
hadoop_ShadedProtobufHelper_tokenFromProto | /**
* Create a hadoop token from a protobuf token.
* @param tokenProto token
* @return a new token
*/
public static Token<? extends TokenIdentifier> tokenFromProto(
TokenProto tokenProto) {
Token<? extends TokenIdentifier> token = new Token<>(
tokenProto.getIdentifier().toByteArray(),
tokenProto.getPassword().toByteArray(),
new Text(tokenProto.getKind()),
new Text(tokenProto.getService()));
return token;
} | 3.68 |
framework_TreeTable_getHierarchyColumnId | /**
* @return the identifier of column into which the hierarchy will be
* visualized or null if the column is not explicitly defined.
*/
public Object getHierarchyColumnId() {
return hierarchyColumnId;
} | 3.68 |
pulsar_Message_getReaderSchema | /**
* Get the schema associated to the message.
* Please note that this schema is usually equal to the Schema you passed
* during the construction of the Consumer or the Reader.
* But if you are consuming the topic using the GenericObject interface
* this method will return the schema associated with the message.
* @return The schema used to decode the payload of message.
* @see Schema#AUTO_CONSUME()
*/
default Optional<Schema<?>> getReaderSchema() {
return Optional.empty();
} | 3.68 |
hbase_StoreFlusher_createScanner | /**
* Creates the scanner for flushing snapshot. Also calls coprocessors.
* @return The scanner; null if coprocessor is canceling the flush.
*/
protected final InternalScanner createScanner(List<KeyValueScanner> snapshotScanners,
FlushLifeCycleTracker tracker) throws IOException {
ScanInfo scanInfo;
if (store.getCoprocessorHost() != null) {
scanInfo = store.getCoprocessorHost().preFlushScannerOpen(store, tracker);
} else {
scanInfo = store.getScanInfo();
}
final long smallestReadPoint = store.getSmallestReadPoint();
InternalScanner scanner = new StoreScanner(store, scanInfo, snapshotScanners,
ScanType.COMPACT_RETAIN_DELETES, smallestReadPoint, PrivateConstants.OLDEST_TIMESTAMP);
if (store.getCoprocessorHost() != null) {
try {
return store.getCoprocessorHost().preFlush(store, scanner, tracker);
} catch (IOException ioe) {
scanner.close();
throw ioe;
}
}
return scanner;
} | 3.68 |
hadoop_MapReduceJobPropertiesParser_accept | // Accepts a key if there is a corresponding key in the current mapreduce
// configuration
private boolean accept(String key) {
return getLatestKeyName(key) != null;
} | 3.68 |
hadoop_DiskBalancerWorkStatus_setDestPath | /**
* Sets the destination path.
*
* @param destPath - Path
*/
public void setDestPath(String destPath) {
this.destPath = destPath;
} | 3.68 |
framework_Upload_interruptUpload | /**
* Interrupts the upload currently being received. The interruption will be
* done by the receiving thread so this method will return immediately and
* the actual interrupt will happen a bit later.
*/
public void interruptUpload() {
if (isUploading) {
interrupted = true;
}
} | 3.68 |
hadoop_AbfsRestOperation_createHttpOperation | /**
* Creates new object of {@link AbfsHttpOperation} with the url, method, and
* requestHeaders fields of the AbfsRestOperation object.
*/
@VisibleForTesting
AbfsHttpOperation createHttpOperation() throws IOException {
return new AbfsHttpOperation(url, method, requestHeaders);
} | 3.68 |
framework_CalendarComponentEvents_getNewStart | /**
* Returns the updated start date/time of the event.
*
* @return The new date for the event
*/
public Date getNewStart() {
return startTime;
} | 3.68 |
framework_FileDownloader_setOverrideContentType | /**
* Sets whether the content type of served resources should be overridden to
* <code>application/octet-stream</code> to reduce the risk of a browser
* plugin choosing to display the resource instead of downloading it. This
* is by default set to <code>true</code>.
* <p>
* Please note that this only affects Connector resources (e.g.
* {@link FileResource} and {@link ClassResource}) but not other resource
* types (e.g. {@link ExternalResource} or {@link ThemeResource}).
* </p>
*
* @param overrideContentType
* <code>true</code> to override the content type if possible;
* <code>false</code> to use the original content type.
*/
public void setOverrideContentType(boolean overrideContentType) {
this.overrideContentType = overrideContentType;
} | 3.68 |
flink_InternalTimerServiceImpl_snapshotTimersForKeyGroup | /**
* Snapshots the timers (both processing and event time ones) for a given {@code keyGroupIdx}.
*
* @param keyGroupIdx the id of the key-group to be put in the snapshot.
* @return a snapshot containing the timers for the given key-group, and the serializers for
* them
*/
public InternalTimersSnapshot<K, N> snapshotTimersForKeyGroup(int keyGroupIdx) {
return new InternalTimersSnapshot<>(
keySerializer,
namespaceSerializer,
eventTimeTimersQueue.getSubsetForKeyGroup(keyGroupIdx),
processingTimeTimersQueue.getSubsetForKeyGroup(keyGroupIdx));
} | 3.68 |
morf_SqlDialect_matchConditionSqlForMergeFields | /**
* Creates matching conditions SQL for a list of fields used in the ON section
* of a Merge Statement. For example:
* "table1.fieldA = table2.fieldA AND table1.fieldB = table2.fieldB".
*
* @param statement the merge statement.
* @param selectAlias the alias of the select statement of a merge statement.
* @param targetTableName the name of the target table into which to merge.
* @return The corresponding SQL
*/
protected String matchConditionSqlForMergeFields(MergeStatement statement, String selectAlias, String targetTableName) {
Iterable<String> expressions = Iterables.transform(statement.getTableUniqueKey(),
field -> String.format("%s.%s = %s.%s", targetTableName, field.getImpliedName(), selectAlias, field.getImpliedName()));
return Joiner.on(" AND ").join(expressions);
} | 3.68 |
hudi_CompactionUtil_inferChangelogMode | /**
* Infers the changelog mode based on the data file schema(including metadata fields).
*
* <p>We can improve the code if the changelog mode is set up as table config.
*
* @param conf The configuration
* @param metaClient The meta client
*/
public static void inferChangelogMode(Configuration conf, HoodieTableMetaClient metaClient) throws Exception {
TableSchemaResolver tableSchemaResolver = new TableSchemaResolver(metaClient);
Schema tableAvroSchema = tableSchemaResolver.getTableAvroSchemaFromDataFile();
if (tableAvroSchema.getField(HoodieRecord.OPERATION_METADATA_FIELD) != null) {
conf.setBoolean(FlinkOptions.CHANGELOG_ENABLED, true);
}
} | 3.68 |
hadoop_Base64_decode | /**
* Decodes a given Base64 string into its corresponding byte array.
*
* @param data
* the Base64 string, as a <code>String</code> object, to decode
*
* @return the corresponding decoded byte array
* @throws IllegalArgumentException
* If the string is not a valid base64 encoded string
*/
public static byte[] decode(final String data) {
if (data == null) {
throw new IllegalArgumentException("The data parameter is not a valid base64-encoded string.");
}
int byteArrayLength = 3 * data.length() / 4;
if (data.endsWith("==")) {
byteArrayLength -= 2;
}
else if (data.endsWith("=")) {
byteArrayLength -= 1;
}
final byte[] retArray = new byte[byteArrayLength];
int byteDex = 0;
int charDex = 0;
for (; charDex < data.length(); charDex += 4) {
// get 4 chars, convert to 3 bytes
final int char1 = DECODE_64[(byte) data.charAt(charDex)];
final int char2 = DECODE_64[(byte) data.charAt(charDex + 1)];
final int char3 = DECODE_64[(byte) data.charAt(charDex + 2)];
final int char4 = DECODE_64[(byte) data.charAt(charDex + 3)];
if (char1 < 0 || char2 < 0 || char3 == -1 || char4 == -1) {
// invalid character(-1), or bad padding (-2)
throw new IllegalArgumentException("The data parameter is not a valid base64-encoded string.");
}
int tVal = char1 << 18;
tVal += char2 << 12;
tVal += (char3 & 0xff) << 6;
tVal += char4 & 0xff;
if (char3 == -2) {
// two "==" pad chars, check bits 12-24
tVal &= 0x00FFF000;
retArray[byteDex++] = (byte) (tVal >> 16 & 0xFF);
}
else if (char4 == -2) {
// one pad char "=" , check bits 6-24.
tVal &= 0x00FFFFC0;
retArray[byteDex++] = (byte) (tVal >> 16 & 0xFF);
retArray[byteDex++] = (byte) (tVal >> 8 & 0xFF);
}
else {
// No pads take all 3 bytes, bits 0-24
retArray[byteDex++] = (byte) (tVal >> 16 & 0xFF);
retArray[byteDex++] = (byte) (tVal >> 8 & 0xFF);
retArray[byteDex++] = (byte) (tVal & 0xFF);
}
}
return retArray;
} | 3.68 |
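As a quick sanity check of the decoder above, a hedged usage sketch (assuming this Base64 utility is on the classpath; the expected byte values follow the standard RFC 4648 alphabet):

```java
// Unpadded, one-pad and two-pad inputs, with the expected decoded bytes.
byte[] man = Base64.decode("TWFu");  // "Man" -> {77, 97, 110}
byte[] ma  = Base64.decode("TWE=");  // "Ma"  -> {77, 97}
byte[] m   = Base64.decode("TQ==");  // "M"   -> {77}
```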
querydsl_AbstractHibernateQuery_setFlushMode | /**
* Override the current session flush mode, just for this query.
* @return the current object
*/
@SuppressWarnings("unchecked")
public Q setFlushMode(FlushMode flushMode) {
this.flushMode = flushMode;
return (Q) this;
} | 3.68 |
hbase_SortedCompactionPolicy_removeExcessFiles | /**
* @param candidates pre-filtrate
*/
protected void removeExcessFiles(ArrayList<HStoreFile> candidates, boolean isUserCompaction,
boolean isMajorCompaction) {
int excess = candidates.size() - comConf.getMaxFilesToCompact();
if (excess > 0) {
if (isMajorCompaction && isUserCompaction) {
LOG.debug("Warning, compacting more than " + comConf.getMaxFilesToCompact()
+ " files because of a user-requested major compaction");
} else {
LOG.debug(
"Too many admissible files. Excluding " + excess + " files from compaction candidates");
candidates.subList(comConf.getMaxFilesToCompact(), candidates.size()).clear();
}
}
} | 3.68 |
hadoop_OBSPosixBucketUtils_fsGetObjectKeyDepth | /**
* Get the depth of an absolute path, that is the number of '/' in the path.
*
* @param key object key
* @return depth
*/
static int fsGetObjectKeyDepth(final String key) {
int depth = 0;
for (int idx = key.indexOf('/');
idx >= 0; idx = key.indexOf('/', idx + 1)) {
depth++;
}
return key.endsWith("/") ? depth - 1 : depth;
} | 3.68 |
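A self-contained sketch of the same depth rule, for illustration only (the inputs are made up, not taken from the OBS codebase):

```java
public class KeyDepthDemo {
  // Depth = number of '/' characters; a trailing '/' (directory marker) does not count.
  static int depth(String key) {
    int d = 0;
    for (int idx = key.indexOf('/'); idx >= 0; idx = key.indexOf('/', idx + 1)) {
      d++;
    }
    return key.endsWith("/") ? d - 1 : d;
  }

  public static void main(String[] args) {
    System.out.println(depth("a/b/c"));   // 2
    System.out.println(depth("a/b/c/"));  // 2, trailing slash adds no depth
    System.out.println(depth("a"));       // 0
  }
}
```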
morf_RemoveTable_reverse | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.upgrade.SchemaChange#reverse(org.alfasoftware.morf.metadata.Schema)
*/
@Override
public Schema reverse(Schema schema) {
if(schema.tableExists(tableToBeRemoved.getName().toUpperCase())){
throw new IllegalArgumentException("Cannot perform reversal for [" + tableToBeRemoved.getName() + "] table removal as it already exists.");
}
return new AugmentedSchema(schema, tableToBeRemoved);
} | 3.68 |
hbase_ColumnRangeFilter_getMaxColumnInclusive | /** Returns true if max column is inclusive, false otherwise */
public boolean getMaxColumnInclusive() {
return this.maxColumnInclusive;
} | 3.68 |
hbase_MetricRegistryInfo_isExistingSource | /**
* Returns whether or not this MetricRegistry is for an existing BaseSource
* @return true if this MetricRegistry is for an existing BaseSource.
*/
public boolean isExistingSource() {
return existingSource;
} | 3.68 |
hbase_RegionCoprocessorHost_postPut | /**
* @param put The Put object
* @param edit The WALEdit object.
* @exception IOException Exception
*/
public void postPut(final Put put, final WALEdit edit) throws IOException {
if (coprocEnvironments.isEmpty()) {
return;
}
execOperation(new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.postPut(this, put, edit);
}
});
} | 3.68 |
morf_HumanReadableStatementProducer_removeIndex | /** @see org.alfasoftware.morf.upgrade.SchemaEditor#removeIndex(java.lang.String, org.alfasoftware.morf.metadata.Index) **/
@Override
public void removeIndex(String tableName, Index index) {
consumer.schemaChange(HumanReadableStatementHelper.generateRemoveIndexString(tableName, index));
} | 3.68 |
framework_VaadinPortletSession_setSharedRenderParameter | /**
* Sets a shared portlet parameter.
*
* Internally, an action may be created and opened, as shared parameters
* cannot be set directly from all types of requests.
*
* Setting shared render parameters from background threads is not
* supported.
*
* The parameters and values need to be kept in the context until sent. Any
* memory leaks if the action fails are limited to the session.
*
* Shared parameters set or read by a portlet need to be declared in
* portlet.xml .
*
* @param uI
* a window in which a temporary action URL can be opened if
* necessary
* @param name
* parameter identifier
* @param value
* parameter value
*/
public void setSharedRenderParameter(UI uI, String name, String value)
throws IllegalStateException {
PortletResponse response = getCurrentResponse();
if (response instanceof MimeResponse) {
String actionKey = "" + System.currentTimeMillis();
while (sharedParameterActionNameMap.containsKey(actionKey)) {
actionKey += ".";
}
PortletURL actionUrl = generateActionURL(actionKey);
if (actionUrl != null) {
sharedParameterActionNameMap.put(actionKey, name);
sharedParameterActionValueMap.put(actionKey, value);
uI.getPage().setLocation(actionUrl.toString());
} else {
// this should never happen as we already know the response is a
// MimeResponse
throw new IllegalStateException(
"Shared parameters can only be set from a portlet request");
}
} else if (response instanceof StateAwareResponse) {
((StateAwareResponse) response).setRenderParameter(name, value);
} else {
throw new IllegalStateException(
"Shared parameters can only be set from a portlet request");
}
} | 3.68 |
flink_RouteResult_queryParam | /**
* Extracts the first matching param in {@code queryParams}.
*
* @return {@code null} if there's no match
*/
public String queryParam(String name) {
List<String> values = queryParams.get(name);
return (values == null) ? null : values.get(0);
} | 3.68 |
flink_ClusterEntrypointUtils_configureUncaughtExceptionHandler | /**
* Sets the uncaught exception handler for current thread based on configuration.
*
* @param config the configuration to read.
*/
public static void configureUncaughtExceptionHandler(Configuration config) {
Thread.setDefaultUncaughtExceptionHandler(
new ClusterUncaughtExceptionHandler(
config.get(ClusterOptions.UNCAUGHT_EXCEPTION_HANDLING)));
} | 3.68 |
hbase_TinyLfuBlockCache_recordEviction | /**
* Records an eviction. The number of eviction operations and evicted blocks are identical, as an
* eviction is triggered immediately when the capacity has been exceeded. An eviction is performed
* asynchronously. See the library's documentation for details on write buffers, batching, and
* maintenance behavior.
*/
private void recordEviction() {
// FIXME: Currently does not capture the insertion time
stats.evicted(Long.MAX_VALUE, true);
stats.evict();
} | 3.68 |
flink_FlinkAggregateJoinTransposeRule_keyColumns | /**
* Computes the closure of a set of columns according to a given list of constraints. Each 'x =
* y' constraint causes bit y to be set if bit x is set, and vice versa.
*/
private static ImmutableBitSet keyColumns(
ImmutableBitSet aggregateColumns,
com.google.common.collect.ImmutableList<RexNode> predicates) {
SortedMap<Integer, BitSet> equivalence = new TreeMap<>();
for (RexNode predicate : predicates) {
populateEquivalences(equivalence, predicate);
}
ImmutableBitSet keyColumns = aggregateColumns;
for (Integer aggregateColumn : aggregateColumns) {
final BitSet bitSet = equivalence.get(aggregateColumn);
if (bitSet != null) {
keyColumns = keyColumns.union(bitSet);
}
}
return keyColumns;
} | 3.68 |
hadoop_AbstractS3ACommitter_getDestS3AFS | /**
* Get the destination as an S3A Filesystem; casting it.
* @return the dest S3A FS.
* @throws IOException if the FS cannot be instantiated.
*/
public S3AFileSystem getDestS3AFS() throws IOException {
return (S3AFileSystem) getDestFS();
} | 3.68 |
shardingsphere-elasticjob_YamlEngine_unmarshal | /**
* Unmarshal YAML.
*
* @param yamlContent YAML content
* @param classType class type
* @param <T> type of class
* @return object from YAML
*/
public static <T> T unmarshal(final String yamlContent, final Class<T> classType) {
LoaderOptions loaderOptions = new LoaderOptions();
loaderOptions.setTagInspector(tagInspector -> tagInspector.getClassName().startsWith("org.apache.shardingsphere.elasticjob"));
return new Yaml(loaderOptions).loadAs(yamlContent, classType);
} | 3.68 |
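A hedged usage sketch of the method above; the target POJO here is hypothetical and only stands in for a real ElasticJob configuration class:

```java
// Hypothetical target type: SnakeYAML needs a no-arg constructor plus setters.
public class DemoJobConf {
    private String jobName;
    private int shardingTotalCount;
    public void setJobName(String jobName) { this.jobName = jobName; }
    public void setShardingTotalCount(int n) { this.shardingTotalCount = n; }
    public String getJobName() { return jobName; }
    public int getShardingTotalCount() { return shardingTotalCount; }
}

// Usage: plain (untagged) YAML is bound directly to the requested class.
DemoJobConf conf = YamlEngine.unmarshal("jobName: demoJob\nshardingTotalCount: 3", DemoJobConf.class);
// conf.getJobName() -> "demoJob", conf.getShardingTotalCount() -> 3
```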
framework_Window_removeAllCloseShortcuts | /**
* Removes all close shortcuts. This includes the default ESCAPE shortcut.
* It is up to the user to add back any and all keyboard close shortcuts
* they may require. For more fine-grained control over shortcuts, use
* {@link #removeCloseShortcut(int, int...)}.
*
* @since 7.6
*/
public void removeAllCloseShortcuts() {
for (CloseShortcut shortcut : closeShortcuts) {
removeAction(shortcut);
}
closeShortcuts.clear();
} | 3.68 |
flink_KeyGroupStatePartitionStreamProvider_getKeyGroupId | /** Returns the key group that corresponds to the data in the provided stream. */
public int getKeyGroupId() {
return keyGroupId;
} | 3.68 |
framework_VTabsheet_isFirstVisibleTabClient | /**
* Returns whether the given tab index matches the first visible tab on
* the client.
*
* @param index
* the index to check
* @return {@code true} if the given index matches the first visible tab
* that hasn't been scrolled out of view, {@code false}
* otherwise
*/
private boolean isFirstVisibleTabClient(int index) {
return getNextVisibleTab(tabsheet.scrollerIndex - 1) == index;
} | 3.68 |
hbase_MobUtils_getTableNameTag | /**
* Gets the table name tag.
* @param cell The current cell.
* @return The table name tag.
*/
private static Optional<Tag> getTableNameTag(Cell cell) {
Optional<Tag> tag = Optional.empty();
if (cell.getTagsLength() > 0) {
tag = PrivateCellUtil.getTag(cell, TagType.MOB_TABLE_NAME_TAG_TYPE);
}
return tag;
} | 3.68 |
framework_VFormLayout_onClick | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.event.dom.client.ClickHandler#onClick(com.google.gwt
* .event.dom.client.ClickEvent)
*/
@Override
public void onClick(ClickEvent event) {
Caption caption = (Caption) event.getSource();
if (caption.getOwner() != null) {
if (caption.getOwner() instanceof Focusable) {
((Focusable) caption.getOwner()).focus();
} else if (caption
.getOwner() instanceof com.google.gwt.user.client.ui.Focusable) {
((com.google.gwt.user.client.ui.Focusable) caption
.getOwner()).setFocus(true);
}
}
} | 3.68 |
framework_BasicBackwardHandler_backward | /*
* (non-Javadoc)
*
* @see
* com.vaadin.addon.calendar.ui.CalendarComponentEvents.BackwardHandler#
* backward
* (com.vaadin.addon.calendar.ui.CalendarComponentEvents.BackwardEvent)
*/
@Override
public void backward(BackwardEvent event) {
Date start = event.getComponent().getStartDate();
Date end = event.getComponent().getEndDate();
// calculate amount to move back
int durationInDays = (int) (((end.getTime()) - start.getTime())
/ DateConstants.DAYINMILLIS);
durationInDays++;
// for week view durationInDays = -7, for day view durationInDays = -1
durationInDays = -durationInDays;
// set new start and end times
Calendar javaCalendar = event.getComponent().getInternalCalendar();
javaCalendar.setTime(start);
javaCalendar.add(Calendar.DATE, durationInDays);
Date newStart = javaCalendar.getTime();
javaCalendar.setTime(end);
javaCalendar.add(Calendar.DATE, durationInDays);
Date newEnd = javaCalendar.getTime();
if (start.equals(end)) { // day view
int firstDay = event.getComponent().getFirstVisibleDayOfWeek();
int lastDay = event.getComponent().getLastVisibleDayOfWeek();
int dayOfWeek = javaCalendar.get(Calendar.DAY_OF_WEEK);
// we suppose that 7 >= lastDay >= firstDay >= 1
while (!(firstDay <= dayOfWeek && dayOfWeek <= lastDay)) {
javaCalendar.add(Calendar.DATE, -1);
dayOfWeek = javaCalendar.get(Calendar.DAY_OF_WEEK);
}
newStart = javaCalendar.getTime();
newEnd = javaCalendar.getTime();
}
setDates(event, newStart, newEnd);
} | 3.68 |
pulsar_AuthenticationDataProvider_getTlsKeyStoreParams | /**
* Used for TLS authentication with keystore type.
*
* @return a KeyStoreParams for the client certificate chain, or null if the data are not available
*/
default KeyStoreParams getTlsKeyStoreParams() {
return null;
} | 3.68 |
flink_FutureUtils_handleException | /**
* Checks that the given {@link CompletableFuture} is not completed exceptionally with the
* specified class. If the future is completed exceptionally with the specific class, then try
* to recover using a given exception handler. If the exception does not match the specified
* class, just pass it through to later stages.
*
* @param completableFuture to assert for a given exception
* @param exceptionClass exception class to assert for
* @param exceptionHandler to call if the future is completed exceptionally with the specific
* exception
* @return completable future, that can recover from a specified exception
*/
public static <T, E extends Throwable> CompletableFuture<T> handleException(
CompletableFuture<? extends T> completableFuture,
Class<E> exceptionClass,
Function<? super E, ? extends T> exceptionHandler) {
final CompletableFuture<T> handledFuture = new CompletableFuture<>();
checkNotNull(completableFuture)
.whenComplete(
(result, throwable) -> {
if (throwable == null) {
handledFuture.complete(result);
} else if (exceptionClass.isAssignableFrom(throwable.getClass())) {
final E exception = exceptionClass.cast(throwable);
try {
handledFuture.complete(exceptionHandler.apply(exception));
} catch (Throwable t) {
handledFuture.completeExceptionally(t);
}
} else {
handledFuture.completeExceptionally(throwable);
}
});
return handledFuture;
} | 3.68 |
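A hedged usage fragment for the recovery helper above (assumes flink-core and the java.util.concurrent imports are available; the timeout scenario and the fallback value of -1 are made up):

```java
CompletableFuture<Integer> lookup = new CompletableFuture<>();

// Map a TimeoutException to a fallback value of -1; any other failure is passed through.
CompletableFuture<Integer> withFallback =
        FutureUtils.handleException(lookup, TimeoutException.class, e -> -1);

lookup.completeExceptionally(new TimeoutException("lookup timed out"));
int result = withFallback.join(); // -1
```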
framework_VFilterSelect_createSuggestionPopup | /**
* This method will create the SuggestionPopup used by the VFilterSelect
* instance. It is invoked during the Constructor and should only be
* overridden if a custom SuggestionPopup shall be used. The overriding
* method cannot use any instance variables.
*
* @since 7.1.5
* @return SuggestionPopup instance used by this VFilterSelect
*/
protected SuggestionPopup createSuggestionPopup() {
return new SuggestionPopup();
} | 3.68 |
hbase_HbckTableInfo_sidelineBigOverlaps | /**
* Sideline some regions in a big overlap group so that it will have fewer regions, and it is
* easier to merge them later on.
* @param bigOverlap the overlapped group with regions more than maxMerge
*/
void sidelineBigOverlaps(Collection<HbckRegionInfo> bigOverlap) throws IOException {
int overlapsToSideline = bigOverlap.size() - hbck.getMaxMerge();
if (overlapsToSideline > hbck.getMaxOverlapsToSideline()) {
overlapsToSideline = hbck.getMaxOverlapsToSideline();
}
List<HbckRegionInfo> regionsToSideline =
RegionSplitCalculator.findBigRanges(bigOverlap, overlapsToSideline);
FileSystem fs = FileSystem.get(conf);
for (HbckRegionInfo regionToSideline : regionsToSideline) {
try {
LOG.info("Closing region: " + regionToSideline);
hbck.closeRegion(regionToSideline);
} catch (IOException ioe) {
LOG.warn("Was unable to close region " + regionToSideline + ". Just continuing... ",
ioe);
} catch (InterruptedException e) {
LOG.warn("Was unable to close region " + regionToSideline + ". Just continuing... ", e);
}
try {
LOG.info("Offlining region: " + regionToSideline);
hbck.offline(regionToSideline.getRegionName());
} catch (IOException ioe) {
LOG.warn(
"Unable to offline region from master: " + regionToSideline + ". Just continuing... ",
ioe);
}
LOG.info("Before sideline big overlapped region: " + regionToSideline.toString());
Path sidelineRegionDir = hbck.sidelineRegionDir(fs, TO_BE_LOADED, regionToSideline);
if (sidelineRegionDir != null) {
sidelinedRegions.put(sidelineRegionDir, regionToSideline);
LOG.info("After sidelined big overlapped region: "
+ regionToSideline.getRegionNameAsString() + " to " + sidelineRegionDir.toString());
hbck.fixes++;
}
}
} | 3.68 |
flink_LongCounter_add | /** Consider using {@link #add(long)} instead for primitive long values */
@Override
public void add(Long value) {
this.localValue += value;
} | 3.68 |
hbase_QuotaFilter_setUserFilter | /**
* Set the user filter regex
* @param regex the user filter
* @return the quota filter object
*/
public QuotaFilter setUserFilter(final String regex) {
this.userRegex = regex;
hasFilters |= StringUtils.isNotEmpty(regex);
return this;
} | 3.68 |
dubbo_SimpleReferenceCache_destroyAll | /**
* clear and destroy all {@link ReferenceConfigBase} in the cache.
*/
@Override
public void destroyAll() {
if (CollectionUtils.isEmptyMap(referenceKeyMap)) {
return;
}
referenceKeyMap.forEach((_k, referencesOfKey) -> {
for (ReferenceConfigBase<?> rc : referencesOfKey) {
destroyReference(rc);
}
});
referenceKeyMap.clear();
referenceTypeMap.clear();
} | 3.68 |
flink_ExternalResourceOptions_keyWithResourceNameAndSuffix | /** Generate the config option key with resource_name and suffix. */
private static String keyWithResourceNameAndSuffix(String resourceName, String suffix) {
return String.format(
"%s.%s.%s",
EXTERNAL_RESOURCE_PREFIX,
Preconditions.checkNotNull(resourceName),
Preconditions.checkNotNull(suffix));
} | 3.68 |
hbase_LossyCounting_sweep | /**
* sweep low frequency data
*/
public void sweep() {
for (Map.Entry<T, Integer> entry : data.entrySet()) {
if (entry.getValue() < currentTerm) {
T metric = entry.getKey();
data.remove(metric);
if (listener != null) {
listener.sweep(metric);
}
}
}
} | 3.68 |
morf_InlineTableUpgrader_visit | /**
* @see org.alfasoftware.morf.upgrade.SchemaChangeVisitor#visit(org.alfasoftware.morf.upgrade.AnalyseTable)
*/
@Override
public void visit(AnalyseTable analyseTable) {
currentSchema = analyseTable.apply(currentSchema);
writeStatements(sqlDialect.getSqlForAnalyseTable(currentSchema.getTable(analyseTable.getTableName())));
} | 3.68 |
flink_TimeWindowUtil_getNextTriggerWatermark | /** Method to get the next watermark to trigger window. */
public static long getNextTriggerWatermark(
long currentWatermark, long interval, ZoneId shiftTimezone, boolean useDayLightSaving) {
if (currentWatermark == Long.MAX_VALUE) {
return currentWatermark;
}
long triggerWatermark;
// consider the DST timezone
if (useDayLightSaving) {
long utcWindowStart =
getWindowStartWithOffset(
toUtcTimestampMills(currentWatermark, shiftTimezone), 0L, interval);
triggerWatermark = toEpochMillsForTimer(utcWindowStart + interval - 1, shiftTimezone);
} else {
long start = getWindowStartWithOffset(currentWatermark, 0L, interval);
triggerWatermark = start + interval - 1;
}
if (triggerWatermark > currentWatermark) {
return triggerWatermark;
} else {
return triggerWatermark + interval;
}
} | 3.68 |
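A small self-contained sketch of the non-DST branch above, assuming the window-start formula reduces to watermark - (watermark % interval) for non-negative inputs with a zero offset (that assumption is not part of the snippet itself):

```java
public class TriggerWatermarkDemo {
  // Mirrors the non-DST branch: align to the window start, then trigger at start + interval - 1.
  static long nextTrigger(long watermark, long interval) {
    long start = watermark - (watermark % interval); // assumed window-start formula, offset 0
    long trigger = start + interval - 1;
    return trigger > watermark ? trigger : trigger + interval;
  }

  public static void main(String[] args) {
    System.out.println(nextTrigger(10_500L, 5_000L)); // 14999
    System.out.println(nextTrigger(14_999L, 5_000L)); // 19999, watermark already sits at a boundary - 1
  }
}
```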
hudi_BaseHoodieWriteClient_index | /**
* Runs INDEX action to build out the metadata partitions as planned for the given instant time.
*
* @param indexInstantTime - instant time for the requested INDEX action
* @return {@link Option<HoodieIndexCommitMetadata>} after successful indexing.
*/
public Option<HoodieIndexCommitMetadata> index(String indexInstantTime) {
return createTable(config, hadoopConf).index(context, indexInstantTime);
} | 3.68 |
pulsar_MessageIdAdv_getFirstChunkMessageId | /**
* Get the message ID of the first chunk if the current message ID represents the position of a chunked message.
*
* @implNote A chunked message is distributed across different BookKeeper entries. The message ID of a chunked
* message is composed of two message IDs that represent positions of the first and the last chunk. The message ID
* itself represents the position of the last chunk.
*
* @return null if the message is not a chunked message
*/
default MessageIdAdv getFirstChunkMessageId() {
return null;
} | 3.68 |
hbase_GsonUtil_createGson | /**
* Create a builder which is used to create a Gson instance.
* <p/>
* Will set some common configs for the builder.
*/
public static GsonBuilder createGson() {
return new GsonBuilder().setLongSerializationPolicy(LongSerializationPolicy.STRING)
.registerTypeAdapter(LongAdder.class, new TypeAdapter<LongAdder>() {
@Override
public void write(JsonWriter out, LongAdder value) throws IOException {
out.value(value.longValue());
}
@Override
public LongAdder read(JsonReader in) throws IOException {
LongAdder value = new LongAdder();
value.add(in.nextLong());
return value;
}
});
} | 3.68 |
hbase_HRegion_rowIsInRange | /**
* Determines if the specified row is within the row range specified by the specified RegionInfo
* @param info RegionInfo that specifies the row range
* @param row row to be checked
* @return true if the row is within the range specified by the RegionInfo
*/
public static boolean rowIsInRange(RegionInfo info, final byte[] row) {
return ((info.getStartKey().length == 0) || (Bytes.compareTo(info.getStartKey(), row) <= 0))
&& ((info.getEndKey().length == 0) || (Bytes.compareTo(info.getEndKey(), row) > 0));
} | 3.68 |
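A minimal sketch of the same bounds check with plain byte arrays, for illustration only (start key inclusive, end key exclusive, empty key meaning unbounded); it is not HBase code:

```java
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class RowRangeDemo {
  // Same half-open interval semantics as rowIsInRange, using unsigned lexicographic order.
  static boolean inRange(byte[] start, byte[] end, byte[] row) {
    return (start.length == 0 || Arrays.compareUnsigned(start, row) <= 0)
        && (end.length == 0 || Arrays.compareUnsigned(end, row) > 0);
  }

  public static void main(String[] args) {
    byte[] start = "b".getBytes(StandardCharsets.UTF_8);
    byte[] end = "m".getBytes(StandardCharsets.UTF_8);
    System.out.println(inRange(start, end, "b".getBytes(StandardCharsets.UTF_8))); // true, start is inclusive
    System.out.println(inRange(start, end, "m".getBytes(StandardCharsets.UTF_8))); // false, end is exclusive
  }
}
```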
hadoop_CachingBlockManager_numAvailable | /**
* Number of ByteBuffers available to be acquired.
*
* @return the number of available buffers.
*/
public int numAvailable() {
return bufferPool.numAvailable();
} | 3.68 |
flink_HiveParserASTNodeOrigin_getObjectType | /** @return the type of the object from which an HiveParserASTNode originated, e.g. "view". */
public String getObjectType() {
return objectType;
} | 3.68 |
hbase_ServerRegionReplicaUtil_isMetaRegionReplicaReplicationEnabled | /** Returns True if hbase:meta Region Read Replica is enabled. */
public static boolean isMetaRegionReplicaReplicationEnabled(Configuration conf, TableName tn) {
return TableName.isMetaTableName(tn) && conf.getBoolean(
REGION_REPLICA_REPLICATION_CATALOG_CONF_KEY, DEFAULT_REGION_REPLICA_REPLICATION_CATALOG);
} | 3.68 |
framework_AbstractComponent_setSizeUndefined | /*
* (non-Javadoc)
*
* @see com.vaadin.server.Sizeable#setSizeUndefined()
*/
@Override
public void setSizeUndefined() {
setWidthUndefined();
setHeightUndefined();
} | 3.68 |
flink_ColumnStats_copy | /**
* Create a deep copy of "this" instance.
*
* @return a deep copy
*/
public ColumnStats copy() {
if (maxValue != null || minValue != null) {
return new ColumnStats(
this.ndv,
this.nullCount,
this.avgLen,
this.maxLen,
this.maxValue,
this.minValue);
} else {
return new ColumnStats(
this.ndv, this.nullCount, this.avgLen, this.maxLen, this.max, this.min);
}
} | 3.68 |
hadoop_SystemErasureCodingPolicies_getByID | /**
* Get a policy by policy ID.
* @return ecPolicy, or null if not found
*/
public static ErasureCodingPolicy getByID(byte id) {
return SYSTEM_POLICIES_BY_ID.get(id);
} | 3.68 |
hbase_HFileCorruptionChecker_getQuarantinedMobFiles | /** Returns the set of successfully quarantined paths after checkTables is called. */
public Collection<Path> getQuarantinedMobFiles() {
return new HashSet<>(quarantinedMobFiles);
} | 3.68 |
hbase_TableDescriptorBuilder_hashCode | /** Returns hash code */
@Override
public int hashCode() {
int result = this.name.hashCode();
if (this.families.size() > 0) {
for (ColumnFamilyDescriptor e : this.families.values()) {
result ^= e.hashCode();
}
}
result ^= values.hashCode();
return result;
} | 3.68 |
framework_MultiSelectionModelImpl_asMultiSelect | /**
* Gets a wrapper for using this grid as a multiselect in a binder.
*
* @return a multiselect wrapper for grid
*/
@Override
public MultiSelect<T> asMultiSelect() {
return new MultiSelect<T>() {
@Override
public void setValue(Set<T> value) {
Objects.requireNonNull(value);
Set<T> copy = value.stream().map(Objects::requireNonNull)
.collect(Collectors.toCollection(LinkedHashSet::new));
updateSelection(copy, new LinkedHashSet<>(getSelectedItems()));
}
@Override
public Set<T> getValue() {
return getSelectedItems();
}
@Override
public Registration addValueChangeListener(
com.vaadin.data.HasValue.ValueChangeListener<Set<T>> listener) {
return addSelectionListener(
event -> listener.valueChange(event));
}
@Override
public void setRequiredIndicatorVisible(
boolean requiredIndicatorVisible) {
// TODO support required indicator for grid ?
throw new UnsupportedOperationException(
"Required indicator is not supported in grid.");
}
@Override
public boolean isRequiredIndicatorVisible() {
// TODO support required indicator for grid ?
throw new UnsupportedOperationException(
"Required indicator is not supported in grid.");
}
@Override
public void setReadOnly(boolean readOnly) {
setUserSelectionAllowed(!readOnly);
}
@Override
public boolean isReadOnly() {
return !isUserSelectionAllowed();
}
@Override
public void updateSelection(Set<T> addedItems,
Set<T> removedItems) {
MultiSelectionModelImpl.this.updateSelection(addedItems,
removedItems);
}
@Override
public Set<T> getSelectedItems() {
return MultiSelectionModelImpl.this.getSelectedItems();
}
@Override
public Registration addSelectionListener(
MultiSelectionListener<T> listener) {
return MultiSelectionModelImpl.this
.addMultiSelectionListener(listener);
}
};
} | 3.68 |
flink_NetUtils_getIPv6UrlRepresentation | /**
* Creates a compressed URL style representation of an Inet6Address.
*
* <p>This method copies and adopts code from Google's Guava library. We re-implement this here
* in order to reduce dependency on Guava. The Guava library has frequently caused dependency
* conflicts in the past.
*/
private static String getIPv6UrlRepresentation(byte[] addressBytes) {
// first, convert bytes to 16 bit chunks
int[] hextets = new int[8];
for (int i = 0; i < hextets.length; i++) {
hextets[i] = (addressBytes[2 * i] & 0xFF) << 8 | (addressBytes[2 * i + 1] & 0xFF);
}
// now, find the sequence of zeros that should be compressed
int bestRunStart = -1;
int bestRunLength = -1;
int runStart = -1;
for (int i = 0; i < hextets.length + 1; i++) {
if (i < hextets.length && hextets[i] == 0) {
if (runStart < 0) {
runStart = i;
}
} else if (runStart >= 0) {
int runLength = i - runStart;
if (runLength > bestRunLength) {
bestRunStart = runStart;
bestRunLength = runLength;
}
runStart = -1;
}
}
if (bestRunLength >= 2) {
Arrays.fill(hextets, bestRunStart, bestRunStart + bestRunLength, -1);
}
// convert into text form
StringBuilder buf = new StringBuilder(40);
buf.append('[');
boolean lastWasNumber = false;
for (int i = 0; i < hextets.length; i++) {
boolean thisIsNumber = hextets[i] >= 0;
if (thisIsNumber) {
if (lastWasNumber) {
buf.append(':');
}
buf.append(Integer.toHexString(hextets[i]));
} else {
if (i == 0 || lastWasNumber) {
buf.append("::");
}
}
lastWasNumber = thisIsNumber;
}
buf.append(']');
return buf.toString();
} | 3.68 |
hadoop_CallableSupplier_waitForCompletion | /**
* Wait for a single future to complete, extracting IOEs afterwards.
* @param future future to wait for.
* @param <T> type
* @return the result
* @throws IOException if one of the called futures raised an IOE.
* @throws RuntimeException if one of the futures raised one.
*/
public static <T> T waitForCompletion(
final CompletableFuture<T> future)
throws IOException {
try (DurationInfo ignore =
new DurationInfo(LOG, false, "Waiting for task completion")) {
return future.join();
} catch (CancellationException e) {
throw new IOException(e);
} catch (CompletionException e) {
raiseInnerCause(e);
return null;
}
} | 3.68 |
hbase_OrderedBytes_encodeFloat32 | /**
* Encode a 32-bit floating point value using the fixed-length encoding. Encoding format is
* described at length in {@link #encodeFloat64(PositionedByteRange, double, Order)}.
* @return the number of bytes written.
* @see #decodeFloat32(PositionedByteRange)
* @see #encodeFloat64(PositionedByteRange, double, Order)
*/
public static int encodeFloat32(PositionedByteRange dst, float val, Order ord) {
final int offset = dst.getOffset(), start = dst.getPosition();
int i = Float.floatToIntBits(val);
i ^= ((i >> (Integer.SIZE - 1)) | Integer.MIN_VALUE);
dst.put(FIXED_FLOAT32).put((byte) (i >> 24)).put((byte) (i >> 16)).put((byte) (i >> 8))
.put((byte) i);
ord.apply(dst.getBytes(), offset + start, 5);
return 5;
} | 3.68 |
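A self-contained sketch of the bit trick used above, assuming only the transform shown in the snippet: positive floats get their sign bit flipped, negative floats get all bits flipped, so an unsigned comparison of the transformed bits follows the natural float ordering.

```java
public class OrderPreservingFloatDemo {
  // Same transform encodeFloat32 applies before writing the 4 bytes.
  static int transform(float val) {
    int i = Float.floatToIntBits(val);
    return i ^ ((i >> (Integer.SIZE - 1)) | Integer.MIN_VALUE);
  }

  public static void main(String[] args) {
    float[] ascending = {-3.5f, -0.0f, 0.0f, 1.25f, 7.0f};
    for (int k = 1; k < ascending.length; k++) {
      // Unsigned order of the transformed bits matches the float order: prints true each time.
      System.out.println(
          Integer.compareUnsigned(transform(ascending[k - 1]), transform(ascending[k])) < 0);
    }
  }
}
```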
hbase_ReplicationSourceWALReader_readWALEntries | // We need to get the WALEntryBatch from the caller so we can add entries in there
// This is required in case there is any exception while reading entries
// we do not want to lose the existing entries in the batch
protected void readWALEntries(WALEntryStream entryStream, WALEntryBatch batch)
throws InterruptedException {
Path currentPath = entryStream.getCurrentPath();
for (;;) {
Entry entry = entryStream.next();
batch.setLastWalPosition(entryStream.getPosition());
entry = filterEntry(entry);
if (entry != null) {
if (addEntryToBatch(batch, entry)) {
break;
}
}
WALEntryStream.HasNext hasNext = entryStream.hasNext();
// always return if we have switched to a new file
if (switched(entryStream, currentPath)) {
batch.setEndOfFile(true);
break;
}
if (hasNext != WALEntryStream.HasNext.YES) {
// For hasNext other than YES, it is OK to just retry.
// As for RETRY and RETRY_IMMEDIATELY, the correct action is to retry, and for NO, it will
// return NO again when you call the method next time, so it is OK to just return here and
// let the loop in the upper layer to call hasNext again.
break;
}
}
} | 3.68 |
framework_Range_withOnly | /**
* Creates a range object representing a single integer.
*
* @param integer
* the number to represent as a range
* @return the range represented by <code>integer</code>
*/
public static Range withOnly(final int integer) {
return new Range(integer, integer + 1);
} | 3.68 |
hbase_WALUtil_writeFlushMarker | /**
* Write a flush marker indicating a start / abort or a complete of a region flush
* <p/>
* This write is for internal use only. Not for external client consumption.
*/
public static WALKeyImpl writeFlushMarker(WAL wal, NavigableMap<byte[], Integer> replicationScope,
RegionInfo hri, final FlushDescriptor f, boolean sync, MultiVersionConcurrencyControl mvcc,
RegionReplicationSink sink) throws IOException {
WALKeyImpl walKey = doFullMarkerAppendTransaction(wal, replicationScope, hri,
WALEdit.createFlushWALEdit(hri, f), mvcc, null, sync, sink);
if (LOG.isTraceEnabled()) {
LOG.trace("Appended flush marker " + TextFormat.shortDebugString(f));
}
return walKey;
} | 3.68 |
open-banking-gateway_HbciConsentInfo_isPasswordPresentInConsent | /**
* Check that the password is present in the consent (needed for getting payment status without new interactive authorization).
*/
public boolean isPasswordPresentInConsent(HbciContext ctx) {
HbciConsent hbciDialogConsent = ctx.getHbciDialogConsent();
if (hbciDialogConsent == null) {
return false;
}
Credentials credentials = hbciDialogConsent.getCredentials();
if (credentials == null) {
return false;
}
return null != credentials.getPin();
} | 3.68 |
hadoop_MultipleInputs_getMapperTypeMap | /**
* Retrieves a map of {@link Path}s to the {@link Mapper} class that
* should be used for them.
*
* @param conf The configuration of the job
* @see #addInputPath(JobConf, Path, Class, Class)
* @return A map of paths to mappers for the job
*/
@SuppressWarnings("unchecked")
static Map<Path, Class<? extends Mapper>> getMapperTypeMap(JobConf conf) {
if (conf.get("mapreduce.input.multipleinputs.dir.mappers") == null) {
return Collections.emptyMap();
}
Map<Path, Class<? extends Mapper>> m = new HashMap<Path, Class<? extends Mapper>>();
String[] pathMappings = conf.get("mapreduce.input.multipleinputs.dir.mappers").split(",");
for (String pathMapping : pathMappings) {
String[] split = pathMapping.split(";");
Class<? extends Mapper> mapClass;
try {
mapClass = (Class<? extends Mapper>) conf.getClassByName(split[1]);
} catch (ClassNotFoundException e) {
throw new RuntimeException(e);
}
m.put(new Path(split[0]), mapClass);
}
return m;
} | 3.68 |
hmily_DbTypeUtils_buildByDriverClassName | /**
* Check the DB type from the JDBC driver class name.
* @param driverClassName driverClassName
* @return mysql, sqlserver, oracle or postgresql; null if the driver is not recognised.
*/
public static String buildByDriverClassName(final String driverClassName) {
String dbType = null;
if (driverClassName.contains(CommonConstant.DB_MYSQL)) {
dbType = CommonConstant.DB_MYSQL;
} else if (driverClassName.contains(CommonConstant.DB_SQLSERVER)) {
dbType = CommonConstant.DB_SQLSERVER;
} else if (driverClassName.contains(CommonConstant.DB_ORACLE)) {
dbType = CommonConstant.DB_ORACLE;
} else if (driverClassName.contains(CommonConstant.DB_POSTGRESQL)) {
dbType = CommonConstant.DB_POSTGRESQL;
}
return dbType;
} | 3.68 |
hadoop_ExitUtil_haltCalled | /**
* @return true if halt has been called.
*/
public static boolean haltCalled() {
// Either we set this member or we actually called Runtime#halt
return FIRST_HALT_EXCEPTION.get() != null;
} | 3.68 |
zxing_PDF417ResultMetadata_getTimestamp | /**
* Unix epoch timestamp, elapsed seconds since 1970-01-01
*
* @return elapsed seconds, -1 if not set
*/
public long getTimestamp() {
return timestamp;
} | 3.68 |
hmily_GsonUtils_getInstance | /**
* Gets instance.
*
* @return the instance
*/
public static GsonUtils getInstance() {
return INSTANCE;
} | 3.68 |
dubbo_ReactorServerCalls_manyToMany | /**
* Implements a stream -> stream call as Flux -> Flux
*
* @param responseObserver response StreamObserver
* @param func service implementation
* @return request StreamObserver
*/
public static <T, R> StreamObserver<T> manyToMany(
StreamObserver<R> responseObserver, Function<Flux<T>, Flux<R>> func) {
// responseObserver is also a subscription of publisher, we can use it to request more data
ServerTripleReactorPublisher<T> serverPublisher =
new ServerTripleReactorPublisher<T>((CallStreamObserver<R>) responseObserver);
try {
Flux<R> responseFlux = func.apply(Flux.from(serverPublisher));
ServerTripleReactorSubscriber<R> serverSubscriber =
responseFlux.subscribeWith(new ServerTripleReactorSubscriber<>());
serverSubscriber.subscribe((CallStreamObserver<R>) responseObserver);
serverPublisher.startRequest();
} catch (Throwable throwable) {
responseObserver.onError(throwable);
}
return serverPublisher;
} | 3.68 |
morf_MergeStatementBuilder_input | /**
* For updating existing records, references the new field value being merged, i.e. the value provided by the select.
* To reference the existing value being replaced, use {@link #existing(String)}.
*
* @param name Name of the referenced field.
* @return Reference to the new field value being merged.
*/
public AliasedField input(String name) {
return new MergeStatement.InputField(name);
} | 3.68 |
flink_SSLUtils_createSSLClientSocketFactory | /**
* Creates a factory for SSL Client Sockets from the given configuration. SSL Client Sockets are
* always part of internal communication.
*/
public static SocketFactory createSSLClientSocketFactory(Configuration config)
throws Exception {
SSLContext sslContext = createInternalSSLContext(config, true);
if (sslContext == null) {
throw new IllegalConfigurationException("SSL is not enabled");
}
return sslContext.getSocketFactory();
} | 3.68 |
hbase_RESTServer_run | /**
* Runs the REST server.
*/
public synchronized void run() throws Exception {
Pair<FilterHolder, Class<? extends ServletContainer>> pair =
loginServerPrincipal(userProvider, conf);
FilterHolder authFilter = pair.getFirst();
Class<? extends ServletContainer> containerClass = pair.getSecond();
RESTServlet servlet = RESTServlet.getInstance(conf, userProvider);
// set up the Jersey servlet container for Jetty
ResourceConfig application = new ResourceConfig().packages("org.apache.hadoop.hbase.rest")
.register(JacksonJaxbJsonProvider.class);
// Using our custom ServletContainer is tremendously important. This is what makes sure the
// UGI.doAs() is done for the remoteUser, and calls are not made as the REST server itself.
ServletContainer servletContainer = ReflectionUtils.newInstance(containerClass, application);
ServletHolder sh = new ServletHolder(servletContainer);
// Set the default max thread number to 100 to limit
// the number of concurrent requests so that REST server doesn't OOM easily.
// Jetty set the default max thread number to 250, if we don't set it.
//
// Our default min thread number 2 is the same as that used by Jetty.
int maxThreads = servlet.getConfiguration().getInt(REST_THREAD_POOL_THREADS_MAX, 100);
int minThreads = servlet.getConfiguration().getInt(REST_THREAD_POOL_THREADS_MIN, 2);
// Use the default queue (unbounded with Jetty 9.3) if the queue size is negative, otherwise use
// bounded {@link ArrayBlockingQueue} with the given size
int queueSize = servlet.getConfiguration().getInt(REST_THREAD_POOL_TASK_QUEUE_SIZE, -1);
int idleTimeout =
servlet.getConfiguration().getInt(REST_THREAD_POOL_THREAD_IDLE_TIMEOUT, 60000);
QueuedThreadPool threadPool = queueSize > 0
? new QueuedThreadPool(maxThreads, minThreads, idleTimeout,
new ArrayBlockingQueue<>(queueSize))
: new QueuedThreadPool(maxThreads, minThreads, idleTimeout);
this.server = new Server(threadPool);
// Setup JMX
MBeanContainer mbContainer = new MBeanContainer(ManagementFactory.getPlatformMBeanServer());
server.addEventListener(mbContainer);
server.addBean(mbContainer);
String host = servlet.getConfiguration().get("hbase.rest.host", "0.0.0.0");
int servicePort = servlet.getConfiguration().getInt("hbase.rest.port", 8080);
int httpHeaderCacheSize =
servlet.getConfiguration().getInt(HTTP_HEADER_CACHE_SIZE, DEFAULT_HTTP_HEADER_CACHE_SIZE);
HttpConfiguration httpConfig = new HttpConfiguration();
httpConfig.setSecureScheme("https");
httpConfig.setSecurePort(servicePort);
httpConfig.setHeaderCacheSize(httpHeaderCacheSize);
httpConfig.setRequestHeaderSize(DEFAULT_HTTP_MAX_HEADER_SIZE);
httpConfig.setResponseHeaderSize(DEFAULT_HTTP_MAX_HEADER_SIZE);
httpConfig.setSendServerVersion(false);
httpConfig.setSendDateHeader(false);
ServerConnector serverConnector;
boolean isSecure = false;
if (conf.getBoolean(REST_SSL_ENABLED, false)) {
isSecure = true;
HttpConfiguration httpsConfig = new HttpConfiguration(httpConfig);
httpsConfig.addCustomizer(new SecureRequestCustomizer());
SslContextFactory.Server sslCtxFactory = new SslContextFactory.Server();
String keystore = conf.get(REST_SSL_KEYSTORE_STORE);
String keystoreType = conf.get(REST_SSL_KEYSTORE_TYPE);
String password = HBaseConfiguration.getPassword(conf, REST_SSL_KEYSTORE_PASSWORD, null);
String keyPassword =
HBaseConfiguration.getPassword(conf, REST_SSL_KEYSTORE_KEYPASSWORD, password);
sslCtxFactory.setKeyStorePath(keystore);
if (StringUtils.isNotBlank(keystoreType)) {
sslCtxFactory.setKeyStoreType(keystoreType);
}
sslCtxFactory.setKeyStorePassword(password);
sslCtxFactory.setKeyManagerPassword(keyPassword);
String trustStore = conf.get(REST_SSL_TRUSTSTORE_STORE);
if (StringUtils.isNotBlank(trustStore)) {
sslCtxFactory.setTrustStorePath(trustStore);
}
String trustStorePassword =
HBaseConfiguration.getPassword(conf, REST_SSL_TRUSTSTORE_PASSWORD, null);
if (StringUtils.isNotBlank(trustStorePassword)) {
sslCtxFactory.setTrustStorePassword(trustStorePassword);
}
String trustStoreType = conf.get(REST_SSL_TRUSTSTORE_TYPE);
if (StringUtils.isNotBlank(trustStoreType)) {
sslCtxFactory.setTrustStoreType(trustStoreType);
}
String[] excludeCiphers = servlet.getConfiguration()
.getStrings(REST_SSL_EXCLUDE_CIPHER_SUITES, ArrayUtils.EMPTY_STRING_ARRAY);
if (excludeCiphers.length != 0) {
sslCtxFactory.setExcludeCipherSuites(excludeCiphers);
}
String[] includeCiphers = servlet.getConfiguration()
.getStrings(REST_SSL_INCLUDE_CIPHER_SUITES, ArrayUtils.EMPTY_STRING_ARRAY);
if (includeCiphers.length != 0) {
sslCtxFactory.setIncludeCipherSuites(includeCiphers);
}
String[] excludeProtocols = servlet.getConfiguration().getStrings(REST_SSL_EXCLUDE_PROTOCOLS,
ArrayUtils.EMPTY_STRING_ARRAY);
if (excludeProtocols.length != 0) {
sslCtxFactory.setExcludeProtocols(excludeProtocols);
}
String[] includeProtocols = servlet.getConfiguration().getStrings(REST_SSL_INCLUDE_PROTOCOLS,
ArrayUtils.EMPTY_STRING_ARRAY);
if (includeProtocols.length != 0) {
sslCtxFactory.setIncludeProtocols(includeProtocols);
}
serverConnector = new ServerConnector(server,
new SslConnectionFactory(sslCtxFactory, HttpVersion.HTTP_1_1.toString()),
new HttpConnectionFactory(httpsConfig));
} else {
serverConnector = new ServerConnector(server, new HttpConnectionFactory(httpConfig));
}
int acceptQueueSize = servlet.getConfiguration().getInt(REST_CONNECTOR_ACCEPT_QUEUE_SIZE, -1);
if (acceptQueueSize >= 0) {
serverConnector.setAcceptQueueSize(acceptQueueSize);
}
serverConnector.setPort(servicePort);
serverConnector.setHost(host);
server.addConnector(serverConnector);
server.setStopAtShutdown(true);
// set up context
ServletContextHandler ctxHandler =
new ServletContextHandler(server, "/", ServletContextHandler.SESSIONS);
ctxHandler.addServlet(sh, PATH_SPEC_ANY);
if (authFilter != null) {
ctxHandler.addFilter(authFilter, PATH_SPEC_ANY, EnumSet.of(DispatcherType.REQUEST));
}
// Load filters from configuration.
String[] filterClasses =
servlet.getConfiguration().getStrings(FILTER_CLASSES, GzipFilter.class.getName());
for (String filter : filterClasses) {
filter = filter.trim();
ctxHandler.addFilter(filter, PATH_SPEC_ANY, EnumSet.of(DispatcherType.REQUEST));
}
addCSRFFilter(ctxHandler, conf);
addClickjackingPreventionFilter(ctxHandler, conf);
addSecurityHeadersFilter(ctxHandler, conf, isSecure);
HttpServerUtil.constrainHttpMethods(ctxHandler, servlet.getConfiguration()
.getBoolean(REST_HTTP_ALLOW_OPTIONS_METHOD, REST_HTTP_ALLOW_OPTIONS_METHOD_DEFAULT));
// Put up info server.
int port = conf.getInt("hbase.rest.info.port", 8085);
if (port >= 0) {
conf.setLong("startcode", EnvironmentEdgeManager.currentTime());
String a = conf.get("hbase.rest.info.bindAddress", "0.0.0.0");
this.infoServer = new InfoServer("rest", a, port, false, conf);
this.infoServer.setAttribute("hbase.conf", conf);
this.infoServer.start();
}
// start server
server.start();
} | 3.68 |
graphhopper_StringEncodedValue_roundUp | /**
* @param value the value to be rounded
* @return the value rounded to the highest integer with the same number of leading zeros
*/
private static int roundUp(int value) {
return -1 >>> Integer.numberOfLeadingZeros(value);
} | 3.68 |
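A few worked values for the rule above, in a self-contained sketch (the helper simply repeats the one-liner so the outputs can be checked):

```java
public class RoundUpDemo {
  // Rounds a positive value up to the all-ones mask with the same bit length.
  static int roundUp(int value) {
    return -1 >>> Integer.numberOfLeadingZeros(value);
  }

  public static void main(String[] args) {
    System.out.println(roundUp(1));   // 1
    System.out.println(roundUp(5));   // 7
    System.out.println(roundUp(8));   // 15
    System.out.println(roundUp(200)); // 255
  }
}
```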
flink_DataStream_rescale | /**
* Sets the partitioning of the {@link DataStream} so that the output elements are distributed
* evenly to a subset of instances of the next operation in a round-robin fashion.
*
* <p>The subset of downstream operations to which the upstream operation sends elements depends
* on the degree of parallelism of both the upstream and downstream operation. For example, if
* the upstream operation has parallelism 2 and the downstream operation has parallelism 4, then
* one upstream operation would distribute elements to two downstream operations while the other
* upstream operation would distribute to the other two downstream operations. If, on the other
* hand, the downstream operation has parallelism 2 while the upstream operation has parallelism
* 4 then two upstream operations will distribute to one downstream operation while the other
* two upstream operations will distribute to the other downstream operations.
*
* <p>In cases where the different parallelisms are not multiples of each other one or several
* downstream operations will have a differing number of inputs from upstream operations.
*
* @return The DataStream with rescale partitioning set.
*/
@PublicEvolving
public DataStream<T> rescale() {
return setConnectionType(new RescalePartitioner<T>());
} | 3.68 |
hadoop_ManifestSuccessData_recordJobFailure | /**
* Note a failure by setting success flag to false,
* then add the exception to the diagnostics.
* @param thrown throwable
*/
public void recordJobFailure(Throwable thrown) {
setSuccess(false);
String stacktrace = ExceptionUtils.getStackTrace(thrown);
diagnostics.put(DiagnosticKeys.EXCEPTION, thrown.toString());
diagnostics.put(DiagnosticKeys.STACKTRACE, stacktrace);
} | 3.68 |
hudi_RunLengthDecoder_readDictionaryIdData | /**
* It is used to decode dictionary IDs.
*/
private void readDictionaryIdData(int total, WritableIntVector c, int rowId) {
int left = total;
while (left > 0) {
if (this.currentCount == 0) {
this.readNextGroup();
}
int n = Math.min(left, this.currentCount);
switch (mode) {
case RLE:
c.setInts(rowId, n, currentValue);
break;
case PACKED:
c.setInts(rowId, n, currentBuffer, currentBufferIdx);
currentBufferIdx += n;
break;
default:
throw new AssertionError();
}
rowId += n;
left -= n;
currentCount -= n;
}
} | 3.68 |
hadoop_StoreContext_incrementStatistic | /**
* Increment a statistic by a specific value.
* This increments both the instrumentation and storage statistics.
* @param statistic The operation to increment
* @param count the count to increment
*/
public void incrementStatistic(Statistic statistic, long count) {
instrumentation.incrementCounter(statistic, count);
} | 3.68 |
hbase_PrivateCellUtil_overlappingKeys | /**
* Returns true if the first range start1...end1 overlaps with the second range start2...end2,
* assuming the byte arrays represent row keys
*/
public static boolean overlappingKeys(final byte[] start1, final byte[] end1, final byte[] start2,
final byte[] end2) {
return (end2.length == 0 || start1.length == 0 || Bytes.compareTo(start1, end2) < 0)
&& (end1.length == 0 || start2.length == 0 || Bytes.compareTo(start2, end1) < 0);
} | 3.68 |
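A hedged usage sketch of the half-open overlap rule above (assumes the HBase classes are on the classpath; the single-character keys are purely for illustration):

```java
byte[] a = "a".getBytes(StandardCharsets.UTF_8);
byte[] b = "b".getBytes(StandardCharsets.UTF_8);
byte[] c = "c".getBytes(StandardCharsets.UTF_8);
byte[] d = "d".getBytes(StandardCharsets.UTF_8);
byte[] unbounded = new byte[0];

// [a, c) and [b, d) overlap; [a, b) and [b, d) do not, because end keys are exclusive.
PrivateCellUtil.overlappingKeys(a, c, b, d);         // true
PrivateCellUtil.overlappingKeys(a, b, b, d);         // false
PrivateCellUtil.overlappingKeys(unbounded, b, a, d); // true, an empty start key is unbounded
```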
querydsl_GuavaGroupBy_multimap | /**
* Create a new aggregating map expression using a backing LinkedHashMap
*
* @param key key for the map entries
* @param value value for the map entries
* @return wrapper expression
*/
public static <K, V, T, U> AbstractGroupExpression<Pair<K, V>, Multimap<T, U>> multimap(GroupExpression<K, T> key,
GroupExpression<V, U> value) {
return new GMultimap.Mixin<K, V, T, U, Multimap<T, U>>(key, value, GMultimap.createLinked(QPair.create(key, value)));
} | 3.68 |
hbase_MemStoreFlusher_registerFlushRequestListener | /**
* Register a MemstoreFlushListener
*/
@Override
public void registerFlushRequestListener(final FlushRequestListener listener) {
this.flushRequestListeners.add(listener);
} | 3.68 |
flink_DataStream_writeUsingOutputFormat | /**
* Writes the dataStream into an output, described by an OutputFormat.
*
* <p>The output is not participating in Flink's checkpointing!
*
* <p>For writing to a file system periodically, the use of the {@link
* org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink} is recommended.
*
* @param format The output format
* @return The closed DataStream
* @deprecated Please use the {@link
* org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink} explicitly
* using the {@link #addSink(SinkFunction)} method.
*/
@Deprecated
@PublicEvolving
public DataStreamSink<T> writeUsingOutputFormat(OutputFormat<T> format) {
return addSink(new OutputFormatSinkFunction<>(format));
} | 3.68 |