name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68) |
---|---|---|
flink_HiveParserSubQueryUtils_checkForTopLevelSubqueries | /*
* Given a TOK_SELECT, this checks that if there is a subquery
* it is a top-level expression; otherwise it throws an error
*/
public static void checkForTopLevelSubqueries(HiveParserASTNode selExprList)
throws SemanticException {
// should be either SELECT or SELECT DISTINCT
assert (selExprList.getType() == HiveASTParser.TOK_SELECT
|| selExprList.getType() == HiveASTParser.TOK_SELECTDI);
for (int i = 0; i < selExprList.getChildCount(); i++) {
HiveParserASTNode selExpr = (HiveParserASTNode) selExprList.getChild(i);
// could get either query hint or select expr
assert (selExpr.getType() == HiveASTParser.TOK_SELEXPR
|| selExpr.getType() == HiveASTParser.QUERY_HINT);
if (selExpr.getType() == HiveASTParser.QUERY_HINT) {
// skip query hints
continue;
}
if (selExpr.getChildCount() == 1
&& selExpr.getChild(0).getType() == HiveASTParser.TOK_SUBQUERY_EXPR) {
if (selExprList.getType() == HiveASTParser.TOK_SELECTDI) {
throw new SemanticException(
ErrorMsg.UNSUPPORTED_SUBQUERY_EXPRESSION.getMsg(
"Invalid subquery. Subquery with DISTINCT clause is not supported!"));
}
continue; // we are good since subquery is top level expression
}
// otherwise we need to make sure that there is no subquery at any level
for (int j = 0; j < selExpr.getChildCount(); j++) {
checkForSubqueries((HiveParserASTNode) selExpr.getChild(j));
}
}
} | 3.68 |
hmily_HmilyResourceManager_register | /**
* Register.
*
* @param hmilyTacResource the hmily resource
*/
public static void register(final HmilyTacResource hmilyTacResource) {
DATASOURCE_CACHE.put(hmilyTacResource.getResourceId(), hmilyTacResource);
} | 3.68 |
hbase_MobFileCache_evictFile | /**
* Evicts the cached file by the name.
* @param fileName The name of a cached file.
*/
public void evictFile(String fileName) {
if (isCacheEnabled) {
IdLock.Entry lockEntry = null;
try {
// obtains the lock to close the cached file.
lockEntry = keyLock.getLockEntry(hashFileName(fileName));
CachedMobFile evictedFile = map.remove(fileName);
if (evictedFile != null) {
evictedFile.close();
evictedFileCount.increment();
}
} catch (IOException e) {
LOG.error("Failed to evict the file " + fileName, e);
} finally {
if (lockEntry != null) {
keyLock.releaseLockEntry(lockEntry);
}
}
}
} | 3.68 |
hbase_HMaster_listNamespaces | /**
* List namespace names
* @return All namespace names
*/
public List<String> listNamespaces() throws IOException {
checkInitialized();
List<String> namespaces = new ArrayList<>();
if (cpHost != null) {
cpHost.preListNamespaces(namespaces);
}
for (NamespaceDescriptor namespace : clusterSchemaService.getNamespaces()) {
namespaces.add(namespace.getName());
}
if (cpHost != null) {
cpHost.postListNamespaces(namespaces);
}
return namespaces;
} | 3.68 |
hbase_RpcServer_getRequestUser | /**
* Returns the user credentials associated with the current RPC request or not present if no
* credentials were provided.
* @return A User
*/
public static Optional<User> getRequestUser() {
Optional<RpcCall> ctx = getCurrentCall();
return ctx.isPresent() ? ctx.get().getRequestUser() : Optional.empty();
} | 3.68 |
hadoop_AbfsHttpOperation_getConnOutputStream | /**
* Gets the connection output stream.
* @return output stream.
* @throws IOException
*/
OutputStream getConnOutputStream() throws IOException {
return connection.getOutputStream();
} | 3.68 |
hbase_HFileContext_heapSize | /**
* HeapSize implementation. NOTE: The heap size should be altered when new state variables are
* added.
* @return heap size of the HFileContext
*/
@Override
public long heapSize() {
long size = FIXED_OVERHEAD;
if (this.hfileName != null) {
size += ClassSize.STRING + this.hfileName.length();
}
if (this.columnFamily != null) {
size += ClassSize.sizeOfByteArray(this.columnFamily.length);
}
if (this.tableName != null) {
size += ClassSize.sizeOfByteArray(this.tableName.length);
}
return size;
} | 3.68 |
hudi_BootstrapExecutor_syncHive | /**
* Sync to Hive.
*/
private void syncHive() {
if (cfg.enableHiveSync || cfg.enableMetaSync) {
TypedProperties metaProps = new TypedProperties();
metaProps.putAll(props);
metaProps.put(META_SYNC_BASE_PATH.key(), cfg.targetBasePath);
metaProps.put(META_SYNC_BASE_FILE_FORMAT.key(), cfg.baseFileFormat);
if (props.getBoolean(HIVE_SYNC_BUCKET_SYNC.key(), HIVE_SYNC_BUCKET_SYNC.defaultValue())) {
metaProps.put(HIVE_SYNC_BUCKET_SYNC_SPEC.key(), HiveSyncConfig.getBucketSpec(props.getString(HoodieIndexConfig.BUCKET_INDEX_HASH_FIELD.key()),
props.getInteger(HoodieIndexConfig.BUCKET_INDEX_NUM_BUCKETS.key())));
}
try (HiveSyncTool hiveSyncTool = new HiveSyncTool(metaProps, configuration)) {
hiveSyncTool.syncHoodieTable();
}
}
} | 3.68 |
flink_PermanentBlobService_readFile | /**
* Returns the content of the file for the BLOB with the provided job ID and blob key.
*
* <p>Compared to {@code getFile}, {@code readFile} will attempt to read the entire file after
* retrieving it. If file reading and file retrieving are done under the same WRITE lock, it can
* avoid the scenario where the path to the file is deleted concurrently by other threads when
* the file is retrieved but not yet read.
*
* @param jobId ID of the job this blob belongs to
* @param key BLOB key associated with the requested file
* @return The content of the BLOB.
* @throws java.io.FileNotFoundException if the BLOB does not exist;
* @throws IOException if any other error occurs when retrieving the file.
*/
default byte[] readFile(JobID jobId, PermanentBlobKey key) throws IOException {
// The default implementation doesn't guarantee that the file won't be deleted concurrently
// by other threads while reading the contents.
return FileUtils.readAllBytes(getFile(jobId, key).toPath());
} | 3.68 |
hbase_KeyValue_getKeyValueDataStructureSize | /**
* Computes the number of bytes that a <code>KeyValue</code> instance with the provided
* characteristics would take up for its underlying data structure.
* @param klength key length
* @param vlength value length
* @param tagsLength total length of the tags
* @return the <code>KeyValue</code> data structure length
*/
public static long getKeyValueDataStructureSize(int klength, int vlength, int tagsLength) {
if (tagsLength == 0) {
return (long) KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE + klength + vlength;
}
return (long) KeyValue.KEYVALUE_WITH_TAGS_INFRASTRUCTURE_SIZE + klength + vlength + tagsLength;
} | 3.68 |
hmily_HmilyThreadFactory_create | /**
* create custom thread factory.
*
* @param namePrefix prefix
* @param daemon daemon
* @return {@linkplain ThreadFactory}
*/
public static ThreadFactory create(final String namePrefix, final boolean daemon) {
return new HmilyThreadFactory(namePrefix, daemon);
} | 3.68 |
hadoop_DynamicIOStatisticsBuilder_withLongFunctionGauge | /**
* Add a new evaluator to the gauge statistics.
* @param key key of this statistic
* @param eval evaluator for the statistic
* @return the builder.
*/
public DynamicIOStatisticsBuilder withLongFunctionGauge(String key,
ToLongFunction<String> eval) {
activeInstance().addGaugeFunction(key, eval::applyAsLong);
return this;
} | 3.68 |
hadoop_DiskBalancerWorkStatus_getPlanID | /**
* Returns planID.
*
* @return String
*/
public String getPlanID() {
return planID;
} | 3.68 |
morf_Criterion_exists | /**
* Helper method to create a new "EXISTS" expression.
*
* <blockquote><pre>
* SelectStatement stmt = new SelectStatement(new Table("agreement")).where(Criterion.eq(new Field("agreementnumber"), "A0001"));
* Criterion.exists(stmt);</pre></blockquote>
*
* @param selectStatement the select statement to evaluate
* @return a new Criterion object
*/
public static Criterion exists(SelectStatement selectStatement) {
return new Criterion(Operator.EXISTS, selectStatement);
} | 3.68 |
graphhopper_AlternativeRoute_addToMap | /**
* This method adds the traversal IDs of the specified path as a set to the specified map.
*/
AtomicInteger addToMap(GHIntObjectHashMap<IntSet> map, Path path) {
IntSet set = new GHIntHashSet();
final AtomicInteger startTID = new AtomicInteger(-1);
for (EdgeIteratorState iterState : path.calcEdges()) {
int tid = traversalMode.createTraversalId(iterState, false);
set.add(tid);
if (startTID.get() < 0) {
// for node based traversal we need to explicitly add base node as starting node and to list
if (!traversalMode.isEdgeBased()) {
tid = iterState.getBaseNode();
set.add(tid);
}
startTID.set(tid);
}
}
map.put(startTID.get(), set);
return startTID;
} | 3.68 |
flink_OSSTestCredentials_getOSSEndpoint | /**
* Get OSS endpoint used to connect.
*
* @return OSS endpoint
*/
public static String getOSSEndpoint() {
if (ENDPOINT != null) {
return ENDPOINT;
} else {
throw new IllegalStateException("OSS endpoint is not available");
}
} | 3.68 |
framework_VVideo_updateElementDynamicSize | /**
* Updates the dimensions of the widget.
*
* @param width
* width to set (in pixels)
* @param height
* height to set (in pixels)
*/
@SuppressWarnings("deprecation")
private void updateElementDynamicSize(int width, int height) {
video.getStyle().setWidth(width, Unit.PX);
video.getStyle().setHeight(height, Unit.PX);
Util.notifyParentOfSizeChange(this, true);
} | 3.68 |
querydsl_SQLExpressions_denseRank | /**
* As an aggregate function, DENSE_RANK calculates the dense rank of a hypothetical row identified
* by the arguments of the function with respect to a given sort specification. The arguments of
* the function must all evaluate to constant expressions within each aggregate group, because they
* identify a single row within each group. The constant argument expressions and the expressions
* in the order_by_clause of the aggregate match by position. Therefore, the number of arguments
* must be the same and types must be compatible.
*
* @param args arguments
* @return dense_rank(args)
*/
public static WithinGroup<Long> denseRank(Expression<?>... args) {
return new WithinGroup<Long>(Long.class, SQLOps.DENSERANK2, args);
} | 3.68 |
hadoop_WriteOperationHelper_operationRetried | /**
* Callback from {@link Invoker} when an operation is retried.
* @param text text of the operation
* @param ex exception
* @param retries number of retries
* @param idempotent is the method idempotent
*/
void operationRetried(String text, Exception ex, int retries,
boolean idempotent) {
LOG.info("{}: Retried {}: {}", text, retries, ex.toString());
LOG.debug("Stack", ex);
owner.operationRetried(text, ex, retries, idempotent);
} | 3.68 |
hadoop_AbstractDNSToSwitchMapping_isMappingSingleSwitch | /**
* Query for a {@link DNSToSwitchMapping} instance being on a single
* switch.
* <p>
* This predicate simply assumes that all mappings not derived from
* this class are multi-switch.
* @param mapping the mapping to query
* @return true if the base class says it is single switch, or the mapping
* is not derived from this class.
*/
public static boolean isMappingSingleSwitch(DNSToSwitchMapping mapping) {
return mapping != null && mapping instanceof AbstractDNSToSwitchMapping
&& ((AbstractDNSToSwitchMapping) mapping).isSingleSwitch();
} | 3.68 |
hadoop_TextInputWriter_writeUTF8 | // Write an object to the output stream using UTF-8 encoding
protected void writeUTF8(Object object) throws IOException {
byte[] bval;
int valSize;
if (object instanceof BytesWritable) {
BytesWritable val = (BytesWritable) object;
bval = val.getBytes();
valSize = val.getLength();
} else if (object instanceof Text) {
Text val = (Text) object;
bval = val.getBytes();
valSize = val.getLength();
} else {
String sval = object.toString();
bval = sval.getBytes(StandardCharsets.UTF_8);
valSize = bval.length;
}
clientOut.write(bval, 0, valSize);
} | 3.68 |
hbase_IndividualBytesFieldCell_getSequenceId | // 6) Sequence id
@Override
public long getSequenceId() {
return seqId;
} | 3.68 |
graphhopper_VectorTile_getGeometryList | /**
* <pre>
* Contains a stream of commands and parameters (vertices).
* A detailed description on geometry encoding is located in
* section 4.3 of the specification.
* </pre>
*
* <code>repeated uint32 geometry = 4 [packed = true];</code>
*/
public java.util.List<java.lang.Integer>
getGeometryList() {
return java.util.Collections.unmodifiableList(geometry_);
} | 3.68 |
morf_TableSetSchema_isEmptyDatabase | /**
* @see org.alfasoftware.morf.metadata.Schema#isEmptyDatabase()
*/
@Override
public boolean isEmptyDatabase() {
return tables.isEmpty();
} | 3.68 |
hadoop_CoderUtil_findFirstValidInput | /**
* Find the valid input from all the inputs.
* @param inputs input buffers to look for valid input
* @return the first valid input
*/
static <T> T findFirstValidInput(T[] inputs) {
for (T input : inputs) {
if (input != null) {
return input;
}
}
throw new HadoopIllegalArgumentException(
"Invalid inputs are found, all being null");
} | 3.68 |
rocketmq-connect_MemoryStateManagementServiceImpl_stop | /**
* Stop dependent services (if needed)
*/
@Override
public void stop() {
} | 3.68 |
flink_JobMasterId_toUUID | /** Creates a UUID with the bits from this JobMasterId. */
public UUID toUUID() {
return new UUID(getUpperPart(), getLowerPart());
} | 3.68 |
flink_BloomFilter_estimateFalsePositiveProbability | /**
* Compute the false positive probability based on the given number of input entries and bit size. Note: this
* is just the mathematical expected value; you should not expect the actual FPP in a real case to stay under the
* returned value with certainty.
*
* @param inputEntries the expected number of input entries
* @param bitSize the size of the bit array in bits
* @return the estimated false positive probability
*/
public static double estimateFalsePositiveProbability(long inputEntries, int bitSize) {
int numFunction = optimalNumOfHashFunctions(inputEntries, bitSize);
double p = Math.pow(Math.E, -(double) numFunction * inputEntries / bitSize);
double estimatedFPP = Math.pow(1 - p, numFunction);
return estimatedFPP;
} | 3.68 |
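The estimate above is the textbook Bloom filter analysis: with k hash functions, the probability that a given bit is still 0 after inserting n entries into m bits is p = e^(-kn/m), and a false positive requires all k probed bits to be set, giving (1-p)^k. The sketch below recomputes the same quantity in isolation; since `optimalNumOfHashFunctions` is not shown in the snippet, the k = round(m/n * ln 2) stand-in is an assumption for illustration.

```java
// Standalone sketch of the same estimate; the choice of k is an assumed stand-in
// for the optimalNumOfHashFunctions helper that is not shown above.
public class BloomFppSketch {
    static int assumedOptimalNumOfHashFunctions(long entries, long bits) {
        return Math.max(1, (int) Math.round((double) bits / entries * Math.log(2)));
    }

    static double estimateFpp(long inputEntries, int bitSize) {
        int k = assumedOptimalNumOfHashFunctions(inputEntries, bitSize);
        double p = Math.exp(-(double) k * inputEntries / bitSize);
        return Math.pow(1 - p, k);
    }

    public static void main(String[] args) {
        // 1,000,000 entries in an 8,000,000-bit filter: roughly a 2% expected FPP
        System.out.println(estimateFpp(1_000_000, 8_000_000));
    }
}
```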
AreaShop_BuyingRegionEvent_getPlayer | /**
* Get the player that is trying to buy the region.
* @return The player that is trying to buy the region
*/
public OfflinePlayer getPlayer() {
return player;
} | 3.68 |
dubbo_TreePathDynamicConfiguration_getConfigBasePath | /**
* Get the config base path from the specified {@link URL connection URL}
*
* @param url the specified {@link URL connection URL}
* @return non-null
*/
protected String getConfigBasePath(URL url) {
String configBasePath = url.getParameter(CONFIG_BASE_PATH_PARAM_NAME, DEFAULT_CONFIG_BASE_PATH);
if (StringUtils.isNotEmpty(configBasePath) && !configBasePath.startsWith(PATH_SEPARATOR)) {
configBasePath = PATH_SEPARATOR + configBasePath;
}
return configBasePath;
} | 3.68 |
flink_ResourceUri_getResourceType | /** Get resource type info. */
public ResourceType getResourceType() {
return resourceType;
} | 3.68 |
flink_OutputFormatBase_open | /** Open the format and initializes the flush system. */
@Override
public final void open(int taskNumber, int numTasks) {
throwable = new AtomicReference<>();
this.semaphore = new Semaphore(maxConcurrentRequests);
this.callback =
new FutureCallback<V>() {
@Override
public void onSuccess(V ignored) {
semaphore.release();
}
@Override
public void onFailure(Throwable t) {
throwable.compareAndSet(null, t);
LOG.error("Error while writing value.", t);
semaphore.release();
}
};
postOpen();
} | 3.68 |
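The open method above bounds in-flight asynchronous writes with a Semaphore: each write acquires a permit, the callback releases it on completion, and the first failure is remembered in the AtomicReference. Below is a minimal standalone sketch of that pattern; the class name and the CompletableFuture-based write are assumptions for illustration, not part of the snippet above.

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Semaphore;
import java.util.concurrent.atomic.AtomicReference;

public class BoundedAsyncWriter {
    private final Semaphore permits;
    private final AtomicReference<Throwable> firstFailure = new AtomicReference<>();

    BoundedAsyncWriter(int maxConcurrentRequests) {
        this.permits = new Semaphore(maxConcurrentRequests);
    }

    void write(CompletableFuture<Void> pendingWrite) throws InterruptedException {
        permits.acquire(); // blocks once maxConcurrentRequests writes are in flight
        pendingWrite.whenComplete((ignored, error) -> {
            if (error != null) {
                firstFailure.compareAndSet(null, error); // remember only the first error
            }
            permits.release();
        });
    }

    Throwable failure() {
        return firstFailure.get();
    }
}
```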
flink_IterativeDataSet_getMaxIterations | /**
* Gets the maximum number of iterations.
*
* @return The maximum number of iterations.
*/
public int getMaxIterations() {
return maxIterations;
} | 3.68 |
pulsar_MessageIdImpl_toByteArray | // batchIndex is -1 for a non-batched message and holds the batch index for a batch message
protected byte[] toByteArray(int batchIndex, int batchSize) {
MessageIdData msgId = writeMessageIdData(null, batchIndex, batchSize);
int size = msgId.getSerializedSize();
ByteBuf serialized = Unpooled.buffer(size, size);
msgId.writeTo(serialized);
return serialized.array();
} | 3.68 |
morf_CaseStatement_toString | /**
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder result = new StringBuilder("CASE ");
for (WhenCondition whenCondition : whenConditions) {
result.append(whenCondition).append(" ");
}
result.append("ELSE ").append(defaultValue);
result.append(" END");
result.append(super.toString());
return result.toString();
} | 3.68 |
hbase_TableDescriptorBuilder_setMergeEnabled | /**
* Sets the table region merge enable flag.
* @param isEnable True to enable region merge.
* @return the modifyable TD
*/
public ModifyableTableDescriptor setMergeEnabled(final boolean isEnable) {
return setValue(MERGE_ENABLED_KEY, Boolean.toString(isEnable));
} | 3.68 |
flink_NonSpanningWrapper_transferTo | /** Copies the data and transfers the "ownership" (i.e. clears current wrapper). */
void transferTo(ByteBuffer dst) {
segment.get(position, dst, remaining());
clear();
} | 3.68 |
hadoop_WrappedMapper_getInputSplit | /**
* Get the input split for this map.
*/
public InputSplit getInputSplit() {
return mapContext.getInputSplit();
} | 3.68 |
flink_JoinOperator_projectSecond | /**
* Continues a ProjectJoin transformation and adds fields of the second join input.
*
* <p>If the second join input is a {@link Tuple} {@link DataSet}, fields can be selected by
* their index. If the second join input is not a Tuple DataSet, no parameters should be
* passed.
*
* <p>Fields of the first and second input can be added by chaining the method calls of
* {@link
* org.apache.flink.api.java.operators.JoinOperator.JoinProjection#projectFirst(int...)} and
* {@link
* org.apache.flink.api.java.operators.JoinOperator.JoinProjection#projectSecond(int...)}.
*
* @param secondFieldIndexes If the second input is a Tuple DataSet, the indexes of the
* selected fields. For a non-Tuple DataSet, do not provide parameters. The order of
* fields in the output tuple is defined by the order of field indexes.
* @return An extended JoinProjection.
* @see Tuple
* @see DataSet
*/
protected JoinProjection<I1, I2> projectSecond(int... secondFieldIndexes) {
boolean isSecondTuple;
isSecondTuple = ds2.getType() instanceof TupleTypeInfo && secondFieldIndexes.length > 0;
if (!isSecondTuple && secondFieldIndexes.length != 0) {
// field index provided for non-Tuple input
throw new IllegalArgumentException(
"Input is not a Tuple. Call projectSecond() without arguments to include it.");
} else if (secondFieldIndexes.length > (22 - this.fieldIndexes.length)) {
// too many field indexes provided
throw new IllegalArgumentException(
"You may select only up to twenty-two (22) fields in total.");
}
int offset = this.fieldIndexes.length;
if (isSecondTuple) {
// extend index and flag arrays
this.fieldIndexes =
Arrays.copyOf(
this.fieldIndexes,
this.fieldIndexes.length + secondFieldIndexes.length);
this.isFieldInFirst =
Arrays.copyOf(
this.isFieldInFirst,
this.isFieldInFirst.length + secondFieldIndexes.length);
// copy field indexes
int maxFieldIndex = numFieldsDs2;
for (int i = 0; i < secondFieldIndexes.length; i++) {
// check if indexes in range
Preconditions.checkElementIndex(secondFieldIndexes[i], maxFieldIndex);
this.isFieldInFirst[offset + i] = false;
this.fieldIndexes[offset + i] = secondFieldIndexes[i];
}
} else {
// extend index and flag arrays
this.fieldIndexes = Arrays.copyOf(this.fieldIndexes, this.fieldIndexes.length + 1);
this.isFieldInFirst =
Arrays.copyOf(this.isFieldInFirst, this.isFieldInFirst.length + 1);
// add input object to output tuple
this.isFieldInFirst[offset] = false;
this.fieldIndexes[offset] = -1;
}
return this;
} | 3.68 |
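As a hedged usage sketch of the projection described above (assuming Flink's DataSet API with Tuple inputs; the field values are made up for illustration), chaining projectFirst and projectSecond defines the output tuple in call order:

```java
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple3;

public class ProjectJoinSketch {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Tuple2<Integer, String>> left =
                env.fromElements(Tuple2.of(1, "a"), Tuple2.of(2, "b"));
        DataSet<Tuple2<Integer, Double>> right =
                env.fromElements(Tuple2.of(1, 0.5), Tuple2.of(2, 1.5));

        // Output fields follow the order of the projectFirst/projectSecond calls:
        // (left.f0, left.f1, right.f1)
        DataSet<Tuple3<Integer, String, Double>> joined = left
                .join(right)
                .where(0)
                .equalTo(0)
                .projectFirst(0, 1)
                .projectSecond(1);

        joined.print();
    }
}
```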
hbase_HFileReaderImpl_prefetchComplete | /**
* Returns false if block prefetching was requested for this file and has not completed, true
* otherwise
*/
@Override
public boolean prefetchComplete() {
return PrefetchExecutor.isCompleted(path);
} | 3.68 |
hmily_PropertyKeyParse_isFrom | /**
* Checks whether this was created from the given source.
*
* @param from the source to check against
* @return true if the given source equals the one this was created from
*/
boolean isFrom(final T from) {
return Objects.equals(from, this.from);
} | 3.68 |
morf_AbstractSqlDialectTest_expectedWindowFunctionStatements | /**
* @return The expected SQL statements resulting from converting the elements of windowFunctions()
*/
protected List<String> expectedWindowFunctionStatements(){
String paddedNullOrder = StringUtils.isEmpty(nullOrder())? StringUtils.EMPTY : " "+nullOrder();
String paddedNullOrderDesc = StringUtils.isEmpty(nullOrder())? StringUtils.EMPTY : " "+nullOrderForDirection(Direction.DESCENDING);
return Lists.newArrayList(
"COUNT(*) OVER ()",
"COUNT(*) OVER (PARTITION BY field1)",
"SUM(field1) OVER (PARTITION BY field2, field3 ORDER BY field4"+paddedNullOrder+")",
"MAX(field1) OVER (PARTITION BY field2, field3 ORDER BY field4"+paddedNullOrder+")",
"MIN(field1) OVER (PARTITION BY field2, field3 ORDER BY field4 DESC"+paddedNullOrderDesc+", field5"+paddedNullOrder+")",
"MIN(field1) OVER ( ORDER BY field2"+paddedNullOrder+")",
"ROW_NUMBER() OVER (PARTITION BY field2, field3 ORDER BY field4"+paddedNullOrder+")",
"ROW_NUMBER() OVER ( ORDER BY field2"+paddedNullOrder+")",
"(SELECT MIN(field1) OVER ( ORDER BY field2"+paddedNullOrder+") AS window FROM "+tableName("srcTable")+")"
);
} | 3.68 |
flink_BinarySegmentUtils_copyToBytes | /**
* Copy segments to target byte[].
*
* @param segments Source segments.
* @param offset Source segments offset.
* @param bytes target byte[].
* @param bytesOffset target byte[] offset.
* @param numBytes the number of bytes to copy.
*/
public static byte[] copyToBytes(
MemorySegment[] segments, int offset, byte[] bytes, int bytesOffset, int numBytes) {
if (inFirstSegment(segments, offset, numBytes)) {
segments[0].get(offset, bytes, bytesOffset, numBytes);
} else {
copyMultiSegmentsToBytes(segments, offset, bytes, bytesOffset, numBytes);
}
return bytes;
} | 3.68 |
hbase_CoprocessorClassLoader_loadResourceUsingParentFirst | /**
* Determines whether we should attempt to load the given resource using the parent first before
* attempting to load the resource using this ClassLoader.
* @param name the name of the resource to test.
* @return true if we should attempt to load the resource using the parent first; false if we
* should attempt to load the resource using this ClassLoader first.
*/
protected boolean loadResourceUsingParentFirst(String name) {
for (Pattern resourcePattern : RESOURCE_LOAD_PARENT_FIRST_PATTERNS) {
if (resourcePattern.matcher(name).matches()) {
return true;
}
}
return false;
} | 3.68 |
flink_CollectIteratorAssert_hasReachedEnd | /**
* Whether all pointers have reached the end of lists.
*
* @return True if all pointers have reached the end.
*/
private boolean hasReachedEnd() {
for (RecordsFromSplit<T> recordsFromSplit : recordsFromSplits) {
if (recordsFromSplit.hasNext()) {
return false;
}
}
return true;
} | 3.68 |
morf_DatabaseDataSetProducer_records | /**
* @see org.alfasoftware.morf.dataset.DataSetProducer#records(java.lang.String)
*/
@Override
public Iterable<Record> records(String tableName) {
final Table table = getSchema().getTable(tableName);
return new Iterable<Record>() {
@Override
public Iterator<Record> iterator() {
List<String> columnOrdering = null;
for (Map.Entry<String, List<String>> entry : orderingOverrides.entrySet()) {
if (entry.getKey().equalsIgnoreCase(table.getName())) {
columnOrdering = entry.getValue();
break;
}
}
ResultSetIterator resultSetIterator = new ResultSetIterator(table, columnOrdering, connection, Optional.of(connectionResources), sqlDialect);
openResultSets.add(resultSetIterator);
return resultSetIterator;
}
};
} | 3.68 |
hadoop_FileIoProvider_getRandomAccessFile | /**
* Create a RandomAccessFile using
* {@link RandomAccessFile#RandomAccessFile(File, String)}.
*
* Wraps the created RandomAccessFile to intercept IO calls
* before delegating to the wrapped RandomAccessFile.
*
* @param volume target volume. null if unavailable.
* @param f File object.
* @param mode See {@link RandomAccessFile} for a description
* of the mode string.
* @return RandomAccessFile representing the given file.
* @throws FileNotFoundException
*/
public RandomAccessFile getRandomAccessFile(
@Nullable FsVolumeSpi volume, File f,
String mode) throws FileNotFoundException {
final long begin = profilingEventHook.beforeMetadataOp(volume, OPEN);
RandomAccessFile raf = null;
try {
faultInjectorEventHook.beforeMetadataOp(volume, OPEN);
raf = new WrappedRandomAccessFile(volume, f, mode);
profilingEventHook.afterMetadataOp(volume, OPEN, begin);
return raf;
} catch(Exception e) {
IOUtils.closeStream(raf);
onFailure(volume, begin);
throw e;
}
} | 3.68 |
hadoop_HsCountersPage_content | /**
* The content of this page is the CountersBlock now.
* @return CountersBlock.class
*/
@Override protected Class<? extends SubView> content() {
return CountersBlock.class;
} | 3.68 |
dubbo_CuratorZookeeperClient_getClient | /**
* Just for unit tests.
*
* @return the underlying CuratorFramework client
*/
CuratorFramework getClient() {
return client;
} | 3.68 |
hbase_MobFileCleanupUtil_archiveMobFiles | /**
* Archives the mob files.
* @param conf The current configuration.
* @param tableName The table name.
* @param family The name of the column family.
* @param storeFiles The files to be archived.
* @throws IOException exception
*/
private static void archiveMobFiles(Configuration conf, TableName tableName, Admin admin,
byte[] family, List<Path> storeFiles) throws IOException {
if (storeFiles.size() == 0) {
// nothing to remove
LOG.debug("Skipping archiving old MOB files - no files found for table={} cf={}", tableName,
Bytes.toString(family));
return;
}
Path mobTableDir = CommonFSUtils.getTableDir(MobUtils.getMobHome(conf), tableName);
FileSystem fs = storeFiles.get(0).getFileSystem(conf);
for (Path p : storeFiles) {
LOG.debug("MOB Cleaner is archiving: {}", p);
HFileArchiver.archiveStoreFile(conf, fs, MobUtils.getMobRegionInfo(tableName), mobTableDir,
family, p);
}
} | 3.68 |
hbase_Replication_startReplicationService | /**
* If replication is enabled and this cluster is a master, it starts the replication service
*/
@Override
public void startReplicationService() throws IOException {
this.replicationManager.init();
this.server.getChoreService().scheduleChore(new ReplicationStatisticsChore(
"ReplicationSourceStatistics", server, (int) TimeUnit.SECONDS.toMillis(statsPeriodInSecond)));
LOG.info("{} started", this.server.toString());
} | 3.68 |
framework_Escalator_fillAndPopulateEscalatorRowsIfNeeded | /**
* Adds new physical escalator rows to the DOM at the given index if
* there's still a need for more escalator rows.
* <p>
* If Escalator already is at (or beyond) max capacity, this method does
* nothing to the DOM.
*
* @param index
* the index at which to add new escalator rows.
* <em>Note:</em>It is assumed that the index is both the
* visual index and the logical index.
* @param numberOfRows
* the number of rows to add at <code>index</code>
* @return a list of the added rows
*/
private List<TableRowElement> fillAndPopulateEscalatorRowsIfNeeded(
final int index, final int numberOfRows) {
final int escalatorRowsStillFit = getMaxVisibleRowCount()
- getDomRowCount();
final int escalatorRowsNeeded = Math.min(numberOfRows,
escalatorRowsStillFit);
if (escalatorRowsNeeded > 0) {
final List<TableRowElement> addedRows = paintInsertStaticRows(
index, escalatorRowsNeeded);
visualRowOrder.addAll(index, addedRows);
double y = index * getDefaultRowHeight()
+ spacerContainer.getSpacerHeightsSumUntilIndex(index);
for (int i = index; i < visualRowOrder.size(); i++) {
final TableRowElement tr;
if (i - index < addedRows.size()) {
tr = addedRows.get(i - index);
} else {
tr = visualRowOrder.get(i);
}
setRowPosition(tr, 0, y);
y += getDefaultRowHeight();
y += spacerContainer.getSpacerHeight(i);
}
return addedRows;
} else {
return Collections.emptyList();
}
} | 3.68 |
hbase_MunkresAssignment_starInRow | /**
* Find a starred zero in a specified row. If there are no starred zeroes in the specified row,
* then null will be returned.
* @param r the index of the row to be searched
* @return pair of row and column indices of starred zero or null
*/
private Pair<Integer, Integer> starInRow(int r) {
for (int c = 0; c < cols; c++) {
if (mask[r][c] == STAR) {
return new Pair<>(r, c);
}
}
return null;
} | 3.68 |
framework_ServerRpcQueue_removeMatching | /**
* Removes any pending invocation of the given method from the queue.
*
* @param invocation
* The invocation to remove
*/
public void removeMatching(MethodInvocation invocation) {
Iterator<MethodInvocation> iter = pendingInvocations.values()
.iterator();
while (iter.hasNext()) {
MethodInvocation mi = iter.next();
if (mi.equals(invocation)) {
iter.remove();
}
}
} | 3.68 |
hmily_ConsistentHashSelector_select | /**
* Select singleton executor.
*
* @param key the key
* @return the singleton executor
*/
public SingletonExecutor select(final String key) {
byte[] digest = md5(key);
return selectForKey(hash(digest, 0));
} | 3.68 |
flink_CompactingHashTable_insertBucketEntryFromStart | /**
* IMPORTANT!!! We pass only the partition number, because we must make sure we get a fresh
* partition reference. The partition reference used during search for the key may have become
* invalid during the compaction.
*/
private void insertBucketEntryFromStart(
MemorySegment bucket,
int bucketInSegmentPos,
int hashCode,
long pointer,
int partitionNumber)
throws IOException {
boolean checkForResize = false;
// find the position to put the hash code and pointer
final int count = bucket.getInt(bucketInSegmentPos + HEADER_COUNT_OFFSET);
if (count < NUM_ENTRIES_PER_BUCKET) {
// we are good in our current bucket, put the values
bucket.putInt(
bucketInSegmentPos + BUCKET_HEADER_LENGTH + (count * HASH_CODE_LEN),
hashCode); // hash code
bucket.putLong(
bucketInSegmentPos + BUCKET_POINTER_START_OFFSET + (count * POINTER_LEN),
pointer); // pointer
bucket.putInt(bucketInSegmentPos + HEADER_COUNT_OFFSET, count + 1); // update count
} else {
// we need to go to the overflow buckets
final InMemoryPartition<T> p = this.partitions.get(partitionNumber);
final long originalForwardPointer =
bucket.getLong(bucketInSegmentPos + HEADER_FORWARD_OFFSET);
final long forwardForNewBucket;
if (originalForwardPointer != BUCKET_FORWARD_POINTER_NOT_SET) {
// forward pointer set
final int overflowSegNum = (int) (originalForwardPointer >>> 32);
final int segOffset = (int) originalForwardPointer;
final MemorySegment seg = p.overflowSegments[overflowSegNum];
final int obCount = seg.getInt(segOffset + HEADER_COUNT_OFFSET);
// check if there is space in this overflow bucket
if (obCount < NUM_ENTRIES_PER_BUCKET) {
// space in this bucket and we are done
seg.putInt(
segOffset + BUCKET_HEADER_LENGTH + (obCount * HASH_CODE_LEN),
hashCode); // hash code
seg.putLong(
segOffset + BUCKET_POINTER_START_OFFSET + (obCount * POINTER_LEN),
pointer); // pointer
seg.putInt(segOffset + HEADER_COUNT_OFFSET, obCount + 1); // update count
return;
} else {
// no space here, we need a new bucket. this current overflow bucket will be the
// target of the new overflow bucket
forwardForNewBucket = originalForwardPointer;
}
} else {
// no overflow bucket yet, so we need a first one
forwardForNewBucket = BUCKET_FORWARD_POINTER_NOT_SET;
}
// we need a new overflow bucket
MemorySegment overflowSeg;
final int overflowBucketNum;
final int overflowBucketOffset;
// first, see if there is space for an overflow bucket remaining in the last overflow
// segment
if (p.nextOverflowBucket == 0) {
// no space left in last bucket, or no bucket yet, so create an overflow segment
overflowSeg = getNextBuffer();
overflowBucketOffset = 0;
overflowBucketNum = p.numOverflowSegments;
// add the new overflow segment
if (p.overflowSegments.length <= p.numOverflowSegments) {
MemorySegment[] newSegsArray = new MemorySegment[p.overflowSegments.length * 2];
System.arraycopy(
p.overflowSegments, 0, newSegsArray, 0, p.overflowSegments.length);
p.overflowSegments = newSegsArray;
}
p.overflowSegments[p.numOverflowSegments] = overflowSeg;
p.numOverflowSegments++;
checkForResize = true;
} else {
// there is space in the last overflow bucket
overflowBucketNum = p.numOverflowSegments - 1;
overflowSeg = p.overflowSegments[overflowBucketNum];
overflowBucketOffset = p.nextOverflowBucket << NUM_INTRA_BUCKET_BITS;
}
// next overflow bucket is one ahead. if the segment is full, the next will be at the
// beginning
// of a new segment
p.nextOverflowBucket =
(p.nextOverflowBucket == this.bucketsPerSegmentMask
? 0
: p.nextOverflowBucket + 1);
// insert the new overflow bucket in the chain of buckets
// 1) set the old forward pointer
// 2) let the bucket in the main table point to this one
overflowSeg.putLong(overflowBucketOffset + HEADER_FORWARD_OFFSET, forwardForNewBucket);
final long pointerToNewBucket =
(((long) overflowBucketNum) << 32) | ((long) overflowBucketOffset);
bucket.putLong(bucketInSegmentPos + HEADER_FORWARD_OFFSET, pointerToNewBucket);
// finally, insert the values into the overflow buckets
overflowSeg.putInt(overflowBucketOffset + BUCKET_HEADER_LENGTH, hashCode); // hash code
overflowSeg.putLong(
overflowBucketOffset + BUCKET_POINTER_START_OFFSET, pointer); // pointer
// set the count to one
overflowSeg.putInt(overflowBucketOffset + HEADER_COUNT_OFFSET, 1);
if (checkForResize && !this.isResizing) {
// check if we should resize buckets
if (this.buckets.length <= getOverflowSegmentCount()) {
resizeHashTable();
}
}
}
} | 3.68 |
morf_DatabaseSchemaManager_viewCache | /**
* Returns the cached set of views in the database.
*/
private Map<String, View> viewCache(ProducerCache producerCache) {
if (!viewsLoaded.get()) {
cacheViews(producerCache.get().getSchema().views());
}
return views.get();
} | 3.68 |
morf_Function_power | /**
* Helper method to create a function for raising one argument to the power of another.
*<p>
* Example : power(10,3) would become 1000
*</p>
* @param operand1 the base
* @param operand2 the exponent
* @return an instance of the power function.
*/
public static Function power(AliasedField operand1, AliasedField operand2) {
return new Function(FunctionType.POWER, operand1, operand2);
} | 3.68 |
morf_SelectStatement_groupBy | /**
* Specifies that the records should be grouped by the specified fields.
*
* <blockquote><pre>
* select()
* .from(tableRef("Foo"))
* .groupBy(groupByFields);</pre></blockquote>
*
* @param fields the fields to group by
* @return a new select statement with the change applied.
*/
public SelectStatement groupBy(Iterable<? extends AliasedFieldBuilder> fields) {
return copyOnWriteOrMutate(
(SelectStatementBuilder b) -> b.groupBy(fields),
() -> {
if (fields == null) {
throw new IllegalArgumentException("Field was null in group by clause");
}
// Add the list
groupBys.addAll(Builder.Helper.buildAll(fields));
}
);
} | 3.68 |
hbase_RegionSplitter_main | /**
* The main function for the RegionSplitter application. Common uses:
* <p>
* <ul>
* <li>create a table named 'myTable' with 60 pre-split regions containing 2 column families
* 'test' & 'rs', assuming the keys are hex-encoded ASCII:
* <ul>
* <li>bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -c 60 -f test:rs myTable
* HexStringSplit
* </ul>
* <li>create a table named 'myTable' with 50 pre-split regions, assuming the keys are
* decimal-encoded ASCII:
* <ul>
* <li>bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -c 50 myTable DecimalStringSplit
* </ul>
* <li>perform a rolling split of 'myTable' (i.e. 60 => 120 regions), # 2 outstanding splits at
* a time, assuming keys are uniformly distributed bytes:
* <ul>
* <li>bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -r -o 2 myTable UniformSplit
* </ul>
* </ul>
* There are three SplitAlgorithms built into RegionSplitter, HexStringSplit, DecimalStringSplit,
* and UniformSplit. These are different strategies for choosing region boundaries. See their
* source code for details. Usage: RegionSplitter <TABLE> <SPLITALGORITHM> <-c
* <# regions> -f <family:family:...> | -r [-o <# outstanding splits>]> [-D
* <conf.param=value>]
* @throws IOException HBase IO problem
* @throws InterruptedException user requested exit
* @throws ParseException problem parsing user input
*/
@SuppressWarnings("static-access")
public static void main(String[] args) throws IOException, InterruptedException, ParseException {
Configuration conf = HBaseConfiguration.create();
// parse user input
Options opt = new Options();
opt.addOption(OptionBuilder.withArgName("property=value").hasArg()
.withDescription("Override HBase Configuration Settings").create("D"));
opt.addOption(OptionBuilder.withArgName("region count").hasArg()
.withDescription("Create a new table with a pre-split number of regions").create("c"));
opt.addOption(OptionBuilder.withArgName("family:family:...").hasArg()
.withDescription("Column Families to create with new table. Required with -c").create("f"));
opt.addOption("h", false, "Print this usage help");
opt.addOption("r", false, "Perform a rolling split of an existing region");
opt.addOption(OptionBuilder.withArgName("count").hasArg()
.withDescription("Max outstanding splits that have unfinished major compactions")
.create("o"));
opt.addOption(null, "firstrow", true, "First Row in Table for Split Algorithm");
opt.addOption(null, "lastrow", true, "Last Row in Table for Split Algorithm");
opt.addOption(null, "risky", false, "Skip verification steps to complete quickly. "
+ "STRONGLY DISCOURAGED for production systems. ");
CommandLine cmd = new GnuParser().parse(opt, args);
if (cmd.hasOption("D")) {
for (String confOpt : cmd.getOptionValues("D")) {
String[] kv = confOpt.split("=", 2);
if (kv.length == 2) {
conf.set(kv[0], kv[1]);
LOG.debug("-D configuration override: " + kv[0] + "=" + kv[1]);
} else {
throw new ParseException("-D option format invalid: " + confOpt);
}
}
}
if (cmd.hasOption("risky")) {
conf.setBoolean("split.verify", false);
}
boolean createTable = cmd.hasOption("c") && cmd.hasOption("f");
boolean rollingSplit = cmd.hasOption("r");
boolean oneOperOnly = createTable ^ rollingSplit;
if (2 != cmd.getArgList().size() || !oneOperOnly || cmd.hasOption("h")) {
new HelpFormatter().printHelp("bin/hbase regionsplitter <TABLE> <SPLITALGORITHM>\n"
+ "SPLITALGORITHM is the java class name of a class implementing "
+ "SplitAlgorithm, or one of the special strings HexStringSplit or "
+ "DecimalStringSplit or UniformSplit, which are built-in split algorithms. "
+ "HexStringSplit treats keys as hexadecimal ASCII, and "
+ "DecimalStringSplit treats keys as decimal ASCII, and "
+ "UniformSplit treats keys as arbitrary bytes.", opt);
return;
}
TableName tableName = TableName.valueOf(cmd.getArgs()[0]);
String splitClass = cmd.getArgs()[1];
SplitAlgorithm splitAlgo = newSplitAlgoInstance(conf, splitClass);
if (cmd.hasOption("firstrow")) {
splitAlgo.setFirstRow(cmd.getOptionValue("firstrow"));
}
if (cmd.hasOption("lastrow")) {
splitAlgo.setLastRow(cmd.getOptionValue("lastrow"));
}
if (createTable) {
conf.set("split.count", cmd.getOptionValue("c"));
createPresplitTable(tableName, splitAlgo, cmd.getOptionValue("f").split(":"), conf);
}
if (rollingSplit) {
if (cmd.hasOption("o")) {
conf.set("split.outstanding", cmd.getOptionValue("o"));
}
rollingSplit(tableName, splitAlgo, conf);
}
} | 3.68 |
flink_DataTypeTemplate_isAllowRawGlobally | /** Returns whether RAW types are allowed everywhere. */
boolean isAllowRawGlobally() {
return allowRawGlobally != null && allowRawGlobally;
} | 3.68 |
morf_SqlParameter_parameter | /**
* Constructs a new SQL named parameter from a column.
*
* @param column the parameter column.
* @return Builder.
*/
public static Builder parameter(Column column) {
return parameter(column.getName())
.type(column.getType())
.width(column.getWidth(), column.getScale());
} | 3.68 |
flink_Transformation_setUidHash | /**
* Sets a user-provided hash for this operator. This will be used AS IS to create the
* JobVertexID.
*
* <p>The user provided hash is an alternative to the generated hashes, that is considered when
* identifying an operator through the default hash mechanics fails (e.g. because of changes
* between Flink versions).
*
* <p><strong>Important</strong>: this should be used as a workaround or for troubleshooting.
* The provided hash needs to be unique per transformation and job. Otherwise, job submission
* will fail. Furthermore, you cannot assign user-specified hash to intermediate nodes in an
* operator chain and trying so will let your job fail.
*
* <p>A use case for this is in migration between Flink versions or changing the jobs in a way
* that changes the automatically generated hashes. In this case, providing the previous hashes
* directly through this method (e.g. obtained from old logs) can help to reestablish a lost
* mapping from states to their target operator.
*
* @param uidHash The user provided hash for this operator. This will become the JobVertexID,
* which is shown in the logs and web ui.
*/
public void setUidHash(String uidHash) {
Preconditions.checkNotNull(uidHash);
Preconditions.checkArgument(
uidHash.matches("^[0-9A-Fa-f]{32}$"),
"Node hash must be a 32 character String that describes a hex code. Found: "
+ uidHash);
this.userProvidedNodeHash = uidHash;
} | 3.68 |
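A hedged usage sketch: in practice this is usually reached through the DataStream API's setUidHash (an assumption here, since only the Transformation method is shown), and the value must satisfy the 32-character hex constraint enforced above. The hash string and job below are made up for illustration.

```java
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class UidHashSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<String> upper = env
                .fromElements("a", "b", "c")
                .map(value -> value.toUpperCase())
                .returns(String.class)
                // Must be exactly 32 hex characters and unique per operator and job;
                // this value is made up for illustration.
                .setUidHash("abcdef0123456789abcdef0123456789");

        upper.print();
        env.execute("uid-hash-sketch");
    }
}
```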
hudi_ConsistentBucketIdentifier_splitBucket | /**
* Split the bucket at the middle of its range and generate the corresponding file ids
*
* TODO support different split criteria, e.g., distribute records evenly using statistics
*
* @param bucket parent bucket
* @return lists of children buckets
*/
public Option<List<ConsistentHashingNode>> splitBucket(@NotNull ConsistentHashingNode bucket) {
ConsistentHashingNode formerBucket = getFormerBucket(bucket.getValue());
long mid = (long) formerBucket.getValue() + bucket.getValue()
+ (formerBucket.getValue() < bucket.getValue() ? 0 : (HoodieConsistentHashingMetadata.HASH_VALUE_MASK + 1L));
mid = (mid >> 1) & HoodieConsistentHashingMetadata.HASH_VALUE_MASK;
// Cannot split as it already is the smallest bucket range
if (mid == formerBucket.getValue() || mid == bucket.getValue()) {
return Option.empty();
}
return Option.of(Arrays.asList(
new ConsistentHashingNode((int) mid, FSUtils.createNewFileIdPfx(), ConsistentHashingNode.NodeTag.REPLACE),
new ConsistentHashingNode(bucket.getValue(), FSUtils.createNewFileIdPfx(), ConsistentHashingNode.NodeTag.REPLACE))
);
} | 3.68 |
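The midpoint computation above works on a hash ring: when the former bucket's value is not smaller than the current one, the range wraps past the maximum hash value, so one full ring length is added before halving and masking back into range. Below is a minimal standalone sketch of that arithmetic; the 31-bit hash space is an assumption, since the actual value of HoodieConsistentHashingMetadata.HASH_VALUE_MASK is not shown in the snippet.

```java
// Ring-midpoint arithmetic as used above; MASK is an assumed 31-bit hash space
// for illustration only.
public class RingMidpointSketch {
    static final long MASK = 0x7FFFFFFFL;

    static long midpoint(long former, long current) {
        long mid = former + current + (former < current ? 0 : MASK + 1L);
        return (mid >> 1) & MASK;
    }

    public static void main(String[] args) {
        System.out.println(midpoint(100, 200));        // 150: plain midpoint, no wrap
        System.out.println(midpoint(MASK - 100, 200)); // 49: range wraps past the maximum value
    }
}
```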
zxing_UPCEReader_convertUPCEtoUPCA | /**
* Expands a UPC-E value back into its full, equivalent UPC-A code value.
*
* @param upce UPC-E code as string of digits
* @return equivalent UPC-A code as string of digits
*/
public static String convertUPCEtoUPCA(String upce) {
char[] upceChars = new char[6];
upce.getChars(1, 7, upceChars, 0);
StringBuilder result = new StringBuilder(12);
result.append(upce.charAt(0));
char lastChar = upceChars[5];
switch (lastChar) {
case '0':
case '1':
case '2':
result.append(upceChars, 0, 2);
result.append(lastChar);
result.append("0000");
result.append(upceChars, 2, 3);
break;
case '3':
result.append(upceChars, 0, 3);
result.append("00000");
result.append(upceChars, 3, 2);
break;
case '4':
result.append(upceChars, 0, 4);
result.append("00000");
result.append(upceChars[4]);
break;
default:
result.append(upceChars, 0, 5);
result.append("0000");
result.append(lastChar);
break;
}
// Only append check digit in conversion if supplied
if (upce.length() >= 8) {
result.append(upce.charAt(7));
}
return result.toString();
} | 3.68 |
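A hedged worked example of the default branch above (last body digit 5-9): the digits are made up for illustration, and the trailing character is simply the supplied check digit copied through unvalidated, exactly as in the method.

```java
import com.google.zxing.oned.UPCEReader;

public class UpcExpansionSketch {
    public static void main(String[] args) {
        // "01234565": number system 0, body 123456, supplied check digit 5.
        // The last body digit '6' hits the default branch, so the expansion is
        // "0" + "12345" + "0000" + "6", then the check digit is appended.
        System.out.println(UPCEReader.convertUPCEtoUPCA("01234565")); // 012345000065
    }
}
```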
framework_Page_setTitle | /**
* Sets the page title. The page title is displayed by the browser e.g. as
* the title of the browser window or as the title of the tab.
* <p>
* If this value is set to null, the previously set page title will be left
* as-is. Set to empty string to clear the title.
*
* @param title
* the page title to set
*/
public void setTitle(String title) {
getState(true).title = title;
} | 3.68 |
hadoop_OBSPosixBucketUtils_fsRemoveKeysByDepth | // Batch delete sub objects one depth at a time to avoid having parents and
// children in the same batch.
// A batch deletion might be split into several concurrent deletions to improve
// performance, but it can't guarantee that an object is deleted before its children.
private static void fsRemoveKeysByDepth(final OBSFileSystem owner,
final FileStatus[] arFileStatus)
throws ObsException, IOException {
if (arFileStatus.length <= 0) {
// exit fast if there is no keys to delete
return;
}
// Find all leaf keys in the list.
String key;
int depth = Integer.MAX_VALUE;
List<KeyAndVersion> leafKeys = new ArrayList<>(
owner.getMaxEntriesToDelete());
for (int idx = arFileStatus.length - 1; idx >= 0; idx--) {
if (leafKeys.size() >= owner.getMaxEntriesToDelete()) {
OBSCommonUtils.removeKeys(owner, leafKeys, true, false);
}
key = OBSCommonUtils.pathToKey(owner, arFileStatus[idx].getPath());
// Check file.
if (!arFileStatus[idx].isDirectory()) {
// A file must be a leaf.
leafKeys.add(new KeyAndVersion(key, null));
continue;
}
// Check leaf folder at current depth.
int keyDepth = fsGetObjectKeyDepth(key);
if (keyDepth == depth) {
// Any key at current depth must be a leaf.
leafKeys.add(new KeyAndVersion(key, null));
continue;
}
if (keyDepth < depth) {
// The last batch delete at current depth.
OBSCommonUtils.removeKeys(owner, leafKeys, true, false);
// Go on at the upper depth.
depth = keyDepth;
leafKeys.add(new KeyAndVersion(key, null));
continue;
}
LOG.warn(
"The objects list is invalid because it isn't sorted by"
+ " path depth.");
throw new ObsException("System failure");
}
// The last batch delete at the minimum depth of all keys.
OBSCommonUtils.removeKeys(owner, leafKeys, true, false);
} | 3.68 |
hadoop_FilterFileSystem_startLocalOutput | /**
* Returns a local File that the user can write output to. The caller
* provides both the eventual FS target name and the local working
* file. If the FS is local, we write directly into the target. If
* the FS is remote, we write into the tmp local area.
*/
@Override
public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
throws IOException {
return fs.startLocalOutput(fsOutputFile, tmpLocalFile);
} | 3.68 |
rocketmq-connect_PositionStorageWriter_beginFlush | /**
* Begin flushing offsets.
*
* @return true if there is data to flush, false if there is nothing to flush
*/
public synchronized boolean beginFlush() {
if (isFlushing()) {
throw new ConnectException("PositionStorageWriter is already flushing");
}
if (data.isEmpty()) {
return false;
}
this.toFlush = this.data;
this.data = new HashMap<>();
return true;
} | 3.68 |
hadoop_ByteArrayDecodingState_convertToByteBufferState | /**
* Convert to a ByteBufferDecodingState when it's backed by on-heap arrays.
*/
ByteBufferDecodingState convertToByteBufferState() {
ByteBuffer[] newInputs = new ByteBuffer[inputs.length];
ByteBuffer[] newOutputs = new ByteBuffer[outputs.length];
for (int i = 0; i < inputs.length; i++) {
newInputs[i] = CoderUtil.cloneAsDirectByteBuffer(inputs[i],
inputOffsets[i], decodeLength);
}
for (int i = 0; i < outputs.length; i++) {
newOutputs[i] = ByteBuffer.allocateDirect(decodeLength);
}
ByteBufferDecodingState bbdState = new ByteBufferDecodingState(decoder,
decodeLength, erasedIndexes, newInputs, newOutputs);
return bbdState;
} | 3.68 |
AreaShop_RentRegion_getTimeLeft | /**
* Get the time that is left on the region.
* @return The time left on the region
*/
public long getTimeLeft() {
if(isRented()) {
return this.getRentedUntil() - Calendar.getInstance().getTimeInMillis();
} else {
return 0;
}
} | 3.68 |
dubbo_DefaultFilterChainBuilder_buildClusterInvokerChain | /**
* build consumer cluster filter chain
*/
@Override
public <T> ClusterInvoker<T> buildClusterInvokerChain(
final ClusterInvoker<T> originalInvoker, String key, String group) {
ClusterInvoker<T> last = originalInvoker;
URL url = originalInvoker.getUrl();
List<ModuleModel> moduleModels = getModuleModelsFromUrl(url);
List<ClusterFilter> filters;
if (moduleModels != null && moduleModels.size() == 1) {
filters = ScopeModelUtil.getExtensionLoader(ClusterFilter.class, moduleModels.get(0))
.getActivateExtension(url, key, group);
} else if (moduleModels != null && moduleModels.size() > 1) {
filters = new ArrayList<>();
List<ExtensionDirector> directors = new ArrayList<>();
for (ModuleModel moduleModel : moduleModels) {
List<ClusterFilter> tempFilters = ScopeModelUtil.getExtensionLoader(ClusterFilter.class, moduleModel)
.getActivateExtension(url, key, group);
filters.addAll(tempFilters);
directors.add(moduleModel.getExtensionDirector());
}
filters = sortingAndDeduplication(filters, directors);
} else {
filters =
ScopeModelUtil.getExtensionLoader(ClusterFilter.class, null).getActivateExtension(url, key, group);
}
if (!CollectionUtils.isEmpty(filters)) {
for (int i = filters.size() - 1; i >= 0; i--) {
final ClusterFilter filter = filters.get(i);
final Invoker<T> next = last;
last = new CopyOfClusterFilterChainNode<>(originalInvoker, next, filter);
}
return new ClusterCallbackRegistrationInvoker<>(originalInvoker, last, filters);
}
return last;
} | 3.68 |
flink_SharedBuffer_flushCache | /**
* Flush the event and node from cache to state.
*
* @throws Exception Thrown if the system cannot access the state.
*/
void flushCache() throws Exception {
if (!entryCache.asMap().isEmpty()) {
entries.putAll(entryCache.asMap());
entryCache.invalidateAll();
}
if (!eventsBufferCache.asMap().isEmpty()) {
eventsBuffer.putAll(eventsBufferCache.asMap());
eventsBufferCache.invalidateAll();
}
} | 3.68 |
flink_DynamicTableFactory_getPhysicalRowDataType | /**
* Returns the physical schema to use for encoding and decoding records. The returned row
* data type contains only physical columns. It does not include computed or metadata
* columns. A factory can use the returned data type to configure the table connector, and
* can manipulate it using the {@link DataType} static methods:
*
* <pre>{@code
* // Project some fields into a new data type
* DataType projectedDataType = Projection.of(projectedIndexes)
* .project(context.getPhysicalRowDataType());
*
* // Create key data type
* DataType keyDataType = Projection.of(context.getPrimaryKeyIndexes())
* .project(context.getPhysicalRowDataType());
*
* // Create a new data type filtering columns of the original data type
* DataType myOwnDataType = DataTypes.ROW(
* DataType.getFields(context.getPhysicalRowDataType())
* .stream()
* .filter(myFieldFilterPredicate)
* .toArray(DataTypes.Field[]::new))
* }</pre>
*
* <p>Shortcut for {@code getCatalogTable().getResolvedSchema().toPhysicalRowDataType()}.
*
* @see ResolvedSchema#toPhysicalRowDataType()
*/
default DataType getPhysicalRowDataType() {
return getCatalogTable().getResolvedSchema().toPhysicalRowDataType();
} | 3.68 |
hbase_StoreFileInfo_computeRefFileHDFSBlockDistribution | /**
* Helper function to compute the HDFS blocks distribution of a given reference file. For a reference
* file, we don't compute the exact value; we use an estimate instead, given it might be good
* enough. We assume the bottom part takes the first half of the reference file and the top part takes the second
* half. This is just an estimate, given that the midkey of the region != the midkey of the HFile,
* and the number and size of keys vary. If this estimate isn't good enough, we can improve it
* later.
* @param fs The FileSystem
* @param reference The reference
* @param status The reference FileStatus
* @return HDFS blocks distribution
*/
private static HDFSBlocksDistribution computeRefFileHDFSBlockDistribution(final FileSystem fs,
final Reference reference, final FileStatus status) throws IOException {
if (status == null) {
return null;
}
long start = 0;
long length = 0;
if (Reference.isTopFileRegion(reference.getFileRegion())) {
start = status.getLen() / 2;
length = status.getLen() - status.getLen() / 2;
} else {
start = 0;
length = status.getLen() / 2;
}
return FSUtils.computeHDFSBlocksDistribution(fs, status, start, length);
} | 3.68 |
framework_TextFileProperty_getValue | /*
* (non-Javadoc)
*
* @see com.vaadin.data.Property#getValue()
*/
@Override
public String getValue() {
if (file == null) {
return null;
}
try {
FileInputStream fis = new FileInputStream(file);
InputStreamReader isr = charset == null ? new InputStreamReader(fis)
: new InputStreamReader(fis, charset);
BufferedReader r = new BufferedReader(isr);
StringBuilder b = new StringBuilder();
char[] buf = new char[8 * 1024];
int len;
while ((len = r.read(buf)) != -1) {
b.append(buf, 0, len);
}
r.close();
isr.close();
fis.close();
return b.toString();
} catch (FileNotFoundException e) {
return null;
} catch (IOException e) {
throw new RuntimeException(e);
}
} | 3.68 |
flink_TaskExecutionState_getID | /**
* Returns the ID of the task this result belongs to
*
* @return the ID of the task this result belongs to
*/
public ExecutionAttemptID getID() {
return this.executionId;
} | 3.68 |
flink_StopWithSavepointTerminationHandlerImpl_terminateSuccessfully | /**
* Handles the successful termination of the {@code StopWithSavepointTerminationHandler}.
*
* @param completedSavepoint the completed savepoint
*/
private void terminateSuccessfully(CompletedCheckpoint completedSavepoint) {
result.complete(completedSavepoint.getExternalPointer());
} | 3.68 |
flink_NetworkBufferPool_tryRedistributeBuffers | // Must be called from synchronized block
private void tryRedistributeBuffers(int numberOfSegmentsToRequest) throws IOException {
assert Thread.holdsLock(factoryLock);
if (numTotalRequiredBuffers + numberOfSegmentsToRequest > totalNumberOfMemorySegments) {
throw new IOException(
String.format(
"Insufficient number of network buffers: "
+ "required %d, but only %d available. %s.",
numberOfSegmentsToRequest,
totalNumberOfMemorySegments - numTotalRequiredBuffers,
getConfigDescription()));
}
this.numTotalRequiredBuffers += numberOfSegmentsToRequest;
try {
redistributeBuffers();
} catch (Throwable t) {
this.numTotalRequiredBuffers -= numberOfSegmentsToRequest;
redistributeBuffers();
ExceptionUtils.rethrow(t);
}
} | 3.68 |
flink_ExceptionUtils_logExceptionIfExcepted | /**
* Log the given exception in debug level if it is a {@link FlinkExpectedException}.
*
* @param e the given exception
* @param log logger
*/
public static void logExceptionIfExcepted(Throwable e, Logger log) {
if (e instanceof FlinkExpectedException) {
log.debug("Expected exception.", e);
}
} | 3.68 |
morf_CorrectPrimaryKeyColumns_assertExistingPrimaryKey | /**
* @see org.alfasoftware.morf.upgrade.ChangePrimaryKeyColumns#assertExistingPrimaryKey(java.util.List,
* org.alfasoftware.morf.metadata.Table)
*/
@Override
protected void assertExistingPrimaryKey(List<String> from, Table table) {
// Can't check the existing state as we don't know what it is.
} | 3.68 |
hbase_AsyncScanSingleRegionRpcRetryingCaller_start | /**
* Now we will also fetch some cells along with the scanner id when opening a scanner, so we also
* need to process the ScanResponse for the open scanner request. The HBaseRpcController for the
* open scanner request is also needed because we may have some data in the CellScanner which is
* contained in the controller.
* @return {@code true} if we should continue, otherwise {@code false}.
*/
public CompletableFuture<Boolean> start(HBaseRpcController controller,
ScanResponse respWhenOpen) {
onComplete(controller, respWhenOpen);
return future;
} | 3.68 |
flink_TableDescriptor_format | /**
* Defines the format to be used for this table.
*
* <p>Note that not every connector requires a format to be specified, while others may use
* multiple formats.
*
* <p>Options of the provided {@param formatDescriptor} are automatically prefixed. For
* example,
*
* <pre>{@code
* descriptorBuilder.format(KafkaOptions.KEY_FORMAT, FormatDescriptor.forFormat("json")
* .option(JsonOptions.IGNORE_PARSE_ERRORS, true)
* .build()
* }</pre>
*
* <p>will result in the options
*
* <pre>{@code
* 'key.format' = 'json'
* 'key.json.ignore-parse-errors' = 'true'
* }</pre>
*/
public Builder format(
ConfigOption<String> formatOption, FormatDescriptor formatDescriptor) {
Preconditions.checkNotNull(formatOption, "Format option must not be null.");
Preconditions.checkNotNull(formatDescriptor, "Format descriptor must not be null.");
option(formatOption, formatDescriptor.getFormat());
final String optionPrefix =
FactoryUtil.getFormatPrefix(formatOption, formatDescriptor.getFormat());
formatDescriptor
.getOptions()
.forEach(
(key, value) -> {
if (key.startsWith(optionPrefix)) {
throw new ValidationException(
String.format(
"Format options set using #format(FormatDescriptor) should not contain the prefix '%s', but found '%s'.",
optionPrefix, key));
}
final String prefixedKey = optionPrefix + key;
option(prefixedKey, value);
});
return this;
} | 3.68 |
hudi_GenericRecordFullPayloadSizeEstimator_estimate | /**
* This method estimates the size of the payload if all entries of this payload were populated with one value.
* For example, a primitive data type such as String will be populated with a {@link UUID}, so the length is 36 bytes,
* whereas a complex data type such as an Array of type Int, will be populated with exactly 1 Integer value.
*/
protected int estimate(Schema schema) {
long size = 0;
for (Schema.Field f : schema.getFields()) {
size += typeEstimate(f.schema());
}
return (int) size;
} | 3.68 |
framework_TwinColSelect_getColumns | /**
* Gets the number of columns for the component.
*
* @see #setColumns(int)
* @deprecated As of 7.0. "Columns" does not reflect the exact number of
* characters that will be displayed. It is better to use
* setWidth together with "em" to control the width of the
* field.
*/
@Deprecated
public int getColumns() {
return columns;
} | 3.68 |
hbase_Result_isEmpty | /**
* Check if the underlying Cell [] is empty or not
* @return true if empty
*/
public boolean isEmpty() {
return this.cells == null || this.cells.length == 0;
} | 3.68 |
hadoop_TimelineDomain_getOwner | /**
* Get the domain owner
*
* @return the domain owner
*/
@XmlElement(name = "owner")
public String getOwner() {
return owner;
} | 3.68 |
hadoop_TaskPool_runSingleThreaded | /**
* Single threaded execution.
* @param task task to execute
* @param <E> exception which may be raised in execution.
* @return true if the operation executed successfully
* @throws E any exception raised.
* @throws IOException IOExceptions raised by remote iterator or in execution.
*/
private <E extends Exception> boolean runSingleThreaded(Task<I, E> task)
throws E, IOException {
List<I> succeeded = new ArrayList<>();
List<Exception> exceptions = new ArrayList<>();
RemoteIterator<I> iterator = items;
boolean threw = true;
try {
while (iterator.hasNext()) {
I item = iterator.next();
try {
task.run(item);
succeeded.add(item);
} catch (Exception e) {
exceptions.add(e);
if (onFailure != null) {
try {
onFailure.run(item, e);
} catch (Exception failException) {
LOG.error("Failed to clean up on failure", e);
// keep going
}
}
if (stopOnFailure) {
break;
}
}
}
threw = false;
} catch (IOException iteratorIOE) {
      // an IOE is raised here during iteration
LOG.debug("IOException when iterating through {}", iterator, iteratorIOE);
throw iteratorIOE;
} finally {
// threw handles exceptions that were *not* caught by the catch block,
// and exceptions that were caught and possibly handled by onFailure
// are kept in exceptions.
if (threw || !exceptions.isEmpty()) {
if (revertTask != null) {
boolean failed = false;
for (I item : succeeded) {
try {
revertTask.run(item);
} catch (Exception e) {
LOG.error("Failed to revert task", e);
failed = true;
// keep going
}
if (stopRevertsOnFailure && failed) {
break;
}
}
}
if (abortTask != null) {
boolean failed = false;
while (iterator.hasNext()) {
try {
abortTask.run(iterator.next());
} catch (Exception e) {
failed = true;
LOG.error("Failed to abort task", e);
// keep going
}
if (stopAbortsOnFailure && failed) {
break;
}
}
}
}
}
if (!suppressExceptions && !exceptions.isEmpty()) {
TaskPool.<E>throwOne(exceptions);
}
return exceptions.isEmpty();
} | 3.68 |
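A hedged usage sketch of the surrounding TaskPool builder; the method names foreach, stopOnFailure, revertWith and run follow Hadoop's TaskPool API as used elsewhere, and commit(), rollback() and pendingUploads are hypothetical application code.

// Sketch only: runs single-threaded when no executor is supplied, which is the path
// implemented by runSingleThreaded() above. Exceptions from commit() propagate unless
// suppressExceptions() is added to the chain.
boolean allSucceeded = TaskPool.foreach(pendingUploads)
    .stopOnFailure()
    .revertWith(upload -> rollback(upload))   // undo already-committed items on failure
    .run(upload -> commit(upload));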
hadoop_DiskBalancerWorkStatus_currentStateString | /**
* Return current state as a string.
*
* @throws IOException
**/
public String currentStateString() throws IOException {
return MAPPER_WITH_INDENT_OUTPUT.writeValueAsString(currentState);
} | 3.68 |
flink_RestClientConfiguration_getMaxContentLength | /**
* Returns the max content length that the REST client endpoint could handle.
*
* @return max content length that the REST client endpoint could handle
*/
public int getMaxContentLength() {
return maxContentLength;
} | 3.68 |
flink_LocalInputPreferredSlotSharingStrategy_build | /**
* Build ExecutionSlotSharingGroups for all vertices in the topology. The
* ExecutionSlotSharingGroup of a vertex is determined in order below:
*
* <p>1. try finding an existing group of the corresponding co-location constraint.
*
* <p>2. try finding an available group of its producer vertex if the producer is in the
* same slot sharing group.
*
* <p>3. try finding any available group.
*
* <p>4. create a new group.
*/
private Map<ExecutionVertexID, ExecutionSlotSharingGroup> build() {
final LinkedHashMap<JobVertexID, List<SchedulingExecutionVertex>> allVertices =
getExecutionVertices();
// loop on job vertices so that an execution vertex will not be added into a group
// if that group better fits another execution vertex
for (List<SchedulingExecutionVertex> executionVertices : allVertices.values()) {
final List<SchedulingExecutionVertex> remaining =
tryFindOptimalAvailableExecutionSlotSharingGroupFor(executionVertices);
findAvailableOrCreateNewExecutionSlotSharingGroupFor(remaining);
updateConstraintToExecutionSlotSharingGroupMap(executionVertices);
}
return executionSlotSharingGroupMap;
} | 3.68 |
hadoop_RequestFactoryImpl_withEncryptionSecrets | /**
* Encryption secrets.
* @param value new value
* @return the builder
*/
public RequestFactoryBuilder withEncryptionSecrets(
final EncryptionSecrets value) {
encryptionSecrets = value;
return this;
} | 3.68 |
hbase_RestoreTablesClient_checkTargetTables | /**
* Validate target tables.
* @param tTableArray target tables
* @param isOverwrite overwrite existing table
* @throws IOException exception
*/
private void checkTargetTables(TableName[] tTableArray, boolean isOverwrite) throws IOException {
ArrayList<TableName> existTableList = new ArrayList<>();
ArrayList<TableName> disabledTableList = new ArrayList<>();
// check if the tables already exist
try (Admin admin = conn.getAdmin()) {
for (TableName tableName : tTableArray) {
if (admin.tableExists(tableName)) {
existTableList.add(tableName);
if (admin.isTableDisabled(tableName)) {
disabledTableList.add(tableName);
}
} else {
LOG.info("HBase table " + tableName
+ " does not exist. It will be created during restore process");
}
}
}
if (existTableList.size() > 0) {
if (!isOverwrite) {
LOG.error("Existing table (" + existTableList + ") found in the restore target, please add "
+ "\"-o\" as overwrite option in the command if you mean"
+ " to restore to these existing tables");
throw new IOException(
"Existing table found in target while no \"-o\" " + "as overwrite option found");
} else {
if (disabledTableList.size() > 0) {
LOG.error("Found offline table in the restore target, "
+ "please enable them before restore with \"-overwrite\" option");
LOG.info("Offline table list in restore target: " + disabledTableList);
throw new IOException(
"Found offline table in the target when restore with \"-overwrite\" option");
}
}
}
} | 3.68 |
flink_StreamExecutionEnvironment_setDefaultLocalParallelism | /**
* Sets the default parallelism that will be used for the local execution environment created by
* {@link #createLocalEnvironment()}.
*
* @param parallelism The parallelism to use as the default local parallelism.
*/
@PublicEvolving
public static void setDefaultLocalParallelism(int parallelism) {
defaultLocalParallelism = parallelism;
} | 3.68 |
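A minimal usage sketch of the setter above; the value 4 is an arbitrary example.

// Set the default before the local environment is created.
StreamExecutionEnvironment.setDefaultLocalParallelism(4);
StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment();
// env now defaults to parallelism 4 unless overridden per operator or via createLocalEnvironment(int).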
hbase_TableMapReduceUtil_setNumReduceTasks | /**
* Sets the number of reduce tasks for the given job configuration to the number of regions the
* given table has.
* @param table The table to get the region count for.
* @param job The current job to adjust.
* @throws IOException When retrieving the table details fails.
*/
public static void setNumReduceTasks(String table, Job job) throws IOException {
job.setNumReduceTasks(getRegionCount(job.getConfiguration(), TableName.valueOf(table)));
} | 3.68 |
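A hedged usage sketch of the helper above; the job name "count-rows" and table name "orders" are placeholders.

// Placeholder names for illustration only; call from a context that handles IOException.
Configuration conf = HBaseConfiguration.create();
Job job = Job.getInstance(conf, "count-rows");
TableMapReduceUtil.setNumReduceTasks("orders", job);
// The job now runs one reducer per region of the "orders" table.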
querydsl_GeometryExpression_isEmpty | /**
* Returns 1 (TRUE) if this geometric object is the empty Geometry. If true, then this
* geometric object represents the empty point set ∅ for the coordinate space.
*
* @return empty
*/
public BooleanExpression isEmpty() {
if (empty == null) {
empty = Expressions.booleanOperation(SpatialOps.IS_EMPTY, mixin);
}
return empty;
} | 3.68 |
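A hedged usage sketch; QShop, its area geometry path, the Shop bean and queryFactory are hypothetical application types, and only isEmpty()/not() come from Querydsl.

// Filter out rows whose geometry is the empty set.
QShop shop = QShop.shop;
List<Shop> shopsWithArea = queryFactory
    .selectFrom(shop)
    .where(shop.area.isEmpty().not())
    .fetch();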
flink_RestartPipelinedRegionFailoverStrategy_getTasksNeedingRestart | /**
* Returns a set of IDs corresponding to the set of vertices that should be restarted. In this
* strategy, all task vertices in 'involved' regions are proposed to be restarted. The
 * 'involved' regions are calculated with rules below:
 *
 * <ol>
 *   <li>The region containing the failed task is always involved
 *   <li>If an input result partition of an involved region is not available, i.e. Missing or
 *       Corrupted, the region containing the partition producer task is involved
 *   <li>If a region is involved, all of its consumer regions are involved
 * </ol>
*
* @param executionVertexId ID of the failed task
* @param cause cause of the failure
* @return set of IDs of vertices to restart
*/
@Override
public Set<ExecutionVertexID> getTasksNeedingRestart(
ExecutionVertexID executionVertexId, Throwable cause) {
final SchedulingPipelinedRegion failedRegion =
topology.getPipelinedRegionOfVertex(executionVertexId);
if (failedRegion == null) {
// TODO: show the task name in the log
throw new IllegalStateException(
"Can not find the failover region for task " + executionVertexId, cause);
}
// if the failure cause is data consumption error, mark the corresponding data partition to
// be failed,
// so that the failover process will try to recover it
Optional<PartitionException> dataConsumptionException =
ExceptionUtils.findThrowable(cause, PartitionException.class);
if (dataConsumptionException.isPresent()) {
resultPartitionAvailabilityChecker.markResultPartitionFailed(
dataConsumptionException.get().getPartitionId().getPartitionId());
}
// calculate the tasks to restart based on the result of regions to restart
Set<ExecutionVertexID> tasksToRestart = new HashSet<>();
for (SchedulingPipelinedRegion region : getRegionsToRestart(failedRegion)) {
for (SchedulingExecutionVertex vertex : region.getVertices()) {
// we do not need to restart tasks which are already in the initial state
if (vertex.getState() != ExecutionState.CREATED) {
tasksToRestart.add(vertex.getId());
}
}
}
// the previous failed partition will be recovered. remove its failed state from the checker
if (dataConsumptionException.isPresent()) {
resultPartitionAvailabilityChecker.removeResultPartitionFromFailedState(
dataConsumptionException.get().getPartitionId().getPartitionId());
}
return tasksToRestart;
} | 3.68 |
querydsl_AbstractSQLUpdateClause_populate | /**
* Populate the UPDATE clause with the properties of the given bean using the given Mapper.
*
* @param obj object to use for population
* @param mapper mapper to use
* @return the current object
*/
@SuppressWarnings({ "rawtypes", "unchecked" })
public <T> C populate(T obj, Mapper<T> mapper) {
Collection<? extends Path<?>> primaryKeyColumns = entity.getPrimaryKey() != null
? entity.getPrimaryKey().getLocalColumns()
: Collections.<Path<?>>emptyList();
Map<Path<?>, Object> values = mapper.createMap(entity, obj);
for (Map.Entry<Path<?>, Object> entry : values.entrySet()) {
if (!primaryKeyColumns.contains(entry.getKey())) {
set((Path) entry.getKey(), entry.getValue());
}
}
return (C) this;
} | 3.68 |
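A hedged usage sketch; QEmployee, employeeBean and queryFactory are hypothetical, while DefaultMapper.DEFAULT is Querydsl's standard bean-to-column mapper.

// populate() skips primary-key columns, as implemented above, so the id is only used in the filter.
QEmployee e = QEmployee.employee;
long updated = queryFactory.update(e)
    .populate(employeeBean, DefaultMapper.DEFAULT)
    .where(e.id.eq(employeeBean.getId()))
    .execute();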
hadoop_TracingContext_getHeader | /**
* Return header representing the request associated with the tracingContext
* @return Header string set into X_MS_CLIENT_REQUEST_ID
*/
public String getHeader() {
return header;
} | 3.68 |
hbase_MetricsAssignmentManager_getSplitProcMetrics | /** Returns Set of common metrics for split procedure */
public ProcedureMetrics getSplitProcMetrics() {
return splitProcMetrics;
} | 3.68 |