name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, 3.26–3.68) |
---|---|---|
hmily_HmilyColumnExtractor_extract | /**
 * Get the left value if the left value of the expression is a column segment.
*
* @param expression expression segment
* @return column segment
*/
public static Optional<HmilyColumnSegment> extract(final HmilyExpressionSegment expression) {
if (expression instanceof HmilyBinaryOperationExpression && ((HmilyBinaryOperationExpression) expression).getLeft() instanceof HmilyColumnSegment) {
HmilyColumnSegment column = (HmilyColumnSegment) ((HmilyBinaryOperationExpression) expression).getLeft();
return Optional.of(column);
}
if (expression instanceof HmilyInExpression && ((HmilyInExpression) expression).getLeft() instanceof HmilyColumnSegment) {
HmilyColumnSegment column = (HmilyColumnSegment) ((HmilyInExpression) expression).getLeft();
return Optional.of(column);
}
if (expression instanceof HmilyBetweenExpression && ((HmilyBetweenExpression) expression).getLeft() instanceof HmilyColumnSegment) {
HmilyColumnSegment column = (HmilyColumnSegment) ((HmilyBetweenExpression) expression).getLeft();
return Optional.of(column);
}
return Optional.empty();
} | 3.68 |
Activiti_DelegateHelper_getField | /**
* Returns the {@link FieldExtension} matching the provided 'fieldName' which
* is defined for the current activity of the provided
* {@link DelegateExecution}.
* <p>
* Returns null if no such {@link FieldExtension} can be found.
* <p>
* If the execution is currently being used for executing an
* {@link ExecutionListener}, the field of the listener will be returned. Use
* {@link #getFlowElementField(DelegateExecution, String)} or
* {@link #getListenerField(DelegateExecution, String)} for specifically
* getting the field from either the flow element or the listener.
*/
public static FieldExtension getField(DelegateExecution execution,
String fieldName) {
if (isExecutingExecutionListener(execution)) {
return getListenerField(execution,
fieldName);
} else {
return getFlowElementField(execution,
fieldName);
}
} | 3.68 |
hudi_HoodieTimeline_getInflightInstant | /**
* Returns the inflight instant corresponding to the instant being passed. Takes care of changes in action names
 * between inflight and completed instants (compaction <=> commit) and (logcompaction <=> deltacommit).
* @param instant Hoodie Instant
* @param metaClient Hoodie metaClient to fetch tableType and fileSystem.
* @return Inflight Hoodie Instant
*/
static HoodieInstant getInflightInstant(final HoodieInstant instant, final HoodieTableMetaClient metaClient) {
if (metaClient.getTableType() == HoodieTableType.MERGE_ON_READ) {
if (instant.getAction().equals(COMMIT_ACTION)) {
return new HoodieInstant(true, COMPACTION_ACTION, instant.getTimestamp());
} else if (instant.getAction().equals(DELTA_COMMIT_ACTION)) {
      // Deltacommit is used by both ingestion and log compaction.
      // To distinguish between them, check whether a log compaction instant exists for the same timestamp.
HoodieActiveTimeline rawActiveTimeline = new HoodieActiveTimeline(metaClient, false);
Option<HoodieInstant> logCompactionInstant = Option.fromJavaOptional(rawActiveTimeline.getInstantsAsStream()
.filter(hoodieInstant -> hoodieInstant.getTimestamp().equals(instant.getTimestamp())
&& LOG_COMPACTION_ACTION.equals(hoodieInstant.getAction())).findFirst());
if (logCompactionInstant.isPresent()) {
return new HoodieInstant(true, LOG_COMPACTION_ACTION, instant.getTimestamp());
}
}
}
return new HoodieInstant(true, instant.getAction(), instant.getTimestamp());
} | 3.68 |
flink_Either_setValue | /**
* Sets the encapsulated value to another value
*
* @param value the new value of the encapsulated value
*/
public void setValue(R value) {
this.value = value;
} | 3.68 |
framework_LayoutDependencyTree_setNeedsHorizontalLayout | /**
* Informs this LayoutDependencyTree that the horizontal size of a managed
* layout might have changed and it needs layouting, or that the layouting
* is no longer necessary. If there are blockers, layouting will be delayed
* and cannot be disabled before the blockers have been removed. Logs a
* warning if no dependency is found.
*
* @param connectorId
* the connector id of the managed layout whose horizontal size
* might have changed
* @param needsLayout
* {@code true} if layouting should be enabled, {@code false} if
* layouting should be disabled (disabling is only effective if
* there are no blockers)
*/
public void setNeedsHorizontalLayout(String connectorId,
boolean needsLayout) {
LayoutDependency dependency = getDependency(connectorId, HORIZONTAL);
if (dependency != null) {
dependency.setNeedsLayout(needsLayout);
} else {
getLogger()
.warning("No dependency found in setNeedsHorizontalLayout");
}
} | 3.68 |
dubbo_DubboBootstrap_reset | /**
 * Try to reset Dubbo status for a new instance.
*
* @deprecated For testing purposes only
*/
@Deprecated
public static void reset(boolean destroy) {
if (destroy) {
if (instance != null) {
instance.destroy();
instance = null;
}
FrameworkModel.destroyAll();
} else {
instance = null;
}
ApplicationModel.reset();
} | 3.68 |
framework_VTextArea_setWordWrap | /**
* Sets whether the words should wrap or not.
*
* @param wordWrap
* {@code true} if the words should wrap, {@code false} otherwise
*/
public void setWordWrap(boolean wordWrap) {
if (wordWrap == this.wordWrap) {
return;
}
if (wordWrap) {
getElement().removeAttribute("wrap");
getElement().getStyle().clearOverflowY();
getElement().getStyle().setOverflowX(Overflow.HIDDEN);
getElement().getStyle().clearWhiteSpace();
} else {
getElement().setAttribute("wrap", "off");
getElement().getStyle().setOverflow(Overflow.AUTO);
getElement().getStyle().setWhiteSpace(WhiteSpace.PRE);
}
if (BrowserInfo.get().isOpera()
|| (BrowserInfo.get().isWebkit() && wordWrap)) {
// Opera fails to dynamically update the wrap attribute so we detach
// and reattach the whole TextArea.
// Webkit fails to properly reflow the text when enabling wrapping,
// same workaround
WidgetUtil.detachAttach(getElement());
}
this.wordWrap = wordWrap;
} | 3.68 |
hadoop_AbstractS3ACommitter_getUUID | /**
* The Job UUID, as passed in or generated.
* @return the UUID for the job.
*/
@VisibleForTesting
public final String getUUID() {
return uuid;
} | 3.68 |
hadoop_ManifestCommitterSupport_createJobSummaryFilename | /**
* Create the filename for a report from the jobID.
* @param jobId jobId
* @return filename for a report.
*/
public static String createJobSummaryFilename(String jobId) {
return String.format(SUMMARY_FILENAME_FORMAT, jobId);
} | 3.68 |
framework_BrowserInfo_requiresOverflowAutoFix | /**
* Indicates whether the browser might require juggling to properly update
* sizes inside elements with overflow: auto.
*
* @return <code>true</code> if the browser requires the workaround,
* otherwise <code>false</code>
*/
public boolean requiresOverflowAutoFix() {
return (getWebkitVersion() > 0 || getOperaVersion() >= 11
|| getIEVersion() >= 10 || isFirefox())
&& WidgetUtil.getNativeScrollbarSize() > 0;
} | 3.68 |
hbase_MetaTableAccessor_putsToMetaTable | /**
* Put the passed <code>ps</code> to the <code>hbase:meta</code> table.
* @param connection connection we're using
 * @param ps Puts to add to hbase:meta
*/
public static void putsToMetaTable(final Connection connection, final List<Put> ps)
throws IOException {
if (ps.isEmpty()) {
return;
}
try (Table t = getMetaHTable(connection)) {
debugLogMutations(ps);
// the implementation for putting a single Put is much simpler so here we do a check first.
if (ps.size() == 1) {
t.put(ps.get(0));
} else {
t.put(ps);
}
}
} | 3.68 |
hbase_HRegion_sync | /**
 * Calls sync with the given transaction ID.
 * @param txid the transaction ID to sync the WAL up to
 * @param durability durability setting that determines whether and how the WAL is synced
* @throws IOException If anything goes wrong with DFS
*/
private void sync(long txid, Durability durability) throws IOException {
if (this.getRegionInfo().isMetaRegion()) {
this.wal.sync(txid);
} else {
switch (durability) {
case USE_DEFAULT:
// do what table defaults to
if (shouldSyncWAL()) {
this.wal.sync(txid);
}
break;
case SKIP_WAL:
          // nothing to do
break;
case ASYNC_WAL:
          // nothing to do
break;
case SYNC_WAL:
this.wal.sync(txid, false);
break;
case FSYNC_WAL:
this.wal.sync(txid, true);
break;
default:
throw new RuntimeException("Unknown durability " + durability);
}
}
} | 3.68 |
morf_SchemaUtils_type | /**
* Create a column type.
*
* @param type The {@link DataType} of the field.
* @param width The field width.
* @param scale The field scale.
* @param nullable Whether the field should be nullable.
* @return The {@link ColumnType}.
*/
public static ColumnType type(DataType type, int width, int scale, boolean nullable) {
ColumnBuilder builder = SchemaUtils.column(null, type, width, scale);
if (nullable) {
builder = builder.nullable();
}
return builder;
} | 3.68 |
hbase_KeyValue_getTimestamp | /** Return the timestamp. */
long getTimestamp(final int keylength) {
int tsOffset = getTimestampOffset(keylength);
return Bytes.toLong(this.bytes, tsOffset);
} | 3.68 |
querydsl_AbstractPostgreSQLQuery_of | /**
* FOR UPDATE / FOR SHARE OF tables
*
* @param paths tables
* @return the current object
*/
public C of(RelationalPath<?>... paths) {
StringBuilder builder = new StringBuilder(" of ");
for (RelationalPath<?> path : paths) {
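            // The builder starts with " of " (4 characters), so a greater length means a table name was already appended and a separator is needed.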
if (builder.length() > 4) {
builder.append(", ");
}
builder.append(getConfiguration().getTemplates().quoteIdentifier(path.getTableName()));
}
return addFlag(Position.END, builder.toString());
} | 3.68 |
morf_Function_floor | /**
* Helper method to create an instance of the "floor" SQL function, which will
* round the provided value down to an integer value.
* <p>
* Example : 3.2, 3.5 and 3.9 will all round to 3
* </p>
*
* @param expression the expression to evaluate
* @return an instance of the floor function
*/
public static Function floor(AliasedField expression) {
return new Function(FunctionType.FLOOR, expression);
} | 3.68 |
dubbo_ApplicationModel_initApplicationExts | // already synchronized in constructor
private void initApplicationExts() {
Set<ApplicationExt> exts = this.getExtensionLoader(ApplicationExt.class).getSupportedExtensionInstances();
for (ApplicationExt ext : exts) {
ext.initialize();
}
} | 3.68 |
flink_TypeSerializerSchemaCompatibility_compatibleAfterMigration | /**
* Returns a result that indicates that the new serializer can be used after migrating the
* written bytes, i.e. reading it with the old serializer and then writing it again with the new
* serializer.
*
* @return a result that indicates that the new serializer can be used after migrating the
* written bytes.
*/
public static <T> TypeSerializerSchemaCompatibility<T> compatibleAfterMigration() {
return new TypeSerializerSchemaCompatibility<>(Type.COMPATIBLE_AFTER_MIGRATION, null);
} | 3.68 |
hbase_CacheConfig_isInMemory | /** Returns true if blocks in this file should be flagged as in-memory */
public boolean isInMemory() {
return this.inMemory;
} | 3.68 |
framework_Embedded_getCodetype | /**
* Gets the MIME-Type of the code.
*
* @return the MIME-Type of the code.
*/
public String getCodetype() {
return getState(false).codetype;
} | 3.68 |
flink_BlobUtils_getIncomingDirectory | /**
* Returns the BLOB service's directory for incoming (job-unrelated) files. The directory is
* created if it does not exist yet.
*
* @param storageDir storage directory used be the BLOB service
* @return the BLOB service's directory for incoming files
* @throws IOException if creating the directory fails
*/
static File getIncomingDirectory(File storageDir) throws IOException {
final File incomingDir = new File(storageDir, "incoming");
Files.createDirectories(incomingDir.toPath());
return incomingDir;
} | 3.68 |
graphhopper_WaySegmentParser_setRelationPreprocessor | /**
* @param relationPreprocessor callback function that receives OSM relations during the first pass
*/
public Builder setRelationPreprocessor(Consumer<ReaderRelation> relationPreprocessor) {
waySegmentParser.relationPreprocessor = relationPreprocessor;
return this;
} | 3.68 |
hbase_HRegion_getEffectiveDurability | /**
* Returns effective durability from the passed durability and the table descriptor.
*/
private Durability getEffectiveDurability(Durability d) {
return d == Durability.USE_DEFAULT ? this.regionDurability : d;
} | 3.68 |
streampipes_EpProperties_longEp | /**
 * Creates a new primitive property of type long and the provided domain property.
*
* @param label A human-readable identifier of the property presented to users in the StreamPipes UI.
* If you do not want to have a label besides the runtime name, use
* {@link org.apache.streampipes.sdk.helpers.Labels}
* @param runtimeName The field identifier of the event property at runtime.
 * @param domainProperty The semantics of the property as a String. The string should correspond to a URI
* provided by a vocabulary. Use one of the vocabularies provided in
* {@link org.apache.streampipes.vocabulary} or create your own domain-specific vocabulary.
* @return {@link org.apache.streampipes.model.schema.EventPropertyPrimitive}
*/
public static EventPropertyPrimitive longEp(Label label, String runtimeName, String domainProperty) {
return ep(label, XSD.LONG.toString(), runtimeName, domainProperty);
} | 3.68 |
hadoop_ZKPathDumper_toString | /**
* Trigger the recursive registry dump.
* @return a string view of the registry
*/
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("ZK tree for ").append(root).append('\n');
expand(builder, root, 1);
return builder.toString();
} | 3.68 |
hbase_OrderedBytes_isNumericInfinite | /**
* Return true when the next encoded value in {@code src} uses Numeric encoding and is
* {@code Infinite}, false otherwise.
*/
public static boolean isNumericInfinite(PositionedByteRange src) {
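    // Descending-encoded values have their header byte complemented (negative as a signed byte);
    // normalize it back to the ascending value space before comparing against the infinity markers.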
byte x = (-1 == Integer.signum(src.peek()) ? DESCENDING : ASCENDING).apply(src.peek());
return NEG_INF == x || POS_INF == x;
} | 3.68 |
zxing_MatrixUtil_findMSBSet | // Return the position of the most significant bit set (to one) in the "value". The most
// significant bit is position 32. If there is no bit set, return 0. Examples:
// - findMSBSet(0) => 0
// - findMSBSet(1) => 1
// - findMSBSet(255) => 8
static int findMSBSet(int value) {
return 32 - Integer.numberOfLeadingZeros(value);
} | 3.68 |
flink_RequestJobsWithIDsOverview_readResolve | /** Preserve the singleton property by returning the singleton instance */
private Object readResolve() {
return INSTANCE;
} | 3.68 |
querydsl_MapExpressionBase_isEmpty | /**
* Create a {@code this.isEmpty()} expression
*
* @return this.isEmpty()
*/
public final BooleanExpression isEmpty() {
if (empty == null) {
empty = Expressions.booleanOperation(Ops.MAP_IS_EMPTY, mixin);
}
return empty;
} | 3.68 |
zxing_LuminanceSource_isRotateSupported | /**
* @return Whether this subclass supports counter-clockwise rotation.
*/
public boolean isRotateSupported() {
return false;
} | 3.68 |
flink_InputFormatTableSource_isBounded | /** Always returns true which indicates this is a bounded source. */
@Override
public final boolean isBounded() {
return true;
} | 3.68 |
graphhopper_GHDirectory_getDefaultType | /**
* This method returns the default DAType of the specified DataAccess (as string). If preferInts is true then this
* method returns e.g. RAM_INT if the type of the specified DataAccess is RAM.
*/
public DAType getDefaultType(String dataAccess, boolean preferInts) {
DAType type = getDefault(dataAccess, typeFallback);
if (preferInts && type.isInMemory())
return type.isStoring() ? RAM_INT_STORE : RAM_INT;
return type;
} | 3.68 |
flink_PlanNode_addOutgoingChannel | /**
* Adds a channel to a successor node to this node.
*
* @param channel The channel to the successor.
*/
public void addOutgoingChannel(Channel channel) {
this.outChannels.add(channel);
} | 3.68 |
hudi_AbstractTableFileSystemView_getLatestBaseFile | /**
* Get Latest base file for a partition and file-Id.
*/
@Override
public final Option<HoodieBaseFile> getLatestBaseFile(String partitionStr, String fileId) {
try {
readLock.lock();
String partitionPath = formatPartitionKey(partitionStr);
ensurePartitionLoadedCorrectly(partitionPath);
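      // A file group that has been replaced (e.g. by clustering or insert overwrite) no longer has a valid base file.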
if (isFileGroupReplaced(partitionPath, fileId)) {
return Option.empty();
} else {
return fetchLatestBaseFile(partitionPath, fileId)
.map(df -> addBootstrapBaseFileIfPresent(new HoodieFileGroupId(partitionPath, fileId), df));
}
} finally {
readLock.unlock();
}
} | 3.68 |
hudi_HoodieStreamer_sync | /**
* Main method to start syncing.
*/
public void sync() throws Exception {
if (bootstrapExecutor.isPresent()) {
LOG.info("Performing bootstrap. Source=" + bootstrapExecutor.get().getBootstrapConfig().getBootstrapSourceBasePath());
bootstrapExecutor.get().execute();
} else {
ingestionService.ifPresent(HoodieIngestionService::startIngestion);
}
} | 3.68 |
morf_SqlDialect_getUpdateStatementSetFieldSql | /**
* Returns the SET clause for an SQL UPDATE statement based on the
* {@link List} of {@link AliasedField}s provided.
*
* @param fields The {@link List} of {@link AliasedField}s to create the SET
* statement from
* @return The SET clause as a string
*/
protected String getUpdateStatementSetFieldSql(List<AliasedField> fields) {
return " SET " + getUpdateStatementAssignmentsSql(fields);
} | 3.68 |
framework_ConnectorTracker_getConnectorAndParentInfo | /**
* Returns {@link #getConnectorString(ClientConnector)} for the connector
* and its parent (if it has a parent).
*
* @param connector
* The connector
* @return A string describing the connector and its parent
*/
private String getConnectorAndParentInfo(ClientConnector connector) {
String message = getConnectorString(connector);
if (connector.getParent() != null) {
message += " (parent: " + getConnectorString(connector.getParent())
+ ")";
}
return message;
} | 3.68 |
querydsl_EnumExpression_nullif | /**
* Create a {@code nullif(this, other)} expression
*
 * @param other the other value
* @return nullif(this, other)
*/
@Override
public EnumExpression<T> nullif(T other) {
return nullif(ConstantImpl.create(other));
} | 3.68 |
framework_VCalendar_isEventResizeAllowed | /**
 * Checks whether resizing an event is allowed.
 *
 * @return {@code true} if event resizing is allowed, {@code false} otherwise
 */
public boolean isEventResizeAllowed() {
return eventResizeAllowed;
} | 3.68 |
zilla_HpackContext_staticIndex | /*
 * Index in static table for the name. There aren't many entries, so we use
 * the length and the last byte of the name to look up the entry.
*
* @return index in static table if present
* -1 otherwise
*/
private static int staticIndex(DirectBuffer name)
{
switch (name.capacity())
{
case 3: return staticIndex3(name);
case 4: return staticIndex4(name);
case 5: return staticIndex5(name);
case 6: return staticIndex6(name);
case 7: return staticIndex7(name);
case 8: return staticIndex8(name);
case 10: return staticIndex10(name);
case 11: return staticIndex11(name);
case 12: return staticIndex12(name);
case 13: return staticIndex13(name);
case 14: return staticIndex14(name);
case 15: return staticIndex15(name);
case 16: return staticIndex16(name);
case 17: return staticIndex17(name);
case 18: return staticIndex18(name);
case 19: return staticIndex19(name);
case 25: return staticIndex25(name);
case 27: return staticIndex27(name);
default: return -1;
}
} | 3.68 |
flink_OutputFormatBase_close | /** Closes the format, waits for pending writes, and reports any asynchronous errors. */
@Override
public final void close() throws IOException {
checkAsyncErrors();
flush();
checkAsyncErrors();
postClose();
} | 3.68 |
flink_StaticFileServerHandler_setContentTypeHeader | /**
* Sets the content type header for the HTTP Response.
*
* @param response HTTP response
* @param file file to extract content type
*/
public static void setContentTypeHeader(HttpResponse response, File file) {
String mimeType = MimeTypes.getMimeTypeForFileName(file.getName());
String mimeFinal = mimeType != null ? mimeType : MimeTypes.getDefaultMimeType();
response.headers().set(CONTENT_TYPE, mimeFinal);
} | 3.68 |
hadoop_NMContainerTokenSecretManager_retrievePassword | /**
   * This override validates ContainerTokens generated using
   * different {@link MasterKey}s.
*/
@Override
public synchronized byte[] retrievePassword(
ContainerTokenIdentifier identifier) throws SecretManager.InvalidToken {
int keyId = identifier.getMasterKeyId();
MasterKeyData masterKeyToUse = null;
if (this.previousMasterKey != null
&& keyId == this.previousMasterKey.getMasterKey().getKeyId()) {
// A container-launch has come in with a token generated off the last
// master-key
masterKeyToUse = this.previousMasterKey;
} else if (keyId == super.currentMasterKey.getMasterKey().getKeyId()) {
// A container-launch has come in with a token generated off the current
// master-key
masterKeyToUse = super.currentMasterKey;
}
if (nodeHostAddr != null
&& !identifier.getNmHostAddress().equals(nodeHostAddr)) {
// Valid container token used for incorrect node.
throw new SecretManager.InvalidToken("Given Container "
+ identifier.getContainerID().toString()
+ " identifier is not valid for current Node manager. Expected : "
+ nodeHostAddr + " Found : " + identifier.getNmHostAddress());
}
if (masterKeyToUse != null) {
return retrievePasswordInternal(identifier, masterKeyToUse);
}
// Invalid request. Like startContainer() with token generated off
// old-master-keys.
throw new SecretManager.InvalidToken("Given Container "
+ identifier.getContainerID().toString()
+ " seems to have an illegally generated token.");
} | 3.68 |
hbase_RequestConverter_buildCreateTableRequest | /**
* Creates a protocol buffer CreateTableRequest
* @return a CreateTableRequest
*/
public static CreateTableRequest buildCreateTableRequest(final TableDescriptor tableDescriptor,
final byte[][] splitKeys, final long nonceGroup, final long nonce) {
CreateTableRequest.Builder builder = CreateTableRequest.newBuilder();
builder.setTableSchema(ProtobufUtil.toTableSchema(tableDescriptor));
if (splitKeys != null) {
for (byte[] key : splitKeys) {
builder.addSplitKeys(UnsafeByteOperations.unsafeWrap(key));
}
}
builder.setNonceGroup(nonceGroup);
builder.setNonce(nonce);
return builder.build();
} | 3.68 |
dubbo_AbstractReferenceConfig_isGeneric | /**
 * @deprecated Replaced by {@link AbstractReferenceConfig#getGeneric()}
*/
@Deprecated
@Parameter(excluded = true, attribute = false)
public Boolean isGeneric() {
return this.generic != null ? ProtocolUtils.isGeneric(generic) : null;
} | 3.68 |
hbase_TableQuotaSnapshotStore_getQuotaForTable | /**
* Fetches the table quota. Visible for mocking/testing.
*/
Quotas getQuotaForTable(TableName table) throws IOException {
return QuotaTableUtil.getTableQuota(conn, table);
} | 3.68 |
morf_SchemaBean_viewNames | /**
* @see org.alfasoftware.morf.metadata.Schema#viewNames()
*/
@Override
public Collection<String> viewNames() {
// Implemented like this rather than views.keySet() to retain case
Set<String> names = new HashSet<>();
for (View view : views.values()) {
names.add(view.getName());
}
return names;
} | 3.68 |
hbase_MultiTableSnapshotInputFormatImpl_restoreSnapshots | /**
* Restore each (snapshot name, restore directory) pair in snapshotToDir
* @param conf configuration to restore with
* @param snapshotToDir mapping from snapshot names to restore directories
* @param fs filesystem to do snapshot restoration on
*/
public void restoreSnapshots(Configuration conf, Map<String, Path> snapshotToDir, FileSystem fs)
throws IOException {
// TODO: restore from record readers to parallelize.
Path rootDir = CommonFSUtils.getRootDir(conf);
for (Map.Entry<String, Path> entry : snapshotToDir.entrySet()) {
String snapshotName = entry.getKey();
Path restoreDir = entry.getValue();
LOG.info("Restoring snapshot " + snapshotName + " into " + restoreDir
+ " for MultiTableSnapshotInputFormat");
restoreSnapshot(conf, snapshotName, rootDir, restoreDir, fs);
}
} | 3.68 |
hudi_StreamerUtil_instantTimeDiffSeconds | /**
 * Returns the time interval in seconds between the given instant times.
*/
public static long instantTimeDiffSeconds(String newInstantTime, String oldInstantTime) {
try {
long newTimestamp = HoodieActiveTimeline.parseDateFromInstantTime(newInstantTime).getTime();
long oldTimestamp = HoodieActiveTimeline.parseDateFromInstantTime(oldInstantTime).getTime();
return (newTimestamp - oldTimestamp) / 1000;
} catch (ParseException e) {
throw new HoodieException("Get instant time diff with interval [" + oldInstantTime + ", " + newInstantTime + "] error", e);
}
} | 3.68 |
flink_HashPartition_finalizeProbePhase | /**
* @param keepUnprobedSpilledPartitions If true then partitions that were spilled but received
* no further probe requests will be retained; used for build-side outer joins.
* @return The number of write-behind buffers reclaimable after this method call.
* @throws IOException
*/
public int finalizeProbePhase(
List<MemorySegment> freeMemory,
List<HashPartition<BT, PT>> spilledPartitions,
boolean keepUnprobedSpilledPartitions)
throws IOException {
if (isInMemory()) {
// in this case, return all memory buffers
// return the overflow segments
for (int k = 0; k < this.numOverflowSegments; k++) {
freeMemory.add(this.overflowSegments[k]);
}
this.overflowSegments = null;
this.numOverflowSegments = 0;
this.nextOverflowBucket = 0;
// return the partition buffers
for (MemorySegment partitionBuffer : this.partitionBuffers) {
freeMemory.add(partitionBuffer);
}
this.partitionBuffers = null;
return 0;
} else if (this.probeSideRecordCounter == 0 && !keepUnprobedSpilledPartitions) {
// partition is empty, no spilled buffers
// return the memory buffer
freeMemory.add(this.probeSideBuffer.getCurrentSegment());
// delete the spill files
this.probeSideChannel.close();
this.buildSideChannel.deleteChannel();
this.probeSideChannel.deleteChannel();
return 0;
} else {
// flush the last probe side buffer and register this partition as pending
this.probeSideBuffer.close();
this.probeSideChannel.close();
spilledPartitions.add(this);
return 1;
}
} | 3.68 |
hadoop_FSBuilderSupport_getLong | /**
* Get a long value with resilience to unparseable values.
 * @param key key to look up
* @param defVal default value
* @return long value
*/
public long getLong(String key, long defVal) {
final String v = options.getTrimmed(key, "");
if (v.isEmpty()) {
return defVal;
}
try {
return options.getLong(key, defVal);
} catch (NumberFormatException e) {
final String msg = String.format(
"The option %s value \"%s\" is not a long integer; using the default value %s",
key, v, defVal);
// not a long,
LOG_PARSE_ERROR.warn(msg);
LOG.debug("{}", msg, e);
return defVal;
}
} | 3.68 |
hbase_HRegionFileSystem_getRegionDir | /** Returns {@link Path} to the region directory. */
public Path getRegionDir() {
return regionDir;
} | 3.68 |
hadoop_AbstractS3ACommitter_abortPendingUploads | /**
* Abort all pending uploads in the list.
* @param commitContext commit context
* @param pending pending uploads
* @param suppressExceptions should exceptions be suppressed?
* @param deleteRemoteFiles should remote files be deleted?
* @throws IOException any exception raised
*/
protected void abortPendingUploads(
final CommitContext commitContext,
final ActiveCommit pending,
final boolean suppressExceptions,
final boolean deleteRemoteFiles) throws IOException {
if (pending.isEmpty()) {
LOG.info("{}: no pending commits to abort", getRole());
} else {
try (DurationInfo d = new DurationInfo(LOG,
"Aborting %s uploads", pending.size())) {
TaskPool.foreach(pending.getSourceFiles())
.executeWith(commitContext.getOuterSubmitter())
.suppressExceptions(suppressExceptions)
.run(path ->
loadAndAbort(commitContext,
pending,
path,
suppressExceptions,
deleteRemoteFiles));
}
}
} | 3.68 |
hadoop_PublishedConfiguration_shallowCopy | /**
   * This makes a copy without the nested content, so it is suitable
   * for returning as part of the list of a parent's values.
* @return the copy
*/
public PublishedConfiguration shallowCopy() {
PublishedConfiguration that = new PublishedConfiguration();
that.description = this.description;
that.updated = this.updated;
that.updatedTime = this.updatedTime;
return that;
} | 3.68 |
hadoop_IdentityTransformer_transformUserOrGroupForSetRequest | /**
* Perform Identity transformation when setting owner on a path.
* There are four possible input:
* 1.short name; 2.$superuser; 3.Fully qualified name; 4. principal id.
* <pre>
* short name could be transformed to:
 * - A service principal id or $superuser, if the short name belongs to a daemon service
* stated in substitution list AND "fs.azure.identity.transformer.service.principal.id"
* is set with $superuser or a principal id.
* - Fully qualified name, if "fs.azure.identity.transformer.domain.name" is set in configuration.
*
* $superuser, fully qualified name and principalId should not be transformed.
* </pre>
* @param userOrGroup the user or group to be set as owner.
* @return user or group after transformation.
* */
public String transformUserOrGroupForSetRequest(String userOrGroup) {
if (userOrGroup == null || userOrGroup.isEmpty() || skipUserIdentityReplacement) {
return userOrGroup;
}
// case 1: when the owner to be set is stated in substitution list.
if (isInSubstitutionList(userOrGroup)) {
return servicePrincipalId;
}
// case 2: when the owner is a short name of the user principal name(UPN).
if (shouldUseFullyQualifiedUserName(userOrGroup)) {
return getFullyQualifiedName(userOrGroup);
}
return userOrGroup;
} | 3.68 |
hmily_HmilyRepositoryFacade_createHmilyTransaction | /**
* Create hmily transaction string.
*
* @param hmilyTransaction the hmily transaction
*/
public void createHmilyTransaction(final HmilyTransaction hmilyTransaction) {
checkRows(hmilyRepository.createHmilyTransaction(hmilyTransaction));
} | 3.68 |
framework_TouchScrollDelegate_stopScrolling | /**
 * Forces the scroll delegate to cancel the scrolling process. Can be called
 * by users if they e.g. decide to handle the touch event themselves after all
 * (e.g. a pause after touch start before moving the touch -> interpreted as a
 * long touch/click or drag start).
*/
public void stopScrolling() {
handlerRegistration.removeHandler();
handlerRegistration = null;
if (moved) {
moveTransformationToScrolloffset();
} else {
activeScrollDelegate = null;
}
} | 3.68 |
flink_SyntaxHighlightStyle_getHintStyle | /**
* Returns the style for a SQL hint, such as {@literal /*+ This is a hint *}{@literal /}.
*
* @return Style for SQL hints
*/
public AttributedStyle getHintStyle() {
return hintStyle;
} | 3.68 |
hadoop_Paths_addUUID | /**
* Insert the UUID to a path if it is not there already.
* If there is a trailing "." in the prefix after the last slash, the
* UUID is inserted before it with a "-" prefix; otherwise appended.
*
* Examples:
* <pre>
* /example/part-0000 ==> /example/part-0000-0ab34
* /example/part-0001.gz.csv ==> /example/part-0001-0ab34.gz.csv
* /example/part-0002-0abc3.gz.csv ==> /example/part-0002-0abc3.gz.csv
* /example0abc3/part-0002.gz.csv ==> /example0abc3/part-0002.gz.csv
* </pre>
*
*
* @param pathStr path as a string; must not have a trailing "/".
* @param uuid UUID to append; must not be empty
* @return new path.
*/
public static String addUUID(String pathStr, String uuid) {
Preconditions.checkArgument(StringUtils.isNotEmpty(pathStr), "empty path");
Preconditions.checkArgument(StringUtils.isNotEmpty(uuid), "empty uuid");
// In some cases, Spark will add the UUID to the filename itself.
if (pathStr.contains(uuid)) {
return pathStr;
}
int dot; // location of the first '.' in the file name
int lastSlash = pathStr.lastIndexOf('/');
if (lastSlash >= 0) {
Preconditions.checkState(lastSlash + 1 < pathStr.length(),
"Bad path: " + pathStr);
dot = pathStr.indexOf('.', lastSlash);
} else {
dot = pathStr.indexOf('.');
}
if (dot >= 0) {
return pathStr.substring(0, dot) + "-" + uuid + pathStr.substring(dot);
} else {
return pathStr + "-" + uuid;
}
} | 3.68 |
hadoop_ValueAggregatorMapper_map | /**
 * The map function. It iterates through the value aggregator descriptor
 * list to generate aggregation id/value pairs and emits them.
*/
public void map(K1 key, V1 value,
Context context) throws IOException, InterruptedException {
Iterator<?> iter =
ValueAggregatorJobBase.aggregatorDescriptorList.iterator();
while (iter.hasNext()) {
ValueAggregatorDescriptor ad = (ValueAggregatorDescriptor) iter.next();
Iterator<Entry<Text, Text>> ens =
ad.generateKeyValPairs(key, value).iterator();
while (ens.hasNext()) {
Entry<Text, Text> en = ens.next();
context.write(en.getKey(), en.getValue());
}
}
} | 3.68 |
dubbo_ReactorClientCalls_manyToMany | /**
* Implements a stream -> stream call as Flux -> Flux
*
* @param invoker invoker
* @param requestFlux the flux with request
* @param methodDescriptor the method descriptor
* @return the flux with response
*/
public static <TRequest, TResponse, TInvoker> Flux<TResponse> manyToMany(
Invoker<TInvoker> invoker, Flux<TRequest> requestFlux, StubMethodDescriptor methodDescriptor) {
try {
ClientTripleReactorSubscriber<TRequest> clientSubscriber =
requestFlux.subscribeWith(new ClientTripleReactorSubscriber<>());
ClientTripleReactorPublisher<TResponse> clientPublisher = new ClientTripleReactorPublisher<>(
s -> clientSubscriber.subscribe((CallStreamObserver<TRequest>) s), clientSubscriber::cancel);
return Flux.from(clientPublisher)
.doOnSubscribe(dummy ->
StubInvocationUtil.biOrClientStreamCall(invoker, methodDescriptor, clientPublisher));
} catch (Throwable throwable) {
return Flux.error(throwable);
}
} | 3.68 |
flink_EnvironmentSettings_withConfiguration | /** Add extra configuration to {@link EnvironmentSettings}. */
public Builder withConfiguration(Configuration configuration) {
this.configuration.addAll(configuration);
return this;
} | 3.68 |
flink_CatalogManager_alterDatabase | /**
* Modify an existing database.
*
* @param catalogName Name of the catalog for database
 * @param databaseName Name of the database to be modified
* @param newDatabase The new database definition
* @param ignoreIfNotExists Flag to specify behavior when the given database does not exist: if
* set to false, throw an exception, if set to true, do nothing.
* @throws DatabaseNotExistException if the given database does not exist
* @throws CatalogException in case of any runtime exception
*/
public void alterDatabase(
String catalogName,
String databaseName,
CatalogDatabase newDatabase,
boolean ignoreIfNotExists)
throws DatabaseNotExistException, CatalogException {
Catalog catalog = getCatalogOrError(catalogName);
catalog.alterDatabase(databaseName, newDatabase, ignoreIfNotExists);
catalogModificationListeners.forEach(
listener ->
listener.onEvent(
AlterDatabaseEvent.createEvent(
CatalogContext.createContext(catalogName, catalog),
databaseName,
newDatabase,
ignoreIfNotExists)));
} | 3.68 |
hadoop_AbfsCountersImpl_lookupMetric | /**
* Look up a Metric from registered set.
*
* @param name name of metric.
* @return the metric or null.
*/
private MutableMetric lookupMetric(String name) {
return getRegistry().get(name);
} | 3.68 |
morf_Function_max | /**
* Helper method to create an instance of the "maximum" SQL function.
*
* @param fieldToEvaluate the field to evaluate in the maximum function. This can be any expression resulting in a single column of data.
* @return an instance of the maximum function
*/
public static Function max(AliasedField fieldToEvaluate) {
return new Function(FunctionType.MAX, fieldToEvaluate);
} | 3.68 |
hudi_FlinkClientUtil_getHadoopConf | /**
* Returns the hadoop configuration with possible hadoop conf paths.
* E.G. the configurations under path $HADOOP_CONF_DIR and $HADOOP_HOME.
*/
public static org.apache.hadoop.conf.Configuration getHadoopConf() {
// create hadoop configuration with hadoop conf directory configured.
org.apache.hadoop.conf.Configuration hadoopConf = null;
for (String possibleHadoopConfPath : HadoopUtils.possibleHadoopConfPaths(new Configuration())) {
hadoopConf = getHadoopConfiguration(possibleHadoopConfPath);
if (hadoopConf != null) {
break;
}
}
if (hadoopConf == null) {
hadoopConf = new org.apache.hadoop.conf.Configuration();
}
return hadoopConf;
} | 3.68 |
flink_FlinkImageBuilder_build | /** Build the image. */
public ImageFromDockerfile build() throws ImageBuildException {
sanityCheck();
final String finalImageName = imageNamePrefix + "-" + imageNameSuffix;
try {
if (baseImage == null) {
baseImage = FLINK_BASE_IMAGE_BUILD_NAME;
if (flinkDist == null) {
flinkDist = FileUtils.findFlinkDist();
}
// Build base image first
buildBaseImage(flinkDist);
}
final Path flinkConfFile = createTemporaryFlinkConfFile(conf, tempDirectory);
final Path log4jPropertiesFile = createTemporaryLog4jPropertiesFile(tempDirectory);
// Copy flink-conf.yaml into image
filesToCopy.put(
flinkConfFile,
Paths.get(flinkHome, "conf", GlobalConfiguration.FLINK_CONF_FILENAME));
filesToCopy.put(
log4jPropertiesFile, Paths.get(flinkHome, "conf", LOG4J_PROPERTIES_FILENAME));
final ImageFromDockerfile image =
new ImageFromDockerfile(finalImageName)
.withDockerfileFromBuilder(
builder -> {
// Build from base image
builder.from(baseImage);
// Copy files into image
filesToCopy.forEach(
(from, to) ->
builder.copy(to.toString(), to.toString()));
builder.cmd(startupCommand);
});
filesToCopy.forEach((from, to) -> image.withFileFromPath(to.toString(), from));
return image;
} catch (Exception e) {
throw new ImageBuildException(finalImageName, e);
}
} | 3.68 |
morf_SqlDialect_appendJoins | /**
 * Appends join clauses to the result.
*
* @param result joins will be appended here
* @param stmt statement with joins clauses
* @param innerJoinKeyword The keyword for INNER JOIN
* @param <T> The type of {@link AbstractSelectStatement}
*/
protected <T extends AbstractSelectStatement<T>> void appendJoins(StringBuilder result, AbstractSelectStatement<T> stmt, String innerJoinKeyword) {
for (Join currentJoin : stmt.getJoins()) {
appendJoin(result, currentJoin, innerJoinKeyword);
}
} | 3.68 |
graphhopper_CustomModelParser_getInterface | /**
* @return the interface as string of the provided EncodedValue, e.g. IntEncodedValue (only interface) or
* BooleanEncodedValue (first interface). For StringEncodedValue we return IntEncodedValue to return the index
* instead of the String for faster comparison.
*/
private static String getInterface(EncodedValue enc) {
if (enc instanceof StringEncodedValue) return IntEncodedValue.class.getSimpleName();
if (enc.getClass().getInterfaces().length == 0) return enc.getClass().getSimpleName();
return enc.getClass().getInterfaces()[0].getSimpleName();
} | 3.68 |
shardingsphere-elasticjob_ShardingService_hasShardingInfoInOfflineServers | /**
     * Query whether there is sharding info in offline servers or not.
     *
     * @return true if there is sharding info in offline servers, false otherwise
*/
public boolean hasShardingInfoInOfflineServers() {
List<String> onlineInstances = jobNodeStorage.getJobNodeChildrenKeys(InstanceNode.ROOT);
int shardingTotalCount = configService.load(true).getShardingTotalCount();
for (int i = 0; i < shardingTotalCount; i++) {
if (!onlineInstances.contains(jobNodeStorage.getJobNodeData(ShardingNode.getInstanceNode(i)))) {
return true;
}
}
return false;
} | 3.68 |
dubbo_RpcContext_set | /**
     * Set a value.
     *
     * @param key the key
     * @param value the value
* @return context
*/
public RpcContext set(String key, Object value) {
newRpcContext.set(key, value);
return this;
} | 3.68 |
flink_DataStreamSinkProvider_consumeDataStream | /**
* Consumes the given Java {@link DataStream} and returns the sink transformation {@link
* DataStreamSink}.
*
* @deprecated Use {@link DataStreamSinkProvider#consumeDataStream(ProviderContext, DataStream)}
* and correctly set a unique identifier for each data stream transformation.
*/
@Deprecated
default DataStreamSink<?> consumeDataStream(DataStream<RowData> dataStream) {
throw new UnsupportedOperationException(
"This method is deprecated. "
+ "Use consumeDataStream(ProviderContext, DataStream<RowData>) instead");
} | 3.68 |
flink_SkipListUtils_helpSetPrevAndNextNode | /**
* Set the previous node and the next node of the given node at the given level. The level must
* be positive.
*
* @param node the node.
* @param prevNode the previous node to set.
* @param nextNode the next node to set.
* @param level the level to find the next node.
* @param spaceAllocator the space allocator.
*/
static void helpSetPrevAndNextNode(
long node, long prevNode, long nextNode, int level, Allocator spaceAllocator) {
Preconditions.checkArgument(node != HEAD_NODE, "head node does not have previous node");
Preconditions.checkArgument(level > 0, "only index level have previous node");
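        // Resolve the node's address into its chunk, backing memory segment and offset before rewriting the index-level links.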
Chunk chunk = spaceAllocator.getChunkById(SpaceUtils.getChunkIdByAddress(node));
int offsetInChunk = SpaceUtils.getChunkOffsetByAddress(node);
MemorySegment segment = chunk.getMemorySegment(offsetInChunk);
int offsetInByteBuffer = chunk.getOffsetInSegment(offsetInChunk);
int topLevel = getLevel(segment, offsetInByteBuffer);
putNextIndexNode(segment, offsetInByteBuffer, level, nextNode);
putPrevIndexNode(segment, offsetInByteBuffer, topLevel, level, prevNode);
} | 3.68 |
zxing_MonochromeRectangleDetector_blackWhiteRange | /**
* Computes the start and end of a region of pixels, either horizontally or vertically, that could
* be part of a Data Matrix barcode.
*
* @param fixedDimension if scanning horizontally, this is the row (the fixed vertical location)
* where we are scanning. If scanning vertically it's the column, the fixed horizontal location
* @param maxWhiteRun largest run of white pixels that can still be considered part of the
* barcode region
* @param minDim minimum pixel location, horizontally or vertically, to consider
* @param maxDim maximum pixel location, horizontally or vertically, to consider
* @param horizontal if true, we're scanning left-right, instead of up-down
* @return int[] with start and end of found range, or null if no such range is found
* (e.g. only white was found)
*/
private int[] blackWhiteRange(int fixedDimension, int maxWhiteRun, int minDim, int maxDim, boolean horizontal) {
int center = (minDim + maxDim) / 2;
// Scan left/up first
int start = center;
while (start >= minDim) {
if (horizontal ? image.get(start, fixedDimension) : image.get(fixedDimension, start)) {
start--;
} else {
int whiteRunStart = start;
do {
start--;
} while (start >= minDim && !(horizontal ? image.get(start, fixedDimension) :
image.get(fixedDimension, start)));
int whiteRunSize = whiteRunStart - start;
if (start < minDim || whiteRunSize > maxWhiteRun) {
start = whiteRunStart;
break;
}
}
}
start++;
// Then try right/down
int end = center;
while (end < maxDim) {
if (horizontal ? image.get(end, fixedDimension) : image.get(fixedDimension, end)) {
end++;
} else {
int whiteRunStart = end;
do {
end++;
} while (end < maxDim && !(horizontal ? image.get(end, fixedDimension) :
image.get(fixedDimension, end)));
int whiteRunSize = end - whiteRunStart;
if (end >= maxDim || whiteRunSize > maxWhiteRun) {
end = whiteRunStart;
break;
}
}
}
end--;
return end > start ? new int[]{start, end} : null;
} | 3.68 |
AreaShop_RentRegion_getFormattedMoneyBackAmount | /**
* Get the formatted string of the amount of the moneyBack amount.
* @return String with currency symbols and proper fractional part
*/
public String getFormattedMoneyBackAmount() {
return Utils.formatCurrency(getMoneyBackAmount());
} | 3.68 |
hadoop_FederationPolicyInitializationContext_setFederationSubclusterResolver | /**
* Setter for the {@link SubClusterResolver}.
*
* @param federationSubclusterResolver the {@link SubClusterResolver} to be
* used for initialization.
*/
public void setFederationSubclusterResolver(
SubClusterResolver federationSubclusterResolver) {
this.federationSubclusterResolver = federationSubclusterResolver;
} | 3.68 |
framework_TypeDataStore_isNoLayoutProperty | /**
* Checks whether the provided property is annotated with {@link NoLayout}.
*
* @param property
* the property to check
*
* @since 7.4
*
* @return <code>true</code> if the property has a NoLayout annotation;
* otherwise <code>false</code>
*/
public static boolean isNoLayoutProperty(Property property) {
return hasNoLayout(get().jsTypeData,
property.getBeanType().getSignature(), property.getName());
} | 3.68 |
flink_AsynchronousBlockWriter_getReturnQueue | /**
* Gets the queue in which the memory segments are queued after the asynchronous write is
* completed.
*
* @return The queue with the written memory segments.
*/
@Override
public LinkedBlockingQueue<MemorySegment> getReturnQueue() {
return this.returnSegments;
} | 3.68 |
hudi_BitCaskDiskMap_sizeOfFileOnDiskInBytes | /**
* Number of bytes spilled to disk.
*/
@Override
public long sizeOfFileOnDiskInBytes() {
return filePosition.get();
} | 3.68 |
hadoop_FederationStateStoreFacade_storeNewToken | /**
   * The Router supports storing an {@link RMDelegationTokenIdentifier}.
*
* @param identifier delegation tokens from the RM.
* @param renewDate renewDate.
* @param tokenInfo tokenInfo.
* @throws YarnException if the call to the state store is unsuccessful.
* @throws IOException An IO Error occurred.
*/
public void storeNewToken(RMDelegationTokenIdentifier identifier,
long renewDate, String tokenInfo) throws YarnException, IOException {
LOG.info("storing RMDelegation token with sequence number: {}.",
identifier.getSequenceNumber());
RouterStoreToken storeToken = RouterStoreToken.newInstance(identifier, renewDate, tokenInfo);
RouterRMTokenRequest request = RouterRMTokenRequest.newInstance(storeToken);
stateStore.storeNewToken(request);
} | 3.68 |
framework_VDateField_getResolutions | /**
* Returns all available resolutions for the field in the ascending order
* (which is the same as order of enumeration ordinals).
* <p>
* The method uses {@link #doGetResolutions()} to make sure that the order
* is the correct one.
*
* @see #doGetResolutions()
*
* @return stream of all available resolutions in the ascending order.
*/
public Stream<R> getResolutions() {
return Stream.of(doGetResolutions()).sorted();
} | 3.68 |
framework_HierarchyMapper_collapse | /**
* Collapses the given item.
*
* @param item
* the item to collapse
* @param position
* the index of the item
*
* @return range of rows removed by collapsing the item
*/
public Range collapse(T item, Integer position) {
Range removedRows = Range.emptyRange();
if (isExpanded(item)) {
if (position != null) {
removedRows = Range.withLength(position + 1,
(int) getHierarchy(item, false).count());
}
expandedItemIds.remove(getDataProvider().getId(item));
}
return removedRows;
} | 3.68 |
morf_H2DatabaseInspector_inspect | /**
* @see org.alfasoftware.morf.diagnostics.DatabaseInspector#inspect()
*/
@Override
public void inspect() {
if (connectionResources instanceof AbstractConnectionResources &&
"H2".equals(connectionResources.getDatabaseType())) {
try {
log.info("Launching H2 database inspector...");
Server.startWebServer(connectionResources.getDataSource().getConnection());
} catch (SQLException e) {
throw new IllegalStateException("Failed to start the H2 Database Inspector web server", e);
}
}
} | 3.68 |
AreaShop_FileManager_saveGroupsNow | /**
* Save the groups file to disk synchronously.
*/
public void saveGroupsNow() {
AreaShop.debug("saveGroupsNow() done");
saveGroupsRequired = false;
try {
groupsConfig.save(groupsPath);
} catch(IOException e) {
AreaShop.warn("Groups file could not be saved: " + groupsPath);
}
} | 3.68 |
flink_Bucket_getNew | /**
* Creates a new empty {@code Bucket}.
*
* @param subtaskIndex the index of the subtask creating the bucket.
* @param bucketId the identifier of the bucket, as returned by the {@link BucketAssigner}.
* @param bucketPath the path to where the part files for the bucket will be written to.
* @param initialPartCounter the initial counter for the part files of the bucket.
* @param bucketWriter the {@link BucketWriter} used to write part files in the bucket.
* @param rollingPolicy the policy based on which a bucket rolls its currently open part file
* and opens a new one.
* @param fileListener the listener about the status of file.
* @param <IN> the type of input elements to the sink.
* @param <BucketID> the type of the identifier of the bucket, as returned by the {@link
* BucketAssigner}
* @param outputFileConfig the part file configuration.
* @return The new Bucket.
*/
static <IN, BucketID> Bucket<IN, BucketID> getNew(
final int subtaskIndex,
final BucketID bucketId,
final Path bucketPath,
final long initialPartCounter,
final BucketWriter<IN, BucketID> bucketWriter,
final RollingPolicy<IN, BucketID> rollingPolicy,
@Nullable final FileLifeCycleListener<BucketID> fileListener,
final OutputFileConfig outputFileConfig) {
return new Bucket<>(
subtaskIndex,
bucketId,
bucketPath,
initialPartCounter,
bucketWriter,
rollingPolicy,
fileListener,
outputFileConfig);
} | 3.68 |
hadoop_AbfsInputStream_reset | /**
* Not supported by this stream. Throws {@link UnsupportedOperationException}
*/
@Override
public synchronized void reset() throws IOException {
throw new UnsupportedOperationException("mark()/reset() not supported on this stream");
} | 3.68 |
hbase_MasterObserver_postSwitchExceedThrottleQuota | /**
* Called after switching exceed throttle quota state.
* @param ctx the coprocessor instance's environment
* @param oldValue the previously exceed throttle quota value
* @param newValue the newly exceed throttle quota value
*/
default void postSwitchExceedThrottleQuota(
final ObserverContext<MasterCoprocessorEnvironment> ctx, final boolean oldValue,
final boolean newValue) throws IOException {
} | 3.68 |
flink_MemorySegment_putDouble | /**
* Writes the given double-precision floating-point value (64bit, 8 bytes) to the given position
* in the system's native byte order. This method offers the best speed for double writing and
* should be used unless a specific byte order is required. In most cases, it suffices to know
* that the byte order in which the value is written is the same as the one in which it is read
* (such as transient storage in memory, or serialization for I/O and network), making this
* method the preferable choice.
*
* @param index The position at which the memory will be written.
* @param value The double value to be written.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 8.
*/
public void putDouble(int index, double value) {
putLong(index, Double.doubleToRawLongBits(value));
} | 3.68 |
hadoop_DynamicIOStatistics_addGaugeFunction | /**
* add a mapping of a key to a gauge function.
* @param key the key
* @param eval the evaluator
*/
void addGaugeFunction(String key, Function<String, Long> eval) {
gauges.addFunction(key, eval);
} | 3.68 |
hudi_HFileBootstrapIndex_getPartitionKey | /**
* Returns partition-key to be used in HFile.
* @param partition Partition-Path
* @return
*/
private static String getPartitionKey(String partition) {
return getKeyValueString(PARTITION_KEY_PREFIX, partition);
} | 3.68 |
hbase_EnableTableProcedure_setTableStateToEnabling | /**
* Mark table state to Enabling
* @param env MasterProcedureEnv
* @param tableName the target table
*/
protected static void setTableStateToEnabling(final MasterProcedureEnv env,
final TableName tableName) throws IOException {
    // Set the table enabling flag up in zk.
LOG.info("Attempting to enable the table " + tableName);
env.getMasterServices().getTableStateManager().setTableState(tableName,
TableState.State.ENABLING);
} | 3.68 |
rocketmq-connect_ClusterConfigState_taskCount | /**
* Get the number of tasks assigned for the given connector.
*
* @param connectorName name of the connector to look up tasks for
* @return the number of tasks
*/
public int taskCount(String connectorName) {
Integer count = connectorTaskCounts.get(connectorName);
return count == null ? 0 : count;
} | 3.68 |
flink_WatermarkSpec_getRowtimeAttribute | /**
* Returns the name of a rowtime attribute.
*
* <p>The referenced attribute must be present in the {@link ResolvedSchema} and must be of
* {@link TimestampType}.
*/
public String getRowtimeAttribute() {
return rowtimeAttribute;
} | 3.68 |
hadoop_TimestampGenerator_currentTime | /**
* Returns the current wall clock time in milliseconds, multiplied by the
* required precision.
*
* @return current timestamp.
*/
public long currentTime() {
// We want to align cell timestamps with current time.
// cell timestamps are not be less than
// System.currentTimeMillis() * TS_MULTIPLIER.
return System.currentTimeMillis() * TS_MULTIPLIER;
} | 3.68 |
hbase_CommonFSUtils_isRecoveredEdits | /**
* Checks if the given path is the one with 'recovered.edits' dir.
* @param path must not be null
 * @return True if the path contains the 'recovered.edits' dir
*/
public static boolean isRecoveredEdits(Path path) {
return path.toString().contains(HConstants.RECOVERED_EDITS_DIR);
} | 3.68 |
graphhopper_GHDirectory_configure | /**
* Configure the DAType (specified by the value) of a single DataAccess object (specified by the key). For "MMAP" you
* can prepend "preload." to the name and specify a percentage which preloads the DataAccess into physical memory of
* the specified percentage (only applied for load, not for import).
* As keys can be patterns the order is important and the LinkedHashMap is forced as type.
*/
public Directory configure(LinkedHashMap<String, String> config) {
for (Map.Entry<String, String> kv : config.entrySet()) {
String value = kv.getValue().trim();
if (kv.getKey().startsWith("preload."))
try {
String pattern = kv.getKey().substring("preload.".length());
mmapPreloads.put(pattern, Integer.parseInt(value));
} catch (NumberFormatException ex) {
throw new IllegalArgumentException("DataAccess " + kv.getKey() + " has an incorrect preload value: " + value);
}
else {
String pattern = kv.getKey();
defaultTypes.put(pattern, DAType.fromString(value));
}
}
return this;
} | 3.68 |
zxing_PDF417HighLevelEncoder_encodeText | /**
* Encode parts of the message using Text Compaction as described in ISO/IEC 15438:2001(E),
* chapter 4.4.2.
*
* @param input the input
* @param startpos the start position within the message
* @param count the number of characters to encode
* @param sb receives the encoded codewords
* @param initialSubmode should normally be SUBMODE_ALPHA
* @return the text submode in which this method ends
*/
private static int encodeText(ECIInput input,
int startpos,
int count,
StringBuilder sb,
int initialSubmode) throws WriterException {
StringBuilder tmp = new StringBuilder(count);
int submode = initialSubmode;
int idx = 0;
while (true) {
if (input.isECI(startpos + idx)) {
encodingECI(input.getECIValue(startpos + idx), sb);
idx++;
} else {
char ch = input.charAt(startpos + idx);
switch (submode) {
case SUBMODE_ALPHA:
if (isAlphaUpper(ch)) {
if (ch == ' ') {
tmp.append((char) 26); //space
} else {
tmp.append((char) (ch - 65));
}
} else {
if (isAlphaLower(ch)) {
submode = SUBMODE_LOWER;
tmp.append((char) 27); //ll
continue;
} else if (isMixed(ch)) {
submode = SUBMODE_MIXED;
tmp.append((char) 28); //ml
continue;
} else {
tmp.append((char) 29); //ps
tmp.append((char) PUNCTUATION[ch]);
break;
}
}
break;
case SUBMODE_LOWER:
if (isAlphaLower(ch)) {
if (ch == ' ') {
tmp.append((char) 26); //space
} else {
tmp.append((char) (ch - 97));
}
} else {
if (isAlphaUpper(ch)) {
tmp.append((char) 27); //as
tmp.append((char) (ch - 65));
//space cannot happen here, it is also in "Lower"
break;
} else if (isMixed(ch)) {
submode = SUBMODE_MIXED;
tmp.append((char) 28); //ml
continue;
} else {
tmp.append((char) 29); //ps
tmp.append((char) PUNCTUATION[ch]);
break;
}
}
break;
case SUBMODE_MIXED:
if (isMixed(ch)) {
tmp.append((char) MIXED[ch]);
} else {
if (isAlphaUpper(ch)) {
submode = SUBMODE_ALPHA;
tmp.append((char) 28); //al
continue;
} else if (isAlphaLower(ch)) {
submode = SUBMODE_LOWER;
tmp.append((char) 27); //ll
continue;
} else {
if (startpos + idx + 1 < count &&
!input.isECI(startpos + idx + 1) &&
isPunctuation(input.charAt(startpos + idx + 1))) {
submode = SUBMODE_PUNCTUATION;
tmp.append((char) 25); //pl
continue;
}
tmp.append((char) 29); //ps
tmp.append((char) PUNCTUATION[ch]);
}
}
break;
default: //SUBMODE_PUNCTUATION
if (isPunctuation(ch)) {
tmp.append((char) PUNCTUATION[ch]);
} else {
submode = SUBMODE_ALPHA;
tmp.append((char) 29); //al
continue;
}
}
idx++;
if (idx >= count) {
break;
}
}
}
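    // Pack the buffered submode values pairwise into codewords (30 * high + low); an odd trailing value is padded with 29 (ps).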
char h = 0;
int len = tmp.length();
for (int i = 0; i < len; i++) {
boolean odd = (i % 2) != 0;
if (odd) {
h = (char) ((h * 30) + tmp.charAt(i));
sb.append(h);
} else {
h = tmp.charAt(i);
}
}
if ((len % 2) != 0) {
sb.append((char) ((h * 30) + 29)); //ps
}
return submode;
} | 3.68 |
morf_AbstractSqlDialectTest_expectedSqlStatementFormat | /**
* Provides the tests for the correct format expected.
* This method can be overridden for dialect specific formatting.
*/
protected void expectedSqlStatementFormat() {
// When
String statement1 = testDialect.formatSqlStatement("END;");
String statement2 = testDialect.formatSqlStatement("test");
// Then
assertEquals("The SQL statement should be [END;;]" , "END;;", statement1);
assertEquals("The SQL statement should be [test;]" , "test;", statement2);
} | 3.68 |
hadoop_AbfsIoUtils_dumpHeadersToDebugLog | /**
* Dump the headers of a request/response to the log at DEBUG level.
* @param origin header origin for log
* @param headers map of headers.
*/
public static void dumpHeadersToDebugLog(final String origin,
final Map<String, List<String>> headers) {
if (LOG.isDebugEnabled()) {
LOG.debug("{}", origin);
for (Map.Entry<String, List<String>> entry : headers.entrySet()) {
String key = entry.getKey();
if (key == null) {
key = "HTTP Response";
}
String values = StringUtils.join(";", entry.getValue());
if (key.contains("Cookie")) {
values = "*cookie info*";
}
if (key.equals("sig")) {
values = "XXXX";
}
LOG.debug(" {}={}",
key,
values);
}
}
} | 3.68 |
framework_TouchScrollDelegate_setElements | /**
* Registers the given elements as scrollable, removing previously
* registered scrollables from this handler.
*
* @param scrollables
* The elements that should be scrollable
*/
public void setElements(Element... scrollables) {
if (requiresDelegate()) {
for (Element e : delegate.scrollableElements) {
e.removeClassName(SCROLLABLE_CLASSNAME);
}
delegate.scrollableElements.clear();
}
for (Element e : scrollables) {
addElement(e);
}
} | 3.68 |