name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
hadoop_RollingFileSystemSink_createOrAppendLogFile | /**
* Create a new log file and return the {@link FSDataOutputStream}. If a
* file with the specified path already exists, open the file for append
* instead.
*
* Once the file is open, update {@link #currentFSOutStream},
* {@link #currentOutStream}, and {@link #currentFilePath}.
*
* @param targetFile the target path
* @throws IOException thrown if the create or append operation fails.
*/
private void createOrAppendLogFile(Path targetFile) throws IOException {
// First try blindly creating the file. If we fail, it either means
// the file exists, or the operation actually failed. We do it this way
// because if we check whether the file exists first, it could be created
// between that check and our create call. Creating first works like a
// test-and-set.
try {
currentFSOutStream = fileSystem.create(targetFile, false);
currentOutStream = new PrintStream(currentFSOutStream, true,
StandardCharsets.UTF_8.name());
} catch (IOException ex) {
// Try appending instead. If that also fails, it means the file doesn't
// actually exist yet or the operation actually failed.
try {
currentFSOutStream = fileSystem.append(targetFile);
currentOutStream = new PrintStream(currentFSOutStream, true,
StandardCharsets.UTF_8.name());
} catch (IOException ex2) {
// If the original create failed for a legit but transitory
// reason, the append will fail because the file now doesn't exist,
// resulting in a confusing stack trace. To avoid that, we set
// the cause of the second exception to be the first exception.
// It's still a tiny bit confusing, but it's enough
// information that someone should be able to figure it out.
ex2.initCause(ex);
throw ex2;
}
}
currentFilePath = targetFile;
} | 3.68 |
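The snippet above blindly creates first and falls back to append, keeping the original failure attached to the second one. A minimal, self-contained sketch of the same pattern using only the JDK's java.nio.file API (not Hadoop's FileSystem, and using addSuppressed rather than initCause); the file name is made up for illustration:

```java
import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class CreateOrAppend {

    // Try to create the file atomically; if that fails (e.g. it already exists), append instead.
    static OutputStream createOrAppend(Path target) throws IOException {
        try {
            // CREATE_NEW fails when the file already exists, acting like a test-and-set.
            return Files.newOutputStream(target, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE);
        } catch (IOException createFailure) {
            try {
                return Files.newOutputStream(target, StandardOpenOption.APPEND);
            } catch (IOException appendFailure) {
                // Keep the original failure attached so the stack trace stays informative.
                appendFailure.addSuppressed(createFailure);
                throw appendFailure;
            }
        }
    }

    public static void main(String[] args) throws IOException {
        try (OutputStream out = createOrAppend(Path.of("metrics.log"))) { // hypothetical file name
            out.write("hello\n".getBytes(StandardCharsets.UTF_8));
        }
    }
}
```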
hudi_HoodieTableMetadataUtil_metadataPartitionExists | /**
* Check if the given metadata partition exists.
*
* @param basePath base path of the dataset
* @param context instance of {@link HoodieEngineContext}.
* @param partitionType type of the metadata partition to check
* @return {@code true} if the metadata partition exists
*/
public static boolean metadataPartitionExists(String basePath, HoodieEngineContext context, MetadataPartitionType partitionType) {
final String metadataTablePath = HoodieTableMetadata.getMetadataTableBasePath(basePath);
FileSystem fs = FSUtils.getFs(metadataTablePath, context.getHadoopConf().get());
try {
return fs.exists(new Path(metadataTablePath, partitionType.getPartitionPath()));
} catch (Exception e) {
throw new HoodieIOException(String.format("Failed to check metadata partition %s exists.", partitionType.getPartitionPath()));
}
} | 3.68 |
framework_SingleSelectionModelImpl_setSelectedFromServer | /**
* Sets the selection based on server API call. Does nothing if the
* selection would not change; otherwise updates the selection and fires a
* selection change event with {@code isUserOriginated == false}.
*
* @param item
* the item to select or {@code null} to clear selection
*/
protected void setSelectedFromServer(T item) {
if (isSelected(item)) {
// Avoid generating an extra key when item matches a stale one.
return;
}
T oldSelection = this.getSelectedItem()
.orElse(asSingleSelect().getEmptyValue());
doSetSelected(item);
fireEvent(new SingleSelectionEvent<>(getGrid(), asSingleSelect(),
oldSelection, false));
} | 3.68 |
hadoop_WriteOperationHelper_revertCommit | /**
* Revert a commit by deleting the file.
* Relies on retry code in filesystem.
* Does not attempt to recreate the parent directory
* @throws IOException on problems
* @param destKey destination key
*/
@Retries.OnceTranslated
public void revertCommit(String destKey) throws IOException {
once("revert commit", destKey,
withinAuditSpan(getAuditSpan(), () -> {
Path destPath = owner.keyToQualifiedPath(destKey);
owner.deleteObjectAtPath(destPath,
destKey, true);
}));
} | 3.68 |
hbase_ScannerModel_getLimit | /** Returns the limit specification */
@XmlAttribute
public int getLimit() {
return limit;
} | 3.68 |
hbase_AbstractStateMachineNamespaceProcedure_addOrUpdateNamespace | /**
* Insert/update the row into the ns family of meta table.
* @param env MasterProcedureEnv
*/
protected static void addOrUpdateNamespace(MasterProcedureEnv env, NamespaceDescriptor ns)
throws IOException {
getTableNamespaceManager(env).addOrUpdateNamespace(ns);
} | 3.68 |
hbase_ColumnValueFilter_getFamily | /** Returns the column family */
public byte[] getFamily() {
return family;
} | 3.68 |
flink_DefaultCheckpointPlan_checkNoPartlyFinishedVertexUsedUnionListState | /**
* If a job vertex using {@code UnionListState} has part of tasks FINISHED where others are
* still in RUNNING state, the checkpoint would be aborted since it might cause incomplete
* {@code UnionListState}.
*/
private void checkNoPartlyFinishedVertexUsedUnionListState(
Map<JobVertexID, ExecutionJobVertex> partlyFinishedVertex,
Map<OperatorID, OperatorState> operatorStates) {
for (ExecutionJobVertex vertex : partlyFinishedVertex.values()) {
if (hasUsedUnionListState(vertex, operatorStates)) {
throw new PartialFinishingNotSupportedByStateException(
String.format(
"The vertex %s (id = %s) has used"
+ " UnionListState, but part of its tasks are FINISHED.",
vertex.getName(), vertex.getJobVertexId()));
}
}
} | 3.68 |
framework_VScrollTable_refreshContent | /**
* Sends request to refresh content at this position.
*/
public void refreshContent() {
isRequestHandlerRunning = true;
int first = (int) (firstRowInViewPort - pageLength * cacheRate);
int reqRows = (int) (2 * pageLength * cacheRate + pageLength);
if (first < 0) {
reqRows = reqRows + first;
first = 0;
}
setReqFirstRow(first);
setReqRows(reqRows);
run();
} | 3.68 |
hbase_Writables_copyWritable | /**
* Copy one Writable to another. Copies bytes using data streams.
* @param bytes Serialized bytes of the source Writable
* @param tgt Target Writable
* @return The target Writable.
* @throws IOException e
*/
public static Writable copyWritable(final byte[] bytes, final Writable tgt) throws IOException {
DataInputStream dis = new DataInputStream(new ByteArrayInputStream(bytes));
try {
tgt.readFields(dis);
} finally {
dis.close();
}
return tgt;
} | 3.68 |
hadoop_StagingCommitter_getFinalKey | /**
* Returns the final S3 key for a relative path. Subclasses can override this
* method to upload files to a different S3 location.
* <p>
* This implementation concatenates the relative path with the key prefix
* from the output path.
* If {@link CommitConstants#FS_S3A_COMMITTER_STAGING_UNIQUE_FILENAMES} is
* set, then the task UUID is also included in the calculation
*
* @param relative the path of a file relative to the task attempt path
* @param context the JobContext or TaskAttemptContext for this job
* @return the S3 key where the file will be uploaded
*/
protected String getFinalKey(String relative, JobContext context) {
if (uniqueFilenames) {
return getS3KeyPrefix(context) + "/"
+ Paths.addUUID(relative, getUUID());
} else {
return getS3KeyPrefix(context) + "/" + relative;
}
} | 3.68 |
morf_WindowFunction_getOrderBys | /**
* @return the fields to order by.
*/
public ImmutableList<AliasedField> getOrderBys() {
return orderBys;
} | 3.68 |
hudi_CompactionUtils_getDeltaCommitsSinceLatestCompaction | /**
* Returns a pair of (timeline containing the delta commits after the latest completed
* compaction commit, the completed compaction commit instant), if the latest completed
* compaction commit is present; a pair of (timeline containing all the delta commits,
* the first delta commit instant), if there is no completed compaction commit.
*
* @param activeTimeline Active timeline of a table.
* @return Pair of timeline containing delta commits and an instant.
*/
public static Option<Pair<HoodieTimeline, HoodieInstant>> getDeltaCommitsSinceLatestCompaction(
HoodieActiveTimeline activeTimeline) {
Option<HoodieInstant> lastCompaction = activeTimeline.getCommitTimeline()
.filterCompletedInstants().lastInstant();
HoodieTimeline deltaCommits = activeTimeline.getDeltaCommitTimeline();
final HoodieInstant latestInstant;
if (lastCompaction.isPresent()) {
latestInstant = lastCompaction.get();
// timeline containing the delta commits after the latest completed compaction commit,
// and the completed compaction commit instant
return Option.of(Pair.of(deltaCommits.findInstantsModifiedAfterByCompletionTime(latestInstant.getTimestamp()), latestInstant));
} else {
if (deltaCommits.countInstants() > 0) {
latestInstant = deltaCommits.firstInstant().get();
// timeline containing all the delta commits, and the first delta commit instant
return Option.of(Pair.of(deltaCommits, latestInstant));
} else {
return Option.empty();
}
}
} | 3.68 |
hmily_NacosClient_pull | /**
* Pull input stream.
*
* @param config the config
* @return the input stream
*/
InputStream pull(final NacosConfig config) {
Properties properties = new Properties();
properties.put(NACOS_SERVER_ADDR_KEY, config.getServer());
try {
configService = NacosFactory.createConfigService(properties);
String content = configService.getConfig(config.getDataId(), config.getGroup(), config.getTimeoutMs());
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("nacos content {}", content);
}
if (StringUtils.isBlank(content)) {
return null;
}
return new ByteArrayInputStream(content.getBytes());
} catch (NacosException e) {
throw new ConfigException(e);
}
} | 3.68 |
hadoop_TextView_puts | /**
* Print strings as a line (new line appended at the end, a la C/Tcl puts).
* @param args the strings to print
*/
public void puts(Object... args) {
echo(args);
writer().println();
} | 3.68 |
hudi_OptionsResolver_isLockRequired | /**
* Returns whether the writer txn should be guarded by lock.
*/
public static boolean isLockRequired(Configuration conf) {
return conf.getBoolean(FlinkOptions.METADATA_ENABLED) || isMultiWriter(conf);
} | 3.68 |
framework_HasComponents_getContainer | /**
* Gets the component container.
*
* @return the component container
*/
public HasComponents getContainer() {
return (HasComponents) getSource();
} | 3.68 |
flink_FlinkContainersSettings_getZookeeperHostname | /**
* Gets Zookeeper hostname.
*
* @return The Zookeeper hostname.
*/
public String getZookeeperHostname() {
return zookeeperHostname;
} | 3.68 |
hbase_ClientIdGenerator_generateClientId | /**
* Returns a unique ID incorporating IP address, PID, TID and timer. Might be overkill... Note,
* though, that a new UUID in Java is by default just a random number.
*/
public static byte[] generateClientId() {
byte[] selfBytes = getIpAddressBytes();
Long pid = getPid();
long tid = Thread.currentThread().getId();
long ts = EnvironmentEdgeManager.currentTime();
byte[] id = new byte[selfBytes.length + ((pid != null ? 1 : 0) + 2) * Bytes.SIZEOF_LONG];
int offset = Bytes.putBytes(id, 0, selfBytes, 0, selfBytes.length);
if (pid != null) {
offset = Bytes.putLong(id, offset, pid);
}
offset = Bytes.putLong(id, offset, tid);
offset = Bytes.putLong(id, offset, ts);
assert offset == id.length;
return id;
} | 3.68 |
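A rough, JDK-only sketch of the same ID layout ([IP bytes][pid][thread id][timestamp]) using ByteBuffer instead of HBase's Bytes helper; this is illustrative, not the HBase implementation:

```java
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.nio.ByteBuffer;

public class ClientIdSketch {

    // Layout mirrors the idea above: [IP bytes][pid][thread id][timestamp].
    static byte[] generate() throws UnknownHostException {
        byte[] ip = InetAddress.getLocalHost().getAddress();
        long pid = ProcessHandle.current().pid();
        long tid = Thread.currentThread().getId();
        long ts = System.currentTimeMillis();
        ByteBuffer buf = ByteBuffer.allocate(ip.length + 3 * Long.BYTES);
        buf.put(ip).putLong(pid).putLong(tid).putLong(ts);
        return buf.array();
    }

    public static void main(String[] args) throws UnknownHostException {
        System.out.println("client id length: " + generate().length); // e.g. 4 + 24 on an IPv4 host
    }
}
```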
AreaShop_RegionSign_getRegion | /**
* Get the region this sign is linked to.
* @return The region this sign is linked to
*/
public GeneralRegion getRegion() {
return signsFeature.getRegion();
} | 3.68 |
dubbo_Bytes_unzip | /**
* unzip.
*
* @param bytes compressed byte array.
* @return uncompressed byte array.
* @throws IOException
*/
public static byte[] unzip(byte[] bytes) throws IOException {
UnsafeByteArrayInputStream bis = new UnsafeByteArrayInputStream(bytes);
UnsafeByteArrayOutputStream bos = new UnsafeByteArrayOutputStream();
InputStream is = new InflaterInputStream(bis);
try {
IOUtils.write(is, bos);
return bos.toByteArray();
} finally {
is.close();
bis.close();
bos.close();
}
} | 3.68 |
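For context, a self-contained round trip showing the same java.util.zip stream pattern (DeflaterOutputStream to compress, InflaterInputStream to decompress); this is a sketch with made-up data, not Dubbo's UnsafeByteArray classes:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.InflaterInputStream;

public class ZipRoundTrip {

    static byte[] zip(byte[] input) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (DeflaterOutputStream dos = new DeflaterOutputStream(bos)) {
            dos.write(input);
        }
        return bos.toByteArray();
    }

    static byte[] unzip(byte[] compressed) throws IOException {
        try (InflaterInputStream iis = new InflaterInputStream(new ByteArrayInputStream(compressed));
             ByteArrayOutputStream bos = new ByteArrayOutputStream()) {
            byte[] buffer = new byte[4096];
            int n;
            while ((n = iis.read(buffer)) != -1) {
                bos.write(buffer, 0, n);
            }
            return bos.toByteArray();
        }
    }

    public static void main(String[] args) throws IOException {
        byte[] original = "hello, dubbo".getBytes(StandardCharsets.UTF_8);
        byte[] restored = unzip(zip(original));
        System.out.println(new String(restored, StandardCharsets.UTF_8)); // hello, dubbo
    }
}
```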
hudi_ByteBufferBackedInputStream_getPosition | /**
* Returns current position of the stream
*/
public int getPosition() {
return buffer.position() - bufferOffset;
} | 3.68 |
pulsar_SinkContext_resume | /**
* Resume requesting messages.
* @param topic - topic name
* @param partition - partition id (0 for non-partitioned topics)
*/
default void resume(String topic, int partition) throws PulsarClientException {
throw new UnsupportedOperationException("not implemented");
} | 3.68 |
hadoop_ReduceTaskAttemptInfo_getMergeRuntime | /**
* Get the runtime for the <b>merge</b> phase of the reduce task-attempt
*
* @return the runtime for the <b>merge</b> phase of the reduce task-attempt
*/
public long getMergeRuntime() {
return mergeTime;
} | 3.68 |
hbase_CellChunkImmutableSegment_createCellReference | /*------------------------------------------------------------------------*/
// for a given cell, write the cell representation on the index chunk
private int createCellReference(ByteBufferKeyValue cell, ByteBuffer idxBuffer, int idxOffset) {
int offset = idxOffset;
int dataChunkID = cell.getChunkId();
offset = ByteBufferUtils.putInt(idxBuffer, offset, dataChunkID); // write data chunk id
offset = ByteBufferUtils.putInt(idxBuffer, offset, cell.getOffset()); // offset
offset = ByteBufferUtils.putInt(idxBuffer, offset, cell.getSerializedSize()); // length
offset = ByteBufferUtils.putLong(idxBuffer, offset, cell.getSequenceId()); // seqId
return offset;
} | 3.68 |
framework_VCalendar_set24HFormat | /**
* Sets whether the 24h time format should be used.
*
* @param format
* True if the 24h format should be used, otherwise the 12h format is
* used
*/
public void set24HFormat(boolean format) {
this.format = format;
} | 3.68 |
hudi_FileSystemViewManager_clearFileSystemView | /**
* Drops the reference to the file-system view for the given base path. Future calls for this path result in a new view being created.
*
* @param basePath the base path whose cached view should be dropped
*/
public void clearFileSystemView(String basePath) {
SyncableFileSystemView view = globalViewMap.remove(basePath);
if (view != null) {
view.close();
}
} | 3.68 |
flink_OptimizerNode_computeUnionOfInterestingPropertiesFromSuccessors | /**
* Computes all the interesting properties that are relevant to this node. The interesting
* properties are a union of the interesting properties on each outgoing connection. However, if
* two interesting properties on the outgoing connections overlap, the interesting properties
* will occur only once in this set. For that, this method deduplicates and merges the
* interesting properties. This method returns copies of the original interesting properties
* objects and leaves the original objects, contained by the connections, unchanged.
*/
public void computeUnionOfInterestingPropertiesFromSuccessors() {
List<DagConnection> conns = getOutgoingConnections();
if (conns.size() == 0) {
// no outgoing connections, so we have no interesting properties ourselves
this.intProps = new InterestingProperties();
} else {
this.intProps = conns.get(0).getInterestingProperties().clone();
for (int i = 1; i < conns.size(); i++) {
this.intProps.addInterestingProperties(conns.get(i).getInterestingProperties());
}
}
this.intProps.dropTrivials();
} | 3.68 |
hbase_Scan_getMvccReadPoint | /**
* Get the mvcc read point used to open a scanner.
*/
long getMvccReadPoint() {
return mvccReadPoint;
} | 3.68 |
hadoop_AbfsInputStream_available | /**
* Return the size of the remaining available bytes
* if the size is less than or equal to {@link Integer#MAX_VALUE},
* otherwise, return {@link Integer#MAX_VALUE}.
*
* This is to match the behavior of DFSInputStream.available(),
* which some clients may rely on (HBase write-ahead log reading in
* particular).
*/
@Override
public synchronized int available() throws IOException {
if (closed) {
throw new IOException(
FSExceptionMessages.STREAM_IS_CLOSED);
}
final long remaining = this.contentLength - this.getPos();
return remaining <= Integer.MAX_VALUE
? (int) remaining : Integer.MAX_VALUE;
} | 3.68 |
hudi_ProtoConversionUtil_getAvroSchemaForMessageClass | /**
* Creates an Avro {@link Schema} for the provided class. Assumes that the class is a protobuf {@link Message}.
* @param clazz The protobuf class
* @param schemaConfig configuration used to determine how to handle particular cases when converting from the proto schema
* @return An Avro schema
*/
public static Schema getAvroSchemaForMessageClass(Class clazz, SchemaConfig schemaConfig) {
return new AvroSupport(schemaConfig).getSchema(clazz);
} | 3.68 |
shardingsphere-elasticjob_PropertiesPreconditions_checkPositiveInteger | /**
* Checks that the property value is a positive integer.
*
* @param props properties to be checked
* @param key property key to be checked
*/
public static void checkPositiveInteger(final Properties props, final String key) {
String propertyValue = props.getProperty(key);
if (null == propertyValue) {
return;
}
int integerValue;
try {
integerValue = Integer.parseInt(propertyValue);
} catch (final NumberFormatException ignored) {
throw new IllegalArgumentException(String.format("The property `%s` should be integer.", key));
}
Preconditions.checkArgument(integerValue > 0, "The property `%s` should be positive.", key);
} | 3.68 |
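An illustrative, Guava-free restatement of the same check together with a usage example; the property key is invented:

```java
import java.util.Properties;

public class PositiveIntCheck {

    // Same contract as the snippet above: a missing key is fine, otherwise it must parse as a positive int.
    static void checkPositiveInteger(Properties props, String key) {
        String value = props.getProperty(key);
        if (value == null) {
            return;
        }
        int parsed;
        try {
            parsed = Integer.parseInt(value);
        } catch (NumberFormatException ignored) {
            throw new IllegalArgumentException(String.format("The property `%s` should be integer.", key));
        }
        if (parsed <= 0) {
            throw new IllegalArgumentException(String.format("The property `%s` should be positive.", key));
        }
    }

    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("max.time.diff.seconds", "-1"); // hypothetical key
        checkPositiveInteger(props, "max.time.diff.seconds"); // throws IllegalArgumentException
    }
}
```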
hbase_CheckAndMutate_getRow | /** Returns the row */
@Override
public byte[] getRow() {
return row;
} | 3.68 |
hbase_RegionPlacementMaintainer_getRegionsMovement | /**
* Return how many regions will move per table since their primary RS will change
* @param newPlan - new AssignmentPlan
* @return how many primaries will move per table
*/
public Map<TableName, Integer> getRegionsMovement(FavoredNodesPlan newPlan) throws IOException {
Map<TableName, Integer> movesPerTable = new HashMap<>();
SnapshotOfRegionAssignmentFromMeta snapshot = this.getRegionAssignmentSnapshot();
Map<TableName, List<RegionInfo>> tableToRegions = snapshot.getTableToRegionMap();
FavoredNodesPlan oldPlan = snapshot.getExistingAssignmentPlan();
Set<TableName> tables = snapshot.getTableSet();
for (TableName table : tables) {
int movedPrimaries = 0;
if (!this.targetTableSet.isEmpty() && !this.targetTableSet.contains(table)) {
continue;
}
List<RegionInfo> regions = tableToRegions.get(table);
for (RegionInfo region : regions) {
List<ServerName> oldServers = oldPlan.getFavoredNodes(region);
List<ServerName> newServers = newPlan.getFavoredNodes(region);
if (oldServers != null && newServers != null) {
ServerName oldPrimary = oldServers.get(0);
ServerName newPrimary = newServers.get(0);
if (oldPrimary.compareTo(newPrimary) != 0) {
movedPrimaries++;
}
}
}
movesPerTable.put(table, movedPrimaries);
}
return movesPerTable;
} | 3.68 |
open-banking-gateway_ConsentAuthorizationEncryptionServiceProvider_generateKey | /**
* Generates random symmetric key.
* @return Symmetric key
*/
public SecretKeyWithIv generateKey() {
return oper.generateKey();
} | 3.68 |
hadoop_ExternalStoragePolicySatisfier_main | /**
* Main method to start SPS service.
*/
public static void main(String[] args) throws Exception {
NameNodeConnector nnc = null;
ExternalSPSContext context = null;
try {
StringUtils.startupShutdownMessage(StoragePolicySatisfier.class, args,
LOG);
HdfsConfiguration spsConf = new HdfsConfiguration();
// login with SPS keytab
secureLogin(spsConf);
StoragePolicySatisfier sps = new StoragePolicySatisfier(spsConf);
nnc = getNameNodeConnector(spsConf);
context = new ExternalSPSContext(sps, nnc);
sps.init(context);
sps.start(StoragePolicySatisfierMode.EXTERNAL);
context.initMetrics(sps);
if (sps != null) {
sps.join();
}
} catch (Throwable e) {
LOG.error("Failed to start storage policy satisfier.", e);
terminate(1, e);
} finally {
if (nnc != null) {
nnc.close();
}
if (context != null) {
if (context.getSpsBeanMetrics() != null) {
context.closeMetrics();
}
}
}
} | 3.68 |
hadoop_IOStatisticsBinding_aggregateMeanStatistics | /**
* Aggregate the mean statistics.
* This returns a new instance.
* @param l left value
* @param r right value
* @return aggregate value
*/
public static MeanStatistic aggregateMeanStatistics(
MeanStatistic l, MeanStatistic r) {
MeanStatistic res = l.copy();
res.add(r);
return res;
} | 3.68 |
zxing_LuminanceSource_invert | /**
* @return a wrapper of this {@code LuminanceSource} which inverts the luminances it returns -- black becomes
* white and vice versa, and each value becomes (255-value).
*/
public LuminanceSource invert() {
return new InvertedLuminanceSource(this);
} | 3.68 |
framework_ListenerMethod_readObject | /* Special serialization to handle method references */
private void readObject(ObjectInputStream in)
throws IOException, ClassNotFoundException {
in.defaultReadObject();
try {
String name = (String) in.readObject();
Class<?>[] paramTypes = (Class<?>[]) in.readObject();
// We can not use getMethod directly as we want to support anonymous
// inner classes
method = findHighestMethod(target.getClass(), name, paramTypes);
} catch (SecurityException e) {
getLogger().log(Level.SEVERE, "Internal deserialization error", e);
}
} | 3.68 |
querydsl_TimeExpression_coalesce | /**
* Create a {@code coalesce(this, args...)} expression
*
* @param args additional arguments
* @return coalesce
*/
@Override
@SuppressWarnings({"unchecked"})
public TimeExpression<T> coalesce(T... args) {
Coalesce<T> coalesce = new Coalesce<T>(getType(), mixin);
for (T arg : args) {
coalesce.add(arg);
}
return coalesce.asTime();
} | 3.68 |
flink_RuntimeRestAPIVersion_isStableVersion | /**
* Returns whether this version is considered stable.
*
* @return whether this version is stable
*/
@Override
public boolean isStableVersion() {
return isStable;
} | 3.68 |
rocketmq-connect_ClusterConfigState_allTaskConfigs | /**
* get all task configs
*
* @param connector
* @return
*/
public List<Map<String, String>> allTaskConfigs(String connector) {
Map<Integer, Map<String, String>> taskConfigs = new TreeMap<>();
for (Map.Entry<ConnectorTaskId, Map<String, String>> taskConfigEntry : this.taskConfigs.entrySet()) {
if (taskConfigEntry.getKey().connector().equals(connector)) {
Map<String, String> configs = taskConfigEntry.getValue();
taskConfigs.put(taskConfigEntry.getKey().task(), configs);
}
}
return Collections.unmodifiableList(new ArrayList<>(taskConfigs.values()));
} | 3.68 |
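The TreeMap is what gives the returned list its ordering. A tiny stand-alone demonstration of that design choice, with hypothetical task ids and configs:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class OrderedTaskConfigs {
    public static void main(String[] args) {
        // The TreeMap keys play the role of task ids: values() iterates in ascending key order,
        // so the resulting list is ordered by task id regardless of insertion order.
        Map<Integer, Map<String, String>> byTaskId = new TreeMap<>();
        byTaskId.put(2, Map.of("topic", "t-2"));
        byTaskId.put(0, Map.of("topic", "t-0"));
        byTaskId.put(1, Map.of("topic", "t-1"));
        List<Map<String, String>> ordered = new ArrayList<>(byTaskId.values());
        System.out.println(ordered); // [{topic=t-0}, {topic=t-1}, {topic=t-2}]
    }
}
```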
pulsar_StreamingDataBlockHeaderImpl_fromStream | // Construct DataBlockHeader from InputStream, which contains `HEADER_MAX_SIZE` bytes readable.
public static StreamingDataBlockHeaderImpl fromStream(InputStream stream) throws IOException {
CountingInputStream countingStream = new CountingInputStream(stream);
DataInputStream dis = new DataInputStream(countingStream);
int magic = dis.readInt();
if (magic != MAGIC_WORD) {
throw new IOException("Data block header magic word not match. read: " + magic
+ " expected: " + MAGIC_WORD);
}
long headerLen = dis.readLong();
long blockLen = dis.readLong();
long firstEntryId = dis.readLong();
long ledgerId = dis.readLong();
long toSkip = headerLen - countingStream.getCount();
if (dis.skip(toSkip) != toSkip) {
throw new EOFException("Header was too small");
}
return new StreamingDataBlockHeaderImpl(headerLen, blockLen, ledgerId, firstEntryId);
} | 3.68 |
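A self-contained sketch of the general magic-word header pattern used above, written against plain DataOutputStream/DataInputStream; the magic value and field layout here are invented and do not match Pulsar's actual format:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class HeaderRoundTrip {

    // Hypothetical magic word; the real Pulsar value differs.
    static final int MAGIC = 0xCAFEBABE;

    static byte[] writeHeader(long blockLen, long ledgerId, long firstEntryId) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        DataOutputStream dos = new DataOutputStream(bos);
        dos.writeInt(MAGIC);
        dos.writeLong(blockLen);
        dos.writeLong(firstEntryId);
        dos.writeLong(ledgerId);
        return bos.toByteArray();
    }

    static void readHeader(byte[] bytes) throws IOException {
        DataInputStream dis = new DataInputStream(new ByteArrayInputStream(bytes));
        int magic = dis.readInt();
        if (magic != MAGIC) {
            throw new IOException("Header magic word mismatch. read: " + magic + " expected: " + MAGIC);
        }
        long blockLen = dis.readLong();
        long firstEntryId = dis.readLong();
        long ledgerId = dis.readLong();
        System.out.printf("blockLen=%d firstEntryId=%d ledgerId=%d%n", blockLen, firstEntryId, ledgerId);
    }

    public static void main(String[] args) throws IOException {
        readHeader(writeHeader(1024L, 42L, 0L));
    }
}
```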
hbase_HFileReaderImpl_validateBlockType | /**
* Compares the actual type of a block retrieved from cache or disk with its expected type and
* throws an exception in case of a mismatch. Expected block type of {@link BlockType#DATA} is
* considered to match the actual block type {@link BlockType#ENCODED_DATA} as well.
* @param block a block retrieved from cache or disk
* @param expectedBlockType the expected block type, or null to skip the check
*/
private void validateBlockType(HFileBlock block, BlockType expectedBlockType) throws IOException {
if (expectedBlockType == null) {
return;
}
BlockType actualBlockType = block.getBlockType();
if (expectedBlockType.isData() && actualBlockType.isData()) {
// We consider DATA to match ENCODED_DATA for the purpose of this
// verification.
return;
}
if (actualBlockType != expectedBlockType) {
throw new IOException("Expected block type " + expectedBlockType + ", " + "but got "
+ actualBlockType + ": " + block + ", path=" + path);
}
} | 3.68 |
AreaShop_Utils_millisToHumanFormat | /**
* Convert milliseconds to a human readable format.
* @param milliseconds The amount of milliseconds to convert
* @return A formatted string based on the language file
*/
public static String millisToHumanFormat(long milliseconds) {
long timeLeft = milliseconds + 500;
// To seconds
timeLeft /= 1000;
if(timeLeft <= 0) {
return Message.fromKey("timeleft-ended").getPlain();
} else if(timeLeft == 1) {
return Message.fromKey("timeleft-second").replacements(timeLeft).getPlain();
} else if(timeLeft <= 120) {
return Message.fromKey("timeleft-seconds").replacements(timeLeft).getPlain();
}
// To minutes
timeLeft /= 60;
if(timeLeft <= 120) {
return Message.fromKey("timeleft-minutes").replacements(timeLeft).getPlain();
}
// To hours
timeLeft /= 60;
if(timeLeft <= 48) {
return Message.fromKey("timeleft-hours").replacements(timeLeft).getPlain();
}
// To days
timeLeft /= 24;
if(timeLeft <= 60) {
return Message.fromKey("timeleft-days").replacements(timeLeft).getPlain();
}
// To months
timeLeft /= 30;
if(timeLeft <= 24) {
return Message.fromKey("timeleft-months").replacements(timeLeft).getPlain();
}
// To years
timeLeft /= 12;
return Message.fromKey("timeleft-years").replacements(timeLeft).getPlain();
} | 3.68 |
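A JDK-only sketch of the same successive-division approach, returning plain English strings instead of entries from AreaShop's language file:

```java
public class TimeLeftFormat {

    // Same successive-division idea as above, with hard-coded English labels.
    static String humanize(long milliseconds) {
        long t = (milliseconds + 500) / 1000;          // round to seconds
        if (t <= 0) return "ended";
        if (t == 1) return "1 second";
        if (t <= 120) return t + " seconds";
        t /= 60;                                       // to minutes
        if (t <= 120) return t + " minutes";
        t /= 60;                                       // to hours
        if (t <= 48) return t + " hours";
        t /= 24;                                       // to days
        if (t <= 60) return t + " days";
        t /= 30;                                       // to months
        if (t <= 24) return t + " months";
        return (t / 12) + " years";
    }

    public static void main(String[] args) {
        System.out.println(humanize(90_000));      // 90 seconds
        System.out.println(humanize(3_600_000));   // 60 minutes
    }
}
```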
flink_AvroSchemaConverter_convertToSchema | /**
* Converts Flink SQL {@link LogicalType} (can be nested) into an Avro schema.
*
* <p>The "{rowName}_" prefix is used for nested row type names in order to generate the right
* schema. A nested record type that differs only in its type name is still compatible.
*
* @param logicalType logical type
* @param rowName the record name
* @return Avro's {@link Schema} matching this logical type.
*/
public static Schema convertToSchema(LogicalType logicalType, String rowName) {
int precision;
boolean nullable = logicalType.isNullable();
switch (logicalType.getTypeRoot()) {
case NULL:
return SchemaBuilder.builder().nullType();
case BOOLEAN:
Schema bool = SchemaBuilder.builder().booleanType();
return nullable ? nullableSchema(bool) : bool;
case TINYINT:
case SMALLINT:
case INTEGER:
Schema integer = SchemaBuilder.builder().intType();
return nullable ? nullableSchema(integer) : integer;
case BIGINT:
Schema bigint = SchemaBuilder.builder().longType();
return nullable ? nullableSchema(bigint) : bigint;
case FLOAT:
Schema f = SchemaBuilder.builder().floatType();
return nullable ? nullableSchema(f) : f;
case DOUBLE:
Schema d = SchemaBuilder.builder().doubleType();
return nullable ? nullableSchema(d) : d;
case CHAR:
case VARCHAR:
Schema str = SchemaBuilder.builder().stringType();
return nullable ? nullableSchema(str) : str;
case BINARY:
case VARBINARY:
Schema binary = SchemaBuilder.builder().bytesType();
return nullable ? nullableSchema(binary) : binary;
case TIMESTAMP_WITHOUT_TIME_ZONE:
// use long to represents Timestamp
final TimestampType timestampType = (TimestampType) logicalType;
precision = timestampType.getPrecision();
org.apache.avro.LogicalType avroLogicalType;
if (precision <= 3) {
avroLogicalType = LogicalTypes.timestampMillis();
} else {
throw new IllegalArgumentException(
"Avro does not support TIMESTAMP type "
+ "with precision: "
+ precision
+ ", it only supports precision up to 3.");
}
Schema timestamp = avroLogicalType.addToSchema(SchemaBuilder.builder().longType());
return nullable ? nullableSchema(timestamp) : timestamp;
case DATE:
// use int to represents Date
Schema date = LogicalTypes.date().addToSchema(SchemaBuilder.builder().intType());
return nullable ? nullableSchema(date) : date;
case TIME_WITHOUT_TIME_ZONE:
precision = ((TimeType) logicalType).getPrecision();
if (precision > 3) {
throw new IllegalArgumentException(
"Avro does not support TIME type with precision: "
+ precision
+ ", it only supports precision up to 3.");
}
// use int to represents Time, we only support millisecond when deserialization
Schema time =
LogicalTypes.timeMillis().addToSchema(SchemaBuilder.builder().intType());
return nullable ? nullableSchema(time) : time;
case DECIMAL:
DecimalType decimalType = (DecimalType) logicalType;
// store BigDecimal as byte[]
Schema decimal =
LogicalTypes.decimal(decimalType.getPrecision(), decimalType.getScale())
.addToSchema(SchemaBuilder.builder().bytesType());
return nullable ? nullableSchema(decimal) : decimal;
case ROW:
RowType rowType = (RowType) logicalType;
List<String> fieldNames = rowType.getFieldNames();
// we have to make sure the record name is different in a Schema
SchemaBuilder.FieldAssembler<Schema> builder =
SchemaBuilder.builder().record(rowName).fields();
for (int i = 0; i < rowType.getFieldCount(); i++) {
String fieldName = fieldNames.get(i);
LogicalType fieldType = rowType.getTypeAt(i);
SchemaBuilder.GenericDefault<Schema> fieldBuilder =
builder.name(fieldName)
.type(convertToSchema(fieldType, rowName + "_" + fieldName));
if (fieldType.isNullable()) {
builder = fieldBuilder.withDefault(null);
} else {
builder = fieldBuilder.noDefault();
}
}
Schema record = builder.endRecord();
return nullable ? nullableSchema(record) : record;
case MULTISET:
case MAP:
Schema map =
SchemaBuilder.builder()
.map()
.values(
convertToSchema(
extractValueTypeToAvroMap(logicalType), rowName));
return nullable ? nullableSchema(map) : map;
case ARRAY:
ArrayType arrayType = (ArrayType) logicalType;
Schema array =
SchemaBuilder.builder()
.array()
.items(convertToSchema(arrayType.getElementType(), rowName));
return nullable ? nullableSchema(array) : array;
case RAW:
case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
default:
throw new UnsupportedOperationException(
"Unsupported to derive Schema for type: " + logicalType);
}
} | 3.68 |
dubbo_ReflectUtils_getProperty | /**
* Get the value from the specified bean and its getter method.
*
* @param bean the bean instance
* @param methodName the name of getter
* @param <T> the type of property value
* @return the property value, or {@code null} if it cannot be resolved
* @since 2.7.5
*/
public static <T> T getProperty(Object bean, String methodName) {
Class<?> beanClass = bean.getClass();
BeanInfo beanInfo = null;
T propertyValue = null;
try {
beanInfo = Introspector.getBeanInfo(beanClass);
propertyValue = (T) Stream.of(beanInfo.getMethodDescriptors())
.filter(methodDescriptor -> methodName.equals(methodDescriptor.getName()))
.findFirst()
.map(method -> {
try {
return method.getMethod().invoke(bean);
} catch (Exception e) {
// ignore
}
return null;
})
.get();
} catch (Exception e) {
}
return propertyValue;
} | 3.68 |
framework_SASSAddonImportFileCreator_updateTheme | /**
* Updates a theme's addons.scss with the addon themes found on the
* classpath.
*
* @param themeDirectory
* The target theme directory
*/
public static void updateTheme(String themeDirectory) throws IOException {
File addonImports = new File(themeDirectory, ADDON_IMPORTS_FILE);
if (!addonImports.exists()) {
// Ensure directory exists
addonImports.getParentFile().mkdirs();
// Ensure file exists
addonImports.createNewFile();
}
LocationInfo info = ClassPathExplorer
.getAvailableWidgetSetsAndStylesheets();
try (PrintStream printStream = new PrintStream(
new FileOutputStream(addonImports))) {
printStream.println("/* " + ADDON_IMPORTS_FILE_TEXT + " */");
printStream.println("/* Do not manually edit this file. */");
printStream.println();
Map<String, URL> addonThemes = info.getAddonStyles();
// Sort addon styles so that CSS imports are first and SCSS import
// last
List<String> paths = new ArrayList<>(addonThemes.keySet());
Collections.sort(paths, (String path1, String path2) -> {
if (path1.toLowerCase(Locale.ROOT).endsWith(".css")
&& path2.toLowerCase(Locale.ROOT).endsWith(".scss")) {
return -1;
}
if (path1.toLowerCase(Locale.ROOT).endsWith(".scss")
&& path2.toLowerCase(Locale.ROOT).endsWith(".css")) {
return 1;
}
return 0;
});
List<String> mixins = new ArrayList<>();
for (String path : paths) {
mixins.addAll(
addImport(printStream, path, addonThemes.get(path)));
printStream.println();
}
createAddonsMixin(printStream, mixins);
} catch (FileNotFoundException e) {
// Should not happen since file is checked before this
getLogger().log(Level.WARNING, "Error updating addons.scss", e);
}
} | 3.68 |
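The comparator above puts plain CSS imports before SCSS imports so the generated imports stay in a workable order. A simplified, stand-alone take on that ordering (not Vaadin's exact comparator, which only reorders css/scss pairs and leaves other pairs untouched):

```java
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Locale;

public class ImportOrder {

    // Sort so plain .css imports come before .scss imports; List.sort is stable,
    // so everything else keeps its relative order.
    static void sortCssBeforeScss(List<String> paths) {
        paths.sort(Comparator.comparingInt(
                (String p) -> p.toLowerCase(Locale.ROOT).endsWith(".css") ? 0 : 1));
    }

    public static void main(String[] args) {
        List<String> paths = new ArrayList<>(List.of("addon/styles.scss", "addon/base.css"));
        sortCssBeforeScss(paths);
        System.out.println(paths); // [addon/base.css, addon/styles.scss]
    }
}
```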
morf_OracleDialect_rebuildSequenceAndTrigger | /**
* If the table has an auto-numbered column, rebuild its sequence and trigger.
*
* @param table The {@link Table}.
* @param sequence The auto-numbered {@link Column}, or null if the table has none.
* @return The SQL statements to run.
*/
private Collection<String> rebuildSequenceAndTrigger(Table table, Column sequence) {
// This requires drop/create trigger/sequence privileges so we avoid where we can.
if(sequence == null) {
return Lists.newArrayList(dropTrigger(table));
}
List<String> statements = new ArrayList<>();
statements.add(dropTrigger(table));
statements.add(dropSequence(table));
statements.add(createSequenceStartingFromExistingData(table, sequence));
statements.addAll(createTrigger(table, sequence));
return statements;
} | 3.68 |
framework_TabSheetScrollOnTabClose_getTestDescription | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTestDescription()
*/
@Override
protected String getTestDescription() {
return "Scroll position should not change when closing tabs.";
} | 3.68 |
flink_LocalInputChannel_releaseAllResources | /** Releases the partition reader. */
@Override
void releaseAllResources() throws IOException {
if (!isReleased) {
isReleased = true;
ResultSubpartitionView view = subpartitionView;
if (view != null) {
view.releaseAllResources();
subpartitionView = null;
}
}
} | 3.68 |
flink_HiveJdbcParameterUtils_setVariables | /**
* Uses the given {@code parameters} to set values on {@code hiveConf} or {@code sessionConfigs},
* depending on which kind each parameter belongs to.
*/
public static void setVariables(
HiveConf hiveConf, Map<String, String> sessionConfigs, Map<String, String> parameters) {
for (Map.Entry<String, String> entry : parameters.entrySet()) {
String key = entry.getKey();
if (key.startsWith(SET_PREFIX)) {
String newKey = key.substring(SET_PREFIX.length());
HiveSetProcessor.setVariable(hiveConf, sessionConfigs, newKey, entry.getValue());
} else if (!key.startsWith(USE_PREFIX)) {
sessionConfigs.put(key, entry.getValue());
}
}
} | 3.68 |
flink_MemorySegment_copyTo | /**
* Bulk copy method. Copies {@code numBytes} bytes from this memory segment, starting at
* position {@code offset} to the target memory segment. The bytes will be put into the target
* segment starting at position {@code targetOffset}.
*
* @param offset The position where the bytes are started to be read from in this memory
* segment.
* @param target The memory segment to copy the bytes to.
* @param targetOffset The position in the target memory segment to copy the chunk to.
* @param numBytes The number of bytes to copy.
* @throws IndexOutOfBoundsException If either of the offsets is invalid, or the source segment
* does not contain the given number of bytes (starting from offset), or the target segment
* does not have enough space for the bytes (counting from targetOffset).
*/
public void copyTo(int offset, MemorySegment target, int targetOffset, int numBytes) {
final byte[] thisHeapRef = this.heapMemory;
final byte[] otherHeapRef = target.heapMemory;
final long thisPointer = this.address + offset;
final long otherPointer = target.address + targetOffset;
if ((numBytes | offset | targetOffset) >= 0
&& thisPointer <= this.addressLimit - numBytes
&& otherPointer <= target.addressLimit - numBytes) {
UNSAFE.copyMemory(thisHeapRef, thisPointer, otherHeapRef, otherPointer, numBytes);
} else if (this.address > this.addressLimit) {
throw new IllegalStateException("this memory segment has been freed.");
} else if (target.address > target.addressLimit) {
throw new IllegalStateException("target memory segment has been freed.");
} else {
throw new IndexOutOfBoundsException(
String.format(
"offset=%d, targetOffset=%d, numBytes=%d, address=%d, targetAddress=%d",
offset, targetOffset, numBytes, this.address, target.address));
}
} | 3.68 |
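The guard `(numBytes | offset | targetOffset) >= 0` is a branch-free way to reject any negative argument: OR-ing the values sets the sign bit as soon as one of them is negative. A tiny stand-alone illustration:

```java
public class SignBitCheck {

    // True only when all three values are non-negative: the bitwise OR has its
    // sign bit set as soon as any operand is negative.
    static boolean allNonNegative(int a, int b, int c) {
        return (a | b | c) >= 0;
    }

    public static void main(String[] args) {
        System.out.println(allNonNegative(4, 8, 16));  // true
        System.out.println(allNonNegative(4, -1, 16)); // false
    }
}
```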
pulsar_ResourceGroup_rgResourceUsageListener | // Transport manager mandated op.
public void rgResourceUsageListener(String broker, ResourceUsage resourceUsage) {
NetworkUsage p;
p = resourceUsage.getPublish();
this.getUsageFromMonitoredEntity(ResourceGroupMonitoringClass.Publish, p, broker);
p = resourceUsage.getDispatch();
this.getUsageFromMonitoredEntity(ResourceGroupMonitoringClass.Dispatch, p, broker);
// Punt storage for now.
} | 3.68 |
flink_OptimizerNode_getInterestingProperties | /**
* Gets the properties that are interesting for this node to produce.
*
* @return The interesting properties for this node, or null, if not yet computed.
*/
public InterestingProperties getInterestingProperties() {
return this.intProps;
} | 3.68 |
pulsar_ResourceGroupService_getRgQuotaCalculationTime | // Visibility for testing.
protected static Summary.Child.Value getRgQuotaCalculationTime() {
return rgQuotaCalculationLatency.get();
} | 3.68 |
flink_ApplicationStatus_processExitCode | /**
* Gets the process exit code associated with this status.
*
* @return The associated process exit code.
*/
public int processExitCode() {
return processExitCode;
} | 3.68 |
morf_Join_toString | /**
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
if (table != null) {
return type.toString() + " " + table.toString() + " ON " + criterion;
} else {
return type.toString() + " (" + subSelect.toString() + ") ON " + criterion;
}
} | 3.68 |
hbase_AbstractFSWALProvider_getServerNameFromWALDirectoryName | /**
* This function returns the region server name from a log file name which is in one of the following
* formats:
* <ul>
* <li>hdfs://<name node>/hbase/.logs/<server name>-splitting/...</li>
* <li>hdfs://<name node>/hbase/.logs/<server name>/...</li>
* </ul>
* @return null if the passed in logFile isn't a valid WAL file path
*/
public static ServerName getServerNameFromWALDirectoryName(Path logFile) {
String logDirName = logFile.getParent().getName();
// We were passed the directory and not a file in it.
if (logDirName.equals(HConstants.HREGION_LOGDIR_NAME)) {
logDirName = logFile.getName();
}
ServerName serverName = null;
if (logDirName.endsWith(SPLITTING_EXT)) {
logDirName = logDirName.substring(0, logDirName.length() - SPLITTING_EXT.length());
}
try {
serverName = ServerName.parseServerName(logDirName);
} catch (IllegalArgumentException | IllegalStateException ex) {
serverName = null;
LOG.warn("Cannot parse a server name from path={}", logFile, ex);
}
if (serverName != null && serverName.getStartCode() < 0) {
LOG.warn("Invalid log file path={}, start code {} is less than 0", logFile,
serverName.getStartCode());
serverName = null;
}
return serverName;
} | 3.68 |
flink_PermanentBlobCache_run | /** Cleans up BLOBs which are not referenced anymore. */
@Override
public void run() {
synchronized (jobRefCounters) {
Iterator<Map.Entry<JobID, RefCount>> entryIter =
jobRefCounters.entrySet().iterator();
final long currentTimeMillis = System.currentTimeMillis();
while (entryIter.hasNext()) {
Map.Entry<JobID, RefCount> entry = entryIter.next();
RefCount ref = entry.getValue();
if (ref.references <= 0
&& ref.keepUntil > 0
&& currentTimeMillis >= ref.keepUntil) {
JobID jobId = entry.getKey();
final File localFile =
new File(
BlobUtils.getStorageLocationPath(
storageDir.deref().getAbsolutePath(), jobId));
/*
* NOTE: normally it is not required to acquire the write lock to delete the job's
* storage directory since there should be no one accessing it with the ref
* counter being 0 - acquire it just in case, to always be on the safe side
*/
readWriteLock.writeLock().lock();
boolean success = false;
try {
blobCacheSizeTracker.untrackAll(jobId);
FileUtils.deleteDirectory(localFile);
success = true;
} catch (Throwable t) {
log.warn(
"Failed to locally delete job directory "
+ localFile.getAbsolutePath(),
t);
} finally {
readWriteLock.writeLock().unlock();
}
// let's only remove this directory from cleanup if the cleanup was
// successful
// (does not need the write lock)
if (success) {
entryIter.remove();
}
}
}
}
} | 3.68 |
rocketmq-connect_RebalanceImpl_doRebalance | /**
* Distribute connectors and tasks according to the {@link RebalanceImpl#allocateConnAndTaskStrategy}.
*/
public void doRebalance() {
List<String> curAliveWorkers = clusterManagementService.getAllAliveWorkers();
if (curAliveWorkers != null) {
if (clusterManagementService instanceof ClusterManagementServiceImpl) {
log.info("Current Alive workers : " + curAliveWorkers.size());
} else if (clusterManagementService instanceof MemoryClusterManagementServiceImpl) {
log.info("Current alive worker : " + curAliveWorkers.iterator().next());
}
}
// exclude deleted connectors
Map<String, ConnectKeyValue> curConnectorConfigs = configManagementService.getConnectorConfigs();
log.trace("Current ConnectorConfigs : " + curConnectorConfigs);
Map<String, List<ConnectKeyValue>> curTaskConfigs = configManagementService.getTaskConfigs();
log.trace("Current TaskConfigs : " + curTaskConfigs);
ConnAndTaskConfigs allocateResult = allocateConnAndTaskStrategy.allocate(curAliveWorkers, clusterManagementService.getCurrentWorker(), curConnectorConfigs, curTaskConfigs);
log.trace("Allocated connector:{}", allocateResult.getConnectorConfigs());
log.trace("Allocated task:{}", allocateResult.getTaskConfigs());
updateProcessConfigsInRebalance(allocateResult);
} | 3.68 |
hbase_AbstractRecoveredEditsOutputSink_deleteOneWithFewerEntries | // delete the one with fewer wal entries
private void deleteOneWithFewerEntries(RecoveredEditsWriter editsWriter, Path dst)
throws IOException {
long dstMinLogSeqNum = -1L;
try (WALStreamReader reader =
walSplitter.getWalFactory().createStreamReader(walSplitter.walFS, dst)) {
WAL.Entry entry = reader.next();
if (entry != null) {
dstMinLogSeqNum = entry.getKey().getSequenceId();
}
} catch (EOFException e) {
LOG.debug("Got EOF when reading first WAL entry from {}, an empty or broken WAL file?", dst,
e);
}
if (editsWriter.minLogSeqNum < dstMinLogSeqNum) {
LOG.warn("Found existing old edits file. It could be the result of a previous failed"
+ " split attempt or we have duplicated wal entries. Deleting " + dst + ", length="
+ walSplitter.walFS.getFileStatus(dst).getLen());
if (!walSplitter.walFS.delete(dst, false)) {
LOG.warn("Failed deleting of old {}", dst);
throw new IOException("Failed deleting of old " + dst);
}
} else {
LOG
.warn("Found existing old edits file and we have less entries. Deleting " + editsWriter.path
+ ", length=" + walSplitter.walFS.getFileStatus(editsWriter.path).getLen());
if (!walSplitter.walFS.delete(editsWriter.path, false)) {
LOG.warn("Failed deleting of {}", editsWriter.path);
throw new IOException("Failed deleting of " + editsWriter.path);
}
}
} | 3.68 |
hadoop_TimelineDomains_addDomain | /**
* Add a single domain into the existing domain list
*
* @param domain
* a single domain
*/
public void addDomain(TimelineDomain domain) {
domains.add(domain);
} | 3.68 |
framework_DragHandle_removeFromParent | /**
* Removes this drag handle from whatever it was attached to.
*/
public void removeFromParent() {
if (parent != null) {
parent.removeChild(element);
parent = null;
}
} | 3.68 |
flink_DeltaIteration_parallelism | /**
* Sets the parallelism for the iteration.
*
* @param parallelism The parallelism.
* @return The iteration object, for function call chaining.
*/
public DeltaIteration<ST, WT> parallelism(int parallelism) {
OperatorValidationUtils.validateParallelism(parallelism);
this.parallelism = parallelism;
return this;
} | 3.68 |
hadoop_BytesWritable_toString | /**
* Generate the stream of bytes as hex pairs separated by ' '.
*/
@Override
public String toString() {
return IntStream.range(0, size)
.mapToObj(idx -> String.format("%02x", bytes[idx]))
.collect(Collectors.joining(" "));
} | 3.68 |
Activiti_TreeValueExpression_isDeferred | /**
* Answer <code>true</code> if this is a deferred expression (containing
* sub-expressions starting with <code>#{</code>)
*/
public boolean isDeferred() {
return deferred;
} | 3.68 |
graphhopper_VectorTileDecoder_isAutoScale | /**
* Get the autoScale setting.
*
* @return autoScale
*/
public boolean isAutoScale() {
return autoScale;
} | 3.68 |
Activiti_TreeValueExpression_getType | /**
* Evaluates the expression as an lvalue and answers the result type.
* @param context used to resolve properties (<code>base.property</code> and <code>base[property]</code>)
* and to determine the result from the last base/property pair
* @return lvalue evaluation type or <code>null</code> for rvalue expressions
* @throws ELException if evaluation fails (e.g. property not found, type conversion failed, ...)
*/
@Override
public Class<?> getType(ELContext context) throws ELException {
return node.getType(bindings, context);
} | 3.68 |
morf_SelectStatementBuilder_withParallelQueryPlan | /**
* Request that this query is executed with a parallel execution plan and with the given degree of parallelism. If the database implementation does not support, or is configured to disable parallel query execution, then this request will have no effect.
*
* <p>For queries that are likely to conduct a full table scan, a parallel execution plan may result in the results being delivered faster, although the exact effect depends on
* the underlying database, the nature of the data and the nature of the query.</p>
*
* <p>Note that the expected use cases of this are rare. Caution is needed because if multiple requests are made by the application to run parallel queries, the resulting resource contention may result in worse performance - this is not intended for queries that are submitted in parallel by the application.</p>
*
* @param degreeOfParallelism Degree of parallelism to be specified in the hint.
* @return this, for method chaining.
*/
public SelectStatementBuilder withParallelQueryPlan(int degreeOfParallelism) {
this.hints.add(new ParallelQueryHint(degreeOfParallelism));
return this;
} | 3.68 |
flink_ExecutionConfig_setTaskCancellationTimeout | /**
* Sets the timeout (in milliseconds) after which an ongoing task cancellation is considered
* failed, leading to a fatal TaskManager error.
*
* <p>The cluster default is configured via {@link
* TaskManagerOptions#TASK_CANCELLATION_TIMEOUT}.
*
* <p>The value <code>0</code> disables the timeout. In this case a stuck cancellation will not
* lead to a fatal error.
*
* @param timeout The task cancellation timeout (in milliseconds).
*/
@PublicEvolving
public ExecutionConfig setTaskCancellationTimeout(long timeout) {
checkArgument(timeout >= 0, "Timeout needs to be >= 0.");
configuration.set(TaskManagerOptions.TASK_CANCELLATION_TIMEOUT, timeout);
return this;
} | 3.68 |
framework_VAbstractCalendarPanel_isYear | /**
* Returns {@code true} if the provided {@code resolution} represents a
* year.
*
* @param resolution
* the given resolution
* @return {@code true} if the {@code resolution} represents a year
*/
protected boolean isYear(R resolution) {
return parent.isYear(resolution);
} | 3.68 |
hbase_StructIterator_skip | /**
* Bypass the next encoded value.
* @return the number of bytes skipped.
*/
public int skip() {
if (!hasNext()) {
throw new NoSuchElementException();
}
DataType<?> t = types[idx++];
if (src.getPosition() == src.getLength() && t.isNullable()) {
return 0;
}
return t.skip(src);
} | 3.68 |
morf_AbstractSqlDialectTest_testBinaryFormatter | /**
* Tests formatting of binary values in record derived from a {@link ResultSet}.
*
* @throws SQLException when a database access error occurs
*/
@Test
public void testBinaryFormatter() throws SQLException {
assertEquals("Value not transformed into Base64", "REVG", checkDatabaseByteArrayToRecordValue(new byte[] {68,69,70}));
assertEquals("Value not transformed into Base64", "//79", checkDatabaseByteArrayToRecordValue(new byte[] { -1, -2, -3 }));
assertNull("Null should result in null value", checkDatabaseByteArrayToRecordValue(null));
assertEquals(
"Value not transformed into Base64",
BASE64_ENCODED,
checkDatabaseByteArrayToRecordValue(BYTE_ARRAY));
} | 3.68 |
flink_SourceCoordinatorContext_callInCoordinatorThread | /**
* A helper method that delegates the callable to the coordinator thread if the current thread
* is not the coordinator thread, otherwise call the callable right away.
*
* @param callable the callable to delegate.
*/
private <V> V callInCoordinatorThread(Callable<V> callable, String errorMessage) {
// Ensure the split assignment is done by the coordinator executor.
if (!coordinatorThreadFactory.isCurrentThreadCoordinatorThread()) {
try {
final Callable<V> guardedCallable =
() -> {
try {
return callable.call();
} catch (Throwable t) {
LOG.error("Uncaught Exception in Source Coordinator Executor", t);
ExceptionUtils.rethrowException(t);
return null;
}
};
return coordinatorExecutor.submit(guardedCallable).get();
} catch (InterruptedException | ExecutionException e) {
throw new FlinkRuntimeException(errorMessage, e);
}
}
try {
return callable.call();
} catch (Throwable t) {
LOG.error("Uncaught Exception in Source Coordinator Executor", t);
throw new FlinkRuntimeException(errorMessage, t);
}
} | 3.68 |
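A generic sketch of the same "run on a designated thread unless we are already on it" pattern, built on a plain single-thread executor; the names are invented and this is not Flink's coordinator machinery:

```java
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicReference;

public class DesignatedThreadCaller {

    private final AtomicReference<Thread> workerThread = new AtomicReference<>();
    private final ExecutorService executor = Executors.newSingleThreadExecutor(r -> {
        Thread t = new Thread(r, "coordinator");
        workerThread.set(t);
        return t;
    });

    // Run the callable on the designated thread, unless we are already on it.
    <V> V callOnWorker(Callable<V> callable) throws Exception {
        if (Thread.currentThread() == workerThread.get()) {
            return callable.call();
        }
        try {
            return executor.submit(callable).get();
        } catch (ExecutionException e) {
            throw new RuntimeException("call on worker failed", e.getCause());
        }
    }

    public static void main(String[] args) throws Exception {
        DesignatedThreadCaller caller = new DesignatedThreadCaller();
        String who = caller.callOnWorker(() -> Thread.currentThread().getName());
        System.out.println(who); // coordinator
        caller.executor.shutdown();
    }
}
```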
framework_Navigator_runAfterLeaveConfirmation | /**
* Triggers {@link View#beforeLeave(ViewBeforeLeaveEvent)} for the current
* view with the given action.
* <p>
* This method is typically called by
* {@link #navigateTo(View, String, String)} but can be called from
* application code when you want to e.g. show a confirmation dialog before
* performing an action which is not a navigation but which would cause the
* view to be hidden, e.g. logging out.
* <p>
* Note that this method will not trigger any {@link ViewChangeListener}s as
* it does not navigate to a new view. Use {@link #navigateTo(String)} to
* change views and trigger all listeners.
*
* @param action
* the action to execute when the view confirms it is ok to leave
* @since 8.1
*/
public void runAfterLeaveConfirmation(ViewLeaveAction action) {
View currentView = getCurrentView();
if (currentView == null) {
action.run();
} else {
ViewBeforeLeaveEvent beforeLeaveEvent = new ViewBeforeLeaveEvent(
this, action);
currentView.beforeLeave(beforeLeaveEvent);
if (!beforeLeaveEvent.isNavigateRun()) {
// The event handler prevented navigation
// Revert URL to previous state in case the navigation was
// caused by the back-button
revertNavigation();
}
}
} | 3.68 |
flink_JoinNode_computeOperatorSpecificDefaultEstimates | /**
* The default estimates build on the principle of inclusion: The smaller input key domain is
* included in the larger input key domain. We also assume that every key from the larger input
* has one join partner in the smaller input. The result cardinality is hence the larger one.
*/
@Override
protected void computeOperatorSpecificDefaultEstimates(DataStatistics statistics) {
long card1 = getFirstPredecessorNode().getEstimatedNumRecords();
long card2 = getSecondPredecessorNode().getEstimatedNumRecords();
this.estimatedNumRecords = (card1 < 0 || card2 < 0) ? -1 : Math.max(card1, card2);
if (this.estimatedNumRecords >= 0) {
float width1 = getFirstPredecessorNode().getEstimatedAvgWidthPerOutputRecord();
float width2 = getSecondPredecessorNode().getEstimatedAvgWidthPerOutputRecord();
float width = (width1 <= 0 || width2 <= 0) ? -1 : width1 + width2;
if (width > 0) {
this.estimatedOutputSize = (long) (width * this.estimatedNumRecords);
}
}
} | 3.68 |
hudi_InternalFilter_write | /**
* Serialize the fields of this object to <code>out</code>.
*
* @param out <code>DataOuput</code> to serialize this object into.
* @throws IOException
*/
public void write(DataOutput out) throws IOException {
out.writeInt(VERSION);
out.writeInt(this.nbHash);
out.writeByte(this.hashType);
out.writeInt(this.vectorSize);
} | 3.68 |
hbase_ThriftServer_main | /**
* Start up the Thrift2 server.
*/
public static void main(String[] args) throws Exception {
final Configuration conf = HBaseConfiguration.create();
// for now, only time we return is on an argument error.
final int status = ToolRunner.run(conf, new ThriftServer(conf), args);
System.exit(status);
} | 3.68 |
zilla_HttpClientFactory_encodeLiteral | // TODO dynamic table, Huffman, never indexed
private void encodeLiteral(
HpackLiteralHeaderFieldFW.Builder builder,
HpackContext hpackContext,
DirectBuffer nameBuffer,
DirectBuffer valueBuffer)
{
builder.type(WITHOUT_INDEXING);
final int nameIndex = hpackContext.index(nameBuffer);
if (nameIndex != -1)
{
builder.name(nameIndex);
}
else
{
builder.name(nameBuffer, 0, nameBuffer.capacity());
}
builder.value(valueBuffer, 0, valueBuffer.capacity());
} | 3.68 |
flink_MutableRecordAndPosition_set | /** Updates the record and position in this object. */
public void set(E record, long offset, long recordSkipCount) {
this.record = record;
this.offset = offset;
this.recordSkipCount = recordSkipCount;
} | 3.68 |
pulsar_ManagedLedgerConfig_setRetentionSizeInMB | /**
* The retention size is used to set a maximum retention size quota on the ManagedLedger.
* <p>
* Retention size and retention time ({@link #setRetentionTime(int, TimeUnit)}) are together used to retain the
* ledger data when there are no cursors or when all the cursors have marked the data for deletion.
* Data will be deleted in this case when both retention time and retention size settings don't prevent deleting
* the data marked for deletion.
* <p>
* A retention size of 0 (default) will make data to be deleted immediately.
* <p>
* A retention size of -1, means to have an unlimited retention size.
*
* @param retentionSizeInMB
* quota for message retention
*/
public ManagedLedgerConfig setRetentionSizeInMB(long retentionSizeInMB) {
this.retentionSizeInMB = retentionSizeInMB;
return this;
} | 3.68 |
flink_TableConfigUtils_getAggPhaseStrategy | /**
* Returns the aggregate phase strategy configuration.
*
* @param tableConfig TableConfig object
* @return the aggregate phase strategy
*/
public static AggregatePhaseStrategy getAggPhaseStrategy(ReadableConfig tableConfig) {
String aggPhaseConf = tableConfig.get(TABLE_OPTIMIZER_AGG_PHASE_STRATEGY).trim();
if (aggPhaseConf.isEmpty()) {
return AggregatePhaseStrategy.AUTO;
} else {
return AggregatePhaseStrategy.valueOf(aggPhaseConf);
}
} | 3.68 |
morf_SqlInternalUtils_defaultOrderByToAscending | /**
* Sets the fields in an ORDER BY to use ascending order if not specified.
*
* This method will be removed when statement immutability is turned on permanently.
*/
@Deprecated
static void defaultOrderByToAscending(Iterable<AliasedField> orderBys) {
for (AliasedField currentField : orderBys) {
if (currentField instanceof FieldReference && ((FieldReference) currentField).getDirection() == Direction.NONE) {
((FieldReference) currentField).setDirection(Direction.ASCENDING);
}
}
} | 3.68 |
hadoop_DumpUtil_dumpChunk | /**
* Print data in hex format in a chunk.
* @param chunk chunk.
*/
public static void dumpChunk(ECChunk chunk) {
String str;
if (chunk == null) {
str = "<EMPTY>";
} else {
byte[] bytes = chunk.toBytesArray();
str = DumpUtil.bytesToHex(bytes, 16);
}
System.out.println(str);
} | 3.68 |
flink_SingleOutputStreamOperator_getSideOutput | /**
* Gets the {@link DataStream} that contains the elements that are emitted from an operation
* into the side output with the given {@link OutputTag}.
*
* @see org.apache.flink.streaming.api.functions.ProcessFunction.Context#output(OutputTag,
* Object)
*/
public <X> SideOutputDataStream<X> getSideOutput(OutputTag<X> sideOutputTag) {
sideOutputTag = clean(requireNonNull(sideOutputTag));
// make a defensive copy
sideOutputTag = new OutputTag<X>(sideOutputTag.getId(), sideOutputTag.getTypeInfo());
TypeInformation<?> type = requestedSideOutputs.get(sideOutputTag);
if (type != null && !type.equals(sideOutputTag.getTypeInfo())) {
throw new UnsupportedOperationException(
"A side output with a matching id was "
+ "already requested with a different type. This is not allowed, side output "
+ "ids need to be unique.");
}
requestedSideOutputs.put(sideOutputTag, sideOutputTag.getTypeInfo());
SideOutputTransformation<X> sideOutputTransformation =
new SideOutputTransformation<>(this.getTransformation(), sideOutputTag);
return new SideOutputDataStream<>(this.getExecutionEnvironment(), sideOutputTransformation);
} | 3.68 |
framework_VTree_getNavigationDownKey | /**
* Get the key that moves the selection head downwards. By default it is the
* down arrow key but by overriding this you can change the key to whatever
* you want.
*
* @return The keycode of the key
*/
protected int getNavigationDownKey() {
return KeyCodes.KEY_DOWN;
} | 3.68 |
hudi_DayBasedCompactionStrategy_getPartitionPathWithoutPartitionKeys | /**
* If it is a Hive-style partition path, convert it to a regular partition path, e.g. year=2019/month=11/day=24 => 2019/11/24
*/
protected static String getPartitionPathWithoutPartitionKeys(String partitionPath) {
if (partitionPath.contains("=")) {
return partitionPath.replaceFirst(".*?=", "").replaceAll("/.*?=", "/");
}
return partitionPath;
} | 3.68 |
flink_ExecutionEnvironment_clearJobListeners | /** Clear all registered {@link JobListener}s. */
@PublicEvolving
public void clearJobListeners() {
this.jobListeners.clear();
} | 3.68 |
hadoop_Utils_readString | /**
* Read a String as a VInt n, followed by n Bytes in Text format.
*
* @param in
* The input stream.
* @return The string
* @throws IOException raised on errors performing I/O.
*/
public static String readString(DataInput in) throws IOException {
int length = readVInt(in);
if (length == -1) return null;
byte[] buffer = new byte[length];
in.readFully(buffer);
return Text.decode(buffer);
} | 3.68 |
hbase_Procedure_getResult | /** Returns the serialized result if any, otherwise null */
public byte[] getResult() {
return result;
} | 3.68 |
flink_WatermarkSpec_getWatermarkExpression | /** Returns the {@link ResolvedExpression} for watermark generation. */
public ResolvedExpression getWatermarkExpression() {
return watermarkExpression;
} | 3.68 |
flink_SourceReaderTestBase_testRead | /** Simply test the reader reads all the splits fine. */
@Test
void testRead() throws Exception {
try (SourceReader<Integer, SplitT> reader = createReader()) {
reader.addSplits(getSplits(numSplits, NUM_RECORDS_PER_SPLIT, Boundedness.BOUNDED));
ValidatingSourceOutput output = new ValidatingSourceOutput();
while (output.count < totalNumRecords) {
reader.pollNext(output);
}
output.validate();
}
} | 3.68 |
flink_MimeTypes_getMimeTypeForExtension | /**
* Gets the MIME type for the file with the given extension. If the mime type is not recognized,
* this method returns null.
*
* @param fileExtension The file extension.
* @return The MIME type, or {@code null}, if the file extension is not recognized.
*/
public static String getMimeTypeForExtension(String fileExtension) {
return MIME_MAP.get(fileExtension.toLowerCase());
} | 3.68 |
morf_H2Dialect_renameTableStatements | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#renameTableStatements(org.alfasoftware.morf.metadata.Table, org.alfasoftware.morf.metadata.Table)
*/
@Override
public Collection<String> renameTableStatements(Table from, Table to) {
Builder<String> builder = ImmutableList.builder();
if (!primaryKeysForTable(from).isEmpty()) {
builder.add(dropPrimaryKeyConstraintStatement(from));
}
builder.add("ALTER TABLE " + schemaNamePrefix() + from.getName() + " RENAME TO " + to.getName());
if (!primaryKeysForTable(to).isEmpty()) {
builder.add(addPrimaryKeyConstraintStatement(to, namesOfColumns(primaryKeysForTable(to))));
}
return builder.build();
} | 3.68 |
flink_AvailabilityProvider_getUnavailableToResetUnavailable | /**
* Creates a new uncompleted future as the current state and returns the previous
* uncompleted one.
*/
public CompletableFuture<?> getUnavailableToResetUnavailable() {
CompletableFuture<?> toNotify = availableFuture;
availableFuture = new CompletableFuture<>();
return toNotify;
} | 3.68 |
flink_SemanticPropUtil_addSourceFieldOffsets | /**
* Creates SemanticProperties by adding offsets to each input field index of the given
* SemanticProperties.
*
* @param props The SemanticProperties to which the offset is added.
* @param numInputFields1 The original number of fields of the first input.
* @param numInputFields2 The original number of fields of the second input.
* @param offset1 The offset that is added to each input field index of the first input.
* @param offset2 The offset that is added to each input field index of the second input.
* @return New SemanticProperties with added offsets.
*/
public static DualInputSemanticProperties addSourceFieldOffsets(
DualInputSemanticProperties props,
int numInputFields1,
int numInputFields2,
int offset1,
int offset2) {
DualInputSemanticProperties offsetProps = new DualInputSemanticProperties();
// add offset to read fields on first input
if (props.getReadFields(0) != null) {
FieldSet offsetReadFields = new FieldSet();
for (int r : props.getReadFields(0)) {
offsetReadFields = offsetReadFields.addField(r + offset1);
}
offsetProps.addReadFields(0, offsetReadFields);
}
// add offset to read fields on second input
if (props.getReadFields(1) != null) {
FieldSet offsetReadFields = new FieldSet();
for (int r : props.getReadFields(1)) {
offsetReadFields = offsetReadFields.addField(r + offset2);
}
offsetProps.addReadFields(1, offsetReadFields);
}
// add offset to forward fields on first input
for (int s = 0; s < numInputFields1; s++) {
FieldSet targetFields = props.getForwardingTargetFields(0, s);
for (int t : targetFields) {
offsetProps.addForwardedField(0, s + offset1, t);
}
}
// add offset to forward fields on second input
for (int s = 0; s < numInputFields2; s++) {
FieldSet targetFields = props.getForwardingTargetFields(1, s);
for (int t : targetFields) {
offsetProps.addForwardedField(1, s + offset2, t);
}
}
return offsetProps;
} | 3.68 |
flink_MergeTableLikeUtil_computeMergingStrategies | /**
* Calculates merging strategies for all options. It applies options given by a user to the
* {@link #defaultMergingStrategies}. The {@link MergingStrategy} specified for {@link
* FeatureOption#ALL} overwrites all the default options. Those can be further changed with a
* specific {@link FeatureOption}.
*/
public Map<FeatureOption, MergingStrategy> computeMergingStrategies(
List<SqlTableLike.SqlTableLikeOption> mergingOptions) {
Map<FeatureOption, MergingStrategy> result = new HashMap<>(defaultMergingStrategies);
Optional<SqlTableLike.SqlTableLikeOption> maybeAllOption =
mergingOptions.stream()
.filter(option -> option.getFeatureOption() == FeatureOption.ALL)
.findFirst();
maybeAllOption.ifPresent(
(allOption) -> {
MergingStrategy strategy = allOption.getMergingStrategy();
for (FeatureOption featureOption : FeatureOption.values()) {
if (featureOption != FeatureOption.ALL) {
result.put(featureOption, strategy);
}
}
});
for (SqlTableLike.SqlTableLikeOption mergingOption : mergingOptions) {
result.put(mergingOption.getFeatureOption(), mergingOption.getMergingStrategy());
}
return result;
} | 3.68 |
hadoop_SchedulerHealth_getResourcesReserved | /**
* Get the resources reserved in the last scheduler run.
*
* @return resources reserved
*/
public Resource getResourcesReserved() {
return getResourceDetails(Operation.RESERVATION);
} | 3.68 |
dubbo_NettyHttpRestServer_getChannelOptionMap | /**
* Creates the channel options map.
*
* @param url the server URL carrying the channel configuration
* @return the channel options map
*/
protected Map<ChannelOption, Object> getChannelOptionMap(URL url) {
Map<ChannelOption, Object> options = new HashMap<>();
options.put(ChannelOption.SO_REUSEADDR, Boolean.TRUE);
options.put(ChannelOption.TCP_NODELAY, Boolean.TRUE);
options.put(
ChannelOption.SO_BACKLOG,
url.getPositiveParameter(BACKLOG_KEY, org.apache.dubbo.remoting.Constants.DEFAULT_BACKLOG));
options.put(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
return options;
} | 3.68 |