name (string, length 12–178) | code_snippet (string, length 8–36.5k) | score (float64, 3.26–3.68)
---|---|---|
flink_BroadcastPartitioner_selectChannel | /**
* Note: Broadcast mode can be handled directly for all the output channels in the record writer,
* so there is no need to select channels via this method.
*/
@Override
public int selectChannel(SerializationDelegate<StreamRecord<T>> record) {
throw new UnsupportedOperationException(
"Broadcast partitioner does not support select channels.");
} | 3.68 |
morf_XmlDataSetProducer_viewExists | /**
* @see org.alfasoftware.morf.metadata.Schema#viewExists(java.lang.String)
*/
@Override
public boolean viewExists(String name) {
return false;
} | 3.68 |
hbase_PrivateCellUtil_findCommonPrefixInFlatKey | /**
* Find length of common prefix in keys of the cells, considering key as byte[] if serialized in
* {@link KeyValue}. The key format is <2 bytes rk len><rk><1 byte cf
* len><cf><qualifier><8 bytes timestamp><1 byte type>
* @param c1 the first cell
* @param c2 the second cell
* @param bypassFamilyCheck when true, assume the family bytes are the same in both cells. Pass it
* as true when dealing with cells in the same CF so as to avoid some checks
* @param withTsType when true, also compare the timestamp and type bytes.
* @return the length of the common prefix
*/
public static int findCommonPrefixInFlatKey(Cell c1, Cell c2, boolean bypassFamilyCheck,
boolean withTsType) {
// Compare the 2 bytes in RK length part
short rLen1 = c1.getRowLength();
short rLen2 = c2.getRowLength();
int commonPrefix = KeyValue.ROW_LENGTH_SIZE;
if (rLen1 != rLen2) {
// early out when the RK length itself is not matching
return ByteBufferUtils.findCommonPrefix(Bytes.toBytes(rLen1), 0, KeyValue.ROW_LENGTH_SIZE,
Bytes.toBytes(rLen2), 0, KeyValue.ROW_LENGTH_SIZE);
}
// Compare the RKs
int rkCommonPrefix = 0;
if (c1 instanceof ByteBufferExtendedCell && c2 instanceof ByteBufferExtendedCell) {
rkCommonPrefix =
ByteBufferUtils.findCommonPrefix(((ByteBufferExtendedCell) c1).getRowByteBuffer(),
((ByteBufferExtendedCell) c1).getRowPosition(), rLen1,
((ByteBufferExtendedCell) c2).getRowByteBuffer(),
((ByteBufferExtendedCell) c2).getRowPosition(), rLen2);
} else {
// There cannot be a case where one cell is a BBCell and the other is a KeyValue. This flow comes
// either in flush or compactions. In flushes both cells are KV and in case of compaction it will
// be either KV or BBCell.
rkCommonPrefix = ByteBufferUtils.findCommonPrefix(c1.getRowArray(), c1.getRowOffset(), rLen1,
c2.getRowArray(), c2.getRowOffset(), rLen2);
}
commonPrefix += rkCommonPrefix;
if (rkCommonPrefix != rLen1) {
// Early out when RK is not fully matching.
return commonPrefix;
}
// Compare 1 byte CF length part
byte fLen1 = c1.getFamilyLength();
if (bypassFamilyCheck) {
// This flag will be true when caller is sure that the family will be same for both the cells
// Just make commonPrefix to increment by the family part
commonPrefix += KeyValue.FAMILY_LENGTH_SIZE + fLen1;
} else {
byte fLen2 = c2.getFamilyLength();
if (fLen1 != fLen2) {
// early out when the CF length itself is not matching
return commonPrefix;
}
// CF lengths are same so there is one more byte common in key part
commonPrefix += KeyValue.FAMILY_LENGTH_SIZE;
// Compare the CF names
int fCommonPrefix;
if (c1 instanceof ByteBufferExtendedCell && c2 instanceof ByteBufferExtendedCell) {
fCommonPrefix =
ByteBufferUtils.findCommonPrefix(((ByteBufferExtendedCell) c1).getFamilyByteBuffer(),
((ByteBufferExtendedCell) c1).getFamilyPosition(), fLen1,
((ByteBufferExtendedCell) c2).getFamilyByteBuffer(),
((ByteBufferExtendedCell) c2).getFamilyPosition(), fLen2);
} else {
fCommonPrefix = ByteBufferUtils.findCommonPrefix(c1.getFamilyArray(), c1.getFamilyOffset(),
fLen1, c2.getFamilyArray(), c2.getFamilyOffset(), fLen2);
}
commonPrefix += fCommonPrefix;
if (fCommonPrefix != fLen1) {
return commonPrefix;
}
}
// Compare the Qualifiers
int qLen1 = c1.getQualifierLength();
int qLen2 = c2.getQualifierLength();
int qCommon;
if (c1 instanceof ByteBufferExtendedCell && c2 instanceof ByteBufferExtendedCell) {
qCommon =
ByteBufferUtils.findCommonPrefix(((ByteBufferExtendedCell) c1).getQualifierByteBuffer(),
((ByteBufferExtendedCell) c1).getQualifierPosition(), qLen1,
((ByteBufferExtendedCell) c2).getQualifierByteBuffer(),
((ByteBufferExtendedCell) c2).getQualifierPosition(), qLen2);
} else {
qCommon = ByteBufferUtils.findCommonPrefix(c1.getQualifierArray(), c1.getQualifierOffset(),
qLen1, c2.getQualifierArray(), c2.getQualifierOffset(), qLen2);
}
commonPrefix += qCommon;
if (!withTsType || Math.max(qLen1, qLen2) != qCommon) {
return commonPrefix;
}
// Compare the timestamp parts
int tsCommonPrefix = ByteBufferUtils.findCommonPrefix(Bytes.toBytes(c1.getTimestamp()), 0,
KeyValue.TIMESTAMP_SIZE, Bytes.toBytes(c2.getTimestamp()), 0, KeyValue.TIMESTAMP_SIZE);
commonPrefix += tsCommonPrefix;
if (tsCommonPrefix != KeyValue.TIMESTAMP_SIZE) {
return commonPrefix;
}
// Compare the type
if (c1.getTypeByte() == c2.getTypeByte()) {
commonPrefix += KeyValue.TYPE_SIZE;
}
return commonPrefix;
} | 3.68 |
framework_CustomizedSystemMessages_setInternalErrorURL | /**
* Sets the URL to go to when an internal error occurs.
*
* @param internalErrorURL
the URL to go to, or null to reload the current URL
*/
public void setInternalErrorURL(String internalErrorURL) {
this.internalErrorURL = internalErrorURL;
} | 3.68 |
hbase_DeleteNamespaceProcedure_prepareDelete | /**
* Action taken before the actual deletion of the namespace.
* @param env MasterProcedureEnv
* @return true if the delete can proceed, false if the procedure has been marked as failed
*/
private boolean prepareDelete(final MasterProcedureEnv env) throws IOException {
if (!getTableNamespaceManager(env).doesNamespaceExist(namespaceName)) {
setFailure("master-delete-namespace", new NamespaceNotFoundException(namespaceName));
return false;
}
if (NamespaceDescriptor.RESERVED_NAMESPACES.contains(namespaceName)) {
setFailure("master-delete-namespace",
new ConstraintException("Reserved namespace " + namespaceName + " cannot be removed."));
return false;
}
int tableCount = 0;
try {
tableCount = env.getMasterServices().listTableDescriptorsByNamespace(namespaceName).size();
} catch (FileNotFoundException fnfe) {
setFailure("master-delete-namespace", new NamespaceNotFoundException(namespaceName));
return false;
}
if (tableCount > 0) {
setFailure("master-delete-namespace",
new ConstraintException("Only empty namespaces can be removed. Namespace " + namespaceName
+ " has " + tableCount + " tables"));
return false;
}
// This is used for rollback
nsDescriptor = getTableNamespaceManager(env).get(namespaceName);
return true;
} | 3.68 |
framework_MarkedAsDirtyConnectorEvent_getUi | /**
* Gets the UI for which the connector event was fired.
*
* @return the target UI for the event
*/
public UI getUi() {
return ui;
} | 3.68 |
hbase_MunkresAssignment_starInCol | /**
* Find a starred zero in the specified column. If there are no starred zeroes in the specified
* column, then null will be returned.
* @param c the index of the column to be searched
* @return pair of row and column indices of starred zero or null
*/
private Pair<Integer, Integer> starInCol(int c) {
for (int r = 0; r < rows; r++) {
if (mask[r][c] == STAR) {
return new Pair<>(r, c);
}
}
return null;
} | 3.68 |
hudi_BaseHoodieWriteClient_initTable | /**
* Instantiates and initializes an instance of {@link HoodieTable}, performing crucial bootstrapping
* operations such as:
*
* <ul>
*   <li>Checking whether upgrade/downgrade is required</li>
*   <li>Bootstrapping Metadata Table (if required)</li>
*   <li>Initializing metrics contexts</li>
* </ul>
*
* NOTE: This method is engine-agnostic and SHOULD NOT be overloaded; please check
* {@link #doInitTable(WriteOperationType, HoodieTableMetaClient, Option)} instead.
*/
public final HoodieTable initTable(WriteOperationType operationType, Option<String> instantTime) {
HoodieTableMetaClient metaClient = createMetaClient(true);
// Setup write schemas for deletes
if (WriteOperationType.isDelete(operationType)) {
setWriteSchemaForDeletes(metaClient);
}
doInitTable(operationType, metaClient, instantTime);
HoodieTable table = createTable(config, hadoopConf, metaClient);
// Validate table properties
metaClient.validateTableProperties(config.getProps());
switch (operationType) {
case INSERT:
case INSERT_PREPPED:
case UPSERT:
case UPSERT_PREPPED:
case BULK_INSERT:
case BULK_INSERT_PREPPED:
case INSERT_OVERWRITE:
case INSERT_OVERWRITE_TABLE:
setWriteTimer(table.getMetaClient().getCommitActionType());
break;
case CLUSTER:
case COMPACT:
case LOG_COMPACT:
tableServiceClient.setTableServiceTimer(operationType);
break;
default:
}
return table;
} | 3.68 |
hadoop_ApplicationRowKeyPrefix_getRowKeyPrefix | /*
* (non-Javadoc)
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.application.
* RowKeyPrefix#getRowKeyPrefix()
*/
@Override
public byte[] getRowKeyPrefix() {
return super.getRowKey();
} | 3.68 |
hadoop_FSDataOutputStreamBuilder_recursive | /**
* Create the parent directories if they do not exist.
*
* @return B Generics Type.
*/
public B recursive() {
recursive = true;
return getThisBuilder();
} | 3.68 |
hadoop_YarnServerSecurityUtils_authorizeRequest | /**
* Authorizes the current request and returns the AMRMTokenIdentifier for the
* current application.
*
* @return the AMRMTokenIdentifier instance for the current user
* @throws YarnException exceptions from yarn servers.
*/
public static AMRMTokenIdentifier authorizeRequest() throws YarnException {
UserGroupInformation remoteUgi;
try {
remoteUgi = UserGroupInformation.getCurrentUser();
} catch (IOException e) {
String msg =
"Cannot obtain the user-name for authorizing ApplicationMaster. "
+ "Got exception: " + StringUtils.stringifyException(e);
LOG.warn(msg);
throw RPCUtil.getRemoteException(msg);
}
boolean tokenFound = false;
String message = "";
AMRMTokenIdentifier appTokenIdentifier = null;
try {
appTokenIdentifier = selectAMRMTokenIdentifier(remoteUgi);
if (appTokenIdentifier == null) {
tokenFound = false;
message = "No AMRMToken found for user " + remoteUgi.getUserName();
} else {
tokenFound = true;
}
} catch (IOException e) {
tokenFound = false;
message = "Got exception while looking for AMRMToken for user "
+ remoteUgi.getUserName();
}
if (!tokenFound) {
LOG.warn(message);
throw RPCUtil.getRemoteException(message);
}
return appTokenIdentifier;
} | 3.68 |
flink_Tuple22_copy | /**
* Shallow tuple copy.
*
* @return A new Tuple with the same fields as this.
*/
@Override
@SuppressWarnings("unchecked")
public Tuple22<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21>
copy() {
return new Tuple22<>(
this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7, this.f8,
this.f9, this.f10, this.f11, this.f12, this.f13, this.f14, this.f15, this.f16,
this.f17, this.f18, this.f19, this.f20, this.f21);
} | 3.68 |
hbase_MetricRegistryInfo_getMetricsName | /**
* Get the name of the metrics that are being exported by this source, e.g. IPC, GC, WAL.
*/
public String getMetricsName() {
return metricsName;
} | 3.68 |
framework_VaadinService_getCurrent | /**
* Gets the currently used Vaadin service. The current service is
* automatically defined when processing requests related to the service
* (see {@link ThreadLocal}) and in {@link VaadinSession#access(Runnable)}
* and {@link UI#access(Runnable)}. In other cases (e.g. from background
* threads), the current service is not automatically defined.
*
* @return the current Vaadin service instance if available, otherwise
* <code>null</code>
*
* @see #setCurrentInstances(VaadinRequest, VaadinResponse)
*/
public static VaadinService getCurrent() {
return CurrentInstance.get(VaadinService.class);
} | 3.68 |
framework_VListSelect_setRows | /**
* Sets the number of visible items for the list select.
*
* @param rows
* the number of items to show
* @see ListBox#setVisibleItemCount(int)
*/
public void setRows(int rows) {
if (select.getVisibleItemCount() != rows) {
select.setVisibleItemCount(rows);
}
} | 3.68 |
dubbo_ScopeModel_getDesc | /**
* @return the description string of this scope model
*/
public String getDesc() {
if (this.desc == null) {
this.desc = buildDesc();
}
return this.desc;
} | 3.68 |
hadoop_BlockReaderUtil_readAll | /* See {@link BlockReader#readAll(byte[], int, int)} */
public static int readAll(BlockReader reader,
byte[] buf, int offset, int len) throws IOException {
int n = 0;
for (;;) {
int nread = reader.read(buf, offset + n, len - n);
if (nread <= 0)
return (n == 0) ? nread : n;
n += nread;
if (n >= len)
return n;
}
} | 3.68 |
flink_BinaryArrayData_calculateFixLengthPartSize | /**
* It stores the real value when the type is primitive. It stores the length and offset of the
* variable-length part when the type is string, map, etc.
*/
public static int calculateFixLengthPartSize(LogicalType type) {
// ordered by type root definition
switch (type.getTypeRoot()) {
case BOOLEAN:
case TINYINT:
return 1;
case CHAR:
case VARCHAR:
case BINARY:
case VARBINARY:
case DECIMAL:
case BIGINT:
case DOUBLE:
case TIMESTAMP_WITHOUT_TIME_ZONE:
case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
case INTERVAL_DAY_TIME:
case ARRAY:
case MULTISET:
case MAP:
case ROW:
case STRUCTURED_TYPE:
case RAW:
// long and double are 8 bytes;
// otherwise it stores the length and offset of the variable-length part for types
// such as string, map, etc.
return 8;
case TIMESTAMP_WITH_TIME_ZONE:
throw new UnsupportedOperationException();
case SMALLINT:
return 2;
case INTEGER:
case FLOAT:
case DATE:
case TIME_WITHOUT_TIME_ZONE:
case INTERVAL_YEAR_MONTH:
return 4;
case DISTINCT_TYPE:
return calculateFixLengthPartSize(((DistinctType) type).getSourceType());
case NULL:
case SYMBOL:
case UNRESOLVED:
default:
throw new IllegalArgumentException();
}
} | 3.68 |
dubbo_DubboServiceAddressURL_equals | /**
* Ignores the consumer url comparison.
* It's only meaningful for comparing two AddressURLs related to the same consumerURL.
*
* @param obj the object to compare with
* @return true if the two address URLs are considered equal
*/
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (!(obj instanceof DubboServiceAddressURL)) {
return false;
}
if (overrideURL == null) {
return super.equals(obj);
} else {
DubboServiceAddressURL other = (DubboServiceAddressURL) obj;
boolean overrideEquals = Objects.equals(
overrideURL.getParameters(), other.getOverrideURL().getParameters());
if (!overrideEquals) {
return false;
}
Map<String, String> params = this.getParameters();
for (Map.Entry<String, String> entry : params.entrySet()) {
String key = entry.getKey();
if (overrideURL.getParameters().containsKey(key)) {
continue;
}
if (!entry.getValue().equals(other.getUrlParam().getParameter(key))) {
return false;
}
}
}
return true;
} | 3.68 |
hadoop_ExcessRedundancyMap_contains | /**
* @return whether this map contains a redundancy corresponding to the given
* datanode and the given block
*/
synchronized boolean contains(DatanodeDescriptor dn, BlockInfo blk) {
final LightWeightHashSet<BlockInfo> set = map.get(dn.getDatanodeUuid());
return set != null && set.contains(blk);
} | 3.68 |
hadoop_InMemoryConfigurationStore_logMutation | /**
* This method does not log as it does not support a backing store.
* The mutation to be applied on top of schedConf will be passed directly
* to confirmMutation.
*/
@Override
public void logMutation(LogMutation logMutation) {
} | 3.68 |
hudi_DataSourceUtils_createUserDefinedBulkInsertPartitioner | /**
* Create a UserDefinedBulkInsertPartitioner class via reflection,
* <br>
* if the class name of UserDefinedBulkInsertPartitioner is configured through the HoodieWriteConfig.
*
* @see HoodieWriteConfig#getUserDefinedBulkInsertPartitionerClass()
*/
private static Option<BulkInsertPartitioner> createUserDefinedBulkInsertPartitioner(HoodieWriteConfig config)
throws HoodieException {
String bulkInsertPartitionerClass = config.getUserDefinedBulkInsertPartitionerClass();
try {
return StringUtils.isNullOrEmpty(bulkInsertPartitionerClass)
? Option.empty() :
Option.of((BulkInsertPartitioner) ReflectionUtils.loadClass(bulkInsertPartitionerClass, config));
} catch (Throwable e) {
throw new HoodieException("Could not create UserDefinedBulkInsertPartitioner class " + bulkInsertPartitionerClass, e);
}
} | 3.68 |
hadoop_AbstractS3AStatisticsSource_lookupCounterValue | /**
* {@inheritDoc}
*/
public Long lookupCounterValue(final String name) {
return ioStatistics.counters().get(name);
} | 3.68 |
framework_WindowElement_maximize | /**
* Clicks the maximize button of the window.
*/
public void maximize() {
if (!isMaximized()) {
getMaximizeButton().click();
} else {
throw new IllegalStateException(
"Window is already maximized, cannot maximize.");
}
} | 3.68 |
hbase_MetricsHeapMemoryManager_increaseAboveHeapOccupancyLowWatermarkCounter | /**
* Increase the counter for heap occupancy percent above low watermark
*/
public void increaseAboveHeapOccupancyLowWatermarkCounter() {
source.increaseAboveHeapOccupancyLowWatermarkCounter();
} | 3.68 |
hmily_MotanHmilyInventoryApplication_main | /**
* Main entry point.
*
* @param args command line arguments.
*/
public static void main(final String[] args) {
SpringApplication springApplication = new SpringApplication(MotanHmilyInventoryApplication.class);
springApplication.setWebApplicationType(WebApplicationType.NONE);
springApplication.run(args);
MotanSwitcherUtil.setSwitcherValue(MotanConstants.REGISTRY_HEARTBEAT_SWITCHER, true);
System.out.println("MotanHmilyInventoryApplication server start...");
} | 3.68 |
hbase_SaslClientAuthenticationProviders_getNumRegisteredProviders | /**
* Returns the number of providers that have been registered.
*/
public int getNumRegisteredProviders() {
return providers.size();
} | 3.68 |
flink_SavepointRestoreSettings_restoreSavepoint | /**
* Returns whether to restore from savepoint.
*
* @return <code>true</code> if should restore from savepoint.
*/
public boolean restoreSavepoint() {
return restorePath != null;
} | 3.68 |
hudi_SparkRecordMergingUtils_isPartial | /**
* @param schema Avro schema to check.
* @param mergedSchema The merged schema for the merged record.
* @return whether the Avro schema is partial compared to the merged schema.
*/
public static boolean isPartial(Schema schema, Schema mergedSchema) {
return !schema.equals(mergedSchema);
} | 3.68 |
hbase_SnapshotManager_deleteSnapshot | /**
* Delete the specified snapshot
* @throws SnapshotDoesNotExistException If the specified snapshot does not exist.
* @throws IOException For filesystem IOExceptions
*/
public void deleteSnapshot(SnapshotDescription snapshot) throws IOException {
// check to see if it is completed
if (!isSnapshotCompleted(snapshot)) {
throw new SnapshotDoesNotExistException(ProtobufUtil.createSnapshotDesc(snapshot));
}
String snapshotName = snapshot.getName();
// first create the snapshot description and check to see if it exists
FileSystem fs = master.getMasterFileSystem().getFileSystem();
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
// Get snapshot info from file system. The one passed as parameter is a "fake" snapshotInfo with
// just the "name" and it does not contains the "real" snapshot information
snapshot = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
// call coproc pre hook
MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost();
org.apache.hadoop.hbase.client.SnapshotDescription snapshotPOJO = null;
if (cpHost != null) {
snapshotPOJO = ProtobufUtil.createSnapshotDesc(snapshot);
cpHost.preDeleteSnapshot(snapshotPOJO);
}
LOG.debug("Deleting snapshot: " + snapshotName);
// delete the existing snapshot
if (!fs.delete(snapshotDir, true)) {
throw new HBaseSnapshotException("Failed to delete snapshot directory: " + snapshotDir);
}
// call coproc post hook
if (cpHost != null) {
cpHost.postDeleteSnapshot(snapshotPOJO);
}
} | 3.68 |
hbase_RegionCoprocessorHost_postGet | /**
* @param get the Get request
* @param results the result set
* @exception IOException Exception
*/
public void postGet(final Get get, final List<Cell> results) throws IOException {
if (coprocEnvironments.isEmpty()) {
return;
}
execOperation(new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.postGetOp(this, get, results);
}
});
} | 3.68 |
framework_AbstractSelect_select | /**
* Selects an item.
*
* <p>
* In single select mode selecting item identified by
* {@link #getNullSelectionItemId()} sets the value of the property to null.
* </p>
*
* @param itemId
* the identifier of Item to be selected.
* @see #getNullSelectionItemId()
* @see #setNullSelectionItemId(Object)
*
*/
public void select(Object itemId) {
if (!isMultiSelect()) {
setValue(itemId);
} else if (!isSelected(itemId) && itemId != null
&& items.containsId(itemId)) {
final Set<Object> s = new HashSet<Object>((Set<?>) getValue());
s.add(itemId);
setValue(s);
}
} | 3.68 |
flink_CustomSinkOperatorUidHashes_setCommitterUidHash | /**
* Sets the uid hash of the committer operator used to recover state.
*
* @param committerUidHash uid hash denoting the committer operator
* @return {@link SinkOperatorUidHashesBuilder}
*/
public SinkOperatorUidHashesBuilder setCommitterUidHash(String committerUidHash) {
this.committerUidHash = committerUidHash;
return this;
} | 3.68 |
hbase_OrderedBytes_blobVarDecodedLength | /**
* Calculate the expected BlobVar decoded length based on encoded length.
*/
static int blobVarDecodedLength(int len) {
return ((len - 1) // 1-byte header
* 7) // 7-bits of payload per encoded byte
/ 8; // 8-bits per byte
} | 3.68 |
flink_DefaultContext_load | /**
* Build the {@link DefaultContext} from flink-conf.yaml, dynamic configuration and
* user-specified jars.
*
* @param dynamicConfig user-specified configuration.
* @param dependencies user-specified jars
* @param discoverExecutionConfig flag whether to load the execution configuration
*/
public static DefaultContext load(
Configuration dynamicConfig, List<URL> dependencies, boolean discoverExecutionConfig) {
// 1. find the configuration directory
String flinkConfigDir = CliFrontend.getConfigurationDirectoryFromEnv();
// 2. load the global configuration
Configuration configuration = GlobalConfiguration.loadConfiguration(flinkConfigDir);
configuration.addAll(dynamicConfig);
// 3. load the custom command lines
List<CustomCommandLine> commandLines =
CliFrontend.loadCustomCommandLines(configuration, flinkConfigDir);
// initialize default file system
FileSystem.initialize(
configuration, PluginUtils.createPluginManagerFromRootFolder(configuration));
if (discoverExecutionConfig) {
Options commandLineOptions = collectCommandLineOptions(commandLines);
try {
CommandLine deploymentCommandLine =
CliFrontendParser.parse(commandLineOptions, new String[] {}, true);
configuration.addAll(
createExecutionConfig(
deploymentCommandLine,
commandLineOptions,
commandLines,
dependencies));
} catch (Exception e) {
throw new SqlGatewayException(
"Could not load available CLI with Environment Deployment entry.", e);
}
}
return new DefaultContext(configuration, dependencies);
} | 3.68 |
hbase_AbstractFSWAL_getCurrentFileName | /**
* This is a convenience method that computes a new filename using the current WAL
* file-number.
*/
public Path getCurrentFileName() {
return computeFilename(this.filenum.get());
} | 3.68 |
hudi_HoodieDataBlock_list2Iterator | /**
* Converts the given list to a closable iterator.
*/
static <T> ClosableIterator<T> list2Iterator(List<T> list) {
Iterator<T> iterator = list.iterator();
return new ClosableIterator<T>() {
@Override
public void close() {
// ignored
}
@Override
public boolean hasNext() {
return iterator.hasNext();
}
@Override
public T next() {
return iterator.next();
}
};
} | 3.68 |
flink_MetricStore_getTaskMetricStore | /**
* Returns the {@link ComponentMetricStore} for the given job/task ID.
*
* @param jobID job ID
* @param taskID task ID
* @return ComponentMetricStore for given IDs, or null if no store for the given arguments
* exists
*/
public synchronized TaskMetricStore getTaskMetricStore(String jobID, String taskID) {
JobMetricStore job = jobID == null ? null : jobs.get(jobID);
if (job == null || taskID == null) {
return null;
}
return TaskMetricStore.unmodifiable(job.getTaskMetricStore(taskID));
} | 3.68 |
dubbo_NettyChannel_removeChannelIfDisconnected | /**
* Remove the inactive channel.
*
* @param ch netty channel
*/
static void removeChannelIfDisconnected(Channel ch) {
if (ch != null && !ch.isActive()) {
NettyChannel nettyChannel = CHANNEL_MAP.remove(ch);
if (nettyChannel != null) {
nettyChannel.markActive(false);
}
}
} | 3.68 |
hbase_ExportSnapshot_preserveAttributes | /**
* Try to preserve the file attributes selected by the user, copying them from the source file.
* This is only required when you are exporting as a different user than "hbase" or on a system
* that doesn't have the "hbase" user. This is not considered a blocking failure since the user
* can force a chmod with a user that is known to be available on the system.
*/
private boolean preserveAttributes(final Path path, final FileStatus refStat) {
FileStatus stat;
try {
stat = outputFs.getFileStatus(path);
} catch (IOException e) {
LOG.warn("Unable to get the status for file=" + path);
return false;
}
try {
if (filesMode > 0 && stat.getPermission().toShort() != filesMode) {
outputFs.setPermission(path, new FsPermission(filesMode));
} else if (refStat != null && !stat.getPermission().equals(refStat.getPermission())) {
outputFs.setPermission(path, refStat.getPermission());
}
} catch (IOException e) {
LOG.warn("Unable to set the permission for file=" + stat.getPath() + ": " + e.getMessage());
return false;
}
boolean hasRefStat = (refStat != null);
String user = stringIsNotEmpty(filesUser) || !hasRefStat ? filesUser : refStat.getOwner();
String group = stringIsNotEmpty(filesGroup) || !hasRefStat ? filesGroup : refStat.getGroup();
if (stringIsNotEmpty(user) || stringIsNotEmpty(group)) {
try {
if (!(user.equals(stat.getOwner()) && group.equals(stat.getGroup()))) {
outputFs.setOwner(path, user, group);
}
} catch (IOException e) {
LOG.warn(
"Unable to set the owner/group for file=" + stat.getPath() + ": " + e.getMessage());
LOG.warn("The user/group may not exist on the destination cluster: user=" + user
+ " group=" + group);
return false;
}
}
return true;
} | 3.68 |
hudi_AvroInternalSchemaConverter_visitInternalSchemaToBuildAvroSchema | /**
* Converts hudi type into an Avro Schema.
*
* @param type a hudi type.
* @param cache use to cache intermediate convert result to save cost.
* @param recordName auto-generated record name used as a fallback, in case
* {@link org.apache.hudi.internal.schema.Types.RecordType} doesn't bear original record-name
* @return an Avro schema matching this type
*/
private static Schema visitInternalSchemaToBuildAvroSchema(Type type, Map<Type, Schema> cache, String recordName) {
switch (type.typeId()) {
case RECORD:
Types.RecordType record = (Types.RecordType) type;
List<Schema> schemas = new ArrayList<>();
record.fields().forEach(f -> {
String nestedRecordName = recordName + AVRO_NAME_DELIMITER + f.name();
Schema tempSchema = visitInternalSchemaToBuildAvroSchema(f.type(), cache, nestedRecordName);
// convert tempSchema
Schema result = f.isOptional() ? AvroInternalSchemaConverter.nullableSchema(tempSchema) : tempSchema;
schemas.add(result);
});
// check visited
Schema recordSchema;
recordSchema = cache.get(record);
if (recordSchema != null) {
return recordSchema;
}
recordSchema = visitInternalRecordToBuildAvroRecord(record, schemas, recordName);
cache.put(record, recordSchema);
return recordSchema;
case ARRAY:
Types.ArrayType array = (Types.ArrayType) type;
Schema elementSchema;
elementSchema = visitInternalSchemaToBuildAvroSchema(array.elementType(), cache, recordName);
Schema arraySchema;
arraySchema = cache.get(array);
if (arraySchema != null) {
return arraySchema;
}
arraySchema = visitInternalArrayToBuildAvroArray(array, elementSchema);
cache.put(array, arraySchema);
return arraySchema;
case MAP:
Types.MapType map = (Types.MapType) type;
Schema keySchema;
Schema valueSchema;
keySchema = visitInternalSchemaToBuildAvroSchema(map.keyType(), cache, recordName);
valueSchema = visitInternalSchemaToBuildAvroSchema(map.valueType(), cache, recordName);
Schema mapSchema;
mapSchema = cache.get(map);
if (mapSchema != null) {
return mapSchema;
}
mapSchema = visitInternalMapToBuildAvroMap(map, keySchema, valueSchema);
cache.put(map, mapSchema);
return mapSchema;
default:
Schema primitiveSchema = visitInternalPrimitiveToBuildAvroPrimitiveType((Type.PrimitiveType) type, recordName);
cache.put(type, primitiveSchema);
return primitiveSchema;
}
} | 3.68 |
pulsar_ManagedLedgerConfig_getMaxBatchDeletedIndexToPersist | /**
* @return the max batch deleted index that will be persisted and recovered.
*/
public int getMaxBatchDeletedIndexToPersist() {
return maxBatchDeletedIndexToPersist;
} | 3.68 |
rocketmq-connect_RetryWithToleranceOperator_error | /**
* Returns the error recorded in the context.
*
* @return the recorded error
*/
public Throwable error() {
return this.context.error();
} | 3.68 |
hbase_SpaceLimitSettings_buildProtoAddQuota | /**
* Builds a {@link SpaceQuota} protobuf object given the arguments.
* @param sizeLimit The size limit of the quota.
* @param violationPolicy The action to take when the quota is exceeded.
* @return The protobuf SpaceQuota representation.
*/
private SpaceLimitRequest buildProtoAddQuota(long sizeLimit,
SpaceViolationPolicy violationPolicy) {
return buildProtoFromQuota(SpaceQuota.newBuilder().setSoftLimit(sizeLimit)
.setViolationPolicy(ProtobufUtil.toProtoViolationPolicy(violationPolicy)).build());
} | 3.68 |
rocketmq-connect_TopicNameStrategy_subjectName | /**
* Generates the subject name for the given topic.
*
* @param topic the topic name
* @param isKey whether the subject is for the record key
* @return the subject name
*/
public static String subjectName(String topic, boolean isKey) {
return isKey ? topic + "-key" : topic + "-value";
} | 3.68 |
morf_AbstractSqlDialectTest_testPrepareStatementParameter | /**
* Tests the logic used for transferring a {@link Record} value to a
* {@link PreparedStatement}.
*
* @throws SQLException when a database access error occurs
*/
@Test
public void testPrepareStatementParameter() throws SQLException {
final SqlParameter dateColumn = parameter(column("dateColumn", DataType.DATE));
final SqlParameter decimalColumn = parameter(column("decimalColumn", DataType.DECIMAL, 9, 5));
final SqlParameter stringColumn = parameter(column("stringColumn", DataType.STRING, 4));
final SqlParameter integerColumn = parameter(column("integerColumn", DataType.INTEGER));
final SqlParameter bigIntegerColumn = parameter(column("bigIntegerColumn", DataType.BIG_INTEGER));
final SqlParameter blobColumn = parameter(column("blobColumn", DataType.BLOB));
final SqlParameter clobColumn = parameter(column("clobColumn", DataType.CLOB));
// Boolean
verifyBooleanPrepareStatementParameter();
// Date
verify(callPrepareStatementParameter(dateColumn, null)).setObject(dateColumn, null);
verify(callPrepareStatementParameter(dateColumn, "2012-12-01")).setDate(dateColumn, java.sql.Date.valueOf("2012-12-01"));
// Decimal
verify(callPrepareStatementParameter(decimalColumn, null)).setBigDecimal(decimalColumn, null);
NamedParameterPreparedStatement mockStatement = callPrepareStatementParameter(decimalColumn, "3");
ArgumentCaptor<BigDecimal> bigDecimalCapture = ArgumentCaptor.forClass(BigDecimal.class);
verify(mockStatement).setBigDecimal(eq(decimalColumn), bigDecimalCapture.capture());
assertTrue("BigDecimal not correctly set on statement. Expected 3, was: " + bigDecimalCapture.getValue(), bigDecimalCapture.getValue().compareTo(new BigDecimal(3)) == 0);
// String
verify(callPrepareStatementParameter(stringColumn, null)).setString(stringColumn, null);
verify(callPrepareStatementParameter(stringColumn, "")).setString(stringColumn, null);
verify(callPrepareStatementParameter(stringColumn, "test")).setString(stringColumn, "test");
// Integer
verify(callPrepareStatementParameter(integerColumn, null)).setObject(integerColumn, null);
mockStatement = callPrepareStatementParameter(integerColumn, "23");
ArgumentCaptor<Integer>intCapture = ArgumentCaptor.forClass(Integer.class);
verify(mockStatement).setInt(eq(integerColumn), intCapture.capture());
assertEquals("Integer not correctly set on statement", 23, intCapture.getValue().intValue());
// Big Integer
verify(callPrepareStatementParameter(bigIntegerColumn, null)).setObject(bigIntegerColumn, null);
mockStatement = callPrepareStatementParameter(bigIntegerColumn, "345345423234234234");
ArgumentCaptor<Long> bigIntCapture = ArgumentCaptor.forClass(Long.class);
verify(mockStatement).setLong(eq(bigIntegerColumn), bigIntCapture.capture());
assertEquals("Big integer not correctly set on statement", 345345423234234234L, bigIntCapture.getValue().longValue());
// Blob
verifyBlobColumnCallPrepareStatementParameter(blobColumn);
// Clob
verify(callPrepareStatementParameter(clobColumn, null)).setString(clobColumn, null);
verify(callPrepareStatementParameter(clobColumn, "")).setString(clobColumn, null);
verify(callPrepareStatementParameter(clobColumn, "test")).setString(clobColumn, "test");
} | 3.68 |
hbase_LossyCounting_calculateCurrentTerm | /**
* Calculate and set current term
*/
private void calculateCurrentTerm() {
this.currentTerm = (int) Math.ceil(1.0 * totalDataCount / (double) bucketSize);
} | 3.68 |
flink_Dispatcher_createDirtyJobResultEntryIfMissingAsync | /**
* Creates a dirty entry in the {@link #jobResultStore} if there's no entry at all for the given
* {@code executionGraph} in the {@code JobResultStore}.
*
* @param executionGraph The {@link AccessExecutionGraph} for which the {@link JobResult} shall
* be persisted.
* @param hasCleanJobResultEntry The decision the dirty entry check is based on.
* @return {@code CompletableFuture} that completes as soon as the entry exists.
*/
private CompletableFuture<Void> createDirtyJobResultEntryIfMissingAsync(
AccessExecutionGraph executionGraph, boolean hasCleanJobResultEntry) {
final JobID jobId = executionGraph.getJobID();
if (hasCleanJobResultEntry) {
log.warn("Job {} is already marked as clean but clean up was triggered again.", jobId);
return FutureUtils.completedVoidFuture();
} else {
return jobResultStore
.hasDirtyJobResultEntryAsync(jobId)
.thenCompose(
hasDirtyJobResultEntry ->
createDirtyJobResultEntryAsync(
executionGraph, hasDirtyJobResultEntry));
}
} | 3.68 |
flink_ExpandColumnFunctionsRule_isIndexRangeCall | /** Whether the expression is a column index range expression, e.g. withColumns(1 ~ 2). */
private boolean isIndexRangeCall(UnresolvedCallExpression expression) {
return expression.getFunctionDefinition() == RANGE_TO
&& expression.getChildren().get(0) instanceof ValueLiteralExpression
&& expression.getChildren().get(1) instanceof ValueLiteralExpression;
} | 3.68 |
hadoop_AzureNativeFileSystemStore_verifyAndConvertToStandardFormat | /**
* Checks if the given rawDir belongs to this account/container, and
* if so returns the canonicalized path for it. Otherwise return null.
*/
private String verifyAndConvertToStandardFormat(String rawDir) throws URISyntaxException {
URI asUri = new URI(rawDir);
if (asUri.getAuthority() == null
|| asUri.getAuthority().toLowerCase(Locale.ENGLISH).equalsIgnoreCase(
sessionUri.getAuthority().toLowerCase(Locale.ENGLISH))) {
// Applies to me.
return trim(asUri.getPath(), "/");
} else {
// Doesn't apply to me.
return null;
}
} | 3.68 |
hbase_BalanceRequest_build | /**
* Build the {@link BalanceRequest}
*/
public BalanceRequest build() {
return new BalanceRequest(dryRun, ignoreRegionsInTransition);
} | 3.68 |
hbase_RegionCoprocessorHost_preOpen | /**
* Invoked before a region open.
* @throws IOException Signals that an I/O exception has occurred.
*/
public void preOpen() throws IOException {
if (coprocEnvironments.isEmpty()) {
return;
}
execOperation(new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.preOpen(this);
}
});
} | 3.68 |
hadoop_Paths_getPartition | /**
* Returns the partition of a relative file path, or null if the path is a
* file name with no relative directory.
*
* @param relative a relative file path
* @return the partition of the relative file path
*/
protected static String getPartition(String relative) {
return getParent(relative);
} | 3.68 |
hadoop_ManifestCommitter_getConf | /**
* Get the config of the task attempt this instance was constructed
* with.
* @return a configuration.
*/
public Configuration getConf() {
return baseConfig.getConf();
} | 3.68 |
morf_LoggingSqlScriptVisitor_executionEnd | /**
* {@inheritDoc}
* @see org.alfasoftware.morf.jdbc.SqlScriptExecutor.SqlScriptVisitor#executionEnd()
*/
@Override
public void executionEnd() {
log.info(logSchemaPositionPrefix() + "SQL Script Complete");
} | 3.68 |
hbase_OrderedBytes_isBlobVar | /**
* Return true when the next encoded value in {@code src} uses BlobVar encoding, false otherwise.
*/
public static boolean isBlobVar(PositionedByteRange src) {
return BLOB_VAR
== (-1 == Integer.signum(src.peek()) ? DESCENDING : ASCENDING).apply(src.peek());
} | 3.68 |
framework_TranslatedURLReference_setConnection | /**
* Sets the application connection this instance is connected to. Called
* internally by the framework.
*
* @param connection
* the application connection this instance is connected to
*/
public void setConnection(ApplicationConnection connection) {
this.connection = connection;
} | 3.68 |
hadoop_FsAction_and | /**
* AND operation.
* @param that FsAction that.
* @return FsAction.
*/
public FsAction and(FsAction that) {
return vals[ordinal() & that.ordinal()];
} | 3.68 |
querydsl_MathExpressions_radians | /**
* Create a {@code rad(num)} expression
*
* <p>Converts degrees to radians</p>
*
* @param num numeric expression
* @return rad(num)
*/
public static <A extends Number & Comparable<?>> NumberExpression<Double> radians(Expression<A> num) {
return Expressions.numberOperation(Double.class, Ops.MathOps.RAD, num);
} | 3.68 |
hadoop_ManifestSuccessData_dumpMetrics | /**
* Dump the metrics (if any) to a string.
* The metrics are sorted for ease of viewing.
* @param prefix prefix before every entry
* @param middle string between key and value
* @param suffix suffix to each entry
* @return the dumped string
*/
public String dumpMetrics(String prefix, String middle, String suffix) {
return joinMap(metrics, prefix, middle, suffix);
} | 3.68 |
AreaShop_AreaShop_getLanguageManager | /**
* Function to get the LanguageManager.
* @return the LanguageManager
*/
public LanguageManager getLanguageManager() {
return languageManager;
} | 3.68 |
dubbo_ApplicationModel_getExecutorRepository | /**
* @deprecated Replace to {@link ApplicationModel#getApplicationExecutorRepository()}
*/
@Deprecated
public static ExecutorRepository getExecutorRepository() {
return defaultModel().getApplicationExecutorRepository();
} | 3.68 |
flink_SourceCoordinatorContext_registerSourceReader | /**
* Register a source reader.
*
* @param subtaskId the subtask id of the source reader.
* @param attemptNumber the attempt number of the source reader.
* @param location the location of the source reader.
*/
void registerSourceReader(int subtaskId, int attemptNumber, String location) {
final Map<Integer, ReaderInfo> attemptReaders =
registeredReaders.computeIfAbsent(subtaskId, k -> new ConcurrentHashMap<>());
checkState(
!attemptReaders.containsKey(attemptNumber),
"ReaderInfo of subtask %s (#%s) already exists.",
subtaskId,
attemptNumber);
attemptReaders.put(attemptNumber, new ReaderInfo(subtaskId, location));
sendCachedSplitsToNewlyRegisteredReader(subtaskId, attemptNumber);
} | 3.68 |
morf_SqlDialect_rebuildTriggers | /**
* Drops and recreates the triggers and supporting items for the target table.
*
* @param table the table for which to rebuild triggers
* @return a collection of sql statements to execute
*/
public Collection<String> rebuildTriggers(@SuppressWarnings("unused") Table table) {
return SqlDialect.NO_STATEMENTS;
} | 3.68 |
flink_HadoopConfigLoader_mirrorCertainHadoopConfig | // mirror certain keys to make use more uniform across implementations
// with different keys
private org.apache.hadoop.conf.Configuration mirrorCertainHadoopConfig(
org.apache.hadoop.conf.Configuration hadoopConfig) {
for (String[] mirrored : mirroredConfigKeys) {
String value = hadoopConfig.get(mirrored[0], null);
if (value != null) {
hadoopConfig.set(mirrored[1], value);
}
}
return hadoopConfig;
} | 3.68 |
hbase_BloomFilterFactory_getMaxFold | /** Returns the value for Bloom filter max fold in the given configuration */
public static int getMaxFold(Configuration conf) {
return conf.getInt(IO_STOREFILE_BLOOM_MAX_FOLD, MAX_ALLOWED_FOLD_FACTOR);
} | 3.68 |
querydsl_GuavaGroupByBuilder_asSortedSetMultimap | /**
* Get the results as multi map
*
* @param expression value expression
* @param comparator key comparator
* @param valueComparator value comparator
* @param <V> Value type
* @return new result transformer
*/
public <V> ResultTransformer<TreeMultimap<K, V>> asSortedSetMultimap(Expression<V> expression,
final Comparator<? super K> comparator,
final Comparator<? super V> valueComparator) {
final Expression<V> lookup = getLookup(expression);
return new GroupByMultimap<K, V, TreeMultimap<K, V>>(key, expression) {
@Override
protected TreeMultimap<K, V> transform(Multimap<K, Group> groups) {
TreeMultimap<K, V> results = TreeMultimap.create(comparator, valueComparator);
for (Map.Entry<K, Group> entry : groups.entries()) {
results.put(entry.getKey(), entry.getValue().getOne(lookup));
}
return results;
}
};
} | 3.68 |
hbase_HFileCorruptionChecker_getHFilesChecked | /** Returns number of hfiles checked in the last HfileCorruptionChecker run */
public int getHFilesChecked() {
return hfilesChecked.get();
} | 3.68 |
hadoop_UnmanagedApplicationManager_reAttachUAM | /**
* Re-attach to an existing UAM in the resource manager.
*
* @param amrmToken the UAM token
* @throws IOException if re-attach fails
* @throws YarnException if re-attach fails
*/
public void reAttachUAM(Token<AMRMTokenIdentifier> amrmToken)
throws IOException, YarnException {
this.connectionInitiated = true;
// Creates the UAM connection
createUAMProxy(amrmToken);
} | 3.68 |
hbase_LruBlockCache_evictBlock | /**
* Evict the block; it will be cached by the victim handler if one exists and the block may be
* read again later.
* @param evictedByEvictionProcess true if the given block is evicted by EvictionThread
* @return the heap size of evicted block
*/
protected long evictBlock(LruCachedBlock block, boolean evictedByEvictionProcess) {
final MutableBoolean evicted = new MutableBoolean(false);
// Note: 'map' must be a ConcurrentHashMap or the supplier may be invoked more than once.
map.computeIfPresent(block.getCacheKey(), (k, v) -> {
// Run the victim handler before we remove the mapping in the L1 map. It must complete
// quickly because other removal or insertion operations can be blocked in the meantime.
if (evictedByEvictionProcess && victimHandler != null) {
victimHandler.cacheBlock(k, v.getBuffer());
}
// Decrease the block's reference count, and if refCount is 0, then it'll auto-deallocate. DO
// NOT move this up because if we do that then the victimHandler may access the buffer with
// refCnt = 0 which is disallowed.
v.getBuffer().release();
evicted.setTrue();
// By returning null from the supplier we remove the mapping from the L1 map.
return null;
});
// If we didn't find anything to evict there is nothing more to do here.
if (evicted.isFalse()) {
return 0;
}
// We evicted the block so update L1 statistics.
updateSizeMetrics(block, true);
long val = elements.decrementAndGet();
if (LOG.isTraceEnabled()) {
long size = map.size();
assertCounterSanity(size, val);
}
BlockType bt = block.getBuffer().getBlockType();
if (bt.isBloom()) {
bloomBlockElements.decrement();
} else if (bt.isIndex()) {
indexBlockElements.decrement();
} else if (bt.isData()) {
dataBlockElements.decrement();
}
if (evictedByEvictionProcess) {
// When the eviction of the block happened because of invalidation of HFiles, no need to
// update the stats counter.
stats.evicted(block.getCachedTime(), block.getCacheKey().isPrimary());
}
return block.heapSize();
} | 3.68 |
morf_SqlDialect_getSqlForMod | /**
* Converts the mod function into SQL.
*
* @param function the function to convert.
* @return a string representation of the SQL.
*/
protected String getSqlForMod(Function function) {
return String.format("MOD(%s, %s)", getSqlFrom(function.getArguments().get(0)), getSqlFrom(function.getArguments().get(1)));
} | 3.68 |
hadoop_HsController_jobCounters | /*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#jobCounters()
*/
@Override
public void jobCounters() {
super.jobCounters();
} | 3.68 |
rocketmq-connect_RocketMQScheduledReporter_reportGauges | /**
* Reports gauge metrics.
*
* @param gauges the gauges to report
*/
private void reportGauges(SortedMap<MetricName, Object> gauges) {
gauges.forEach((name, value) -> {
send(name, Double.parseDouble(value.toString()));
});
} | 3.68 |
rocketmq-connect_Worker_checkRunningTasks | /**
* Check running tasks and update their status.
*
* @param connectorConfig the current connector configurations
*/
private void checkRunningTasks(Map<String, List<ConnectKeyValue>> connectorConfig) {
// STEP 1: check running tasks and put to error status
for (Runnable runnable : runningTasks) {
WorkerTask workerTask = (WorkerTask) runnable;
String connectorName = workerTask.id().connector();
ConnectKeyValue taskConfig = workerTask.currentTaskConfig();
List<ConnectKeyValue> taskConfigs = connectorConfig.get(connectorName);
WorkerTaskState state = ((WorkerTask) runnable).getState();
switch (state) {
case ERROR:
errorTasks.add(runnable);
runningTasks.remove(runnable);
break;
case RUNNING:
if (isNeedStop(taskConfig, taskConfigs)) {
try {
// remove committer offset
sourceTaskOffsetCommitter.ifPresent(commiter -> commiter.remove(workerTask.id()));
workerTask.doClose();
} catch (Exception e) {
log.error("workerTask stop exception, workerTask: " + workerTask.currentTaskConfig(), e);
}
log.info("Task stopping, connector name {}, config {}", workerTask.id().connector(), workerTask.currentTaskConfig());
runningTasks.remove(runnable);
stoppingTasks.put(runnable, System.currentTimeMillis());
} else {
//status redress
redressRunningStatus(workerTask);
// set target state
TargetState targetState = configManagementService.snapshot().targetState(connectorName);
if (targetState != null) {
workerTask.transitionTo(targetState);
}
}
break;
default:
log.error("[BUG] Illegal State in when checking running tasks, {} is in {} state",
((WorkerTask) runnable).id().connector(), state);
break;
}
}
} | 3.68 |
hudi_FormatUtils_getParallelProducers | /**
* Set up log and parquet reading in parallel. Both write to a central buffer.
*/
private List<HoodieProducer<HoodieRecord<?>>> getParallelProducers(
HoodieUnMergedLogRecordScanner.Builder scannerBuilder
) {
List<HoodieProducer<HoodieRecord<?>>> producers = new ArrayList<>();
producers.add(new FunctionBasedQueueProducer<>(queue -> {
HoodieUnMergedLogRecordScanner scanner =
scannerBuilder.withLogRecordScannerCallback(queue::insertRecord).build();
// Scan all the delta-log files, filling in the queue
scanner.scan();
return null;
}));
return producers;
} | 3.68 |
framework_VaadinService_cleanupSession | /**
* Closes inactive UIs in the given session, removes closed UIs from the
* session, and closes the session if it is itself inactive. This operation
* should not be performed without first acquiring the session lock. By
* default called at the end of each request, after sending the response.
*
* @param session
* the session to clean up
*
* @since 8.10
*/
public void cleanupSession(VaadinSession session) {
if (isSessionActive(session)) {
closeInactiveUIs(session);
removeClosedUIs(session);
} else {
if (session.getState() == State.OPEN) {
closeSession(session);
if (session.getSession() != null) {
getLogger().log(Level.FINE, "Closing inactive session {0}",
session.getSession().getId());
}
}
if (session.getSession() != null) {
/*
* If the VaadinSession has no WrappedSession then it has
* already been removed from the HttpSession and we do not have
* to do it again
*/
removeSession(session.getSession());
}
/*
* The session was destroyed during this request and therefore no
* destroy event has yet been sent
*/
fireSessionDestroy(session);
}
} | 3.68 |
hbase_HMaster_balanceOrUpdateMetrics | /**
* Trigger a normal balance, see {@link HMaster#balance()} . If the balance is not executed this
* time, the metrics related to the balance will be updated. When balance is running, related
* metrics will be updated at the same time. But if some checking logic failed and cause the
* balancer exit early, we lose the chance to update balancer metrics. This will lead to users
* missing the latest balancer info.
*/
public BalanceResponse balanceOrUpdateMetrics() throws IOException {
synchronized (this.balancer) {
BalanceResponse response = balance();
if (!response.isBalancerRan()) {
Map<TableName, Map<ServerName, List<RegionInfo>>> assignments =
this.assignmentManager.getRegionStates().getAssignmentsForBalancer(this.tableStateManager,
this.serverManager.getOnlineServersList());
for (Map<ServerName, List<RegionInfo>> serverMap : assignments.values()) {
serverMap.keySet().removeAll(this.serverManager.getDrainingServersList());
}
this.balancer.updateBalancerLoadInfo(assignments);
}
return response;
}
} | 3.68 |
hbase_ByteBufferUtils_toInt | /**
* Reads an int value at the given buffer's offset.
* @param buffer input byte buffer to read
* @param offset input offset where int is
* @return int value at offset
*/
public static int toInt(ByteBuffer buffer, int offset) {
return ConverterHolder.BEST_CONVERTER.toInt(buffer, offset);
} | 3.68 |
framework_LegacyCommunicationManager_getTagForType | /**
* @deprecated As of 7.1. Will be removed in the future.
*/
@Deprecated
public String getTagForType(Class<? extends ClientConnector> class1) {
Integer id = typeToKey.get(class1);
if (id == null) {
id = nextTypeKey++;
typeToKey.put(class1, id);
if (getLogger().isLoggable(Level.FINE)) {
getLogger().log(Level.FINE, "Mapping {0} to {1}",
new Object[] { class1.getName(), id });
}
}
return id.toString();
} | 3.68 |
flink_ConfigUtils_encodeCollectionToConfig | /**
* Puts a {@link Collection} of values of type {@code IN} in a {@link WritableConfig} as a
* {@link ConfigOption} of type {@link List} of type {@code OUT}. If the {@code values} is
* {@code null} or empty, then nothing is put in the configuration.
*
* @param configuration the configuration object to put the list in
* @param key the {@link ConfigOption option} to serve as the key for the list in the
* configuration
* @param values the collection of values to put as value for the {@code key}
* @param mapper the transformation function from {@code IN} to {@code OUT}.
*/
public static <IN, OUT> void encodeCollectionToConfig(
final WritableConfig configuration,
final ConfigOption<List<OUT>> key,
@Nullable final Collection<IN> values,
final Function<IN, OUT> mapper) {
checkNotNull(configuration);
checkNotNull(key);
checkNotNull(mapper);
if (values == null) {
return;
}
final List<OUT> encodedOption =
values.stream()
.filter(Objects::nonNull)
.map(mapper)
.filter(Objects::nonNull)
.collect(Collectors.toCollection(ArrayList::new));
configuration.set(key, encodedOption);
} | 3.68 |
hbase_ImmutableMemStoreLAB_forceCopyOfBigCellInto | /**
* The process of merging assumes all cells are allocated on mslab. There is a rare case in which
* the first immutable segment, participating in a merge, is a CSLM. Since the CSLM hasn't been
* flattened yet, and there is no point in flattening it (since it is going to be merged), its big
* cells (whose size > maxAlloc) must be copied into mslab. This method copies the passed cell
* into the first mslab in the mslabs list, returning either a new cell instance over the copied
* data, or null when this cell cannot be copied.
*/
@Override
public Cell forceCopyOfBigCellInto(Cell cell) {
MemStoreLAB mslab = this.mslabs.get(0);
return mslab.forceCopyOfBigCellInto(cell);
} | 3.68 |
Activiti_TreeMethodExpression_isLiteralText | /**
* @return <code>true</code> if this is a literal text expression
*/
@Override
public boolean isLiteralText() {
return node.isLiteralText();
} | 3.68 |
flink_RocksDBPriorityQueueConfig_getRocksDBPriorityQueueSetCacheSize | /**
* Gets the cache size of rocksDB priority queue set. It will fall back to the default value if
* it is not explicitly set.
*/
public int getRocksDBPriorityQueueSetCacheSize() {
return rocksDBPriorityQueueSetCacheSize == UNDEFINED_ROCKSDB_PRIORITY_QUEUE_SET_CACHE_SIZE
? ROCKSDB_TIMER_SERVICE_FACTORY_CACHE_SIZE.defaultValue()
: rocksDBPriorityQueueSetCacheSize;
} | 3.68 |
flink_HiveInspectors_getConversion | /**
* Get conversion for converting Flink object to Hive object from an ObjectInspector and the
* corresponding Flink DataType.
*/
public static HiveObjectConversion getConversion(
ObjectInspector inspector, LogicalType dataType, HiveShim hiveShim) {
if (inspector instanceof PrimitiveObjectInspector) {
HiveObjectConversion conversion;
if (inspector instanceof BooleanObjectInspector
|| inspector instanceof StringObjectInspector
|| inspector instanceof ByteObjectInspector
|| inspector instanceof ShortObjectInspector
|| inspector instanceof IntObjectInspector
|| inspector instanceof LongObjectInspector
|| inspector instanceof FloatObjectInspector
|| inspector instanceof DoubleObjectInspector
|| inspector instanceof BinaryObjectInspector
|| inspector instanceof VoidObjectInspector) {
conversion = IdentityConversion.INSTANCE;
} else if (inspector instanceof DateObjectInspector) {
conversion = hiveShim::toHiveDate;
} else if (inspector instanceof TimestampObjectInspector) {
conversion = hiveShim::toHiveTimestamp;
} else if (inspector instanceof HiveCharObjectInspector) {
conversion =
o ->
o == null
? null
: new HiveChar(
(String) o, ((CharType) dataType).getLength());
} else if (inspector instanceof HiveVarcharObjectInspector) {
conversion =
o ->
o == null
? null
: new HiveVarchar(
(String) o, ((VarCharType) dataType).getLength());
} else if (inspector instanceof HiveDecimalObjectInspector) {
conversion = o -> o == null ? null : HiveDecimal.create((BigDecimal) o);
} else if (inspector instanceof HiveIntervalYearMonthObjectInspector) {
conversion =
o -> {
if (o == null) {
return null;
} else {
Period period = (Period) o;
return new HiveIntervalYearMonth(
period.getYears(), period.getMonths());
}
};
} else if (inspector instanceof HiveIntervalDayTimeObjectInspector) {
conversion =
o -> {
if (o == null) {
return null;
} else {
Duration duration = (Duration) o;
return new HiveIntervalDayTime(
duration.getSeconds(), duration.getNano());
}
};
} else {
throw new FlinkHiveUDFException(
"Unsupported primitive object inspector " + inspector.getClass().getName());
}
// if the object inspector prefers Writable objects, we should add an extra conversion
// for that
// currently this happens for constant arguments for UDFs
if (((PrimitiveObjectInspector) inspector).preferWritable()) {
conversion = new WritableHiveObjectConversion(conversion, hiveShim);
}
return conversion;
}
if (inspector instanceof ListObjectInspector) {
HiveObjectConversion eleConvert =
getConversion(
((ListObjectInspector) inspector).getListElementObjectInspector(),
((ArrayType) dataType).getElementType(),
hiveShim);
return o -> {
if (o == null) {
return null;
}
Object[] array = (Object[]) o;
List<Object> result = new ArrayList<>();
for (Object ele : array) {
result.add(eleConvert.toHiveObject(ele));
}
return result;
};
}
if (inspector instanceof MapObjectInspector) {
MapObjectInspector mapInspector = (MapObjectInspector) inspector;
MapType kvType = (MapType) dataType;
HiveObjectConversion keyConversion =
getConversion(
mapInspector.getMapKeyObjectInspector(), kvType.getKeyType(), hiveShim);
HiveObjectConversion valueConversion =
getConversion(
mapInspector.getMapValueObjectInspector(),
kvType.getValueType(),
hiveShim);
return o -> {
if (o == null) {
return null;
}
Map<Object, Object> map = (Map) o;
Map<Object, Object> result = CollectionUtil.newHashMapWithExpectedSize(map.size());
for (Map.Entry<Object, Object> entry : map.entrySet()) {
result.put(
keyConversion.toHiveObject(entry.getKey()),
valueConversion.toHiveObject(entry.getValue()));
}
return result;
};
}
if (inspector instanceof StructObjectInspector) {
StructObjectInspector structInspector = (StructObjectInspector) inspector;
List<? extends StructField> structFields = structInspector.getAllStructFieldRefs();
List<RowType.RowField> rowFields = ((RowType) dataType).getFields();
HiveObjectConversion[] conversions = new HiveObjectConversion[structFields.size()];
for (int i = 0; i < structFields.size(); i++) {
conversions[i] =
getConversion(
structFields.get(i).getFieldObjectInspector(),
rowFields.get(i).getType(),
hiveShim);
}
return o -> {
if (o == null) {
return null;
}
Row row = (Row) o;
List<Object> result = new ArrayList<>(row.getArity());
for (int i = 0; i < row.getArity(); i++) {
result.add(conversions[i].toHiveObject(row.getField(i)));
}
return result;
};
}
throw new FlinkHiveUDFException(
String.format(
"Flink doesn't support convert object conversion for %s yet", inspector));
} | 3.68 |
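Each branch above yields a HiveObjectConversion, which is simply a serializable function from a Flink value to its Hive counterpart; the list, map, and struct cases compose these element-wise. A minimal sketch of using one such conversion in isolation, assuming the HiveObjectConversion interface with a single toHiveObject(Object) method and the package paths recalled here (both may differ across Flink versions):

import java.math.BigDecimal;

import org.apache.flink.table.functions.hive.conversion.HiveObjectConversion;
import org.apache.hadoop.hive.common.type.HiveDecimal;

public class HiveConversionSketch {
    public static void main(String[] args) {
        // the decimal branch from above, written as a standalone conversion
        HiveObjectConversion decimalConversion =
                o -> o == null ? null : HiveDecimal.create((BigDecimal) o);

        Object hiveValue = decimalConversion.toHiveObject(new BigDecimal("12.34"));
        System.out.println(hiveValue);                             // HiveDecimal 12.34
        System.out.println(decimalConversion.toHiveObject(null));  // null passes through
    }
}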
flink_PythonOperatorChainingOptimizer_apply | /**
 * Performs chaining optimization. It iterates over the transformations defined in the given
 * StreamExecutionEnvironment, replaces them with the chained transformations, and returns the
 * chained transformation that corresponds to the given transformation.
*/
@SuppressWarnings("unchecked")
public static Transformation<?> apply(
StreamExecutionEnvironment env, Transformation<?> transformation) throws Exception {
if (env.getConfiguration().get(PythonOptions.PYTHON_OPERATOR_CHAINING_ENABLED)) {
final Field transformationsField =
StreamExecutionEnvironment.class.getDeclaredField("transformations");
transformationsField.setAccessible(true);
final List<Transformation<?>> transformations =
(List<Transformation<?>>) transformationsField.get(env);
final Tuple2<List<Transformation<?>>, Transformation<?>> resultTuple =
optimize(transformations, transformation);
transformationsField.set(env, resultTuple.f0);
return resultTuple.f1;
} else {
return transformation;
}
} | 3.68 |
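A hedged sketch of invoking the optimizer from user code. The configuration option comes from PythonOptions as used above; the concrete transformation here is only illustrative, and the optimizer's own package (import omitted below) has moved between Flink releases, so check the version in use.

import org.apache.flink.api.dag.Transformation;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.python.PythonOptions;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class ChainingOptimizerSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set(PythonOptions.PYTHON_OPERATOR_CHAINING_ENABLED, true);
        StreamExecutionEnvironment env =
                StreamExecutionEnvironment.getExecutionEnvironment(conf);

        // Normally this transformation would wrap a Python operator; any
        // transformation is enough to show the call shape.
        Transformation<?> transformation =
                env.fromElements(1, 2, 3).map(i -> i * 2).getTransformation();

        // Rewrites env's internal transformation list and returns the chained
        // counterpart of the given transformation.
        Transformation<?> chained =
                PythonOperatorChainingOptimizer.apply(env, transformation);
        System.out.println(chained.getName());
    }
}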
flink_EventTimeSessionWindows_withDynamicGap | /**
* Creates a new {@code SessionWindows} {@link WindowAssigner} that assigns elements to sessions
* based on the element timestamp.
*
* @param sessionWindowTimeGapExtractor The extractor to use to extract the time gap from the
* input elements
 * @return The session window assigner.
*/
@PublicEvolving
public static <T> DynamicEventTimeSessionWindows<T> withDynamicGap(
SessionWindowTimeGapExtractor<T> sessionWindowTimeGapExtractor) {
return new DynamicEventTimeSessionWindows<>(sessionWindowTimeGapExtractor);
} | 3.68 |
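A sketch of plugging the dynamic-gap assigner into a keyed stream. The Event type below is hypothetical, and event-time timestamp/watermark assignment is omitted for brevity (windows would not fire without it).

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.assigners.EventTimeSessionWindows;

public class DynamicGapSessionSketch {
    // hypothetical event that carries its own session gap
    public static class Event {
        public String key = "k";
        public long gapMillis = 10_000L;
        public long value = 1L;
    }

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStream<Event> events = env.fromElements(new Event(), new Event());

        // each element contributes its own gap via the extractor lambda
        events.keyBy(e -> e.key)
                .window(EventTimeSessionWindows.<Event>withDynamicGap(e -> e.gapMillis))
                .reduce((a, b) -> { a.value += b.value; return a; })
                .print();

        env.execute("dynamic-session-sketch");
    }
}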
druid_IPAddress_toString | /**
 * Returns the string representation of the IP address in the common decimal-dotted notation xxx.xxx.xxx.xxx.
 *
 * @return the string representation of the IP address.
*/
public String toString() {
StringBuilder result = new StringBuilder();
int temp;
temp = ipAddress & 0x000000FF;
result.append(temp);
result.append(".");
temp = (ipAddress >> 8) & 0x000000FF;
result.append(temp);
result.append(".");
temp = (ipAddress >> 16) & 0x000000FF;
result.append(temp);
result.append(".");
temp = (ipAddress >> 24) & 0x000000FF;
result.append(temp);
return result.toString();
} | 3.68 |
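The address is held with its least significant byte as the first octet, which is why the lowest byte is appended first and each later octet comes from an additional 8-bit shift. A standalone illustration of the same arithmetic:

public class IpUnpackDemo {
    // mirrors IPAddress.toString(): lowest byte first, then 8/16/24-bit shifts
    static String toDotted(int ipAddress) {
        return (ipAddress & 0xFF) + "."
                + ((ipAddress >> 8) & 0xFF) + "."
                + ((ipAddress >> 16) & 0xFF) + "."
                + ((ipAddress >> 24) & 0xFF);
    }

    public static void main(String[] args) {
        // 0x0A01A8C0 holds the octets C0.A8.01.0A low-to-high, i.e. 192.168.1.10
        System.out.println(toDotted(0x0A01A8C0)); // prints 192.168.1.10
    }
}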
flink_AbstractKeyedStateBackend_getPartitionedState | /**
 * TODO: NOTE: This method does a lot of work caching / retrieving states just to update the
 * namespace. It should be removed once namespaces are either lazily fetched from the keyed
 * state backend or set on the state directly.
*
* @see KeyedStateBackend
*/
@SuppressWarnings("unchecked")
@Override
public <N, S extends State> S getPartitionedState(
final N namespace,
final TypeSerializer<N> namespaceSerializer,
final StateDescriptor<S, ?> stateDescriptor)
throws Exception {
checkNotNull(namespace, "Namespace");
if (lastName != null && lastName.equals(stateDescriptor.getName())) {
lastState.setCurrentNamespace(namespace);
return (S) lastState;
}
InternalKvState<K, ?, ?> previous = keyValueStatesByName.get(stateDescriptor.getName());
if (previous != null) {
lastState = previous;
lastState.setCurrentNamespace(namespace);
lastName = stateDescriptor.getName();
return (S) previous;
}
final S state = getOrCreateKeyedState(namespaceSerializer, stateDescriptor);
final InternalKvState<K, N, ?> kvState = (InternalKvState<K, N, ?>) state;
lastName = stateDescriptor.getName();
lastState = kvState;
kvState.setCurrentNamespace(namespace);
return state;
} | 3.68 |
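A compile-level sketch of the typical caller: an operator that re-scopes a cached state object to a per-window namespace before reading and writing it. The backend, window, and serializer are assumed to be provided by the surrounding operator.

import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.runtime.state.AbstractKeyedStateBackend;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;

public class NamespacedStateSketch {
    // called once per element by a window-style operator
    static void addToWindowCount(
            AbstractKeyedStateBackend<String> backend,
            TimeWindow window,
            TypeSerializer<TimeWindow> windowSerializer) throws Exception {
        ValueStateDescriptor<Long> descriptor =
                new ValueStateDescriptor<>("per-window-count", Long.class);
        // repeated calls with the same descriptor hit the lastName/lastState cache
        // shown above and only switch the namespace
        ValueState<Long> count =
                backend.getPartitionedState(window, windowSerializer, descriptor);
        Long current = count.value();
        count.update(current == null ? 1L : current + 1L);
    }
}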
hadoop_PlacementConstraints_or | /**
* A disjunction of constraints.
*
 * @param children the child constraints, at least one of which must be satisfied
* @return the resulting placement constraint
*/
public static Or or(AbstractConstraint... children) {
return new Or(children);
} | 3.68 |
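A hedged usage sketch combining two target constraints. The helper names (targetIn, targetNotIn, allocationTag, NODE, RACK, build) are recalled from the YARN placement-constraint API and should be checked against the Hadoop version in use.

import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.RACK;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.or;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetIn;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetNotIn;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;

import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraints;

public class OrConstraintSketch {
    public static void main(String[] args) {
        // satisfied if the container lands on a node already tagged "hbase-rs",
        // or on a rack that does not yet host a "zookeeper" container
        PlacementConstraint constraint =
                PlacementConstraints.build(
                        or(targetIn(NODE, allocationTag("hbase-rs")),
                           targetNotIn(RACK, allocationTag("zookeeper"))));
        System.out.println(constraint);
    }
}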
flink_FutureUtils_handleUncaughtException | /**
* Checks that the given {@link CompletableFuture} is not completed exceptionally. If the future
* is completed exceptionally, then it will call the given uncaught exception handler.
*
* @param completableFuture to assert for no exceptions
* @param uncaughtExceptionHandler to call if the future is completed exceptionally
*/
public static void handleUncaughtException(
CompletableFuture<?> completableFuture,
Thread.UncaughtExceptionHandler uncaughtExceptionHandler) {
handleUncaughtException(
completableFuture, uncaughtExceptionHandler, FatalExitExceptionHandler.INSTANCE);
} | 3.68 |
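A small sketch of the call. The FutureUtils package shown is recalled from recent Flink versions (it previously lived under runtime.concurrent), so adjust the import to the version in use.

import java.util.concurrent.CompletableFuture;

import org.apache.flink.util.concurrent.FutureUtils;

public class UncaughtHandlerSketch {
    public static void main(String[] args) {
        CompletableFuture<Void> future = new CompletableFuture<>();
        future.completeExceptionally(new IllegalStateException("boom"));

        // the future is already completed exceptionally, so the handler fires
        // immediately on the calling thread
        FutureUtils.handleUncaughtException(
                future, (thread, error) -> System.err.println("Uncaught: " + error));
    }
}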
hadoop_DefaultCodec_createDirectDecompressor | /**
* {@inheritDoc}
*/
@Override
public DirectDecompressor createDirectDecompressor() {
return ZlibFactory.getZlibDirectDecompressor(conf);
} | 3.68 |
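A hedged sketch of driving the resulting DirectDecompressor with off-heap buffers. The decompress(src, dst) signature and the buffer-position behavior are assumptions about the Hadoop DirectDecompressor interface, and the factory can return null when native zlib is unavailable.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.zip.Deflater;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.io.compress.DirectDecompressor;

public class DirectDecompressSketch {
    public static void main(String[] args) throws Exception {
        DefaultCodec codec = new DefaultCodec();
        codec.setConf(new Configuration());

        DirectDecompressor decompressor = codec.createDirectDecompressor();
        if (decompressor == null) {
            System.err.println("native zlib not available, no direct decompressor");
            return;
        }

        // produce a zlib stream to decompress (DefaultCodec is zlib-framed)
        byte[] raw = "hello direct decompression".getBytes(StandardCharsets.UTF_8);
        Deflater deflater = new Deflater();
        deflater.setInput(raw);
        deflater.finish();
        byte[] deflated = new byte[4096];
        int n = deflater.deflate(deflated);

        ByteBuffer src = ByteBuffer.allocateDirect(4096);
        ByteBuffer dst = ByteBuffer.allocateDirect(4096);
        src.put(deflated, 0, n);
        src.flip();

        // assumed to advance dst's position by the number of decompressed bytes
        decompressor.decompress(src, dst);
        dst.flip();
        byte[] out = new byte[dst.remaining()];
        dst.get(out);
        System.out.println(new String(out, StandardCharsets.UTF_8));
    }
}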
hbase_ServerManager_registerListener | /**
* Add the listener to the notification list.
* @param listener The ServerListener to register
*/
public void registerListener(final ServerListener listener) {
this.listeners.add(listener);
} | 3.68 |
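A hedged sketch of a listener implementation; serverAdded and serverRemoved are assumed to be the callback names exposed by HBase's ServerListener interface in the version at hand.

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.ServerListener;
import org.apache.hadoop.hbase.master.ServerManager;

public class LoggingServerListener implements ServerListener {
    @Override
    public void serverAdded(ServerName serverName) {
        System.out.println("RegionServer joined: " + serverName);
    }

    @Override
    public void serverRemoved(ServerName serverName) {
        System.out.println("RegionServer left: " + serverName);
    }

    // registration happens wherever a ServerManager instance is available,
    // e.g. in master-side code or a test harness
    static void register(ServerManager serverManager) {
        serverManager.registerListener(new LoggingServerListener());
    }
}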
querydsl_Expressions_simpleOperation | /**
* Create a new Operation expression
*
* @param type type of expression
* @param operator operator
* @param args operation arguments
* @return operation expression
*/
public static <T> SimpleOperation<T> simpleOperation(Class<? extends T> type, Operator operator,
Expression<?>... args) {
return new SimpleOperation<T>(type, operator, args);
} | 3.68 |
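A small usage sketch building a String-typed concatenation over two paths; how the resulting operation renders depends on the Querydsl backend that serializes it.

import com.querydsl.core.types.Ops;
import com.querydsl.core.types.dsl.Expressions;
import com.querydsl.core.types.dsl.SimpleOperation;
import com.querydsl.core.types.dsl.StringPath;

public class SimpleOperationSketch {
    public static void main(String[] args) {
        StringPath firstName = Expressions.stringPath("firstName");
        StringPath lastName = Expressions.stringPath("lastName");

        // a String-typed CONCAT operation over the two path arguments
        SimpleOperation<String> fullName =
                Expressions.simpleOperation(String.class, Ops.CONCAT, firstName, lastName);

        System.out.println(fullName); // e.g. "firstName + lastName"
    }
}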
framework_VAccordion_getStackItems | /**
* Returns an iterable over all the stack items.
*
* @return the iterable
*/
@SuppressWarnings({ "rawtypes", "unchecked" })
public Iterable<StackItem> getStackItems() {
return (Iterable) getChildren();
} | 3.68 |
framework_TreeGrid_getHierarchyColumn | /**
* Get the currently set hierarchy column.
*
* @return the currently set hierarchy column, or {@code null} if no column
* has been explicitly set
*/
public Column<T, ?> getHierarchyColumn() {
return getColumnByInternalId(getState(false).hierarchyColumnId);
} | 3.68 |
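A sketch of pairing this getter with setHierarchyColumn in Vaadin 8 style; the Department bean is hypothetical.

import com.vaadin.ui.Grid.Column;
import com.vaadin.ui.TreeGrid;

public class TreeGridHierarchySketch {
    public static class Department {
        private final String name;
        public Department(String name) { this.name = name; }
        public String getName() { return name; }
    }

    static TreeGrid<Department> build() {
        TreeGrid<Department> grid = new TreeGrid<>();
        Column<Department, String> nameColumn =
                grid.addColumn(Department::getName).setCaption("Name");

        // the hierarchy column is the one that renders the expand/collapse controls
        grid.setHierarchyColumn(nameColumn);

        Column<Department, ?> hierarchy = grid.getHierarchyColumn(); // == nameColumn
        assert hierarchy == nameColumn;
        return grid;
    }
}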
flink_SkipListUtils_buildLevelIndex | /**
* Build the level index for the given node.
*
* @param node the node.
* @param level level of the node.
* @param keySegment memory segment of the key in the node.
* @param keyOffset offset of the key in memory segment.
* @param levelIndexHeader the head level index.
* @param spaceAllocator the space allocator.
*/
static void buildLevelIndex(
long node,
int level,
MemorySegment keySegment,
int keyOffset,
LevelIndexHeader levelIndexHeader,
Allocator spaceAllocator) {
int currLevel = level;
long prevNode =
findPredecessor(keySegment, keyOffset, currLevel, levelIndexHeader, spaceAllocator);
long currentNode = helpGetNextNode(prevNode, currLevel, levelIndexHeader, spaceAllocator);
for (; ; ) {
if (currentNode != NIL_NODE) {
int c = compareSegmentAndNode(keySegment, keyOffset, currentNode, spaceAllocator);
if (c > 0) {
prevNode = currentNode;
currentNode =
helpGetNextNode(
currentNode, currLevel, levelIndexHeader, spaceAllocator);
continue;
}
}
helpSetPrevAndNextNode(node, prevNode, currentNode, currLevel, spaceAllocator);
helpSetNextNode(prevNode, node, currLevel, levelIndexHeader, spaceAllocator);
helpSetPrevNode(currentNode, node, currLevel, spaceAllocator);
currLevel--;
if (currLevel == 0) {
break;
}
currentNode = helpGetNextNode(prevNode, currLevel, levelIndexHeader, spaceAllocator);
}
} | 3.68 |
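The loop descends from the node's top level to level 1 and, on each level, splices the new node between its predecessor and successor with three pointer writes. A simplified on-heap sketch of the same rewiring, leaving out the off-heap memory segments and the key comparisons used to locate the predecessor:

import java.util.ArrayList;
import java.util.List;

public class LevelIndexSketch {
    static class Node {
        final int key;
        final List<Node> next = new ArrayList<>();
        final List<Node> prev = new ArrayList<>();
        // levels must be at least topLevel + 1 so indices 0..topLevel are valid
        Node(int key, int levels) {
            this.key = key;
            for (int i = 0; i < levels; i++) {
                next.add(null);
                prev.add(null);
            }
        }
    }

    /**
     * Links 'node' into levels topLevel..1, assuming pred[l] already points to the
     * node's predecessor on level l (found by key comparison in the real code).
     */
    static void buildLevelIndex(Node node, Node[] pred, int topLevel) {
        for (int level = topLevel; level >= 1; level--) {
            Node prevNode = pred[level];
            Node nextNode = prevNode.next.get(level); // successor, possibly null
            // the same three writes as helpSetPrevAndNextNode / helpSetNextNode /
            // helpSetPrevNode in the off-heap version
            node.prev.set(level, prevNode);
            node.next.set(level, nextNode);
            prevNode.next.set(level, node);
            if (nextNode != null) {
                nextNode.prev.set(level, node);
            }
        }
    }
}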
flink_TypeExtractor_isValidPojoField | /**
 * Checks if the given field is a valid POJO field: either it is public, or there are getter
 * and setter methods for the field.
*
* @param f field to check
* @param clazz class of field
* @param typeHierarchy type hierarchy for materializing generic types
*/
private boolean isValidPojoField(Field f, Class<?> clazz, List<Type> typeHierarchy) {
if (Modifier.isPublic(f.getModifiers())) {
return true;
} else {
boolean hasGetter = false, hasSetter = false;
final String fieldNameLow = f.getName().toLowerCase().replaceAll("_", "");
Type fieldType = f.getGenericType();
Class<?> fieldTypeWrapper = ClassUtils.primitiveToWrapper(f.getType());
TypeVariable<?> fieldTypeGeneric = null;
if (fieldType instanceof TypeVariable) {
fieldTypeGeneric = (TypeVariable<?>) fieldType;
fieldType = materializeTypeVariable(typeHierarchy, (TypeVariable<?>) fieldType);
}
for (Method m : clazz.getMethods()) {
final String methodNameLow =
m.getName().endsWith("_$eq")
? m.getName()
.toLowerCase()
.replaceAll("_", "")
.replaceFirst("\\$eq$", "_\\$eq")
: m.getName().toLowerCase().replaceAll("_", "");
// check for getter
if ( // The name should be "get<FieldName>" or "<fieldName>" (for scala) or
// "is<fieldName>" for boolean fields.
(methodNameLow.equals("get" + fieldNameLow)
|| methodNameLow.equals("is" + fieldNameLow)
|| methodNameLow.equals(fieldNameLow))
&&
// no arguments for the getter
m.getParameterCount() == 0
&&
// return type is same as field type (or the generic variant of it)
(m.getGenericReturnType().equals(fieldType)
|| (m.getReturnType().equals(fieldTypeWrapper))
|| (m.getGenericReturnType().equals(fieldTypeGeneric)))) {
hasGetter = true;
}
// check for setters (<FieldName>_$eq for scala)
if ((methodNameLow.equals("set" + fieldNameLow)
|| methodNameLow.equals(fieldNameLow + "_$eq"))
&& m.getParameterCount() == 1
&& // one parameter of the field's type
(m.getGenericParameterTypes()[0].equals(fieldType)
|| (m.getParameterTypes()[0].equals(fieldTypeWrapper))
|| (m.getGenericParameterTypes()[0].equals(fieldTypeGeneric)))
&&
                        // return type is void (or the class itself, to allow fluent setters).
(m.getReturnType().equals(Void.TYPE) || m.getReturnType().equals(clazz))) {
hasSetter = true;
}
}
if (hasGetter && hasSetter) {
return true;
} else {
if (!hasGetter && clazz != Row.class) {
LOG.info(clazz + " does not contain a getter for field " + f.getName());
}
if (!hasSetter && clazz != Row.class) {
LOG.info(clazz + " does not contain a setter for field " + f.getName());
}
return false;
}
}
} | 3.68 |
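Concretely, the rules above accept fields like the first three below and reject the last one; a single invalid field makes Flink fall back to generic (Kryo) serialization for the whole class.

public class PojoFieldExamples {
    // valid: public field
    public int publicCount;

    // valid: private field with matching getter and setter
    private String name;
    public String getName() { return name; }
    public void setName(String name) { this.name = name; }

    // valid: boolean field exposed through is<FieldName>()
    private boolean active;
    public boolean isActive() { return active; }
    public void setActive(boolean active) { this.active = active; }

    // invalid: getter only, no setter, so isValidPojoField(...) returns false
    private long createdAt;
    public long getCreatedAt() { return createdAt; }
}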
flink_Costs_addDiskCost | /**
* Adds the costs for disk to the current disk costs for this Costs object.
*
* @param bytes The disk cost to add, in bytes to be written and read.
*/
public void addDiskCost(double bytes) {
this.diskCost = (this.diskCost < 0 || bytes < 0) ? UNKNOWN : this.diskCost + bytes;
} | 3.68 |
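The negative check implements a sticky "unknown" sentinel: as soon as either the accumulated cost or the increment is unknown (negative; UNKNOWN is assumed to be -1 here, as in the optimizer's cost model), the sum stays unknown. A tiny standalone illustration of the same rule:

public class StickyUnknownDemo {
    static final double UNKNOWN = -1;

    static double addCost(double current, double add) {
        // same rule as Costs.addDiskCost
        return (current < 0 || add < 0) ? UNKNOWN : current + add;
    }

    public static void main(String[] args) {
        double cost = 0;
        cost = addCost(cost, 1024);    // 1024.0
        cost = addCost(cost, UNKNOWN); // -1.0: unknown from here on
        cost = addCost(cost, 2048);    // still -1.0
        System.out.println(cost);
    }
}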
flink_OperationManager_fetchResults | /**
* Get the results of the operation.
*
* @param operationHandle identifies the {@link Operation}.
* @param token identifies which batch of data to fetch.
* @param maxRows the maximum number of rows to fetch.
 * @return a ResultSet containing the results.
*/
public ResultSet fetchResults(OperationHandle operationHandle, long token, int maxRows) {
return getOperation(operationHandle).fetchResults(token, maxRows);
} | 3.68 |
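A hedged sketch of draining an operation batch by batch. The ResultSet accessors (getData, getNextToken) and the package names below are assumptions about the SQL Gateway API and may differ between Flink versions.

import org.apache.flink.table.gateway.api.operation.OperationHandle;
import org.apache.flink.table.gateway.api.results.ResultSet;
import org.apache.flink.table.gateway.service.operation.OperationManager;

public class FetchLoopSketch {
    // fetch up to 100 rows per call until the gateway reports no further batch
    static void drain(OperationManager manager, OperationHandle handle) {
        long token = 0L;
        while (true) {
            ResultSet resultSet = manager.fetchResults(handle, token, 100);
            resultSet.getData().forEach(System.out::println);
            Long next = resultSet.getNextToken(); // assumed accessor
            if (next == null) {
                break;
            }
            token = next;
        }
    }
}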
hbase_PreemptiveFastFailException_getFailureCount | /** Returns failure count */
public long getFailureCount() {
return failureCount;
} | 3.68 |