name (string, length 12–178) | code_snippet (string, length 8–36.5k) | score (float64, 3.26–3.68) |
---|---|---|
dubbo_AppScriptStateRouter_setScriptRule | // for testing purposes
public void setScriptRule(ScriptRule scriptRule) {
this.scriptRule = scriptRule;
} | 3.68 |
pulsar_ClientCnxIdleState_doIdleDetect | /**
 * Checks whether the connection is idle, and if so, sets the idle-state to {@link State#IDLE}.
 * If the state is already idle and {@code maxIdleSeconds} has elapsed since the connection was
 * marked idle, sets the state to {@link State#RELEASING}.
*/
public void doIdleDetect(long maxIdleSeconds) {
if (isReleasing()) {
return;
}
if (isIdle()) {
if (maxIdleSeconds * 1000 + idleMarkTime < System.currentTimeMillis()) {
tryMarkReleasing();
}
return;
}
if (clientCnx.idleCheck()) {
tryMarkIdleAndInitIdleTime();
}
} | 3.68 |
framework_VaadinService_getServiceInitListeners | /**
* Gets all available service init listeners. A custom Vaadin service
* implementation can override this method to discover init listeners in
* some other way in addition to the default implementation that uses
* {@link ServiceLoader}. This could for example be used to allow defining
* an init listener as an OSGi service or as a Spring bean.
*
* @since 8.0
*
* @return an iterator of available service init listeners
*/
protected Iterator<VaadinServiceInitListener> getServiceInitListeners() {
ServiceLoader<VaadinServiceInitListener> loader = ServiceLoader
.load(VaadinServiceInitListener.class, getClassLoader());
return loader.iterator();
} | 3.68 |
hbase_RegionCoprocessorHost_postInstantiateDeleteTracker | /**
 * @deprecated Since 2.0 without any replacement; will be removed in 3.0
*/
@Deprecated
public DeleteTracker postInstantiateDeleteTracker(DeleteTracker result) throws IOException {
if (this.coprocEnvironments.isEmpty()) {
return result;
}
return execOperationWithResult(
new ObserverOperationWithResult<RegionObserver, DeleteTracker>(regionObserverGetter, result) {
@Override
public DeleteTracker call(RegionObserver observer) throws IOException {
return observer.postInstantiateDeleteTracker(this, getResult());
}
});
} | 3.68 |
graphhopper_ReaderElement_getFirstPriorityTag | /**
 * Returns the value of the first existing tag from the specified list; the order of the list matters.
 *
 * @return the matching tag value, or an empty string if none of the tags exists
*/
public String getFirstPriorityTag(List<String> restrictions) {
for (String str : restrictions) {
Object value = properties.get(str);
if (value != null)
return (String) value;
}
return "";
} | 3.68 |
graphhopper_VectorTile_setNameBytes | /**
* <code>required string name = 1;</code>
*/
public Builder setNameBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
name_ = value;
onChanged();
return this;
} | 3.68 |
framework_Type_getProperties | /**
 * @return a collection of the properties of this type
* @throws NoDataException
*
* @deprecated As of 7.0.1, use {@link #getPropertiesAsArray()} instead for
* improved performance
*/
@Deprecated
public Collection<Property> getProperties() throws NoDataException {
return TypeDataStore.getProperties(this);
} | 3.68 |
framework_CheckBox_readDesign | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.AbstractField#readDesign(org.jsoup.nodes.Element,
* com.vaadin.ui.declarative.DesignContext)
*/
@Override
public void readDesign(Element design, DesignContext designContext) {
super.readDesign(design, designContext);
if (design.hasAttr("checked")) {
this.setValue(DesignAttributeHandler.readAttribute("checked",
design.attributes(), Boolean.class), false);
}
} | 3.68 |
dubbo_ServiceDiscoveryRegistryDirectory_toInvokers | /**
 * Turns urls into invokers; urls that have already been referred are not re-referenced.
 * Items that are put into newUrlInvokerMap will be removed from oldUrlInvokerMap.
 *
 * @param oldUrlInvokerMap the existing invoker map; it might be modified during the process.
 * @param urls the urls notified from the registry
 * @return the new map of invokers
*/
private Map<ProtocolServiceKeyWithAddress, Invoker<T>> toInvokers(
Map<ProtocolServiceKeyWithAddress, Invoker<T>> oldUrlInvokerMap, List<URL> urls) {
Map<ProtocolServiceKeyWithAddress, Invoker<T>> newUrlInvokerMap =
new ConcurrentHashMap<>(urls == null ? 1 : (int) (urls.size() / 0.75f + 1));
if (urls == null || urls.isEmpty()) {
return newUrlInvokerMap;
}
for (URL url : urls) {
InstanceAddressURL instanceAddressURL = (InstanceAddressURL) url;
if (EMPTY_PROTOCOL.equals(instanceAddressURL.getProtocol())) {
continue;
}
if (!getUrl().getOrDefaultFrameworkModel()
.getExtensionLoader(Protocol.class)
.hasExtension(instanceAddressURL.getProtocol())) {
// 4-1 - Unsupported protocol
logger.error(
PROTOCOL_UNSUPPORTED,
"protocol extension does not installed",
"",
"Unsupported protocol.",
new IllegalStateException("Unsupported protocol " + instanceAddressURL.getProtocol()
+ " in notified url: "
+ instanceAddressURL + " from registry " + getUrl().getAddress() + " to consumer "
+ NetUtils.getLocalHost() + ", supported protocol: "
+ getUrl().getOrDefaultFrameworkModel()
.getExtensionLoader(Protocol.class)
.getSupportedExtensions()));
continue;
}
instanceAddressURL.setProviderFirstParams(providerFirstParams);
// Override provider urls if needed
if (enableConfigurationListen) {
instanceAddressURL = overrideWithConfigurator(instanceAddressURL);
}
// filter all the service available (version wildcard, group wildcard, protocol wildcard)
int port = instanceAddressURL.getPort();
List<ProtocolServiceKey> matchedProtocolServiceKeys =
instanceAddressURL.getMetadataInfo().getMatchedServiceInfos(consumerProtocolServiceKey).stream()
.filter(serviceInfo -> serviceInfo.getPort() <= 0 || serviceInfo.getPort() == port)
.map(MetadataInfo.ServiceInfo::getProtocolServiceKey)
.collect(Collectors.toList());
// see org.apache.dubbo.common.ProtocolServiceKey.isSameWith
// check if needed to override the consumer url
boolean shouldWrap = matchedProtocolServiceKeys.size() != 1
|| !consumerProtocolServiceKey.isSameWith(matchedProtocolServiceKeys.get(0));
for (ProtocolServiceKey matchedProtocolServiceKey : matchedProtocolServiceKeys) {
ProtocolServiceKeyWithAddress protocolServiceKeyWithAddress =
new ProtocolServiceKeyWithAddress(matchedProtocolServiceKey, instanceAddressURL.getAddress());
Invoker<T> invoker =
oldUrlInvokerMap == null ? null : oldUrlInvokerMap.get(protocolServiceKeyWithAddress);
if (invoker == null
|| urlChanged(
invoker,
instanceAddressURL,
matchedProtocolServiceKey)) { // Not in the cache, refer again
try {
boolean enabled;
if (instanceAddressURL.hasParameter(DISABLED_KEY)) {
enabled = !instanceAddressURL.getParameter(DISABLED_KEY, false);
} else {
enabled = instanceAddressURL.getParameter(ENABLED_KEY, true);
}
if (enabled) {
if (shouldWrap) {
URL newConsumerUrl = ConcurrentHashMapUtils.computeIfAbsent(
customizedConsumerUrlMap, matchedProtocolServiceKey, k -> consumerUrl
.setProtocol(k.getProtocol())
.addParameter(CommonConstants.GROUP_KEY, k.getGroup())
.addParameter(CommonConstants.VERSION_KEY, k.getVersion()));
RpcContext.getServiceContext().setConsumerUrl(newConsumerUrl);
invoker = new InstanceWrappedInvoker<>(
protocol.refer(serviceType, instanceAddressURL),
newConsumerUrl,
matchedProtocolServiceKey);
} else {
invoker = protocol.refer(serviceType, instanceAddressURL);
}
}
} catch (Throwable t) {
logger.error(
PROTOCOL_FAILED_REFER_INVOKER,
"",
"",
"Failed to refer invoker for interface:" + serviceType + ",url:(" + instanceAddressURL
+ ")" + t.getMessage(),
t);
}
if (invoker != null) { // Put new invoker in cache
newUrlInvokerMap.put(protocolServiceKeyWithAddress, invoker);
}
} else {
newUrlInvokerMap.put(protocolServiceKeyWithAddress, invoker);
oldUrlInvokerMap.remove(protocolServiceKeyWithAddress, invoker);
}
}
}
return newUrlInvokerMap;
} | 3.68 |
hbase_OpenRegionHandler_getException | /** Returns Null or the run exception; call this method after thread is done. */
Throwable getException() {
return this.exception;
} | 3.68 |
framework_Label_addValueChangeListener | /**
* Adds the value change listener.
*
* @param listener
* the Listener to be added.
* @see Property.ValueChangeNotifier#addListener(Property.ValueChangeListener)
*/
@Override
public void addValueChangeListener(Property.ValueChangeListener listener) {
addListener(Label.ValueChangeEvent.class, listener,
VALUE_CHANGE_METHOD);
} | 3.68 |
framework_DesignContext_getPackagePrefix | /**
* Gets the prefix mapping for a given package, or <code>null</code> if
* there is no mapping for the package.
*
* @see #addPackagePrefix(String, String)
* @see #getPackagePrefixes()
*
* @since 7.5.0
* @param packageName
* the package name to get a prefix for
* @return the prefix for the package, or <code>null</code> if no prefix is
* registered
*/
public String getPackagePrefix(String packageName) {
if (VAADIN_UI_PACKAGE.equals(packageName)) {
return isLegacyPrefixEnabled() ? LEGACY_PREFIX : VAADIN_PREFIX;
} else {
return packageToPrefix.get(packageName);
}
} | 3.68 |
hudi_HoodieTableFactory_setupHoodieKeyOptions | /**
* Sets up the hoodie key options (e.g. record key and partition key) from the table definition.
*/
private static void setupHoodieKeyOptions(Configuration conf, CatalogTable table) {
List<String> pkColumns = table.getSchema().getPrimaryKey()
.map(UniqueConstraint::getColumns).orElse(Collections.emptyList());
if (pkColumns.size() > 0) {
// the PRIMARY KEY syntax always has higher priority than option FlinkOptions#RECORD_KEY_FIELD
String recordKey = String.join(",", pkColumns);
conf.setString(FlinkOptions.RECORD_KEY_FIELD, recordKey);
}
List<String> partitionKeys = table.getPartitionKeys();
if (partitionKeys.size() > 0) {
// the PARTITIONED BY syntax always has higher priority than option FlinkOptions#PARTITION_PATH_FIELD
conf.setString(FlinkOptions.PARTITION_PATH_FIELD, String.join(",", partitionKeys));
}
// set index key for bucket index if not defined
if (conf.getString(FlinkOptions.INDEX_TYPE).equals(HoodieIndex.IndexType.BUCKET.name())) {
if (conf.getString(FlinkOptions.INDEX_KEY_FIELD).isEmpty()) {
conf.setString(FlinkOptions.INDEX_KEY_FIELD, conf.getString(FlinkOptions.RECORD_KEY_FIELD));
} else {
Set<String> recordKeySet =
Arrays.stream(conf.getString(FlinkOptions.RECORD_KEY_FIELD).split(",")).collect(Collectors.toSet());
Set<String> indexKeySet =
Arrays.stream(conf.getString(FlinkOptions.INDEX_KEY_FIELD).split(",")).collect(Collectors.toSet());
if (!recordKeySet.containsAll(indexKeySet)) {
throw new HoodieValidationException(
FlinkOptions.INDEX_KEY_FIELD + " should be a subset of or equal to the recordKey fields");
}
}
}
// tweak the key gen class if possible
final String[] partitions = conf.getString(FlinkOptions.PARTITION_PATH_FIELD).split(",");
final String[] pks = conf.getString(FlinkOptions.RECORD_KEY_FIELD).split(",");
if (partitions.length == 1) {
final String partitionField = partitions[0];
if (partitionField.isEmpty()) {
conf.setString(FlinkOptions.KEYGEN_CLASS_NAME, NonpartitionedAvroKeyGenerator.class.getName());
LOG.info("Table option [{}] is reset to {} because this is a non-partitioned table",
FlinkOptions.KEYGEN_CLASS_NAME.key(), NonpartitionedAvroKeyGenerator.class.getName());
return;
}
DataType partitionFieldType = table.getSchema().getFieldDataType(partitionField)
.orElseThrow(() -> new HoodieValidationException("Field " + partitionField + " does not exist"));
if (pks.length <= 1 && DataTypeUtils.isDatetimeType(partitionFieldType)) {
// timestamp based key gen only supports simple primary key
setupTimestampKeygenOptions(conf, partitionFieldType);
return;
}
}
boolean complexHoodieKey = pks.length > 1 || partitions.length > 1;
if (complexHoodieKey && FlinkOptions.isDefaultValueDefined(conf, FlinkOptions.KEYGEN_CLASS_NAME)) {
conf.setString(FlinkOptions.KEYGEN_CLASS_NAME, ComplexAvroKeyGenerator.class.getName());
LOG.info("Table option [{}] is reset to {} because record key or partition path has two or more fields",
FlinkOptions.KEYGEN_CLASS_NAME.key(), ComplexAvroKeyGenerator.class.getName());
}
} | 3.68 |
hbase_OrderedBytes_skip | /**
 * Skip {@code src}'s position forward over one encoded value.
* @return number of bytes skipped.
*/
public static int skip(PositionedByteRange src) {
final int start = src.getPosition();
byte header = src.get();
Order ord = (-1 == Integer.signum(header)) ? DESCENDING : ASCENDING;
header = ord.apply(header);
switch (header) {
case NULL:
case NEG_INF:
return 1;
case NEG_LARGE: /* Large negative number: 0x08, ~E, ~M */
skipVaruint64(src, DESCENDING != ord);
skipSignificand(src, DESCENDING != ord);
return src.getPosition() - start;
case NEG_MED_MIN: /* Medium negative number: 0x13-E, ~M */
case NEG_MED_MIN + 0x01:
case NEG_MED_MIN + 0x02:
case NEG_MED_MIN + 0x03:
case NEG_MED_MIN + 0x04:
case NEG_MED_MIN + 0x05:
case NEG_MED_MIN + 0x06:
case NEG_MED_MIN + 0x07:
case NEG_MED_MIN + 0x08:
case NEG_MED_MIN + 0x09:
case NEG_MED_MAX:
skipSignificand(src, DESCENDING != ord);
return src.getPosition() - start;
case NEG_SMALL: /* Small negative number: 0x14, -E, ~M */
skipVaruint64(src, DESCENDING == ord);
skipSignificand(src, DESCENDING != ord);
return src.getPosition() - start;
case ZERO:
return 1;
case POS_SMALL: /* Small positive number: 0x16, ~-E, M */
skipVaruint64(src, DESCENDING != ord);
skipSignificand(src, DESCENDING == ord);
return src.getPosition() - start;
case POS_MED_MIN: /* Medium positive number: 0x17+E, M */
case POS_MED_MIN + 0x01:
case POS_MED_MIN + 0x02:
case POS_MED_MIN + 0x03:
case POS_MED_MIN + 0x04:
case POS_MED_MIN + 0x05:
case POS_MED_MIN + 0x06:
case POS_MED_MIN + 0x07:
case POS_MED_MIN + 0x08:
case POS_MED_MIN + 0x09:
case POS_MED_MAX:
skipSignificand(src, DESCENDING == ord);
return src.getPosition() - start;
case POS_LARGE: /* Large positive number: 0x22, E, M */
skipVaruint64(src, DESCENDING == ord);
skipSignificand(src, DESCENDING == ord);
return src.getPosition() - start;
case POS_INF:
return 1;
case NAN:
return 1;
case FIXED_INT8:
src.setPosition(src.getPosition() + 1);
return src.getPosition() - start;
case FIXED_INT16:
src.setPosition(src.getPosition() + 2);
return src.getPosition() - start;
case FIXED_INT32:
src.setPosition(src.getPosition() + 4);
return src.getPosition() - start;
case FIXED_INT64:
src.setPosition(src.getPosition() + 8);
return src.getPosition() - start;
case FIXED_FLOAT32:
src.setPosition(src.getPosition() + 4);
return src.getPosition() - start;
case FIXED_FLOAT64:
src.setPosition(src.getPosition() + 8);
return src.getPosition() - start;
case TEXT:
// for null-terminated values, skip to the end.
do {
header = ord.apply(src.get());
} while (header != TERM);
return src.getPosition() - start;
case BLOB_VAR:
// read until we find a 0 in the MSB
do {
header = ord.apply(src.get());
} while ((byte) (header & 0x80) != TERM);
return src.getPosition() - start;
case BLOB_COPY:
if (Order.DESCENDING == ord) {
// if descending, read to termination byte.
do {
header = ord.apply(src.get());
} while (header != TERM);
return src.getPosition() - start;
} else {
// otherwise, just skip to the end.
src.setPosition(src.getLength());
return src.getPosition() - start;
}
default:
throw unexpectedHeader(header);
}
} | 3.68 |
hbase_TableMapReduceUtil_findContainingJar | /**
* Find a jar that contains a class of the same name, if any. It will return a jar file, even if
* that is not the first thing on the class path that has a class with the same name. Looks first
* on the classpath and then in the <code>packagedClasses</code> map.
* @param my_class the class to find.
* @return a jar file that contains the class, or null.
*/
private static String findContainingJar(Class<?> my_class, Map<String, String> packagedClasses)
throws IOException {
ClassLoader loader = my_class.getClassLoader();
String class_file = my_class.getName().replaceAll("\\.", "/") + ".class";
if (loader != null) {
// first search the classpath
for (Enumeration<URL> itr = loader.getResources(class_file); itr.hasMoreElements();) {
URL url = itr.nextElement();
if ("jar".equals(url.getProtocol())) {
String toReturn = url.getPath();
if (toReturn.startsWith("file:")) {
toReturn = toReturn.substring("file:".length());
}
// URLDecoder is a misnamed class, since it actually decodes
// x-www-form-urlencoded MIME type rather than actual
// URL encoding (which the file path has). Therefore it would
// decode +s to ' 's which is incorrect (spaces are actually
// either unencoded or encoded as "%20"). Replace +s first, so
// that they are kept sacred during the decoding process.
toReturn = toReturn.replaceAll("\\+", "%2B");
toReturn = URLDecoder.decode(toReturn, "UTF-8");
return toReturn.replaceAll("!.*$", "");
}
}
}
// now look in any jars we've packaged using JarFinder. Returns null when
// no jar is found.
return packagedClasses.get(class_file);
} | 3.68 |
flink_WindowedStream_sum | /**
 * Applies an aggregation that sums the elements of the pojo data stream at the given field for
 * every window.
*
* <p>A field expression is either the name of a public field or a getter method with
* parentheses of the stream's underlying type. A dot can be used to drill down into objects, as
* in {@code "field1.getInnerField2()" }.
*
* @param field The field to sum
* @return The transformed DataStream.
*/
public SingleOutputStreamOperator<T> sum(String field) {
return aggregate(new SumAggregator<>(field, input.getType(), input.getExecutionConfig()));
} | 3.68 |
hudi_BaseHoodieWriteClient_doInitTable | /**
 * Performs necessary bootstrapping operations (for example, validating whether the Metadata Table has to be bootstrapped).
*
* <p>NOTE: THIS OPERATION IS EXECUTED UNDER LOCK, THEREFORE SHOULD AVOID ANY OPERATIONS
* NOT REQUIRING EXTERNAL SYNCHRONIZATION
*
 * @param operationType the write operation type
 * @param metaClient instance of {@link HoodieTableMetaClient}
* @param instantTime current inflight instant time
*/
protected void doInitTable(WriteOperationType operationType, HoodieTableMetaClient metaClient, Option<String> instantTime) {
Option<HoodieInstant> ownerInstant = Option.empty();
if (instantTime.isPresent()) {
ownerInstant = Option.of(new HoodieInstant(true, CommitUtils.getCommitActionType(operationType, metaClient.getTableType()), instantTime.get()));
}
this.txnManager.beginTransaction(ownerInstant, Option.empty());
try {
tryUpgrade(metaClient, instantTime);
initMetadataTable(instantTime);
} finally {
this.txnManager.endTransaction(ownerInstant);
}
} | 3.68 |
hmily_DatabaseTypeFactory_getDatabaseTypeByURL | /**
* Get database type by URL.
*
* @param url database URL
* @return database type
*/
public static DatabaseType getDatabaseTypeByURL(final String url) {
return DATABASE_TYPES.values().stream().filter(each -> matchURLs(url, each)).findAny().orElse(DATABASE_TYPES.get("MySQL"));
} | 3.68 |
flink_StreamExecutionEnvironment_createLocalEnvironmentWithWebUI | /**
* Creates a {@link LocalStreamEnvironment} for local program execution that also starts the web
* monitoring UI.
*
* <p>The local execution environment will run the program in a multi-threaded fashion in the
* same JVM as the environment was created in. It will use the parallelism specified in the
* parameter.
*
* <p>If the configuration key 'rest.port' was set in the configuration, that particular port
* will be used for the web UI. Otherwise, the default port (8081) will be used.
*/
@PublicEvolving
public static StreamExecutionEnvironment createLocalEnvironmentWithWebUI(Configuration conf) {
checkNotNull(conf, "conf");
if (!conf.contains(RestOptions.PORT)) {
// explicitly set this option so that it's not set to 0 later
conf.setInteger(RestOptions.PORT, RestOptions.PORT.defaultValue());
}
return createLocalEnvironment(conf);
} | 3.68 |
hudi_OrcUtils_getHoodieKeyIterator | /**
* Provides a closable iterator for reading the given ORC file.
*
* @param configuration configuration to build fs object
* @param filePath The ORC file path
* @return {@link ClosableIterator} of {@link HoodieKey}s for reading the ORC file
*/
@Override
public ClosableIterator<HoodieKey> getHoodieKeyIterator(Configuration configuration, Path filePath) {
try {
Configuration conf = new Configuration(configuration);
conf.addResource(FSUtils.getFs(filePath.toString(), conf).getConf());
Reader reader = OrcFile.createReader(filePath, OrcFile.readerOptions(conf));
Schema readSchema = HoodieAvroUtils.getRecordKeyPartitionPathSchema();
TypeDescription orcSchema = AvroOrcUtils.createOrcSchema(readSchema);
RecordReader recordReader = reader.rows(new Options(conf).schema(orcSchema));
List<String> fieldNames = orcSchema.getFieldNames();
// column indices for the RECORD_KEY_METADATA_FIELD, PARTITION_PATH_METADATA_FIELD fields
int keyCol = -1;
int partitionCol = -1;
for (int i = 0; i < fieldNames.size(); i++) {
if (fieldNames.get(i).equals(HoodieRecord.RECORD_KEY_METADATA_FIELD)) {
keyCol = i;
}
if (fieldNames.get(i).equals(HoodieRecord.PARTITION_PATH_METADATA_FIELD)) {
partitionCol = i;
}
}
if (keyCol == -1 || partitionCol == -1) {
throw new HoodieException(String.format("Couldn't find row keys or partition path in %s.", filePath));
}
return new OrcReaderIterator<>(recordReader, readSchema, orcSchema);
} catch (IOException e) {
throw new HoodieIOException("Failed to open reader from ORC file:" + filePath, e);
}
} | 3.68 |
pulsar_ResourceLockImpl_silentRevalidateOnce | /**
* Revalidate the distributed lock if it is not released.
 * This method is thread-safe; multiple re-validation operations are performed sequentially.
*/
synchronized CompletableFuture<Void> silentRevalidateOnce() {
return sequencer.sequential(() -> revalidate(value))
.thenRun(() -> log.info("Successfully revalidated the lock on {}", path))
.exceptionally(ex -> {
synchronized (ResourceLockImpl.this) {
Throwable realCause = FutureUtil.unwrapCompletionException(ex);
if (realCause instanceof BadVersionException || realCause instanceof LockBusyException) {
log.warn("Failed to revalidate the lock at {}. Marked as expired. {}",
path, realCause.getMessage());
state = State.Released;
expiredFuture.complete(null);
} else {
// We failed to revalidate the lock due to connectivity issue
// Continue assuming we hold the lock, until we can revalidate it, either
// on Reconnected or SessionReestablished events.
revalidateAfterReconnection = true;
log.warn("Failed to revalidate the lock at {}. Retrying later on reconnection {}", path,
realCause.getMessage());
}
}
return null;
});
} | 3.68 |
framework_ComponentSizeValidator_formHasNonRelativeWidthComponent | /**
 * Compatibility handling for the form component, which is defined in a different jar.
*
* TODO : Normally this logic shouldn't be here. But it means that the whole
* this class has wrong design and implementation and should be refactored.
*/
private static boolean formHasNonRelativeWidthComponent(Component form) {
HasComponents parent = (HasComponents) form;
for (Component aParent : parent) {
if (!hasRelativeWidth(aParent)) {
return true;
}
}
return false;
} | 3.68 |
flink_SinkTestSuiteBase_pollAndAppendResultData | /**
* Poll records from the sink.
*
 * @param result The list to which polled records are appended
 * @param reader The sink reader
 * @param expected The expected list, used to decide when to stop polling
 * @param retryTimes The number of retries
 * @param semantic The checkpointing semantic
* @return Collection of records in the Sink
*/
private List<T> pollAndAppendResultData(
List<T> result,
ExternalSystemDataReader<T> reader,
List<T> expected,
int retryTimes,
CheckpointingMode semantic) {
long timeoutMs = 1000L;
int retryIndex = 0;
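// Keep polling the external system until the retry budget is exhausted or enough
// records have been collected for the given checkpointing semantic.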
while (retryIndex++ < retryTimes
&& !checkGetEnoughRecordsWithSemantic(expected, result, semantic)) {
result.addAll(reader.poll(Duration.ofMillis(timeoutMs)));
}
return result;
} | 3.68 |
hudi_HiveSchemaUtil_getSchemaDifference | /**
* Get the schema difference between the storage schema and hive table schema.
*/
public static SchemaDifference getSchemaDifference(MessageType storageSchema, Map<String, String> tableSchema,
List<String> partitionKeys) {
return getSchemaDifference(storageSchema, tableSchema, partitionKeys, false);
} | 3.68 |
flink_FileDataIndexCache_put | /**
* Put regions to cache.
*
* @param subpartition the subpartition's id of regions.
* @param fileRegions regions to be cached.
*/
public void put(int subpartition, List<T> fileRegions) {
TreeMap<Integer, T> treeMap = subpartitionFirstBufferIndexRegions.get(subpartition);
for (T region : fileRegions) {
internalCache.put(
new CachedRegionKey(subpartition, region.getFirstBufferIndex()), PLACEHOLDER);
treeMap.put(region.getFirstBufferIndex(), region);
}
} | 3.68 |
AreaShop_RegionGroup_removeMember | /**
* Remove a member from the group.
* @param region The region to remove
* @return true if the region was in the group before, otherwise false
*/
public boolean removeMember(GeneralRegion region) {
if(regions.remove(region.getName())) {
setSetting("regions", new ArrayList<>(regions));
saveRequired();
return true;
}
return false;
} | 3.68 |
rocketmq-connect_DefaultJdbcRecordBinder_getSqlTypeForSchema | /**
* Dialects not supporting `setObject(index, null)` can override this method
* to provide a specific sqlType, as per the JDBC documentation
*
* @param schema the schema
* @return the SQL type
*/
protected Integer getSqlTypeForSchema(Schema schema) {
return null;
} | 3.68 |
hadoop_FsCommand_getCommandName | // historical abstract method in Command
@Override
public String getCommandName() {
return getName();
} | 3.68 |
flink_OpFusionCodegenSpecGenerator_setup | /**
 * Initializes the information needed by the operator spec generator. This method must be called
 * before the produce- and consume-related methods.
*/
public void setup(Context context) {
this.managedMemoryFraction = context.getManagedMemoryFraction();
this.opFusionCodegenSpec.setup(opFusionContext);
} | 3.68 |
morf_AbstractSqlDialectTest_testLeftTrim | /**
* Tests that Left Trim functionality works.
*/
@Test
public void testLeftTrim() {
// Given
Function leftTrim = leftTrim(new FieldReference("field1"));
SelectStatement selectStatement = new SelectStatement(leftTrim).from(new TableReference("schedule"));
// When
String result = testDialect.convertStatementToSQL(selectStatement);
// Then
assertEquals("Left Trim script should match expected", expectedLeftTrim(), result);
} | 3.68 |
druid_SQLCommitStatement_isWrite | // oracle
public boolean isWrite() {
return write;
} | 3.68 |
querydsl_CollectionExpressionBase_contains | /**
* Create a {@code this.contains(child)} expression
*
* <p>Evaluates to true, if child is contained in this</p>
*
* @param child element to check
* @return this.contains(child)
*/
public final BooleanExpression contains(Expression<E> child) {
return Expressions.booleanOperation(Ops.IN, child, mixin);
} | 3.68 |
querydsl_MathExpressions_log | /**
* Create a {@code log(num, base)} expression
*
* @param num numeric expression
* @param base base
* @return log(num, base)
*/
public static <A extends Number & Comparable<?>> NumberExpression<Double> log(Expression<A> num, int base) {
return Expressions.numberOperation(Double.class, Ops.MathOps.LOG, num, ConstantImpl.create(base));
} | 3.68 |
flink_SingleInputGateFactory_create | /** Creates an input gate and all of its input channels. */
public SingleInputGate create(
@Nonnull ShuffleIOOwnerContext owner,
int gateIndex,
@Nonnull InputGateDeploymentDescriptor igdd,
@Nonnull PartitionProducerStateProvider partitionProducerStateProvider,
@Nonnull InputChannelMetrics metrics) {
GateBuffersSpec gateBuffersSpec =
createGateBuffersSpec(
maxRequiredBuffersPerGate,
configuredNetworkBuffersPerChannel,
floatingNetworkBuffersPerGate,
igdd.getConsumedPartitionType(),
calculateNumChannels(
igdd.getShuffleDescriptors().length,
igdd.getConsumedSubpartitionIndexRange()),
tieredStorageConfiguration != null);
SupplierWithException<BufferPool, IOException> bufferPoolFactory =
createBufferPoolFactory(
networkBufferPool,
gateBuffersSpec.getRequiredFloatingBuffers(),
gateBuffersSpec.getTotalFloatingBuffers());
BufferDecompressor bufferDecompressor = null;
if (igdd.getConsumedPartitionType().supportCompression()
&& batchShuffleCompressionEnabled) {
bufferDecompressor = new BufferDecompressor(networkBufferSize, compressionCodec);
}
final String owningTaskName = owner.getOwnerName();
final MetricGroup networkInputGroup = owner.getInputGroup();
IndexRange subpartitionIndexRange = igdd.getConsumedSubpartitionIndexRange();
TieredStorageConsumerClient tieredStorageConsumerClient = null;
List<TieredStorageConsumerSpec> tieredStorageConsumerSpecs = null;
if (tieredStorageConfiguration != null) {
ShuffleDescriptor[] shuffleDescriptors = igdd.getShuffleDescriptors();
tieredStorageConsumerSpecs = new ArrayList<>();
for (ShuffleDescriptor shuffleDescriptor : shuffleDescriptors) {
TieredStoragePartitionId partitionId =
TieredStorageIdMappingUtils.convertId(
shuffleDescriptor.getResultPartitionID());
for (int index = subpartitionIndexRange.getStartIndex();
index <= subpartitionIndexRange.getEndIndex();
++index) {
TieredStorageSubpartitionId subpartitionId =
new TieredStorageSubpartitionId(index);
tieredStorageConsumerSpecs.add(
new TieredStorageConsumerSpec(partitionId, subpartitionId));
}
}
tieredStorageConsumerClient =
new TieredStorageConsumerClient(
tieredStorageConfiguration.getTierFactories(),
tieredStorageConsumerSpecs,
tieredStorageNettyService);
}
SingleInputGate inputGate =
new SingleInputGate(
owningTaskName,
gateIndex,
igdd.getConsumedResultId(),
igdd.getConsumedPartitionType(),
subpartitionIndexRange,
calculateNumChannels(
igdd.getShuffleDescriptors().length, subpartitionIndexRange),
partitionProducerStateProvider,
bufferPoolFactory,
bufferDecompressor,
networkBufferPool,
networkBufferSize,
new ThroughputCalculator(SystemClock.getInstance()),
maybeCreateBufferDebloater(
owningTaskName, gateIndex, networkInputGroup.addGroup(gateIndex)),
tieredStorageConsumerClient,
tieredStorageNettyService,
tieredStorageConsumerSpecs);
createInputChannels(
owningTaskName, igdd, inputGate, subpartitionIndexRange, gateBuffersSpec, metrics);
return inputGate;
} | 3.68 |
flink_BatchTask_getOutputCollector | /**
* Creates the {@link Collector} for the given task, as described by the given configuration.
* The output collector contains the writers that forward the data to the different tasks that
* the given task is connected to. Each writer applies the partitioning as described in the
* configuration.
*
* @param task The task that the output collector is created for.
* @param config The configuration describing the output shipping strategies.
* @param cl The classloader used to load user defined types.
* @param eventualOutputs The output writers that this task forwards to the next task for each
* output.
* @param outputOffset The offset to start to get the writers for the outputs
* @param numOutputs The number of outputs described in the configuration.
* @return The OutputCollector that data produced in this task is submitted to.
*/
public static <T> Collector<T> getOutputCollector(
AbstractInvokable task,
TaskConfig config,
ClassLoader cl,
List<RecordWriter<?>> eventualOutputs,
int outputOffset,
int numOutputs)
throws Exception {
if (numOutputs == 0) {
return null;
}
// get the factory for the serializer
final TypeSerializerFactory<T> serializerFactory = config.getOutputSerializer(cl);
final List<RecordWriter<SerializationDelegate<T>>> writers = new ArrayList<>(numOutputs);
// create a writer for each output
for (int i = 0; i < numOutputs; i++) {
// create the OutputEmitter from output ship strategy
final ShipStrategyType strategy = config.getOutputShipStrategy(i);
final int indexInSubtaskGroup = task.getIndexInSubtaskGroup();
final TypeComparatorFactory<T> compFactory = config.getOutputComparator(i, cl);
final ChannelSelector<SerializationDelegate<T>> oe;
if (compFactory == null) {
oe = new OutputEmitter<>(strategy, indexInSubtaskGroup);
} else {
final DataDistribution dataDist = config.getOutputDataDistribution(i, cl);
final Partitioner<?> partitioner = config.getOutputPartitioner(i, cl);
final TypeComparator<T> comparator = compFactory.createComparator();
oe =
new OutputEmitter<>(
strategy, indexInSubtaskGroup, comparator, partitioner, dataDist);
}
final RecordWriter<SerializationDelegate<T>> recordWriter =
new RecordWriterBuilder()
.setChannelSelector(oe)
.setTaskName(
task.getEnvironment().getTaskInfo().getTaskNameWithSubtasks())
.build(task.getEnvironment().getWriter(outputOffset + i));
recordWriter.setMetricGroup(task.getEnvironment().getMetricGroup().getIOMetricGroup());
writers.add(recordWriter);
}
if (eventualOutputs != null) {
eventualOutputs.addAll(writers);
}
return new OutputCollector<>(writers, serializerFactory.getSerializer());
} | 3.68 |
framework_DragHandle_setCallback | /**
* Sets the user-facing drag handle callback method. This allows code using
 * the DragHandle to react when a drag handle is first touched, when it is
 * moved, and when it is released.
*
* @param dragHandleCallback
* the callback object to use (can be null)
* @since 7.7.5
*/
public void setCallback(DragHandleCallback dragHandleCallback) {
userCallback = dragHandleCallback;
} | 3.68 |
hbase_BootstrapNodeManager_getFromRegionServer | // this method is also used to test whether a given region server is still alive.
private void getFromRegionServer() {
if (
EnvironmentEdgeManager.currentTime() - lastRequestMasterTime
>= TimeUnit.SECONDS.toMillis(requestMasterIntervalSecs)
) {
// schedule a get-from-master task immediately if we haven't requested the master for more
// than requestMasterIntervalSecs
executor.execute(this::getFromMaster);
return;
}
List<ServerName> currentList = this.nodes;
ServerName peer = currentList.get(ThreadLocalRandom.current().nextInt(currentList.size()));
List<ServerName> otherList;
try {
otherList = FutureUtils.get(conn.getAllBootstrapNodes(peer));
} catch (IOException e) {
LOG.warn("failed to request region server {}", peer, e);
// remove this region server from the list since it can not respond successfully
List<ServerName> newList = currentList.stream().filter(sn -> sn != peer)
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
this.nodes = newList;
if (newList.size() < maxNodeCount) {
// schedule a get from master task immediately
executor.execute(this::getFromMaster);
} else {
executor.schedule(this::getFromRegionServer, getDelay(requestRegionServerIntervalSecs),
TimeUnit.SECONDS);
}
return;
}
// randomly select new live region server list
Set<ServerName> newRegionServers = new HashSet<ServerName>(currentList);
newRegionServers.addAll(otherList);
List<ServerName> newList = new ArrayList<ServerName>(newRegionServers);
Collections.shuffle(newList, ThreadLocalRandom.current());
int expectedListSize = maxNodeCount * 2;
if (newList.size() <= expectedListSize) {
this.nodes = Collections.unmodifiableList(newList);
} else {
this.nodes =
Collections.unmodifiableList(new ArrayList<>(newList.subList(0, expectedListSize)));
}
// schedule a new get from region server task
executor.schedule(this::getFromRegionServer, requestRegionServerIntervalSecs, TimeUnit.SECONDS);
} | 3.68 |
zxing_LocaleManager_getBookSearchCountryTLD | /**
* The same as above, but specifically for Google Book Search.
*
* @param context application's {@link Context}
* @return The top-level domain to use.
*/
public static String getBookSearchCountryTLD(Context context) {
return doGetTLD(GOOGLE_BOOK_SEARCH_COUNTRY_TLD, context);
} | 3.68 |
hadoop_AzureNativeFileSystemStore_buildUpList | /**
* Build up a metadata list of blobs in an Azure blob directory. This method
 * uses an in-order, depth-first traversal of blob directory structures to maintain
* the sorted order of the blob names.
*
* @param aCloudBlobDirectory Azure blob directory
* @param metadataHashMap a map of file metadata objects for each
* non-directory blob.
* @param maxListingCount maximum length of the built up list.
*/
private void buildUpList(CloudBlobDirectoryWrapper aCloudBlobDirectory,
HashMap<String, FileMetadata> metadataHashMap, final int maxListingCount,
final int maxListingDepth) throws Exception {
// Push the blob directory onto the stack.
//
AzureLinkedStack<Iterator<ListBlobItem>> dirIteratorStack =
new AzureLinkedStack<Iterator<ListBlobItem>>();
Iterable<ListBlobItem> blobItems = aCloudBlobDirectory.listBlobs(null,
false, EnumSet.of(BlobListingDetails.METADATA), null,
getInstrumentedContext());
Iterator<ListBlobItem> blobItemIterator = blobItems.iterator();
if (0 == maxListingDepth || 0 == maxListingCount) {
// Recursion depth and listing count are already exhausted. Return
// immediately.
return;
}
// The directory listing depth is unbounded if the maximum listing depth
// is negative.
final boolean isUnboundedDepth = (maxListingDepth < 0);
// Reset the current directory listing depth.
int listingDepth = 1;
// Loop until all directories have been traversed in-order. Loop only
// while the following conditions are satisfied:
// (1) The stack is not empty, and
// (2) maxListingCount > 0 implies that the number of items in the
// metadata list is less than the max listing count.
while (null != blobItemIterator
&& (maxListingCount <= 0 || metadataHashMap.size() < maxListingCount)) {
while (blobItemIterator.hasNext()) {
// Check if the count of items on the list exhausts the maximum
// listing count.
//
if (0 < maxListingCount && metadataHashMap.size() >= maxListingCount) {
break;
}
ListBlobItem blobItem = blobItemIterator.next();
// Add the file metadata to the list if this is not a blob
// directory item.
//
if (blobItem instanceof CloudBlockBlobWrapper || blobItem instanceof CloudPageBlobWrapper) {
String blobKey = null;
CloudBlobWrapper blob = (CloudBlobWrapper) blobItem;
BlobProperties properties = blob.getProperties();
// Determine format of the blob name depending on whether an absolute
// path is being used or not.
blobKey = normalizeKey(blob);
FileMetadata metadata;
if (retrieveFolderAttribute(blob)) {
metadata = new FileMetadata(blobKey,
properties.getLastModified().getTime(),
getPermissionStatus(blob),
BlobMaterialization.Explicit,
hadoopBlockSize);
} else {
metadata = new FileMetadata(
blobKey,
getDataLength(blob, properties),
properties.getLastModified().getTime(),
getPermissionStatus(blob),
hadoopBlockSize);
}
// Add the metadata but remove duplicates. Note that the azure
// storage java SDK returns two types of entries: CloudBlobWrapper
// and CloudDirectoryWrapper. In the case where WASB generated the
// data, there will be an empty blob for each "directory", and we will
// receive a CloudBlobWrapper. If there are also files within this
// "directory", we will also receive a CloudDirectoryWrapper. To
// complicate matters, the data may not be generated by WASB, in
// which case we may not have an empty blob for each "directory".
// So, sometimes we receive both a CloudBlobWrapper and a
// CloudDirectoryWrapper for each directory, and sometimes we receive
// one or the other but not both. We remove duplicates, but
// prefer CloudBlobWrapper over CloudDirectoryWrapper.
// Furthermore, it is very unfortunate that the list results are not
// ordered, and it is a partial list which uses continuation. So
// the HashMap is the best structure to remove the duplicates, despite
// its potential large size.
metadataHashMap.put(blobKey, metadata);
} else if (blobItem instanceof CloudBlobDirectoryWrapper) {
CloudBlobDirectoryWrapper directory = (CloudBlobDirectoryWrapper) blobItem;
// This is a directory blob, push the current iterator onto
// the stack of iterators and start iterating through the current
// directory.
if (isUnboundedDepth || maxListingDepth > listingDepth) {
// Push the current directory on the stack and increment the listing
// depth.
dirIteratorStack.push(blobItemIterator);
++listingDepth;
// The current blob item represents the new directory. Get
// an iterator for this directory and continue by iterating through
// this directory.
blobItems = directory.listBlobs(null, false,
EnumSet.noneOf(BlobListingDetails.class), null,
getInstrumentedContext());
blobItemIterator = blobItems.iterator();
} else {
// Determine format of directory name depending on whether an
// absolute path is being used or not.
String dirKey = normalizeKey(directory);
// Add the directory metadata to the list only if it's not already
// there. See earlier note, we prefer CloudBlobWrapper over
// CloudDirectoryWrapper because it may have additional metadata (
// properties and ACLs).
if (!metadataHashMap.containsKey(dirKey)) {
// Reached the targeted listing depth. Return metadata for the
// directory using default permissions.
//
// Note: Something smarter should be done about permissions. Maybe
// inherit the permissions of the first non-directory blob.
// Also, getting a proper value for last-modified is tricky.
//
FileMetadata directoryMetadata = new FileMetadata(dirKey,
0,
defaultPermissionNoBlobMetadata(),
BlobMaterialization.Implicit,
hadoopBlockSize);
// Add the directory metadata to the list.
metadataHashMap.put(dirKey, directoryMetadata);
}
}
}
}
// Traversal of directory tree
// Check if the iterator stack is empty. If it is set the next blob
// iterator to null. This will act as a terminator for the for-loop.
// Otherwise pop the next iterator from the stack and continue looping.
//
if (dirIteratorStack.isEmpty()) {
blobItemIterator = null;
} else {
// Pop the next directory item from the stack and decrement the
// depth.
blobItemIterator = dirIteratorStack.pop();
--listingDepth;
// Assertion: Listing depth should not be less than zero.
if (listingDepth < 0) {
throw new AssertionError("Non-negative listing depth expected");
}
}
}
} | 3.68 |
hadoop_PublishedConfiguration_putValues | /**
* Set the values from an iterable (this includes a Hadoop Configuration
* and Java properties object).
* Any existing value set is discarded
* @param entries entries to put
*/
public void putValues(Iterable<Map.Entry<String, String>> entries) {
this.entries = new HashMap<String, String>();
for (Map.Entry<String, String> entry : entries) {
this.entries.put(entry.getKey(), entry.getValue());
}
} | 3.68 |
hbase_Sleeper_sleep | /**
* Sleep for period.
*/
public void sleep() {
sleep(this.period);
} | 3.68 |
streampipes_FileManager_cleanFile | /**
* Remove Byte Order Mark (BOM) from csv files
*
 * @param fileInputStream the input stream of the file to clean
 * @param filetype the file extension of the file
 * @return the input stream with the BOM removed for CSV files, otherwise the unchanged stream
*/
public static InputStream cleanFile(InputStream fileInputStream, String filetype) {
if (Filetypes.CSV.getFileExtensions().contains(filetype.toLowerCase())) {
fileInputStream = new BOMInputStream(fileInputStream);
}
return fileInputStream;
} | 3.68 |
framework_AbstractRemoteDataSource_setRowData | /**
* Informs this data source that updated data has been sent from the server.
*
* @param firstRowIndex
* the index of the first received row
* @param rowData
* a list of rows, starting from <code>firstRowIndex</code>
*/
protected void setRowData(int firstRowIndex, List<T> rowData) {
assert firstRowIndex + rowData.size() <= size();
Profiler.enter("AbstractRemoteDataSource.setRowData");
Range received = Range.withLength(firstRowIndex, rowData.size());
if (isWaitingForData()) {
cacheStrategy.onDataArrive(
Duration.currentTimeMillis()
- currentRequestCallback.requestStart,
received.length());
currentRequestCallback = null;
}
Range maxCacheRange = getMaxCacheRange(received);
Range[] partition = received.partitionWith(maxCacheRange);
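// partitionWith splits the received range into the parts before, inside and after the
// maximum cache range; only the middle part (partition[1]) is new useful data.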
Range newUsefulData = partition[1];
if (!newUsefulData.isEmpty()) {
if (!cached.isEmpty())
discardStaleCacheEntries();
// Update the parts that are actually inside
int start = newUsefulData.getStart();
for (int i = start; i < newUsefulData.getEnd(); i++) {
final T row = rowData.get(i - firstRowIndex);
indexToRowMap.put(Integer.valueOf(i), row);
keyToIndexMap.put(getRowKey(row), Integer.valueOf(i));
}
Profiler.enter(
"AbstractRemoteDataSource.setRowData notify dataChangeHandler");
int length = newUsefulData.length();
getHandlers().forEach(dch -> dch.dataUpdated(start, length));
Profiler.leave(
"AbstractRemoteDataSource.setRowData notify dataChangeHandler");
// Potentially extend the range
if (cached.isEmpty()) {
cached = newUsefulData;
} else {
/*
* everything might've become stale so we need to re-check for
* emptiness.
*/
if (!cached.isEmpty()) {
cached = cached.combineWith(newUsefulData);
// Attempt to restore invalidated items
if (trackInvalidatedRows) {
fillCacheFromInvalidatedRows(maxCacheRange);
}
} else {
cached = newUsefulData;
}
}
getHandlers().forEach(dch -> dch.dataAvailable(cached.getStart(),
cached.length()));
updatePinnedRows(rowData);
}
if (!partition[0].isEmpty() || !partition[2].isEmpty()) {
/*
* FIXME
*
* Got data that we might need in a moment if the container is
* updated before the widget settings. Support for this will be
* implemented later on.
*/
// Run a dummy drop from cache for unused rows.
for (int i = 0; i < partition[0].length(); ++i) {
onDropFromCache(i + partition[0].getStart(), rowData.get(i));
}
for (int i = 0; i < partition[2].length(); ++i) {
onDropFromCache(i + partition[2].getStart(), rowData.get(i));
}
}
// Eventually check whether all needed rows are now available
ensureCoverageCheck();
Profiler.leave("AbstractRemoteDataSource.setRowData");
} | 3.68 |
dubbo_InstanceAddressURL_getAnyMethodParameter | /**
 * Gets the method-level value of the specified key.
 *
 * @param key the parameter key, without the method prefix
 * @return the first matching method-level parameter value, or null if none is found
*/
@Override
public String getAnyMethodParameter(String key) {
if (consumerParamFirst(key)) {
URL consumerUrl = RpcContext.getServiceContext().getConsumerUrl();
if (consumerUrl != null) {
String v = consumerUrl.getAnyMethodParameter(key);
if (StringUtils.isNotEmpty(v)) {
return v;
}
}
}
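// Fall back to the method-level parameters in the matched service metadata; keys are
// stored as "<method>.<key>", so look for any key ending with the given suffix.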
String suffix = "." + key;
String protocolServiceKey = getProtocolServiceKey();
if (StringUtils.isNotEmpty(protocolServiceKey)) {
MetadataInfo.ServiceInfo serviceInfo = getServiceInfo(protocolServiceKey);
if (null == serviceInfo) {
return null;
}
for (String fullKey : serviceInfo.getAllParams().keySet()) {
if (fullKey.endsWith(suffix)) {
return getParameter(fullKey);
}
}
}
return null;
} | 3.68 |
open-banking-gateway_CreateConsentOrPaymentPossibleErrorHandler_tryCreateAndHandleErrors | /**
* Swallows retryable (like wrong IBAN) consent initiation exceptions.
* @param tryCreate Consent/payment creation function to call
*/
public <T> T tryCreateAndHandleErrors(DelegateExecution execution, Supplier<T> tryCreate) {
try {
return tryCreate.get();
} catch (ErrorResponseException ex) {
log.debug("Trying to handle ErrorResponseException", ex);
tryHandleWrongIbanOrCredentialsExceptionOrOauth2(execution, ex);
return null;
} catch (OAuthException ex) {
log.debug("Trying to handle OAuthException", ex);
tryHandleOauth2Exception(execution);
return null;
} catch (RequestAuthorizationValidationException ex) {
log.debug("Trying to handle AccessTokenException", ex);
tryHandleRequestAuthorizationValidationException(execution);
return null;
}
} | 3.68 |
hadoop_RecordCreatorFactory_setHost | /**
* Set the host name.
* @param host the host name.
*/
void setHost(Name host) {
this.host = host;
} | 3.68 |
hbase_MutableRegionInfo_getEncodedName | /** Returns the encoded region name */
@Override
public String getEncodedName() {
return this.encodedName;
} | 3.68 |
hbase_HBaseTestingUtility_ensureSomeRegionServersAvailable | /**
* Make sure that at least the specified number of region servers are running
* @param num minimum number of region servers that should be running
* @return true if we started some servers
*/
public boolean ensureSomeRegionServersAvailable(final int num) throws IOException {
boolean startedServer = false;
MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
for (int i = hbaseCluster.getLiveRegionServerThreads().size(); i < num; ++i) {
LOG.info("Started new server=" + hbaseCluster.startRegionServer());
startedServer = true;
}
return startedServer;
} | 3.68 |
flink_HiveParserTypeCheckCtx_setUnparseTranslator | /** @param unparseTranslator the unparseTranslator to set */
public void setUnparseTranslator(HiveParserUnparseTranslator unparseTranslator) {
this.unparseTranslator = unparseTranslator;
} | 3.68 |
morf_AbstractSqlDialectTest_expectedSqlForMathOperations10 | /**
* @return expected SQL for math operation 10
*/
protected String expectedSqlForMathOperations10() {
return "(a + b + (c / d) + e + 100 + f) / 5";
} | 3.68 |
morf_MergeStatement_drive | /**
* @see org.alfasoftware.morf.util.ObjectTreeTraverser.Driver#drive(ObjectTreeTraverser)
*/
@Override
public void drive(ObjectTreeTraverser traverser) {
traverser
.dispatch(getTableUniqueKey())
.dispatch(getIfUpdating())
.dispatch(getTable())
.dispatch(getSelectStatement());
} | 3.68 |
flink_Tuple11_setFields | /**
* Sets new values to all fields of the tuple.
*
* @param f0 The value for field 0
* @param f1 The value for field 1
* @param f2 The value for field 2
* @param f3 The value for field 3
* @param f4 The value for field 4
* @param f5 The value for field 5
* @param f6 The value for field 6
* @param f7 The value for field 7
* @param f8 The value for field 8
* @param f9 The value for field 9
* @param f10 The value for field 10
*/
public void setFields(
T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10) {
this.f0 = f0;
this.f1 = f1;
this.f2 = f2;
this.f3 = f3;
this.f4 = f4;
this.f5 = f5;
this.f6 = f6;
this.f7 = f7;
this.f8 = f8;
this.f9 = f9;
this.f10 = f10;
} | 3.68 |
hbase_HFileBlock_getUncompressedSizeWithoutHeader | /**
* The uncompressed size of the block data. Does not include header size.
*/
int getUncompressedSizeWithoutHeader() {
expectState(State.BLOCK_READY);
return baosInMemory.size() - HConstants.HFILEBLOCK_HEADER_SIZE;
} | 3.68 |
hadoop_TimelineReaderWebServicesUtils_parseMetricFilters | /**
* Parses metric filters.
*
* @param expr Metric filter expression to be parsed.
* @return a {@link TimelineFilterList} object.
* @throws TimelineParseException if any problem occurs during parsing.
*/
static TimelineFilterList parseMetricFilters(String expr)
throws TimelineParseException {
return parseFilters(new TimelineParserForNumericFilters(expr));
} | 3.68 |
framework_PasswordField_writeDesign | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.AbstractTextField#writeDesign(org.jsoup.nodes.Element
* , com.vaadin.ui.declarative.DesignContext)
*/
@Override
public void writeDesign(Element design, DesignContext designContext) {
super.writeDesign(design, designContext);
AbstractTextField def = (AbstractTextField) designContext
.getDefaultInstance(this);
Attributes attr = design.attributes();
DesignAttributeHandler.writeAttribute("value", attr, getValue(),
def.getValue(), String.class, designContext);
} | 3.68 |
flink_ColumnOperationUtils_addOrReplaceColumns | /**
 * Creates a projection list that adds new columns or replaces existing ones (if a column with
 * the corresponding name already exists).
*
* <p><b>NOTE:</b> Resulting expression are still unresolved.
*
* @param inputFields names of current columns
* @param newExpressions new columns to add
* @return projection expressions
*/
static List<Expression> addOrReplaceColumns(
List<String> inputFields, List<Expression> newExpressions) {
LinkedHashMap<String, Expression> finalFields = new LinkedHashMap<>();
inputFields.forEach(field -> finalFields.put(field, unresolvedRef(field)));
newExpressions.forEach(
expr -> {
String name = extractName(expr).orElse(expr.toString());
finalFields.put(name, expr);
});
return new ArrayList<>(finalFields.values());
} | 3.68 |
flink_TableColumn_getType | /** Returns the data type of this column. */
public DataType getType() {
return this.type;
} | 3.68 |
hudi_HoodieAvroReadSupport_convertLegacyMap | /**
* Convert non-legacy map to legacy map.
*/
private List<Type> convertLegacyMap(List<Type> oldTypes) {
List<Type> newTypes = new ArrayList<>(oldTypes.size());
for (Type type : oldTypes) {
if (!type.isPrimitive()) {
GroupType parent = type.asGroupType();
List<Type> types = convertLegacyMap(parent.getFields());
if (type.getOriginalType() == OriginalType.MAP_KEY_VALUE) {
newTypes.add(new GroupType(parent.getRepetition(), "key_value", types));
} else {
newTypes.add(new GroupType(parent.getRepetition(), parent.getName(), parent.getOriginalType(), types));
}
} else {
newTypes.add(type);
}
}
return newTypes;
} | 3.68 |
shardingsphere-elasticjob_SnapshotService_close | /**
* Close listener.
*/
public void close() {
closed = true;
if (null != serverSocket && !serverSocket.isClosed()) {
try {
serverSocket.close();
} catch (final IOException ex) {
log.error("ElasticJob: Snapshot service close failure, error is: ", ex);
}
}
} | 3.68 |
framework_Notification_getIcon | /**
* Gets the icon part of the notification message.
*
* @return The message icon
*/
public Resource getIcon() {
return getResource("icon");
} | 3.68 |
hudi_HoodieTable_rollbackInflightInstant | /**
* Rollback inflight instant to requested instant
*
* @param inflightInstant Inflight instant
* @param getPendingRollbackInstantFunc Function to get rollback instant
*/
private void rollbackInflightInstant(HoodieInstant inflightInstant,
Function<String, Option<HoodiePendingRollbackInfo>> getPendingRollbackInstantFunc) {
final String commitTime = getPendingRollbackInstantFunc.apply(inflightInstant.getTimestamp()).map(entry
-> entry.getRollbackInstant().getTimestamp())
.orElse(getMetaClient().createNewInstantTime());
scheduleRollback(context, commitTime, inflightInstant, false, config.shouldRollbackUsingMarkers(),
false);
rollback(context, commitTime, inflightInstant, false, false);
getActiveTimeline().revertInstantFromInflightToRequested(inflightInstant);
} | 3.68 |
hbase_WALFactory_getWAL | /**
* @param region the region which we want to get a WAL for. Could be null.
*/
public WAL getWAL(RegionInfo region) throws IOException {
// Use different WAL for hbase:meta. Instantiates the meta WALProvider if not already up.
if (region != null && RegionReplicaUtil.isDefaultReplica(region)) {
if (region.isMetaRegion()) {
return metaProvider.getProvider().getWAL(region);
} else if (ReplicationStorageFactory.isReplicationQueueTable(conf, region.getTable())) {
return replicationProvider.getProvider().getWAL(region);
}
}
return provider.getWAL(region);
} | 3.68 |
flink_CheckpointFailureManager_handleTaskLevelCheckpointException | /**
* Handle task level checkpoint exception with a handler callback.
*
* @param pendingCheckpoint the failed checkpoint used to count the continuous failure number
* based on checkpoint id sequence. In trigger phase, we may not get the checkpoint id when
* the failure happens before the checkpoint id generation. In this case, it will be
* specified a negative latest generated checkpoint id as a special flag.
* @param exception the checkpoint exception.
* @param executionAttemptID the execution attempt id, as a safe guard.
*/
void handleTaskLevelCheckpointException(
PendingCheckpoint pendingCheckpoint,
CheckpointException exception,
ExecutionAttemptID executionAttemptID) {
CheckpointProperties checkpointProps = pendingCheckpoint.getProps();
if (checkpointProps.isSavepoint() && checkpointProps.isSynchronous()) {
failureCallback.failJob(exception);
} else {
checkFailureAgainstCounter(
exception,
pendingCheckpoint.getCheckpointID(),
e -> failureCallback.failJobDueToTaskFailure(e, executionAttemptID));
}
} | 3.68 |
flink_ScriptProcessBuilder_prependPathComponent | /** Prepends the specified component to the path list. */
public void prependPathComponent(String str) {
pathenv = str + pathSep + pathenv;
} | 3.68 |
hbase_ProcedureCoordinator_submitProcedure | /**
* Submit an procedure to kick off its dependent subprocedures.
* @param proc Procedure to execute
* @return <tt>true</tt> if the procedure was started correctly, <tt>false</tt> if the procedure
* or any subprocedures could not be started. Failure could be due to submitting a
* procedure multiple times (or one with the same name), or some sort of IO problem. On
* errors, the procedure's monitor holds a reference to the exception that caused the
* failure.
*/
@SuppressWarnings("FutureReturnValueIgnored")
boolean submitProcedure(Procedure proc) {
// if the submitted procedure was null, then we don't want to run it
if (proc == null) {
return false;
}
String procName = proc.getName();
// make sure we aren't already running a procedure of that name
Procedure oldProc = procedures.get(procName);
if (oldProc != null) {
// procedures are always eventually completed on both successful and failed execution
try {
if (!oldProc.isCompleted()) {
LOG.warn("Procedure " + procName + " currently running. Rejecting new request");
return false;
} else {
LOG.debug("Procedure " + procName
+ " was in running list but was completed. Accepting new attempt.");
if (!procedures.remove(procName, oldProc)) {
LOG.warn("Procedure " + procName
+ " has been resubmitted by another thread. Rejecting this request.");
return false;
}
}
} catch (ForeignException e) {
LOG.debug("Procedure " + procName
+ " was in running list but has exception. Accepting new attempt.");
if (!procedures.remove(procName, oldProc)) {
LOG.warn("Procedure " + procName
+ " has been resubmitted by another thread. Rejecting this request.");
return false;
}
}
}
// kick off the procedure's execution in a separate thread
try {
if (this.procedures.putIfAbsent(procName, proc) == null) {
LOG.debug("Submitting procedure " + procName);
this.pool.submit(proc);
return true;
} else {
LOG.error(
"Another thread has submitted procedure '" + procName + "'. Ignoring this attempt.");
return false;
}
} catch (RejectedExecutionException e) {
LOG.warn("Procedure " + procName + " rejected by execution pool. Propagating error.", e);
// Remove the procedure from the list since is not started
this.procedures.remove(procName, proc);
// the thread pool is full and we can't run the procedure
proc.receive(new ForeignException(procName, e));
}
return false;
} | 3.68 |
hbase_ScannerModel_fromScan | /**
* @param scan the scan specification
* @return a ScannerModel representing the given scan
*/
public static ScannerModel fromScan(Scan scan) throws Exception {
ScannerModel model = new ScannerModel();
model.setStartRow(scan.getStartRow());
model.setEndRow(scan.getStopRow());
Map<byte[], NavigableSet<byte[]>> families = scan.getFamilyMap();
if (families != null) {
for (Map.Entry<byte[], NavigableSet<byte[]>> entry : families.entrySet()) {
if (entry.getValue() != null) {
for (byte[] qualifier : entry.getValue()) {
model.addColumn(Bytes.add(entry.getKey(), COLUMN_DIVIDER, qualifier));
}
} else {
model.addColumn(entry.getKey());
}
}
}
model.setStartTime(scan.getTimeRange().getMin());
model.setEndTime(scan.getTimeRange().getMax());
int caching = scan.getCaching();
if (caching > 0) {
model.setCaching(caching);
}
int batch = scan.getBatch();
if (batch > 0) {
model.setBatch(batch);
}
int maxVersions = scan.getMaxVersions();
if (maxVersions > 0) {
model.setMaxVersions(maxVersions);
}
if (scan.getLimit() > 0) {
model.setLimit(scan.getLimit());
}
Filter filter = scan.getFilter();
if (filter != null) {
model.setFilter(stringifyFilter(filter));
}
// Add the visibility labels if found in the attributes
Authorizations authorizations = scan.getAuthorizations();
if (authorizations != null) {
List<String> labels = authorizations.getLabels();
for (String label : labels) {
model.addLabel(label);
}
}
return model;
} | 3.68 |
framework_VColorPickerGrid_updateColor | /**
* Updates the changed colors within the grid based on the given x- and
* y-coordinates. Nothing happens if any of the parameters is null or the
* parameter lengths don't match.
* <p>
* For internal use only. May be renamed or removed in a future release.
*
* @param changedColor
* the changed colors
* @param changedX
* the x-coordinates for the changed colors
* @param changedY
* the y-coordinates for the changed colors
*/
public void updateColor(String[] changedColor, String[] changedX,
String[] changedY) {
if (changedColor != null && changedX != null && changedY != null) {
if (changedColor.length == changedX.length
&& changedX.length == changedY.length) {
for (int c = 0; c < changedColor.length; c++) {
Element element = grid.getCellFormatter().getElement(
Integer.parseInt(changedX[c]),
Integer.parseInt(changedY[c]));
element.getStyle().setProperty("background",
changedColor[c]);
}
}
gridLoaded = true;
}
} | 3.68 |
morf_ChangeIndex_reverse | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.upgrade.SchemaChange#reverse(org.alfasoftware.morf.metadata.Schema)
*/
@Override
public Schema reverse(Schema schema) {
return applyChange(schema, toIndex, fromIndex);
} | 3.68 |
cron-utils_FieldDefinitionBuilder_optional | /**
* Allows tagging a field as optional.
*
* @return this instance
*/
public FieldDefinitionBuilder optional() {
optional = true;
return this;
} | 3.68 |
framework_Dependency_getUrl | /**
* Gets the untranslated URL for the dependency.
*
* @return the URL for the dependency
*/
public String getUrl() {
return url;
} | 3.68 |
morf_Criterion_neq | /**
* Helper method to create a new "NOT EQUAL" expression.
*
* <blockquote><pre>
* Criterion.neq(new Field("agreementnumber"), "A0001");</pre></blockquote>
*
* @param field the field to evaluate in the expression (the left hand side of the expression)
* @param value the value to evaluate in the expression (the right hand side)
* @return a new Criterion object
*/
public static Criterion neq(AliasedField field, Object value) {
return new Criterion(Operator.NEQ, field, value);
} | 3.68 |
flink_ZooKeeperStateHandleStore_exists | /**
* Returns the version of the node if it exists and is not marked for deletion or <code>-1
* </code>.
*
* @param pathInZooKeeper Path in ZooKeeper to check
* @return Version of the ZNode if the path exists and is not marked for deletion, <code>-1
* </code> otherwise.
* @throws Exception If the ZooKeeper operation fails
*/
@Override
public IntegerResourceVersion exists(String pathInZooKeeper) throws Exception {
checkNotNull(pathInZooKeeper, "Path in ZooKeeper");
return getStat(pathInZooKeeper)
.filter(ZooKeeperStateHandleStore::isNotMarkedForDeletion)
.map(stat -> IntegerResourceVersion.valueOf(stat.getVersion()))
.orElse(IntegerResourceVersion.notExisting());
} | 3.68 |
hbase_RootProcedureState_isValid | /**
* Called on store load by the ProcedureExecutor to validate the procedure stack.
*/
protected synchronized boolean isValid() {
if (subprocStack != null) {
for (Procedure<TEnvironment> proc : subprocStack) {
if (proc == null) {
return false;
}
}
}
return true;
} | 3.68 |
hadoop_CleanerMetrics_reportAFileDelete | /**
* Report a delete operation at the current system time
*/
public void reportAFileDelete() {
totalProcessedFiles.incr();
processedFiles.incr();
totalDeletedFiles.incr();
deletedFiles.incr();
} | 3.68 |
flink_DataStructureConverters_getConverter | /** Returns a converter for the given {@link DataType}. */
@SuppressWarnings("unchecked")
public static DataStructureConverter<Object, Object> getConverter(DataType dataType) {
// cast to Object for ease of use
return (DataStructureConverter<Object, Object>) getConverterInternal(dataType);
} | 3.68 |
flink_AsyncSinkBaseBuilder_setMaxBatchSize | /**
* @param maxBatchSize maximum number of elements that may be passed in a list to be written
* downstream.
* @return {@link ConcreteBuilderT} itself
*/
public ConcreteBuilderT setMaxBatchSize(int maxBatchSize) {
this.maxBatchSize = maxBatchSize;
return (ConcreteBuilderT) this;
} | 3.68 |
hbase_BackupManager_getAncestors | /**
* Get the direct ancestors of this backup for one table involved.
* @param backupInfo backup info
* @param table table
* @return backupImages on the dependency list
* @throws IOException exception
*/
public ArrayList<BackupImage> getAncestors(BackupInfo backupInfo, TableName table)
throws IOException {
ArrayList<BackupImage> ancestors = getAncestors(backupInfo);
ArrayList<BackupImage> tableAncestors = new ArrayList<>();
for (BackupImage image : ancestors) {
if (image.hasTable(table)) {
tableAncestors.add(image);
if (image.getType() == BackupType.FULL) {
break;
}
}
}
return tableAncestors;
} | 3.68 |
framework_Escalator_isCurrentBrowserIE11OrEdge | /**
* Internal method for checking whether the browser is IE11 or Edge
*
* @return true only if the current browser is IE11 or Edge
*/
private static boolean isCurrentBrowserIE11OrEdge() {
return BrowserInfo.get().isIE11() || BrowserInfo.get().isEdge();
} | 3.68 |
hbase_StripeMultiFileWriter_sanityCheckRight | /**
* Subclasses can call this method to make sure the last KV is within multi-writer range.
* @param right The right boundary of the writer.
*/
protected void sanityCheckRight(byte[] right, Cell cell) throws IOException {
if (
!Arrays.equals(StripeStoreFileManager.OPEN_KEY, right)
&& comparator.compareRows(cell, right, 0, right.length) >= 0
) {
String error = "The last row is higher or equal than the right boundary of ["
+ Bytes.toString(right) + "]: ["
+ Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()) + "]";
LOG.error(error);
throw new IOException(error);
}
} | 3.68 |
rocketmq-connect_JdbcSinkConfig_filterWhiteTable | /**
* Checks whether the given table passes the configured table whitelist.
*
* @param dbDialect the database dialect used to parse whitelist entries into table ids
* @param tableId the table to check
* @return true if the table is whitelisted or no whitelist is configured, false otherwise
*/
public boolean filterWhiteTable(DatabaseDialect dbDialect, TableId tableId) {
// no whitelist configured, so no table is filtered out
if (tableWhitelist.isEmpty()) {
return true;
}
for (String tableName : tableWhitelist) {
TableId table = dbDialect.parseTableNameToTableId(tableName);
if (table.catalogName() != null && table.catalogName().equals(tableId.catalogName())) {
return true;
}
if (table.tableName().equals(tableId.tableName())) {
return true;
}
}
return false;
} | 3.68 |
flink_Tuple_getFieldNotNull | /**
* Gets the field at the specified position, throws NullFieldException if the field is null.
* Used for comparing key fields.
*
* @param pos The position of the field, zero indexed.
* @return The field at the specified position.
* @throws IndexOutOfBoundsException Thrown, if the position is negative, or equal to, or larger
* than the number of fields.
* @throws NullFieldException Thrown, if the field at pos is null.
*/
public <T> T getFieldNotNull(int pos) {
T field = getField(pos);
if (field != null) {
return field;
} else {
throw new NullFieldException(pos);
}
} | 3.68 |
framework_Table_getVisibleCellsInsertIntoCache | /**
* @param firstIndex
* The position where new rows should be inserted
* @param rows
* The maximum number of rows that should be inserted at position
* firstIndex. Fewer rows will be inserted if the page buffer is
* too small.
* @return the cell data of the newly inserted rows
*/
private Object[][] getVisibleCellsInsertIntoCache(int firstIndex,
int rows) {
getLogger().log(Level.FINEST,
"Insert {0} rows at index {1} to existing page buffer requested",
new Object[] { rows, firstIndex });
int minPageBufferIndex = getMinPageBufferIndex();
int maxPageBufferIndex = getMaxPageBufferIndex();
int maxBufferSize = maxPageBufferIndex - minPageBufferIndex + 1;
if (getPageLength() == 0) {
// If pageLength == 0 then all rows should be rendered
maxBufferSize = pageBuffer[CELL_ITEMID].length + rows;
}
/*
* Number of rows that were previously cached. This is not necessarily
* the same as maxBufferSize.
*/
int currentlyCachedRowCount = pageBuffer[CELL_ITEMID].length;
/* If rows > size available in page buffer */
if (firstIndex + rows - 1 > maxPageBufferIndex) {
rows = maxPageBufferIndex - firstIndex + 1;
}
/*
* "rows" rows will be inserted at firstIndex. Find out how many old
* rows fall outside the new buffer so we can unregister components in
* the cache.
*/
/*
* if there are rows before the new pageBuffer limits they must be
* removed
*/
int lastCacheRowToRemove = minPageBufferIndex - 1;
int rowsFromBeginning = lastCacheRowToRemove - pageBufferFirstIndex + 1;
if (lastCacheRowToRemove >= pageBufferFirstIndex) {
unregisterComponentsAndPropertiesInRows(pageBufferFirstIndex,
rowsFromBeginning);
} else {
rowsFromBeginning = 0;
}
/*
* the rows that fall outside of the new pageBuffer limits after the new
* rows are inserted must also be removed
*/
int firstCacheRowToRemove = firstIndex;
/*
* IF there is space remaining in the buffer after the rows have been
* inserted, we can keep more rows.
*/
int numberOfOldRowsAfterInsertedRows = Math.min(
pageBufferFirstIndex + currentlyCachedRowCount + rows,
maxPageBufferIndex + 1) - (firstIndex + rows - 1);
if (numberOfOldRowsAfterInsertedRows > 0) {
firstCacheRowToRemove += numberOfOldRowsAfterInsertedRows;
}
int rowsFromAfter = currentlyCachedRowCount
- (firstCacheRowToRemove - pageBufferFirstIndex);
if (rowsFromAfter > 0) {
/*
* Unregister all components that fall beyond the cache limits after
* inserting the new rows.
*/
unregisterComponentsAndPropertiesInRows(firstCacheRowToRemove,
rowsFromAfter);
}
// Calculate the new cache size
int newCachedRowCount = maxBufferSize;
if (pageBufferFirstIndex + currentlyCachedRowCount + rows
- 1 < maxPageBufferIndex) {
// there aren't enough rows to fill the whole potential -> use what
// there is
newCachedRowCount -= maxPageBufferIndex - (pageBufferFirstIndex
+ currentlyCachedRowCount + rows - 1);
} else if (minPageBufferIndex < pageBufferFirstIndex) {
newCachedRowCount -= pageBufferFirstIndex - minPageBufferIndex;
}
/*
* calculate the internal location of the new rows within the new cache
*/
int firstIndexInNewPageBuffer = firstIndex - pageBufferFirstIndex
- rowsFromBeginning;
/* Paint the new rows into a separate buffer */
Object[][] cells = getVisibleCellsNoCache(firstIndex, rows, false);
/*
* Create the new cache buffer and fill it with the data from the old
* buffer as well as the inserted rows.
*/
Object[][] newPageBuffer = new Object[pageBuffer.length][newCachedRowCount];
for (int i = 0; i < pageBuffer.length; i++) {
for (int row = 0; row < firstIndexInNewPageBuffer; row++) {
// Copy the first rows
newPageBuffer[i][row] = pageBuffer[i][rowsFromBeginning + row];
}
for (int row = firstIndexInNewPageBuffer; row < firstIndexInNewPageBuffer
+ rows; row++) {
// Copy the newly created rows
newPageBuffer[i][row] = cells[i][row
- firstIndexInNewPageBuffer];
}
for (int row = firstIndexInNewPageBuffer
+ rows; row < newCachedRowCount; row++) {
// Move the old rows down below the newly inserted rows
newPageBuffer[i][row] = pageBuffer[i][rowsFromBeginning + row
- rows];
}
}
pageBuffer = newPageBuffer;
pageBufferFirstIndex = Math.max(
pageBufferFirstIndex + rowsFromBeginning, minPageBufferIndex);
if (getLogger().isLoggable(Level.FINEST)) {
getLogger().log(Level.FINEST,
"Page Buffer now contains {0} rows ({1}-{2})",
new Object[] { pageBuffer[CELL_ITEMID].length,
pageBufferFirstIndex, (pageBufferFirstIndex
+ pageBuffer[CELL_ITEMID].length - 1) });
}
return cells;
} | 3.68 |
hadoop_OBSFileSystem_getWorkingDirectory | /**
* Return the current working directory for the given file system.
*
* @return the directory pathname
*/
@Override
public Path getWorkingDirectory() {
return workingDir;
} | 3.68 |
hbase_FilterList_areSerializedFieldsEqual | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter other) {
if (other == this) {
return true;
}
if (!(other instanceof FilterList)) {
return false;
}
FilterList o = (FilterList) other;
return this.getOperator().equals(o.getOperator())
&& ((this.getFilters() == o.getFilters()) || this.getFilters().equals(o.getFilters()));
} | 3.68 |
pulsar_GenericSchemaImpl_of | /**
* Warning:
* we suggest migrating GenericSchemaImpl.of() to the corresponding <GenericSchema Implementor>.of() method
* (e.g. GenericJsonSchema, GenericAvroSchema)
* @param schemaInfo {@link SchemaInfo}
* @param useProvidedSchemaAsReaderSchema {@link Boolean}
* @return generic schema implementation
*/
public static GenericSchemaImpl of(SchemaInfo schemaInfo,
boolean useProvidedSchemaAsReaderSchema) {
switch (schemaInfo.getType()) {
case AVRO:
return new GenericAvroSchema(schemaInfo, useProvidedSchemaAsReaderSchema);
case JSON:
return new GenericJsonSchema(schemaInfo, useProvidedSchemaAsReaderSchema);
default:
throw new UnsupportedOperationException("Generic schema is not supported on schema type '"
+ schemaInfo.getType() + "'");
}
} | 3.68 |
flink_FutureUtils_retryWithDelay | /**
* Retry the given operation with the given delay in between failures.
*
* @param operation to retry
* @param retryStrategy the RetryStrategy
* @param scheduledExecutor executor to be used for the retry operation
* @param <T> type of the result
* @return Future which retries the given operation a given amount of times and delays the retry
* in case of failures
*/
public static <T> CompletableFuture<T> retryWithDelay(
final Supplier<CompletableFuture<T>> operation,
final RetryStrategy retryStrategy,
final ScheduledExecutor scheduledExecutor) {
return retryWithDelay(operation, retryStrategy, (throwable) -> true, scheduledExecutor);
} | 3.68 |
hadoop_OperationAuditorOptions_withConfiguration | /**
* Set builder value.
* @param value new value
* @return the builder
*/
public OperationAuditorOptions withConfiguration(final Configuration value) {
configuration = value;
return this;
} | 3.68 |
framework_VMenuBar_onMenuClick | /**
* This is called by the items in the menu and it communicates the
* information to the server.
*
* @param clickedItemId
* id of the item that was clicked
*/
public void onMenuClick(int clickedItemId) {
// Updating the state to the server can not be done before
// the server connection is known, i.e., before updateFromUIDL()
// has been called.
if (uidlId != null && client != null) {
// Communicate the user interaction parameters to server. This call
// will initiate an AJAX request to the server.
client.updateVariable(uidlId, "clickedId", clickedItemId, true);
}
} | 3.68 |
hadoop_ResourceCalculator_divideSafelyAsFloat | /**
* Divides lhs by rhs.
*
* @param lhs left number.
* @param rhs right number.
* @return If both lhs and rhs are 0, then 0 is returned.
* This is to avoid division by zero and returning NaN as a result.
* If rhs is zero but lhs is not, Float.POSITIVE_INFINITY will be
* returned as the result.
*/
public static float divideSafelyAsFloat(long lhs, long rhs) {
if (lhs == 0 && rhs == 0) {
return 0;
} else {
return (float) lhs / (float) rhs;
}
} | 3.68 |
framework_AbstractOrderedLayout_readDesign | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.AbstractComponent#readDesign(org.jsoup.nodes .Element,
* com.vaadin.ui.declarative.DesignContext)
*/
@Override
public void readDesign(Element design, DesignContext designContext) {
// process default attributes
super.readDesign(design, designContext);
setMargin(readMargin(design, getMargin(), designContext));
// handle children
for (Element childComponent : design.children()) {
Attributes attr = childComponent.attributes();
Component newChild = designContext.readDesign(childComponent);
addComponent(newChild);
// handle alignment
setComponentAlignment(newChild,
DesignAttributeHandler.readAlignment(attr));
// handle expand ratio
if (attr.hasKey(":expand")) {
String value = attr.get(":expand");
if (!value.isEmpty()) {
try {
float ratio = Float.valueOf(value);
setExpandRatio(newChild, ratio);
} catch (NumberFormatException nfe) {
getLogger()
.info("Failed to parse expand ratio " + value);
}
} else {
setExpandRatio(newChild, 1.0f);
}
}
}
} | 3.68 |
rocketmq-connect_ClusterConfigState_taskConfig | /**
* Gets the configuration of the given task.
*
* @param task the connector task id
* @return the configuration map stored for the task, or null if none exists
*/
public Map<String, String> taskConfig(ConnectorTaskId task) {
return taskConfigs.get(task);
} | 3.68 |
hadoop_PeriodicService_getRunCount | /**
* Get how many times we run the periodic service.
*
* @return Times we run the periodic service.
*/
protected long getRunCount() {
return this.runCount;
} | 3.68 |
flink_JoinOperator_equalTo | /**
* Continues a Join transformation and defines a {@link KeySelector} function for the
* second join {@link DataSet}.
*
* <p>The KeySelector function is called for each element of the second DataSet and
* extracts a single key value on which the DataSet is joined.
*
* <p>The resulting {@link DefaultJoin} wraps each pair of joining elements into a
* {@link Tuple2}, with the element of the first input being the first field of the
* tuple and the element of the second input being the second field of the tuple.
*
* @param keySelector The KeySelector function which extracts the key values from the
* second DataSet on which it is joined.
* @return A DefaultJoin that represents the joined DataSet.
*/
@Override
public <K> DefaultJoin<I1, I2> equalTo(KeySelector<I2, K> keySelector) {
TypeInformation<K> keyType =
TypeExtractor.getKeySelectorTypes(keySelector, input2.getType());
return createDefaultJoin(
new SelectorFunctionKeys<>(
input2.clean(keySelector), input2.getType(), keyType));
} | 3.68 |
morf_OracleDialect_buildRemainingStatementsAndComments | /**
* Builds the remaining statements (triggers, sequences and comments).
*
* @param table The table to create the statements for.
* @return the collection of statements.
*/
private Collection<String> buildRemainingStatementsAndComments(Table table) {
List<String> statements = Lists.newArrayList();
Column sequence = findAutonumberedColumn(table);
if (sequence != null) {
statements.add(dropTrigger(table));
statements.add(dropSequence(table));
statements.add(createNewSequence(table, sequence));
statements.addAll(createTrigger(table, sequence));
}
String truncatedTableName = truncatedTableName(table.getName());
statements.add(commentOnTable(truncatedTableName));
statements.addAll(createColumnComments(table));
return statements;
} | 3.68 |
hbase_MetricsREST_incrementRequests | /**
* @param inc How much to add to requests.
*/
public void incrementRequests(final int inc) {
source.incrementRequests(inc);
} | 3.68 |
hadoop_TrustedChannelResolver_getInstance | /**
* Returns an instance of TrustedChannelResolver.
* Looks up the configuration to see if there is a custom class specified.
* @param conf the configuration to look up the resolver class in
* @return TrustedChannelResolver
*/
public static TrustedChannelResolver getInstance(Configuration conf) {
Class<? extends TrustedChannelResolver> clazz =
conf.getClass(
HdfsClientConfigKeys.DFS_TRUSTEDCHANNEL_RESOLVER_CLASS,
TrustedChannelResolver.class, TrustedChannelResolver.class);
return ReflectionUtils.newInstance(clazz, conf);
} | 3.68 |
hibernate-validator_CollectionHelper_iteratorFromArray | /**
* Builds an {@link Iterator} for a given array. It is (un)necessarily ugly because we have to deal with arrays of primitives.
*
* @param object a given array
* @return an {@code Iterator} iterating over the array
*/
@SuppressWarnings({ "unchecked", "rawtypes" }) // Reflection is used to ensure the correct types are used
public static Iterator<?> iteratorFromArray(Object object) {
return new ArrayIterator( accessorFromArray( object ), object );
} | 3.68 |
flink_StreamSource_isCanceledOrStopped | /**
* Checks whether the source has been canceled or stopped.
*
* @return True, if the source is canceled or stopped, false if not.
*/
protected boolean isCanceledOrStopped() {
return canceledOrStopped;
} | 3.68 |
hbase_MemStoreLABImpl_decScannerCount | /**
* Called when closing a scanner on the data of this MemStoreLAB
*/
@Override
public void decScannerCount() {
this.refCnt.release();
} | 3.68 |
graphhopper_OSMNodeData_getNodeCount | /**
* @return the number of mapped nodes (tower + pillar, but also including pillar nodes that were converted to tower)
*/
public long getNodeCount() {
return idsByOsmNodeIds.getSize();
} | 3.68 |