name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
framework_AbstractStringToNumberConverter_getPresentationType | /*
* (non-Javadoc)
*
* @see com.vaadin.data.util.converter.Converter#getPresentationType()
*/
@Override
public Class<String> getPresentationType() {
return String.class;
} | 3.68 |
hbase_SnapshotInfo_isCorrupted | /** Returns true if the file is corrupted */
public boolean isCorrupted() {
return this.corrupted;
} | 3.68 |
querydsl_SQLExpressions_set | /**
* Create an assignment expression
*
* @param target target expression
* @param value value to be set
* @param <T> the type of the target path and value
* @return target = value
*/
public static <T> Expression<T> set(Path<T> target, T value) {
if (value != null) {
return Expressions.operation(target.getType(), SQLOps.SET_LITERAL,
target, Expressions.constant(value));
} else {
return Expressions.operation(target.getType(), SQLOps.SET_LITERAL,
target, Expressions.nullExpression());
}
} | 3.68 |
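A minimal usage sketch for the assignment helper above. It assumes Querydsl's `Expressions.path` factory to build a stand-in column path; in a real project the path would come from a generated Q-type, and the `salary` name is purely hypothetical.

```java
import com.querydsl.core.types.Expression;
import com.querydsl.core.types.Path;
import com.querydsl.core.types.dsl.Expressions;
import com.querydsl.sql.SQLExpressions;

public class SetExample {
    public static void main(String[] args) {
        // Stand-in for a generated Q-type column (hypothetical name).
        Path<Integer> salary = Expressions.path(Integer.class, "salary");

        // target = value, taking the non-null branch above.
        Expression<Integer> assign = SQLExpressions.set(salary, 50_000);

        // target = NULL, taking the null-expression branch above.
        Expression<Integer> clear = SQLExpressions.set(salary, null);

        System.out.println(assign);
        System.out.println(clear);
    }
}
```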
framework_Upload_paintContent | /**
* Paints the content of this component.
*
* @param target
* Target to paint the content on.
* @throws PaintException
* if the paint operation failed.
*/
@Override
public void paintContent(PaintTarget target) throws PaintException {
if (notStarted) {
target.addAttribute("notStarted", true);
notStarted = false;
return;
}
// The field should be focused
if (focus) {
target.addAttribute("focus", true);
}
// The tab ordering number
if (tabIndex >= 0) {
target.addAttribute("tabindex", tabIndex);
}
target.addAttribute("state", isUploading);
target.addAttribute("nextid", nextid);
// Post file to this stream variable
target.addVariable(this, "action", getStreamVariable());
} | 3.68 |
hbase_Query_setLoadColumnFamiliesOnDemand | /**
* Set the value indicating whether loading CFs on demand should be allowed (cluster default is
* false). On-demand CF loading doesn't load column families until necessary, e.g. if you filter
* on one column, the other column family data will be loaded only for the rows that are included
* in the result, not for all rows as in the normal case. With column-specific filters, like
* SingleColumnValueFilter w/filterIfMissing == true, this can deliver huge perf gains when
* there's a cf with lots of data; however, it can also lead to some inconsistent results:
* - if someone does a concurrent update to both column families in question you may get a row
*   that never existed, e.g. for { rowKey = 5, { cat_videos => 1 }, { video => "my cat" } },
*   someone puts rowKey 5 with { cat_videos => 0 }, { video => "my dog" }, and a concurrent
*   scan filtering on "cat_videos == 1" can get { rowKey = 5, { cat_videos => 1 }, { video => "my dog" } }.
* - if there's a concurrent split and you have more than 2 column families, some rows may be
*   missing some column families.
*/
public Query setLoadColumnFamiliesOnDemand(boolean value) {
this.loadColumnFamiliesOnDemand = value;
return this;
} | 3.68 |
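A hedged usage sketch of the setting described above: a Scan (which extends Query) filters on a small column family with filterIfMissing enabled, so the other families are materialized only for matching rows. The family and qualifier names are hypothetical.

```java
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class OnDemandScanExample {
    public static Scan buildScan() {
        // Filter on the small "cat_videos" family (hypothetical names).
        SingleColumnValueFilter filter = new SingleColumnValueFilter(
                Bytes.toBytes("cat_videos"),
                Bytes.toBytes("count"),
                CompareOperator.EQUAL,
                Bytes.toBytes(1L));
        // Skip rows that do not carry the filtered column at all.
        filter.setFilterIfMissing(true);

        Scan scan = new Scan();
        scan.setFilter(filter);
        // Only load the remaining (large) families for rows that pass the filter.
        scan.setLoadColumnFamiliesOnDemand(true);
        return scan;
    }
}
```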
flink_FlinkSemiAntiJoinJoinTransposeRule_onMatch | // implement RelOptRule
public void onMatch(RelOptRuleCall call) {
LogicalJoin semiAntiJoin = call.rel(0);
if (semiAntiJoin.getJoinType() != JoinRelType.SEMI
&& semiAntiJoin.getJoinType() != JoinRelType.ANTI) {
return;
}
final Join join = call.rel(1);
if (join.getJoinType() == JoinRelType.SEMI || join.getJoinType() == JoinRelType.ANTI) {
return;
}
// TODO support other join type
if (join.getJoinType() != JoinRelType.INNER) {
return;
}
// unsupported cases:
// 1. (NOT) EXISTS without correlation
// 2. keys in SemiJoin condition are from both Join's left and Join's right
Pair<ImmutableBitSet, ImmutableBitSet> inputRefs =
getSemiAntiJoinConditionInputRefs(semiAntiJoin);
final ImmutableBitSet leftInputRefs = inputRefs.left;
final ImmutableBitSet rightInputRefs = inputRefs.right;
// unsupported case1. (NOT) EXISTS without correlation
// e.g. SELECT * FROM x, y WHERE x.c = y.f AND EXISTS (SELECT * FROM z)
// SemiJoin may be pushed to both Join's left and Join's right.
// TODO currently we do not handle this
if (leftInputRefs.isEmpty() || rightInputRefs.isEmpty()) {
return;
}
// X is the left child of the join below the semi-join
// Y is the right child of the join below the semi-join
// Z is the right child of the semi-join
int nFieldsX = join.getLeft().getRowType().getFieldList().size();
int nFieldsY = join.getRight().getRowType().getFieldList().size();
int nFieldsZ = semiAntiJoin.getRight().getRowType().getFieldList().size();
int nTotalFields = nFieldsX + nFieldsY + nFieldsZ;
List<RelDataTypeField> fields = new ArrayList<RelDataTypeField>();
// create a list of fields for the full join result; note that
// we can't simply use the fields from the semi-join because the
// row-type of a semi-join only includes the left hand side fields
List<RelDataTypeField> joinFields = semiAntiJoin.getRowType().getFieldList();
for (int i = 0; i < (nFieldsX + nFieldsY); i++) {
fields.add(joinFields.get(i));
}
joinFields = semiAntiJoin.getRight().getRowType().getFieldList();
for (int i = 0; i < nFieldsZ; i++) {
fields.add(joinFields.get(i));
}
// determine which operands below the semi-join are the actual
// Rels that participate in the semi-join
int nKeysFromX = 0;
int nKeysFromY = 0;
for (int leftKey : leftInputRefs) {
if (leftKey < nFieldsX) {
nKeysFromX++;
} else {
nKeysFromY++;
}
}
// unsupported case2. keys in SemiJoin condition are from both Join's left and Join's right
// e.g. SELECT * FROM x, y WHERE x.c = y.f AND x.a IN (SELECT z.i FROM z WHERE y.e = z.j)
if (nKeysFromX > 0 && nKeysFromY > 0) {
return;
}
// the keys must all originate from either the left or right;
// otherwise, a semi-join wouldn't have been created
assert (nKeysFromX == 0) || (nKeysFromX == leftInputRefs.cardinality());
assert (nKeysFromY == 0) || (nKeysFromY == leftInputRefs.cardinality());
// need to convert the semi/anti join condition and possibly the keys
RexNode newSemiAntiJoinFilter;
int[] adjustments = new int[nTotalFields];
if (nKeysFromX > 0) {
// (X, Y, Z) --> (X, Z, Y)
// semiJoin(X, Z)
// pass 0 as Y's adjustment because there shouldn't be any
// references to Y in the semi/anti join filter
setJoinAdjustments(adjustments, nFieldsX, nFieldsY, nFieldsZ, 0, -nFieldsY);
newSemiAntiJoinFilter =
semiAntiJoin
.getCondition()
.accept(
new RelOptUtil.RexInputConverter(
semiAntiJoin.getCluster().getRexBuilder(),
fields,
adjustments));
} else {
// (X, Y, Z) --> (X, Y, Z)
// semiJoin(Y, Z)
setJoinAdjustments(adjustments, nFieldsX, nFieldsY, nFieldsZ, -nFieldsX, -nFieldsX);
newSemiAntiJoinFilter =
semiAntiJoin
.getCondition()
.accept(
new RelOptUtil.RexInputConverter(
semiAntiJoin.getCluster().getRexBuilder(),
fields,
adjustments));
}
// create the new join
RelNode newSemiAntiJoinLeft;
if (nKeysFromX > 0) {
newSemiAntiJoinLeft = join.getLeft();
} else {
newSemiAntiJoinLeft = join.getRight();
}
Join newSemiAntiJoin =
LogicalJoin.create(
newSemiAntiJoinLeft,
semiAntiJoin.getRight(),
join.getHints(),
newSemiAntiJoinFilter,
semiAntiJoin.getVariablesSet(),
semiAntiJoin.getJoinType());
RelNode leftJoinRel;
RelNode rightJoinRel;
if (nKeysFromX > 0) {
leftJoinRel = newSemiAntiJoin;
rightJoinRel = join.getRight();
} else {
leftJoinRel = join.getLeft();
rightJoinRel = newSemiAntiJoin;
}
RelNode newJoinRel =
join.copy(
join.getTraitSet(),
join.getCondition(),
leftJoinRel,
rightJoinRel,
join.getJoinType(),
join.isSemiJoinDone());
call.transformTo(newJoinRel);
} | 3.68 |
pulsar_LocalBrokerData_updateBundleData | // Aggregate message rates, throughput, topic count, bundle count, consumer
// count, and producer count across the given data.
// Also keep track of bundle gains and losses.
private void updateBundleData(final Map<String, NamespaceBundleStats> bundleStats) {
msgRateIn = 0;
msgRateOut = 0;
msgThroughputIn = 0;
msgThroughputOut = 0;
int totalNumTopics = 0;
int totalNumBundles = 0;
int totalNumConsumers = 0;
int totalNumProducers = 0;
final Iterator<String> oldBundleIterator = bundles.iterator();
while (oldBundleIterator.hasNext()) {
final String bundle = oldBundleIterator.next();
if (!bundleStats.containsKey(bundle)) {
// If this bundle is in the old bundle set but not the new one,
// we lost it.
lastBundleLosses.add(bundle);
oldBundleIterator.remove();
}
}
for (Map.Entry<String, NamespaceBundleStats> entry : bundleStats.entrySet()) {
final String bundle = entry.getKey();
final NamespaceBundleStats stats = entry.getValue();
if (!bundles.contains(bundle)) {
// If this bundle is in the new bundle set but not the old one,
// we gained it.
lastBundleGains.add(bundle);
bundles.add(bundle);
}
msgThroughputIn += stats.msgThroughputIn;
msgThroughputOut += stats.msgThroughputOut;
msgRateIn += stats.msgRateIn;
msgRateOut += stats.msgRateOut;
totalNumTopics += stats.topics;
++totalNumBundles;
totalNumConsumers += stats.consumerCount;
totalNumProducers += stats.producerCount;
}
numTopics = totalNumTopics;
numBundles = totalNumBundles;
numConsumers = totalNumConsumers;
numProducers = totalNumProducers;
} | 3.68 |
hbase_CompactingMemStore_flattenOneSegment | /**
* @param requesterVersion The caller must hold the VersionedList of the pipeline with version
* taken earlier. This version must be passed as a parameter here. The
* flattening happens only if versions match.
*/
public void flattenOneSegment(long requesterVersion, MemStoreCompactionStrategy.Action action) {
pipeline.flattenOneSegment(requesterVersion, indexType, action);
} | 3.68 |
flink_ExecutionConfig_toMap | /**
* Convert UserConfig into a {@code Map<String, String>} representation. This can be used by
* the runtime, for example for presenting the user config in the web frontend.
*
* @return Key/Value representation of the UserConfig
*/
public Map<String, String> toMap() {
return Collections.emptyMap();
} | 3.68 |
zxing_DecodeHandler_decode | /**
* Decode the data within the viewfinder rectangle, and time how long it took. For efficiency,
* reuse the same reader objects from one decode to the next.
*
* @param data The YUV preview frame.
* @param width The width of the preview frame.
* @param height The height of the preview frame.
*/
private void decode(byte[] data, int width, int height) {
Result rawResult = null;
PlanarYUVLuminanceSource source = activity.getCameraManager().buildLuminanceSource(data, width, height);
if (source != null) {
BinaryBitmap bitmap = new BinaryBitmap(new HybridBinarizer(source));
try {
rawResult = multiFormatReader.decodeWithState(bitmap);
} catch (ReaderException re) {
// continue
} finally {
multiFormatReader.reset();
}
}
Handler handler = activity.getHandler();
if (rawResult != null) {
// Don't log the barcode contents for security.
if (handler != null) {
Message message = Message.obtain(handler, R.id.decode_succeeded, rawResult);
Bundle bundle = new Bundle();
bundleThumbnail(source, bundle);
message.setData(bundle);
message.sendToTarget();
}
} else {
if (handler != null) {
Message message = Message.obtain(handler, R.id.decode_failed);
message.sendToTarget();
}
}
} | 3.68 |
AreaShop_FileManager_getGroup | /**
* Get a group.
* @param name The name of the group to get (will be normalized)
* @return The group if found, otherwise null
*/
public RegionGroup getGroup(String name) {
return groups.get(name.toLowerCase());
} | 3.68 |
hadoop_ClusterSummarizer_toString | /**
* Summarizes the cluster used for this {@link Gridmix} run.
*/
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("Cluster Summary:-");
builder.append("\nJobTracker: ").append(getJobTrackerInfo());
builder.append("\nFileSystem: ").append(getNamenodeInfo());
builder.append("\nNumber of blacklisted trackers: ")
.append(getNumBlacklistedTrackers());
builder.append("\nNumber of active trackers: ")
.append(getNumActiveTrackers());
builder.append("\nMax map task capacity: ")
.append(getMaxMapTasks());
builder.append("\nMax reduce task capacity: ").append(getMaxReduceTasks());
builder.append("\n\n");
return builder.toString();
} | 3.68 |
morf_DummyXmlOutputStreamProvider_open | /**
* @see org.alfasoftware.morf.xml.XmlStreamProvider#open()
*/
@Override
public void open() {
// Nothing to do
} | 3.68 |
hudi_InternalSchemaBuilder_buildNameToId | /**
* Build a mapping from full field name to id for an internal Type.
* If a field y belongs to a struct field x, then the full name of y is x.y.
*
* @param type hoodie internal type
* @return a mapping from full field name to id
*/
public Map<String, Integer> buildNameToId(Type type) {
return visit(type, new NameToIDVisitor());
} | 3.68 |
AreaShop_GeneralRegion_actionAllowed | /**
* Check if the action is allowed.
* @return true if the action is allowed, otherwise false
*/
public boolean actionAllowed() {
return actionAllowed;
} | 3.68 |
morf_SqlScriptExecutor_doWork | /**
* Performs some work using a JDBC connection derived from the injected data source.
*
* @param work a {@link Work} implementation.
*/
public void doWork(Work work) {
try {
if (dataSource == null) {
// Either initialise this executor with a DataSource or use the execute(Iterable<String>, Connection) method.
throw new IllegalStateException("No data source found.");
}
try (Connection connection = dataSource.getConnection()) {
autoCommitOff(connection, () -> {
commitOrRollback(connection, () -> {
work.execute(connection);
});
});
}
} catch (SQLException e) {
throw reclassifiedRuntimeException(e, "Error with statement");
}
} | 3.68 |
hudi_HoodieBackedTableMetadataWriter_enablePartitions | /**
* Enable metadata table partitions based on config.
*/
private void enablePartitions() {
final HoodieMetadataConfig metadataConfig = dataWriteConfig.getMetadataConfig();
if (dataWriteConfig.isMetadataTableEnabled() || dataMetaClient.getTableConfig().isMetadataPartitionAvailable(FILES)) {
this.enabledPartitionTypes.add(FILES);
}
if (metadataConfig.isBloomFilterIndexEnabled() || dataMetaClient.getTableConfig().isMetadataPartitionAvailable(BLOOM_FILTERS)) {
this.enabledPartitionTypes.add(BLOOM_FILTERS);
}
if (metadataConfig.isColumnStatsIndexEnabled() || dataMetaClient.getTableConfig().isMetadataPartitionAvailable(COLUMN_STATS)) {
this.enabledPartitionTypes.add(COLUMN_STATS);
}
if (dataWriteConfig.isRecordIndexEnabled() || dataMetaClient.getTableConfig().isMetadataPartitionAvailable(RECORD_INDEX)) {
this.enabledPartitionTypes.add(RECORD_INDEX);
}
if (dataMetaClient.getFunctionalIndexMetadata().isPresent()) {
this.enabledPartitionTypes.add(FUNCTIONAL_INDEX);
}
} | 3.68 |
flink_ConfigurationUtils_parseLocalStateDirectories | /**
* Extracts the local state directories as defined by {@link
* CheckpointingOptions#LOCAL_RECOVERY_TASK_MANAGER_STATE_ROOT_DIRS}.
*
* @param configuration configuration object
* @return array of configured directories (in order)
*/
@Nonnull
public static String[] parseLocalStateDirectories(Configuration configuration) {
String configValue =
configuration.getString(
CheckpointingOptions.LOCAL_RECOVERY_TASK_MANAGER_STATE_ROOT_DIRS, "");
return splitPaths(configValue);
} | 3.68 |
pulsar_ServiceConfigurationUtils_getInternalListener | /**
* Gets the internal advertised listener for broker-to-broker communication.
* @return a non-null advertised listener
*/
public static AdvertisedListener getInternalListener(ServiceConfiguration config, String protocol) {
Map<String, AdvertisedListener> result = MultipleListenerValidator
.validateAndAnalysisAdvertisedListener(config);
AdvertisedListener internal = result.get(config.getInternalListenerName());
if (internal == null || !internal.hasUriForProtocol(protocol)) {
// Search for an advertised listener for same protocol
for (AdvertisedListener l : result.values()) {
if (l.hasUriForProtocol(protocol)) {
internal = l;
break;
}
}
}
if (internal == null) {
// synthesize an advertised listener based on legacy configuration properties
String host = ServiceConfigurationUtils.getDefaultOrConfiguredAddress(config.getAdvertisedAddress());
internal = AdvertisedListener.builder()
.brokerServiceUrl(createUriOrNull("pulsar", host, config.getBrokerServicePort()))
.brokerServiceUrlTls(createUriOrNull("pulsar+ssl", host, config.getBrokerServicePortTls()))
.build();
}
return internal;
} | 3.68 |
flink_BaseMappingExtractor_putExplicitMappings | // --------------------------------------------------------------------------------------------
// Helper methods (ordered by invocation order)
// --------------------------------------------------------------------------------------------
/** Explicit mappings with complete signature to result declaration. */
private void putExplicitMappings(
Map<FunctionSignatureTemplate, FunctionResultTemplate> collectedMappings,
Set<FunctionTemplate> explicitMappings,
Set<FunctionSignatureTemplate> signatureOnly,
Function<FunctionTemplate, FunctionResultTemplate> accessor) {
explicitMappings.forEach(
t -> {
// signature templates are valid everywhere and are added to the explicit
// mapping
Stream.concat(signatureOnly.stream(), Stream.of(t.getSignatureTemplate()))
.forEach(v -> putMapping(collectedMappings, v, accessor.apply(t)));
});
} | 3.68 |
flink_PythonFunction_getPythonFunctionKind | /** Returns the kind of the user-defined python function. */
default PythonFunctionKind getPythonFunctionKind() {
return PythonFunctionKind.GENERAL;
} | 3.68 |
pulsar_SaslRoleTokenSigner_computeSignature | /**
* Returns the signature of a string.
*
* @param str string to sign.
*
* @return the signature for the string.
*/
protected String computeSignature(String str) {
try {
MessageDigest md = MessageDigest.getInstance("SHA-512");
md.update(str.getBytes());
md.update(secret);
byte[] digest = md.digest();
return new Base64(0).encodeToString(digest);
} catch (NoSuchAlgorithmException ex) {
throw new RuntimeException("It should not happen, " + ex.getMessage(), ex);
}
} | 3.68 |
framework_PushRequestHandler_handleSessionExpired | /*
* (non-Javadoc)
*
* @see
* com.vaadin.server.SessionExpiredHandler#handleSessionExpired(com.vaadin
* .server.VaadinRequest, com.vaadin.server.VaadinResponse)
*/
@Override
public boolean handleSessionExpired(VaadinRequest request,
VaadinResponse response) throws IOException {
// Websockets request must be handled by accepting the websocket
// connection and then sending session expired so we let
// PushRequestHandler handle it
return handleRequest(null, request, response);
} | 3.68 |
AreaShop_FriendsFeature_addFriend | /**
* Add a friend to the region.
* @param player The UUID of the player to add
* @param by The CommandSender that is adding the friend, or null
* @return true if the friend has been added, false if adding a friend was cancelled by another plugin
*/
public boolean addFriend(UUID player, CommandSender by) {
// Fire and check event
AddedFriendEvent event = new AddedFriendEvent(getRegion(), Bukkit.getOfflinePlayer(player), by);
Bukkit.getPluginManager().callEvent(event);
if(event.isCancelled()) {
plugin.message(by, "general-cancelled", event.getReason(), this);
return false;
}
Set<String> friends = new HashSet<>(getRegion().getConfig().getStringList("general.friends"));
friends.add(player.toString());
List<String> list = new ArrayList<>(friends);
getRegion().setSetting("general.friends", list);
return true;
} | 3.68 |
flink_NormalizedKeySorter_isEmpty | /**
* Checks whether the buffer is empty.
*
* @return True, if no record is contained, false otherwise.
*/
@Override
public boolean isEmpty() {
return this.numRecords == 0;
} | 3.68 |
framework_MultiSelectionModelImpl_onDeselectAll | /**
* Triggered when the user unchecks the select all checkbox.
*
* @param userOriginated
* {@code true} if originated from client side by user
*/
protected void onDeselectAll(boolean userOriginated) {
if (userOriginated) {
verifyUserCanSelectAll();
// the all-selected state has already been updated on the client side
getState(false).allSelected = false;
getUI().getConnectorTracker().getDiffState(this).put("allSelected",
false);
} else {
getState().allSelected = false;
}
updateSelection(Collections.emptySet(), new LinkedHashSet<>(selection),
userOriginated);
} | 3.68 |
dubbo_AdaptiveClassCodeGenerator_generateMethod | /**
* generate method declaration
*/
private String generateMethod(Method method) {
String methodReturnType = method.getReturnType().getCanonicalName();
String methodName = method.getName();
String methodContent = generateMethodContent(method);
String methodArgs = generateMethodArguments(method);
String methodThrows = generateMethodThrows(method);
return String.format(
CODE_METHOD_DECLARATION, methodReturnType, methodName, methodArgs, methodThrows, methodContent);
} | 3.68 |
pulsar_Schema_configureSchemaInfo | /**
* Configure the schema to use the provided schema info.
*
* @param topic topic name
* @param componentName component name
* @param schemaInfo schema info
*/
default void configureSchemaInfo(String topic, String componentName,
SchemaInfo schemaInfo) {
// no-op
} | 3.68 |
morf_ViewBean_getName | /**
* @see org.alfasoftware.morf.metadata.View#getName()
*/
@Override
public String getName() {
return name;
} | 3.68 |
rocketmq-connect_KafkaConnectAdaptorSink_processSinkRecord | /**
* convert ConnectRecord to SinkRecord
*
* @param record the source ConnectRecord
* @return the converted SinkRecord after applying transforms
*/
@Override
public SinkRecord processSinkRecord(ConnectRecord record) {
SinkRecord sinkRecord = Converters.fromConnectRecord(record);
return transforms(sinkRecord);
} | 3.68 |
hadoop_Server_checkAbsolutePath | /**
* Validates that the specified value is an absolute path (starts with '/').
*
* @param value value to verify it is an absolute path.
* @param name name to use in the exception if the value is not an absolute
* path.
*
* @return the value.
*
* @throws IllegalArgumentException thrown if the value is not an absolute
* path.
*/
private String checkAbsolutePath(String value, String name) {
if (!new File(value).isAbsolute()) {
throw new IllegalArgumentException(
MessageFormat.format("[{0}] must be an absolute path [{1}]", name, value));
}
return value;
} | 3.68 |
AreaShop_AreaShop_getWorldEdit | /**
* Function to get the WorldEdit plugin.
* @return WorldEditPlugin
*/
@Override
public WorldEditPlugin getWorldEdit() {
return worldEdit;
} | 3.68 |
flink_AvroSerializationSchema_forGeneric | /**
* Creates {@link AvroSerializationSchema} that serializes {@link GenericRecord} using provided
* schema.
*
* @param schema the schema that will be used for serialization
* @param encoding the Avro encoding to use
* @return serialization schema that serializes a {@link GenericRecord} into a byte array
*/
public static AvroSerializationSchema<GenericRecord> forGeneric(
Schema schema, AvroEncoding encoding) {
return new AvroSerializationSchema<>(GenericRecord.class, schema, encoding);
} | 3.68 |
morf_GraphBasedUpgradeNode_getParents | /**
* @return upgrade nodes on which this upgrade node depends
*/
public Set<GraphBasedUpgradeNode> getParents() {
return parents;
} | 3.68 |
zxing_ReaderException_setStackTrace | /**
* For testing only. Controls whether library exception classes include stack traces or not.
* Defaults to false, unless running in the project's unit testing harness.
*
* @param enabled if true, enables stack traces in library exception classes
* @since 3.5.0
*/
public static void setStackTrace(boolean enabled) {
isStackTrace = enabled;
} | 3.68 |
hadoop_AHSController_logs | /**
* Render the logs page.
*/
public void logs() {
render(AHSLogsPage.class);
} | 3.68 |
hadoop_AzureFileSystemInstrumentation_getCurrentMaximumUploadBandwidth | /**
* Get the current maximum upload bandwidth.
* @return maximum upload bandwidth in bytes per second.
*/
public long getCurrentMaximumUploadBandwidth() {
return currentMaximumUploadBytesPerSecond;
} | 3.68 |
pulsar_ManagedLedgerConfig_setBookKeeperEnsemblePlacementPolicyClassName | /**
* Sets the EnsemblePlacementPolicy configured for the Managed-ledger.
*
* @param bookKeeperEnsemblePlacementPolicyClassName
*/
public void setBookKeeperEnsemblePlacementPolicyClassName(
Class<? extends EnsemblePlacementPolicy> bookKeeperEnsemblePlacementPolicyClassName) {
this.bookKeeperEnsemblePlacementPolicyClassName = bookKeeperEnsemblePlacementPolicyClassName;
} | 3.68 |
zxing_DecoderResult_getText | /**
* @return text representation of the result
*/
public String getText() {
return text;
} | 3.68 |
dubbo_ClassUtils_getAllInheritedTypes | /**
* Get all inherited types from the specified type
*
* @param type the specified type
* @param typeFilters the filters for types
* @return non-null read-only {@link Set}
* @since 2.7.6
*/
public static Set<Class<?>> getAllInheritedTypes(Class<?> type, Predicate<Class<?>>... typeFilters) {
// Add all super classes
Set<Class<?>> types = new LinkedHashSet<>(getAllSuperClasses(type, typeFilters));
// Add all interface classes
types.addAll(getAllInterfaces(type, typeFilters));
return unmodifiableSet(types);
} | 3.68 |
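A small usage sketch for the utility above. It assumes the class lives at org.apache.dubbo.common.utils.ClassUtils, as the @since 2.7.6 tag suggests; the filter is only an illustration.

```java
import java.util.ArrayList;
import java.util.Set;

import org.apache.dubbo.common.utils.ClassUtils;

public class InheritedTypesExample {
    public static void main(String[] args) {
        // Super classes and interfaces of ArrayList, filtering out Object itself.
        Set<Class<?>> types = ClassUtils.getAllInheritedTypes(
                ArrayList.class,
                type -> !Object.class.equals(type));
        types.forEach(t -> System.out.println(t.getName()));
    }
}
```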
hadoop_SQLDelegationTokenSecretManager_removeStoredMasterKey | /**
* Removes the existing DelegationKey from the SQL database to
* invalidate it.
* @param key DelegationKey to remove from the SQL database.
*/
@Override
protected void removeStoredMasterKey(DelegationKey key) {
try {
deleteDelegationKey(key.getKeyId());
} catch (SQLException e) {
LOG.warn("Failed to remove delegation key in SQL secret manager", e);
}
} | 3.68 |
flink_AbstractBytesHashMap_reset | /** reset the map's record and bucket area's memory segments for reusing. */
public void reset() {
// reset the record segments.
recordArea.reset();
destructiveIterator = null;
super.reset();
} | 3.68 |
hbase_Bytes_createMaxByteArray | /**
* Create a max byte array with the specified max byte count
* @param maxByteCount the length of returned byte array
* @return the created max byte array
*/
public static byte[] createMaxByteArray(int maxByteCount) {
byte[] maxByteArray = new byte[maxByteCount];
for (int i = 0; i < maxByteArray.length; i++) {
maxByteArray[i] = (byte) 0xff;
}
return maxByteArray;
} | 3.68 |
hbase_HFileSystem_close | /**
* Close this filesystem object
*/
@Override
public void close() throws IOException {
super.close();
if (this.noChecksumFs != fs) {
this.noChecksumFs.close();
}
} | 3.68 |
flink_ManagedInitializationContext_isRestored | /** Returns true, if state was restored from the snapshot of a previous execution. */
default boolean isRestored() {
return getRestoredCheckpointId().isPresent();
} | 3.68 |
framework_ListDataSource_indexOf | /**
* Retrieves the index for given row object.
* <p>
* <em>Note:</em> This method does not verify that the given row object
* exists at all in this DataSource.
*
* @param row
* the row object
* @return index of the row; or <code>-1</code> if row is not available
*/
public int indexOf(T row) {
return ds.indexOf(row);
} | 3.68 |
hbase_FanOutOneBlockAsyncDFSOutput_recoverAndClose | /**
* The close method when error occurred. Now we just call recoverFileLease.
*/
@Override
public void recoverAndClose(CancelableProgressable reporter) throws IOException {
if (buf != null) {
buf.release();
buf = null;
}
closeDataNodeChannelsAndAwait();
endFileLease(client, fileId);
RecoverLeaseFSUtils.recoverFileLease(dfs, new Path(src), conf,
reporter == null ? new CancelOnClose(client) : reporter);
} | 3.68 |
framework_AbsoluteLayout_setRightValue | /**
* Sets the 'right' attribute value (distance from the right of the
* component to the right edge of the layout). Currently active units
* are maintained.
*
* @param rightValue
* The value of the 'right' attribute
* @see #setRightUnits(Unit)
*/
public void setRightValue(Float rightValue) {
this.rightValue = rightValue;
markAsDirty();
} | 3.68 |
flink_Tuple18_copy | /**
* Shallow tuple copy.
*
* @return A new Tuple with the same fields as this.
*/
@Override
@SuppressWarnings("unchecked")
public Tuple18<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>
copy() {
return new Tuple18<>(
this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7, this.f8,
this.f9, this.f10, this.f11, this.f12, this.f13, this.f14, this.f15, this.f16,
this.f17);
} | 3.68 |
framework_DateResolution_getResolutionsHigherOrEqualTo | /**
* Returns the resolutions that are higher or equal to the given resolution,
* starting from the given resolution. In other words, passing DAY to this
* method returns DAY, MONTH, YEAR.
*
* @param r
* The resolution to start from
* @return An iterable for the resolutions higher or equal to r
*/
public static Iterable<DateResolution> getResolutionsHigherOrEqualTo(
DateResolution r) {
List<DateResolution> resolutions = new ArrayList<>();
DateResolution[] values = DateResolution.values();
for (int i = r.ordinal(); i < values.length; i++) {
resolutions.add(values[i]);
}
return resolutions;
} | 3.68 |
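A short usage sketch of the iteration described above, assuming Vaadin 8's com.vaadin.shared.ui.datefield.DateResolution location for the enum.

```java
import com.vaadin.shared.ui.datefield.DateResolution;

public class ResolutionExample {
    public static void main(String[] args) {
        // Prints DAY, MONTH, YEAR in that order.
        for (DateResolution r : DateResolution
                .getResolutionsHigherOrEqualTo(DateResolution.DAY)) {
            System.out.println(r);
        }
    }
}
```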
flink_KeyedStream_validateKeyType | /**
* Validates that a given type of element (as encoded by the provided {@link TypeInformation})
* can be used as a key in the {@code DataStream.keyBy()} operation. This is done by searching
* depth-first the key type and checking if each of the composite types satisfies the required
* conditions (see {@link #validateKeyTypeIsHashable(TypeInformation)}).
*
* @param keyType The {@link TypeInformation} of the key.
*/
@SuppressWarnings("rawtypes")
private TypeInformation<KEY> validateKeyType(TypeInformation<KEY> keyType) {
Stack<TypeInformation<?>> stack = new Stack<>();
stack.push(keyType);
List<TypeInformation<?>> unsupportedTypes = new ArrayList<>();
while (!stack.isEmpty()) {
TypeInformation<?> typeInfo = stack.pop();
if (!validateKeyTypeIsHashable(typeInfo)) {
unsupportedTypes.add(typeInfo);
}
if (typeInfo instanceof TupleTypeInfoBase) {
for (int i = 0; i < typeInfo.getArity(); i++) {
stack.push(((TupleTypeInfoBase) typeInfo).getTypeAt(i));
}
}
}
if (!unsupportedTypes.isEmpty()) {
throw new InvalidProgramException(
"Type "
+ keyType
+ " cannot be used as key. Contained "
+ "UNSUPPORTED key types: "
+ StringUtils.join(unsupportedTypes, ", ")
+ ". Look "
+ "at the keyBy() documentation for the conditions a type has to satisfy in order to be "
+ "eligible for a key.");
}
return keyType;
} | 3.68 |
streampipes_AbstractConfigurablePipelineElementBuilder_requiredCollection | /**
* Defines a collection of configuration parameters of the specified staticProperties.
* The developer can fill in the staticProperties multiple times.
*
* @param label The {@link org.apache.streampipes.sdk.helpers.Label}
* that describes why this parameter is needed in a user-friendly manner.
* @param staticProperties A list of {@link org.apache.streampipes.model.staticproperty} elements.
* @return this
*/
public K requiredCollection(Label label, StaticProperty... staticProperties) {
this.staticProperties.add(StaticProperties.collection(label, staticProperties));
return me();
} | 3.68 |
hadoop_CachedSASToken_getExpiry | /**
* Parse the sasExpiry from the SAS token. The sasExpiry is the minimum
* of the ske and se parameters. The se parameter is required and the
* ske parameter is optional.
* @param token an Azure Storage SAS token
* @return the sasExpiry or OffsetDateTime.MIN if invalid.
*/
private static OffsetDateTime getExpiry(String token) {
// return MIN for all invalid input, including a null token
if (token == null) {
return OffsetDateTime.MIN;
}
String signedExpiry = "se=";
int signedExpiryLen = 3;
int start = token.indexOf(signedExpiry);
// return MIN if the required se parameter is absent
if (start == -1) {
return OffsetDateTime.MIN;
}
start += signedExpiryLen;
// extract the value of se parameter
int end = token.indexOf("&", start);
String seValue = (end == -1) ? token.substring(start) : token.substring(start, end);
try {
seValue = URLDecoder.decode(seValue, "utf-8");
} catch (UnsupportedEncodingException ex) {
LOG.error("Error decoding se query parameter ({}) from SAS.", seValue, ex);
return OffsetDateTime.MIN;
}
// parse the ISO 8601 date value; return MIN if invalid
OffsetDateTime seDate = OffsetDateTime.MIN;
try {
seDate = OffsetDateTime.parse(seValue, DateTimeFormatter.ISO_DATE_TIME);
} catch (DateTimeParseException ex) {
LOG.error("Error parsing se query parameter ({}) from SAS.", seValue, ex);
}
String signedKeyExpiry = "ske=";
int signedKeyExpiryLen = 4;
// if ske is present, the sasExpiry is the minimum of ske and se
start = token.indexOf(signedKeyExpiry);
// return seDate if ske is absent
if (start == -1) {
return seDate;
}
start += signedKeyExpiryLen;
// extract the value of ske parameter
end = token.indexOf("&", start);
String skeValue = (end == -1) ? token.substring(start) : token.substring(start, end);
try {
skeValue = URLDecoder.decode(skeValue, "utf-8");
} catch (UnsupportedEncodingException ex) {
LOG.error("Error decoding ske query parameter ({}) from SAS.", skeValue, ex);
return OffsetDateTime.MIN;
}
// parse the ISO 8601 date value; return MIN if invalid
OffsetDateTime skeDate = OffsetDateTime.MIN;
try {
skeDate = OffsetDateTime.parse(skeValue, DateTimeFormatter.ISO_DATE_TIME);
} catch (DateTimeParseException ex) {
LOG.error("Error parsing ske query parameter ({}) from SAS.", skeValue, ex);
return OffsetDateTime.MIN;
}
return skeDate.isBefore(seDate) ? skeDate : seDate;
} | 3.68 |
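The method above is private, so as an illustration here is a standalone sketch of the same idea: pull the ISO 8601 "se" and "ske" values out of a hypothetical SAS token and take the earlier of the two as the effective expiry. Real tokens URL-encode these values, which the original handles and this sketch skips.

```java
import java.time.OffsetDateTime;
import java.time.format.DateTimeFormatter;

public class SasExpirySketch {
    // Extract one query parameter value from the token, or null if absent.
    static String queryParam(String token, String name) {
        int start = token.indexOf(name + "=");
        if (start < 0) {
            return null;
        }
        start += name.length() + 1;
        int end = token.indexOf('&', start);
        return end < 0 ? token.substring(start) : token.substring(start, end);
    }

    public static void main(String[] args) {
        // Hypothetical token with both se and ske parameters.
        String token = "sig=abc&se=2030-01-01T00:00:00Z&ske=2029-06-01T00:00:00Z";
        OffsetDateTime se = OffsetDateTime.parse(
                queryParam(token, "se"), DateTimeFormatter.ISO_DATE_TIME);
        OffsetDateTime ske = OffsetDateTime.parse(
                queryParam(token, "ske"), DateTimeFormatter.ISO_DATE_TIME);
        // The effective expiry is the minimum of ske and se, as in getExpiry above.
        System.out.println(ske.isBefore(se) ? ske : se);
    }
}
```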
hadoop_BufferedIOStatisticsInputStream_getIOStatistics | /**
* Return any IOStatistics offered by the inner stream.
* @return inner IOStatistics or null
*/
@Override
public IOStatistics getIOStatistics() {
return retrieveIOStatistics(in);
} | 3.68 |
hbase_WALUtil_writeBulkLoadMarkerAndSync | /**
* Write a log marker that a bulk load has succeeded and is about to be committed. This write is
* for internal use only. Not for external client consumption.
* @param wal The log to write into.
* @param replicationScope The replication scope of the families in the HRegion
* @param hri A description of the region in the table that we are bulk loading into.
* @param desc A protocol buffers based description of the client's bulk loading
* request
* @return walKey with sequenceid filled out for this bulk load marker
* @throws IOException We will throw an IOException if we can not append to the HLog.
*/
public static WALKeyImpl writeBulkLoadMarkerAndSync(final WAL wal,
final NavigableMap<byte[], Integer> replicationScope, final RegionInfo hri,
final WALProtos.BulkLoadDescriptor desc, final MultiVersionConcurrencyControl mvcc,
final RegionReplicationSink sink) throws IOException {
WALKeyImpl walKey = writeMarker(wal, replicationScope, hri,
WALEdit.createBulkLoadEvent(hri, desc), mvcc, null, sink);
if (LOG.isTraceEnabled()) {
LOG.trace("Appended Bulk Load marker " + TextFormat.shortDebugString(desc));
}
return walKey;
} | 3.68 |
hbase_OrderedBytes_decodeString | /**
* Decode a String value.
*/
public static String decodeString(PositionedByteRange src) {
final byte header = src.get();
if (header == NULL || header == DESCENDING.apply(NULL)) return null;
assert header == TEXT || header == DESCENDING.apply(TEXT);
Order ord = header == TEXT ? ASCENDING : DESCENDING;
byte[] a = src.getBytes();
final int offset = src.getOffset(), start = src.getPosition();
final byte terminator = ord.apply(TERM);
int rawStartPos = offset + start, rawTermPos = rawStartPos;
for (; a[rawTermPos] != terminator; rawTermPos++)
;
src.setPosition(rawTermPos - offset + 1); // advance position to TERM + 1
if (DESCENDING == ord) {
// make a copy so that we don't disturb encoded value with ord.
byte[] copy = new byte[rawTermPos - rawStartPos];
System.arraycopy(a, rawStartPos, copy, 0, copy.length);
ord.apply(copy);
return new String(copy, UTF8);
} else {
return new String(a, rawStartPos, rawTermPos - rawStartPos, UTF8);
}
} | 3.68 |
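A round-trip sketch for the decoder above, assuming HBase's OrderedBytes.encodeString counterpart and the SimplePositionedMutableByteRange helper.

```java
import org.apache.hadoop.hbase.util.Order;
import org.apache.hadoop.hbase.util.OrderedBytes;
import org.apache.hadoop.hbase.util.PositionedByteRange;
import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;

public class OrderedStringExample {
    public static void main(String[] args) {
        PositionedByteRange buf = new SimplePositionedMutableByteRange(32);
        // DESCENDING exercises the copy-and-invert branch of decodeString above.
        OrderedBytes.encodeString(buf, "hello", Order.DESCENDING);
        buf.setPosition(0);
        System.out.println(OrderedBytes.decodeString(buf)); // hello
    }
}
```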
framework_AbstractSelect_removeListener | /**
* @deprecated As of 7.0, replaced by
* {@link #removeItemSetChangeListener(Container.ItemSetChangeListener)}
*/
@Override
@Deprecated
public void removeListener(Container.ItemSetChangeListener listener) {
removeItemSetChangeListener(listener);
} | 3.68 |
morf_AbstractSqlDialectTest_setMaxIdOnAutonumberTable | /**
* Method to override in dialect specific tests to set the max id value on the autonumber table for use during
* testRepairAutoNumberStartPosition
*
* @param id The max id
*/
protected void setMaxIdOnAutonumberTable(@SuppressWarnings("unused") long id) {
} | 3.68 |
hbase_ScannerModel_setCacheBlocks | /**
* @param value true if HFile blocks should be cached on the servers for this scan, false
* otherwise
*/
public void setCacheBlocks(boolean value) {
this.cacheBlocks = value;
} | 3.68 |
dubbo_DefaultApplicationDeployer_getDynamicConfiguration | /**
* Get the instance of {@link DynamicConfiguration} by the specified connection {@link URL} of config-center
*
* @param connectionURL of config-center
* @return non-null
* @since 2.7.5
*/
private DynamicConfiguration getDynamicConfiguration(URL connectionURL) {
String protocol = connectionURL.getProtocol();
DynamicConfigurationFactory factory =
ConfigurationUtils.getDynamicConfigurationFactory(applicationModel, protocol);
return factory.getDynamicConfiguration(connectionURL);
} | 3.68 |
flink_Pattern_consecutive | /**
* Works in conjunction with {@link Pattern#oneOrMore()} or {@link Pattern#times(int)}.
* Specifies that any not matching element breaks the loop.
*
* <p>E.g. a pattern like:
*
* <pre>{@code
* Pattern.<Event>begin("start").where(new SimpleCondition<Event>() {
* @Override
* public boolean filter(Event value) throws Exception {
* return value.getName().equals("c");
* }
* })
* .followedBy("middle").where(new SimpleCondition<Event>() {
* @Override
* public boolean filter(Event value) throws Exception {
* return value.getName().equals("a");
* }
* }).oneOrMore().consecutive()
* .followedBy("end1").where(new SimpleCondition<Event>() {
* @Override
* public boolean filter(Event value) throws Exception {
* return value.getName().equals("b");
* }
* });
* }</pre>
*
* <p>for a sequence: C D A1 A2 A3 D A4 B
*
* <p>will generate matches: {C A1 B}, {C A1 A2 B}, {C A1 A2 A3 B}
*
* <p>By default a relaxed continuity is applied.
*
* @return pattern with continuity changed to strict
*/
public Pattern<T, F> consecutive() {
quantifier.consecutive();
return this;
} | 3.68 |
pulsar_ClientCnxIdleState_tryMarkUsingAndClearIdleTime | /**
* Changes the idle-state of the connection to #{@link State#USING} if possible. This method
* is used when a connection is borrowed, and resets {@link #idleMarkTime} if the change to
* #{@link State#USING} succeeds.
* @return Whether the change of idle-state to #{@link State#USING} succeeded. False is returned only if the
* connection has already been released.
*/
public boolean tryMarkUsingAndClearIdleTime() {
while (true) {
// Ensure not released
if (isReleased()) {
return false;
}
// Try mark release
if (compareAndSetIdleStat(State.IDLE, State.USING)) {
idleMarkTime = 0;
return true;
}
if (compareAndSetIdleStat(State.RELEASING, State.USING)) {
idleMarkTime = 0;
return true;
}
if (isUsing()){
return true;
}
}
} | 3.68 |
hbase_EventHandler_getInformativeName | /**
* Event implementations should override this method to provide an informative name about what
* event they are handling. For example, event-specific information such as which region or server
* is being processed should be included if possible.
*/
public String getInformativeName() {
return this.getClass().toString();
} | 3.68 |
flink_IntervalJoinOperator_processElement2 | /**
* Process a {@link StreamRecord} from the right stream. Whenever a {@link StreamRecord} arrives
* at the right stream, it will get added to the right buffer. Possible join candidates for that
* element will be looked up from the left buffer and if the pair lies within the user defined
* boundaries, it gets passed to the {@link ProcessJoinFunction}.
*
* @param record An incoming record to be joined
* @throws Exception Can throw an exception during state access
*/
@Override
public void processElement2(StreamRecord<T2> record) throws Exception {
processElement(record, rightBuffer, leftBuffer, -upperBound, -lowerBound, false);
} | 3.68 |
framework_GridElement_getHeaderRow | /**
* Get a header row by index.
*
* @param rowIndex
* Row index
* @return The th element of the row
*/
public TestBenchElement getHeaderRow(int rowIndex) {
return getSubPart("#header[" + rowIndex + "]");
} | 3.68 |
hbase_FSHLogProvider_createWriter | /**
* Public because of FSHLog. Should be package-private
*/
public static Writer createWriter(final Configuration conf, final FileSystem fs, final Path path,
final boolean overwritable, long blocksize) throws IOException {
// Configuration already does caching for the Class lookup.
Class<? extends Writer> logWriterClass =
conf.getClass(WRITER_IMPL, ProtobufLogWriter.class, Writer.class);
Writer writer = null;
try {
writer = logWriterClass.getDeclaredConstructor().newInstance();
FileSystem rootFs = FileSystem.get(path.toUri(), conf);
writer.init(rootFs, path, conf, overwritable, blocksize,
StreamSlowMonitor.create(conf, path.getName()));
return writer;
} catch (Exception e) {
if (e instanceof CommonFSUtils.StreamLacksCapabilityException) {
LOG.error("The RegionServer write ahead log provider for FileSystem implementations "
+ "relies on the ability to call " + e.getMessage() + " for proper operation during "
+ "component failures, but the current FileSystem does not support doing so. Please "
+ "check the config value of '" + CommonFSUtils.HBASE_WAL_DIR + "' and ensure "
+ "it points to a FileSystem mount that has suitable capabilities for output streams.");
} else {
LOG.debug("Error instantiating log writer.", e);
}
throw new IOException("cannot get log writer", e);
}
} | 3.68 |
pulsar_NamespaceBundleStatsComparator_compare | // sort in reverse order, maximum loaded should be on top
public int compare(String a, String b) {
int result = 0;
if (this.resType == ResourceType.CPU) {
result = map.get(a).compareByMsgRate(map.get(b));
} else if (this.resType == ResourceType.Memory) {
result = map.get(a).compareByTopicConnections(map.get(b));
} else if (this.resType == ResourceType.BandwidthIn) {
result = map.get(a).compareByBandwidthIn(map.get(b));
} else if (this.resType == ResourceType.BandwidthOut) {
result = map.get(a).compareByBandwidthOut(map.get(b));
} else {
result = map.get(a).compareTo(map.get(b));
}
if (result > 0) {
return -1;
} else {
return 1;
}
} | 3.68 |
hmily_OriginTrackedPropertiesLoader_isListDelimiter | /**
* Returns whether the current character is an unescaped list delimiter.
*
* @return true if the current character is an unescaped list delimiter
*/
public boolean isListDelimiter() {
return !this.escaped && this.character == ',';
} | 3.68 |
framework_HierarchicalContainer_moveAfterSibling | /**
* Moves a node (an Item) in the container immediately after a sibling node.
* The two nodes must have the same parent in the container.
*
* @param itemId
* the identifier of the moved node (Item)
* @param siblingId
* the identifier of the reference node (Item), after which the
* other node will be located
*/
public void moveAfterSibling(Object itemId, Object siblingId) {
Object parent2 = getParent(itemId);
LinkedList<Object> childrenList;
if (parent2 == null) {
childrenList = roots;
} else {
childrenList = children.get(parent2);
}
if (siblingId == null) {
childrenList.remove(itemId);
childrenList.addFirst(itemId);
} else {
int oldIndex = childrenList.indexOf(itemId);
int indexOfSibling = childrenList.indexOf(siblingId);
if (indexOfSibling != -1 && oldIndex != -1) {
int newIndex;
if (oldIndex > indexOfSibling) {
newIndex = indexOfSibling + 1;
} else {
newIndex = indexOfSibling;
}
childrenList.remove(oldIndex);
childrenList.add(newIndex, itemId);
} else {
throw new IllegalArgumentException(
"Given identifiers no not have the same parent.");
}
}
fireItemSetChange();
} | 3.68 |
hbase_HRegion_decorateRegionConfiguration | /**
* This method modifies the region's configuration in order to inject replication-related features
* @param conf region configurations
*/
private static void decorateRegionConfiguration(Configuration conf) {
if (ReplicationUtils.isReplicationForBulkLoadDataEnabled(conf)) {
String plugins = conf.get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, "");
String replicationCoprocessorClass = ReplicationObserver.class.getCanonicalName();
if (!plugins.contains(replicationCoprocessorClass)) {
conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
(plugins.equals("") ? "" : (plugins + ",")) + replicationCoprocessorClass);
}
}
} | 3.68 |
aws-saas-boost_ExistingEnvironmentFactory_getExistingSaaSBoostAnalyticsDeployed | // VisibleForTesting
static boolean getExistingSaaSBoostAnalyticsDeployed(SsmClient ssm, String environmentName) {
LOGGER.debug("Getting existing SaaS Boost Analytics module deployed from Parameter Store");
boolean analyticsDeployed = false;
try {
GetParameterResponse response = ssm.getParameter(request -> request
.name("/saas-boost/" + environmentName + "/METRICS_ANALYTICS_DEPLOYED")
);
analyticsDeployed = Boolean.parseBoolean(response.parameter().value());
} catch (ParameterNotFoundException paramStoreError) {
// this means the parameter doesn't exist, so ignore
} catch (SdkServiceException ssmError) {
// TODO CloudFormation should own this parameter, not the installer...
LOGGER.error("ssm:GetParameter error {}", ssmError.getMessage());
LOGGER.error(Utils.getFullStackTrace(ssmError));
throw ssmError;
}
LOGGER.info("Loaded analytics deployed {}", analyticsDeployed);
return analyticsDeployed;
} | 3.68 |
hbase_QuotaTableUtil_makeFilter | /**
* Converts a QuotaFilter to a serializable FilterList.
*/
public static Filter makeFilter(final QuotaFilter filter) {
FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL);
if (StringUtils.isNotEmpty(filter.getUserFilter())) {
FilterList userFilters = new FilterList(FilterList.Operator.MUST_PASS_ONE);
boolean hasFilter = false;
if (StringUtils.isNotEmpty(filter.getNamespaceFilter())) {
FilterList nsFilters = new FilterList(FilterList.Operator.MUST_PASS_ALL);
nsFilters.addFilter(new RowFilter(CompareOperator.EQUAL,
new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0)));
nsFilters.addFilter(new QualifierFilter(CompareOperator.EQUAL, new RegexStringComparator(
getSettingsQualifierRegexForUserNamespace(filter.getNamespaceFilter()), 0)));
userFilters.addFilter(nsFilters);
hasFilter = true;
}
if (StringUtils.isNotEmpty(filter.getTableFilter())) {
FilterList tableFilters = new FilterList(FilterList.Operator.MUST_PASS_ALL);
tableFilters.addFilter(new RowFilter(CompareOperator.EQUAL,
new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0)));
tableFilters.addFilter(new QualifierFilter(CompareOperator.EQUAL, new RegexStringComparator(
getSettingsQualifierRegexForUserTable(filter.getTableFilter()), 0)));
userFilters.addFilter(tableFilters);
hasFilter = true;
}
if (!hasFilter) {
userFilters.addFilter(new RowFilter(CompareOperator.EQUAL,
new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0)));
}
filterList.addFilter(userFilters);
} else if (StringUtils.isNotEmpty(filter.getTableFilter())) {
filterList.addFilter(new RowFilter(CompareOperator.EQUAL,
new RegexStringComparator(getTableRowKeyRegex(filter.getTableFilter()), 0)));
} else if (StringUtils.isNotEmpty(filter.getNamespaceFilter())) {
filterList.addFilter(new RowFilter(CompareOperator.EQUAL,
new RegexStringComparator(getNamespaceRowKeyRegex(filter.getNamespaceFilter()), 0)));
} else if (StringUtils.isNotEmpty(filter.getRegionServerFilter())) {
filterList.addFilter(new RowFilter(CompareOperator.EQUAL,
new RegexStringComparator(getRegionServerRowKeyRegex(filter.getRegionServerFilter()), 0)));
}
return filterList;
} | 3.68 |
flink_RequestedGlobalProperties_setHashPartitioned | /**
* Sets these properties to request a hash partitioning on the given fields.
*
* <p>If the fields are provided as {@link FieldSet}, then any permutation of the fields is a
* valid partitioning, including subsets. If the fields are given as a {@link FieldList}, then
* only an exact partitioning on the fields matches this requested partitioning.
*
* @param partitionedFields The key fields for the partitioning.
*/
public void setHashPartitioned(FieldSet partitionedFields) {
if (partitionedFields == null) {
throw new NullPointerException();
}
this.partitioning = PartitioningProperty.HASH_PARTITIONED;
this.partitioningFields = partitionedFields;
this.ordering = null;
} | 3.68 |
flink_LogicalTypeCasts_supportsAvoidingCast | /** See {@link #supportsAvoidingCast(LogicalType, LogicalType)}. */
public static boolean supportsAvoidingCast(
List<LogicalType> sourceTypes, List<LogicalType> targetTypes) {
if (sourceTypes.size() != targetTypes.size()) {
return false;
}
for (int i = 0; i < sourceTypes.size(); i++) {
if (!supportsAvoidingCast(sourceTypes.get(i), targetTypes.get(i))) {
return false;
}
}
return true;
} | 3.68 |
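A brief usage sketch of the list overload above, assuming Flink's logical type classes (IntType, BigIntType) and the org.apache.flink.table.types.logical.utils package for LogicalTypeCasts.

```java
import java.util.Arrays;
import java.util.List;

import org.apache.flink.table.types.logical.BigIntType;
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.utils.LogicalTypeCasts;

public class AvoidingCastExample {
    public static void main(String[] args) {
        List<LogicalType> ints = Arrays.asList(new IntType(), new IntType());
        List<LogicalType> mixed = Arrays.asList(new IntType(), new BigIntType());

        // Identical element types: no cast needed -> true.
        System.out.println(LogicalTypeCasts.supportsAvoidingCast(ints, ints));
        // INT -> BIGINT needs a cast in the second position -> false.
        System.out.println(LogicalTypeCasts.supportsAvoidingCast(ints, mixed));
    }
}
```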
framework_ConnectorBundleLoader_getLoadedBundles | /**
* Gets a list of all currently loaded bundle names.
* <p>
* This method is intended for testing the loading mechanism.
*
* @return a list of bundles, not <code>null</code>
*
* @since 8.0.3
*/
public List<String> getLoadedBundles() {
List<String> bundles = new ArrayList<>();
JsArrayString keys = asyncBlockLoaders.getKeys();
for (int i = 0; i < keys.length(); i++) {
String bundleName = keys.get(i);
if (isBundleLoaded(bundleName)) {
bundles.add(bundleName);
}
}
return bundles;
} | 3.68 |
hudi_BaseHoodieWriteClient_commitLogCompaction | /**
* Commit a log compaction operation. Allow passing additional meta-data to be stored in commit instant file.
*
* @param logCompactionInstantTime Log Compaction Instant Time
* @param metadata All the metadata that gets stored along with a commit
* @param extraMetadata Extra Metadata to be stored
*/
public void commitLogCompaction(String logCompactionInstantTime, HoodieCommitMetadata metadata,
Option<Map<String, String>> extraMetadata) {
HoodieTable table = createTable(config, context.getHadoopConf().get());
extraMetadata.ifPresent(m -> m.forEach(metadata::addMetadata));
completeLogCompaction(metadata, table, logCompactionInstantTime);
} | 3.68 |
flink_BlobServerConnection_readFileFully | /**
* Reads a full file from <tt>inputStream</tt> into <tt>incomingFile</tt> returning its
* checksum.
*
* @param inputStream stream to read from
* @param incomingFile file to write to
* @param buf An auxiliary buffer for data serialization/deserialization
* @return the received file's content hash
* @throws IOException thrown if an I/O error occurs while reading/writing data from/to the
* respective streams
*/
private static byte[] readFileFully(
final InputStream inputStream, final File incomingFile, final byte[] buf)
throws IOException {
MessageDigest md = BlobUtils.createMessageDigest();
try (FileOutputStream fos = new FileOutputStream(incomingFile)) {
while (true) {
final int bytesExpected = readLength(inputStream);
if (bytesExpected == -1) {
// done
break;
}
if (bytesExpected > BUFFER_SIZE) {
throw new IOException("Unexpected number of incoming bytes: " + bytesExpected);
}
readFully(inputStream, buf, 0, bytesExpected, "buffer");
fos.write(buf, 0, bytesExpected);
md.update(buf, 0, bytesExpected);
}
return md.digest();
}
} | 3.68 |
AreaShop_TeleportCommand_canUse | /**
* Check if a person can teleport to the region (assuming he is not teleporting to a sign).
* @param person The person to check
* @param region The region to check for
* @return true if the person can teleport to it, otherwise false
*/
public static boolean canUse(CommandSender person, GeneralRegion region) {
if(!(person instanceof Player)) {
return false;
}
Player player = (Player)person;
return player.hasPermission("areashop.teleportall")
|| region.isOwner(player) && player.hasPermission("areashop.teleport")
|| region.isAvailable() && player.hasPermission("areashop.teleportavailable")
|| region.getFriendsFeature().getFriends().contains(player.getUniqueId()) && player.hasPermission("areashop.teleportfriend");
} | 3.68 |
flink_StreamProjection_projectTuple15 | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14>
SingleOutputStreamOperator<
Tuple15<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14>>
projectTuple15() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple15<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14>>
tType =
new TupleTypeInfo<
Tuple15<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14>>(fTypes);
return dataStream.transform(
"Projection",
tType,
new StreamProject<
IN,
Tuple15<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14>>(
fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.68 |
flink_HiveParserQBParseInfo_setClusterByExprForClause | /** Set the Cluster By AST for the clause. */
public void setClusterByExprForClause(String clause, HiveParserASTNode ast) {
destToClusterby.put(clause, ast);
} | 3.68 |
hbase_UserMetrics_getNameAsString | /** Returns the user name as a string */
default String getNameAsString() {
return Bytes.toStringBinary(getUserName());
} | 3.68 |
framework_Slot_getSpacingElement | /**
* Get the element which is added to make the spacing.
*
* @return the spacing element
*/
@SuppressWarnings("deprecation")
public com.google.gwt.user.client.Element getSpacingElement() {
return DOM.asOld(spacer);
} | 3.68 |
hadoop_TimelineReaderUtils_joinAndEscapeStrings | /**
* Join different strings in the passed string array delimited by passed
* delimiter with delimiter and escape character escaped using passed escape
* char.
* @param strs strings to be joined.
* @param delimiterChar delimiter used to join strings.
* @param escapeChar escape character used to escape delimiter and escape
* char.
* @return a single string joined using delimiter and properly escaped.
*/
static String joinAndEscapeStrings(final String[] strs,
final char delimiterChar, final char escapeChar) {
int len = strs.length;
// Escape each string in string array.
for (int index = 0; index < len; index++) {
if (strs[index] == null) {
return null;
}
strs[index] = escapeString(strs[index], delimiterChar, escapeChar);
}
// Join the strings after they have been escaped.
return StringUtils.join(strs, delimiterChar);
} | 3.68 |
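Since the helper above is package-private, here is a standalone sketch of the same escape-then-join idea (not the Hadoop class itself): every delimiter or escape character inside a value is prefixed with the escape character before joining.

```java
public class EscapeJoinSketch {
    // Escape every delimiter and escape character inside a single value.
    static String escape(String value, char delimiter, char escapeChar) {
        StringBuilder sb = new StringBuilder();
        for (char c : value.toCharArray()) {
            if (c == delimiter || c == escapeChar) {
                sb.append(escapeChar);
            }
            sb.append(c);
        }
        return sb.toString();
    }

    // Escape each value, then join them with the delimiter.
    static String joinAndEscape(String[] values, char delimiter, char escapeChar) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < values.length; i++) {
            if (i > 0) {
                sb.append(delimiter);
            }
            sb.append(escape(values[i], delimiter, escapeChar));
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        // Prints a\!b!c : the embedded '!' is escaped, the joining '!' is not.
        System.out.println(joinAndEscape(new String[] {"a!b", "c"}, '!', '\\'));
    }
}
```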
framework_Navigator_fireBeforeViewChange | /**
* Fires an event before an imminent view change.
* <p>
* Listeners are called in registration order. If any listener returns
* <code>false</code>, the rest of the listeners are not called and the view
* change is blocked.
* <p>
* The view change listeners may also e.g. open a warning or question dialog
* and save the parameters to re-initiate the navigation operation upon user
* action.
*
* @param event
* view change event (not null, view change not yet performed)
* @return true if the view change should be allowed, false to silently
* block the navigation operation
*/
protected boolean fireBeforeViewChange(ViewChangeEvent event) {
// a copy of the listener list is needed to avoid
// ConcurrentModificationException as a listener can add/remove
// listeners
for (ViewChangeListener l : new ArrayList<>(listeners)) {
if (!l.beforeViewChange(event)) {
return false;
}
}
return true;
} | 3.68 |
flink_TemplateUtils_asFunctionTemplates | /** Converts {@link FunctionHint}s to {@link FunctionTemplate}. */
static Set<FunctionTemplate> asFunctionTemplates(
DataTypeFactory typeFactory, Set<FunctionHint> hints) {
return hints.stream()
.map(
hint -> {
try {
return FunctionTemplate.fromAnnotation(typeFactory, hint);
} catch (Throwable t) {
throw extractionError(t, "Error in function hint annotation.");
}
})
.collect(Collectors.toCollection(LinkedHashSet::new));
} | 3.68 |
framework_Escalator_setHeightByRows | /**
* Sets the number of rows that should be visible in Escalator's body, while
* {@link #getHeightMode()} is {@link HeightMode#ROW}.
* <p>
* If Escalator is currently not in {@link HeightMode#ROW}, the given value
* is remembered, and applied once the mode is applied.
*
* @param rows
* the number of rows that should be visible in Escalator's body
* @throws IllegalArgumentException
* if {@code rows} is ≤ 0, {@link Double#isInfinite(double)
* infinite} or {@link Double#isNaN(double) NaN}.
* @see #setHeightMode(HeightMode)
*/
public void setHeightByRows(double rows) throws IllegalArgumentException {
if (rows <= 0) {
throw new IllegalArgumentException(
"The number of rows must be a positive number.");
} else if (Double.isInfinite(rows)) {
throw new IllegalArgumentException(
"The number of rows must be finite.");
} else if (Double.isNaN(rows)) {
throw new IllegalArgumentException("The number must not be NaN.");
}
heightByRows = rows;
applyHeightByRows();
} | 3.68 |
framework_AbstractClientConnector_registerRpc | /**
* Registers an RPC interface implementation for this component.
*
* A component can listen to multiple RPC interfaces, and subclasses can
* register additional implementations.
*
* @since 7.0
*
* @param implementation
* RPC interface implementation. Also used to deduce the type.
*/
protected <T extends ServerRpc> void registerRpc(T implementation) {
// Search upwards until an interface is found. It must be found as T
// extends ServerRpc
Class<?> cls = implementation.getClass();
Class<ServerRpc> serverRpcClass = getServerRpcInterface(cls);
while (cls != null && serverRpcClass == null) {
cls = cls.getSuperclass();
serverRpcClass = getServerRpcInterface(cls);
}
if (serverRpcClass == null) {
throw new RuntimeException(
"No interface T extends ServerRpc found in the class hierarchy.");
}
registerRpc(implementation, serverRpcClass);
} | 3.68 |
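A hedged sketch of the usual registration pattern; ClickCountingComponent and ClickRpc are illustrative names, not framework types:

import com.vaadin.shared.communication.ServerRpc;
import com.vaadin.ui.AbstractComponent;

public class ClickCountingComponent extends AbstractComponent {

    // The declared interface is what registerRpc deduces as T.
    public interface ClickRpc extends ServerRpc {
        void clicked();
    }

    private int clickCount;

    public ClickCountingComponent() {
        registerRpc(new ClickRpc() {
            @Override
            public void clicked() {
                clickCount++;
                markAsDirty(); // repaint so the updated server-side state is sent out
            }
        });
    }
}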
framework_Action_getIcon | /**
* Returns the action's icon.
*
* @return the action's Icon.
*/
public Resource getIcon() {
return icon;
} | 3.68 |
framework_AbstractSelect_containsId | /**
 * Tests if the collection contains an item with the given id.
 *
 * @param itemId
 *            the id of the item to be tested.
 * @return {@code true} if the container contains an item with the given id,
 *         {@code false} otherwise.
*/
@Override
public boolean containsId(Object itemId) {
if (itemId != null) {
return items.containsId(itemId);
} else {
return false;
}
} | 3.68 |
hadoop_ResourceRequest_priority | /**
* Set the <code>priority</code> of the request.
* @see ResourceRequest#setPriority(Priority)
* @param priority <code>priority</code> of the request
* @return {@link ResourceRequestBuilder}
*/
@Public
@Stable
public ResourceRequestBuilder priority(Priority priority) {
resourceRequest.setPriority(priority);
return this;
} | 3.68 |
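A hedged sketch of the fluent builder in context; the priority, capability and container count are illustrative values:

import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;

public class ResourceRequestSketch {
    public static ResourceRequest twoAnyContainers() {
        return ResourceRequest.newBuilder()
                .priority(Priority.newInstance(1))         // builder call shown above
                .resourceName(ResourceRequest.ANY)         // no locality constraint
                .capability(Resource.newInstance(1024, 1)) // 1024 MB, 1 vcore
                .numContainers(2)
                .build();
    }
}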
hbase_MasterProcedureScheduler_getPeerQueue | // ============================================================================
// Peer Queue Lookup Helpers
// ============================================================================
private PeerQueue getPeerQueue(String peerId) {
PeerQueue node = AvlTree.get(peerMap, peerId, PEER_QUEUE_KEY_COMPARATOR);
if (node != null) {
return node;
}
node = new PeerQueue(peerId, locking.getPeerLock(peerId));
peerMap = AvlTree.insert(peerMap, node);
return node;
} | 3.68 |
zilla_StructFlyweightGenerator_defaultPriorField | // TODO: Varuint32 should NEVER be < 0
private void defaultPriorField(
CodeBlock.Builder code)
{
if (priorDefaultValue != null && priorDefaultedIsPrimitive)
{
code.addStatement("$L($L)", priorFieldIfDefaulted, defaultName(priorFieldIfDefaulted));
}
else
{
// Attempt to default the entire object. This will fail if it has any required fields.
if (priorDefaultValue == NULL_DEFAULT)
{
if (isVarintType(priorSizeType))
{
code.addStatement("$L(-1)", methodName(priorSizeName))
.addStatement("lastFieldSet = $L", index(priorFieldIfDefaulted));
}
else if (isVaruintType(priorSizeType) || isVaruintnType(priorSizeType))
{
code.addStatement("$L(0)", methodName(priorSizeName))
.addStatement("lastFieldSet = $L", index(priorFieldIfDefaulted));
}
else if (priorDefaultedIsString)
{
code.addStatement("$L((String) null)", priorFieldIfDefaulted);
}
else
{
code.addStatement("$L(b -> { })", priorFieldIfDefaulted);
code.addStatement("int limit = limit()");
code.addStatement("limit($L)", dynamicOffset(priorSizeName));
code.addStatement("$L(-1)", methodName(priorSizeName));
code.addStatement("limit(limit)");
}
}
else if (priorDefaultedIsEnum)
{
code.addStatement("$L(b -> b.set($L))", priorFieldIfDefaulted, defaultName(priorFieldIfDefaulted));
}
else if (priorDefaultedIsString)
{
code.addStatement("$L($L)", priorFieldIfDefaulted, priorDefaultValue);
}
else
{
code.addStatement("$L(b -> { })", priorFieldIfDefaulted);
}
}
} | 3.68 |
hudi_ArrayColumnReader_readPrimitiveTypedRow | // Needs to be consistent with VectorizedPrimitiveColumnReader#readBatchHelper
// TODO Reduce the duplicated code
private Object readPrimitiveTypedRow(LogicalType category) {
switch (category.getTypeRoot()) {
case CHAR:
case VARCHAR:
case BINARY:
case VARBINARY:
return dataColumn.readString();
case BOOLEAN:
return dataColumn.readBoolean();
case TIME_WITHOUT_TIME_ZONE:
case DATE:
case INTEGER:
return dataColumn.readInteger();
case TINYINT:
return dataColumn.readTinyInt();
case SMALLINT:
return dataColumn.readSmallInt();
case BIGINT:
return dataColumn.readLong();
case FLOAT:
return dataColumn.readFloat();
case DOUBLE:
return dataColumn.readDouble();
case DECIMAL:
switch (descriptor.getPrimitiveType().getPrimitiveTypeName()) {
case INT32:
return dataColumn.readInteger();
case INT64:
return dataColumn.readLong();
case BINARY:
case FIXED_LEN_BYTE_ARRAY:
return dataColumn.readString();
default:
throw new AssertionError();
}
case TIMESTAMP_WITHOUT_TIME_ZONE:
case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
return dataColumn.readTimestamp();
default:
throw new RuntimeException("Unsupported type in the list: " + type);
}
} | 3.68 |
hbase_RegionReplicaUtil_getRegionInfoForReplica | /**
 * Returns the RegionInfo for the given replicaId. A RegionInfo corresponds to a range of a table,
 * but more than one "instance" of the same range can be deployed; the instances are differentiated
 * by the replicaId.
 * @return a RegionInfo object corresponding to the same range (table, start and end key), but
* for the given replicaId.
*/
public static RegionInfo getRegionInfoForReplica(RegionInfo regionInfo, int replicaId) {
if (regionInfo.getReplicaId() == replicaId) {
return regionInfo;
}
return RegionInfoBuilder.newBuilder(regionInfo).setReplicaId(replicaId).build();
} | 3.68 |
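A short usage sketch, assuming an HBase 2.x client classpath; the table name is illustrative:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;

public class ReplicaInfoSketch {
    public static void main(String[] args) {
        RegionInfo primary =
                RegionInfoBuilder.newBuilder(TableName.valueOf("demo")).build();
        // Same table and key range, only the replicaId differs.
        RegionInfo replicaOne = RegionReplicaUtil.getRegionInfoForReplica(primary, 1);
        System.out.println(replicaOne.getReplicaId()); // prints 1
    }
}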
framework_DropEvent_getMouseEventDetails | /**
* Gets the mouse event details for the drop event.
*
* @return Mouse event details object containing information about the drop
* event.
*/
public MouseEventDetails getMouseEventDetails() {
return mouseEventDetails;
} | 3.68 |
querydsl_MapExpressionBase_isNotEmpty | /**
 * Create a {@code !this.isEmpty()} expression
*
* @return !this.isEmpty()
*/
public final BooleanExpression isNotEmpty() {
return isEmpty().not();
} | 3.68 |
querydsl_Expressions_timeOperation | /**
* Create a new Operation expression
*
* @param type type of expression
* @param operator operator
* @param args operation arguments
* @return operation expression
*/
public static <T extends Comparable<?>> TimeOperation<T> timeOperation(Class<? extends T> type,
Operator operator, Expression<?>... args) {
return new TimeOperation<T>(type, operator, args);
} | 3.68 |
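A minimal sketch of wiring an operator into the factory; it rebuilds roughly what Expressions.currentTime() provides and assumes the standard Ops.DateTimeOps enum:

import java.sql.Time;

import com.querydsl.core.types.Ops;
import com.querydsl.core.types.dsl.Expressions;
import com.querydsl.core.types.dsl.TimeOperation;

public class TimeOperationSketch {
    public static TimeOperation<Time> currentTime() {
        // CURRENT_TIME takes no operands; operators such as Ops.DateTimeOps.ADD_HOURS
        // would take their operand expressions as the trailing arguments instead.
        return Expressions.timeOperation(Time.class, Ops.DateTimeOps.CURRENT_TIME);
    }
}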
flink_SourceFunctionProvider_of | /** Helper method for creating a SourceFunctionProvider with a provided source parallelism. */
static SourceFunctionProvider of(
SourceFunction<RowData> sourceFunction,
boolean isBounded,
@Nullable Integer sourceParallelism) {
return new SourceFunctionProvider() {
@Override
public SourceFunction<RowData> createSourceFunction() {
return sourceFunction;
}
@Override
public boolean isBounded() {
return isBounded;
}
@Override
public Optional<Integer> getParallelism() {
return Optional.ofNullable(sourceParallelism);
}
};
} | 3.68 |
flink_IterativeDataSet_closeWith | /**
* Closes the iteration and specifies a termination criterion. This method defines the end of
* the iterative program part.
*
 * <p>The termination criterion is a means of dynamically signaling the iteration to halt. It is
 * expressed via a data set that triggers termination of the loop as soon as that data set is empty.
* A typical way of using the termination criterion is to have a filter that filters out all
* elements that are considered non-converged. As soon as no more such elements exist, the
* iteration finishes.
*
* @param iterationResult The data set that will be fed back to the next iteration.
 * @param terminationCriterion The data set used to trigger termination of the iteration once it
 *     is empty.
* @return The DataSet that represents the result of the iteration, after the computation has
* terminated.
* @see DataSet#iterate(int)
*/
public DataSet<T> closeWith(DataSet<T> iterationResult, DataSet<?> terminationCriterion) {
return new BulkIterationResultSet<T>(
getExecutionEnvironment(), getType(), this, iterationResult, terminationCriterion);
} | 3.68 |
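A hedged, self-contained sketch of the filter-as-termination-criterion pattern described in the Javadoc above; the halving step and the 0.01 threshold are illustrative:

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.operators.IterativeDataSet;

public class CloseWithSketch {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        // At most 100 iterations over the initial values.
        IterativeDataSet<Double> iteration = env.fromElements(1.0, 2.0, 3.0).iterate(100);

        // One iteration step: halve every element.
        DataSet<Double> nextValues = iteration.map(v -> v / 2.0).returns(Double.class);

        // Termination criterion: the elements that have not yet converged.
        // The loop halts as soon as this data set becomes empty.
        DataSet<Double> notConverged = nextValues.filter(v -> v > 0.01);

        DataSet<Double> result = iteration.closeWith(nextValues, notConverged);
        result.print();
    }
}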
framework_VComboBox_asFraction | /**
* Returns the percentage value as a fraction, e.g. 42% -> 0.42
*
 * @param percentage
 *            the percentage value as a string, e.g. {@code "42%"}
 * @return the percentage as a fraction
 */
private float asFraction(String percentage) {
String trimmed = percentage.trim();
String withoutPercentSign = trimmed.substring(0,
trimmed.length() - 1);
float asFraction = Float.parseFloat(withoutPercentSign) / 100;
return asFraction;
} | 3.68 |