name (string, lengths 12-36.5k per column stats: 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
hudi_AvroInternalSchemaConverter_buildAvroSchemaFromType | /**
* Converts a Hudi type into an Avro Schema.
*
* @param type a Hudi type.
* @param recordName the record name
* @return an Avro schema matching this type
*/
public static Schema buildAvroSchemaFromType(Type type, String recordName) {
Map<Type, Schema> cache = new HashMap<>();
return visitInternalSchemaToBuildAvroSchema(type, cache, recordName);
} | 3.68 |
pulsar_ManagedLedgerConfig_getLedgerRolloverTimeout | /**
* @return the ledgerRolloverTimeout
*/
public int getLedgerRolloverTimeout() {
return ledgerRolloverTimeout;
} | 3.68 |
pulsar_PulsarClientImplementationBindingImpl_decodeKeyValueSchemaInfo | /**
* Decode the key/value schema info to get key schema info and value schema info.
*
* @param schemaInfo key/value schema info.
* @return the pair of key schema info and value schema info
*/
public KeyValue<SchemaInfo, SchemaInfo> decodeKeyValueSchemaInfo(SchemaInfo schemaInfo) {
return KeyValueSchemaInfo.decodeKeyValueSchemaInfo(schemaInfo);
} | 3.68 |
flink_PartitionRequestQueue_getAvailableReaders | /**
* Accesses internal state to verify reader registration in the unit tests.
*
* <p><strong>Do not use anywhere else!</strong>
*
* @return readers which are enqueued as available for transferring data
*/
@VisibleForTesting
ArrayDeque<NetworkSequenceViewReader> getAvailableReaders() {
return availableReaders;
} | 3.68 |
dubbo_ZookeeperDynamicConfiguration_getInternalProperty | /**
* @param key e.g., {service}.configurators, {service}.tagrouters, {group}.dubbo.properties
* @return the configuration content stored under the given key
*/
@Override
public String getInternalProperty(String key) {
return zkClient.getContent(buildPathKey("", key));
} | 3.68 |
hudi_HoodieSparkKeyGeneratorFactory_getKeyGenerator | /**
* Instantiate {@link BuiltinKeyGenerator}.
*
* @param properties properties map.
* @return the key generator thus instantiated.
*/
public static Option<BuiltinKeyGenerator> getKeyGenerator(Properties properties) {
TypedProperties typedProperties = new TypedProperties();
typedProperties.putAll(properties);
if (Option.ofNullable(properties.get(HoodieWriteConfig.KEYGENERATOR_CLASS_NAME.key()))
.map(v -> v.equals(NonpartitionedKeyGenerator.class.getName())).orElse(false)) {
return Option.empty(); // Do not instantiate NonPartitionKeyGen
} else {
try {
return Option.of((BuiltinKeyGenerator) HoodieSparkKeyGeneratorFactory.createKeyGenerator(typedProperties));
} catch (ClassCastException cce) {
throw new HoodieIOException("Only those key generators implementing BuiltInKeyGenerator interface is supported with virtual keys");
} catch (IOException e) {
throw new HoodieIOException("Key generator instantiation failed ", e);
}
}
} | 3.68 |
flink_CrossOperator_types | /** @deprecated Deprecated method only kept for compatibility. */
@SuppressWarnings({"hiding", "unchecked"})
@Deprecated
@PublicEvolving
public <OUT extends Tuple> CrossOperator<I1, I2, OUT> types(Class<?>... types) {
TupleTypeInfo<OUT> typeInfo = (TupleTypeInfo<OUT>) this.getResultType();
if (types.length != typeInfo.getArity()) {
throw new InvalidProgramException("Provided types do not match projection.");
}
for (int i = 0; i < types.length; i++) {
Class<?> typeClass = types[i];
if (!typeClass.equals(typeInfo.getTypeAt(i).getTypeClass())) {
throw new InvalidProgramException(
"Provided type "
+ typeClass.getSimpleName()
+ " at position "
+ i
+ " does not match projection");
}
}
return (CrossOperator<I1, I2, OUT>) this;
} | 3.68 |
framework_VComboBox_onNullSelected | /**
* Triggered when an empty value is selected and null selection is allowed.
*/
public void onNullSelected() {
if (enableDebug) {
debug("VComboBox: onNullSelected()");
}
dataReceivedHandler.cancelPendingPostFiltering();
currentSuggestion = null;
setText(getEmptySelectionCaption());
setSelectedItemIcon(null);
if (!"".equals(selectedOptionKey) || selectedOptionKey != null) {
selectedOptionKey = "";
setSelectedCaption("");
connector.sendSelection(null);
// currentPage = 0;
}
updatePlaceholder();
suggestionPopup.hide();
} | 3.68 |
hadoop_FederationStateStoreUtils_encodeWritable | /**
* Encode for Writable objects.
* This method will convert the writable object to a base64 string.
*
* @param key Writable Key.
* @return base64 string.
* @throws IOException raised on errors performing I/O.
*/
public static String encodeWritable(Writable key) throws IOException {
ByteArrayOutputStream bos = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(bos);
key.write(dos);
dos.flush();
return Base64.getUrlEncoder().encodeToString(bos.toByteArray());
} | 3.68 |
hadoop_ExitUtil_resetFirstExitException | /**
* Reset the tracking of process termination. This is for use in unit tests
* where one test in the suite expects an exit but others do not.
*/
public static void resetFirstExitException() {
FIRST_EXIT_EXCEPTION.set(null);
} | 3.68 |
AreaShop_RegionSign_update | /**
* Update this sign.
* @return true if the update was successful, otherwise false
*/
public boolean update() {
// Ignore updates of signs in chunks that are not loaded
Location signLocation = getLocation();
if(signLocation == null
|| signLocation.getWorld() == null
|| !signLocation.getWorld().isChunkLoaded(signLocation.getBlockX() >> 4, signLocation.getBlockZ() >> 4)) {
return false;
}
if(getRegion().isDeleted()) {
return false;
}
YamlConfiguration regionConfig = getRegion().getConfig();
ConfigurationSection signConfig = getProfile();
Block block = signLocation.getBlock();
if(signConfig == null || !signConfig.isSet(getRegion().getState().getValue())) {
block.setType(Material.AIR);
return true;
}
ConfigurationSection stateConfig = signConfig.getConfigurationSection(getRegion().getState().getValue());
// Get the lines
String[] signLines = new String[4];
boolean signEmpty = true;
for(int i = 0; i < 4; i++) {
signLines[i] = stateConfig.getString("line" + (i + 1));
signEmpty &= (signLines[i] == null || signLines[i].isEmpty());
}
if(signEmpty) {
block.setType(Material.AIR);
return true;
}
// Place the sign back (with proper rotation and type) after it has been hidden or (indirectly) destroyed
if(!Materials.isSign(block.getType())) {
Material signType = getMaterial();
// Don't do physics here, we first need to update the direction
block.setType(signType, false);
// This triggers a physics update, which pops the sign if not attached properly
if (!AreaShop.getInstance().getBukkitHandler().setSignFacing(block, getFacing())) {
AreaShop.warn("Failed to update the facing direction of the sign at", getStringLocation(), "to ", getFacing(), ", region:", getRegion().getName());
}
// Check if the sign has popped
if(!Materials.isSign(block.getType())) {
AreaShop.warn("Setting sign", key, "of region", getRegion().getName(), "failed, could not set sign block back");
return false;
}
}
// Save current rotation and type
if(!regionConfig.isString("general.signs." + key + ".signType")) {
getRegion().setSetting("general.signs." + key + ".signType", block.getType().name());
}
if(!regionConfig.isString("general.signs." + key + ".facing")) {
BlockFace signFacing = AreaShop.getInstance().getBukkitHandler().getSignFacing(block);
getRegion().setSetting("general.signs." + key + ".facing", signFacing == null ? null : signFacing.toString());
}
// Apply replacements and color and then set it on the sign
Sign signState = (Sign) block.getState();
for(int i = 0; i < signLines.length; i++) {
if(signLines[i] == null) {
signState.setLine(i, "");
continue;
}
signLines[i] = Message.fromString(signLines[i]).replacements(getRegion()).getSingle();
signLines[i] = Utils.applyColors(signLines[i]);
signState.setLine(i, signLines[i]);
}
signState.update();
return true;
} | 3.68 |
hadoop_StageConfig_withJobDirectories | /**
* Set the job directories from the attempt directories
* information. Does not set task attempt fields.
* @param dirs source of directories.
* @return this
*/
public StageConfig withJobDirectories(
final ManifestCommitterSupport.AttemptDirectories dirs) {
checkOpen();
withJobAttemptDir(dirs.getJobAttemptDir())
.withJobAttemptTaskSubDir(dirs.getJobAttemptTaskSubDir())
.withDestinationDir(dirs.getOutputPath())
.withOutputTempSubDir(dirs.getOutputTempSubDir())
.withTaskManifestDir(dirs.getTaskManifestDir());
return this;
} | 3.68 |
hadoop_AllocateRequest_releaseList | /**
* Set the <code>releaseList</code> of the request.
* @see AllocateRequest#setReleaseList(List)
* @param releaseList <code>releaseList</code> of the request
* @return {@link AllocateRequestBuilder}
*/
@Public
@Stable
public AllocateRequestBuilder releaseList(List<ContainerId> releaseList) {
allocateRequest.setReleaseList(releaseList);
return this;
} | 3.68 |
hbase_CompoundConfiguration_add | /**
* Add Hadoop Configuration object to config list. The added configuration overrides the previous
* ones if there are name collisions.
* @param conf configuration object
* @return this, for builder pattern
*/
public CompoundConfiguration add(final Configuration conf) {
freezeMutableConf();
if (conf instanceof CompoundConfiguration) {
this.configs.addAll(0, ((CompoundConfiguration) conf).configs);
return this;
}
// put new config at the front of the list (top priority)
this.configs.add(0, new ImmutableConfWrapper(conf));
return this;
} | 3.68 |
framework_VaadinSession_getUIByEmbedId | /**
* Finds the UI with the corresponding embed id.
*
* @since 7.2
* @param embedId
* the embed id
* @return the UI with the corresponding embed id, or <code>null</code> if
* no UI is found
*
* @see UI#getEmbedId()
*/
public UI getUIByEmbedId(String embedId) {
Integer uiId = embedIdMap.get(embedId);
if (uiId == null) {
return null;
} else {
return getUIById(uiId.intValue());
}
} | 3.68 |
querydsl_GeometryExpression_symDifference | /**
* Returns a geometric object that represents the
* Point set symmetric difference of this geometric object with anotherGeometry.
*
* @param geometry other geometry
* @return symmetric difference between this and the geometry
*/
public GeometryExpression<Geometry> symDifference(Expression<? extends Geometry> geometry) {
return GeometryExpressions.geometryOperation(SpatialOps.SYMDIFFERENCE, mixin, geometry);
} | 3.68 |
flink_CatalogManager_getCatalogBaseTable | /**
* Retrieves a fully qualified table. If the path is not yet fully qualified use {@link
* #qualifyIdentifier(UnresolvedIdentifier)} first.
*
* @param objectIdentifier full path of the table to retrieve
* @return resolved table that the path points to or empty if it does not exist.
*/
@Override
public Optional<ResolvedCatalogBaseTable<?>> getCatalogBaseTable(
ObjectIdentifier objectIdentifier) {
ContextResolvedTable resolvedTable = getTable(objectIdentifier).orElse(null);
return resolvedTable == null
? Optional.empty()
: Optional.of(resolvedTable.getResolvedTable());
} | 3.68 |
hadoop_WordStandardDeviation_reduce | /**
* Sums all the individual values within the iterator and writes them to the
* same key.
*
* @param key
* This will be one of three constants: LENGTH_STR, COUNT_STR, or
* SQUARE_STR.
* @param values
* This will be an iterator of all the values associated with that
* key.
*/
public void reduce(Text key, Iterable<LongWritable> values, Context context)
throws IOException, InterruptedException {
long sum = 0; // use long to avoid integer overflow when summing values
for (LongWritable value : values) {
sum += value.get();
}
val.set(sum);
context.write(key, val);
} | 3.68 |
rocketmq-connect_DorisStreamLoader_basicAuthHeader | /**
* Constructs the authentication information; the authentication method used by Doris here is Basic Auth.
*
* @param username the Doris username
* @param password the Doris password
* @return the value of the HTTP Basic Authorization header
*/
private String basicAuthHeader(String username, String password) {
final String tobeEncode = username + ":" + password;
byte[] encoded = Base64.encodeBase64(tobeEncode.getBytes(StandardCharsets.UTF_8));
return "Basic " + new String(encoded);
} | 3.68 |
hadoop_ShortWritable_set | /**
* Set the value of this ShortWritable.
* @param value input value.
*/
public void set(short value) {
this.value = value;
} | 3.68 |
morf_DeleteStatement_delete | /**
* Constructs a Delete Statement. See class-level documentation for usage instructions.
*
* @param table the database table to delete from.
* @return A builder.
*/
public static DeleteStatementBuilder delete(TableReference table) {
return new DeleteStatementBuilder(table);
} | 3.68 |
framework_Slot_setCaptionPosition | /**
* Set the position of the caption relative to the slot.
*
* @param captionPosition
* The position of the caption
*/
public void setCaptionPosition(CaptionPosition captionPosition) {
if (caption == null) {
return;
}
captionWrap.removeClassName("v-caption-on-"
+ this.captionPosition.name().toLowerCase(Locale.ROOT));
this.captionPosition = captionPosition;
if (captionPosition == CaptionPosition.BOTTOM
|| captionPosition == CaptionPosition.RIGHT) {
captionWrap.appendChild(caption);
} else {
captionWrap.insertFirst(caption);
}
captionWrap.addClassName("v-caption-on-"
+ captionPosition.name().toLowerCase(Locale.ROOT));
} | 3.68 |
hadoop_TreeWalk_getPendingQueue | /**
* @return the Deque containing the pending paths.
*/
protected Deque<TreePath> getPendingQueue() {
return pending;
} | 3.68 |
framework_PropertyFormatter_valueChange | /**
* Listens for changes in the datasource.
*
* This should not be called directly.
*/
@Override
public void valueChange(Property.ValueChangeEvent event) {
fireValueChange();
} | 3.68 |
framework_VAbstractOrderedLayout_updateCaptionOffset | /**
* Update the offset of the caption relative to the slot.
* <p>
* For internal use only. May be removed or replaced in the future.
*
* @param caption
* The caption element
* @since 7.2
*/
public void updateCaptionOffset(Element caption) {
updateCaptionOffset(DOM.asOld(caption));
} | 3.68 |
hbase_MasterObserver_preListTablesInRSGroup | /**
* Called before listing all tables in the region server group.
* @param ctx the environment to interact with the framework and master
* @param groupName name of the region server group
*/
default void preListTablesInRSGroup(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String groupName) throws IOException {
} | 3.68 |
framework_ScrollbarBundle_setScrollbarThickness | /**
* Sets the scrollbar's thickness.
* <p>
* If the thickness is set to 0, the scrollbar will be treated as an
* "invisible" scrollbar. This means, the DOM structure will be given a
* non-zero size, but {@link #getScrollbarThickness()} will still return the
* value 0.
*
* @param px
* the scrollbar's thickness in pixels
*/
public final void setScrollbarThickness(double px) {
isInvisibleScrollbar = (px == 0);
if (isInvisibleScrollbar) {
Event.sinkEvents(root, Event.ONSCROLL);
Event.setEventListener(root,
event -> invisibleScrollbarTemporaryResizer.show());
root.getStyle().setVisibility(Visibility.HIDDEN);
} else {
Event.sinkEvents(root, 0);
Event.setEventListener(root, null);
root.getStyle().clearVisibility();
}
internalSetScrollbarThickness(Math.max(1d, px));
} | 3.68 |
dubbo_CollectionUtils_size | /**
* Get the size of the specified {@link Collection}
*
* @param collection the specified {@link Collection}
* @return the size of the collection, or 0 if it is null; never negative
* @since 2.7.6
*/
public static int size(Collection<?> collection) {
return collection == null ? 0 : collection.size();
} | 3.68 |
flink_ListView_get | /**
* Returns an iterable of the list view.
*
* @throws Exception Thrown if the system cannot get data.
* @return The iterable of the list.
*/
public Iterable<T> get() throws Exception {
return list;
} | 3.68 |
flink_MapValue_entrySet | /*
* (non-Javadoc)
* @see java.util.Map#entrySet()
*/
@Override
public Set<Entry<K, V>> entrySet() {
return this.map.entrySet();
} | 3.68 |
AreaShop_RegionSign_needsPeriodicUpdate | /**
* Check if the sign needs to update periodically.
* @return true if it needs periodic updates, otherwise false
*/
public boolean needsPeriodicUpdate() {
ConfigurationSection signConfig = getProfile();
if(signConfig == null || !signConfig.isSet(getRegion().getState().getValue().toLowerCase())) {
return false;
}
ConfigurationSection stateConfig = signConfig.getConfigurationSection(getRegion().getState().getValue().toLowerCase());
if(stateConfig == null) {
return false;
}
// Check the lines for the timeleft tag
for(int i = 1; i <= 4; i++) {
String line = stateConfig.getString("line" + i);
if(line != null && !line.isEmpty() && line.contains(Message.VARIABLE_START + AreaShop.tagTimeLeft + Message.VARIABLE_END)) {
return true;
}
}
return false;
} | 3.68 |
dubbo_ClusterInterceptor_intercept | /**
* Override this method or {@link #before(AbstractClusterInvoker, Invocation)}
* and {@link #after(AbstractClusterInvoker, Invocation)} methods to add your own logic expected to be
* executed before and after invoke.
*
* @param clusterInvoker the cluster invoker being intercepted
* @param invocation the current invocation
* @return the invocation result
* @throws RpcException if the underlying invocation fails
*/
default Result intercept(AbstractClusterInvoker<?> clusterInvoker, Invocation invocation) throws RpcException {
return clusterInvoker.invoke(invocation);
} | 3.68 |
pulsar_AbstractHdfsConnector_resetHDFSResources | /*
* Reset Hadoop Configuration and FileSystem based on the supplied configuration resources.
*/
protected HdfsResources resetHDFSResources(HdfsSinkConfig hdfsSinkConfig) throws IOException {
Configuration config = new ExtendedConfiguration();
config.setClassLoader(Thread.currentThread().getContextClassLoader());
getConfig(config, connectorConfig.getHdfsConfigResources());
// first check for timeout on HDFS connection, because FileSystem has a hard coded 15 minute timeout
checkHdfsUriForTimeout(config);
/* Disable caching of Configuration and FileSystem objects, else we cannot reconfigure
* the processor without a complete restart
*/
String disableCacheName = String.format("fs.%s.impl.disable.cache",
FileSystem.getDefaultUri(config).getScheme());
config.set(disableCacheName, "true");
// If kerberos is enabled, create the file system as the kerberos principal
// -- use RESOURCES_LOCK to guarantee UserGroupInformation is accessed by only a single thread at a time
FileSystem fs;
UserGroupInformation ugi;
synchronized (RESOURCES_LOCK) {
if (SecurityUtil.isSecurityEnabled(config)) {
ugi = SecurityUtil.loginKerberos(config,
connectorConfig.getKerberosUserPrincipal(), connectorConfig.getKeytab());
fs = getFileSystemAsUser(config, ugi);
} else {
config.set("ipc.client.fallback-to-simple-auth-allowed", "true");
config.set("hadoop.security.authentication", "simple");
ugi = SecurityUtil.loginSimple(config);
fs = getFileSystemAsUser(config, ugi);
}
}
return new HdfsResources(config, fs, ugi);
} | 3.68 |
hudi_CachingPath_getPathWithoutSchemeAndAuthority | /**
* This is {@link Path#getPathWithoutSchemeAndAuthority(Path)} counterpart, instantiating
* {@link CachingPath}
*/
public static Path getPathWithoutSchemeAndAuthority(Path path) {
// This code depends on Path.toString() to remove the leading slash before
// the drive specification on Windows.
return path.isUriPathAbsolute()
? createRelativePathUnsafe(path.toUri().getPath())
: path;
} | 3.68 |
hbase_MultiByteBuff_slice | /**
* Returns an MBB which is a sliced version of this MBB. The position, limit and mark of the new
* MBB will be independent of those of the original MBB. The content of the new MBB will start at
* this MBB's current position
* @return a sliced MBB
*/
@Override
public MultiByteBuff slice() {
checkRefCount();
ByteBuffer[] copy = new ByteBuffer[this.limitedItemIndex - this.curItemIndex + 1];
for (int i = curItemIndex, j = 0; i <= this.limitedItemIndex; i++, j++) {
copy[j] = this.items[i].slice();
}
return new MultiByteBuff(refCnt, copy);
} | 3.68 |
flink_LogicalType_asSummaryString | /**
* Returns a string that summarizes this type for printing to a console. An implementation might
* shorten long names or skip very specific properties.
*
* <p>Use {@link #asSerializableString()} for a type string that fully serializes this instance.
*
* @return summary string of this type for debugging purposes
*/
public String asSummaryString() {
return asSerializableString();
} | 3.68 |
flink_MemoryMappedBoundedData_alignSize | /**
* Rounds the size down to the next multiple of the {@link #PAGE_SIZE}. We need to round down
* here to not exceed the original maximum size value. Otherwise, values like INT_MAX would
* round up to overflow the valid maximum size of a memory mapping region in Java.
*/
private static int alignSize(int maxRegionSize) {
checkArgument(maxRegionSize >= PAGE_SIZE);
return maxRegionSize - (maxRegionSize % PAGE_SIZE);
} | 3.68 |
hadoop_ServiceLauncher_bindCommandOptions | /**
* Set the {@link #commandOptions} field to the result of
* {@link #createOptions()}; protected for subclasses and test access.
*/
protected void bindCommandOptions() {
commandOptions = createOptions();
} | 3.68 |
morf_DatabaseMetaDataProvider_loadAllColumns | /**
* Creates a map of maps of all table columns,
* first indexed by their case-agnostic table names,
* and then indexed by their case-agnostic column names.
*
* @return Map of table columns by table names and column names.
*/
protected Map<AName, Map<AName, ColumnBuilder>> loadAllColumns() {
final Map<AName, ImmutableMap.Builder<AName, ColumnBuilder>> columnMappingBuilders = Maps.toMap(tableNames.get().keySet(), k -> ImmutableMap.builder());
try {
final DatabaseMetaData databaseMetaData = connection.getMetaData();
try (ResultSet columnResultSet = databaseMetaData.getColumns(null, schemaName, null, null)) {
while (columnResultSet.next()) {
String tableName = columnResultSet.getString(COLUMN_TABLE_NAME);
RealName realTableName = tableNames.get().get(named(tableName));
if (realTableName == null) {
continue; // ignore columns of unknown tables
}
RealName columnName = readColumnName(columnResultSet);
try {
String typeName = columnResultSet.getString(COLUMN_TYPE_NAME);
int typeCode = columnResultSet.getInt(COLUMN_DATA_TYPE);
int width = columnResultSet.getInt(COLUMN_SIZE);
int scale = columnResultSet.getInt(COLUMN_DECIMAL_DIGITS);
try {
DataType dataType = dataTypeFromSqlType(typeCode, typeName, width);
ColumnBuilder column = SchemaUtils.column(columnName.getRealName(), dataType, width, scale);
column = setColumnNullability(realTableName, column, columnResultSet);
column = setColumnAutonumbered(realTableName, column, columnResultSet);
column = setColumnDefaultValue(realTableName, column, columnResultSet);
column = setAdditionalColumnMetadata(realTableName, column, columnResultSet);
if (log.isDebugEnabled()) {
log.debug("Found column [" + column + "] on table [" + tableName + "]: " + column);
}
columnMappingBuilders.get(realTableName).put(columnName, column);
}
catch (UnexpectedDataTypeException e) {
ColumnBuilder column = new UnsupportedDataTypeColumn(columnName, typeName, typeCode, width, scale, columnResultSet);
if (log.isDebugEnabled()) {
log.debug("Found unsupported column [" + column + "] on table [" + tableName + "]: " + column);
}
columnMappingBuilders.get(realTableName).put(columnName, column);
}
}
catch (SQLException e) {
throw new RuntimeSqlException("Error reading metadata for column ["+columnName+"] on table ["+tableName+"]", e);
}
}
// Maps.transformValues creates a view over the given map of builders
// Therefore we need to make a copy to avoid building the builders repeatedly
return ImmutableMap.copyOf(Maps.transformValues(columnMappingBuilders, ImmutableMap.Builder::build));
}
}
catch (SQLException e) {
throw new RuntimeSqlException(e);
}
} | 3.68 |
hbase_CachedEntryQueue_pollLast | /** Returns The last element in this queue, or {@code null} if the queue is empty. */
public Map.Entry<BlockCacheKey, BucketEntry> pollLast() {
return queue.pollLast();
} | 3.68 |
hadoop_LoadedManifestData_getEntrySequenceFile | /**
* Get the entry sequence data as a file.
*/
public File getEntrySequenceFile() {
return new File(entrySequenceData.toUri());
} | 3.68 |
shardingsphere-elasticjob_GuaranteeService_registerComplete | /**
* Register complete.
*
* @param shardingItems to be registered sharding items
*/
public void registerComplete(final Collection<Integer> shardingItems) {
for (int each : shardingItems) {
jobNodeStorage.createJobNodeIfNeeded(GuaranteeNode.getCompletedNode(each));
}
} | 3.68 |
flink_DeltaIteration_getName | /**
* Gets the name from this iteration.
*
* @return The name of the iteration.
*/
public String getName() {
return name;
} | 3.68 |
querydsl_MetaDataExporter_setInnerClassesForKeys | /**
* Set whether inner classes should be created for keys
*
* @param innerClassesForKeys
*/
public void setInnerClassesForKeys(boolean innerClassesForKeys) {
module.bind(SQLCodegenModule.INNER_CLASSES_FOR_KEYS, innerClassesForKeys);
} | 3.68 |
AreaShop_Utils_getImportantBuyRegions | /**
* Get the most important buy AreaShop regions.
* - Returns highest priority, child instead of parent regions.
* @param location The location to check for regions
* @return empty list if no regions are found, 1 member if a single region has the highest priority, more if multiple regions share the same priority
*/
public static List<BuyRegion> getImportantBuyRegions(Location location) {
List<BuyRegion> result = new ArrayList<>();
for(GeneralRegion region : getImportantRegions(location, GeneralRegion.RegionType.BUY)) {
result.add((BuyRegion)region);
}
return result;
} | 3.68 |
framework_VEmbedded_getParameters | /**
* Returns a map (name -> value) of all parameters in the UIDL.
* <p>
* For internal use only. May be removed or replaced in the future.
*
* @param uidl
* the UIDL to map
* @return the parameter map
*/
public static Map<String, String> getParameters(UIDL uidl) {
Map<String, String> parameters = new HashMap<>();
for (Object child : uidl) {
if (child instanceof UIDL) {
UIDL childUIDL = (UIDL) child;
if (childUIDL.getTag().equals("embeddedparam")) {
String name = childUIDL.getStringAttribute("name");
String value = childUIDL.getStringAttribute("value");
parameters.put(name, value);
}
}
}
return parameters;
} | 3.68 |
framework_StateChangeEvent_getChangedProperties | /**
* Gets the properties that have changed.
*
* @return a set of names of the changed properties
*
* @deprecated As of 7.0.1, use {@link #hasPropertyChanged(String)} instead
* for improved performance.
*/
@Deprecated
public Set<String> getChangedProperties() {
if (changedPropertiesSet == null) {
Profiler.enter("StateChangeEvent.getChangedProperties populate");
changedPropertiesSet = new HashSet<>();
getChangedPropertiesFastSet().addAllTo(changedPropertiesSet);
Profiler.leave("StateChangeEvent.getChangedProperties populate");
}
return changedPropertiesSet;
} | 3.68 |
flink_BufferCompressor_compressToIntermediateBuffer | /**
* Compresses the given {@link Buffer} using {@link BlockCompressor}. The compressed data will
* be stored in the intermediate buffer of this {@link BufferCompressor} and returned to the
* caller. The caller must guarantee that the returned {@link Buffer} has been freed when
* calling the method next time.
*
* <p>Note that the compression will always start from offset 0 up to the size of the input {@link
* Buffer}.
*/
public Buffer compressToIntermediateBuffer(Buffer buffer) {
int compressedLen;
if ((compressedLen = compress(buffer)) == 0) {
return buffer;
}
internalBuffer.setCompressed(true);
internalBuffer.setSize(compressedLen);
return internalBuffer.retainBuffer();
} | 3.68 |
hbase_MetaFixer_calculateMerges | /**
* Run through <code>overlaps</code> and return a list of merges to run. Presumes overlaps are
* ordered (which they are coming out of the CatalogJanitor consistency report).
* @param maxMergeCount Maximum regions to merge at a time (avoid merging 100k regions in one go!)
*/
static List<SortedSet<RegionInfo>> calculateMerges(int maxMergeCount,
List<Pair<RegionInfo, RegionInfo>> overlaps) {
if (overlaps.isEmpty()) {
LOG.debug("No overlaps.");
return Collections.emptyList();
}
List<SortedSet<RegionInfo>> merges = new ArrayList<>();
// First group overlaps by table then calculate merge table by table.
ListMultimap<TableName, Pair<RegionInfo, RegionInfo>> overlapGroups =
ArrayListMultimap.create();
for (Pair<RegionInfo, RegionInfo> pair : overlaps) {
overlapGroups.put(pair.getFirst().getTable(), pair);
}
for (Map.Entry<TableName, Collection<Pair<RegionInfo, RegionInfo>>> entry : overlapGroups
.asMap().entrySet()) {
calculateTableMerges(maxMergeCount, merges, entry.getValue());
}
return merges;
} | 3.68 |
hbase_SnapshotDescriptionUtils_getWorkingSnapshotDir | /**
* Get the directory to build a snapshot, before it is finalized
* @param snapshotName name of the snapshot
* @param rootDir root directory of the hbase installation
* @param conf Configuration of the HBase instance
* @return {@link Path} where one can build a snapshot
*/
public static Path getWorkingSnapshotDir(String snapshotName, final Path rootDir,
Configuration conf) {
return getSpecifiedSnapshotDir(getWorkingSnapshotDir(rootDir, conf), snapshotName);
} | 3.68 |
hbase_ScannerContext_hasAnyLimit | /** Returns true if any limit can be enforced within the checker's scope */
boolean hasAnyLimit(LimitScope checkerScope) {
return hasBatchLimit(checkerScope) || hasSizeLimit(checkerScope) || hasTimeLimit(checkerScope);
} | 3.68 |
framework_LayoutEvents_getChildComponent | /**
* Returns the direct child component of the layout which contains the
* clicked component.
*
* For the clicked component inside that child component of the layout,
* see {@link #getClickedComponent()}.
*
* @return direct child {@link Component} of the layout which contains
* the clicked Component, null if none found
*/
public Component getChildComponent() {
return childComponent;
} | 3.68 |
flink_PartitionRequestClientFactory_createPartitionRequestClient | /**
* Atomically establishes a TCP connection to the given remote address and creates a {@link
* NettyPartitionRequestClient} instance for this connection.
*/
NettyPartitionRequestClient createPartitionRequestClient(ConnectionID connectionId)
throws IOException, InterruptedException {
// We map the input ConnectionID to a new value to restrict the number of tcp connections
connectionId =
new ConnectionID(
connectionId.getResourceID(),
connectionId.getAddress(),
connectionId.getConnectionIndex() % maxNumberOfConnections);
while (true) {
final CompletableFuture<NettyPartitionRequestClient> newClientFuture =
new CompletableFuture<>();
final CompletableFuture<NettyPartitionRequestClient> clientFuture =
clients.putIfAbsent(connectionId, newClientFuture);
final NettyPartitionRequestClient client;
if (clientFuture == null) {
try {
client = connectWithRetries(connectionId);
} catch (Throwable e) {
newClientFuture.completeExceptionally(
new IOException("Could not create Netty client.", e));
clients.remove(connectionId, newClientFuture);
throw e;
}
newClientFuture.complete(client);
} else {
try {
client = clientFuture.get();
} catch (ExecutionException e) {
ExceptionUtils.rethrowIOException(ExceptionUtils.stripExecutionException(e));
return null;
}
}
// Make sure to increment the reference count before handing a client
// out to ensure correct bookkeeping for channel closing.
if (client.validateClientAndIncrementReferenceCounter()) {
return client;
} else if (client.canBeDisposed()) {
client.closeConnection();
} else {
destroyPartitionRequestClient(connectionId, client);
}
}
} | 3.68 |
AreaShop_GeneralRegion_getLastActiveTime | /**
* Get the time that the player was last active.
* @return the current time if the owner is online, the last online time if offline, or -1 if the region has no owner
*/
public long getLastActiveTime() {
if(getOwner() == null) {
return -1;
}
Player player = Bukkit.getPlayer(getOwner());
long savedTime = getLongSetting("general.lastActive");
// Check if he is online currently
if(player != null || savedTime == 0) {
return Calendar.getInstance().getTimeInMillis();
}
return savedTime;
} | 3.68 |
framework_ApplicationConnection_getLoadingIndicator | /**
* Returns the loading indicator used by this ApplicationConnection.
*
* @return The loading indicator for this ApplicationConnection
*/
public VLoadingIndicator getLoadingIndicator() {
return loadingIndicator;
} | 3.68 |
flink_CheckpointOptions_getCheckpointType | /** Returns the type of checkpoint to perform. */
public SnapshotType getCheckpointType() {
return checkpointType;
} | 3.68 |
dubbo_ServiceConfig_findConfiguredPort | /**
* Register port and bind port for the provider, can be configured separately
* Configuration priority: environment variable -> java system properties -> port property in protocol config file
* -> protocol default port
*
* @param protocolConfig the protocol configuration
* @param name the protocol name
* @return the port to register with the registry
*/
private static synchronized Integer findConfiguredPort(
ProtocolConfig protocolConfig,
ProviderConfig provider,
ExtensionLoader<Protocol> extensionLoader,
String name,
Map<String, String> map) {
Integer portToBind;
// parse bind port from environment
String port = getValueFromConfig(protocolConfig, DUBBO_PORT_TO_BIND);
portToBind = parsePort(port);
// if there's no bind port found from environment, keep looking up.
if (portToBind == null) {
portToBind = protocolConfig.getPort();
if (provider != null && (portToBind == null || portToBind == 0)) {
portToBind = provider.getPort();
}
final int defaultPort = extensionLoader.getExtension(name).getDefaultPort();
if (portToBind == null || portToBind == 0) {
portToBind = defaultPort;
}
if (portToBind <= 0) {
portToBind = getRandomPort(name);
if (portToBind == null || portToBind < 0) {
portToBind = getAvailablePort(defaultPort);
putRandomPort(name, portToBind);
}
}
}
// save bind port, used as url's key later
map.put(BIND_PORT_KEY, String.valueOf(portToBind));
// bind port is not used as registry port by default
String portToRegistryStr = getValueFromConfig(protocolConfig, DUBBO_PORT_TO_REGISTRY);
Integer portToRegistry = parsePort(portToRegistryStr);
if (portToRegistry == null) {
portToRegistry = portToBind;
}
return portToRegistry;
} | 3.68 |
flink_ChannelReaderInputView_nextSegment | /**
* Gets the next segment from the asynchronous block reader. If more requests are to be issued,
* the method first sends a new request with the current memory segment. If no more requests are
* pending, the method adds the segment to the readers return queue, which thereby effectively
* collects all memory segments. Secondly, the method fetches the next non-consumed segment
* returned by the reader. If no further segments are available, this method throws an {@link
* EOFException}.
*
* @param current The memory segment used for the next request.
* @return The memory segment to read from next.
* @throws EOFException Thrown, if no further segments are available.
* @throws IOException Thrown, if an I/O error occurred while reading
* @see AbstractPagedInputView#nextSegment(org.apache.flink.core.memory.MemorySegment)
*/
@Override
protected MemorySegment nextSegment(MemorySegment current) throws IOException {
// check if we are at our end
if (this.inLastBlock) {
throw new EOFException();
}
// send a request first. if we have only a single segment, this same segment will be the one
// obtained in the next lines
if (current != null) {
sendReadRequest(current);
}
// get the next segment
final MemorySegment seg = this.reader.getNextReturnedBlock();
// check the header
if (seg.getShort(0) != ChannelWriterOutputView.HEADER_MAGIC_NUMBER) {
throw new IOException(
"The current block does not belong to a ChannelWriterOutputView / "
+ "ChannelReaderInputView: Wrong magic number.");
}
if ((seg.getShort(ChannelWriterOutputView.HEADER_FLAGS_OFFSET)
& ChannelWriterOutputView.FLAG_LAST_BLOCK)
!= 0) {
// last block
this.numRequestsRemaining = 0;
this.inLastBlock = true;
}
return seg;
} | 3.68 |
hbase_RegionStates_regionNamesToString | // ==========================================================================
// ToString helpers
// ==========================================================================
public static String regionNamesToString(final Collection<byte[]> regions) {
final StringBuilder sb = new StringBuilder();
final Iterator<byte[]> it = regions.iterator();
sb.append("[");
if (it.hasNext()) {
sb.append(Bytes.toStringBinary(it.next()));
while (it.hasNext()) {
sb.append(", ");
sb.append(Bytes.toStringBinary(it.next()));
}
}
sb.append("]");
return sb.toString();
} | 3.68 |
flink_SkipListUtils_putNextIndexNode | /**
* Puts next key pointer on the given index level to key space.
*
* @param memorySegment memory segment for key space.
* @param offset offset of key space in the memory segment.
* @param level level of index.
* @param nextKeyPointer next key pointer on the given level.
*/
public static void putNextIndexNode(
MemorySegment memorySegment, int offset, int level, long nextKeyPointer) {
memorySegment.putLong(offset + INDEX_NEXT_OFFSET_BY_LEVEL_ARRAY[level], nextKeyPointer);
} | 3.68 |
pulsar_Transactions_getTransactionBufferStatsAsync | /**
* Get transaction buffer stats.
*
* @param topic the topic of getting transaction buffer stats
* @return the future stats of transaction buffer in topic.
*/
default CompletableFuture<TransactionBufferStats> getTransactionBufferStatsAsync(String topic) {
return getTransactionBufferStatsAsync(topic, false, false);
} | 3.68 |
hadoop_SaslParticipant_createStreamPair | /**
* Return some input/output streams that may henceforth have their
* communication encrypted, depending on the negotiated quality of protection.
*
* @param out output stream to wrap
* @param in input stream to wrap
* @return IOStreamPair wrapping the streams
*/
public IOStreamPair createStreamPair(DataOutputStream out,
DataInputStream in) {
if (saslClient != null) {
return new IOStreamPair(
new SaslInputStream(in, saslClient),
new SaslOutputStream(out, saslClient));
} else {
return new IOStreamPair(
new SaslInputStream(in, saslServer),
new SaslOutputStream(out, saslServer));
}
} | 3.68 |
flink_DataStreamSink_setUidHash | /**
* Sets a user-provided hash for this operator. This will be used AS IS to create the
* JobVertexID.
*
* <p>The user provided hash is an alternative to the generated hashes, that is considered when
* identifying an operator through the default hash mechanics fails (e.g. because of changes
* between Flink versions).
*
* <p><strong>Important</strong>: this should be used as a workaround or for troubleshooting.
* The provided hash needs to be unique per transformation and job. Otherwise, job submission
* will fail. Furthermore, you cannot assign user-specified hash to intermediate nodes in an
* operator chain and trying so will let your job fail.
*
* <p>A use case for this is in migration between Flink versions or changing the jobs in a way
* that changes the automatically generated hashes. In this case, providing the previous hashes
* directly through this method (e.g. obtained from old logs) can help to reestablish a lost
* mapping from states to their target operator.
*
* @param uidHash The user provided hash for this operator. This will become the JobVertexID,
* which is shown in the logs and web ui.
* @return The operator with the user provided hash.
*/
@PublicEvolving
public DataStreamSink<T> setUidHash(String uidHash) {
if (!(transformation instanceof LegacySinkTransformation)) {
throw new UnsupportedOperationException(
"Cannot set a custom UID hash on a non-legacy sink");
}
transformation.setUidHash(uidHash);
return this;
} | 3.68 |
hadoop_CalculationContext_getCurrentMinimumCapacityEntry | /**
* A shorthand to return the minimum capacity vector entry for the currently evaluated child and
* resource name.
*
* @param label node label
* @return capacity vector entry
*/
public QueueCapacityVectorEntry getCurrentMinimumCapacityEntry(String label) {
return queue.getConfiguredCapacityVector(label).getResource(resourceName);
} | 3.68 |
framework_RpcDataProviderExtension_getValueChangeListeners | /**
* Gets a collection copy of currently active ValueChangeListeners.
*
* @return collection of value change listeners
*/
public Collection<GridValueChangeListener> getValueChangeListeners() {
return new HashSet<GridValueChangeListener>(activeItemMap.values());
} | 3.68 |
hbase_LeaseManager_cancelLease | /**
* Client explicitly cancels a lease.
* @param leaseName name of lease
*/
public void cancelLease(final String leaseName) throws LeaseException {
removeLease(leaseName);
} | 3.68 |
pulsar_PulsarLedgerUnderreplicationManager_isLedgerBeingReplicated | /**
* Check whether the ledger is being replicated by any bookie.
*/
@Override
public boolean isLedgerBeingReplicated(long ledgerId) throws ReplicationException {
try {
return store.exists(getUrLedgerLockPath(urLockPath, ledgerId)).get(BLOCKING_CALL_TIMEOUT, MILLISECONDS);
} catch (Exception e) {
throw new ReplicationException.UnavailableException("Failed to check if ledger is being replicated", e);
}
} | 3.68 |
flink_FsStateBackend_getBasePath | /**
* Gets the base directory where all the checkpoints are stored. The job-specific checkpoint
* directory is created inside this directory.
*
* @return The base directory for checkpoints.
* @deprecated Deprecated in favor of {@link #getCheckpointPath()}.
*/
@Deprecated
public Path getBasePath() {
return getCheckpointPath();
} | 3.68 |
hadoop_AbfsOutputStream_shrinkWriteOperationQueue | /**
* Try to remove the completed write operations from the beginning of write
* operation FIFO queue.
*/
private synchronized void shrinkWriteOperationQueue() throws IOException {
try {
WriteOperation peek = writeOperations.peek();
while (peek != null && peek.task.isDone()) {
peek.task.get();
lastTotalAppendOffset += peek.length;
writeOperations.remove();
peek = writeOperations.peek();
// Incrementing statistics to indicate queue has been shrunk.
outputStreamStatistics.queueShrunk();
}
} catch (Exception e) {
if (e.getCause() instanceof AzureBlobFileSystemException) {
lastError = (AzureBlobFileSystemException) e.getCause();
} else {
lastError = new IOException(e);
}
throw lastError;
}
} | 3.68 |
hudi_WriteMarkers_getMarkerPath | /**
* Returns the marker path, creating the partition path first if it does not exist.
*
* @param partitionPath The partition path
* @param fileName The file name
* @param type The IO type
* @return path of the marker file
*/
protected Path getMarkerPath(String partitionPath, String fileName, IOType type) {
Path path = FSUtils.getPartitionPath(markerDirPath, partitionPath);
String markerFileName = getMarkerFileName(fileName, type);
return new Path(path, markerFileName);
} | 3.68 |
framework_Design_getComponentFactory | /**
* Gets the currently used component factory.
*
* @see #setComponentFactory(ComponentFactory)
*
* @return the component factory
*
* @since 7.4.1
*/
public static ComponentFactory getComponentFactory() {
return componentFactory;
} | 3.68 |
hbase_StoreFileScanner_requestSeek | /**
* Pretend we have done a seek but don't do it yet, if possible. The hope is that we find
* requested columns in more recent files and won't have to seek in older files. Creates a fake
* key/value with the given row/column and the highest (most recent) possible timestamp we might
* get from this file. When users of such "lazy scanner" need to know the next KV precisely (e.g.
* when this scanner is at the top of the heap), they run {@link #enforceSeek()}.
* <p>
* Note that this function does guarantee that the current KV of this scanner will be advanced to
* at least the given KV. Because of this, it does have to do a real seek in cases when the seek
* timestamp is older than the highest timestamp of the file, e.g. when we are trying to seek to
* the next row/column and use OLDEST_TIMESTAMP in the seek key.
*/
@Override
public boolean requestSeek(Cell kv, boolean forward, boolean useBloom) throws IOException {
if (kv.getFamilyLength() == 0) {
useBloom = false;
}
boolean haveToSeek = true;
if (useBloom) {
// check ROWCOL Bloom filter first.
if (reader.getBloomFilterType() == BloomType.ROWCOL) {
haveToSeek = reader.passesGeneralRowColBloomFilter(kv);
} else if (
canOptimizeForNonNullColumn
&& ((PrivateCellUtil.isDeleteFamily(kv) || PrivateCellUtil.isDeleteFamilyVersion(kv)))
) {
// if there is no such delete family kv in the store file,
// then no need to seek.
haveToSeek = reader.passesDeleteFamilyBloomFilter(kv.getRowArray(), kv.getRowOffset(),
kv.getRowLength());
}
}
delayedReseek = forward;
delayedSeekKV = kv;
if (haveToSeek) {
// This row/column might be in this store file (or we did not use the
// Bloom filter), so we still need to seek.
realSeekDone = false;
long maxTimestampInFile = reader.getMaxTimestamp();
long seekTimestamp = kv.getTimestamp();
if (seekTimestamp > maxTimestampInFile) {
// Create a fake key that is not greater than the real next key.
// (Lower timestamps correspond to higher KVs.)
// To understand this better, consider that we are asked to seek to
// a higher timestamp than the max timestamp in this file. We know that
// the next point when we have to consider this file again is when we
// pass the max timestamp of this file (with the same row/column).
setCurrentCell(PrivateCellUtil.createFirstOnRowColTS(kv, maxTimestampInFile));
} else {
// This will be the case e.g. when we need to seek to the next
// row/column, and we don't know exactly what they are, so we set the
// seek key's timestamp to OLDEST_TIMESTAMP to skip the rest of this
// row/column.
enforceSeek();
}
return cur != null;
}
// Multi-column Bloom filter optimization.
// Create a fake key/value, so that this scanner only bubbles up to the top
// of the KeyValueHeap in StoreScanner after we scanned this row/column in
// all other store files. The query matcher will then just skip this fake
// key/value and the store scanner will progress to the next column. This
// is obviously not a "real real" seek, but unlike the fake KV earlier in
// this method, we want this to be propagated to ScanQueryMatcher.
setCurrentCell(PrivateCellUtil.createLastOnRowCol(kv));
realSeekDone = true;
return true;
} | 3.68 |
hadoop_OBSCommonUtils_createListObjectsRequest | /**
* Create a {@code ListObjectsRequest} request against this bucket.
*
* @param owner the owner OBSFileSystem instance
* @param key key for request
* @param delimiter any delimiter
* @return the request
*/
static ListObjectsRequest createListObjectsRequest(
final OBSFileSystem owner, final String key, final String delimiter) {
return createListObjectsRequest(owner, key, delimiter, -1);
} | 3.68 |
pulsar_TopicPoliciesService_getTopicPoliciesAsyncWithRetry | /**
* When getting TopicPolicies, if the initialization has not been completed,
* we will back off and retry until the operation times out.
* @param topicName topic name
* @param backoff back off policy
* @param isGlobal is global policies
* @return CompletableFuture<Optional<TopicPolicies>>
*/
default CompletableFuture<Optional<TopicPolicies>> getTopicPoliciesAsyncWithRetry(TopicName topicName,
final Backoff backoff, ScheduledExecutorService scheduledExecutorService, boolean isGlobal) {
CompletableFuture<Optional<TopicPolicies>> response = new CompletableFuture<>();
Backoff usedBackoff = backoff == null ? new BackoffBuilder()
.setInitialTime(500, TimeUnit.MILLISECONDS)
.setMandatoryStop(DEFAULT_GET_TOPIC_POLICY_TIMEOUT, TimeUnit.MILLISECONDS)
.setMax(DEFAULT_GET_TOPIC_POLICY_TIMEOUT, TimeUnit.MILLISECONDS)
.create() : backoff;
try {
RetryUtil.retryAsynchronously(() -> {
CompletableFuture<Optional<TopicPolicies>> future = new CompletableFuture<>();
try {
future.complete(Optional.ofNullable(getTopicPolicies(topicName, isGlobal)));
} catch (BrokerServiceException.TopicPoliciesCacheNotInitException exception) {
future.completeExceptionally(exception);
}
return future;
}, usedBackoff, scheduledExecutorService, response);
} catch (Exception e) {
response.completeExceptionally(e);
}
return response;
} | 3.68 |
hadoop_StoreContext_fullKey | /**
* Build the full S3 key for a request from the status entry,
* possibly adding a "/" if it represents a directory and it does
* not have a trailing slash already.
* @param stat status to build the key from
* @return a key for a delete request
*/
public String fullKey(final S3AFileStatus stat) {
String k = pathToKey(stat.getPath());
return (stat.isDirectory() && !k.endsWith("/"))
? k + "/"
: k;
} | 3.68 |
framework_PureGWTTestApplication_hasChildMenu | /**
* Tests for the existence of a child menu by title at this level of the
* menu hierarchy
*
* @param title
* a title string
* @return true, if this menu has a direct child menu with the specified
* title, otherwise false
*/
public boolean hasChildMenu(String title) {
return getChildMenu(title) != null;
} | 3.68 |
framework_Slot_remove | /*
* (non-Javadoc)
*
* @see com.google.gwt.user.client.ui.SimplePanel#remove(com.google.gwt.user
* .client.ui.Widget)
*/
@Override
public boolean remove(Widget w) {
detachListeners();
return super.remove(w);
} | 3.68 |
querydsl_ComparableExpressionBase_asc | /**
* Create an OrderSpecifier for ascending order of this expression
*
* @return ascending order by this
*/
public OrderSpecifier<T> asc() {
if (asc == null) {
asc = new OrderSpecifier<T>(Order.ASC, mixin);
}
return asc;
} | 3.68 |
hadoop_ReservationClientUtil_createMRReservation | /**
* Creates a request that envelops an MR job, picking the max number of maps and
* reducers, max durations, and max resources per container.
*
* @param reservationId the id of the reservation
* @param name the name of a reservation
* @param maxMapRes maximum resources used by any mapper
* @param numberMaps number of mappers
* @param maxMapDur maximum duration of any mapper
* @param maxRedRes maximum resources used by any reducer
* @param numberReduces number of reducers
* @param maxRedDur maximum duration of any reducer
* @param arrival start time of valid range for reservation
* @param deadline deadline for this reservation
* @param queueName queue to submit to
* @return a submission request
*/
@SuppressWarnings("checkstyle:parameternumber")
public static ReservationSubmissionRequest createMRReservation(
ReservationId reservationId, String name, Resource maxMapRes,
int numberMaps, long maxMapDur, Resource maxRedRes, int numberReduces,
long maxRedDur, long arrival, long deadline, String queueName) {
ReservationRequest mapRR = ReservationRequest.newInstance(maxMapRes,
numberMaps, numberMaps, maxMapDur);
ReservationRequest redRR = ReservationRequest.newInstance(maxRedRes,
numberReduces, numberReduces, maxRedDur);
List<ReservationRequest> listResReq = new ArrayList<ReservationRequest>();
listResReq.add(mapRR);
listResReq.add(redRR);
ReservationRequests reservationRequests = ReservationRequests
.newInstance(listResReq, ReservationRequestInterpreter.R_ORDER_NO_GAP);
ReservationDefinition resDef = ReservationDefinition.newInstance(arrival,
deadline, reservationRequests, name);
// outermost request
return ReservationSubmissionRequest
.newInstance(resDef, queueName, reservationId);
} | 3.68 |
framework_AbstractSplitPanelElement_getContainedComponent | /**
* Gets a component of a split panel and wraps it in the given class.
*
* @param clazz
* Components element class
* @param byContainer
* A locator that specifies the container (first or second) whose
* component is looked for
* @return A component wrapped in the given class
*/
private <T extends AbstractElement> T getContainedComponent(Class<T> clazz,
org.openqa.selenium.By byContainer) {
List<AbstractComponentElement> containedComponents = $$(
AbstractComponentElement.class).all();
List<WebElement> componentsInSelectedContainer = findElements(
byContainer);
for (AbstractComponentElement component : containedComponents) {
WebElement elem = component.getWrappedElement();
if (componentsInSelectedContainer.contains(elem)) {
return TestBench.createElement(clazz, elem,
getCommandExecutor());
}
}
return null;
} | 3.68 |
flink_Costs_subtractCosts | /**
* Subtracts the given costs from these costs. If the given costs are unknown, then these costs
* remain unchanged.
*
* @param other The costs to subtract.
*/
public void subtractCosts(Costs other) {
if (this.networkCost != UNKNOWN && other.networkCost != UNKNOWN) {
this.networkCost -= other.networkCost;
if (this.networkCost < 0) {
throw new IllegalArgumentException("Cannot subtract more cost than there is.");
}
}
if (this.diskCost != UNKNOWN && other.diskCost != UNKNOWN) {
this.diskCost -= other.diskCost;
if (this.diskCost < 0) {
throw new IllegalArgumentException("Cannot subtract more cost than there is.");
}
}
if (this.cpuCost != UNKNOWN && other.cpuCost != UNKNOWN) {
this.cpuCost -= other.cpuCost;
if (this.cpuCost < 0) {
throw new IllegalArgumentException("Cannot subtract more cost than there is.");
}
}
// ---------- relative costs ----------
this.heuristicNetworkCost -= other.heuristicNetworkCost;
if (this.heuristicNetworkCost < 0) {
throw new IllegalArgumentException("Cannot subtract more cost than there is.");
}
this.heuristicDiskCost -= other.heuristicDiskCost;
if (this.heuristicDiskCost < 0) {
throw new IllegalArgumentException("Cannot subtract more cost than there is.");
}
this.heuristicCpuCost -= other.heuristicCpuCost;
if (this.heuristicCpuCost < 0) {
throw new IllegalArgumentException("Cannot subtract more cost than there is.");
}
} | 3.68 |
graphhopper_AbstractAverageSpeedParser_getMaxSpeed | /**
* @return {@link Double#NaN} if no maxspeed found
*/
public static double getMaxSpeed(ReaderWay way, boolean bwd) {
double maxSpeed = OSMValueExtractor.stringToKmh(way.getTag("maxspeed"));
double directedMaxSpeed = OSMValueExtractor.stringToKmh(way.getTag(bwd ? "maxspeed:backward" : "maxspeed:forward"));
return isValidSpeed(directedMaxSpeed) ? directedMaxSpeed : maxSpeed;
} | 3.68 |
morf_AliasedField_negated | /**
* @return The value, negated, with the original implied name.
*/
public AliasedField negated() {
return SqlUtils.literal(0).minus(this).as(getImpliedName());
} | 3.68 |
hbase_StorageClusterStatusModel_getMemStoreSizeMB | /** Returns memstore size, in MB */
@XmlAttribute
public int getMemStoreSizeMB() {
return memstoreSizeMB;
} | 3.68 |
dubbo_URLParam_getMethodParameterStrict | /**
* Get method related parameter. If not present, return null.
* Specifically, in some situations like `method1.1.callback=true`, the key is `1.callback`.
*
* @param method method name
* @param key key
* @return value
*/
public String getMethodParameterStrict(String method, String key) {
String methodsString = getParameter(METHODS_KEY);
if (StringUtils.isNotEmpty(methodsString)) {
if (!methodsString.contains(method)) {
return null;
}
}
Map<String, String> methodMap = METHOD_PARAMETERS.get(key);
if (CollectionUtils.isNotEmptyMap(methodMap)) {
return methodMap.get(method);
} else {
return null;
}
} | 3.68 |
hudi_CompactionUtil_inferMetadataConf | /**
* Infers the metadata config based on the existence of metadata folder.
*
* <p>We can improve the code if the metadata config is set up as table config.
*
* @param conf The configuration
* @param metaClient The meta client
*/
public static void inferMetadataConf(Configuration conf, HoodieTableMetaClient metaClient) {
String path = HoodieTableMetadata.getMetadataTableBasePath(conf.getString(FlinkOptions.PATH));
if (!StreamerUtil.tableExists(path, metaClient.getHadoopConf())) {
conf.setBoolean(FlinkOptions.METADATA_ENABLED, false);
}
} | 3.68 |
hadoop_ManifestStoreOperations_isFile | /**
* Is a path a file? Used during directory creation.
* This is a copy & paste of FileSystem.isFile();
* {@code StoreOperationsThroughFileSystem} calls into
* the FS direct so that stores which optimize their probes
* can save on IO.
* @param path path to probe
* @return true if the path exists and resolves to a file
* @throws IOException failure other than FileNotFoundException
*/
public boolean isFile(Path path) throws IOException {
try {
return getFileStatus(path).isFile();
} catch (FileNotFoundException e) {
return false;
}
} | 3.68 |
druid_IPRange_getIPAddress | /**
* Return the encapsulated IP address.
*
* @return The IP address.
*/
public final IPAddress getIPAddress() {
return ipAddress;
} | 3.68 |
hadoop_CsiGrpcClient_createIdentityBlockingStub | /**
* Creates a blocking stub for CSI identity plugin on the given channel.
* @return the blocking stub
*/
public IdentityGrpc.IdentityBlockingStub createIdentityBlockingStub() {
return IdentityGrpc.newBlockingStub(channel);
} | 3.68 |
flink_DataSetUtils_partitionByRange | /** Range-partitions a DataSet using the specified key selector function. */
public static <T, K extends Comparable<K>> PartitionOperator<T> partitionByRange(
DataSet<T> input, DataDistribution distribution, KeySelector<T, K> keyExtractor) {
final TypeInformation<K> keyType =
TypeExtractor.getKeySelectorTypes(keyExtractor, input.getType());
return new PartitionOperator<>(
input,
PartitionOperatorBase.PartitionMethod.RANGE,
new Keys.SelectorFunctionKeys<>(
input.clean(keyExtractor), input.getType(), keyType),
distribution,
Utils.getCallLocationName());
} | 3.68 |
hadoop_ApplicationInitializationContext_getApplicationDataForService | /**
* Get the data sent to the NodeManager via
* {@link ContainerManagementProtocol#startContainers(StartContainersRequest)}
* as part of {@link ContainerLaunchContext#getServiceData()}
*
* @return the servicesData for this application.
*/
public ByteBuffer getApplicationDataForService() {
return this.appDataForService;
} | 3.68 |
hadoop_WeakReferenceThreadMap_getForCurrentThread | /**
* Get the value for the current thread, creating if needed.
* @return an instance.
*/
public V getForCurrentThread() {
return get(currentThreadId());
} | 3.68 |
flink_PartitionWriterFactory_get | /** Util for get a {@link PartitionWriterFactory}. */
static <T> PartitionWriterFactory<T> get(
boolean dynamicPartition,
boolean grouped,
LinkedHashMap<String, String> staticPartitions) {
if (dynamicPartition) {
return grouped ? GroupedPartitionWriter::new : DynamicPartitionWriter::new;
} else {
return (PartitionWriterFactory<T>)
(context, manager, computer, writerListener) ->
new SingleDirectoryWriter<>(
context, manager, computer, staticPartitions, writerListener);
}
} | 3.68 |
flink_WatermarkOutputMultiplexer_getDeferredOutput | /**
* Returns a deferred {@link WatermarkOutput} for the given output ID.
*
* <p>See {@link WatermarkOutputMultiplexer} for a description of immediate and deferred
* outputs.
*/
public WatermarkOutput getDeferredOutput(String outputId) {
final PartialWatermark outputState = watermarkPerOutputId.get(outputId);
Preconditions.checkArgument(
outputState != null, "no output registered under id %s", outputId);
return new DeferredOutput(outputState);
} | 3.68 |
flink_SystemProcessingTimeService_shutdownAndAwaitPending | /**
* Shuts down and clean up the timer service provider hard and immediately. This does wait for
* all timers to complete or until the time limit is exceeded. Any call to {@link
* #registerTimer(long, ProcessingTimeCallback)} will result in a hard exception after calling
* this method.
*
* @param time time to wait for termination.
* @param timeUnit time unit of parameter time.
* @return {@code true} if this timer service and all pending timers are terminated and {@code
* false} if the timeout elapsed before this happened.
*/
@VisibleForTesting
boolean shutdownAndAwaitPending(long time, TimeUnit timeUnit) throws InterruptedException {
shutdownService();
return timerService.awaitTermination(time, timeUnit);
} | 3.68 |
graphhopper_RamerDouglasPeucker_simplify | /**
* Simplifies a part of the <code>points</code>. The <code>fromIndex</code> and <code>lastIndex</code>
* are guaranteed to be kept.
*
* @param points The PointList to simplify
* @param fromIndex Start index to simplify, should be <= <code>lastIndex</code>
* @param lastIndex Simplify up to this index
* @param compress Whether the <code>points</code> shall be compressed or not, if set to false no points
* are actually removed, but instead their lat/lon/ele is only set to NaN
* @return The number of removed points
*/
public int simplify(PointList points, int fromIndex, int lastIndex, boolean compress) {
int removed = 0;
int size = lastIndex - fromIndex;
if (approx) {
int delta = 500;
int segments = size / delta + 1;
int start = fromIndex;
for (int i = 0; i < segments; i++) {
// start of next is end of last segment, except for the last
removed += subSimplify(points, start, Math.min(lastIndex, start + delta));
start += delta;
}
} else {
removed = subSimplify(points, fromIndex, lastIndex);
}
if (removed > 0 && compress)
removeNaN(points);
return removed;
} | 3.68 |
querydsl_GenericExporter_addStopClass | /**
* Add a stop class to be used (default Object.class and Enum.class)
*
* @param cl
*/
public void addStopClass(Class<?> cl) {
stopClasses.add(cl);
} | 3.68 |
hadoop_FilePosition_isWithinCurrentBuffer | /**
* Determines whether the given absolute position lies within the current buffer.
*
* @param pos the position to check.
* @return true if the given absolute position lies within the current buffer, false otherwise.
*/
public boolean isWithinCurrentBuffer(long pos) {
throwIfInvalidBuffer();
long bufferEndOffset = bufferStartOffset + buffer.limit();
return (pos >= bufferStartOffset) && (pos <= bufferEndOffset);
} | 3.68 |
framework_CalendarMonthDropHandler_updateDropDetails | /**
* Updates the drop details sent to the server
*
* @param drag
* The drag event
*/
private void updateDropDetails(VDragEvent drag) {
int dayIndex = calendarConnector.getWidget().getMonthGrid()
.getDayCellIndex(currentTargetDay);
drag.getDropDetails().put("dropDayIndex", dayIndex);
} | 3.68 |
morf_ConnectionResourcesBean_setUserName | /**
* @see org.alfasoftware.morf.jdbc.AbstractConnectionResources#setUserName(java.lang.String)
*/
@Override
public void setUserName(String userName) {
this.userName = userName;
} | 3.68 |