name | code_snippet | score
---|---|---|
framework_GridDragSource_addGridDragEndListener | /**
* Attaches dragend listener for the current drag source grid.
*
* @param listener
* Listener to handle the dragend event.
* @return Handle to be used to remove this listener.
* @see GridDragEndEvent
*/
public Registration addGridDragEndListener(
GridDragEndListener<T> listener) {
return addListener(DragSourceState.EVENT_DRAGEND,
GridDragEndEvent.class, listener,
GridDragEndListener.DRAG_END_METHOD);
} | 3.68 |
flink_EnvironmentInformation_getSizeOfFreeHeapMemoryWithDefrag | /**
* Gets an estimate of the size of the free heap memory.
*
* <p>NOTE: This method is heavy-weight. It triggers a garbage collection to reduce
* fragmentation and get a better estimate at the size of free memory. It is typically more
* accurate than the plain version {@link #getSizeOfFreeHeapMemory()}.
*
* @return An estimate of the size of the free heap memory, in bytes.
*/
public static long getSizeOfFreeHeapMemoryWithDefrag() {
// trigger a garbage collection, to reduce fragmentation
System.gc();
return getSizeOfFreeHeapMemory();
} | 3.68 |
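The method above only triggers a GC and then delegates to the plain estimate. A minimal sketch of how such a free-heap estimate can be derived from `Runtime` alone is shown below; the exact way Flink resolves the maximum heap size is an assumption here, so treat this as an illustration rather than the actual helper.

```java
// Minimal sketch of a Runtime-based free-heap estimate. Assumes the plain helper
// is computed from max/total/free heap figures; the real Flink implementation may
// resolve the maximum heap size differently.
public final class FreeHeapEstimate {

    /** Estimated free heap in bytes: unreserved headroom plus free space within the reserved heap. */
    public static long sizeOfFreeHeapMemory() {
        Runtime runtime = Runtime.getRuntime();
        return runtime.maxMemory() - runtime.totalMemory() + runtime.freeMemory();
    }

    public static void main(String[] args) {
        System.gc(); // reduce fragmentation first, as the defrag variant above does
        System.out.println("Approximate free heap (bytes): " + sizeOfFreeHeapMemory());
    }
}
```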
MagicPlugin_CompatibilityUtilsBase_getOrCreatePotionEntity | /**
* Lazily creates potion entities that can be used when damaging players.
*
* @param location The location the potion should be placed at.
     * @return A potion entity placed at the given location.
*/
protected ThrownPotion getOrCreatePotionEntity(Location location) {
World world = location.getWorld();
// Maintain a separate potion entity for every world so that
// potion.getWorld() reports the correct result.
WeakReference<ThrownPotion> ref = worldPotions.get(world);
ThrownPotion potion = ref == null ? null : ref.get();
if (potion == null) {
potion = (ThrownPotion) world.spawnEntity(
location,
EntityType.SPLASH_POTION);
potion.remove();
ref = new WeakReference<>(potion);
worldPotions.put(world, ref);
} else {
// TODO: Make sure this actually works?
potion.teleport(location);
}
return potion;
} | 3.68 |
flink_HsFileDataManager_release | /** Releases this file data manager and delete shuffle data after all readers is removed. */
public void release() {
synchronized (lock) {
if (isReleased) {
return;
}
isReleased = true;
List<HsSubpartitionFileReader> pendingReaders = new ArrayList<>(allReaders);
mayNotifyReleased();
failSubpartitionReaders(
pendingReaders,
new IllegalStateException("Result partition has been already released."));
// close data index and delete shuffle file only when no reader is reading now.
releaseFuture.thenRun(this::closeDataIndexAndDeleteShuffleFile);
}
} | 3.68 |
hadoop_SaveSuccessFileStage_getStageName | /**
* Stage name is always job commit.
* @param arguments args to the invocation.
* @return stage name
*/
@Override
protected String getStageName(ManifestSuccessData arguments) {
// set it to the job commit stage, always.
return OP_STAGE_JOB_COMMIT;
} | 3.68 |
flink_TableFunction_getParameterTypes | /**
* Returns {@link TypeInformation} about the operands of the evaluation method with a given
* signature.
*
* @deprecated This method uses the old type system and is based on the old reflective
* extraction logic. The method will be removed in future versions and is only called when
* using the deprecated {@code TableEnvironment.registerFunction(...)} method. The new
* reflective extraction logic (possibly enriched with {@link DataTypeHint} and {@link
* FunctionHint}) should be powerful enough to cover most use cases. For advanced users, it
* is possible to override {@link UserDefinedFunction#getTypeInference(DataTypeFactory)}.
*/
@Deprecated
public TypeInformation<?>[] getParameterTypes(Class<?>[] signature) {
final TypeInformation<?>[] types = new TypeInformation<?>[signature.length];
for (int i = 0; i < signature.length; i++) {
try {
types[i] = TypeExtractor.getForClass(signature[i]);
} catch (InvalidTypesException e) {
throw new ValidationException(
"Parameter types of table function "
+ this.getClass().getCanonicalName()
+ " cannot be automatically determined. Please provide type information manually.");
}
}
return types;
} | 3.68 |
flink_BroadcastConnectedStream_getSecondInput | /**
* Returns the {@link BroadcastStream}.
*
* @return The stream which, by convention, is the broadcast one.
*/
public BroadcastStream<IN2> getSecondInput() {
return broadcastStream;
} | 3.68 |
hbase_MasterObserver_preMergeRegionsCommitAction | /**
   * This will be called before the update META step as part of the regions merge transaction.
* @param ctx the environment to interact with the framework and master
* @param metaEntries mutations to execute on hbase:meta atomically with regions merge updates.
* Any puts or deletes to execute on hbase:meta can be added to the mutations.
*/
default void preMergeRegionsCommitAction(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final RegionInfo[] regionsToMerge, @MetaMutationAnnotation List<Mutation> metaEntries)
throws IOException {
} | 3.68 |
hbase_Import_instantiateFilter | /**
* Create a {@link Filter} to apply to all incoming keys ({@link KeyValue KeyValues}) to
   * optionally exclude from the job output
* @param conf {@link Configuration} from which to load the filter
   * @return the filter to use for the task, or <tt>null</tt> if no filter should be used
* @throws IllegalArgumentException if the filter is misconfigured
*/
public static Filter instantiateFilter(Configuration conf) {
// get the filter, if it was configured
Class<? extends Filter> filterClass = conf.getClass(FILTER_CLASS_CONF_KEY, null, Filter.class);
if (filterClass == null) {
LOG.debug("No configured filter class, accepting all keyvalues.");
return null;
}
LOG.debug("Attempting to create filter:" + filterClass);
String[] filterArgs = conf.getStrings(FILTER_ARGS_CONF_KEY);
ArrayList<byte[]> quotedArgs = toQuotedByteArrays(filterArgs);
try {
Method m = filterClass.getMethod("createFilterFromArguments", ArrayList.class);
return (Filter) m.invoke(null, quotedArgs);
} catch (IllegalAccessException e) {
LOG.error("Couldn't instantiate filter!", e);
throw new RuntimeException(e);
} catch (SecurityException e) {
LOG.error("Couldn't instantiate filter!", e);
throw new RuntimeException(e);
} catch (NoSuchMethodException e) {
LOG.error("Couldn't instantiate filter!", e);
throw new RuntimeException(e);
} catch (IllegalArgumentException e) {
LOG.error("Couldn't instantiate filter!", e);
throw new RuntimeException(e);
} catch (InvocationTargetException e) {
LOG.error("Couldn't instantiate filter!", e);
throw new RuntimeException(e);
}
} | 3.68 |
flink_StreamExecutionEnvironment_getDefaultLocalParallelism | /**
* Gets the default parallelism that will be used for the local execution environment created by
* {@link #createLocalEnvironment()}.
*
* @return The default local parallelism
*/
@PublicEvolving
public static int getDefaultLocalParallelism() {
return defaultLocalParallelism;
} | 3.68 |
flink_BeamPythonFunctionRunner_create | // the input value type is always byte array
@SuppressWarnings("unchecked")
@Override
public FnDataReceiver<WindowedValue<byte[]>> create(String pCollectionId) {
return input -> {
resultBuffer.add(Tuple2.of(pCollectionId, input.getValue()));
};
} | 3.68 |
flink_JobGraph_getJobID | /**
* Returns the ID of the job.
*
* @return the ID of the job
*/
public JobID getJobID() {
return this.jobID;
} | 3.68 |
graphhopper_RAMIntDataAccess_setStore | /**
* @param store true if in-memory data should be saved when calling flush
*/
public RAMIntDataAccess setStore(boolean store) {
this.store = store;
return this;
} | 3.68 |
pulsar_BytesSchemaVersion_hashCode | /**
* The hashcode is cached except for the case where it is computed as 0, in which
* case we compute the hashcode on every call.
*
* @return the hashcode
*/
@Override
public int hashCode() {
if (hashCode == 0) {
hashCode = Arrays.hashCode(bytes);
}
return hashCode;
} | 3.68 |
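The snippet above relies on a lazy hash-code cache in which the value 0 doubles as the "not yet computed" marker. A small self-contained illustration of that idiom follows; the class is hypothetical and not part of Pulsar.

```java
import java.util.Arrays;

// Self-contained illustration of the cached-hashCode idiom described above.
// The class is hypothetical and not part of the Pulsar codebase.
final class CachedHashBytes {
    private final byte[] bytes;
    private int hashCode; // 0 doubles as "not yet computed"

    CachedHashBytes(byte[] bytes) {
        this.bytes = bytes;
    }

    @Override
    public int hashCode() {
        if (hashCode == 0) {
            // If the real hash happens to be 0, it is recomputed on every call,
            // which is exactly the trade-off the Javadoc above points out.
            hashCode = Arrays.hashCode(bytes);
        }
        return hashCode;
    }
}
```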
hudi_HoodieSyncClient_getPartitionValuesToPathMapping | /**
* Gets the partition values to the absolute path mapping based on the
* partition information from the metastore.
*
* @param partitionsInMetastore Partitions in the metastore.
* @return The partition values to the absolute path mapping.
*/
private Map<String, String> getPartitionValuesToPathMapping(List<Partition> partitionsInMetastore) {
Map<String, String> paths = new HashMap<>();
for (Partition tablePartition : partitionsInMetastore) {
List<String> hivePartitionValues = tablePartition.getValues();
String fullTablePartitionPath =
Path.getPathWithoutSchemeAndAuthority(new Path(tablePartition.getStorageLocation())).toUri().getPath();
paths.put(String.join(", ", hivePartitionValues), fullTablePartitionPath);
}
return paths;
} | 3.68 |
hadoop_QuotaUsage_getHeader | /** Return the header of the output.
* @return the header of the output
*/
public static String getHeader() {
return QUOTA_HEADER;
} | 3.68 |
hadoop_StageConfig_getEnterStageEventHandler | /**
* Handler for stage entry events.
* @return the handler.
*/
public StageEventCallbacks getEnterStageEventHandler() {
return enterStageEventHandler;
} | 3.68 |
morf_WindowFunction_over | /**
* Starts a new window function Builder.
* @param function the function to construct the window function over.
* @return the window function builder
*/
public static Builder over(Function function) {
return new BuilderImpl(function);
} | 3.68 |
hudi_HoodieTableMetadataUtil_tryUpcastDecimal | /**
* Does an upcast for {@link BigDecimal} instance to align it with scale/precision expected by
* the {@link org.apache.avro.LogicalTypes.Decimal} Avro logical type
*/
public static BigDecimal tryUpcastDecimal(BigDecimal value, final LogicalTypes.Decimal decimal) {
final int scale = decimal.getScale();
final int valueScale = value.scale();
boolean scaleAdjusted = false;
if (valueScale != scale) {
try {
value = value.setScale(scale, RoundingMode.UNNECESSARY);
scaleAdjusted = true;
} catch (ArithmeticException aex) {
throw new AvroTypeException(
"Cannot encode decimal with scale " + valueScale + " as scale " + scale + " without rounding");
}
}
int precision = decimal.getPrecision();
int valuePrecision = value.precision();
if (valuePrecision > precision) {
if (scaleAdjusted) {
throw new AvroTypeException("Cannot encode decimal with precision " + valuePrecision + " as max precision "
+ precision + ". This is after safely adjusting scale from " + valueScale + " to required " + scale);
} else {
throw new AvroTypeException(
"Cannot encode decimal with precision " + valuePrecision + " as max precision " + precision);
}
}
return value;
} | 3.68 |
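A short usage sketch of `tryUpcastDecimal` with an Avro decimal logical type is shown below. The import of `HoodieTableMetadataUtil` is an assumption (its package may differ across Hudi versions); the Avro calls (`LogicalTypes.decimal`, `AvroTypeException`) are standard.

```java
import java.math.BigDecimal;
import org.apache.avro.AvroTypeException;
import org.apache.avro.LogicalTypes;
import org.apache.hudi.metadata.HoodieTableMetadataUtil; // package assumed; may differ across Hudi versions

public class TryUpcastDecimalExample {
    public static void main(String[] args) {
        // Target logical type: decimal(10, 2), i.e. precision 10 and scale 2.
        LogicalTypes.Decimal target = LogicalTypes.decimal(10, 2);

        // Scale is widened without rounding: 12.3 becomes 12.30.
        BigDecimal widened = HoodieTableMetadataUtil.tryUpcastDecimal(new BigDecimal("12.3"), target);
        System.out.println(widened); // 12.30

        // 12.345 would need rounding to reach scale 2, so an AvroTypeException is thrown.
        try {
            HoodieTableMetadataUtil.tryUpcastDecimal(new BigDecimal("12.345"), target);
        } catch (AvroTypeException e) {
            System.out.println("Rejected: " + e.getMessage());
        }
    }
}
```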
framework_GridConnector_handleServerInitiated | /**
* Used to handle the case where the editor calls us because it was
* invoked by the server via RPC and not by the client. In that case,
* the request can be simply synchronously completed.
*
* @param request
* the request object
* @return true if the request was originally triggered by the server,
* false otherwise
*/
private boolean handleServerInitiated(EditorRequest<?> request) {
assert request != null : "Cannot handle null request";
assert currentRequest == null : "Earlier request not yet finished";
if (serverInitiated) {
serverInitiated = false;
request.success();
return true;
} else {
return false;
}
} | 3.68 |
framework_ComponentSizeValidator_isForm | /**
     * Checks whether the component is the Form component, which is defined in a different jar.
     *
     * TODO : Normally this logic shouldn't be here. But it means that this whole
     * class has the wrong design and implementation and should be refactored.
*/
private static boolean isForm(Component component) {
if (!(component instanceof HasComponents)) {
return false;
}
Class<?> clazz = component.getClass();
while (clazz != null) {
            if (clazz.getName()
.equals("com.vaadin.v7.ui.Form")) {
return true;
}
clazz = clazz.getSuperclass();
}
return false;
} | 3.68 |
hudi_StreamSync_close | /**
* Close all resources.
*/
public void close() {
if (writeClient != null) {
writeClient.close();
writeClient = null;
}
if (formatAdapter != null) {
formatAdapter.close();
}
LOG.info("Shutting down embedded timeline server");
if (embeddedTimelineService.isPresent()) {
embeddedTimelineService.get().stopForBasePath(cfg.targetBasePath);
}
if (metrics != null) {
metrics.shutdown();
}
} | 3.68 |
framework_ConnectorTracker_getSeckey | /**
* Returns the security key associated with the given StreamVariable.
*
     * @param variable the stream variable whose security key to return
* @return matching security key if one exists, null otherwise
*/
public String getSeckey(StreamVariable variable) {
if (streamVariableToSeckey == null) {
return null;
}
return streamVariableToSeckey.get(variable);
} | 3.68 |
hibernate-validator_MessagerAdapter_reportError | /**
* Reports the given error. Message parameters will be put into the template
* retrieved from the resource bundle if applicable.
*
* @param error The error to report.
*/
private void reportError(ConstraintCheckIssue error) {
report( error, diagnosticKind );
} | 3.68 |
hmily_SingletonHolder_register | /**
* register.
*
* @param <T> the type parameter
* @param clazz the clazz
* @param o the o
*/
public <T> void register(final Class<T> clazz, final Object o) {
SINGLES.put(clazz.getName(), o);
} | 3.68 |
hbase_QuotaSettingsFactory_unthrottleRegionServer | /**
* Remove the throttling for the specified region server.
* @param regionServer the region Server
* @return the quota settings
*/
public static QuotaSettings unthrottleRegionServer(final String regionServer) {
return throttle(null, null, null, regionServer, null, 0, null, QuotaScope.MACHINE);
} | 3.68 |
querydsl_JTSGeometryCollectionExpression_numGeometries | /**
* Returns the number of geometries in this GeometryCollection.
*
   * @return the number of geometries
*/
public NumberExpression<Integer> numGeometries() {
if (numGeometries == null) {
numGeometries = Expressions.numberOperation(Integer.class, SpatialOps.NUM_GEOMETRIES, mixin);
}
return numGeometries;
} | 3.68 |
hbase_TableDescriptorBuilder_setValue | /**
* Setter for storing metadata as a (key, value) pair in {@link #values} map
* @param key The key.
   * @param value The value. If null, removes the setting.
   * @return this descriptor, to allow method chaining
   */
public ModifyableTableDescriptor setValue(final Bytes key, final Bytes value) {
if (value == null || value.getLength() == 0) {
values.remove(key);
} else {
values.put(key, value);
}
return this;
} | 3.68 |
pulsar_ModularLoadManagerImpl_writeBrokerDataOnZooKeeper | /**
* As any broker, write the local broker data to metadata store.
*/
@Override
public void writeBrokerDataOnZooKeeper() {
writeBrokerDataOnZooKeeper(false);
} | 3.68 |
framework_VUpload_rebuildPanel | /**
* Re-creates file input field and populates panel. This is needed as we
* want to clear existing values from our current file input field.
*/
private void rebuildPanel() {
panel.remove(submitButton);
panel.remove(fu);
fu = new VFileUpload();
fu.setName(paintableId + "_file");
fu.getElement().setPropertyBoolean("disabled", !enabled);
if (acceptMimeTypes != null && !acceptMimeTypes.isEmpty()) {
InputElement.as(fu.getElement()).setAccept(acceptMimeTypes);
}
panel.add(fu);
panel.add(submitButton);
if (isImmediateMode()) {
fu.sinkEvents(Event.ONCHANGE);
}
fu.addChangeHandler(event -> {
if (!isImmediateMode()) {
updateEnabledForSubmitButton();
}
if (client != null) {
UploadConnector connector = ((UploadConnector) ConnectorMap
.get(client).getConnector(VUpload.this));
if (connector.hasEventListener(EventId.CHANGE)) {
connector.getRpcProxy(UploadServerRpc.class)
.change(fu.getFilename());
}
}
});
} | 3.68 |
flink_JoinedStreams_trigger | /** Sets the {@code Trigger} that should be used to trigger window emission. */
@PublicEvolving
public WithWindow<T1, T2, KEY, W> trigger(
Trigger<? super TaggedUnion<T1, T2>, ? super W> newTrigger) {
return new WithWindow<>(
input1,
input2,
keySelector1,
keySelector2,
keyType,
windowAssigner,
newTrigger,
evictor,
allowedLateness);
} | 3.68 |
shardingsphere-elasticjob_FailoverService_getLocalFailoverItems | /**
* Get failover items which execute on localhost.
*
* @return failover items which execute on localhost
*/
public List<Integer> getLocalFailoverItems() {
if (JobRegistry.getInstance().isShutdown(jobName)) {
return Collections.emptyList();
}
return getFailoverItems(JobRegistry.getInstance().getJobInstance(jobName).getJobInstanceId());
} | 3.68 |
flink_ParquetRowDataWriter_write | /**
* It writes a record to Parquet.
*
* @param record Contains the record that is going to be written.
*/
public void write(final RowData record) {
recordConsumer.startMessage();
rowWriter.write(record);
recordConsumer.endMessage();
} | 3.68 |
flink_PekkoUtils_getRpcURL | /**
* Returns the given {@link ActorRef}'s path string representation with host and port of the
* {@link ActorSystem} in which the actor is running.
*
* @param system {@link ActorSystem} in which the given {@link ActorRef} is running
* @param actor {@link ActorRef} of the actor for which the URL has to be generated
* @return String containing the {@link ActorSystem} independent URL of the actor
*/
public static String getRpcURL(ActorSystem system, ActorRef actor) {
final Address address = getAddress(system);
return actor.path().toStringWithAddress(address);
} | 3.68 |
pulsar_KubernetesServiceAccountTokenAuthProvider_cleanUpAuthData | /**
* No need to clean up anything. Kubernetes cleans up the secret when the pod is deleted.
*/
@Override
public void cleanUpAuthData(Function.FunctionDetails funcDetails, Optional<FunctionAuthData> functionAuthData)
throws Exception {
} | 3.68 |
hudi_HoodieOperation_isUpdateAfter | /**
* Returns whether the operation is UPDATE_AFTER.
*/
public static boolean isUpdateAfter(HoodieOperation operation) {
return operation == UPDATE_AFTER;
} | 3.68 |
hbase_ZKUtil_getChildDataAndWatchForNewChildren | /**
   * Returns the data of child znodes of the specified znode. Also sets a watch on the specified
* znode which will capture a NodeDeleted event on the specified znode as well as
* NodeChildrenChanged if any children of the specified znode are created or deleted. Returns null
* if the specified node does not exist. Otherwise returns a list of children of the specified
* node. If the node exists but it has no children, an empty list will be returned.
* @param zkw zk reference
* @param baseNode path of node to list and watch children of
* @param throwOnInterrupt if true then just interrupt the thread, do not throw exception
* @return list of data of children of the specified node, an empty list if the node exists but
* has no children, and null if the node does not exist
* @throws KeeperException if unexpected zookeeper exception
* @deprecated Unused
*/
@Deprecated
public static List<NodeAndData> getChildDataAndWatchForNewChildren(ZKWatcher zkw, String baseNode,
boolean throwOnInterrupt) throws KeeperException {
List<String> nodes = ZKUtil.listChildrenAndWatchForNewChildren(zkw, baseNode);
if (nodes != null) {
List<NodeAndData> newNodes = new ArrayList<>();
for (String node : nodes) {
if (Thread.interrupted()) {
// Partial data should not be processed. Cancel processing by sending empty list.
return Collections.emptyList();
}
String nodePath = ZNodePaths.joinZNode(baseNode, node);
byte[] data = ZKUtil.getDataAndWatch(zkw, nodePath, throwOnInterrupt);
newNodes.add(new NodeAndData(nodePath, data));
}
return newNodes;
}
return null;
} | 3.68 |
open-banking-gateway_EncryptionKeySerde_writeKey | /**
* Write public-private key pair into OutputStream
* @param publicKey Public key of pair
* @param privKey Private key of pair
* @param os Output stream to write to
*/
@SneakyThrows
public void writeKey(PublicKey publicKey, PrivateKey privKey, OutputStream os) {
// Mapper may choose to close the stream if using stream interface, we don't want this
// as objects are small - this is ok.
os.write(mapper.writeValueAsBytes(new PubAndPrivKeyContainer(publicKey, privKey)));
} | 3.68 |
hudi_CleanPlanner_getFilesToCleanKeepingLatestVersions | /**
   * Selects the older versions of files for cleaning, such that it bounds the number of versions of each file. This
   * policy is useful if you are simply interested in querying the table and don't want too many versions for a
   * single file (i.e., run it with versionsRetained = 1).
*/
private Pair<Boolean, List<CleanFileInfo>> getFilesToCleanKeepingLatestVersions(String partitionPath) {
LOG.info("Cleaning " + partitionPath + ", retaining latest " + config.getCleanerFileVersionsRetained()
+ " file versions. ");
List<CleanFileInfo> deletePaths = new ArrayList<>();
// Collect all the datafiles savepointed by all the savepoints
List<String> savepointedFiles = hoodieTable.getSavepointTimestamps().stream()
.flatMap(this::getSavepointedDataFiles)
.collect(Collectors.toList());
// In this scenario, we will assume that once replaced a file group automatically becomes eligible for cleaning completely
// In other words, the file versions only apply to the active file groups.
deletePaths.addAll(getReplacedFilesEligibleToClean(savepointedFiles, partitionPath, Option.empty()));
boolean toDeletePartition = false;
List<HoodieFileGroup> fileGroups = fileSystemView.getAllFileGroupsStateless(partitionPath).collect(Collectors.toList());
for (HoodieFileGroup fileGroup : fileGroups) {
int keepVersions = config.getCleanerFileVersionsRetained();
// do not cleanup slice required for pending compaction
Iterator<FileSlice> fileSliceIterator =
fileGroup.getAllFileSlices()
.filter(fs -> !isFileSliceNeededForPendingMajorOrMinorCompaction(fs))
.iterator();
if (isFileGroupInPendingMajorOrMinorCompaction(fileGroup)) {
// We have already saved the last version of file-groups for pending compaction Id
keepVersions--;
}
while (fileSliceIterator.hasNext() && keepVersions > 0) {
// Skip this most recent version
fileSliceIterator.next();
keepVersions--;
}
// Delete the remaining files
while (fileSliceIterator.hasNext()) {
FileSlice nextSlice = fileSliceIterator.next();
if (isFileSliceExistInSavepointedFiles(nextSlice, savepointedFiles)) {
// do not clean up a savepoint data file
continue;
}
deletePaths.addAll(getCleanFileInfoForSlice(nextSlice));
}
}
// if there are no valid file groups
// and no pending data files under the partition [IMPORTANT],
// mark it to be deleted
if (fileGroups.isEmpty() && !hasPendingFiles(partitionPath)) {
toDeletePartition = true;
}
return Pair.of(toDeletePartition, deletePaths);
} | 3.68 |
flink_SqlConstraintValidator_getFullConstraints | /** Returns the column constraints plus the table constraints. */
public static List<SqlTableConstraint> getFullConstraints(
List<SqlTableConstraint> tableConstraints, SqlNodeList columnList) {
List<SqlTableConstraint> ret = new ArrayList<>();
columnList.forEach(
column -> {
SqlTableColumn tableColumn = (SqlTableColumn) column;
if (tableColumn instanceof SqlTableColumn.SqlRegularColumn) {
SqlTableColumn.SqlRegularColumn regularColumn =
(SqlTableColumn.SqlRegularColumn) tableColumn;
regularColumn.getConstraint().map(ret::add);
}
});
ret.addAll(tableConstraints);
return ret;
} | 3.68 |
flink_BeamOperatorStateStore_getListState | /** Currently list state and union-list state is not supported. */
@Override
public ListState<byte[]> getListState(BeamFnApi.StateRequest request) throws Exception {
throw new RuntimeException("Operator list state is still not supported");
} | 3.68 |
morf_RemoveTable_isApplied | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.upgrade.SchemaChange#isApplied(Schema, ConnectionResources)
*/
@Override
public boolean isApplied(Schema schema, ConnectionResources database) {
for (String tableName : schema.tableNames()) {
if (tableName.equalsIgnoreCase(tableToBeRemoved.getName())) {
return false;
}
}
return true;
} | 3.68 |
framework_Table_setMultiSelectMode | /**
* Sets the behavior of how the multi-select mode should behave when the
* table is both selectable and in multi-select mode.
* <p>
* Note, that on some clients the mode may not be respected. E.g. on touch
* based devices CTRL/SHIFT base selection method is invalid, so touch based
* browsers always use the {@link MultiSelectMode#SIMPLE} unless touch multi
* select is explicitly disabled.
*
* @see #setMultiSelectTouchDetectionEnabled(boolean)
*
* @param mode
* The select mode of the table
*/
public void setMultiSelectMode(MultiSelectMode mode) {
multiSelectMode = mode;
markAsDirty();
} | 3.68 |
framework_CustomLayoutDemo_init | /**
* Initialize Application. Demo components are added to main window.
*/
@Override
public void init() {
final LegacyWindow mainWindow = new LegacyWindow("CustomLayout demo");
setMainWindow(mainWindow);
// set the application to use example -theme
setTheme("tests-components");
// Create custom layout, themes/example/layout/mainLayout.html
mainLayout = new CustomLayout("mainLayout");
// wrap custom layout inside a panel
VerticalLayout customLayoutPanelLayout = new VerticalLayout();
customLayoutPanelLayout.setMargin(true);
final Panel customLayoutPanel = new Panel(
"Panel containing custom layout (mainLayout.html)",
customLayoutPanelLayout);
customLayoutPanelLayout.addComponent(mainLayout);
// Login components
mainLayout.addComponent(username, "loginUser");
mainLayout.addComponent(loginPwd, "loginPassword");
mainLayout.addComponent(loginButton, "loginButton");
// Menu component, when clicked bodyPanel is updated
menu.addItem("Welcome");
menu.addItem("Products");
menu.addItem("Support");
menu.addItem("News");
menu.addItem("Developers");
menu.addItem("Contact");
// "this" handles all menu events, e.g. node clicked event
menu.addListener(this);
// Value changes are immediate
menu.setImmediate(true);
menu.setNullSelectionAllowed(false);
mainLayout.addComponent(menu, "menu");
// Body component
mainLayout.addComponent(bodyPanel, "body");
// Initial body area comes from Welcome.html
setBody("Welcome");
// Add heading label and custom layout panel to main window
mainWindow.addComponent(
new Label("<h3>Custom layout demo</h3>", ContentMode.HTML));
mainWindow.addComponent(customLayoutPanel);
} | 3.68 |
flink_OptionalFailure_createFrom | /**
     * @return an {@link OptionalFailure} wrapping the value returned by {@code valueSupplier}, or a wrapped failure
     *     if {@code valueSupplier} has thrown an {@link Exception}.
*/
public static <T> OptionalFailure<T> createFrom(CheckedSupplier<T> valueSupplier) {
try {
return of(valueSupplier.get());
} catch (Exception ex) {
return ofFailure(ex);
}
} | 3.68 |
querydsl_GroupBy_sortedSet | /**
     * Create a new aggregating set expression backed by a TreeSet with the given comparator
*
* @param groupExpression values for this expression will be accumulated into a set
* @param comparator comparator of the created TreeSet instance
* @return wrapper expression
*/
public static <E, F> GroupExpression<E, SortedSet<F>> sortedSet(GroupExpression<E, F> groupExpression,
Comparator<? super F> comparator) {
return new MixinGroupExpression<E, F, SortedSet<F>>(groupExpression, GSet.createSorted(groupExpression, comparator));
} | 3.68 |
hudi_WriteMarkers_stripMarkerSuffix | /**
* Strips the marker file suffix from the input path, i.e., ".marker.[IO_type]".
*
* @param path file path
* @return Stripped path
*/
public static String stripMarkerSuffix(String path) {
return path.substring(0, path.indexOf(HoodieTableMetaClient.MARKER_EXTN));
} | 3.68 |
hadoop_OBSObjectBucketUtils_copyFile | /**
* Copy a single object in the bucket via a COPY operation.
*
* @param owner OBS File System instance
* @param srcKey source object path
* @param dstKey destination object path
* @param size object size
* @throws InterruptedIOException the operation was interrupted
* @throws IOException Other IO problems
*/
private static void copyFile(final OBSFileSystem owner, final String srcKey,
final String dstKey, final long size)
throws IOException, InterruptedIOException {
for (int retryTime = 1;
retryTime < OBSCommonUtils.MAX_RETRY_TIME; retryTime++) {
try {
innerCopyFile(owner, srcKey, dstKey, size);
return;
} catch (InterruptedIOException e) {
throw e;
} catch (IOException e) {
LOG.warn(
"Failed to copy file from [{}] to [{}] with size [{}], "
+ "retry time [{}], exception [{}]", srcKey, dstKey,
size, retryTime, e);
try {
Thread.sleep(OBSCommonUtils.DELAY_TIME);
} catch (InterruptedException ie) {
throw e;
}
}
}
innerCopyFile(owner, srcKey, dstKey, size);
} | 3.68 |
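The loop above is a bounded retry with a fixed delay: it never retries interruptions and lets the final attempt propagate its exception. A generic standalone sketch of that pattern follows; the helper is hypothetical and not part of the Hadoop OBS connector.

```java
import java.io.IOException;
import java.io.InterruptedIOException;

// Generic bounded-retry sketch mirroring the pattern above. The helper is
// hypothetical and not part of the Hadoop OBS connector.
public final class Retry {

    @FunctionalInterface
    public interface IOAction<T> {
        T run() throws IOException;
    }

    /** Runs the action, retrying up to maxAttempts times with a fixed delay between attempts. */
    public static <T> T withRetries(int maxAttempts, long delayMillis, IOAction<T> action)
            throws IOException {
        for (int attempt = 1; attempt < maxAttempts; attempt++) {
            try {
                return action.run();
            } catch (InterruptedIOException e) {
                throw e; // never retry an interrupted operation
            } catch (IOException e) {
                try {
                    Thread.sleep(delayMillis);
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                    throw e;
                }
            }
        }
        // Final attempt: any exception propagates to the caller.
        return action.run();
    }
}
```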
flink_KerberosLoginProvider_doLogin | /**
     * Does Kerberos login and sets the current user. Must be called when isLoginPossible returns true.
*/
public void doLogin(boolean supportProxyUser) throws IOException {
if (principal != null) {
LOG.info(
"Attempting to login to KDC using principal: {} keytab: {}", principal, keytab);
UserGroupInformation.loginUserFromKeytab(principal, keytab);
LOG.info("Successfully logged into KDC");
} else if (!HadoopUserUtils.isProxyUser(UserGroupInformation.getCurrentUser())) {
LOG.info("Attempting to load user's ticket cache");
UserGroupInformation.loginUserFromSubject(null);
LOG.info("Loaded user's ticket cache successfully");
} else if (supportProxyUser) {
LOG.info("Proxy user doesn't need login since it must have credentials already");
} else {
throwProxyUserNotSupported();
}
} | 3.68 |
streampipes_Operations_validatePipeline | /**
* @param pipeline the pipeline to validate
* @return PipelineModificationMessage a message containing desired pipeline modifications
*/
public static PipelineModificationMessage validatePipeline(Pipeline pipeline) throws Exception {
return new PipelineVerificationHandlerV2(pipeline).verifyPipeline();
} | 3.68 |
hadoop_StagingCommitter_deleteStagingUploadsParentDirectory | /**
* Delete the multipart upload staging directory.
* @param context job context
* @throws IOException IO failure
*/
protected void deleteStagingUploadsParentDirectory(JobContext context)
throws IOException {
Path stagingUploadsPath = Paths.getStagingUploadsParentDirectory(
context.getConfiguration(), getUUID());
ignoreIOExceptions(LOG,
"Deleting staging uploads path", stagingUploadsPath.toString(),
() -> deleteWithWarning(
stagingUploadsPath.getFileSystem(getConf()),
stagingUploadsPath,
true));
} | 3.68 |
framework_BaseLayoutTestUI_getTestDescription | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTestDescription()
*/
@Override
protected String getTestDescription() {
return null;
} | 3.68 |
hudi_SparkPreCommitValidator_validate | /**
* Verify the data written as part of specified instant.
* Throw HoodieValidationException if any unexpected data is written (Example: data files are not readable for some reason).
*/
public void validate(String instantTime, HoodieWriteMetadata<O> writeResult, Dataset<Row> before, Dataset<Row> after) throws HoodieValidationException {
HoodieTimer timer = HoodieTimer.start();
try {
validateRecordsBeforeAndAfter(before, after, getPartitionsModified(writeResult));
} finally {
long duration = timer.endTimer();
LOG.info(getClass() + " validator took " + duration + " ms" + ", metrics on? " + getWriteConfig().isMetricsOn());
publishRunStats(instantTime, duration);
}
} | 3.68 |
hbase_ResponseConverter_isClosed | /**
* Check if the region is closed from a CloseRegionResponse
* @param proto the CloseRegionResponse
* @return the region close state
*/
public static boolean isClosed(final CloseRegionResponse proto) {
if (proto == null || !proto.hasClosed()) return false;
return proto.getClosed();
} | 3.68 |
hbase_StorageClusterStatusModel_getDeadNodes | /** Returns the list of dead nodes */
@XmlElement(name = "Node")
@XmlElementWrapper(name = "DeadNodes")
// workaround https://github.com/FasterXML/jackson-dataformat-xml/issues/192
@JsonProperty("DeadNodes")
public List<String> getDeadNodes() {
return deadNodes;
} | 3.68 |
flink_RestfulGateway_deliverCoordinationRequestToCoordinator | /**
* Deliver a coordination request to a specified coordinator and return the response.
*
* @param jobId identifying the job which the coordinator belongs to
* @param operatorId identifying the coordinator to receive the request
* @param serializedRequest serialized request to deliver
* @param timeout RPC timeout
* @return A future containing the response. The response will fail with a {@link
* org.apache.flink.util.FlinkException} if the task is not running, or no
* operator/coordinator exists for the given ID, or the coordinator cannot handle client
* events.
*/
default CompletableFuture<CoordinationResponse> deliverCoordinationRequestToCoordinator(
JobID jobId,
OperatorID operatorId,
SerializedValue<CoordinationRequest> serializedRequest,
@RpcTimeout Time timeout) {
throw new UnsupportedOperationException();
} | 3.68 |
hbase_SpaceLimitSettings_buildProtoRemoveQuota | /**
* Builds a {@link SpaceQuota} protobuf object to remove a quota.
* @return The protobuf SpaceQuota representation.
*/
private SpaceLimitRequest buildProtoRemoveQuota() {
return SpaceLimitRequest.newBuilder().setQuota(SpaceQuota.newBuilder().setRemove(true).build())
.build();
} | 3.68 |
hbase_RegionReplicaCandidateGenerator_selectCoHostedRegionPerGroup | /**
* Randomly select one regionIndex out of all region replicas co-hosted in the same group (a group
* is a server, host or rack)
* @param colocatedReplicaCountsPerGroup either Cluster.colocatedReplicaCountsPerServer,
* colocatedReplicaCountsPerHost or
* colocatedReplicaCountsPerRack
* @param regionsPerGroup either Cluster.regionsPerServer, regionsPerHost or
* regionsPerRack
* @param regionIndexToPrimaryIndex Cluster.regionsIndexToPrimaryIndex
* @return a regionIndex for the selected primary or -1 if there is no co-locating
*/
int selectCoHostedRegionPerGroup(Int2IntCounterMap colocatedReplicaCountsPerGroup,
int[] regionsPerGroup, int[] regionIndexToPrimaryIndex) {
final IntArrayList colocated = new IntArrayList(colocatedReplicaCountsPerGroup.size(), -1);
colocatedReplicaCountsPerGroup.forEach((primary, count) -> {
if (count > 1) { // means consecutive primaries, indicating co-location
colocated.add(primary);
}
});
if (!colocated.isEmpty()) {
int rand = ThreadLocalRandom.current().nextInt(colocated.size());
int selectedPrimaryIndex = colocated.get(rand);
// we have found the primary id for the region to move. Now find the actual regionIndex
// with the given primary, prefer to move the secondary region.
for (int regionIndex : regionsPerGroup) {
if (selectedPrimaryIndex == regionIndexToPrimaryIndex[regionIndex]) {
// always move the secondary, not the primary
if (selectedPrimaryIndex != regionIndex) {
return regionIndex;
}
}
}
}
return -1;
} | 3.68 |
zilla_HpackIntegerFW_integer | /*
     * Encodes an integer in HPACK representation
*
* if I < 2^N - 1, encode I on N bits
* else
* encode (2^N - 1) on N bits
* I = I - (2^N - 1)
* while I >= 128
* encode (I % 128 + 128) on 8 bits
* I = I / 128
* encode I on 8 bits
*
* @param offset offset for current octet
* @param n number of bits of the prefix
*/
public HpackIntegerFW.Builder integer(int value)
{
assert n >= 1;
assert n <= 8;
int twoNminus1 = TWON_TABLE[n] - 1;
int i = offset();
byte cur = buffer().getByte(i);
if (value < twoNminus1)
{
buffer().putByte(i++, (byte) (cur | value));
}
else
{
buffer().putByte(i++, (byte) (cur | twoNminus1));
int remaining = value - twoNminus1;
while (remaining >= 128)
{
buffer().putByte(i++, (byte) (remaining % 128 + 128));
remaining = remaining / 128;
}
buffer().putByte(i++, (byte) remaining);
}
limit(i);
return this;
} | 3.68 |
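The comment above describes HPACK prefix-integer encoding (RFC 7541, section 5.1). The standalone sketch below encodes into a fresh byte array and assumes the prefix octet starts at zero; the builder above instead ORs the value into whatever bits are already set in the current octet.

```java
import java.io.ByteArrayOutputStream;

// Standalone sketch of HPACK prefix-integer encoding (RFC 7541, section 5.1),
// assuming the prefix octet starts zeroed. The builder above ORs into existing bits instead.
public final class HpackIntegerExample {

    static byte[] encode(int value, int n) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        int max = (1 << n) - 1;                   // 2^N - 1
        if (value < max) {
            out.write(value);                     // fits entirely in the N-bit prefix
        } else {
            out.write(max);                       // saturate the prefix
            int remaining = value - max;
            while (remaining >= 128) {
                out.write(remaining % 128 + 128); // 7 payload bits, continuation bit set
                remaining /= 128;
            }
            out.write(remaining);                 // final octet, continuation bit clear
        }
        return out.toByteArray();
    }

    public static void main(String[] args) {
        // RFC 7541 C.1.2: 1337 with a 5-bit prefix encodes as the octets 31, 154, 10.
        for (byte b : encode(1337, 5)) {
            System.out.print((b & 0xFF) + " ");
        }
    }
}
```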
hadoop_QuotaUsage_isTypeConsumedAvailable | /**
* Return true if any storage type consumption information is available.
*
   * @return true if any storage type consumption information
   * is available, false otherwise.
*/
public boolean isTypeConsumedAvailable() {
if (typeConsumed != null) {
for (StorageType t : StorageType.getTypesSupportingQuota()) {
if (typeConsumed[t.ordinal()] > 0L) {
return true;
}
}
}
return false;
} | 3.68 |
flink_TaskExecutorManager_allocateWorkers | /**
* Allocate a number of workers based on the input param.
*
* @param workerNum the number of workers to allocate
* @return the number of successfully allocated workers
*/
private int allocateWorkers(int workerNum) {
int allocatedWorkerNum = 0;
for (int i = 0; i < workerNum; ++i) {
if (allocateWorker(defaultSlotResourceProfile).isPresent()) {
++allocatedWorkerNum;
} else {
break;
}
}
return allocatedWorkerNum;
} | 3.68 |
flink_RocksDBNativeMetricOptions_enableSizeAllMemTables | /**
     * Enables the metric for the approximate size of active, unflushed immutable, and pinned immutable
     * memtables (bytes).
*/
public void enableSizeAllMemTables() {
this.properties.add(RocksDBProperty.SizeAllMemTables.getRocksDBProperty());
} | 3.68 |
framework_UIProvider_getPushMode | /**
* Finds the {@link PushMode} to use for a specific UI. If no specific push
* mode is required, <code>null</code> is returned.
* <p>
* The default implementation uses the @{@link Push} annotation if it's
* defined for the UI class.
*
* @param event
* the UI create event with information about the UI and the
* current request.
* @return the push mode to use, or <code>null</code> if the default push
* mode should be used
*
*/
public PushMode getPushMode(UICreateEvent event) {
Push push = getAnnotationFor(event.getUIClass(), Push.class);
if (push == null) {
return null;
} else {
return push.value();
}
} | 3.68 |
hadoop_OBSLoginHelper_extractLoginDetails | /**
* Extract the login details from a URI.
*
* @param name URI of the filesystem
* @return a login tuple, possibly empty.
*/
public static Login extractLoginDetails(final URI name) {
try {
String authority = name.getAuthority();
if (authority == null) {
return Login.EMPTY;
}
int loginIndex = authority.indexOf('@');
if (loginIndex < 0) {
// no login
return Login.EMPTY;
}
String login = authority.substring(0, loginIndex);
int loginSplit = login.indexOf(':');
if (loginSplit > 0) {
String user = login.substring(0, loginSplit);
String encodedPassword = login.substring(loginSplit + 1);
if (encodedPassword.contains(PLUS_UNENCODED)) {
LOG.warn(PLUS_WARNING);
encodedPassword = encodedPassword.replaceAll(
"\\" + PLUS_UNENCODED, PLUS_ENCODED);
}
String password = URLDecoder.decode(encodedPassword, "UTF-8");
return new Login(user, password);
} else if (loginSplit == 0) {
// there is no user, just a password. In this case,
// there's no login
return Login.EMPTY;
} else {
return new Login(login, "");
}
} catch (UnsupportedEncodingException e) {
// this should never happen; translate it if it does.
throw new RuntimeException(e);
}
} | 3.68 |
hadoop_LengthInputStream_getLength | /** @return the length. */
public long getLength() {
return length;
} | 3.68 |
hbase_HFileBlock_createFromBuff | /**
* Creates a block from an existing buffer starting with a header. Rewinds and takes ownership of
* the buffer. By definition of rewind, ignores the buffer position, but if you slice the buffer
* beforehand, it will rewind to that point.
* @param buf Has header, content, and trailing checksums if present.
*/
static HFileBlock createFromBuff(ByteBuff buf, boolean usesHBaseChecksum, final long offset,
final int nextBlockOnDiskSize, HFileContext fileContext, ByteBuffAllocator allocator)
throws IOException {
buf.rewind();
final BlockType blockType = BlockType.read(buf);
final int onDiskSizeWithoutHeader = buf.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX);
final int uncompressedSizeWithoutHeader =
buf.getInt(Header.UNCOMPRESSED_SIZE_WITHOUT_HEADER_INDEX);
final long prevBlockOffset = buf.getLong(Header.PREV_BLOCK_OFFSET_INDEX);
    // This constructor is called when we deserialize a block from cache and when we read a block in
    // from the fs. fileContext is null when deserialized from cache, so we need to make one up.
HFileContextBuilder fileContextBuilder =
fileContext != null ? new HFileContextBuilder(fileContext) : new HFileContextBuilder();
fileContextBuilder.withHBaseCheckSum(usesHBaseChecksum);
int onDiskDataSizeWithHeader;
if (usesHBaseChecksum) {
byte checksumType = buf.get(Header.CHECKSUM_TYPE_INDEX);
int bytesPerChecksum = buf.getInt(Header.BYTES_PER_CHECKSUM_INDEX);
onDiskDataSizeWithHeader = buf.getInt(Header.ON_DISK_DATA_SIZE_WITH_HEADER_INDEX);
// Use the checksum type and bytes per checksum from header, not from fileContext.
fileContextBuilder.withChecksumType(ChecksumType.codeToType(checksumType));
fileContextBuilder.withBytesPerCheckSum(bytesPerChecksum);
} else {
fileContextBuilder.withChecksumType(ChecksumType.NULL);
fileContextBuilder.withBytesPerCheckSum(0);
      // Need to fix onDiskDataSizeWithHeader; there are no checksums after block data
onDiskDataSizeWithHeader = onDiskSizeWithoutHeader + headerSize(usesHBaseChecksum);
}
fileContext = fileContextBuilder.build();
assert usesHBaseChecksum == fileContext.isUseHBaseChecksum();
return new HFileBlockBuilder().withBlockType(blockType)
.withOnDiskSizeWithoutHeader(onDiskSizeWithoutHeader)
.withUncompressedSizeWithoutHeader(uncompressedSizeWithoutHeader)
.withPrevBlockOffset(prevBlockOffset).withOffset(offset)
.withOnDiskDataSizeWithHeader(onDiskDataSizeWithHeader)
.withNextBlockOnDiskSize(nextBlockOnDiskSize).withHFileContext(fileContext)
.withByteBuffAllocator(allocator).withByteBuff(buf.rewind()).withShared(!buf.hasArray())
.build();
} | 3.68 |
framework_Table_getCurrentWidth | /**
* Get the width in pixels of the column after the resize event.
*
* @return Width in pixels
*/
public int getCurrentWidth() {
return currentWidth;
} | 3.68 |
flink_TemporalTableJoinUtil_isRowTimeTemporalTableJoinCondition | /** Check if the given rexCall is a rewrote join condition on event time. */
public static boolean isRowTimeTemporalTableJoinCondition(RexCall call) {
// (LEFT_TIME_ATTRIBUTE, RIGHT_TIME_ATTRIBUTE, LEFT_KEY, RIGHT_KEY, PRIMARY_KEY)
return call.getOperator() == TemporalJoinUtil.TEMPORAL_JOIN_CONDITION()
&& call.operands.size() == 5;
} | 3.68 |
hudi_HFileBootstrapIndex_getUserKeyFromCellKey | /**
   * HFile stores a cell key in a format such as "2020/03/18//LATEST_TIMESTAMP/Put/vlen=3692/seqid=0".
   * This API returns only the user key part of it.
   * @param cellKey HFile cell key
   * @return the user key part of the cell key
*/
private static String getUserKeyFromCellKey(String cellKey) {
int hfileSuffixBeginIndex = cellKey.lastIndexOf(HFILE_CELL_KEY_SUFFIX_PART);
return cellKey.substring(0, hfileSuffixBeginIndex);
} | 3.68 |
framework_DragSourceExtensionConnector_isNativeDragEvent | /**
* Returns whether the given event is a native (android) drag start/end
* event, and not produced by the drag-drop-polyfill.
*
* @param nativeEvent
* the event to test
* @return {@code true} if native event, {@code false} if not (polyfill
* event)
*/
protected boolean isNativeDragEvent(NativeEvent nativeEvent) {
return isTrusted(nativeEvent) || isComposed(nativeEvent);
} | 3.68 |
hadoop_DiskBalancerDataNode_compareTo | /**
* Compares this object with the specified object for order. Returns a
* negative integer, zero, or a positive integer as this object is less than,
* equal to, or greater than the specified object.
*
* @param that the object to be compared.
* @return a negative integer, zero, or a positive integer as this object is
* less than, equal to, or greater than the specified object.
* @throws NullPointerException if the specified object is null
* @throws ClassCastException if the specified object's type prevents it
* from being compared to this object.
*/
@Override
public int compareTo(DiskBalancerDataNode that) {
Preconditions.checkNotNull(that);
if (Double.compare(this.nodeDataDensity - that.getNodeDataDensity(), 0)
< 0) {
return -1;
}
if (Double.compare(this.nodeDataDensity - that.getNodeDataDensity(), 0)
== 0) {
return 0;
}
if (Double.compare(this.nodeDataDensity - that.getNodeDataDensity(), 0)
> 0) {
return 1;
}
return 0;
} | 3.68 |
hadoop_HsLogsPage_preHead | /*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
*/
@Override protected void preHead(Page.HTML<__> html) {
commonPreHead(html);
setActiveNavColumnForTask();
} | 3.68 |
hudi_HoodieDataSourceHelpers_allCompletedCommitsCompactions | /**
* Obtain all the commits, compactions that have occurred on the timeline, whose instant times could be fed into the
* datasource options.
*/
@PublicAPIMethod(maturity = ApiMaturityLevel.STABLE)
public static HoodieTimeline allCompletedCommitsCompactions(FileSystem fs, String basePath) {
HoodieTableMetaClient metaClient = HoodieTableMetaClient.builder().setConf(fs.getConf()).setBasePath(basePath).setLoadActiveTimelineOnLoad(true).build();
if (metaClient.getTableType().equals(HoodieTableType.MERGE_ON_READ)) {
return metaClient.getActiveTimeline().getTimelineOfActions(
CollectionUtils.createSet(HoodieActiveTimeline.COMMIT_ACTION,
HoodieActiveTimeline.DELTA_COMMIT_ACTION,
HoodieActiveTimeline.REPLACE_COMMIT_ACTION)).filterCompletedInstants();
} else {
return metaClient.getCommitTimeline().filterCompletedInstants();
}
} | 3.68 |
framework_HierarchicalContainer_hasChildren | /*
* Is the Item corresponding to the given ID a leaf node? Don't add a
* JavaDoc comment here, we use the default documentation from implemented
* interface.
*/
@Override
public boolean hasChildren(Object itemId) {
if (filteredChildren != null) {
return filteredChildren.containsKey(itemId);
} else {
return children.containsKey(itemId);
}
} | 3.68 |
morf_AnalyseTable_getTableName | /**
* Method to get the table that is being analysed.
*
   * @return the name of the table being analysed
*/
public String getTableName() {
return tableName;
} | 3.68 |
hbase_Client_delete | /**
* Send a DELETE request
* @param cluster the cluster definition
   * @param path the path or URI
   * @param extraHdr additional header to send with the request
   * @return a Response object with response detail
* @throws IOException for error
*/
public Response delete(Cluster cluster, String path, Header extraHdr) throws IOException {
HttpDelete method = new HttpDelete(path);
try {
Header[] headers = { extraHdr };
HttpResponse resp = execute(cluster, method, headers, path);
headers = resp.getAllHeaders();
byte[] content = getResponseBody(resp);
return new Response(resp.getStatusLine().getStatusCode(), headers, content);
} finally {
method.releaseConnection();
}
} | 3.68 |
hadoop_AltKerberosAuthenticationHandler_authenticate | /**
   * It enforces the Kerberos SPNEGO authentication sequence returning an
* {@link AuthenticationToken} only after the Kerberos SPNEGO sequence has
* completed successfully (in the case of Java access) and only after the
* custom authentication implemented by the subclass in alternateAuthenticate
* has completed successfully (in the case of browser access).
*
* @param request the HTTP client request.
* @param response the HTTP client response.
*
* @return an authentication token if the request is authorized or null
*
* @throws IOException thrown if an IO error occurred
* @throws AuthenticationException thrown if an authentication error occurred
*/
@Override
public AuthenticationToken authenticate(HttpServletRequest request,
HttpServletResponse response)
throws IOException, AuthenticationException {
AuthenticationToken token;
if (isBrowser(request.getHeader("User-Agent"))) {
token = alternateAuthenticate(request, response);
}
else {
token = super.authenticate(request, response);
}
return token;
} | 3.68 |
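The dispatch above hinges on an `isBrowser` check of the User-Agent header. A hypothetical sketch of such a check is shown below; the real handler's list of non-browser markers is configurable, so both the marker values and the matching rule are assumptions.

```java
import java.util.Locale;

// Hypothetical sketch of a User-Agent based browser check. The real handler's
// non-browser marker list is configurable and its matching rules may differ.
final class BrowserCheck {

    private static final String[] NON_BROWSER_MARKERS = {"java", "curl", "wget", "perl"};

    static boolean looksLikeBrowser(String userAgent) {
        if (userAgent == null) {
            return false;
        }
        String ua = userAgent.toLowerCase(Locale.ROOT);
        for (String marker : NON_BROWSER_MARKERS) {
            if (ua.contains(marker)) {
                return false; // known non-browser client: use Kerberos SPNEGO
            }
        }
        return true; // everything else is treated as a browser
    }
}
```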
hadoop_DockerKillCommand_setSignal | /**
* Set the signal for the {@link DockerKillCommand}.
*
* @param signal the signal to send to the container.
* @return the {@link DockerKillCommand} with the signal set.
*/
public DockerKillCommand setSignal(String signal) {
super.addCommandArguments("signal", signal);
return this;
} | 3.68 |
framework_TextArea_setRows | /**
* Sets the number of rows in the text area.
*
* @param rows
* the number of rows for this text area.
*/
public void setRows(int rows) {
if (rows < 0) {
rows = 0;
}
getState().rows = rows;
} | 3.68 |
flink_KeyMap_get | /**
* Looks up the value mapped under the given key. Returns null if no value is mapped under this
* key.
*
* @param key The key to look up.
* @return The value associated with the key, or null, if no value is found for the key.
* @throws java.lang.NullPointerException Thrown, if the key is null.
*/
public V get(K key) {
final int hash = hash(key);
final int slot = indexOf(hash);
// search the chain from the slot
for (Entry<K, V> entry = table[slot]; entry != null; entry = entry.next) {
if (entry.hashCode == hash && entry.key.equals(key)) {
return entry.value;
}
}
// not found
return null;
} | 3.68 |
hbase_TableHFileArchiveTracker_keepHFiles | /**
* Determine if the given table should or should not allow its hfiles to be deleted
* @param tableName name of the table to check
* @return <tt>true</tt> if its store files should be retained, <tt>false</tt> otherwise
*/
public boolean keepHFiles(String tableName) {
return getMonitor().shouldArchiveTable(tableName);
} | 3.68 |
framework_InfoSection_uidl | /*
* (non-Javadoc)
*
* @see com.vaadin.client.debug.internal.Section#uidl(com.vaadin.client.
* ApplicationConnection, com.vaadin.client.ValueMap)
*/
@Override
public void uidl(ApplicationConnection ac, ValueMap uidl) {
} | 3.68 |
dubbo_StringUtils_toString | /**
     * @param msg the message to prepend to the stack trace
     * @param e the throwable whose stack trace to append
     * @return the message followed by the stack trace of {@code e}
*/
public static String toString(String msg, Throwable e) {
UnsafeStringWriter w = new UnsafeStringWriter();
w.write(msg + "\n");
PrintWriter p = new PrintWriter(w);
try {
e.printStackTrace(p);
return w.toString();
} finally {
p.close();
}
} | 3.68 |
flink_BlobCacheSizeTracker_untrack | /** Remove the BLOB from the tracker. */
private void untrack(JobID jobId, BlobKey blobKey) {
checkNotNull(jobId);
checkNotNull(blobKey);
untrack(Tuple2.of(jobId, blobKey));
} | 3.68 |
framework_DateField_setLenient | /**
* Specifies whether or not date/time interpretation in component is to be
* lenient.
*
* @see Calendar#setLenient(boolean)
* @see #isLenient()
*
* @param lenient
* true if the lenient mode is to be turned on; false if it is to
* be turned off.
*/
public void setLenient(boolean lenient) {
this.lenient = lenient;
markAsDirty();
} | 3.68 |
hudi_ExpressionPredicates_bindFieldReference | /**
* Binds field reference to create a column predicate.
*
   * @param fieldReference The field reference to bind.
* @return A column predicate.
*/
public ColumnPredicate bindFieldReference(FieldReferenceExpression fieldReference) {
this.literalType = fieldReference.getOutputDataType().getLogicalType();
this.columnName = fieldReference.getName();
return this;
} | 3.68 |
hadoop_OBSFileSystem_initialize | /**
* Initialize a FileSystem. Called after a new FileSystem instance is
* constructed.
*
* @param name a URI whose authority section names the host, port,
* etc. for this FileSystem
* @param originalConf the configuration to use for the FS. The
* bucket-specific options are patched over the base ones
* before any use is made of the config.
*/
@Override
public void initialize(final URI name, final Configuration originalConf)
throws IOException {
uri = URI.create(name.getScheme() + "://" + name.getAuthority());
bucket = name.getAuthority();
// clone the configuration into one with propagated bucket options
Configuration conf = OBSCommonUtils.propagateBucketOptions(originalConf,
bucket);
OBSCommonUtils.patchSecurityCredentialProviders(conf);
super.initialize(name, conf);
setConf(conf);
try {
// Username is the current user at the time the FS was instantiated.
username = UserGroupInformation.getCurrentUser().getShortUserName();
workingDir = new Path("/user", username).makeQualified(this.uri,
this.getWorkingDirectory());
Class<? extends OBSClientFactory> obsClientFactoryClass =
conf.getClass(
OBSConstants.OBS_CLIENT_FACTORY_IMPL,
OBSConstants.DEFAULT_OBS_CLIENT_FACTORY_IMPL,
OBSClientFactory.class);
obs = ReflectionUtils.newInstance(obsClientFactoryClass, conf)
.createObsClient(name);
sse = new SseWrapper(conf);
OBSCommonUtils.verifyBucketExists(this);
enablePosix = OBSCommonUtils.getBucketFsStatus(obs, bucket);
maxKeys = OBSCommonUtils.intOption(conf,
OBSConstants.MAX_PAGING_KEYS,
OBSConstants.DEFAULT_MAX_PAGING_KEYS, 1);
obsListing = new OBSListing(this);
partSize = OBSCommonUtils.getMultipartSizeProperty(conf,
OBSConstants.MULTIPART_SIZE,
OBSConstants.DEFAULT_MULTIPART_SIZE);
// check but do not store the block size
blockSize = OBSCommonUtils.longBytesOption(conf,
OBSConstants.FS_OBS_BLOCK_SIZE,
OBSConstants.DEFAULT_FS_OBS_BLOCK_SIZE, 1);
enableMultiObjectDelete = conf.getBoolean(
OBSConstants.ENABLE_MULTI_DELETE, true);
maxEntriesToDelete = conf.getInt(
OBSConstants.MULTI_DELETE_MAX_NUMBER,
OBSConstants.DEFAULT_MULTI_DELETE_MAX_NUMBER);
enableMultiObjectDeleteRecursion = conf.getBoolean(
OBSConstants.MULTI_DELETE_RECURSION, true);
obsContentSummaryEnable = conf.getBoolean(
OBSConstants.OBS_CONTENT_SUMMARY_ENABLE, true);
readAheadRange = OBSCommonUtils.longBytesOption(conf,
OBSConstants.READAHEAD_RANGE,
OBSConstants.DEFAULT_READAHEAD_RANGE, 0);
readTransformEnable = conf.getBoolean(
OBSConstants.READ_TRANSFORM_ENABLE, true);
multiDeleteThreshold = conf.getInt(
OBSConstants.MULTI_DELETE_THRESHOLD,
OBSConstants.MULTI_DELETE_DEFAULT_THRESHOLD);
initThreadPools(conf);
writeHelper = new OBSWriteOperationHelper(this);
initCannedAcls(conf);
OBSCommonUtils.initMultipartUploads(this, conf);
String blockOutputBuffer = conf.getTrimmed(
OBSConstants.FAST_UPLOAD_BUFFER,
OBSConstants.FAST_UPLOAD_BUFFER_DISK);
partSize = OBSCommonUtils.ensureOutputParameterInRange(
OBSConstants.MULTIPART_SIZE, partSize);
blockFactory = OBSDataBlocks.createFactory(this, blockOutputBuffer);
blockOutputActiveBlocks =
OBSCommonUtils.intOption(conf,
OBSConstants.FAST_UPLOAD_ACTIVE_BLOCKS,
OBSConstants.DEFAULT_FAST_UPLOAD_ACTIVE_BLOCKS, 1);
LOG.debug(
"Using OBSBlockOutputStream with buffer = {}; block={};"
+ " queue limit={}",
blockOutputBuffer,
partSize,
blockOutputActiveBlocks);
enableTrash = conf.getBoolean(OBSConstants.TRASH_ENABLE,
OBSConstants.DEFAULT_TRASH);
if (enableTrash) {
if (!isFsBucket()) {
String errorMsg = String.format(
"The bucket [%s] is not posix. not supported for "
+ "trash.", bucket);
LOG.warn(errorMsg);
enableTrash = false;
trashDir = null;
} else {
trashDir = conf.get(OBSConstants.TRASH_DIR);
if (StringUtils.isEmpty(trashDir)) {
String errorMsg =
String.format(
"The trash feature(fs.obs.trash.enable) is "
+ "enabled, but the "
+ "configuration(fs.obs.trash.dir [%s]) "
+ "is empty.",
trashDir);
LOG.error(errorMsg);
throw new ObsException(errorMsg);
}
trashDir = OBSCommonUtils.maybeAddBeginningSlash(trashDir);
trashDir = OBSCommonUtils.maybeAddTrailingSlash(trashDir);
}
}
} catch (ObsException e) {
throw OBSCommonUtils.translateException("initializing ",
new Path(name), e);
}
} | 3.68 |
flink_ExtractionUtils_createMethodSignatureString | /** Creates a method signature string like {@code int eval(Integer, String)}. */
public static String createMethodSignatureString(
String methodName, Class<?>[] parameters, @Nullable Class<?> returnType) {
final StringBuilder builder = new StringBuilder();
if (returnType != null) {
builder.append(returnType.getCanonicalName()).append(" ");
}
builder.append(methodName)
.append(
Stream.of(parameters)
.map(
parameter -> {
// in case we don't know the parameter at this location
// (i.e. for accumulators)
if (parameter == null) {
return "_";
} else {
return parameter.getCanonicalName();
}
})
.collect(Collectors.joining(", ", "(", ")")));
return builder.toString();
} | 3.68 |
morf_UpgradeTestHelper_validateUpgradeStepProperties | /**
* Validate that each upgrade step meets the basic requirements.
* For example UUID, Sequence, JIRA ID and Description are all populated.
*/
public void validateUpgradeStepProperties(Iterable<Class<? extends UpgradeStep>> upgradeSteps) {
instantiateAndValidateUpgradeSteps(upgradeSteps);
} | 3.68 |
pulsar_WRRPlacementStrategy_findBrokerForPlacement | /**
     * Function : getByWeightedRoundRobin returns the ResourceUnit selected by the WRR algorithm,
     * based on the resources available on each ResourceUnit.
* <code>
* ^
* |
* |
* |
* | | | | |
* | | | | |
* | Broker 2 | Broker 3 | Broker 1 | B4 |
* | | | | |
* +----------------+------------------------+--------------------------------+---------
* 0 20 50 90 100
*
* This is weighted Round robin, we calculate weight based on availability of resources;
* total availability is taken as a full range then each broker is given range based on
* its resource availability, if the number generated within total range happens to be in
* broker's range, that broker is selected
* </code>
*/
public ResourceUnit findBrokerForPlacement(Multimap<Long, ResourceUnit> finalCandidates) {
if (finalCandidates.isEmpty()) {
return null;
}
log.debug("Total Final Candidates selected - [{}]", finalCandidates.size());
int totalAvailability = 0;
for (Map.Entry<Long, ResourceUnit> candidateOwner : finalCandidates.entries()) {
totalAvailability += candidateOwner.getKey().intValue();
}
ResourceUnit selectedRU = null;
if (totalAvailability <= 0) {
// todo: this means all the brokers are overloaded and we can't assign this namespace to any broker
            // for now, pick anyone and return that one, because when we don't have ranking we put 0 for each broker
return finalCandidates.get(0L)
.stream()
.skip(rand.nextInt(finalCandidates.size()))
.findFirst()
.orElse(null);
}
int weightedSelector = rand.nextInt(totalAvailability);
log.debug("Generated Weighted Selector Number - [{}] ", weightedSelector);
int weightRangeSoFar = 0;
for (Map.Entry<Long, ResourceUnit> candidateOwner : finalCandidates.entries()) {
weightRangeSoFar += candidateOwner.getKey();
if (weightedSelector < weightRangeSoFar) {
selectedRU = candidateOwner.getValue();
log.debug(" Weighted Round Robin Selected RU - [{}]", candidateOwner.getValue().getResourceId());
break;
}
}
return selectedRU;
} | 3.68 |
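A standalone sketch of the cumulative-weight pick described by the diagram above: each broker owns a slice of [0, totalAvailability) proportional to its availability, and a uniform random number selects the slice it falls into. The broker names and weights are made up to mirror the diagram; Pulsar's ResourceUnit is replaced by plain strings.

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Random;

public class WrrSketch {
    public static void main(String[] args) {
        Map<String, Integer> availability = new LinkedHashMap<>();
        availability.put("broker-2", 20); // range [0, 20)
        availability.put("broker-3", 30); // range [20, 50)
        availability.put("broker-1", 40); // range [50, 90)
        availability.put("broker-4", 10); // range [90, 100)

        int total = availability.values().stream().mapToInt(Integer::intValue).sum();
        int selector = new Random().nextInt(total);

        int soFar = 0;
        for (Map.Entry<String, Integer> e : availability.entrySet()) {
            soFar += e.getValue();
            if (selector < soFar) {
                System.out.println("selected " + e.getKey());
                break;
            }
        }
    }
}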
framework_IndexedContainer_equals | /**
     * Tests if the given object is the same as this object. Two
     * Properties obtained from an Item with the same ID are equal.
*
* @param obj
* an object to compare with this object
* @return <code>true</code> if the given object is the same as this
* object, <code>false</code> if not
*/
@Override
public boolean equals(Object obj) {
if (obj == null
|| !obj.getClass().equals(IndexedContainerProperty.class)) {
return false;
}
final IndexedContainerProperty lp = (IndexedContainerProperty) obj;
return lp.getHost() == getHost() && lp.propertyId.equals(propertyId)
&& lp.itemId.equals(itemId);
} | 3.68 |
hudi_SparkInternalSchemaConverter_convertIntLongType | /**
   * Converts an int/long type to another type.
   * Currently only supports int/long -> long/float/double/string/Decimal.
* TODO: support more types
*/
private static boolean convertIntLongType(WritableColumnVector oldV, WritableColumnVector newV, DataType newType, int len) {
boolean isInt = oldV.dataType() instanceof IntegerType;
if (newType instanceof LongType || newType instanceof FloatType
|| newType instanceof DoubleType || newType instanceof StringType || newType instanceof DecimalType) {
for (int i = 0; i < len; i++) {
if (oldV.isNullAt(i)) {
newV.putNull(i);
continue;
}
// int/long -> long/float/double/string/decimal
if (newType instanceof LongType) {
newV.putLong(i, isInt ? oldV.getInt(i) : oldV.getLong(i));
} else if (newType instanceof FloatType) {
newV.putFloat(i, isInt ? oldV.getInt(i) : oldV.getLong(i));
} else if (newType instanceof DoubleType) {
newV.putDouble(i, isInt ? oldV.getInt(i) : oldV.getLong(i));
} else if (newType instanceof StringType) {
newV.putByteArray(i, getUTF8Bytes((isInt ? oldV.getInt(i) : oldV.getLong(i)) + ""));
} else if (newType instanceof DecimalType) {
Decimal oldDecimal = Decimal.apply(isInt ? oldV.getInt(i) : oldV.getLong(i));
oldDecimal.changePrecision(((DecimalType) newType).precision(), ((DecimalType) newType).scale());
newV.putDecimal(i, oldDecimal, ((DecimalType) newType).precision());
}
}
return true;
}
return false;
} | 3.68 |
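A scalar sketch of the widening rules above, with Spark's WritableColumnVector and Decimal replaced by plain Java types so the example stands alone; the scale value is arbitrary.

import java.math.BigDecimal;

public class WideningSketch {
    public static void main(String[] args) {
        long v = 42L;
        double asDouble = v;                          // int/long -> double
        String asString = v + "";                     // int/long -> string
        BigDecimal asDecimal = BigDecimal.valueOf(v)
                .setScale(2);                         // int/long -> decimal(p, 2)
        System.out.println(asDouble + " " + asString + " " + asDecimal);
    }
}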
framework_ShowLastItem_setup | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server.
* VaadinRequest)
*/
@Override
protected void setup(VaadinRequest request) {
final Table table = new Table();
table.setHeight("210px");
table.addContainerProperty("Col", String.class, "");
for (int i = 0; i < 20; i++) {
table.addItem(i).getItemProperty("Col")
.setValue("row " + String.valueOf(i));
}
Button addItemBtn = new Button("Add item", event -> {
Object itemId = "row " + table.getItemIds().size();
table.addItem(itemId).getItemProperty("Col")
.setValue(String.valueOf(itemId));
table.setCurrentPageFirstItemIndex(table.getItemIds().size() - 1);
});
addComponent(table);
addComponent(addItemBtn);
} | 3.68 |
flink_ScriptProcessBuilder_getAbsolutePath | /** Returns the full path name of this file if it is listed in the path. */
public File getAbsolutePath(String filename) {
if (pathenv == null || pathSep == null || fileSep == null) {
return null;
}
int val;
String classvalue = pathenv + pathSep;
while (((val = classvalue.indexOf(pathSep)) >= 0) && classvalue.length() > 0) {
//
// Extract each entry from the pathenv
//
String entry = classvalue.substring(0, val).trim();
File f = new File(entry);
try {
if (f.isDirectory()) {
//
// this entry in the pathenv is a directory.
// see if the required file is in this directory
//
f = new File(entry + fileSep + filename);
}
//
// see if the filename matches and we can read it
//
if (f.isFile() && f.canRead()) {
return f;
}
} catch (Exception ignored) {
}
classvalue = classvalue.substring(val + 1).trim();
}
return null;
} | 3.68 |
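A standalone illustration of the PATH-style lookup above, assuming Unix-style separators; it mirrors the directory/file check but uses split() instead of the manual indexOf loop, and the example arguments are made up.

import java.io.File;
import java.util.regex.Pattern;

public class PathLookupSketch {
    static File findOnPath(String pathenv, String pathSep, String fileSep, String filename) {
        for (String entry : pathenv.split(Pattern.quote(pathSep))) {
            File f = new File(entry.trim());
            if (f.isDirectory()) {
                // the entry is a directory; look for the required file inside it
                f = new File(entry.trim() + fileSep + filename);
            }
            if (f.isFile() && f.canRead()) {
                return f;
            }
        }
        return null;
    }

    public static void main(String[] args) {
        System.out.println(findOnPath("/usr/bin:/bin", ":", "/", "ls"));
    }
}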
framework_VDebugWindow_readPositionAndSize | /**
* Reads position and size from the DOM to local variables (which in turn
     * can be stored to localStorage).
*/
private void readPositionAndSize() {
int x = getPopupLeft();
int fromRight = Window.getClientWidth() - x - getOffsetWidth();
if (fromRight < x) {
x -= Window.getClientWidth();
}
int y = getPopupTop();
int fromBottom = Window.getClientHeight() - y - getOffsetHeight();
if (fromBottom < y) {
y -= Window.getClientHeight();
}
if (minimized) {
minY = y;
minX = x;
} else {
fullY = y;
fullX = x;
fullW = content.getOffsetWidth();
fullH = content.getOffsetHeight();
}
} | 3.68 |
hadoop_NMClient_createNMClient | /**
* Create a new instance of NMClient.
*/
@Public
public static NMClient createNMClient(String name) {
NMClient client = new NMClientImpl(name);
return client;
} | 3.68 |
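A hedged usage sketch: the init/start/stop lifecycle comes from the YARN service API that NMClient builds on, and the client name is made up. Method names are believed to match the Hadoop YARN client API but should be verified against the target version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.client.api.NMClient;

public class NMClientSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        NMClient nmClient = NMClient.createNMClient("example-nm-client");
        nmClient.init(conf);   // lifecycle methods inherited from the service base class
        nmClient.start();
        // ... start/stop containers via nmClient here ...
        nmClient.stop();
    }
}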
flink_ExpressionResolver_resolveExpanding | /**
* Resolves given expressions with configured set of rules. All expressions of an operation
* should be given at once as some rules might assume the order of expressions.
*
* <p>After this method is applied the returned expressions might contain unresolved expression
* that can be used for further API transformations.
*
* @param expressions list of expressions to resolve.
     * @return the resolved list of expressions
*/
public List<Expression> resolveExpanding(List<Expression> expressions) {
final Function<List<Expression>, List<Expression>> resolveFunction =
concatenateRules(getExpandingResolverRules());
return resolveFunction.apply(expressions);
} | 3.68 |
graphhopper_WaySegmentParser_setWayPreprocessor | /**
* @param wayPreprocessor callback function that is called for each accepted OSM way during the second pass
*/
public Builder setWayPreprocessor(WayPreprocessor wayPreprocessor) {
waySegmentParser.wayPreprocessor = wayPreprocessor;
return this;
} | 3.68 |
hbase_ClientExceptionsUtil_findException | /**
   * Look for an exception we know in the remote exception:
   * - hadoop.ipc wrapped exceptions
   * - nested exceptions
   * Looks for: RegionMovedException / RegionOpeningException / RegionTooBusyException /
   * RpcThrottlingException
* @return null if we didn't find the exception, the exception otherwise.
*/
public static Throwable findException(Object exception) {
if (exception == null || !(exception instanceof Throwable)) {
return null;
}
Throwable cur = (Throwable) exception;
while (cur != null) {
if (isSpecialException(cur)) {
return cur;
}
if (cur instanceof RemoteException) {
RemoteException re = (RemoteException) cur;
cur = re.unwrapRemoteException();
// unwrapRemoteException can return the exception given as a parameter when it cannot
// unwrap it. In this case, there is no need to look further
// noinspection ObjectEquality
if (cur == re) {
return cur;
}
// When we receive RemoteException which wraps IOException which has a cause as
// RemoteException we can get into infinite loop here; so if the cause of the exception
// is RemoteException, we shouldn't look further.
} else if (cur.getCause() != null && !(cur.getCause() instanceof RemoteException)) {
cur = cur.getCause();
} else {
return cur;
}
}
return null;
} | 3.68 |
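A pure-Java sketch of the cause-walking idea behind findException; the RemoteException unwrapping branch is omitted because it needs the Hadoop IPC classes, and the "special exception" check is reduced to a single class supplied by the caller.

public class FindExceptionSketch {
    static Throwable findSpecial(Throwable t, Class<? extends Throwable> special) {
        // Walk the cause chain until a matching exception is found or the chain ends.
        for (Throwable cur = t; cur != null; cur = cur.getCause()) {
            if (special.isInstance(cur)) {
                return cur;
            }
        }
        return null;
    }

    public static void main(String[] args) {
        Exception nested = new RuntimeException("outer",
                new IllegalStateException("the one we want"));
        System.out.println(findSpecial(nested, IllegalStateException.class));
    }
}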
hadoop_FlowActivityRowKey_getRowKey | /**
* Constructs a row key for the flow activity table as follows:
* {@code clusterId!dayTimestamp!user!flowName}.
*
* @return byte array for the row key
*/
public byte[] getRowKey() {
return flowActivityRowKeyConverter.encode(this);
} | 3.68 |