Columns:
name: string (length 12 to 178)
code_snippet: string (length 8 to 36.5k)
score: float64 (range 3.26 to 3.68)
framework_VaadinService_setSystemMessagesProvider
/** * Sets the system messages provider to use for getting system messages to * display to users of this service. * * @see #getSystemMessagesProvider() * * @param systemMessagesProvider * the system messages provider; <code>null</code> is not * allowed. */ public void setSystemMessagesProvider( SystemMessagesProvider systemMessagesProvider) { if (systemMessagesProvider == null) { throw new IllegalArgumentException( "SystemMessagesProvider can not be null."); } this.systemMessagesProvider = systemMessagesProvider; }
3.68
querydsl_GenericExporter_setStrictMode
/** * Sets whether annotationless superclasses are handled or not (default: true). * * @param s the new value of the strict mode flag */ public void setStrictMode(boolean s) { strictMode = s; }
3.68
Activiti_TreeBuilderException_getExpected
/** * @return the substring (or description) that was expected */ public String getExpected() { return expected; }
3.68
framework_VFilterSelect_popupKeyDown
/** * Triggered when a key was pressed in the suggestion popup. * * @param event * The KeyDownEvent of the key */ private void popupKeyDown(KeyDownEvent event) { if (enableDebug) { debug("VFS: popupKeyDown(" + event.getNativeKeyCode() + ")"); } // Propagation of handled events is stopped so other handlers such as // shortcut key handlers do not also handle the same events. switch (event.getNativeKeyCode()) { case KeyCodes.KEY_DOWN: suggestionPopup.selectNextItem(); DOM.eventPreventDefault(DOM.eventGetCurrentEvent()); event.stopPropagation(); break; case KeyCodes.KEY_UP: suggestionPopup.selectPrevItem(); DOM.eventPreventDefault(DOM.eventGetCurrentEvent()); event.stopPropagation(); break; case KeyCodes.KEY_PAGEDOWN: selectNextPage(); event.stopPropagation(); break; case KeyCodes.KEY_PAGEUP: selectPrevPage(); event.stopPropagation(); break; case KeyCodes.KEY_ESCAPE: reset(); DOM.eventPreventDefault(DOM.eventGetCurrentEvent()); event.stopPropagation(); break; case KeyCodes.KEY_TAB: case KeyCodes.KEY_ENTER: if (!allowNewItem) { int selected = suggestionPopup.menu.getSelectedIndex(); if (selected != -1) { onSuggestionSelected(currentSuggestions.get(selected)); } else { // The way VFilterSelect is done, it handles enter and tab // in exactly the same way so we close the popup in both // cases even though we could leave it open when pressing // enter suggestionPopup.hide(); } } else { // Handle addition of new items. suggestionPopup.menu.doSelectedItemAction(); } event.stopPropagation(); break; } }
3.68
hadoop_ImpersonationProvider_authorize
/** * Authorize the superuser which is doing doAs. * {@link #authorize(UserGroupInformation, InetAddress)} should * be preferred to avoid possibly re-resolving the ip address. * @param user ugi of the effective or proxy user which contains a real user. * @param remoteAddress the ip address of client. * @throws AuthorizationException Authorization Exception. */ default void authorize(UserGroupInformation user, String remoteAddress) throws AuthorizationException { try { authorize(user, InetAddress.getByName(remoteAddress)); } catch (UnknownHostException e) { throw new AuthorizationException(e); } }
3.68
flink_RunLengthDecoder_initWidthAndPacker
/** Initializes the internal state for decoding ints of `bitWidth`. */ private void initWidthAndPacker(int bitWidth) { Preconditions.checkArgument( bitWidth >= 0 && bitWidth <= 32, "bitWidth must be >= 0 and <= 32"); this.bitWidth = bitWidth; this.bytesWidth = BytesUtils.paddedByteCountFromBits(bitWidth); this.packer = Packer.LITTLE_ENDIAN.newBytePacker(bitWidth); }
3.68
framework_AbstractSelect_getItemCaption
/** * Gets the caption of an item. The caption is generated as specified by the * item caption mode. See <code>setItemCaptionMode()</code> for more * details. * * @param itemId * the id of the item to be queried. * @return the caption for specified item. */ public String getItemCaption(Object itemId) { // Null items can not be found if (itemId == null) { return null; } String caption = null; switch (getItemCaptionMode()) { case ID: caption = idToCaption(itemId); break; case ID_TOSTRING: caption = itemId.toString(); break; case INDEX: if (items instanceof Container.Indexed) { caption = String .valueOf(((Container.Indexed) items).indexOfId(itemId)); } else { caption = "ERROR: Container is not indexed"; } break; case ITEM: final Item i = getItem(itemId); if (i != null) { caption = i.toString(); } break; case EXPLICIT: caption = itemCaptions.get(itemId); break; case EXPLICIT_DEFAULTS_ID: caption = itemCaptions.get(itemId); if (caption == null) { caption = idToCaption(itemId); } break; case PROPERTY: final Property<?> p = getContainerProperty(itemId, getItemCaptionPropertyId()); if (p != null) { Object value = p.getValue(); if (value != null) { caption = value.toString(); } } break; } // All items must have some captions return caption != null ? caption : ""; }
3.68
rocketmq-connect_KafkaConnectAdaptorSource_transforms
/** * Applies the configured transformations to the record, in order. * * @param record the record to transform * @return the transformed record, or null if a transformation dropped it */ @Override protected SourceRecord transforms(SourceRecord record) { List<Transformation> transformations = transformationWrapper.transformations(); Iterator transformationIterator = transformations.iterator(); while (transformationIterator.hasNext()) { Transformation<SourceRecord> transformation = (Transformation) transformationIterator.next(); log.trace("applying transformation {} to {}", transformation.getClass().getName(), record); record = transformation.apply(record); if (record == null) { break; } } return record; }
3.68
open-banking-gateway_HbciFlowNameSelector_getNameForExecution
/** * Sub-process name for current context (PSU/FinTech input) execution (real calls to ASPSP API). */ public String getNameForExecution(HbciContext ctx) { return actionName(ctx); }
3.68
flink_WindowedStateTransformation_evictor
/** * Sets the {@code Evictor} that should be used to evict elements from a window before emission. * * <p>Note: When using an evictor window performance will degrade significantly, since * incremental aggregation of window results cannot be used. */ @PublicEvolving public WindowedStateTransformation<T, K, W> evictor(Evictor<? super T, ? super W> evictor) { builder.evictor(evictor); return this; }
3.68
hbase_Queue_compareKey
// ====================================================================== // Generic Helpers // ====================================================================== public int compareKey(TKey cmpKey) { return key.compareTo(cmpKey); }
3.68
rocketmq-connect_JdbcSourceTask_validate
/** * Should be invoked before starting the connector. * * @param config the connector configuration to validate */ @Override public void validate(KeyValue config) { }
3.68
hadoop_Utils_writeVLong
/** * Encoding a Long integer into a variable-length encoding format. * <ul> * <li>if n in [-32, 127): encode in one byte with the actual value. * Otherwise, * <li>if n in [-20*2^8, 20*2^8): encode in two bytes: byte[0] = n/256 - 52; * byte[1]=n&amp;0xff. Otherwise, * <li>if n IN [-16*2^16, 16*2^16): encode in three bytes: byte[0]=n/2^16 - * 88; byte[1]=(n&gt;&gt;8)&amp;0xff; byte[2]=n&amp;0xff. Otherwise, * <li>if n in [-8*2^24, 8*2^24): encode in four bytes: byte[0]=n/2^24 - 112; * byte[1] = (n&gt;&gt;16)&amp;0xff; byte[2] = (n&gt;&gt;8)&amp;0xff; * byte[3]=n&amp;0xff. * Otherwise: * <li>if n in [-2^31, 2^31): encode in five bytes: byte[0]=-125; byte[1] = * (n&gt;&gt;24)&amp;0xff; byte[2]=(n&gt;&gt;16)&amp;0xff; * byte[3]=(n&gt;&gt;8)&amp;0xff; byte[4]=n&amp;0xff; * <li>if n in [-2^39, 2^39): encode in six bytes: byte[0]=-124; byte[1] = * (n&gt;&gt;32)&amp;0xff; byte[2]=(n&gt;&gt;24)&amp;0xff; * byte[3]=(n&gt;&gt;16)&amp;0xff; byte[4]=(n&gt;&gt;8)&amp;0xff; * byte[5]=n&amp;0xff * <li>if n in [-2^47, 2^47): encode in seven bytes: byte[0]=-123; byte[1] = * (n&gt;&gt;40)&amp;0xff; byte[2]=(n&gt;&gt;32)&amp;0xff; * byte[3]=(n&gt;&gt;24)&amp;0xff; byte[4]=(n&gt;&gt;16)&amp;0xff; * byte[5]=(n&gt;&gt;8)&amp;0xff; byte[6]=n&amp;0xff; * <li>if n in [-2^55, 2^55): encode in eight bytes: byte[0]=-122; byte[1] = * (n&gt;&gt;48)&amp;0xff; byte[2] = (n&gt;&gt;40)&amp;0xff; * byte[3]=(n&gt;&gt;32)&amp;0xff; byte[4]=(n&gt;&gt;24)&amp;0xff; byte[5]= * (n&gt;&gt;16)&amp;0xff; byte[6]=(n&gt;&gt;8)&amp;0xff; byte[7]=n&amp;0xff; * <li>if n in [-2^63, 2^63): encode in nine bytes: byte[0]=-121; byte[1] = * (n&gt;&gt;54)&amp;0xff; byte[2] = (n&gt;&gt;48)&amp;0xff; * byte[3] = (n&gt;&gt;40)&amp;0xff; byte[4]=(n&gt;&gt;32)&amp;0xff; * byte[5]=(n&gt;&gt;24)&amp;0xff; byte[6]=(n&gt;&gt;16)&amp;0xff; byte[7]= * (n&gt;&gt;8)&amp;0xff; byte[8]=n&amp;0xff; * </ul> * * @param out * output stream * @param n * the integer number * @throws IOException raised on errors performing I/O. */ @SuppressWarnings("fallthrough") public static void writeVLong(DataOutput out, long n) throws IOException { if ((n < 128) && (n >= -32)) { out.writeByte((int) n); return; } long un = (n < 0) ? ~n : n; // how many bytes do we need to represent the number with sign bit? int len = (Long.SIZE - Long.numberOfLeadingZeros(un)) / 8 + 1; int firstByte = (int) (n >> ((len - 1) * 8)); switch (len) { case 1: // fall it through to firstByte==-1, len=2. firstByte >>= 8; case 2: if ((firstByte < 20) && (firstByte >= -20)) { out.writeByte(firstByte - 52); out.writeByte((int) n); return; } // fall it through to firstByte==0/-1, len=3. firstByte >>= 8; case 3: if ((firstByte < 16) && (firstByte >= -16)) { out.writeByte(firstByte - 88); out.writeShort((int) n); return; } // fall it through to firstByte==0/-1, len=4. firstByte >>= 8; case 4: if ((firstByte < 8) && (firstByte >= -8)) { out.writeByte(firstByte - 112); out.writeShort(((int) n) >>> 8); out.writeByte((int) n); return; } out.writeByte(len - 129); out.writeInt((int) n); return; case 5: out.writeByte(len - 129); out.writeInt((int) (n >>> 8)); out.writeByte((int) n); return; case 6: out.writeByte(len - 129); out.writeInt((int) (n >>> 16)); out.writeShort((int) n); return; case 7: out.writeByte(len - 129); out.writeInt((int) (n >>> 24)); out.writeShort((int) (n >>> 8)); out.writeByte((int) n); return; case 8: out.writeByte(len - 129); out.writeLong(n); return; default: throw new RuntimeException("Internal error"); } }
3.68
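A minimal sketch of how the documented encoding lengths play out, assuming this Utils is the TFile helper class (org.apache.hadoop.io.file.tfile.Utils); the sample values are illustrative only:

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    import org.apache.hadoop.io.file.tfile.Utils;

    public class VLongDemo {
        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            DataOutputStream out = new DataOutputStream(bytes);
            Utils.writeVLong(out, 100L);           // in [-32, 128): 1 byte
            Utils.writeVLong(out, 5_000L);         // in [-20*2^8, 20*2^8): 2 bytes
            Utils.writeVLong(out, 1_000_000L);     // in [-16*2^16, 16*2^16): 3 bytes
            Utils.writeVLong(out, Long.MAX_VALUE); // the 9-byte case
            out.flush();
            System.out.println(bytes.size());      // 1 + 2 + 3 + 9 = 15
        }
    }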
framework_VAccordion_setTabIndex
/** * For internal use only. May be renamed or removed in a future release. * <p> * Sets the tabulator index for the active stack item. The active stack item * represents the entire accordion in the browser's focus cycle (excluding * any focusable elements within the content panel). * <p> * This value is delegated from the TabsheetState via AccordionState. * * @param tabIndex * tabulator index for the open stack item * @since 8.1.7 */ public void setTabIndex(int tabIndex) { tabulatorIndex = tabIndex; StackItem openStackItem = getOpenStackItem(); if (openStackItem != null) { openStackItem.getElement().setTabIndex(tabIndex); } }
3.68
flink_SpeculativeExecutionVertex_archiveFailedExecution
/** * Removes the execution from currentExecutions if it has failed. This is needed to make room for * possible future speculative executions. * * @param executionAttemptId attempt ID of the execution to be removed */ public void archiveFailedExecution(ExecutionAttemptID executionAttemptId) { if (this.currentExecutions.size() <= 1) { // Leave the last execution because currentExecutions should never be empty. This should // happen only if all current executions have FAILED. A vertex reset will happen soon // and will archive the remaining execution. return; } final Execution removedExecution = this.currentExecutions.remove(executionAttemptId.getAttemptNumber()); nextInputSplitIndexToConsumeByAttempts.remove(executionAttemptId.getAttemptNumber()); checkNotNull( removedExecution, "Cannot remove execution %s which does not exist.", executionAttemptId); checkState( removedExecution.getState() == FAILED, "Cannot remove execution %s which is not FAILED.", executionAttemptId); executionHistory.add(removedExecution.archive()); if (removedExecution == this.currentExecution) { this.currentExecution = this.currentExecutions.values().iterator().next(); } }
3.68
flink_BinaryIndexedSortable_writeToOutput
/** Spill: Write all records to a {@link AbstractPagedOutputView}. */ public void writeToOutput(AbstractPagedOutputView output) throws IOException { final int numRecords = this.numRecords; int currentMemSeg = 0; int currentRecord = 0; while (currentRecord < numRecords) { final MemorySegment currentIndexSegment = this.sortIndex.get(currentMemSeg++); // go through all records in the memory segment for (int offset = 0; currentRecord < numRecords && offset <= this.lastIndexEntryOffset; currentRecord++, offset += this.indexEntrySize) { final long pointer = currentIndexSegment.getLong(offset); this.recordBuffer.setReadPosition(pointer); this.serializer.copyFromPagesToView(this.recordBuffer, output); } } }
3.68
framework_VaadinServletRequest_getCurrent
/** * Gets the currently processed Vaadin servlet request. The current request * is automatically defined when the request is started. The current request * can not be used in e.g. background threads because of the way server * implementations reuse request instances. * * * @return the current Vaadin servlet request instance if available, * otherwise <code>null</code> * @since 8.1 */ public static VaadinServletRequest getCurrent() { VaadinRequest currentRequest = VaadinRequest.getCurrent(); if (currentRequest instanceof VaadinServletRequest) { return (VaadinServletRequest) currentRequest; } else { return null; } }
3.68
flink_FilterNode_computeOperatorSpecificDefaultEstimates
/** * Computes the estimates for the Filter operator. Since it applies a filter on the data we * assume a cardinality decrease. To give the system a hint at data decrease, we use a default * magic number to indicate a 0.5 decrease. */ @Override protected void computeOperatorSpecificDefaultEstimates(DataStatistics statistics) { this.estimatedNumRecords = (long) (getPredecessorNode().getEstimatedNumRecords() * 0.5); this.estimatedOutputSize = (long) (getPredecessorNode().getEstimatedOutputSize() * 0.5); }
3.68
Activiti_ExecutionTree_getTreeNode
/** * Looks up the {@link ExecutionEntity} for a given id. */ public ExecutionTreeNode getTreeNode(String executionId) { return getTreeNode(executionId, root); }
3.68
framework_Payload_getValueType
/** * Gets the value type of this payload. * * @return the type of the value of this payload */ public ValueType getValueType() { return valueType; }
3.68
morf_DataValueLookupMetadata_readResolve
/** * When deserializing, resolve via the static factory. This prevents us getting duplicate * instances. * * @return The interned instance. */ private Object readResolve() { return DataValueLookupMetadataRegistry.deduplicate(this); }
3.68
framework_VLayoutSlot_isRelativeInDirection
/** * Returns whether the height or the width of the widget has been set as * relative depending on the indicated direction. * * @param isVertical * {@code true} if the requested dimension check is about height, * {@code false} if about width * @return {@code true} if the widget height or the widget width is relative * depending on the indicated direction, {@code false} otherwise */ public boolean isRelativeInDirection(boolean isVertical) { return isVertical ? isRelativeHeight() : isRelativeWidth(); }
3.68
morf_Oracle_openSchema
/** * @see org.alfasoftware.morf.jdbc.DatabaseType#openSchema(Connection, String, String) */ @Override public Schema openSchema(Connection connection, String databaseName, String schemaName) { if (StringUtils.isEmpty(schemaName)) throw new IllegalStateException("No schema name has been provided, but a schema name is required when connecting to Oracle"); return new OracleMetaDataProvider(connection, schemaName); }
3.68
hadoop_S3ClientFactory_isMultipartCopy
/** * Get the multipart flag. * @return multipart flag */ public boolean isMultipartCopy() { return multipartCopy; }
3.68
flink_TableChange_first
/** Gets the position that places the column first. */ static ColumnPosition first() { return First.INSTANCE; }
3.68
framework_BrowserWindowOpener_removeParameter
/** * Removes a parameter that has been set using * {@link #setParameter(String, String)}. Removing a parameter that has not * been set has no effect. * * @param name * the name of the parameter to remove, not <code>null</code> * * @see #setParameter(String, String) */ public void removeParameter(String name) { if (name == null) { throw new IllegalArgumentException("Null not allowed"); } getState().parameters.remove(name); }
3.68
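A short usage sketch for setParameter/removeParameter, assuming the usual Vaadin 8 pattern of extending a Button with a BrowserWindowOpener; the ReportUI popup class and the parameter name are hypothetical:

    import com.vaadin.server.BrowserWindowOpener;
    import com.vaadin.server.VaadinRequest;
    import com.vaadin.ui.Button;
    import com.vaadin.ui.UI;

    public class OpenerExampleUI extends UI {
        /** Hypothetical UI shown in the popup window. */
        public static class ReportUI extends UI {
            @Override
            protected void init(VaadinRequest request) { }
        }

        @Override
        protected void init(VaadinRequest request) {
            Button open = new Button("Open report");
            BrowserWindowOpener opener = new BrowserWindowOpener(ReportUI.class);
            opener.setParameter("mode", "detailed"); // adds ?mode=detailed to the popup URL
            opener.removeParameter("mode");          // removes it again; removing an unset name is a no-op
            opener.extend(open);
            setContent(open);
        }
    }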
hudi_FileSystemBasedLockProvider_defaultLockPath
/** * Returns the default lock file root path. * * <p>IMPORTANT: this path should be shared especially when there is engine cooperation. */ private static String defaultLockPath(String tablePath) { return tablePath + Path.SEPARATOR + AUXILIARYFOLDER_NAME; }
3.68
framework_Calendar_fireEventClick
/** * Fires an event when a calendar event was clicked. Uses an existing * event from the event cache. * * @param index * The index of the event in the event cache. */ protected void fireEventClick(Integer index) { fireEvent(new EventClick(this, events.get(index))); }
3.68
flink_PekkoUtils_getAddress
/** * Returns the address of the given {@link ActorSystem}. The {@link Address} object contains the * port and the host under which the actor system is reachable. * * @param system {@link ActorSystem} for which the {@link Address} shall be retrieved * @return {@link Address} of the given {@link ActorSystem} */ public static Address getAddress(ActorSystem system) { return RemoteAddressExtension.INSTANCE.apply(system).getAddress(); }
3.68
framework_TreeGridDropTarget_addTreeGridDropListener
/** * Attaches drop listener for the current drop target. * {@link TreeGridDropListener#drop(TreeGridDropEvent)} is called when drop * event happens on the client side. * * @param listener * Listener to handle drop event. * @return Handle to be used to remove this listener. */ public Registration addTreeGridDropListener( TreeGridDropListener<T> listener) { return addListener(TreeGridDropEvent.class, listener, TreeGridDropListener.DROP_METHOD); }
3.68
framework_VFilterSelect_fixHeightTo
/** * Fixes the menu's height to use the same space as a full page would use. Needed * to avoid height changes when quickly "scrolling" to the last page. */ public void fixHeightTo(int pageItemsCount) { setHeight(getPreferredHeight(pageItemsCount)); }
3.68
hadoop_IOStatisticsContextIntegration_referenceLostContext
/** * In case of reference loss for IOStatisticsContext. * @param key ThreadID. */ private static void referenceLostContext(Long key) { LOG.debug("Reference lost for threadID for the context: {}", key); }
3.68
framework_DropTargetExtensionConnector_onDragLeave
/** * Event handler for the {@code dragleave} event. * <p> * Override this method in case custom handling for the dragleave event is * required. * * @param event * browser event to be handled */ protected void onDragLeave(Event event) { removeDragOverStyle((NativeEvent) event); }
3.68
flink_WindowedStateTransformation_reduce
/** * Applies the given window function to each window. The window function is called for each * evaluation of the window for each key individually. The output of the window function is * interpreted as a regular non-windowed stream. * * <p>Arriving data is incrementally aggregated using the given reducer. * * @param reduceFunction The reduce function that is used for incremental aggregation. * @param function The window function. * @return The data stream that is the result of applying the window function to the window. */ @Internal public <R> StateBootstrapTransformation<T> reduce( ReduceFunction<T> reduceFunction, ProcessWindowFunction<T, R, K, W> function) { // clean the closures function = input.getExecutionEnvironment().clean(function); reduceFunction = input.getExecutionEnvironment().clean(reduceFunction); WindowOperator<K, T, ?, R, W> operator = builder.reduce(reduceFunction, function); SavepointWriterOperatorFactory factory = (timestamp, path) -> new StateBootstrapWrapperOperator<>(timestamp, path, operator); return new StateBootstrapTransformation<>( input, operatorMaxParallelism, factory, keySelector, keyType); }
3.68
shardingsphere-elasticjob_HandlerMappingRegistry_getMappingContext
/** * Gets a MappingContext with a Handler for the request. * * @param httpRequest HTTP request * @return an Optional containing the MappingContext if matched, or an empty Optional if not */ public Optional<MappingContext<Handler>> getMappingContext(final HttpRequest httpRequest) { String uriWithoutQuery = httpRequest.uri().split("\\?")[0]; return Optional.ofNullable(mappings.get(httpRequest.method())).map(urlPatternMap -> urlPatternMap.match(uriWithoutQuery)); }
3.68
flink_MemoryStateBackend_isUsingAsynchronousSnapshots
/** * Gets whether the key/value data structures are asynchronously snapshotted, which is always * true for this state backend. */ public boolean isUsingAsynchronousSnapshots() { return true; }
3.68
hbase_MultiVersionConcurrencyControl_tryAdvanceTo
/** * Step the MVCC forward on to a new read/write basis. * @param newStartPoint Point to move read and write points to. * @param expected If not -1 (#NONE), the current <code>readPoint</code> must equal this value for * the advance to happen. * @return false if <code>expected</code> is not equal to the current * <code>readPoint</code> or if <code>newStartPoint</code> is less than the current * <code>readPoint</code>; true otherwise */ boolean tryAdvanceTo(long newStartPoint, long expected) { synchronized (writeQueue) { long currentRead = this.readPoint.get(); long currentWrite = this.writePoint.get(); if (currentRead != currentWrite) { throw new RuntimeException("Already used this mvcc; currentRead=" + currentRead + ", currentWrite=" + currentWrite + "; too late to tryAdvanceTo"); } if (expected != NONE && expected != currentRead) { return false; } if (newStartPoint < currentRead) { return false; } readPoint.set(newStartPoint); writePoint.set(newStartPoint); } return true; }
3.68
hbase_HFileReaderImpl_releaseIfNotCurBlock
/** * The curBlock will be released by shipping or close method, so only need to consider releasing * the block, which was read from HFile before and not referenced by curBlock. */ protected void releaseIfNotCurBlock(HFileBlock block) { if (curBlock != block) { block.release(); } } /** * Scans blocks in the "scanned" section of the {@link HFile}
3.68
hadoop_DomainRowKey_getRowKey
/** * Constructs a row key prefix for the domain table. * * @return byte array with the row key */ public byte[] getRowKey() { return domainIdKeyConverter.encode(this); }
3.68
rocketmq-connect_LRUCache_remove
/** * Removes an entry from the cache. * * @param key the key to remove * @return true if an entry was removed */ @Override public boolean remove(K key) { return cache.remove(key) != null; }
3.68
hbase_MasterProcedureScheduler_wakeRegion
/** * Wake the procedures waiting for the specified region * @param procedure the procedure that was holding the region * @param regionInfo the region the procedure was holding */ public void wakeRegion(final Procedure<?> procedure, final RegionInfo regionInfo) { wakeRegions(procedure, regionInfo.getTable(), regionInfo); }
3.68
dubbo_MeshEnvListener_isEnable
/** * @return whether the current environment supports listening */ default boolean isEnable() { return false; }
3.68
hudi_HoodieCatalogUtil_getPartitionKeys
/** * Returns the partition key list with given table. */ public static List<String> getPartitionKeys(CatalogTable table) { // the PARTITIONED BY syntax always has higher priority than option FlinkOptions#PARTITION_PATH_FIELD if (table.isPartitioned()) { return table.getPartitionKeys(); } else if (table.getOptions().containsKey(FlinkOptions.PARTITION_PATH_FIELD.key())) { return Arrays.stream(table.getOptions().get(FlinkOptions.PARTITION_PATH_FIELD.key()).split(",")) .collect(Collectors.toList()); } return Collections.emptyList(); }
3.68
rocketmq-connect_KafkaSourceAdaptorConnector_stop
/** * Stop the component. */ @Override public void stop() { if (sourceConnector != null) { sourceConnector.stop(); } super.stop(); }
3.68
framework_OptimizedWidgetsetPanel_update
/** * Update the panel contents based on the connectors that have been used so * far on this execution of the application. */ public void update() { clear(); HTML h = new HTML("Getting used connectors"); add(h); String s = ""; for (ApplicationConnection ac : ApplicationConfiguration .getRunningApplications()) { ApplicationConfiguration conf = ac.getConfiguration(); s += "<h1>Used connectors for " + Util.escapeHTML(conf.getServiceUrl()) + "</h1>"; for (String connectorName : getUsedConnectorNames(conf)) { s += Util.escapeHTML(connectorName) + "<br/>"; } s += "<h2>To make an optimized widgetset based on these connectors:</h2>"; s += "<h3>1. Add the following <b>to the end</b> of your widgetset.gwt.xml file:</h3>"; s += "<textarea rows=\"3\" style=\"width:90%\">"; s += "<generate-with class=\"OptimizedConnectorBundleLoaderFactory\">\n"; s += " <when-type-assignable class=\"com.vaadin.client.metadata.ConnectorBundleLoader\" />\n"; s += "</generate-with>\n"; s += "</textarea>"; s += "<h3>2. Add the following code into OptimizedConnectorBundleLoaderFactory.java:</h3>"; s += "<textarea rows=\"5\" style=\"width:90%\">"; s += generateOptimizedWidgetSet(getUsedConnectorNames(conf)); s += "</textarea>"; s += "<h3>3. Recompile your widgetset. For example with Maven: 'mvn compile vaadin:compile'</h3>"; } h.setHTML(s); }
3.68
hadoop_Preconditions_checkState
/** * Checks the truth of an expression involving one or more parameters to the calling method. * * <p>The message of the exception is {@code msgSupplier.get()}.</p> * * @param expression a boolean expression * @param msgSupplier supplies the exception message via {@link Supplier#get()}; if * evaluating the supplier fails, the message falls back to * {@link #CHECK_STATE_EX_MESSAGE} * @throws IllegalStateException if {@code expression} is false */ public static void checkState( final boolean expression, final Supplier<String> msgSupplier) { if (!expression) { String msg; try { // note that we can get NPE evaluating the message itself; // but we do not want this to override the actual NPE. msg = msgSupplier.get(); } catch (Exception e) { LOG.debug("Error formatting message", e); msg = CHECK_STATE_EX_MESSAGE; } throw new IllegalStateException(msg); } }
3.68
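A small sketch of the lazily evaluated failure message, assuming this is the Hadoop utility class org.apache.hadoop.util.Preconditions; the supplier is only invoked when the check fails:

    import org.apache.hadoop.util.Preconditions;

    public class CheckStateDemo {
        public static void main(String[] args) {
            int pending = 3;
            // The message is only built if the expression is false, so the
            // string concatenation costs nothing on the happy path.
            Preconditions.checkState(pending >= 0,
                    () -> "pending count must be non-negative, was " + pending);
        }
    }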
hudi_DagUtils_convertYamlPathToDag
/** * Converts a YAML path to {@link WorkflowDag}. */ public static WorkflowDag convertYamlPathToDag(FileSystem fs, String path) throws IOException { InputStream is = fs.open(new Path(path)); return convertYamlToDag(toString(is)); }
3.68
hbase_ColumnSchemaModel___getBlocksize
/** Returns the value of the BLOCKSIZE attribute or its default if it is unset */ public int __getBlocksize() { Object o = attrs.get(BLOCKSIZE); return o != null ? Integer.parseInt(o.toString()) : ColumnFamilyDescriptorBuilder.DEFAULT_BLOCKSIZE; }
3.68
hbase_SingleColumnValueFilter_toByteArray
/** Returns The filter serialized using pb */ @Override public byte[] toByteArray() { return convert().toByteArray(); }
3.68
zxing_FinderPatternFinder_squaredDistance
/** * Get square of distance between a and b. */ private static double squaredDistance(FinderPattern a, FinderPattern b) { double x = a.getX() - b.getX(); double y = a.getY() - b.getY(); return x * x + y * y; }
3.68
framework_Escalator_removeRows
/** * {@inheritDoc} * <p> * <em>Implementation detail:</em> This method does no DOM modifications * (i.e. is very cheap to call) if there are no rows in the DOM when * this method is called. * * @see #hasSomethingInDom() */ @Override public void removeRows(final int index, final int numberOfRows) { assertArgumentsAreValidAndWithinRange(index, numberOfRows); rows -= numberOfRows; if (!isAttached()) { return; } if (hasSomethingInDom()) { paintRemoveRows(index, numberOfRows); } }
3.68
hadoop_WordList_setSize
/** * Setters and getters for Jackson JSON */ /** * Sets the size of the list. * * Note that this API is only for Jackson JSON deserialization. */ public void setSize(int size) { list = new HashMap<String, Integer>(size); }
3.68
framework_Escalator_updateVisibility
/** * Updates the spacer's visibility parameters, based on whether it * is being currently visible or not. */ public void updateVisibility() { if (isInViewport()) { show(); } else { hide(); } }
3.68
hbase_FavoredNodeAssignmentHelper_getFavoredNodes
/** Returns PB'ed bytes of {@link FavoredNodes} generated by the server list. */ public static byte[] getFavoredNodes(List<ServerName> serverAddrList) { FavoredNodes.Builder f = FavoredNodes.newBuilder(); for (ServerName s : serverAddrList) { HBaseProtos.ServerName.Builder b = HBaseProtos.ServerName.newBuilder(); b.setHostName(s.getHostname()); b.setPort(s.getPort()); b.setStartCode(ServerName.NON_STARTCODE); f.addFavoredNode(b.build()); } return f.build().toByteArray(); }
3.68
dubbo_StringToDurationConverter_parse
/** * Parse the given value to a duration. * * @param value the value to parse * @return a duration */ public Duration parse(String value) { return parse(value, null); }
3.68
hudi_ProtoConversionUtil_getWrappedValue
/** * Returns the wrapped field, assumes all wrapped fields have a single value * @param value wrapper message like {@link Int32Value} or {@link StringValue} * @return the wrapped object */ private static Object getWrappedValue(Object value) { Message valueAsMessage = (Message) value; return valueAsMessage.getField(valueAsMessage.getDescriptorForType().getFields().get(0)); }
3.68
framework_Upload_getReason
/** * Gets the exception that caused the failure. * * @return the exception that caused the failure, null if n/a */ public Exception getReason() { return reason; }
3.68
hudi_RequestHandler_isLocalViewBehind
/** * Determines if local view of table's timeline is behind that of client's view. */ private boolean isLocalViewBehind(Context ctx) { String basePath = ctx.queryParam(RemoteHoodieTableFileSystemView.BASEPATH_PARAM); String lastKnownInstantFromClient = ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.LAST_INSTANT_TS, String.class).getOrDefault(HoodieTimeline.INVALID_INSTANT_TS); String timelineHashFromClient = ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.TIMELINE_HASH, String.class).getOrDefault(""); HoodieTimeline localTimeline = viewManager.getFileSystemView(basePath).getTimeline().filterCompletedOrMajorOrMinorCompactionInstants(); if (LOG.isDebugEnabled()) { LOG.debug("Client [ LastTs=" + lastKnownInstantFromClient + ", TimelineHash=" + timelineHashFromClient + "], localTimeline=" + localTimeline.getInstants()); } if ((!localTimeline.getInstantsAsStream().findAny().isPresent()) && HoodieTimeline.INVALID_INSTANT_TS.equals(lastKnownInstantFromClient)) { return false; } String localTimelineHash = localTimeline.getTimelineHash(); // refresh if timeline hash mismatches if (!localTimelineHash.equals(timelineHashFromClient)) { return true; } // As a safety check, even if hash is same, ensure instant is present return !localTimeline.containsOrBeforeTimelineStarts(lastKnownInstantFromClient); }
3.68
graphhopper_GHMatrixAbstractRequester_fillResponseFromJson
/** * @param failFast If false weights/distances/times that are null are interpreted as disconnected points and are * thus set to their respective maximum values. Furthermore, the indices of the disconnected points * are added to {@link MatrixResponse#getDisconnectedPoints()} and the indices of the points that * could not be found are added to {@link MatrixResponse#getInvalidFromPoints()} and/or * {@link MatrixResponse#getInvalidToPoints()}. */ protected void fillResponseFromJson(MatrixResponse matrixResponse, JsonNode solution, boolean failFast) { final boolean readWeights = solution.has("weights"); final boolean readDistances = solution.has("distances"); final boolean readTimes = solution.has("times"); int fromCount = 0; JsonNode weightsArray = null; if (readWeights) { weightsArray = solution.get("weights"); fromCount = checkArraySizes("weights", weightsArray.size()); } JsonNode timesArray = null; if (readTimes) { timesArray = solution.get("times"); fromCount = checkArraySizes("times", timesArray.size(), weightsArray); } JsonNode distancesArray = null; if (readDistances) { distancesArray = solution.get("distances"); fromCount = checkArraySizes("distances", distancesArray.size(), weightsArray, timesArray); } for (int fromIndex = 0; fromIndex < fromCount; fromIndex++) { int toCount = 0; JsonNode weightsFromArray = null; double[] weights = null; if (readWeights) { weightsFromArray = weightsArray.get(fromIndex); weights = new double[weightsFromArray.size()]; toCount = checkArraySizes("weights", weightsFromArray.size()); } JsonNode timesFromArray = null; long[] times = null; if (readTimes) { timesFromArray = timesArray.get(fromIndex); times = new long[timesFromArray.size()]; toCount = checkArraySizes("times", timesFromArray.size(), weightsFromArray); } JsonNode distancesFromArray = null; int[] distances = null; if (readDistances) { distancesFromArray = distancesArray.get(fromIndex); distances = new int[distancesFromArray.size()]; toCount = checkArraySizes("distances", distancesFromArray.size(), weightsFromArray, timesFromArray); } for (int toIndex = 0; toIndex < toCount; toIndex++) { if (readWeights) { if (weightsFromArray.get(toIndex).isNull() && !failFast) { weights[toIndex] = Double.MAX_VALUE; } else { weights[toIndex] = weightsFromArray.get(toIndex).asDouble(); } } if (readTimes) { if (timesFromArray.get(toIndex).isNull() && !failFast) { times[toIndex] = Long.MAX_VALUE; } else { times[toIndex] = timesFromArray.get(toIndex).asLong() * 1000; } } if (readDistances) { if (distancesFromArray.get(toIndex).isNull() && !failFast) { distances[toIndex] = Integer.MAX_VALUE; } else { distances[toIndex] = (int) Math.round(distancesFromArray.get(toIndex).asDouble()); } } } if (readWeights) { matrixResponse.setWeightRow(fromIndex, weights); } if (readTimes) { matrixResponse.setTimeRow(fromIndex, times); } if (readDistances) { matrixResponse.setDistanceRow(fromIndex, distances); } } if (!failFast && solution.has("hints")) { addProblems(matrixResponse, solution.get("hints")); } }
3.68
framework_DataCommunicator_addDataGenerator
/** * Adds a data generator to this data communicator. Data generators can be * used to insert custom data to the rows sent to the client. If the data * generator is already added, does nothing. * * @param generator * the data generator to add, not null */ public void addDataGenerator(DataGenerator<T> generator) { Objects.requireNonNull(generator, "generator cannot be null"); generators.add(generator); // Make sure data gets generated when adding data generators. reset(); }
3.68
hbase_MasterProcedureScheduler_waitNamespaceExclusiveLock
// ============================================================================ // Namespace Locking Helpers // ============================================================================ /** * Suspend the procedure if the specified namespace is already locked. * @see #wakeNamespaceExclusiveLock(Procedure,String) * @param procedure the procedure trying to acquire the lock * @param namespace Namespace to lock * @return true if the procedure has to wait for the namespace to be available */ public boolean waitNamespaceExclusiveLock(Procedure<?> procedure, String namespace) { schedLock(); try { final LockAndQueue systemNamespaceTableLock = locking.getTableLock(TableProcedureInterface.DUMMY_NAMESPACE_TABLE_NAME); if (!systemNamespaceTableLock.trySharedLock(procedure)) { waitProcedure(systemNamespaceTableLock, procedure); logLockedResource(LockedResourceType.TABLE, TableProcedureInterface.DUMMY_NAMESPACE_TABLE_NAME.getNameAsString()); return true; } final LockAndQueue namespaceLock = locking.getNamespaceLock(namespace); if (!namespaceLock.tryExclusiveLock(procedure)) { systemNamespaceTableLock.releaseSharedLock(); waitProcedure(namespaceLock, procedure); logLockedResource(LockedResourceType.NAMESPACE, namespace); return true; } return false; } finally { schedUnlock(); } }
3.68
hadoop_RenameOperation_maybeAddTrailingSlash
/** * Turns a path (relative or otherwise) into an S3 key, adding a trailing * "/" if the path is not the root <i>and</i> does not already have a "/" * at the end. * * @param key s3 key or "" * @return the key with a trailing "/", or, if it is the root key, "" */ private String maybeAddTrailingSlash(String key) { if (!key.isEmpty() && !key.endsWith("/")) { return key + '/'; } else { return key; } }
3.68
flink_SecurityFactoryServiceLoader_findModuleFactory
/** Find a suitable {@link SecurityModuleFactory} based on canonical name. */ public static SecurityModuleFactory findModuleFactory(String securityModuleFactoryClass) throws NoMatchSecurityFactoryException { return findFactoryInternal( securityModuleFactoryClass, SecurityModuleFactory.class, SecurityModuleFactory.class.getClassLoader()); }
3.68
morf_InlineTableUpgrader_visitStatement
/** * Write the DSL statement. * * @param statement The {@link Statement}. */ private void visitStatement(Statement statement) { writeStatements(sqlDialect.convertStatementToSQL(statement, currentSchema, idTable)); }
3.68
flink_JoinOperator_types
/** * @deprecated Deprecated method only kept for compatibility. * @param types */ @SuppressWarnings({"unchecked", "hiding"}) @Deprecated @PublicEvolving public <OUT extends Tuple> JoinOperator<I1, I2, OUT> types(Class<?>... types) { TupleTypeInfo<OUT> typeInfo = (TupleTypeInfo<OUT>) this.getResultType(); if (types.length != typeInfo.getArity()) { throw new InvalidProgramException("Provided types do not match projection."); } for (int i = 0; i < types.length; i++) { Class<?> typeClass = types[i]; if (!typeClass.equals(typeInfo.getTypeAt(i).getTypeClass())) { throw new InvalidProgramException( "Provided type " + typeClass.getSimpleName() + " at position " + i + " does not match projection"); } } return (JoinOperator<I1, I2, OUT>) this; }
3.68
pulsar_Schema_KeyValue
/** * Key Value Schema using passed in key, value and encoding type schemas. */ static <K, V> Schema<KeyValue<K, V>> KeyValue(Schema<K> key, Schema<V> value, KeyValueEncodingType keyValueEncodingType) { return DefaultImplementation.getDefaultImplementation().newKeyValueSchema(key, value, keyValueEncodingType); }
3.68
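A brief sketch of building a key/value schema with the factory above, assuming the standard Pulsar client classes (Schema.STRING, Schema.INT32, KeyValueEncodingType.SEPARATED); SEPARATED stores the key in the message key rather than in the payload:

    import org.apache.pulsar.client.api.Schema;
    import org.apache.pulsar.common.schema.KeyValue;
    import org.apache.pulsar.common.schema.KeyValueEncodingType;

    public class KeyValueSchemaDemo {
        public static void main(String[] args) {
            // Key is a String, value is an Integer; key and value are encoded separately.
            Schema<KeyValue<String, Integer>> schema =
                    Schema.KeyValue(Schema.STRING, Schema.INT32, KeyValueEncodingType.SEPARATED);
            System.out.println(schema.getSchemaInfo());
        }
    }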
flink_RequestedGlobalProperties_setAnyPartitioning
/** * Sets these properties to request some partitioning on the given fields. This will allow both * hash partitioning and range partitioning to match. * * <p>If the fields are provided as {@link FieldSet}, then any permutation of the fields is a * valid partitioning, including subsets. If the fields are given as a {@link FieldList}, then * only an exact partitioning on the fields matches this requested partitioning. * * @param partitionedFields The key fields for the partitioning. */ public void setAnyPartitioning(FieldSet partitionedFields) { if (partitionedFields == null) { throw new NullPointerException(); } this.partitioning = PartitioningProperty.ANY_PARTITIONING; this.partitioningFields = partitionedFields; this.ordering = null; }
3.68
flink_ArrowSerializer_createArrowWriter
/** Creates an {@link ArrowWriter}. */ public ArrowWriter<RowData> createArrowWriter() { return ArrowUtils.createRowDataArrowWriter(rootWriter, inputType); }
3.68
rocketmq-connect_DebeziumPostgresConnector_taskClass
/** * Return the current connector class * @return task implement class */ @Override public Class<? extends Task> taskClass() { return DebeziumPostgresSource.class; }
3.68
hbase_KeyStoreFileType_fromPropertyValueOrFileName
/** * If <code>propertyValue</code> is not null or empty, returns the result of * <code>KeyStoreFileType.fromPropertyValue(propertyValue)</code>. Else, returns the result of * <code>KeyStoreFileType.fromFileName(filename)</code>. * @param propertyValue property value describing the KeyStoreFileType, or null/empty to * auto-detect the type from the file name. * @param filename file name of the key store file. The file extension is used to auto-detect * the KeyStoreFileType when <code>propertyValue</code> is null or empty. * @return a KeyStoreFileType. * @throws IllegalArgumentException if <code>propertyValue</code> is not one of "JKS", "PEM", * "PKCS12", "BCFKS", or empty/null. * @throws IllegalArgumentException if <code>propertyValue</code>is empty or null and the type * could not be determined from the file name. */ public static KeyStoreFileType fromPropertyValueOrFileName(String propertyValue, String filename) { KeyStoreFileType result = KeyStoreFileType.fromPropertyValue(propertyValue); if (result == null) { result = KeyStoreFileType.fromFilename(filename); } return result; }
3.68
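An illustrative sketch of the documented fallback behavior; the import path is assumed and the file name is made up:

    import org.apache.hadoop.hbase.io.crypto.tls.KeyStoreFileType; // assumed package

    public class KeyStoreTypeDemo {
        public static void main(String[] args) {
            // A non-empty property value wins over the file name:
            KeyStoreFileType explicit = KeyStoreFileType.fromPropertyValueOrFileName("PEM", "/etc/tls/server.jks");
            System.out.println(explicit); // PEM

            // An empty property value falls back to the ".jks" extension:
            KeyStoreFileType detected = KeyStoreFileType.fromPropertyValueOrFileName("", "/etc/tls/server.jks");
            System.out.println(detected); // JKS
        }
    }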
hbase_DynamicMetricsRegistry_newCounter
/** * Create a mutable long integer counter * @param info metadata of the metric * @param iVal initial value * @return a new counter object */ public MutableFastCounter newCounter(MetricsInfo info, long iVal) { MutableFastCounter ret = new MutableFastCounter(info, iVal); return addNewMetricIfAbsent(info.name(), ret, MutableFastCounter.class); }
3.68
hbase_HMaster_getMaxBalancingTime
/** Returns Maximum time we should run balancer for */ private int getMaxBalancingTime() { // if max balancing time isn't set, defaulting it to period time int maxBalancingTime = getConfiguration().getInt(HConstants.HBASE_BALANCER_MAX_BALANCING, getConfiguration() .getInt(HConstants.HBASE_BALANCER_PERIOD, HConstants.DEFAULT_HBASE_BALANCER_PERIOD)); return maxBalancingTime; }
3.68
morf_AbstractSqlDialectTest_testSubstring
/** * Tests that substring functionality works. */ @Test public void testSubstring() { // Given Function substring = substring(new FieldReference("field1"), new FieldLiteral(1), new FieldLiteral(3)); SelectStatement stmt = new SelectStatement(substring).from(new TableReference("schedule")); // When String result = testDialect.convertStatementToSQL(stmt); // Then assertEquals("Substring script should match expected", expectedSubstring(), result); }
3.68
framework_Slot_getWidgetResizeListener
/** * Returns the widget resize listener for this slot if one has been set. * * @return the listener or {@code null} if not set */ public ElementResizeListener getWidgetResizeListener() { return widgetResizeListener; }
3.68
hadoop_CleanerMetrics_reportAFileError
/** * Reports a file processing error at the current system time. */ public void reportAFileError() { totalProcessedFiles.incr(); processedFiles.incr(); totalFileErrors.incr(); fileErrors.incr(); }
3.68
pulsar_ProducerInterceptors_onSendAcknowledgement
/** * This method is called when the message sent to the broker has been acknowledged, or when sending the record fails * before it gets sent to the broker. * This method calls the {@link ProducerInterceptor#onSendAcknowledgement(Producer, Message, MessageId, Throwable)} * method for each interceptor. * * This method does not throw exceptions. Exceptions thrown by any of the interceptor methods are caught and ignored. * * @param producer the producer which contains the interceptor. * @param message The message returned by the last interceptor's * {@link ProducerInterceptor#beforeSend(Producer, Message)} * @param msgId The message id that the broker returned. Null if an error occurred. * @param exception The exception thrown during processing of this message. Null if no error occurred. */ public void onSendAcknowledgement(Producer producer, Message message, MessageId msgId, Throwable exception) { for (ProducerInterceptor interceptor : interceptors) { if (!interceptor.eligible(message)) { continue; } try { interceptor.onSendAcknowledgement(producer, message, msgId, exception); } catch (Throwable e) { log.warn("Error executing interceptor onSendAcknowledgement callback ", e); } } }
3.68
flink_SkipListUtils_getNextValuePointer
/** * Return the pointer to next value space. * * @param memorySegment memory segment for value space. * @param offset offset of value space in memory segment. */ public static long getNextValuePointer(MemorySegment memorySegment, int offset) { return memorySegment.getLong(offset + NEXT_VALUE_POINTER_OFFSET); }
3.68
rocketmq-connect_CountDownLatch2_toString
/** * Returns a string identifying this latch, as well as its state. The state, in brackets, includes the String {@code * "Count ="} followed by the current count. * * @return a string identifying this latch, as well as its state */ public String toString() { return super.toString() + "[Count = " + sync.getCount() + "]"; }
3.68
pulsar_OffloadIndexBlockBuilder_create
/** * create an OffloadIndexBlockBuilder. */ static OffloadIndexBlockBuilder create() { return new OffloadIndexBlockV2BuilderImpl(); }
3.68
flink_RemoteStorageScanner_start
/** Start the executor. */ public void start() { synchronized (scannerExecutor) { if (!scannerExecutor.isShutdown()) { scannerExecutor.schedule(this, lastInterval, TimeUnit.MILLISECONDS); } } }
3.68
hadoop_TFile_createScannerByRecordNum
/** * Create a scanner that covers a range of records. * * @param beginRecNum * The RecordNum for the first record (inclusive). * @param endRecNum * The RecordNum for the last record (exclusive). To scan the whole * file, either specify endRecNum==-1 or endRecNum==getEntryCount(). * @return The TFile scanner that covers the specified range of records. * @throws IOException raised on errors performing I/O. */ public Scanner createScannerByRecordNum(long beginRecNum, long endRecNum) throws IOException { if (beginRecNum < 0) beginRecNum = 0; if (endRecNum < 0 || endRecNum > getEntryCount()) { endRecNum = getEntryCount(); } return new Scanner(this, getLocationByRecordNum(beginRecNum), getLocationByRecordNum(endRecNum)); }
3.68
hbase_NoOpIndexBlockEncoder_writeNonRoot
/** * Writes the block index chunk in the non-root index block format. This format contains the * number of entries, an index of integer offsets for quick binary search on variable-length * records, and tuples of block offset, on-disk block size, and the first key for each entry. */ private void writeNonRoot(BlockIndexChunk blockIndexChunk, DataOutput out) throws IOException { // The number of entries in the block. out.writeInt(blockIndexChunk.getNumEntries()); if ( blockIndexChunk.getSecondaryIndexOffsetMarks().size() != blockIndexChunk.getBlockKeys().size() ) { throw new IOException("Corrupted block index chunk writer: " + blockIndexChunk.getBlockKeys().size() + " entries but " + blockIndexChunk.getSecondaryIndexOffsetMarks().size() + " secondary index items"); } // For each entry, write a "secondary index" of relative offsets to the // entries from the end of the secondary index. This works, because at // read time we read the number of entries and know where the secondary // index ends. for (int currentSecondaryIndex : blockIndexChunk.getSecondaryIndexOffsetMarks()) out.writeInt(currentSecondaryIndex); // We include one other element in the secondary index to calculate the // size of each entry more easily by subtracting secondary index elements. out.writeInt(blockIndexChunk.getCurTotalNonRootEntrySize()); for (int i = 0; i < blockIndexChunk.getNumEntries(); ++i) { out.writeLong(blockIndexChunk.getBlockOffset(i)); out.writeInt(blockIndexChunk.getOnDiskDataSize(i)); out.write(blockIndexChunk.getBlockKey(i)); } }
3.68
hbase_MasterObserver_preSplitRegionAction
/** * Called before the region is split. * @param c the environment to interact with the framework and master * @param tableName the table where the region belongs to * @param splitRow split point */ default void preSplitRegionAction(final ObserverContext<MasterCoprocessorEnvironment> c, final TableName tableName, final byte[] splitRow) throws IOException { }
3.68
flink_CatalogManager_createTemporaryTable
/** * Creates a temporary table in a given fully qualified path. * * @param table The table to put in the given path. * @param objectIdentifier The fully qualified path where to put the table. * @param ignoreIfExists if false exception will be thrown if a table exists in the given path. */ public void createTemporaryTable( CatalogBaseTable table, ObjectIdentifier objectIdentifier, boolean ignoreIfExists) { Optional<TemporaryOperationListener> listener = getTemporaryOperationListener(objectIdentifier); temporaryTables.compute( objectIdentifier, (k, v) -> { if (v != null) { if (!ignoreIfExists) { throw new ValidationException( String.format( "Temporary table '%s' already exists", objectIdentifier)); } return v; } else { ResolvedCatalogBaseTable<?> resolvedTable = resolveCatalogBaseTable(table); Catalog catalog = getCatalog(objectIdentifier.getCatalogName()).orElse(null); ResolvedCatalogBaseTable<?> resolvedListenedTable = managedTableListener.notifyTableCreation( catalog, objectIdentifier, resolvedTable, true, ignoreIfExists); if (listener.isPresent()) { return listener.get() .onCreateTemporaryTable( objectIdentifier.toObjectPath(), resolvedListenedTable); } if (resolvedListenedTable instanceof CatalogTable) { catalogModificationListeners.forEach( l -> l.onEvent( CreateTableEvent.createEvent( CatalogContext.createContext( objectIdentifier .getCatalogName(), catalog), objectIdentifier, resolvedListenedTable, ignoreIfExists, true))); } return resolvedListenedTable; } }); }
3.68
framework_DefaultSQLGenerator_generateSelectQuery
/* * (non-Javadoc) * * @see com.vaadin.addon.sqlcontainer.query.generator.SQLGenerator# * generateSelectQuery(java.lang.String, java.util.List, java.util.List, * int, int, java.lang.String) */ @Override public StatementHelper generateSelectQuery(String tableName, List<Filter> filters, List<OrderBy> orderBys, int offset, int pagelength, String toSelect) { if (tableName == null || tableName.trim().equals("")) { throw new IllegalArgumentException("Table name must be given."); } toSelect = toSelect == null ? "*" : toSelect; StatementHelper sh = getStatementHelper(); StringBuffer query = new StringBuffer(); query.append("SELECT " + toSelect + " FROM ") .append(SQLUtil.escapeSQL(tableName)); if (filters != null) { query.append(QueryBuilder.getWhereStringForFilters(filters, sh)); } if (orderBys != null) { for (OrderBy o : orderBys) { generateOrderBy(query, o, orderBys.indexOf(o) == 0); } } if (pagelength != 0) { generateLimits(query, offset, pagelength); } sh.setQueryString(query.toString()); return sh; }
3.68
hadoop_OperationDuration_getDurationString
/** * Return the duration as {@link #humanTime(long)}. * @return a printable duration. */ public String getDurationString() { return humanTime(value()); }
3.68
hudi_WriteMarkers_getMarkerFileName
/** * Gets the marker file name, in the format of "[file_name].marker.[IO_type]". * * @param fileName file name * @param type IO type * @return the marker file name */ protected static String getMarkerFileName(String fileName, IOType type) { return String.format("%s%s.%s", fileName, HoodieTableMetaClient.MARKER_EXTN, type.name()); }
3.68
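A worked illustration of the documented "[file_name].marker.[IO_type]" format; the data file name is made up, IOType.CREATE is assumed to be one of the enum constants, and since the helper is protected static the call below would live inside a WriteMarkers subclass:

    // Inside a class extending WriteMarkers:
    String marker = getMarkerFileName("part-0001-0_20240101.parquet", IOType.CREATE);
    // With MARKER_EXTN assumed to be ".marker", marker is
    // "part-0001-0_20240101.parquet.marker.CREATE"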
flink_CheckpointStorageLocationReference_readResolve
/** readResolve() preserves the singleton property of the default value. */ protected final Object readResolve() throws ObjectStreamException { return encodedReference == null ? DEFAULT : this; }
3.68
hadoop_Cluster_getQueues
/** * Get all the queues in cluster. * * @return array of {@link QueueInfo} * @throws IOException * @throws InterruptedException */ public QueueInfo[] getQueues() throws IOException, InterruptedException { return client.getQueues(); }
3.68
flink_TableSink_getConsumedDataType
/** * Returns the data type consumed by this {@link TableSink}. * * @return The data type expected by this {@link TableSink}. */ default DataType getConsumedDataType() { final TypeInformation<T> legacyType = getOutputType(); if (legacyType == null) { throw new TableException("Table sink does not implement a consumed data type."); } return fromLegacyInfoToDataType(legacyType); }
3.68
MagicPlugin_EnteredStateTracker_touch
/** * This is just here to avoid compiler warnings by giving the user something to call */ public void touch() { }
3.68
hbase_TableIntegrityErrorHandlerImpl_handleRegionEndKeyNotEmpty
/** * {@inheritDoc} */ @Override public void handleRegionEndKeyNotEmpty(byte[] curEndKey) throws IOException { }
3.68
framework_Overlay_getVisualViewportWidth
/** * Gets the visual viewport width, which is useful for e.g. iOS where the * view can be zoomed in while keeping the layout viewport intact. * * Falls back to the layout viewport; for those browsers/devices the difference * is that the scrollbar width is included (if there is a scrollbar). * * @since 7.0.7 * @return the visual viewport width in pixels */ private int getVisualViewportWidth() { int w = (int) getSubpixelInnerWidth(); if (w < 0) { return Window.getClientWidth(); } else { return w; } }
3.68
hadoop_TaskManifest_getTotalFileSize
/** * Calculate the total amount of data which will be committed. * @return the sum of sizes of all files to commit. */ @JsonIgnore public long getTotalFileSize() { return filesToCommit.stream().mapToLong(FileEntry::getSize).sum(); }
3.68
hibernate-validator_ReflectionHelper_internalBoxedType
/** * Returns the auto-boxed type of a primitive type. * * @param primitiveType the primitive type * * @return the auto-boxed type of a primitive type. In case {@link Void} is * passed (which is considered as primitive type by * {@link Class#isPrimitive()}), {@link Void} will be returned. * * @throws IllegalArgumentException in case the parameter {@code primitiveType} does not * represent a primitive type. */ private static Class<?> internalBoxedType(Class<?> primitiveType) { Class<?> wrapperType = PRIMITIVE_TO_WRAPPER_TYPES.get( primitiveType ); if ( wrapperType == null ) { throw LOG.getHasToBeAPrimitiveTypeException( primitiveType.getClass() ); } return wrapperType; }
3.68
hadoop_TimelineDomain_setId
/** * Set the domain ID * * @param id the domain ID */ public void setId(String id) { this.id = id; }
3.68
flink_EnvironmentSettings_withBuiltInCatalogName
/** * Specifies the name of the initial catalog to be created when instantiating a {@link * TableEnvironment}. * * <p>This catalog is an in-memory catalog that will be used to store all temporary objects * (e.g. from {@link TableEnvironment#createTemporaryView(String, Table)} or {@link * TableEnvironment#createTemporarySystemFunction(String, UserDefinedFunction)}) that cannot * be persisted because they have no serializable representation. * * <p>It will also be the initial value for the current catalog which can be altered via * {@link TableEnvironment#useCatalog(String)}. * * <p>Default: {@link TableConfigOptions#TABLE_CATALOG_NAME}{@code .defaultValue()}. */ public Builder withBuiltInCatalogName(String builtInCatalogName) { configuration.set(TABLE_CATALOG_NAME, builtInCatalogName); return this; }
3.68
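A short sketch of the typical builder usage, assuming the standard Flink Table API entry points; the catalog name is arbitrary:

    import org.apache.flink.table.api.EnvironmentSettings;
    import org.apache.flink.table.api.TableEnvironment;

    public class BuiltInCatalogDemo {
        public static void main(String[] args) {
            EnvironmentSettings settings = EnvironmentSettings.newInstance()
                    .inStreamingMode()
                    .withBuiltInCatalogName("scratch_catalog") // name of the initial in-memory catalog
                    .build();
            TableEnvironment tableEnv = TableEnvironment.create(settings);
            System.out.println(tableEnv.getCurrentCatalog()); // scratch_catalog
        }
    }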
pulsar_TopicEventsDispatcher_notify
/** * Dispatches notification to specified listeners. * @param listeners * @param topic * @param event * @param stage * @param t */ public static void notify(TopicEventsListener[] listeners, String topic, TopicEventsListener.TopicEvent event, TopicEventsListener.EventStage stage, Throwable t) { Objects.requireNonNull(listeners); for (TopicEventsListener listener: listeners) { notify(listener, topic, event, stage, t); } }
3.68
hadoop_RouterMetricsService_getRBFMetrics
/** * Get the federation metrics. * * @return Federation metrics. */ public RBFMetrics getRBFMetrics() { return this.rbfMetrics; }
3.68
morf_AbstractSqlDialectTest_testParameterisedInsertWithTableInDifferentSchema
/** * Same as {@link #testParameterisedInsert()}, but this also checks when the table is in a separate schema. */ @Test public void testParameterisedInsertWithTableInDifferentSchema() { AliasedField[] fields = new AliasedField[] { new FieldLiteral(5).as("id"), new FieldLiteral("Escap'd").as(STRING_FIELD), new FieldLiteral(20100405).as(DATE_FIELD), new FieldLiteral(7).as(INT_FIELD), new FieldLiteral(true).as(BOOLEAN_FIELD), }; InsertStatement stmt = new InsertStatement().into(new TableReference("MYSCHEMA", TEST_TABLE)).fields(fields); String sql = testDialect.convertStatementToSQL(stmt, metadata); assertEquals("Generated SQL not as expected", expectedParameterisedInsertStatementWithTableInDifferentSchema(), sql); }
3.68