Columns:
  name          string, length 12 to 178
  code_snippet  string, length 8 to 36.5k
  score         float64, range 3.26 to 3.68
framework_VDebugWindow_getOverlayContainer
/** * Gets the container element for this window. The debug window is always * global to the document and not related to any * {@link ApplicationConnection} in particular. * * @return The global overlay container element. */ @Override public com.google.gwt.user.client.Element getOverlayContainer() { return RootPanel.get().getElement(); }
3.68
hbase_MasterFileSystem_getWALRootDir
/** Returns HBase root log dir. */ public Path getWALRootDir() { return this.walRootDir; }
3.68
framework_TableScroll_fillTable
// fill the table with some random data private void fillTable(Table table) { initProperties(table); for (int i = 0; i < ROWS; i++) { String[] line = new String[COLUMNS]; for (int j = 0; j < COLUMNS; j++) { line[j] = "col=" + j + " row=" + i; } table.addItem(line, null); } }
3.68
dubbo_ConfigurationUtils_getServerShutdownTimeout
/** * Server shutdown wait timeout in millis. * * @return the shutdown wait timeout in milliseconds */ @SuppressWarnings("deprecation") public static int getServerShutdownTimeout(ScopeModel scopeModel) { if (expectedShutdownTime < System.currentTimeMillis()) { return 1; } int timeout = DEFAULT_SERVER_SHUTDOWN_TIMEOUT; Configuration configuration = getGlobalConfiguration(scopeModel); String value = StringUtils.trim(configuration.getString(SHUTDOWN_WAIT_KEY)); if (StringUtils.isNotEmpty(value)) { try { timeout = Integer.parseInt(value); } catch (Exception e) { // ignore } } else { value = StringUtils.trim(configuration.getString(SHUTDOWN_WAIT_SECONDS_KEY)); if (StringUtils.isNotEmpty(value)) { try { timeout = Integer.parseInt(value) * 1000; } catch (Exception e) { // ignore } } } if (expectedShutdownTime - System.currentTimeMillis() < timeout) { return (int) Math.max(1, expectedShutdownTime - System.currentTimeMillis()); } return timeout; }
3.68
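The row above resolves a shutdown timeout from two alternative config keys, preferring a milliseconds key and falling back to a seconds key scaled by 1000. A minimal, self-contained sketch of that fallback pattern follows; the key names and the plain Map-based config are illustrative assumptions, not the Dubbo API, and the expected-shutdown-time clamp from the snippet is omitted.

import java.util.Map;

public class ShutdownTimeoutSketch {
    // Stand-in for DEFAULT_SERVER_SHUTDOWN_TIMEOUT in the snippet.
    static final int DEFAULT_TIMEOUT_MS = 10_000;

    static int resolveTimeout(Map<String, String> config) {
        int timeout = DEFAULT_TIMEOUT_MS;
        String ms = config.get("shutdown.wait"); // assumed key name
        if (ms != null && !ms.isEmpty()) {
            try {
                timeout = Integer.parseInt(ms.trim());
            } catch (NumberFormatException ignored) {
                // a malformed value keeps the default, as in the snippet
            }
        } else {
            String seconds = config.get("shutdown.wait.seconds"); // assumed key name
            if (seconds != null && !seconds.isEmpty()) {
                try {
                    timeout = Integer.parseInt(seconds.trim()) * 1000; // seconds scaled to millis
                } catch (NumberFormatException ignored) {
                    // a malformed value keeps the default
                }
            }
        }
        return timeout;
    }

    public static void main(String[] args) {
        System.out.println(resolveTimeout(Map.of("shutdown.wait.seconds", "15"))); // prints 15000
    }
}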
flink_ProjectOperator_projectTupleX
/** * Chooses a projectTupleX according to the length of {@link * org.apache.flink.api.java.operators.ProjectOperator.Projection#fieldIndexes}. * * @return The projected DataSet. * @see org.apache.flink.api.java.operators.ProjectOperator.Projection */ @SuppressWarnings("unchecked") public <OUT extends Tuple> ProjectOperator<T, OUT> projectTupleX() { ProjectOperator<T, OUT> projOperator; switch (fieldIndexes.length) { case 1: projOperator = (ProjectOperator<T, OUT>) projectTuple1(); break; case 2: projOperator = (ProjectOperator<T, OUT>) projectTuple2(); break; case 3: projOperator = (ProjectOperator<T, OUT>) projectTuple3(); break; case 4: projOperator = (ProjectOperator<T, OUT>) projectTuple4(); break; case 5: projOperator = (ProjectOperator<T, OUT>) projectTuple5(); break; case 6: projOperator = (ProjectOperator<T, OUT>) projectTuple6(); break; case 7: projOperator = (ProjectOperator<T, OUT>) projectTuple7(); break; case 8: projOperator = (ProjectOperator<T, OUT>) projectTuple8(); break; case 9: projOperator = (ProjectOperator<T, OUT>) projectTuple9(); break; case 10: projOperator = (ProjectOperator<T, OUT>) projectTuple10(); break; case 11: projOperator = (ProjectOperator<T, OUT>) projectTuple11(); break; case 12: projOperator = (ProjectOperator<T, OUT>) projectTuple12(); break; case 13: projOperator = (ProjectOperator<T, OUT>) projectTuple13(); break; case 14: projOperator = (ProjectOperator<T, OUT>) projectTuple14(); break; case 15: projOperator = (ProjectOperator<T, OUT>) projectTuple15(); break; case 16: projOperator = (ProjectOperator<T, OUT>) projectTuple16(); break; case 17: projOperator = (ProjectOperator<T, OUT>) projectTuple17(); break; case 18: projOperator = (ProjectOperator<T, OUT>) projectTuple18(); break; case 19: projOperator = (ProjectOperator<T, OUT>) projectTuple19(); break; case 20: projOperator = (ProjectOperator<T, OUT>) projectTuple20(); break; case 21: projOperator = (ProjectOperator<T, OUT>) projectTuple21(); break; case 22: projOperator = (ProjectOperator<T, OUT>) projectTuple22(); break; case 23: projOperator = (ProjectOperator<T, OUT>) projectTuple23(); break; case 24: projOperator = (ProjectOperator<T, OUT>) projectTuple24(); break; case 25: projOperator = (ProjectOperator<T, OUT>) projectTuple25(); break; default: throw new IllegalStateException("Excessive arity in tuple."); } return projOperator; }
3.68
morf_AddColumn_isApplied
/** * @see org.alfasoftware.morf.upgrade.SchemaChange#isApplied(Schema, ConnectionResources) */ @Override public boolean isApplied(Schema schema, ConnectionResources database) { if (!schema.tableExists(tableName)) { return false; } Table table = schema.getTable(tableName); SchemaHomology homology = new SchemaHomology(); for (Column column : table.columns()) { if (homology.columnsMatch(column, newColumnDefinition)) { return true; } } return false; }
3.68
flink_TaskDeploymentDescriptorFactory_shouldOffload
/** * Determine whether shuffle descriptors should be offloaded to blob server. * * @param shuffleDescriptorsToSerialize shuffle descriptors to serialize * @param numConsumers how many consumers this serialized shuffle descriptor should be sent to * @return whether shuffle descriptors should be offloaded to blob server */ private boolean shouldOffload( ShuffleDescriptorAndIndex[] shuffleDescriptorsToSerialize, int numConsumers) { return shuffleDescriptorsToSerialize.length * numConsumers >= offloadShuffleDescriptorsThreshold; }
3.68
framework_VAbstractOrderedLayout_insert
/** * {@inheritDoc} * * @since 7.2 */ @Override protected void insert(Widget child, Element container, int beforeIndex, boolean domInsert) { insert(child, DOM.asOld(container), beforeIndex, domInsert); }
3.68
framework_GridSingleSelect_isSelected
/** * Returns whether the given item is currently selected. * * @param item * the item to check, not null * @return {@code true} if the item is selected, {@code false} otherwise */ public boolean isSelected(T item) { return model.isSelected(item); }
3.68
hbase_AsyncAdmin_majorCompact
/** * Major compact a column family within a table. When the returned CompletableFuture is done, it * only means the compact request was sent to HBase and may need some time to finish the compact * operation. Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for * normal compaction type. * @param tableName table to major compact * @param columnFamily column family within a table. If not present, major compact all of the * table's column families. */ default CompletableFuture<Void> majorCompact(TableName tableName, byte[] columnFamily) { return majorCompact(tableName, columnFamily, CompactType.NORMAL); }
3.68
hudi_CleanPlanner_getPartitionPathsForIncrementalCleaning
/** * Use Incremental Mode for finding partition paths. * @param cleanMetadata metadata of the last completed clean * @param newInstantToRetain the new instant to retain * @return partition paths touched by commits since the last clean */ private List<String> getPartitionPathsForIncrementalCleaning(HoodieCleanMetadata cleanMetadata, Option<HoodieInstant> newInstantToRetain) { LOG.info("Incremental Cleaning mode is enabled. Looking up partition-paths that have changed " + "since last clean at " + cleanMetadata.getEarliestCommitToRetain() + ". New Instant to retain : " + newInstantToRetain); return hoodieTable.getCompletedCommitsTimeline().getInstantsAsStream().filter( instant -> HoodieTimeline.compareTimestamps(instant.getTimestamp(), HoodieTimeline.GREATER_THAN_OR_EQUALS, cleanMetadata.getEarliestCommitToRetain()) && HoodieTimeline.compareTimestamps(instant.getTimestamp(), HoodieTimeline.LESSER_THAN, newInstantToRetain.get().getTimestamp())).flatMap(instant -> { try { if (HoodieTimeline.REPLACE_COMMIT_ACTION.equals(instant.getAction())) { HoodieReplaceCommitMetadata replaceCommitMetadata = HoodieReplaceCommitMetadata.fromBytes( hoodieTable.getActiveTimeline().getInstantDetails(instant).get(), HoodieReplaceCommitMetadata.class); return Stream.concat(replaceCommitMetadata.getPartitionToReplaceFileIds().keySet().stream(), replaceCommitMetadata.getPartitionToWriteStats().keySet().stream()); } else { HoodieCommitMetadata commitMetadata = HoodieCommitMetadata .fromBytes(hoodieTable.getActiveTimeline().getInstantDetails(instant).get(), HoodieCommitMetadata.class); return commitMetadata.getPartitionToWriteStats().keySet().stream(); } } catch (IOException e) { throw new HoodieIOException(e.getMessage(), e); } }).distinct().collect(Collectors.toList()); }
3.68
morf_SelectFirstStatement_selectFirst
/** * Constructs a Select First Statement. * * @param field the field that should be selected * @return Builder. */ public static final SelectFirstStatementBuilder selectFirst(AliasedFieldBuilder field) { return new SelectFirstStatementBuilder(field); }
3.68
hbase_Mutation_getTTL
/** * Return the TTL requested for the result of the mutation, in milliseconds. * @return the TTL requested for the result of the mutation, in milliseconds, or Long.MAX_VALUE if * unset */ public long getTTL() { byte[] ttlBytes = getAttribute(OP_ATTRIBUTE_TTL); if (ttlBytes != null) { return Bytes.toLong(ttlBytes); } return Long.MAX_VALUE; }
3.68
framework_CalendarTargetDetails_hasDropTime
/** * @return true if {@link #getDropTime()} will return a date object with the * time set to the start of the time slot where the drop happened */ public boolean hasDropTime() { return hasDropTime; }
3.68
hbase_MetricsMasterFileSystem_addSplit
/** * Record a single instance of a split * @param time time that the split took * @param size length of original WALs that were split */ public synchronized void addSplit(long time, long size) { source.updateSplitTime(time); source.updateSplitSize(size); }
3.68
hadoop_OBSPosixBucketUtils_renameBasedOnPosix
/** * The inner rename operation based on Posix bucket. * * @param owner OBS File System instance * @param src source path to be renamed from * @param dst destination path to be renamed to * @return boolean * @throws RenameFailedException if some criteria for a state changing rename * was not met. This means work didn't happen; * it's not something which is reported upstream * to the FileSystem APIs, for which the * semantics of "false" are pretty vague. * @throws IOException on IO failure. */ static boolean renameBasedOnPosix(final OBSFileSystem owner, final Path src, final Path dst) throws IOException { Path dstPath = dst; String srcKey = OBSCommonUtils.pathToKey(owner, src); String dstKey = OBSCommonUtils.pathToKey(owner, dstPath); if (srcKey.isEmpty()) { LOG.error("rename: src [{}] is root directory", src); return false; } try { FileStatus dstStatus = owner.getFileStatus(dstPath); if (dstStatus.isDirectory()) { String newDstString = OBSCommonUtils.maybeAddTrailingSlash( dstPath.toString()); String filename = srcKey.substring( OBSCommonUtils.pathToKey(owner, src.getParent()) .length() + 1); dstPath = new Path(newDstString + filename); dstKey = OBSCommonUtils.pathToKey(owner, dstPath); LOG.debug( "rename: dest is an existing directory and will be " + "changed to [{}]", dstPath); if (owner.exists(dstPath)) { LOG.error("rename: failed to rename " + src + " to " + dstPath + " because destination exists"); return false; } } else { if (srcKey.equals(dstKey)) { LOG.warn( "rename: src and dest refer to the same " + "file or directory: {}", dstPath); return true; } else { LOG.error("rename: failed to rename " + src + " to " + dstPath + " because destination exists"); return false; } } } catch (FileNotFoundException e) { // if destination does not exist, do not change the // destination key, and just do rename. LOG.debug("rename: dest [{}] does not exist", dstPath); } catch (FileConflictException e) { Path parent = dstPath.getParent(); if (!OBSCommonUtils.pathToKey(owner, parent).isEmpty()) { FileStatus dstParentStatus = owner.getFileStatus(parent); if (!dstParentStatus.isDirectory()) { throw new ParentNotDirectoryException( parent + " is not a directory"); } } } if (dstKey.startsWith(srcKey) && (dstKey.equals(srcKey) || dstKey.charAt(srcKey.length()) == Path.SEPARATOR_CHAR)) { LOG.error("rename: dest [{}] cannot be a descendant of src [{}]", dstPath, src); return false; } return innerFsRenameWithRetry(owner, src, dstPath, srcKey, dstKey); }
3.68
framework_ConnectorMap_unregisterConnector
/** * Unregisters the given connector; always use after removing a connector. * This method does not remove the connector from the DOM, but marks the * connector so that ApplicationConnection may clean up its references to * it. Removing the widget from the DOM is the component container's responsibility. * * @param connector * the connector to remove */ public void unregisterConnector(ServerConnector connector) { if (connector == null) { getLogger().severe("Trying to unregister null connector"); return; } String connectorId = connector.getConnectorId(); idToComponentDetail.remove(connectorId); connector.onUnregister(); for (ServerConnector child : connector.getChildren()) { if (child.getParent() == connector) { /* * Only unregister children that are actually connected to this * parent. For instance when moving connectors from one layout * to another and removing the first layout it will still * contain references to its old children, which are now * attached to another connector. */ unregisterConnector(child); } } }
3.68
hadoop_AbstractQueueCapacityCalculator_getResourceNames
/** * Returns all resource names that are defined for a capacity type. * * @param queue queue for which the capacity vector is defined * @param label node label * @param capacityType capacity type for which the resource names are defined * @return resource names */ protected Set<String> getResourceNames(CSQueue queue, String label, ResourceUnitCapacityType capacityType) { return queue.getConfiguredCapacityVector(label) .getResourceNamesByCapacityType(capacityType); }
3.68
zxing_BinaryBitmap_getBlackRow
/** * Converts one row of luminance data to 1 bit data. May actually do the conversion, or return * cached data. Callers should assume this method is expensive and call it as seldom as possible. * This method is intended for decoding 1D barcodes and may choose to apply sharpening. * * @param y The row to fetch, which must be in [0, bitmap height) * @param row An optional preallocated array. If null or too small, it will be ignored. * If used, the Binarizer will call BitArray.clear(). Always use the returned object. * @return The array of bits for this row (true means black). * @throws NotFoundException if row can't be binarized */ public BitArray getBlackRow(int y, BitArray row) throws NotFoundException { return binarizer.getBlackRow(y, row); }
3.68
hadoop_FlowActivitySubDoc_equals
// Only check if flow version and flow run id are equal @Override public boolean equals(Object o) { if (this == o) { return true; } if (!(o instanceof FlowActivitySubDoc)) { return false; } FlowActivitySubDoc m = (FlowActivitySubDoc) o; if (!flowVersion.equalsIgnoreCase(m.getFlowVersion())) { return false; } return flowRunId == m.getFlowRunId(); }
3.68
flink_FlinkConnection_setAutoCommit
// TODO We currently do not support this, but we can't throw a SQLException here because we want // to support jdbc tools such as beeline and sqlline. @Override public void setAutoCommit(boolean autoCommit) throws SQLException {}
3.68
framework_VTree_doLayout
/** * Tell LayoutManager that a layout is needed later for this VTree */ private void doLayout() { // This calls LayoutManager setNeedsMeasure and layoutNow Util.notifyParentOfSizeChange(this, false); }
3.68
flink_CrossOperator_projectTuple13
/** * Projects a pair of crossed elements to a {@link Tuple} with the previously selected * fields. * * @return The projected data set. * @see Tuple * @see DataSet */ public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> ProjectCross<I1, I2, Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>> projectTuple13() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes); TupleTypeInfo<Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>> tType = new TupleTypeInfo< Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>>(fTypes); return new ProjectCross< I1, I2, Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>>( this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint); }
3.68
flink_SuperstepBarrier_setup
/** Setup the barrier, has to be called at the beginning of each superstep. */ public void setup() { latch = new CountDownLatch(1); }
3.68
hbase_BucketCache_createRecycler
/** * <pre> * Create the {@link Recycler} for {@link BucketEntry#refCnt}, which would be used as * {@link RefCnt#recycler} of {@link HFileBlock#buf} returned from {@link BucketCache#getBlock}. * NOTE: for {@link BucketCache#getBlock}, the {@link RefCnt#recycler} of {@link HFileBlock#buf} * from {@link BucketCache#backingMap} and {@link BucketCache#ramCache} are different: * 1.For {@link RefCnt#recycler} of {@link HFileBlock#buf} from {@link BucketCache#backingMap}, * it is the return value of current {@link BucketCache#createRecycler} method. * * 2.For {@link RefCnt#recycler} of {@link HFileBlock#buf} from {@link BucketCache#ramCache}, * it is {@link ByteBuffAllocator#putbackBuffer}. * </pre> */ private Recycler createRecycler(final BucketEntry bucketEntry) { return () -> { freeBucketEntry(bucketEntry); return; }; }
3.68
flink_HiveFunctionArguments_create
// create from a CallContext public static HiveFunctionArguments create(CallContext callContext) { DataType[] argTypes = callContext.getArgumentDataTypes().toArray(new DataType[0]); Object[] args = new Object[argTypes.length]; BitSet literalIndices = new BitSet(args.length); for (int i = 0; i < args.length; i++) { if (callContext.isArgumentLiteral(i)) { literalIndices.set(i); args[i] = callContext .getArgumentValue( i, argTypes[i].getLogicalType().getDefaultConversion()) .orElse(null); // we always use string type for string constant args because that's what hive UDFs // expect. // it may happen that the type is char when calling the function // in Flink SQL, because Calcite treats string literals as char type. if (args[i] instanceof String) { argTypes[i] = DataTypes.STRING(); } } } return new HiveFunctionArguments(args, argTypes, literalIndices); }
3.68
framework_Table_getColumnHeaders
/** * Gets the headers of the columns. * * <p> * The headers match the property ids given by the set visible column * headers. The table must be set in either * {@link #COLUMN_HEADER_MODE_EXPLICIT} or * {@link #COLUMN_HEADER_MODE_EXPLICIT_DEFAULTS_ID} mode to show the * headers. In the defaults mode any nulls in the headers array are replaced * with id.toString(). * </p> * * @return the array of column headers. */ public String[] getColumnHeaders() { if (columnHeaders == null) { return null; } final String[] headers = new String[visibleColumns.size()]; int i = 0; for (final Object column : visibleColumns) { headers[i++] = getColumnHeader(column); } return headers; }
3.68
flink_AbstractPythonEnvironmentManager_constructEnvironmentVariables
/** * Constructs the environment variables which are used to launch the python UDF worker. * * @return The environment variables which contain the paths of the python dependencies. */ @VisibleForTesting public Map<String, String> constructEnvironmentVariables(String baseDirectory) throws IOException { Map<String, String> env = new HashMap<>(this.systemEnv); constructFilesDirectory(env, baseDirectory); if (dependencyInfo.getPythonPath().isPresent()) { appendToPythonPath( env, Collections.singletonList(dependencyInfo.getPythonPath().get())); } LOG.info("PYTHONPATH of python worker: {}", env.get("PYTHONPATH")); constructRequirementsDirectory(env, baseDirectory); constructArchivesDirectory(env, baseDirectory); // set BOOT_LOG_DIR. env.put("BOOT_LOG_DIR", baseDirectory); // disable the launching of gateway server to prevent this dead loop: // launch UDF worker -> import udf -> import job code // ^ | (If the job code is not enclosed in a // | | if name == 'main' statement) // | V // execute job in local mode <- launch gateway server and submit job to local executor env.put(PYFLINK_GATEWAY_DISABLED, "true"); // set the path of the python interpreter; it will be used to execute the udf worker. env.put("python", dependencyInfo.getPythonExec()); LOG.info("Python interpreter path: {}", dependencyInfo.getPythonExec()); return env; }
3.68
hadoop_PseudoAuthenticator_authenticate
/** * Performs simple authentication against the specified URL. * <p> * If a token is given it does a NOP and returns the given token. * <p> * If no token is given, it will perform an HTTP <code>OPTIONS</code> request injecting an additional * parameter {@link #USER_NAME} in the query string with the value returned by the {@link #getUserName()} * method. * <p> * If the response is successful it will update the authentication token. * * @param url the URL to authenticate against. * @param token the authentication token being used for the user. * * @throws IOException if an IO error occurred. * @throws AuthenticationException if an authentication error occurred. */ @Override public void authenticate(URL url, AuthenticatedURL.Token token) throws IOException, AuthenticationException { String strUrl = url.toString(); String paramSeparator = (strUrl.contains("?")) ? "&" : "?"; strUrl += paramSeparator + USER_NAME_EQ + getUserName(); url = new URL(strUrl); HttpURLConnection conn = token.openConnection(url, connConfigurator); conn.setRequestMethod("OPTIONS"); conn.connect(); AuthenticatedURL.extractToken(conn, token); }
3.68
morf_UpdateStatement_getHints
/** * @return all hints in the order they were declared. */ public List<Hint> getHints() { return hints; }
3.68
framework_AbstractListingConnector_getRowData
/** * Returns the data of the given data row. * * @param row * the row * @return the row data */ protected static JsonValue getRowData(JsonObject row) { return row.get(DataCommunicatorConstants.DATA); }
3.68
flink_TopNBuffer_putAll
/** * Puts a record list into the buffer under the sortKey. Note: if buffer already contains * sortKey, putAll will overwrite the previous value * * @param sortKey sort key with which the specified values are to be associated * @param values record lists to be associated with the specified key */ public void putAll(RowData sortKey, Collection<RowData> values) { Collection<RowData> oldValues = treeMap.get(sortKey); if (oldValues != null) { currentTopNum -= oldValues.size(); } treeMap.put(sortKey, values); currentTopNum += values.size(); }
3.68
framework_Button_isCtrlKey
/** * Checks if the Ctrl key was down when the mouse event took place. * * @return true if Ctrl was pressed when the event occurred, false * otherwise or if unknown */ public boolean isCtrlKey() { if (null != details) { return details.isCtrlKey(); } else { return false; } }
3.68
hadoop_MapReduceBase_configure
/** Default implementation that does nothing. */ public void configure(JobConf job) { }
3.68
dubbo_QosProtocolWrapper_stopServer
/*package*/ void stopServer() { if (hasStarted.compareAndSet(true, false)) { Server server = frameworkModel.getBeanFactory().getBean(Server.class); if (server.isStarted()) { server.stop(); } } }
3.68
hudi_InternalFilter_readFields
/** * Deserialize the fields of this object from <code>in</code>. * * <p>For efficiency, implementations should attempt to re-use storage in the * existing object where possible.</p> * * @param in <code>DataInput</code> to deserialize this object from. * @throws IOException if an I/O error occurs. */ public void readFields(DataInput in) throws IOException { int ver = in.readInt(); if (ver > 0) { // old non-versioned format this.nbHash = ver; this.hashType = Hash.JENKINS_HASH; } else if (ver == VERSION) { this.nbHash = in.readInt(); this.hashType = in.readByte(); } else { throw new IOException("Unsupported version: " + ver); } this.vectorSize = in.readInt(); this.hash = new HashFunction(this.vectorSize, this.nbHash, this.hashType); }
3.68
framework_StaticSection_getText
/** * Returns the textual caption of this cell. * * @return the plain text caption */ public String getText() { return cellState.text; }
3.68
hbase_TableSplit_getTableName
/** * Returns the table name converted to a byte array. * @see #getTable() * @return The table name. */ public byte[] getTableName() { return tableName.getName(); }
3.68
hudi_HoodieAvroHFileReader_getSharedHFileReader
/** * Instantiates the shared HFile reader if not instantiated * @return the shared HFile reader */ private HFile.Reader getSharedHFileReader() { if (!sharedReader.isPresent()) { synchronized (sharedLock) { if (!sharedReader.isPresent()) { sharedReader = Option.of(getHFileReader()); } } } return sharedReader.get(); }
3.68
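The Hudi row above lazily initializes a shared reader with double-checked locking. Below is a generic sketch of the same idiom in plain Java, using a volatile field (the standard safe form of the pattern); the ExpensiveReader type is a hypothetical stand-in, not the Hudi API.

public class LazyHolder {
    static class ExpensiveReader { } // stand-in for the shared HFile reader

    private final Object lock = new Object();
    private volatile ExpensiveReader shared; // volatile makes the unsynchronized read safe

    ExpensiveReader getShared() {
        ExpensiveReader local = shared;      // first check: no lock taken on the hot path
        if (local == null) {
            synchronized (lock) {
                local = shared;              // second check under the lock
                if (local == null) {
                    shared = local = new ExpensiveReader(); // construct exactly once
                }
            }
        }
        return local;
    }
}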
morf_SchemaModificationAdapter_close
/** * @see org.alfasoftware.morf.dataset.DataSetAdapter#close(org.alfasoftware.morf.dataset.DataSetConsumer.CloseState) */ @Override public void close(CloseState closeState) { // only drop the remaining tables if the data copy completed cleanly if (closeState == CloseState.COMPLETE) { dropRemainingTables(); } schemaResource.close(); try { if (!connection.getAutoCommit()) { connection.commit(); } connection.close(); } catch (SQLException e) { throw new RuntimeSqlException("Error closing connection", e); } super.close(closeState); }
3.68
pulsar_NameAndConfigBasedSecretsProviderConfigurator_configureKubernetesRuntimeSecretsProvider
// Kubernetes secrets can be exposed as volume mounts or as environment // variables in the pods. We are currently using the // environment variables way. Essentially the secretName/secretPath is // attached as secretRef to the environment variables // of a pod and kubernetes magically makes the secret pointed to by this combination available as an env variable. @Override public void configureKubernetesRuntimeSecretsProvider(V1PodSpec podSpec, String functionsContainerName, Function.FunctionDetails functionDetails) { // noop }
3.68
dubbo_RpcUtils_isEcho
// check parameterTypesDesc to fix CVE-2020-1948 public static boolean isEcho(String parameterTypesDesc, String method) { return $ECHO.equals(method) && $ECHO_PARAMETER_DESC.equals(parameterTypesDesc); }
3.68
hbase_JvmVersion_getVersion
/** * Return the current JVM version information. */ public static String getVersion() { return System.getProperty("java.vm.vendor", "UNKNOWN_VM_VENDOR") + ' ' + System.getProperty("java.version", "UNKNOWN_JAVA_VERSION") + '-' + System.getProperty("java.vm.version", "UNKNOWN_VM_VERSION"); }
3.68
querydsl_ExpressionUtils_template
/** * Create a new Template expression * * @param cl type of expression * @param template template * @param args template parameters * @return template expression */ @SuppressWarnings("unchecked") public static <T> TemplateExpression<T> template(Class<? extends T> cl, Template template, List<?> args) { if (cl.equals(Boolean.class)) { return (TemplateExpression<T>) new PredicateTemplate(template, args); } else { return new TemplateExpressionImpl<T>(cl, template, args); } }
3.68
hudi_HoodieWriteMetadata_clone
/** * Clones the write metadata with transformed write statuses. * * @param transformedWriteStatuses transformed write statuses * @param <T> type of transformed write statuses * @return Cloned {@link HoodieWriteMetadata<T>} instance */ public <T> HoodieWriteMetadata<T> clone(T transformedWriteStatuses) { HoodieWriteMetadata<T> newMetadataInstance = new HoodieWriteMetadata<>(); newMetadataInstance.setWriteStatuses(transformedWriteStatuses); if (indexLookupDuration.isPresent()) { newMetadataInstance.setIndexLookupDuration(indexLookupDuration.get()); } newMetadataInstance.setCommitted(isCommitted); newMetadataInstance.setCommitMetadata(commitMetadata); if (writeStats.isPresent()) { newMetadataInstance.setWriteStats(writeStats.get()); } if (indexUpdateDuration.isPresent()) { newMetadataInstance.setIndexUpdateDuration(indexUpdateDuration.get()); } if (finalizeDuration.isPresent()) { newMetadataInstance.setFinalizeDuration(finalizeDuration.get()); } if (partitionToReplaceFileIds.isPresent()) { newMetadataInstance.setPartitionToReplaceFileIds(partitionToReplaceFileIds.get()); } return newMetadataInstance; }
3.68
flink_UserDefinedFunctionHelper_validateImplementationMethod
/** Validates an implementation method such as {@code eval()} or {@code accumulate()}. */ private static void validateImplementationMethod( Class<? extends UserDefinedFunction> clazz, boolean rejectStatic, boolean isOptional, String... methodNameOptions) { final Set<String> nameSet = new HashSet<>(Arrays.asList(methodNameOptions)); final List<Method> methods = getAllDeclaredMethods(clazz); boolean found = false; for (Method method : methods) { if (!nameSet.contains(method.getName())) { continue; } found = true; final int modifier = method.getModifiers(); if (!Modifier.isPublic(modifier)) { throw new ValidationException( String.format( "Method '%s' of function class '%s' is not public.", method.getName(), clazz.getName())); } if (Modifier.isAbstract(modifier)) { throw new ValidationException( String.format( "Method '%s' of function class '%s' must not be abstract.", method.getName(), clazz.getName())); } if (rejectStatic && Modifier.isStatic(modifier)) { throw new ValidationException( String.format( "Method '%s' of function class '%s' must not be static.", method.getName(), clazz.getName())); } } if (!found && !isOptional) { throw new ValidationException( String.format( "Function class '%s' does not implement a method named %s.", clazz.getName(), nameSet.stream() .map(s -> "'" + s + "'") .collect(Collectors.joining(" or ")))); } }
3.68
hbase_Increment_toString
/** * */ @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("row="); sb.append(Bytes.toStringBinary(this.row)); if (this.familyMap.isEmpty()) { sb.append(", no columns set to be incremented"); return sb.toString(); } sb.append(", families="); boolean moreThanOne = false; for (Map.Entry<byte[], List<Cell>> entry : this.familyMap.entrySet()) { if (moreThanOne) { sb.append("), "); } else { moreThanOne = true; sb.append("{"); } sb.append("(family="); sb.append(Bytes.toString(entry.getKey())); sb.append(", columns="); if (entry.getValue() == null) { sb.append("NONE"); } else { sb.append("{"); boolean moreThanOneB = false; for (Cell cell : entry.getValue()) { if (moreThanOneB) { sb.append(", "); } else { moreThanOneB = true; } sb.append(CellUtil.getCellKeyAsString(cell) + "+=" + Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); } sb.append("}"); } } sb.append("}"); return sb.toString(); }
3.68
hadoop_IOStatisticsBinding_aggregateMaps
/** * Aggregate two maps, merging the other map into the destination. * @param <E> type of values * @param dest destination map. * @param other other map * @param aggregateFn function to aggregate the values. * @param copyFn function to copy the value */ public static <E> void aggregateMaps( Map<String, E> dest, Map<String, E> other, BiFunction<E, E, E> aggregateFn, Function<E, E> copyFn) { // scan through the other map; copy // any values not in the left map, // aggregate those for which there is already // an entry other.entrySet().forEach(entry -> { String key = entry.getKey(); E rVal = entry.getValue(); E lVal = dest.get(key); if (lVal == null) { dest.put(key, copyFn.apply(rVal)); } else { dest.put(key, aggregateFn.apply(lVal, rVal)); } }); }
3.68
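As a usage sketch for the merge pattern in the row above: summing two counter maps, copying entries that appear only in the right-hand map. The standalone re-implementation mirrors the snippet's parameters; the Long instantiation is illustrative.

import java.util.HashMap;
import java.util.Map;
import java.util.function.BiFunction;
import java.util.function.Function;

public class MapAggregation {
    // Same contract as the snippet: merge 'other' into 'dest' in place.
    static <E> void aggregateMaps(Map<String, E> dest, Map<String, E> other,
                                  BiFunction<E, E, E> aggregateFn, Function<E, E> copyFn) {
        other.forEach((key, rVal) -> {
            E lVal = dest.get(key);
            dest.put(key, lVal == null ? copyFn.apply(rVal) : aggregateFn.apply(lVal, rVal));
        });
    }

    public static void main(String[] args) {
        Map<String, Long> dest = new HashMap<>(Map.of("reads", 3L));
        Map<String, Long> other = Map.of("reads", 2L, "writes", 5L);
        aggregateMaps(dest, other, Long::sum, Function.identity());
        System.out.println(dest); // {reads=5, writes=5} (iteration order may vary)
    }
}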
cron-utils_FieldDefinitionBuilder_withIntMapping
/** * Provides means to define int value mappings between equivalent values. * As a convention, higher values are mapped into lower ones. * * @param source - higher value * @param dest - lower value with equivalent meaning to source * @return this instance */ public FieldDefinitionBuilder withIntMapping(final int source, final int dest) { constraints.withIntValueMapping(source, dest); return this; }
3.68
flink_RocksDBStateBackend_setRocksDBOptions
/** * Sets {@link org.rocksdb.Options} for the RocksDB instances. Because the options are not * serializable and hold native code references, they must be specified through a factory. * * <p>The options created by the factory here are applied on top of the pre-defined options * profile selected via {@link #setPredefinedOptions(PredefinedOptions)}. If the pre-defined * options profile is the default ({@link PredefinedOptions#DEFAULT}), then the factory fully * controls the RocksDB options. * * @param optionsFactory The options factory that lazily creates the RocksDB options. */ public void setRocksDBOptions(RocksDBOptionsFactory optionsFactory) { rocksDBStateBackend.setRocksDBOptions(optionsFactory); }
3.68
hbase_OrderedInt16_encodeShort
/** * Write instance {@code val} into buffer {@code dst}. * @param dst the {@link PositionedByteRange} to write to * @param val the value to write to {@code dst} * @return the number of bytes written */ public int encodeShort(PositionedByteRange dst, short val) { return OrderedBytes.encodeInt16(dst, val, order); }
3.68
rocketmq-connect_MemoryClusterManagementServiceImpl_stop
/** * Stop the cluster manager. */ @Override public void stop() { }
3.68
hadoop_WorkloadDriver_getMapperClass
// The cast is actually checked via isAssignableFrom but the compiler doesn't // recognize this @SuppressWarnings("unchecked") private Class<? extends WorkloadMapper<?, ?, ?, ?>> getMapperClass( String className) { String[] potentialQualifiedClassNames = { WorkloadDriver.class.getPackage().getName() + "." + className, AuditReplayMapper.class.getPackage().getName() + "." + className, className }; for (String qualifiedClassName : potentialQualifiedClassNames) { Class<?> mapperClass; try { mapperClass = getConf().getClassByName(qualifiedClassName); } catch (ClassNotFoundException cnfe) { continue; } if (!WorkloadMapper.class.isAssignableFrom(mapperClass)) { throw new IllegalArgumentException(className + " is not a subclass of " + WorkloadMapper.class.getCanonicalName()); } return (Class<? extends WorkloadMapper<?, ?, ?, ?>>) mapperClass; } throw new IllegalArgumentException("Unable to find workload mapper class: " + className); }
3.68
hadoop_Server_setStatus
/** * Sets a new server status. * <p> * The status must be settable. * <p> * All services will be notified of the status change via the * {@link Service#serverStatusChange(Server.Status, Server.Status)} method. If a service * throws an exception during the notification, the server will be destroyed. * * @param status status to set. * * @throws ServerException thrown if the server has been destroyed because of * a failed notification to a service. */ public void setStatus(Status status) throws ServerException { Check.notNull(status, "status"); if (status.settable) { if (status != this.status) { Status oldStatus = this.status; this.status = status; for (Service service : services.values()) { try { service.serverStatusChange(oldStatus, status); } catch (Exception ex) { log.error("Service [{}] exception during status change to [{}] -server shutting down-, {}", new Object[]{service.getInterface().getSimpleName(), status, ex.getMessage(), ex}); destroy(); throw new ServerException(ServerException.ERROR.S11, service.getInterface().getSimpleName(), status, ex.getMessage(), ex); } } } } else { throw new IllegalArgumentException("Status [" + status + "] is not settable"); } }
3.68
shardingsphere-elasticjob_JobConfiguration_jobListenerTypes
/** * Set job listener types. * * @param jobListenerTypes job listener types * @return ElasticJob configuration builder */ public Builder jobListenerTypes(final String... jobListenerTypes) { this.jobListenerTypes.addAll(Arrays.asList(jobListenerTypes)); return this; }
3.68
flink_TableConfig_setIdleStateRetentionTime
/** * Specifies a minimum and a maximum time interval for how long idle state, i.e., state which * was not updated, will be retained. State will never be cleared until it was idle for less * than the minimum time and will never be kept if it was idle for more than the maximum time. * * <p>When new data arrives for previously cleaned-up state, the new data will be handled as if * it was the first data. This can result in previous results being overwritten. * * <p>Set to 0 (zero) to never clean-up the state. * * <p>NOTE: Cleaning up state requires additional bookkeeping which becomes less expensive for * larger differences of minTime and maxTime. The difference between minTime and maxTime must be * at least 5 minutes. * * <p>NOTE: Currently maxTime will be ignored and it will automatically be derived from minTime * as 1.5 x minTime. * * @param minTime The minimum time interval for which idle state is retained. Set to 0 (zero) to * never clean-up the state. * @param maxTime The maximum time interval for which idle state is retained. Must be at least 5 * minutes greater than minTime. Set to 0 (zero) to never clean-up the state. * @deprecated use {@link #setIdleStateRetention(Duration)} instead. */ @Deprecated public void setIdleStateRetentionTime(Time minTime, Time maxTime) { if (maxTime.toMilliseconds() - minTime.toMilliseconds() < 300000 && !(maxTime.toMilliseconds() == 0 && minTime.toMilliseconds() == 0)) { throw new IllegalArgumentException( "Difference between minTime: " + minTime + " and maxTime: " + maxTime + " should be at least 5 minutes."); } setIdleStateRetention(Duration.ofMillis(minTime.toMilliseconds())); }
3.68
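The validation rule in the deprecated method above (both intervals zero, or a gap of at least five minutes) restated as a standalone check. This is a sketch of the contract only, not the Flink API; class and method names are made up.

import java.time.Duration;

public class RetentionCheck {
    // Both zero means retention disabled; otherwise maxTime - minTime >= 5 minutes.
    static void validate(Duration minTime, Duration maxTime) {
        long min = minTime.toMillis();
        long max = maxTime.toMillis();
        boolean bothZero = min == 0 && max == 0;
        if (!bothZero && max - min < 300_000) {
            throw new IllegalArgumentException(
                "Difference between minTime and maxTime should be at least 5 minutes.");
        }
    }

    public static void main(String[] args) {
        validate(Duration.ZERO, Duration.ZERO);               // ok: retention disabled
        validate(Duration.ofHours(12), Duration.ofHours(24)); // ok: gap is 12 hours
        try {
            validate(Duration.ofMinutes(10), Duration.ofMinutes(12));
        } catch (IllegalArgumentException expected) {
            System.out.println(expected.getMessage());        // gap under 5 minutes is rejected
        }
    }
}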
hadoop_PlacementRule_setConfig
/** * Set the config based on the passed in argument. This construct is used to * not pollute this abstract class with implementation specific references. * @param initArg initialization arguments. */ public void setConfig(Object initArg) { // Default is a noop }
3.68
framework_VDateTimeCalendarPanel_onChange
/* * (non-Javadoc) * * @see * com.google.gwt.event.dom.client.ChangeHandler#onChange(com.google.gwt * .event.dom.client.ChangeEvent) */ @Override @SuppressWarnings("deprecation") public void onChange(ChangeEvent event) { /* * Value from dropdowns always gets set for the value. Like year and * month when resolution is month or year. */ if (event.getSource() == hours) { int h = hours.getSelectedIndex(); if (getDateTimeService().isTwelveHourClock()) { h = h + ampm.getSelectedIndex() * 12; } getDate().setHours(h); if (timeChangeListener != null) { timeChangeListener.changed(h, getDate().getMinutes(), getDate().getSeconds(), DateTimeService.getMilliseconds(getDate())); } event.preventDefault(); event.stopPropagation(); } else if (event.getSource() == mins) { final int m = mins.getSelectedIndex(); getDate().setMinutes(m); if (timeChangeListener != null) { timeChangeListener.changed(getDate().getHours(), m, getDate().getSeconds(), DateTimeService.getMilliseconds(getDate())); } event.preventDefault(); event.stopPropagation(); } else if (event.getSource() == sec) { final int s = sec.getSelectedIndex(); getDate().setSeconds(s); if (timeChangeListener != null) { timeChangeListener.changed(getDate().getHours(), getDate().getMinutes(), s, DateTimeService.getMilliseconds(getDate())); } event.preventDefault(); event.stopPropagation(); } else if (event.getSource() == ampm) { final int h = hours.getSelectedIndex() + (ampm.getSelectedIndex() * 12); getDate().setHours(h); if (timeChangeListener != null) { timeChangeListener.changed(h, getDate().getMinutes(), getDate().getSeconds(), DateTimeService.getMilliseconds(getDate())); } event.preventDefault(); event.stopPropagation(); } }
3.68
zxing_ErrorCorrection_encodeECC200
/** * Creates the ECC200 error correction for an encoded message. * * @param codewords the codewords * @param symbolInfo information about the symbol to be encoded * @return the codewords with interleaved error correction. */ public static String encodeECC200(String codewords, SymbolInfo symbolInfo) { if (codewords.length() != symbolInfo.getDataCapacity()) { throw new IllegalArgumentException( "The number of codewords does not match the selected symbol"); } StringBuilder sb = new StringBuilder(symbolInfo.getDataCapacity() + symbolInfo.getErrorCodewords()); sb.append(codewords); int blockCount = symbolInfo.getInterleavedBlockCount(); if (blockCount == 1) { String ecc = createECCBlock(codewords, symbolInfo.getErrorCodewords()); sb.append(ecc); } else { sb.setLength(sb.capacity()); int[] dataSizes = new int[blockCount]; int[] errorSizes = new int[blockCount]; for (int i = 0; i < blockCount; i++) { dataSizes[i] = symbolInfo.getDataLengthForInterleavedBlock(i + 1); errorSizes[i] = symbolInfo.getErrorLengthForInterleavedBlock(i + 1); } for (int block = 0; block < blockCount; block++) { StringBuilder temp = new StringBuilder(dataSizes[block]); for (int d = block; d < symbolInfo.getDataCapacity(); d += blockCount) { temp.append(codewords.charAt(d)); } String ecc = createECCBlock(temp.toString(), errorSizes[block]); int pos = 0; for (int e = block; e < errorSizes[block] * blockCount; e += blockCount) { sb.setCharAt(symbolInfo.getDataCapacity() + e, ecc.charAt(pos++)); } } } return sb.toString(); }
3.68
hbase_BloomFilterChunk_actualErrorRate
/** * Computes the error rate for this Bloom filter, taking into account the actual number of hash * functions and keys inserted. The return value of this function changes as a Bloom filter is * being populated. Used for reporting the actual error rate of compound Bloom filters when * writing them out. * @return error rate for this particular Bloom filter */ public double actualErrorRate() { return BloomFilterUtil.actualErrorRate(keyCount, byteSize * 8, hashCount); }
3.68
flink_MultipleParameterTool_get
/** * Returns the String value for the given key. The value should only have one item. Use {@link * #getMultiParameter(String)} instead if you want to get a multiple-value parameter. If the key does * not exist it will return null. */ @Override public String get(String key) { addToDefaults(key, null); unrequestedParameters.remove(key); if (!data.containsKey(key)) { return null; } Preconditions.checkState( data.get(key).size() == 1, "Key %s should have only one value.", key); return (String) data.get(key).toArray()[0]; }
3.68
hadoop_ClientDatanodeProtocolServerSideTranslatorPB_queryDiskBalancerPlan
/** * Gets the status of an executing Plan. */ @Override public QueryPlanStatusResponseProto queryDiskBalancerPlan( RpcController controller, QueryPlanStatusRequestProto request) throws ServiceException { try { DiskBalancerWorkStatus result = impl.queryDiskBalancerPlan(); return QueryPlanStatusResponseProto .newBuilder() .setResult(result.getResult().getIntResult()) .setPlanID(result.getPlanID()) .setPlanFile(result.getPlanFile()) .setCurrentStatus(result.currentStateString()) .build(); } catch (Exception e) { throw new ServiceException(e); } }
3.68
hmily_ConfigEnv_addEvent
/** * Add an event subscription processing. * * @param <T> the type parameter * @param consumer the consumer */ @SuppressWarnings({"unchecked", "rawtypes"}) public synchronized <T extends EventData> void addEvent(final EventConsumer<T> consumer) { EVENTS.add((EventConsumer) consumer); }
3.68
framework_ContainerOrderedWrapper_addContainerProperty
/** * Registers a new Property to all Items in the Container. * * @param propertyId * the ID of the new Property. * @param type * the Data type of the new Property. * @param defaultValue * the value all created Properties are initialized to. * @return <code>true</code> if the operation succeeded, <code>false</code> * if not */ @Override public boolean addContainerProperty(Object propertyId, Class<?> type, Object defaultValue) throws UnsupportedOperationException { return container.addContainerProperty(propertyId, type, defaultValue); }
3.68
hadoop_FsCommand_runAll
/** @deprecated use {@link Command#run(String...argv)} */ @Deprecated @Override public int runAll() { return run(args); }
3.68
framework_AbstractHasComponentsConnector_getChildComponents
/* * (non-Javadoc) * * @see com.vaadin.client.HasComponentsConnector#getChildren() */ @Override public List<ComponentConnector> getChildComponents() { if (childComponents == null) { return Collections.emptyList(); } return childComponents; }
3.68
framework_VCalendarPanel_buildCalendarBody
/** * Builds the day and time selectors of the calendar. */ private void buildCalendarBody() { final int weekColumn = 0; final int firstWeekdayColumn = 1; final int headerRow = 0; setWidget(1, 0, days); setCellPadding(0); setCellSpacing(0); getFlexCellFormatter().setColSpan(1, 0, 5); getFlexCellFormatter().setStyleName(1, 0, parent.getStylePrimaryName() + "-calendarpanel-body"); days.getFlexCellFormatter().setStyleName(headerRow, weekColumn, "v-week"); days.setHTML(headerRow, weekColumn, "<strong></strong>"); // Hide the week column if week numbers are not to be displayed. days.getFlexCellFormatter().setVisible(headerRow, weekColumn, isShowISOWeekNumbers()); days.getRowFormatter().setStyleName(headerRow, parent.getStylePrimaryName() + "-calendarpanel-weekdays"); if (isShowISOWeekNumbers()) { days.getFlexCellFormatter().setStyleName(headerRow, weekColumn, "v-first"); days.getFlexCellFormatter().setStyleName(headerRow, firstWeekdayColumn, ""); days.getRowFormatter().addStyleName(headerRow, parent.getStylePrimaryName() + "-calendarpanel-weeknumbers"); } else { days.getFlexCellFormatter().setStyleName(headerRow, weekColumn, ""); days.getFlexCellFormatter().setStyleName(headerRow, firstWeekdayColumn, "v-first"); } days.getFlexCellFormatter().setStyleName(headerRow, firstWeekdayColumn + 6, "v-last"); // Print weekday names final int firstDay = getDateTimeService().getFirstDayOfWeek(); for (int i = 0; i < 7; i++) { int day = i + firstDay; if (day > 6) { day = 0; } if (getResolution().getCalendarField() > Resolution.MONTH .getCalendarField()) { days.setHTML(headerRow, firstWeekdayColumn + i, "<strong>" + getDateTimeService().getShortDay(day) + "</strong>"); } else { days.setHTML(headerRow, firstWeekdayColumn + i, ""); } Roles.getColumnheaderRole().set(days.getCellFormatter() .getElement(headerRow, firstWeekdayColumn + i)); } // Zero out hours, minutes, seconds, and milliseconds to compare dates // without time part final Date tmp = new Date(); final Date today = new Date(tmp.getYear(), tmp.getMonth(), tmp.getDate()); final Date selectedDate = value == null ? null : new Date(value.getYear(), value.getMonth(), value.getDate()); final int startWeekDay = getDateTimeService() .getStartWeekDay(displayedMonth); final Date curr = (Date) displayedMonth.clone(); // Start from the first day of the week that at least partially belongs // to the current month curr.setDate(1 - startWeekDay); // No month has more than 6 weeks so 6 is a safe maximum for rows. for (int weekOfMonth = 1; weekOfMonth < 7; weekOfMonth++) { for (int dayOfWeek = 0; dayOfWeek < 7; dayOfWeek++) { // Actually write the day of month Date dayDate = (Date) curr.clone(); Day day = new Day(dayDate); day.setStyleName( parent.getStylePrimaryName() + "-calendarpanel-day"); if (!isDateInsideRange(dayDate, Resolution.DAY)) { day.addStyleDependentName(CN_OUTSIDE_RANGE); } if (curr.equals(selectedDate)) { day.addStyleDependentName(CN_SELECTED); Roles.getGridcellRole().setAriaSelectedState( day.getElement(), SelectedValue.TRUE); selectedDay = day; } if (curr.equals(today)) { day.addStyleDependentName(CN_TODAY); } if (curr.equals(focusedDate)) { focusedDay = day; if (hasFocus) { day.addStyleDependentName(CN_FOCUSED); } } if (curr.getMonth() != displayedMonth.getMonth()) { day.addStyleDependentName(CN_OFFMONTH); } days.setWidget(weekOfMonth, firstWeekdayColumn + dayOfWeek, day); Roles.getGridcellRole().set(days.getCellFormatter().getElement( weekOfMonth, firstWeekdayColumn + dayOfWeek)); // ISO week numbers if requested days.getCellFormatter().setVisible(weekOfMonth, weekColumn, isShowISOWeekNumbers()); if (isShowISOWeekNumbers()) { final String baseCssClass = parent.getStylePrimaryName() + "-calendarpanel-weeknumber"; String weekCssClass = baseCssClass; int weekNumber = DateTimeService.getISOWeekNumber(curr); days.setHTML(weekOfMonth, 0, "<span class=\"" + weekCssClass + "\"" + ">" + weekNumber + "</span>"); } curr.setDate(curr.getDate() + 1); } } }
3.68
flink_DataSet_reduce
/** * Applies a Reduce transformation on a non-grouped {@link DataSet}. * * <p>The transformation consecutively calls a {@link * org.apache.flink.api.common.functions.RichReduceFunction} until only a single element remains * which is the result of the transformation. A ReduceFunction combines two elements into one * new element of the same type. * * @param reducer The ReduceFunction that is applied on the DataSet. * @return A ReduceOperator that represents the reduced DataSet. * @see org.apache.flink.api.common.functions.RichReduceFunction * @see ReduceOperator * @see DataSet */ public ReduceOperator<T> reduce(ReduceFunction<T> reducer) { if (reducer == null) { throw new NullPointerException("Reduce function must not be null."); } return new ReduceOperator<>(this, clean(reducer), Utils.getCallLocationName()); }
3.68
querydsl_QueryBase_restrict
/** * Defines both limit and offset of the query results, * use {@link QueryModifiers#EMPTY} to apply no paging. * * @param modifiers query modifiers * @return the current object */ public Q restrict(QueryModifiers modifiers) { return queryMixin.restrict(modifiers); }
3.68
flink_NetUtils_getPortRangeFromString
/** * Returns an iterator over available ports defined by the range definition. * * @param rangeDefinition String describing a single port, a range of ports or multiple ranges. * @return Set of ports from the range definition * @throws NumberFormatException If an invalid string is passed. */ public static Iterator<Integer> getPortRangeFromString(String rangeDefinition) throws NumberFormatException { final String[] ranges = rangeDefinition.trim().split(","); UnionIterator<Integer> iterators = new UnionIterator<>(); for (String rawRange : ranges) { Iterator<Integer> rangeIterator; String range = rawRange.trim(); int dashIdx = range.indexOf('-'); if (dashIdx == -1) { // only one port in range: final int port = Integer.parseInt(range); if (!isValidHostPort(port)) { throw new IllegalConfigurationException( "Invalid port configuration. Port must be between 0 " + "and 65535, but was " + port + "."); } rangeIterator = Collections.singleton(Integer.valueOf(range)).iterator(); } else { // evaluate range final int start = Integer.parseInt(range.substring(0, dashIdx)); if (!isValidHostPort(start)) { throw new IllegalConfigurationException( "Invalid port configuration. Port must be between 0 " + "and 65535, but range start was " + start + "."); } final int end = Integer.parseInt(range.substring(dashIdx + 1)); if (!isValidHostPort(end)) { throw new IllegalConfigurationException( "Invalid port configuration. Port must be between 0 " + "and 65535, but range end was " + end + "."); } if (start >= end) { throw new IllegalConfigurationException( "Invalid port configuration." + " Port range end must be bigger than port range start." + " If you wish to use single port please provide the value directly, not as a range." + " Given range: " + range); } rangeIterator = new Iterator<Integer>() { int i = start; @Override public boolean hasNext() { return i <= end; } @Override public Integer next() { return i++; } @Override public void remove() { throw new UnsupportedOperationException("Remove not supported"); } }; } iterators.add(rangeIterator); } return iterators; }
3.68
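A short usage sketch for the range grammar the row above accepts (single ports and dash-separated ranges, comma-separated). It assumes the Flink NetUtils class from the row resolves on the classpath at org.apache.flink.util.NetUtils.

import java.util.Iterator;

public class PortRangeDemo {
    public static void main(String[] args) {
        // "50100-50102,50200" should yield 50100, 50101, 50102, 50200
        Iterator<Integer> ports =
                org.apache.flink.util.NetUtils.getPortRangeFromString("50100-50102,50200");
        while (ports.hasNext()) {
            System.out.println(ports.next());
        }
    }
}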
querydsl_NumberExpression_floatValue
/** * Create a {@code cast(this as float)} expression * * <p>Get the float expression of this numeric expression</p> * * @return this.floatValue() * @see java.lang.Number#floatValue() */ public NumberExpression<Float> floatValue() { return castToNum(Float.class); }
3.68
hadoop_TypedBytesWritable_getType
/** Get the type code embedded in the first byte. */ public Type getType() { byte[] bytes = getBytes(); if (bytes == null || bytes.length == 0) { return null; } for (Type type : Type.values()) { if (type.code == (int) bytes[0]) { return type; } } return null; }
3.68
flink_MemorySegment_copyFromUnsafe
/** * Bulk copy method. Copies {@code numBytes} bytes from source unsafe object and pointer. NOTE: * This is an unsafe method, no check here, please be careful. * * @param offset The position in this memory segment where writing starts. * @param source The unsafe memory to copy the bytes from. * @param sourcePointer The position in the source unsafe memory to copy the chunk from. * @param numBytes The number of bytes to copy. * @throws IndexOutOfBoundsException If this segment can not contain the given number of bytes * (starting from offset). */ public void copyFromUnsafe(int offset, Object source, int sourcePointer, int numBytes) { final long thisPointer = this.address + offset; if (thisPointer + numBytes > addressLimit) { throw new IndexOutOfBoundsException( String.format( "offset=%d, numBytes=%d, address=%d", offset, numBytes, this.address)); } UNSAFE.copyMemory(source, sourcePointer, this.heapMemory, thisPointer, numBytes); }
3.68
flink_SerializedValue_getByteArray
/** * Returns byte array for serialized data. * * @return Serialized data. */ public byte[] getByteArray() { return serializedData; }
3.68
morf_ViewBean_knowsDependencies
/** * @see org.alfasoftware.morf.metadata.View#knowsDependencies() */ @Override public boolean knowsDependencies() { return knowsDependencies; }
3.68
hudi_HoodieSimpleBucketLayout_determinesNumFileGroups
/** * Bucketing controls the number of file groups directly. */ @Override public boolean determinesNumFileGroups() { return true; }
3.68
framework_DragAndDropWrapper_getAbsoluteTop
/** * * @return the absolute position of wrapper on the page */ public Integer getAbsoluteTop() { return (Integer) getData("absoluteTop"); }
3.68
flink_BlockStatementSplitter_rewriteBlock
/** * Rewrites the code block that was used to initialize this object. * * @param context prefix for extracted blocks. * @return the rewritten code block, including calls to extracted methods */ public String rewriteBlock(String context) { this.visitor = new BlockStatementVisitor(code, parameters); JavaParser javaParser = new JavaParser(visitor.tokenStream); javaParser.getInterpreter().setPredictionMode(PredictionMode.SLL); visitor.visitStatement(javaParser.statement(), context); visitor.rewrite(); return visitor.rewriter.getText(); }
3.68
hbase_CompactionRequestImpl_setIsMajor
/** * Specify if this compaction should be a major compaction based on the state of the store * @param isMajor <tt>true</tt> if the system determines that this compaction should be a major * compaction * @param isAllFiles <tt>true</tt> if all store files are selected for this compaction */ public void setIsMajor(boolean isMajor, boolean isAllFiles) { assert isAllFiles || !isMajor; this.isMajor = !isAllFiles ? DisplayCompactionType.MINOR : (isMajor ? DisplayCompactionType.MAJOR : DisplayCompactionType.ALL_FILES); }
3.68
hbase_WALPrettyPrinter_enableJSON
/** * turns JSON output on */ public void enableJSON() { outputJSON = true; }
3.68
flink_HiveStatsUtil_createCatalogColumnStats
/** Create a map of Flink column stats from the given Hive column stats. */ public static Map<String, CatalogColumnStatisticsDataBase> createCatalogColumnStats( @Nonnull List<ColumnStatisticsObj> hiveColStats, String hiveVersion) { checkNotNull(hiveColStats, "hiveColStats can not be null"); Map<String, CatalogColumnStatisticsDataBase> colStats = new HashMap<>(); for (ColumnStatisticsObj colStatsObj : hiveColStats) { CatalogColumnStatisticsDataBase columnStats = createTableColumnStats( HiveTypeUtil.toFlinkType( TypeInfoUtils.getTypeInfoFromTypeString( colStatsObj.getColType())), colStatsObj.getStatsData(), hiveVersion); colStats.put(colStatsObj.getColName(), columnStats); } return colStats; }
3.68
querydsl_SQLTemplates_serializeInsert
/** * template method for INSERT serialization * * @param metadata query metadata * @param entity relational path of the target table * @param batches insert batches * @param context serializer context */ public void serializeInsert(QueryMetadata metadata, RelationalPath<?> entity, List<SQLInsertBatch> batches, SQLSerializer context) { context.serializeForInsert(metadata, entity, batches); if (!metadata.getFlags().isEmpty()) { context.serialize(Position.END, metadata.getFlags()); } }
3.68
hudi_HoodieListData_eager
/** * Creates instance of {@link HoodieListData} bearing *eager* execution semantic * * @param listData a {@link List} of objects in type T * @param <T> type of object * @return a new instance containing the {@link List<T>} reference */ public static <T> HoodieListData<T> eager(List<T> listData) { return new HoodieListData<>(listData, false); }
3.68
hadoop_PendingSet_size
/** * Number of commits. * @return the number of commits in this structure. */ public int size() { return commits != null ? commits.size() : 0; }
3.68
hbase_HRegionServer_checkFileSystem
/** * Checks to see if the file system is still accessible. If not, sets abortRequested and * stopRequested * @return false if file system is not available */ boolean checkFileSystem() { if (this.dataFsOk && this.dataFs != null) { try { FSUtils.checkFileSystemAvailable(this.dataFs); } catch (IOException e) { abort("File System not available", e); this.dataFsOk = false; } } return this.dataFsOk; }
3.68
hbase_RegionMover_filename
/** * Path of file where regions will be written to during unloading/read from during loading * @return RegionMoverBuilder object */ public RegionMoverBuilder filename(String filename) { this.filename = filename; return this; }
3.68
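A hedged sketch of the builder in context; the constructor argument, build(), and unload() calls follow the usual RegionMover flow but are assumptions here:

// Sketch: unload regions, persisting the region list so a later load() can restore them.
RegionMover mover = new RegionMover.RegionMoverBuilder("rs1.example.com:16020")
  .filename("/tmp/rs1_regions")
  .build();
mover.unload();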
flink_AdaptiveBatchScheduler_computeVertexParallelismStoreForDynamicGraph
/**
 * Compute the {@link VertexParallelismStore} for all given vertices in a dynamic graph, which
 * will set defaults and ensure that the returned store contains valid parallelisms, with the
 * configured default max parallelism.
 *
 * @param vertices the vertices to compute parallelism for
 * @param defaultMaxParallelism the global default max parallelism
 * @return the computed parallelism store
 */
@VisibleForTesting
public static VertexParallelismStore computeVertexParallelismStoreForDynamicGraph(
        Iterable<JobVertex> vertices, int defaultMaxParallelism) {
    // For a dynamic graph, there is no need to normalize vertex parallelism. If the max
    // parallelism is not configured and the parallelism is a positive value, max
    // parallelism can be computed against the parallelism; otherwise it needs to use the
    // global default max parallelism.
    return computeVertexParallelismStore(
            vertices,
            v -> {
                if (v.getParallelism() > 0) {
                    return getDefaultMaxParallelism(v);
                } else {
                    return defaultMaxParallelism;
                }
            },
            Function.identity());
}
3.68
hudi_FileSystemViewManager_getFileSystemView
/**
 * Main API to get the file-system view for the base-path.
 *
 * @param metaClient HoodieTableMetaClient
 * @return the {@link SyncableFileSystemView} for the table's base path
 */
public SyncableFileSystemView getFileSystemView(HoodieTableMetaClient metaClient) {
  return globalViewMap.computeIfAbsent(metaClient.getBasePath(),
      (path) -> viewCreator.apply(metaClient, viewStorageConfig));
}
3.68
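The manager builds at most one view per base path via computeIfAbsent; a generic, self-contained restatement of that caching pattern (all names illustrative):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

class ViewCache<V> {
  private final Map<String, V> cache = new ConcurrentHashMap<>();
  private final Function<String, V> creator;

  ViewCache(Function<String, V> creator) {
    this.creator = creator;
  }

  V viewFor(String basePath) {
    // The creator runs at most once per base path; later calls reuse the cached view.
    return cache.computeIfAbsent(basePath, creator);
  }
}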
framework_AbstractSelect_addItemSetChangeListener
/**
 * Adds an Item set change listener for the object.
 *
 * @see Container.ItemSetChangeNotifier#addListener(Container.ItemSetChangeListener)
 */
@Override
public void addItemSetChangeListener(
        Container.ItemSetChangeListener listener) {
    if (itemSetEventListeners == null) {
        itemSetEventListeners = new LinkedHashSet<ItemSetChangeListener>();
    }
    itemSetEventListeners.add(listener);
}
3.68
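A hedged usage sketch on a concrete AbstractSelect subclass (the ComboBox choice and notification body are illustrative):

ComboBox select = new ComboBox("Options");
select.addItemSetChangeListener(new Container.ItemSetChangeListener() {
    @Override
    public void containerItemSetChange(Container.ItemSetChangeEvent event) {
        // React to items being added to or removed from the select's container.
        Notification.show("Item set changed");
    }
});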
flink_TempBarrier_getIterator
/**
 * This method resets the input!
 *
 * @see org.apache.flink.runtime.operators.util.CloseableInputProvider#getIterator()
 */
@Override
public MutableObjectIterator<T> getIterator() throws InterruptedException, IOException {
    synchronized (this.lock) {
        while (this.exception == null && !this.writingDone) {
            this.lock.wait(5000);
        }
    }
    if (this.exception != null) {
        throw new RuntimeException(
                "An error occurred creating the temp table.", this.exception);
    } else if (this.writingDone) {
        final DataInputView in = this.buffer.flip();
        return new InputViewIterator<>(in, this.serializer);
    } else {
        return null;
    }
}
3.68
pulsar_LoadSimulationController_handleTrade
// Handle the command line arguments associated with the trade command.
private void handleTrade(final ShellArguments arguments) throws Exception {
    final List<String> commandArguments = arguments.commandArguments;
    // Trade expects three application arguments: tenant, namespace, and topic.
    if (checkAppArgs(commandArguments.size() - 1, 3)) {
        final String topic = makeTopic(commandArguments.get(1), commandArguments.get(2),
                commandArguments.get(3));
        trade(arguments, topic, random.nextInt(clients.length));
    }
}
3.68
framework_VaadinService_closeSession
/**
 * Sets the given session to be closed and all its UI state to be discarded
 * at the end of the current request, or at the end of the next request if
 * there is no ongoing one.
 * <p>
 * After the session has been discarded, any UIs that have been left open
 * will give a Session Expired error and a new session will be created for
 * serving new UIs.
 * <p>
 * To avoid causing out of sync errors, you should typically redirect to
 * some other page using {@link Page#setLocation(String)} to make the
 * browser unload the invalidated UI.
 *
 * @see SystemMessages#getSessionExpiredCaption()
 *
 * @param session
 *            the session to close
 */
public void closeSession(VaadinSession session) {
    session.close();
}
3.68
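A sketch of the shutdown sequence the javadoc itself recommends: redirect first so the browser unloads the UI, then close. The logout() wrapper and target URL are assumptions:

public void logout() {
    // Redirect so the browser navigates away from the soon-to-be-invalid UI...
    getUI().getPage().setLocation("/logged-out.html");
    // ...then mark the session for closing at the end of this request.
    getUI().getSession().close();
}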
hbase_ZNodeClearer_clear
/**
 * Delete the master znode if its content (ServerName string) is the same as the one in the znode
 * file (env: HBASE_ZNODE_FILE). In case of master-RS collocation we extract the ServerName
 * string from the rsZnode path (HBASE-14861).
 * @return true on successful deletion, false otherwise.
 */
public static boolean clear(Configuration conf) {
  Configuration tempConf = new Configuration(conf);
  tempConf.setInt("zookeeper.recovery.retry", 0);
  ZKWatcher zkw;
  try {
    zkw = new ZKWatcher(tempConf, "clean znode for master", new Abortable() {
      @Override
      public void abort(String why, Throwable e) {
      }

      @Override
      public boolean isAborted() {
        return false;
      }
    });
  } catch (IOException e) {
    LOG.warn("Can't connect to zookeeper to read the master znode", e);
    return false;
  }
  String znodeFileContent;
  try {
    znodeFileContent = ZNodeClearer.readMyEphemeralNodeOnDisk();
    return MasterAddressTracker.deleteIfEquals(zkw, znodeFileContent);
  } catch (FileNotFoundException fnfe) {
    // If no file, just keep going -- return success.
    LOG.warn("Can't find the znode file; presume non-fatal", fnfe);
    return true;
  } catch (IOException e) {
    LOG.warn("Can't read the content of the znode file", e);
    return false;
  } finally {
    zkw.close();
  }
}
3.68
hadoop_StartupProgress_endPhase
/**
 * Ends execution of the specified phase.
 *
 * @param phase Phase to end
 */
public void endPhase(Phase phase) {
  if (!isComplete()) {
    phases.get(phase).endTime = monotonicNow();
  }
  LOG.debug("End of the phase: {}", phase);
}
3.68
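Phases are bracketed by matching calls; a hedged sketch assuming beginPhase is the counterpart API and LOADING_FSIMAGE is a valid Phase constant:

startupProgress.beginPhase(Phase.LOADING_FSIMAGE);
loadFsImage(); // hypothetical work performed during the phase
startupProgress.endPhase(Phase.LOADING_FSIMAGE);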
zxing_State_shiftAndAppend
// Create a new state representing this state, with a temporary shift
// to a different mode to output a single value.
State shiftAndAppend(int mode, int value) {
  Token token = this.token;
  int thisModeBitCount = this.mode == HighLevelEncoder.MODE_DIGIT ? 4 : 5;
  // Shifts exist only to UPPER and PUNCT, both with tokens size 5.
  token = token.add(HighLevelEncoder.SHIFT_TABLE[this.mode][mode], thisModeBitCount);
  token = token.add(value, 5);
  return new State(token, this.mode, 0, this.bitCount + thisModeBitCount + 5);
}
3.68
hbase_Get_getFamilyMap
/**
 * Method for retrieving the get's familyMap.
 * @return the map of column families to the set of qualifiers requested for each family
 */
public Map<byte[], NavigableSet<byte[]>> getFamilyMap() {
  return this.familyMap;
}
3.68
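A hedged usage sketch: inspecting which families and qualifiers a Get will fetch (the Bytes utility and addColumn usage are assumed from the standard HBase client API):

Get get = new Get(Bytes.toBytes("row1"));
get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"));
for (Map.Entry<byte[], NavigableSet<byte[]>> e : get.getFamilyMap().entrySet()) {
  // One entry per requested column family; the set holds its requested qualifiers.
  System.out.println(Bytes.toString(e.getKey()) + " -> " + e.getValue().size() + " qualifier(s)");
}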
flink_Tuple9_equals
/**
 * Deep equality for tuples by calling equals() on the tuple members.
 *
 * @param o the object checked for equality
 * @return true if this is equal to o.
 */
@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    if (!(o instanceof Tuple9)) {
        return false;
    }
    @SuppressWarnings("rawtypes")
    Tuple9 tuple = (Tuple9) o;
    if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
        return false;
    }
    if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
        return false;
    }
    if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
        return false;
    }
    if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
        return false;
    }
    if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {
        return false;
    }
    if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) {
        return false;
    }
    if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) {
        return false;
    }
    if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) {
        return false;
    }
    if (f8 != null ? !f8.equals(tuple.f8) : tuple.f8 != null) {
        return false;
    }
    return true;
}
3.68
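The field-by-field comparison gives value semantics; a small sketch (Tuple9.of is the standard factory, asserts illustrative):

Tuple9<Integer, Integer, Integer, Integer, Integer, Integer, Integer, Integer, String> a =
        Tuple9.of(1, 2, 3, 4, 5, 6, 7, 8, "nine");
Tuple9<Integer, Integer, Integer, Integer, Integer, Integer, Integer, Integer, String> b =
        Tuple9.of(1, 2, 3, 4, 5, 6, 7, 8, "nine");
// All nine fields compare equal via their own equals(), null-safely.
assert a.equals(b);
assert !a.equals(Tuple9.of(1, 2, 3, 4, 5, 6, 7, 8, "NINE"));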
querydsl_JTSGeometryExpressions_lineStringOperation
/**
 * Create a new LineString operation expression
 *
 * @param op operator
 * @param args arguments
 * @return operation expression
 */
public static JTSLineStringExpression<LineString> lineStringOperation(Operator op,
        Expression<?>... args) {
    return new JTSLineStringOperation<LineString>(LineString.class, op, args);
}
3.68
framework_VCalendarPanel_getPreviousKey
/**
 * The key that selects the previous week in the calendar. By default this
 * is the up arrow key but by overriding this method it can be changed to
 * whatever you like.
 *
 * @return the key code for selecting the previous week
 */
protected int getPreviousKey() {
    return KeyCodes.KEY_UP;
}
3.68
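A hedged sketch of the override hook described above; the subclass name and replacement key are illustrative, and assume GWT's KeyCodes exposes letter-key constants:

class MyCalendarPanel extends VCalendarPanel {
    @Override
    protected int getPreviousKey() {
        // Navigate to the previous week with 'K' instead of the up arrow.
        return KeyCodes.KEY_K;
    }
}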
morf_AbstractSqlDialectTest_expectedHints3a
/**
 * @return The expected SQL for the {@link UpdateStatement#useParallelDml(int)} directive.
 */
protected String expectedHints3a() {
  return "UPDATE " + tableName("Foo") + " SET a = b";
}
3.68