Dataset columns: name (string, length 12-178), code_snippet (string, length 8-36.5k), score (float64, range 3.26-3.68). Each row below lists a function name, its source snippet, and its score.
dubbo_FrameworkModel_getAllInstances
/** * Get all framework model instances. * @return an unmodifiable snapshot of all framework model instances */ public static List<FrameworkModel> getAllInstances() { synchronized (globalLock) { return Collections.unmodifiableList(new ArrayList<>(allInstances)); } }
3.68
graphhopper_BBox_parseTwoPoints
/** * This method creates a BBox out of a string in format lat1,lon1,lat2,lon2 */ public static BBox parseTwoPoints(String objectAsString) { String[] splittedObject = objectAsString.split(","); if (splittedObject.length != 4) throw new IllegalArgumentException("BBox should have 4 parts but was " + objectAsString); double minLat = Double.parseDouble(splittedObject[0]); double minLon = Double.parseDouble(splittedObject[1]); double maxLat = Double.parseDouble(splittedObject[2]); double maxLon = Double.parseDouble(splittedObject[3]); return BBox.fromPoints(minLat, minLon, maxLat, maxLon); }
3.68
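A minimal usage sketch for the parser above; the com.graphhopper.util.shapes package and the sample coordinates are assumptions for illustration, not taken from the snippet.
import com.graphhopper.util.shapes.BBox;

public class BBoxParseExample {
    public static void main(String[] args) {
        // "lat1,lon1,lat2,lon2" as described in the Javadoc above
        BBox box = BBox.parseTwoPoints("49.01,11.25,49.12,11.40");
        System.out.println(box);
        // a string with the wrong number of parts is rejected with IllegalArgumentException
        try {
            BBox.parseTwoPoints("49.01,11.25,49.12");
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}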
flink_GeneratedClass_newInstance
/** Create a new instance of this generated class. */ public T newInstance(ClassLoader classLoader) { try { return compile(classLoader) .getConstructor(Object[].class) // Because Constructor.newInstance(Object... initargs), we need to load // references into a new Object[], otherwise it cannot be compiled. .newInstance(new Object[] {references}); } catch (Throwable e) { throw new RuntimeException( "Could not instantiate generated class '" + className + "'", e); } }
3.68
hbase_MutableSegment_add
/** * Adds the given cell into the segment * @param cell the cell to add * @param mslabUsed whether using MSLAB */ public void add(Cell cell, boolean mslabUsed, MemStoreSizing memStoreSizing, boolean sizeAddedPreOperation) { internalAdd(cell, mslabUsed, memStoreSizing, sizeAddedPreOperation); }
3.68
shardingsphere-elasticjob_JobNodePath_getShardingNodePath
/** * Get sharding node path. * * @param item sharding item * @param nodeName node name * @return sharding node path */ public String getShardingNodePath(final String item, final String nodeName) { return String.format("%s/%s/%s", getShardingNodePath(), item, nodeName); }
3.68
hbase_OrderedBytes_decodeInt16
/** * Decode an {@code int16} value. * @see #encodeInt16(PositionedByteRange, short, Order) */ public static short decodeInt16(PositionedByteRange src) { final byte header = src.get(); assert header == FIXED_INT16 || header == DESCENDING.apply(FIXED_INT16); Order ord = header == FIXED_INT16 ? ASCENDING : DESCENDING; short val = (short) ((ord.apply(src.get()) ^ 0x80) & 0xff); val = (short) ((val << 8) + (ord.apply(src.get()) & 0xff)); return val; }
3.68
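A self-contained arithmetic sketch of the ASCENDING decode path shown above, using plain ints instead of the HBase PositionedByteRange API; the stored byte values are worked out from the sign-flip scheme the method implies and are illustrative only.
public class Int16DecodeSketch {
    // mirrors: val = ((b1 ^ 0x80) & 0xff) << 8, then + (b2 & 0xff), cast to short
    static short decodeAscending(int b1, int b2) {
        int hi = (b1 ^ 0x80) & 0xff;
        int lo = b2 & 0xff;
        return (short) ((hi << 8) + lo);
    }

    public static void main(String[] args) {
        // 1 is stored as 0x80 0x01 (sign bit flipped so byte order sorts like the numbers)
        System.out.println(decodeAscending(0x80, 0x01)); // 1
        // -1 is stored as 0x7F 0xFF
        System.out.println(decodeAscending(0x7F, 0xFF)); // -1
        // Short.MIN_VALUE is stored as 0x00 0x00
        System.out.println(decodeAscending(0x00, 0x00)); // -32768
    }
}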
hbase_RESTServer_main
/** * The main method for the HBase rest server. * @param args command-line arguments * @throws Exception exception */ public static void main(String[] args) throws Exception { LOG.info("***** STARTING service '" + RESTServer.class.getSimpleName() + "' *****"); VersionInfo.logVersion(); final Configuration conf = HBaseConfiguration.create(); parseCommandLine(args, conf); RESTServer server = new RESTServer(conf); try { server.run(); server.join(); } catch (Exception e) { LOG.error(HBaseMarkers.FATAL, "Failed to start server", e); System.exit(1); } LOG.info("***** STOPPING service '" + RESTServer.class.getSimpleName() + "' *****"); }
3.68
flink_SqlDateParser_parseField
/** * Static utility to parse a field of type Date from a byte sequence that represents text * characters (such as when read from a file stream). * * @param bytes The bytes containing the text data that should be parsed. * @param startPos The offset to start the parsing. * @param length The length of the byte sequence (counting from the offset). * @param delimiter The delimiter that terminates the field. * @return The parsed value. * @throws IllegalArgumentException Thrown when the value cannot be parsed because the text * does not represent a valid date. */ public static final Date parseField(byte[] bytes, int startPos, int length, char delimiter) { final int limitedLen = nextStringLength(bytes, startPos, length, delimiter); if (limitedLen > 0 && (Character.isWhitespace(bytes[startPos]) || Character.isWhitespace(bytes[startPos + limitedLen - 1]))) { throw new NumberFormatException( "There is leading or trailing whitespace in the numeric field."); } final String str = new String(bytes, startPos, limitedLen, ConfigConstants.DEFAULT_CHARSET); return Date.valueOf(str); }
3.68
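A hedged usage sketch of the static parser above; the org.apache.flink.types.parser package is an assumption, and the sample bytes and delimiter are made up for illustration.
import java.nio.charset.StandardCharsets;
import java.sql.Date;

import org.apache.flink.types.parser.SqlDateParser;

public class SqlDateParseExample {
    public static void main(String[] args) {
        byte[] field = "2024-03-15|rest-of-line".getBytes(StandardCharsets.US_ASCII);
        // parse from offset 0 up to the '|' delimiter
        Date d = SqlDateParser.parseField(field, 0, field.length, '|');
        System.out.println(d); // 2024-03-15
    }
}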
hbase_ThriftMetrics_exception
/** * Increment the count for a specific exception type. This is called for each exception type that * is returned to the thrift handler. * @param rawThrowable type of exception */ public void exception(Throwable rawThrowable) { source.exception(); Throwable throwable = unwrap(rawThrowable); /** * Keep some metrics for commonly seen exceptions Try and put the most common types first. Place * child types before the parent type that they extend. If this gets much larger we might have * to go to a hashmap */ if (throwable != null) { if (throwable instanceof OutOfOrderScannerNextException) { source.outOfOrderException(); } else if (throwable instanceof RegionTooBusyException) { source.tooBusyException(); } else if (throwable instanceof UnknownScannerException) { source.unknownScannerException(); } else if (throwable instanceof ScannerResetException) { source.scannerResetException(); } else if (throwable instanceof RegionMovedException) { source.movedRegionException(); } else if (throwable instanceof NotServingRegionException) { source.notServingRegionException(); } else if (throwable instanceof FailedSanityCheckException) { source.failedSanityException(); } else if (throwable instanceof MultiActionResultTooLarge) { source.multiActionTooLargeException(); } else if (throwable instanceof CallQueueTooBigException) { source.callQueueTooBigException(); } else if (throwable instanceof QuotaExceededException) { source.quotaExceededException(); } else if (throwable instanceof RpcThrottlingException) { source.rpcThrottlingException(); } else if (throwable instanceof CallDroppedException) { source.callDroppedException(); } else if (throwable instanceof RequestTooBigException) { source.requestTooBigException(); } else { source.otherExceptions(); if (LOG.isDebugEnabled()) { LOG.debug("Unknown exception type", throwable); } } } }
3.68
hadoop_ProxyCombiner_getConnectionId
/** * Since this is incapable of returning multiple connection IDs, simply * return the first one. In most cases, the connection ID should be the same * for all proxies. */ @Override public ConnectionId getConnectionId() { return RPC.getConnectionIdForProxy(proxies[0]); }
3.68
hadoop_BlockReaderLocalMetrics_getShortCircuitReadRollingAverages
/** * Get the MutableRollingAverages metric for testing only. * @return the short-circuit read rolling averages metric */ @VisibleForTesting public MutableRollingAverages getShortCircuitReadRollingAverages() { return shortCircuitReadRollingAverages; }
3.68
pulsar_ConfigUtils_getConfigValueAsBoolean
/** * Utility method to get a boolean from the {@link ServiceConfiguration}. If the key is not present in the conf, * the default value is returned. If the key is present but the value is not a valid boolean string, the result will be false. * * @param conf - the map of configuration properties * @param configProp - the property (key) to get * @param defaultValue - the value to use if the property is missing from the conf * @return a boolean */ static boolean getConfigValueAsBoolean(ServiceConfiguration conf, String configProp, boolean defaultValue) { Object value = conf.getProperty(configProp); if (value instanceof Boolean) { log.info("Configuration for [{}] is [{}]", configProp, value); return (boolean) value; } else if (value instanceof String) { boolean result = Boolean.parseBoolean((String) value); log.info("Configuration for [{}] is [{}]", configProp, result); return result; } else { log.info("Configuration for [{}] is using the default value: [{}]", configProp, defaultValue); return defaultValue; } }
3.68
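Because string values go through Boolean.parseBoolean, anything other than a case-insensitive "true" coerces to false; a quick standalone check of that behavior (the sample values are illustrative):
public class BooleanCoercionCheck {
    public static void main(String[] args) {
        System.out.println(Boolean.parseBoolean("true"));  // true
        System.out.println(Boolean.parseBoolean("TRUE"));  // true (case-insensitive)
        System.out.println(Boolean.parseBoolean("yes"));   // false - not a valid boolean string
        System.out.println(Boolean.parseBoolean("1"));     // false
    }
}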
hadoop_Find_addStop
/** Add the {@link PathData} item to the stop set. */ private void addStop(PathData item) { stopPaths.add(item.path); }
3.68
hbase_MobFileCleanupUtil_cleanupObsoleteMobFiles
/** * Performs housekeeping file cleaning (called by MOB Cleaner chore) * @param conf configuration * @param table table name * @throws IOException exception */ public static void cleanupObsoleteMobFiles(Configuration conf, TableName table, Admin admin) throws IOException { long minAgeToArchive = conf.getLong(MobConstants.MIN_AGE_TO_ARCHIVE_KEY, MobConstants.DEFAULT_MIN_AGE_TO_ARCHIVE); // We check only those MOB files, which creation time is less // than maxCreationTimeToArchive. This is a current time - 1h. 1 hour gap // gives us full confidence that all corresponding store files will // exist at the time cleaning procedure begins and will be examined. // So, if MOB file creation time is greater than this maxTimeToArchive, // this will be skipped and won't be archived. long maxCreationTimeToArchive = EnvironmentEdgeManager.currentTime() - minAgeToArchive; TableDescriptor htd = admin.getDescriptor(table); List<ColumnFamilyDescriptor> list = MobUtils.getMobColumnFamilies(htd); if (list.size() == 0) { LOG.info("Skipping non-MOB table [{}]", table); return; } else { LOG.info("Only MOB files whose creation time older than {} will be archived, table={}", maxCreationTimeToArchive, table); } FileSystem fs = FileSystem.get(conf); Set<String> regionNames = new HashSet<>(); Path rootDir = CommonFSUtils.getRootDir(conf); Path tableDir = CommonFSUtils.getTableDir(rootDir, table); List<Path> regionDirs = FSUtils.getRegionDirs(fs, tableDir); Set<String> allActiveMobFileName = new HashSet<String>(); for (Path regionPath : regionDirs) { regionNames.add(regionPath.getName()); for (ColumnFamilyDescriptor hcd : list) { String family = hcd.getNameAsString(); Path storePath = new Path(regionPath, family); boolean succeed = false; Set<String> regionMobs = new HashSet<String>(); while (!succeed) { if (!fs.exists(storePath)) { String errMsg = String.format("Directory %s was deleted during MOB file cleaner chore" + " execution, aborting MOB file cleaner chore.", storePath); throw new IOException(errMsg); } RemoteIterator<LocatedFileStatus> rit = fs.listLocatedStatus(storePath); List<Path> storeFiles = new ArrayList<Path>(); // Load list of store files first while (rit.hasNext()) { Path p = rit.next().getPath(); if (fs.isFile(p)) { storeFiles.add(p); } } LOG.info("Found {} store files in: {}", storeFiles.size(), storePath); Path currentPath = null; try { for (Path pp : storeFiles) { currentPath = pp; LOG.trace("Store file: {}", pp); HStoreFile sf = null; byte[] mobRefData = null; byte[] bulkloadMarkerData = null; try { sf = new HStoreFile(fs, pp, conf, CacheConfig.DISABLED, BloomType.NONE, true); sf.initReader(); mobRefData = sf.getMetadataValue(HStoreFile.MOB_FILE_REFS); bulkloadMarkerData = sf.getMetadataValue(HStoreFile.BULKLOAD_TASK_KEY); // close store file to avoid memory leaks sf.closeStoreFile(true); } catch (IOException ex) { // When FileBased SFT is active the store dir can contain corrupted or incomplete // files. So read errors are expected. We just skip these files. 
if (ex instanceof FileNotFoundException) { throw ex; } LOG.debug("Failed to get mob data from file: {} due to error.", pp.toString(), ex); continue; } if (mobRefData == null) { if (bulkloadMarkerData == null) { LOG.warn("Found old store file with no MOB_FILE_REFS: {} - " + "can not proceed until all old files will be MOB-compacted.", pp); return; } else { LOG.debug("Skipping file without MOB references (bulkloaded file):{}", pp); continue; } } // file may or may not have MOB references, but was created by the distributed // mob compaction code. try { SetMultimap<TableName, String> mobs = MobUtils.deserializeMobFileRefs(mobRefData).build(); LOG.debug("Found {} mob references for store={}", mobs.size(), sf); LOG.trace("Specific mob references found for store={} : {}", sf, mobs); regionMobs.addAll(mobs.values()); } catch (RuntimeException exception) { throw new IOException("failure getting mob references for hfile " + sf, exception); } } } catch (FileNotFoundException e) { LOG.warn( "Missing file:{} Starting MOB cleaning cycle from the beginning" + " due to error", currentPath, e); regionMobs.clear(); continue; } succeed = true; } // Add MOB references for current region/family allActiveMobFileName.addAll(regionMobs); } // END column families } // END regions // Check if number of MOB files too big (over 1M) if (allActiveMobFileName.size() > 1000000) { LOG.warn("Found too many active MOB files: {}, table={}, " + "this may result in high memory pressure.", allActiveMobFileName.size(), table); } LOG.debug("Found: {} active mob refs for table={}", allActiveMobFileName.size(), table); allActiveMobFileName.stream().forEach(LOG::trace); // Now scan MOB directories and find MOB files with no references to them for (ColumnFamilyDescriptor hcd : list) { checkColumnFamilyDescriptor(conf, table, fs, admin, hcd, regionNames, maxCreationTimeToArchive); } }
3.68
flink_SavepointMetadataV2_getNewOperators
/** * @return List of new operator states for the savepoint, represented by their target {@link * OperatorID} and {@link StateBootstrapTransformation}. */ public List<StateBootstrapTransformationWithID<?>> getNewOperators() { return operatorStateIndex.values().stream() .filter(OperatorStateSpecV2::isNewStateTransformation) .map(OperatorStateSpecV2::asNewStateTransformation) .collect(Collectors.toList()); }
3.68
hudi_SparkRecordMergingUtils_mergePartialRecords
/** * Merges records which can contain partial updates. * <p> * For example, the reader schema is * {[ * {"name":"id", "type":"string"}, * {"name":"ts", "type":"long"}, * {"name":"name", "type":"string"}, * {"name":"price", "type":"double"}, * {"name":"tags", "type":"string"} * ]} * The older and newer records can be (omitting Hudi meta fields): * <p> * (1) older (complete record update): * id | ts | name | price | tags * 1 | 10 | apple | 2.3 | fruit * <p> * newer (partial record update): * ts | price * 16 | 2.8 * <p> * The merging result is (updated values from newer replaces the ones in the older): * <p> * id | ts | name | price | tags * 1 | 16 | apple | 2.8 | fruit * <p> * (2) older (partial record update): * ts | price * 10 | 2.8 * <p> * newer (partial record update): * ts | tag * 16 | fruit,juicy * <p> * The merging result is (two partial updates are merged together, and values of overlapped * fields come from the newer): * <p> * ts | price | tags * 16 | 2.8 | fruit,juicy * * @param older Older {@link HoodieSparkRecord}. * @param oldSchema Schema of the older record. * @param newer Newer {@link HoodieSparkRecord}. * @param newSchema Schema of the newer record. * @param readerSchema Reader schema containing all the fields to read. This is used to maintain * the ordering of the fields of the merged record. * @param props Configuration in {@link TypedProperties}. * @return The merged record and schema. */ public static Pair<HoodieRecord, Schema> mergePartialRecords(HoodieSparkRecord older, Schema oldSchema, HoodieSparkRecord newer, Schema newSchema, Schema readerSchema, TypedProperties props) { // The merged schema contains fields that only appear in either older and/or newer record Pair<Map<Integer, StructField>, Pair<StructType, Schema>> mergedSchemaPair = getCachedMergedSchema(oldSchema, newSchema, readerSchema); boolean isNewerPartial = isPartial(newSchema, mergedSchemaPair.getRight().getRight()); if (isNewerPartial) { InternalRow oldRow = older.getData(); InternalRow newPartialRow = newer.getData(); Map<Integer, StructField> mergedIdToFieldMapping = mergedSchemaPair.getLeft(); Map<String, Integer> oldNameToIdMapping = getCachedFieldNameToIdMapping(oldSchema); Map<String, Integer> newPartialNameToIdMapping = getCachedFieldNameToIdMapping(newSchema); List<Object> values = new ArrayList<>(mergedIdToFieldMapping.size()); for (int fieldId = 0; fieldId < mergedIdToFieldMapping.size(); fieldId++) { StructField structField = mergedIdToFieldMapping.get(fieldId); Integer ordInPartialUpdate = newPartialNameToIdMapping.get(structField.name()); if (ordInPartialUpdate != null) { // The field exists in the newer record; picks the value from newer record values.add(newPartialRow.get(ordInPartialUpdate, structField.dataType())); } else { // The field does not exist in the newer record; picks the value from older record values.add(oldRow.get(oldNameToIdMapping.get(structField.name()), structField.dataType())); } } InternalRow mergedRow = new GenericInternalRow(values.toArray()); HoodieSparkRecord mergedSparkRecord = new HoodieSparkRecord( mergedRow, mergedSchemaPair.getRight().getLeft()); return Pair.of(mergedSparkRecord, mergedSchemaPair.getRight().getRight()); } else { return Pair.of(newer, newSchema); } }
3.68
framework_Window_addResizeListener
/** * Add a resize listener. * * @see Registration * * @param listener * the listener to add, not null * @return a registration object for removing the listener * @since 8.0 */ public Registration addResizeListener(ResizeListener listener) { return addListener(ResizeEvent.class, listener, WINDOW_RESIZE_METHOD); }
3.68
hbase_ColumnFamilyDescriptorBuilder_toByteArray
/** Returns this instance serialized with protobuf, prefixed with the pb magic bytes */ private byte[] toByteArray() { return ProtobufUtil.prependPBMagic(ProtobufUtil.toColumnFamilySchema(this).toByteArray()); }
3.68
framework_Tree_setItemStyleGenerator
/** * Sets the {@link ItemStyleGenerator} to be used with this tree. * * @param itemStyleGenerator * item style generator or null to remove generator */ public void setItemStyleGenerator(ItemStyleGenerator itemStyleGenerator) { if (this.itemStyleGenerator != itemStyleGenerator) { this.itemStyleGenerator = itemStyleGenerator; markAsDirty(); } }
3.68
hbase_HMaster_getMasterCoprocessors
/** Returns array of coprocessor SimpleNames. */ public String[] getMasterCoprocessors() { Set<String> masterCoprocessors = getMasterCoprocessorHost().getCoprocessors(); return masterCoprocessors.toArray(new String[masterCoprocessors.size()]); }
3.68
flink_HsMemoryDataSpiller_spill
/** Called in single-threaded ioExecutor. Order is guaranteed. */ private void spill( List<BufferWithIdentity> toWrite, CompletableFuture<List<SpilledBuffer>> spilledFuture) { try { List<SpilledBuffer> spilledBuffers = new ArrayList<>(); long expectedBytes = createSpilledBuffersAndGetTotalBytes(toWrite, spilledBuffers); // write all buffers to file writeBuffers(toWrite, expectedBytes); // complete spill future when buffers are written to disk successfully. // note that the ownership of these buffers is transferred to the MemoryDataManager, // which controls data's life cycle. spilledFuture.complete(spilledBuffers); } catch (IOException exception) { // if spilling is failed, throw exception directly to uncaughtExceptionHandler. ExceptionUtils.rethrow(exception); } }
3.68
flink_DataStream_writeAsText
/** * Writes a DataStream to the file specified by path in text format. * * <p>For every element of the DataStream the result of {@link Object#toString()} is written. * * @param path The path pointing to the location the text file is written to * @param writeMode Controls the behavior for existing files. Options are NO_OVERWRITE and * OVERWRITE. * @return The closed DataStream. * @deprecated Please use the {@link * org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink} explicitly * using the {@link #addSink(SinkFunction)} method. */ @Deprecated @PublicEvolving public DataStreamSink<T> writeAsText(String path, WriteMode writeMode) { TextOutputFormat<T> tof = new TextOutputFormat<>(new Path(path)); tof.setWriteMode(writeMode); return writeUsingOutputFormat(tof); }
3.68
flink_DataSet_coGroup
/** * Initiates a CoGroup transformation. * * <p>A CoGroup transformation combines the elements of two {@link DataSet DataSets} into one * DataSet. It groups each DataSet individually on a key and gives groups of both DataSets with * equal keys together into a {@link org.apache.flink.api.common.functions.RichCoGroupFunction}. * If a DataSet has a group with no matching key in the other DataSet, the CoGroupFunction is * called with an empty group for the non-existing group. * * <p>The CoGroupFunction can iterate over the elements of both groups and return any number of * elements including none. * * <p>This method returns a {@link CoGroupOperatorSets} on which one of the {@code where} * methods can be called to define the join key of the first joining (i.e., this) DataSet. * * @param other The other DataSet of the CoGroup transformation. * @return A CoGroupOperatorSets to continue the definition of the CoGroup transformation. * @see CoGroupOperatorSets * @see CoGroupOperator * @see DataSet */ public <R> CoGroupOperator.CoGroupOperatorSets<T, R> coGroup(DataSet<R> other) { return new CoGroupOperator.CoGroupOperatorSets<>(this, other); } // -------------------------------------------------------------------------------------------- // Cross // -------------------------------------------------------------------------------------------- /** * Continues a Join transformation and defines the {@link Tuple} fields of the second join * {@link DataSet} that should be used as join keys. * * <p><b>Note: Fields can only be selected as join keys on Tuple DataSets.</b> * * <p>The resulting {@link DefaultJoin} wraps each pair of joining elements into a {@link * Tuple2}
3.68
hadoop_FilePosition_isLastBlock
/** * Determines whether the current block is the last block in this file. * * @return true if the current block is the last block in this file, false otherwise. */ public boolean isLastBlock() { return blockData.isLastBlock(blockNumber()); }
3.68
flink_GuavaFlinkConnectorRateLimiter_setRate
/** * Set the global per consumer and per sub-task rates. * * @param globalRate Value of rate in bytes per second. */ @Override public void setRate(long globalRate) { this.globalRateBytesPerSecond = globalRate; }
3.68
hbase_JVM_getOpenFileDescriptorCount
/** * Get the number of opened filed descriptor for the runtime jvm. If Oracle java, it will use the * com.sun.management interfaces. Otherwise, this methods implements it (linux only). * @return number of open file descriptors for the jvm */ public long getOpenFileDescriptorCount() { Long ofdc; if (!ibmvendor) { ofdc = runUnixMXBeanMethod("getOpenFileDescriptorCount"); return (ofdc != null ? ofdc : -1); } InputStream inputStream = null; InputStreamReader inputStreamReader = null; BufferedReader bufferedReader = null; try { // need to get the PID number of the process first RuntimeMXBean rtmbean = ManagementFactory.getRuntimeMXBean(); String rtname = rtmbean.getName(); Iterator<String> pidhost = Splitter.on('@').split(rtname).iterator(); // using linux bash commands to retrieve info Process p = Runtime.getRuntime() .exec(new String[] { "bash", "-c", "ls /proc/" + pidhost.next() + "/fdinfo | wc -l" }); inputStream = p.getInputStream(); inputStreamReader = new InputStreamReader(inputStream, StandardCharsets.UTF_8); bufferedReader = new BufferedReader(inputStreamReader); String openFileDesCount; if ((openFileDesCount = bufferedReader.readLine()) != null) { return Long.parseLong(openFileDesCount); } } catch (IOException ie) { LOG.warn("Not able to get the number of open file descriptors", ie); } finally { if (bufferedReader != null) { try { bufferedReader.close(); } catch (IOException e) { LOG.warn("Not able to close the BufferedReader", e); } } if (inputStreamReader != null) { try { inputStreamReader.close(); } catch (IOException e) { LOG.warn("Not able to close the InputStreamReader", e); } } if (inputStream != null) { try { inputStream.close(); } catch (IOException e) { LOG.warn("Not able to close the InputStream", e); } } } return -1; }
3.68
querydsl_GeometryExpressions_xmax
/** * Returns X maxima of a bounding box 2d or 3d or a geometry. * * @param expr geometry * @return x maxima */ public static NumberExpression<Double> xmax(GeometryExpression<?> expr) { return Expressions.numberOperation(Double.class, SpatialOps.XMAX, expr); }
3.68
morf_DatabaseSchemaManager_deployView
/** * Deploys the specified view to the database. * * @param view the view to deploy * @return the SQL statements required to deploy the view */ private Collection<String> deployView(View view) { if (log.isDebugEnabled()) log.debug("Deploying view [" + view.getName() + "]"); views.get().put(view.getName().toUpperCase(), SchemaUtils.copy(view)); viewsDeployedByThis.get().add(view.getName().toUpperCase()); return dialect.get().viewDeploymentStatements(view); }
3.68
hbase_ColumnFamilyDescriptorBuilder_parseFrom
/** * Parse the serialized representation of a {@link ModifyableColumnFamilyDescriptor} * @param bytes A pb serialized {@link ModifyableColumnFamilyDescriptor} instance with pb magic * prefix * @return An instance of {@link ModifyableColumnFamilyDescriptor} made from <code>bytes</code> * @see #toByteArray() */ private static ColumnFamilyDescriptor parseFrom(final byte[] bytes) throws DeserializationException { if (!ProtobufUtil.isPBMagicPrefix(bytes)) { throw new DeserializationException("No magic"); } int pblen = ProtobufUtil.lengthOfPBMagic(); ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder(); ColumnFamilySchema cfs = null; try { ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen); cfs = builder.build(); } catch (IOException e) { throw new DeserializationException(e); } return ProtobufUtil.toColumnFamilyDescriptor(cfs); }
3.68
hbase_UserProvider_isHadoopSecurityEnabled
/** * Return whether or not Kerberos authentication is configured for Hadoop. For non-secure Hadoop, * this always returns <code>false</code>. For secure Hadoop, it will return the value from * {@code UserGroupInformation.isSecurityEnabled()}. */ public boolean isHadoopSecurityEnabled() { return User.isSecurityEnabled(); }
3.68
hadoop_MutableQuantiles_setQuantiles
/** * Sets quantileInfo. * * @param ucName capitalized name of the metric * @param uvName capitalized type of the values * @param desc uncapitalized long-form textual description of the metric * @param lvName uncapitalized type of the values * @param pDecimalFormat Number formatter for percentile value */ void setQuantiles(String ucName, String uvName, String desc, String lvName, DecimalFormat pDecimalFormat) { for (int i = 0; i < QUANTILES.length; i++) { double percentile = 100 * QUANTILES[i].quantile; String nameTemplate = ucName + pDecimalFormat.format(percentile) + "thPercentile" + uvName; String descTemplate = pDecimalFormat.format(percentile) + " percentile " + lvName + " with " + getInterval() + " second interval for " + desc; addQuantileInfo(i, info(nameTemplate, descTemplate)); } }
3.68
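A standalone sketch of the names and descriptions the loop above produces; the 50/75/90/95/99 quantile set, the DecimalFormat("0") formatter, the 60-second interval, and the metric names are all assumptions for illustration, not taken from the snippet.
import java.text.DecimalFormat;

public class QuantileNameSketch {
    public static void main(String[] args) {
        double[] quantiles = {0.50, 0.75, 0.90, 0.95, 0.99}; // assumed quantile set
        DecimalFormat fmt = new DecimalFormat("0");          // assumed formatter
        String ucName = "Op", uvName = "Latency", lvName = "latency", desc = "rpc processing time";
        int interval = 60;                                   // assumed rollover interval in seconds
        for (double q : quantiles) {
            double percentile = 100 * q;
            String name = ucName + fmt.format(percentile) + "thPercentile" + uvName;
            String description = fmt.format(percentile) + " percentile " + lvName
                + " with " + interval + " second interval for " + desc;
            // e.g. Op99thPercentileLatency -> 99 percentile latency with 60 second interval for rpc processing time
            System.out.println(name + " -> " + description);
        }
    }
}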
framework_CalendarMonthDropHandler_dragOver
/* * (non-Javadoc) * * @see * com.vaadin.terminal.gwt.client.ui.dd.VAbstractDropHandler#dragOver(com * .vaadin.terminal.gwt.client.ui.dd.VDragEvent) */ @Override public void dragOver(final VDragEvent drag) { if (isLocationValid(drag.getElementOver())) { validate(new VAcceptCallback() { @Override public void accepted(VDragEvent event) { dragAccepted(drag); } }, drag); } }
3.68
aws-saas-boost_ExistingEnvironmentFactory_getExistingSaaSBoostLambdasFolder
// VisibleForTesting static String getExistingSaaSBoostLambdasFolder(SsmClient ssm, String environmentName) { LOGGER.debug("Getting existing SaaS Boost Lambdas folder from Parameter Store"); String lambdasFolder = null; try { GetParameterResponse response = ssm.getParameter(request -> request .name("/saas-boost/" + environmentName + "/SAAS_BOOST_LAMBDAS_FOLDER") ); lambdasFolder = response.parameter().value(); } catch (ParameterNotFoundException paramStoreError) { LOGGER.warn("Parameter /saas-boost/" + environmentName + "/SAAS_BOOST_LAMBDAS_FOLDER not found setting to default 'lambdas'"); lambdasFolder = "lambdas"; } catch (SdkServiceException ssmError) { LOGGER.error("ssm:GetParameter error {}", ssmError.getMessage()); LOGGER.error(Utils.getFullStackTrace(ssmError)); throw ssmError; } LOGGER.info("Loaded Lambdas folder {}", lambdasFolder); return lambdasFolder; }
3.68
hudi_JdbcSource_fullFetch
/** * Does a full scan on the RDBMS data source. * * @return The {@link Dataset} after running full scan. */ private Dataset<Row> fullFetch(long sourceLimit) { final String ppdQuery = "(%s) rdbms_table"; final SqlQueryBuilder queryBuilder = SqlQueryBuilder.select("*") .from(getStringWithAltKeys(props, JdbcSourceConfig.RDBMS_TABLE_NAME)); if (sourceLimit > 0) { URI jdbcURI = URI.create(getStringWithAltKeys(props, JdbcSourceConfig.URL).substring(URI_JDBC_PREFIX.length())); if (DB_LIMIT_CLAUSE.contains(jdbcURI.getScheme())) { if (containsConfigProperty(props, JdbcSourceConfig.INCREMENTAL_COLUMN)) { queryBuilder.orderBy(getStringWithAltKeys(props, JdbcSourceConfig.INCREMENTAL_COLUMN)).limit(sourceLimit); } else { queryBuilder.limit(sourceLimit); } } } String query = String.format(ppdQuery, queryBuilder.toString()); return validatePropsAndGetDataFrameReader(sparkSession, props).option(Config.RDBMS_TABLE_PROP, query).load(); }
3.68
flink_ResultSubpartition_onConsumedSubpartition
/** Notifies the parent partition about a consumed {@link ResultSubpartitionView}. */ protected void onConsumedSubpartition() { parent.onConsumedSubpartition(getSubPartitionIndex()); }
3.68
hadoop_HAState_toString
/** * @return String representation of the service state. */ @Override public String toString() { return state.toString(); }
3.68
flink_BaseMappingExtractor_collectMethodMappings
/** * Extracts mappings from signature to result (either accumulator or output) for the given * method. It considers both global hints for the entire function and local hints just for this * method. * * <p>The algorithm aims to find an input signature for every declared result. If no result is * declared, it will be extracted. If no input signature is declared, it will be extracted. */ private Map<FunctionSignatureTemplate, FunctionResultTemplate> collectMethodMappings( Method method, Set<FunctionTemplate> global, Set<FunctionResultTemplate> globalResultOnly, ResultExtraction resultExtraction, Function<FunctionTemplate, FunctionResultTemplate> accessor) { final Map<FunctionSignatureTemplate, FunctionResultTemplate> collectedMappingsPerMethod = new LinkedHashMap<>(); final Set<FunctionTemplate> local = extractLocalFunctionTemplates(method); final Set<FunctionResultTemplate> localResultOnly = findResultOnlyTemplates(local, accessor); final Set<FunctionTemplate> explicitMappings = findResultMappingTemplates(global, local, accessor); final FunctionResultTemplate resultOnly = findResultOnlyTemplate( globalResultOnly, localResultOnly, explicitMappings, accessor, getHintType()); final Set<FunctionSignatureTemplate> inputOnly = findInputOnlyTemplates(global, local, accessor); // add all explicit mappings because they contain complete signatures putExplicitMappings(collectedMappingsPerMethod, explicitMappings, inputOnly, accessor); // add result only template with explicit or extracted signatures putUniqueResultMappings(collectedMappingsPerMethod, resultOnly, inputOnly, method); // handle missing result by extraction with explicit or extracted signatures putExtractedResultMappings(collectedMappingsPerMethod, inputOnly, resultExtraction, method); return collectedMappingsPerMethod; }
3.68
hbase_SnapshotDescriptionUtils_getSpecifiedSnapshotDir
/** * Get the directory within the given filepath to store the snapshot instance * @param snapshotsDir directory to store snapshot directory within * @param snapshotName name of the snapshot to take * @return the final directory for the snapshot in the given filepath */ private static final Path getSpecifiedSnapshotDir(final Path snapshotsDir, String snapshotName) { return new Path(snapshotsDir, snapshotName); }
3.68
graphhopper_ArrayUtil_calcSortOrder
/** * This method calculates the sort order of the first {@param length} element-pairs given by two arrays. * The order is chosen such that it sorts the element-pairs first by the first array and then by the second array. * The input arrays are not manipulated by this method. * * @param length must not be larger than either of the two input array lengths. * @return an array x of length {@param length}. e.g. if this method returns x = {2, 0, 1} it means that * the element-pair with index 2 comes first in the order and so on */ public static int[] calcSortOrder(final int[] arr1, final int[] arr2, int length) { if (arr1.length < length || arr2.length < length) throw new IllegalArgumentException("Arrays must not be shorter than given length"); IndirectComparator comp = (indexA, indexB) -> { final int arr1cmp = Integer.compare(arr1[indexA], arr1[indexB]); return arr1cmp != 0 ? arr1cmp : Integer.compare(arr2[indexA], arr2[indexB]); }; return IndirectSort.mergesort(0, length, comp); }
3.68
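A usage sketch with a worked result; the com.graphhopper.util.ArrayUtil location is an assumption, but the expected order follows directly from the documented pair-wise comparison.
import java.util.Arrays;

import com.graphhopper.util.ArrayUtil;

public class SortOrderExample {
    public static void main(String[] args) {
        int[] first = {3, 1, 3};
        int[] second = {2, 5, 1};
        // pairs are (3,2), (1,5), (3,1); sorted by first then second: (1,5), (3,1), (3,2)
        int[] order = ArrayUtil.calcSortOrder(first, second, 3);
        System.out.println(Arrays.toString(order)); // [1, 2, 0]
    }
}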
flink_MetricDumpSerialization_deserialize
/** * De-serializes metrics from the given byte array and returns them as a list of {@link * MetricDump}. * * @param data serialized metrics * @return A list containing the deserialized metrics. */ public List<MetricDump> deserialize( MetricDumpSerialization.MetricSerializationResult data) { DataInputView countersInputView = new DataInputDeserializer( data.serializedCounters, 0, data.serializedCounters.length); DataInputView gaugesInputView = new DataInputDeserializer( data.serializedGauges, 0, data.serializedGauges.length); DataInputView metersInputView = new DataInputDeserializer( data.serializedMeters, 0, data.serializedMeters.length); DataInputView histogramsInputView = new DataInputDeserializer( data.serializedHistograms, 0, data.serializedHistograms.length); List<MetricDump> metrics = new ArrayList<>( data.numCounters + data.numGauges + data.numMeters + data.numHistograms); for (int x = 0; x < data.numCounters; x++) { try { metrics.add(deserializeCounter(countersInputView)); } catch (Exception e) { LOG.debug("Failed to deserialize counter.", e); } } for (int x = 0; x < data.numGauges; x++) { try { metrics.add(deserializeGauge(gaugesInputView)); } catch (Exception e) { LOG.debug("Failed to deserialize gauge.", e); } } for (int x = 0; x < data.numMeters; x++) { try { metrics.add(deserializeMeter(metersInputView)); } catch (Exception e) { LOG.debug("Failed to deserialize meter.", e); } } for (int x = 0; x < data.numHistograms; x++) { try { metrics.add(deserializeHistogram(histogramsInputView)); } catch (Exception e) { LOG.debug("Failed to deserialize histogram.", e); } } return metrics; }
3.68
hadoop_TimelineWriteResponse_getErrorCode
/** * Get the error code. * * @return an error code */ @XmlElement(name = "errorcode") public int getErrorCode() { return errorCode; }
3.68
pulsar_ManagedLedgerConfig_setClock
/** * Set clock to use for time operations. * * @param clock the clock to use */ public ManagedLedgerConfig setClock(Clock clock) { this.clock = clock; return this; }
3.68
flink_InputGateDeploymentDescriptor_getConsumedPartitionType
/** * Returns the type of this input channel's consumed result partition. * * @return consumed result partition type */ public ResultPartitionType getConsumedPartitionType() { return consumedPartitionType; }
3.68
hbase_HRegionServer_scheduleAbortTimer
// Limits the time spent in the shutdown process. private void scheduleAbortTimer() { if (this.abortMonitor == null) { this.abortMonitor = new Timer("Abort regionserver monitor", true); TimerTask abortTimeoutTask = null; try { Constructor<? extends TimerTask> timerTaskCtor = Class.forName(conf.get(ABORT_TIMEOUT_TASK, SystemExitWhenAbortTimeout.class.getName())) .asSubclass(TimerTask.class).getDeclaredConstructor(); timerTaskCtor.setAccessible(true); abortTimeoutTask = timerTaskCtor.newInstance(); } catch (Exception e) { LOG.warn("Initialize abort timeout task failed", e); } if (abortTimeoutTask != null) { abortMonitor.schedule(abortTimeoutTask, conf.getLong(ABORT_TIMEOUT, DEFAULT_ABORT_TIMEOUT)); } } }
3.68
hadoop_HdfsFileStatus_feInfo
/** * Set the encryption info for this entity (default = null). * @param feInfo Encryption info * @return This Builder instance */ public Builder feInfo(FileEncryptionInfo feInfo) { this.feInfo = feInfo; return this; }
3.68
shardingsphere-elasticjob_ElasticJobExecutor_execute
/** * Execute job. */ public void execute() { JobConfiguration jobConfig = jobFacade.loadJobConfiguration(true); executorServiceReloader.reloadIfNecessary(jobConfig); jobErrorHandlerReloader.reloadIfNecessary(jobConfig); JobErrorHandler jobErrorHandler = jobErrorHandlerReloader.getJobErrorHandler(); try { jobFacade.checkJobExecutionEnvironment(); } catch (final JobExecutionEnvironmentException cause) { jobErrorHandler.handleException(jobConfig.getJobName(), cause); } ShardingContexts shardingContexts = jobFacade.getShardingContexts(); jobFacade.postJobStatusTraceEvent(shardingContexts.getTaskId(), State.TASK_STAGING, String.format("Job '%s' execute begin.", jobConfig.getJobName())); if (jobFacade.misfireIfRunning(shardingContexts.getShardingItemParameters().keySet())) { jobFacade.postJobStatusTraceEvent(shardingContexts.getTaskId(), State.TASK_FINISHED, String.format( "Previous job '%s' - shardingItems '%s' is still running, misfired job will start after previous job completed.", jobConfig.getJobName(), shardingContexts.getShardingItemParameters().keySet())); return; } try { jobFacade.beforeJobExecuted(shardingContexts); // CHECKSTYLE:OFF } catch (final Throwable cause) { // CHECKSTYLE:ON jobErrorHandler.handleException(jobConfig.getJobName(), cause); } execute(jobConfig, shardingContexts, ExecutionSource.NORMAL_TRIGGER); while (jobFacade.isExecuteMisfired(shardingContexts.getShardingItemParameters().keySet())) { jobFacade.clearMisfire(shardingContexts.getShardingItemParameters().keySet()); execute(jobConfig, shardingContexts, ExecutionSource.MISFIRE); } jobFacade.failoverIfNecessary(); try { jobFacade.afterJobExecuted(shardingContexts); // CHECKSTYLE:OFF } catch (final Throwable cause) { // CHECKSTYLE:ON jobErrorHandler.handleException(jobConfig.getJobName(), cause); } }
3.68
graphhopper_EdgeBasedTarjanSCC_findComponentsForStartEdges
/** * Like {@link #findComponents(Graph, EdgeTransitionFilter, boolean)}, but the search only starts at the * given edges. This does not mean the search cannot expand to other edges, but this can be controlled by the * edgeTransitionFilter. This method does not return single edge components (the excludeSingleEdgeComponents option is * set to true). */ public static ConnectedComponents findComponentsForStartEdges(Graph graph, EdgeTransitionFilter edgeTransitionFilter, IntContainer edges) { return new EdgeBasedTarjanSCC(graph, edgeTransitionFilter, true).findComponentsForStartEdges(edges); }
3.68
hadoop_AbstractRMAdminRequestInterceptor_setNextInterceptor
/** * Sets the {@link RMAdminRequestInterceptor} in the chain. */ @Override public void setNextInterceptor(RMAdminRequestInterceptor nextInterceptor) { this.nextInterceptor = nextInterceptor; } /** * Sets the {@link Configuration}
3.68
cron-utils_FieldDefinition_createFieldDefinitionComparator
/** * Creates a field definition comparator. Will compare by CronFieldName order value. * * @return Comparator for FieldDefinition instances, never null. */ public static Comparator<FieldDefinition> createFieldDefinitionComparator() { return Comparator.comparingInt(o -> o.getFieldName().getOrder()); }
3.68
flink_TimeEvictor_getMaxTimestamp
/** * @param elements The elements currently in the pane. * @return The maximum value of timestamp among the elements. */ private long getMaxTimestamp(Iterable<TimestampedValue<Object>> elements) { long currentTime = Long.MIN_VALUE; for (Iterator<TimestampedValue<Object>> iterator = elements.iterator(); iterator.hasNext(); ) { TimestampedValue<Object> record = iterator.next(); currentTime = Math.max(currentTime, record.getTimestamp()); } return currentTime; }
3.68
hadoop_BondedS3AStatisticsContext_newOutputStreamStatistics
/** * Create a stream output statistics instance. * @return the new instance */ @Override public BlockOutputStreamStatistics newOutputStreamStatistics() { return getInstrumentation() .newOutputStreamStatistics(getInstanceStatistics()); }
3.68
framework_Table_unregisterPropertiesAndComponents
/** * Helper method to remove listeners and maintain correct component * hierarchy. Detaches properties and components if they are no longer * rendered in the client. * * @param oldListenedProperties * set of properties that were listened to in the last render * @param oldVisibleComponents * set of components that were attached in the last render */ private void unregisterPropertiesAndComponents( HashSet<Property<?>> oldListenedProperties, HashSet<Component> oldVisibleComponents) { if (oldVisibleComponents != null) { for (final Component c : oldVisibleComponents) { if (!visibleComponents.contains(c)) { unregisterComponent(c); } } } if (oldListenedProperties != null) { for (final Property<?> p : oldListenedProperties) { Property.ValueChangeNotifier o = (ValueChangeNotifier) p; if (!listenedProperties.contains(o)) { o.removeListener(this); } } } }
3.68
hadoop_PlacementConstraint_nodeAttributes
/** * Node attributes are a set of key:value(s) pairs associated with nodes. */ public PlacementConstraint nodeAttributes( Map<String, List<String>> nodeAttributes) { this.nodeAttributes = nodeAttributes; return this; }
3.68
streampipes_AbstractConfigurablePipelineElementBuilder_requiredTextParameter
/** * Defines a text-based configuration parameter provided by pipeline developers at pipeline authoring time. * * @param label The {@link org.apache.streampipes.sdk.helpers.Label} * that describes why this parameter is needed in a user-friendly manner. * @param multiLine Defines whether the input dialog allows multiple lines. * @param placeholdersSupported Defines whether placeholders are supported, i.e., event property field names that * are replaced with the actual value at pipeline execution time. * @param htmlFontFormat Defines whether the dialog is restricted to bold, italic and strikethrough formatting. * @return this */ public K requiredTextParameter(Label label, boolean multiLine, boolean placeholdersSupported, boolean htmlFontFormat) { FreeTextStaticProperty fsp = prepareFreeTextStaticProperty(label, XSD.STRING.toString()); if (multiLine) { fsp.setMultiLine(true); } if (placeholdersSupported) { fsp.setPlaceholdersSupported(true); } if (htmlFontFormat) { fsp.setHtmlFontFormat(true); } this.staticProperties.add(fsp); return me(); }
3.68
hbase_MultiRowRangeFilter_sortAndMerge
/** * sort the ranges and if the ranges with overlap, then merge them. * @param ranges the list of ranges to sort and merge. * @return the ranges after sort and merge. */ public static List<RowRange> sortAndMerge(List<RowRange> ranges) { if (ranges.isEmpty()) { throw new IllegalArgumentException("No ranges found."); } List<RowRange> invalidRanges = new ArrayList<>(); List<RowRange> newRanges = new ArrayList<>(ranges.size()); Collections.sort(ranges); if (ranges.get(0).isValid()) { if (ranges.size() == 1) { newRanges.add(ranges.get(0)); } } else { invalidRanges.add(ranges.get(0)); } byte[] lastStartRow = ranges.get(0).startRow; boolean lastStartRowInclusive = ranges.get(0).startRowInclusive; byte[] lastStopRow = ranges.get(0).stopRow; boolean lastStopRowInclusive = ranges.get(0).stopRowInclusive; int i = 1; for (; i < ranges.size(); i++) { RowRange range = ranges.get(i); if (!range.isValid()) { invalidRanges.add(range); } if (Bytes.equals(lastStopRow, HConstants.EMPTY_BYTE_ARRAY)) { newRanges.add( new RowRange(lastStartRow, lastStartRowInclusive, lastStopRow, lastStopRowInclusive)); break; } // with overlap in the ranges if ( (Bytes.compareTo(lastStopRow, range.startRow) > 0) || (Bytes.compareTo(lastStopRow, range.startRow) == 0 && !(lastStopRowInclusive == false && range.isStartRowInclusive() == false)) ) { if (Bytes.equals(range.stopRow, HConstants.EMPTY_BYTE_ARRAY)) { newRanges.add(new RowRange(lastStartRow, lastStartRowInclusive, range.stopRow, range.stopRowInclusive)); break; } // if first range contains second range, ignore the second range if (Bytes.compareTo(lastStopRow, range.stopRow) >= 0) { if ((Bytes.compareTo(lastStopRow, range.stopRow) == 0)) { if (lastStopRowInclusive == true || range.stopRowInclusive == true) { lastStopRowInclusive = true; } } if ((i + 1) == ranges.size()) { newRanges.add( new RowRange(lastStartRow, lastStartRowInclusive, lastStopRow, lastStopRowInclusive)); } } else { lastStopRow = range.stopRow; lastStopRowInclusive = range.stopRowInclusive; if ((i + 1) < ranges.size()) { i++; range = ranges.get(i); if (!range.isValid()) { invalidRanges.add(range); } } else { newRanges.add( new RowRange(lastStartRow, lastStartRowInclusive, lastStopRow, lastStopRowInclusive)); break; } while ( (Bytes.compareTo(lastStopRow, range.startRow) > 0) || (Bytes.compareTo(lastStopRow, range.startRow) == 0 && (lastStopRowInclusive == true || range.startRowInclusive == true)) ) { if (Bytes.equals(range.stopRow, HConstants.EMPTY_BYTE_ARRAY)) { break; } // if this first range contain second range, ignore the second range if (Bytes.compareTo(lastStopRow, range.stopRow) >= 0) { if (lastStopRowInclusive == true || range.stopRowInclusive == true) { lastStopRowInclusive = true; } i++; if (i < ranges.size()) { range = ranges.get(i); if (!range.isValid()) { invalidRanges.add(range); } } else { break; } } else { lastStopRow = range.stopRow; lastStopRowInclusive = range.stopRowInclusive; i++; if (i < ranges.size()) { range = ranges.get(i); if (!range.isValid()) { invalidRanges.add(range); } } else { break; } } } if (Bytes.equals(range.stopRow, HConstants.EMPTY_BYTE_ARRAY)) { if ( (Bytes.compareTo(lastStopRow, range.startRow) < 0) || (Bytes.compareTo(lastStopRow, range.startRow) == 0 && lastStopRowInclusive == false && range.startRowInclusive == false) ) { newRanges.add(new RowRange(lastStartRow, lastStartRowInclusive, lastStopRow, lastStopRowInclusive)); newRanges.add(range); } else { newRanges.add(new RowRange(lastStartRow, lastStartRowInclusive, range.stopRow, range.stopRowInclusive)); break; 
} } newRanges.add( new RowRange(lastStartRow, lastStartRowInclusive, lastStopRow, lastStopRowInclusive)); if ((i + 1) == ranges.size()) { newRanges.add(range); } lastStartRow = range.startRow; lastStartRowInclusive = range.startRowInclusive; lastStopRow = range.stopRow; lastStopRowInclusive = range.stopRowInclusive; } } else { newRanges.add( new RowRange(lastStartRow, lastStartRowInclusive, lastStopRow, lastStopRowInclusive)); if ((i + 1) == ranges.size()) { newRanges.add(range); } lastStartRow = range.startRow; lastStartRowInclusive = range.startRowInclusive; lastStopRow = range.stopRow; lastStopRowInclusive = range.stopRowInclusive; } } // check the remaining ranges for (int j = i; j < ranges.size(); j++) { if (!ranges.get(j).isValid()) { invalidRanges.add(ranges.get(j)); } } // if invalid range exists, throw the exception if (invalidRanges.size() != 0) { throwExceptionForInvalidRanges(invalidRanges, true); } // If no valid ranges found, throw the exception if (newRanges.isEmpty()) { throw new IllegalArgumentException("No valid ranges found."); } return newRanges; }
3.68
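A hedged usage sketch of the merge above; sortAndMerge is public static per the snippet, but the String-based RowRange constructor is assumed to exist alongside the byte[] one, and the row keys are illustrative.
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hbase.filter.MultiRowRangeFilter;
import org.apache.hadoop.hbase.filter.MultiRowRangeFilter.RowRange;

public class RowRangeMergeExample {
    public static void main(String[] args) {
        List<RowRange> ranges = Arrays.asList(
            new RowRange("a", true, "c", false),   // [a, c)
            new RowRange("b", true, "e", false));  // [b, e), overlaps the first range
        List<RowRange> merged = MultiRowRangeFilter.sortAndMerge(ranges);
        // the two overlapping ranges collapse into a single [a, e) range
        System.out.println(merged.size()); // 1
    }
}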
flink_TableChange_modifyColumnName
/** * A table change to modify the column name. * * <p>It is equal to the following statement: * * <pre> * ALTER TABLE &lt;table_name&gt; RENAME &lt;old_column_name&gt; TO &lt;new_column_name&gt; * </pre> * * @param oldColumn the definition of the old column. * @param newName the name of the new column. * @return a TableChange represents the modification. */ static ModifyColumnName modifyColumnName(Column oldColumn, String newName) { return new ModifyColumnName(oldColumn, newName); }
3.68
hadoop_HadoopUncaughtExceptionHandler_uncaughtException
/** * Uncaught exception handler. * If an error is raised: shutdown * The state of the system is unknown at this point -attempting * a clean shutdown is dangerous. Instead: exit * @param thread thread that failed * @param exception the raised exception */ @Override public void uncaughtException(Thread thread, Throwable exception) { if (ShutdownHookManager.get().isShutdownInProgress()) { LOG.error("Thread {} threw an error during shutdown: {}.", thread.toString(), exception, exception); } else if (exception instanceof Error) { try { LOG.error("Thread {} threw an error: {}. Shutting down", thread.toString(), exception, exception); } catch (Throwable err) { // We don't want to not exit because of an issue with logging } if (exception instanceof OutOfMemoryError) { // After catching an OOM java says it is undefined behavior, so don't // even try to clean up or we can get stuck on shutdown. try { System.err.println("Halting due to Out Of Memory Error..."); } catch (Throwable err) { // Again we don't want to exit because of logging issues. } ExitUtil.haltOnOutOfMemory((OutOfMemoryError) exception); } else { // error other than OutOfMemory ExitUtil.ExitException ee = ServiceLauncher.convertToExitException(exception); ExitUtil.terminate(ee.status, ee); } } else { // simple exception in a thread. There's a policy decision here: // terminate the process vs. keep going after a thread has failed // base implementation: do nothing but log LOG.error("Thread {} threw an exception: {}", thread.toString(), exception, exception); if (delegate != null) { delegate.uncaughtException(thread, exception); } } }
3.68
querydsl_LuceneExpressions_fuzzyLike
/** * Create a fuzzy query * * @param path path * @param value value to match * @param minimumSimilarity a value between 0 and 1 to set the required similarity * @param prefixLength length of common (non-fuzzy) prefix * @return condition */ public static BooleanExpression fuzzyLike(Path<String> path, String value, float minimumSimilarity, int prefixLength) { Term term = new Term(path.getMetadata().getName(), value); return new QueryElement(new FuzzyQuery(term, minimumSimilarity, prefixLength)); }
3.68
morf_UpgradePath_logUpgradePathSQL
/** * Log out the SQL that forms this upgrade path to a logger of your choice. * * @param logger the logger to use. */ public void logUpgradePathSQL(Log logger) { if (sql.isEmpty()) { logger.info("No upgrade statements to be applied"); } else { logger.info("Upgrade statements:\n" + getUpgradeSqlScript()); } }
3.68
hbase_ModifyPeerProcedure_needReopen
// If the table is in enabling state, we need to wait until it is enabled and then reopen all its // regions. private boolean needReopen(TableStateManager tsm, TableName tn) throws IOException { for (;;) { try { TableState state = tsm.getTableState(tn); if (state.isEnabled()) { return true; } if (!state.isEnabling()) { return false; } Thread.sleep(SLEEP_INTERVAL_MS); } catch (TableNotFoundException e) { return false; } catch (InterruptedException e) { throw (IOException) new InterruptedIOException(e.getMessage()).initCause(e); } } }
3.68
shardingsphere-elasticjob_JobFacade_loadJobConfiguration
/** * Load job configuration. * * @param fromCache load from cache or not * @return job configuration */ public JobConfiguration loadJobConfiguration(final boolean fromCache) { return configService.load(fromCache); }
3.68
flink_DualInputPlanNode_getInput1
/** * Gets the first input channel to this node. * * @return The first input channel to this node. */ public Channel getInput1() { return this.input1; }
3.68
flink_RocksDBMemoryControllerUtils_calculateActualCacheCapacity
/** * Calculate the actual memory capacity of cache, which would be shared among rocksDB * instance(s). We introduce this method because: a) We cannot create a strict capacity limit * cache until FLINK-15532 is resolved. b) Regardless of the memory usage of blocks pinned by * RocksDB iterators, which is difficult to calculate and only happens when we iterate entries * in RocksDBMapState, the overuse of memory is mainly occupied by at most half of the write * buffer usage. (see <a * href="https://github.com/dataArtisans/frocksdb/blob/958f191d3f7276ae59b270f9db8390034d549ee0/include/rocksdb/write_buffer_manager.h#L51">the * flush implementation of write buffer manager</a>). Thus, we have four equations: * write_buffer_manager_memory = 1.5 * write_buffer_manager_capacity; write_buffer_manager_memory * = total_memory_size * write_buffer_ratio; write_buffer_manager_memory + other_part = * total_memory_size; write_buffer_manager_capacity + other_part = cache_capacity. From these we * deduce: cache_capacity = (3 - write_buffer_ratio) * total_memory_size / 3 and * write_buffer_manager_capacity = 2 * total_memory_size * write_buffer_ratio / 3. * * @param totalMemorySize Total off-heap memory size reserved for RocksDB instance(s). * @param writeBufferRatio The ratio of total memory size which would be reserved for write * buffer manager and its over-capacity part. * @return The actual calculated cache capacity. */ @VisibleForTesting public static long calculateActualCacheCapacity(long totalMemorySize, double writeBufferRatio) { return (long) ((3 - writeBufferRatio) * totalMemorySize / 3); }
3.68
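A standalone numeric check of the two formulas documented above (the 300 MB / 0.5 inputs are made up for illustration): with totalMemorySize = 300 MB and writeBufferRatio = 0.5, the write buffer manager gets 150 MB of memory, its capacity is 100 MB, and the shared cache capacity is 250 MB, which satisfies all four stated equations.
public class RocksDBCacheCapacityCheck {
    public static void main(String[] args) {
        long totalMemorySize = 300L * 1024 * 1024; // assumed 300 MB
        double writeBufferRatio = 0.5;             // assumed ratio

        long cacheCapacity = (long) ((3 - writeBufferRatio) * totalMemorySize / 3);
        long writeBufferManagerCapacity = (long) (2 * totalMemorySize * writeBufferRatio / 3);

        // cross-check against the equations in the Javadoc above (values are exact here)
        double writeBufferManagerMemory = 1.5 * writeBufferManagerCapacity;
        System.out.println(writeBufferManagerMemory == totalMemorySize * writeBufferRatio); // true
        double otherPart = totalMemorySize - writeBufferManagerMemory;
        System.out.println(writeBufferManagerCapacity + otherPart == cacheCapacity);        // true
        System.out.println(cacheCapacity / (1024 * 1024));              // 250
        System.out.println(writeBufferManagerCapacity / (1024 * 1024)); // 100
    }
}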
hadoop_FederationStateStoreFacade_getSubClusterResolver
/** * Get the singleton instance of SubClusterResolver. * * @return SubClusterResolver instance */ public SubClusterResolver getSubClusterResolver() { return this.subclusterResolver; }
3.68
hadoop_ManifestCommitterSupport_addHeapInformation
/** * Add heap information to IOStatisticSetters gauges, with a stage in front of every key. * @param ioStatisticsSetters map to update * @param stage stage */ public static void addHeapInformation(IOStatisticsSetters ioStatisticsSetters, String stage) { final long totalMemory = Runtime.getRuntime().totalMemory(); final long freeMemory = Runtime.getRuntime().freeMemory(); final String prefix = "stage."; ioStatisticsSetters.setGauge(prefix + stage + "." + TOTAL_MEMORY, totalMemory); ioStatisticsSetters.setGauge(prefix + stage + "." + FREE_MEMORY, freeMemory); ioStatisticsSetters.setGauge(prefix + stage + "." + HEAP_MEMORY, totalMemory - freeMemory); }
3.68
flink_SingleInputOperator_getOperatorInfo
/** Gets the information about the operators input/output types. */ @Override @SuppressWarnings("unchecked") public UnaryOperatorInformation<IN, OUT> getOperatorInfo() { return (UnaryOperatorInformation<IN, OUT>) this.operatorInfo; }
3.68
hadoop_ApplicationServiceRecordProcessor_initTypeToInfoMapping
/** * Initializes the DNS record type to descriptor mapping based on the * provided service record. * * @param serviceRecord the registry service record. * @throws Exception if an issue is encountered. */ @Override public void initTypeToInfoMapping(ServiceRecord serviceRecord) throws Exception { if (serviceRecord.external.isEmpty()) { LOG.info(serviceRecord.description + ": No external endpoints defined."); return; } for (int type : getRecordTypes()) { switch (type) { case Type.A: createAInfo(serviceRecord); break; case Type.AAAA: createAAAAInfo(serviceRecord); break; case Type.TXT: createTXTInfo(serviceRecord); break; case Type.CNAME: createCNAMEInfo(serviceRecord); break; case Type.SRV: createSRVInfo(serviceRecord); break; default: throw new IllegalArgumentException("Unknown type " + type); } } }
3.68
flink_EmbeddedRocksDBStateBackend_setPredefinedOptions
/** * Sets the predefined options for RocksDB. * * <p>If user-configured options within {@link RocksDBConfigurableOptions} is set (through * flink-conf.yaml) or a user-defined options factory is set (via {@link * #setRocksDBOptions(RocksDBOptionsFactory)}), then the options from the factory are applied on * top of the here specified predefined options and customized options. * * @param options The options to set (must not be null). */ public void setPredefinedOptions(@Nonnull PredefinedOptions options) { predefinedOptions = checkNotNull(options); }
3.68
hbase_RegionLocations_size
/** * Returns the size of the list even if some of the elements might be null. * @return the size of the list (corresponding to the max replicaId) */ public int size() { return locations.length; }
3.68
hbase_CompactionProgress_cancel
/** * Cancels the compaction progress, resetting the compacted and compacting KV counters to 0. */ public void cancel() { this.currentCompactedKVs = this.totalCompactingKVs = 0; }
3.68
flink_FileLock_tryLock
/** * Try to acquire a lock on the locking file. This method returns immediately, whether or not the lock * was acquired. * * @return True if successfully acquired the lock * @throws IOException If the file path is invalid */ public boolean tryLock() throws IOException { if (outputStream == null) { init(); } try { lock = outputStream.getChannel().tryLock(); } catch (Exception e) { return false; } return lock != null; }
3.68
framework_TooltipInfo_setContentMode
/** * Sets the tooltip title's content mode. * * @param contentMode * the content mode to set */ public void setContentMode(ContentMode contentMode) { this.contentMode = contentMode; }
3.68
flink_CheckpointStatsCache_tryGet
/** * Try to look up a checkpoint by its ID in the cache. * * @param checkpointId ID of the checkpoint to look up. * @return The checkpoint or <code>null</code> if the checkpoint is not found. */ public AbstractCheckpointStats tryGet(long checkpointId) { if (cache != null) { return cache.getIfPresent(checkpointId); } else { return null; } }
3.68
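The getIfPresent() call above suggests a Guava-style cache behind the field; the sketch below reproduces the null-guarded lookup with an explicit Guava Cache. The key/value types, the maximum size, and the "disabled when size <= 0" convention are assumptions for illustration only:

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

public class CheckpointCacheSketch {
    private final Cache<Long, String> cache;

    CheckpointCacheSketch(int maxSize) {
        if (maxSize > 0) {
            this.cache = CacheBuilder.newBuilder().maximumSize(maxSize).build();
        } else {
            // a disabled cache is modelled as null, mirroring the guard in the snippet above
            this.cache = null;
        }
    }

    String tryGet(long checkpointId) {
        return cache != null ? cache.getIfPresent(checkpointId) : null;
    }

    public static void main(String[] args) {
        CheckpointCacheSketch sketch = new CheckpointCacheSketch(100);
        sketch.cache.put(42L, "completed checkpoint 42");
        System.out.println(sketch.tryGet(42L)); // completed checkpoint 42
        System.out.println(sketch.tryGet(7L));  // null -> caller would fall back to the history
    }
}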
framework_VFilterSelect_setPrevButtonActive
/**
 * Sets whether the previous page button should be visible and active for the user.
 *
 * @param active
 *            true to enable the button, false to disable it
 */
private void setPrevButtonActive(boolean active) {
    if (enableDebug) {
        debug("VFS.SP: setPrevButtonActive(" + active + ")");
    }
    if (active) {
        DOM.sinkEvents(up, Event.ONCLICK);
        up.setClassName(
                VFilterSelect.this.getStylePrimaryName() + "-prevpage");
    } else {
        DOM.sinkEvents(up, 0);
        up.setClassName(VFilterSelect.this.getStylePrimaryName()
                + "-prevpage-off");
    }
}
3.68
hudi_OptionsResolver_isMultiWriter
/**
 * Returns whether multi-writer is enabled.
 */
public static boolean isMultiWriter(Configuration conf) {
  return WriteConcurrencyMode.supportsMultiWriter(
      conf.getString(HoodieWriteConfig.WRITE_CONCURRENCY_MODE.key(),
          HoodieWriteConfig.WRITE_CONCURRENCY_MODE.defaultValue()));
}
3.68
hbase_LeaseManager_close
/**
 * Shut down this Leases instance. All pending leases will be destroyed, without any cancellation
 * calls.
 */
public void close() {
  this.stopRequested = true;
  leases.clear();
  LOG.info("Closed leases");
}
3.68
pulsar_ManagedLedgerConfig_setMetadataMaxEntriesPerLedger
/**
 * @param metadataMaxEntriesPerLedger
 *            the metadataMaxEntriesPerLedger to set
 */
public ManagedLedgerConfig setMetadataMaxEntriesPerLedger(int metadataMaxEntriesPerLedger) {
    this.metadataMaxEntriesPerLedger = metadataMaxEntriesPerLedger;
    return this;
}
3.68
hudi_HoodieTimeline_makeInflightLogCompactionFileName
// Log compaction action
static String makeInflightLogCompactionFileName(String instantTime) {
  return StringUtils.join(instantTime, HoodieTimeline.INFLIGHT_LOG_COMPACTION_EXTENSION);
}
3.68
hadoop_ApplicationMaster_markCompleted
/**
 * Mark that this application should begin cleaning up and exit.
 */
private void markCompleted() {
  synchronized (completionLock) {
    completed = true;
    completionLock.notify();
  }
}
3.68
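The method above is the notifying half of a classic wait/notify handshake. The waiting side is not shown in the source; the sketch below adds a hypothetical awaitCompletion() counterpart to show how such a flag is typically consumed (the loop guards against spurious wakeups):

public class CompletionLatchSketch {
    private final Object completionLock = new Object();
    private boolean completed = false;

    void markCompleted() {
        synchronized (completionLock) {
            completed = true;
            completionLock.notify();
        }
    }

    // Hypothetical waiting counterpart, not present in the original class.
    void awaitCompletion() throws InterruptedException {
        synchronized (completionLock) {
            while (!completed) { // loop to guard against spurious wakeups
                completionLock.wait();
            }
        }
    }

    public static void main(String[] args) throws InterruptedException {
        CompletionLatchSketch sketch = new CompletionLatchSketch();
        new Thread(sketch::markCompleted).start();
        sketch.awaitCompletion();
        System.out.println("completed");
    }
}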
dubbo_CollectionUtils_isNotEmpty
/**
 * Return {@code true} if the supplied Collection is not {@code null} and not empty.
 * Otherwise, return {@code false}.
 *
 * @param collection the Collection to check
 * @return whether the given Collection is not empty
 */
public static boolean isNotEmpty(Collection<?> collection) {
    return !isEmpty(collection);
}
3.68
hbase_ProcedureUtil_newProcedure
// ==========================================================================
//  Reflection helpers to create/validate a Procedure object
// ==========================================================================
private static Procedure<?> newProcedure(String className) throws BadProcedureException {
  try {
    Class<?> clazz = Class.forName(className);
    if (!Modifier.isPublic(clazz.getModifiers())) {
      throw new Exception("the " + clazz + " class is not public");
    }
    @SuppressWarnings("rawtypes")
    Constructor<? extends Procedure> ctor = clazz.asSubclass(Procedure.class).getConstructor();
    assert ctor != null : "no constructor found";
    if (!Modifier.isPublic(ctor.getModifiers())) {
      throw new Exception("the " + clazz + " constructor is not public");
    }
    return ctor.newInstance();
  } catch (Exception e) {
    throw new BadProcedureException(
      "The procedure class " + className + " must be accessible and have an empty constructor", e);
  }
}
3.68
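The same validate-then-instantiate reflection pattern can be shown without any HBase types. The sketch below targets Runnable instead of Procedure and uses java.lang.Thread purely as a convenient class with a public no-arg constructor; everything else mirrors the checks above:

import java.lang.reflect.Constructor;
import java.lang.reflect.Modifier;

public class ReflectiveFactorySketch {
    // Instantiate className through its public no-arg constructor, mirroring the checks above.
    static Runnable newRunnable(String className) {
        try {
            Class<?> clazz = Class.forName(className);
            if (!Modifier.isPublic(clazz.getModifiers())) {
                throw new IllegalArgumentException(clazz + " is not public");
            }
            Constructor<? extends Runnable> ctor = clazz.asSubclass(Runnable.class).getConstructor();
            return ctor.newInstance();
        } catch (ReflectiveOperationException e) {
            throw new IllegalArgumentException(
                    className + " must be accessible and have a public no-arg constructor", e);
        }
    }

    public static void main(String[] args) {
        // java.lang.Thread is public, implements Runnable and has a public no-arg constructor.
        Runnable r = newRunnable("java.lang.Thread");
        System.out.println("Instantiated " + r.getClass().getName());
    }
}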
morf_DatabaseMetaDataProvider_viewNames
/**
 * @see org.alfasoftware.morf.metadata.Schema#viewNames()
 */
@Override
public Collection<String> viewNames() {
  return viewNames.get().values().stream().map(RealName::getRealName).collect(Collectors.toList());
}
3.68
framework_MarginInfo_getBitMask
/**
 * Returns the current bit mask that makes up the margin settings.
 * <p>
 * This method is for internal use by the framework.
 *
 * @return an integer bit mask
 */
@Deprecated
public int getBitMask() {
    return bitMask;
}
3.68
dubbo_URLParam_addParameters
/**
 * Add parameters to a new URLParam.
 * If a key is already present, its value will be overridden.
 *
 * @param parameters parameters in key-value pairs
 * @return A new URLParam
 */
public URLParam addParameters(Map<String, String> parameters) {
    if (CollectionUtils.isEmptyMap(parameters)) {
        return this;
    }

    boolean hasAndEqual = true;
    Map<String, String> urlParamMap = getParameters();
    for (Map.Entry<String, String> entry : parameters.entrySet()) {
        String value = urlParamMap.get(entry.getKey());
        if (value == null) {
            if (entry.getValue() != null) {
                hasAndEqual = false;
                break;
            }
        } else {
            if (!value.equals(entry.getValue())) {
                hasAndEqual = false;
                break;
            }
        }
    }
    // return immediately if there's no change
    if (hasAndEqual) {
        return this;
    }

    return doAddParameters(parameters, false);
}
3.68
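The interesting part of the snippet above is the hasAndEqual short-circuit: when the merge would not change anything, the same immutable instance is returned and no copy is allocated. A simplified, self-contained sketch of that copy-on-write idiom (class and field names are made up):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

public final class ParamsSketch {
    private final Map<String, String> params;

    ParamsSketch(Map<String, String> params) {
        this.params = Collections.unmodifiableMap(new HashMap<>(params));
    }

    ParamsSketch addParameters(Map<String, String> toAdd) {
        // Same idea as hasAndEqual above: skip the copy when nothing would change.
        boolean unchanged = toAdd.entrySet().stream()
                .allMatch(e -> Objects.equals(params.get(e.getKey()), e.getValue()));
        if (unchanged) {
            return this;
        }
        Map<String, String> merged = new HashMap<>(params);
        merged.putAll(toAdd);
        return new ParamsSketch(merged);
    }

    public static void main(String[] args) {
        ParamsSketch p = new ParamsSketch(Map.of("timeout", "3000"));
        System.out.println(p == p.addParameters(Map.of("timeout", "3000"))); // true, same instance
        System.out.println(p == p.addParameters(Map.of("timeout", "5000"))); // false, new instance
    }
}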
hudi_HiveSchemaUtils_toFlinkType
/**
 * Convert Hive data type to a Flink data type.
 *
 * @param hiveType a Hive data type
 * @return the corresponding Flink data type
 */
public static DataType toFlinkType(TypeInfo hiveType) {
  checkNotNull(hiveType, "hiveType cannot be null");

  switch (hiveType.getCategory()) {
    case PRIMITIVE:
      return toFlinkPrimitiveType((PrimitiveTypeInfo) hiveType);
    case LIST:
      ListTypeInfo listTypeInfo = (ListTypeInfo) hiveType;
      return DataTypes.ARRAY(toFlinkType(listTypeInfo.getListElementTypeInfo()));
    case MAP:
      MapTypeInfo mapTypeInfo = (MapTypeInfo) hiveType;
      return DataTypes.MAP(
          toFlinkType(mapTypeInfo.getMapKeyTypeInfo()),
          toFlinkType(mapTypeInfo.getMapValueTypeInfo()));
    case STRUCT:
      StructTypeInfo structTypeInfo = (StructTypeInfo) hiveType;
      List<String> names = structTypeInfo.getAllStructFieldNames();
      List<TypeInfo> typeInfos = structTypeInfo.getAllStructFieldTypeInfos();
      DataTypes.Field[] fields = new DataTypes.Field[names.size()];
      for (int i = 0; i < fields.length; i++) {
        fields[i] = DataTypes.FIELD(names.get(i), toFlinkType(typeInfos.get(i)));
      }
      return DataTypes.ROW(fields);
    default:
      throw new UnsupportedOperationException(
          String.format("Flink doesn't support Hive data type %s yet.", hiveType));
  }
}
3.68
hbase_HRegionFileSystem_getFamilies
/** Returns the set of families present on disk. */
public Collection<String> getFamilies() throws IOException {
  FileStatus[] fds =
    CommonFSUtils.listStatus(fs, getRegionDir(), new FSUtils.FamilyDirFilter(fs));
  if (fds == null) return null;
  ArrayList<String> families = new ArrayList<>(fds.length);
  for (FileStatus status : fds) {
    families.add(status.getPath().getName());
  }
  return families;
}
3.68
flink_SolutionSetUpdateBarrier_notifySolutionSetUpdate
/** Releases the waiting thread. */
public void notifySolutionSetUpdate() {
    latch.countDown();
}
3.68
AreaShop_GithubUpdateCheck_hasUpdate
/**
 * Check if an update has been found.
 * @return true if an update has been found
 */
public boolean hasUpdate() {
    return hasUpdate;
}
3.68
hbase_HBaseTestingUtility_getRegionSplitStartKeys
/**
 * Create region split keys between startkey and endKey
 * @param numRegions the number of regions to be created. It has to be greater than 3.
 * @return resulting split keys
 */
public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions) {
  if (numRegions <= 3) {
    throw new AssertionError();
  }
  byte[][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
  byte[][] result = new byte[tmpSplitKeys.length + 1][];
  System.arraycopy(tmpSplitKeys, 0, result, 1, tmpSplitKeys.length);
  result[0] = HConstants.EMPTY_BYTE_ARRAY;
  return result;
}
3.68
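To make the shape of the result concrete, here is a standalone sketch that produces numRegions start keys over a plain numeric key space; Long.MIN_VALUE stands in for HConstants.EMPTY_BYTE_ARRAY and the even spacing is only an approximation of HBase's BigInteger-based Bytes.split:

import java.util.Arrays;

public class SplitKeysSketch {
    // Produces numRegions start keys: an "empty" first key plus numRegions - 1 boundaries
    // spread between startKey and endKey (both inclusive).
    static long[] regionSplitStartKeys(long startKey, long endKey, int numRegions) {
        if (numRegions <= 3) {
            throw new IllegalArgumentException("numRegions must be greater than 3");
        }
        long[] result = new long[numRegions];
        result[0] = Long.MIN_VALUE; // stands in for the empty byte array
        long step = (endKey - startKey) / (numRegions - 2);
        for (int i = 1; i < numRegions; i++) {
            result[i] = startKey + (i - 1) * step;
        }
        result[numRegions - 1] = endKey;
        return result;
    }

    public static void main(String[] args) {
        System.out.println(Arrays.toString(regionSplitStartKeys(0, 100, 5)));
        // [-9223372036854775808, 0, 33, 66, 100]
    }
}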
hbase_MasterWalManager_splitLog
/**
 * This method is the base split method that splits WAL files matching a filter. Callers should
 * pass the appropriate filter for meta and non-meta WALs.
 * @param serverNames logs belonging to these servers will be split; this will rename the log
 *                    directory out from under a soft-failed server
 */
public void splitLog(final Set<ServerName> serverNames, PathFilter filter) throws IOException {
  long splitTime = 0, splitLogSize = 0;
  List<Path> logDirs = getLogDirs(serverNames);

  splitLogManager.handleDeadWorkers(serverNames);
  splitTime = EnvironmentEdgeManager.currentTime();
  splitLogSize = splitLogManager.splitLogDistributed(serverNames, logDirs, filter);
  splitTime = EnvironmentEdgeManager.currentTime() - splitTime;

  if (this.metricsMasterFilesystem != null) {
    if (filter == META_FILTER) {
      this.metricsMasterFilesystem.addMetaWALSplit(splitTime, splitLogSize);
    } else {
      this.metricsMasterFilesystem.addSplit(splitTime, splitLogSize);
    }
  }
}
3.68
shardingsphere-elasticjob_JobRegistry_getRegCenter
/**
 * Get registry center.
 *
 * @param jobName job name
 * @return registry center
 */
public CoordinatorRegistryCenter getRegCenter(final String jobName) {
    return regCenterMap.get(jobName);
}
3.68
pulsar_PulsarAdminImpl_sink
/**
 * @return the sinks management object
 * @deprecated in favor of {@link #sinks}
 */
@Deprecated
public Sink sink() {
    return (Sink) sinks;
}
3.68
flink_HsFileDataManager_setup
/** Setup read buffer pool. */
public void setup() {
    bufferPool.initialize();
}
3.68
pulsar_SingletonCleanerListener_jsonSchemaClearCaches
// Call JSONSchema.clearCaches() using reflection to clear up classes held in
// the singleton Jackson ObjectMapper instance of JSONSchema class
private static void jsonSchemaClearCaches() {
    if (JSONSCHEMA_CLEARCACHES_METHOD != null) {
        try {
            JSONSCHEMA_CLEARCACHES_METHOD.invoke(null);
        } catch (IllegalAccessException | InvocationTargetException e) {
            LOG.warn("Cannot clean singleton JSONSchema caches", e);
        }
    }
}
3.68
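The pattern above (resolve a static Method once, invoke it with a null target, and swallow reflection failures) can be demonstrated with the JDK alone. The sketch below uses System.gc() merely because it is a well-known public static no-arg method; the choice of method is an illustrative assumption:

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class StaticInvokeSketch {
    public static void main(String[] args) {
        Method gc = null;
        try {
            // Resolve the static method once, as the listener above does for JSONSchema.clearCaches().
            gc = System.class.getMethod("gc");
        } catch (NoSuchMethodException e) {
            // leave gc as null; the invocation below is then skipped
        }
        if (gc != null) {
            try {
                gc.invoke(null); // null target because the method is static
            } catch (IllegalAccessException | InvocationTargetException e) {
                System.err.println("Cannot invoke: " + e);
            }
        }
    }
}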
framework_VTwinColSelect_getRows
/**
 * Returns the number of visible items for the list boxes.
 *
 * @return the number of items to show
 * @see ListBox#setVisibleItemCount(int)
 */
public int getRows() {
    return rows;
}
3.68
druid_IPRange_parseRange
/**
 * Parse the IP range string representation.
 *
 * @param range String representation of the IP range.
 * @throws IllegalArgumentException Throws this exception if the specified range is not a valid IP network range.
 */
final void parseRange(String range) {
    if (range == null) {
        throw new IllegalArgumentException("Invalid IP range");
    }

    int index = range.indexOf('/');
    String subnetStr = null;
    if (index == -1) {
        ipAddress = new IPAddress(range);
    } else {
        ipAddress = new IPAddress(range.substring(0, index));
        subnetStr = range.substring(index + 1);
    }

    // try to convert the remaining part of the range into a decimal
    // value.
    try {
        if (subnetStr != null) {
            extendedNetworkPrefix = Integer.parseInt(subnetStr);
            if ((extendedNetworkPrefix < 0) || (extendedNetworkPrefix > 32)) {
                throw new IllegalArgumentException("Invalid IP range [" + range + "]");
            }
            ipSubnetMask = computeMaskFromNetworkPrefix(extendedNetworkPrefix);
        }
    } catch (NumberFormatException ex) {
        // the remaining part is not a valid decimal value.
        // Check if it's a decimal-dotted notation.
        ipSubnetMask = new IPAddress(subnetStr);

        // create the corresponding subnet decimal
        extendedNetworkPrefix = computeNetworkPrefixFromMask(ipSubnetMask);
        if (extendedNetworkPrefix == -1) {
            throw new IllegalArgumentException("Invalid IP range [" + range + "]", ex);
        }
    }
}
3.68
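The parser accepts both a prefix form ("10.0.0.0/8") and a dotted-mask form ("10.0.0.0/255.0.0.0"). The sketch below shows the prefix-to-mask conversion that computeMaskFromNetworkPrefix presumably performs; the bit arithmetic is a plausible reconstruction, not Druid's actual implementation:

public class CidrSketch {
    // Builds the dotted-quad subnet mask for a /prefix.
    static String maskFromPrefix(int prefix) {
        if (prefix < 0 || prefix > 32) {
            throw new IllegalArgumentException("Invalid prefix " + prefix);
        }
        long mask = prefix == 0 ? 0 : 0xFFFFFFFFL << (32 - prefix) & 0xFFFFFFFFL;
        return ((mask >> 24) & 0xFF) + "." + ((mask >> 16) & 0xFF) + "."
                + ((mask >> 8) & 0xFF) + "." + (mask & 0xFF);
    }

    public static void main(String[] args) {
        System.out.println(maskFromPrefix(8));   // 255.0.0.0
        System.out.println(maskFromPrefix(24));  // 255.255.255.0
        System.out.println(maskFromPrefix(32));  // 255.255.255.255
    }
}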
querydsl_AbstractSQLInsertClause_executeWithKey
/**
 * Execute the clause and return the generated key cast to the given type.
 * If no rows were created, null is returned, otherwise the key of the first
 * row is returned.
 *
 * @param <T>
 * @param type type of key
 * @return generated key
 */
public <T> T executeWithKey(Class<T> type) {
    return executeWithKey(type, null);
}
3.68
framework_Buffered_getErrorMessage
// Intentional change in compatibility package
@Override
public ErrorMessage getErrorMessage() {
    // no message, only the causes to be painted
    UserError error = new UserError(null);
    // in practice, this was always ERROR in Vaadin 6 unless tweaked in
    // custom exceptions implementing ErrorMessage
    error.setErrorLevel(ErrorLevel.ERROR);
    // causes
    for (Throwable nestedException : getCauses()) {
        error.addCause(AbstractErrorMessage
                .getErrorMessageForException(nestedException));
    }
    return error;
}
3.68
flink_TableFactoryService_extractWildcardPrefixes
/** Converts the prefix of properties with wildcards (e.g., "format.*"). */
private static List<String> extractWildcardPrefixes(List<String> propertyKeys) {
    return propertyKeys.stream()
            .filter(p -> p.endsWith("*"))
            .map(s -> s.substring(0, s.length() - 1))
            .collect(Collectors.toList());
}
3.68
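A tiny runnable sketch of the same stream pipeline on made-up property keys, showing which keys survive the filter and how the trailing '*' is stripped:

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class WildcardPrefixSketch {
    public static void main(String[] args) {
        List<String> propertyKeys =
                Arrays.asList("connector.type", "format.*", "schema.#.name", "format.fields.#.*");
        // Keep only keys ending in '*' and strip the wildcard, as in the snippet above.
        List<String> prefixes = propertyKeys.stream()
                .filter(p -> p.endsWith("*"))
                .map(s -> s.substring(0, s.length() - 1))
                .collect(Collectors.toList());
        System.out.println(prefixes); // [format., format.fields.#.]
    }
}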
hibernate-validator_TypeVariableBindings_getTypeVariableBindings
/**
 * Returns the bindings for all the type variables from the given type's hierarchy. The returned map will contain an
 * entry for each type in the given type's class hierarchy (including interfaces). Each entry is a map from the
 * given type's type variables to the corresponding type variables of the type represented by that entry.
 */
public static Map<Class<?>, Map<TypeVariable<?>, TypeVariable<?>>> getTypeVariableBindings(Class<?> type) {
    Map<Class<?>, Map<TypeVariable<?>, TypeVariable<?>>> allBindings = new HashMap<>();
    Map<TypeVariable<?>, TypeVariable<?>> currentBindings = new HashMap<>();

    TypeVariable<?>[] subTypeParameters = type.getTypeParameters();
    for ( TypeVariable<?> typeVariable : subTypeParameters ) {
        currentBindings.put( typeVariable, typeVariable );
    }
    allBindings.put( type, currentBindings );

    collectTypeBindings( type, allBindings, currentBindings );
    allBindings.put( Object.class, Collections.emptyMap() );

    return CollectionHelper.toImmutableMap( allBindings );
}
3.68
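The kind of mapping this produces can be illustrated with plain JDK reflection. The sketch below defines a hypothetical Box<T> / SubBox<V> hierarchy and prints how the subtype's variable V binds the supertype's T, which is the per-entry mapping described in the Javadoc above:

import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.lang.reflect.TypeVariable;
import java.util.Arrays;

public class TypeVariableSketch {
    static class Box<T> {}
    static class SubBox<V> extends Box<V> {}

    public static void main(String[] args) {
        // Type variables declared directly on the subtype map to themselves.
        System.out.println(Arrays.toString(SubBox.class.getTypeParameters())); // [V]

        // How the subtype binds its supertype's variables: Box<V> means Box.T -> V.
        ParameterizedType superType = (ParameterizedType) SubBox.class.getGenericSuperclass();
        TypeVariable<?>[] superVars = Box.class.getTypeParameters();
        Type[] actualArgs = superType.getActualTypeArguments();
        for (int i = 0; i < superVars.length; i++) {
            System.out.println(superVars[i] + " -> " + actualArgs[i]); // T -> V
        }
    }
}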