Columns:
  name          string  (length 12 to 178)
  code_snippet  string  (length 8 to 36.5k)
  score         float64 (range 3.26 to 3.68)
dubbo_RestRPCInvocationUtil_getInvokerByRequest
/**
 * Get the invoker for the given request.
 *
 * @param request the REST request facade
 * @return the matched invoker
 */
public static Invoker getInvokerByRequest(RequestFacade request) {
    PathMatcher pathMatcher = createPathMatcher(request);
    return getInvoker(pathMatcher, request.getServiceDeployer());
}
3.68
flink_GenericRowData_getField
/**
 * Returns the field value at the given position.
 *
 * <p>Note: The returned value is in internal data structure. See {@link RowData} for more
 * information about internal data structures.
 *
 * <p>The returned field value can be null for representing nullability.
 */
public Object getField(int pos) {
    return this.fields[pos];
}
3.68
hadoop_AbstractS3ACommitter_loadAndRevert
/**
 * Load a pendingset file and revert all of its contents.
 * Invoked within a parallel run; the commitContext thread
 * pool is already busy/possibly full, so do not
 * execute work through the same submitter.
 *
 * @param commitContext context to commit through
 * @param activeCommit  commit state
 * @param status        status of file to load
 * @throws IOException failure
 */
private void loadAndRevert(
    final CommitContext commitContext,
    final ActiveCommit activeCommit,
    final FileStatus status) throws IOException {

  final Path path = status.getPath();
  commitContext.switchToIOStatisticsContext();
  try (DurationInfo ignored =
           new DurationInfo(LOG, false, "Committing %s", path)) {
    PendingSet pendingSet = PersistentCommitData.load(
        activeCommit.getSourceFS(),
        status,
        commitContext.getPendingSetSerializer());
    TaskPool.foreach(pendingSet.getCommits())
        .suppressExceptions(true)
        .run(commitContext::revertCommit);
  }
}
3.68
framework_Calendar_getDayNamesShort
/**
 * Localized display names for week days starting from Sunday. The returned
 * array's length is always 7.
 *
 * @return Array of localized weekday names.
 */
protected String[] getDayNamesShort() {
    DateFormatSymbols s = new DateFormatSymbols(getLocale());
    return Arrays.copyOfRange(s.getWeekdays(), 1, 8);
}
3.68
flink_JavaRecordBuilderFactory_setField
/**
 * Set record field by index. If parameter index mapping is provided, the index is mapped,
 * otherwise it is used as is.
 *
 * @param i index of field to be set
 * @param value field value
 */
void setField(int i, Object value) {
    if (paramIndexMapping != null) {
        args[paramIndexMapping[i]] = value;
    } else {
        args[i] = value;
    }
}
3.68
hudi_HoodiePipeline_sink
/**
 * Returns the data stream sink with given catalog table.
 *
 * @param input        The input datastream
 * @param tablePath    The table path to the hoodie table in the catalog
 * @param catalogTable The hoodie catalog table
 * @param isBounded    A flag indicating whether the input data stream is bounded
 */
private static DataStreamSink<?> sink(DataStream<RowData> input, ObjectIdentifier tablePath,
    ResolvedCatalogTable catalogTable, boolean isBounded) {
  FactoryUtil.DefaultDynamicTableContext context = Utils.getTableContext(
      tablePath, catalogTable, Configuration.fromMap(catalogTable.getOptions()));
  HoodieTableFactory hoodieTableFactory = new HoodieTableFactory();
  return ((DataStreamSinkProvider) hoodieTableFactory.createDynamicTableSink(context)
      .getSinkRuntimeProvider(new SinkRuntimeProviderContext(isBounded)))
      .consumeDataStream(input);
}
3.68
framework_MarginInfo_hasBottom
/**
 * Checks if this MarginInfo object has the bottom edge margin enabled.
 *
 * @return true if bottom edge margin is enabled
 */
public boolean hasBottom() {
    return (bitMask & BOTTOM) == BOTTOM;
}
3.68
graphhopper_CustomModelParser_verifyExpressions
/**
 * This method does the following:
 * 1. checks the user expressions via Parser.parseConditionalExpression and allows only
 *    whitelisted variables and methods
 * 2. while performing this check it also guesses the variable names and stores them in
 *    createObjects
 * 3. creates if-then-elseif expressions from the checks and returns them as BlockStatements
 *
 * @return the created if-then, else and elseif statements
 */
private static List<Java.BlockStatement> verifyExpressions(StringBuilder expressions, String info,
        Set<String> createObjects, List<Statement> list, EncodedValueLookup lookup) throws Exception {
    // allow variables, all encoded values, constants and special variables like in_xyarea or backward_car_access
    NameValidator nameInConditionValidator = name -> lookup.hasEncodedValue(name)
            || name.toUpperCase(Locale.ROOT).equals(name)
            || name.startsWith(IN_AREA_PREFIX)
            || name.startsWith(BACKWARD_PREFIX)
               && lookup.hasEncodedValue(name.substring(BACKWARD_PREFIX.length()));
    parseExpressions(expressions, nameInConditionValidator, info, createObjects, list);
    return new Parser(new org.codehaus.janino.Scanner(info, new StringReader(expressions.toString())))
            .parseBlockStatements();
}
3.68
dubbo_AbstractInterfaceBuilder_local
/**
 * @param local the local stub flag
 * @see AbstractInterfaceBuilder#stub(Boolean)
 * @deprecated Replaced by <code>stub(Boolean)</code>
 */
@Deprecated
public B local(Boolean local) {
    if (local != null) {
        this.local = local.toString();
    } else {
        this.local = null;
    }
    return getThis();
}
3.68
flink_Deadline_hasTimeLeft
/** Returns whether there is any time left between the deadline and now. */
public boolean hasTimeLeft() {
    return !isOverdue();
}
3.68
flink_BulkIterationBase_getBroadcastInputs
/**
 * The BulkIteration meta operator cannot have broadcast inputs.
 *
 * @return An empty map.
 */
public Map<String, Operator<?>> getBroadcastInputs() {
    return Collections.emptyMap();
}
3.68
hbase_ZKWatcher_checkACLForSuperUsers
/*
 * Validate whether ACLs are set for all superusers.
 */
private boolean checkACLForSuperUsers(String[] superUsers, List<ACL> acls) {
  for (String user : superUsers) {
    boolean hasAccess = false;
    // TODO: Validate super group members also when ZK supports setting node ACL for groups.
    if (!AuthUtil.isGroupPrincipal(user)) {
      for (ACL acl : acls) {
        if (user.equals(acl.getId().getId())) {
          if (acl.getPerms() == Perms.ALL) {
            hasAccess = true;
          } else {
            if (LOG.isDebugEnabled()) {
              LOG.debug(String.format(
                "superuser '%s' does not have correct permissions: have 0x%x, want 0x%x",
                acl.getId().getId(), acl.getPerms(), Perms.ALL));
            }
          }
          break;
        }
      }
      if (!hasAccess) {
        return false;
      }
    }
  }
  return true;
}
3.68
framework_CalendarMonthDropHandler_deEmphasis
/**
 * Removes the emphasis CSS style name from the currently emphasized day.
 */
private void deEmphasis() {
    if (currentTargetElement != null && currentTargetDay != null) {
        currentTargetDay.removeEmphasisStyle();
        currentTargetElement = null;
    }
}
3.68
rocketmq-connect_AvroData_toConnectData
/**
 * Convert the given object, in Avro format, into a Connect data object.
 *
 * @param avroSchema the Avro schema
 * @param value      the value to convert into a Connect data object
 * @param version    the version to set on the Connect schema if the avroSchema does not have a
 *                   property named "connect.version", may be null
 * @return the Connect schema and value
 */
public SchemaAndValue toConnectData(org.apache.avro.Schema avroSchema, Object value, Integer version) {
    if (value == null) {
        return null;
    }
    ToConnectContext toConnectContext = new ToConnectContext();
    Schema schema = (avroSchema.equals(ANYTHING_SCHEMA))
        ? null : toConnectSchema(avroSchema, version, toConnectContext);
    return new SchemaAndValue(schema, toConnectData(schema, value, toConnectContext));
}
3.68
framework_Embedded_getCodebase
/**
 * This attribute specifies the base path used to resolve relative URIs
 * specified by the classid, data, and archive attributes. When absent, its
 * default value is the base URI of the current document.
 *
 * @return the code base.
 */
public String getCodebase() {
    return getState(false).codebase;
}
3.68
flink_CliUtils_getSessionTimeZone
/** Get time zone from the given session config. */
public static ZoneId getSessionTimeZone(ReadableConfig sessionConfig) {
    final String zone = sessionConfig.get(TableConfigOptions.LOCAL_TIME_ZONE);
    return TableConfigOptions.LOCAL_TIME_ZONE.defaultValue().equals(zone)
            ? ZoneId.systemDefault()
            : ZoneId.of(zone);
}
3.68
morf_ChangeColumn_getFromColumn
/**
 * Gets the column definition prior to the change.
 *
 * @return the column definition prior to the change
 */
public Column getFromColumn() {
    return fromColumn;
}
3.68
hadoop_LeveldbIterator_next
/**
 * Returns the next element in the iteration.
 *
 * @return the next element in the iteration.
 * @throws DBException DB Exception.
 */
@Override
public Map.Entry<byte[], byte[]> next() throws DBException {
    try {
        return iter.next();
    } catch (DBException e) {
        throw e;
    } catch (RuntimeException e) {
        throw new DBException(e.getMessage(), e);
    }
}
3.68
Activiti_ExecutionTreeStringBuilder_toString
/* See http://stackoverflow.com/questions/4965335/how-to-print-binary-tree-diagram */
@Override
public String toString() {
    StringBuilder strb = new StringBuilder();
    strb.append(executionEntity.getId()).append(" : ")
        .append(executionEntity.getActivityId())
        .append(", parent id ")
        .append(executionEntity.getParentId())
        .append("\r\n");
    List<? extends ExecutionEntity> children = executionEntity.getExecutions();
    if (children != null) {
        for (ExecutionEntity childExecution : children) {
            internalToString(childExecution, strb, "", true);
        }
    }
    return strb.toString();
}
3.68
flink_RocksDBNativeMetricOptions_enableBlockCacheUsage
/** Enables reporting of the memory size for the entries residing in block cache. */
public void enableBlockCacheUsage() {
    this.properties.add(RocksDBProperty.BlockCacheUsage.getRocksDBProperty());
}
3.68
hadoop_OBSCommonUtils_qualify
/**
 * Qualify a path.
 *
 * @param owner the owner OBSFileSystem instance
 * @param path  path to qualify
 * @return a qualified path.
 */
static Path qualify(final OBSFileSystem owner, final Path path) {
    return path.makeQualified(owner.getUri(), owner.getWorkingDirectory());
}
3.68
framework_VAbstractCalendarPanel_processClickEvent
/**
 * Handles a user click on the component.
 *
 * @param sender
 *            The component that was clicked
 */
private void processClickEvent(Widget sender) {
    if (!isEnabled() || isReadonly()) {
        return;
    }
    if (sender == prevYear) {
        focusPreviousYear(1);
    } else if (sender == nextYear) {
        focusNextYear(1);
    } else if (sender == prevMonth) {
        focusPreviousMonth();
    } else if (sender == nextMonth) {
        focusNextMonth();
    }
}
3.68
AreaShop_FileManager_getRents
/**
 * Get all rental regions.
 *
 * @return List of all rental regions
 */
public List<RentRegion> getRents() {
    List<RentRegion> result = new ArrayList<>();
    for (GeneralRegion region : regions.values()) {
        if (region instanceof RentRegion) {
            result.add((RentRegion) region);
        }
    }
    return result;
}
3.68
graphhopper_StopWatch_getCurrentSeconds
/**
 * Returns the total elapsed time on this stopwatch without the need to stop it.
 */
public float getCurrentSeconds() {
    if (notStarted()) {
        return 0;
    }
    long lastNanos = lastTime < 0 ? 0 : System.nanoTime() - lastTime;
    return (elapsedNanos + lastNanos) / 1e9f;
}
3.68
graphhopper_VectorTile_clearSintValue
/**
 * <code>optional sint64 sint_value = 6;</code>
 */
public Builder clearSintValue() {
    bitField0_ = (bitField0_ & ~0x00000020);
    sintValue_ = 0L;
    onChanged();
    return this;
}
3.68
hadoop_RawErasureEncoder_preferDirectBuffer
/**
 * Tell if a direct buffer is preferred or not. It's for callers to
 * decide how to allocate coding chunk buffers, using DirectByteBuffer or
 * byte arrays. Returns false by default.
 *
 * @return true if a native buffer is preferred for performance consideration,
 *         otherwise false.
 */
public boolean preferDirectBuffer() {
    return false;
}
3.68
morf_SqlDialect_fetchSizeForBulkSelectsAllowingConnectionUseDuringStreaming
/** * When using a "streaming" {@link ResultSet} (i.e. any where the fetch size indicates that fewer * than all the records should be returned at a time), MySQL does not permit the connection * to be used for anything else. Therefore we have an alternative fetch size here specifically * for the scenario where this is unavoidable. * * <p>In practice this returns the same value except for on MySQL, where we use it to * effectively disable streaming if we know the connection will be used. This means * certain types of processing are liable to cause high memory usage on MySQL.</p> * * @return The number of rows to try and fetch at a time (default) when * performing bulk select operations and needing to use the connection while * the {@link ResultSet} is open. * @see #fetchSizeForBulkSelects() */ public int fetchSizeForBulkSelectsAllowingConnectionUseDuringStreaming() { return fetchSizeForBulkSelects(); }
3.68
flink_ClusterEntrypointUtils_createJobManagerWorkingDirectory
/**
 * Creates the working directory for the JobManager process. This method ensures that the
 * working directory exists.
 *
 * @param configuration to extract the required settings from
 * @param envelopedResourceId identifying the JobManager process
 * @return working directory
 * @throws IOException if the working directory could not be created
 */
public static DeterminismEnvelope<WorkingDirectory> createJobManagerWorkingDirectory(
        Configuration configuration, DeterminismEnvelope<ResourceID> envelopedResourceId)
        throws IOException {
    return envelopedResourceId.map(
            resourceId ->
                    WorkingDirectory.create(
                            generateJobManagerWorkingDirectoryFile(configuration, resourceId)));
}
3.68
hudi_ConsistentBucketIdentifier_getLatterBucket
/**
 * Get the latter node of the given node (inferred from the hash value).
 */
public ConsistentHashingNode getLatterBucket(int hashValue) {
    SortedMap<Integer, ConsistentHashingNode> tailMap = ring.tailMap(hashValue, false);
    return tailMap.isEmpty() ? ring.firstEntry().getValue() : tailMap.get(tailMap.firstKey());
}
3.68
hadoop_FileSystemNodeLabelsStore_recover
/* (non-Javadoc)
 * @see org.apache.hadoop.yarn.nodelabels.NodeLabelsStore#recover(boolean)
 */
@Override
public void recover() throws YarnException, IOException {
    super.recoverFromStore();
}
3.68
flink_ExceptionUtils_updateDetailMessage
/**
 * Updates error messages of Throwables appearing in the cause tree of the passed root
 * Throwable. The passed Function is applied on each Throwable of the cause tree. Returning a
 * String will cause the detailMessage of the corresponding Throwable to be updated. Returning
 * <code>null</code>, instead, won't trigger any detailMessage update on that Throwable.
 *
 * @param root The Throwable whose cause tree shall be traversed.
 * @param throwableToMessage The Function based on which the new messages are generated. The
 *     function implementation should return the new message. Returning <code>null</code>, in
 *     contrast, will result in not updating the message for the corresponding Throwable.
 */
public static void updateDetailMessage(
        @Nullable Throwable root, @Nullable Function<Throwable, String> throwableToMessage) {
    if (throwableToMessage == null) {
        return;
    }

    Throwable it = root;
    while (it != null) {
        String newMessage = throwableToMessage.apply(it);
        if (newMessage != null) {
            updateDetailMessageOfThrowable(it, newMessage);
        }
        it = it.getCause();
    }
}
3.68
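The cause-tree walk described in the updateDetailMessage snippet above is simply a loop over Throwable#getCause(). A minimal standalone sketch of that traversal pattern (the class name and the sample exception chain are illustrative, not part of the dataset):

public class CauseTreeDemo {
    public static void main(String[] args) {
        // A three-level cause chain, mirroring the tree the snippet traverses.
        Throwable root = new RuntimeException("top",
                new IllegalStateException("middle",
                        new java.io.IOException("bottom")));
        // Walk the chain exactly as updateDetailMessage does: visit each
        // Throwable in turn until getCause() returns null.
        for (Throwable it = root; it != null; it = it.getCause()) {
            System.out.println(it.getClass().getSimpleName() + ": " + it.getMessage());
        }
    }
}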
framework_MonthGrid_setRangeSelect
/**
 * Enables or disables the possibility to select ranges.
 *
 * @param b true to enable range selection, false to disable it
 */
public void setRangeSelect(boolean b) {
    rangeSelectDisabled = !b;
}
3.68
hbase_Segment_isEmpty
/** Returns whether the segment is empty, i.e. contains no cells. */
public boolean isEmpty() {
    return getCellSet().isEmpty();
}
3.68
flink_FunctionLookup_lookupBuiltInFunction
/** Helper method for looking up a built-in function. */
default ContextResolvedFunction lookupBuiltInFunction(BuiltInFunctionDefinition definition) {
    return lookupFunction(UnresolvedIdentifier.of(definition.getName()))
            .orElseThrow(
                    () -> new TableException(
                            String.format(
                                    "Required built-in function [%s] could not be found in any catalog.",
                                    definition.getName())));
}
3.68
morf_AbstractDatabaseType_split
/**
 * Splits a string using the pattern specified, keeping the delimiters in between.
 *
 * @param text the text to split.
 * @param delimiterPattern the regex pattern to use as the delimiter.
 * @return a list of strings made up of the parts and their delimiters.
 */
private Stack<String> split(String text, String delimiterPattern) {
    if (text == null) {
        throw new IllegalArgumentException("You must supply some text to split");
    }
    if (delimiterPattern == null) {
        throw new IllegalArgumentException("You must supply a pattern to match on");
    }

    Pattern pattern = Pattern.compile(delimiterPattern);
    int lastMatch = 0;
    Stack<String> splitted = new Stack<>();

    Matcher m = pattern.matcher(text);

    // Iterate through each match
    while (m.find()) {
        // Text since last match
        splitted.add(text.substring(lastMatch, m.start()));
        // The delimiter itself
        splitted.add(m.group());
        lastMatch = m.end();
    }

    // Trailing text
    splitted.add(text.substring(lastMatch));
    Collections.reverse(splitted);
    return splitted;
}
3.68
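The keep-the-delimiters idiom used by AbstractDatabaseType.split above can be tried in isolation; a minimal sketch, where the class name and sample input are my own and a Deque stands in for the Stack (without the final reversal):

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class SplitKeepingDelimiters {
    // Splits text on a regex while keeping each delimiter as its own token.
    static Deque<String> split(String text, String delimiterPattern) {
        Matcher m = Pattern.compile(delimiterPattern).matcher(text);
        Deque<String> parts = new ArrayDeque<>();
        int lastMatch = 0;
        while (m.find()) {
            parts.add(text.substring(lastMatch, m.start())); // text before the delimiter
            parts.add(m.group());                            // the delimiter itself
            lastMatch = m.end();
        }
        parts.add(text.substring(lastMatch));                // trailing text
        return parts;
    }

    public static void main(String[] args) {
        System.out.println(split("a,b;c", "[,;]")); // prints [a, ,, b, ;, c]
    }
}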
hbase_SnapshotInfo_getMissingStoreFilesCount
/** Returns the number of missing store files. */
public int getMissingStoreFilesCount() {
    return hfilesMissing.get();
}
3.68
graphhopper_GraphHopper_getBaseGraph
/**
 * The underlying graph used in algorithms.
 *
 * @throws IllegalStateException if the graph is not instantiated.
 */
public BaseGraph getBaseGraph() {
    if (baseGraph == null)
        throw new IllegalStateException("GraphHopper storage not initialized");
    return baseGraph;
}
3.68
pulsar_RuntimeUtils_splitRuntimeArgs
/**
 * Regex for splitting a string using space when not surrounded by single or double quotes.
 */
public static String[] splitRuntimeArgs(String input) {
    return input.split("\\s(?=([^\"]*\"[^\"]*\")*[^\"]*$)");
}
3.68
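The regex in RuntimeUtils.splitRuntimeArgs above relies on a lookahead that counts the double quotes remaining after each whitespace character: a space is a split point only when an even number of quotes follows it, i.e. when it sits outside a quoted section. A small self-contained check (the input string is made up for illustration):

public class QuoteAwareSplit {
    public static void main(String[] args) {
        String input = "--name \"my function\" --parallelism 2";
        // Split on whitespace only when an even number of double quotes
        // remains to the right, i.e. outside any quoted section.
        String[] parts = input.split("\\s(?=([^\"]*\"[^\"]*\")*[^\"]*$)");
        for (String p : parts) {
            System.out.println(p);
        }
        // Prints: --name / "my function" / --parallelism / 2
    }
}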
morf_ConnectionResourcesBean_getDatabaseName
/**
 * @see org.alfasoftware.morf.jdbc.ConnectionResources#getDatabaseName()
 */
@Override
public String getDatabaseName() {
    return databaseName;
}
3.68
hadoop_NativeRuntime_registerLibrary
/**
 * Register a customized library.
 */
public synchronized static long registerLibrary(String libraryName, String clazz) {
    assertNativeLibraryLoaded();
    final long ret = JNIRegisterModule(libraryName.getBytes(StandardCharsets.UTF_8),
        clazz.getBytes(StandardCharsets.UTF_8));
    if (ret != 0) {
        LOG.warn("Can't create NativeObject for class " + clazz + ", it probably does not exist.");
    }
    return ret;
}
3.68
flink_TopologyGraph_makeAsFarAs
/**
 * Make the distance of node A at least as far as node B by adding edges from all inputs of node
 * B to node A.
 */
void makeAsFarAs(ExecNode<?> a, ExecNode<?> b) {
    TopologyNode nodeA = getOrCreateTopologyNode(a);
    TopologyNode nodeB = getOrCreateTopologyNode(b);
    for (TopologyNode input : nodeB.inputs) {
        link(input.execNode, nodeA.execNode);
    }
}
3.68
graphhopper_OSMInputFile_setWorkerThreads
/**
 * Currently only for pbf format. Default is number of cores.
 */
public OSMInputFile setWorkerThreads(int threads) {
    workerThreads = threads;
    return this;
}
3.68
zxing_Result_getResultPoints
/**
 * @return points related to the barcode in the image. These are typically points
 *         identifying finder patterns or the corners of the barcode. The exact meaning is
 *         specific to the type of barcode that was decoded.
 */
public ResultPoint[] getResultPoints() {
    return resultPoints;
}
3.68
hbase_QuotaObserverChore_getTableQuotaSnapshot
/**
 * Fetches the {@link SpaceQuotaSnapshot} for the given table.
 */
SpaceQuotaSnapshot getTableQuotaSnapshot(TableName table) {
    SpaceQuotaSnapshot state = this.tableQuotaSnapshots.get(table);
    if (state == null) {
        // No tracked state implies observance.
        return QuotaSnapshotStore.NO_QUOTA;
    }
    return state;
}
3.68
flink_ProjectOperator_projectTuple21
/**
 * Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
 *
 * @return The projected DataSet.
 * @see Tuple
 * @see DataSet
 */
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20>
        ProjectOperator<T, Tuple21<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20>>
        projectTuple21() {
    TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
    TupleTypeInfo<Tuple21<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20>>
            tType = new TupleTypeInfo<>(fTypes);
    return new ProjectOperator<>(this.ds, this.fieldIndexes, tType);
}
3.68
pulsar_ManagedLedgerInterceptor_afterFailedAddEntry
/**
 * Intercepts the case when adding an entry failed.
 *
 * @param numberOfMessages the number of messages in the failed add operation
 */
default void afterFailedAddEntry(int numberOfMessages) {
}
3.68
flink_FlinkRelMdCollation_calc
/** Helper method to determine a {@link org.apache.calcite.rel.core.Calc}'s collation. */
public static List<RelCollation> calc(RelMetadataQuery mq, RelNode input, RexProgram program) {
    final List<RexNode> projects =
            program.getProjectList().stream()
                    .map(program::expandLocalRef)
                    .collect(Collectors.toList());
    return project(mq, input, projects);
}
3.68
hbase_BlockCacheUtil_toStringMinusFileName
/** Returns the block content of <code>cb</code> as a String, minus the filename. */
public static String toStringMinusFileName(final CachedBlock cb, final long now) {
    return "offset=" + cb.getOffset() + ", size=" + cb.getSize()
        + ", age=" + (now - cb.getCachedTime()) + ", type=" + cb.getBlockType()
        + ", priority=" + cb.getBlockPriority();
}
3.68
dubbo_TriHttp2RemoteFlowController_channelWritabilityChange
/**
 * Called when the writability of the underlying channel changes.
 *
 * @throws Http2Exception If a write occurs and an exception happens in the write operation.
 */
void channelWritabilityChange() throws Http2Exception {
}
3.68
hmily_ConsulClient_setConsul
/**
 * Set consul.
 *
 * @param consul consul client
 */
public void setConsul(final Consul consul) {
    this.consul = consul;
}
3.68
hadoop_ManifestCommitterSupport_buildJobUUID
/**
 * Build a job UUID from the job conf (if it contains
 * {@link ManifestCommitterConstants#SPARK_WRITE_UUID})
 * or from the MR job ID.
 *
 * @param conf  job/task configuration
 * @param jobId job ID from YARN or Spark.
 * @return (a job ID, source)
 */
public static Pair<String, String> buildJobUUID(Configuration conf, JobID jobId) {
    String jobUUID = conf.getTrimmed(SPARK_WRITE_UUID, "");
    if (jobUUID.isEmpty()) {
        jobUUID = jobId.toString();
        return Pair.of(jobUUID, JOB_ID_SOURCE_MAPREDUCE);
    } else {
        return Pair.of(jobUUID, SPARK_WRITE_UUID);
    }
}
3.68
querydsl_DateTimeExpression_milliSecond
/**
 * Create a milliseconds expression (range 0-999).
 * <p>Is always 0 in the HQL and JDOQL modules.</p>
 *
 * @return milliseconds
 */
public NumberExpression<Integer> milliSecond() {
    if (milliseconds == null) {
        milliseconds = Expressions.numberOperation(Integer.class, Ops.DateTimeOps.MILLISECOND, mixin);
    }
    return milliseconds;
}
3.68
rocketmq-connect_MetricsReporter_onCounterRemoved
/**
 * Called when a {@link Counter} is removed from the registry.
 *
 * @param name the counter's name
 */
public void onCounterRemoved(String name) {
    this.onCounterRemoved(MetricUtils.stringToMetricName(name));
}
3.68
zxing_ResultHandler_launchIntent
/**
 * Like {@link #rawLaunchIntent(Intent)} but will show a user dialog if nothing is available to handle it.
 */
final void launchIntent(Intent intent) {
    try {
        rawLaunchIntent(intent);
    } catch (ActivityNotFoundException ignored) {
        AlertDialog.Builder builder = new AlertDialog.Builder(activity);
        builder.setTitle(R.string.app_name);
        builder.setMessage(R.string.msg_intent_failed);
        builder.setPositiveButton(R.string.button_ok, null);
        builder.show();
    }
}
3.68
morf_SchemaChangeSequence_removeColumn
/**
 * @see org.alfasoftware.morf.upgrade.SchemaEditor#removeColumn(java.lang.String, org.alfasoftware.morf.metadata.Column)
 */
@Override
public void removeColumn(String tableName, Column definition) {
    RemoveColumn removeColumn = new RemoveColumn(tableName, definition);
    visitor.visit(removeColumn);
    schemaAndDataChangeVisitor.visit(removeColumn);
}
3.68
flink_TypeHint_getTypeInfo
/**
 * Gets the type information described by this TypeHint.
 *
 * @return The type information described by this TypeHint.
 */
public TypeInformation<T> getTypeInfo() {
    return typeInfo;
}
3.68
hbase_PrivateCellUtil_isDelete
/**
 * Return true if a delete type: a {@link KeyValue.Type#Delete}, a {@link KeyValue.Type#DeleteFamily}
 * or a {@link KeyValue.Type#DeleteColumn} KeyValue type.
 */
public static boolean isDelete(final byte type) {
    return KeyValue.Type.Delete.getCode() <= type && type <= KeyValue.Type.DeleteFamily.getCode();
}
3.68
hudi_HoodieMetaserverBasedTimeline_getInstantFileName
/**
 * Completion time is essential for {@link HoodieActiveTimeline}.
 * TODO [HUDI-6883] We should change HoodieMetaserverBasedTimeline to store completion time as well.
 */
@Override
protected String getInstantFileName(HoodieInstant instant) {
    if (instant.isCompleted()) {
        // Set a fake completion time.
        return instant.getFileName("0").replace("_0", "");
    }
    return instant.getFileName();
}
3.68
hbase_ZKProcedureCoordinator_abort
/**
 * Receive a notification and propagate it to the local coordinator.
 *
 * @param abortNode full znode path to the failed procedure information
 */
protected void abort(String abortNode) {
    String procName = ZKUtil.getNodeName(abortNode);
    ForeignException ee = null;
    try {
        byte[] data = ZKUtil.getData(zkProc.getWatcher(), abortNode);
        if (data == null || data.length == 0) {
            // ignore
            return;
        } else if (!ProtobufUtil.isPBMagicPrefix(data)) {
            LOG.warn("Got an error notification for op:" + abortNode
                + " but we can't read the information. Killing the procedure.");
            // we got a remote exception, but we can't describe it
            ee = new ForeignException(coordName,
                "Data in abort node is illegally formatted. ignoring content.");
        } else {
            data = Arrays.copyOfRange(data, ProtobufUtil.lengthOfPBMagic(), data.length);
            ee = ForeignException.deserialize(data);
        }
    } catch (IOException e) {
        LOG.warn("Got an error notification for op:" + abortNode
            + " but we can't read the information. Killing the procedure.");
        // we got a remote exception, but we can't describe it
        ee = new ForeignException(coordName, e);
    } catch (KeeperException e) {
        coordinator.rpcConnectionFailure(
            "Failed to get data for abort node:" + abortNode + zkProc.getAbortZnode(),
            new IOException(e));
    } catch (InterruptedException e) {
        coordinator.rpcConnectionFailure(
            "Failed to get data for abort node:" + abortNode + zkProc.getAbortZnode(),
            new IOException(e));
        Thread.currentThread().interrupt();
    }
    coordinator.abortProcedure(procName, ee);
}
3.68
cron-utils_FieldDayOfWeekDefinitionBuilder_withValidRange
/**
 * Allows to set a range of valid values for the field.
 *
 * @param startRange - start range value
 * @param endRange   - end range value
 * @return same FieldDayOfWeekDefinitionBuilder instance
 */
@Override
public FieldDayOfWeekDefinitionBuilder withValidRange(final int startRange, final int endRange) {
    super.withValidRange(startRange, endRange);
    return this;
}
3.68
Activiti_DelegateExpressionCustomPropertiesResolver_getExpressionText
/**
 * Returns the expression text for this custom properties resolver. Comes in handy if you want to
 * check which resolvers you already have.
 */
public String getExpressionText() {
    return expression.getExpressionText();
}
3.68
hbase_OrderedBytes_encodeFloat64
/**
 * Encode a 64-bit floating point value using the fixed-length encoding.
 * <p>
 * This format ensures the following total ordering of floating point values:
 * Double.NEGATIVE_INFINITY &lt; -Double.MAX_VALUE &lt; ... &lt; -Double.MIN_VALUE &lt; -0.0 &lt;
 * +0.0; &lt; Double.MIN_VALUE &lt; ... &lt; Double.MAX_VALUE &lt; Double.POSITIVE_INFINITY &lt;
 * Double.NaN
 * </p>
 * <p>
 * Floating point numbers are encoded as specified in IEEE 754. A 64-bit double precision float
 * consists of a sign bit, an 11-bit unsigned exponent encoded in offset-1023 notation, and a
 * 52-bit significand. The format is described further in the
 * <a href="http://en.wikipedia.org/wiki/Double_precision">Double Precision Floating Point
 * Wikipedia page</a>
 * </p>
 * <p>
 * The value of a normal float is -1<sup>sign bit</sup> &times; 2<sup>exponent - 1023</sup>
 * &times; 1.significand
 * </p>
 * <p>
 * The IEEE 754 floating point format already preserves sort ordering for positive floating point
 * numbers when the raw bytes are compared in most significant byte order. This is discussed
 * further at
 * <a href="http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm">
 * http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm</a>
 * </p>
 * <p>
 * Thus, we need only ensure that negative numbers sort in the exact opposite order as
 * positive numbers (so that say, negative infinity is less than negative 1), and that all
 * negative numbers compare less than any positive number. To accomplish this, we invert the sign
 * bit of all floating point numbers, and we also invert the exponent and significand bits if the
 * floating point number was negative.
 * </p>
 * <p>
 * More specifically, we first store the floating point bits into a 64-bit long {@code l} using
 * {@link Double#doubleToLongBits}. This method collapses all NaNs into a single, canonical NaN
 * value but otherwise leaves the bits unchanged. We then compute
 * </p>
 *
 * <pre>
 * l &circ;= (l &gt;&gt; (Long.SIZE - 1)) | Long.MIN_VALUE
 * </pre>
 * <p>
 * which inverts the sign bit and XORs all other bits with the sign bit itself. Comparing the raw
 * bytes of {@code l} in most significant byte order is equivalent to performing a double
 * precision floating point comparison on the underlying bits (ignoring NaN comparisons, as NaNs
 * don't compare equal to anything when performing floating point comparisons).
 * </p>
 * <p>
 * The resulting long integer is then converted into a byte array by serializing the long one byte
 * at a time in most significant byte order. The serialized integer is prefixed by a single header
 * byte. All serialized values are 9 bytes in length.
 * </p>
 * <p>
 * This encoding format, and much of this highly detailed documentation string, is based on
 * Orderly's {@code DoubleWritableRowKey}.
 * </p>
 * @return the number of bytes written.
 * @see #decodeFloat64(PositionedByteRange)
 */
public static int encodeFloat64(PositionedByteRange dst, double val, Order ord) {
    final int offset = dst.getOffset(), start = dst.getPosition();
    long lng = Double.doubleToLongBits(val);
    lng ^= ((lng >> (Long.SIZE - 1)) | Long.MIN_VALUE);
    dst.put(FIXED_FLOAT64).put((byte) (lng >> 56)).put((byte) (lng >> 48)).put((byte) (lng >> 40))
        .put((byte) (lng >> 32)).put((byte) (lng >> 24)).put((byte) (lng >> 16))
        .put((byte) (lng >> 8)).put((byte) lng);
    ord.apply(dst.getBytes(), offset + start, 9);
    return 9;
}
3.68
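The sign-bit transform documented at length in encodeFloat64 above can be verified without any HBase types: after XOR-ing the raw double bits as described, unsigned comparison of the resulting longs agrees with numeric order. A standalone sketch (the class name and value list are my own):

public class FloatOrderDemo {
    // The order-preserving transform from the Javadoc above: flip the sign bit,
    // and additionally flip all remaining bits when the value was negative.
    static long orderable(double d) {
        long l = Double.doubleToLongBits(d);
        return l ^ ((l >> (Long.SIZE - 1)) | Long.MIN_VALUE);
    }

    public static void main(String[] args) {
        double[] vals = {Double.NEGATIVE_INFINITY, -2.5, -0.0, 0.0, 1.0, Double.POSITIVE_INFINITY};
        for (int i = 1; i < vals.length; i++) {
            // Unsigned comparison of the longs mirrors comparing the 8 raw bytes MSB-first.
            boolean ordered = Long.compareUnsigned(orderable(vals[i - 1]), orderable(vals[i])) < 0;
            System.out.println(vals[i - 1] + " < " + vals[i] + " : " + ordered); // always true
        }
    }
}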
framework_VScrollTable_setUndefinedWidth
/**
 * Sets the width to undefined.
 */
public void setUndefinedWidth() {
    definedWidth = false;
    setWidth(-1, false);
}
3.68
rocketmq-connect_ExpressionBuilder_escapeQuotesWith
/**
 * Return a new ExpressionBuilder that escapes quotes with the specified prefix.
 * This builder remains unaffected.
 *
 * @param prefix the prefix
 * @return the new ExpressionBuilder, or this builder if the prefix is null or empty
 */
public ExpressionBuilder escapeQuotesWith(String prefix) {
    if (prefix == null || prefix.isEmpty()) {
        return this;
    }
    return new ExpressionBuilder(this.rules.escapeQuotesWith(prefix));
}
3.68
hbase_PageFilter_areSerializedFieldsEqual
/**
 * Returns true if and only if the fields of the filter that are serialized are equal to the
 * corresponding fields in other. Used for testing.
 */
@Override
boolean areSerializedFieldsEqual(Filter o) {
    if (o == this) {
        return true;
    }
    if (!(o instanceof PageFilter)) {
        return false;
    }
    PageFilter other = (PageFilter) o;
    return this.getPageSize() == other.getPageSize();
}
3.68
hadoop_GangliaConf_getSlope
/**
 * @return the slope
 */
GangliaSlope getSlope() {
    return slope;
}
3.68
hbase_CompactingMemStore_getScanners
/**
 * This method is protected under {@link HStore#lock} read lock.
 */
@Override
public List<KeyValueScanner> getScanners(long readPt) throws IOException {
    MutableSegment activeTmp = getActive();
    List<? extends Segment> pipelineList = pipeline.getSegments();
    List<? extends Segment> snapshotList = snapshot.getAllSegments();
    long numberOfSegments = 1L + pipelineList.size() + snapshotList.size();
    // The list of elements in pipeline + the active element + the snapshot segment
    List<KeyValueScanner> list = createList((int) numberOfSegments);
    addToScanners(activeTmp, readPt, list);
    addToScanners(pipelineList, readPt, list);
    addToScanners(snapshotList, readPt, list);
    return list;
}
3.68
pulsar_NamespacesBase_internalSetBacklogQuota
/**
 * Base method for setBacklogQuota v1 and v2.
 * Note: don't re-use this logic.
 */
protected void internalSetBacklogQuota(AsyncResponse asyncResponse, BacklogQuotaType backlogQuotaType,
        BacklogQuota backlogQuota) {
    validateNamespacePolicyOperationAsync(namespaceName, PolicyName.BACKLOG, PolicyOperation.WRITE)
            .thenCompose(__ -> validatePoliciesReadOnlyAccessAsync())
            .thenCompose(__ -> setBacklogQuotaAsync(backlogQuotaType, backlogQuota))
            .thenAccept(__ -> {
                asyncResponse.resume(Response.noContent().build());
                log.info("[{}] Successfully updated backlog quota map: namespace={}, map={}",
                        clientAppId(), namespaceName, backlogQuota);
            }).exceptionally(ex -> {
                resumeAsyncResponseExceptionally(asyncResponse, ex);
                log.error("[{}] Failed to update backlog quota map for namespace {}",
                        clientAppId(), namespaceName, ex);
                return null;
            });
}
3.68
dubbo_RpcStatus_getSucceeded
/**
 * Get the number of succeeded invocations.
 *
 * @return succeeded count
 */
public long getSucceeded() {
    return getTotal() - getFailed();
}
3.68
framework_KeyMapper_containsKey
/**
 * Checks if the given key is mapped to an object.
 *
 * @param key
 *            the key to check
 * @return <code>true</code> if the key is currently mapped,
 *         <code>false</code> otherwise
 * @since 7.7
 */
public boolean containsKey(String key) {
    return keyObjectMap.containsKey(key);
}
3.68
hadoop_GetApplicationsRequest_newInstance
/**
 * <p>
 * The request from clients to get a report of Applications matching the
 * given application types and application states in the cluster from the
 * <code>ResourceManager</code>.
 * </p>
 *
 * @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)
 * @param applicationStates application states.
 * @param applicationTypes  application types.
 * @return a report of Applications in <code>GetApplicationsRequest</code>
 */
@Public
@Stable
public static GetApplicationsRequest newInstance(
        Set<String> applicationTypes,
        EnumSet<YarnApplicationState> applicationStates) {
    GetApplicationsRequest request = Records.newRecord(GetApplicationsRequest.class);
    request.setApplicationTypes(applicationTypes);
    request.setApplicationStates(applicationStates);
    return request;
}
3.68
flink_LogicalTypeMerging_findDivisionDecimalType
/** Finds the result type of a decimal division operation. */
public static DecimalType findDivisionDecimalType(
        int precision1, int scale1, int precision2, int scale2) {
    int scale = Math.max(6, scale1 + precision2 + 1);
    int precision = precision1 - scale1 + scale2 + scale;
    return adjustPrecisionScale(precision, scale);
}
3.68
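Plugging numbers into the formula above makes findDivisionDecimalType concrete; for example, dividing a DECIMAL(10, 2) by a DECIMAL(5, 3). This sketch leaves out the final adjustPrecisionScale step, which caps the precision at the type system's maximum:

public class DivisionDecimalDemo {
    public static void main(String[] args) {
        int precision1 = 10, scale1 = 2, precision2 = 5, scale2 = 3;
        int scale = Math.max(6, scale1 + precision2 + 1);     // max(6, 2 + 5 + 1) = 8
        int precision = precision1 - scale1 + scale2 + scale; // 10 - 2 + 3 + 8 = 19
        System.out.println("result before adjustment: DECIMAL(" + precision + ", " + scale + ")");
    }
}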
cron-utils_CronDefinitionBuilder_defineCron
/**
 * Creates a builder instance.
 *
 * @return new CronDefinitionBuilder instance
 */
public static CronDefinitionBuilder defineCron() {
    return new CronDefinitionBuilder();
}
3.68
framework_VAbstractSplitPanel_onMouseMove
/**
 * Handle updating the splitter position when dragging the splitter with a
 * mouse. This should only be called if content resizing has been
 * successfully initialized via a mouse down event.
 *
 * @param event
 *            the browser event
 */
public void onMouseMove(Event event) {
    switch (orientation) {
    case HORIZONTAL:
        final int x = WidgetUtil.getTouchOrMouseClientX(event);
        onHorizontalMouseMove(x);
        break;
    case VERTICAL:
    default:
        final int y = WidgetUtil.getTouchOrMouseClientY(event);
        onVerticalMouseMove(y);
        break;
    }
}
3.68
hadoop_StagingCommitter_getS3KeyPrefix
/**
 * Get the key of the destination "directory" of the job/task.
 *
 * @param context job context
 * @return key to write to
 */
private String getS3KeyPrefix(JobContext context) {
    return s3KeyPrefix;
}
3.68
hadoop_OneSidedPentomino_initializePieces
/**
 * Define the one sided pieces. The flipped pieces have the same name with
 * a capital letter.
 */
protected void initializePieces() {
    pieces.add(new Piece("x", " x /xxx/ x ", false, oneRotation));
    pieces.add(new Piece("v", "x /x /xxx", false, fourRotations));
    pieces.add(new Piece("t", "xxx/ x / x ", false, fourRotations));
    pieces.add(new Piece("w", " x/ xx/xx ", false, fourRotations));
    pieces.add(new Piece("u", "x x/xxx", false, fourRotations));
    pieces.add(new Piece("i", "xxxxx", false, twoRotations));
    pieces.add(new Piece("f", " xx/xx / x ", false, fourRotations));
    pieces.add(new Piece("p", "xx/xx/x ", false, fourRotations));
    pieces.add(new Piece("z", "xx / x / xx", false, twoRotations));
    pieces.add(new Piece("n", "xx / xxx", false, fourRotations));
    pieces.add(new Piece("y", " x /xxxx", false, fourRotations));
    pieces.add(new Piece("l", " x/xxxx", false, fourRotations));
    pieces.add(new Piece("F", "xx / xx/ x ", false, fourRotations));
    pieces.add(new Piece("P", "xx/xx/ x", false, fourRotations));
    pieces.add(new Piece("Z", " xx/ x /xx ", false, twoRotations));
    pieces.add(new Piece("N", " xx/xxx ", false, fourRotations));
    pieces.add(new Piece("Y", " x /xxxx", false, fourRotations));
    pieces.add(new Piece("L", "x /xxxx", false, fourRotations));
}
3.68
framework_Range_splitAt
/**
 * Split the range into two at a certain integer.
 * <p>
 * <em>Example:</em> <code>[5..10[.splitAt(7) == [5..7[, [7..10[</code>
 *
 * @param integer
 *            the integer at which to split the range into two
 * @return an array of two ranges, with <code>[start..integer[</code> in the
 *         first element, and <code>[integer..end[</code> in the second
 *         element.
 *         <p>
 *         If {@code integer} is less than {@code start}, [empty,
 *         {@code this}] is returned. If <code>integer</code> is equal to
 *         or greater than {@code end}, [{@code this}, empty] is returned
 *         instead.
 */
public Range[] splitAt(final int integer) {
    if (integer < start) {
        return new Range[] { Range.withLength(start, 0), this };
    } else if (integer >= end) {
        return new Range[] { this, Range.withLength(end, 0) };
    } else {
        return new Range[] { new Range(start, integer), new Range(integer, end) };
    }
}
3.68
hadoop_TFile_seekToEnd
/**
 * Seek to the end of the scanner. The entry returned by the previous
 * entry() call will be invalid.
 *
 * @throws IOException raised on errors performing I/O.
 */
public void seekToEnd() throws IOException {
    parkCursorAtEnd();
}
3.68
framework_Color_getRGB
/**
 * Returns the RGB value of the color.
 */
public int getRGB() {
    return ((alpha & 0xFF) << 24) | ((red & 0xFF) << 16) | ((green & 0xFF) << 8)
            | ((blue & 0xFF) << 0);
}
3.68
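The bit packing in Color.getRGB above is the standard ARGB layout: one byte per channel, with alpha in the top byte. A quick standalone check (the channel values are arbitrary):

public class ArgbPackingDemo {
    public static void main(String[] args) {
        int alpha = 0xFF, red = 0x12, green = 0x34, blue = 0x56;
        // Mask each channel to 8 bits, then shift into its byte position.
        int rgb = ((alpha & 0xFF) << 24) | ((red & 0xFF) << 16)
                | ((green & 0xFF) << 8) | (blue & 0xFF);
        System.out.printf("0x%08X%n", rgb); // prints 0xFF123456
    }
}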
hudi_HoodieRealtimeRecordReaderUtils_getMaxCompactionMemoryInBytes
/**
 * Get the max compaction memory in bytes from the JobConf.
 */
public static long getMaxCompactionMemoryInBytes(JobConf jobConf) {
    // jobConf.getMemoryForMapTask() returns in MB
    return (long) Math.ceil(Double.parseDouble(
        ConfigUtils.getRawValueWithAltKeys(jobConf, HoodieMemoryConfig.MAX_MEMORY_FRACTION_FOR_COMPACTION)
            .orElse(HoodieMemoryConfig.DEFAULT_MR_COMPACTION_MEMORY_FRACTION))
        * jobConf.getMemoryForMapTask() * 1024 * 1024L);
}
3.68
graphhopper_GHRequest_setHeadings
/**
 * Sets the headings, i.e. the direction the route should leave the starting point and the directions the route
 * should arrive from at the via-points and the end point. Each heading is given as a north based azimuth (clockwise)
 * in [0, 360) or NaN if no direction shall be specified.
 * <p>
 * The number of headings must be zero (default), one (for the start point) or equal to the number of points
 * when sending the request.
 */
public GHRequest setHeadings(List<Double> headings) {
    this.headings = headings;
    return this;
}
3.68
graphhopper_StopWatch_getMillisDouble
/**
 * Returns the elapsed time in ms, including the fractional part for a more precise value.
 */
public double getMillisDouble() {
    return elapsedNanos / 1_000_000.0;
}
3.68
flink_FileInputFormat_addFilesInDir
/**
 * Enumerate all files in the directory, recursing if enumerateNestedFiles is true.
 *
 * @return the total length of accepted files.
 */
private long addFilesInDir(Path path, List<FileStatus> files, boolean logExcludedFiles)
        throws IOException {
    final FileSystem fs = path.getFileSystem();

    long length = 0;

    for (FileStatus dir : fs.listStatus(path)) {
        if (dir.isDir()) {
            if (acceptFile(dir) && enumerateNestedFiles) {
                length += addFilesInDir(dir.getPath(), files, logExcludedFiles);
            } else {
                if (logExcludedFiles && LOG.isDebugEnabled()) {
                    LOG.debug("Directory " + dir.getPath().toString()
                            + " did not pass the file-filter and is excluded.");
                }
            }
        } else {
            if (acceptFile(dir)) {
                files.add(dir);
                length += dir.getLen();
                testForUnsplittable(dir);
            } else {
                if (logExcludedFiles && LOG.isDebugEnabled()) {
                    LOG.debug("File " + dir.getPath().toString()
                            + " did not pass the file-filter and is excluded.");
                }
            }
        }
    }
    return length;
}
3.68
flink_BlobServer_globalCleanupAsync
/**
 * Removes all BLOBs from local and HA store belonging to the given {@link JobID}.
 *
 * @param jobId ID of the job this blob belongs to
 */
@Override
public CompletableFuture<Void> globalCleanupAsync(JobID jobId, Executor executor) {
    checkNotNull(jobId);

    return runAsyncWithWriteLock(
            () -> {
                IOException exception = null;

                try {
                    internalLocalCleanup(jobId);
                } catch (IOException e) {
                    exception = e;
                }

                if (!blobStore.deleteAll(jobId)) {
                    exception = ExceptionUtils.firstOrSuppressed(
                            new IOException(
                                    "Error while cleaning up the BlobStore for job " + jobId),
                            exception);
                }

                if (exception != null) {
                    throw new IOException(exception);
                }
            },
            executor);
}
3.68
dubbo_StringUtils_startsWithIgnoreCase
/**
 * Test whether str starts with the prefix, ignoring case.
 *
 * @param str    the string to test
 * @param prefix the prefix to look for
 * @return true if str starts with prefix ignoring case, false otherwise
 */
public static boolean startsWithIgnoreCase(String str, String prefix) {
    if (str == null || prefix == null || str.length() < prefix.length()) {
        return false;
    }
    // return str.substring(0, prefix.length()).equalsIgnoreCase(prefix);
    return str.regionMatches(true, 0, prefix, 0, prefix.length());
}
3.68
framework_CustomizedSystemMessages_setSessionExpiredCaption
/**
 * Sets the caption of the notification. Set to null for no caption. If both
 * caption and message are null, the client automatically forwards to
 * sessionExpiredUrl after the timeout timer expires. The timer uses the value
 * read from HTTPSession.getMaxInactiveInterval().
 *
 * @param sessionExpiredCaption
 *            the caption
 */
public void setSessionExpiredCaption(String sessionExpiredCaption) {
    this.sessionExpiredCaption = sessionExpiredCaption;
}
3.68
hadoop_BlockPoolTokenSecretManager_generateToken
/**
 * See {@link BlockTokenSecretManager#generateToken(ExtendedBlock, EnumSet,
 * StorageType[], String[])}.
 */
public Token<BlockTokenIdentifier> generateToken(ExtendedBlock b,
        EnumSet<AccessMode> of, StorageType[] storageTypes, String[] storageIds)
        throws IOException {
    return get(b.getBlockPoolId()).generateToken(b, of, storageTypes, storageIds);
}
3.68
Activiti_NeedsActiveExecutionCmd_getSuspendedExceptionMessage
/**
 * Subclasses can override this to provide a more detailed exception message that will be thrown
 * when the execution is suspended.
 */
protected String getSuspendedExceptionMessage() {
    return "Cannot execute operation because execution '" + executionId + "' is suspended";
}
3.68
morf_DataValueLookup_getBigDecimal
/**
 * Gets the value as a {@link BigDecimal}. Will attempt conversion where possible
 * and throw a suitable conversion exception if the conversion fails.
 * May return {@code null} if the value is not set or is explicitly set
 * to {@code null}.
 *
 * @param name The column name.
 * @return The value.
 */
public default BigDecimal getBigDecimal(String name) {
    String value = getValue(name);
    return value == null ? null : new BigDecimal(value);
}
3.68
hadoop_DataJoinReducerBase_collect
/**
 * The subclass can overwrite this method to perform additional filtering
 * and/or other processing logic before a value is collected.
 *
 * @param key      the reduce key
 * @param aRecord  the tagged map output record to collect
 * @param output   the output collector
 * @param reporter the progress reporter
 * @throws IOException
 */
protected void collect(Object key, TaggedMapOutput aRecord,
        OutputCollector output, Reporter reporter) throws IOException {
    this.collected += 1;
    addLongValue("collectedCount", 1);
    if (aRecord != null) {
        output.collect(key, aRecord.getData());
        reporter.setStatus("key: " + key.toString() + " collected: " + collected);
        addLongValue("actuallyCollectedCount", 1);
    }
}
3.68
hadoop_OBSDataBlocks_createTmpFileForWrite
/**
 * Demand create the directory allocator, then create a temporary file.
 * {@link LocalDirAllocator#createTmpFileForWrite(String, long, Configuration)}.
 *
 * @param pathStr prefix for the temporary file
 * @param size    the size of the file that is going to be written
 * @param conf    the Configuration object
 * @return a unique temporary file
 * @throws IOException IO problems
 */
static synchronized File createTmpFileForWrite(final String pathStr,
        final long size, final Configuration conf) throws IOException {
    if (directoryAllocator == null) {
        String bufferDir = conf.get(OBSConstants.BUFFER_DIR) != null
            ? OBSConstants.BUFFER_DIR
            : "hadoop.tmp.dir";
        directoryAllocator = new LocalDirAllocator(bufferDir);
    }
    return directoryAllocator.createTmpFileForWrite(pathStr, size, conf);
}
3.68
hbase_MultiByteBuff_arrayOffset
/**
 * @throws UnsupportedOperationException MBB does not support array based operations
 */
@Override
public int arrayOffset() {
    throw new UnsupportedOperationException();
}
3.68
framework_TooltipInfo_setIdentifier
/**
 * Sets the tooltip's identifier.
 *
 * @param identifier
 *            the identifier to set
 */
public void setIdentifier(Object identifier) {
    this.identifier = identifier;
}
3.68
hadoop_TypedBytesWritable_toString
/** Generate a suitable string representation. */
public String toString() {
    return getValue().toString();
}
3.68
hbase_AbstractProcedureScheduler_wakeWaitingProcedures
/**
 * Wakes up the given waiting procedures by pushing them back into the scheduler queues.
 *
 * @return the number of procedures waiting on the given {@code lockAndQueue} that were woken up.
 */
protected int wakeWaitingProcedures(LockAndQueue lockAndQueue) {
    return lockAndQueue.wakeWaitingProcedures(this);
}
3.68
flink_SupportsReadingMetadata_supportsMetadataProjection
/**
 * Defines whether projections can be applied to metadata columns.
 *
 * <p>This method is only called if the source does <em>not</em> implement {@link
 * SupportsProjectionPushDown}. By default, the planner will only apply metadata columns which
 * have actually been selected in the query regardless. By returning {@code false} instead the
 * source can inform the planner to apply all metadata columns defined in the table's schema.
 *
 * <p>If the source implements {@link SupportsProjectionPushDown}, projections of metadata
 * columns are always considered before calling {@link #applyReadableMetadata(List, DataType)}.
 */
default boolean supportsMetadataProjection() {
    return true;
}
3.68
zxing_BinaryBitmap_getWidth
/**
 * @return The width of the bitmap.
 */
public int getWidth() {
    return binarizer.getWidth();
}
3.68
hbase_AsyncBufferedMutator_mutate
/**
 * Sends a {@link Mutation} to the table. The mutations will be buffered and sent over the wire as
 * part of a batch. Currently only supports {@link Put} and {@link Delete} mutations.
 *
 * @param mutation The data to send.
 */
default CompletableFuture<Void> mutate(Mutation mutation) {
    return Iterables.getOnlyElement(mutate(Collections.singletonList(mutation)));
}
3.68
flink_PekkoRpcActor_stop
/** Stop the actor immediately. */
private void stop(RpcEndpointTerminationResult rpcEndpointTerminationResult) {
    if (rpcEndpointStopped.compareAndSet(false, true)) {
        this.rpcEndpointTerminationResult = rpcEndpointTerminationResult;
        getContext().stop(getSelf());
    }
}
3.68
rocketmq-connect_JdbcSourceTask_sleepIfNeed
/**
 * Sleep if needed, i.e. if the querier is idle and its next poll is still in the future.
 *
 * @param querier the querier to check
 * @return true if the task slept, false otherwise
 */
private boolean sleepIfNeed(Querier querier) {
    if (!querier.querying()) {
        final long nextUpdate = querier.getLastUpdate() + config.getPollIntervalMs();
        final long now = System.currentTimeMillis();
        final long sleepMs = Math.min(nextUpdate - now, 100);
        if (sleepMs > 0) {
            log.trace("Waiting {} ms to poll {} next", nextUpdate - now, querier);
            try {
                Thread.sleep(sleepMs);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            return true;
        }
    }
    return false;
}
3.68