Columns:
name: string, length 12 to 178
code_snippet: string, length 8 to 36.5k
score: float64, range 3.26 to 3.68
hbase_HRegionServer_kill
/* * Simulate a kill -9 of this server. Exits w/o closing regions or cleaning up logs, but it does * close the socket in case you want to bring up a server on the old hostname+port immediately. */ @InterfaceAudience.Private protected void kill() { this.killed = true; abort("Simulated kill"); }
3.68
hudi_HiveSchemaUtil_createHiveStruct
/** * Return a 'struct' Hive schema from a list of Parquet fields. * * @param parquetFields : list of parquet fields * @return : Equivalent 'struct' Hive schema */ private static String createHiveStruct(List<Type> parquetFields, boolean supportTimestamp, boolean doFormat) { StringBuilder struct = new StringBuilder(); struct.append(doFormat ? "STRUCT< " : "STRUCT<"); for (Type field : parquetFields) { // TODO: struct field name is only translated to support special char($) // We will need to extend it to other collection type struct.append(hiveCompatibleFieldName(field.getName(), true, doFormat)).append(doFormat ? " : " : ":"); struct.append(convertField(field, supportTimestamp, doFormat)).append(doFormat ? ", " : ","); } struct.delete(struct.length() - (doFormat ? 2 : 1), struct.length()); // Remove the last // ", " struct.append(">"); String finalStr = struct.toString(); // Struct cannot have - in them. userstore_udr_entities has uuid in struct. This breaks the // schema. // HDrone sync should not fail because of this. finalStr = finalStr.replaceAll("-", "_"); return finalStr; }
3.68
flink_ExecutionConfig_getExecutionRetryDelay
/** * Returns the delay between execution retries. * * @return The delay between successive execution retries in milliseconds. * @deprecated Should no longer be used because it is subsumed by RestartStrategyConfiguration */ @Deprecated public long getExecutionRetryDelay() { return executionRetryDelay; }
3.68
flink_RpcUtils_extractImplementedRpcGateways
/** * Extracts all {@link RpcGateway} interfaces implemented by the given clazz. * * @param clazz from which to extract the implemented RpcGateway interfaces * @return A set of all implemented RpcGateway interfaces */ public static Set<Class<? extends RpcGateway>> extractImplementedRpcGateways(Class<?> clazz) { HashSet<Class<? extends RpcGateway>> interfaces = new HashSet<>(); while (clazz != null) { for (Class<?> interfaze : clazz.getInterfaces()) { if (RpcGateway.class.isAssignableFrom(interfaze)) { interfaces.add((Class<? extends RpcGateway>) interfaze); } } clazz = clazz.getSuperclass(); } return interfaces; }
3.68
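The extractImplementedRpcGateways snippet above simply walks the class hierarchy and collects every interface assignable to a marker type. Below is a minimal, self-contained sketch of the same walk; the Gateway marker interface and the endpoint classes are invented here purely for illustration and only stand in for Flink's RpcGateway and a real RPC endpoint.

```java
import java.util.HashSet;
import java.util.Set;

public class GatewayScanSketch {
    interface Gateway {}                       // stands in for RpcGateway
    interface JobGateway extends Gateway {}
    interface MetricsGateway extends Gateway {}
    static class BaseEndpoint implements MetricsGateway {}
    static class JobEndpoint extends BaseEndpoint implements JobGateway, Comparable<JobEndpoint> {
        @Override public int compareTo(JobEndpoint o) { return 0; }
    }

    @SuppressWarnings("unchecked")
    static Set<Class<? extends Gateway>> extractGateways(Class<?> clazz) {
        Set<Class<? extends Gateway>> interfaces = new HashSet<>();
        while (clazz != null) {
            for (Class<?> interfaze : clazz.getInterfaces()) {
                if (Gateway.class.isAssignableFrom(interfaze)) {
                    interfaces.add((Class<? extends Gateway>) interfaze);
                }
            }
            clazz = clazz.getSuperclass(); // also pick up interfaces declared on superclasses
        }
        return interfaces;
    }

    public static void main(String[] args) {
        // Prints JobGateway and MetricsGateway, but not Comparable.
        System.out.println(extractGateways(JobEndpoint.class));
    }
}
```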
hadoop_UnionStorageStatistics_isTracked
/** * Return true if a statistic is being tracked. * * @return True only if the statistic is being tracked. */ @Override public boolean isTracked(String key) { for (StorageStatistics stat : stats) { if (stat.isTracked(key)) { return true; } } return false; }
3.68
hadoop_TypedBytesOutput_writeListFooter
/** * Writes a list footer. * * @throws IOException */ public void writeListFooter() throws IOException { out.write(Type.MARKER.code); }
3.68
hadoop_OBSDataBlocks_getState
/** * Current state. * * @return the current state. */ protected final DestState getState() { return state; }
3.68
hbase_ColumnSchemaModel___setVersions
/** * @param value the desired value of the VERSIONS attribute */ public void __setVersions(int value) { attrs.put(VERSIONS, Integer.toString(value)); }
3.68
morf_Column_getUpperCaseName
/** * The name of the column in upper case. * Ideally implementations should support this directly via an interned string. * * @return the column name in upper case. */ public default String getUpperCaseName() { return getName().toUpperCase(); }
3.68
pulsar_GracefulExecutorServicesShutdown_terminationTimeout
/** * Sets the timeout for waiting for executors to complete in forceful termination. * * @param terminationTimeout duration for the timeout * @return the current instance for controlling graceful shutdown */ public GracefulExecutorServicesShutdown terminationTimeout(Duration terminationTimeout) { this.terminationTimeout = terminationTimeout; return this; }
3.68
zxing_DecoderResult_getOther
/** * @return arbitrary additional metadata */ public Object getOther() { return other; }
3.68
flink_TaskStateSnapshot_getInputRescalingDescriptor
/** * Returns the input channel mapping for rescaling with in-flight data or {@link * InflightDataRescalingDescriptor#NO_RESCALE}. */ public InflightDataRescalingDescriptor getInputRescalingDescriptor() { return getMapping(OperatorSubtaskState::getInputRescalingDescriptor); }
3.68
flink_InputTypeStrategies_logical
/** * Strategy for an argument that corresponds to a given {@link LogicalTypeFamily} and * nullability. Implicit casts will be inserted if possible. */ public static FamilyArgumentTypeStrategy logical( LogicalTypeFamily expectedFamily, boolean expectedNullability) { return new FamilyArgumentTypeStrategy(expectedFamily, expectedNullability); }
3.68
hbase_TraceUtil_createSpan
/** * Create a span with the given {@code kind}. Notice that, OpenTelemetry only expects one * {@link SpanKind#CLIENT} span and one {@link SpanKind#SERVER} span for a traced request, so use * this with caution when you want to create spans with kind other than {@link SpanKind#INTERNAL}. */ private static Span createSpan(String name, SpanKind kind) { return getGlobalTracer().spanBuilder(name).setSpanKind(kind).startSpan(); }
3.68
flink_FlinkSemiAntiJoinProjectTransposeRule_adjustCondition
/** * Pulls the project above the semi/anti join and returns the resulting semi/anti join * condition. As a result, the semi/anti join condition should be modified such that references * to the LHS of a semi/anti join should now reference the children of the project that's on the * LHS. * * @param project LogicalProject on the LHS of the semi/anti join * @param join the semi/anti join * @return the modified semi/anti join condition */ private RexNode adjustCondition(LogicalProject project, Join join) { // create two RexPrograms -- the bottom one representing a // concatenation of the project and the RHS of the semi/anti join and the // top one representing the semi/anti join condition RexBuilder rexBuilder = project.getCluster().getRexBuilder(); RelDataTypeFactory typeFactory = rexBuilder.getTypeFactory(); RelNode rightChild = join.getRight(); // for the bottom RexProgram, the input is a concatenation of the // child of the project and the RHS of the semi/anti join RelDataType bottomInputRowType = SqlValidatorUtil.deriveJoinRowType( project.getInput().getRowType(), rightChild.getRowType(), JoinRelType.INNER, typeFactory, null, join.getSystemFieldList()); RexProgramBuilder bottomProgramBuilder = new RexProgramBuilder(bottomInputRowType, rexBuilder); // add the project expressions, then add input references for the RHS // of the semi/anti join for (Pair<RexNode, String> pair : project.getNamedProjects()) { bottomProgramBuilder.addProject(pair.left, pair.right); } int nLeftFields = project.getInput().getRowType().getFieldCount(); List<RelDataTypeField> rightFields = rightChild.getRowType().getFieldList(); int nRightFields = rightFields.size(); for (int i = 0; i < nRightFields; i++) { final RelDataTypeField field = rightFields.get(i); RexNode inputRef = rexBuilder.makeInputRef(field.getType(), i + nLeftFields); bottomProgramBuilder.addProject(inputRef, field.getName()); } RexProgram bottomProgram = bottomProgramBuilder.getProgram(); // input rowtype into the top program is the concatenation of the // project and the RHS of the semi/anti join RelDataType topInputRowType = SqlValidatorUtil.deriveJoinRowType( project.getRowType(), rightChild.getRowType(), JoinRelType.INNER, typeFactory, null, join.getSystemFieldList()); RexProgramBuilder topProgramBuilder = new RexProgramBuilder(topInputRowType, rexBuilder); topProgramBuilder.addIdentity(); topProgramBuilder.addCondition(join.getCondition()); RexProgram topProgram = topProgramBuilder.getProgram(); // merge the programs and expand out the local references to form // the new semi/anti join condition; it now references a concatenation of // the project's child and the RHS of the semi/anti join RexProgram mergedProgram = RexProgramBuilder.mergePrograms(topProgram, bottomProgram, rexBuilder); return mergedProgram.expandLocalRef(mergedProgram.getCondition()); }
3.68
flink_MergingWindowSet_addWindow
/** * Adds a new {@code Window} to the set of in-flight windows. It might happen that this triggers * merging of previously in-flight windows. In that case, the provided {@link MergeFunction} is * called. * * <p>This returns the window that is the representative of the added window after adding. This * can either be the new window itself, if no merge occurred, or the newly merged window. Adding * an element to a window or calling trigger functions should only happen on the returned * representative. This way, we never have to deal with a new window that is immediately * swallowed up by another window. * * <p>If the new window is merged, the {@code MergeFunction} callback arguments also don't * contain the new window as part of the list of merged windows. * * @param newWindow The new {@code Window} to add. * @param mergeFunction The callback to be invoked in case a merge occurs. * @return The {@code Window} that the new {@code Window} ended up in. This can also be the new * {@code Window} itself in case no merge occurred. * @throws Exception */ public W addWindow(W newWindow, MergeFunction<W> mergeFunction) throws Exception { List<W> windows = new ArrayList<>(); windows.addAll(this.mapping.keySet()); windows.add(newWindow); final Map<W, Collection<W>> mergeResults = new HashMap<>(); windowAssigner.mergeWindows( windows, new MergingWindowAssigner.MergeCallback<W>() { @Override public void merge(Collection<W> toBeMerged, W mergeResult) { if (LOG.isDebugEnabled()) { LOG.debug("Merging {} into {}", toBeMerged, mergeResult); } mergeResults.put(mergeResult, toBeMerged); } }); W resultWindow = newWindow; boolean mergedNewWindow = false; // perform the merge for (Map.Entry<W, Collection<W>> c : mergeResults.entrySet()) { W mergeResult = c.getKey(); Collection<W> mergedWindows = c.getValue(); // if our new window is in the merged windows make the merge result the // result window if (mergedWindows.remove(newWindow)) { mergedNewWindow = true; resultWindow = mergeResult; } // pick any of the merged windows and choose that window's state window // as the state window for the merge result W mergedStateWindow = this.mapping.get(mergedWindows.iterator().next()); // figure out the state windows that we are merging List<W> mergedStateWindows = new ArrayList<>(); for (W mergedWindow : mergedWindows) { W res = this.mapping.remove(mergedWindow); if (res != null) { mergedStateWindows.add(res); } } this.mapping.put(mergeResult, mergedStateWindow); // don't put the target state window into the merged windows mergedStateWindows.remove(mergedStateWindow); // don't merge the new window itself, it never had any state associated with it // i.e. if we are only merging one pre-existing window into itself // without extending the pre-existing window if (!(mergedWindows.contains(mergeResult) && mergedWindows.size() == 1)) { mergeFunction.merge( mergeResult, mergedWindows, this.mapping.get(mergeResult), mergedStateWindows); } } // the new window created a new, self-contained window without merging if (mergeResults.isEmpty() || (resultWindow.equals(newWindow) && !mergedNewWindow)) { this.mapping.put(resultWindow, resultWindow); } return resultWindow; }
3.68
hadoop_WriteOperationHelper_getAuditSpan
/** * Get the audit span this object was created with. * @return the audit span */ public AuditSpan getAuditSpan() { return auditSpan; }
3.68
flink_Description_build
/** Creates description representation. */ public Description build() { return new Description(blocks); }
3.68
pulsar_ResourceUnitRanking_getEstimatedLoadPercentage
/** * Get the estimated load percentage. */ public double getEstimatedLoadPercentage() { return this.estimatedLoadPercentage; }
3.68
hbase_ScannerContext_hasBatchLimit
/** Returns true if the batch limit can be enforced in the checker's scope */ boolean hasBatchLimit(LimitScope checkerScope) { return limits.canEnforceBatchLimitFromScope(checkerScope) && limits.getBatch() > 0; }
3.68
framework_VaadinSession_getAttribute
/** * Gets a stored attribute value. If a value has been stored for the * session, that value is returned. If no value is stored for the name, * <code>null</code> is returned. * <p> * The fully qualified name of the type is used as the name when getting the * value. The outcome of calling this method is thus the same as if * calling<br /> * <br /> * <code>getAttribute(type.getName());</code> * * @see #setAttribute(Class, Object) * @see #getAttribute(String) * * @param type * the type of the value to get, can not be <code>null</code>. * @return the value, or <code>null</code> if no value has been stored or if * it has been set to null. */ public <T> T getAttribute(Class<T> type) { assert hasLock(); if (type == null) { throw new IllegalArgumentException("type can not be null"); } Object value = getAttribute(type.getName()); if (value == null) { return null; } else { return type.cast(value); } }
3.68
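The getAttribute snippet above is an instance of a type-keyed attribute store: the fully qualified class name is the key and Class.cast restores the static type. A minimal sketch of that pattern, independent of Vaadin (TypedAttributes is a made-up class and omits the session locking the original asserts), could look like this.

```java
import java.util.HashMap;
import java.util.Map;

public class TypedAttributes {
    private final Map<String, Object> attributes = new HashMap<>();

    public void setAttribute(Class<?> type, Object value) {
        attributes.put(type.getName(), value); // fully qualified type name is the key
    }

    public <T> T getAttribute(Class<T> type) {
        if (type == null) {
            throw new IllegalArgumentException("type can not be null");
        }
        Object value = attributes.get(type.getName());
        return value == null ? null : type.cast(value); // cast back to the requested type
    }

    public static void main(String[] args) {
        TypedAttributes session = new TypedAttributes();
        session.setAttribute(Integer.class, 42);
        Integer answer = session.getAttribute(Integer.class); // 42
        String missing = session.getAttribute(String.class);  // null, nothing stored under String
        System.out.println(answer + ", " + missing);
    }
}
```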
hbase_CostFunction_scale
/** * Scale the value between 0 and 1. * @param min The min value * @param max The max value * @param value The value to be scaled. * @return The scaled value. */ protected static double scale(double min, double max, double value) { if ( max <= min || value <= min || Math.abs(max - min) <= COST_EPSILON || Math.abs(value - min) <= COST_EPSILON ) { return 0; } return Math.max(0d, Math.min(1d, (value - min) / (max - min))); }
3.68
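The scale snippet above is plain min-max normalization with an epsilon guard. The sketch below reproduces the formula so it can be run in isolation; ScaleSketch and the COST_EPSILON value are assumptions for illustration, not HBase's actual class or constant.

```java
public class ScaleSketch {
    private static final double COST_EPSILON = 1e-6; // assumed tolerance

    static double scale(double min, double max, double value) {
        // Degenerate ranges and values at or below the minimum scale to 0.
        if (max <= min || value <= min
                || Math.abs(max - min) <= COST_EPSILON || Math.abs(value - min) <= COST_EPSILON) {
            return 0;
        }
        // Otherwise clamp (value - min) / (max - min) into [0, 1].
        return Math.max(0d, Math.min(1d, (value - min) / (max - min)));
    }

    public static void main(String[] args) {
        System.out.println(scale(0, 10, 5));   // 0.5
        System.out.println(scale(0, 10, 15));  // clamped to 1.0
        System.out.println(scale(0, 10, -3));  // 0.0, value below min
        System.out.println(scale(5, 5, 7));    // 0.0, degenerate range
    }
}
```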
querydsl_MathExpressions_asin
/** * Create a {@code asin(num)} expression * * <p>Returns the principal value of the arc sine of num, expressed in radians.</p> * * @param num numeric expression * @return asin(num) */ public static <A extends Number & Comparable<?>> NumberExpression<Double> asin(Expression<A> num) { return Expressions.numberOperation(Double.class, Ops.MathOps.ASIN, num); }
3.68
zxing_AztecCode_getLayers
/** * @return number of layers */ public int getLayers() { return layers; }
3.68
framework_Escalator_updateDecoratorGeometry
/** Resizes and places the decorator. */ private void updateDecoratorGeometry(double detailsHeight) { Style style = deco.getStyle(); decoHeight = detailsHeight + getBody().getDefaultRowHeight(); style.setHeight(decoHeight, Unit.PX); }
3.68
flink_Tuple3_setFields
/** * Sets new values to all fields of the tuple. * * @param f0 The value for field 0 * @param f1 The value for field 1 * @param f2 The value for field 2 */ public void setFields(T0 f0, T1 f1, T2 f2) { this.f0 = f0; this.f1 = f1; this.f2 = f2; }
3.68
framework_ApplicationConfiguration_getAtmosphereVersion
/** * Return Atmosphere version. * * @since 7.4 * * @return Atmosphere version. */ public String getAtmosphereVersion() { return getJsoConfiguration(id).getAtmosphereVersion(); }
3.68
hbase_HFileCorruptionChecker_getCorruptedMobFiles
/** Returns the set of corrupted mob file paths after checkTables is called. */ public Collection<Path> getCorruptedMobFiles() { return new HashSet<>(corruptedMobFiles); }
3.68
hadoop_FedBalance_setDelayDuration
/** * Specify the duration (in milliseconds) to wait before the procedure retries. * @param value the delay duration of the job. * @return the builder. */ public Builder setDelayDuration(long value) { this.delayDuration = value; return this; }
3.68
framework_VScrollTable_getColIndexByKey
/** * Gives correct column index for given column key ("cid" in UIDL). * * @param colKey * @return column index of visible columns, -1 if column not visible */ private int getColIndexByKey(String colKey) { // return 0 if asked for rowHeaders if (ROW_HEADER_COLUMN_KEY.equals(colKey)) { return 0; } for (int i = 0; i < visibleColOrder.length; i++) { if (visibleColOrder[i].equals(colKey)) { return i; } } return -1; }
3.68
hbase_AggregateImplementation_getMax
/** * Gives the maximum for a given combination of column qualifier and column family, in the given * row range as defined in the Scan object. In its current implementation, it takes one column * family and one column qualifier (if provided). In case of null column qualifier, maximum value * for the entire column family will be returned. */ @Override public void getMax(RpcController controller, AggregateRequest request, RpcCallback<AggregateResponse> done) { InternalScanner scanner = null; AggregateResponse response = null; T max = null; try { ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request); T temp; Scan scan = ProtobufUtil.toScan(request.getScan()); scanner = env.getRegion().getScanner(scan); List<Cell> results = new ArrayList<>(); byte[] colFamily = scan.getFamilies()[0]; NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily); byte[] qualifier = null; if (qualifiers != null && !qualifiers.isEmpty()) { qualifier = qualifiers.pollFirst(); } // qualifier can be null. boolean hasMoreRows = false; do { hasMoreRows = scanner.next(results); int listSize = results.size(); for (int i = 0; i < listSize; i++) { temp = ci.getValue(colFamily, qualifier, results.get(i)); max = (max == null || (temp != null && ci.compare(temp, max) > 0)) ? temp : max; } results.clear(); } while (hasMoreRows); if (max != null) { AggregateResponse.Builder builder = AggregateResponse.newBuilder(); builder.addFirstPart(ci.getProtoForCellType(max).toByteString()); response = builder.build(); } } catch (IOException e) { CoprocessorRpcUtils.setControllerException(controller, e); } finally { if (scanner != null) { IOUtils.closeQuietly(scanner); } } log.info("Maximum from this region is " + env.getRegion().getRegionInfo().getRegionNameAsString() + ": " + max); done.run(response); }
3.68
Activiti_Activiti_signallingMessageHandler
/** * Any message that enters this {@link org.springframework.messaging.MessageHandler} * containing an {@code executionId} header will trigger a call to * {@link org.activiti.engine.RuntimeService#trigger(String)}. */ public static MessageHandler signallingMessageHandler(final ProcessEngine processEngine) { return new MessageHandler() { @Override public void handleMessage(Message<?> message) throws MessagingException { String executionId = message.getHeaders().containsKey("executionId") ? (String) message.getHeaders().get("executionId") : (String) null; if (null != executionId) processEngine.getRuntimeService().trigger(executionId); } }; }
3.68
flink_KeyMap_traverseAndCountElements
/** * For testing only: Actively counts the number of entries, rather than using the counter * variable. This method has linear complexity, rather than constant. * * @return The counted number of entries. */ int traverseAndCountElements() { int num = 0; for (Entry<?, ?> entry : table) { while (entry != null) { num++; entry = entry.next; } } return num; }
3.68
hadoop_NamenodeStatusReport_getRpcAddress
/** * Get the RPC address. * * @return The RPC address. */ public String getRpcAddress() { return this.rpcAddress; }
3.68
flink_Catalog_getProcedure
/** * Get the procedure. The procedure name should be handled in a case-insensitive way. * * @param procedurePath path of the procedure * @return the requested procedure * @throws ProcedureNotExistException if the procedure does not exist in the catalog * @throws CatalogException in case of any runtime exception */ default Procedure getProcedure(ObjectPath procedurePath) throws ProcedureNotExistException, CatalogException { throw new UnsupportedOperationException( String.format("getProcedure is not implemented for %s.", this.getClass())); }
3.68
hbase_SchemaLocking_getLocks
/** * List lock queues. * @return the locks */ List<LockedResource> getLocks() { List<LockedResource> lockedResources = new ArrayList<>(); addToLockedResources(lockedResources, serverLocks, sn -> sn.getServerName(), LockedResourceType.SERVER); addToLockedResources(lockedResources, namespaceLocks, Function.identity(), LockedResourceType.NAMESPACE); addToLockedResources(lockedResources, tableLocks, tn -> tn.getNameAsString(), LockedResourceType.TABLE); addToLockedResources(lockedResources, regionLocks, Function.identity(), LockedResourceType.REGION); addToLockedResources(lockedResources, peerLocks, Function.identity(), LockedResourceType.PEER); addToLockedResources(lockedResources, ImmutableMap.of(TableName.META_TABLE_NAME, metaLock), tn -> tn.getNameAsString(), LockedResourceType.META); addToLockedResources(lockedResources, globalLocks, Function.identity(), LockedResourceType.GLOBAL); return lockedResources; }
3.68
rocketmq-connect_DeadLetterQueueReporter_build
/** * Builds a dead letter queue reporter. * * @param connectorTaskId the connector task id * @param sinkConfig the sink task configuration * @param workerConfig the worker configuration * @param errorMetricsGroup the error metrics group * @return the reporter, or null if no dead letter queue topic is configured */ public static DeadLetterQueueReporter build(ConnectorTaskId connectorTaskId, ConnectKeyValue sinkConfig, WorkerConfig workerConfig, ErrorMetricsGroup errorMetricsGroup) { DeadLetterQueueConfig deadLetterQueueConfig = new DeadLetterQueueConfig(sinkConfig); String dlqTopic = deadLetterQueueConfig.dlqTopicName(); if (dlqTopic.isEmpty()) { return null; } if (!ConnectUtil.isTopicExist(workerConfig, dlqTopic)) { TopicConfig topicConfig = new TopicConfig(dlqTopic); topicConfig.setReadQueueNums(deadLetterQueueConfig.dlqTopicReadQueueNums()); topicConfig.setWriteQueueNums(deadLetterQueueConfig.dlqTopicWriteQueueNums()); ConnectUtil.createTopic(workerConfig, topicConfig); } DefaultMQProducer dlqProducer = ConnectUtil.initDefaultMQProducer(workerConfig); return new DeadLetterQueueReporter(dlqProducer, sinkConfig, connectorTaskId, errorMetricsGroup); }
3.68
hadoop_FederationStateStoreFacade_getRetryNumbers
/** * Get the number of retries. * * @param configRetries User-configured number of retries. * @return number of retries. * @throws YarnException yarn exception. */ public int getRetryNumbers(int configRetries) throws YarnException { int activeSubClustersCount = getActiveSubClustersCount(); int actualRetryNums = Math.min(activeSubClustersCount, configRetries); // Normally, we don't set a negative number for the number of retries, // but if the user sets a negative number for the number of retries, // we will return 0 if (actualRetryNums < 0) { return 0; } return actualRetryNums; }
3.68
hadoop_RemoteMethod_getParams
/** * Generate a list of parameters for this specific location. Parameters are * grouped into 2 categories: * <ul> * <li>Static parameters that are immutable across locations. * <li>Dynamic parameters that are determined for each location by a * RemoteParam object. * </ul> * * @param context The context identifying the location. * @return A list of parameters for the method customized for the location. */ public Object[] getParams(RemoteLocationContext context) { if (this.params == null) { return new Object[] {}; } Object[] objList = new Object[this.params.length]; for (int i = 0; i < this.params.length; i++) { Object currentObj = this.params[i]; if (currentObj instanceof RemoteParam) { RemoteParam paramGetter = (RemoteParam) currentObj; // Map the parameter using the context if (this.types[i] == CacheDirectiveInfo.class) { CacheDirectiveInfo path = (CacheDirectiveInfo) paramGetter.getParameterForContext(context); objList[i] = new CacheDirectiveInfo.Builder(path) .setPath(new Path(context.getDest())).build(); } else { objList[i] = paramGetter.getParameterForContext(context); } } else { objList[i] = currentObj; } } return objList; }
3.68
hbase_RpcServer_getRemoteIp
/** * Returns the remote side IP address when invoked inside an RPC. Returns null in case of an error. */ public static InetAddress getRemoteIp() { RpcCall call = CurCall.get(); if (call != null) { return call.getRemoteAddress(); } return null; }
3.68
flink_RichAndCondition_getLeft
/** @return One of the {@link IterativeCondition conditions} combined in this condition. */ public IterativeCondition<T> getLeft() { return getNestedConditions()[0]; }
3.68
framework_Payload_setValue
/** * Sets the value of this payload. * * @param value * value of the payload */ public void setValue(String value) { this.value = value; }
3.68
hudi_GenericRecordFullPayloadGenerator_randomize
/** * Sets a random value on each field of the {@link GenericRecord} according to the field's schema type. Fields in the blacklist will not be set. * * @param record GenericRecord to randomize. * @param blacklistFields fields that will not be randomized. * @return Randomized GenericRecord. */ protected GenericRecord randomize(GenericRecord record, Set<String> blacklistFields) { for (Schema.Field f : record.getSchema().getFields()) { if (f.name().equals(DEFAULT_HOODIE_IS_DELETED_COL)) { record.put(f.name(), false); } else if (blacklistFields == null || !blacklistFields.contains(f.name())) { record.put(f.name(), typeConvert(f)); } } return record; }
3.68
hadoop_ReadBufferWorker_getId
/** * return the ID of ReadBufferWorker. */ public int getId() { return this.id; }
3.68
hadoop_MultipartUploaderBuilderImpl_permission
/** * Set permission for the file. */ @Override public B permission(@Nonnull final FsPermission perm) { checkNotNull(perm); permission = perm; return getThisBuilder(); }
3.68
flink_HiveParserRowResolver_getExpression
/** * Retrieves the ColumnInfo corresponding to a source expression which exactly matches the * string rendering of the given HiveParserASTNode. */ public ColumnInfo getExpression(HiveParserASTNode node) throws SemanticException { return get("", node.toStringTree()); }
3.68
dubbo_DubboProtocol_getInvocationWithoutData
/** * Only log the invocation body in debug mode, for size & security considerations. * * @param invocation the invocation to log * @return the invocation, with its arguments stripped unless debug logging is enabled */ private Invocation getInvocationWithoutData(Invocation invocation) { if (logger.isDebugEnabled()) { return invocation; } if (invocation instanceof RpcInvocation) { RpcInvocation rpcInvocation = (RpcInvocation) invocation; rpcInvocation.setArguments(null); return rpcInvocation; } return invocation; }
3.68
hadoop_TextSplitter_split
/** * This method needs to determine the splits between two user-provided strings. * In the case where the user's strings are 'A' and 'Z', this is not hard; we * could create two splits from ['A', 'M') and ['M', 'Z'], 26 splits for strings * beginning with each letter, etc. * * If a user has provided us with the strings "Ham" and "Haze", however, we need * to create splits that differ in the third letter. * * The algorithm used is as follows: * Since there are 2**16 unicode characters, we interpret characters as digits in * base 65536. Given a string 's' containing characters s_0, s_1 .. s_n, we interpret * the string as the number: 0.s_0 s_1 s_2.. s_n in base 65536. Having mapped the * low and high strings into floating-point values, we then use the BigDecimalSplitter * to establish the even split points, then map the resulting floating point values * back into strings. */ public List<InputSplit> split(Configuration conf, ResultSet results, String colName) throws SQLException { LOG.warn("Generating splits for a textual index column."); LOG.warn("If your database sorts in a case-insensitive order, " + "this may result in a partial import or duplicate records."); LOG.warn("You are strongly encouraged to choose an integral split column."); String minString = results.getString(1); String maxString = results.getString(2); boolean minIsNull = false; // If the min value is null, switch it to an empty string instead for purposes // of interpolation. Then add [null, null] as a special case split. if (null == minString) { minString = ""; minIsNull = true; } if (null == maxString) { // If the max string is null, then the min string has to be null too. // Just return a special split for this case. List<InputSplit> splits = new ArrayList<InputSplit>(); splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit( colName + " IS NULL", colName + " IS NULL")); return splits; } // Use this as a hint. May need an extra task if the size doesn't // divide cleanly. int numSplits = conf.getInt(MRJobConfig.NUM_MAPS, 1); String lowClausePrefix = colName + " >= '"; String highClausePrefix = colName + " < '"; // If there is a common prefix between minString and maxString, establish it // and pull it out of minString and maxString. int maxPrefixLen = Math.min(minString.length(), maxString.length()); int sharedLen; for (sharedLen = 0; sharedLen < maxPrefixLen; sharedLen++) { char c1 = minString.charAt(sharedLen); char c2 = maxString.charAt(sharedLen); if (c1 != c2) { break; } } // The common prefix has length 'sharedLen'. Extract it from both. String commonPrefix = minString.substring(0, sharedLen); minString = minString.substring(sharedLen); maxString = maxString.substring(sharedLen); List<String> splitStrings = split(numSplits, minString, maxString, commonPrefix); List<InputSplit> splits = new ArrayList<InputSplit>(); // Convert the list of split point strings into an actual set of InputSplits. String start = splitStrings.get(0); for (int i = 1; i < splitStrings.size(); i++) { String end = splitStrings.get(i); if (i == splitStrings.size() - 1) { // This is the last one; use a closed interval. splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit( lowClausePrefix + start + "'", colName + " <= '" + end + "'")); } else { // Normal open-interval case. splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit( lowClausePrefix + start + "'", highClausePrefix + end + "'")); } } if (minIsNull) { // Add the special null split at the end. 
splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit( colName + " IS NULL", colName + " IS NULL")); } return splits; }
3.68
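To see the base-65536 interpolation the comment above describes without pulling in Hadoop, here is a hedged, self-contained sketch: strings become fractions, the midpoint is computed, and the midpoint is turned back into a string. The class name, the SCALE precision, and the four-character cap are illustrative choices, not Hadoop's BigDecimalSplitter.

```java
import java.math.BigDecimal;
import java.math.RoundingMode;

public class TextInterpolationSketch {
    private static final BigDecimal ONE_PLACE = new BigDecimal(65536);
    private static final int SCALE = 40; // enough precision for short demo strings

    // Interpret "s" as the fraction 0.s_0 s_1 ... s_n in base 65536.
    static BigDecimal stringToBigDecimal(String s) {
        BigDecimal result = BigDecimal.ZERO;
        BigDecimal curPlace = BigDecimal.ONE.divide(ONE_PLACE, SCALE, RoundingMode.HALF_UP);
        for (int i = 0; i < s.length(); i++) {
            result = result.add(curPlace.multiply(new BigDecimal((int) s.charAt(i))));
            curPlace = curPlace.divide(ONE_PLACE, SCALE, RoundingMode.HALF_UP);
        }
        return result;
    }

    // Map a fraction back to a string by repeatedly peeling off base-65536 "digits".
    static String bigDecimalToString(BigDecimal value) {
        StringBuilder sb = new StringBuilder();
        BigDecimal cur = value;
        for (int i = 0; i < 4; i++) { // cap the length of generated split points
            cur = cur.multiply(ONE_PLACE);
            int digit = cur.intValue();
            if (digit <= 0) {
                break;
            }
            sb.append((char) digit);
            cur = cur.subtract(new BigDecimal(digit));
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        BigDecimal lo = stringToBigDecimal("Ham");
        BigDecimal hi = stringToBigDecimal("Haze");
        BigDecimal mid = lo.add(hi).divide(new BigDecimal(2), SCALE, RoundingMode.HALF_UP);
        // Prints a split point lexicographically between "Ham" and "Haze"
        // (it may contain unusual characters, just like the real splitter's output).
        System.out.println(bigDecimalToString(mid));
    }
}
```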
querydsl_SQLExpressions_corr
/** * CORR returns the coefficient of correlation of a set of number pairs. * * @param expr1 first arg * @param expr2 second arg * @return corr(expr1, expr2) */ public static WindowOver<Double> corr(Expression<? extends Number> expr1, Expression<? extends Number> expr2) { return new WindowOver<Double>(Double.class, SQLOps.CORR, expr1, expr2); }
3.68
rocketmq-connect_ColumnDefinition_id
/** * Get the column's identifier. * * @return column identifier; never null */ public ColumnId id() { return id; }
3.68
framework_BigDecimalTextField_getTestDescription
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#getTestDescription() */ @Override protected String getTestDescription() { return "Tests that BigDecimals work correctly with TextFields"; }
3.68
hadoop_AggregateAppResourceUsage_getVcoreSeconds
/** * @return the vcoreSeconds */ public long getVcoreSeconds() { return RMServerUtils .getOrDefault(resourceSecondsMap, ResourceInformation.VCORES.getName(), 0L); }
3.68
framework_LegacyCommunicationManager_findJsStateReferenceType
/** * Finds the highest super class which implements * {@link JavaScriptConnectorState}. In practice, this finds either * {@link JavaScriptComponentState} or {@link JavaScriptExtensionState}. * This is used to find which state properties the client side knows * something about. * * @param stateType * the state type for which the reference type should be found * @return the found reference type */ private static Class<? extends SharedState> findJsStateReferenceType( Class<? extends SharedState> stateType) { assert JavaScriptConnectorState.class.isAssignableFrom(stateType); Class<?> type = stateType; while (type != null) { Class<?> superclass = type.getSuperclass(); if (!JavaScriptConnectorState.class.isAssignableFrom(superclass)) { break; } type = superclass; } return type.asSubclass(SharedState.class); }
3.68
rocketmq-connect_JdbcDriverInfo_jdbcMinorVersion
/** * Get the minor version of the JDBC specification supported by the driver. * * @return the minor version number */ public int jdbcMinorVersion() { return jdbcMinorVersion; }
3.68
flink_StateAssignmentOperation_checkStateMappingCompleteness
/** * Verifies that all operator states can be mapped to an execution job vertex. * * @param allowNonRestoredState if false an exception will be thrown if a state could not be * mapped * @param operatorStates operator states to map * @param tasks task to map to */ private static void checkStateMappingCompleteness( boolean allowNonRestoredState, Map<OperatorID, OperatorState> operatorStates, Set<ExecutionJobVertex> tasks) { Set<OperatorID> allOperatorIDs = new HashSet<>(); for (ExecutionJobVertex executionJobVertex : tasks) { for (OperatorIDPair operatorIDPair : executionJobVertex.getOperatorIDs()) { allOperatorIDs.add(operatorIDPair.getGeneratedOperatorID()); operatorIDPair.getUserDefinedOperatorID().ifPresent(allOperatorIDs::add); } } for (Map.Entry<OperatorID, OperatorState> operatorGroupStateEntry : operatorStates.entrySet()) { // ----------------------------------------find operator for // state--------------------------------------------- if (!allOperatorIDs.contains(operatorGroupStateEntry.getKey())) { OperatorState operatorState = operatorGroupStateEntry.getValue(); if (allowNonRestoredState) { LOG.info( "Skipped checkpoint state for operator {}.", operatorState.getOperatorID()); } else { throw new IllegalStateException( "There is no operator for the state " + operatorState.getOperatorID()); } } } }
3.68
flink_SourceCoordinatorContext_onCheckpoint
/** * Behavior of SourceCoordinatorContext on checkpoint. * * @param checkpointId The id of the ongoing checkpoint. */ void onCheckpoint(long checkpointId) throws Exception { assignmentTracker.onCheckpoint(checkpointId); }
3.68
framework_VMenuBar_onClose
/** * Listener method, fired when this menu is closed. */ @Override public void onClose(CloseEvent<PopupPanel> event) { close(event, true); }
3.68
flink_CepOperator_processEvent
/** * Process the given event by giving it to the NFA and outputting the produced set of matched * event sequences. * * @param nfaState Our NFAState object * @param event The current event to be processed * @param timestamp The timestamp of the event */ private void processEvent(NFAState nfaState, IN event, long timestamp) throws Exception { try (SharedBufferAccessor<IN> sharedBufferAccessor = partialMatches.getAccessor()) { Collection<Map<String, List<IN>>> patterns = nfa.process( sharedBufferAccessor, nfaState, event, timestamp, afterMatchSkipStrategy, cepTimerService); if (nfa.getWindowTime() > 0 && nfaState.isNewStartPartialMatch()) { registerTimer(timestamp + nfa.getWindowTime()); } processMatchedSequences(patterns, timestamp); } }
3.68
flink_DispatcherId_fromUuid
/** Creates a new DispatcherId that corresponds to the UUID. */ public static DispatcherId fromUuid(UUID uuid) { return new DispatcherId(uuid); }
3.68
framework_CustomizedSystemMessages_setCookiesDisabledCaption
/** * Sets the caption of the "cookies disabled" notification. Set to null for * no caption. If both caption and message are null, the notification is * disabled. * * @param cookiesDisabledCaption * the caption for the "cookies disabled" notification */ public void setCookiesDisabledCaption(String cookiesDisabledCaption) { this.cookiesDisabledCaption = cookiesDisabledCaption; }
3.68
hbase_BufferedMutator_getWriteBufferSize
/** * Returns the maximum size in bytes of the write buffer for this HTable. * <p> * The default value comes from the configuration parameter {@code hbase.client.write.buffer}. * @return The size of the write buffer in bytes. */ default long getWriteBufferSize() { throw new UnsupportedOperationException( "The BufferedMutator::getWriteBufferSize has not been implemented"); }
3.68
hudi_RollbackNode_execute
/** * Method helps to rollback the last commit instant in the timeline, if it has one. * * @param executionContext Execution context to perform this rollback * @param curItrCount current iteration count. * @throws Exception will be thrown if any error occurred */ @Override public void execute(ExecutionContext executionContext, int curItrCount) throws Exception { int numRollbacks = config.getNumRollbacks(); log.info(String.format("Executing rollback node %s with %d rollbacks", this.getName(), numRollbacks)); // Can only be done with an instantiation of a new WriteClient hence cannot be done during DeltaStreamer // testing for now HoodieTableMetaClient metaClient = HoodieTableMetaClient.builder().setConf(executionContext.getHoodieTestSuiteWriter().getConfiguration()).setBasePath(executionContext.getHoodieTestSuiteWriter().getCfg().targetBasePath) .build(); for (int i = 0; i < numRollbacks; i++) { metaClient.reloadActiveTimeline(); Option<HoodieInstant> lastInstant = metaClient.getActiveTimeline().getCommitsTimeline().lastInstant(); if (lastInstant.isPresent()) { log.info("Rolling back last instant {}", lastInstant.get()); log.info("Cleaning up generated data for the instant being rolled back {}", lastInstant.get()); ValidationUtils.checkArgument( getStringWithAltKeys(executionContext.getWriterContext().getProps(), DFSPathSelectorConfig.SOURCE_INPUT_SELECTOR, DFSPathSelector.class.getName()) .equalsIgnoreCase(DFSTestSuitePathSelector.class.getName()), "Test Suite only supports DFSTestSuitePathSelector"); executionContext.getHoodieTestSuiteWriter().getWriteClient(this).rollback(lastInstant.get().getTimestamp()); metaClient.getFs().delete(new Path(executionContext.getWriterContext().getCfg().inputBasePath, executionContext.getWriterContext().getHoodieTestSuiteWriter().getLastCheckpoint().orElse("")), true); this.result = lastInstant; } } }
3.68
flink_StreamExecutionEnvironment_configure
/** * Sets all relevant options contained in the {@link ReadableConfig} such as e.g. {@link * StreamPipelineOptions#TIME_CHARACTERISTIC}. It will reconfigure {@link * StreamExecutionEnvironment}, {@link ExecutionConfig} and {@link CheckpointConfig}. * * <p>It will change the value of a setting only if a corresponding option was set in the {@code * configuration}. If a key is not present, the current value of a field will remain untouched. * * @param configuration a configuration to read the values from * @param classLoader a class loader to use when loading classes */ @PublicEvolving public void configure(ReadableConfig configuration, ClassLoader classLoader) { configuration .getOptional(StreamPipelineOptions.TIME_CHARACTERISTIC) .ifPresent(this::setStreamTimeCharacteristic); configuration .getOptional(StateChangelogOptions.ENABLE_STATE_CHANGE_LOG) .ifPresent(this::enableChangelogStateBackend); Optional.ofNullable(loadStateBackend(configuration, classLoader)) .ifPresent(this::setStateBackend); configuration .getOptional(PipelineOptions.OPERATOR_CHAINING) .ifPresent(c -> this.isChainingEnabled = c); configuration .getOptional( PipelineOptions .OPERATOR_CHAINING_CHAIN_OPERATORS_WITH_DIFFERENT_MAX_PARALLELISM) .ifPresent(c -> this.isChainingOfOperatorsWithDifferentMaxParallelismEnabled = c); configuration .getOptional(DeploymentOptions.JOB_LISTENERS) .ifPresent(listeners -> registerCustomListeners(classLoader, listeners)); configuration .getOptional(PipelineOptions.CACHED_FILES) .ifPresent( f -> { this.cacheFile.clear(); this.cacheFile.addAll(DistributedCache.parseCachedFilesFromString(f)); }); configuration .getOptional(ExecutionOptions.RUNTIME_MODE) .ifPresent( runtimeMode -> this.configuration.set(ExecutionOptions.RUNTIME_MODE, runtimeMode)); configuration .getOptional(ExecutionOptions.BATCH_SHUFFLE_MODE) .ifPresent( shuffleMode -> this.configuration.set( ExecutionOptions.BATCH_SHUFFLE_MODE, shuffleMode)); configuration .getOptional(ExecutionOptions.SORT_INPUTS) .ifPresent( sortInputs -> this.configuration.set(ExecutionOptions.SORT_INPUTS, sortInputs)); configuration .getOptional(ExecutionOptions.USE_BATCH_STATE_BACKEND) .ifPresent( sortInputs -> this.configuration.set( ExecutionOptions.USE_BATCH_STATE_BACKEND, sortInputs)); configuration .getOptional(PipelineOptions.NAME) .ifPresent(jobName -> this.configuration.set(PipelineOptions.NAME, jobName)); configuration .getOptional(ExecutionCheckpointingOptions.ENABLE_CHECKPOINTS_AFTER_TASKS_FINISH) .ifPresent( flag -> this.configuration.set( ExecutionCheckpointingOptions .ENABLE_CHECKPOINTS_AFTER_TASKS_FINISH, flag)); configuration .getOptional(PipelineOptions.JARS) .ifPresent(jars -> this.configuration.set(PipelineOptions.JARS, jars)); configuration .getOptional(BatchExecutionOptions.ADAPTIVE_AUTO_PARALLELISM_ENABLED) .ifPresent( flag -> this.configuration.set( BatchExecutionOptions.ADAPTIVE_AUTO_PARALLELISM_ENABLED, flag)); configBufferTimeout(configuration); config.configure(configuration, classLoader); checkpointCfg.configure(configuration); // here we should make sure the configured checkpoint storage will take effect // this needs to happen after checkpointCfg#configure(...) to override the effect of // checkpointCfg#setCheckpointStorage(checkpointDirectory) configureCheckpointStorage(configuration, checkpointCfg); }
3.68
hbase_ServerManager_recordNewServerWithLock
/** * Adds to the onlineServers list. onlineServers should be locked. * @param serverName The remote server's name. * @param sl The current server metrics. */ void recordNewServerWithLock(final ServerName serverName, final ServerMetrics sl) { LOG.info("Registering regionserver=" + serverName); this.onlineServers.put(serverName, sl); }
3.68
framework_VAbstractTextualDate_updateStyleNames
/** * Updates style names for the widget (and its children). */ protected void updateStyleNames() { if (text != null) { text.setStyleName(VTextField.CLASSNAME); text.addStyleName(getStylePrimaryName() + "-textfield"); } }
3.68
hadoop_OBSFileSystem_close
/** * Close the filesystem. This shuts down all transfers. * * @throws IOException IO problem */ @Override public void close() throws IOException { LOG.debug("This Filesystem closed by user, clear resource."); if (closed.getAndSet(true)) { // already closed return; } try { super.close(); } finally { OBSCommonUtils.shutdownAll( boundedMultipartUploadThreadPool, boundedCopyThreadPool, boundedDeleteThreadPool, boundedCopyPartThreadPool, boundedListThreadPool); } }
3.68
hbase_ZkSplitLogWorkerCoordination_endTask
/* * Next part is related to WALSplitterHandler */ /** * endTask() can fail and the only way to recover out of it is for the * {@link org.apache.hadoop.hbase.master.SplitLogManager} to timeout the task node. */ @Override public void endTask(SplitLogTask slt, LongAdder ctr, SplitTaskDetails details) { ZkSplitTaskDetails zkDetails = (ZkSplitTaskDetails) details; String task = zkDetails.getTaskNode(); int taskZKVersion = zkDetails.getCurTaskZKVersion().intValue(); try { if (ZKUtil.setData(watcher, task, slt.toByteArray(), taskZKVersion)) { LOG.info("successfully transitioned task " + task + " to final state " + slt); ctr.increment(); return; } LOG.warn("failed to transition task " + task + " to end state " + slt + " because of version mismatch "); } catch (KeeperException.BadVersionException bve) { LOG.warn("transition of task " + task + " to " + slt + " failed because of version mismatch", bve); } catch (KeeperException.NoNodeException e) { LOG.error(HBaseMarkers.FATAL, "logic error - end task " + task + " " + slt + " failed because task doesn't exist", e); } catch (KeeperException e) { LOG.warn("failed to end task, " + task + " " + slt, e); } SplitLogCounters.tot_wkr_final_transition_failed.increment(); }
3.68
flink_KeyedStream_asQueryableState
/** * Publishes the keyed stream as a queryable ReducingState instance. * * @param queryableStateName Name under which to publish the queryable state instance * @param stateDescriptor State descriptor to create state instance from * @return Queryable state instance * @deprecated The Queryable State feature is deprecated since Flink 1.18, and will be removed * in a future Flink major version. */ @PublicEvolving @Deprecated public QueryableStateStream<KEY, T> asQueryableState( String queryableStateName, ReducingStateDescriptor<T> stateDescriptor) { transform( "Queryable state: " + queryableStateName, getType(), new QueryableAppendingStateOperator<>(queryableStateName, stateDescriptor)); stateDescriptor.initializeSerializerUnlessSet(getExecutionConfig()); return new QueryableStateStream<>( queryableStateName, stateDescriptor, getKeyType().createSerializer(getExecutionConfig())); }
3.68
flink_TimestampData_toTimestamp
/** Converts this {@link TimestampData} object to a {@link Timestamp}. */ public Timestamp toTimestamp() { return Timestamp.valueOf(toLocalDateTime()); }
3.68
flink_RestClusterClientConfiguration_getRetryMaxAttempts
/** @see RestOptions#RETRY_MAX_ATTEMPTS */ public int getRetryMaxAttempts() { return retryMaxAttempts; }
3.68
hudi_SqlQueryBuilder_join
/** * Appends a JOIN clause to a query. * * @param table The table to join with. * @return The {@link SqlQueryBuilder} instance. */ public SqlQueryBuilder join(String table) { if (StringUtils.isNullOrEmpty(table)) { throw new IllegalArgumentException("No table name provided with JOIN clause. Please provide a table name to join with."); } sqlBuilder.append(" join "); sqlBuilder.append(table); return this; }
3.68
morf_DatabaseDataSetConsumer_open
/** * @see org.alfasoftware.morf.dataset.DataSetConsumer#open() */ @Override public void open() { log.debug("Opening database connection"); try { connection = dataSource.getConnection(); wasAutoCommit = connection.getAutoCommit(); connection.setAutoCommit(false); sqlDialect = connectionResources.sqlDialect(); } catch (SQLException e) { throw new RuntimeSqlException("Error opening connection", e); } }
3.68
framework_HierarchicalDataCommunicator_getHierarchyMapper
/** * Returns the {@code HierarchyMapper} used by this data communicator. * * @return the hierarchy mapper used by this data communicator */ protected HierarchyMapper<T, ?> getHierarchyMapper() { return mapper; }
3.68
flink_SavepointMetadataV2_getOperatorState
/** * @return Operator state for the given UID. * @throws IOException If the savepoint does not contain operator state with the given uid. */ public OperatorState getOperatorState(OperatorIdentifier identifier) throws IOException { OperatorID operatorID = identifier.getOperatorId(); OperatorStateSpecV2 operatorState = operatorStateIndex.get(operatorID); if (operatorState == null || operatorState.isNewStateTransformation()) { throw new IOException( "Savepoint does not contain state with operator " + identifier .getUid() .map(uid -> "uid " + uid) .orElse("hash " + operatorID.toHexString())); } return operatorState.asExistingState(); }
3.68
hbase_DeleteNamespaceProcedure_deleteDirectory
/** * Delete the namespace directories from the file system * @param env MasterProcedureEnv * @param namespaceName name of the namespace in string format */ private static void deleteDirectory(MasterProcedureEnv env, String namespaceName) throws IOException { MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); FileSystem fs = mfs.getFileSystem(); Path p = CommonFSUtils.getNamespaceDir(mfs.getRootDir(), namespaceName); try { for (FileStatus status : fs.listStatus(p)) { if (!HConstants.HBASE_NON_TABLE_DIRS.contains(status.getPath().getName())) { throw new IOException("Namespace directory contains table dir: " + status.getPath()); } } if (!fs.delete(CommonFSUtils.getNamespaceDir(mfs.getRootDir(), namespaceName), true)) { throw new IOException("Failed to remove namespace: " + namespaceName); } } catch (FileNotFoundException e) { // File already deleted, continue LOG.debug("deleteDirectory throws exception: " + e); } }
3.68
framework_SelectorPath_getComponentName
/** * Returns the name of the component described by given query fragment. * * @param fragment * Query fragment * @return Class part of fragment */ protected String getComponentName(String fragment) { return fragment.split("\\[")[0]; }
3.68
flink_CliFrontend_getClientTimeout
/** * Get client timeout from command line via effective configuration. * * @param effectiveConfiguration Flink effective configuration. * @return client timeout with Duration type */ private Duration getClientTimeout(Configuration effectiveConfiguration) { return effectiveConfiguration.get(ClientOptions.CLIENT_TIMEOUT); }
3.68
flink_CheckpointConfig_setCheckpointInterval
/** * Sets the interval in which checkpoints are periodically scheduled. * * <p>This setting defines the base interval. Checkpoint triggering may be delayed by the * settings {@link #setMaxConcurrentCheckpoints(int)} and {@link * #setMinPauseBetweenCheckpoints(long)}. * * @param checkpointInterval The checkpoint interval, in milliseconds. */ public void setCheckpointInterval(long checkpointInterval) { if (checkpointInterval < MINIMAL_CHECKPOINT_TIME) { throw new IllegalArgumentException( String.format( "Checkpoint interval must be larger than or equal to %s ms", MINIMAL_CHECKPOINT_TIME)); } configuration.set( ExecutionCheckpointingOptions.CHECKPOINTING_INTERVAL, Duration.ofMillis(checkpointInterval)); }
3.68
flink_Tuple8_copy
/** * Shallow tuple copy. * * @return A new Tuple with the same fields as this. */ @Override @SuppressWarnings("unchecked") public Tuple8<T0, T1, T2, T3, T4, T5, T6, T7> copy() { return new Tuple8<>(this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7); }
3.68
hbase_MasterObserver_preMove
/** * Called prior to moving a given region from one region server to another. * @param ctx the environment to interact with the framework and master * @param region the RegionInfo * @param srcServer the source ServerName * @param destServer the destination ServerName */ default void preMove(final ObserverContext<MasterCoprocessorEnvironment> ctx, final RegionInfo region, final ServerName srcServer, final ServerName destServer) throws IOException { }
3.68
flink_TimestampData_toInstant
/** Converts this {@link TimestampData} object to an {@link Instant}. */ public Instant toInstant() { long epochSecond = millisecond / 1000; int milliOfSecond = (int) (millisecond % 1000); if (milliOfSecond < 0) { --epochSecond; milliOfSecond += 1000; } long nanoAdjustment = milliOfSecond * 1_000_000 + nanoOfMillisecond; return Instant.ofEpochSecond(epochSecond, nanoAdjustment); }
3.68
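The toInstant snippet above decomposes epoch milliseconds into whole seconds plus a nano adjustment, taking care to normalize negative remainders because Java's division truncates toward zero. A standalone sketch of just that arithmetic (not Flink's TimestampData) is shown below.

```java
import java.time.Instant;

public class ToInstantSketch {
    static Instant toInstant(long millisecond, int nanoOfMillisecond) {
        long epochSecond = millisecond / 1000;
        int milliOfSecond = (int) (millisecond % 1000);
        if (milliOfSecond < 0) {
            // Division truncates toward zero, so a negative remainder must be normalized.
            --epochSecond;
            milliOfSecond += 1000;
        }
        long nanoAdjustment = milliOfSecond * 1_000_000L + nanoOfMillisecond;
        return Instant.ofEpochSecond(epochSecond, nanoAdjustment);
    }

    public static void main(String[] args) {
        System.out.println(toInstant(1_500L, 250_000)); // 1970-01-01T00:00:01.500250Z
        System.out.println(toInstant(-1L, 0));          // 1969-12-31T23:59:59.999Z
    }
}
```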
framework_AbstractDateField_setLenient
/** * Specifies whether or not date/time interpretation in component is to be * lenient. * * @see Calendar#setLenient(boolean) * @see #isLenient() * * @param lenient * true if the lenient mode is to be turned on; false if it is to * be turned off. */ public void setLenient(boolean lenient) { getState().lenient = lenient; }
3.68
hudi_RepairUtils_tagInstantsOfBaseAndLogFiles
/** * Tags the instant time of each base or log file from the input file paths. * * @param basePath Base path of the table. * @param allPaths A {@link List} of file paths to tag. * @return A {@link Map} of instant time in {@link String} to a {@link List} of relative file paths. */ public static Map<String, List<String>> tagInstantsOfBaseAndLogFiles( String basePath, List<Path> allPaths) { // Instant time -> Set of base and log file paths Map<String, List<String>> instantToFilesMap = new HashMap<>(); allPaths.forEach(path -> { String instantTime = FSUtils.getCommitTime(path.getName()); instantToFilesMap.computeIfAbsent(instantTime, k -> new ArrayList<>()); instantToFilesMap.get(instantTime).add( FSUtils.getRelativePartitionPath(new Path(basePath), path)); }); return instantToFilesMap; }
3.68
graphhopper_CustomModelParser_createGetPriorityStatements
/** * Parse the expressions from CustomModel relevant for the method getPriority - see createClassTemplate. * * @return the created statements (parsed expressions) */ private static List<Java.BlockStatement> createGetPriorityStatements(Set<String> priorityVariables, CustomModel customModel, EncodedValueLookup lookup) throws Exception { List<Java.BlockStatement> priorityStatements = new ArrayList<>(verifyExpressions(new StringBuilder(), "priority entry", priorityVariables, customModel.getPriority(), lookup)); String priorityMethodStartBlock = "double value = super.getRawPriority(edge, reverse);\n"; for (String arg : priorityVariables) { priorityMethodStartBlock += getVariableDeclaration(lookup, arg); } priorityStatements.addAll(0, new Parser(new org.codehaus.janino.Scanner("getPriority", new StringReader(priorityMethodStartBlock))). parseBlockStatements()); return priorityStatements; }
3.68
hbase_Mutation_numFamilies
/** Returns the number of different families */ public int numFamilies() { return getFamilyCellMap().size(); }
3.68
hudi_HoodieTableMetadataUtil_deleteMetadataTable
/** * Delete the metadata table for the dataset and backup if required. * * @param dataMetaClient {@code HoodieTableMetaClient} of the dataset for which metadata table is to be deleted * @param context instance of {@link HoodieEngineContext}. * @param backup Whether metadata table should be backed up before deletion. If true, the table is backed up to the * directory with name metadata_<current_timestamp>. * @return The backup directory if backup was requested */ public static String deleteMetadataTable(HoodieTableMetaClient dataMetaClient, HoodieEngineContext context, boolean backup) { final Path metadataTablePath = HoodieTableMetadata.getMetadataTableBasePath(dataMetaClient.getBasePathV2()); FileSystem fs = FSUtils.getFs(metadataTablePath.toString(), context.getHadoopConf().get()); dataMetaClient.getTableConfig().clearMetadataPartitions(dataMetaClient); try { if (!fs.exists(metadataTablePath)) { return null; } } catch (FileNotFoundException e) { // Ignoring exception as metadata table already does not exist return null; } catch (IOException e) { throw new HoodieMetadataException("Failed to check metadata table existence", e); } if (backup) { final Path metadataBackupPath = new Path(metadataTablePath.getParent(), ".metadata_" + dataMetaClient.createNewInstantTime(false)); LOG.info("Backing up metadata directory to " + metadataBackupPath + " before deletion"); try { if (fs.rename(metadataTablePath, metadataBackupPath)) { return metadataBackupPath.toString(); } } catch (Exception e) { // If rename fails, we will ignore the backup and still delete the MDT LOG.error("Failed to backup metadata table using rename", e); } } LOG.info("Deleting metadata table from " + metadataTablePath); try { fs.delete(metadataTablePath, true); } catch (Exception e) { throw new HoodieMetadataException("Failed to delete metadata table from path " + metadataTablePath, e); } return null; }
3.68
hudi_WriteMetadataEvent_builder
/** * Returns the builder for {@link WriteMetadataEvent}. */ public static Builder builder() { return new Builder(); }
3.68
framework_AbstractSelect_removeAllItems
/** * Removes all items from the container. * * This functionality is optional. If the function is unsupported, it always * returns false. * * @return True if the operation succeeded. * @see Container#removeAllItems() */ @Override public boolean removeAllItems() throws UnsupportedOperationException { final boolean retval = items.removeAllItems(); itemIdMapper.removeAll(); if (retval) { setValue(null); if (!(items instanceof Container.ItemSetChangeNotifier)) { fireItemSetChange(); } } return retval; }
3.68
pulsar_ConsumerConfiguration_setReceiverQueueSize
/** * Sets the size of the consumer receive queue. * <p> * The consumer receive queue controls how many messages can be accumulated by the {@link Consumer} before the * application calls {@link Consumer#receive()}. Using a higher value could potentially increase the consumer * throughput at the expense of bigger memory utilization. * </p> * <p> * <b>Setting the consumer queue size as zero</b> * <ul> * <li>Decreases the throughput of the consumer, by disabling pre-fetching of messages. This approach improves the * message distribution on shared subscription, by pushing messages only to the consumers that are ready to process * them. Neither {@link Consumer#receive(int, TimeUnit)} nor Partitioned Topics can be used if the consumer queue * size is zero. {@link Consumer#receive()} function call should not be interrupted when the consumer queue size is * zero.</li> * <li>Doesn't support Batch-Message: if the consumer receives any batch-message then it closes the consumer connection with the * broker and the {@link Consumer#receive()} call will remain blocked while {@link Consumer#receiveAsync()} receives the * exception in its callback. <b>The consumer will not be able to receive any further messages unless the batch-message in the pipeline * is removed.</b></li> * </ul> * </p> * Default value is {@code 1000} messages and should be good for most use cases. * * @param receiverQueueSize * the new receiver queue size value */ public ConsumerConfiguration setReceiverQueueSize(int receiverQueueSize) { checkArgument(receiverQueueSize >= 0, "Receiver queue size cannot be negative"); conf.setReceiverQueueSize(receiverQueueSize); return this; }
3.68
flink_Runnables_withUncaughtExceptionHandler
/** * Guard {@link Runnable} with uncaughtException handler, because {@link * java.util.concurrent.ScheduledExecutorService} does not respect the one assigned to executing * {@link Thread} instance. * * @param runnable Runnable future to guard. * @param uncaughtExceptionHandler Handler to call in case of uncaught exception. * @return Future with handler. */ public static Runnable withUncaughtExceptionHandler( Runnable runnable, Thread.UncaughtExceptionHandler uncaughtExceptionHandler) { return () -> { try { runnable.run(); } catch (Throwable t) { uncaughtExceptionHandler.uncaughtException(Thread.currentThread(), t); } }; }
3.68
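The wrapper above matters because a ScheduledExecutorService stores a task's exception in the returned future instead of passing it to any uncaught-exception handler. The sketch below, a hedged re-implementation rather than Flink's utility class, makes the difference visible.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class GuardedRunnableSketch {
    static Runnable withUncaughtExceptionHandler(
            Runnable runnable, Thread.UncaughtExceptionHandler handler) {
        return () -> {
            try {
                runnable.run();
            } catch (Throwable t) {
                handler.uncaughtException(Thread.currentThread(), t);
            }
        };
    }

    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        Runnable failing = () -> { throw new IllegalStateException("boom"); };

        // Unguarded: the exception disappears into the unread ScheduledFuture, nothing is printed.
        scheduler.schedule(failing, 10, TimeUnit.MILLISECONDS);

        // Guarded: the handler is invoked and the failure becomes visible.
        scheduler.schedule(
                withUncaughtExceptionHandler(
                        failing, (thread, error) -> System.err.println("caught: " + error)),
                20, TimeUnit.MILLISECONDS);

        Thread.sleep(200);
        scheduler.shutdown();
    }
}
```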
querydsl_Expressions_simpleTemplate
/** * Create a new Template expression * * @param cl type of expression * @param template template * @param args template parameters * @return template expression */ public static <T> SimpleTemplate<T> simpleTemplate(Class<? extends T> cl, Template template, List<?> args) { return new SimpleTemplate<T>(cl, template, args); }
3.68
hbase_HRegionFileSystem_getStoragePolicyName
/** * Get the storage policy of the directory of CF. * @param familyName The name of column family. * @return Storage policy name, or {@code null} if not using {@link HFileSystem} or exception * thrown when trying to get policy */ @Nullable public String getStoragePolicyName(String familyName) { if (this.fs instanceof HFileSystem) { Path storeDir = getStoreDir(familyName); return ((HFileSystem) this.fs).getStoragePolicyName(storeDir); } return null; }
3.68
hadoop_ServiceRegistryUtils_mkServiceHomePath
/** * Build the path to a service folder * @param username user name * @param serviceName service name * @return the home path to the service */ public static String mkServiceHomePath(String username, String serviceName) { return mkUserHomePath(username) + "/" + serviceName; }
3.68
hadoop_BlockManagerParameters_withTrackerFactory
/** * Sets the duration tracker with statistics to update. * * @param factory The tracker factory object. * @return The builder. */ public BlockManagerParameters withTrackerFactory( final DurationTrackerFactory factory) { this.trackerFactory = factory; return this; }
3.68
hadoop_Find_getExpression
/** Gets an instance of an expression from the factory. */ private Expression getExpression( Class<? extends Expression> expressionClass) { return ExpressionFactory.getExpressionFactory().createExpression( expressionClass, getConf()); }
3.68
shardingsphere-elasticjob_IpUtils_getHostName
/** * Get host name for localhost. * * @return host name for localhost */ public static String getHostName() { if (null != cachedHostName) { return cachedHostName; } try { cachedHostName = InetAddress.getLocalHost().getHostName(); } catch (final UnknownHostException ex) { cachedHostName = "unknown"; } return cachedHostName; }
3.68
hadoop_SaslOutputStream_disposeSasl
/** * Disposes of any system resources or security-sensitive information Sasl * might be using. * * @exception SaslException * if a SASL error occurs. */ private void disposeSasl() throws SaslException { if (saslClient != null) { saslClient.dispose(); } if (saslServer != null) { saslServer.dispose(); } }
3.68
framework_VTreeTable_buildCaptionHtmlSnippet
/** * Icons rendered into first actual column in TreeTable, not to row header * cell. */ @Override protected String buildCaptionHtmlSnippet(UIDL uidl) { if (uidl.getTag().equals("column")) { return super.buildCaptionHtmlSnippet(uidl); } else { String s = uidl.getStringAttribute("caption"); return s; } }
3.68
morf_AbstractSqlDialectTest_testAddIndexStatementsUnique
/** * Test adding a unique index. */ @SuppressWarnings("unchecked") @Test public void testAddIndexStatementsUnique() { Table table = metadata.getTable(TEST_TABLE); Index index = index("indexName").unique().columns(table.columns().get(0).getName()); compareStatements( expectedAddIndexStatementsUnique(), testDialect.addIndexStatements(table, index)); }
3.68
hadoop_OperationAuditor_checkAccess
/** * Check for permission to access a path. * The path is fully qualified and the status is the * status of the path. * This is called from the {@code FileSystem.access()} command * and is a soft permission check used by Hive. * @param path path to check * @param status status of the path. * @param mode access mode. * @return true if access is allowed. * @throws IOException failure */ default boolean checkAccess(Path path, S3AFileStatus status, FsAction mode) throws IOException { return true; }
3.68