name: string (lengths 12–178)
code_snippet: string (lengths 8–36.5k)
score: float64 (range 3.26–3.68)
hbase_PrivateCellUtil_compareFamily
/** * Compare cell's column family against given comparator * @param cell the cell to use for comparison * @param comparator the {@link CellComparator} to use for comparison * @return result comparing cell's column family */ public static int compareFamily(Cell cell, ByteArrayComparable comparator) { if (cell instanceof ByteBufferExtendedCell) { return comparator.compareTo(((ByteBufferExtendedCell) cell).getFamilyByteBuffer(), ((ByteBufferExtendedCell) cell).getFamilyPosition(), cell.getFamilyLength()); } return comparator.compareTo(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()); }
3.68
flink_JobGraphGenerator_postVisit
/** * This method implements the post-visit during the depth-first traversal. When the post visit * happens, all of the descendants have been processed, so this method connects all of the * current node's predecessors to the current node. * * @param node The node currently processed during the post-visit. * @see org.apache.flink.util.Visitor#postVisit(org.apache.flink.util.Visitable) t */ @Override public void postVisit(PlanNode node) { try { // --------- check special cases for which we handle post visit differently ---------- // skip data source node (they have no inputs) // also, do nothing for union nodes, we connect them later when gathering the inputs for // a task // solution sets have no input. the initial solution set input is connected when the // iteration node is in its postVisit if (node instanceof SourcePlanNode || node instanceof NAryUnionPlanNode || node instanceof SolutionSetPlanNode) { return; } // if this is a blocking shuffle vertex, we add one IntermediateDataSetID to its // predecessor and return if (checkAndConfigurePersistentIntermediateResult(node)) { return; } // check if we have an iteration. in that case, translate the step function now if (node instanceof IterationPlanNode) { // prevent nested iterations if (node.isOnDynamicPath()) { throw new CompilerException( "Nested Iterations are not possible at the moment!"); } // if we recursively go into an iteration (because the constant path of one // iteration contains // another one), we push the current one onto the stack if (this.currentIteration != null) { this.iterationStack.add(this.currentIteration); } this.currentIteration = (IterationPlanNode) node; this.currentIteration.acceptForStepFunction(this); // pop the current iteration from the stack if (this.iterationStack.isEmpty()) { this.currentIteration = null; } else { this.currentIteration = this.iterationStack.remove(this.iterationStack.size() - 1); } // inputs for initial bulk partial solution or initial workset are already connected // to the iteration head in the head's post visit. // connect the initial solution set now. if (node instanceof WorksetIterationPlanNode) { // connect the initial solution set WorksetIterationPlanNode wsNode = (WorksetIterationPlanNode) node; JobVertex headVertex = this.iterations.get(wsNode).getHeadTask(); TaskConfig headConfig = new TaskConfig(headVertex.getConfiguration()); int inputIndex = headConfig.getDriverStrategy().getNumInputs(); headConfig.setIterationHeadSolutionSetInputIndex(inputIndex); translateChannel( wsNode.getInitialSolutionSetInput(), inputIndex, headVertex, headConfig, false); } return; } final JobVertex targetVertex = this.vertices.get(node); // --------- Main Path: Translation of channels ---------- // // There are two paths of translation: One for chained tasks (or merged tasks in // general), // which do not have their own task vertex. The other for tasks that have their own // vertex, // or are the primary task in a vertex (to which the others are chained). // check whether this node has its own task, or is merged with another one if (targetVertex == null) { // node's task is merged with another task. it is either chained, of a merged head // vertex // from an iteration final TaskInChain chainedTask; if ((chainedTask = this.chainedTasks.get(node)) != null) { // Chained Task. Sanity check first... 
final Iterator<Channel> inConns = node.getInputs().iterator(); if (!inConns.hasNext()) { throw new CompilerException("Bug: Found chained task with no input."); } final Channel inConn = inConns.next(); if (inConns.hasNext()) { throw new CompilerException( "Bug: Found a chained task with more than one input!"); } if (inConn.getLocalStrategy() != null && inConn.getLocalStrategy() != LocalStrategy.NONE) { throw new CompilerException( "Bug: Found a chained task with an input local strategy."); } if (inConn.getShipStrategy() != null && inConn.getShipStrategy() != ShipStrategyType.FORWARD) { throw new CompilerException( "Bug: Found a chained task with an input ship strategy other than FORWARD."); } JobVertex container = chainedTask.getContainingVertex(); if (container == null) { final PlanNode sourceNode = inConn.getSource(); container = this.vertices.get(sourceNode); if (container == null) { // predecessor is itself chained container = this.chainedTasks.get(sourceNode).getContainingVertex(); if (container == null) { throw new IllegalStateException( "Bug: Chained task predecessor has not been assigned its containing vertex."); } } else { // predecessor is a proper task job vertex and this is the first chained // task. add a forward connection entry. new TaskConfig(container.getConfiguration()) .addOutputShipStrategy(ShipStrategyType.FORWARD); } chainedTask.setContainingVertex(container); } // add info about the input serializer type chainedTask.getTaskConfig().setInputSerializer(inConn.getSerializer(), 0); // update name of container task String containerTaskName = container.getName(); if (containerTaskName.startsWith("CHAIN ")) { container.setName(containerTaskName + " -> " + chainedTask.getTaskName()); } else { container.setName( "CHAIN " + containerTaskName + " -> " + chainedTask.getTaskName()); } // update resource of container task container.setResources( container.getMinResources().merge(node.getMinResources()), container.getPreferredResources().merge(node.getPreferredResources())); this.chainedTasksInSequence.add(chainedTask); return; } else if (node instanceof BulkPartialSolutionPlanNode || node instanceof WorksetPlanNode) { // merged iteration head task. the task that the head is merged with will take // care of it return; } else { throw new CompilerException("Bug: Unrecognized merged task vertex."); } } // -------- Here, we translate non-chained tasks ------------- if (this.currentIteration != null) { JobVertex head = this.iterations.get(this.currentIteration).getHeadTask(); // Exclude static code paths from the co-location constraint, because otherwise // their execution determines the deployment slots of the co-location group if (node.isOnDynamicPath()) { targetVertex.setStrictlyCoLocatedWith(head); } } // create the config that will contain all the description of the inputs final TaskConfig targetVertexConfig = new TaskConfig(targetVertex.getConfiguration()); // get the inputs. if this node is the head of an iteration, we obtain the inputs from // the // enclosing iteration node, because the inputs are the initial inputs to the iteration. final Iterator<Channel> inConns; if (node instanceof BulkPartialSolutionPlanNode) { inConns = ((BulkPartialSolutionPlanNode) node) .getContainingIterationNode() .getInputs() .iterator(); // because the partial solution has its own vertex, is has only one (logical) input. 
// note this in the task configuration targetVertexConfig.setIterationHeadPartialSolutionOrWorksetInputIndex(0); } else if (node instanceof WorksetPlanNode) { WorksetPlanNode wspn = (WorksetPlanNode) node; // input that is the initial workset inConns = Collections.singleton(wspn.getContainingIterationNode().getInput2()) .iterator(); // because we have a stand-alone (non-merged) workset iteration head, the initial // workset will // be input 0 and the solution set will be input 1 targetVertexConfig.setIterationHeadPartialSolutionOrWorksetInputIndex(0); targetVertexConfig.setIterationHeadSolutionSetInputIndex(1); } else { inConns = node.getInputs().iterator(); } if (!inConns.hasNext()) { throw new CompilerException("Bug: Found a non-source task with no input."); } int inputIndex = 0; while (inConns.hasNext()) { Channel input = inConns.next(); inputIndex += translateChannel( input, inputIndex, targetVertex, targetVertexConfig, false); } // broadcast variables int broadcastInputIndex = 0; for (NamedChannel broadcastInput : node.getBroadcastInputs()) { int broadcastInputIndexDelta = translateChannel( broadcastInput, broadcastInputIndex, targetVertex, targetVertexConfig, true); targetVertexConfig.setBroadcastInputName( broadcastInput.getName(), broadcastInputIndex); targetVertexConfig.setBroadcastInputSerializer( broadcastInput.getSerializer(), broadcastInputIndex); broadcastInputIndex += broadcastInputIndexDelta; } } catch (Exception e) { throw new CompilerException( "An error occurred while translating the optimized plan to a JobGraph: " + e.getMessage(), e); } }
3.68
framework_IndexedContainer_addListener
/** * @deprecated As of 7.0, replaced by * {@link #addValueChangeListener(Property.ValueChangeListener)} */ @Override @Deprecated public void addListener(Property.ValueChangeListener listener) { addValueChangeListener(listener); }
3.68
AreaShop_PlayerLoginLogoutListener_onPlayerLogin
/** * Called when a player logs in. * @param event The event */ @EventHandler(priority = EventPriority.MONITOR) public void onPlayerLogin(PlayerLoginEvent event) { if(event.getResult() != Result.ALLOWED) { return; } final Player player = event.getPlayer(); // Schedule task to check for notifications, prevents a lag spike at login Do.syncTimerLater(25, 25, () -> { // Delay until all regions are loaded if(!plugin.isReady()) { return true; } if(!player.isOnline()) { return false; } // Notify for rents that almost run out for(RentRegion region : plugin.getFileManager().getRents()) { if(region.isRenter(player)) { String warningSetting = region.getStringSetting("rent.warningOnLoginTime"); if(warningSetting == null || warningSetting.isEmpty()) { continue; } long warningTime = Utils.durationStringToLong(warningSetting); if(region.getTimeLeft() < warningTime) { // Send the warning message later to let it appear after general MOTD messages AreaShop.getInstance().message(player, "rent-expireWarning", region); } } } // Notify admins for plugin updates AreaShop.getInstance().notifyUpdate(player); return false; }); // Check if the player has regions that still use an old name of theirs and update them Do.syncTimerLater(22, 10, () -> { if(!plugin.isReady()) { return true; } List<GeneralRegion> regions = new ArrayList<>(); for(GeneralRegion region : plugin.getFileManager().getRegions()) { if(region.isOwner(player)) { regions.add(region); } } Do.forAll( plugin.getConfig().getInt("nameupdate.regionsPerTick"), regions, region -> { if(region instanceof BuyRegion) { if(!player.getName().equals(region.getStringSetting("buy.buyerName"))) { region.setSetting("buy.buyerName", player.getName()); region.update(); } } else if(region instanceof RentRegion) { if(!player.getName().equals(region.getStringSetting("rent.renterName"))) { region.setSetting("rent.renterName", player.getName()); region.update(); } } } ); return false; }); }
3.68
flink_BlobServer_getCurrentActiveConnections
/** * Returns all the current active connections in the BlobServer. * * @return the list of all active connections in the current BlobServer */ List<BlobServerConnection> getCurrentActiveConnections() { synchronized (activeConnections) { return new ArrayList<>(activeConnections); } }
3.68
flink_Preconditions_checkArgument
/** * Checks the given boolean condition, and throws an {@code IllegalArgumentException} if the * condition is not met (evaluates to {@code false}). * * @param condition The condition to check * @param errorMessageTemplate The message template for the {@code IllegalArgumentException} * that is thrown if the check fails. The template substitutes its {@code %s} placeholders * with the error message arguments. * @param errorMessageArgs The arguments for the error message, to be inserted into the message * template for the {@code %s} placeholders. * @throws IllegalArgumentException Thrown, if the condition is violated. */ public static void checkArgument( boolean condition, @Nullable String errorMessageTemplate, @Nullable Object... errorMessageArgs) { if (!condition) { throw new IllegalArgumentException(format(errorMessageTemplate, errorMessageArgs)); } }
3.68
framework_VTree_selectNode
/** * Selects a node and optionally deselects all other nodes. * * @param node * The node to select * @param deselectPrevious * Whether the previously selected nodes should be deselected */ private void selectNode(TreeNode node, boolean deselectPrevious) { if (deselectPrevious) { deselectAll(); } if (node != null) { node.setSelected(true); selectedIds.add(node.key); lastSelection = node; } selectionHasChanged = true; }
3.68
flink_Ordering_createNewOrderingUpToIndex
/** * Creates a new ordering that represents an ordering on a prefix of the fields. If the exclusive * index up to which to create the ordering is <code>0</code>, then there is no resulting * ordering and this method returns <code>null</code>. * * @param exclusiveIndex The index (exclusive) up to which to create the ordering. * @return The new ordering on the prefix of the fields, or <code>null</code>, if the prefix is * empty. */ public Ordering createNewOrderingUpToIndex(int exclusiveIndex) { if (exclusiveIndex == 0) { return null; } final Ordering newOrdering = new Ordering(); for (int i = 0; i < exclusiveIndex; i++) { newOrdering.appendOrdering(this.indexes.get(i), this.types.get(i), this.orders.get(i)); } return newOrdering; }
3.68
querydsl_JTSPolygonExpression_interiorRingN
/** * Returns the Nth interior ring for this Polygon as a LineString. * * @param idx one-based index * @return interior ring at index */ public JTSLineStringExpression<LineString> interiorRingN(int idx) { return JTSGeometryExpressions.lineStringOperation(SpatialOps.INTERIOR_RINGN, mixin, ConstantImpl.create(idx)); }
3.68
morf_H2Dialect_getFromDummyTable
/** * @see org.alfasoftware.morf.jdbc.SqlDialect#getFromDummyTable() */ @Override protected String getFromDummyTable() { return " FROM dual"; }
3.68
rocketmq-connect_WorkerTask_shouldPause
/** * Checks whether this task should pause. * * @return true if the target state is PAUSED */ public boolean shouldPause() { return this.targetState == TargetState.PAUSED; }
3.68
flink_ElementTriggers_count
/** Creates a trigger that fires when the pane contains at least {@code countElems} elements. */ public static <W extends Window> CountElement<W> count(long countElems) { return new CountElement<>(countElems); }
3.68
shardingsphere-elasticjob_ElasticJobListener_order
/** * Listener order, default is the lowest. * @return order */ default int order() { return LOWEST; }
3.68
flink_HiveParserSemanticAnalyzer_genExprNodeDesc
/** * Returns expression node descriptor for the expression. If it's evaluated already in previous * operator, it can be retrieved from cache. */ public ExprNodeDesc genExprNodeDesc( HiveParserASTNode expr, HiveParserRowResolver input, HiveParserTypeCheckCtx tcCtx) throws SemanticException { // We recursively create the exprNodeDesc. Base cases: when we encounter // a column ref, we convert that into an exprNodeColumnDesc; when we // encounter // a constant, we convert that into an exprNodeConstantDesc. For others we // just // build the exprNodeFuncDesc with recursively built children. // If the current subExpression is pre-calculated, as in Group-By etc. ExprNodeDesc cached = null; if (tcCtx.isUseCaching()) { cached = getExprNodeDescCached(expr, input); } if (cached == null) { Map<HiveParserASTNode, ExprNodeDesc> allExprs = genAllExprNodeDesc(expr, input, tcCtx); return allExprs.get(expr); } return cached; }
3.68
morf_MathsOperator_toString
/** * @see java.lang.Enum#toString() */ @Override public String toString() { return operator; }
3.68
hadoop_DatanodeVolumeInfo_getReservedSpaceForReplicas
/** * get reserved space for replicas. */ public long getReservedSpaceForReplicas() { return reservedSpaceForReplicas; }
3.68
flink_RecordWriter_setMaxOverdraftBuffersPerGate
/** Sets the max overdraft buffer size per gate. */ public void setMaxOverdraftBuffersPerGate(int maxOverdraftBuffersPerGate) { targetPartition.setMaxOverdraftBuffersPerGate(maxOverdraftBuffersPerGate); }
3.68
hbase_SimpleLoadBalancer_balanceOverall
/** * If we need to balanceoverall, we need to add one more round to peel off one region from each * max. Together with other regions left to be assigned, we distribute all regionToMove, to the RS * that have less regions in whole cluster scope. */ private void balanceOverall(List<RegionPlan> regionsToReturn, Map<ServerName, BalanceInfo> serverBalanceInfo, boolean fetchFromTail, MinMaxPriorityQueue<RegionPlan> regionsToMove, int max, int min) { // Step 1. // A map to record the plan we have already got as status quo, in order to resolve a cyclic // assignment pair, // e.g. plan 1: A -> B, plan 2: B ->C => resolve plan1 to A -> C, remove plan2 Map<ServerName, List<Integer>> returnMap = new HashMap<>(); for (int i = 0; i < regionsToReturn.size(); i++) { List<Integer> pos = returnMap.get(regionsToReturn.get(i).getDestination()); if (pos == null) { pos = new ArrayList<>(); returnMap.put(regionsToReturn.get(i).getDestination(), pos); } pos.add(i); } // Step 2. // Peel off one region from each RS which has max number of regions now. // Each RS should have either max or min numbers of regions for this table. for (int i = 0; i < serverLoadList.size(); i++) { ServerAndLoad serverload = serverLoadList.get(i); BalanceInfo balanceInfo = serverBalanceInfo.get(serverload.getServerName()); if (balanceInfo == null) { continue; } setLoad(serverLoadList, i, balanceInfo.getNumRegionsAdded()); if (balanceInfo.getHriList().size() + balanceInfo.getNumRegionsAdded() == max) { RegionInfo hriToPlan; if (balanceInfo.getHriList().isEmpty()) { LOG.debug("During balanceOverall, we found " + serverload.getServerName() + " has no RegionInfo, no operation needed"); continue; } else if (balanceInfo.getNextRegionForUnload() >= balanceInfo.getHriList().size()) { continue; } else { hriToPlan = balanceInfo.getHriList().get(balanceInfo.getNextRegionForUnload()); } RegionPlan maxPlan = new RegionPlan(hriToPlan, serverload.getServerName(), null); regionsToMove.add(maxPlan); setLoad(serverLoadList, i, -1); } else if ( balanceInfo.getHriList().size() + balanceInfo.getNumRegionsAdded() > max || balanceInfo.getHriList().size() + balanceInfo.getNumRegionsAdded() < min ) { LOG.warn( "Encounter incorrect region numbers after calculating move plan during balanceOverall, " + "for this table, " + serverload.getServerName() + " originally has " + balanceInfo.getHriList().size() + " regions and " + balanceInfo.getNumRegionsAdded() + " regions have been added. Yet, max =" + max + ", min =" + min + ". Thus stop balance for this table"); // should not happen return; } } // Step 3. sort the ServerLoadList, the ArrayList hold overall load for each server. // We only need to assign the regionsToMove to // the first n = regionsToMove.size() RS that has least load. Collections.sort(serverLoadList, new Comparator<ServerAndLoad>() { @Override public int compare(ServerAndLoad s1, ServerAndLoad s2) { if (s1.getLoad() == s2.getLoad()) { return 0; } else { return (s1.getLoad() > s2.getLoad()) ? 1 : -1; } } }); // Step 4. // Preparation before assign out all regionsToMove. // We need to remove the plan that has the source RS equals to destination RS, // since the source RS belongs to the least n loaded RS. 
int assignLength = regionsToMove.size(); // A structure help to map ServerName to it's load and index in ServerLoadList Map<ServerName, Pair<ServerAndLoad, Integer>> SnLoadMap = new HashMap<>(); for (int i = 0; i < serverLoadList.size(); i++) { SnLoadMap.put(serverLoadList.get(i).getServerName(), new Pair<>(serverLoadList.get(i), i)); } Pair<ServerAndLoad, Integer> shredLoad; // A List to help mark the plan in regionsToMove that should be removed List<RegionPlan> planToRemoveList = new ArrayList<>(); // A structure to record how many times a server becomes the source of a plan, from // regionsToMove. Map<ServerName, Integer> sourceMap = new HashMap<>(); // We remove one of the plan which would cause source RS equals destination RS. // But we should keep in mind that the second plan from such RS should be kept. for (RegionPlan plan : regionsToMove) { // the source RS's load and index in ServerLoadList shredLoad = SnLoadMap.get(plan.getSource()); if (!sourceMap.containsKey(plan.getSource())) { sourceMap.put(plan.getSource(), 0); } sourceMap.put(plan.getSource(), sourceMap.get(plan.getSource()) + 1); if (shredLoad.getSecond() < assignLength && sourceMap.get(plan.getSource()) == 1) { planToRemoveList.add(plan); // While marked as to be removed, the count should be add back to the source RS setLoad(serverLoadList, shredLoad.getSecond(), 1); } } // Remove those marked plans from regionsToMove, // we cannot direct remove them during iterating through // regionsToMove, due to the fact that regionsToMove is a MinMaxPriorityQueue. for (RegionPlan planToRemove : planToRemoveList) { regionsToMove.remove(planToRemove); } // Step 5. // We only need to assign the regionsToMove to // the first n = regionsToMove.size() of them, with least load. // With this strategy adopted, we can gradually achieve the overall balance, // while keeping table level balanced. for (int i = 0; i < assignLength; i++) { // skip the RS that is also the source, we have removed them from regionsToMove in previous // step if (sourceMap.containsKey(serverLoadList.get(i).getServerName())) { continue; } addRegionPlan(regionsToMove, fetchFromTail, serverLoadList.get(i).getServerName(), regionsToReturn); setLoad(serverLoadList, i, 1); // resolve a possible cyclic assignment pair if we just produced one: // e.g. plan1: A -> B, plan2: B -> C => resolve plan1 to A -> C and remove plan2 List<Integer> pos = returnMap.get(regionsToReturn.get(regionsToReturn.size() - 1).getSource()); if (pos != null && pos.size() != 0) { regionsToReturn.get(pos.get(pos.size() - 1)) .setDestination(regionsToReturn.get(regionsToReturn.size() - 1).getDestination()); pos.remove(pos.size() - 1); regionsToReturn.remove(regionsToReturn.size() - 1); } } // Done balance overall }
3.68
morf_AbstractSelectStatement_alias
/** * Sets the alias for this select statement. This is useful if you are * including multiple select statements in a single select (not to be confused * with a join) and wish to reference the select statement itself. * * @param alias the alias to set. * @return the new select statement with the change applied. */ public T alias(String alias) { return copyOnWriteOrMutate( b -> b.alias(alias), () -> this.alias = alias ); }
3.68
rocketmq-connect_AvroData_fromConnectData
/** * Convert this object, in Connect data format, into an Avro object. */ public Object fromConnectData(Schema schema, Object value) { org.apache.avro.Schema avroSchema = fromConnectSchema(schema); return fromConnectData(schema, avroSchema, value); }
3.68
flink_TaskExecutorManager_removePendingTaskManagerSlots
/** * remove unused pending task manager slots. * * @param unusedResourceCounter the count of unused resources. */ public void removePendingTaskManagerSlots(ResourceCounter unusedResourceCounter) { if (!resourceAllocator.isSupported()) { return; } Preconditions.checkState(unusedResourceCounter.getResources().size() == 1); Preconditions.checkState( unusedResourceCounter.getResources().contains(defaultSlotResourceProfile)); int wantedPendingSlotsNumber = pendingSlots.size() - unusedResourceCounter.getResourceCount(defaultSlotResourceProfile); pendingSlots.entrySet().removeIf(ignore -> pendingSlots.size() > wantedPendingSlotsNumber); declareNeededResourcesWithDelay(); }
3.68
dubbo_ServiceConfig_init
/** * For early initialization of serviceMetadata. */ public void init() { if (this.initialized.compareAndSet(false, true)) { // load ServiceListeners from extension ExtensionLoader<ServiceListener> extensionLoader = this.getExtensionLoader(ServiceListener.class); this.serviceListeners.addAll(extensionLoader.getSupportedExtensionInstances()); } initServiceMetadata(provider); serviceMetadata.setServiceType(getInterfaceClass()); serviceMetadata.setTarget(getRef()); serviceMetadata.generateServiceKey(); }
3.68
flink_CompilerHints_getUniqueFields
/** * Gets the FieldSets that are unique * * @return List of FieldSet that are unique */ public Set<FieldSet> getUniqueFields() { return this.uniqueFields; }
3.68
flink_MemorySegment_compare
/** * Compares two memory segment regions with different length. * * @param seg2 Segment to compare this segment with * @param offset1 Offset of this segment to start comparing * @param offset2 Offset of seg2 to start comparing * @param len1 Length of this memory region to compare * @param len2 Length of seg2 to compare * @return 0 if equal, -1 if seg1 &lt; seg2, 1 otherwise */ public int compare(MemorySegment seg2, int offset1, int offset2, int len1, int len2) { final int minLength = Math.min(len1, len2); int c = compare(seg2, offset1, offset2, minLength); return c == 0 ? (len1 - len2) : c; }
3.68
flink_JobResult_getSerializedThrowable
/** * Returns an empty {@code Optional} if the job finished successfully, otherwise the {@code * Optional} will carry the failure cause. */ public Optional<SerializedThrowable> getSerializedThrowable() { return Optional.ofNullable(serializedThrowable); }
3.68
morf_AddColumn_getNewColumnDefinition
/** * @return {@link Column} - the definition of the new column. */ public Column getNewColumnDefinition() { return newColumnDefinition; }
3.68
hbase_AccessChecker_performOnSuperuser
/** * Check if caller is granting or revoking superusers' or supergroups' permissions. * @param request request name * @param caller caller * @param userToBeChecked target user or group * @throws IOException AccessDeniedException if the target user is a superuser */ public void performOnSuperuser(String request, User caller, String userToBeChecked) throws IOException { List<String> userGroups = new ArrayList<>(); userGroups.add(userToBeChecked); if (!AuthUtil.isGroupPrincipal(userToBeChecked)) { for (String group : getUserGroups(userToBeChecked)) { userGroups.add(AuthUtil.toGroupEntry(group)); } } for (String name : userGroups) { if (Superusers.isSuperUser(name)) { AuthResult result = AuthResult.deny(request, "Granting or revoking superusers' or supergroups' permissions is not allowed", caller, Action.ADMIN, NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR); logResult(result); throw new AccessDeniedException(result.getReason()); } } }
3.68
framework_VaadinRequest_getCurrent
/** * Gets the currently processed Vaadin request. The current request is * automatically defined when the request is started. The current request * can not be used in e.g. background threads because of the way server * implementations reuse request instances. * * @return the current Vaadin request instance if available, otherwise * <code>null</code> * @since 8.1 */ public static VaadinRequest getCurrent() { return CurrentInstance.get(VaadinRequest.class); }
3.68
flink_TwoInputUdfOperator_withForwardedFieldsFirst
/** * Adds semantic information about forwarded fields of the first input of the user-defined * function. The forwarded fields information declares fields which are never modified by the * function and which are forwarded at the same position to the output or unchanged copied to * another position in the output. * * <p>Fields that are forwarded at the same position are specified by their position. The * specified position must be valid for the input and output data type and have the same type. * For example <code>withForwardedFieldsFirst("f2")</code> declares that the third field of a * Java input tuple from the first input is copied to the third field of an output tuple. * * <p>Fields which are unchanged copied from the first input to another position in the output * are declared by specifying the source field reference in the first input and the target field * reference in the output. {@code withForwardedFieldsFirst("f0->f2")} denotes that the first * field of the first input Java tuple is unchanged copied to the third field of the Java output * tuple. When using a wildcard ("*") ensure that the number of declared fields and their types * in first input and output type match. * * <p>Multiple forwarded fields can be annotated in one ({@code withForwardedFieldsFirst("f2; * f3->f0; f4")}) or separate Strings ({@code withForwardedFieldsFirst("f2", "f3->f0", "f4")}). * Please refer to the JavaDoc of {@link org.apache.flink.api.common.functions.Function} or * Flink's documentation for details on field references such as nested fields and wildcard. * * <p>It is not possible to override existing semantic information about forwarded fields of the * first input which was for example added by a {@link * org.apache.flink.api.java.functions.FunctionAnnotation.ForwardedFieldsFirst} class * annotation. * * <p><b>NOTE: Adding semantic information for functions is optional! If used correctly, * semantic information can help the Flink optimizer to generate more efficient execution plans. * However, incorrect semantic information can cause the optimizer to generate incorrect * execution plans which compute wrong results! So be careful when adding semantic information. * </b> * * @param forwardedFieldsFirst A list of forwarded field expressions for the first input of the * function. * @return This operator with annotated forwarded field information. * @see org.apache.flink.api.java.functions.FunctionAnnotation * @see org.apache.flink.api.java.functions.FunctionAnnotation.ForwardedFieldsFirst */ @SuppressWarnings("unchecked") public O withForwardedFieldsFirst(String... forwardedFieldsFirst) { if (this.udfSemantics == null || this.analyzedUdfSemantics) { // extract semantic properties from function annotations setSemanticProperties(extractSemanticAnnotationsFromUdf(getFunction().getClass())); } if (this.udfSemantics == null || this.analyzedUdfSemantics) { setSemanticProperties(new DualInputSemanticProperties()); SemanticPropUtil.getSemanticPropsDualFromString( this.udfSemantics, forwardedFieldsFirst, null, null, null, null, null, getInput1Type(), getInput2Type(), getResultType()); } else { if (this.udfWithForwardedFieldsFirstAnnotation(getFunction().getClass())) { // refuse semantic information as it would override the function annotation throw new SemanticProperties.InvalidSemanticAnnotationException( "Forwarded field information " + "has already been added by a function annotation for the first input of this operator. 
" + "Cannot overwrite function annotations."); } else { SemanticPropUtil.getSemanticPropsDualFromString( this.udfSemantics, forwardedFieldsFirst, null, null, null, null, null, getInput1Type(), getInput2Type(), getResultType()); } } O returnType = (O) this; return returnType; }
3.68
flink_SchemaTestUtils_open
/** * Opens the given schema with a mock initialization context. * * @param schema to open * @throws RuntimeException if the schema throws an exception */ public static void open(DeserializationSchema<?> schema) { try { schema.open(new DummyInitializationContext()); } catch (Exception e) { throw new RuntimeException(e); } }
3.68
aws-saas-boost_KeycloakUserDataAccessLayer_toSystemUser
// VisibleForTesting static SystemUser toSystemUser(UserRepresentation keycloakUser) { SystemUser user = null; if (keycloakUser != null) { user = new SystemUser(); user.setId(keycloakUser.getId()); user.setCreated(LocalDateTime.ofInstant( Instant.ofEpochMilli(keycloakUser.getCreatedTimestamp()), ZoneId.of("UTC"))); // Keycloak doesn't track when a user was last modified user.setModified(null); user.setActive(keycloakUser.isEnabled()); user.setUsername(keycloakUser.getUsername()); user.setFirstName(keycloakUser.getFirstName()); user.setLastName(keycloakUser.getLastName()); user.setEmail(keycloakUser.getEmail()); user.setEmailVerified(keycloakUser.isEmailVerified()); if (!keycloakUser.getRequiredActions().isEmpty()) { user.setStatus(keycloakUser.getRequiredActions().get(0)); } } return user; }
3.68
dubbo_StreamUtils_convertSingleAttachment
/** * Convert a single user attachment value to metadata. * * @param headers outbound headers * @param key metadata key * @param v metadata value (only String, Number, Boolean and byte array values are supported) */ private static void convertSingleAttachment(DefaultHttp2Headers headers, String key, Object v) { try { if (v instanceof String || v instanceof Number || v instanceof Boolean) { String str = v.toString(); headers.set(key, str); } else if (v instanceof byte[]) { String str = encodeBase64ASCII((byte[]) v); headers.set(key + TripleConstant.HEADER_BIN_SUFFIX, str); } else { LOGGER.warn( PROTOCOL_UNSUPPORTED, "", "", "Unsupported attachment k: " + key + " class: " + v.getClass().getName()); } } catch (Throwable t) { LOGGER.warn( PROTOCOL_UNSUPPORTED, "", "", "Meet exception when convert single attachment key:" + key + " value=" + v, t); } }
3.68
hbase_AsyncRegionLocationCache_size
/** * Returns the size of the region locations cache */ public int size() { return cache.size(); }
3.68
hbase_ByteBufferUtils_searchDelimiterIndex
/** * Find index of passed delimiter. * @return Index of delimiter having started from start of <code>b</code> moving rightward. */ public static int searchDelimiterIndex(ByteBuffer b, int offset, final int length, final int delimiter) { for (int i = offset, n = offset + length; i < n; i++) { if (b.get(i) == delimiter) { return i; } } return -1; }
3.68
morf_HumanReadableStatementHelper_generateRemoveTableString
/** * Generates a human-readable "Remove Table" string. * * @param table the name of the table which will be removed * @return a string containing the human-readable version of the action */ public static String generateRemoveTableString(final Table table) { return String.format("Remove table %s", table.getName()); }
3.68
hbase_SnapshotInfo_printFiles
/** * Collect the hfiles and logs statistics of the snapshot and dump the file list if requested and * the collected information. */ private void printFiles(final boolean showFiles, final boolean showStats) throws IOException { if (showFiles) { System.out.println("Snapshot Files"); System.out.println("----------------------------------------"); } // Collect information about hfiles and logs in the snapshot final SnapshotProtos.SnapshotDescription snapshotDesc = snapshotManifest.getSnapshotDescription(); final String table = snapshotDesc.getTable(); final SnapshotDescription desc = ProtobufUtil.createSnapshotDesc(snapshotDesc); final SnapshotStats stats = new SnapshotStats(this.getConf(), this.fs, desc); SnapshotReferenceUtil.concurrentVisitReferencedFiles(getConf(), fs, snapshotManifest, "SnapshotInfo", new SnapshotReferenceUtil.SnapshotVisitor() { @Override public void storeFile(final RegionInfo regionInfo, final String family, final SnapshotRegionManifest.StoreFile storeFile) throws IOException { if (storeFile.hasReference()) return; SnapshotStats.FileInfo info = stats.addStoreFile(regionInfo, family, storeFile, null); if (showFiles) { String state = info.getStateToString(); System.out.printf("%8s %s/%s/%s/%s %s%n", (info.isMissing() ? "-" : fileSizeToString(info.getSize())), table, regionInfo.getEncodedName(), family, storeFile.getName(), state == null ? "" : "(" + state + ")"); } } }); // Dump the stats System.out.println(); if (stats.isSnapshotCorrupted()) { System.out.println("**************************************************************"); System.out.printf("BAD SNAPSHOT: %d hfile(s) and %d log(s) missing.%n", stats.getMissingStoreFilesCount(), stats.getMissingLogsCount()); System.out.printf(" %d hfile(s) corrupted.%n", stats.getCorruptedStoreFilesCount()); System.out.println("**************************************************************"); } if (showStats) { System.out.printf( "%d HFiles (%d in archive, %d in mob storage), total size %s " + "(%.2f%% %s shared with the source table, %.2f%% %s in mob dir)%n", stats.getStoreFilesCount(), stats.getArchivedStoreFilesCount(), stats.getMobStoreFilesCount(), fileSizeToString(stats.getStoreFilesSize()), stats.getSharedStoreFilePercentage(), fileSizeToString(stats.getSharedStoreFilesSize()), stats.getMobStoreFilePercentage(), fileSizeToString(stats.getMobStoreFilesSize())); System.out.printf("%d Logs, total size %s%n", stats.getLogsCount(), fileSizeToString(stats.getLogsSize())); System.out.println(); } }
3.68
hbase_ReplicationSourceManager_releaseWALEntryBatchBufferQuota
/** * Releases the buffer quota of the {@link WALEntryBatch} which was acquired by * {@link ReplicationSourceManager#acquireWALEntryBufferQuota}. * @return the released buffer quota size. */ long releaseWALEntryBatchBufferQuota(WALEntryBatch walEntryBatch) { long usedBufferSize = walEntryBatch.getUsedBufferSize(); if (usedBufferSize > 0) { this.releaseBufferQuota(usedBufferSize); } return usedBufferSize; }
3.68
flink_DataSourceNode_getOperator
/** * Gets the contract object for this data source node. * * @return The contract. */ @Override public GenericDataSourceBase<?, ?> getOperator() { return (GenericDataSourceBase<?, ?>) super.getOperator(); }
3.68
hudi_HoodieLogFileReader_prev
/** * This is a reverse iterator. Note: at any point, an instance of HoodieLogFileReader should either iterate in reverse * (prev) or forward (next); doing both in the same instance is not supported. WARNING: every call to prev() should be * preceded by hasPrev(). */ @Override public HoodieLogBlock prev() throws IOException { if (!this.reverseReader) { throw new HoodieNotSupportedException("Reverse log reader has not been enabled"); } long blockSize = inputStream.readLong(); long blockEndPos = inputStream.getPos(); // the block size covers everything about a block, including the length itself try { inputStream.seek(reverseLogFilePosition - blockSize); } catch (Exception e) { // this could be a corrupt block inputStream.seek(blockEndPos); throw new CorruptedLogFileException("Found possible corrupted block, cannot read log file in reverse, " + "fallback to forward reading of logfile"); } boolean hasNext = hasNext(); reverseLogFilePosition -= blockSize; lastReverseLogFilePosition = reverseLogFilePosition; return next(); }
3.68
querydsl_MetaDataExporter_setTableNamePattern
/** * Set the table name pattern filter to be used * * @param tableNamePattern a table name pattern; must match the * table name as it is stored in the database (default: null) */ public void setTableNamePattern(@Nullable String tableNamePattern) { this.tableNamePattern = tableNamePattern; }
3.68
hbase_ResultScanner_next
/** * Get nbRows rows. How many RPCs are made is determined by the {@link Scan#setCaching(int)} * setting (or hbase.client.scanner.caching in hbase-site.xml). * @param nbRows number of rows to return * @return Between zero and nbRows rowResults. Scan is done if returned array is of zero-length * (We never return null). */ default Result[] next(int nbRows) throws IOException { List<Result> resultSets = new ArrayList<>(nbRows); for (int i = 0; i < nbRows; i++) { Result next = next(); if (next != null) { resultSets.add(next); } else { break; } } return resultSets.toArray(new Result[0]); }
3.68
hbase_BalanceResponse_isBalancerRan
/** * Returns true if the balancer ran, otherwise false. The balancer may not run for a variety of * reasons, such as: another balance is running, there are regions in transition, the cluster is * in maintenance mode, etc. */ public boolean isBalancerRan() { return balancerRan; }
3.68
zxing_IntentResult_getRawBytes
/** * @return raw bytes of the barcode content, if applicable, or null otherwise */ public byte[] getRawBytes() { return rawBytes; }
3.68
pulsar_ManagedLedgerImpl_getNumberOfEntries
/** * Get the number of entries between a contiguous range of two positions. * * @param range * the position range * @return the count of entries */ long getNumberOfEntries(Range<PositionImpl> range) { PositionImpl fromPosition = range.lowerEndpoint(); boolean fromIncluded = range.lowerBoundType() == BoundType.CLOSED; PositionImpl toPosition = range.upperEndpoint(); boolean toIncluded = range.upperBoundType() == BoundType.CLOSED; if (fromPosition.getLedgerId() == toPosition.getLedgerId()) { // If the 2 positions are in the same ledger long count = toPosition.getEntryId() - fromPosition.getEntryId() - 1; count += fromIncluded ? 1 : 0; count += toIncluded ? 1 : 0; return count; } else { long count = 0; // If the from & to are pointing to different ledgers, then we need to : // 1. Add the entries in the ledger pointed by toPosition count += toPosition.getEntryId(); count += toIncluded ? 1 : 0; // 2. Add the entries in the ledger pointed by fromPosition LedgerInfo li = ledgers.get(fromPosition.getLedgerId()); if (li != null) { count += li.getEntries() - (fromPosition.getEntryId() + 1); count += fromIncluded ? 1 : 0; } // 3. Add the whole ledgers entries in between for (LedgerInfo ls : ledgers.subMap(fromPosition.getLedgerId(), false, toPosition.getLedgerId(), false) .values()) { count += ls.getEntries(); } return count; } }
3.68
flink_SinkTestSuiteBase_testScaleDown
/** * Test connector sink restart from a completed savepoint with a lower parallelism. * * <p>This test will create a sink in the external system, generate a collection of test data * and write one half of it to this sink with the Flink job at parallelism 4 first. Then * stop the job and restart the same job from the completed savepoint with a lower parallelism of 2. * After the job is running again, write the other half to the sink and compare the result. * * <p>In order to pass this test, the number of records produced by Flink needs to equal the * number of generated test records. The records in the sink are compared to the test data * according to the configured semantic. There's no requirement for record order. */ @TestTemplate @DisplayName("Test sink restarting with a lower parallelism") public void testScaleDown( TestEnvironment testEnv, DataStreamSinkExternalContext<T> externalContext, CheckpointingMode semantic) throws Exception { restartFromSavepoint(testEnv, externalContext, semantic, 4, 2); }
3.68
hmily_HmilyParticipantUndoCacheManager_cacheHmilyParticipantUndo
/** * Cache hmily participant undo. * * @param participantId the participant id * @param hmilyParticipantUndo the hmily participant undo */ public void cacheHmilyParticipantUndo(final Long participantId, final HmilyParticipantUndo hmilyParticipantUndo) { List<HmilyParticipantUndo> existList = get(participantId); if (CollectionUtils.isEmpty(existList)) { loadingCache.put(participantId, Lists.newArrayList(hmilyParticipantUndo)); } else { existList.add(hmilyParticipantUndo); loadingCache.put(participantId, existList); } }
3.68
framework_DropIndexCalculator_alwaysDropToEnd
/** * Returns a calculator for always dropping items to the end of the target * grid, regardless of drop position. * * @return the created drop index calculator */ static <T> DropIndexCalculator<T> alwaysDropToEnd() { return (GridDropEvent<T> event) -> Integer.MAX_VALUE; }
3.68
hadoop_TaskId_write
/** {@inheritDoc} */ public final void write(final DataOutput out) throws IOException { jobId.write(out); WritableUtils.writeVLong(out, taskId); }
3.68
hadoop_FlowActivityTableRW_createTable
/* * (non-Javadoc) * * @see * org.apache.hadoop.yarn.server.timelineservice.storage.BaseTableRW# * createTable(org.apache.hadoop.hbase.client.Admin, * org.apache.hadoop.conf.Configuration) */ public void createTable(Admin admin, Configuration hbaseConf) throws IOException { TableName table = getTableName(hbaseConf); if (admin.tableExists(table)) { // do not disable / delete existing table // similar to the approach taken by map-reduce jobs when // output directory exists throw new IOException("Table " + table.getNameAsString() + " already exists."); } HTableDescriptor flowActivityTableDescp = new HTableDescriptor(table); HColumnDescriptor infoCF = new HColumnDescriptor(FlowActivityColumnFamily.INFO.getBytes()); infoCF.setBloomFilterType(BloomType.ROWCOL); flowActivityTableDescp.addFamily(infoCF); infoCF.setMinVersions(1); infoCF.setMaxVersions(DEFAULT_METRICS_MAX_VERSIONS); // TODO: figure the split policy before running in production admin.createTable(flowActivityTableDescp); LOG.info("Status of table creation for " + table.getNameAsString() + "=" + admin.tableExists(table)); }
3.68
flink_Configuration_getEnum
/** * Returns the value associated with the given config option as an enum. * * @param enumClass The return enum class * @param configOption The configuration option * @throws IllegalArgumentException If the string associated with the given config option cannot * be parsed as a value of the provided enum class. */ @PublicEvolving public <T extends Enum<T>> T getEnum( final Class<T> enumClass, final ConfigOption<String> configOption) { checkNotNull(enumClass, "enumClass must not be null"); checkNotNull(configOption, "configOption must not be null"); Object rawValue = getRawValueFromOption(configOption).orElseGet(configOption::defaultValue); try { return ConfigurationUtils.convertToEnum(rawValue, enumClass); } catch (IllegalArgumentException ex) { final String errorMessage = String.format( "Value for config option %s must be one of %s (was %s)", configOption.key(), Arrays.toString(enumClass.getEnumConstants()), rawValue); throw new IllegalArgumentException(errorMessage); } }
3.68
querydsl_GroupByBuilder_iterate
/** * Get the results as a closeable iterator * * @param expression projection * @return new result transformer */ public <V> ResultTransformer<CloseableIterator<V>> iterate(FactoryExpression<V> expression) { final FactoryExpression<V> transformation = FactoryExpressionUtils.wrap(expression); List<Expression<?>> args = transformation.getArgs(); return new GroupByIterate<K, V>(key, args.toArray(new Expression<?>[0])) { @Override protected V transform(Group group) { // XXX Isn't group.toArray() suitable here? List<Object> args = new ArrayList<Object>(groupExpressions.size() - 1); for (int i = 1; i < groupExpressions.size(); i++) { args.add(group.getGroup(groupExpressions.get(i))); } return transformation.newInstance(args.toArray()); } }; }
3.68
flink_WindowedStream_allowedLateness
/** * Sets the time by which elements are allowed to be late. Elements that arrive behind the * watermark by more than the specified time will be dropped. By default, the allowed lateness * is {@code 0L}. * * <p>Setting an allowed lateness is only valid for event-time windows. */ @PublicEvolving public WindowedStream<T, K, W> allowedLateness(Time lateness) { builder.allowedLateness(lateness); return this; }
3.68
hadoop_S3AInputPolicy_getPolicy
/** * Choose an access policy. * @param name strategy name from a configuration option, etc. * @param defaultPolicy default policy to fall back to. * @return the chosen strategy */ public static S3AInputPolicy getPolicy( String name, @Nullable S3AInputPolicy defaultPolicy) { String trimmed = name.trim().toLowerCase(Locale.ENGLISH); switch (trimmed) { case FS_OPTION_OPENFILE_READ_POLICY_ADAPTIVE: case FS_OPTION_OPENFILE_READ_POLICY_DEFAULT: case Constants.INPUT_FADV_NORMAL: return Normal; // all these options currently map to random IO. case FS_OPTION_OPENFILE_READ_POLICY_RANDOM: case FS_OPTION_OPENFILE_READ_POLICY_VECTOR: return Random; case FS_OPTION_OPENFILE_READ_POLICY_SEQUENTIAL: case FS_OPTION_OPENFILE_READ_POLICY_WHOLE_FILE: return Sequential; default: return defaultPolicy; } }
3.68
flink_TypeSerializerSnapshot_readVersionedSnapshot
/** * Reads a snapshot from the stream, resolving and instantiating its snapshot class. * * <p>This method reads snapshots written by {@link #writeVersionedSnapshot(DataOutputView, * TypeSerializerSnapshot)}. */ static <T> TypeSerializerSnapshot<T> readVersionedSnapshot(DataInputView in, ClassLoader cl) throws IOException { final TypeSerializerSnapshot<T> snapshot = TypeSerializerSnapshotSerializationUtil.readAndInstantiateSnapshotClass(in, cl); int version = in.readInt(); snapshot.readSnapshot(version, in, cl); return snapshot; }
3.68
hadoop_OBSCommonUtils_innerIsFolderEmpty
// Used to check if a folder is empty or not. static boolean innerIsFolderEmpty(final OBSFileSystem owner, final String key) throws FileNotFoundException, ObsException { String obsKey = maybeAddTrailingSlash(key); ListObjectsRequest request = new ListObjectsRequest(); request.setBucketName(owner.getBucket()); request.setPrefix(obsKey); request.setDelimiter("/"); request.setMaxKeys(MAX_KEYS_FOR_CHECK_FOLDER_EMPTY); owner.getSchemeStatistics().incrementReadOps(1); ObjectListing objects = owner.getObsClient().listObjects(request); if (!objects.getCommonPrefixes().isEmpty() || !objects.getObjects() .isEmpty()) { if (isFolderEmpty(obsKey, objects)) { LOG.debug("Found empty directory {}", obsKey); return true; } if (LOG.isDebugEnabled()) { LOG.debug("Found path as directory (with /): {}/{}", objects.getCommonPrefixes().size(), objects.getObjects().size()); for (ObsObject summary : objects.getObjects()) { LOG.debug("Summary: {} {}", summary.getObjectKey(), summary.getMetadata().getContentLength()); } for (String prefix : objects.getCommonPrefixes()) { LOG.debug("Prefix: {}", prefix); } } LOG.debug("Found non-empty directory {}", obsKey); return false; } else if (obsKey.isEmpty()) { LOG.debug("Found root directory"); return true; } else if (owner.isFsBucket()) { LOG.debug("Found empty directory {}", obsKey); return true; } LOG.debug("Not Found: {}", obsKey); throw new FileNotFoundException("No such file or directory: " + obsKey); }
3.68
hbase_QuotaCache_updateQuotaFactors
/** * Update quota factors which is used to divide cluster scope quota into machine scope quota For * user/namespace/user over namespace quota, use [1 / RSNum] as machine factor. For table/user * over table quota, use [1 / TotalTableRegionNum * MachineTableRegionNum] as machine factor. */ private void updateQuotaFactors() { // Update machine quota factor ClusterMetrics clusterMetrics; try { clusterMetrics = rsServices.getConnection().getAdmin() .getClusterMetrics(EnumSet.of(Option.SERVERS_NAME, Option.TABLE_TO_REGIONS_COUNT)); } catch (IOException e) { LOG.warn("Failed to get cluster metrics needed for updating quotas", e); return; } int rsSize = clusterMetrics.getServersName().size(); if (rsSize != 0) { // TODO if use rs group, the cluster limit should be shared by the rs group machineQuotaFactor = 1.0 / rsSize; } Map<TableName, RegionStatesCount> tableRegionStatesCount = clusterMetrics.getTableRegionStatesCount(); // Update table machine quota factors for (TableName tableName : tableQuotaCache.keySet()) { if (tableRegionStatesCount.containsKey(tableName)) { double factor = 1; try { long regionSize = tableRegionStatesCount.get(tableName).getOpenRegions(); if (regionSize == 0) { factor = 0; } else { int localRegionSize = rsServices.getRegions(tableName).size(); factor = 1.0 * localRegionSize / regionSize; } } catch (IOException e) { LOG.warn("Get table regions failed: {}", tableName, e); } tableMachineQuotaFactors.put(tableName, factor); } else { // TableName might have already been dropped (outdated) tableMachineQuotaFactors.remove(tableName); } } }
3.68
flink_HiveStatsUtil_getPartialPartitionVals
/** * Get the partial partition values whose {@param partitionColIndex} partition column value will * be {@param defaultPartitionName} and whose values for the preceding partition columns will be * empty strings. * * <p>For example, if partitionColIndex = 3, defaultPartitionName = __default_partition__, the * partial partition values will be ["", "", "", __default_partition__]. * * <p>It's useful when we want to list all the Hive partitions for which the value of one * specific partition column is null. */ private static List<String> getPartialPartitionVals( int partitionColIndex, String defaultPartitionName) { List<String> partitionValues = new ArrayList<>(); for (int i = 0; i < partitionColIndex; i++) { partitionValues.add(StringUtils.EMPTY); } partitionValues.add(defaultPartitionName); return partitionValues; }
3.68
hudi_HoodieTableMetadataUtil_convertMetadataToRecords
/** * Convert rollback action metadata to metadata table records. * <p> * We only need to handle FILES partition here as HUDI rollbacks on MOR table may end up adding a new log file. All other partitions * are handled by actual rollback of the deltacommit which added records to those partitions. */ public static Map<MetadataPartitionType, HoodieData<HoodieRecord>> convertMetadataToRecords( HoodieEngineContext engineContext, HoodieTableMetaClient dataTableMetaClient, HoodieRollbackMetadata rollbackMetadata, String instantTime) { List<HoodieRecord> filesPartitionRecords = convertMetadataToRollbackRecords(rollbackMetadata, instantTime, dataTableMetaClient); final HoodieData<HoodieRecord> rollbackRecordsRDD = filesPartitionRecords.isEmpty() ? engineContext.emptyHoodieData() : engineContext.parallelize(filesPartitionRecords, filesPartitionRecords.size()); return Collections.singletonMap(MetadataPartitionType.FILES, rollbackRecordsRDD); }
3.68
framework_AbstractComponent_isCaptionAsHtml
/** * Checks whether captions are rendered as HTML. * <p> * The default is false, i.e. the caption is rendered as plain text. * * @return true if the captions are rendered as HTML, false if rendered as * plain text */ public boolean isCaptionAsHtml() { return getState(false).captionAsHtml; }
3.68
framework_VGridLayout_hiddenEmptyRow
/** * Checks if it is ok to hide (or ignore) the given row. * * @param rowIndex * the row to check * @return true, if the row should be interpreted as non-existent (hides * extra spacing) */ private boolean hiddenEmptyRow(int rowIndex) { return hideEmptyRowsAndColumns && !rowHasComponentsOrRowSpan(rowIndex) && !explicitRowRatios.contains(rowIndex); }
3.68
flink_ZooKeeperStateHandleStore_setStateHandle
// this method is provided for the sole purpose of easier testing @VisibleForTesting protected void setStateHandle(String path, byte[] serializedStateHandle, int expectedVersion) throws Exception { // Replace state handle in ZooKeeper. We use idempotent set here to avoid a scenario, where // we retry an update, because we didn't receive a proper acknowledgement due to temporary // connection loss. Without idempotent flag this would result in a BadVersionException, // because the version on server no longer matches our expected version. With this flag, // when curator receives BadVersionException internally, it checks whether the content on // the server matches our intended update and its version is our expectedVersion + 1. client.setData() .idempotent() .withVersion(expectedVersion) .forPath(path, serializedStateHandle); }
3.68
open-banking-gateway_Xs2aConsentInfo_isWrongPassword
/** * Checks whether the PSU password that was sent to the ASPSP was wrong. */ public boolean isWrongPassword(Xs2aContext ctx) { return null != ctx.getWrongAuthCredentials() && ctx.getWrongAuthCredentials(); }
3.68
querydsl_SQLExpressions_all
/** * Get an aggregate all expression for the given boolean expression */ public static BooleanExpression all(BooleanExpression expr) { return Expressions.booleanOperation(Ops.AggOps.BOOLEAN_ALL, expr); }
3.68
framework_HorizontalLayoutConnector_getWidget
/* * (non-Javadoc) * * @see com.vaadin.client.ui.orderedlayout.AbstractOrderedLayoutConnector# * getWidget () */ @Override public VHorizontalLayout getWidget() { return (VHorizontalLayout) super.getWidget(); }
3.68
pulsar_ManagedLedgerConfig_setInactiveLedgerRollOverTime
/** * Set rollOver time for inactive ledgers. * * @param inactiveLedgerRollOverTimeMs * @param unit */ public void setInactiveLedgerRollOverTime(int inactiveLedgerRollOverTimeMs, TimeUnit unit) { this.inactiveLedgerRollOverTimeMs = (int) unit.toMillis(inactiveLedgerRollOverTimeMs); }
3.68
framework_Navigator_updateNavigationState
/** * Update the internal state of the navigator (parameters, previous * successful URL fragment navigated to) when navigation succeeds. * * Normally this method should not be overridden nor called directly from * application code, but it can be called by a custom implementation of * {@link #navigateTo(View, String, String)}. * * @since 7.6 * @param event * a view change event with details of the change */ protected void updateNavigationState(ViewChangeEvent event) { String viewName = event.getViewName(); String parameters = event.getParameters(); if (null != viewName && getStateManager() != null) { String navigationState = viewName; if (!parameters.isEmpty()) { navigationState += "/" + parameters; } if (!navigationState.equals(getStateManager().getState())) { getStateManager().setState(navigationState); } currentNavigationState = navigationState; } }
3.68
flink_StreamExecutionEnvironment_getCheckpointConfig
/** * Gets the checkpoint config, which defines values like checkpoint interval, delay between * checkpoints, etc. * * @return The checkpoint config. */ public CheckpointConfig getCheckpointConfig() { return checkpointCfg; }
3.68
zxing_Detector_getCorrectedParameterData
/**
 * Corrects the parameter bits using Reed-Solomon algorithm.
 *
 * @param parameterData parameter bits
 * @param compact true if this is a compact Aztec code
 * @return the corrected parameter
 * @throws NotFoundException if the array contains too many errors
 */
private static CorrectedParameter getCorrectedParameterData(long parameterData, boolean compact)
    throws NotFoundException {
  int numCodewords;
  int numDataCodewords;
  if (compact) {
    numCodewords = 7;
    numDataCodewords = 2;
  } else {
    numCodewords = 10;
    numDataCodewords = 4;
  }

  int numECCodewords = numCodewords - numDataCodewords;
  int[] parameterWords = new int[numCodewords];
  for (int i = numCodewords - 1; i >= 0; --i) {
    parameterWords[i] = (int) parameterData & 0xF;
    parameterData >>= 4;
  }

  int errorsCorrected = 0;
  try {
    ReedSolomonDecoder rsDecoder = new ReedSolomonDecoder(GenericGF.AZTEC_PARAM);
    errorsCorrected = rsDecoder.decodeWithECCount(parameterWords, numECCodewords);
  } catch (ReedSolomonException ignored) {
    throw NotFoundException.getNotFoundInstance();
  }

  // Toss the error correction. Just return the data as an integer
  int result = 0;
  for (int i = 0; i < numDataCodewords; i++) {
    result = (result << 4) + parameterWords[i];
  }
  return new CorrectedParameter(result, errorsCorrected);
}
3.68
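As an aside, the 4-bit codeword packing and unpacking in the method above is plain bit arithmetic and can be exercised on its own. The following standalone sketch (hypothetical class and method names, no zxing dependency) unpacks a long into 4-bit words and repacks the leading data words the same way the method does.

public final class NibbleWords {
    // Unpack the lowest numCodewords 4-bit words, most significant word first.
    static int[] unpack(long packed, int numCodewords) {
        int[] words = new int[numCodewords];
        for (int i = numCodewords - 1; i >= 0; --i) {
            words[i] = (int) packed & 0xF;
            packed >>= 4;
        }
        return words;
    }

    // Repack the first numDataWords 4-bit words into an int, matching the method above.
    static int packData(int[] words, int numDataWords) {
        int result = 0;
        for (int i = 0; i < numDataWords; i++) {
            result = (result << 4) + words[i];
        }
        return result;
    }

    public static void main(String[] args) {
        int[] words = unpack(0x3A7FL, 4);        // [3, 10, 7, 15]
        System.out.println(packData(words, 2));  // 58 = (3 << 4) + 10
    }
}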
hudi_StreamWriteFunction_bufferRecord
/**
 * Buffers the given record.
 *
 * <p>Flush the data bucket first if the bucket records size is greater than
 * the configured value {@link FlinkOptions#WRITE_BATCH_SIZE}.
 *
 * <p>Flush the max size data bucket if the total buffer size exceeds the configured
 * threshold {@link FlinkOptions#WRITE_TASK_MAX_SIZE}.
 *
 * @param value HoodieRecord
 */
protected void bufferRecord(HoodieRecord<?> value) {
  writeMetrics.markRecordIn();
  final String bucketID = getBucketID(value);

  DataBucket bucket = this.buckets.computeIfAbsent(bucketID,
      k -> new DataBucket(this.config.getDouble(FlinkOptions.WRITE_BATCH_SIZE), value));
  final DataItem item = DataItem.fromHoodieRecord(value);

  bucket.records.add(item);

  boolean flushBucket = bucket.detector.detect(item);
  boolean flushBuffer = this.tracer.trace(bucket.detector.lastRecordSize);
  // update buffer metrics after tracing buffer size
  writeMetrics.setWriteBufferedSize(this.tracer.bufferSize);
  if (flushBucket) {
    if (flushBucket(bucket)) {
      this.tracer.countDown(bucket.detector.totalSize);
      bucket.reset();
    }
  } else if (flushBuffer) {
    // find the max size bucket and flush it out
    DataBucket bucketToFlush = this.buckets.values().stream()
        .max(Comparator.comparingLong(b -> b.detector.totalSize))
        .orElseThrow(NoSuchElementException::new);
    if (flushBucket(bucketToFlush)) {
      this.tracer.countDown(bucketToFlush.detector.totalSize);
      bucketToFlush.reset();
    } else {
      LOG.warn("The buffer size hits the threshold {}, but still flush the max size data bucket failed!",
          this.tracer.maxBufferSize);
    }
  }
}
3.68
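A minimal, self-contained sketch of the same buffering policy, with hypothetical names and no Hudi or Flink types: records accumulate per bucket, a full bucket is flushed on its own, and when the overall buffer crosses a global threshold the largest bucket is evicted first.

import java.util.Comparator;
import java.util.HashMap;
import java.util.Map;
import java.util.NoSuchElementException;

class BufferPolicySketch {
    static final long BUCKET_LIMIT = 64;   // assumed per-bucket flush size
    static final long TOTAL_LIMIT = 256;   // assumed global buffer ceiling

    static class Bucket { long size; }

    final Map<String, Bucket> buckets = new HashMap<>();
    long totalSize;

    void add(String bucketId, long recordSize) {
        Bucket bucket = buckets.computeIfAbsent(bucketId, k -> new Bucket());
        bucket.size += recordSize;
        totalSize += recordSize;

        if (bucket.size >= BUCKET_LIMIT) {
            flush(bucketId, bucket);                   // the bucket itself is full
        } else if (totalSize >= TOTAL_LIMIT) {
            // global pressure: evict the largest bucket first
            Map.Entry<String, Bucket> largest = buckets.entrySet().stream()
                .max(Comparator.comparingLong(e -> e.getValue().size))
                .orElseThrow(NoSuchElementException::new);
            flush(largest.getKey(), largest.getValue());
        }
    }

    void flush(String id, Bucket bucket) {
        System.out.println("flushing " + id + " (" + bucket.size + " bytes)");
        totalSize -= bucket.size;
        bucket.size = 0;
    }
}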
dubbo_ServiceInstanceMetadataUtils_getMetadataStorageType
/**
 * Get the metadata storage type specified by the peer instance.
 *
 * @return storage type, remote or local
 */
public static String getMetadataStorageType(ServiceInstance serviceInstance) {
    Map<String, String> metadata = serviceInstance.getMetadata();
    return metadata.getOrDefault(METADATA_STORAGE_TYPE_PROPERTY_NAME, DEFAULT_METADATA_STORAGE_TYPE);
}
3.68
druid_ListDG_linkLast
/*
 * Links the given node to the end of the list.
 */
private void linkLast(ENode list, ENode node) {
    ENode p = list;
    while (p.nextEdge != null) {
        p = p.nextEdge;
    }
    p.nextEdge = node;
}
3.68
hadoop_YarnServerSecurityUtils_updateAMRMToken
/**
 * Update the new AMRMToken into the ugi used for RM proxy.
 *
 * @param token the new AMRMToken sent by RM
 * @param user ugi used for RM proxy
 * @param conf configuration
 */
public static void updateAMRMToken(
    org.apache.hadoop.yarn.api.records.Token token, UserGroupInformation user,
    Configuration conf) {
  Token<AMRMTokenIdentifier> amrmToken = new Token<AMRMTokenIdentifier>(
      token.getIdentifier().array(), token.getPassword().array(),
      new Text(token.getKind()), new Text(token.getService()));
  // Preserve the token service sent by the RM when adding the token
  // to ensure we replace the previous token setup by the RM.
  // Afterwards we can update the service address for the RPC layer.
  user.addToken(amrmToken);
  amrmToken.setService(ClientRMProxy.getAMRMTokenService(conf));
}
3.68
hudi_ListBasedIndexFileFilter_shouldCompareWithFile
/**
 * If the file has no key ranges, we have no choice but to compare the record against it.
 * If it does have key ranges, only compare when the record key falls within the range.
 */
protected boolean shouldCompareWithFile(BloomIndexFileInfo indexInfo, String recordKey) {
    return !indexInfo.hasKeyRanges() || indexInfo.isKeyInRange(recordKey);
}
3.68
flink_SortOperationFactory_createLimitWithOffset
/**
 * Creates a valid {@link SortQueryOperation} with offset (possibly merged into a preceding
 * {@link SortQueryOperation}).
 *
 * @param offset offset to start from
 * @param child relational expression on top of which to apply the sort operation
 * @param postResolverFactory factory for creating resolved expressions
 * @return valid sort operation with applied offset
 */
QueryOperation createLimitWithOffset(
        int offset, QueryOperation child, PostResolverFactory postResolverFactory) {
    SortQueryOperation previousSort = validateAndGetChildSort(child, postResolverFactory);

    if (offset < 0) {
        throw new ValidationException("Offset should be greater or equal 0");
    }

    if (previousSort.getOffset() != -1) {
        throw new ValidationException("OFFSET already defined");
    }

    return new SortQueryOperation(previousSort.getOrder(), previousSort.getChild(), offset, -1);
}
3.68
flink_BlobServer_deleteFromCache
/**
 * Deletes the file associated with the blob key in the local storage of the blob server.
 *
 * @param jobId ID of the job this blob belongs to
 * @param key blob key associated with the file to be deleted
 * @return <tt>true</tt> if the given blob is successfully deleted or non-existing;
 *     <tt>false</tt> otherwise
 */
@Override
public boolean deleteFromCache(JobID jobId, TransientBlobKey key) {
    checkNotNull(jobId);
    return deleteInternal(jobId, key);
}
3.68
flink_SqlConstraintValidator_validateAndChangeColumnNullability
/**
 * Check constraints and change the nullability of primary key columns.
 *
 * @throws SqlValidateException if duplicate primary key constraints are encountered, or if a
 *     constraint is enforced or unique.
 */
public static void validateAndChangeColumnNullability(
        List<SqlTableConstraint> tableConstraints, SqlNodeList columnList)
        throws SqlValidateException {
    List<SqlTableConstraint> fullConstraints = getFullConstraints(tableConstraints, columnList);
    if (fullConstraints.stream().filter(SqlTableConstraint::isPrimaryKey).count() > 1) {
        throw new SqlValidateException(
                fullConstraints.get(1).getParserPosition(), "Duplicate primary key definition");
    }
    for (SqlTableConstraint constraint : fullConstraints) {
        validate(constraint);
        Set<String> primaryKeyColumns =
                Arrays.stream(constraint.getColumnNames()).collect(Collectors.toSet());

        // rewrite primary key's nullability to false
        // e.g. CREATE TABLE tbl (`a` STRING PRIMARY KEY NOT ENFORCED, ...) or
        // CREATE TABLE tbl (`a` STRING, PRIMARY KEY(`a`) NOT ENFORCED) will change `a`
        // to STRING NOT NULL
        for (SqlNode column : columnList) {
            SqlTableColumn tableColumn = (SqlTableColumn) column;
            if (tableColumn instanceof SqlTableColumn.SqlRegularColumn
                    && primaryKeyColumns.contains(tableColumn.getName().getSimple())) {
                SqlTableColumn.SqlRegularColumn regularColumn =
                        (SqlTableColumn.SqlRegularColumn) column;
                SqlDataTypeSpec notNullType = regularColumn.getType().withNullable(false);
                regularColumn.setType(notNullType);
            }
        }
    }
}
3.68
framework_Link_setResource
/**
 * Sets the resource this link opens.
 *
 * @param resource
 *            the resource to set.
 */
public void setResource(Resource resource) {
    setResource(LinkConstants.HREF_RESOURCE, resource);
}
3.68
hbase_TableDescriptorBuilder_isSplitEnabled
/**
 * Check if the split enable flag of the table is true. If the flag is false then no split will
 * be done.
 * @return true if table region split is enabled
 */
@Override
public boolean isSplitEnabled() {
  return getOrDefault(SPLIT_ENABLED_KEY, Boolean::valueOf, DEFAULT_SPLIT_ENABLED);
}
3.68
hbase_BucketAllocator_usedBytes
/**
 * How many bytes are currently taking up space in this bucket size's buckets.
 * Note: If your items are less than the bucket size of this bucket, the actual used bytes by
 * items will be lower than this value. But since a bucket size can only allocate items of a
 * single size, this value is the true number of used bytes. The difference will be counted in
 * {@link #fragmentationBytes()}.
 */
public long usedBytes() {
  return usedCount * itemSize;
}
3.68
dubbo_AbstractMetricsKeyListener_isSupport
/**
 * The MetricsKey type determines whether events are supported.
 */
@Override
public boolean isSupport(MetricsEvent event) {
    return super.isSupport(event) && event.isAssignableFrom(metricsKey);
}
3.68
hudi_HoodieSparkQuickstart_insertOverwriteData
/**
 * Generate new records, load them into a {@link Dataset} and insert-overwrite it into the Hudi dataset.
 */
public static Dataset<Row> insertOverwriteData(SparkSession spark, JavaSparkContext jsc, String tablePath,
                                               String tableName, HoodieExampleDataGenerator<HoodieAvroPayload> dataGen) {
  String commitTime = Long.toString(System.currentTimeMillis());
  List<String> inserts = dataGen.convertToStringList(
      dataGen.generateInsertsOnPartition(commitTime, 20, HoodieExampleDataGenerator.DEFAULT_THIRD_PARTITION_PATH));
  Dataset<Row> df = spark.read().json(jsc.parallelize(inserts, 1));

  df.write().format("hudi")
      .options(QuickstartUtils.getQuickstartWriteConfigs())
      .option("hoodie.datasource.write.operation", WriteOperationType.INSERT_OVERWRITE.name())
      .option(HoodieWriteConfig.PRECOMBINE_FIELD_NAME.key(), "ts")
      .option(KeyGeneratorOptions.RECORDKEY_FIELD_NAME.key(), "uuid")
      .option(KeyGeneratorOptions.PARTITIONPATH_FIELD_NAME.key(), "partitionpath")
      .option(TBL_NAME.key(), tableName)
      .mode(Append)
      .save(tablePath);
  return df;
}
3.68
hbase_SplitLogWorker_getTaskReadySeq
/**
 * Returns the number of tasks processed by coordination. This method is used by tests only.
 */
public int getTaskReadySeq() {
  return coordination.getTaskReadySeq();
}
3.68
flink_Channel_getDataExchangeMode
/**
 * Gets the data exchange mode (batch / pipelined) to use for the data exchange of this channel.
 *
 * @return The data exchange mode of this channel.
 */
public DataExchangeMode getDataExchangeMode() {
    return dataExchangeMode;
}
3.68
morf_AbstractSqlDialectTest_testAlterPrimaryKeyColumn
/**
 * Test changing a column which is the primary key.
 */
@Test
public void testAlterPrimaryKeyColumn() {
  testAlterTableColumn(TEST_TABLE, AlterationType.ALTER, getColumn(TEST_TABLE, "id"),
      column("renamedId", DataType.BIG_INTEGER).primaryKey(), expectedAlterPrimaryKeyColumnStatements());
}
3.68
hibernate-validator_ConstraintAnnotationVisitor_visitTypeAsClass
/**
 * <p>
 * Checks whether the given annotations are correctly specified at the given
 * class type declaration. The following checks are performed:
 * </p>
 * <ul>
 * <li>
 * Constraint annotations may only be specified at types supported by the constraints.</li>
 * </ul>
 */
@Override
public Void visitTypeAsClass(TypeElement e, List<AnnotationMirror> p) {
    checkConstraints( e, p );
    return null;
}
3.68
morf_UpgradePathFinder_hasStepsToApply
/**
 * @return Whether this path finder has steps to apply, based on the available steps and those already applied.
 */
public boolean hasStepsToApply() {
  return !stepsToApply.isEmpty();
}
3.68
streampipes_EpProperties_listDoubleEp
/**
 * Creates a new list-based event property of type double and with the assigned domain property.
 *
 * @param label A human-readable label of the property
 * @param runtimeName The field identifier of the event property at runtime.
 * @param domainProperty The semantics of the list property as a String. The string should correspond to a URI
 *                       provided by a vocabulary. Use one of the vocabularies provided in
 *                       {@link org.apache.streampipes.vocabulary} or create your own domain-specific vocabulary.
 * @return {@link org.apache.streampipes.model.schema.EventPropertyList}
 */
public static EventPropertyList listDoubleEp(Label label, String runtimeName, String domainProperty) {
  return listEp(label, runtimeName, Datatypes.Double, domainProperty);
}
3.68
hadoop_WordMedian_reduce
/**
 * Sums all the individual values within the iterator and writes them to the
 * same key.
 *
 * @param key
 *          This will be a length of a word that was read.
 * @param values
 *          This will be an iterator of all the values associated with that
 *          key.
 */
public void reduce(IntWritable key, Iterable<IntWritable> values, Context context)
    throws IOException, InterruptedException {
  int sum = 0;
  for (IntWritable value : values) {
    sum += value.get();
  }
  val.set(sum);
  context.write(key, val);
}
3.68
hbase_DynamicMetricsRegistry_newHistogram
/**
 * Create a new histogram.
 *
 * @param name The name of the histogram
 * @param desc The description of the data in the histogram.
 * @return A new MutableHistogram
 */
public MutableHistogram newHistogram(String name, String desc) {
  MutableHistogram histo = new MutableHistogram(name, desc);
  return addNewMetricIfAbsent(name, histo, MutableHistogram.class);
}
3.68
hbase_MetricsConnection_getGetTracker
/** getTracker metric */
public CallTracker getGetTracker() {
  return getTracker;
}
3.68
hadoop_UnitsConversionUtil_convert
/**
 * Converts a value from one unit to another. Supported units can be obtained
 * by inspecting the KNOWN_UNITS set.
 *
 * @param fromUnit the unit of the from value
 * @param toUnit the target unit
 * @param fromValue the value you wish to convert
 * @return the value in toUnit
 */
public static long convert(String fromUnit, String toUnit, long fromValue) {
  if (toUnit == null || fromUnit == null) {
    throw new IllegalArgumentException("One or more arguments are null");
  }

  if (fromUnit.equals(toUnit)) {
    return fromValue;
  }
  Converter fc = getConverter(fromUnit);
  Converter tc = getConverter(toUnit);
  long numerator = fc.numerator * tc.denominator;
  long denominator = fc.denominator * tc.numerator;
  long numeratorMultiplierLimit = Long.MAX_VALUE / numerator;
  if (numerator < denominator) {
    if (numeratorMultiplierLimit < fromValue) {
      String overflowMsg =
          "Converting " + fromValue + " from '" + fromUnit + "' to '" + toUnit
              + "' will result in an overflow of Long";
      throw new IllegalArgumentException(overflowMsg);
    }
    return (fromValue * numerator) / denominator;
  }
  if (numeratorMultiplierLimit > fromValue) {
    return (numerator * fromValue) / denominator;
  }
  long tmp = numerator / denominator;
  if ((Long.MAX_VALUE / tmp) < fromValue) {
    String overflowMsg =
        "Converting " + fromValue + " from '" + fromUnit + "' to '" + toUnit
            + "' will result in an overflow of Long";
    throw new IllegalArgumentException(overflowMsg);
  }
  return fromValue * tmp;
}
3.68
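As a worked illustration of the overflow guard (this is not the Hadoop Converter machinery; the ratio is passed in explicitly and the divide-first fast path is omitted), the sketch below converts value * numerator / denominator and refuses any value whose product would exceed Long.MAX_VALUE.

public final class ConvertSketch {
    // Convert value * numerator / denominator, guarding the multiplication against long overflow.
    static long convert(long value, long numerator, long denominator) {
        long limit = Long.MAX_VALUE / numerator;
        if (limit < value) {
            throw new IllegalArgumentException(
                "converting " + value + " with ratio " + numerator + "/" + denominator
                    + " would overflow long");
        }
        return (value * numerator) / denominator;
    }

    public static void main(String[] args) {
        System.out.println(convert(4, 1024, 1));    // e.g. 4 Mi -> 4096 Ki
        System.out.println(convert(2048, 1, 1024)); // e.g. 2048 Ki -> 2 Mi
        // convert(Long.MAX_VALUE / 2, 1024, 1)     // would throw instead of wrapping around
    }
}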
hbase_ClientExceptionsUtil_isConnectionException
/**
 * Check if the exception is something that indicates that we cannot contact/communicate with the
 * server.
 *
 * @param e exception to check
 * @return true when exception indicates that the client wasn't able to make contact with server
 */
public static boolean isConnectionException(Throwable e) {
  if (e == null) {
    return false;
  }
  for (Class<? extends Throwable> clazz : CONNECTION_EXCEPTION_TYPES) {
    if (clazz.isAssignableFrom(e.getClass())) {
      return true;
    }
  }
  return false;
}
3.68
hadoop_CloseableReferenceCount_isOpen
/**
 * Return true if the status is currently open.
 *
 * @return True if the status is currently open.
 */
public boolean isOpen() {
  return ((status.get() & STATUS_CLOSED_MASK) == 0);
}
3.68
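The open/closed check above is just a bit test on a packed status word. Below is a self-contained sketch of the same idea, with a hypothetical class name and an assumed mask value standing in for STATUS_CLOSED_MASK: the high bit marks "closed" and the remaining bits hold a reference count.

import java.util.concurrent.atomic.AtomicInteger;

class RefCountSketch {
    private static final int CLOSED_MASK = 1 << 30;  // assumed mask, analogous to STATUS_CLOSED_MASK
    private final AtomicInteger status = new AtomicInteger(0);

    boolean isOpen() {
        return (status.get() & CLOSED_MASK) == 0;
    }

    void reference() {
        int s = status.incrementAndGet();
        if ((s & CLOSED_MASK) != 0) {
            status.decrementAndGet();                 // roll back the count we just took
            throw new IllegalStateException("already closed");
        }
    }

    void close() {
        status.getAndUpdate(s -> s | CLOSED_MASK);    // flips the closed bit, keeps the count
    }
}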
flink_TestStreamEnvironment_setAsContext
/**
 * Sets the streaming context environment to a TestStreamEnvironment that runs its programs on
 * the given cluster with the given default parallelism.
 *
 * @param miniCluster The MiniCluster to execute jobs on.
 * @param parallelism The default parallelism for the test programs.
 */
public static void setAsContext(final MiniCluster miniCluster, final int parallelism) {
    setAsContext(miniCluster, parallelism, Collections.emptyList(), Collections.emptyList());
}
3.68
flink_HsSubpartitionConsumer_setDiskDataView
/**
 * Set {@link HsDataView} for this subpartition. This method is only called while the {@link
 * HsSubpartitionFileReader} is being created.
 */
void setDiskDataView(HsDataView diskDataView) {
    synchronized (lock) {
        checkState(this.diskDataView == null, "repeatedly set disk data view is not allowed.");
        this.diskDataView = diskDataView;
    }
}
3.68
morf_DataValueLookupMetadata_toString
/**
 * @see java.lang.Object#toString()
 */
@Override
public String toString() {
  return "DataValueLookupMetadata [columnNames=" + keys + "]";
}
3.68
hadoop_QueueStateManager_canDelete
/**
 * Whether this queue can be deleted.
 *
 * @param queueName the queue name
 * @return true if the queue can be deleted
 */
@SuppressWarnings("unchecked")
public boolean canDelete(String queueName) {
  SchedulerQueue<T> queue = queueManager.getQueue(queueName);
  if (queue == null) {
    LOG.info("The specified queue:" + queueName + " does not exist!");
    return false;
  }
  if (queue.getState() == QueueState.STOPPED) {
    return true;
  }
  LOG.info("Need to stop the specific queue:" + queueName + " first.");
  return false;
}
3.68
hmily_HmilySafeNumberOperationUtils_safeClosed
/**
 * Execute the range closed method in safe mode.
 *
 * @param lowerEndpoint lower endpoint
 * @param upperEndpoint upper endpoint
 * @return new range
 */
public static Range<Comparable<?>> safeClosed(final Comparable<?> lowerEndpoint, final Comparable<?> upperEndpoint) {
    try {
        return Range.closed(lowerEndpoint, upperEndpoint);
    } catch (final ClassCastException ex) {
        Class<?> clazz = getTargetNumericType(Lists.newArrayList(lowerEndpoint, upperEndpoint));
        if (clazz == null) {
            throw ex;
        }
        return Range.closed(parseNumberByClazz(lowerEndpoint.toString(), clazz), parseNumberByClazz(upperEndpoint.toString(), clazz));
    }
}
3.68
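To see why the fallback is needed: mixing endpoint types such as Integer and Long makes Guava's Range.closed throw a ClassCastException when it compares the endpoints at construction time. The simplified, self-contained sketch below (not the Hmily helper; it coerces both endpoints to BigDecimal instead of inferring a target numeric type) shows the same catch-and-retry shape.

import com.google.common.collect.Range;
import java.math.BigDecimal;

public final class SafeRangeSketch {

    @SuppressWarnings({"rawtypes", "unchecked"})
    static Range<Comparable<?>> closed(Comparable<?> lower, Comparable<?> upper) {
        try {
            return Range.closed((Comparable) lower, (Comparable) upper);
        } catch (ClassCastException ex) {
            // Fall back to a common numeric type so the endpoints become mutually comparable.
            BigDecimal l = new BigDecimal(lower.toString());
            BigDecimal u = new BigDecimal(upper.toString());
            return Range.closed((Comparable) l, (Comparable) u);
        }
    }

    public static void main(String[] args) {
        // Integer vs Long endpoints: the direct comparison throws, the retry succeeds.
        System.out.println(closed(1, 10L)); // [1..10]
    }
}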
flink_SingleInputUdfOperator_setSemanticProperties
/**
 * Sets the semantic properties for the user-defined function (UDF). The semantic properties
 * define how fields of tuples and other objects are modified or preserved through this UDF. The
 * configured properties can be retrieved via {@link UdfOperator#getSemanticProperties()}.
 *
 * @param properties The semantic properties for the UDF.
 * @see UdfOperator#getSemanticProperties()
 */
@Internal
public void setSemanticProperties(SingleInputSemanticProperties properties) {
    this.udfSemantics = properties;
    this.analyzedUdfSemantics = false;
}
3.68
graphhopper_JaroWinkler_getThreshold
/**
 * Returns the current value of the threshold used for adding the Winkler
 * bonus. The default value is 0.7.
 *
 * @return the current value of the threshold
 */
public final double getThreshold() {
  return threshold;
}
3.68