Columns: name (string, length 12 to 178), code_snippet (string, length 8 to 36.5k), score (float64, range 3.26 to 3.68)
AreaShop_GeneralRegion_getRegion
/** * Get the WorldGuard region associated with this AreaShop region. * @return The ProtectedRegion of WorldGuard or null if the region does not exist anymore */ @Override public ProtectedRegion getRegion() { if(getWorld() == null || plugin.getWorldGuard() == null || plugin.getRegionManager(getWorld()) == null || plugin.getRegionManager(getWorld()).getRegion(getName()) == null) { return null; } return plugin.getRegionManager(getWorld()).getRegion(getName()); }
3.68
hbase_EnabledTableSnapshotHandler_snapshotMobRegion
/** * Takes a snapshot of the mob region */ private void snapshotMobRegion(final RegionInfo regionInfo) throws IOException { snapshotManifest.addMobRegion(regionInfo); monitor.rethrowException(); status.setStatus("Completed referencing HFiles for the mob region of table: " + snapshotTable); }
3.68
open-banking-gateway_WebDriverBasedPaymentInitiation_sandbox_anton_brueckner_imitates_click_redirect_back_to_tpp_button_api_localhost_cookie_only_with_oauth2_integrated_hack
/* * Caused by FIXME https://github.com/adorsys/XS2A-Sandbox/issues/42, should be sandbox_anton_brueckner_clicks_redirect_back_to_tpp_button_api_localhost_cookie_only */ public SELF sandbox_anton_brueckner_imitates_click_redirect_back_to_tpp_button_api_localhost_cookie_only_with_oauth2_integrated_hack(WebDriver driver) { acc.sandbox_anton_brueckner_imitates_click_redirect_back_to_tpp_button_api_localhost_cookie_only_with_oauth2_integrated_hack(driver, authSessionCookie); return self(); }
3.68
hbase_MovingAverage_measure
/** * Measure elapsed time of a measurable method. * @param measurable a method implementing {@link TimeMeasurable} * @return T the result of the measurable method, with its original return type */ public T measure(TimeMeasurable<T> measurable) { long startTime = start(); LOG.trace("{} - start to measure at: {} ns.", label, startTime); // This may throw exceptions, which should be taken care of by the caller, not here. // If an exception occurs, this time isn't counted. T result = measurable.measure(); long elapsed = stop(startTime); LOG.trace("{} - elapse: {} ns.", label, elapsed); updateMostRecentTime(elapsed); return result; }
3.68
hbase_DrainingServerTracker_start
/** * Starts the tracking of draining RegionServers. * <p> * All Draining RSs will be tracked after this method is called. */ public void start() throws KeeperException, IOException { watcher.registerListener(this); // Add a ServerListener to check if a server is draining when it's added. serverManager.registerListener(new ServerListener() { @Override public void serverAdded(ServerName sn) { if (drainingServers.contains(sn)) { serverManager.addServerToDrainList(sn); } } }); List<String> servers = ZKUtil.listChildrenAndWatchThem(watcher, watcher.getZNodePaths().drainingZNode); add(servers); }
3.68
rocketmq-connect_RetryWithToleranceOperator_execAndRetry
/** * Attempt to execute an operation. */ protected <V> V execAndRetry(Operation<V> operation) throws Exception { int attempt = 0; long startTime = System.currentTimeMillis(); long deadline = startTime + retryTimeout; do { try { attempt++; return operation.call(); } catch (RetriableException e) { log.trace("Caught a retriable exception while executing {} operation with {}", context.stage(), context.executingClass()); errorMetricsGroup.recordFailure(); if (checkRetry(startTime)) { backoff(attempt, deadline); if (Thread.currentThread().isInterrupted()) { log.trace("Thread was interrupted. Marking operation as failed."); context.error(e); return null; } errorMetricsGroup.recordRetry(); } else { log.trace("Can't retry. start={}, attempt={}, deadline={}", startTime, attempt, deadline); context.error(e); return null; } } finally { context.attempt(attempt); } } while (true); }
3.68
flink_StreamProjection_projectTuple10
/** * Projects a {@link Tuple} {@link DataStream} to the previously selected fields. * * @return The projected DataStream. * @see Tuple * @see DataStream */ public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9> SingleOutputStreamOperator<Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>> projectTuple10() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType()); TupleTypeInfo<Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>> tType = new TupleTypeInfo<Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>>(fTypes); return dataStream.transform( "Projection", tType, new StreamProject<IN, Tuple10<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>>( fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig()))); }
3.68
hbase_MiniZooKeeperCluster_selectClientPort
/** * Selects a ZK client port. * @param seedPort the seed port to start with; -1 means first time. * @return a valid and unused client port */ private int selectClientPort(int seedPort) { int i; int returnClientPort = seedPort + 1; if (returnClientPort == 0) { // If the new port is invalid, find one - starting with the default client port. // If the default client port is not specified, starting with a random port. // The random port is selected from the range between 49152 to 65535. These ports cannot be // registered with IANA and are intended for dynamic allocation (see http://bit.ly/dynports). if (defaultClientPort > 0) { returnClientPort = defaultClientPort; } else { returnClientPort = 0xc000 + ThreadLocalRandom.current().nextInt(0x3f00); } } // Make sure that the port is unused. // break when an unused port is found do { for (i = 0; i < clientPortList.size(); i++) { if (returnClientPort == clientPortList.get(i)) { // Already used. Update the port and retry. returnClientPort++; break; } } } while (i != clientPortList.size()); return returnClientPort; }
3.68
framework_JSR356WebsocketInitializer_isVaadinServlet
/** * Tries to determine if the given servlet registration refers to a Vaadin * servlet. * * @param servletRegistration * The servlet registration info for the servlet * @return false if the servlet is definitely not a Vaadin servlet, true * otherwise */ protected boolean isVaadinServlet(ServletRegistration servletRegistration, ServletContext servletContext) { try { String servletClassName = servletRegistration.getClassName(); if (servletClassName.equals("com.ibm.ws.wsoc.WsocServlet")) { // Websphere servlet which implements websocket endpoints, // dynamically added return false; } // Must use servletContext class loader to load servlet class to // work correctly in an OSGi environment (#20024) Class<?> servletClass = servletContext.getClassLoader() .loadClass(servletClassName); return VaadinServlet.class.isAssignableFrom(servletClass); } catch (Exception e) { // This will fail in OSGi environments, assume everything is a // VaadinServlet return true; } }
3.68
hadoop_TaskAttemptScanDirectoryStage_executeStage
/** * Build the Manifest. * @return the manifest * @throws IOException failure. */ @Override protected TaskManifest executeStage(final Void arguments) throws IOException { final Path taskAttemptDir = getRequiredTaskAttemptDir(); final TaskManifest manifest = createTaskManifest(getStageConfig()); LOG.info("{}: scanning directory {}", getName(), taskAttemptDir); final int depth = scanDirectoryTree(manifest, taskAttemptDir, getDestinationDir(), 0, true); List<FileEntry> filesToCommit = manifest.getFilesToCommit(); LongSummaryStatistics fileSummary = filesToCommit.stream() .mapToLong(FileEntry::getSize) .summaryStatistics(); long fileDataSize = fileSummary.getSum(); long fileCount = fileSummary.getCount(); int dirCount = manifest.getDestDirectories().size(); LOG.info("{}: directory {} contained {} file(s); data size {}", getName(), taskAttemptDir, fileCount, fileDataSize); LOG.info("{}: Directory count = {}; maximum depth {}", getName(), dirCount, depth); // add statistics about the task output which, when aggregated, provides // insight into structure of job, task skew, etc. IOStatisticsStore iostats = getIOStatistics(); iostats.addSample(COMMITTER_TASK_DIRECTORY_COUNT_MEAN, dirCount); iostats.addSample(COMMITTER_TASK_DIRECTORY_DEPTH_MEAN, depth); iostats.addSample(COMMITTER_TASK_FILE_COUNT_MEAN, fileCount); iostats.addSample(COMMITTER_TASK_FILE_SIZE_MEAN, fileDataSize); return manifest; }
3.68
framework_ReportUsage_main
// for testing only public static void main(String[] args) { report(); }
3.68
framework_MenuBar_setAutoOpen
/** * Sets whether this menu bar's child menus will open when the mouse is * moved over it. * * @param autoOpen * <code>true</code> to cause child menus to auto-open */ public void setAutoOpen(boolean autoOpen) { this.autoOpen = autoOpen; }
3.68
pulsar_ConsumerImpl_messageProcessed
/** * Record the event that one message has been processed by the application. * * Periodically, it sends a Flow command to notify the broker that it can push more messages */ @Override protected synchronized void messageProcessed(Message<?> msg) { ClientCnx currentCnx = cnx(); ClientCnx msgCnx = ((MessageImpl<?>) msg).getCnx(); lastDequeuedMessageId = msg.getMessageId(); if (msgCnx != currentCnx) { // The processed message did belong to the old queue that was cleared after reconnection. } else { if (listener == null && !parentConsumerHasListener) { increaseAvailablePermits(currentCnx); } stats.updateNumMsgsReceived(msg); trackMessage(msg); } decreaseIncomingMessageSize(msg); }
3.68
hbase_MiniHBaseCluster_waitOnRegionServer
/** * Wait for the specified region server to stop. Removes this thread from the list of running threads. * @return Name of the region server that just went down. */ public String waitOnRegionServer(final int serverNumber) { return this.hbaseCluster.waitOnRegionServer(serverNumber); }
3.68
flink_DataSourceTask_initInputFormat
/** * Initializes the InputFormat implementation and configuration. * * @throws RuntimeException Thrown if an instance of the InputFormat implementation cannot be * obtained. */ private void initInputFormat() { ClassLoader userCodeClassLoader = getUserCodeClassLoader(); // obtain task configuration (including stub parameters) Configuration taskConf = getTaskConfiguration(); this.config = new TaskConfig(taskConf); final Pair<OperatorID, InputFormat<OT, InputSplit>> operatorIdAndInputFormat; InputOutputFormatContainer formatContainer = new InputOutputFormatContainer(config, userCodeClassLoader); try { operatorIdAndInputFormat = formatContainer.getUniqueInputFormat(); this.format = operatorIdAndInputFormat.getValue(); // check if the class is a subclass, if the check is required if (!InputFormat.class.isAssignableFrom(this.format.getClass())) { throw new RuntimeException( "The class '" + this.format.getClass().getName() + "' is not a subclass of '" + InputFormat.class.getName() + "' as is required."); } } catch (ClassCastException ccex) { throw new RuntimeException( "The stub class is not a proper subclass of " + InputFormat.class.getName(), ccex); } Thread thread = Thread.currentThread(); ClassLoader original = thread.getContextClassLoader(); // configure the stub; catch exceptions here explicitly, to report them as originating from the // user code try { thread.setContextClassLoader(userCodeClassLoader); this.format.configure(formatContainer.getParameters(operatorIdAndInputFormat.getKey())); } catch (Throwable t) { throw new RuntimeException( "The user defined 'configure()' method caused an error: " + t.getMessage(), t); } finally { thread.setContextClassLoader(original); } // get the factory for the type serializer this.serializerFactory = this.config.getOutputSerializer(userCodeClassLoader); }
3.68
flink_CsvOutputFormat_setQuoteStrings
/** * Configures whether the output format should quote string values. String values are fields of * type {@link java.lang.String} and {@link org.apache.flink.types.StringValue}, as well as all * subclasses of the latter. * * <p>By default, strings are not quoted. * * @param quoteStrings Flag indicating whether string fields should be quoted. */ public void setQuoteStrings(boolean quoteStrings) { this.quoteStrings = quoteStrings; }
3.68
hudi_HoodieTableFactory_checkRecordKey
/** * Validate the record key. */ private void checkRecordKey(Configuration conf, ResolvedSchema schema) { List<String> fields = schema.getColumnNames(); if (!schema.getPrimaryKey().isPresent()) { String[] recordKeys = conf.get(FlinkOptions.RECORD_KEY_FIELD).split(","); if (recordKeys.length == 1 && FlinkOptions.RECORD_KEY_FIELD.defaultValue().equals(recordKeys[0]) && !fields.contains(recordKeys[0])) { throw new HoodieValidationException("Primary key definition is required, the default primary key field " + "'" + FlinkOptions.RECORD_KEY_FIELD.defaultValue() + "' does not exist in the table schema, " + "use either PRIMARY KEY syntax or option '" + FlinkOptions.RECORD_KEY_FIELD.key() + "' to specify."); } Arrays.stream(recordKeys) .filter(field -> !fields.contains(field)) .findAny() .ifPresent(f -> { throw new HoodieValidationException("Field '" + f + "' specified in option " + "'" + FlinkOptions.RECORD_KEY_FIELD.key() + "' does not exist in the table schema."); }); } }
3.68
flink_SplitFetcher_removeSplits
/** * Notify the split fetcher that some splits need to be removed. This operation is asynchronous. * * @param splitsToRemove the splits to be removed. */ public void removeSplits(List<SplitT> splitsToRemove) { lock.lock(); try { enqueueTaskUnsafe( new RemoveSplitsTask<>( splitReader, splitsToRemove, assignedSplits, splitFinishedHook)); wakeUpUnsafe(true); } finally { lock.unlock(); } }
3.68
hmily_JavaBeanBinder_getValue
/** * Gets value. * * @param instance the instance * @return the value */ Supplier<Object> getValue(final Supplier<?> instance) { if (this.getter == null) { return null; } return () -> { try { this.getter.setAccessible(true); return this.getter.invoke(instance.get()); } catch (Exception ex) { throw new IllegalStateException( "Unable to get value for property " + this.name, ex); } }; }
3.68
AreaShop_GeneralRegion_limitsAllow
/** * Check if the player can buy/rent this region, detailed info in the result object. * @param type The type of region to check * @param offlinePlayer The player to check it for * @param extend Check for extending of rental regions * @return LimitResult containing if it is allowed, why and limiting factor */ public LimitResult limitsAllow(RegionType type, OfflinePlayer offlinePlayer, boolean extend) { if(plugin.hasPermission(offlinePlayer, "areashop.limitbypass")) { return new LimitResult(true, null, 0, 0, null); } GeneralRegion exclude = null; if(extend) { exclude = this; } String typePath; if(type == RegionType.RENT) { typePath = "rents"; } else { typePath = "buys"; } // Check all limitgroups the player has List<String> groups = new ArrayList<>(plugin.getConfig().getConfigurationSection("limitGroups").getKeys(false)); while(!groups.isEmpty()) { String group = groups.get(0); if(plugin.hasPermission(offlinePlayer, "areashop.limits." + group) && this.matchesLimitGroup(group)) { String pathPrefix = "limitGroups." + group + "."; if(!plugin.getConfig().isInt(pathPrefix + "total")) { AreaShop.warn("Limit group " + group + " in the config.yml file does not correctly specify the number of total regions (should be specified as total: <number>)"); } if(!plugin.getConfig().isInt(pathPrefix + typePath)) { AreaShop.warn("Limit group " + group + " in the config.yml file does not correctly specify the number of " + typePath + " regions (should be specified as " + typePath + ": <number>)"); } int totalLimit = plugin.getConfig().getInt("limitGroups." + group + ".total"); int typeLimit = plugin.getConfig().getInt("limitGroups." + group + "." + typePath); //AreaShop.debug("typeLimitOther="+typeLimit+", typePath="+typePath); int totalCurrent = hasRegionsInLimitGroup(offlinePlayer, group, plugin.getFileManager().getRegions(), exclude); int typeCurrent; if(type == RegionType.RENT) { typeCurrent = hasRegionsInLimitGroup(offlinePlayer, group, plugin.getFileManager().getRents(), exclude); } else { typeCurrent = hasRegionsInLimitGroup(offlinePlayer, group, plugin.getFileManager().getBuys(), exclude); } if(totalLimit == -1) { totalLimit = Integer.MAX_VALUE; } if(typeLimit == -1) { typeLimit = Integer.MAX_VALUE; } String totalHighestGroup = group; String typeHighestGroup = group; groups.remove(group); // Get the highest number from the groups of the same category List<String> groupsCopy = new ArrayList<>(groups); for(String checkGroup : groupsCopy) { if(plugin.hasPermission(offlinePlayer, "areashop.limits." + checkGroup) && this.matchesLimitGroup(checkGroup)) { if(limitGroupsOfSameCategory(group, checkGroup)) { groups.remove(checkGroup); int totalLimitOther = plugin.getConfig().getInt("limitGroups." + checkGroup + ".total"); int typeLimitOther = plugin.getConfig().getInt("limitGroups." + checkGroup + "." + typePath); if(totalLimitOther > totalLimit) { totalLimit = totalLimitOther; totalHighestGroup = checkGroup; } else if(totalLimitOther == -1) { totalLimit = Integer.MAX_VALUE; } if(typeLimitOther > typeLimit) { typeLimit = typeLimitOther; typeHighestGroup = checkGroup; } else if(typeLimitOther == -1) { typeLimit = Integer.MAX_VALUE; } } } else { groups.remove(checkGroup); } } // Check if the limits stop the player from buying the region if(typeCurrent >= typeLimit) { LimitType limitType; if(type == RegionType.RENT) { if(extend) { limitType = LimitType.EXTEND; } else { limitType = LimitType.RENTS; } } else { limitType = LimitType.BUYS; } return new LimitResult(false, limitType, typeLimit, typeCurrent, typeHighestGroup); } if(totalCurrent >= totalLimit) { return new LimitResult(false, LimitType.TOTAL, totalLimit, totalCurrent, totalHighestGroup); } } groups.remove(group); } return new LimitResult(true, null, 0, 0, null); }
3.68
framework_LegacyWindow_getURL
/** * Gets the full URL of the window. The returned URL is window specific and * can be used to directly refer to the window. * <p> * Note! This method can not be used for portlets. * </p> * * @return the URL of the window or null if the window is not attached to an * application */ public URL getURL() { LegacyApplication application = getApplication(); if (application == null) { return null; } try { return new URL(application.getURL(), getName() + "/"); } catch (MalformedURLException e) { throw new RuntimeException( "Internal problem getting window URL, please report"); } }
3.68
hbase_MasterObserver_postCloneSnapshot
/** * Called after a snapshot clone operation has been requested. Called as part of restoreSnapshot * RPC call. * @param ctx the environment to interact with the framework and master * @param snapshot the SnapshotDescription for the snapshot * @param tableDescriptor the TableDescriptor of the table to create */ default void postCloneSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx, final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) throws IOException { }
3.68
hbase_SegmentScanner_getNextIndexedKey
/** * @return the next key in the index (the key to seek to the next block) if known, or null * otherwise. Not relevant for an in-memory scanner. */ @Override public Cell getNextIndexedKey() { return null; }
3.68
hbase_ByteBufferKeyOnlyKeyValue_getFamilyLengthPosition
// The position in BB where the family length is added. private int getFamilyLengthPosition() { return getFamilyLengthPosition(getRowLength()); }
3.68
flink_TypeExtractionUtils_getAllDeclaredMethods
/** Returns all declared methods of a class including methods of superclasses. */ public static List<Method> getAllDeclaredMethods(Class<?> clazz) { List<Method> result = new ArrayList<>(); while (clazz != null) { Method[] methods = clazz.getDeclaredMethods(); Collections.addAll(result, methods); clazz = clazz.getSuperclass(); } return result; }
3.68
flink_DualInputOperator_addSecondInputs
/** * Add to the second input the union of the given operators. * * @param inputs The operator(s) to be unioned with the second input. * @deprecated This method will be removed in future versions. Use the {@link Union} operator * instead. */ @Deprecated @SuppressWarnings("unchecked") public void addSecondInputs(List<Operator<IN2>> inputs) { this.input2 = Operator.createUnionCascade( this.input2, inputs.toArray(new Operator[inputs.size()])); }
3.68
hbase_LogRollBackupSubprocedure_cleanup
/** * Cancel threads if they haven't finished. */ @Override public void cleanup(Exception e) { taskManager.abort("Aborting log roll subprocedure tasks for backup due to error", e); }
3.68
hbase_RatioBasedCompactionPolicy_needsCompaction
/** * A heuristic method to decide whether to schedule a compaction request * @param storeFiles files in the store. * @param filesCompacting files being scheduled to compact. * @return true to schedule a request. */ @Override public boolean needsCompaction(Collection<HStoreFile> storeFiles, List<HStoreFile> filesCompacting) { int numCandidates = storeFiles.size() - filesCompacting.size(); return numCandidates >= comConf.getMinFilesToCompact(); }
3.68
flink_CompletedCheckpointStore_getLatestCheckpoint
/** * Returns the latest {@link CompletedCheckpoint} instance or <code>null</code> if none was * added. */ default CompletedCheckpoint getLatestCheckpoint() throws Exception { List<CompletedCheckpoint> allCheckpoints = getAllCheckpoints(); if (allCheckpoints.isEmpty()) { return null; } return allCheckpoints.get(allCheckpoints.size() - 1); }
3.68
hadoop_S3ListResult_isV1
/** * Is this a v1 API result or v2? * @return true if v1, false if v2 */ public boolean isV1() { return v1Result != null; }
3.68
pulsar_TxnMetaImpl_status
/** * Return the current status of the transaction. * * @return current status of the transaction. */ @Override public synchronized TxnStatus status() { return txnStatus; }
3.68
hadoop_LightWeightLinkedSet_addElem
/** * Add given element to the hash table * * @return true if the element was not present in the table, false otherwise */ @Override protected boolean addElem(final T element) { // validate element if (element == null) { throw new IllegalArgumentException("Null element is not supported."); } // find hashCode & index final int hashCode = element.hashCode(); final int index = getIndex(hashCode); // return false if already present if (getContainedElem(index, element, hashCode) != null) { return false; } modification++; size++; // update bucket linked list DoubleLinkedElement<T> le = new DoubleLinkedElement<T>(element, hashCode); le.next = entries[index]; entries[index] = le; // insert to the end of the all-element linked list le.after = null; le.before = tail; if (tail != null) { tail.after = le; } tail = le; if (head == null) { head = le; bookmark.next = head; } // Update bookmark, if necessary. if (bookmark.next == null) { bookmark.next = le; } return true; }
3.68
hbase_MasterAddressTracker_getMasterAddress
/** * Get master address. Use this instead of {@link #getMasterAddress()} if you do not have an * instance of this tracker in your context. * @param zkw ZKWatcher to use * @return ServerName stored in the master address znode or null if no znode present. * @throws KeeperException if a ZooKeeper operation fails * @throws IOException if the address of the ZooKeeper master cannot be retrieved */ public static ServerName getMasterAddress(final ZKWatcher zkw) throws KeeperException, IOException { byte[] data; try { data = ZKUtil.getData(zkw, zkw.getZNodePaths().masterAddressZNode); } catch (InterruptedException e) { throw new InterruptedIOException(); } // TODO javadoc claims we return null in this case. :/ if (data == null) { throw new IOException("Can't get master address from ZooKeeper; znode data == null"); } try { return ProtobufUtil.parseServerNameFrom(data); } catch (DeserializationException e) { KeeperException ke = new KeeperException.DataInconsistencyException(); ke.initCause(e); throw ke; } }
3.68
querydsl_AbstractOracleQuery_connectByPrior
/** * CONNECT BY specifies the relationship between parent rows and child rows of the hierarchy. * * @param cond condition * @return the current object */ public C connectByPrior(Predicate cond) { return addFlag(Position.BEFORE_ORDER, CONNECT_BY_PRIOR, cond); }
3.68
framework_CurrentInstance_get
/** * Gets the current instance of a specific type if available. * <p> * When a current instance of the specific type is not found, the * {@link CurrentInstanceFallbackResolver} registered via * {@link #defineFallbackResolver(Class, CurrentInstanceFallbackResolver)} * (if any) is invoked. * * @param type * the class to get an instance of * @return the current instance of the provided type, or <code>null</code> * if there is no current instance. */ public static <T> T get(Class<T> type) { T result = doGet(type); if (result != null) { return result; } CurrentInstanceFallbackResolver<?> fallbackResolver = fallbackResolvers .get(type); if (fallbackResolver != null) { return (T) fallbackResolver.resolve(); } return null; }
3.68
flink_ValueTypeInfo_createCopyableValueSerializer
// utility method to summon the necessary bound private static <X extends CopyableValue<X>> CopyableValueSerializer<X> createCopyableValueSerializer(Class<X> clazz) { return new CopyableValueSerializer<X>(clazz); }
3.68
hadoop_Contracts_checkArg
/** * Check an argument for false conditions * @param arg the argument to check * @param expression the boolean expression for the condition * @param msg the error message if {@code expression} is false * @return the argument for convenience */ public static double checkArg(double arg, boolean expression, Object msg) { if (!expression) { throw new IllegalArgumentException(String.valueOf(msg) +": "+ arg); } return arg; }
3.68
framework_Accordion_getState
/* * (non-Javadoc) * * @see com.vaadin.ui.TabSheet#getState() */ @Override protected AccordionState getState() { return (AccordionState) super.getState(); }
3.68
hadoop_AppPriorityACLConfigurationParser_parsePriorityACLType
/* * Parse different types of ACLs sub parts for on priority group and store in * a map for later processing. */ private void parsePriorityACLType(AppPriorityACLGroup userPriorityACL, String[] splits, List<StringBuilder> userAndGroupName) { // Here splits will have the key value pair at index 0 and 1 respectively. // To parse all keys, its better to convert to PriorityACLConfig enum. AppPriorityACLKeyType aclType = AppPriorityACLKeyType .valueOf(StringUtils.toUpperCase(splits[0].trim())); switch (aclType) { case MAX_PRIORITY : userPriorityACL .setMaxPriority(Priority.newInstance(Integer.parseInt(splits[1]))); break; case USER : userAndGroupName.add(getUserOrGroupACLStringFromConfig(splits[1])); break; case GROUP : userAndGroupName.add(getUserOrGroupACLStringFromConfig(splits[1])); break; case DEFAULT_PRIORITY : int defaultPriority = Integer.parseInt(splits[1]); Priority priority = (defaultPriority < 0) ? Priority.newInstance(0) : Priority.newInstance(defaultPriority); userPriorityACL.setDefaultPriority(priority); break; default: break; } }
3.68
morf_DataValue_defaultHashCode
/** * Default hashCode implementation for instances. * * @param obj The object. * @return The hashCode. */ public static int defaultHashCode(DataValue obj) { final int prime = 31; int result = 1; result = prime * result + obj.getName().hashCode(); if (obj.getObject() == null) { result = prime * result; } else if (obj.getObject().getClass().isArray()) { result = prime * result + Arrays.hashCode((byte[]) obj.getObject()); } else { result = prime * result + obj.getObject().hashCode(); } return result; }
3.68
morf_SqlServerDialect_getSqlForAddMonths
/** * @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForAddMonths(org.alfasoftware.morf.sql.element.Function) */ @Override protected String getSqlForAddMonths(Function function) { return String.format( "DATEADD(month, %s, %s)", getSqlFrom(function.getArguments().get(1)), getSqlFrom(function.getArguments().get(0)) ); }
3.68
hmily_HmilySQLComputeUtils_getAllPKColumns
/** * Get all pk columns. * * @param segment hmily simple table segment * @param tableName table name * @param primaryKeyColumns primary key columns * @return all table primary key columns as a comma-separated list */ public static String getAllPKColumns(final HmilySimpleTableSegment segment, final String tableName, final List<String> primaryKeyColumns) { StringBuilder pkNamesStr = new StringBuilder(); for (int i = 0; i < primaryKeyColumns.size(); i++) { if (i > 0) { pkNamesStr.append(" , "); } String pkName = null; if (segment.getAlias().isPresent()) { pkName = String.format("%s.%s", segment.getAlias().get(), primaryKeyColumns.get(i)); } else if (segment.getOwner().isPresent()) { pkName = String.format("%s.%s.%s", segment.getOwner().get(), tableName, primaryKeyColumns.get(i)); } else { pkName = String.format("%s.%s", tableName, primaryKeyColumns.get(i)); } pkNamesStr.append(pkName); } return pkNamesStr.toString(); }
3.68
flink_HiveTableUtil_requireValidateConstraint
// returns whether a trait requires VALIDATE constraint public static boolean requireValidateConstraint(byte trait) { return (trait & HIVE_CONSTRAINT_VALIDATE) != 0; }
3.68
hbase_MasterObserver_preSplitRegionBeforeMETAAction
/** * This will be called before update META step as part of split transaction. * @param ctx the environment to interact with the framework and master */ default void preSplitRegionBeforeMETAAction( final ObserverContext<MasterCoprocessorEnvironment> ctx, final byte[] splitKey, final List<Mutation> metaEntries) throws IOException { }
3.68
hadoop_BinaryEditsVisitor_start
/** * Start the visitor (initialization) */ @Override public void start(int version) throws IOException { }
3.68
druid_NodeEvent_getEventsByDiffProperties
/** * Diff the given two Properties. * * @return A List of AddEvent and DelEvent */ public static List<NodeEvent> getEventsByDiffProperties(Properties previous, Properties next) { List<String> prevNames = PropertiesUtils.loadNameList(previous, ""); List<String> nextNames = PropertiesUtils.loadNameList(next, ""); List<String> namesToAdd = new ArrayList<String>(); List<String> namesToDel = new ArrayList<String>(); for (String n : prevNames) { if (n != null && !n.trim().isEmpty() && !nextNames.contains(n)) { namesToDel.add(n); } } for (String n : nextNames) { if (n != null && !n.trim().isEmpty() && !prevNames.contains(n)) { namesToAdd.add(n); } } List<NodeEvent> list = new ArrayList<NodeEvent>(); list.addAll(generateEvents(next, namesToAdd, NodeEventTypeEnum.ADD)); list.addAll(generateEvents(previous, namesToDel, NodeEventTypeEnum.DELETE)); return list; }
3.68
framework_VAbstractPopupCalendar_buildDate
/** * Update the text field contents from the date. See {@link #buildDate()}. * * @param forceValid * true to force the text field to be updated, false to only * update if the parsable flag is true. */ protected void buildDate(boolean forceValid) { if (forceValid) { parsable = true; } buildDate(); }
3.68
hbase_MemStoreSnapshot_getTimeRangeTracker
/** Returns {@link TimeRangeTracker} for all the Cells in the snapshot. */ public TimeRangeTracker getTimeRangeTracker() { return timeRangeTracker; }
3.68
morf_AbstractSqlDialectTest_expectedSelectMinimumWithExpression
/** * @return the expected SQL for selecting the minimum of an expression */ protected String expectedSelectMinimumWithExpression() { return "SELECT MIN(intField - 1) FROM " + tableName(TEST_TABLE); }
3.68
AreaShop_CancellableRegionEvent_cancel
/** * Cancel the event, preventing it from happening. * @param reason The reason for cancelling, shown to the user; should end with a dot */ public void cancel(String reason) { this.cancelled = true; this.reason = reason; }
3.68
framework_Overlay_hide
/** * Hides the popup and detaches it from the page. This has no effect if it * is not currently showing. Animate-in and animate-out animations can be * enabled/disabled for different use cases. * * @see com.google.gwt.user.client.ui.PopupPanel#hide(boolean) * * @param autoClosed * the value that will be passed to * {@link CloseHandler#onClose(CloseEvent)} when the popup is * closed * @param animateIn * enable/disable animate-in animation * @param animateOut * enable/disable animate-out animation * @since 7.3.7 */ public void hide(final boolean autoClosed, final boolean animateIn, final boolean animateOut) { if (animateIn && getStyleName().contains(ADDITIONAL_CLASSNAME_ANIMATE_IN)) { AnimationUtil.addAnimationEndListener(getElement(), new AnimationEndListener() { @Override public void onAnimationEnd(NativeEvent event) { if (AnimationUtil.getAnimationName(event).contains( ADDITIONAL_CLASSNAME_ANIMATE_IN)) { boolean removed = AnimationUtil .removeAnimationEndListener( getElement(), this); assert removed : "Animation end listener was not removed"; reallyHide(autoClosed); } } }); } else { // Check if animations are used addStyleDependentName(ADDITIONAL_CLASSNAME_ANIMATE_OUT); ComputedStyle cs = new ComputedStyle(getElement()); String animationName = AnimationUtil.getAnimationName(cs); if (animationName == null) { animationName = ""; } if (animateOut && animationName .contains(ADDITIONAL_CLASSNAME_ANIMATE_OUT)) { // Disable GWT PopupPanel closing animation if used setAnimationEnabled(false); AnimationUtil.addAnimationEndListener(getElement(), new AnimationEndListener() { @Override public void onAnimationEnd(NativeEvent event) { String animationName = AnimationUtil .getAnimationName(event); if (animationName.contains( ADDITIONAL_CLASSNAME_ANIMATE_OUT)) { boolean removed = AnimationUtil .removeAnimationEndListener( getElement(), this); assert removed : "Animation end listener was not removed"; // Remove both animation styles just in case removeStyleDependentName( ADDITIONAL_CLASSNAME_ANIMATE_IN); removeStyleDependentName( ADDITIONAL_CLASSNAME_ANIMATE_OUT); reallyHide(autoClosed); } } }); // No event previews should happen after the animation has // started Overlay.this.setPreviewingAllNativeEvents(false); } else { removeStyleDependentName(ADDITIONAL_CLASSNAME_ANIMATE_OUT); reallyHide(autoClosed); } } }
3.68
framework_HierarchicalContainer_setIncludeParentsWhenFiltering
/** * Controls how the filtering of the container works. Set this to true to * make filtering include parents for all matched items in addition to the * items themselves. Setting this to false causes the filtering to only * include the matching items and make items with excluded parents into root * items. * * @param includeParentsWhenFiltering * true to include all parents for items that match the filter, * false to only include the matching items */ public void setIncludeParentsWhenFiltering( boolean includeParentsWhenFiltering) { this.includeParentsWhenFiltering = includeParentsWhenFiltering; if (filteredRoots != null) { // Currently filtered so needs to be re-filtered doFilterContainer(true); } }
3.68
rocketmq-connect_RetryWithToleranceOperator_sourceRecord
/** * Set the source record being processed in the connect pipeline. * * @param preTransformRecord the source record */ public void sourceRecord(ConnectRecord preTransformRecord) { this.context.sourceRecord(preTransformRecord); }
3.68
framework_DesignFormatter_findConverterFor
/** * Finds a converter for a given type. May return a converter for a * superclass instead, if one is found. * * @param sourceType * Type to find a converter for. * @return A valid converter for the given type or its superclass, <b>null</b> if * it was not found. * @since 8.0 */ protected <T> Converter<String, T> findConverterFor( Class<? extends T> sourceType) { return findConverterFor(sourceType, false); }
3.68
framework_LoadingIndicatorConfiguration_getFirstDelay
/* * (non-Javadoc) * * @see com.vaadin.ui.LoadingIndicator#getFirstDelay() */ @Override public int getFirstDelay() { return getState(false).firstDelay; }
3.68
pulsar_FileUtils_deleteFile
/** * Deletes the given file. If the given file exists but could not be deleted * this will be printed as a warning to the given logger * * @param file to delete * @param logger to notify * @param attempts indicates how many times an attempt to delete should be * made * @return true if given file no longer exists */ public static boolean deleteFile(final File file, final Logger logger, final int attempts) { if (file == null) { return false; } boolean isGone = false; try { if (file.exists()) { final int effectiveAttempts = Math.max(1, attempts); for (int i = 0; i < effectiveAttempts && !isGone; i++) { isGone = file.delete() || !file.exists(); if (!isGone && (effectiveAttempts - i) > 1) { FileUtils.sleepQuietly(MILLIS_BETWEEN_ATTEMPTS); } } if (!isGone && logger != null) { logger.warn("File appears to exist but unable to delete file: " + file.getAbsolutePath()); } } } catch (final Throwable t) { if (logger != null) { logger.warn("Unable to delete file: '" + file.getAbsolutePath() + "' due to " + t); } } return isGone; }
3.68
flink_SegmentsUtil_getShort
/** * get short from segments. * * @param segments target segments. * @param offset value offset. */ public static short getShort(MemorySegment[] segments, int offset) { if (inFirstSegment(segments, offset, 2)) { return segments[0].getShort(offset); } else { return getShortMultiSegments(segments, offset); } }
3.68
hadoop_AbstractDelegationTokenBinding_bindToTokenIdentifier
/** * Bind to the token identifier, returning the credential providers to use * for the owner to talk to S3 and related AWS Services. * @param retrievedIdentifier the unmarshalled data * @return non-empty list of AWS credential providers to use for * authenticating this client with AWS services. * @throws IOException any failure * @throws UnsupportedOperationException in the base implementation. */ public AWSCredentialProviderList bindToTokenIdentifier( AbstractS3ATokenIdentifier retrievedIdentifier) throws IOException { throw new UnsupportedOperationException("unimplemented"); }
3.68
framework_VUI_sendClientResized
/** * Send new dimensions to the server. * <p> * For internal use only. May be removed or replaced in the future. */ public void sendClientResized() { Profiler.enter("VUI.sendClientResized"); Element parentElement = getElement().getParentElement(); int viewHeight = parentElement.getClientHeight(); int viewWidth = parentElement.getClientWidth(); ResizeEvent.fire(this, viewWidth, viewHeight); Profiler.leave("VUI.sendClientResized"); }
3.68
framework_Button_getButton
/** * Gets the Button where the event occurred. * * @return the Source of the event. */ public Button getButton() { return (Button) getSource(); }
3.68
flink_JobGraph_getNumberOfVertices
/** * Returns the number of all vertices. * * @return The number of all vertices. */ public int getNumberOfVertices() { return this.taskVertices.size(); }
3.68
hbase_MasterObserver_postStartMaster
/** * Called immediately after an active master instance has completed initialization. Will not be * called on standby master instances unless they take over the active role. */ default void postStartMaster(final ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException { }
3.68
AreaShop_GeneralRegion_getStringListSetting
/** * Get a string list setting for this region, defined as follows * - If the region has the setting in its own file (/regions/regionName.yml), use that * - If the region has groups, use the setting defined by the most important group, if any * - Otherwise fallback to the default.yml file setting * @param path The path to get the setting of * @return The value of the setting */ public List<String> getStringListSetting(String path) { if(config.isSet(path)) { return config.getStringList(path); } List<String> result = null; int priority = Integer.MIN_VALUE; boolean found = false; for(RegionGroup group : plugin.getFileManager().getGroups()) { if(group.isMember(this) && group.getSettings().isSet(path) && group.getPriority() > priority) { result = group.getSettings().getStringList(path); priority = group.getPriority(); found = true; } } if(found) { return result; } if(this.getFileManager().getRegionSettings().isSet(path)) { return this.getFileManager().getRegionSettings().getStringList(path); } else { return this.getFileManager().getFallbackRegionSettings().getStringList(path); } }
3.68
shardingsphere-elasticjob_JobConfiguration_jobParameter
/** * Set job parameter. * * @param jobParameter job parameter * * @return job configuration builder */ public Builder jobParameter(final String jobParameter) { if (null != jobParameter) { this.jobParameter = jobParameter; } return this; }
3.68
hudi_HoodieFlinkCompactor_shutdownAsyncService
/** * Shutdown async services like compaction/clustering as DeltaSync is shutdown. */ public void shutdownAsyncService(boolean error) { LOG.info("Gracefully shutting down compactor. Error ?" + error); executor.shutdown(); writeClient.close(); }
3.68
querydsl_AntMetaDataExporter_addRenameMapping
/** * Adds RenameMapping instance, called by Ant */ public void addRenameMapping(RenameMapping mapping) { renameMappings.add(mapping); }
3.68
hadoop_AbstractS3AStatisticsSource_lookupGaugeValue
/** * {@inheritDoc} */ public Long lookupGaugeValue(final String name) { return ioStatistics.gauges().get(name); }
3.68
flink_FixedLengthRecordSorter_write
/** * Writes a given record to this sort buffer. The written record will be appended and take the * last logical position. * * @param record The record to be written. * @return True, if the record was successfully written, false, if the sort buffer was full. * @throws IOException Thrown, if an error occurred while serializing the record into the * buffers. */ @Override public boolean write(T record) throws IOException { // check whether we need a new memory segment for the sort index if (this.currentSortBufferOffset > this.lastEntryOffset) { if (memoryAvailable()) { this.currentSortBufferSegment = nextMemorySegment(); this.sortBuffer.add(this.currentSortBufferSegment); this.outView.set(this.currentSortBufferSegment); this.currentSortBufferOffset = 0; this.sortBufferBytes += this.segmentSize; } else { return false; } } // serialize the record into the data buffers try { this.comparator.writeWithKeyNormalization(record, this.outView); this.numRecords++; this.currentSortBufferOffset += this.recordSize; return true; } catch (EOFException eofex) { throw new IOException( "Error: Serialization consumes more bytes than announced by the serializer."); } }
3.68
hudi_HoodieSortedMergeHandle_write
/** * Go through an old record. Here if we detect a newer version shows up, we write the new one to the file. */ @Override public void write(HoodieRecord oldRecord) { Schema oldSchema = config.populateMetaFields() ? writeSchemaWithMetaFields : writeSchema; Schema newSchema = useWriterSchemaForCompaction ? writeSchemaWithMetaFields : writeSchema; String key = oldRecord.getRecordKey(oldSchema, keyGeneratorOpt); // To maintain overall sorted order across updates and inserts, write any new inserts whose keys are less than // the oldRecord's key. while (!newRecordKeysSorted.isEmpty() && newRecordKeysSorted.peek().compareTo(key) <= 0) { String keyToPreWrite = newRecordKeysSorted.remove(); if (keyToPreWrite.equals(key)) { // will be handled as an update later break; } // This is a new insert HoodieRecord<T> hoodieRecord = keyToNewRecords.get(keyToPreWrite).newInstance(); if (writtenRecordKeys.contains(keyToPreWrite)) { throw new HoodieUpsertException("Insert/Update not in sorted order"); } try { writeRecord(hoodieRecord, Option.of(hoodieRecord), newSchema, config.getProps()); insertRecordsWritten++; writtenRecordKeys.add(keyToPreWrite); } catch (IOException e) { throw new HoodieUpsertException("Failed to write records", e); } } super.write(oldRecord); }
3.68
dubbo_DubboBootstrap_getInstance
/** * See {@link ApplicationModel} and {@link ExtensionLoader} for why DubboBootstrap is designed to be singleton. */ public static DubboBootstrap getInstance() { if (instance == null) { synchronized (DubboBootstrap.class) { if (instance == null) { instance = DubboBootstrap.getInstance(ApplicationModel.defaultModel()); } } } return instance; }
3.68
zxing_Detector_detect
/** * Detects an Aztec Code in an image. * * @param isMirror if true, image is a mirror-image of original * @return {@link AztecDetectorResult} encapsulating results of detecting an Aztec Code * @throws NotFoundException if no Aztec Code can be found */ public AztecDetectorResult detect(boolean isMirror) throws NotFoundException { // 1. Get the center of the aztec matrix Point pCenter = getMatrixCenter(); // 2. Get the center points of the four diagonal points just outside the bull's eye // [topRight, bottomRight, bottomLeft, topLeft] ResultPoint[] bullsEyeCorners = getBullsEyeCorners(pCenter); if (isMirror) { ResultPoint temp = bullsEyeCorners[0]; bullsEyeCorners[0] = bullsEyeCorners[2]; bullsEyeCorners[2] = temp; } // 3. Get the size of the matrix and other parameters from the bull's eye int errorsCorrected = extractParameters(bullsEyeCorners); // 4. Sample the grid BitMatrix bits = sampleGrid(image, bullsEyeCorners[shift % 4], bullsEyeCorners[(shift + 1) % 4], bullsEyeCorners[(shift + 2) % 4], bullsEyeCorners[(shift + 3) % 4]); // 5. Get the corners of the matrix. ResultPoint[] corners = getMatrixCornerPoints(bullsEyeCorners); return new AztecDetectorResult(bits, corners, compact, nbDataBlocks, nbLayers, errorsCorrected); }
3.68
framework_ClickableRenderer_addClickHandler
/** * Adds a click handler to this button renderer. The handler is invoked * every time one of the widgets rendered by this renderer is clicked. * <p> * Note that the row type of the click handler must match the row type of * the containing Grid. * * @param handler * the click handler to be added */ public HandlerRegistration addClickHandler( RendererClickHandler<?> handler) { if (handlerManager == null) { handlerManager = new HandlerManager(this); } return handlerManager.addHandler(RendererClickEvent.TYPE, handler); }
3.68
hbase_ReplicationPeerManager_preUpdatePeerConfig
/** * Return the old peer description. Can never be null. */ ReplicationPeerDescription preUpdatePeerConfig(String peerId, ReplicationPeerConfig peerConfig) throws DoNotRetryIOException { checkPeerConfig(peerConfig); ReplicationPeerDescription desc = checkPeerExists(peerId); ReplicationPeerConfig oldPeerConfig = desc.getPeerConfig(); if (!isStringEquals(peerConfig.getClusterKey(), oldPeerConfig.getClusterKey())) { throw new DoNotRetryIOException( "Changing the cluster key on an existing peer is not allowed. Existing key '" + oldPeerConfig.getClusterKey() + "' for peer " + peerId + " does not match new key '" + peerConfig.getClusterKey() + "'"); } if ( !isStringEquals(peerConfig.getReplicationEndpointImpl(), oldPeerConfig.getReplicationEndpointImpl()) ) { throw new DoNotRetryIOException("Changing the replication endpoint implementation class " + "on an existing peer is not allowed. Existing class '" + oldPeerConfig.getReplicationEndpointImpl() + "' for peer " + peerId + " does not match new class '" + peerConfig.getReplicationEndpointImpl() + "'"); } if (!isStringEquals(peerConfig.getRemoteWALDir(), oldPeerConfig.getRemoteWALDir())) { throw new DoNotRetryIOException( "Changing the remote wal dir on an existing peer is not allowed. Existing remote wal " + "dir '" + oldPeerConfig.getRemoteWALDir() + "' for peer " + peerId + " does not match new remote wal dir '" + peerConfig.getRemoteWALDir() + "'"); } if (oldPeerConfig.isSyncReplication()) { if (!ReplicationUtils.isNamespacesAndTableCFsEqual(oldPeerConfig, peerConfig)) { throw new DoNotRetryIOException( "Changing the replicated namespace/table config on a synchronous replication " + "peer(peerId: " + peerId + ") is not allowed."); } } return desc; }
3.68
graphhopper_VectorTile_getLayersOrBuilderList
/** * <code>repeated .vector_tile.Tile.Layer layers = 3;</code> */ public java.util.List<? extends vector_tile.VectorTile.Tile.LayerOrBuilder> getLayersOrBuilderList() { if (layersBuilder_ != null) { return layersBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(layers_); } }
3.68
pulsar_AvroRecordBuilderImpl_set
/** * Sets the value of a field. * * @param index the field to set. * @param value the value to set. * @return a reference to the RecordBuilder. */ protected GenericRecordBuilder set(int index, Object value) { if (value instanceof GenericRecord) { if (value instanceof GenericAvroRecord) { avroRecordBuilder.set(genericSchema.getAvroSchema().getFields().get(index), ((GenericAvroRecord) value).getAvroRecord()); } else { throw new IllegalArgumentException("Avro Record Builder doesn't support non-avro record as a field"); } } else { avroRecordBuilder.set( genericSchema.getAvroSchema().getFields().get(index), value ); } return this; }
3.68
hbase_MapReduceHFileSplitterJob_main
/** * Main entry point. * @param args The command line parameters. * @throws Exception When running the job fails. */ public static void main(String[] args) throws Exception { int ret = ToolRunner.run(new MapReduceHFileSplitterJob(HBaseConfiguration.create()), args); System.exit(ret); }
3.68
hadoop_ReplicaUnderConstruction_isAlive
/** * Is the data-node this replica belongs to alive. */ boolean isAlive() { return expectedLocation.getDatanodeDescriptor().isAlive(); }
3.68
hadoop_RouterClientRMService_getRootInterceptor
/** * Gets the root request interceptor. * * @return the root request interceptor */ public synchronized ClientRequestInterceptor getRootInterceptor() { return rootInterceptor; }
3.68
hudi_RowDataKeyGens_hasRecordKey
/** * Checks whether the user provided any record key. */ private static boolean hasRecordKey(String recordKeys, List<String> fieldNames) { return recordKeys.split(",").length != 1 || fieldNames.contains(recordKeys); }
3.68
hbase_Import_createCfRenameMap
// helper: make a map from sourceCfName to destCfName by parsing a config key private static Map<byte[], byte[]> createCfRenameMap(Configuration conf) { Map<byte[], byte[]> cfRenameMap = null; String allMappingsPropVal = conf.get(CF_RENAME_PROP); if (allMappingsPropVal != null) { // The conf value format should be sourceCf1:destCf1,sourceCf2:destCf2,... String[] allMappings = allMappingsPropVal.split(","); for (String mapping : allMappings) { if (cfRenameMap == null) { cfRenameMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); } String[] srcAndDest = mapping.split(":"); if (srcAndDest.length != 2) { continue; } cfRenameMap.put(Bytes.toBytes(srcAndDest[0]), Bytes.toBytes(srcAndDest[1])); } } return cfRenameMap; }
3.68
querydsl_SQLExpressions_lastValue
/** * returns value evaluated at the row that is the last row of the window frame * * @param expr argument * @return last_value(expr) */ public static <T> WindowOver<T> lastValue(Expression<T> expr) { return new WindowOver<T>(expr.getType(), SQLOps.LASTVALUE, expr); }
3.68
framework_ConnectorHierarchyChangeEvent_getOldChildren
/** * Returns a collection of the old children for the connector. This was the * state before the update was received from the server. * * @return A collection of old child connectors. Never returns null. */ public List<ComponentConnector> getOldChildren() { return oldChildren; }
3.68
hmily_ConsulClient_getInstance
/** * get instance. * @param consulConfig consul config * @return consulClient */ public static ConsulClient getInstance(final ConsulConfig consulConfig) { String hostAndPorts = consulConfig.getHostAndPorts(); List<HostAndPort> hostAndPortList = buildHostAndPortList(hostAndPorts); Consul consul; if (StringUtils.isNoneBlank(consulConfig.getHostAndPorts())) { consul = Consul.builder().withMultipleHostAndPort(hostAndPortList, consulConfig.getBlacklistTimeInMillis()).build().newClient(); } else { consul = Consul.builder().withHostAndPort(HostAndPort.fromString(consulConfig.getHostAndPort())).build().newClient(); } ConsulClient consulClient = new ConsulClient(); consulClient.setConsul(consul); return consulClient; }
3.68
dubbo_TTree_recursive
/** * recursive visit */ private void recursive(int deep, boolean isLast, String prefix, Node node, Callback callback) { callback.callback(deep, isLast, prefix, node); if (!node.isLeaf()) { final int size = node.children.size(); for (int index = 0; index < size; index++) { final boolean isLastFlag = index == size - 1; final String currentPrefix = isLast ? prefix + STEP_EMPTY_BOARD : prefix + STEP_HAS_BOARD; recursive(deep + 1, isLastFlag, currentPrefix, node.children.get(index), callback); } } }
3.68
hadoop_BufferPool_toString
// For debugging purposes. @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append(pool.toString()); sb.append("\n"); List<BufferData> allData = new ArrayList<>(getAll()); Collections.sort(allData, (d1, d2) -> d1.getBlockNumber() - d2.getBlockNumber()); for (BufferData data : allData) { sb.append(data.toString()); sb.append("\n"); } return sb.toString(); }
3.68
zilla_HttpClientFactory_teHeader
// 8.1.2.2 TE header MUST NOT contain any value other than "trailers". private void teHeader( DirectBuffer name, DirectBuffer value) { if (!error() && name.equals(TE) && !value.equals(TRAILERS)) { streamError = Http2ErrorCode.PROTOCOL_ERROR; } }
3.68
hadoop_AdminACLsManager_areACLsEnabled
/** * Returns whether ACLs are enabled * * @see YarnConfiguration#YARN_ACL_ENABLE * @see YarnConfiguration#DEFAULT_YARN_ACL_ENABLE * @return <tt>true</tt> if ACLs are enabled */ public boolean areACLsEnabled() { return aclsEnabled; }
3.68
flink_ExecutionVertex_markFailed
/** * This method marks the task as failed, but will make no attempt to remove task execution from * the task manager. It is intended for cases where the task is known not to be deployed yet. * * @param t The exception that caused the task to fail. */ public void markFailed(Throwable t) { currentExecution.markFailed(t); }
3.68
flink_FileInputFormat_open
/** * Opens an input stream to the file defined in the input format. The stream is positioned at * the beginning of the given split. * * <p>The stream is actually opened in an asynchronous thread to make sure any interruptions to * the thread working on the input format do not reach the file system. */ @Override public void open(FileInputSplit fileSplit) throws IOException { this.currentSplit = fileSplit; this.splitStart = fileSplit.getStart(); final Path path = fileSplit.getPath(); this.splitLength = testForUnsplittable(path.getFileSystem().getFileStatus(path)) ? READ_WHOLE_SPLIT_FLAG : fileSplit.getLength(); if (LOG.isDebugEnabled()) { LOG.debug( "Opening input split " + fileSplit.getPath() + " [" + this.splitStart + "," + this.splitLength + "]"); } // open the split in an asynchronous thread final InputSplitOpenThread isot = new InputSplitOpenThread(fileSplit, this.openTimeout); isot.start(); try { this.stream = isot.waitForCompletion(); this.stream = decorateInputStream(this.stream, fileSplit); } catch (Throwable t) { throw new IOException( "Error opening the Input Split " + fileSplit.getPath() + " [" + splitStart + "," + splitLength + "]: " + t.getMessage(), t); } // get FSDataInputStream if (this.splitStart != 0) { this.stream.seek(this.splitStart); } }
3.68
pulsar_ComponentImpl_isSuperUser
/** * @deprecated use {@link #isSuperUser(AuthenticationParameters)} */ @Deprecated public boolean isSuperUser(String clientRole, AuthenticationDataSource authenticationData) { AuthenticationParameters authParams = AuthenticationParameters.builder().clientRole(clientRole) .clientAuthenticationDataSource(authenticationData).build(); return isSuperUser(authParams); }
3.68
hbase_HFileLink_createFromHFileLink
/** * Create a new HFileLink starting from a hfileLink name * <p> * It also adds a back-reference to the hfile back-reference directory to simplify the * reference-count and the cleaning process. * @param conf {@link Configuration} to read for the archive directory name * @param fs {@link FileSystem} on which to write the HFileLink * @param dstFamilyPath - Destination path (table/region/cf/) * @param hfileLinkName - HFileLink name (it contains hfile-region-table) * @param createBackRef - Whether back reference should be created. Defaults to true. * @return the file link name. * @throws IOException on file or parent directory creation failure. */ public static String createFromHFileLink(final Configuration conf, final FileSystem fs, final Path dstFamilyPath, final String hfileLinkName, final boolean createBackRef) throws IOException { Matcher m = LINK_NAME_PATTERN.matcher(hfileLinkName); if (!m.matches()) { throw new IllegalArgumentException(hfileLinkName + " is not a valid HFileLink name!"); } return create(conf, fs, dstFamilyPath, TableName.valueOf(m.group(1), m.group(2)), m.group(3), m.group(4), createBackRef); }
3.68
hbase_MiniZooKeeperCluster_hasValidClientPortInList
/** * Check whether the client port in a specific position of the client port list is valid. * @param index the specified position */ private boolean hasValidClientPortInList(int index) { return (clientPortList.size() > index && clientPortList.get(index) > 0); }
3.68
hbase_HDFSBlocksDistribution_addHostAndBlockWeight
/** * add some weight to a specific host * @param host the host name * @param weight the weight * @param weightForSsd the weight for ssd */ private void addHostAndBlockWeight(String host, long weight, long weightForSsd) { if (host == null) { // erroneous data return; } HostAndWeight hostAndWeight = this.hostAndWeights.get(host); if (hostAndWeight == null) { hostAndWeight = new HostAndWeight(host, weight, weightForSsd); this.hostAndWeights.put(host, hostAndWeight); } else { hostAndWeight.addWeight(weight, weightForSsd); } }
3.68
AreaShop_FileManager_saveVersions
/** * Save the versions file to disk. */ public void saveVersions() { if(!(new File(versionPath).exists())) { AreaShop.debug("versions file created, this should happen only after installing or upgrading the plugin"); } try (ObjectOutputStream output = new ObjectOutputStream(new FileOutputStream(versionPath))) { output.writeObject(versions); } catch(IOException e) { AreaShop.warn("File could not be saved: " + versionPath); } }
3.68
querydsl_AbstractOracleQuery_connectBy
/** * CONNECT BY specifies the relationship between parent rows and child rows of the hierarchy. * * @param cond condition * @return the current object */ public C connectBy(Predicate cond) { return addFlag(Position.BEFORE_ORDER, CONNECT_BY, cond); }
3.68
pulsar_JSONSchema_clearCaches
/** * Clears the caches tied to the ObjectMapper instances and replaces the singleton ObjectMapper instance. * * This can be used in tests to ensure that classloaders and class references don't leak across tests. */ public static void clearCaches() { jsonMapper().getTypeFactory().clearCache(); replaceSingletonInstance(); }
3.68
framework_UIConnector_activateTheme
/** * Activates the new theme. Assumes the theme has been loaded and taken into * use in the browser. * * @since 7.4.3 * @param newTheme * The name of the new theme */ protected void activateTheme(String newTheme) { if (activeTheme != null) { getWidget().getParent().removeStyleName(activeTheme); VOverlay.getOverlayContainer(getConnection()) .removeClassName(activeTheme); } activeTheme = newTheme; if (newTheme != null) { getWidget().getParent().addStyleName(newTheme); VOverlay.getOverlayContainer(getConnection()) .addClassName(activeTheme); updateVaadinFavicon(newTheme); } // Request a full resynchronization from the server to deal with legacy // components getConnection().getMessageSender().resynchronize(); // Immediately update state and do layout while waiting for the resync forceStateChangeRecursively(UIConnector.this); getLayoutManager().forceLayout(); }
3.68
hbase_QuotaFilter_getTypeFilters
/** Returns the QuotaType types that we want to filter on */ public Set<QuotaType> getTypeFilters() { return types; }
3.68
framework_Slot_getHorizontalSpacing
/** * Get the horizontal amount of pixels of the spacing. * * @return the width of the spacing element or zero if this slot doesn't * have spacing */ protected int getHorizontalSpacing() { if (spacer == null) { return 0; } else if (layout.getLayoutManager() != null) { return layout.getLayoutManager().getOuterWidth(spacer); } return spacer.getOffsetWidth(); }
3.68
hmily_HmilyRepositoryNode_getHmilyParticipantUndoRootPath
/** * Get hmily participant undo root path. * * @return hmily participant undo root path */ public String getHmilyParticipantUndoRootPath() { return Joiner.on("/").join("", ROOT_PATH_PREFIX, appName, HMILY_PARTICIPANT_UNDO); }
3.68