Columns: name (string, length 12 to 178), code_snippet (string, length 8 to 36.5k), score (float64, range 3.26 to 3.68)
hadoop_ConnectionPool_getJSON
/** * JSON representation of the connection pool. * * @return String representation of the JSON. */ public String getJSON() { final Map<String, String> info = new LinkedHashMap<>(); info.put("active", Integer.toString(getNumActiveConnections())); info.put("recent_active", Integer.toString(getNumActiveConnectionsRecently())); info.put("idle", Integer.toString(getNumIdleConnections())); info.put("total", Integer.toString(getNumConnections())); if (LOG.isDebugEnabled()) { List<ConnectionContext> tmpConnections = this.connections; for (int i=0; i<tmpConnections.size(); i++) { ConnectionContext connection = tmpConnections.get(i); info.put(i + " active", Boolean.toString(connection.isActive())); info.put(i + " recent_active", Integer.toString(getNumActiveConnectionsRecently())); info.put(i + " idle", Boolean.toString(connection.isUsable())); info.put(i + " closed", Boolean.toString(connection.isClosed())); } } return JSON.toString(info); }
3.68
hadoop_JobQueueChangeEvent_getJobQueueName
/** Get the new Job queue name */ public String getJobQueueName() { if (datum.jobQueueName != null) { return datum.jobQueueName.toString(); } return null; }
3.68
hadoop_AppToFlowRowKey_parseRowKey
/** * Given the raw row key as bytes, returns the row key as an object. * * @param rowKey a rowkey represented as a byte array. * @return an <cite>AppToFlowRowKey</cite> object. */ public static AppToFlowRowKey parseRowKey(byte[] rowKey) { String appId = new AppIdKeyConverter().decode(rowKey); return new AppToFlowRowKey(appId); }
3.68
hadoop_PlacementFactory_getPlacementRule
/** * Create a new {@link PlacementRule} based on the rule class from the * configuration. This is used by the scheduler to instantiate rules; the * class is resolved before this call. * @param ruleClass The specific class reference to instantiate * @param initArg The configuration object to set on the rule * @return The created rule instance */ public static PlacementRule getPlacementRule( Class<? extends PlacementRule> ruleClass, Object initArg) { LOG.info("Creating PlacementRule implementation: " + ruleClass); PlacementRule rule = ReflectionUtils.newInstance(ruleClass, null); rule.setConfig(initArg); return rule; }
3.68
flink_StaticFileServerHandler_respondToRequest
/** Response when running with leading JobManager. */ private void respondToRequest( ChannelHandlerContext ctx, HttpRequest request, String requestPath) throws IOException, ParseException, URISyntaxException, RestHandlerException { // convert to absolute path final File file = new File(rootPath, requestPath); if (!file.exists()) { // file does not exist. Try to load it with the classloader ClassLoader cl = StaticFileServerHandler.class.getClassLoader(); try (InputStream resourceStream = cl.getResourceAsStream("web" + requestPath)) { boolean success = false; try { if (resourceStream != null) { URL root = cl.getResource("web"); URL requested = cl.getResource("web" + requestPath); if (root != null && requested != null) { URI rootURI = new URI(root.getPath()).normalize(); URI requestedURI = new URI(requested.getPath()).normalize(); // Check that we don't load anything from outside of the // expected scope. if (!rootURI.relativize(requestedURI).equals(requestedURI)) { logger.debug( "Loading missing file from classloader: {}", requestPath); // ensure that directory to file exists. file.getParentFile().mkdirs(); Files.copy(resourceStream, file.toPath()); success = true; } } } } catch (Throwable t) { logger.error("error while responding", t); } finally { if (!success) { logger.debug( "Unable to load requested file {} from classloader", requestPath); throw new NotFoundException( String.format("Unable to load requested file %s.", requestPath)); } } } } checkFileValidity(file, rootPath, logger); // cache validation final String ifModifiedSince = request.headers().get(IF_MODIFIED_SINCE); if (ifModifiedSince != null && !ifModifiedSince.isEmpty()) { SimpleDateFormat dateFormatter = new SimpleDateFormat(HTTP_DATE_FORMAT, Locale.US); Date ifModifiedSinceDate = dateFormatter.parse(ifModifiedSince); // Only compare up to the second because the datetime format we send to the client // does not have milliseconds long ifModifiedSinceDateSeconds = ifModifiedSinceDate.getTime() / 1000; long fileLastModifiedSeconds = file.lastModified() / 1000; if (ifModifiedSinceDateSeconds == fileLastModifiedSeconds) { if (logger.isDebugEnabled()) { logger.debug( "Responding 'NOT MODIFIED' for file '" + file.getAbsolutePath() + '\''); } sendNotModified(ctx); return; } } if (logger.isDebugEnabled()) { logger.debug("Responding with file '" + file.getAbsolutePath() + '\''); } // Don't need to close this manually. Netty's DefaultFileRegion will take care of it. final RandomAccessFile raf; try { raf = new RandomAccessFile(file, "r"); } catch (FileNotFoundException e) { if (logger.isDebugEnabled()) { logger.debug("Could not find file {}.", file.getAbsolutePath()); } throw new NotFoundException("File not found."); } try { long fileLength = raf.length(); HttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK); setContentTypeHeader(response, file); setDateAndCacheHeaders(response, file); if (HttpUtil.isKeepAlive(request)) { response.headers().set(CONNECTION, HttpHeaderValues.KEEP_ALIVE); } HttpUtil.setContentLength(response, fileLength); // write the initial line and the header. ctx.write(response); // write the content. 
ChannelFuture lastContentFuture; if (ctx.pipeline().get(SslHandler.class) == null) { ctx.write( new DefaultFileRegion(raf.getChannel(), 0, fileLength), ctx.newProgressivePromise()); lastContentFuture = ctx.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT); } else { lastContentFuture = ctx.writeAndFlush( new HttpChunkedInput(new ChunkedFile(raf, 0, fileLength, 8192)), ctx.newProgressivePromise()); // HttpChunkedInput will write the end marker (LastHttpContent) for us. } // close the connection, if no keep-alive is needed if (!HttpUtil.isKeepAlive(request)) { lastContentFuture.addListener(ChannelFutureListener.CLOSE); } } catch (Exception e) { raf.close(); logger.error("Failed to serve file.", e); throw new RestHandlerException("Internal server error.", INTERNAL_SERVER_ERROR); } }
3.68
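A minimal, standalone sketch of the cache-validation step in the handler above: the If-Modified-Since header only carries second precision, so both timestamps are truncated to seconds before comparison. The HTTP_DATE_FORMAT pattern and the sample header value below are illustrative assumptions, not the handler's actual configuration.

import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Locale;

public class IfModifiedSinceCheck {
    // RFC 1123 date format commonly used for HTTP headers (assumed here).
    private static final String HTTP_DATE_FORMAT = "EEE, dd MMM yyyy HH:mm:ss zzz";

    // Returns true if the cached copy is still fresh, i.e. a 304 Not Modified should be sent.
    static boolean notModified(String ifModifiedSince, long fileLastModifiedMillis) throws ParseException {
        SimpleDateFormat dateFormatter = new SimpleDateFormat(HTTP_DATE_FORMAT, Locale.US);
        Date headerDate = dateFormatter.parse(ifModifiedSince);
        // Compare at second precision only, because the header has no milliseconds.
        return headerDate.getTime() / 1000 == fileLastModifiedMillis / 1000;
    }

    public static void main(String[] args) throws ParseException {
        long lastModified = 1_700_000_000_123L;          // file mtime with milliseconds
        String header = "Tue, 14 Nov 2023 22:13:20 GMT"; // same instant at second precision
        System.out.println(notModified(header, lastModified)); // prints true
    }
}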
framework_LayoutManager_getInnerWidthDouble
/** * Gets the inner width (excluding margins, paddings and borders) of the * given element, provided that it has been measured. These elements are * guaranteed to be measured: * <ul> * <li>ManagedLayouts and their child Connectors * <li>Elements for which there is at least one ElementResizeListener * <li>Elements for which at least one ManagedLayout has registered a * dependency * </ul> * * -1 is returned if the element has not been measured. If 0 is returned, it * might indicate that the element is not attached to the DOM. * * @since 7.5.1 * @param element * the element to get the measured size for * @return the measured inner width (excluding margins, paddings and * borders) of the element in pixels. */ public final double getInnerWidthDouble(Element element) { assert needsMeasure( element) : "Getting measurement for element that is not measured"; return getMeasuredSize(element, nullSize).getInnerWidth(); }
3.68
hudi_DataHubSyncTool_syncHoodieTable
/** * Sync to a DataHub Dataset. * * @implNote DataHub sync is an experimental feature, which overwrites the DataHub Dataset's schema * and last commit time sync'ed upon every invocation. */ @Override public void syncHoodieTable() { try (DataHubSyncClient syncClient = new DataHubSyncClient(config)) { syncClient.updateTableSchema(config.getString(META_SYNC_TABLE_NAME), null); syncClient.updateLastCommitTimeSynced(config.getString(META_SYNC_TABLE_NAME)); } }
3.68
hadoop_TimelineMetricCalculator_sum
/** * Sum up two Numbers. * @param n1 Number n1 * @param n2 Number n2 * @return a Number representing (n1 + n2), or null if the type is not supported. */ public static Number sum(Number n1, Number n2) { if (n1 == null) { return n2; } else if (n2 == null) { return n1; } if (n1 instanceof Integer || n1 instanceof Long) { return n1.longValue() + n2.longValue(); } if (n1 instanceof Float || n1 instanceof Double) { return n1.doubleValue() + n2.doubleValue(); } // TODO throw warnings/exceptions for other types of number. return null; }
3.68
flink_RichSqlInsert_getStaticPartitionKVs
/** * Get static partition key-value pairs as strings. * * <p>For character literals we return the unquoted and unescaped values. For other types we use * {@link SqlLiteral#toString()} to get the string format of the value literal. If the string * format is not what you need, use {@link #getStaticPartitions()}. * * @return the mapping of column names to values of partition specifications; returns an empty * map if there are no partition specifications. */ public LinkedHashMap<String, String> getStaticPartitionKVs() { LinkedHashMap<String, String> ret = new LinkedHashMap<>(); if (this.staticPartitions.size() == 0) { return ret; } for (SqlNode node : this.staticPartitions.getList()) { SqlProperty sqlProperty = (SqlProperty) node; Comparable comparable = SqlLiteral.value(sqlProperty.getValue()); String value = comparable instanceof NlsString ? ((NlsString) comparable).getValue() : comparable.toString(); ret.put(sqlProperty.getKey().getSimple(), value); } return ret; }
3.68
hadoop_AbfsClientThrottlingAnalyzer_resumeTimer
/** * Resumes the timer if it was stopped. */ private void resumeTimer() { blobMetrics = new AtomicReference<AbfsOperationMetrics>( new AbfsOperationMetrics(System.currentTimeMillis())); timer.schedule(new TimerTaskImpl(), analysisPeriodMs, analysisPeriodMs); isOperationOnAccountIdle.set(false); }
3.68
hbase_ThriftUtilities_putsFromThrift
/** * Converts multiple {@link TPut}s (Thrift) into a list of {@link Put}s (HBase). * @param in list of <code>TPut</code>s to convert * @return list of converted <code>Put</code>s * @see #putFromThrift(TPut) */ public static List<Put> putsFromThrift(List<TPut> in) { List<Put> out = new ArrayList<>(in.size()); for (TPut put : in) { out.add(putFromThrift(put)); } return out; }
3.68
dubbo_MetadataInfo_calAndGetRevision
/** * Calculation of this instance's status like revision and modification of the same instance must be synchronized among different threads. * <p> * Usage of this method is strictly restricted to certain points such as when during registration. Always try to use {@link this#getRevision()} instead. */ public synchronized String calAndGetRevision() { if (revision != null && !updated) { return revision; } updated = false; if (CollectionUtils.isEmptyMap(services)) { this.revision = EMPTY_REVISION; } else { StringBuilder sb = new StringBuilder(); sb.append(app); for (Map.Entry<String, ServiceInfo> entry : new TreeMap<>(services).entrySet()) { sb.append(entry.getValue().toDescString()); } String tempRevision = RevisionResolver.calRevision(sb.toString()); if (!StringUtils.isEquals(this.revision, tempRevision)) { if (logger.isInfoEnabled()) { logger.info(String.format( "metadata revision changed: %s -> %s, app: %s, services: %d", this.revision, tempRevision, this.app, this.services.size())); } this.revision = tempRevision; this.rawMetadataInfo = JsonUtils.toJson(this); } } return revision; }
3.68
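The revision in calAndGetRevision above is deterministic because the services are iterated through a TreeMap (sorted by key) before hashing. A standalone sketch of the same idea follows; RevisionResolver.calRevision is assumed to be a digest over the concatenated string, and MD5 is used here only as a stand-in, with made-up service descriptors.

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Map;
import java.util.TreeMap;

public class RevisionSketch {
    // Digest stand-in for RevisionResolver.calRevision (assumed; the real resolver may differ).
    static String calRevision(String s) throws NoSuchAlgorithmException {
        byte[] digest = MessageDigest.getInstance("MD5").digest(s.getBytes(StandardCharsets.UTF_8));
        StringBuilder hex = new StringBuilder();
        for (byte b : digest) {
            hex.append(String.format("%02x", b));
        }
        return hex.toString();
    }

    public static void main(String[] args) throws NoSuchAlgorithmException {
        String app = "demo-app";
        // Hypothetical service descriptors keyed by service name.
        Map<String, String> services = new TreeMap<>();
        services.put("org.demo.OrderService", "org.demo.OrderService:1.0.0:dubbo");
        services.put("org.demo.UserService", "org.demo.UserService:1.0.0:dubbo");
        // TreeMap iteration is sorted by key, so the concatenation (and the revision) is stable.
        StringBuilder sb = new StringBuilder(app);
        services.values().forEach(sb::append);
        System.out.println("revision = " + calRevision(sb.toString()));
    }
}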
flink_Plan_setExecutionConfig
/** * Sets the runtime config object defining execution parameters. * * @param executionConfig The execution config to use. */ public void setExecutionConfig(ExecutionConfig executionConfig) { this.executionConfig = executionConfig; }
3.68
hbase_TableSnapshotInputFormatImpl_getBestLocations
/** * This computes the locations to be passed from the InputSplit. MR/Yarn schedulers do not take * weights into account, thus will treat every location passed from the input split as equal. We * do not want to blindly pass all the locations, since we are creating one split per region, and * the region's blocks are all distributed throughout the cluster unless favored node assignment * is used. In the expected stable case, only one location will contain most of the blocks as * local. On the other hand, in favored node assignment, 3 nodes will contain highly local blocks. * Here we are doing a simple heuristic, where we will pass all hosts which have at least 80% * (hbase.tablesnapshotinputformat.locality.cutoff.multiplier) as much block locality as the top * host with the best locality. Return at most numTopsAtMost locations if there are more than * that. */ private static List<String> getBestLocations(Configuration conf, HDFSBlocksDistribution blockDistribution, int numTopsAtMost) { HostAndWeight[] hostAndWeights = blockDistribution.getTopHostsWithWeights(); if (hostAndWeights.length == 0) { // no matter what numTopsAtMost is return null; } if (numTopsAtMost < 1) { // invalid if numTopsAtMost < 1, correct it to be 1 numTopsAtMost = 1; } int top = Math.min(numTopsAtMost, hostAndWeights.length); List<String> locations = new ArrayList<>(top); HostAndWeight topHost = hostAndWeights[0]; locations.add(topHost.getHost()); if (top == 1) { // only care about the top host return locations; } // When top >= 2, // do the heuristic: filter all hosts which have at least cutoffMultiplier % of block locality double cutoffMultiplier = conf.getFloat(LOCALITY_CUTOFF_MULTIPLIER, DEFAULT_LOCALITY_CUTOFF_MULTIPLIER); double filterWeight = topHost.getWeight() * cutoffMultiplier; for (int i = 1; i <= top - 1; i++) { if (hostAndWeights[i].getWeight() >= filterWeight) { locations.add(hostAndWeights[i].getHost()); } else { // As hostAndWeights is in descending order, // we could break the loop as long as we meet a weight which is less than filterWeight. break; } } return locations; }
3.68
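A worked example of the locality cutoff heuristic above, assuming the default multiplier of 0.8: with hosts already sorted by weight descending, only hosts whose weight is at least 80% of the top host's weight are kept, and at most numTopsAtMost of them. The host names and weights below are made up for illustration.

import java.util.ArrayList;
import java.util.List;

public class LocalityCutoffExample {
    public static void main(String[] args) {
        // Hosts sorted by block weight, descending (as getTopHostsWithWeights() would return).
        String[] hosts = {"rs-1", "rs-2", "rs-3", "rs-4"};
        long[] weights = {1000, 900, 790, 100};
        double cutoffMultiplier = 0.8; // assumed default of the locality.cutoff.multiplier setting
        int numTopsAtMost = 3;

        double filterWeight = weights[0] * cutoffMultiplier; // 800
        List<String> locations = new ArrayList<>();
        locations.add(hosts[0]);
        int top = Math.min(numTopsAtMost, hosts.length);
        for (int i = 1; i < top; i++) {
            if (weights[i] >= filterWeight) {
                locations.add(hosts[i]);
            } else {
                break; // weights are descending, so no later host can pass either
            }
        }
        // rs-1 (1000) and rs-2 (900 >= 800) are kept; rs-3 (790) falls below the cutoff.
        System.out.println(locations); // [rs-1, rs-2]
    }
}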
flink_Costs_compareTo
/** * The order of comparison is: network first, then disk, then CPU. For each dimension, the * quantifiable (actual) costs are compared when both are known; otherwise the comparison falls * back to the heuristic costs. * * @see java.lang.Comparable#compareTo(java.lang.Object) */ @Override public int compareTo(Costs o) { // check the network cost. if we have actual costs on both, use them, otherwise use the // heuristic costs. if (this.networkCost != UNKNOWN && o.networkCost != UNKNOWN) { if (this.networkCost != o.networkCost) { return this.networkCost < o.networkCost ? -1 : 1; } } else if (this.heuristicNetworkCost < o.heuristicNetworkCost) { return -1; } else if (this.heuristicNetworkCost > o.heuristicNetworkCost) { return 1; } // next, check the disk cost. again, if we have actual costs on both, use them, otherwise // use the heuristic costs. if (this.diskCost != UNKNOWN && o.diskCost != UNKNOWN) { if (this.diskCost != o.diskCost) { return this.diskCost < o.diskCost ? -1 : 1; } } else if (this.heuristicDiskCost < o.heuristicDiskCost) { return -1; } else if (this.heuristicDiskCost > o.heuristicDiskCost) { return 1; } // next, check the CPU cost. again, if we have actual costs on both, use them, otherwise use // the heuristic costs. if (this.cpuCost != UNKNOWN && o.cpuCost != UNKNOWN) { return this.cpuCost < o.cpuCost ? -1 : this.cpuCost > o.cpuCost ? 1 : 0; } else if (this.heuristicCpuCost < o.heuristicCpuCost) { return -1; } else if (this.heuristicCpuCost > o.heuristicCpuCost) { return 1; } else { return 0; } }
3.68
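The per-dimension comparison pattern used above can be isolated as a small helper: compare the actual values only when both sides know them, otherwise fall back to the heuristic values. The UNKNOWN sentinel of -1 below is an assumption mirroring the snippet; the real class defines its own constant.

public class CostCompareSketch {
    static final double UNKNOWN = -1; // assumed sentinel, mirroring the snippet

    // Returns <0, 0 or >0, using actual costs when both sides know them, heuristics otherwise.
    static int compareDimension(double actualA, double actualB, double heuristicA, double heuristicB) {
        if (actualA != UNKNOWN && actualB != UNKNOWN) {
            return Double.compare(actualA, actualB);
        }
        return Double.compare(heuristicA, heuristicB);
    }

    public static void main(String[] args) {
        // Both actual costs known: heuristics are ignored.
        System.out.println(compareDimension(10, 20, 99, 1));      // negative
        // One side unknown: fall back to heuristic costs.
        System.out.println(compareDimension(UNKNOWN, 20, 99, 1)); // positive
    }
}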
pulsar_ModularLoadManagerImpl_initialize
/** * Initialize this load manager using the given PulsarService. Should be called only once, after invoking the * default constructor. * * @param pulsar The service to initialize with. */ @Override public void initialize(final PulsarService pulsar) { this.pulsar = pulsar; this.pulsarResources = pulsar.getPulsarResources(); brokersData = pulsar.getCoordinationService().getLockManager(LocalBrokerData.class); resourceQuotaCache = pulsar.getLocalMetadataStore().getMetadataCache(ResourceQuota.class); pulsar.getLocalMetadataStore().registerListener(this::handleDataNotification); pulsar.getLocalMetadataStore().registerSessionListener(this::handleMetadataSessionEvent); if (SystemUtils.IS_OS_LINUX) { brokerHostUsage = new LinuxBrokerHostUsageImpl(pulsar); } else { brokerHostUsage = new GenericBrokerHostUsageImpl(pulsar); } bundleSplitStrategy = new BundleSplitterTask(); conf = pulsar.getConfiguration(); // Initialize the default stats to assume for unseen bundles (hard-coded for now). defaultStats.msgThroughputIn = DEFAULT_MESSAGE_THROUGHPUT; defaultStats.msgThroughputOut = DEFAULT_MESSAGE_THROUGHPUT; defaultStats.msgRateIn = DEFAULT_MESSAGE_RATE; defaultStats.msgRateOut = DEFAULT_MESSAGE_RATE; placementStrategy = ModularLoadManagerStrategy.create(conf); policies = new SimpleResourceAllocationPolicies(pulsar); filterPipeline.add(new BrokerLoadManagerClassFilter()); filterPipeline.add(new BrokerVersionFilter()); LoadManagerShared.refreshBrokerToFailureDomainMap(pulsar, brokerToFailureDomainMap); // register listeners for domain changes pulsarResources.getClusterResources().getFailureDomainResources() .registerListener(__ -> { executors.execute( () -> LoadManagerShared.refreshBrokerToFailureDomainMap(pulsar, brokerToFailureDomainMap)); }); loadSheddingPipeline.add(createLoadSheddingStrategy()); }
3.68
framework_AbstractSplitPanel_isSplitPositionReversed
/** * Is the split position reversed. By default the split position is measured * by the first region, but if split position is reversed the measuring is * done by the second region instead. * * @since 7.3.6 * @return {@code true} if reversed, {@code false} otherwise. * @see #setSplitPosition(float, boolean) */ public boolean isSplitPositionReversed() { return getSplitterState(false).positionReversed; }
3.68
hadoop_BCFile_prepareMetaBlock
/** * Create a Meta Block and obtain an output stream for adding data into the * block. The Meta Block will be compressed with the same compression * algorithm as data blocks. There can only be one BlockAppender stream * active at any time. Regular Blocks may not be created after the first * Meta Blocks. The caller must call BlockAppender.close() to conclude the * block creation. * * @param name * The name of the Meta Block. The name must not conflict with * existing Meta Blocks. * @return The BlockAppender stream * @throws MetaBlockAlreadyExists * If the meta block with the name already exists. * @throws IOException */ public BlockAppender prepareMetaBlock(String name) throws IOException, MetaBlockAlreadyExists { return prepareMetaBlock(name, getDefaultCompressionAlgorithm()); }
3.68
hadoop_ContainerStatus_getHost
/** * Get the hostname where the container runs. * @return The hostname where the container runs. */ @Public @Unstable public String getHost() { throw new UnsupportedOperationException( "subclass must implement this method"); }
3.68
framework_ExpandingContainer_removeItem
/** * @throws UnsupportedOperationException * always */ @Override public boolean removeItem(Object itemId) { throw new UnsupportedOperationException(); }
3.68
hbase_LockAndQueue_releaseSharedLock
/** Returns whether we should wake the procedures waiting on the lock here. */ public boolean releaseSharedLock() { // hasExclusiveLock could be true, it usually means we acquire shared lock while we or our // parent have held the xlock. And since there is still an exclusive lock, we do not need to // wake any procedures. return --sharedLock == 0 && !hasExclusiveLock(); }
3.68
flink_ExternalResourceOptions_getExternalResourceParamConfigPrefixForResource
/** Generate the suffix option key prefix for the user-defined params for external resources. */ public static String getExternalResourceParamConfigPrefixForResource(String resourceName) { return keyWithResourceNameAndSuffix(resourceName, EXTERNAL_RESOURCE_DRIVER_PARAM_SUFFIX); }
3.68
flink_TextElement_text
/** * Creates a simple block of text. * * @param text a simple block of text * @return block of text */ public static TextElement text(String text) { return new TextElement(text, Collections.emptyList()); }
3.68
hudi_HiveSchemaUtils_toHiveTypeInfo
/** * Convert Flink DataType to Hive TypeInfo. For types with a precision parameter, e.g. * timestamp, the supported precisions in Hive and Flink can be different. Therefore the * conversion will fail for those types if the precision is not supported by Hive and * checkPrecision is true. * * @param dataType a Flink DataType * @return the corresponding Hive data type */ public static TypeInfo toHiveTypeInfo(DataType dataType) { checkNotNull(dataType, "type cannot be null"); LogicalType logicalType = dataType.getLogicalType(); return logicalType.accept(new TypeInfoLogicalTypeVisitor(dataType)); }
3.68
flink_GenericRowData_of
/** * Creates an instance of {@link GenericRowData} with given field values. * * <p>By default, the row describes a {@link RowKind#INSERT} in a changelog. * * <p>Note: All fields of the row must be internal data structures. */ public static GenericRowData of(Object... values) { GenericRowData row = new GenericRowData(values.length); for (int i = 0; i < values.length; ++i) { row.setField(i, values[i]); } return row; }
3.68
framework_ContainerOrderedWrapper_getType
/* * Gets the data type of all Properties identified by the given Property ID. * Don't add a JavaDoc comment here, we use the default documentation from * implemented interface. */ @Override public Class<?> getType(Object propertyId) { return container.getType(propertyId); }
3.68
hibernate-validator_ReflectionHelper_isIterable
/** * @param type the type to check. * * @return Returns {@code true} if {@code type} is an iterable type, {@code false} otherwise. */ public static boolean isIterable(Type type) { if ( type instanceof Class && Iterable.class.isAssignableFrom( (Class<?>) type ) ) { return true; } if ( type instanceof ParameterizedType ) { return isIterable( ( (ParameterizedType) type ).getRawType() ); } if ( type instanceof WildcardType ) { Type[] upperBounds = ( (WildcardType) type ).getUpperBounds(); return upperBounds.length != 0 && isIterable( upperBounds[0] ); } return false; }
3.68
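A trimmed, standalone demo of the same idea (the wildcard-bound branch is omitted for brevity): raw classes that implement Iterable and parameterized types whose raw type does so both qualify. The Holder class below exists only to obtain ParameterizedType instances via reflection.

import java.lang.reflect.Field;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.List;
import java.util.Map;

public class IsIterableDemo {
    // Dummy holder so we can grab generic Types via reflection.
    static class Holder {
        List<String> names;
        Map<String, Integer> counts;
    }

    static boolean isIterable(Type type) {
        if (type instanceof Class && Iterable.class.isAssignableFrom((Class<?>) type)) {
            return true;
        }
        if (type instanceof ParameterizedType) {
            return isIterable(((ParameterizedType) type).getRawType());
        }
        return false;
    }

    public static void main(String[] args) throws NoSuchFieldException {
        Field names = Holder.class.getDeclaredField("names");
        Field counts = Holder.class.getDeclaredField("counts");
        System.out.println(isIterable(names.getGenericType()));  // true  (List<String>)
        System.out.println(isIterable(counts.getGenericType())); // false (Map is not Iterable)
    }
}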
flink_ConfigOptions_memoryType
/** Defines that the value of the option should be of {@link MemorySize} type. */ public TypedConfigOptionBuilder<MemorySize> memoryType() { return new TypedConfigOptionBuilder<>(key, MemorySize.class); }
3.68
hbase_SortedCompactionPolicy_checkMinFilesCriteria
/** * @param candidates pre-filtered list of candidate store files * @return the filtered subset; the compaction selection is abandoned (candidates cleared) if we * don't have enough files */ protected ArrayList<HStoreFile> checkMinFilesCriteria(ArrayList<HStoreFile> candidates, int minFiles) { if (candidates.size() < minFiles) { if (LOG.isDebugEnabled()) { LOG.debug("Not compacting files because we only have " + candidates.size() + " files ready for compaction. Need " + minFiles + " to initiate."); } candidates.clear(); } return candidates; }
3.68
flink_SplitFetcher_shutdown
/** Shutdown the split fetcher. */ public void shutdown() { lock.lock(); try { if (!closed) { closed = true; paused = false; LOG.info("Shutting down split fetcher {}", id); wakeUpUnsafe(false); } } finally { lock.unlock(); } }
3.68
framework_CssLayout_replaceComponent
/* Documented in superclass */ @Override public void replaceComponent(Component oldComponent, Component newComponent) { // Gets the locations int oldLocation = -1; int newLocation = -1; int location = 0; for (final Component component : components) { if (component == oldComponent) { oldLocation = location; } if (component == newComponent) { newLocation = location; } location++; } if (oldLocation == -1) { addComponent(newComponent); } else if (newLocation == -1) { removeComponent(oldComponent); addComponent(newComponent, oldLocation); } else { if (oldLocation > newLocation) { components.remove(oldComponent); components.add(newLocation, oldComponent); components.remove(newComponent); components.add(oldLocation, newComponent); } else { components.remove(newComponent); components.add(oldLocation, newComponent); components.remove(oldComponent); components.add(newLocation, oldComponent); } markAsDirty(); } }
3.68
hadoop_SchedulingRequest_executionType
/** * Set the <code>executionType</code> of the request. * * @see SchedulingRequest#setExecutionType(ExecutionTypeRequest) * @param executionType <code>executionType</code> of the request * @return {@link SchedulingRequest.SchedulingRequestBuilder} */ @Public @Unstable public SchedulingRequestBuilder executionType( ExecutionTypeRequest executionType) { schedulingRequest.setExecutionType(executionType); return this; }
3.68
framework_VCustomLayout_notifyChildrenOfSizeChange
/** * This method is published to JS side with the same name into first DOM * node of custom layout. This way if one implements some resizeable * containers in custom layout he/she can notify children after resize. * * @deprecated this method has done absolutely nothing since Vaadin 7.0 and * should not be used, before that forced a recursive re-layout */ @Deprecated public void notifyChildrenOfSizeChange() { }
3.68
morf_SchemaChangeSequence_applyInReverseToSchema
/** * Applies the change reversals to the given schema. * * @param initialSchema The schema to apply changes to. * @return the resulting schema after applying reverse changes in this sequence */ public Schema applyInReverseToSchema(Schema initialSchema) { Schema currentSchema = initialSchema; // we need to reverse the order of the changes inside the step before we try to reverse-execute them for (UpgradeStepWithChanges changesForStep : Lists.reverse(allChanges)) { for (SchemaChange change : Lists.reverse(changesForStep.getChanges())) { try { currentSchema = change.reverse(currentSchema); } catch (RuntimeException rte) { throw new RuntimeException("Failed to reverse-apply change [" + change + "] from upgrade step " + changesForStep.getUpgradeClass(), rte); } } } return currentSchema; }
3.68
morf_ChangePrimaryKeyColumns_verifyNewPrimaryKeyIsNotIndexed
/** * Verify that the proposed PK does not already exist as an index. Permitting this confuses Oracle. */ private void verifyNewPrimaryKeyIsNotIndexed(Table table, List<String> newPrimaryKeyColumnsUpperCase) { for (Index index : table.indexes()) { List<String> indexColumnNames = toUpperCase(index.columnNames()); if (indexColumnNames.equals(newPrimaryKeyColumnsUpperCase)) { throw new IllegalArgumentException( "Attempting to change primary key of table [" + table.getName() + "] to " + newPrimaryKeyColumns + " but this combination of columns exists in index [" + index.getName() + "]"); } } }
3.68
framework_Escalator_applyHeightByRows
/** * Reapplies the row-based height of the Grid, if Grid currently should * define its height that way. */ private void applyHeightByRows() { if (heightMode != HeightMode.ROW && heightMode != HeightMode.UNDEFINED) { return; } double headerHeight = header.getHeightOfSection(); double footerHeight = footer.getHeightOfSection(); double bodyHeight = body.getDefaultRowHeight() * heightByRows; double scrollbar = horizontalScrollbar.showsScrollHandle() ? horizontalScrollbar.getScrollbarThickness() : 0; double spacerHeight = 0; // ignored if HeightMode.ROW if (heightMode == HeightMode.UNDEFINED) { spacerHeight = body.spacerContainer.getSpacerHeightsSum(); } double totalHeight = headerHeight + bodyHeight + spacerHeight + scrollbar + footerHeight; setHeightInternal(totalHeight + "px"); }
3.68
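A back-of-the-envelope version of the height calculation in applyHeightByRows above, with made-up pixel values: the total height is the sum of header, body (default row height times the row count), spacers, the horizontal scrollbar thickness, and footer.

public class HeightByRowsExample {
    public static void main(String[] args) {
        double headerHeight = 40.0;       // hypothetical measured header section height
        double footerHeight = 40.0;       // hypothetical measured footer section height
        double defaultRowHeight = 38.5;   // hypothetical default body row height
        double heightByRows = 10;         // rows the Grid should size itself to
        double scrollbarThickness = 16.0; // only added when the horizontal scrollbar shows a handle
        double spacerHeight = 0.0;        // details rows etc.; ignored for HeightMode.ROW

        double bodyHeight = defaultRowHeight * heightByRows; // 385.0
        double totalHeight = headerHeight + bodyHeight + spacerHeight + scrollbarThickness + footerHeight;
        System.out.println(totalHeight + "px"); // 481.0px
    }
}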
morf_MySqlDialect_getSqlFrom
/** * @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlFrom(org.alfasoftware.morf.sql.ExceptSetOperator) */ @Override protected String getSqlFrom(ExceptSetOperator operator) { throw new IllegalStateException("EXCEPT set operator is not supported in the MySQL dialect"); }
3.68
hbase_MapReduceBackupMergeJob_copyMetaData
/** * Copy the metadata of a backup session. * @param fs file system * @param tmpBackupDir temp backup directory, where the metadata is located * @param backupDirPath new path for the backup * @throws IOException exception */ protected void copyMetaData(FileSystem fs, Path tmpBackupDir, Path backupDirPath) throws IOException { RemoteIterator<LocatedFileStatus> it = fs.listFiles(tmpBackupDir, true); List<Path> toKeep = new ArrayList<Path>(); while (it.hasNext()) { Path p = it.next().getPath(); if (fs.isDirectory(p)) { continue; } // Keep meta String fileName = p.toString(); if ( fileName.indexOf(FSTableDescriptors.TABLEINFO_DIR) > 0 || fileName.indexOf(HRegionFileSystem.REGION_INFO_FILE) > 0 ) { toKeep.add(p); } } // Copy meta to destination for (Path p : toKeep) { Path newPath = convertToDest(p, backupDirPath); copyFile(fs, p, newPath); } }
3.68
morf_SchemaUtils_idColumn
/** * Creates a Column that defines the standard ID column. * * @return an ID Column. */ public static Column idColumn() { return new ColumnBean("id", DataType.BIG_INTEGER, 0, 0, false, "", true); }
3.68
hadoop_HsController_confPage
/** * @return the page that will be used to render the /conf page */ @Override protected Class<? extends View> confPage() { return HsConfPage.class; }
3.68
flink_TaskExecutorMemoryConfiguration_create
/** * Factory method for initializing a TaskExecutorMemoryConfiguration based on the passed * Configuration. * * @param config The Configuration used for initializing the TaskExecutorMemoryConfiguration. * @return The newly instantiated TaskExecutorMemoryConfiguration. */ public static TaskExecutorMemoryConfiguration create(Configuration config) { return new TaskExecutorMemoryConfiguration( getConfigurationValue(config, FRAMEWORK_HEAP_MEMORY), getConfigurationValue(config, TASK_HEAP_MEMORY), getConfigurationValue(config, FRAMEWORK_OFF_HEAP_MEMORY), getConfigurationValue(config, TASK_OFF_HEAP_MEMORY), getConfigurationValue(config, NETWORK_MEMORY_MAX), getConfigurationValue(config, MANAGED_MEMORY_SIZE), getConfigurationValue(config, JVM_METASPACE), getConfigurationValue(config, JVM_OVERHEAD_MAX), calculateTotalFlinkMemoryFromComponents(config), calculateTotalProcessMemoryFromComponents(config)); }
3.68
framework_VCalendarPanel_onClick
/* * (non-Javadoc) * * @see * com.google.gwt.event.dom.client.ClickHandler#onClick(com.google.gwt * .event.dom.client.ClickEvent) */ @Override public void onClick(ClickEvent event) { if (!isEnabled() || isReadonly()) { return; } Date newDate = ((Day) event.getSource()).getDate(); if (!isDateInsideRange(newDate, Resolution.DAY)) { return; } if (newDate.getMonth() != displayedMonth.getMonth() || newDate.getYear() != displayedMonth.getYear()) { // If an off-month date was clicked, we must change the // displayed month and re-render the calendar (#8931) displayedMonth.setMonth(newDate.getMonth()); displayedMonth.setYear(newDate.getYear()); renderCalendar(); } focusDay(newDate); selectFocused(); onSubmit(); }
3.68
hadoop_AzureFileSystemInstrumentation_fileCreated
/** * Indicate that we just created a file through WASB. */ public void fileCreated() { numberOfFilesCreated.incr(); }
3.68
framework_StaticSection_addCell
/** * Adds a cell to this section for given column. * * @param column * the column for which to add a cell */ protected void addCell(Column<?, ?> column) { if (!section.getGrid().getColumns().contains(column)) { throw new IllegalArgumentException( "Given column does not exist in this Grid"); } internalAddCell(section.getInternalIdForColumn(column)); }
3.68
graphhopper_VectorTile_getGeometry
/** * <pre> * Contains a stream of commands and parameters (vertices). * A detailed description on geometry encoding is located in * section 4.3 of the specification. * </pre> * * <code>repeated uint32 geometry = 4 [packed = true];</code> */ public int getGeometry(int index) { return geometry_.get(index); }
3.68
framework_DataCommunicator_getDroppedData
/** * Returns all dropped data mapped by their id from DataProvider. * * @return map of ids to dropped data objects * * @since 8.6.0 */ protected Map<Object, T> getDroppedData() { Function<T, Object> getId = getDataProvider()::getId; return droppedData.stream().map(getKeyMapper()::get) .collect(Collectors.toMap(getId, i -> i)); }
3.68
hudi_HiveSchemaUtil_convertMapSchemaToHiveFieldSchema
/** * @param schema intermediate schema in the form of Map<String, String> * @param syncConfig the Hive sync configuration * @return list of FieldSchema objects derived from the schema, excluding the partition fields, since the HMS api expects them as separate arguments for alter table commands. * @throws IOException */ public static List<FieldSchema> convertMapSchemaToHiveFieldSchema(LinkedHashMap<String, String> schema, HiveSyncConfig syncConfig) throws IOException { return schema.keySet().stream() .map(key -> new FieldSchema(key, schema.get(key).toLowerCase(), "")) .filter(field -> !syncConfig.getSplitStrings(META_SYNC_PARTITION_FIELDS).contains(field.getName())) .collect(Collectors.toList()); }
3.68
hmily_EventTypeEnum_buildByCode
/** * Build by code event type enum. * * @param code the code * @return the event type enum */ public static EventTypeEnum buildByCode(final int code) { return Arrays.stream(EventTypeEnum.values()).filter(e -> e.code == code).findFirst() .orElseThrow(() -> new HmilyRuntimeException("can not support this code!")); }
3.68
hbase_RegionInfo_parseRegionName
/** * Separate elements of a regionName. * @return Array of byte[] containing tableName, startKey and id. * @throws IOException if not parseable as a region name. */ static byte[][] parseRegionName(final byte[] regionName) throws IOException { byte[][] result = parseRegionNameOrReturnNull(regionName); if (result == null) { throw new IOException( INVALID_REGION_NAME_FORMAT_MESSAGE + ": " + Bytes.toStringBinary(regionName)); } return result; }
3.68
framework_CalendarWeekDropHandler_dragOver
/* * (non-Javadoc) * * @see * com.vaadin.terminal.gwt.client.ui.dd.VAbstractDropHandler#dragOver(com * .vaadin.terminal.gwt.client.ui.dd.VDragEvent) */ @Override public void dragOver(final VDragEvent drag) { if (isLocationValid(drag.getElementOver())) { validate(new VAcceptCallback() { @Override public void accepted(VDragEvent event) { dragAccepted(drag); } }, drag); } }
3.68
hadoop_AMOptions_setOptions
/** * Set all of the command line options relevant to this class into the passed * {@link Options}. * * @param opts * Where to set the command line options. */ static void setOptions(Options opts) { opts.addOption(SHELL_ENV_ARG, true, "Environment for shell script. Specified as env_key=env_val pairs"); opts.addOption(NAMENODE_MEMORY_MB_ARG, true, "Amount of memory in MB to be requested to run the NN (default " + NAMENODE_MEMORY_MB_DEFAULT + "). " + "Ignored unless the NameNode is run within YARN."); opts.addOption(NAMENODE_VCORES_ARG, true, "Amount of virtual cores to be requested to run the NN (default " + NAMENODE_VCORES_DEFAULT + "). " + "Ignored unless the NameNode is run within YARN."); opts.addOption(NAMENODE_ARGS_ARG, true, "Additional arguments to add when starting the NameNode. " + "Ignored unless the NameNode is run within YARN."); opts.addOption(NAMENODE_NODELABEL_ARG, true, "The node label to specify for the container to use to " + "run the NameNode."); opts.addOption(NAMENODE_METRICS_PERIOD_ARG, true, "The period in seconds for the NameNode's metrics to be emitted to " + "file; if <=0, disables this functionality. Otherwise, a " + "metrics file will be stored in the container logs for the " + "NameNode (default " + NAMENODE_METRICS_PERIOD_DEFAULT + ")."); opts.addOption(NAMENODE_NAME_DIR_ARG, true, "The directory to use for the NameNode's name data directory. " + "If not specified, a location within the container's working " + "directory will be used."); opts.addOption(NAMENODE_EDITS_DIR_ARG, true, "The directory to use for the NameNode's edits directory. " + "If not specified, a location within the container's working " + "directory will be used."); opts.addOption(DATANODE_MEMORY_MB_ARG, true, "Amount of memory in MB to be requested to run the DNs (default " + DATANODE_MEMORY_MB_DEFAULT + ")"); opts.addOption(DATANODE_VCORES_ARG, true, "Amount of virtual cores to be requested to run the DNs (default " + DATANODE_VCORES_DEFAULT + ")"); opts.addOption(DATANODE_ARGS_ARG, true, "Additional arguments to add when starting the DataNodes."); opts.addOption(DATANODE_NODELABEL_ARG, true, "The node label to specify " + "for the container to use to run the DataNode."); opts.addOption(DATANODES_PER_CLUSTER_ARG, true, "How many simulated DataNodes to run within each YARN container " + "(default " + DATANODES_PER_CLUSTER_DEFAULT + ")"); opts.addOption(DATANODE_LAUNCH_DELAY_ARG, true, "The period over which to launch the DataNodes; this will " + "be used as the maximum delay and each DataNode container will " + "be launched with some random delay less than this value. " + "Accepts human-readable time durations (e.g. 10s, 1m) (default " + DATANODE_LAUNCH_DELAY_DEFAULT + ")"); opts.addOption("help", false, "Print usage"); }
3.68
framework_Form_getFirstFocusableField
/** * Gets the first focusable field in form. If there are enabled, * non-read-only fields, the first one of them is returned. Otherwise, the * field for the first property (or null if none) is returned. * * @return the Field. */ private Field<?> getFirstFocusableField() { Collection<?> itemPropertyIds = getItemPropertyIds(); if (itemPropertyIds != null && !itemPropertyIds.isEmpty()) { for (Object id : itemPropertyIds) { if (id != null) { Field<?> field = getField(id); if (field.isConnectorEnabled() && !field.isReadOnly()) { return field; } } } // fallback: first field if none of the fields is enabled and // writable Object id = itemPropertyIds.iterator().next(); if (id != null) { return getField(id); } } return null; }
3.68
hmily_HmilyExecuteTemplate_commit
/** * Commit. * * @param connection the connection */ public void commit(final Connection connection) { if (check()) { return; } List<HmilyParticipantUndo> undoList = buildUndoList(); for (HmilyParticipantUndo undo : undoList) { HmilyParticipantUndoCacheManager.getInstance().cacheHmilyParticipantUndo(undo); HmilyRepositoryStorage.createHmilyParticipantUndo(undo); } log.debug("TAC-persist-undo ::: {}", undoList); clean(connection); }
3.68
hadoop_EditLogOutputStream_getNumSync
/** * Return number of calls to {@link #flushAndSync(boolean)} */ protected long getNumSync() { return numSync; }
3.68
pulsar_OneStageAuthenticationState_authenticate
/** * @deprecated use {@link #authenticateAsync(AuthData)} */ @Deprecated(since = "3.0.0") @Override public AuthData authenticate(AuthData authData) throws AuthenticationException { try { return authenticateAsync(authData).get(); } catch (InterruptedException | ExecutionException e) { throw new RuntimeException(e); } }
3.68
hadoop_IOStatisticsBinding_dynamicIOStatistics
/** * Create a builder for dynamic IO Statistics. * @return a builder to be completed. */ public static DynamicIOStatisticsBuilder dynamicIOStatistics() { return new DynamicIOStatisticsBuilder(); }
3.68
morf_ResultSetComparer_compareMetadata
/** * Verify that the metadata of the two data sets matches, throwing {@link IllegalArgumentException} if not. */ private void compareMetadata(ResultSetMetaData metadataLeft, ResultSetMetaData metadataRight) throws SQLException { if (metadataLeft.getColumnCount() != metadataRight.getColumnCount()) { throw new IllegalArgumentException("Column counts mismatch"); } for (int i = 1; i <= metadataLeft.getColumnCount(); i++) { int left = metadataLeft.getColumnType(i); int right = metadataRight.getColumnType(i); if (columnTypeIsBoolean(left)) { if (!columnTypeIsBoolean(right)) throwTypeMismatch(metadataLeft, metadataRight, i); continue; } if (columnTypeIsDate(left)) { if (!columnTypeIsDate(right)) throwTypeMismatch(metadataLeft, metadataRight, i); continue; } if (columnTypeIsNumeric(left)) { if (!columnTypeIsNumeric(right)) throwTypeMismatch(metadataLeft, metadataRight, i); continue; } if (columnTypeIsString(left)) { if (!columnTypeIsString(right)) throwTypeMismatch(metadataLeft, metadataRight, i); continue; } throw new IllegalArgumentException(String.format( "Unknown column type for comparison: %s[%s(%d,%d)]", metadataLeft.getColumnLabel(i), metadataLeft.getColumnTypeName(i), metadataLeft.getPrecision(i), metadataLeft.getScale(i) )); } }
3.68
hbase_TableRegionModel_setEndKey
/** * @param endKey the end key */ public void setEndKey(byte[] endKey) { this.endKey = endKey; }
3.68
morf_SqlDialect_expandInsertStatement
/** * Creates a new {@link InsertStatement} where the source table has been * expanded out into a {@link SelectStatement}. * <p> * The expansion will match fields in the destination to fields in the source * table using their names. If a field with the matching name cannot be found * then the literal value will be firstly sourced from the * <i>fieldDefaults</i> map. If it cannot be found in that map, then the * default for the field type will be used. * </p> * * @param insertStatement the source statement to expand * @param metadata the table metadata from the database * @return a new instance of {@link InsertStatement} with an expanded from * table definition */ protected InsertStatement expandInsertStatement(InsertStatement insertStatement, Schema metadata) { // If we're neither specified the source table nor the select statement then // throw and exception if (insertStatement.getFromTable() == null && insertStatement.getSelectStatement() == null) { throw new IllegalArgumentException("Cannot expand insert statement as it has no from table specified"); } // If we've already got a select statement then just return a copy of the // source insert statement if (insertStatement.getSelectStatement() != null) { return copyInsertStatement(insertStatement); } Map<String, AliasedField> fieldDefaults = insertStatement.getFieldDefaults(); // Expand the from table String sourceTableName = insertStatement.getFromTable().getName(); String destinationTableName = insertStatement.getTable().getName(); // Perform a couple of checks if (!metadata.tableExists(sourceTableName)) { throw new IllegalArgumentException("Source table [" + sourceTableName + "] is not available in the database metadata"); } if (!metadata.tableExists(destinationTableName)) { throw new IllegalArgumentException("Destination table [" + destinationTableName + "] is not available in the database metadata"); } // Convert the source table field list to a map for convenience Map<String, Column> sourceColumns = new HashMap<>(); for (Column currentColumn : metadata.getTable(sourceTableName).columns()) { // Convert everything to the same case to avoid match failure based on // case. sourceColumns.put(currentColumn.getUpperCaseName(), currentColumn); } // Build up the select statement from field list SelectStatementBuilder selectStatementBuilder = SelectStatement.select(); List<AliasedField> resultFields = new ArrayList<>(); for (Column currentColumn : metadata.getTable(destinationTableName).columns()) { String currentColumnName = currentColumn.getName(); // Add the destination column resultFields.add(new FieldReference(currentColumnName)); // If there is a default for this column in the defaults list then use it if (fieldDefaults.containsKey(currentColumnName)) { selectStatementBuilder = selectStatementBuilder.fields(fieldDefaults.get(currentColumnName)); continue; } // If there is a column in the source table with the same name then link // them // and move on to the next column if (sourceColumns.containsKey(currentColumn.getUpperCaseName())) { selectStatementBuilder = selectStatementBuilder.fields(new FieldReference(currentColumnName)); continue; } } // Set the source table SelectStatement selectStatement = selectStatementBuilder .from(insertStatement.getFromTable()) .build(); return InsertStatement.insert() .into(insertStatement.getTable()) .fields(resultFields) .from(selectStatement) .build(); }
3.68
flink_MemorySegmentFactory_wrap
/** * Creates a new memory segment that targets the given heap memory region. * * <p>This method should be used to turn short lived byte arrays into memory segments. * * @param buffer The heap memory region. * @return A new memory segment that targets the given heap memory region. */ public static MemorySegment wrap(byte[] buffer) { return new MemorySegment(buffer, null); }
3.68
hbase_BufferedMutatorParams_implementationClassName
/** * Specify a BufferedMutator implementation other than the default. * @param implementationClassName Name of the BufferedMutator implementation class * @deprecated Since 3.0.0, will be removed in 4.0.0. You can not set it any more as the * implementation has to use too many internal stuffs in HBase. */ @Deprecated public BufferedMutatorParams implementationClassName(String implementationClassName) { this.implementationClassName = implementationClassName; return this; }
3.68
hudi_DirectWriteMarkers_create
/** * Creates a marker file based on the full marker name excluding the base path and instant. * * @param markerName the full marker name, e.g., "2021/08/13/file1.marker.CREATE" * @return path of the marker file */ public Option<Path> create(String markerName) { return create(new Path(markerDirPath, markerName), true); }
3.68
hbase_SaslChallengeDecoder_tryDecodeError
// will throw a RemoteException out if data is enough, so do not need to return anything. private void tryDecodeError(ByteBuf in, int offset, int readableBytes) throws IOException { if (readableBytes < 4) { return; } int classLen = in.getInt(offset); if (classLen <= 0) { throw new IOException("Invalid exception class name length " + classLen); } if (classLen > MAX_CHALLENGE_SIZE) { throw new IOException("Exception class name length too large(" + classLen + "), max allowed is " + MAX_CHALLENGE_SIZE); } if (readableBytes < 4 + classLen + 4) { return; } int msgLen = in.getInt(offset + 4 + classLen); if (msgLen <= 0) { throw new IOException("Invalid exception message length " + msgLen); } if (msgLen > MAX_CHALLENGE_SIZE) { throw new IOException( "Exception message length too large(" + msgLen + "), max allowed is " + MAX_CHALLENGE_SIZE); } int totalLen = classLen + msgLen + 8; if (readableBytes < totalLen) { return; } String className = in.toString(offset + 4, classLen, HConstants.UTF8_CHARSET); String msg = in.toString(offset + classLen + 8, msgLen, HConstants.UTF8_CHARSET); in.readerIndex(offset + totalLen); throw new RemoteException(className, msg); }
3.68
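To make the byte layout that tryDecodeError walks concrete, here is a sketch that builds such an error frame with plain java.nio (the real code uses Netty ByteBufs): a 4-byte class-name length, the class-name bytes, a 4-byte message length, then the message bytes, all UTF-8. The exception class name and message are invented for illustration.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class SaslErrorFrameSketch {
    // Builds [int classLen][class bytes][int msgLen][msg bytes], matching the decoder's offsets.
    static ByteBuffer encode(String className, String msg) {
        byte[] cls = className.getBytes(StandardCharsets.UTF_8);
        byte[] m = msg.getBytes(StandardCharsets.UTF_8);
        ByteBuffer buf = ByteBuffer.allocate(4 + cls.length + 4 + m.length);
        buf.putInt(cls.length).put(cls).putInt(m.length).put(m);
        buf.flip();
        return buf;
    }

    public static void main(String[] args) {
        ByteBuffer frame = encode("org.example.FakeSaslException", "challenge rejected");
        int classLen = frame.getInt();   // read at offset 0
        byte[] cls = new byte[classLen];
        frame.get(cls);
        int msgLen = frame.getInt();     // read at offset 4 + classLen
        byte[] msg = new byte[msgLen];
        frame.get(msg);
        System.out.println(new String(cls, StandardCharsets.UTF_8) + ": "
            + new String(msg, StandardCharsets.UTF_8));
    }
}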
flink_StreamConfig_clearInitialConfigs
/** * In general, we don't clear any configuration. However, {@link #SERIALIZED_UDF} may be very * large when the operator includes large objects; it is only used to create a StreamOperator and * usually needs to be read just once. {@link #CHAINED_TASK_CONFIG} may be large as well, because * the StreamConfig of all non-head operators in the OperatorChain is serialized and stored in * CHAINED_TASK_CONFIG. Both can be cleared after the StreamTask is initialized to reduce memory, * leaving the TM with more memory while running. See FLINK-33315 and FLINK-33317 for more * information. */ public void clearInitialConfigs() { removedKeys.add(SERIALIZED_UDF); config.removeKey(SERIALIZED_UDF); removedKeys.add(CHAINED_TASK_CONFIG); config.removeKey(CHAINED_TASK_CONFIG); }
3.68
hbase_TableModel_toString
/* * (non-Javadoc) * @see java.lang.Object#toString() */ @Override public String toString() { return this.name; }
3.68
flink_HiveParserTypeCheckProcFactory_isDescendant
// Returns true if des is a descendant of ans (ancestor) private boolean isDescendant(Node ans, Node des) { if (ans.getChildren() == null) { return false; } for (Node c : ans.getChildren()) { if (c == des) { return true; } if (isDescendant(c, des)) { return true; } } return false; }
3.68
aws-saas-boost_ExistingEnvironmentFactory_getExistingSaaSBoostArtifactBucket
// VisibleForTesting static SaaSBoostArtifactsBucket getExistingSaaSBoostArtifactBucket( SsmClient ssm, String environmentName, Region region) { LOGGER.debug("Getting existing SaaS Boost artifact bucket name from Parameter Store"); String artifactsBucket = null; try { // note: this currently assumes Settings service implementation details and should eventually be // replaced with a call to getSettings GetParameterResponse response = ssm.getParameter(request -> request .name("/saas-boost/" + environmentName + "/SAAS_BOOST_BUCKET") ); artifactsBucket = response.parameter().value(); } catch (ParameterNotFoundException paramStoreError) { LOGGER.error("Parameter /saas-boost/" + environmentName + "/SAAS_BOOST_BUCKET not found"); LOGGER.error(Utils.getFullStackTrace(paramStoreError)); throw paramStoreError; } catch (SdkServiceException ssmError) { LOGGER.error("ssm:GetParameter error {}", ssmError.getMessage()); LOGGER.error(Utils.getFullStackTrace(ssmError)); throw ssmError; } LOGGER.info("Loaded artifacts bucket {}", artifactsBucket); return new SaaSBoostArtifactsBucket(artifactsBucket, region); }
3.68
hbase_MetricSampleQuantiles_getSampleCount
/** * Returns the number of samples kept by the estimator. * @return the current number of samples */ public synchronized int getSampleCount() { return samples.size(); }
3.68
framework_ShowLastItem_getTicketNumber
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber() */ @Override protected Integer getTicketNumber() { return 12407; }
3.68
hbase_HDFSBlocksDistribution_getTopHostsWithWeights
/** Return the sorted list of hosts in terms of their weights */ public HostAndWeight[] getTopHostsWithWeights() { NavigableSet<HostAndWeight> orderedHosts = new TreeSet<>(new HostAndWeight.WeightComparator()); orderedHosts.addAll(this.hostAndWeights.values()); return orderedHosts.descendingSet().toArray(new HostAndWeight[orderedHosts.size()]); }
3.68
flink_KubernetesUtils_checkAndUpdatePortConfigOption
/** * Check whether the port config option is a fixed port. If not, the fallback port will be set * in the configuration. * * @param flinkConfig the Flink configuration * @param port the port config option to be checked * @param fallbackPort the fallback port that will be set in the configuration */ public static void checkAndUpdatePortConfigOption( Configuration flinkConfig, ConfigOption<String> port, int fallbackPort) { if (KubernetesUtils.parsePort(flinkConfig, port) == 0) { flinkConfig.setString(port, String.valueOf(fallbackPort)); LOG.info( "Kubernetes deployment requires a fixed port. Configuration {} will be set to {}", port.key(), fallbackPort); } }
3.68
flink_FailureHandlingResultSnapshot_getRootCauseExecution
/** * Returns the {@link Execution} that handled the root cause for this failure. An empty {@code * Optional} will be returned if it's a global failure. * * @return The {@link Execution} that handled the root cause for this failure. */ public Optional<Execution> getRootCauseExecution() { return Optional.ofNullable(rootCauseExecution); }
3.68
hadoop_ResourceUsageMatcher_configure
/** * Configure the {@link ResourceUsageMatcher} to load the configured plugins * and initialize them. */ @SuppressWarnings("unchecked") public void configure(Configuration conf, ResourceCalculatorPlugin monitor, ResourceUsageMetrics metrics, Progressive progress) { Class[] plugins = conf.getClasses(RESOURCE_USAGE_EMULATION_PLUGINS); if (plugins == null) { System.out.println("No resource usage emulator plugins configured."); } else { for (Class clazz : plugins) { if (clazz != null) { if (ResourceUsageEmulatorPlugin.class.isAssignableFrom(clazz)) { ResourceUsageEmulatorPlugin plugin = (ResourceUsageEmulatorPlugin) ReflectionUtils.newInstance(clazz, conf); emulationPlugins.add(plugin); } else { // report the offending plugin class itself, not java.lang.Class throw new RuntimeException("Misconfigured resource usage plugins. " + "Class " + clazz.getName() + " is not a resource " + "usage plugin as it does not extend " + ResourceUsageEmulatorPlugin.class.getName()); } } } } // initialize the emulators once all the configured emulator plugins are // loaded for (ResourceUsageEmulatorPlugin emulator : emulationPlugins) { emulator.initialize(conf, metrics, monitor, progress); } }
3.68
hbase_MemorySizeUtil_checkForClusterFreeHeapMemoryLimit
/** * Checks whether we have enough heap memory left after setting aside the portions for Memstore * and Block cache. We need at least 20% of the heap left for other RS functions. */ public static void checkForClusterFreeHeapMemoryLimit(Configuration conf) { if (conf.get(MEMSTORE_SIZE_OLD_KEY) != null) { LOG.warn(MEMSTORE_SIZE_OLD_KEY + " is deprecated by " + MEMSTORE_SIZE_KEY); } float globalMemstoreSize = getGlobalMemStoreHeapPercent(conf, false); int gml = (int) (globalMemstoreSize * CONVERT_TO_PERCENTAGE); float blockCacheUpperLimit = getBlockCacheHeapPercent(conf); int bcul = (int) (blockCacheUpperLimit * CONVERT_TO_PERCENTAGE); if ( CONVERT_TO_PERCENTAGE - (gml + bcul) < (int) (CONVERT_TO_PERCENTAGE * HConstants.HBASE_CLUSTER_MINIMUM_MEMORY_THRESHOLD) ) { throw new RuntimeException("Current heap configuration for MemStore and BlockCache exceeds " + "the threshold required for successful cluster operation. " + "The combined value cannot exceed 0.8. Please check " + "the settings for hbase.regionserver.global.memstore.size and " + "hfile.block.cache.size in your configuration. " + "hbase.regionserver.global.memstore.size is " + globalMemstoreSize + " hfile.block.cache.size is " + blockCacheUpperLimit); } }
3.68
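A quick numeric illustration of the 20% free-heap rule above. The 0.2 threshold is assumed to be HConstants.HBASE_CLUSTER_MINIMUM_MEMORY_THRESHOLD, and the percentages are converted to ints exactly as in the snippet; the configuration values are invented for the example.

public class HeapLimitCheckExample {
    public static void main(String[] args) {
        int CONVERT_TO_PERCENTAGE = 100;
        double minimumFreeThreshold = 0.2; // assumed cluster minimum free-heap fraction

        // Example configuration: 50% memstore + 40% block cache = 90% of the heap.
        float globalMemstoreSize = 0.5f;   // hbase.regionserver.global.memstore.size
        float blockCacheUpperLimit = 0.4f; // hfile.block.cache.size

        int gml = (int) (globalMemstoreSize * CONVERT_TO_PERCENTAGE);    // 50
        int bcul = (int) (blockCacheUpperLimit * CONVERT_TO_PERCENTAGE); // 40
        int free = CONVERT_TO_PERCENTAGE - (gml + bcul);                 // 10
        boolean ok = free >= (int) (CONVERT_TO_PERCENTAGE * minimumFreeThreshold); // 10 >= 20 -> false
        System.out.println(ok ? "configuration accepted" : "would throw: combined value exceeds 0.8");
    }
}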
flink_BeamPythonFunctionRunner_createExecutableStage
/** * Creates a {@link ExecutableStage} which contains the Python user-defined functions to be * executed and all the other information needed to execute them, such as the execution * environment, the input and output coder, etc. */ @SuppressWarnings("unchecked") private ExecutableStage createExecutableStage(RunnerApi.Environment environment) throws Exception { RunnerApi.Components.Builder componentsBuilder = RunnerApi.Components.newBuilder() .putPcollections( INPUT_COLLECTION_ID, RunnerApi.PCollection.newBuilder() .setWindowingStrategyId(WINDOW_STRATEGY) .setCoderId(INPUT_CODER_ID) .build()) .putPcollections( OUTPUT_COLLECTION_ID, RunnerApi.PCollection.newBuilder() .setWindowingStrategyId(WINDOW_STRATEGY) .setCoderId(OUTPUT_CODER_ID) .build()) .putWindowingStrategies( WINDOW_STRATEGY, RunnerApi.WindowingStrategy.newBuilder() .setWindowCoderId(WINDOW_CODER_ID) .build()) .putCoders(INPUT_CODER_ID, createCoderProto(inputCoderDescriptor)) .putCoders(OUTPUT_CODER_ID, createCoderProto(outputCoderDescriptor)) .putCoders(WINDOW_CODER_ID, getWindowCoderProto()); for (Map.Entry<String, FlinkFnApi.CoderInfoDescriptor> entry : sideOutputCoderDescriptors.entrySet()) { String collectionId = entry.getKey(); String collectionCoderId = SIDE_OUTPUT_CODER_PREFIX + collectionId; componentsBuilder.putPcollections( collectionId, RunnerApi.PCollection.newBuilder() .setWindowingStrategyId(WINDOW_STRATEGY) .setCoderId(collectionCoderId) .build()); componentsBuilder.putCoders(collectionCoderId, createCoderProto(entry.getValue())); } getOptionalTimerCoderProto() .ifPresent( timerCoderProto -> { componentsBuilder.putCoders(TIMER_CODER_ID, timerCoderProto); RunnerApi.Coder wrapperTimerCoderProto = RunnerApi.Coder.newBuilder() .setSpec( RunnerApi.FunctionSpec.newBuilder() .setUrn(ModelCoders.TIMER_CODER_URN) .build()) .addComponentCoderIds(TIMER_CODER_ID) .addComponentCoderIds(WINDOW_CODER_ID) .build(); componentsBuilder.putCoders( WRAPPER_TIMER_CODER_ID, wrapperTimerCoderProto); }); buildTransforms(componentsBuilder); RunnerApi.Components components = componentsBuilder.build(); PipelineNode.PCollectionNode input = PipelineNode.pCollection( INPUT_COLLECTION_ID, components.getPcollectionsOrThrow(INPUT_COLLECTION_ID)); List<SideInputReference> sideInputs = Collections.EMPTY_LIST; List<UserStateReference> userStates = Collections.EMPTY_LIST; List<TimerReference> timers = getTimers(components); List<PipelineNode.PTransformNode> transforms = components.getTransformsMap().keySet().stream() .map(id -> PipelineNode.pTransform(id, components.getTransformsOrThrow(id))) .collect(Collectors.toList()); List<PipelineNode.PCollectionNode> outputs = new ArrayList<>(); outputs.add( PipelineNode.pCollection( OUTPUT_COLLECTION_ID, components.getPcollectionsOrThrow(OUTPUT_COLLECTION_ID))); for (Map.Entry<String, FlinkFnApi.CoderInfoDescriptor> entry : sideOutputCoderDescriptors.entrySet()) { String collectionId = entry.getKey(); outputs.add( PipelineNode.pCollection( collectionId, components.getPcollectionsOrThrow(collectionId))); } return ImmutableExecutableStage.of( components, environment, input, sideInputs, userStates, timers, transforms, outputs, createValueOnlyWireCoderSetting()); }
3.68
framework_TwinColSelectElement_getSelectionsElement
/** * Gets the right {@code <select>} element inside the component, containing * the selected options. * * @return the select element containing selection inside the component * @since 8.1.1 */ public WebElement getSelectionsElement() { return selectionsElement; }
3.68
flink_TypeMappingUtils_getRowtimeAttributes
/** Returns a list with all rowtime attribute names of the [[TableSource]]. */ private static List<String> getRowtimeAttributes(TableSource<?> tableSource) { if (tableSource instanceof DefinedRowtimeAttributes) { return ((DefinedRowtimeAttributes) tableSource) .getRowtimeAttributeDescriptors().stream() .map(RowtimeAttributeDescriptor::getAttributeName) .collect(Collectors.toList()); } else { return Collections.emptyList(); } }
3.68
hbase_SyncTable_finishBatchAndCompareHashes
/**
 * Finish the currently open hash batch. Compare the target hash to the given source hash. If
 * they do not match, then sync the covered key range.
 */
private void finishBatchAndCompareHashes(Context context)
  throws IOException, InterruptedException {
  targetHasher.finishBatch();
  context.getCounter(Counter.BATCHES).increment(1);
  if (targetHasher.getBatchSize() == 0) {
    context.getCounter(Counter.EMPTY_BATCHES).increment(1);
  }
  ImmutableBytesWritable targetHash = targetHasher.getBatchHash();
  if (targetHash.equals(currentSourceHash)) {
    context.getCounter(Counter.HASHES_MATCHED).increment(1);
  } else {
    context.getCounter(Counter.HASHES_NOT_MATCHED).increment(1);

    ImmutableBytesWritable stopRow = nextSourceKey == null
      ? new ImmutableBytesWritable(sourceTableHash.stopRow)
      : nextSourceKey;

    if (LOG.isDebugEnabled()) {
      LOG.debug("Hash mismatch. Key range: " + toHex(targetHasher.getBatchStartKey()) + " to "
        + toHex(stopRow) + " sourceHash: " + toHex(currentSourceHash) + " targetHash: "
        + toHex(targetHash));
    }

    syncRange(context, targetHasher.getBatchStartKey(), stopRow);
  }
}
3.68
flink_GlobalWindows_create
/**
 * Creates a new {@code GlobalWindows} {@link WindowAssigner} that assigns all elements to the
 * same {@link GlobalWindow}.
 *
 * @return The global window policy.
 */
public static GlobalWindows create() {
    return new GlobalWindows();
}
3.68
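A minimal usage sketch for the GlobalWindows.create() method above, assuming a keyed Flink DataStream of Long values; a GlobalWindow never fires on its own, so a CountTrigger is attached here purely for illustration:

// Hypothetical usage sketch; the pipeline, key selector and trigger choice are illustrative.
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.assigners.GlobalWindows;
import org.apache.flink.streaming.api.windowing.triggers.CountTrigger;

public class GlobalWindowsExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStream<Long> input = env.fromElements(1L, 2L, 3L, 4L);
        input.keyBy(value -> value % 2)
                .window(GlobalWindows.create())
                // a GlobalWindow never closes by itself, so fire every two elements
                .trigger(CountTrigger.of(2))
                .reduce(Long::sum)
                .print();
        env.execute("global-windows-sketch");
    }
}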
framework_Window_addBlurListener
/*
 * (non-Javadoc)
 *
 * @see
 * com.vaadin.event.FieldEvents.BlurNotifier#addBlurListener(com.vaadin.
 * event.FieldEvents.BlurListener)
 */
@Override
public Registration addBlurListener(BlurListener listener) {
    return addListener(BlurEvent.EVENT_ID, BlurEvent.class, listener,
            BlurListener.blurMethod);
}
3.68
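A short usage sketch for the Window.addBlurListener method above; the window caption and notification text are illustrative, and the listener is later removed via the returned Registration:

// Hypothetical usage sketch; names and UI wiring are illustrative.
import com.vaadin.event.FieldEvents.BlurEvent;
import com.vaadin.shared.Registration;
import com.vaadin.ui.Notification;
import com.vaadin.ui.Window;

public class BlurListenerExample {
    public Window createWindow() {
        Window window = new Window("Details");
        // addBlurListener returns a Registration that can undo the subscription
        Registration registration = window.addBlurListener(
                (BlurEvent event) -> Notification.show("Window lost focus"));
        // later, when the listener is no longer needed:
        // registration.remove();
        return window;
    }
}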
flink_DynamicSinkUtils_convertCollectToRel
/** Converts a {@link TableResult#collect()} sink to a {@link RelNode}. */
public static RelNode convertCollectToRel(
        FlinkRelBuilder relBuilder,
        RelNode input,
        CollectModifyOperation collectModifyOperation,
        ReadableConfig configuration,
        ClassLoader classLoader) {
    final DataTypeFactory dataTypeFactory =
            unwrapContext(relBuilder).getCatalogManager().getDataTypeFactory();
    final ResolvedSchema childSchema = collectModifyOperation.getChild().getResolvedSchema();
    final ResolvedSchema schema =
            ResolvedSchema.physical(
                    childSchema.getColumnNames(), childSchema.getColumnDataTypes());
    final ResolvedCatalogTable catalogTable =
            new ResolvedCatalogTable(
                    new ExternalCatalogTable(
                            Schema.newBuilder().fromResolvedSchema(schema).build()),
                    schema);
    final ContextResolvedTable contextResolvedTable =
            ContextResolvedTable.anonymous("collect", catalogTable);

    final DataType consumedDataType = fixCollectDataType(dataTypeFactory, schema);

    final String zone = configuration.get(TableConfigOptions.LOCAL_TIME_ZONE);
    final ZoneId zoneId =
            TableConfigOptions.LOCAL_TIME_ZONE.defaultValue().equals(zone)
                    ? ZoneId.systemDefault()
                    : ZoneId.of(zone);

    final CollectDynamicSink tableSink =
            new CollectDynamicSink(
                    contextResolvedTable.getIdentifier(),
                    consumedDataType,
                    configuration.get(CollectSinkOperatorFactory.MAX_BATCH_SIZE),
                    configuration.get(CollectSinkOperatorFactory.SOCKET_TIMEOUT),
                    classLoader,
                    zoneId,
                    configuration
                            .get(ExecutionConfigOptions.TABLE_EXEC_LEGACY_CAST_BEHAVIOUR)
                            .isEnabled());
    collectModifyOperation.setSelectResultProvider(tableSink.getSelectResultProvider());
    collectModifyOperation.setConsumedDataType(consumedDataType);
    return convertSinkToRel(
            relBuilder,
            input,
            Collections.emptyMap(), // dynamicOptions
            contextResolvedTable,
            Collections.emptyMap(), // staticPartitions
            null, // targetColumns
            false,
            tableSink);
}
3.68
framework_VaadinService_fireSessionDestroy
/**
 * Handles destruction of the given session. Internally ensures proper
 * locking is done.
 *
 * @param vaadinSession
 *            The session to destroy
 */
public void fireSessionDestroy(VaadinSession vaadinSession) {
    final VaadinSession session = vaadinSession;
    session.access(() -> {
        if (session.getState() == State.CLOSED) {
            return;
        }
        if (session.getState() == State.OPEN) {
            closeSession(session);
        }
        List<UI> uis = new ArrayList<>(session.getUIs());
        for (final UI ui : uis) {
            ui.accessSynchronously(() -> {
                /*
                 * close() called here for consistency so that it is always
                 * called before a UI is removed. UI.isClosing() is thus
                 * always true in UI.detach() and associated detach
                 * listeners.
                 */
                if (!ui.isClosing()) {
                    ui.close();
                }
                session.removeUI(ui);
            });
        }
        SessionDestroyEvent event = new SessionDestroyEvent(
                VaadinService.this, session);
        for (SessionDestroyListener listener : sessionDestroyListeners) {
            try {
                listener.sessionDestroy(event);
            } catch (Exception e) {
                /*
                 * for now, use the session error handler; in the future,
                 * could have an API for using some other handler for
                 * session init and destroy listeners
                 */
                session.getErrorHandler().error(new ErrorEvent(e));
            }
        }
        session.setState(State.CLOSED);
    });
}
3.68
pulsar_WorkerStatsApiV2Resource_clientAppId
/**
 * @deprecated use {@link AuthenticationParameters} instead
 */
@Deprecated
public String clientAppId() {
    return httpRequest != null
            ? (String) httpRequest.getAttribute(AuthenticationFilter.AuthenticatedRoleAttributeName)
            : null;
}

@GET
@Path("/metrics")
@ApiOperation(
        value = "Gets the metrics for Monitoring",
        notes = "Request should be executed by Monitoring agent on each worker to fetch the worker-metrics",
        response = org.apache.pulsar.common.stats.Metrics.class,
        responseContainer = "List")
@ApiResponses(value = {
        @ApiResponse(code = 401, message = "Don't have admin permission"),
        @ApiResponse(code = 503, message = "Worker service is not running")
}
3.68
flink_JobGraph_addJars
/**
 * Adds the given jar files to the {@link JobGraph} via {@link JobGraph#addJar}.
 *
 * @param jarFilesToAttach a list of the {@link URL URLs} of the jar files to attach to the
 *     jobgraph.
 * @throws RuntimeException if a jar URL is not valid.
 */
public void addJars(final List<URL> jarFilesToAttach) {
    for (URL jar : jarFilesToAttach) {
        try {
            addJar(new Path(jar.toURI()));
        } catch (URISyntaxException e) {
            throw new RuntimeException("URL is invalid. This should not happen.", e);
        }
    }
}
3.68
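A brief usage sketch for the JobGraph.addJars method above; the jar path below is a placeholder, not a real artifact:

// Hypothetical usage sketch; the jar path is a placeholder.
import java.net.URL;
import java.util.Collections;
import org.apache.flink.runtime.jobgraph.JobGraph;

public class AddJarsExample {
    public static void attachUserJar(JobGraph jobGraph) throws Exception {
        URL userJar = new URL("file:///tmp/my-udfs.jar");
        // each URL is converted to a Flink Path and registered with the job graph
        jobGraph.addJars(Collections.singletonList(userJar));
    }
}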
flink_ArrowFieldWriter_finish
/** Finishes the writing of the current row batch. */
public void finish() {
    valueVector.setValueCount(count);
}
3.68
hbase_OrderedBytes_encodeInt16
/**
 * Encode an {@code int16} value using the fixed-length encoding.
 * @return the number of bytes written.
 * @see #encodeInt64(PositionedByteRange, long, Order)
 * @see #decodeInt16(PositionedByteRange)
 */
public static int encodeInt16(PositionedByteRange dst, short val, Order ord) {
  final int offset = dst.getOffset(), start = dst.getPosition();
  dst.put(FIXED_INT16).put((byte) ((val >> 8) ^ 0x80)).put((byte) val);
  ord.apply(dst.getBytes(), offset + start, 3);
  return 3;
}
3.68
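A small usage sketch for the OrderedBytes.encodeInt16 method above, assuming HBase's SimplePositionedMutableByteRange as the destination buffer:

// Hypothetical usage sketch; SimplePositionedMutableByteRange is assumed as the buffer type.
import org.apache.hadoop.hbase.util.Order;
import org.apache.hadoop.hbase.util.OrderedBytes;
import org.apache.hadoop.hbase.util.PositionedByteRange;
import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;

public class EncodeInt16Example {
    public static void main(String[] args) {
        // the fixed-length int16 encoding always occupies 3 bytes (1 header + 2 value)
        PositionedByteRange dst = new SimplePositionedMutableByteRange(3);
        int written = OrderedBytes.encodeInt16(dst, (short) 42, Order.ASCENDING);

        dst.setPosition(0);
        short decoded = OrderedBytes.decodeInt16(dst);
        System.out.println(written + " bytes written, decoded value: " + decoded);
    }
}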
hadoop_ConnectionPool_getPoolAlignmentContext
/**
 * Get the alignment context for this pool.
 * @return Alignment context
 */
public PoolAlignmentContext getPoolAlignmentContext() {
  return this.alignmentContext;
}
3.68
hbase_HFileReaderImpl_getUncachedBlockReader
/** For testing */
@Override
public HFileBlock.FSReader getUncachedBlockReader() {
  return fsBlockReader;
}
3.68
hudi_InitialCheckPointProvider_init
/**
 * Initialize the class with the current filesystem.
 *
 * @param config Hadoop configuration
 */
public void init(Configuration config) throws HoodieException {
  try {
    this.fs = FileSystem.get(config);
  } catch (IOException e) {
    // preserve the underlying cause instead of swallowing it
    throw new HoodieException("CheckpointProvider initialization failed", e);
  }
}
3.68
querydsl_ExpressionUtils_predicate
/**
 * Create a new Operation expression
 *
 * @param operator operator
 * @param args operation arguments
 * @return operation expression
 */
public static PredicateOperation predicate(Operator operator, List<Expression<?>> args) {
    return new PredicateOperation(operator, args);
}
3.68
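A hedged usage sketch for the ExpressionUtils.predicate method above, building a simple equality predicate from two constant expressions; the operand values are illustrative only:

// Hypothetical usage sketch; the operand expressions are illustrative.
import java.util.Arrays;
import com.querydsl.core.types.Expression;
import com.querydsl.core.types.ExpressionUtils;
import com.querydsl.core.types.Ops;
import com.querydsl.core.types.Predicate;
import com.querydsl.core.types.dsl.Expressions;

public class PredicateExample {
    public static Predicate equalityPredicate() {
        // builds the predicate "18 = 21" purely for illustration
        return ExpressionUtils.predicate(
                Ops.EQ,
                Arrays.<Expression<?>>asList(
                        Expressions.constant(18), Expressions.constant(21)));
    }
}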
hadoop_AbstractDNSToSwitchMapping_dumpTopology
/**
 * Generate a string listing the switch mapping implementation,
 * the mapping for every known node and the number of nodes and
 * unique switches known about, each entry on a separate line.
 * @return a string that can be presented to the ops team or used in
 * debug messages.
 */
public String dumpTopology() {
  Map<String, String> rack = getSwitchMap();
  StringBuilder builder = new StringBuilder();
  builder.append("Mapping: ").append(toString()).append("\n");
  if (rack != null) {
    builder.append("Map:\n");
    Set<String> switches = new HashSet<>();
    for (Map.Entry<String, String> entry : rack.entrySet()) {
      builder.append(" ")
          .append(entry.getKey())
          .append(" -> ")
          .append(entry.getValue())
          .append("\n");
      switches.add(entry.getValue());
    }
    builder.append("Nodes: ").append(rack.size()).append("\n")
        .append("Switches: ").append(switches.size()).append("\n");
  } else {
    builder.append("No topology information");
  }
  return builder.toString();
}
3.68
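A hedged usage sketch for the dumpTopology method above, using ScriptBasedMapping as an assumed concrete AbstractDNSToSwitchMapping subclass; with no script configured it resolves every host to the default rack, and the hostnames below are placeholders:

// Hypothetical usage sketch; ScriptBasedMapping is assumed as a convenient concrete subclass.
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.ScriptBasedMapping;

public class DumpTopologyExample {
    public static void main(String[] args) {
        ScriptBasedMapping mapping = new ScriptBasedMapping(new Configuration());
        // resolve a few hosts so the switch map has entries to dump
        mapping.resolve(Arrays.asList("node-1.example.com", "node-2.example.com"));
        System.out.println(mapping.dumpTopology());
    }
}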
framework_VFilterSelect_doPostFilterSelectedItemAction
/**
 * Triggered after a selection has been made.
 */
public void doPostFilterSelectedItemAction() {
    debug("VFS.SM: doPostFilterSelectedItemAction()");
    final MenuItem item = getSelectedItem();
    final String enteredItemValue = tb.getText();

    updateSelectionWhenReponseIsReceived = false;

    // check for exact match in menu
    int p = getItems().size();
    if (p > 0) {
        for (int i = 0; i < p; i++) {
            final MenuItem potentialExactMatch = getItems().get(i);
            if (potentialExactMatch.getText().equals(enteredItemValue)) {
                selectItem(potentialExactMatch);
                // do not send a value change event if null was and
                // stays selected
                if (!"".equals(enteredItemValue)
                        || (selectedOptionKey != null
                                && !"".equals(selectedOptionKey))) {
                    doItemAction(potentialExactMatch, true);
                }
                suggestionPopup.hide();
                return;
            }
        }
    }
    if (allowNewItem) {
        if (!prompting && !enteredItemValue.equals(lastNewItemString)) {
            /*
             * Store last sent new item string to avoid double sends
             */
            lastNewItemString = enteredItemValue;
            client.updateVariable(paintableId, "newitem", enteredItemValue,
                    immediate);
            afterUpdateClientVariables();
        }
    } else if (item != null && !"".equals(lastFilter)
            && (filteringmode == FilteringMode.CONTAINS
                    ? item.getText().toLowerCase(Locale.ROOT)
                            .contains(lastFilter.toLowerCase(Locale.ROOT))
                    : item.getText().toLowerCase(Locale.ROOT)
                            .startsWith(lastFilter.toLowerCase(Locale.ROOT)))) {
        doItemAction(item, true);
    } else {
        // currentSuggestion has key="" for nullselection
        if (currentSuggestion != null && !currentSuggestion.key.equals("")) {
            // An item (not null) selected
            String text = currentSuggestion.getReplacementString();
            setText(text);
            selectedOptionKey = currentSuggestion.key;
        } else {
            // Null selected
            setText("");
            selectedOptionKey = null;
        }
    }
    suggestionPopup.hide();
}
3.68
framework_FlyweightCell_getColSpan
/**
 * Return the colspan attribute of the element of the cell.
 *
 * @return the colspan attribute
 */
public int getColSpan() {
    assertSetup();
    return element.getPropertyInt(COLSPAN_ATTR);
}
3.68
hadoop_TFile_getFirstKey
/**
 * Get the first key in the TFile.
 *
 * @return The first key in the TFile.
 * @throws IOException raised on errors performing I/O.
 */
public RawComparable getFirstKey() throws IOException {
  checkTFileDataIndex();
  return tfileIndex.getFirstKey();
}
3.68
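A hedged usage sketch for the TFile.Reader.getFirstKey method above; the file path is a placeholder and the reader is built from an open stream plus the file length:

// Hypothetical usage sketch; the path is a placeholder.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.file.tfile.RawComparable;
import org.apache.hadoop.io.file.tfile.TFile;

public class TFileFirstKeyExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path path = new Path("/tmp/example.tfile");
        FileSystem fs = path.getFileSystem(conf);
        long length = fs.getFileStatus(path).getLen();
        try (FSDataInputStream in = fs.open(path);
                TFile.Reader reader = new TFile.Reader(in, length, conf)) {
            RawComparable firstKey = reader.getFirstKey();
            System.out.println("first key length in bytes: " + firstKey.size());
        }
    }
}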
hbase_MetricsMaster_getMetricsSource
// for unit-test usage
public MetricsMasterSource getMetricsSource() {
  return masterSource;
}
3.68
framework_PointerEventSupportImpl_getNativeEventName
/**
 * @param eventName
 *            the event type
 * @return the native event name of the given event
 */
public String getNativeEventName(EventType eventName) {
    return eventName.toString().toLowerCase(Locale.ROOT);
}
3.68
morf_AbstractSqlDialectTest_testSelectLiteralWithWhereClause
/**
 * Tests SQL for selecting literal fields where there is a WHERE clause.
 */
@Test
public void testSelectLiteralWithWhereClause() {
  assertEquals(
    expectedSelectLiteralWithWhereClauseString(),
    testDialect.convertStatementToSQL(
      new SelectStatement(new FieldLiteral("LITERAL")).where(eq(new FieldLiteral("ONE"), "ONE"))
    )
  );
}
3.68
flink_EnvironmentInformation_getGitCommitId
/** @return The last known commit id of this version of the software. */
public static String getGitCommitId() {
    return getVersionsInstance().gitCommitId;
}
3.68
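A one-line usage sketch for the EnvironmentInformation.getGitCommitId method above, e.g. for startup logging; the logger wiring is illustrative:

// Hypothetical usage sketch; the logger is illustrative.
import org.apache.flink.runtime.util.EnvironmentInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class CommitIdLogging {
    private static final Logger LOG = LoggerFactory.getLogger(CommitIdLogging.class);

    public static void logBuildInfo() {
        // records which commit the running binaries were built from
        LOG.info("Running Flink built from commit {}", EnvironmentInformation.getGitCommitId());
    }
}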
flink_ImperativeAggregateFunction_getResultType
/**
 * Returns the {@link TypeInformation} of the {@link ImperativeAggregateFunction}'s result.
 *
 * @return The {@link TypeInformation} of the {@link ImperativeAggregateFunction}'s result or
 *     <code>null</code> if the result type should be automatically inferred.
 * @deprecated This method uses the old type system and is based on the old reflective
 *     extraction logic. The method will be removed in future versions and is only called when
 *     using the deprecated {@code TableEnvironment.registerFunction(...)} method. The new
 *     reflective extraction logic (possibly enriched with {@link DataTypeHint} and {@link
 *     FunctionHint}) should be powerful enough to cover most use cases. For advanced users, it
 *     is possible to override {@link UserDefinedFunction#getTypeInference(DataTypeFactory)}.
 */
@Deprecated
public TypeInformation<T> getResultType() {
    return null;
}
3.68
hadoop_SubApplicationRowKeyPrefix_getRowKeyPrefix
/*
 * (non-Javadoc)
 *
 * @see org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.
 * RowKeyPrefix#getRowKeyPrefix()
 */
public byte[] getRowKeyPrefix() {
  return super.getRowKey();
}
3.68