Columns: name (string, 12–178 chars) · code_snippet (string, 8–36.5k chars) · score (float64, 3.26–3.68)
flink_CustomSinkOperatorUidHashes_setGlobalCommitterUidHash
/** * Sets the uid hash of the global committer operator used to recover state. * * @param globalCommitterUidHash uid hash denoting the global committer operator * @return {@link SinkOperatorUidHashesBuilder} */ public SinkOperatorUidHashesBuilder setGlobalCommitterUidHash( String globalCommitterUidHash) { this.globalCommitterUidHash = globalCommitterUidHash; return this; }
3.68
hadoop_JsonSerialization_writeJsonAsBytes
/** * Write the JSON as bytes, then close the stream. * @param instance instance to write * @param dataOutputStream an output stream that will always be closed * @throws IOException on any failure */ public void writeJsonAsBytes(T instance, OutputStream dataOutputStream) throws IOException { try { dataOutputStream.write(toBytes(instance)); } finally { dataOutputStream.close(); } }
3.68
morf_AbstractSqlDialectTest_expectedSqlForMathOperationsForExistingDataFix4
/** * @return the expected SQL for math operation for existing data fix 4 */ protected String expectedSqlForMathOperationsForExistingDataFix4() { return "invoiceLineReceived * vatRate / (vatRate + 100)"; }
3.68
hbase_HBaseTestingUtility_getDefaultRootDirPath
/** * Same as {@link HBaseTestingUtility#getDefaultRootDirPath(boolean)} except that the * <code>create</code> flag is false. Note: this does not cause the root dir to be created. * @return Fully qualified path for the default hbase root dir */ public Path getDefaultRootDirPath() throws IOException { return getDefaultRootDirPath(false); }
3.68
framework_GridLayout_checkExistingOverlaps
/** * Tests if the given area overlaps with any of the items already on the * grid. * * @param area * the Area to be checked for overlapping. * @throws OverlapsException * if <code>area</code> overlaps with any existing area. */ private void checkExistingOverlaps(Area area) throws OverlapsException { for (Entry<Connector, ChildComponentData> entry : getState().childData .entrySet()) { if (componentsOverlap(entry.getValue(), area.childData)) { // Component not added, overlaps with existing component throw new OverlapsException( new Area(entry.getValue(), (Component) entry.getKey())); } } }
3.68
flink_DataSet_writeAsText
/** * Writes a DataSet as text file(s) to the specified location. * * <p>For each element of the DataSet the result of {@link Object#toString()} is written. * * @param filePath The path pointing to the location the text file is written to. * @param writeMode Control the behavior for existing files. Options are NO_OVERWRITE and * OVERWRITE. * @return The DataSink that writes the DataSet. * @see TextOutputFormat * @see DataSet#writeAsText(String) Output files and directories */ public DataSink<T> writeAsText(String filePath, WriteMode writeMode) { TextOutputFormat<T> tof = new TextOutputFormat<>(new Path(filePath)); tof.setWriteMode(writeMode); return output(tof); }
3.68
hmily_PropertyName_isParentOf
/** * Is parent of boolean. * * @param name the name * @return the boolean */ public boolean isParentOf(final PropertyName name) { if (this.getElementSize() != name.getElementSize() - 1) { return false; } return isAncestorOf(name); }
3.68
morf_NullFieldLiteral_deepCopyInternal
/** * @see org.alfasoftware.morf.sql.element.AliasedField#deepCopyInternal(DeepCopyTransformation) */ @Override protected NullFieldLiteral deepCopyInternal(final DeepCopyTransformation transformer) { return new NullFieldLiteral(this.getAlias()); }
3.68
framework_DataCommunicatorConnector_sendDroppedRows
/** * Inform the server of any dropped rows. */ private void sendDroppedRows() { if (!droppedKeys.isEmpty()) { JsonArray dropped = Json.createArray(); int i = 0; for (String key : droppedKeys) { dropped.set(i++, key); } droppedKeys.clear(); getRpcProxy(DataRequestRpc.class).dropRows(dropped); } }
3.68
hadoop_GetClusterNodeLabelsResponsePBImpl_getNodeLabels
/** * @deprecated Use {@link #getNodeLabelList()} instead. */ @Override @Deprecated public synchronized Set<String> getNodeLabels() { Set<String> set = new HashSet<>(); List<NodeLabel> labelList = getNodeLabelList(); if (labelList != null) { for (NodeLabel label : labelList) { set.add(label.getName()); } } return set; }
3.68
hbase_MetricsHeapMemoryManager_setCurMemStoreOffHeapSizeGauge
/** * Set the current global memstore off-heap size gauge * @param memStoreOffHeapSize the current memory off-heap size in memstore, in bytes. */ public void setCurMemStoreOffHeapSizeGauge(final long memStoreOffHeapSize) { source.setCurMemStoreOffHeapSizeGauge(memStoreOffHeapSize); }
3.68
hbase_MasterFileSystem_getTempDir
/** Returns HBase temp dir. */ public Path getTempDir() { return this.tempdir; }
3.68
flink_Costs_setDiskCost
/** * Sets the costs for disk for this Costs object. * * @param bytes The disk cost to set, in bytes to be written and read. */ public void setDiskCost(double bytes) { if (bytes == UNKNOWN || bytes >= 0) { this.diskCost = bytes; } else { throw new IllegalArgumentException(); } }
3.68
flink_Keys_areCompatible
/** Check if two sets of keys are compatible with each other (matching types, key counts). */ public boolean areCompatible(Keys<?> other) throws IncompatibleKeysException { TypeInformation<?>[] thisKeyFieldTypes = this.getKeyFieldTypes(); TypeInformation<?>[] otherKeyFieldTypes = other.getKeyFieldTypes(); if (thisKeyFieldTypes.length != otherKeyFieldTypes.length) { throw new IncompatibleKeysException(IncompatibleKeysException.SIZE_MISMATCH_MESSAGE); } else { for (int i = 0; i < thisKeyFieldTypes.length; i++) { if (!thisKeyFieldTypes[i].equals(otherKeyFieldTypes[i])) { throw new IncompatibleKeysException( thisKeyFieldTypes[i], otherKeyFieldTypes[i]); } } } return true; }
3.68
flink_CustomHeadersDecorator_setCustomHeaders
/** * Sets the custom headers for the message. * * @param customHeaders A collection of custom headers. */ public void setCustomHeaders(Collection<HttpHeader> customHeaders) { this.customHeaders = customHeaders; }
3.68
dubbo_StringUtils_isNumber
/** * Checks whether the string is a positive integer or zero. * * @param str a string * @return true if the string is a positive integer or zero */ public static boolean isNumber(String str) { return isNotEmpty(str) && NUM_PATTERN.matcher(str).matches(); }
3.68
flink_FlinkCompletableFutureAssert_withThrowableOfType
/** * Checks that the underlying throwable is of the given type and returns a {@link * ThrowableAssertAlternative} to chain further assertions on the underlying throwable. * * @param type the expected {@link Throwable} type * @param <T> the expected {@link Throwable} type * @return a {@link ThrowableAssertAlternative} built with underlying throwable. */ public <T extends Throwable> ThrowableAssertAlternative<T> withThrowableOfType( Class<T> type) { final ThrowableAssertAlternative<Throwable> throwableAssert = new ThrowableAssertAlternative<>(throwable).isInstanceOf(type); @SuppressWarnings("unchecked") final ThrowableAssertAlternative<T> cast = (ThrowableAssertAlternative<T>) throwableAssert; return cast; }
3.68
hadoop_LoadManifestsStage_processOneManifest
/** * Method invoked to process one manifest. * @param status file to process. * @throws IOException failure to load/parse/queue */ private void processOneManifest(FileStatus status) throws IOException { updateAuditContext(OP_LOAD_ALL_MANIFESTS); TaskManifest manifest = fetchTaskManifest(status); progress(); // update the directories final int created = coalesceDirectories(manifest); final String attemptID = manifest.getTaskAttemptID(); LOG.debug("{}: task attempt {} added {} directories", getName(), attemptID, created); // add to the summary. summaryInfo.add(manifest); // clear the manifest extra data so if // blocked waiting for queue capacity, // memory use is reduced. manifest.setIOStatistics(null); manifest.getExtraData().clear(); // queue those files. final boolean enqueued = entryWriter.enqueue(manifest.getFilesToCommit()); if (!enqueued) { LOG.warn("{}: Failed to write manifest for task {}", getName(), attemptID); throw new EntryWriteException(attemptID); } }
3.68
hadoop_DiskBalancerWorkItem_incErrorCount
/** * Increments the error count. */ public void incErrorCount() { this.errorCount++; }
3.68
flink_ResourceManagerId_fromUuid
/** Creates a ResourceManagerId that corresponds to the given UUID. */ public static ResourceManagerId fromUuid(UUID uuid) { return new ResourceManagerId(uuid); }
3.68
flink_HiveParserContext_getTokenRewriteStream
/** * @return the token rewrite stream being used to parse the current top-level SQL statement, or * null if it isn't available (e.g. for parser tests) */ public TokenRewriteStream getTokenRewriteStream() { return tokenRewriteStream; }
3.68
hadoop_BytesWritable_equals
/** * Are the two byte sequences equal? */ @Override public boolean equals(Object right_obj) { if (right_obj instanceof BytesWritable) return super.equals(right_obj); return false; }
3.68
pulsar_ProducerConfiguration_getMaxPendingMessages
/** * @return the maximum number of messages allowed in the outstanding messages queue for the producer */ public int getMaxPendingMessages() { return conf.getMaxPendingMessages(); }
3.68
flink_CatalogManager_initSchemaResolver
/** * Initializes a {@link SchemaResolver} for {@link Schema} resolution. * * <p>Currently, the resolver cannot be passed in the constructor because of a chicken-and-egg * problem between {@link Planner} and {@link CatalogManager}. * * @see TableEnvironmentImpl#create(EnvironmentSettings) */ public void initSchemaResolver( boolean isStreamingMode, ExpressionResolverBuilder expressionResolverBuilder) { this.schemaResolver = new DefaultSchemaResolver(isStreamingMode, typeFactory, expressionResolverBuilder); }
3.68
framework_AtmospherePushConnection_init
/* * (non-Javadoc) * * @see * com.vaadin.client.communication.PushConnection#init(ApplicationConnection, * PushConfigurationState) */ @Override public void init(final ApplicationConnection connection, final PushConfigurationState pushConfiguration) { this.connection = connection; connection.addHandler(ApplicationStoppedEvent.TYPE, event -> { if (state == State.DISCONNECT_PENDING || state == State.DISCONNECTED) { return; } disconnect(() -> { }); }); config = createConfig(); String debugParameter = Location.getParameter("debug"); if ("push".equals(debugParameter)) { config.setStringValue("logLevel", "debug"); } for (String param : pushConfiguration.parameters.keySet()) { String value = pushConfiguration.parameters.get(param); if (value.equalsIgnoreCase("true") || value.equalsIgnoreCase("false")) { config.setBooleanValue(param, value.equalsIgnoreCase("true")); } else { config.setStringValue(param, value); } } if (pushConfiguration.pushUrl != null) { url = pushConfiguration.pushUrl; } else { url = ApplicationConstants.APP_PROTOCOL_PREFIX + ApplicationConstants.PUSH_PATH; } runWhenAtmosphereLoaded( () -> Scheduler.get().scheduleDeferred(() -> connect())); }
3.68
streampipes_TableDescription_createTable
/** * Creates a table with the name {@link JdbcConnectionParameters#getDbTable()} and the * properties from {@link TableDescription#getEventSchema()}. Calls * {@link SQLStatementUtils#extractEventProperties(List, String, DbDescription)} internally with the * {@link TableDescription#getEventSchema()} to extract all possible columns. * * @throws SpRuntimeException If the {@link JdbcConnectionParameters#getDbTable()} is not allowed, if * executeUpdate throws an SQLException or if * {@link SQLStatementUtils#extractEventProperties(List, String, DbDescription)} * throws an exception */ public void createTable(String createStatement, StatementHandler statementHandler, DbDescription dbDescription, TableDescription tableDescription) throws SpRuntimeException { SQLStatementUtils.checkRegEx(tableDescription.getName(), "Tablename", dbDescription); StringBuilder statement = new StringBuilder(createStatement); statement.append(this.getName()).append(" ( "); statement.append( SQLStatementUtils.extractEventProperties(this.getEventSchema().getEventProperties(), "", dbDescription)) .append(" );"); try { statementHandler.statement.executeUpdate(statement.toString()); } catch (SQLException e) { throw new SpRuntimeException(e.getMessage()); } }
3.68
hbase_MultiRowRangeFilter_resetExclusive
/** * Resets the exclusive flag. */ public void resetExclusive() { exclusive = false; }
3.68
framework_Page_setUriFragment
/** * Sets URI fragment. This method fires a {@link UriFragmentChangedEvent} * * @param newUriFragment * id of the new fragment * @see UriFragmentChangedEvent * @see Page.UriFragmentChangedListener */ public void setUriFragment(String newUriFragment) { setUriFragment(newUriFragment, true); }
3.68
hmily_GsonUtils_toListMap
/** * Converts a JSON string to a list of maps. * * @param json the JSON string * @return the list of maps */ public List<Map<String, Object>> toListMap(final String json) { return GSON.fromJson(json, new TypeToken<List<Map<String, Object>>>() { }.getType()); }
3.68
dubbo_RegistrySpecListener_getRgs
/** * Get the list of registries attached to the event (for multiple-registry setups). */ public static List<String> getRgs(MetricsEvent event) { return event.getAttachmentValue(RegistryMetricsConstants.ATTACHMENT_REGISTRY_KEY); }
3.68
hbase_CoprocessorHost_findCoprocessor
/** * Find coprocessors by full class name or simple name. */ public C findCoprocessor(String className) { for (E env : coprocEnvironments) { if ( env.getInstance().getClass().getName().equals(className) || env.getInstance().getClass().getSimpleName().equals(className) ) { return env.getInstance(); } } return null; }
3.68
flink_JoinOperator_where
/** * {@inheritDoc} * * @return An incomplete Join transformation. Call {@link * org.apache.flink.api.java.operators.JoinOperator.JoinOperatorSets.JoinOperatorSetsPredicate#equalTo(int...)} * or {@link * org.apache.flink.api.java.operators.JoinOperator.JoinOperatorSets.JoinOperatorSetsPredicate#equalTo(KeySelector)} * to continue the Join. */ @Override public <K> JoinOperatorSetsPredicate where(KeySelector<I1, K> keySelector) { TypeInformation<K> keyType = TypeExtractor.getKeySelectorTypes(keySelector, input1.getType()); return new JoinOperatorSetsPredicate( new SelectorFunctionKeys<>( input1.clean(keySelector), input1.getType(), keyType)); }
3.68
pulsar_AuthenticationDataProvider_hasDataForHttp
/** * Check if data for HTTP are available. * * @return true if this authentication data contain data for HTTP */ default boolean hasDataForHttp() { return false; }
3.68
framework_VComboBox_performSelection
/** * Perform selection based on a message from the server. * * The special case where the selected item is not on the current page is * handled separately by the caller. * * @param selectedKey * non-empty selected item key * @param forceUpdateText * true to force the text box value to match the suggestion text * @param updatePromptAndSelectionIfMatchFound * true to update the text box and the current selection if a * matching suggestion is found on the current page */ private void performSelection(String selectedKey, boolean forceUpdateText, boolean updatePromptAndSelectionIfMatchFound) { if (selectedKey == null || selectedKey.isEmpty()) { currentSuggestion = null; // #13217 selectedOptionKey = null; setText(getEmptySelectionCaption()); } // some item selected for (ComboBoxSuggestion suggestion : currentSuggestions) { String suggestionKey = suggestion.getOptionKey(); if (!suggestionKey.equals(selectedKey)) { continue; } // at this point, suggestion key matches the new selection key if (updatePromptAndSelectionIfMatchFound) { if (!suggestionKey.equals(selectedOptionKey) || suggestion .getReplacementString().equals(tb.getText()) || forceUpdateText) { // Update text field if we've got a new // selection // Also update if we've got the same text to // retain old text selection behavior // OR if selected item caption is changed. setText(suggestion.getReplacementString()); selectedOptionKey = suggestionKey; } } currentSuggestion = suggestion; // only a single item can be selected break; } }
3.68
hadoop_StagingCommitter_setupJob
/** * Set up the job, including calling the same method on the * wrapped committer. * @param context job context * @throws IOException IO failure. */ @Override public void setupJob(JobContext context) throws IOException { super.setupJob(context); wrappedCommitter.setupJob(context); }
3.68
hbase_ExpiredMobFileCleaner_cleanExpiredMobFiles
/** * Cleans the MOB files when they're expired and their min versions are 0. If the latest timestamp * of the Cells in a MOB file is older than the TTL in the column family, it's regarded as expired * and this cleaner deletes it. Suppose the cells in a mob file M0 expire at time T0: a scan * started before T0 can see those cells and may still be running after T0. When the cleaner runs * at a later time T1, M0 is archived but can still be read by that scan from the archive * directory. * @param tableName The current table name. * @param family The current family. */ public void cleanExpiredMobFiles(String tableName, ColumnFamilyDescriptor family) throws IOException { Configuration conf = getConf(); TableName tn = TableName.valueOf(tableName); FileSystem fs = FileSystem.get(conf); LOG.info("Cleaning the expired MOB files of " + family.getNameAsString() + " in " + tableName); // disable the block cache. Configuration copyOfConf = new Configuration(conf); copyOfConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f); CacheConfig cacheConfig = new CacheConfig(copyOfConf); MobUtils.cleanExpiredMobFiles(fs, conf, tn, family, cacheConfig, EnvironmentEdgeManager.currentTime()); }
3.68
morf_ExceptSetOperator_drive
/** * @see org.alfasoftware.morf.util.ObjectTreeTraverser.Driver#drive(ObjectTreeTraverser) */ @Override public void drive(ObjectTreeTraverser traverser) { traverser.dispatch(getSelectStatement()); }
3.68
flink_NFACompiler_createEndingState
/** * Creates the dummy Final {@link State} of the NFA graph. * * @return dummy Final state */ private State<T> createEndingState() { State<T> endState = createState(ENDING_STATE_NAME, State.StateType.Final); windowTime = Optional.ofNullable(currentPattern.getWindowTime()).map(Time::toMilliseconds); return endState; }
3.68
hbase_HFileInfo_read
/** * Populate this instance with what we find on the passed in <code>in</code> stream. Can * deserialize protobuf of old Writables format. * @see #write(DataOutputStream) */ void read(final DataInputStream in) throws IOException { // This code is tested over in TestHFileReaderV1 where we read an old hfile w/ this new code. int pblen = ProtobufUtil.lengthOfPBMagic(); byte[] pbuf = new byte[pblen]; if (in.markSupported()) { in.mark(pblen); } int read = in.read(pbuf); if (read != pblen) { throw new IOException("read=" + read + ", wanted=" + pblen); } if (ProtobufUtil.isPBMagicPrefix(pbuf)) { parsePB(HFileProtos.FileInfoProto.parseDelimitedFrom(in)); } else { if (in.markSupported()) { in.reset(); parseWritable(in); } else { // We cannot use BufferedInputStream, it consumes more than we read from the underlying IS ByteArrayInputStream bais = new ByteArrayInputStream(pbuf); SequenceInputStream sis = new SequenceInputStream(bais, in); // Concatenate input streams // TODO: Am I leaking anything here wrapping the passed in stream? We are not calling // close on the wrapped streams but they should be let go after we leave this context? // I see that we keep a reference to the passed in inputstream but since we no longer // have a reference to this after we leave, we should be ok. parseWritable(new DataInputStream(sis)); } } }
3.68
hbase_ColumnFamilyDescriptorBuilder_setMinVersions
/** * Set minimum versions to retain. * @param minVersions The minimum number of versions to keep. (used when timeToLive is set) * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setMinVersions(int minVersions) { return setValue(MIN_VERSIONS_BYTES, Integer.toString(minVersions)); }
3.68
morf_ViewChanges_deployingAlso
/** * @param extraViewsToDeploy Additional views to deploy * @return a new {@link ViewChanges} which also deploys the specified views. */ public ViewChanges deployingAlso(Collection<View> extraViewsToDeploy) { Set<String> extraViewNames = ImmutableSet.copyOf(Collections2.transform(extraViewsToDeploy, viewToName())); return new ViewChanges(allViews, allViewsMap, dropSet, Sets.union(deploySet, extraViewNames), viewIndex); }
3.68
framework_AbstractClientConnector_getUI
/** * Finds a UI ancestor of this connector. <code>null</code> is returned if * no UI ancestor is found (typically because the connector is not attached * to a proper hierarchy). * * @return the UI ancestor of this connector, or <code>null</code> if none * is found. */ @Override public UI getUI() { ClientConnector connector = this; while (connector != null) { if (connector instanceof UI) { return (UI) connector; } connector = connector.getParent(); } return null; }
3.68
pulsar_ZipFiles_isZip
/** * Returns true if the given file is a zip file, i.e. starts with the ZIP local file header magic 0x504b0304 ("PK\u0003\u0004"). */ public static boolean isZip(File f) { try (DataInputStream in = new DataInputStream(new BufferedInputStream(new FileInputStream(f)))) { int test = in.readInt(); return test == 0x504b0304; } catch (final Exception e) { return false; } }
3.68
dubbo_ServiceInstance_isEnabled
/** * The enabled status of the registered service instance. * * @return <code>true</code> if the current instance is enabled; if <code>false</code>, the instance is disabled * and the client should remove it. The default value is <code>true</code>. */ default boolean isEnabled() { return true; }
3.68
framework_CssLayoutConnector_onStateChanged
/* * (non-Javadoc) * * @see * com.vaadin.client.ui.AbstractComponentConnector#onStateChanged(com.vaadin * .client.communication.StateChangeEvent) */ @Override public void onStateChanged(StateChangeEvent stateChangeEvent) { super.onStateChanged(stateChangeEvent); clickEventHandler.handleEventHandlerRegistration(); for (ComponentConnector child : getChildComponents()) { if (!getState().childCss.containsKey(child)) { continue; } String css = getState().childCss.get(child); Style style = child.getWidget().getElement().getStyle(); // should we remove styles also? How can we know what we have added // as it is added directly to the child component? String[] cssRules = css.split(";"); for (String cssRule : cssRules) { String[] parts = cssRule.split(":", 2); if (parts.length == 2) { style.setProperty(makeCamelCase(parts[0].trim()), parts[1].trim()); } } } }
3.68
framework_ComponentStateUtil_addRegisteredEventListener
/** * Adds an event listener id. * * @param eventListenerId * The event identifier to add * @return a registration object for removing the listener * @since 8.0 */ public static final Registration addRegisteredEventListener( SharedState state, String eventListenerId) { if (state.registeredEventListeners == null) { state.registeredEventListeners = new HashSet<>(); } state.registeredEventListeners.add(eventListenerId); return () -> removeRegisteredEventListener(state, eventListenerId); }
3.68
hbase_MetricsSink_refreshAgeOfLastAppliedOp
/** * Refreshing the age makes sure the value returned is the actual one and not the one set at * replication time. * @return refreshed age */ public long refreshAgeOfLastAppliedOp() { return setAgeOfLastAppliedOp(lastTimestampForAge); }
3.68
flink_GSBlobIdentifier_fromBlobId
/** * Construct an abstract blob identifier from a Google BlobId. * * @param blobId The Google BlobId * @return The abstract blob identifier */ public static GSBlobIdentifier fromBlobId(BlobId blobId) { return new GSBlobIdentifier(blobId.getBucket(), blobId.getName()); }
3.68
hadoop_AbfsOperationMetrics_getOperationsFailed
/** * @return the number of operations that failed. */ AtomicLong getOperationsFailed() { return operationsFailed; }
3.68
flink_WorkerResourceSpec_setExtendedResource
/** * Add the given extended resource. The old value with the same resource name will be * replaced if present. */ public Builder setExtendedResource(ExternalResource extendedResource) { this.extendedResources.put(extendedResource.getName(), extendedResource); return this; }
3.68
flink_AbstractID_getLowerPart
/** * Gets the lower 64 bits of the ID. * * @return The lower 64 bits of the ID. */ public long getLowerPart() { return lowerPart; }
3.68
rocketmq-connect_AvroData_toAvroLogical
/** * Converts a value to its Avro logical-type representation, if a logical converter is registered for the schema. * * @param schema the schema of the value * @param value the value to convert * @return the converted value if a logical-type converter applies and the value is non-null, otherwise the original value */ private static Object toAvroLogical(Schema schema, Object value) { if (schema != null && schema.getName() != null) { LogicalTypeConverter logicalConverter = TO_AVRO_LOGICAL_CONVERTERS.get(schema.getName()); if (logicalConverter != null && value != null) { return logicalConverter.convert(schema, value); } } return value; }
3.68
hbase_CompactionProgress_complete
/** * Marks the compaction as complete by setting total to current KV count; Total KV count is an * estimate, so there might be a discrepancy otherwise. */ public void complete() { this.totalCompactingKVs = this.currentCompactedKVs; }
3.68
flink_HiveASTParseUtils_parse
/** Parses the Hive query. */ public static HiveParserASTNode parse( String command, HiveParserContext ctx, String viewFullyQualifiedName) throws HiveASTParseException { HiveASTParseDriver pd = new HiveASTParseDriver(); HiveParserASTNode tree = pd.parse(command, ctx, viewFullyQualifiedName); tree = findRootNonNullToken(tree); handleSetColRefs(tree); return tree; }
3.68
framework_VaadinServletResponse_doSetCacheTime
// Implementation shared with VaadinPortletResponse static void doSetCacheTime(VaadinResponse response, long milliseconds) { if (milliseconds <= 0) { response.setHeader("Cache-Control", "no-cache"); response.setHeader("Pragma", "no-cache"); response.setDateHeader("Expires", 0); } else { response.setHeader("Cache-Control", "max-age=" + milliseconds / 1000); response.setDateHeader("Expires", System.currentTimeMillis() + milliseconds); // Required to apply caching in some Tomcats response.setHeader("Pragma", "cache"); } }
3.68
dubbo_NetUtils_isUniqueLocalAddress
/** * Checks whether the address is a Unique Local Address (fd00::/8). * * @param address {@link InetAddress} * @return {@code true} if the address is a Unique Local Address, otherwise {@code false} */ private static boolean isUniqueLocalAddress(InetAddress address) { byte[] ip = address.getAddress(); return (ip[0] & 0xff) == 0xfd; }
3.68
hibernate-validator_PredefinedScopeBeanMetaDataManager_createBeanMetaData
/** * Creates a {@link org.hibernate.validator.internal.metadata.aggregated.BeanMetaData} containing the meta data from all meta * data providers for the given type and its hierarchy. * * @param <T> The type of interest. * @param clazz The type's class. * * @return A bean meta data object for the given type. */ private static <T> BeanMetaDataImpl<T> createBeanMetaData(ConstraintCreationContext constraintCreationContext, ExecutableHelper executableHelper, ExecutableParameterNameProvider parameterNameProvider, JavaBeanHelper javaBeanHelper, ValidationOrderGenerator validationOrderGenerator, List<MetaDataProvider> optionalMetaDataProviders, MethodValidationConfiguration methodValidationConfiguration, List<MetaDataProvider> metaDataProviders, Class<T> clazz) { BeanMetaDataBuilder<T> builder = BeanMetaDataBuilder.getInstance( constraintCreationContext, executableHelper, parameterNameProvider, validationOrderGenerator, clazz, methodValidationConfiguration ); for ( MetaDataProvider provider : metaDataProviders ) { for ( BeanConfiguration<? super T> beanConfiguration : getBeanConfigurationForHierarchy( provider, clazz ) ) { builder.add( beanConfiguration ); } } return builder.build(); }
3.68
dubbo_ReferenceCountedResource_close
/** * Useful when used together with try-with-resources pattern */ @Override public final void close() { release(); }
3.68
hbase_MasterObserver_preMoveTables
/** * Called before tables are moved to target region server group * @param ctx the environment to interact with the framework and master * @param tables set of tables to move * @param targetGroup name of group */ default void preMoveTables(final ObserverContext<MasterCoprocessorEnvironment> ctx, Set<TableName> tables, String targetGroup) throws IOException { }
3.68
dubbo_NacosRegistry_isServiceNamesWithCompatibleMode
/** * Since 2.7.6 the legacy service name will be added to serviceNames * to fix the bug reported in https://github.com/apache/dubbo/issues/5442. * * @param url the url to check * @return whether the compatible (legacy) service name mode applies to the url */ private boolean isServiceNamesWithCompatibleMode(final URL url) { return !isAdminProtocol(url) && createServiceName(url).isConcrete(); }
3.68
flink_AbstractFileSource_setFileEnumerator
/** * Configures the {@link FileEnumerator} for the source. The File Enumerator is responsible * for selecting from the input path the set of files that should be processed (and which to * filter out). Furthermore, the File Enumerator may split the files further into * sub-regions, to enable parallelization beyond the number of files. */ public SELF setFileEnumerator(FileEnumerator.Provider fileEnumerator) { this.fileEnumerator = checkNotNull(fileEnumerator); return self(); }
3.68
hbase_StripeCompactionPolicy_needsSingleStripeCompaction
/** * @param si StoreFileManager. * @return Whether any stripe potentially needs compaction. */ protected boolean needsSingleStripeCompaction(StripeInformationProvider si) { int minFiles = this.config.getStripeCompactMinFiles(); for (List<HStoreFile> stripe : si.getStripes()) { if (stripe.size() >= minFiles) return true; } return false; }
3.68
hadoop_AbfsDelegationTokenManager_getCanonicalServiceName
/** * Query the token manager for the service name; if it does not implement * the extension interface, null is returned. * @return the canonical service name. */ @Override public String getCanonicalServiceName() { return ExtensionHelper.getCanonicalServiceName(tokenManager, null); }
3.68
flink_TopNBuffer_getCurrentTopNum
/** * Gets number of total records. * * @return the number of total records. */ public int getCurrentTopNum() { return currentTopNum; }
3.68
hadoop_NativeRuntime_reportStatus
/** * Get the status report from native space */ public static void reportStatus(TaskReporter reporter) throws IOException { assertNativeLibraryLoaded(); synchronized (reporter) { final byte[] statusBytes = JNIUpdateStatus(); final DataInputBuffer ib = new DataInputBuffer(); ib.reset(statusBytes, statusBytes.length); final FloatWritable progress = new FloatWritable(); progress.readFields(ib); reporter.setProgress(progress.get()); final Text status = new Text(); status.readFields(ib); if (status.getLength() > 0) { reporter.setStatus(status.toString()); } final IntWritable numCounters = new IntWritable(); numCounters.readFields(ib); if (numCounters.get() == 0) { return; } final Text group = new Text(); final Text name = new Text(); final LongWritable amount = new LongWritable(); for (int i = 0; i < numCounters.get(); i++) { group.readFields(ib); name.readFields(ib); amount.readFields(ib); reporter.incrCounter(group.toString(), name.toString(), amount.get()); } } }
3.68
pulsar_ResourceGroupService_getNumResourceGroups
/** * Get the current number of RGs. For testing. */ protected long getNumResourceGroups() { return resourceGroupsMap.mappingCount(); }
3.68
dubbo_CacheableFailbackRegistry_createURL
/** * Create DubboServiceAddress object using provider url, consumer url, and extra parameters. * * @param rawProvider provider url string * @param consumerURL URL object of consumer * @param extraParameters extra parameters * @return created DubboServiceAddressURL object */ protected ServiceAddressURL createURL(String rawProvider, URL consumerURL, Map<String, String> extraParameters) { boolean encoded = true; // use encoded value directly to avoid URLDecoder.decode allocation. int paramStartIdx = rawProvider.indexOf(ENCODED_QUESTION_MARK); if (paramStartIdx == -1) { // if ENCODED_QUESTION_MARK does not show, mark as not encoded. encoded = false; } // split by (encoded) question mark. // part[0] => protocol + ip address + interface. // part[1] => parameters (metadata). String[] parts = URLStrParser.parseRawURLToArrays(rawProvider, paramStartIdx); if (parts.length <= 1) { // 1-5 Received URL without any parameters. logger.warn(REGISTRY_NO_PARAMETERS_URL, "", "", "Received url without any parameters " + rawProvider); return DubboServiceAddressURL.valueOf(rawProvider, consumerURL); } String rawAddress = parts[0]; String rawParams = parts[1]; // Workaround for 'effectively final': duplicate the encoded variable. boolean isEncoded = encoded; // PathURLAddress if it's using dubbo protocol. URLAddress address = ConcurrentHashMapUtils.computeIfAbsent( stringAddress, rawAddress, k -> URLAddress.parse(k, getDefaultURLProtocol(), isEncoded)); address.setTimestamp(System.currentTimeMillis()); URLParam param = ConcurrentHashMapUtils.computeIfAbsent( stringParam, rawParams, k -> URLParam.parse(k, isEncoded, extraParameters)); param.setTimestamp(System.currentTimeMillis()); // create service URL using cached address, param, and consumer URL. ServiceAddressURL cachedServiceAddressURL = createServiceURL(address, param, consumerURL); if (isMatch(consumerURL, cachedServiceAddressURL)) { return cachedServiceAddressURL; } return null; }
3.68
hadoop_EditLogOutputStream_getTotalSyncTime
/** * Return total time spent in {@link #flushAndSync(boolean)} */ long getTotalSyncTime() { return totalTimeSync; }
3.68
hadoop_ReadStatistics_getTotalLocalBytesRead
/** * @return The total local bytes read. This will always be at least * as high as totalShortCircuitBytesRead, since all short-circuit * reads are also local. */ public synchronized long getTotalLocalBytesRead() { return totalLocalBytesRead; }
3.68
hbase_Bytes_getBytes
/** * Returns a new byte array, copied from the given {@code buf}, from the position (inclusive) to * the limit (exclusive). The position and the other index parameters are not changed. * @param buf a byte buffer * @return the byte array * @see #toBytes(ByteBuffer) */ public static byte[] getBytes(ByteBuffer buf) { return readBytes(buf.duplicate()); }
3.68
morf_FieldFromSelect_drive
/** * @see org.alfasoftware.morf.util.ObjectTreeTraverser.Driver#drive(ObjectTreeTraverser) */ @Override public void drive(ObjectTreeTraverser traverser) { traverser.dispatch(getSelectStatement()); }
3.68
morf_AbstractSqlDialectTest_testRightTrim
/** * Tests that Right Trim functionality works. */ @Test public void testRightTrim() { // Given Function rightTrim = rightTrim(new FieldReference("field1")); SelectStatement selectStatement = new SelectStatement(rightTrim).from(new TableReference("schedule")); // When String result = testDialect.convertStatementToSQL(selectStatement); // Then assertEquals("Right Trim script should match expected", expectedRightTrim(), result); }
3.68
hbase_HBaseConfiguration_getPassword
/** * Get the password from the Configuration instance using the getPassword method if it exists. If * not, then fall back to the general get method for configuration elements. * @param conf configuration instance for accessing the passwords * @param alias the name of the password element * @param defPass the default password * @return String password or default password */ public static String getPassword(Configuration conf, String alias, String defPass) throws IOException { String passwd; char[] p = conf.getPassword(alias); if (p != null) { LOG.debug("Config option {} was found through the Configuration getPassword method.", alias); passwd = new String(p); } else { LOG.debug("Config option {} was not found. Using provided default value", alias); passwd = defPass; } return passwd; }
3.68
hbase_HBaseReplicationEndpoint_reloadZkWatcher
/** * Closes the current ZKW (if not null) and creates a new one * @throws IOException If anything goes wrong connecting */ private void reloadZkWatcher() throws IOException { synchronized (zkwLock) { if (zkw != null) { zkw.close(); } zkw = new ZKWatcher(ctx.getConfiguration(), "connection to cluster: " + ctx.getPeerId(), this); zkw.registerListener(new PeerRegionServerListener(this)); } }
3.68
hbase_EnableTableProcedure_runCoprocessorAction
/** * Coprocessor Action. * @param env MasterProcedureEnv * @param state the procedure state */ private void runCoprocessorAction(final MasterProcedureEnv env, final EnableTableState state) throws IOException, InterruptedException { final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); if (cpHost != null) { switch (state) { case ENABLE_TABLE_PRE_OPERATION: cpHost.preEnableTableAction(getTableName(), getUser()); break; case ENABLE_TABLE_POST_OPERATION: cpHost.postCompletedEnableTableAction(getTableName(), getUser()); break; default: throw new UnsupportedOperationException(this + " unhandled state=" + state); } } }
3.68
hibernate-validator_JavaBeanField_run
/** * Runs the given privileged action, using a privileged block if required. * <p> * <b>NOTE:</b> This must never be changed into a publicly available method to avoid execution of arbitrary * privileged actions within HV's protection domain. */ @IgnoreForbiddenApisErrors(reason = "SecurityManager is deprecated in JDK17") private static <T> T run(PrivilegedAction<T> action) { return System.getSecurityManager() != null ? AccessController.doPrivileged( action ) : action.run(); }
3.68
pulsar_ModularLoadManagerImpl_checkNamespaceBundleSplit
/** * As the leader broker, attempt to automatically detect and split hot namespace bundles. */ @Override public void checkNamespaceBundleSplit() { if (!conf.isLoadBalancerAutoBundleSplitEnabled() || pulsar.getLeaderElectionService() == null || !pulsar.getLeaderElectionService().isLeader() || knownBrokers.size() <= 1) { return; } final boolean unloadSplitBundles = pulsar.getConfiguration().isLoadBalancerAutoUnloadSplitBundlesEnabled(); synchronized (bundleSplitStrategy) { final Map<String, String> bundlesToBeSplit = bundleSplitStrategy.findBundlesToSplit(loadData, pulsar); NamespaceBundleFactory namespaceBundleFactory = pulsar.getNamespaceService().getNamespaceBundleFactory(); int splitCount = 0; for (String bundleName : bundlesToBeSplit.keySet()) { try { final String namespaceName = LoadManagerShared.getNamespaceNameFromBundleName(bundleName); final String bundleRange = LoadManagerShared.getBundleRangeFromBundleName(bundleName); if (!namespaceBundleFactory .canSplitBundle(namespaceBundleFactory.getBundle(namespaceName, bundleRange))) { continue; } // Make sure the same bundle is not selected again. loadData.getBundleData().remove(bundleName); localData.getLastStats().remove(bundleName); // Clear namespace bundle-cache this.pulsar.getNamespaceService().getNamespaceBundleFactory() .invalidateBundleCache(NamespaceName.get(namespaceName)); deleteBundleDataFromMetadataStore(bundleName); // Check NamespacePolicies and AntiAffinityNamespace support unload bundle. boolean isUnload = false; String broker = bundlesToBeSplit.get(bundleName); if (unloadSplitBundles && shouldNamespacePoliciesUnload(namespaceName, bundleRange, broker) && shouldAntiAffinityNamespaceUnload(namespaceName, bundleRange, broker)) { isUnload = true; } log.info("Load-manager splitting bundle {} and unloading {}", bundleName, isUnload); pulsar.getAdminClient().namespaces().splitNamespaceBundle(namespaceName, bundleRange, isUnload, null); splitCount++; log.info("Successfully split namespace bundle {}", bundleName); } catch (Exception e) { log.error("Failed to split namespace bundle {}", bundleName, e); } } updateBundleSplitMetrics(splitCount); } }
3.68
hadoop_IdentityTransformer_transformIdentityForGetRequest
/** * Perform identity transformation for the Get request results in AzureBlobFileSystemStore: * getFileStatus(), listStatus(), getAclStatus(). * Input originalIdentity can be one of the following: * <pre> * 1. $superuser: * by default it will be transformed to local user/group, this can be disabled by setting * "fs.azure.identity.transformer.skip.superuser.replacement" to true. * * 2. User principal id: * can be transformed to localIdentity, if this principal id matches the principal id set in * "fs.azure.identity.transformer.service.principal.id" and localIdentity is stated in * "fs.azure.identity.transformer.service.principal.substitution.list" * * 3. User principal name (UPN): * can be transformed to a short name(localIdentity) if originalIdentity is owner name, and * "fs.azure.identity.transformer.enable.short.name" is enabled. * </pre> * @param originalIdentity the original user or group in the get request results: FileStatus, AclStatus. * @param isUserName indicate whether the input originalIdentity is an owner name or owning group name. * @param localIdentity the local user or group, should be parsed from UserGroupInformation. * @return owner or group after transformation. * */ public String transformIdentityForGetRequest(String originalIdentity, boolean isUserName, String localIdentity) throws IOException { if (originalIdentity == null) { originalIdentity = localIdentity; // localIdentity might be a full name, so continue the transformation. } // case 1: it is $superuser and replace $superuser config is enabled if (!skipSuperUserReplacement && SUPER_USER.equals(originalIdentity)) { return localIdentity; } if (skipUserIdentityReplacement) { return originalIdentity; } // case 2: original owner is principalId set in config, and localUser // is a daemon service specified in substitution list, // To avoid ownership check failure in job task, replace it // to local daemon user/group if (originalIdentity.equals(servicePrincipalId) && isInSubstitutionList(localIdentity)) { return localIdentity; } // case 3: If original owner is a fully qualified name, and // short name is enabled, replace with shortName. if (isUserName && shouldUseShortUserName(originalIdentity)) { return getShortName(originalIdentity); } return originalIdentity; }
3.68
flink_CheckpointConfig_setMaxSubtasksPerChannelStateFile
/** * The number of subtasks to share the same channel state file. If {@link * ExecutionCheckpointingOptions#UNALIGNED_MAX_SUBTASKS_PER_CHANNEL_STATE_FILE} has value equal * to <code>1</code>, each subtask will create a new channel state file. */ @PublicEvolving public void setMaxSubtasksPerChannelStateFile(int maxSubtasksPerChannelStateFile) { configuration.set( ExecutionCheckpointingOptions.UNALIGNED_MAX_SUBTASKS_PER_CHANNEL_STATE_FILE, maxSubtasksPerChannelStateFile); }
3.68
hbase_FavoredStochasticBalancer_randomAssignment
/** * If we have favored nodes for a region, we will return one of the FN as destination. If favored * nodes are not present for a region, we will generate and return one of the FN as destination. * If we can't generate anything, let's fall back. */ @Override public ServerName randomAssignment(RegionInfo regionInfo, List<ServerName> servers) throws HBaseIOException { ServerName destination = null; if (!FavoredNodesManager.isFavoredNodeApplicable(regionInfo)) { return super.randomAssignment(regionInfo, servers); } metricsBalancer.incrMiscInvocations(); Configuration conf = getConf(); List<ServerName> favoredNodes = fnm.getFavoredNodes(regionInfo); if (favoredNodes == null || favoredNodes.isEmpty()) { // Generate new favored nodes and return primary FavoredNodeAssignmentHelper helper = new FavoredNodeAssignmentHelper(servers, conf); helper.initialize(); try { favoredNodes = helper.generateFavoredNodes(regionInfo); updateFavoredNodesForRegion(regionInfo, favoredNodes); } catch (IOException e) { LOG.warn("Encountered exception while doing favored-nodes (random) assignment", e); throw new HBaseIOException(e); } } List<ServerName> onlineServers = getOnlineFavoredNodes(servers, favoredNodes); if (onlineServers.size() > 0) { destination = onlineServers.get(ThreadLocalRandom.current().nextInt(onlineServers.size())); } boolean alwaysAssign = conf.getBoolean(FAVORED_ALWAYS_ASSIGN_REGIONS, true); if (destination == null && alwaysAssign) { LOG.warn("Can't generate FN for region: " + regionInfo + " falling back"); destination = super.randomAssignment(regionInfo, servers); } return destination; }
3.68
hadoop_DockerClientConfigHandler_writeDockerCredentialsToPath
/** * Extract the Docker related tokens from the Credentials and write the Docker * client configuration to the supplied File. * * @param outConfigFile the File to write the Docker client configuration to. * @param credentials the populated Credentials object. * @throws IOException if the write fails. * @return true if a Docker credential is found in the supplied credentials. */ public static boolean writeDockerCredentialsToPath(File outConfigFile, Credentials credentials) throws IOException { boolean foundDockerCred = false; if (credentials.numberOfTokens() > 0) { ObjectMapper mapper = new ObjectMapper(); ObjectNode rootNode = mapper.createObjectNode(); ObjectNode registryUrlNode = mapper.createObjectNode(); for (Token<? extends TokenIdentifier> tk : credentials.getAllTokens()) { if (tk.getKind().equals(DockerCredentialTokenIdentifier.KIND)) { foundDockerCred = true; DockerCredentialTokenIdentifier ti = (DockerCredentialTokenIdentifier) tk.decodeIdentifier(); ObjectNode registryCredNode = mapper.createObjectNode(); registryUrlNode.set(ti.getRegistryUrl(), registryCredNode); registryCredNode.put(CONFIG_AUTH_KEY, new String(tk.getPassword(), StandardCharsets.UTF_8)); LOG.debug("Prepared token for write: {}", tk); } } if (foundDockerCred) { rootNode.set(CONFIG_AUTHS_KEY, registryUrlNode); String json = mapper.writerWithDefaultPrettyPrinter() .writeValueAsString(rootNode); FileUtils.writeStringToFile( outConfigFile, json, StandardCharsets.UTF_8); } } return foundDockerCred; }
3.68
hibernate-validator_ConstraintViolationAssert_assertNoViolations
/** * Asserts that the given violation list has no violations (is empty). * * @param violations The violation list to verify. * @param message The message to show if the assertion fails. */ public static void assertNoViolations(Set<? extends ConstraintViolation<?>> violations, String message) { Assertions.assertThat( violations ).describedAs( message ).isEmpty(); }
3.68
flink_VertexFlameGraphFactory_cleanLambdaNames
// Drops stack trace elements with class names matching the above regular expression. // These elements are useless, because they don't provide any additional information // except the fact that a lambda is used (they don't have source information, for example), // and also the lambda "class names" can be different across different JVMs, which pollutes // flame graphs. // Note that Thread.getStackTrace() performs a similar logic - the stack trace returned // by this method will not contain lambda references with it. But ThreadMXBean does collect // lambdas, so we have to clean them up explicitly. private static StackTraceElement[] cleanLambdaNames(StackTraceElement[] stackTrace) { StackTraceElement[] result = new StackTraceElement[stackTrace.length]; final String javaVersion = System.getProperty("java.version"); final Pattern lambdaClassName = javaVersion.compareTo("21") >= 0 ? JDK21_LAMBDA_CLASS_NAME : LAMBDA_CLASS_NAME; for (int i = 0; i < stackTrace.length; i++) { StackTraceElement element = stackTrace[i]; Matcher matcher = lambdaClassName.matcher(element.getClassName()); if (matcher.find()) { // org.apache.flink.streaming.runtime.io.RecordProcessorUtils$$Lambda$773/0x00000001007f84a0 // --> // org.apache.flink.streaming.runtime.io.RecordProcessorUtils$$Lambda$0/0x0 // This ensures that the name is stable across JVMs, but at the same time // keeps the stack frame in the call since it has the method name, which // may be useful for analysis. String newClassName = matcher.replaceFirst("$10/$20"); result[i] = new StackTraceElement( newClassName, element.getMethodName(), element.getFileName(), element.getLineNumber()); } else { result[i] = element; } } return result; }
3.68
morf_AddTable_reverse
/** * @see org.alfasoftware.morf.upgrade.SchemaChange#reverse(org.alfasoftware.morf.metadata.Schema) * @return {@link Schema} with new table removed. */ @Override public Schema reverse(Schema schema) { return new FilteredSchema(schema, newTable.getName()); }
3.68
graphhopper_GTFSFeed_getOrderedStopTimesForTrip
/** * For the given trip ID, fetch all the stop times in order of increasing stop_sequence. * This is an efficient iteration over a tree map. */ public Iterable<StopTime> getOrderedStopTimesForTrip (String trip_id) { Map<Fun.Tuple2, StopTime> tripStopTimes = stop_times.subMap( Fun.t2(trip_id, null), Fun.t2(trip_id, Fun.HI) ); return tripStopTimes.values(); }
3.68
hadoop_FSSchedulerConfigurationStore_logMutation
/** * Update and persist latest configuration in temp file. * @param logMutation configuration change to be persisted in write ahead log * @throws IOException throw IOE when write temp configuration file fail */ @Override public void logMutation(LogMutation logMutation) throws IOException { LOG.info(new GsonBuilder().serializeNulls().create().toJson(logMutation)); oldConf = new Configuration(schedConf); Map<String, String> mutations = logMutation.getUpdates(); for (Map.Entry<String, String> kv : mutations.entrySet()) { if (kv.getValue() == null) { this.schedConf.unset(kv.getKey()); } else { this.schedConf.set(kv.getKey(), kv.getValue()); } } tempConfigPath = writeTmpConfig(schedConf); }
3.68
flink_GeneratorFunction_close
/** Tear-down method for the function. */ default void close() throws Exception {}
3.68
framework_DefaultEditorEventHandler_handleBufferedMoveEvent
/** * Moves the editor to another column if the received event is a move event. * By default the editor is moved on a keydown event with keycode * {@link #KEYCODE_MOVE_HORIZONTAL}. This moves the editor left or right if * shift key is pressed or not, respectively. * * @param event * the received event * @return true if this method handled the event and nothing else should be * done, false otherwise */ protected boolean handleBufferedMoveEvent(EditorDomEvent<T> event) { Event e = event.getDomEvent(); if (e.getType().equals(BrowserEvents.CLICK) && event.getRowIndex() == event.getCell().getRowIndex()) { editRow(event, event.getRowIndex(), event.getCell().getColumnIndexDOM()); return true; } else if (e.getType().equals(BrowserEvents.KEYDOWN) && e.getKeyCode() == KEYCODE_MOVE_HORIZONTAL) { // Prevent tab out of Grid Editor event.getDomEvent().preventDefault(); final int newColIndex = e.getShiftKey() ? findPrevEditableColumnIndex(event.getGrid(), event.getFocusedColumnIndex() - 1) : findNextEditableColumnIndex(event.getGrid(), event.getFocusedColumnIndex() + 1); if (newColIndex >= 0) { editRow(event, event.getRowIndex(), newColIndex); } return true; } else if (e.getType().equals(BrowserEvents.KEYDOWN) && e.getKeyCode() == KEYCODE_BUFFERED_SAVE) { triggerValueChangeEvent(event); // Save and close. event.getGrid().getEditor().save(); FocusUtil.setFocus(event.getGrid(), true); return true; } return false; }
3.68
hadoop_JobInfo_countTasksAndAttempts
/** * Go through a job and update the member variables with counts for * information to output in the page. * * @param job * the job to get counts for. */ private void countTasksAndAttempts(Job job) { numReduces = 0; numMaps = 0; final Map<TaskId, Task> tasks = job.getTasks(); if (tasks == null) { return; } for (Task task : tasks.values()) { // Attempts counts Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts(); int successful, failed, killed; for (TaskAttempt attempt : attempts.values()) { successful = 0; failed = 0; killed = 0; if (TaskAttemptStateUI.NEW.correspondsTo(attempt.getState())) { // Do Nothing } else if (TaskAttemptStateUI.RUNNING.correspondsTo(attempt.getState())) { // Do Nothing } else if (TaskAttemptStateUI.SUCCESSFUL.correspondsTo(attempt .getState())) { ++successful; } else if (TaskAttemptStateUI.FAILED.correspondsTo(attempt.getState())) { ++failed; } else if (TaskAttemptStateUI.KILLED.correspondsTo(attempt.getState())) { ++killed; } switch (task.getType()) { case MAP: successfulMapAttempts += successful; failedMapAttempts += failed; killedMapAttempts += killed; if (attempt.getState() == TaskAttemptState.SUCCEEDED) { numMaps++; avgMapTime += (attempt.getFinishTime() - attempt.getLaunchTime()); } break; case REDUCE: successfulReduceAttempts += successful; failedReduceAttempts += failed; killedReduceAttempts += killed; if (attempt.getState() == TaskAttemptState.SUCCEEDED) { numReduces++; avgShuffleTime += (attempt.getShuffleFinishTime() - attempt .getLaunchTime()); avgMergeTime += attempt.getSortFinishTime() - attempt.getShuffleFinishTime(); avgReduceTime += (attempt.getFinishTime() - attempt .getSortFinishTime()); } break; } } } if (numMaps > 0) { avgMapTime = avgMapTime / numMaps; } if (numReduces > 0) { avgReduceTime = avgReduceTime / numReduces; avgShuffleTime = avgShuffleTime / numReduces; avgMergeTime = avgMergeTime / numReduces; } }
3.68
hbase_MultiByteBuff_remaining
/** * Returns the number of elements between the current position and the limit. * @return the remaining elements in this MBB */ @Override public int remaining() { checkRefCount(); int remain = 0; for (int i = curItemIndex; i < items.length; i++) { remain += items[i].remaining(); } return remain; }
3.68
flink_SplitFetcher_wakeUp
/** * Wake up the fetcher thread. There are only two blocking points in a running fetcher. 1. * Waiting for the next task in an idle fetcher. 2. Running a task. * * <p>They need to be woken up differently. If the fetcher is blocked waiting on the next task * in the task queue, we should just notify that a task is available. If the fetcher is running * the user split reader, we should call SplitReader.wakeUp() instead. * * <p>The correctness can be thought of in the following way. The purpose of wake up is to let * the fetcher thread go to the very beginning of the running loop. */ void wakeUp(boolean taskOnly) { // Synchronize to make sure the wake up only works for the current invocation of runOnce(). lock.lock(); try { wakeUpUnsafe(taskOnly); } finally { lock.unlock(); } }
3.68
framework_Flash_getParameter
/** * Gets the value of an object parameter. Parameters are optional * information, and they are passed to the instantiated object. Parameters * are stored as name-value pairs. * * @param name * name of the parameter * @return the value of the parameter, or null if not found. */ public String getParameter(String name) { return getState(false).embedParams != null ? getState(false).embedParams.get(name) : null; }
3.68
flink_InputTypeStrategies_symbol
/** * Strategy for a symbol argument of a specific {@link TableSymbol} enum, with value being one * of the provided variants. * * <p>A symbol is implied to be a literal argument. */ @SafeVarargs @SuppressWarnings("unchecked") public static <T extends Enum<? extends TableSymbol>> SymbolArgumentTypeStrategy<T> symbol( T firstAllowedVariant, T... otherAllowedVariants) { return new SymbolArgumentTypeStrategy<T>( (Class<T>) firstAllowedVariant.getClass(), Stream.concat(Stream.of(firstAllowedVariant), Arrays.stream(otherAllowedVariants)) .collect(Collectors.toSet())); }
3.68
hbase_MutableRegionInfo_toString
/** * @see Object#toString() */ @Override public String toString() { return "{ENCODED => " + getEncodedName() + ", " + HConstants.NAME + " => '" + Bytes.toStringBinary(this.regionName) + "', STARTKEY => '" + Bytes.toStringBinary(this.startKey) + "', ENDKEY => '" + Bytes.toStringBinary(this.endKey) + "'" + (isOffline() ? ", OFFLINE => true" : "") + (isSplit() ? ", SPLIT => true" : "") + ((replicaId > 0) ? ", REPLICA_ID => " + replicaId : "") + "}"; }
3.68
morf_GraphBasedUpgradeNode_hashCode
/** * The hashCode of this class depends only on the name. * * @see java.lang.Object#hashCode() */ @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + (name == null ? 0 : name.hashCode()); return result; }
3.68
hbase_RowIndexSeekerV1_copyFromNext
/** * Copy the state from the next one into this instance (the previous state placeholder). Used to * save the previous state when we are advancing the seeker to the next key/value. */ protected void copyFromNext(SeekerState nextState) { keyBuffer = nextState.keyBuffer; currentKey.setKey(nextState.keyBuffer, nextState.currentKey.getRowPosition() - Bytes.SIZEOF_SHORT, nextState.keyLength); startOffset = nextState.startOffset; valueOffset = nextState.valueOffset; keyLength = nextState.keyLength; valueLength = nextState.valueLength; nextKvOffset = nextState.nextKvOffset; memstoreTS = nextState.memstoreTS; currentBuffer = nextState.currentBuffer; tagsOffset = nextState.tagsOffset; tagsLength = nextState.tagsLength; }
3.68
dubbo_RegistryManager_destroyAll
/** * Close all created registries */ public void destroyAll() { if (!destroyed.compareAndSet(false, true)) { return; } if (LOGGER.isInfoEnabled()) { LOGGER.info("Close all registries " + getRegistries()); } // Lock up the registry shutdown process lock.lock(); try { for (Registry registry : getRegistries()) { try { registry.destroy(); } catch (Throwable e) { LOGGER.warn(INTERNAL_ERROR, "unknown error in registry module", "", e.getMessage(), e); } } registries.clear(); } finally { // Release the lock lock.unlock(); } }
3.68
flink_SingleInputOperator_getInput
/** * Returns the input operator or data source, or null, if none is set. * * @return This operator's input. */ public Operator<IN> getInput() { return this.input; }
3.68
framework_AbstractComponent_setWidthFull
/* * (non-Javadoc) * * @see com.vaadin.server.Sizeable#setWidthFull() */ @Override public void setWidthFull() { setWidth(100, Unit.PERCENTAGE); }
3.68
Activiti_FlowNodeActivityBehavior_execute
/** * Default behaviour: just leave the activity with no extra functionality. */ public void execute(DelegateExecution execution) { leave(execution); }
3.68