Columns:
  name: string, length 12 to 178
  code_snippet: string, length 8 to 36.5k
  score: float64, range 3.26 to 3.68
rocketmq-connect_KafkaSinkValueConverter_convertStructValue
/** * convert struct value * * @param toStruct * @param originalStruct */ private void convertStructValue(Struct toStruct, org.apache.kafka.connect.data.Struct originalStruct) { for (Field field : toStruct.schema().fields()) { try { Schema.Type type = field.schema().type(); Object value = originalStruct.get(field.name()); switch (type) { case INT8: case INT16: case INT32: case INT64: case FLOAT32: case FLOAT64: case BOOLEAN: case STRING: case BYTES: toStruct.put(field.name(), value); break; case STRUCT: case ARRAY: case MAP: toStruct.put( field.name(), convertKafkaValue( toStruct.schema().field(field.name()).schema(), value ) ); break; } } catch (Exception ex) { logger.error("Convert to kafka schema failure", ex); throw new ConnectException(ex); } } }
3.68
framework_PropertysetItem_toString
/** * Gets the <code>String</code> representation of the contents of the Item. * The format of the string is a space separated catenation of the * <code>String</code> representations of the Properties contained by the * Item. * * @return <code>String</code> representation of the Item contents */ @Override public String toString() { String retValue = ""; for (final Iterator<?> i = getItemPropertyIds().iterator(); i .hasNext();) { final Object propertyId = i.next(); retValue += getItemProperty(propertyId).getValue(); if (i.hasNext()) { retValue += " "; } } return retValue; }
3.68
hadoop_DiskBalancerWorkItem_getBandwidth
/** * Max disk bandwidth to use. MB per second. * * @return - long. */ public long getBandwidth() { return bandwidth; }
3.68
hmily_HmilyXaResource_prepare
/** * Prepare int. * * @return the int * @throws XAException the xa exception */ public int prepare() throws XAException { return this.prepare(this.xid); }
3.68
hbase_RegionInfo_isAdjacent
/** * Returns True if region is adjacent, either just before or just after this one. * @see #isNext(RegionInfo) */ default boolean isAdjacent(RegionInfo other) { return getTable().equals(other.getTable()) && areAdjacent(this, other); }
3.68
framework_DeclarativeTestUI_getDesignPath
/** * Figure out the proper path for the HTML design file */ private String getDesignPath() { Class<?> clazz = getClass(); String designFilePath = null; if (clazz.getAnnotation(DeclarativeUI.class).absolutePath()) { designFilePath = ""; } else { // This is rather nasty.. but it works well enough for now. String userDir = System.getProperty("user.dir"); designFilePath = userDir + "/uitest/src/" + clazz.getPackage().getName().replace('.', '/') + "/"; } String designFileName = clazz.getAnnotation(DeclarativeUI.class) .value(); return designFilePath + designFileName; }
3.68
framework_AbstractBeanContainer_addValueChangeListener
/** * Make this container listen to the given property provided it notifies * when its value changes. * * @param item * The {@link Item} that contains the property * @param propertyId * The id of the property */ private void addValueChangeListener(Item item, Object propertyId) { Property<?> property = item.getItemProperty(propertyId); if (property instanceof ValueChangeNotifier) { // avoid multiple notifications for the same property if // multiple filters are in use ValueChangeNotifier notifier = (ValueChangeNotifier) property; notifier.removeListener(this); notifier.addListener(this); } }
3.68
flink_RocksDBNativeMetricOptions_enableNumImmutableMemTable
/** Returns number of immutable memtables that have not yet been flushed. */ public void enableNumImmutableMemTable() { this.properties.add(RocksDBProperty.NumImmutableMemTable.getRocksDBProperty()); }
3.68
morf_SchemaUtils_primaryKey
/** * @see org.alfasoftware.morf.metadata.SchemaUtils.ColumnBuilder#primaryKey() */ @Override public ColumnBuilder primaryKey() { return new ColumnBuilderImpl(this, isNullable(), getDefaultValue(), true, isAutoNumbered(), getAutoNumberStart()); }
3.68
framework_MultiSelect_deselect
/** * Removes the given items from the set of currently selected items. * <p> * If none of the items were selected, this is a NO-OP. * <p> * This is a short-hand for {@link #updateSelection(Set, Set)} with nothing * to select. * * @param items * to remove from selection, not {@code null} */ public default void deselect(T... items) { Objects.requireNonNull(items); Stream.of(items).forEach(Objects::requireNonNull); updateSelection(Collections.emptySet(), new LinkedHashSet<>(Arrays.asList(items))); }
3.68
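A minimal usage sketch for the deselect short-hand above. It assumes CheckBoxGroup (one of the framework's MultiSelect implementations) can be exercised standalone; the item values are made up for illustration.

    import com.vaadin.ui.CheckBoxGroup;
    import java.util.Arrays;

    public class DeselectExample {
        public static void main(String[] args) {
            // Assumption: CheckBoxGroup state can be manipulated outside a running UI for this sketch.
            CheckBoxGroup<String> group = new CheckBoxGroup<>("Toppings");
            group.setItems(Arrays.asList("cheese", "ham", "olives"));
            group.select("cheese", "ham");
            // Removes "ham" from the selection; "olives" was never selected, so that part is a no-op.
            group.deselect("ham", "olives");
            System.out.println(group.getSelectedItems()); // [cheese]
        }
    }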
framework_RpcDataProviderExtension_removeDataGenerator
/** * Removes a {@link DataGenerator} from this * {@code RpcDataProviderExtension}. If given DataGenerator is not added to * this data provider, this method does nothing. * * @since 7.6 * @param generator * generator to remove */ public void removeDataGenerator(DataGenerator generator) { dataGenerators.remove(generator); }
3.68
hbase_MasterObserver_preMergeRegionsAction
/** * Called before the regions merge. * @param ctx the environment to interact with the framework and master */ default void preMergeRegionsAction(final ObserverContext<MasterCoprocessorEnvironment> ctx, final RegionInfo[] regionsToMerge) throws IOException { }
3.68
flink_DeltaIteration_closeWith
/** * Closes the delta iteration. This method defines the end of the delta iteration's function. * * @param solutionSetDelta The delta for the solution set. The delta will be merged into the * solution set at the end of each iteration. * @param newWorkset The new workset (feedback data set) that will be fed back to the next * iteration. * @return The DataSet that represents the result of the iteration, after the computation has * terminated. * @see DataSet#iterateDelta(DataSet, int, int...) */ public DataSet<ST> closeWith(DataSet<ST> solutionSetDelta, DataSet<WT> newWorkset) { return new DeltaIterationResultSet<ST, WT>( initialSolutionSet.getExecutionEnvironment(), initialSolutionSet.getType(), initialWorkset.getType(), this, solutionSetDelta, newWorkset, keys, maxIterations); }
3.68
hadoop_S3APrefetchingInputStream_read
/** * Reads up to {@code len} bytes from this stream and copies them into * the given {@code buffer} starting at the given {@code offset}. * Returns the number of bytes actually copied in to the given buffer. * * @param buffer the buffer to copy data into. * @param offset data is copied starting at this offset. * @param len max number of bytes to copy. * @return the number of bytes actually copied in to the given buffer. * @throws IOException if there is an IO error during this operation. */ @Override public synchronized int read(byte[] buffer, int offset, int len) throws IOException { throwIfClosed(); return inputStream.read(buffer, offset, len); }
3.68
framework_JsonCodec_getInternalTransportType
/* * These methods looks good to inline, but are on a cold path of the * otherwise hot encode method, which needed to be shorted to allow inlining * of the hot part. */ private static String getInternalTransportType(Type valueType) { return TYPE_TO_TRANSPORT_TYPE.get(getClassForType(valueType)); }
3.68
flink_RootExceptionHistoryEntry_fromFailureHandlingResultSnapshot
/** * Creates a {@code RootExceptionHistoryEntry} based on the passed {@link * FailureHandlingResultSnapshot}. * * @param snapshot The reason for the failure. * @return The {@code RootExceptionHistoryEntry} instance. * @throws NullPointerException if {@code cause} or {@code failingTaskName} are {@code null}. * @throws IllegalArgumentException if the {@code timestamp} of the passed {@code * FailureHandlingResult} is not bigger than {@code 0}. */ public static RootExceptionHistoryEntry fromFailureHandlingResultSnapshot( FailureHandlingResultSnapshot snapshot) { String failingTaskName = null; TaskManagerLocation taskManagerLocation = null; if (snapshot.getRootCauseExecution().isPresent()) { final Execution rootCauseExecution = snapshot.getRootCauseExecution().get(); failingTaskName = rootCauseExecution.getVertexWithAttempt(); taskManagerLocation = rootCauseExecution.getAssignedResourceLocation(); } return createRootExceptionHistoryEntry( snapshot.getRootCause(), snapshot.getTimestamp(), snapshot.getFailureLabels(), failingTaskName, taskManagerLocation, snapshot.getConcurrentlyFailedExecution()); }
3.68
hbase_ZKSecretWatcher_refreshKeys
/** * refresh keys */ synchronized void refreshKeys() { try { List<ZKUtil.NodeAndData> nodes = ZKUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode); refreshNodes(nodes); } catch (KeeperException ke) { LOG.error(HBaseMarkers.FATAL, "Error reading data from zookeeper", ke); watcher.abort("Error reading changed keys from zookeeper", ke); } }
3.68
hbase_MetricsSource_incrSourceInitializing
/** * Increment the count for initializing sources */ public void incrSourceInitializing() { singleSourceSource.incrSourceInitializing(); globalSourceSource.incrSourceInitializing(); }
3.68
hbase_MiniHBaseCluster_abortRegionServer
/** * Cause a region server to exit doing basic clean up only on its way out. * @param serverNumber Used as index into a list. */ public String abortRegionServer(int serverNumber) { HRegionServer server = getRegionServer(serverNumber); LOG.info("Aborting " + server.toString()); server.abort("Aborting for tests", new Exception("Trace info")); return server.toString(); }
3.68
hudi_HoodieDataSourceHelpers_hasNewCommits
/** * Checks if the Hoodie table has new data since given timestamp. This can be subsequently fed to an incremental * view read, to perform incremental processing. */ @PublicAPIMethod(maturity = ApiMaturityLevel.STABLE) public static boolean hasNewCommits(FileSystem fs, String basePath, String commitTimestamp) { return listCommitsSince(fs, basePath, commitTimestamp).size() > 0; }
3.68
hadoop_Trilean_getTrilean
/** * Converts String to Trilean. * * @param str the string to convert. * @return the corresponding Trilean for the passed string str. */ public static Trilean getTrilean(String str) { if (TRUE_STR.equalsIgnoreCase(str)) { return Trilean.TRUE; } if (FALSE_STR.equalsIgnoreCase(str)) { return Trilean.FALSE; } return Trilean.UNKNOWN; }
3.68
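A small illustrative sketch of the string-to-Trilean conversion above. The import path is an assumption (the class lives in Hadoop's ABFS module) and may differ between versions.

    // Import path assumed for illustration.
    import org.apache.hadoop.fs.azurebfs.enums.Trilean;

    public class TrileanExample {
        public static void main(String[] args) {
            System.out.println(Trilean.getTrilean("TRUE"));   // TRUE (match is case-insensitive)
            System.out.println(Trilean.getTrilean("false"));  // FALSE
            System.out.println(Trilean.getTrilean("maybe"));  // UNKNOWN (any unrecognized string)
            System.out.println(Trilean.getTrilean(null));     // UNKNOWN (equalsIgnoreCase on a constant is null-safe)
        }
    }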
framework_PointerEvent_isPrimary
/** * Indicates whether the pointer is the primary pointer of this type. * * @return true if the pointer is the primary pointer, otherwise false */ public final boolean isPrimary() { return isPrimary(getNativeEvent()); }
3.68
hbase_MasterObserver_preTableFlush
/** * Called before the table memstore is flushed to disk. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void preTableFlush(final ObserverContext<MasterCoprocessorEnvironment> ctx, final TableName tableName) throws IOException { }
3.68
flink_TestingSinkSettings_getCheckpointingMode
/** Checkpointing mode required for the sink. */ public CheckpointingMode getCheckpointingMode() { return checkpointingMode; }
3.68
framework_GridDragSourceConnector_addDraggedStyle
/** * Add {@code v-grid-row-dragged} class name to each row being dragged. * * @param event * The dragstart event. */ @Override protected void addDraggedStyle(NativeEvent event) { getDraggedRowElementStream().forEach( rowElement -> rowElement.addClassName(draggedStyleName)); }
3.68
pulsar_ProducerConfiguration_setProperties
/** * Add all the properties in the provided map. * * @param properties * @return */ public ProducerConfiguration setProperties(Map<String, String> properties) { conf.getProperties().putAll(properties); return this; }
3.68
hbase_DeleteNamespaceProcedure_deleteNamespace
/** * delete the row from the ns family in meta table. * @param env MasterProcedureEnv * @param namespaceName name of the namespace in string format */ private static void deleteNamespace(MasterProcedureEnv env, String namespaceName) throws IOException { getTableNamespaceManager(env).deleteNamespace(namespaceName); }
3.68
flink_ApplicationStatus_deriveJobStatus
/** * Derives the {@link JobStatus} from the {@code ApplicationStatus}. * * @return The corresponding {@code JobStatus}. * @throws UnsupportedOperationException for {@link #UNKNOWN}. */ public JobStatus deriveJobStatus() { if (!JOB_STATUS_APPLICATION_STATUS_BI_MAP.inverse().containsKey(this)) { throw new UnsupportedOperationException( this.name() + " cannot be mapped to a JobStatus."); } return JOB_STATUS_APPLICATION_STATUS_BI_MAP.inverse().get(this); }
3.68
flink_BlobClient_receiveAndCheckGetResponse
/** * Reads the response from the input stream and throws in case of errors. * * @param is stream to read from * @throws IOException if the response is an error or reading the response failed */ private static void receiveAndCheckGetResponse(InputStream is) throws IOException { int response = is.read(); if (response < 0) { throw new EOFException("Premature end of response"); } if (response == RETURN_ERROR) { Throwable cause = readExceptionFromStream(is); throw new IOException("Server side error: " + cause.getMessage(), cause); } else if (response != RETURN_OKAY) { throw new IOException("Unrecognized response"); } }
3.68
graphhopper_AccessFilter_allEdges
/** * Accepts all edges that are either forward or backward according to the given accessEnc. * Edges where neither one of the flags is enabled will still not be accepted. If you need to retrieve all edges * regardless of their encoding use {@link EdgeFilter#ALL_EDGES} instead. */ public static AccessFilter allEdges(BooleanEncodedValue accessEnc) { return new AccessFilter(accessEnc, true, true); }
3.68
hadoop_SinglePendingCommit_serializer
/** * Get a JSON serializer for this class. * @return a serializer. */ public static JsonSerialization<SinglePendingCommit> serializer() { return new JsonSerialization<>(SinglePendingCommit.class, false, false); }
3.68
hadoop_AddMountAttributes_getMountTableEntryWithAttributes
/** * Retrieve mount table object with all attributes derived from this object. * * @return MountTable object with updated attributes. * @throws IOException If mount table instantiation fails. */ public MountTable getMountTableEntryWithAttributes() throws IOException { String normalizedMount = RouterAdmin.normalizeFileSystemPath(this.getMount()); return getMountTableForAddRequest(normalizedMount); }
3.68
hbase_RegionNormalizerFactory_createNormalizerManager
// TODO: consolidate this down to MasterServices public static RegionNormalizerManager createNormalizerManager(final Configuration conf, final MasterRegion masterRegion, final ZKWatcher zkWatcher, final HMaster master) throws DeserializationException, IOException, KeeperException { final RegionNormalizer regionNormalizer = getRegionNormalizer(conf); regionNormalizer.setMasterServices(master); final RegionNormalizerStateStore stateStore = new RegionNormalizerStateStore(masterRegion, zkWatcher); final RegionNormalizerChore chore = master.isInMaintenanceMode() ? null : new RegionNormalizerChore(master); final RegionNormalizerWorkQueue<TableName> workQueue = master.isInMaintenanceMode() ? null : new RegionNormalizerWorkQueue<>(); final RegionNormalizerWorker worker = master.isInMaintenanceMode() ? null : new RegionNormalizerWorker(conf, master, regionNormalizer, workQueue); return new RegionNormalizerManager(stateStore, chore, workQueue, worker); }
3.68
framework_VComboBox_showSuggestions
/** * Shows the popup where the user can see the filtered options that have * been set with a call to * {@link SuggestionMenu#setSuggestions(Collection)}. * * @param currentPage * The current page number */ public void showSuggestions(final int currentPage) { debug("VComboBox.SP: showSuggestions(" + currentPage + ", " + getTotalSuggestions() + ")"); final SuggestionPopup popup = this; // Add TT anchor point getElement().setId("VAADIN_COMBOBOX_OPTIONLIST"); // Set the default position if the popup isn't already visible, // the setPopupPositionAndShow call later on can deal with any // adjustments that might be needed if (!popup.isShowing()) { leftPosition = getDesiredLeftPosition(); topPosition = getDesiredTopPosition(); setPopupPosition(leftPosition, topPosition); } int nullOffset = getNullSelectionItemShouldBeVisible() ? 1 : 0; boolean firstPage = currentPage == 0; final int first = currentPage * pageLength + 1 - (firstPage ? 0 : nullOffset); final int last = first + currentSuggestions.size() - 1 - (firstPage && "".equals(lastFilter) ? nullOffset : 0); final int matches = getTotalSuggestions(); if (last > 0) { // nullsel not counted, as requested by user status.setInnerText((matches == 0 ? 0 : first) + "-" + last + "/" + matches); } else { status.setInnerText(""); } // We don't need to show arrows or statusbar if there is // only one page setPagingEnabled( getTotalSuggestionsIncludingNullSelectionItem() > pageLength && pageLength > 0); setPrevButtonActive(first > 1); setNextButtonActive(last < matches); // clear previously fixed width menu.setWidth(""); menu.getElement().getFirstChildElement().getStyle().clearWidth(); setPopupPositionAndShow(popup); }
3.68
flink_ExecutorUtils_nonBlockingShutdown
/** * Shuts the given {@link ExecutorService} down in a non-blocking fashion. The shut down will be * executed by a thread from the common fork-join pool. * * <p>The executor services will be shut down gracefully for the given timeout period. * Afterwards {@link ExecutorService#shutdownNow()} will be called. * * @param timeout before {@link ExecutorService#shutdownNow()} is called * @param unit time unit of the timeout * @param executorServices to shut down * @return Future which is completed once the {@link ExecutorService} are shut down */ public static CompletableFuture<Void> nonBlockingShutdown( long timeout, TimeUnit unit, ExecutorService... executorServices) { return CompletableFuture.supplyAsync( () -> { gracefulShutdown(timeout, unit, executorServices); return null; }); }
3.68
framework_ComputedStyle_getDoubleProperty
/** * Retrieves the given computed property as a double. * * Returns NaN if the property cannot be converted to a double * * @since 7.5.1 * @param name * the property to retrieve * @return the double value of the property */ public final double getDoubleProperty(String name) { Profiler.enter("ComputedStyle.getDoubleProperty"); String value = getProperty(name); double result = parseDoubleNative(value); Profiler.leave("ComputedStyle.getDoubleProperty"); return result; }
3.68
morf_SqlDialect_getSqlForCriterionValueList
/** * Converts a list of values on a criterion into a comma-separated list. * * @param criterion The criterion to convert * @return The converted criterion as a String */ @SuppressWarnings("unchecked") protected String getSqlForCriterionValueList(Criterion criterion) { if (!(criterion.getValue() instanceof List)) { throw new IllegalStateException("Invalid parameter for IN criterion"); } StringBuilder builder = new StringBuilder(); boolean first = true; for (Object o : (List<Object>) criterion.getValue()) { if (!first) { builder.append(", "); } builder.append(getSqlForCriterionValue(o)); first = false; } return builder.toString(); }
3.68
flink_BinaryInMemorySortBuffer_createBuffer
/** Create a memory sorter in `insert` way. */ public static BinaryInMemorySortBuffer createBuffer( NormalizedKeyComputer normalizedKeyComputer, AbstractRowDataSerializer<RowData> inputSerializer, BinaryRowDataSerializer serializer, RecordComparator comparator, MemorySegmentPool memoryPool) { checkArgument(memoryPool.freePages() >= MIN_REQUIRED_BUFFERS); int totalNumBuffers = memoryPool.freePages(); ArrayList<MemorySegment> recordBufferSegments = new ArrayList<>(16); return new BinaryInMemorySortBuffer( normalizedKeyComputer, inputSerializer, serializer, comparator, recordBufferSegments, new SimpleCollectingOutputView( recordBufferSegments, memoryPool, memoryPool.pageSize()), memoryPool, totalNumBuffers); }
3.68
hbase_ZNodePaths_joinZNode
/** * Join the prefix znode name with the suffix znode name to generate a proper full znode name. * <p> * Assumes prefix does not end with slash and suffix does not begin with it. * @param prefix beginning of znode name * @param suffix ending of znode name * @return result of properly joining prefix with suffix */ public static String joinZNode(String prefix, String... suffix) { StringBuilder sb = new StringBuilder(prefix); for (String s : suffix) { sb.append(ZNodePaths.ZNODE_PATH_SEPARATOR).append(s); } return sb.toString(); }
3.68
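Based only on the snippet above, a quick illustration of how the prefix and varargs suffixes are joined; it assumes HBase's ZNODE_PATH_SEPARATOR is '/' and the usual class location in the hbase-zookeeper module.

    import org.apache.hadoop.hbase.zookeeper.ZNodePaths;

    public class JoinZNodeExample {
        public static void main(String[] args) {
            // Prefix must not end with '/', suffixes must not start with one.
            String znode = ZNodePaths.joinZNode("/hbase", "replication", "peers");
            System.out.println(znode); // /hbase/replication/peers
        }
    }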
flink_ConfigOptions_enumType
/** * Defines that the value of the option should be of {@link Enum} type. * * @param enumClass Concrete type of the expected enum. */ public <T extends Enum<T>> TypedConfigOptionBuilder<T> enumType(Class<T> enumClass) { return new TypedConfigOptionBuilder<>(key, enumClass); }
3.68
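A sketch of how the enumType builder step above is typically reached, assuming Flink's ConfigOptions.key(...) entry point; the option key name is made up for illustration.

    import org.apache.flink.configuration.ConfigOption;
    import org.apache.flink.configuration.ConfigOptions;
    import java.util.concurrent.TimeUnit;

    public class EnumOptionExample {
        // Hypothetical option key, used only to show the builder chain.
        static final ConfigOption<TimeUnit> TIMEOUT_UNIT =
                ConfigOptions.key("example.timeout.unit")
                        .enumType(TimeUnit.class)
                        .defaultValue(TimeUnit.SECONDS);

        public static void main(String[] args) {
            System.out.println(TIMEOUT_UNIT.defaultValue()); // SECONDS
        }
    }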
hbase_SpaceLimitSettings_fromSpaceQuota
/** * Constructs a {@link SpaceLimitSettings} from the provided protobuf message and namespace. * @param namespace The target namespace for the limit. * @param proto The protobuf representation. * @return A QuotaSettings. */ static SpaceLimitSettings fromSpaceQuota(final String namespace, final QuotaProtos.SpaceQuota proto) { validateProtoArguments(proto); return new SpaceLimitSettings(namespace, proto.getSoftLimit(), ProtobufUtil.toViolationPolicy(proto.getViolationPolicy())); }
3.68
flink_PekkoRpcService_connect
// this method does not mutate state and is thus thread-safe @Override public <F extends Serializable, C extends FencedRpcGateway<F>> CompletableFuture<C> connect( String address, F fencingToken, Class<C> clazz) { return connectInternal( address, clazz, (ActorRef actorRef) -> { Tuple2<String, String> addressHostname = extractAddressHostname(actorRef); return new FencedPekkoInvocationHandler<>( addressHostname.f0, addressHostname.f1, actorRef, configuration.getTimeout(), configuration.getMaximumFramesize(), configuration.isForceRpcInvocationSerialization(), null, () -> fencingToken, captureAskCallstacks, flinkClassLoader); }); }
3.68
flink_CollectionUtil_newLinkedHashMapWithExpectedSize
/** * Creates a new {@link LinkedHashMap} of the expected size, i.e. a hash map that will not * rehash if expectedSize many keys are inserted, considering the load factor. * * @param expectedSize the expected size of the created hash map. * @return a new hash map instance with enough capacity for the expected size. * @param <K> the type of keys maintained by this map. * @param <V> the type of mapped values. */ public static <K, V> LinkedHashMap<K, V> newLinkedHashMapWithExpectedSize(int expectedSize) { return new LinkedHashMap<>( computeRequiredCapacity(expectedSize, HASH_MAP_DEFAULT_LOAD_FACTOR), HASH_MAP_DEFAULT_LOAD_FACTOR); }
3.68
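A short sketch contrasting the helper above with a plain constructor call; the class location is assumed to be Flink's CollectionUtil.

    import org.apache.flink.util.CollectionUtil;
    import java.util.Map;

    public class ExpectedSizeMapExample {
        public static void main(String[] args) {
            // Sized so that inserting 1000 entries should not trigger a rehash,
            // unlike new LinkedHashMap<>(1000), whose 0.75 load factor is exceeded at ~750 entries.
            Map<String, Integer> counts = CollectionUtil.newLinkedHashMapWithExpectedSize(1000);
            for (int i = 0; i < 1000; i++) {
                counts.put("key-" + i, i);
            }
            System.out.println(counts.size()); // 1000
        }
    }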
hbase_HttpServer_setBindAddress
/** * @see #addEndpoint(URI) * @deprecated Since 0.99.0. Use {@link #addEndpoint(URI)} instead. */ @Deprecated public Builder setBindAddress(String bindAddress) { this.bindAddress = bindAddress; return this; }
3.68
flink_DelimitedInputFormat_fillBuffer
/** Fills the read buffer with bytes read from the file starting from an offset. */ private boolean fillBuffer(int offset) throws IOException { int maxReadLength = this.readBuffer.length - offset; // special case for reading the whole split. if (this.splitLength == FileInputFormat.READ_WHOLE_SPLIT_FLAG) { int read = this.stream.read(this.readBuffer, offset, maxReadLength); if (read == -1) { this.stream.close(); this.stream = null; return false; } else { this.readPos = offset; this.limit = read + offset; return true; } } // else .. int toRead; if (this.splitLength > 0) { // if we have more data, read that toRead = this.splitLength > maxReadLength ? maxReadLength : (int) this.splitLength; } else { // if we have exhausted our split, we need to complete the current record, or read one // more across the next split. // the reason is that the next split will skip over the beginning until it finds the // first // delimiter, discarding it as an incomplete chunk of data that belongs to the last // record in the // previous split. toRead = maxReadLength; this.overLimit = true; } int read = this.stream.read(this.readBuffer, offset, toRead); if (read == -1) { this.stream.close(); this.stream = null; return false; } else { this.splitLength -= read; this.readPos = offset; // position from where to start reading this.limit = read + offset; // number of valid bytes in the read buffer return true; } }
3.68
hbase_HRegionFileSystem_rename
/** * Renames a directory. Assumes the user has already checked for this directory existence. * @return true if rename is successful. */ boolean rename(Path srcpath, Path dstPath) throws IOException { IOException lastIOE = null; int i = 0; do { try { return fs.rename(srcpath, dstPath); } catch (IOException ioe) { lastIOE = ioe; if (!fs.exists(srcpath) && fs.exists(dstPath)) return true; // successful move // dir is not there, retry after some time. try { sleepBeforeRetry("Rename Directory", i + 1); } catch (InterruptedException e) { throw (InterruptedIOException) new InterruptedIOException().initCause(e); } } } while (++i <= hdfsClientRetriesNumber); throw new IOException("Exception in rename", lastIOE); }
3.68
streampipes_AssetLinkBuilder_build
/** * Builds and returns the final instance of AssetLink. * * @return The constructed AssetLink instance. */ public AssetLink build() { return this.assetLink; }
3.68
shardingsphere-elasticjob_JobFacade_postJobStatusTraceEvent
/** * Post job status trace event. * * @param taskId task Id * @param state job state * @param message job message */ public void postJobStatusTraceEvent(final String taskId, final State state, final String message) { TaskContext taskContext = TaskContext.from(taskId); jobTracingEventBus.post(new JobStatusTraceEvent(taskContext.getMetaInfo().getJobName(), taskContext.getId(), taskContext.getSlaveId(), taskContext.getType(), taskContext.getMetaInfo().getShardingItems().toString(), state, message)); if (!Strings.isNullOrEmpty(message)) { log.trace(message); } }
3.68
flink_CatalogManager_listTemporaryViews
/** * Returns a set of names of temporary views registered in the namespace of the current * catalog and database. * * @return names of registered temporary views */ public Set<String> listTemporaryViews() { return listTemporaryViewsInternal(getCurrentCatalog(), getCurrentDatabase()) .map(e -> e.getKey().getObjectName()) .collect(Collectors.toSet()); }
3.68
framework_FocusElementPanel_getFocusElement
/** * @return the focus element */ public com.google.gwt.user.client.Element getFocusElement() { return focusElement.cast(); }
3.68
querydsl_SerializerBase_getConstantLabel
/** * Generate a constant label under which to register a new constant in {@link #getConstantToLabel()}. * * @param value the constant value or parameter to create a constant for * @return the generated label */ @NotNull protected String getConstantLabel(Object value) { return constantPrefix + (getConstantToLabel().size() + 1); }
3.68
hbase_RegionReplicationBufferManager_increase
/** * Return whether we should just drop all the edits, if we have reached the hard limit of max * pending size. * @return {@code true} means OK, {@code false} means drop all the edits. */ public boolean increase(long size) { long sz = pendingSize.addAndGet(size); if (sz > softMaxPendingSize) { executor.execute(this::flush); } return sz <= maxPendingSize; }
3.68
framework_BrowserWindowOpener_setUrl
/** * Sets the provided URL {@code url} for this instance. The {@code url} will * be opened in a new browser window/tab when the extended component is * clicked. * * @since 7.4 * * @param url * URL to open */ public void setUrl(String url) { setResource(new ExternalResource(url)); }
3.68
morf_TableOutputter_createTitle
/** * Inserts a row at the top of the sheet with the given title * @param sheet to add the title to * @param title to add * @param fileName of the ALFA file to which the sheet relates */ private void createTitle(WritableSheet sheet, String title, String fileName) { try { //Friendly file name in A1 Label cell = new Label(0, 0, title); WritableFont headingFont = new WritableFont(WritableFont.ARIAL, 16, WritableFont.BOLD); WritableCellFormat headingFormat = new WritableCellFormat(headingFont); cell.setCellFormat(headingFormat); sheet.addCell(cell); //ALFA file name in B2 (hidden in white) cell = new Label(1, 1, fileName); WritableFont fileNameFont = new WritableFont(WritableFont.ARIAL,10,WritableFont.NO_BOLD,false,UnderlineStyle.NO_UNDERLINE,Colour.WHITE); WritableCellFormat fileNameFormat = new WritableCellFormat(fileNameFont); cell.setCellFormat(fileNameFormat); sheet.addCell(cell); //Copyright notice in M1 cell = new Label(12, 0, "Copyright " + new SimpleDateFormat("yyyy").format(new Date()) + " Alfa Financial Software Ltd."); WritableCellFormat copyrightFormat = new WritableCellFormat(); copyrightFormat.setAlignment(Alignment.RIGHT); cell.setCellFormat(copyrightFormat); sheet.addCell(cell); } catch (Exception e) { throw new RuntimeException(e); } }
3.68
querydsl_BeanPath_createDate
/** * Create a new Date path * * @param <A> * @param property property name * @param type property type * @return property path */ @SuppressWarnings("unchecked") protected <A extends Comparable> DatePath<A> createDate(String property, Class<? super A> type) { return add(new DatePath<A>((Class) type, forProperty(property))); }
3.68
shardingsphere-elasticjob_JobNodeStorage_executeInTransaction
/** * Execute operations in transaction. * * @param transactionOperations operations to be executed in transaction */ public void executeInTransaction(final List<TransactionOperation> transactionOperations) { List<TransactionOperation> result = new ArrayList<>(transactionOperations.size() + 1); result.add(TransactionOperation.opCheckExists("/")); result.addAll(transactionOperations); try { regCenter.executeInTransaction(result); // CHECKSTYLE:OFF } catch (final Exception ex) { // CHECKSTYLE:ON RegExceptionHandler.handleException(ex); } }
3.68
morf_DummyXmlOutputStreamProvider_openOutputStreamForTable
/** * @see org.alfasoftware.morf.xml.XmlStreamProvider.XmlOutputStreamProvider#openOutputStreamForTable(java.lang.String) */ @Override public OutputStream openOutputStreamForTable(String tableName) { return testOutputStream; }
3.68
hbase_SequenceIdAccounting_onRegionClose
/** * Clear all the records of the given region as it is going to be closed. * <p/> * We will call this once we get the region close marker. We need this because that, if we use * Durability.ASYNC_WAL, after calling startCacheFlush, we may still get some ongoing wal entries * that has not been processed yet, this will lead to orphan records in the * lowestUnflushedSequenceIds and then cause too many WAL files. * <p/> * See HBASE-23157 for more details. */ void onRegionClose(byte[] encodedRegionName) { synchronized (tieLock) { this.lowestUnflushedSequenceIds.remove(encodedRegionName); Map<ImmutableByteArray, Long> flushing = this.flushingSequenceIds.remove(encodedRegionName); if (flushing != null) { LOG.warn("Still have flushing records when closing {}, {}", Bytes.toString(encodedRegionName), flushing.entrySet().stream().map(e -> e.getKey().toString() + "->" + e.getValue()) .collect(Collectors.joining(",", "{", "}"))); } } this.highestSequenceIds.remove(encodedRegionName); }
3.68
hudi_ValidationUtils_checkState
/** * Ensures the truth of an expression involving the state of the calling instance, but not * involving any parameters to the calling method. * * @param expression a boolean expression * @param errorMessage - error message * @throws IllegalStateException if {@code expression} is false */ public static void checkState(final boolean expression, String errorMessage) { if (!expression) { throw new IllegalStateException(errorMessage); } }
3.68
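A trivial usage sketch of the precondition helper above; the import path is assumed from Hudi's common utilities.

    import org.apache.hudi.common.util.ValidationUtils;

    public class CheckStateExample {
        private boolean initialized = false;

        void process() {
            // Throws IllegalStateException with the given message when the instance state is wrong.
            ValidationUtils.checkState(initialized, "process() called before initialization");
        }

        public static void main(String[] args) {
            new CheckStateExample().process(); // throws IllegalStateException
        }
    }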
flink_CompressionUtils_extractTarFileUsingTar
// Copy and simplify from hadoop-common package that is used in YARN // See // https://github.com/apache/hadoop/blob/7f93349ee74da5f35276b7535781714501ab2457/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java private static void extractTarFileUsingTar( String inFilePath, String targetDirPath, boolean gzipped) throws IOException { inFilePath = makeSecureShellPath(inFilePath); targetDirPath = makeSecureShellPath(targetDirPath); String untarCommand = gzipped ? String.format( "gzip -dc '%s' | (cd '%s' && tar -xf -)", inFilePath, targetDirPath) : String.format("cd '%s' && tar -xf '%s'", targetDirPath, inFilePath); Process process = new ProcessBuilder("bash", "-c", untarCommand).start(); int exitCode = 0; try { exitCode = process.waitFor(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new IOException("Interrupted when untarring file " + inFilePath); } if (exitCode != 0) { throw new IOException( "Error untarring file " + inFilePath + ". Tar process exited with exit code " + exitCode); } }
3.68
hbase_CacheConfig_shouldReadBlockFromCache
/** * Return true if we may find this type of block in block cache. * <p> * TODO: today {@code family.isBlockCacheEnabled()} only means {@code cacheDataOnRead}, so here we * consider lots of other configurations such as {@code cacheDataOnWrite}. We should fix this in * the future, {@code cacheDataOnWrite} should honor the CF level {@code isBlockCacheEnabled} * configuration. */ public boolean shouldReadBlockFromCache(BlockType blockType) { if (cacheDataOnRead) { return true; } if (prefetchOnOpen) { return true; } if (cacheDataOnWrite) { return true; } if (blockType == null) { return true; } if ( blockType.getCategory() == BlockCategory.BLOOM || blockType.getCategory() == BlockCategory.INDEX ) { return true; } return false; }
3.68
flink_AsyncSinkBaseBuilder_setMaxInFlightRequests
/** * @param maxInFlightRequests maximum number of uncompleted calls to submitRequestEntries that * the SinkWriter will allow at any given point. Once this point has been reached, writes and * callbacks to add elements to the buffer may block until one or more requests to * submitRequestEntries complete. * @return {@link ConcreteBuilderT} itself */ public ConcreteBuilderT setMaxInFlightRequests(int maxInFlightRequests) { this.maxInFlightRequests = maxInFlightRequests; return (ConcreteBuilderT) this; }
3.68
flink_RocksDBStateBackend_setPredefinedOptions
/** * Sets the predefined options for RocksDB. * * <p>If user-configured options within {@link RocksDBConfigurableOptions} is set (through * flink-conf.yaml) or a user-defined options factory is set (via {@link * #setRocksDBOptions(RocksDBOptionsFactory)}), then the options from the factory are applied on * top of the here specified predefined options and customized options. * * @param options The options to set (must not be null). */ public void setPredefinedOptions(@Nonnull PredefinedOptions options) { rocksDBStateBackend.setPredefinedOptions(options); }
3.68
flink_SortedGrouping_withPartitioner
/** * Uses a custom partitioner for the grouping. * * @param partitioner The custom partitioner. * @return The grouping object itself, to allow for method chaining. */ public SortedGrouping<T> withPartitioner(Partitioner<?> partitioner) { Preconditions.checkNotNull(partitioner); getKeys().validateCustomPartitioner(partitioner, null); this.customPartitioner = partitioner; return this; }
3.68
hbase_FileArchiverNotifierImpl_getStoreFilesFromSnapshot
/** * Extracts the names of the store files referenced by this snapshot which satisfy the given * predicate (the predicate returns {@code true}). */ Set<StoreFileReference> getStoreFilesFromSnapshot(SnapshotManifest manifest, Predicate<String> filter) { Set<StoreFileReference> references = new HashSet<>(); // For each region referenced by the snapshot for (SnapshotRegionManifest rm : manifest.getRegionManifests()) { StoreFileReference regionReference = new StoreFileReference(ProtobufUtil.toRegionInfo(rm.getRegionInfo()).getEncodedName()); // For each column family in this region for (FamilyFiles ff : rm.getFamilyFilesList()) { final String familyName = ff.getFamilyName().toStringUtf8(); // And each store file in that family for (StoreFile sf : ff.getStoreFilesList()) { String storeFileName = sf.getName(); // A snapshot only "inherits" a files size if it uniquely refers to it (no table // and no other snapshot references it). if (filter.test(storeFileName)) { regionReference.addFamilyStoreFile(familyName, storeFileName); } } } // Only add this Region reference if we retained any files. if (!regionReference.getFamilyToFilesMapping().isEmpty()) { references.add(regionReference); } } return references; }
3.68
framework_ColorPickerHistory_hasColor
/** * Checks if the history contains given color. * * @param c * the color * * @return true, if successful */ public boolean hasColor(Color c) { return getColorHistory().contains(c); }
3.68
hadoop_HsJobPage_content
/** * The content of this page is the JobBlock * @return HsJobBlock.class */ @Override protected Class<? extends SubView> content() { return HsJobBlock.class; }
3.68
hbase_CloneSnapshotProcedure_prepareClone
/** * Action before any real action of cloning from snapshot. * @param env MasterProcedureEnv */ private void prepareClone(final MasterProcedureEnv env) throws IOException { final TableName tableName = getTableName(); if (env.getMasterServices().getTableDescriptors().exists(tableName)) { throw new TableExistsException(tableName); } // check whether ttl has expired for this snapshot if ( SnapshotDescriptionUtils.isExpiredSnapshot(snapshot.getTtl(), snapshot.getCreationTime(), EnvironmentEdgeManager.currentTime()) ) { throw new SnapshotTTLExpiredException(ProtobufUtil.createSnapshotDesc(snapshot)); } validateSFT(); }
3.68
flink_GenericDataSourceBase_getStatisticsKey
/** * Gets the key under which statistics about this data source may be obtained from the * statistics cache. * * @return The statistics cache key. */ public String getStatisticsKey() { return this.statisticsKey; }
3.68
shardingsphere-elasticjob_YamlEngine_marshal
/** * Marshal YAML. * * @param value object to be marshaled * @return YAML content */ public static String marshal(final Object value) { return new Yaml(new ElasticJobYamlRepresenter(new DumperOptions())).dumpAsMap(value); }
3.68
hudi_KafkaOffsetGen_computeOffsetRanges
/** * Compute the offset ranges to read from Kafka, while handling newly added partitions, skews, event limits. * * @param fromOffsetMap offsets where we left off last time * @param toOffsetMap offsets of where each partitions is currently at * @param numEvents maximum number of events to read. */ public static OffsetRange[] computeOffsetRanges(Map<TopicPartition, Long> fromOffsetMap, Map<TopicPartition, Long> toOffsetMap, long numEvents, long minPartitions) { // Create initial offset ranges for each 'to' partition, with default from = 0 offsets. OffsetRange[] ranges = toOffsetMap.keySet().stream().map(tp -> { long fromOffset = fromOffsetMap.getOrDefault(tp, 0L); return OffsetRange.create(tp, fromOffset, toOffsetMap.get(tp)); }) .sorted(SORT_BY_PARTITION) .collect(Collectors.toList()) .toArray(new OffsetRange[toOffsetMap.size()]); LOG.debug("numEvents {}, minPartitions {}, ranges {}", numEvents, minPartitions, ranges); boolean needSplitToMinPartitions = minPartitions > toOffsetMap.size(); long totalEvents = totalNewMessages(ranges); long allocedEvents = 0; Set<Integer> exhaustedPartitions = new HashSet<>(); List<OffsetRange> finalRanges = new ArrayList<>(); // choose the actualNumEvents with min(totalEvents, numEvents) long actualNumEvents = Math.min(totalEvents, numEvents); // keep going until we have events to allocate and partitions still not exhausted. while (allocedEvents < numEvents && exhaustedPartitions.size() < toOffsetMap.size()) { // Allocate the remaining events to non-exhausted partitions, in round robin fashion Set<Integer> allocatedPartitionsThisLoop = new HashSet<>(exhaustedPartitions); for (int i = 0; i < ranges.length; i++) { long remainingEvents = actualNumEvents - allocedEvents; long remainingPartitions = toOffsetMap.size() - allocatedPartitionsThisLoop.size(); // if need tp split into minPartitions, recalculate the remainingPartitions if (needSplitToMinPartitions) { remainingPartitions = minPartitions - finalRanges.size(); } long eventsPerPartition = (long) Math.ceil((1.0 * remainingEvents) / remainingPartitions); OffsetRange range = ranges[i]; if (exhaustedPartitions.contains(range.partition())) { continue; } long toOffset = Math.min(range.untilOffset(), range.fromOffset() + eventsPerPartition); if (toOffset == range.untilOffset()) { exhaustedPartitions.add(range.partition()); } allocedEvents += toOffset - range.fromOffset(); // We need recompute toOffset if allocedEvents larger than actualNumEvents. if (allocedEvents > actualNumEvents) { long offsetsToAdd = Math.min(eventsPerPartition, (actualNumEvents - allocedEvents)); toOffset = Math.min(range.untilOffset(), toOffset + offsetsToAdd); } OffsetRange thisRange = OffsetRange.create(range.topicPartition(), range.fromOffset(), toOffset); finalRanges.add(thisRange); ranges[i] = OffsetRange.create(range.topicPartition(), range.fromOffset() + thisRange.count(), range.untilOffset()); allocatedPartitionsThisLoop.add(range.partition()); } } if (!needSplitToMinPartitions) { LOG.debug("final ranges merged by topic partition {}", Arrays.toString(mergeRangesByTopicPartition(finalRanges.toArray(new OffsetRange[0])))); return mergeRangesByTopicPartition(finalRanges.toArray(new OffsetRange[0])); } finalRanges.sort(SORT_BY_PARTITION); LOG.debug("final ranges {}", Arrays.toString(finalRanges.toArray(new OffsetRange[0]))); return finalRanges.toArray(new OffsetRange[0]); }
3.68
hadoop_TaskTrackerInfo_getReasonForBlacklist
/** * Gets the reason for which the tasktracker was blacklisted. * * @return reason which tracker was blacklisted */ public String getReasonForBlacklist() { return reasonForBlacklist; }
3.68
zxing_DecoderResult_getByteSegments
/** * @return list of byte segments in the result, or {@code null} if not applicable */ public List<byte[]> getByteSegments() { return byteSegments; }
3.68
AreaShop_Materials_isSign
/** * Check if a Material is a sign (of either the wall or floor type). * @param name String to check * @return true if the given material is a sign */ public static boolean isSign(String name) { return name != null && (FLOOR_SIGN_TYPES.contains(name) || WALL_SIGN_TYPES.contains(name)); }
3.68
flink_CopyOnWriteSkipListStateMap_removeNode
/** * Remove the given node indicated by {@link SkipListNodePointers#currentNode}. * * @param pointers pointers of the node to remove and its prev/next node. * @param isLogicallyRemoved whether the node to remove is already logically removed. * @param returnOldState whether to return the old state after removal. * @return the old state if {@code returnOldState} is true, or else return null. */ private S removeNode( SkipListNodePointers pointers, Boolean isLogicallyRemoved, boolean returnOldState) { long prevNode = pointers.prevNode; long currentNode = pointers.currentNode; long nextNode = pointers.nextNode; // if the node has been logically removed, and can not be physically // removed here, just return null if (isLogicallyRemoved && highestRequiredSnapshotVersionPlusOne != 0) { return null; } long oldValuePointer; boolean oldValueNeedFree; if (highestRequiredSnapshotVersionPlusOne == 0) { // do physically remove only when there is no snapshot running oldValuePointer = doPhysicalRemoveAndGetValue(currentNode, prevNode, nextNode); // the node has been logically removed, and remove it from the set if (isLogicallyRemoved) { logicallyRemovedNodes.remove(currentNode); } oldValueNeedFree = true; } else { int version = SkipListUtils.helpGetNodeLatestVersion(currentNode, spaceAllocator); if (version < highestRequiredSnapshotVersionPlusOne) { // the newest-version value may be used by snapshots, and update it with // copy-on-write oldValuePointer = updateValueWithCopyOnWrite(currentNode, null); oldValueNeedFree = false; } else { // replace the newest-version value. oldValuePointer = updateValueWithReplace(currentNode, null); oldValueNeedFree = true; } helpSetNodeStatus(currentNode, NodeStatus.REMOVE); logicallyRemovedNodes.add(currentNode); } S oldState = null; if (returnOldState) { oldState = helpGetState(oldValuePointer); } if (oldValueNeedFree) { spaceAllocator.free(oldValuePointer); } return oldState; }
3.68
hbase_CellModel_setTimestamp
/** * @param timestamp the timestamp to set */ public void setTimestamp(long timestamp) { this.timestamp = timestamp; }
3.68
hudi_AvroSchemaCompatibility_getReaderFragment
/** * Returns the fragment of the reader schema that failed compatibility check. * * @return a Schema instance (fragment of the reader schema). */ public Schema getReaderFragment() { return mReaderFragment; }
3.68
framework_ProgressBar_setValue
/** * Sets the value of this progress bar. The value is a <code>float</code> * between 0 and 1 where 0 represents no progress at all and 1 represents * fully completed. * * @param newValue * the current progress value */ @Override public void setValue(Float newValue) { super.setValue(newValue); }
3.68
zxing_CodaBarReader_toNarrowWidePattern
// Assumes that counters[position] is a bar. private int toNarrowWidePattern(int position) { int end = position + 7; if (end >= counterLength) { return -1; } int[] theCounters = counters; int maxBar = 0; int minBar = Integer.MAX_VALUE; for (int j = position; j < end; j += 2) { int currentCounter = theCounters[j]; if (currentCounter < minBar) { minBar = currentCounter; } if (currentCounter > maxBar) { maxBar = currentCounter; } } int thresholdBar = (minBar + maxBar) / 2; int maxSpace = 0; int minSpace = Integer.MAX_VALUE; for (int j = position + 1; j < end; j += 2) { int currentCounter = theCounters[j]; if (currentCounter < minSpace) { minSpace = currentCounter; } if (currentCounter > maxSpace) { maxSpace = currentCounter; } } int thresholdSpace = (minSpace + maxSpace) / 2; int bitmask = 1 << 7; int pattern = 0; for (int i = 0; i < 7; i++) { int threshold = (i & 1) == 0 ? thresholdBar : thresholdSpace; bitmask >>= 1; if (theCounters[position + i] > threshold) { pattern |= bitmask; } } for (int i = 0; i < CHARACTER_ENCODINGS.length; i++) { if (CHARACTER_ENCODINGS[i] == pattern) { return i; } } return -1; }
3.68
flink_FlinkAssertions_anyCauseMatches
/** * Shorthand to assert the chain of causes includes a {@link Throwable} matching a specific * {@link Class} and containing the provided message. Same as: * * <pre>{@code * assertThatChainOfCauses(throwable) * .anySatisfy( * cause -> * assertThat(cause) * .hasMessageContaining(containsMessage)); * }</pre> */ public static ThrowingConsumer<? super Throwable> anyCauseMatches(String containsMessage) { return t -> assertThatChainOfCauses(t) .as("Any cause contains message '%s'", containsMessage) .anySatisfy(t1 -> assertThat(t1).hasMessageContaining(containsMessage)); }
3.68
morf_SqlDialect_getSqlforLength
/** * Converts the LENGTH function into SQL. * * @param function the function to convert. * @return a string representation of the SQL. * @see org.alfasoftware.morf.sql.element.Function#length(AliasedField) */ protected String getSqlforLength(Function function) { return String.format("LENGTH(%s)", getSqlFrom(function.getArguments().get(0))); }
3.68
hudi_QuickstartUtils_generateRangeRandomTimestamp
/** * Generate a random timestamp within the range from {@code daysTillNow} days ago to now. */ private static long generateRangeRandomTimestamp(int daysTillNow) { long maxIntervalMillis = daysTillNow * 24 * 60 * 60 * 1000L; return System.currentTimeMillis() - (long) (Math.random() * maxIntervalMillis); }
3.68
framework_FreeformQuery_getPrimaryKeyColumns
/* * (non-Javadoc) * * @see com.vaadin.data.util.sqlcontainer.query.QueryDelegate# * getPrimaryKeyColumns () */ @Override public List<String> getPrimaryKeyColumns() { return primaryKeyColumns; }
3.68
flink_RetractableTopNFunction_retractRecordWithRowNumber
/** * Retract the input record and emit updated records. This works for outputting with row_number. * * @return true if the input record has been removed from {@link #dataState}. */ private boolean retractRecordWithRowNumber( SortedMap<RowData, Long> sortedMap, RowData sortKey, RowData inputRow, Collector<RowData> out) throws Exception { Iterator<Map.Entry<RowData, Long>> iterator = sortedMap.entrySet().iterator(); long currentRank = 0L; RowData prevRow = null; boolean findsSortKey = false; while (iterator.hasNext() && isInRankEnd(currentRank)) { Map.Entry<RowData, Long> entry = iterator.next(); RowData key = entry.getKey(); if (!findsSortKey && key.equals(sortKey)) { List<RowData> inputs = dataState.get(key); if (inputs == null) { processStateStaled(iterator); } else { Iterator<RowData> inputIter = inputs.iterator(); while (inputIter.hasNext() && isInRankEnd(currentRank)) { RowData currentRow = inputIter.next(); if (!findsSortKey && equaliser.equals(currentRow, inputRow)) { prevRow = currentRow; findsSortKey = true; inputIter.remove(); } else if (findsSortKey) { collectUpdateBefore(out, prevRow, currentRank); collectUpdateAfter(out, currentRow, currentRank); prevRow = currentRow; } currentRank += 1; } if (inputs.isEmpty()) { dataState.remove(key); } else { dataState.put(key, inputs); } } } else if (findsSortKey) { List<RowData> inputs = dataState.get(key); if (inputs == null) { processStateStaled(iterator); } else { int i = 0; while (i < inputs.size() && isInRankEnd(currentRank)) { RowData currentRow = inputs.get(i); collectUpdateBefore(out, prevRow, currentRank); collectUpdateAfter(out, currentRow, currentRank); prevRow = currentRow; currentRank += 1; i++; } } } else { currentRank += entry.getValue(); } } if (isInRankEnd(currentRank)) { if (!findsSortKey && null == prevRow) { stateStaledErrorHandle(); } else { // there is no enough elements in Top-N, emit DELETE message for the retract record. collectDelete(out, prevRow, currentRank); } } return findsSortKey; }
3.68
hadoop_JavaCommandLineBuilder_define
/** * Add a <code>-D key=val</code> command to the CLI. This is very Hadoop API * @param key key * @param val value * @throws IllegalArgumentException if either argument is null */ public void define(String key, String val) { Preconditions.checkArgument(key != null, "null key"); Preconditions.checkArgument(val != null, "null value"); add("-D", key + "=" + val); }
3.68
hudi_CloudObjectsSelectorCommon_checkIfFileExists
/** * Check if file with given path URL exists * @param storageUrlSchemePrefix Eg: s3:// or gs://. The storage-provider-specific prefix to use within the URL. */ private static boolean checkIfFileExists(String storageUrlSchemePrefix, String bucket, String filePathUrl, Configuration configuration) { try { FileSystem fs = FSUtils.getFs(storageUrlSchemePrefix + bucket, configuration); return fs.exists(new Path(filePathUrl)); } catch (IOException ioe) { String errMsg = String.format("Error while checking path exists for %s ", filePathUrl); LOG.error(errMsg, ioe); throw new HoodieIOException(errMsg, ioe); } }
3.68
flink_JobManagerCheckpointStorage_getSavepointPath
/** @return The default location where savepoints will be externalized if set. */ @Nullable public Path getSavepointPath() { return location.getBaseSavepointPath(); }
3.68
querydsl_SQLExpressions_regrAvgx
/** * REGR_AVGX evaluates the average of the independent variable (arg2) of the regression line. * * @param arg1 first arg * @param arg2 second arg * @return regr_avgx(arg1, arg2) */ public static WindowOver<Double> regrAvgx(Expression<? extends Number> arg1, Expression<? extends Number> arg2) { return new WindowOver<Double>(Double.class, SQLOps.REGR_AVGX, arg1, arg2); }
3.68
framework_AbstractDateField_getRangeEnd
/** * Returns the precise rangeEnd used. * * @return the precise rangeEnd used, may be {@code null}. */ public T getRangeEnd() { return convertFromDateString(getState(false).rangeEnd); }
3.68
hbase_ColumnValueFilter_parseFrom
/** * Parse a serialized representation of {@link ColumnValueFilter} * @param pbBytes A pb serialized {@link ColumnValueFilter} instance * @return An instance of {@link ColumnValueFilter} made from <code>bytes</code> * @throws DeserializationException if an error occurred * @see #toByteArray */ public static ColumnValueFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.ColumnValueFilter proto; try { proto = FilterProtos.ColumnValueFilter.parseFrom(pbBytes); } catch (InvalidProtocolBufferException e) { throw new DeserializationException(e); } final CompareOperator compareOp = CompareOperator.valueOf(proto.getCompareOp().name()); final ByteArrayComparable comparator; try { comparator = ProtobufUtil.toComparator(proto.getComparator()); } catch (IOException ioe) { throw new DeserializationException(ioe); } return new ColumnValueFilter(proto.getFamily().toByteArray(), proto.getQualifier().toByteArray(), compareOp, comparator); }
3.68
hadoop_BlockRecoveryCommand_getNewBlock
/** * Return the new block. */ public Block getNewBlock() { return recoveryBlock; }
3.68
morf_TruncateStatement_toString
/** * @see java.lang.Object#toString() */ @Override public String toString() { return "SQL TRUNCATE TABLE [" + table + "]"; }
3.68
shardingsphere-elasticjob_JobErrorHandlerReloader_reloadIfNecessary
/** * Reload if necessary. * * @param jobConfig job configuration */ public synchronized void reloadIfNecessary(final JobConfiguration jobConfig) { if (jobErrorHandler.getType().equals(jobConfig.getJobErrorHandlerType()) && props.equals(jobConfig.getProps())) { return; } jobErrorHandler.close(); init(jobConfig); }
3.68
open-banking-gateway_PairIdPsuAspspTuple_toDatasafePathWithoutPsu
/** * Computes current tuples' Datasafe storage path. * @return Datasafe path corresponding to current tuple */ public String toDatasafePathWithoutPsu() { return pairId.toString() + "/" + this.aspspId; }
3.68
pulsar_OpenIDProviderMetadataCache_verifyIssuer
/** * Verify the issuer url, as required by the OpenID Connect spec: * * Per the OpenID Connect Discovery spec, the issuer value returned MUST be identical to the * Issuer URL that was directly used to retrieve the configuration information. This MUST also * be identical to the iss Claim value in ID Tokens issued from this Issuer. * https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationValidation * * @param issuer - the issuer used to retrieve the metadata * @param metadata - the OpenID Provider Metadata * @param isK8s - whether the issuer is represented by the Kubernetes API server. This affects error reporting. * @throws AuthenticationException if the issuer does not exactly match the metadata issuer */ private void verifyIssuer(@Nonnull String issuer, OpenIDProviderMetadata metadata, boolean isK8s) throws AuthenticationException { if (!issuer.equals(metadata.getIssuer())) { if (isK8s) { incrementFailureMetric(AuthenticationExceptionCode.UNSUPPORTED_ISSUER); throw new AuthenticationException("Issuer not allowed: " + issuer); } else { incrementFailureMetric(AuthenticationExceptionCode.ISSUER_MISMATCH); throw new AuthenticationException(String.format("Issuer URL mismatch: [%s] should match [%s]", issuer, metadata.getIssuer())); } } }
3.68
flink_HiveASTParseDriver_parseHint
/* * Parse a string as a query hint. */ public HiveParserASTNode parseHint(String command) throws HiveASTParseException { LOG.info("Parsing hint: " + command); HiveLexerX lexer = new HiveLexerX(new ANTLRNoCaseStringStream(command)); TokenRewriteStream tokens = new TokenRewriteStream(lexer); HiveASTHintParser parser = new HiveASTHintParser(tokens); parser.setTreeAdaptor(ADAPTOR); HiveASTHintParser.hint_return r = null; try { r = parser.hint(); } catch (RecognitionException e) { throw new HiveASTParseException(parser.errors); } if (lexer.getErrors().size() == 0 && parser.errors.size() == 0) { LOG.info("Parse Completed"); } else if (lexer.getErrors().size() != 0) { throw new HiveASTParseException(lexer.getErrors()); } else { throw new HiveASTParseException(parser.errors); } return r.getTree(); }
3.68
framework_Slot_setSpacingResizeListener
/** * Sets the spacing element resize listener for this slot. * * @param spacingResizeListener * the listener to set, or {@code null} to remove a previously * set listener */ public void setSpacingResizeListener( ElementResizeListener spacingResizeListener) { detachListeners(); this.spacingResizeListener = spacingResizeListener; attachListeners(); }
3.68
hbase_HBaseTestingUtility_getConnection
/** * Get an assigned Connection to the cluster. This method is thread safe. * @param user assigned user * @return A Connection with assigned user. */ public Connection getConnection(User user) throws IOException { return getAsyncConnection(user).toConnection(); }
3.68
hadoop_FederationCache_buildGetApplicationHomeSubClusterRequest
/** * Build GetApplicationHomeSubCluster CacheRequest. * * @param cacheKey cacheKey. * @param applicationId applicationId. * @return CacheRequest. * @throws YarnException exceptions from yarn servers. */ protected CacheRequest<String, CacheResponse<SubClusterId>> buildGetApplicationHomeSubClusterRequest(String cacheKey, ApplicationId applicationId) throws YarnException { CacheResponse<SubClusterId> response = buildSubClusterIdResponse(applicationId); return new CacheRequest<>(cacheKey, response); }
3.68
morf_SqlServer_formatJdbcUrl
/** * @see org.alfasoftware.morf.jdbc.DatabaseType#formatJdbcUrl(org.alfasoftware.morf.jdbc.JdbcUrlElements) */ @Override public String formatJdbcUrl(JdbcUrlElements jdbcUrlElements) { return "jdbc:sqlserver://" + jdbcUrlElements.getHostName() + (StringUtils.isNotBlank(jdbcUrlElements.getInstanceName()) ? "\\" + jdbcUrlElements.getInstanceName() : "") + (jdbcUrlElements.getPort() == 0 ? "" : ":" + jdbcUrlElements.getPort()) + (StringUtils.isNotBlank(jdbcUrlElements.getDatabaseName()) ? ";database=" + jdbcUrlElements.getDatabaseName() : ""); }
3.68