name (stringlengths 12–178) | code_snippet (stringlengths 8–36.5k) | score (float64 3.26–3.68) |
---|---|---|
rocketmq-connect_Serializer_serialize | /**
 * Convert data into a byte array.
 * @param topic the topic associated with the data
 * @param extensions additional key/value extensions
 * @param data the data to serialize
 * @return serialized bytes
*/
default byte[] serialize(String topic, KeyValue extensions, T data) {
return serialize(topic, data);
} | 3.68 |
hadoop_JsonSerialization_load | /**
* Load from a Hadoop filesystem.
* If a file status is supplied, it's passed in to the openFile()
* call so that FS implementations can optimize their opening.
* @param fs filesystem
* @param path path
* @param status status of the file to open.
* @return a loaded object
* @throws PathIOException JSON parse problem
* @throws EOFException file status references an empty file
* @throws IOException IO problems
*/
public T load(FileSystem fs, Path path, @Nullable FileStatus status)
throws IOException {
if (status != null && status.getLen() == 0) {
throw new EOFException("No data in " + path);
}
FutureDataInputStreamBuilder builder = fs.openFile(path)
.opt(FS_OPTION_OPENFILE_READ_POLICY,
FS_OPTION_OPENFILE_READ_POLICY_WHOLE_FILE);
if (status != null) {
builder.withFileStatus(status);
}
try (FSDataInputStream dataInputStream =
awaitFuture(builder.build())) {
return fromJsonStream(dataInputStream);
} catch (JsonProcessingException e) {
throw new PathIOException(path.toString(),
"Failed to read JSON file " + e, e);
}
} | 3.68 |
hadoop_S3ARemoteInputStream_setInputPolicy | /**
* Set/update the input policy of the stream.
* This updates the stream statistics.
* @param inputPolicy new input policy.
*/
private void setInputPolicy(S3AInputPolicy inputPolicy) {
this.inputPolicy = inputPolicy;
streamStatistics.inputPolicySet(inputPolicy.ordinal());
} | 3.68 |
framework_BinderValidationStatus_notifyBindingValidationStatusHandlers | /**
* Notifies validation status handlers for bindings that pass given filter.
* The filter should return {@code true} for each
* {@link BindingValidationStatus} that should be delegated to the status
* handler in the binding.
*
* @see #notifyBindingValidationStatusHandlers()
*
* @param filter
* the filter to select bindings to run status handling for
*
* @since 8.2
*/
public void notifyBindingValidationStatusHandlers(
SerializablePredicate<BindingValidationStatus<?>> filter) {
bindingStatuses.stream().filter(filter).forEach(s -> s.getBinding()
.getValidationStatusHandler().statusChange(s));
} | 3.68 |
rocketmq-connect_ServiceProviderUtil_getStateManagementService | /**
 * Get the state management service by class name.
 *
 * @param stateManagementServiceClazz fully qualified class name of the service implementation; falls back to the local implementation if empty
 * @return the matching {@link StateManagementService} discovered via {@link ServiceLoader}
*/
@NotNull
public static StateManagementService getStateManagementService(String stateManagementServiceClazz) {
if (StringUtils.isEmpty(stateManagementServiceClazz)) {
stateManagementServiceClazz = LocalStateManagementServiceImpl.class.getName();
}
StateManagementService stateManagementService = null;
ServiceLoader<StateManagementService> stateManagementServices = ServiceLoader.load(StateManagementService.class);
Iterator<StateManagementService> stateManagementServiceIterator = stateManagementServices.iterator();
while (stateManagementServiceIterator.hasNext()) {
StateManagementService currentStateManagementService = stateManagementServiceIterator.next();
if (currentStateManagementService.getClass().getName().equals(stateManagementServiceClazz)) {
stateManagementService = currentStateManagementService;
break;
}
}
if (null == stateManagementService) {
throw new ConnectException("StateManagementService class " + stateManagementServiceClazz + " not " +
"found");
}
return stateManagementService;
} | 3.68 |
hudi_HoodieWriteCommitCallbackUtil_convertToJsonString | /**
* Convert data to json string format.
*/
public static String convertToJsonString(Object obj) {
try {
return mapper.writeValueAsString(obj);
} catch (IOException e) {
throw new HoodieCommitCallbackException("Callback service convert data to json failed", e);
}
} | 3.68 |
framework_AbstractClientConnector_addListener | /**
* Convenience method for registering a new listener with the specified
* activation method to listen events generated by this component. If the
* activation method does not have any arguments the event object will not
* be passed to it when it's called.
*
* <p>
* This version of <code>addListener</code> gets the name of the activation
* method as a parameter. The actual method is reflected from
* <code>object</code>, and unless exactly one match is found,
* <code>java.lang.IllegalArgumentException</code> is thrown.
* </p>
*
* <p>
* For more information on the inheritable event mechanism see the
* {@link com.vaadin.event com.vaadin.event package documentation}.
* </p>
*
* <p>
* Note: Using this method is discouraged because it cannot be checked
* during compilation. Use {@link #addListener(Class, Object, Method)} or
 * {@link #addListener(String, Class, Object, Method)} instead. </p>
*
* @param eventType
* the type of the listened event. Events of this type or its
* subclasses activate the listener.
* @param listener
* the object instance who owns the activation method.
* @param methodName
* the name of the activation method.
* @return a registration object for removing the listener
* @deprecated This method has only been added for ease of migration and
* should be avoided in new code.
* Use
* {@link #addListener(Class, SerializableEventListener, Method)}
* or
* {@link #addListener(String, Class, SerializableEventListener, Method)}
* instead.
* @since 8.12
*/
@Override
@Deprecated
public Registration addListener(Class<?> eventType,
SerializableEventListener listener, String methodName) {
if (eventRouter == null) {
eventRouter = new EventRouter();
}
return eventRouter.addListener(eventType, listener, methodName);
} | 3.68 |
flink_TupleTypeInfoBase_getFieldTypes | /** Returns the field types. */
public TypeInformation<?>[] getFieldTypes() {
return types;
} | 3.68 |
rocketmq-connect_TimestampIncrementingQuerier_endTimestampValue | //Get end timestamp from db
@Override
public Timestamp endTimestampValue(Timestamp beginTime) throws SQLException {
long endTimestamp;
final long currentDbTime = dialect.currentTimeOnDB(
stmt.getConnection(),
DateTimeUtils.getTimeZoneCalendar(timeZone)
).getTime();
endTimestamp = currentDbTime - timestampDelay;
return new Timestamp(endTimestamp);
} | 3.68 |
morf_SqlDialect_oldTableForChangeColumn | /**
* Construct the old table for a change column
* @param table The table to change
* @param oldColumn The old column
* @param newColumn The new column
* @return The 'old' table
*
*/
protected Table oldTableForChangeColumn(Table table, Column oldColumn, Column newColumn) {
return new ChangeColumn(table.getName(), oldColumn, newColumn).reverse(SchemaUtils.schema(table)).getTable(table.getName());
} | 3.68 |
flink_StateTable_indexToOffset | /** Translates a key-group id to the internal array offset. */
private int indexToOffset(int index) {
return index - getKeyGroupOffset();
} | 3.68 |
hudi_HoodieConsistentBucketLayout_determinesNumFileGroups | /**
* Bucketing controls the number of file groups directly.
*/
@Override
public boolean determinesNumFileGroups() {
return true;
} | 3.68 |
druid_MySQL8DateTimeSqlTypeFilter_getObjectReplaceLocalDateTime | /**
 * For MySQL JDBC 8.0.23 and later, this method converts the returned object back to the original (legacy) type.
 *
 * @param obj the object returned by the driver
 * @return the original object, or a {@link Timestamp} if the object is a {@link LocalDateTime}
*/
public static Object getObjectReplaceLocalDateTime(Object obj) {
if (!(obj instanceof LocalDateTime)) {
return obj;
}
// For drivers upgraded to MySQL JDBC 8.0.23 or later, convert back to the legacy compatible type
return Timestamp.valueOf((LocalDateTime) obj);
} | 3.68 |
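Illustration (not part of the dataset row above): a minimal, hedged usage sketch of the snippet's public static method; the Druid import path and the surrounding example class are assumptions, not part of the source.

import java.sql.Timestamp;
import java.time.LocalDateTime;
// Assumed package; adjust to wherever MySQL8DateTimeSqlTypeFilter lives in your Druid version.
import com.alibaba.druid.filter.mysql8datetime.MySQL8DateTimeSqlTypeFilter;

public class LocalDateTimeCompatExample {
    public static void main(String[] args) {
        Object fromDriver = LocalDateTime.of(2024, 1, 1, 12, 0);
        Object converted = MySQL8DateTimeSqlTypeFilter.getObjectReplaceLocalDateTime(fromDriver);
        // LocalDateTime values are mapped back to the legacy java.sql.Timestamp type...
        System.out.println(converted instanceof Timestamp); // true
        // ...while any other object is returned unchanged.
        System.out.println(MySQL8DateTimeSqlTypeFilter.getObjectReplaceLocalDateTime("plain string"));
    }
}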
graphhopper_AbstractBidirCHAlgo_fillEdgesToUsingFilter | /**
* @see #fillEdgesFromUsingFilter(CHEdgeFilter)
*/
protected void fillEdgesToUsingFilter(CHEdgeFilter edgeFilter) {
// we temporarily ignore the additionalEdgeFilter
CHEdgeFilter tmpFilter = levelEdgeFilter;
levelEdgeFilter = edgeFilter;
finishedTo = !fillEdgesTo();
levelEdgeFilter = tmpFilter;
} | 3.68 |
flink_JobManagerCheckpointStorage_createFromConfig | /**
* Creates a new {@link JobManagerCheckpointStorage} using the given configuration.
*
* @param config The Flink configuration (loaded by the TaskManager).
 * @param classLoader The class loader that should be used to load the checkpoint storage.
* @return The created checkpoint storage.
* @throws IllegalConfigurationException If the configuration misses critical values, or
* specifies invalid values
*/
public static JobManagerCheckpointStorage createFromConfig(
ReadableConfig config, ClassLoader classLoader) throws IllegalConfigurationException {
try {
return new JobManagerCheckpointStorage().configure(config, classLoader);
} catch (IllegalArgumentException e) {
throw new IllegalConfigurationException(
"Invalid configuration for the state backend", e);
}
} | 3.68 |
flink_Tuple13_copy | /**
* Shallow tuple copy.
*
* @return A new Tuple with the same fields as this.
*/
@Override
@SuppressWarnings("unchecked")
public Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> copy() {
return new Tuple13<>(
this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7, this.f8,
this.f9, this.f10, this.f11, this.f12);
} | 3.68 |
graphhopper_PbfFieldDecoder_decodeTimestamp | /**
* Decodes a raw timestamp value into a Date.
* <p>
*
* @param rawTimestamp The PBF encoded timestamp.
* @return The timestamp as a Date.
*/
public Date decodeTimestamp(long rawTimestamp) {
return new Date(dateGranularity * rawTimestamp);
} | 3.68 |
morf_Function_every | /**
* Helper method to create an instance of the "every" SQL function.
*
* @param fieldToEvaluate the field to evaluate in the every function.
* @return an instance of the every function
*/
public static Function every(AliasedField fieldToEvaluate) {
return new Function(FunctionType.EVERY, fieldToEvaluate);
} | 3.68 |
flink_PushCalcPastChangelogNormalizeRule_partitionPrimaryKeyPredicates | /**
 * Separates the given {@code predicates} into filters which affect only the primary key and
* anything else.
*/
private void partitionPrimaryKeyPredicates(
List<RexNode> predicates,
Set<Integer> primaryKeyIndices,
List<RexNode> primaryKeyPredicates,
List<RexNode> remainingPredicates) {
for (RexNode predicate : predicates) {
int[] inputRefs = extractRefInputFields(Collections.singletonList(predicate));
if (Arrays.stream(inputRefs).allMatch(primaryKeyIndices::contains)) {
primaryKeyPredicates.add(predicate);
} else {
remainingPredicates.add(predicate);
}
}
} | 3.68 |
hmily_HmilyRepositoryEventDispatcher_doDispatch | /**
* Do event dispatch.
*
* @param event the event
*/
public void doDispatch(final HmilyRepositoryEvent event) {
EventTypeEnum eventTypeEnum = EventTypeEnum.buildByCode(event.getType());
HmilyTransaction hmilyTransaction = event.getHmilyTransaction();
HmilyParticipant hmilyParticipant = event.getHmilyParticipant();
HmilyParticipantUndo hmilyParticipantUndo = event.getHmilyParticipantUndo();
switch (eventTypeEnum) {
case CREATE_HMILY_TRANSACTION:
HmilyRepositoryFacade.getInstance().createHmilyTransaction(event.getHmilyTransaction());
break;
case REMOVE_HMILY_TRANSACTION:
HmilyRepositoryFacade.getInstance().removeHmilyTransaction(hmilyTransaction.getTransId());
break;
case UPDATE_HMILY_TRANSACTION_STATUS:
HmilyRepositoryFacade.getInstance().updateHmilyTransactionStatus(hmilyTransaction.getTransId(), hmilyTransaction.getStatus());
break;
case CREATE_HMILY_PARTICIPANT:
HmilyRepositoryFacade.getInstance().createHmilyParticipant(event.getHmilyParticipant());
break;
case UPDATE_HMILY_PARTICIPANT_STATUS:
HmilyRepositoryFacade.getInstance().updateHmilyParticipantStatus(hmilyParticipant.getParticipantId(), hmilyParticipant.getStatus());
break;
case REMOVE_HMILY_PARTICIPANT:
HmilyRepositoryFacade.getInstance().removeHmilyParticipant(hmilyParticipant.getParticipantId());
break;
case CREATE_HMILY_PARTICIPANT_UNDO:
HmilyRepositoryFacade.getInstance().createHmilyParticipantUndo(hmilyParticipantUndo);
break;
case REMOVE_HMILY_PARTICIPANT_UNDO:
HmilyRepositoryFacade.getInstance().removeHmilyParticipantUndo(hmilyParticipantUndo.getUndoId());
break;
case WRITE_HMILY_LOCKS:
HmilyRepositoryFacade.getInstance().writeHmilyLocks(event.getHmilyLocks());
break;
case RELEASE_HMILY_LOCKS:
HmilyRepositoryFacade.getInstance().releaseHmilyLocks(event.getHmilyLocks());
break;
default:
break;
}
} | 3.68 |
hbase_HttpServer_addFilterPathMapping | /**
* Add the path spec to the filter path mapping.
* @param pathSpec The path spec
* @param webAppCtx The WebApplicationContext to add to
*/
protected void addFilterPathMapping(String pathSpec, WebAppContext webAppCtx) {
for (String name : filterNames) {
FilterMapping fmap = new FilterMapping();
fmap.setPathSpec(pathSpec);
fmap.setFilterName(name);
fmap.setDispatches(FilterMapping.ALL);
webAppCtx.getServletHandler().addFilterMapping(fmap);
}
} | 3.68 |
hudi_HoodieMetaSyncOperations_getLastCommitCompletionTimeSynced | /**
* Get the commit completion time of last sync
*/
default Option<String> getLastCommitCompletionTimeSynced(String tableName) {
return Option.empty();
} | 3.68 |
flink_WindowMapState_iterator | /**
* Iterates over all the mappings in the state.
*
* @return An iterator over all the mappings in the state
* @throws Exception Thrown if the system cannot access the state.
*/
public Iterator<Map.Entry<RowData, UV>> iterator(W window) throws Exception {
windowState.setCurrentNamespace(window);
return windowState.iterator();
} | 3.68 |
framework_BarInUIDL_getTicketNumber | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber()
*/
@Override
protected Integer getTicketNumber() {
return 12404;
} | 3.68 |
framework_StringToByteConverter_convertToModel | /*
* (non-Javadoc)
*
* @see
* com.vaadin.data.util.converter.Converter#convertToModel(java.lang.Object,
* java.lang.Class, java.util.Locale)
*/
@Override
public Byte convertToModel(String value, Class<? extends Byte> targetType,
Locale locale) throws ConversionException {
Number n = convertToNumber(value, targetType, locale);
if (n == null) {
return null;
}
byte byteValue = n.byteValue();
if (byteValue == n.longValue()) {
return byteValue;
}
throw new ConversionException("Could not convert '" + value + "' to "
+ Byte.class.getName() + ": value out of range");
} | 3.68 |
framework_ApplicationConnection_getWidgetSet | /**
* @since 7.6
* @return the widget set
*/
public WidgetSet getWidgetSet() {
return widgetSet;
} | 3.68 |
hudi_BoundedFsDataInputStream_getFileLength | /* Return the file length */
private long getFileLength() throws IOException {
if (fileLen == -1L) {
fileLen = fs.getContentSummary(file).getLength();
}
return fileLen;
} | 3.68 |
flink_AlterSchemaConverter_convertAlterSchema | /** Convert ALTER TABLE DROP WATERMARK to generate an updated {@link Schema}. */
public Operation convertAlterSchema(
SqlAlterTableDropWatermark dropWatermark, ResolvedCatalogTable oldTable) {
if (oldTable.getResolvedSchema().getWatermarkSpecs().isEmpty()) {
throw new ValidationException(
String.format(
"%sThe base table does not define any watermark strategy.",
EX_MSG_PREFIX));
}
Schema.Builder schemaBuilder = Schema.newBuilder();
buildUpdatedColumn(
schemaBuilder,
oldTable,
(builder, column) -> builder.fromColumns(Collections.singletonList(column)));
buildUpdatedPrimaryKey(schemaBuilder, oldTable, Function.identity());
return buildAlterTableChangeOperation(
dropWatermark,
Collections.singletonList(TableChange.dropWatermark()),
schemaBuilder.build(),
oldTable);
} | 3.68 |
hbase_MergeTableRegionsProcedure_isRollbackSupported | /*
* Check whether we are in the state that can be rolled back
*/
@Override
protected boolean isRollbackSupported(final MergeTableRegionsState state) {
switch (state) {
case MERGE_TABLE_REGIONS_POST_OPERATION:
case MERGE_TABLE_REGIONS_OPEN_MERGED_REGION:
case MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION:
case MERGE_TABLE_REGIONS_UPDATE_META:
// It is not safe to rollback in these states.
return false;
default:
break;
}
return true;
} | 3.68 |
flink_EmbeddedRocksDBStateBackend_getMemoryConfiguration | /**
* Gets the memory configuration object, which offers settings to control RocksDB's memory
* usage.
*/
public RocksDBMemoryConfiguration getMemoryConfiguration() {
return memoryConfiguration;
} | 3.68 |
AreaShop_AreaShop_getPermissionProvider | /**
* Get the Vault permissions provider.
* @return Vault permissions provider
*/
public net.milkbowl.vault.permission.Permission getPermissionProvider() {
RegisteredServiceProvider<net.milkbowl.vault.permission.Permission> permissionProvider = getServer().getServicesManager().getRegistration(net.milkbowl.vault.permission.Permission.class);
if (permissionProvider == null || permissionProvider.getProvider() == null) {
return null;
}
return permissionProvider.getProvider();
} | 3.68 |
framework_VDateField_getId | /**
* Returns the connector id that corresponds with this widget.
*
* @return the connector id
* @deprecated This method is not used by the framework code anymore.
*/
@Deprecated
public String getId() {
return connector.getConnectorId();
} | 3.68 |
hbase_PrivateCellUtil_write | /**
* Made into a static method so as to reuse the logic within
* ValueAndTagRewriteByteBufferExtendedCell
*/
static void write(ByteBuffer buf, int offset, Cell cell, byte[] value, byte[] tags) {
offset = ByteBufferUtils.putInt(buf, offset, KeyValueUtil.keyLength(cell));// Key length
offset = ByteBufferUtils.putInt(buf, offset, value.length);// Value length
offset = KeyValueUtil.appendKeyTo(cell, buf, offset);
ByteBufferUtils.copyFromArrayToBuffer(buf, offset, value, 0, value.length);
offset += value.length;
int tagsLen = tags == null ? 0 : tags.length;
if (tagsLen > 0) {
offset = ByteBufferUtils.putAsShort(buf, offset, tagsLen);
ByteBufferUtils.copyFromArrayToBuffer(buf, offset, tags, 0, tagsLen);
}
} | 3.68 |
pulsar_BKCluster_startBKCluster | /**
* Start cluster. Also, starts the auto recovery process for each bookie, if
* isAutoRecoveryEnabled is true.
*
* @throws Exception
*/
private void startBKCluster(int numBookies) throws Exception {
PulsarRegistrationManager rm = new PulsarRegistrationManager(store, "/ledgers", baseConf);
rm.initNewCluster();
baseConf.setMetadataServiceUri("metadata-store:" + clusterConf.metadataServiceUri);
baseClientConf.setMetadataServiceUri("metadata-store:" + clusterConf.metadataServiceUri);
// Create Bookie Servers (B1, B2, B3)
for (int i = 0; i < numBookies; i++) {
startNewBookie(i);
}
} | 3.68 |
dubbo_ServiceDiscoveryRegistryDirectory_destroyUnusedInvokers | /**
 * Check whether the invokers in the cache need to be destroyed.
 * If the url attribute refer.autodestroy=false is set, invokers will only be added and never removed, which may cause a refer leak.
 *
 * @param oldUrlInvokerMap the previously cached invokers
 * @param newUrlInvokerMap the newly refreshed invokers
*/
private void destroyUnusedInvokers(
Map<ProtocolServiceKeyWithAddress, Invoker<T>> oldUrlInvokerMap,
Map<ProtocolServiceKeyWithAddress, Invoker<T>> newUrlInvokerMap) {
if (newUrlInvokerMap == null || newUrlInvokerMap.size() == 0) {
destroyAllInvokers();
return;
}
if (oldUrlInvokerMap == null || oldUrlInvokerMap.size() == 0) {
return;
}
for (Map.Entry<ProtocolServiceKeyWithAddress, Invoker<T>> entry : oldUrlInvokerMap.entrySet()) {
Invoker<T> invoker = entry.getValue();
if (invoker != null) {
try {
invoker.destroy();
if (logger.isDebugEnabled()) {
logger.debug("destroy invoker[" + invoker.getUrl() + "] success. ");
}
} catch (Exception e) {
logger.warn(
PROTOCOL_FAILED_DESTROY_INVOKER,
"",
"",
"destroy invoker[" + invoker.getUrl() + "]failed." + e.getMessage(),
e);
}
}
}
logger.info(oldUrlInvokerMap.size() + " deprecated invokers deleted.");
} | 3.68 |
framework_BootstrapHandler_findAndEscapeThemeName | /**
* Do not override.
*
 * @param context the bootstrap context
 * @return the theme name with special characters stripped
*/
public String findAndEscapeThemeName(BootstrapContext context) {
String themeName = getThemeName(context);
if (themeName == null) {
VaadinRequest request = context.getRequest();
themeName = request.getService().getConfiguredTheme(request);
}
// XSS prevention, theme names shouldn't contain special chars anyway.
// The servlet denies them via url parameter.
themeName = VaadinServlet.stripSpecialChars(themeName);
return themeName;
} | 3.68 |
hadoop_JsonSerialization_getMapper | /**
* Get the mapper of this class.
* @return the mapper
*/
public ObjectMapper getMapper() {
return mapper;
} | 3.68 |
flink_Channel_getRelativeTempMemory | /**
* Gets the memory for materializing the channel's result from this Channel.
*
* @return The temp memory.
*/
public double getRelativeTempMemory() {
return this.relativeTempMemory;
} | 3.68 |
hadoop_TimelineDomains_addDomains | /**
 * Add a list of domains to the existing domain list.
*
* @param domains
* a list of domains
*/
public void addDomains(List<TimelineDomain> domains) {
this.domains.addAll(domains);
} | 3.68 |
hbase_HBaseTestingUtility_unassignRegionByRow | /**
* Closes the region containing the given row.
* @param row The row to find the containing region.
* @param table The table to find the region.
*/
public void unassignRegionByRow(byte[] row, RegionLocator table) throws IOException {
HRegionLocation hrl = table.getRegionLocation(row);
unassignRegion(hrl.getRegion().getRegionName());
} | 3.68 |
flink_ParquetColumnarRowSplitReader_ensureBatch | /**
 * Checks if there is at least one row left in the batch to return. If no more rows are
* available, it reads another batch of rows.
*
* @return Returns true if there is one more row to return, false otherwise.
* @throws IOException throw if an exception happens while reading a batch.
*/
private boolean ensureBatch() throws IOException {
if (nextRow >= rowsInBatch) {
// Try to read the next batch of rows from the file.
if (nextBatch()) {
// A new batch was read; start returning rows from the beginning.
nextRow = 0;
return true;
}
return false;
}
// there is at least one Row left in the Rows array.
return true;
} | 3.68 |
hadoop_BondedS3AStatisticsContext_getInstrumentation | /**
* Get the instrumentation from the FS integration.
* @return instrumentation instance.
*/
private S3AInstrumentation getInstrumentation() {
return statisticsSource.getInstrumentation();
} | 3.68 |
hbase_ClusterStatusPublisher_generateDeadServersListToSend | /**
 * Create the list of dead servers to send. A dead server is sent NB_SEND times. We send at most
 * MAX_SERVER_PER_MESSAGE at a time. If there are too many dead servers, we send the newly dead
* first.
*/
protected List<ServerName> generateDeadServersListToSend() {
// We're getting the servers that died since the last message, and adding them to the list
long since = EnvironmentEdgeManager.currentTime() - messagePeriod * 2;
for (Pair<ServerName, Long> dead : getDeadServers(since)) {
lastSent.putIfAbsent(dead.getFirst(), 0);
}
// We're sending the new deads first.
List<Map.Entry<ServerName, Integer>> entries = new ArrayList<>(lastSent.entrySet());
Collections.sort(entries, new Comparator<Map.Entry<ServerName, Integer>>() {
@Override
public int compare(Map.Entry<ServerName, Integer> o1, Map.Entry<ServerName, Integer> o2) {
return o1.getValue().compareTo(o2.getValue());
}
});
// With a limit of MAX_SERVER_PER_MESSAGE
int max = entries.size() > MAX_SERVER_PER_MESSAGE ? MAX_SERVER_PER_MESSAGE : entries.size();
List<ServerName> res = new ArrayList<>(max);
for (int i = 0; i < max; i++) {
Map.Entry<ServerName, Integer> toSend = entries.get(i);
if (toSend.getValue() >= (NB_SEND - 1)) {
lastSent.remove(toSend.getKey());
} else {
lastSent.replace(toSend.getKey(), toSend.getValue(), toSend.getValue() + 1);
}
res.add(toSend.getKey());
}
return res;
} | 3.68 |
flink_StreamIterationHead_createBrokerIdString | /**
* Creates the identification string with which head and tail task find the shared blocking
* queue for the back channel. The identification string is unique per parallel head/tail pair
* per iteration per job.
*
* @param jid The job ID.
* @param iterationID The id of the iteration in the job.
* @param subtaskIndex The parallel subtask number
* @return The identification string.
*/
public static String createBrokerIdString(JobID jid, String iterationID, int subtaskIndex) {
return jid + "-" + iterationID + "-" + subtaskIndex;
} | 3.68 |
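Illustration (not part of the dataset row above): a hypothetical call showing the broker-id format produced by the snippet; the Flink import paths are assumed.

import org.apache.flink.api.common.JobID;
// Assumed location of the task class shown above.
import org.apache.flink.streaming.runtime.tasks.StreamIterationHead;

public class BrokerIdExample {
    public static void main(String[] args) {
        JobID jobId = new JobID();
        // Yields "<jobId>-iteration-0-3": unique per head/tail pair, iteration and job.
        String brokerId = StreamIterationHead.createBrokerIdString(jobId, "iteration-0", 3);
        System.out.println(brokerId);
    }
}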
hbase_MasterObserver_preRemoveRSGroup | /**
* Called before a region server group is removed
* @param ctx the environment to interact with the framework and master
* @param name group name
*/
default void preRemoveRSGroup(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String name) throws IOException {
} | 3.68 |
framework_VAbstractDropHandler_dragEnter | /**
* The default implementation in {@link VAbstractDropHandler} checks if the
* Transferable is accepted.
* <p>
* If transferable is accepted (either via server visit or client side
* rules) the default implementation calls abstract
* {@link #dragAccepted(VDragEvent)} method.
* <p>
* If drop handler has distinct places where some parts may accept the
* {@link Transferable} and others don't, one should use similar validation
* logic in dragOver method and replace this method with empty
* implementation.
*
*/
@Override
public void dragEnter(final VDragEvent drag) {
validate(event -> dragAccepted(drag), drag);
} | 3.68 |
framework_GridSingleSelect_setDeselectAllowed | /**
* Sets whether it's allowed to deselect the selected row through the UI.
* Deselection is allowed by default.
*
* @param deselectAllowed
* <code>true</code> if the selected row can be deselected
* without selecting another row instead; otherwise
* <code>false</code>.
*/
public void setDeselectAllowed(boolean deselectAllowed) {
model.setDeselectAllowed(deselectAllowed);
} | 3.68 |
hadoop_TimelineEvents_setEntityId | /**
* Set the entity Id
*
* @param entityId
* the entity Id
*/
public void setEntityId(String entityId) {
this.entityId = entityId;
} | 3.68 |
druid_BeanTypeAutoProxyCreator_isMatch | /**
* Return if the given bean name matches the mapped name.
* <p>
* The default implementation checks for "xxx*", "*xxx" and "*xxx*" matches, as well as direct equality. Can be
* overridden in subclasses.
*
* @param beanName the bean name to check
* @param mappedName the name in the configured list of names
* @return if the names match
* @see org.springframework.util.PatternMatchUtils#simpleMatch(String, String)
*/
protected boolean isMatch(String beanName, String mappedName) {
return PatternMatchUtils.simpleMatch(mappedName, beanName);
} | 3.68 |
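Illustration (not part of the dataset row above): a small sketch of the matching rules the Javadoc describes, exercising Spring's PatternMatchUtils directly, which is the utility the snippet delegates to.

import org.springframework.util.PatternMatchUtils;

public class NameMatchExample {
    public static void main(String[] args) {
        // "xxx*", "*xxx" and "*xxx*" patterns, plus direct equality, are supported.
        System.out.println(PatternMatchUtils.simpleMatch("dataSource*", "dataSourceSlave")); // true
        System.out.println(PatternMatchUtils.simpleMatch("*Service", "userService"));        // true
        System.out.println(PatternMatchUtils.simpleMatch("*user*", "myUserDao"));            // false (matching is case-sensitive)
        System.out.println(PatternMatchUtils.simpleMatch("exactName", "exactName"));         // true
    }
}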
rocketmq-connect_PositionManagementService_registerListener | /**
* Register a listener.
*
 * @param listener the position update listener to register
*/
default void registerListener(PositionUpdateListener listener){
// No-op
} | 3.68 |
flink_TestUtils_copyDirectory | /**
* Copy all the files and sub-directories under source directory to destination directory
* recursively.
*
* @param source directory or file path to copy from.
* @param destination directory or file path to copy to.
* @return Path of the destination directory.
 * @throws IOException if any IO error happens.
*/
public static Path copyDirectory(final Path source, final Path destination) throws IOException {
Files.walkFileTree(
source,
EnumSet.of(FileVisitOption.FOLLOW_LINKS),
Integer.MAX_VALUE,
new SimpleFileVisitor<Path>() {
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes ignored)
throws IOException {
final Path targetDir = destination.resolve(source.relativize(dir));
try {
Files.copy(dir, targetDir, StandardCopyOption.COPY_ATTRIBUTES);
} catch (FileAlreadyExistsException e) {
if (!Files.isDirectory(targetDir)) {
throw e;
}
}
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes ignored)
throws IOException {
Files.copy(
file,
destination.resolve(source.relativize(file)),
StandardCopyOption.COPY_ATTRIBUTES);
return FileVisitResult.CONTINUE;
}
});
return destination;
} | 3.68 |
hbase_ConnectionOverAsyncConnection_toString | /**
* An identifier that will remain the same for a given connection.
*/
@Override
public String toString() {
return "connection-over-async-connection-0x" + Integer.toHexString(hashCode());
} | 3.68 |
flink_SortedGrouping_reduceGroup | /**
* Applies a GroupReduce transformation on a grouped and sorted {@link DataSet}.
*
* <p>The transformation calls a {@link
* org.apache.flink.api.common.functions.RichGroupReduceFunction} for each group of the DataSet.
* A GroupReduceFunction can iterate over all elements of a group and emit any number of output
* elements including none.
*
* @param reducer The GroupReduceFunction that is applied on each group of the DataSet.
* @return A GroupReduceOperator that represents the reduced DataSet.
* @see org.apache.flink.api.common.functions.RichGroupReduceFunction
* @see GroupReduceOperator
* @see DataSet
*/
public <R> GroupReduceOperator<T, R> reduceGroup(GroupReduceFunction<T, R> reducer) {
if (reducer == null) {
throw new NullPointerException("GroupReduce function must not be null.");
}
TypeInformation<R> resultType =
TypeExtractor.getGroupReduceReturnTypes(
reducer, inputDataSet.getType(), Utils.getCallLocationName(), true);
return new GroupReduceOperator<>(
this, resultType, inputDataSet.clean(reducer), Utils.getCallLocationName());
} | 3.68 |
hbase_SecurityUtil_getPrincipalWithoutRealm | /**
* Get the user name from a principal
*/
public static String getPrincipalWithoutRealm(final String principal) {
int i = principal.indexOf("@");
return (i > -1) ? principal.substring(0, i) : principal;
} | 3.68 |
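Illustration (not part of the dataset row above): a hypothetical call site showing the realm-stripping behaviour of the snippet; the HBase import path is assumed.

// Assumed import; the class shown above lives in HBase's security utilities.
import org.apache.hadoop.hbase.security.SecurityUtil;

public class PrincipalExample {
    public static void main(String[] args) {
        // The realm suffix after '@' is stripped...
        System.out.println(SecurityUtil.getPrincipalWithoutRealm("hbase/host1.example.com@EXAMPLE.COM"));
        // -> hbase/host1.example.com
        // ...and principals without a realm are returned unchanged.
        System.out.println(SecurityUtil.getPrincipalWithoutRealm("hbase/host1.example.com"));
    }
}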
hadoop_ReplayJobFactory_start | /**
* Start the reader thread, wait for latch if necessary.
*/
@Override
public void start() {
this.rThread.start();
} | 3.68 |
flink_BatchTask_initBroadcastInputReaders | /**
* Creates the record readers for the extra broadcast inputs as configured by {@link
* TaskConfig#getNumBroadcastInputs()}. This method requires that the task configuration, the
* driver, and the user-code class loader are set.
*/
protected void initBroadcastInputReaders() throws Exception {
final int numBroadcastInputs = this.config.getNumBroadcastInputs();
final MutableReader<?>[] broadcastInputReaders = new MutableReader<?>[numBroadcastInputs];
int currentReaderOffset = config.getNumInputs();
for (int i = 0; i < this.config.getNumBroadcastInputs(); i++) {
// ---------------- create the input readers ---------------------
// in case where a logical input unions multiple physical inputs, create a union reader
final int groupSize = this.config.getBroadcastGroupSize(i);
if (groupSize == 1) {
// non-union case
broadcastInputReaders[i] =
new MutableRecordReader<>(
getEnvironment().getInputGate(currentReaderOffset),
getEnvironment().getTaskManagerInfo().getTmpDirectories());
} else if (groupSize > 1) {
// union case
IndexedInputGate[] readers = new IndexedInputGate[groupSize];
for (int j = 0; j < groupSize; ++j) {
readers[j] = getEnvironment().getInputGate(currentReaderOffset + j);
}
broadcastInputReaders[i] =
new MutableRecordReader<>(
new UnionInputGate(readers),
getEnvironment().getTaskManagerInfo().getTmpDirectories());
} else {
throw new Exception("Illegal input group size in task configuration: " + groupSize);
}
currentReaderOffset += groupSize;
}
this.broadcastInputReaders = broadcastInputReaders;
} | 3.68 |
framework_AbstractComponentConnector_cancelParentTouchTimers | /**
* Cancel the timer recursively for parent components that have timers
* running
*
* @since 7.6
*/
private void cancelParentTouchTimers() {
ServerConnector parent = getParent();
// we have to account for the parent being something other than an
// abstractcomponent. getParent returns null for the root element.
while (parent != null) {
if (parent instanceof AbstractComponentConnector) {
((AbstractComponentConnector) parent).cancelTouchTimer();
}
parent = parent.getParent();
}
} | 3.68 |
hudi_HoodieTableMetadataUtil_getMetadataPartitionsNeedingWriteStatusTracking | /**
* Returns true if any enabled metadata partition in the given hoodie table requires WriteStatus to track the written records.
*
* @param config MDT config
* @param metaClient {@code HoodieTableMetaClient} of the data table
* @return true if WriteStatus should track the written records else false.
*/
public static boolean getMetadataPartitionsNeedingWriteStatusTracking(HoodieMetadataConfig config, HoodieTableMetaClient metaClient) {
// Does any enabled partition need to track the written records
if (MetadataPartitionType.getMetadataPartitionsNeedingWriteStatusTracking().stream().anyMatch(p -> metaClient.getTableConfig().isMetadataPartitionAvailable(p))) {
return true;
}
// Does any inflight partitions need to track the written records
Set<String> metadataPartitionsInflight = metaClient.getTableConfig().getMetadataPartitionsInflight();
if (MetadataPartitionType.getMetadataPartitionsNeedingWriteStatusTracking().stream().anyMatch(p -> metadataPartitionsInflight.contains(p.getPartitionPath()))) {
return true;
}
// Does any partition currently being enabled need to track the written records
if (config.enableRecordIndex()) {
return true;
}
return false;
} | 3.68 |
framework_JsonCodec_encodeConnectorMap | /*
* Encodes a connector map. Invisible connectors are skipped.
*/
@SuppressWarnings("deprecation")
private static JsonObject encodeConnectorMap(Type valueType, Map<?, ?> map,
ConnectorTracker connectorTracker) {
JsonObject jsonMap = Json.createObject();
for (Entry<?, ?> entry : map.entrySet()) {
ClientConnector key = (ClientConnector) entry.getKey();
if (LegacyCommunicationManager.isConnectorVisibleToClient(key)) {
EncodeResult encodedValue = encode(entry.getValue(), null,
valueType, connectorTracker);
jsonMap.put(key.getConnectorId(),
encodedValue.getEncodedValue());
}
}
return jsonMap;
} | 3.68 |
hadoop_NameCache_promote | /** Promote a frequently used name to the cache */
private void promote(final K name) {
transientMap.remove(name);
cache.put(name, name);
lookups += useThreshold;
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectWhereGreaterThan | /**
* Tests a select with a greater than clause.
*/
@Test
public void testSelectWhereGreaterThan() {
SelectStatement stmt = new SelectStatement()
.from(new TableReference(TEST_TABLE))
.where(greaterThan(new FieldReference(INT_FIELD), 20090101));
String expectedSql = "SELECT * FROM " + tableName(TEST_TABLE) + " WHERE (intField > 20090101)";
assertEquals("Select with greater than clause", expectedSql, testDialect.convertStatementToSQL(stmt));
} | 3.68 |
hadoop_AzureNativeFileSystemStore_isBlockBlobWithCompactionKey | /**
 * Checks if the given key in Azure Storage should be stored as a block blob
 * with compaction enabled instead of a normal block blob.
*
* @param key blob name
* @return true, if the file is in directory with block compaction enabled.
*/
public boolean isBlockBlobWithCompactionKey(String key) {
return isKeyForDirectorySet(key, blockBlobWithCompationDirs);
} | 3.68 |
hadoop_StartupProgress_beginStep | /**
* Begins execution of the specified step within the specified phase. This is
* a no-op if the phase is already completed.
*
* @param phase Phase within which the step should be started
* @param step Step to begin
*/
public void beginStep(Phase phase, Step step) {
if (!isComplete(phase)) {
lazyInitStep(phase, step).beginTime = monotonicNow();
}
LOG.debug("Beginning of the step. Phase: {}, Step: {}", phase, step);
} | 3.68 |
framework_TwinColSelect_setColumns | /**
* Sets the width of the component so that it displays approximately the
* given number of letters in each of the two selects.
* <p>
* Calling {@code setColumns(10);} is roughly equivalent to calling
 * {@code setWidth((10*2+4)+"em");}
* </p>
*
* @deprecated As of 7.0. "Columns" does not reflect the exact number of
* characters that will be displayed. It is better to use
* setWidth together with "em" to control the width of the
* field.
* @param columns
* the number of columns to set.
*/
@Deprecated
public void setColumns(int columns) {
if (columns < 0) {
columns = 0;
}
if (this.columns != columns) {
this.columns = columns;
markAsDirty();
}
} | 3.68 |
hbase_MultiByteBuff_hasArray | /** Returns false. MBB does not support array based operations */
@Override
public boolean hasArray() {
return false;
} | 3.68 |
flink_GSChecksumWriteChannel_close | /**
* Closes the channel and validates the checksum against the storage. Manually verifying
* checksums for streaming uploads is recommended by Google, see here:
* https://cloud.google.com/storage/docs/streaming
*
* @throws IOException On underlying failure or non-matching checksums
*/
public void close() throws IOException {
LOGGER.trace("Closing write channel to blob {}", blobIdentifier);
// close channel and get blob metadata
writeChannel.close();
Optional<GSBlobStorage.BlobMetadata> blobMetadata = storage.getMetadata(blobIdentifier);
if (!blobMetadata.isPresent()) {
throw new IOException(
String.format("Failed to read metadata for blob %s", blobIdentifier));
}
// make sure checksums match
String writeChecksum = ChecksumUtils.convertChecksumToString(hasher.hash().asInt());
String blobChecksum = blobMetadata.get().getChecksum();
if (!writeChecksum.equals(blobChecksum)) {
throw new IOException(
String.format(
"Checksum mismatch writing blob %s: expected %s but found %s",
blobIdentifier, writeChecksum, blobChecksum));
}
} | 3.68 |
hbase_ColumnRangeFilter_getMinColumnInclusive | /** Returns true if min column is inclusive, false otherwise */
public boolean getMinColumnInclusive() {
return this.minColumnInclusive;
} | 3.68 |
framework_VTree_setHtml | /** For internal use only. May be removed or replaced in the future. */
public void setHtml(String html) {
nodeCaptionSpan.setInnerHTML(html);
} | 3.68 |
graphhopper_GHRequest_addPoint | /**
* Add stopover point to routing request.
*
* @param point geographical position (see GHPoint)
*/
public GHRequest addPoint(GHPoint point) {
if (point == null)
throw new IllegalArgumentException("point cannot be null");
points.add(point);
return this;
} | 3.68 |
hbase_HFileBlockIndex_ensureNonEmpty | /**
* Verifies that the block index is non-empty and throws an {@link IllegalStateException}
* otherwise.
*/
public void ensureNonEmpty() {
if (isEmpty()) {
throw new IllegalStateException("Block index is empty or not loaded");
}
} | 3.68 |
hadoop_ReplicaInfo_getBytesReserved | /**
* Number of bytes reserved for this replica on disk.
*/
public long getBytesReserved() {
return 0;
} | 3.68 |
flink_CheckpointConfig_getMaxSubtasksPerChannelStateFile | /**
* @return the number of subtasks to share the same channel state file, as configured via {@link
* #setMaxSubtasksPerChannelStateFile(int)} or {@link
* ExecutionCheckpointingOptions#UNALIGNED_MAX_SUBTASKS_PER_CHANNEL_STATE_FILE}.
*/
@PublicEvolving
public int getMaxSubtasksPerChannelStateFile() {
return configuration.get(
ExecutionCheckpointingOptions.UNALIGNED_MAX_SUBTASKS_PER_CHANNEL_STATE_FILE);
} | 3.68 |
flink_TableFunctionCollector_reset | /** Resets the flag to indicate whether {@code collect(T)} has been called. */
public void reset() {
this.collected = false;
} | 3.68 |
flink_NFACompiler_createState | /**
* Creates a state with {@link State.StateType#Normal} and adds it to the collection of
* created states. Should be used instead of instantiating with new operator.
*
* @param name the name of the state
* @param stateType the type of the state
* @return the created state
*/
private State<T> createState(String name, State.StateType stateType) {
String stateName = stateNameHandler.getUniqueInternalName(name);
State<T> state = new State<>(stateName, stateType);
states.add(state);
return state;
} | 3.68 |
pulsar_ResourceUsageTopicTransportManager_unregisterResourceUsagePublisher | /*
* Unregister a resource owner (resource-group, tenant, namespace, topic etc).
*
* @param resource usage publisher
*/
public void unregisterResourceUsagePublisher(ResourceUsagePublisher r) {
publisherMap.remove(r.getID());
} | 3.68 |
rocketmq-connect_FilterTransform_stop | /**
* Stop the component.
*/
@Override
public void stop() {
} | 3.68 |
hbase_BufferedMutatorParams_maxKeyValueSize | /**
* Override the maximum key-value size specified by the provided {@link Connection}'s
* {@link org.apache.hadoop.conf.Configuration} instance, via the configuration key
* {@code hbase.client.keyvalue.maxsize}.
*/
public BufferedMutatorParams maxKeyValueSize(int maxKeyValueSize) {
this.maxKeyValueSize = maxKeyValueSize;
return this;
} | 3.68 |
flink_ParquetColumnarRowInputFormat_createPartitionedFormat | /**
* Create a partitioned {@link ParquetColumnarRowInputFormat}, the partition columns can be
* generated by {@link Path}.
*/
public static <SplitT extends FileSourceSplit>
ParquetColumnarRowInputFormat<SplitT> createPartitionedFormat(
Configuration hadoopConfig,
RowType producedRowType,
TypeInformation<RowData> producedTypeInfo,
List<String> partitionKeys,
PartitionFieldExtractor<SplitT> extractor,
int batchSize,
boolean isUtcTimestamp,
boolean isCaseSensitive) {
// TODO FLINK-25113 all this partition keys code should be pruned from the parquet format,
// because now FileSystemTableSource uses FileInfoExtractorBulkFormat for reading partition
// keys.
RowType projectedRowType =
new RowType(
producedRowType.getFields().stream()
.filter(field -> !partitionKeys.contains(field.getName()))
.collect(Collectors.toList()));
List<String> projectedNames = projectedRowType.getFieldNames();
ColumnBatchFactory<SplitT> factory =
(SplitT split, ColumnVector[] parquetVectors) -> {
// create and initialize the row batch
ColumnVector[] vectors = new ColumnVector[producedRowType.getFieldCount()];
for (int i = 0; i < vectors.length; i++) {
RowType.RowField field = producedRowType.getFields().get(i);
vectors[i] =
partitionKeys.contains(field.getName())
? createVectorFromConstant(
field.getType(),
extractor.extract(
split, field.getName(), field.getType()),
batchSize)
: parquetVectors[projectedNames.indexOf(field.getName())];
}
return new VectorizedColumnBatch(vectors);
};
return new ParquetColumnarRowInputFormat<>(
hadoopConfig,
projectedRowType,
producedTypeInfo,
factory,
batchSize,
isUtcTimestamp,
isCaseSensitive);
} | 3.68 |
morf_InlineTableUpgrader_startStep | /**
* @see org.alfasoftware.morf.upgrade.SchemaChangeVisitor#startStep(java.lang.Class)
*/
@Override
public void startStep(Class<? extends UpgradeStep> upgradeClass) {
writeStatement(sqlDialect.convertCommentToSQL("Upgrade step: " + upgradeClass.getName()));
} | 3.68 |
dubbo_InternalThreadLocal_initialValue | /**
* Returns the initial value for this thread-local variable.
*/
@Override
protected V initialValue() {
return null;
} | 3.68 |
flink_PojoSerializer_buildSnapshot | /**
* Build and return a snapshot of the serializer's parameters and currently cached serializers.
*/
private static <T> PojoSerializerSnapshot<T> buildSnapshot(
Class<T> pojoType,
LinkedHashMap<Class<?>, Integer> registeredSubclassesToTags,
TypeSerializer<?>[] registeredSubclassSerializers,
Field[] fields,
TypeSerializer<?>[] fieldSerializers,
Map<Class<?>, TypeSerializer<?>> nonRegisteredSubclassSerializerCache) {
final LinkedHashMap<Class<?>, TypeSerializer<?>> subclassRegistry =
CollectionUtil.newLinkedHashMapWithExpectedSize(registeredSubclassesToTags.size());
for (Map.Entry<Class<?>, Integer> entry : registeredSubclassesToTags.entrySet()) {
subclassRegistry.put(entry.getKey(), registeredSubclassSerializers[entry.getValue()]);
}
return new PojoSerializerSnapshot<>(
pojoType,
fields,
fieldSerializers,
subclassRegistry,
nonRegisteredSubclassSerializerCache);
} | 3.68 |
hbase_RandomRowFilter_setChance | /**
* Set the chance that a row is included.
*/
public void setChance(float chance) {
this.chance = chance;
} | 3.68 |
flink_FileSystemJobResultStore_constructDirtyPath | /**
* Given a job ID, construct the path for a dirty entry corresponding to it in the job result
* store.
*
* @param jobId The job ID to construct a dirty entry path from.
 * @return A path for a dirty entry for the given job ID.
*/
private Path constructDirtyPath(JobID jobId) {
return constructEntryPath(jobId.toString() + DIRTY_FILE_EXTENSION);
} | 3.68 |
hadoop_BalanceJob_waitJobDone | /**
* Wait until the job is done.
*/
public synchronized void waitJobDone() throws InterruptedException {
while (!jobDone) {
wait();
}
} | 3.68 |
hbase_TsvImporterMapper_doSetup | /**
* Handles common parameter initialization that a subclass might want to leverage.
*/
protected void doSetup(Context context) {
Configuration conf = context.getConfiguration();
// If a custom separator has been used,
// decode it back from Base64 encoding.
separator = conf.get(ImportTsv.SEPARATOR_CONF_KEY);
if (separator == null) {
separator = ImportTsv.DEFAULT_SEPARATOR;
} else {
separator = new String(Base64.getDecoder().decode(separator));
}
// Should never get 0 as we are setting this to a valid value in job
// configuration.
ts = conf.getLong(ImportTsv.TIMESTAMP_CONF_KEY, 0);
skipEmptyColumns = context.getConfiguration().getBoolean(ImportTsv.SKIP_EMPTY_COLUMNS, false);
skipBadLines = context.getConfiguration().getBoolean(ImportTsv.SKIP_LINES_CONF_KEY, true);
badLineCount = context.getCounter("ImportTsv", "Bad Lines");
logBadLines = context.getConfiguration().getBoolean(ImportTsv.LOG_BAD_LINES_CONF_KEY, false);
hfileOutPath = conf.get(ImportTsv.BULK_OUTPUT_CONF_KEY);
} | 3.68 |
flink_MemoryManager_allocatePages | /**
* Allocates a set of memory segments from this memory manager.
*
* <p>The total allocated memory will not exceed its size limit, announced in the constructor.
*
* @param owner The owner to associate with the memory segment, for the fallback release.
* @param target The list into which to put the allocated memory pages.
* @param numberOfPages The number of pages to allocate.
* @throws MemoryAllocationException Thrown, if this memory manager does not have the requested
* amount of memory pages any more.
*/
public void allocatePages(Object owner, Collection<MemorySegment> target, int numberOfPages)
throws MemoryAllocationException {
// sanity check
Preconditions.checkNotNull(owner, "The memory owner must not be null.");
Preconditions.checkState(!isShutDown, "Memory manager has been shut down.");
Preconditions.checkArgument(
numberOfPages <= totalNumberOfPages,
"Cannot allocate more segments %s than the max number %s",
numberOfPages,
totalNumberOfPages);
// reserve array space, if applicable
if (target instanceof ArrayList) {
((ArrayList<MemorySegment>) target).ensureCapacity(numberOfPages);
}
long memoryToReserve = numberOfPages * pageSize;
try {
memoryBudget.reserveMemory(memoryToReserve);
} catch (MemoryReservationException e) {
throw new MemoryAllocationException(
String.format("Could not allocate %d pages", numberOfPages), e);
}
Runnable pageCleanup = this::releasePage;
allocatedSegments.compute(
owner,
(o, currentSegmentsForOwner) -> {
Set<MemorySegment> segmentsForOwner =
currentSegmentsForOwner == null
? CollectionUtil.newHashSetWithExpectedSize(numberOfPages)
: currentSegmentsForOwner;
for (long i = numberOfPages; i > 0; i--) {
MemorySegment segment =
allocateOffHeapUnsafeMemory(getPageSize(), owner, pageCleanup);
target.add(segment);
segmentsForOwner.add(segment);
}
return segmentsForOwner;
});
Preconditions.checkState(!isShutDown, "Memory manager has been concurrently shut down.");
} | 3.68 |
hadoop_CachingBlockManager_numCached | /**
* Number of caching operations completed.
*
* @return the number of cached buffers.
*/
public int numCached() {
return cache.size();
} | 3.68 |
morf_SchemaUtils_notPrimaryKey | /**
* @see org.alfasoftware.morf.metadata.SchemaUtils.ColumnBuilder#notPrimaryKey()
*/
@Override
public ColumnBuilder notPrimaryKey() {
return new ColumnBuilderImpl(this, isNullable(), getDefaultValue(), false, isAutoNumbered(), getAutoNumberStart());
} | 3.68 |
framework_UIConnector_showServerDebugInfo | /**
* Sends a request to the server to print details to console that will help
* the developer to locate the corresponding server-side connector in the
* source code.
*
* @since 7.1
* @param serverConnector
* the connector to locate
*/
public void showServerDebugInfo(ServerConnector serverConnector) {
getRpcProxy(DebugWindowServerRpc.class)
.showServerDebugInfo(serverConnector);
} | 3.68 |
framework_VComboBox_serverReplyHandled | /**
* Called by the connector when it has finished handling any reply from
* the server, regardless of what was updated.
*/
public void serverReplyHandled() {
popupOpenerClicked = false;
// if (!initDone) {
// debug("VComboBox: init done, updating widths");
// // Calculate minimum textarea width
// updateSuggestionPopupMinWidth();
// updateRootWidth();
// initDone = true;
// }
} | 3.68 |
flink_SqlFunctionUtils_strToMap | /**
* Creates a map by parsing text. Split text into key-value pairs using two delimiters. The
* first delimiter separates pairs, and the second delimiter separates key and value. Both
* {@code listDelimiter} and {@code keyValueDelimiter} are treated as regular expressions.
*
* @param text the input text
 * @param listDelimiter the delimiter that separates pairs
 * @param keyValueDelimiter the delimiter that separates key and value
* @return the map
*/
public static Map<String, String> strToMap(
String text, String listDelimiter, String keyValueDelimiter) {
if (StringUtils.isEmpty(text)) {
return EMPTY_MAP;
}
String[] keyValuePairs = text.split(listDelimiter);
Map<String, String> ret = CollectionUtil.newHashMapWithExpectedSize(keyValuePairs.length);
for (String keyValuePair : keyValuePairs) {
String[] keyValue = keyValuePair.split(keyValueDelimiter, 2);
if (keyValue.length < 2) {
ret.put(keyValuePair, null);
} else {
ret.put(keyValue[0], keyValue[1]);
}
}
return ret;
} | 3.68 |
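Illustration (not part of the dataset row above): a hedged usage sketch of the snippet's delimiter handling and its null-value case; the Flink import path is assumed.

import java.util.Map;
// Assumed location of the utility shown above within Flink's table runtime.
import org.apache.flink.table.runtime.functions.SqlFunctionUtils;

public class StrToMapExample {
    public static void main(String[] args) {
        // Pairs split on ";", key and value split on "=" (at most one split per pair).
        Map<String, String> m = SqlFunctionUtils.strToMap("k1=v1;k2=v2", ";", "=");
        System.out.println(m.get("k2")); // v2
        // A pair without the key/value delimiter is kept as a key with a null value.
        System.out.println(SqlFunctionUtils.strToMap("a=1;b", ";", "=").get("b")); // null
    }
}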
hbase_AsyncBufferedMutatorBuilder_setMaxRetries | /**
* Set the max retry times for an operation. Usually it is the max attempt times minus 1.
* <p>
* Operation timeout and max attempt times(or max retry times) are both limitations for retrying,
* we will stop retrying when we reach any of the limitations.
* @see #setMaxAttempts(int)
* @see #setOperationTimeout(long, TimeUnit)
*/
default AsyncBufferedMutatorBuilder setMaxRetries(int maxRetries) {
return setMaxAttempts(retries2Attempts(maxRetries));
} | 3.68 |
hbase_CostFunction_prepare | /**
 * Called once per LB invocation to give the cost function a chance to initialize its state and perform
* any costly calculation.
*/
void prepare(BalancerClusterState cluster) {
this.cluster = cluster;
} | 3.68 |
zxing_DataMatrixWriter_encodeLowLevel | /**
* Encode the given symbol info to a bit matrix.
*
* @param placement The DataMatrix placement.
* @param symbolInfo The symbol info to encode.
* @return The bit matrix generated.
*/
private static BitMatrix encodeLowLevel(DefaultPlacement placement, SymbolInfo symbolInfo, int width, int height) {
int symbolWidth = symbolInfo.getSymbolDataWidth();
int symbolHeight = symbolInfo.getSymbolDataHeight();
ByteMatrix matrix = new ByteMatrix(symbolInfo.getSymbolWidth(), symbolInfo.getSymbolHeight());
int matrixY = 0;
for (int y = 0; y < symbolHeight; y++) {
// Fill the top edge with alternate 0 / 1
int matrixX;
if ((y % symbolInfo.matrixHeight) == 0) {
matrixX = 0;
for (int x = 0; x < symbolInfo.getSymbolWidth(); x++) {
matrix.set(matrixX, matrixY, (x % 2) == 0);
matrixX++;
}
matrixY++;
}
matrixX = 0;
for (int x = 0; x < symbolWidth; x++) {
// Fill the right edge with full 1
if ((x % symbolInfo.matrixWidth) == 0) {
matrix.set(matrixX, matrixY, true);
matrixX++;
}
matrix.set(matrixX, matrixY, placement.getBit(x, y));
matrixX++;
// Fill the right edge with alternate 0 / 1
if ((x % symbolInfo.matrixWidth) == symbolInfo.matrixWidth - 1) {
matrix.set(matrixX, matrixY, (y % 2) == 0);
matrixX++;
}
}
matrixY++;
// Fill the bottom edge with full 1
if ((y % symbolInfo.matrixHeight) == symbolInfo.matrixHeight - 1) {
matrixX = 0;
for (int x = 0; x < symbolInfo.getSymbolWidth(); x++) {
matrix.set(matrixX, matrixY, true);
matrixX++;
}
matrixY++;
}
}
return convertByteMatrixToBitMatrix(matrix, width, height);
} | 3.68 |
hadoop_IOStatisticsContextIntegration_setThreadIOStatisticsContext | /**
* Set the IOStatisticsContext for the current thread.
* @param statisticsContext IOStatistics context instance for the
* current thread. If null, the context is reset.
*/
public static void setThreadIOStatisticsContext(
IOStatisticsContext statisticsContext) {
if (isThreadIOStatsEnabled) {
if (statisticsContext == null) {
// new value is null, so remove it
ACTIVE_IOSTATS_CONTEXT.removeForCurrentThread();
} else {
// the setter is efficient in that it does not create a new
// reference if the context is unchanged.
ACTIVE_IOSTATS_CONTEXT.setForCurrentThread(statisticsContext);
}
}
} | 3.68 |
framework_InfoSection_addVersionInfo | /**
* Logs version information for client/server/theme.
*
* @param applicationConfiguration
* @since 7.1
*/
private void addVersionInfo(
ApplicationConfiguration applicationConfiguration) {
String clientVersion = Version.getFullVersion();
String servletVersion = applicationConfiguration.getServletVersion();
String atmosphereVersion = applicationConfiguration
.getAtmosphereVersion();
String jsVersion = applicationConfiguration.getAtmosphereJSVersion();
String themeVersion = getThemeVersion();
boolean themeOk = equalsEither(themeVersion, clientVersion,
servletVersion);
boolean clientOk = equalsEither(clientVersion, servletVersion,
themeVersion);
boolean servletOk = equalsEither(servletVersion, clientVersion,
themeVersion);
addRow("Client engine version", clientVersion,
clientOk ? null : ERROR_STYLE);
addRow("Server engine version", servletVersion,
servletOk ? null : ERROR_STYLE);
addRow("Theme version", themeVersion, themeOk ? null : ERROR_STYLE);
if (jsVersion != null) {
addRow("Push server version", atmosphereVersion);
addRow("Push client version", jsVersion
+ " (note: does not need to match server version)");
}
} | 3.68 |
flink_TaskExecutorManager_declareNeededResources | /** DO NOT call this method directly. Use {@link #declareNeededResourcesWithDelay()} instead. */
private void declareNeededResources() {
resourceAllocator.declareResourceNeeded(getResourceDeclaration());
} | 3.68 |
morf_ArchiveDataSetReader_close | /**
* @see org.alfasoftware.morf.xml.XmlStreamProvider#close()
*/
@Override
public void close() {
super.close();
clear();
if (zipFile == null) {
throw new IllegalStateException("Archive data set has not been opened");
}
try {
zipFile.close();
} catch (IOException e) {
throw new RuntimeException("Error closing zip archive [" + file + "]", e);
}
} | 3.68 |
pulsar_PrometheusMetricStreams_writeSample | /**
* Write the given metric and sample value to the stream. Will write #TYPE header if metric not seen before.
* @param metricName name of the metric.
* @param value value of the sample
* @param labelsAndValuesArray varargs of label and label value
*/
void writeSample(String metricName, Number value, String... labelsAndValuesArray) {
SimpleTextOutputStream stream = initGaugeType(metricName);
stream.write(metricName).write('{');
for (int i = 0; i < labelsAndValuesArray.length; i += 2) {
String labelValue = labelsAndValuesArray[i + 1];
if (labelValue != null) {
labelValue = labelValue.replace("\"", "\\\"");
}
stream.write(labelsAndValuesArray[i]).write("=\"").write(labelValue).write('\"');
if (i + 2 != labelsAndValuesArray.length) {
stream.write(',');
}
}
stream.write("} ").write(value).write('\n');
} | 3.68 |
flink_CheckpointCommitter_setJobId | /**
* Internally used to set the job ID after instantiation.
*
 * @param id the job ID
 * @throws Exception if setting the job ID fails
*/
public void setJobId(String id) throws Exception {
this.jobId = id;
} | 3.68 |