name (string, lengths 12 to 178) | code_snippet (string, lengths 8 to 36.5k) | score (float64, 3.26 to 3.68) |
---|---|---|
hudi_OverwriteNonDefaultsWithLatestAvroPayload_mergeRecords | /**
* Merges the given records into one.
* The fields in {@code baseRecord} have higher priority:
* a field is set into the merged record if it is neither null nor equal to the default.
*
* @param schema The record schema
* @param baseRecord The base record to merge with
* @param mergedRecord The record to be merged
*
* @return the merged record option
*/
protected Option<IndexedRecord> mergeRecords(Schema schema, GenericRecord baseRecord, GenericRecord mergedRecord) {
if (isDeleteRecord(baseRecord)) {
return Option.empty();
} else {
final GenericRecordBuilder builder = new GenericRecordBuilder(schema);
List<Schema.Field> fields = schema.getFields();
fields.forEach(field -> setField(baseRecord, mergedRecord, builder, field));
return Option.of(builder.build());
}
} | 3.68 |
druid_SQLExprParser_parseQueryPlanHint | //for ads
public void parseQueryPlanHint(SQLExpr expr) {
if (lexer.token == Token.HINT && (expr instanceof SQLInListExpr
|| expr instanceof SQLBinaryOpExpr
|| expr instanceof SQLInSubQueryExpr
|| expr instanceof SQLExistsExpr
|| expr instanceof SQLNotExpr
|| expr instanceof SQLBetweenExpr)) {
String text = lexer.stringVal().trim();
Lexer hintLex = SQLParserUtils.createLexer(text, dbType);
hintLex.nextToken();
// Prevent SQL injection
if (hintLex.token == Token.PLUS) {
if (expr instanceof SQLBinaryOpExpr) {
SQLBinaryOpExpr binaryOpExpr = (SQLBinaryOpExpr) expr;
SQLBinaryOperator operator = binaryOpExpr.getOperator();
if (operator == SQLBinaryOperator.BooleanAnd
|| operator == SQLBinaryOperator.BooleanOr) {
if (binaryOpExpr.isParenthesized()) {
binaryOpExpr.setHint(new SQLCommentHint(text));
} else {
SQLExpr right = binaryOpExpr.getRight();
if (right instanceof SQLBinaryOpExpr
|| right instanceof SQLBetweenExpr) {
((SQLExprImpl) right).setHint(new SQLCommentHint(text));
}
}
} else {
binaryOpExpr.setHint(new SQLCommentHint(text));
}
} else if (expr instanceof SQLObjectImpl) {
((SQLExprImpl) expr).setHint(new SQLCommentHint(text));
} else {
throw new ParserException("TODO : " + lexer.info());
}
this.lexer.nextToken();
}
}
} | 3.68 |
hbase_ArrayBackedTag_getType | /** Returns the tag type */
@Override
public byte getType() {
return this.type;
} | 3.68 |
framework_DefaultDeploymentConfiguration_getResourceCacheTime | /**
* {@inheritDoc}
* <p>
* The default interval is 3600 seconds (1 hour).
*/
@Override
public int getResourceCacheTime() {
return resourceCacheTime;
} | 3.68 |
querydsl_MetaDataExporter_setTypeMappings | /**
* Set the type mappings to use
*
* @param typeMappings
*/
public void setTypeMappings(TypeMappings typeMappings) {
module.bind(TypeMappings.class, typeMappings);
} | 3.68 |
rocketmq-connect_ServiceProviderUtil_getClusterManagementService | /**
* Get cluster management service by class name.
*
* @param clusterManagementServiceClazz class name of the cluster management service implementation
* @return the matching cluster management service
*/
@NotNull
public static ClusterManagementService getClusterManagementService(String clusterManagementServiceClazz) {
if (StringUtils.isEmpty(clusterManagementServiceClazz)) {
clusterManagementServiceClazz = ClusterManagementServiceImpl.class.getName();
}
ClusterManagementService clusterManagementService = null;
ServiceLoader<ClusterManagementService> clusterManagementServiceServiceLoader = ServiceLoader.load(ClusterManagementService.class);
Iterator<ClusterManagementService> clusterManagementServiceIterator = clusterManagementServiceServiceLoader.iterator();
while (clusterManagementServiceIterator.hasNext()) {
ClusterManagementService currentClusterManagementService = clusterManagementServiceIterator.next();
if (currentClusterManagementService.getClass().getName().equals(clusterManagementServiceClazz)) {
clusterManagementService = currentClusterManagementService;
break;
}
}
if (null == clusterManagementService) {
throw new ConnectException("ClusterManagementService class " + clusterManagementServiceClazz + " not " +
"found");
}
return clusterManagementService;
} | 3.68 |
framework_VaadinPortletRequest_getPortalProperty | /**
* Reads a portal property from the portal context of the Vaadin request.
*
* @param name
* a string with the name of the portal property to get
* @return a string with the value of the property, or <code>null</code> if
* the property is not defined
*/
public String getPortalProperty(String name) {
return getRequest().getPortalContext().getProperty(name);
} | 3.68 |
hbase_HelloHBase_createNamespaceAndTable | /**
* Invokes Admin#createNamespace and Admin#createTable to create a namespace with a table that has
* one column-family.
* @param admin Standard Admin object
* @throws IOException If IO problem encountered
*/
static void createNamespaceAndTable(final Admin admin) throws IOException {
if (!namespaceExists(admin, MY_NAMESPACE_NAME)) {
System.out.println("Creating Namespace [" + MY_NAMESPACE_NAME + "].");
admin.createNamespace(NamespaceDescriptor.create(MY_NAMESPACE_NAME).build());
}
if (!admin.tableExists(MY_TABLE_NAME)) {
System.out.println("Creating Table [" + MY_TABLE_NAME.getNameAsString()
+ "], with one Column Family [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + "].");
TableDescriptor desc = TableDescriptorBuilder.newBuilder(MY_TABLE_NAME)
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(MY_COLUMN_FAMILY_NAME)).build();
admin.createTable(desc);
}
} | 3.68 |
hudi_AbstractTableFileSystemView_fetchHoodieFileGroup | /**
* Default implementation for fetching file-group.
*/
Option<HoodieFileGroup> fetchHoodieFileGroup(String partitionPath, String fileId) {
return Option.fromJavaOptional(fetchAllStoredFileGroups(partitionPath)
.filter(fileGroup -> fileGroup.getFileGroupId().getFileId().equals(fileId)).findFirst());
} | 3.68 |
hbase_DeleteTableProcedure_cleanRegionsInMeta | /**
* There may be items for this table still up in hbase:meta in the case where the info:regioninfo
* column was empty because of some write error. Remove ALL rows from hbase:meta that have to do
* with this table.
* <p/>
* See HBASE-12980.
*/
private static void cleanRegionsInMeta(final MasterProcedureEnv env, final TableName tableName)
throws IOException {
Scan tableScan = MetaTableAccessor.getScanForTableName(env.getMasterConfiguration(), tableName)
.setFilter(new KeyOnlyFilter());
long now = EnvironmentEdgeManager.currentTime();
List<Delete> deletes = new ArrayList<>();
try (
Table metaTable = env.getMasterServices().getConnection().getTable(TableName.META_TABLE_NAME);
ResultScanner scanner = metaTable.getScanner(tableScan)) {
for (;;) {
Result result = scanner.next();
if (result == null) {
break;
}
deletes.add(new Delete(result.getRow(), now));
}
if (!deletes.isEmpty()) {
LOG.warn("Deleting some vestigial " + deletes.size() + " rows of " + tableName + " from "
+ TableName.META_TABLE_NAME);
metaTable.delete(deletes);
}
}
} | 3.68 |
flink_LongMinimum_add | /** Consider using {@link #add(long)} instead for primitive long values */
@Override
public void add(Long value) {
this.min = Math.min(this.min, value);
} | 3.68 |
morf_IndexBean_columnNames | /**
* @see org.alfasoftware.morf.metadata.Index#columnNames()
*/
@Override
public List<String> columnNames() {
return columnNames;
} | 3.68 |
hmily_HmilyTacRollbackExecutor_getInstance | /**
* Gets instance.
*
* @return the instance
*/
public static HmilyTacRollbackExecutor getInstance() {
if (instance == null) {
synchronized (HmilyTacRollbackExecutor.class) {
if (instance == null) {
instance = new HmilyTacRollbackExecutor();
}
}
}
return instance;
} | 3.68 |
framework_AbstractSelect_getNewItemHandler | /**
* Returns the new item handler, which is called when the user adds a new item in
* {@code newItemAllowed} mode.
*
* @return NewItemHandler
*/
public NewItemHandler getNewItemHandler() {
if (newItemHandler == null) {
newItemHandler = new DefaultNewItemHandler();
}
return newItemHandler;
} | 3.68 |
framework_UIDL_hasAttribute | /**
* Indicates whether or not the named attribute is available.
*
* @param name
* the name of the attribute to check
* @return true if the attribute is available, false otherwise
*/
public boolean hasAttribute(final String name) {
return attr().containsKey(name);
} | 3.68 |
framework_FileUploadHandler_sendUploadResponse | /**
* Sends the upload response.
*
* @param request
* @param response
* @throws IOException
*/
protected void sendUploadResponse(VaadinRequest request,
VaadinResponse response) throws IOException {
response.setContentType(
ApplicationConstants.CONTENT_TYPE_TEXT_HTML_UTF_8);
try (OutputStream out = response.getOutputStream()) {
final PrintWriter outWriter = new PrintWriter(
new BufferedWriter(new OutputStreamWriter(out, UTF_8)));
outWriter.print("<html><body>download handled</body></html>");
outWriter.flush();
}
} | 3.68 |
pulsar_ProducerConfiguration_getCryptoKeyReader | /**
* @return the CryptoKeyReader
*/
public CryptoKeyReader getCryptoKeyReader() {
return conf.getCryptoKeyReader();
} | 3.68 |
hbase_MemStore_startReplayingFromWAL | /**
* Informs the MemStore that the upcoming updates will be part of replaying edits from the WAL.
*/
default void startReplayingFromWAL() {
return;
} | 3.68 |
flink_FlinkConnection_setTransactionIsolation | // TODO We currently do not support this, but we can't throw a SQLException here because we want
// to support jdbc tools such as beeline and sqlline.
@Override
public void setTransactionIsolation(int level) throws SQLException {} | 3.68 |
flink_TypeExtractionUtils_extractTypeFromLambda | /**
* Extracts type from given index from lambda. It supports nested types.
*
* @param baseClass SAM function that the lambda implements
* @param exec lambda function to extract the type from
* @param lambdaTypeArgumentIndices position of type to extract in type hierarchy
* @param paramLen count of total parameters of the lambda (including closure parameters)
* @param baseParametersLen count of lambda interface parameters (without closure parameters)
* @return extracted type
*/
public static Type extractTypeFromLambda(
Class<?> baseClass,
LambdaExecutable exec,
int[] lambdaTypeArgumentIndices,
int paramLen,
int baseParametersLen) {
Type output =
exec.getParameterTypes()[
paramLen - baseParametersLen + lambdaTypeArgumentIndices[0]];
for (int i = 1; i < lambdaTypeArgumentIndices.length; i++) {
validateLambdaType(baseClass, output);
output = extractTypeArgument(output, lambdaTypeArgumentIndices[i]);
}
validateLambdaType(baseClass, output);
return output;
} | 3.68 |
framework_Payload_parse | /**
* Parses a payload string and returns a payload object represented by that
* string.
*
* @param payloadString
* string that represents a payload object
* @return a payload object represented by the given string
*/
public static Payload parse(String payloadString) {
String[] parts = payloadString.split(":");
if (parts.length != 4 || !ITEM_PREFIX.equals(parts[0])) {
throw new IllegalArgumentException(
"Data type does not have a valid payload format");
}
// Create payload object of the given parts. Value type is converted to
// upper case to match the enum's case.
return new Payload(parts[2], parts[3],
ValueType.valueOf(parts[1].toUpperCase(Locale.ROOT)));
} | 3.68 |
hadoop_AbstractMultipartUploader_checkPath | /**
* Validate a path.
* @param path path to check.
*/
protected void checkPath(Path path) {
Objects.requireNonNull(path, "null path");
checkArgument(path.toString().startsWith(basePath.toString()),
"Path %s is not under %s", path, basePath);
} | 3.68 |
flink_StatsSummary_getMinimum | /**
* Returns the minimum seen value.
*
* @return The current minimum value.
*/
public long getMinimum() {
return min;
} | 3.68 |
flink_RemoteInputChannel_setup | /**
* Setup includes assigning exclusive buffers to this input channel, and this method should be
* called only once after this input channel is created.
*/
@Override
void setup() throws IOException {
checkState(
bufferManager.unsynchronizedGetAvailableExclusiveBuffers() == 0,
"Bug in input channel setup logic: exclusive buffers have already been set for this input channel.");
bufferManager.requestExclusiveBuffers(initialCredit);
} | 3.68 |
zxing_AlignmentPatternFinder_find | /**
* <p>This method attempts to find the bottom-right alignment pattern in the image. It is a bit messy since
* it's pretty performance-critical and so is written to be fast foremost.</p>
*
* @return {@link AlignmentPattern} if found
* @throws NotFoundException if not found
*/
AlignmentPattern find() throws NotFoundException {
int startX = this.startX;
int height = this.height;
int maxJ = startX + width;
int middleI = startY + (height / 2);
// We are looking for black/white/black modules in 1:1:1 ratio;
// this tracks the number of black/white/black modules seen so far
int[] stateCount = new int[3];
for (int iGen = 0; iGen < height; iGen++) {
// Search from middle outwards
int i = middleI + ((iGen & 0x01) == 0 ? (iGen + 1) / 2 : -((iGen + 1) / 2));
stateCount[0] = 0;
stateCount[1] = 0;
stateCount[2] = 0;
int j = startX;
// Burn off leading white pixels before anything else; if we start in the middle of
// a white run, it doesn't make sense to count its length, since we don't know if the
// white run continued to the left of the start point
while (j < maxJ && !image.get(j, i)) {
j++;
}
int currentState = 0;
while (j < maxJ) {
if (image.get(j, i)) {
// Black pixel
if (currentState == 1) { // Counting black pixels
stateCount[1]++;
} else { // Counting white pixels
if (currentState == 2) { // A winner?
if (foundPatternCross(stateCount)) { // Yes
AlignmentPattern confirmed = handlePossibleCenter(stateCount, i, j);
if (confirmed != null) {
return confirmed;
}
}
stateCount[0] = stateCount[2];
stateCount[1] = 1;
stateCount[2] = 0;
currentState = 1;
} else {
stateCount[++currentState]++;
}
}
} else { // White pixel
if (currentState == 1) { // Counting black pixels
currentState++;
}
stateCount[currentState]++;
}
j++;
}
if (foundPatternCross(stateCount)) {
AlignmentPattern confirmed = handlePossibleCenter(stateCount, i, maxJ);
if (confirmed != null) {
return confirmed;
}
}
}
// Hmm, nothing we saw was observed and confirmed twice. If we had
// any guess at all, return it.
if (!possibleCenters.isEmpty()) {
return possibleCenters.get(0);
}
throw NotFoundException.getNotFoundInstance();
} | 3.68 |
hbase_CommonFSUtils_setStoragePolicy | /**
* Sets storage policy for given path. If the passed path is a directory, we'll set the storage
* policy for all files created in the future in said directory. Note that this change in storage
* policy takes place at the FileSystem level; it will persist beyond this RS's lifecycle. If
* we're running on a version of FileSystem that doesn't support the given storage policy (or
* storage policies at all), then we'll issue a log message and continue. See
* http://hadoop.apache.org/docs/r2.6.0/hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html
* @param fs We only do anything it implements a setStoragePolicy method
* @param path the Path whose storage policy is to be set
* @param storagePolicy Policy to set on <code>path</code>; see hadoop 2.6+
* org.apache.hadoop.hdfs.protocol.HdfsConstants for possible list e.g
* 'COLD', 'WARM', 'HOT', 'ONE_SSD', 'ALL_SSD', 'LAZY_PERSIST'.
*/
public static void setStoragePolicy(final FileSystem fs, final Path path,
final String storagePolicy) {
try {
setStoragePolicy(fs, path, storagePolicy, false);
} catch (IOException e) {
// should never arrive here
LOG.warn("We have chosen not to throw exception but some unexpectedly thrown out", e);
}
} | 3.68 |
pulsar_TripleLongPriorityQueue_bytesCapacity | /**
* The amount of memory used to back the priority queue.
*/
public long bytesCapacity() {
return array.bytesCapacity();
} | 3.68 |
flink_BlobServer_createTemporaryFilename | /**
* Returns a temporary file inside the BLOB server's incoming directory.
*
* @return a temporary file inside the BLOB server's incoming directory
* @throws IOException if creating the directory fails
*/
File createTemporaryFilename() throws IOException {
return new File(
BlobUtils.getIncomingDirectory(storageDir.deref()),
String.format("temp-%08d", tempFileCounter.getAndIncrement()));
} | 3.68 |
framework_DefaultConnectionStateHandler_handleRecoverableError | /**
* Called whenever an error occurs in communication which should be handled
* by showing the reconnect dialog and retrying communication until
* successful again.
*
* @param type
* The type of failure detected
* @param payload
* The message which did not reach the server, or null if no
* message was involved (heartbeat or push connection failed)
*/
protected void handleRecoverableError(Type type, final JsonObject payload) {
debug("handleTemporaryError(" + type + ")");
if (!connection.isApplicationRunning()) {
return;
}
if (!isReconnecting()) {
// First problem encounter
reconnectionCause = type;
getLogger().warning("Reconnecting because of " + type + " failure");
// Precaution only as there should never be a dialog at this point
// and no timer running
stopDialogTimer();
if (isDialogVisible()) {
hideDialog();
}
// Show dialog after grace period, still continue to try to
// reconnect even before it is shown
dialogShowTimer.schedule(getConfiguration().dialogGracePeriod);
} else {
// We are currently trying to reconnect
// Priority is HEARTBEAT -> PUSH -> XHR
// If a higher priority issues is resolved, we can assume the lower
// one will be also
if (type.isHigherPriorityThan(reconnectionCause)) {
getLogger().warning(
"Now reconnecting because of " + type + " failure");
reconnectionCause = type;
}
}
if (reconnectionCause != type) {
return;
}
reconnectAttempt++;
getLogger()
.info("Reconnect attempt " + reconnectAttempt + " for " + type);
if (reconnectAttempt >= getConfiguration().reconnectAttempts) {
// Max attempts reached, stop trying
giveUp();
} else {
updateDialog();
scheduleReconnect(payload);
}
} | 3.68 |
hadoop_TaskInfo_getInputBytes | /**
* @return Raw bytes read from the FileSystem into the task. Note that this
* may not always match the input bytes to the task.
*/
public long getInputBytes() {
return bytesIn;
} | 3.68 |
flink_PojoSerializer_createRegisteredSubclassSerializers | /**
* Creates an array of serializers for provided list of registered subclasses. Order of returned
* serializers will correspond to order of provided subclasses.
*/
private static TypeSerializer<?>[] createRegisteredSubclassSerializers(
LinkedHashSet<Class<?>> registeredSubclasses, ExecutionConfig executionConfig) {
final TypeSerializer<?>[] subclassSerializers =
new TypeSerializer[registeredSubclasses.size()];
int i = 0;
for (Class<?> registeredClass : registeredSubclasses) {
subclassSerializers[i] =
TypeExtractor.createTypeInfo(registeredClass).createSerializer(executionConfig);
i++;
}
return subclassSerializers;
} | 3.68 |
framework_ListSorter_sort | /**
* Apply sorting to the current ListDataSource.
*
* @param order
* the sort order list provided by the grid sort event
*/
@SuppressWarnings({ "rawtypes", "unchecked" })
private void sort(final List<SortOrder> order) {
DataSource<T> ds = grid.getDataSource();
if (!(ds instanceof ListDataSource)) {
throw new IllegalStateException(
"Grid " + grid + " data source is not a ListDataSource!");
}
((ListDataSource<T>) ds).sort((a, b) -> {
for (SortOrder o : order) {
Grid.Column column = o.getColumn();
Comparator cmp = comparators.get(column);
int result = 0;
Object valueA = column.getValue(a);
Object valueB = column.getValue(b);
if (cmp != null) {
result = cmp.compare(valueA, valueB);
} else {
if (!(valueA instanceof Comparable)) {
throw new IllegalStateException("Column " + column
+ " has no assigned comparator and value "
+ valueA + " isn't naturally comparable");
}
result = ((Comparable) valueA).compareTo(valueB);
}
if (result != 0) {
return o.getDirection() == SortDirection.ASCENDING ? result
: -result;
}
}
if (!order.isEmpty()) {
return order.get(0).getDirection() == SortDirection.ASCENDING
? a.hashCode() - b.hashCode()
: b.hashCode() - a.hashCode();
}
return a.hashCode() - b.hashCode();
});
} | 3.68 |
hudi_AvroInternalSchemaConverter_isOptional | /** Check whether the current Avro schema is optional (a union with null). */
public static boolean isOptional(Schema schema) {
if (schema.getType() == UNION && schema.getTypes().size() == 2) {
return schema.getTypes().get(0).getType() == Schema.Type.NULL || schema.getTypes().get(1).getType() == Schema.Type.NULL;
}
return false;
} | 3.68 |
flink_DataSet_union | /**
* Creates a union of this DataSet with another DataSet. The other DataSet must be of the same
* data type.
*
* @param other The other DataSet which is unioned with the current DataSet.
* @return The resulting DataSet.
*/
public UnionOperator<T> union(DataSet<T> other) {
return new UnionOperator<>(this, other, Utils.getCallLocationName());
} | 3.68 |
flink_RestartPipelinedRegionFailoverStrategy_getFailoverRegion | /**
* Returns the failover region that contains the given execution vertex.
*
* @return the failover region that contains the given execution vertex
*/
@VisibleForTesting
public SchedulingPipelinedRegion getFailoverRegion(ExecutionVertexID vertexID) {
return topology.getPipelinedRegionOfVertex(vertexID);
} | 3.68 |
hadoop_PerGpuTemperature_getMaxGpuTemp | /**
* Get the maximum possible GPU temperature in degrees Celsius.
* @return the maximum temperature
*/
@XmlJavaTypeAdapter(PerGpuDeviceInformation.StrToFloatBeforeSpaceAdapter.class)
@XmlElement(name = "gpu_temp_max_threshold")
public Float getMaxGpuTemp() {
return maxGpuTemp;
} | 3.68 |
framework_AbstractStringToNumberConverter_convertToPresentation | /*
* (non-Javadoc)
*
* @see
* com.vaadin.data.util.converter.Converter#convertToPresentation(java.lang
* .Object, java.util.Locale)
*/
@Override
public String convertToPresentation(T value,
Class<? extends String> targetType, Locale locale)
throws ConversionException {
if (value == null) {
return null;
}
return getFormat(locale).format(value);
} | 3.68 |
pulsar_MultiTopicsConsumerImpl_messageReceived | // Must be called from the internalPinnedExecutor thread
private void messageReceived(ConsumerImpl<T> consumer, Message<T> message) {
checkArgument(message instanceof MessageImpl);
TopicMessageImpl<T> topicMessage = new TopicMessageImpl<>(consumer.getTopic(), message, consumer);
if (log.isDebugEnabled()) {
log.debug("[{}][{}] Received message from topics-consumer {}",
topic, subscription, message.getMessageId());
}
// if asyncReceive is waiting : return message to callback without adding to incomingMessages queue
CompletableFuture<Message<T>> receivedFuture = nextPendingReceive();
if (receivedFuture != null) {
unAckedMessageTracker.add(topicMessage.getMessageId(), topicMessage.getRedeliveryCount());
completePendingReceive(receivedFuture, topicMessage);
} else if (enqueueMessageAndCheckBatchReceive(topicMessage) && hasPendingBatchReceive()) {
notifyPendingBatchReceivedCallBack();
}
tryTriggerListener();
} | 3.68 |
hbase_QuotaCache_getQuotaState | /**
* Returns the QuotaState requested. If the quota info is not in cache an empty one will be
* returned and the quota request will be enqueued for the next cache refresh.
*/
private <K> QuotaState getQuotaState(final ConcurrentMap<K, QuotaState> quotasMap, final K key) {
return computeIfAbsent(quotasMap, key, QuotaState::new, this::triggerCacheRefresh);
} | 3.68 |
flink_TwoInputTransformation_getInput2 | /** Returns the second input {@code Transformation} of this {@code TwoInputTransformation}. */
public Transformation<IN2> getInput2() {
return input2;
} | 3.68 |
hbase_PageFilter_parseFrom | /**
* Parse a serialized representation of {@link PageFilter}
* @param pbBytes A pb serialized {@link PageFilter} instance
* @return An instance of {@link PageFilter} made from <code>bytes</code>
* @throws DeserializationException if an error occurred
* @see #toByteArray
*/
public static PageFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.PageFilter proto;
try {
proto = FilterProtos.PageFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
return new PageFilter(proto.getPageSize());
} | 3.68 |
hadoop_TopologyBuilder_process | /**
* Process a collection of JobConf {@link Properties}. We do not restrict it
* to be called once.
*
* @param conf
* The job conf properties to be added.
*/
public void process(Properties conf) {
// no code
} | 3.68 |
hadoop_BlockStorageMovementNeeded_get | /**
* Gets the satisfier files for which a block storage movement check is necessary,
* and makes the movement if required.
*
* @return satisfier files
*/
public synchronized ItemInfo get() {
return storageMovementNeeded.poll();
} | 3.68 |
rocketmq-connect_WorkerTask_recordMultiple | /**
* Record a batch of records in the task metrics.
*
* @param size the number of records in the batch
*/
protected void recordMultiple(int size) {
taskMetricsGroup.recordMultiple(size);
} | 3.68 |
flink_HiveParserSemanticAnalyzer_getExprNodeDescCached | // Find ExprNodeDesc for the expression cached in the HiveParserRowResolver. Returns null if it
// does not exist.
private ExprNodeDesc getExprNodeDescCached(HiveParserASTNode expr, HiveParserRowResolver input)
throws SemanticException {
ColumnInfo colInfo = input.getExpression(expr);
if (colInfo != null) {
HiveParserASTNode source = input.getExpressionSource(expr);
if (source != null) {
unparseTranslator.addCopyTranslation(expr, source);
}
return new ExprNodeColumnDesc(
colInfo.getType(),
colInfo.getInternalName(),
colInfo.getTabAlias(),
colInfo.getIsVirtualCol(),
colInfo.isSkewedCol());
}
return null;
} | 3.68 |
hadoop_BlockBlobAppendStream_setMaxBlockSize | /**
* Set payload size of the stream.
* It is intended to be used for unit testing purposes only.
*/
@VisibleForTesting
synchronized void setMaxBlockSize(int size) {
maxBlockSize.set(size);
// it is for testing only so we can abandon the previously allocated
// payload
this.outBuffer = ByteBuffer.allocate(maxBlockSize.get());
} | 3.68 |
flink_EnvironmentSettings_inStreamingMode | /** Sets that the components should work in a streaming mode. Enabled by default. */
public Builder inStreamingMode() {
configuration.set(RUNTIME_MODE, STREAMING);
return this;
} | 3.68 |
hudi_HoodieBackedTableMetadata_lookupKeysFromFileSlice | /**
* Lookup list of keys from a single file slice.
*
* @param partitionName Name of the partition
* @param keys The list of keys to lookup
* @param fileSlice The file slice to read
* @return A {@code Map} of key name to {@code HoodieRecord} for the keys which were found in the file slice
*/
private Map<String, HoodieRecord<HoodieMetadataPayload>> lookupKeysFromFileSlice(String partitionName, List<String> keys, FileSlice fileSlice) {
Pair<HoodieSeekingFileReader<?>, HoodieMetadataLogRecordReader> readers = getOrCreateReaders(partitionName, fileSlice);
try {
List<Long> timings = new ArrayList<>(1);
HoodieSeekingFileReader<?> baseFileReader = readers.getKey();
HoodieMetadataLogRecordReader logRecordScanner = readers.getRight();
if (baseFileReader == null && logRecordScanner == null) {
return Collections.emptyMap();
}
// Sort it here once so that we don't need to sort individually for base file and for each individual log files.
List<String> sortedKeys = new ArrayList<>(keys);
Collections.sort(sortedKeys);
boolean fullKeys = true;
Map<String, HoodieRecord<HoodieMetadataPayload>> logRecords = readLogRecords(logRecordScanner, sortedKeys, fullKeys, timings);
return readFromBaseAndMergeWithLogRecords(baseFileReader, sortedKeys, fullKeys, logRecords, timings, partitionName);
} catch (IOException ioe) {
throw new HoodieIOException("Error merging records from metadata table for " + keys.size() + " key : ", ioe);
} finally {
if (!reuse) {
closeReader(readers);
}
}
} | 3.68 |
framework_Panel_setScrollTop | /*
* (non-Javadoc)
*
* @see com.vaadin.server.Scrollable#setScrollTop(int)
*/
@Override
public void setScrollTop(int scrollTop) {
if (scrollTop < 0) {
throw new IllegalArgumentException(
"Scroll offset must be at least 0");
}
getState().scrollTop = scrollTop;
} | 3.68 |
hudi_HoodieInputFormatUtils_getTableMetaClientByPartitionPath | /**
* Extract HoodieTableMetaClient by partition path.
*
* @param conf The hadoop conf
* @param partitions The partitions
* @return partition path to table meta client mapping
*/
public static Map<Path, HoodieTableMetaClient> getTableMetaClientByPartitionPath(Configuration conf, Set<Path> partitions) {
Map<Path, HoodieTableMetaClient> metaClientMap = new HashMap<>();
return partitions.stream().collect(Collectors.toMap(Function.identity(), p -> {
try {
HoodieTableMetaClient metaClient = getTableMetaClientForBasePathUnchecked(conf, p);
metaClientMap.put(p, metaClient);
return metaClient;
} catch (IOException e) {
throw new HoodieIOException("Error creating hoodie meta client against : " + p, e);
}
}));
} | 3.68 |
dubbo_AbstractAnnotationBeanPostProcessor_getInjectedObject | /**
* Get injected-object from specified {@link AnnotationAttributes annotation attributes} and Bean Class
*
* @param attributes {@link AnnotationAttributes the annotation attributes}
* @param bean Current bean that will be injected
* @param beanName Current bean name that will be injected
* @param injectedType the type of injected-object
* @param injectedElement {@link AnnotatedInjectElement}
* @return An injected object
* @throws Exception If getting is failed
*/
protected Object getInjectedObject(
AnnotationAttributes attributes,
Object bean,
String beanName,
Class<?> injectedType,
AnnotatedInjectElement injectedElement)
throws Exception {
return doGetInjectedBean(attributes, bean, beanName, injectedType, injectedElement);
} | 3.68 |
hadoop_Platform_registerKey | /**
* Associate a key class with its serializer and platform.
*
* @param keyClassName map output key class name
* @param key key serializer class
*/
protected void registerKey(String keyClassName, Class<?> key) throws IOException {
serialization.register(keyClassName, key);
keyClassNames.add(keyClassName);
} | 3.68 |
morf_SchemaValidator_validateColumnNames | /**
* Validates that a {@link Table} or {@link View}'s {@link Column}s meet the naming rules.
*
* @param columnNames The names of the columns to validate.
* @param tableOrViewName The Table or View on which to validate columns.
*/
private void validateColumnNames(Collection<String> columnNames, String tableOrViewName) {
for (String columnName : columnNames) {
if (!isEntityNameLengthValid(columnName)) {
validationFailures.add("Name of column [" + columnName + "] on [" + tableOrViewName + "] is not allowed - it is over " + MAX_LENGTH + " characters");
}
if (isSQLReservedWord(columnName)) {
validationFailures.add("Name of column [" + columnName + "] on [" + tableOrViewName + "] is not allowed - it is an SQL reserved word");
}
if (!isNameConventional(columnName)) {
validationFailures.add("Name of column [" + columnName + "] on [" + tableOrViewName + "] is not allowed - it must match " + validNamePattern.toString());
}
}
} | 3.68 |
hadoop_XMLParser_valuesFromXMLString | /**
* @param xml An XML string
* @param field The field whose value(s) should be extracted
* @return List of the field's values.
*/
private static List<String> valuesFromXMLString(String xml, String field) {
Matcher m = Pattern.compile("<" + field + ">(.+?)</" + field + ">")
.matcher(xml);
List<String> found = new ArrayList<>();
while (m.find()) {
found.add(m.group(1));
}
return found;
} | 3.68 |
framework_VisibilityChangeEvent_getType | /**
* Returns the {@link Type} used to register this event.
*
* @return the type
*/
public static Type<VisibilityChangeHandler> getType() {
if (type == null) {
type = new Type<>();
}
return type;
} | 3.68 |
hbase_ReportMakingVisitor_getReport | /**
* Do not call until after {@link #close()}. Will throw a {@link RuntimeException} if you do.
*/
CatalogJanitorReport getReport() {
if (!this.closed) {
throw new RuntimeException("Report not ready until after close()");
}
return this.report;
} | 3.68 |
dubbo_AccessLogData_getServiceName | /**
* Return the service name of the access log entry.
*
* @return the service name
*/
public String getServiceName() {
return get(SERVICE).toString();
} | 3.68 |
hadoop_STSClientFactory_createClientConnection | /**
* Create an STS Client instance.
* @param stsClient STS instance
* @param invoker invoker to use
* @return an STS client bonded to that interface.
*/
public static STSClient createClientConnection(
final StsClient stsClient,
final Invoker invoker) {
return new STSClient(stsClient, invoker);
} | 3.68 |
hadoop_RMNode_getAllocatedContainerResource | /**
* The total allocated resources to containers.
* This will include the sum of Guaranteed and Opportunistic
* containers queued + running + paused on the node.
* @return the total allocated resources, including all Guaranteed and
* Opportunistic containers in queued, running and paused states.
*/
default Resource getAllocatedContainerResource() {
return Resources.none();
} | 3.68 |
framework_AbstractJavaScriptComponent_addFunction | /**
* Register a {@link JavaScriptFunction} that can be called from the
* JavaScript using the provided name. A JavaScript function with the
* provided name will be added to the connector wrapper object (initially
* available as <code>this</code>). Calling that JavaScript function will
* cause the call method in the registered {@link JavaScriptFunction} to be
* invoked with the same arguments.
*
* @param functionName
* the name that should be used for client-side function
* @param function
* the {@link JavaScriptFunction} object that will be invoked
* when the JavaScript function is called
*/
protected void addFunction(String functionName,
JavaScriptFunction function) {
callbackHelper.registerCallback(functionName, function);
} | 3.68 |
morf_ChangePrimaryKeyColumns_assertExistingPrimaryKey | /**
* Verify that the "from" position actually matches the schema we have been given
* @param from The from position
* @param table The target table
*/
protected void assertExistingPrimaryKey(List<String> from, Table table) {
List<String> fromUpperCase = toUpperCase(from);
List<String> existingUpperCase = upperCaseNamesOfColumns(primaryKeysForTable(table));
if (!fromUpperCase.equals(existingUpperCase)) {
throw new RuntimeException(String.format("Expected existing primary key columns do not match schema. Expected: %s, schema: %S", fromUpperCase, existingUpperCase));
}
} | 3.68 |
morf_RecreateOracleSequences_getDescription | /**
* @see org.alfasoftware.morf.upgrade.UpgradeStep#getDescription()
*/
@Override
public String getDescription() {
return "Triggers recreation of Oracle table sequences.";
} | 3.68 |
hbase_Segment_tailSet | /**
* Returns a subset of the segment cell set, which starts with the given cell
* @param firstCell a cell in the segment
* @return a subset of the segment cell set, which starts with the given cell
*/
protected SortedSet<Cell> tailSet(Cell firstCell) {
return getCellSet().tailSet(firstCell);
} | 3.68 |
zxing_AddressBookResultHandler_getDisplayContents | // Overridden so we can hyphenate phone numbers, format birthdays, and bold the name.
@Override
public CharSequence getDisplayContents() {
AddressBookParsedResult result = (AddressBookParsedResult) getResult();
StringBuilder contents = new StringBuilder(100);
ParsedResult.maybeAppend(result.getNames(), contents);
int namesLength = contents.length();
String pronunciation = result.getPronunciation();
if (pronunciation != null && !pronunciation.isEmpty()) {
contents.append("\n(");
contents.append(pronunciation);
contents.append(')');
}
ParsedResult.maybeAppend(result.getTitle(), contents);
ParsedResult.maybeAppend(result.getOrg(), contents);
ParsedResult.maybeAppend(result.getAddresses(), contents);
String[] numbers = result.getPhoneNumbers();
if (numbers != null) {
for (String number : numbers) {
if (number != null) {
ParsedResult.maybeAppend(formatPhone(number), contents);
}
}
}
ParsedResult.maybeAppend(result.getEmails(), contents);
ParsedResult.maybeAppend(result.getURLs(), contents);
String birthday = result.getBirthday();
if (birthday != null && !birthday.isEmpty()) {
long date = parseDate(birthday);
if (date >= 0L) {
ParsedResult.maybeAppend(DateFormat.getDateInstance(DateFormat.MEDIUM).format(date), contents);
}
}
ParsedResult.maybeAppend(result.getNote(), contents);
if (namesLength > 0) {
// Bold the full name to make it stand out a bit.
Spannable styled = new SpannableString(contents.toString());
styled.setSpan(new StyleSpan(Typeface.BOLD), 0, namesLength, 0);
return styled;
} else {
return contents.toString();
}
} | 3.68 |
flink_AbstractMapTypeInfo_getKeyTypeInfo | /**
* Returns the type information for the keys in the map.
*
* @return The type information for the keys in the map.
*/
public TypeInformation<K> getKeyTypeInfo() {
return keyTypeInfo;
} | 3.68 |
querydsl_AbstractSQLQuery_setStatementOptions | /**
* Set the options to be applied to the JDBC statements of this query
*
* @param statementOptions options to be applied to statements
*/
public void setStatementOptions(StatementOptions statementOptions) {
this.statementOptions = statementOptions;
} | 3.68 |
flink_Tuple3_toString | /**
* Creates a string representation of the tuple in the form (f0, f1, f2), where the individual
* fields are the value returned by calling {@link Object#toString} on that field.
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
return "("
+ StringUtils.arrayAwareToString(this.f0)
+ ","
+ StringUtils.arrayAwareToString(this.f1)
+ ","
+ StringUtils.arrayAwareToString(this.f2)
+ ")";
} | 3.68 |
framework_ApplicationConnection_getConnectionStateHandler | /**
* Gets the communication error handler for this application.
*
* @since 7.6
* @return the connection state handler
*/
public ConnectionStateHandler getConnectionStateHandler() {
return connectionStateHandler;
} | 3.68 |
flink_FileMergingSnapshotManagerBase_generatePhysicalFilePath | /**
* Generate a file path for a physical file.
*
* @param dirPath the parent directory path for the physical file.
* @return the generated file path for a physical file.
*/
protected Path generatePhysicalFilePath(Path dirPath) {
// this must be called after initFileSystem() is called
// so the checkpoint directories must be not null if we reach here
final String fileName = UUID.randomUUID().toString();
return new Path(dirPath, fileName);
} | 3.68 |
flink_DefaultExecutionGraph_attachJobVertices | /** Attach job vertices without initializing them. */
private void attachJobVertices(List<JobVertex> topologicallySorted) throws JobException {
for (JobVertex jobVertex : topologicallySorted) {
if (jobVertex.isInputVertex() && !jobVertex.isStoppable()) {
this.isStoppable = false;
}
VertexParallelismInformation parallelismInfo =
parallelismStore.getParallelismInfo(jobVertex.getID());
// create the execution job vertex and attach it to the graph
ExecutionJobVertex ejv =
executionJobVertexFactory.createExecutionJobVertex(
this, jobVertex, parallelismInfo);
ExecutionJobVertex previousTask = this.tasks.putIfAbsent(jobVertex.getID(), ejv);
if (previousTask != null) {
throw new JobException(
String.format(
"Encountered two job vertices with ID %s : previous=[%s] / new=[%s]",
jobVertex.getID(), ejv, previousTask));
}
this.verticesInCreationOrder.add(ejv);
this.numJobVerticesTotal++;
}
} | 3.68 |
hbase_HBaseTestingUtility_shutdownMiniCluster | /**
* Stops mini hbase, zk, and hdfs clusters.
* @see #startMiniCluster(int)
*/
public void shutdownMiniCluster() throws IOException {
LOG.info("Shutting down minicluster");
shutdownMiniHBaseCluster();
shutdownMiniDFSCluster();
shutdownMiniZKCluster();
cleanupTestDir();
miniClusterRunning = false;
LOG.info("Minicluster is down");
} | 3.68 |
hudi_HoodieHiveUtils_getDateWriteable | /**
* Get a date writable object from an int value.
* Hive 3 uses DateWritableV2 to build date objects and Hive 2 uses DateWritable,
* so we need to initialize the date according to the Hive version.
*/
public static Writable getDateWriteable(int value) {
return HIVE_SHIM.getDateWriteable(value);
} | 3.68 |
hbase_MasterFeature_bindFactory | /**
* Helper method for smoothing over use of {@link SupplierFactoryAdapter}. Inspired by internal
* implementation details of jersey itself.
*/
private <T> ServiceBindingBuilder<T> bindFactory(Supplier<T> supplier) {
return bindFactory(new SupplierFactoryAdapter<>(supplier));
} | 3.68 |
framework_InfoSection_getContent | /*
* (non-Javadoc)
*
* @see com.vaadin.client.debug.internal.Section#getContent()
*/
@Override
public Widget getContent() {
return content;
} | 3.68 |
hadoop_SubApplicationRowKey_decode | /*
* (non-Javadoc)
*
* Decodes a sub application row key of the form
* subAppUserId!clusterId!entityType!entityPrefix!entityId!userId
*
* subAppUserId is usually the doAsUser.
* userId is the yarn user that the AM runs as.
*
* represented in byte format
* and converts it into an SubApplicationRowKey object.
*
* @see org.apache.hadoop.yarn.server.timelineservice.storage.common
* .KeyConverter#decode(byte[])
*/
@Override
public SubApplicationRowKey decode(byte[] rowKey) {
byte[][] rowKeyComponents =
Separator.QUALIFIERS.split(rowKey, SEGMENT_SIZES);
if (rowKeyComponents.length != 6) {
throw new IllegalArgumentException(
"the row key is not valid for " + "a sub app");
}
String subAppUserId =
Separator.decode(Bytes.toString(rowKeyComponents[0]),
Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
String clusterId = Separator.decode(Bytes.toString(rowKeyComponents[1]),
Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
String entityType = Separator.decode(Bytes.toString(rowKeyComponents[2]),
Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
Long entityPrefixId = Bytes.toLong(rowKeyComponents[3]);
String entityId = Separator.decode(Bytes.toString(rowKeyComponents[4]),
Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
String userId =
Separator.decode(Bytes.toString(rowKeyComponents[5]),
Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
return new SubApplicationRowKey(subAppUserId, clusterId, entityType,
entityPrefixId, entityId, userId);
} | 3.68 |
querydsl_GroupBy_sortedMap | /**
* Create a new aggregating map expression using a backing TreeMap using the given comparator
*
* @param key key for the map entries
* @param value value for the map entries
* @param comparator comparator for the created TreeMap instances
* @return wrapper expression
*/
public static <K, V, T, U> AbstractGroupExpression<Pair<K, V>, SortedMap<T, U>> sortedMap(GroupExpression<K, T> key,
GroupExpression<V, U> value,
Comparator<? super T> comparator) {
return new GMap.Mixin<K, V, T, U, SortedMap<T, U>>(key, value, GMap.createSorted(QPair.create(key, value), comparator));
} | 3.68 |
flink_SplittableIterator_getSplit | /**
* Splits this iterator into <i>n</i> partitions and returns the <i>i-th</i> partition out of
* those.
*
* @param num The partition to return (<i>i</i>).
* @param numPartitions The number of partitions to split into (<i>n</i>).
* @return The iterator for the partition.
*/
public Iterator<T> getSplit(int num, int numPartitions) {
if (numPartitions < 1 || num < 0 || num >= numPartitions) {
throw new IllegalArgumentException();
}
return split(numPartitions)[num];
} | 3.68 |
morf_AbstractSqlDialectTest_verifyBlobColumnCallPrepareStatementParameter | /**
* Expected outcome for calling {@link #callPrepareStatementParameter} with a blob data type in {@link #testPrepareStatementParameter()}
* @param blobColumn the parameter to verify
* @throws SQLException exception
*/
protected void verifyBlobColumnCallPrepareStatementParameter(SqlParameter blobColumn) throws SQLException {
verify(callPrepareStatementParameter(blobColumn, null)).setBlob(Mockito.eq(blobColumn), Mockito.argThat(new ByteArrayMatcher(new byte[] {})));
verify(callPrepareStatementParameter(blobColumn, "QUJD")).setBlob(Mockito.eq(blobColumn), Mockito.argThat(new ByteArrayMatcher(new byte[] {65 , 66 , 67})));
} | 3.68 |
AreaShop_FriendsFeature_clearFriends | /**
* Remove all friends that are added to this region.
*/
public void clearFriends() {
getRegion().setSetting("general.friends", null);
} | 3.68 |
hadoop_IOStatisticsSnapshot_readObject | /**
* Deserialize by loading each TreeMap, and building concurrent
* hash maps from them.
*
* @param s ObjectInputStream.
* @throws IOException raised on errors performing I/O.
* @throws ClassNotFoundException class not found exception
*/
private void readObject(final ObjectInputStream s)
throws IOException, ClassNotFoundException {
// read in core
s.defaultReadObject();
// and rebuild a concurrent hashmap from every serialized tree map
// read back from the stream.
counters = new ConcurrentHashMap<>(
(TreeMap<String, Long>) s.readObject());
gauges = new ConcurrentHashMap<>(
(TreeMap<String, Long>) s.readObject());
minimums = new ConcurrentHashMap<>(
(TreeMap<String, Long>) s.readObject());
maximums = new ConcurrentHashMap<>(
(TreeMap<String, Long>) s.readObject());
meanStatistics = new ConcurrentHashMap<>(
(TreeMap<String, MeanStatistic>) s.readObject());
} | 3.68 |
hadoop_FlowRunRowKey_toString | /**
* returns the Flow Key as a verbose String output.
* @return String
*/
@Override
public String toString() {
StringBuilder flowKeyStr = new StringBuilder();
flowKeyStr.append("{clusterId=" + clusterId)
.append(" userId=" + userId)
.append(" flowName=" + flowName)
.append(" flowRunId=")
.append(flowRunId)
.append("}");
return flowKeyStr.toString();
} | 3.68 |
hbase_SimpleRpcScheduler_onConfigurationChange | /**
* Resize call queues;
* @param conf new configuration
*/
@Override
public void onConfigurationChange(Configuration conf) {
callExecutor.resizeQueues(conf);
if (priorityExecutor != null) {
priorityExecutor.resizeQueues(conf);
}
if (replicationExecutor != null) {
replicationExecutor.resizeQueues(conf);
}
if (metaTransitionExecutor != null) {
metaTransitionExecutor.resizeQueues(conf);
}
if (bulkloadExecutor != null) {
bulkloadExecutor.resizeQueues(conf);
}
String callQueueType =
conf.get(RpcExecutor.CALL_QUEUE_TYPE_CONF_KEY, RpcExecutor.CALL_QUEUE_TYPE_CONF_DEFAULT);
if (
RpcExecutor.isCodelQueueType(callQueueType) || RpcExecutor.isPluggableQueueType(callQueueType)
) {
callExecutor.onConfigurationChange(conf);
}
} | 3.68 |
framework_VerticalLayoutConnector_getWidget | /*
* (non-Javadoc)
*
* @see com.vaadin.client.ui.orderedlayout.AbstractOrderedLayoutConnector#
* getWidget ()
*/
@Override
public VVerticalLayout getWidget() {
return (VVerticalLayout) super.getWidget();
} | 3.68 |
framework_VCalendar_getDropHandler | /*
* (non-Javadoc)
*
* @see com.vaadin.client.ui.dd.VHasDropHandler#getDropHandler()
*/
@Override
public CalendarDropHandler getDropHandler() {
return dropHandler;
} | 3.68 |
shardingsphere-elasticjob_JobNodeStorage_updateJobNode | /**
* Update job node.
*
* @param node node
* @param value data of job node
*/
public void updateJobNode(final String node, final Object value) {
regCenter.update(jobNodePath.getFullPath(node), value.toString());
} | 3.68 |
flink_ConnectedStreams_process | /**
* Applies the given {@link KeyedCoProcessFunction} on the connected input streams, thereby
* creating a transformed output stream.
*
* <p>The function will be called for every element in the input streams and can produce zero or
* more output elements. Contrary to the {@link #flatMap(CoFlatMapFunction)} function, this
* function can also query the time and set timers. When reacting to the firing of set timers
* the function can directly emit elements and/or register yet more timers.
*
* @param keyedCoProcessFunction The {@link KeyedCoProcessFunction} that is called for each
* element in the stream.
* @param <R> The type of elements emitted by the {@code CoProcessFunction}.
* @return The transformed {@link DataStream}.
*/
@Internal
public <K, R> SingleOutputStreamOperator<R> process(
KeyedCoProcessFunction<K, IN1, IN2, R> keyedCoProcessFunction,
TypeInformation<R> outputType) {
TwoInputStreamOperator<IN1, IN2, R> operator;
if ((inputStream1 instanceof KeyedStream) && (inputStream2 instanceof KeyedStream)) {
operator = new KeyedCoProcessOperator<>(inputStream1.clean(keyedCoProcessFunction));
} else {
throw new UnsupportedOperationException(
"KeyedCoProcessFunction can only be used "
+ "when both input streams are of type KeyedStream.");
}
return transform("Co-Keyed-Process", outputType, operator);
} | 3.68 |
zxing_BitArray_setRange | /**
* Sets a range of bits.
*
* @param start start of range, inclusive.
* @param end end of range, exclusive
*/
public void setRange(int start, int end) {
if (end < start || start < 0 || end > size) {
throw new IllegalArgumentException();
}
if (end == start) {
return;
}
end--; // will be easier to treat this as the last actually set bit -- inclusive
int firstInt = start / 32;
int lastInt = end / 32;
for (int i = firstInt; i <= lastInt; i++) {
int firstBit = i > firstInt ? 0 : start & 0x1F;
int lastBit = i < lastInt ? 31 : end & 0x1F;
// Ones from firstBit to lastBit, inclusive
int mask = (2 << lastBit) - (1 << firstBit);
bits[i] |= mask;
}
} | 3.68 |
flink_CliFrontend_savepoint | /**
* Executes the SAVEPOINT action.
*
* @param args Command line arguments for the savepoint action.
*/
protected void savepoint(String[] args) throws Exception {
LOG.info("Running 'savepoint' command.");
final Options commandOptions = CliFrontendParser.getSavepointCommandOptions();
final CommandLine commandLine = getCommandLine(commandOptions, args, false);
final SavepointOptions savepointOptions = new SavepointOptions(commandLine);
// evaluate help flag
if (savepointOptions.isPrintHelp()) {
CliFrontendParser.printHelpForSavepoint(customCommandLines);
return;
}
final CustomCommandLine activeCommandLine = validateAndGetActiveCommandLine(commandLine);
if (savepointOptions.isDispose()) {
runClusterAction(
activeCommandLine,
commandLine,
(clusterClient, effectiveConfiguration) ->
disposeSavepoint(
clusterClient,
savepointOptions.getSavepointPath(),
getClientTimeout(effectiveConfiguration)));
} else {
String[] cleanedArgs = savepointOptions.getArgs();
final JobID jobId;
if (cleanedArgs.length >= 1) {
String jobIdString = cleanedArgs[0];
jobId = parseJobId(jobIdString);
} else {
throw new CliArgsException(
"Missing JobID. " + "Specify a Job ID to trigger a savepoint.");
}
final String savepointDirectory;
if (cleanedArgs.length >= 2) {
savepointDirectory = cleanedArgs[1];
} else {
savepointDirectory = null;
}
// Print superfluous arguments
if (cleanedArgs.length >= 3) {
logAndSysout(
"Provided more arguments than required. Ignoring not needed arguments.");
}
runClusterAction(
activeCommandLine,
commandLine,
(clusterClient, effectiveConfiguration) ->
triggerSavepoint(
clusterClient,
jobId,
savepointDirectory,
savepointOptions.getFormatType(),
getClientTimeout(effectiveConfiguration)));
}
} | 3.68 |
hudi_HoodieIngestionService_startService | /**
* The main loop for running ingestion in continuous mode.
*/
@Override
protected Pair<CompletableFuture, ExecutorService> startService() {
ExecutorService executor = Executors.newFixedThreadPool(1);
return Pair.of(CompletableFuture.supplyAsync(() -> {
try {
while (!isShutdownRequested()) {
long ingestionStartEpochMillis = System.currentTimeMillis();
ingestOnce();
boolean requested = requestShutdownIfNeeded(Option.empty());
if (!requested) {
sleepBeforeNextIngestion(ingestionStartEpochMillis);
}
}
} finally {
executor.shutdownNow();
}
return true;
}, executor), executor);
} | 3.68 |
pulsar_PositionAckSetUtil_andAckSet | // This method performs an AND operation on the two ack sets.
public static long[] andAckSet(long[] firstAckSet, long[] secondAckSet) {
BitSetRecyclable thisAckSet = BitSetRecyclable.valueOf(firstAckSet);
BitSetRecyclable otherAckSet = BitSetRecyclable.valueOf(secondAckSet);
thisAckSet.and(otherAckSet);
long[] ackSet = thisAckSet.toLongArray();
thisAckSet.recycle();
otherAckSet.recycle();
return ackSet;
} | 3.68 |
rocketmq-connect_WorkerSinkTask_initializeAndStart | /**
* Initialize and start.
*/
@Override
protected void initializeAndStart() {
Set<String> topics = new SinkConnectorConfig(taskConfig).parseTopicList();
if (org.apache.commons.collections4.CollectionUtils.isEmpty(topics)) {
throw new ConnectException("Sink connector topics config can be null, please check sink connector config info");
}
// sub topics
try {
for (String topic : topics) {
consumer.setPullBatchSize(MAX_MESSAGE_NUM);
consumer.subscribe(topic, "*");
}
if (messageQueueListener == null) {
messageQueueListener = consumer.getMessageQueueListener();
}
consumer.setMessageQueueListener(new MessageQueueListener() {
@Override
public void messageQueueChanged(String subTopic, Set<MessageQueue> mqAll, Set<MessageQueue> mqDivided) {
// update assign message queue
messageQueueListener.messageQueueChanged(subTopic, mqAll, mqDivided);
// listener message queue changed
log.info("Message queue changed start, old message queues offset {}", JSON.toJSONString(messageQueues));
if (isStopping()) {
log.trace("Skipping partition revocation callback as task has already been stopped");
return;
}
// remove and close message queue
log.info("Task {},MessageQueueChanged, old messageQueuesOffsetMap {}", id.toString(), JSON.toJSONString(messageQueues));
removeAndCloseMessageQueue(subTopic, mqDivided);
// add new message queue
assignMessageQueue(mqDivided);
log.info("Task {}, Message queue changed end, new message queues offset {}", id, JSON.toJSONString(messageQueues));
preCommit();
log.info("Message queue changed start, new message queues offset {}", JSON.toJSONString(messageQueues));
}
});
consumer.start();
} catch (MQClientException e) {
log.error("Task {},InitializeAndStart MQClientException", id.toString(), e);
throw new ConnectException(e);
}
log.info("Sink task consumer start. taskConfig {}", JSON.toJSONString(taskConfig));
sinkTask.init(sinkTaskContext);
sinkTask.start(taskConfig);
log.info("{} Sink task finished initialization and start", this);
} | 3.68 |
flink_TypeInference_accumulatorTypeStrategy | /**
* Sets the strategy for inferring the intermediate accumulator data type of a function
* call.
*/
public Builder accumulatorTypeStrategy(TypeStrategy accumulatorTypeStrategy) {
this.accumulatorTypeStrategy =
Preconditions.checkNotNull(
accumulatorTypeStrategy, "Accumulator type strategy must not be null.");
return this;
} | 3.68 |
flink_ClusterEntrypointUtils_createTaskManagerWorkingDirectory | /**
* Creates the working directory for the TaskManager process. This method ensures that the
* working directory exists.
*
* @param configuration to extract the required settings from
* @param envelopedResourceId identifying the TaskManager process
* @return working directory
* @throws IOException if the working directory could not be created
*/
public static DeterminismEnvelope<WorkingDirectory> createTaskManagerWorkingDirectory(
Configuration configuration, DeterminismEnvelope<ResourceID> envelopedResourceId)
throws IOException {
return envelopedResourceId.map(
resourceId ->
WorkingDirectory.create(
generateTaskManagerWorkingDirectoryFile(
configuration, resourceId)));
} | 3.68 |
hadoop_DatanodeAdminProperties_getUpgradeDomain | /**
* Get the upgrade domain of the datanode.
* @return the upgrade domain of the datanode.
*/
public String getUpgradeDomain() {
return upgradeDomain;
} | 3.68 |
hadoop_BlockBlobInputStream_available | /**
* Gets the number of bytes that can be read (or skipped over) without
* performing a network operation.
* @throws IOException IO failure
*/
@Override
public synchronized int available() throws IOException {
checkState();
if (blobInputStream != null) {
return blobInputStream.available();
} else {
return (streamBuffer == null)
? 0
: streamBufferLength - streamBufferPosition;
}
} | 3.68 |
hbase_Pair_newPair | /**
* Constructs a new pair, inferring the type via the passed arguments
* @param <T1> type for first
* @param <T2> type for second
* @param a first element
* @param b second element
* @return a new pair containing the passed arguments
*/
public static <T1, T2> Pair<T1, T2> newPair(T1 a, T2 b) {
return new Pair<>(a, b);
} | 3.68 |
hadoop_StoreContext_makeQualified | /**
* Qualify a path.
*
* @param path path to qualify/normalize
* @return possibly new path.
*/
public Path makeQualified(Path path) {
return contextAccessors.makeQualified(path);
} | 3.68 |
hudi_SanitizationUtils_sanitizeStructTypeForAvro | // TODO(HUDI-5256): Refactor this to use InternalSchema when it is ready.
private static StructType sanitizeStructTypeForAvro(StructType structType, String invalidCharMask) {
StructType sanitizedStructType = new StructType();
StructField[] structFields = structType.fields();
for (StructField s : structFields) {
DataType currFieldDataTypeSanitized = sanitizeDataTypeForAvro(s.dataType(), invalidCharMask);
StructField structFieldCopy = new StructField(HoodieAvroUtils.sanitizeName(s.name(), invalidCharMask),
currFieldDataTypeSanitized, s.nullable(), s.metadata());
sanitizedStructType = sanitizedStructType.add(structFieldCopy);
}
return sanitizedStructType;
} | 3.68 |
hadoop_AzureNativeFileSystemStore_safeDelete | /**
* Deletes the given blob, taking special care that if we get a
* blob-not-found exception upon retrying the operation, we just
* swallow the error since what most probably happened is that
* the first operation succeeded on the server.
* @param blob The blob to delete.
* @param lease Azure blob lease, or null if no lease is to be used.
* @throws StorageException
*/
private void safeDelete(CloudBlobWrapper blob, SelfRenewingLease lease) throws StorageException {
OperationContext operationContext = getInstrumentedContext();
try {
blob.delete(operationContext, lease);
} catch (StorageException e) {
if (!NativeAzureFileSystemHelper.isFileNotFoundException(e)) {
LOG.error("Encountered Storage Exception for delete on Blob: {}"
+ ", Exception Details: {} Error Code: {}",
blob.getUri(), e.getMessage(), e.getErrorCode());
}
// On exception, check that if:
// 1. It's a BlobNotFound exception AND
// 2. It got there after one-or-more retries THEN
// we swallow the exception.
if (e.getErrorCode() != null
&& "BlobNotFound".equals(e.getErrorCode())
&& operationContext.getRequestResults().size() > 1
&& operationContext.getRequestResults().get(0).getException() != null) {
LOG.debug("Swallowing delete exception on retry: {}", e.getMessage());
return;
} else {
throw e;
}
} finally {
if (lease != null) {
lease.free();
}
}
} | 3.68 |
hudi_BaseHoodieWriteClient_startCommit | /**
* Provides a new commit time for a write operation (insert/update/delete/insert_overwrite/insert_overwrite_table) with specified action.
*/
public String startCommit(String actionType, HoodieTableMetaClient metaClient) {
CleanerUtils.rollbackFailedWrites(config.getFailedWritesCleanPolicy(),
HoodieTimeline.COMMIT_ACTION, () -> tableServiceClient.rollbackFailedWrites());
String instantTime = createNewInstantTime();
startCommit(instantTime, actionType, metaClient);
return instantTime;
} | 3.68 |