name | code_snippet | score |
---|---|---|
hudi_RocksDBDAO_prefixSearch | /**
* Perform a prefix search and return a stream of the key-value pairs retrieved.
*
* @param columnFamilyName Column Family Name
* @param prefix Prefix Key
* @param <T> Type of value stored
*/
public <T extends Serializable> Stream<Pair<String, T>> prefixSearch(String columnFamilyName, String prefix) {
ValidationUtils.checkArgument(!closed);
final HoodieTimer timer = HoodieTimer.start();
long timeTakenMicro = 0;
List<Pair<String, T>> results = new LinkedList<>();
try (final RocksIterator it = getRocksDB().newIterator(managedHandlesMap.get(columnFamilyName))) {
it.seek(getUTF8Bytes(prefix));
while (it.isValid() && new String(it.key()).startsWith(prefix)) {
long beginTs = System.nanoTime();
T val = SerializationUtils.deserialize(it.value());
timeTakenMicro += ((System.nanoTime() - beginTs) / 1000);
results.add(Pair.of(new String(it.key()), val));
it.next();
}
}
LOG.info("Prefix Search for (query=" + prefix + ") on " + columnFamilyName + ". Total Time Taken (msec)="
+ timer.endTimer() + ". Serialization Time taken(micro)=" + timeTakenMicro + ", num entries=" + results.size());
return results.stream();
} | 3.68 |
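A minimal usage sketch for the method above (illustrative only: the column family name, the key prefix, and the String value type are assumptions, and `dao` stands for an already-opened RocksDBDAO):
// Hypothetical caller; values in this column family were written via SerializationUtils, so they deserialize as Strings here.
Stream<Pair<String, String>> matches = dao.prefixSearch("view_partitions", "partition=2024/");
matches.forEach(kv -> System.out.println(kv.getLeft() + " -> " + kv.getRight()));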
hudi_RocksDBDAO_prefixDelete | /**
* Perform a prefix delete, removing all entries whose keys start with the given prefix.
*
* @param columnFamilyName Column Family Name
* @param prefix Prefix Key
* @param <T> Type of value stored
*/
public <T extends Serializable> void prefixDelete(String columnFamilyName, String prefix) {
ValidationUtils.checkArgument(!closed);
LOG.info("Prefix DELETE (query=" + prefix + ") on " + columnFamilyName);
final RocksIterator it = getRocksDB().newIterator(managedHandlesMap.get(columnFamilyName));
it.seek(getUTF8Bytes(prefix));
// Find first and last keys to be deleted
String firstEntry = null;
String lastEntry = null;
while (it.isValid() && new String(it.key()).startsWith(prefix)) {
String result = new String(it.key());
it.next();
if (firstEntry == null) {
firstEntry = result;
}
lastEntry = result;
}
it.close();
if (null != firstEntry) {
try {
// deleteRange treats the end key as exclusive, so this removes [firstEntry, lastEntry) but not lastEntry itself
getRocksDB().deleteRange(managedHandlesMap.get(columnFamilyName), getUTF8Bytes(firstEntry), getUTF8Bytes(lastEntry));
// Delete the last entry
getRocksDB().delete(getUTF8Bytes(lastEntry));
} catch (RocksDBException e) {
LOG.error("Got exception performing range delete");
throw new HoodieException(e);
}
}
} | 3.68 |
framework_VUpload_setImmediateMode | /**
* Sets the upload in immediate mode.
*
* @param immediateMode
* {@code true} for immediate mode, {@code false} for
* non-immediate mode
*/
public void setImmediateMode(boolean immediateMode) {
if (this.immediateMode != immediateMode) {
this.immediateMode = immediateMode;
if (immediateMode) {
fu.sinkEvents(Event.ONCHANGE);
fu.sinkEvents(Event.ONFOCUS);
} else {
fu.unsinkEvents(Event.ONCHANGE);
fu.unsinkEvents(Event.ONFOCUS);
}
updateEnabledForSubmitButton();
}
setStyleName(getElement(), CLASSNAME + "-immediate", immediateMode);
} | 3.68 |
dubbo_AbstractAnnotationBeanPostProcessor_needsRefresh | // @Override // since Spring 5.2.4
protected boolean needsRefresh(Class<?> clazz) {
if (this.targetClass == clazz) {
return false;
}
// IGNORE Spring CGLIB enhanced class
if (targetClass.isAssignableFrom(clazz) && clazz.getName().contains("$$EnhancerBySpringCGLIB$$")) {
return false;
}
return true;
} | 3.68 |
hbase_RingBufferTruck_unloadSync | /**
* Unload the truck of its {@link SyncFuture} payload. The internal reference is released.
*/
SyncFuture unloadSync() {
SyncFuture sync = this.sync;
this.sync = null;
this.type = Type.EMPTY;
return sync;
} | 3.68 |
flink_ConsumedPartitionGroup_getIntermediateDataSetID | /** Get the ID of IntermediateDataSet this ConsumedPartitionGroup belongs to. */
public IntermediateDataSetID getIntermediateDataSetID() {
return intermediateDataSetID;
} | 3.68 |
shardingsphere-elasticjob_ElasticJobRegistryCenterConfiguration_zookeeperRegistryCenter | /**
* Create a zookeeper registry center bean via factory.
*
* @param zookeeperProperties zookeeper properties
* @return zookeeper registry center
*/
@Bean(initMethod = "init")
public ZookeeperRegistryCenter zookeeperRegistryCenter(final ZookeeperProperties zookeeperProperties) {
return new ZookeeperRegistryCenter(zookeeperProperties.toZookeeperConfiguration());
} | 3.68 |
hbase_ByteBufferInputStream_read | /**
* Reads up to the next <code>len</code> bytes of data from the buffer into the passed array
* (starting from the given offset).
* @param b the array into which the data is read.
* @param off the start offset in the destination array <code>b</code>
* @param len the maximum number of bytes to read.
* @return the total number of bytes actually read into the buffer, or <code>-1</code> if not even
* 1 byte can be read because the end of the stream has been reached.
*/
@Override
public int read(byte[] b, int off, int len) {
int avail = available();
if (avail <= 0) {
return -1;
}
if (len > avail) {
len = avail;
}
if (len <= 0) {
return 0;
}
ByteBufferUtils.copyFromBufferToArray(b, this.buf, this.buf.position(), off, len);
this.buf.position(this.buf.position() + len); // we should advance the buffer position
return len;
} | 3.68 |
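A short illustrative read loop for the stream above, assuming the class wraps the ByteBuffer handed to its constructor (the constructor itself is not shown in the snippet):
ByteBuffer src = ByteBuffer.wrap("hello hbase".getBytes(StandardCharsets.UTF_8));
ByteBufferInputStream in = new ByteBufferInputStream(src); // constructor shape assumed
byte[] chunk = new byte[8];
int n;
while ((n = in.read(chunk, 0, chunk.length)) != -1) {
System.out.write(chunk, 0, n); // drains the buffer in at most 8-byte reads
}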
AreaShop_GeneralRegion_getFileManager | /**
* Get the FileManager from the plugin.
* @return The FileManager (responsible for saving/loading regions and getting them)
*/
public FileManager getFileManager() {
return plugin.getFileManager();
} | 3.68 |
morf_TableHelper_buildColumnNameList | /**
* Builds a list of column names from the column definitions of the given table.
*
* @param table the table to get the list of column names for
* @return a list of column names for the table
*/
public static List<String> buildColumnNameList(Table table) {
List<String> result = new ArrayList<String>();
for (Column currentColumn : table.columns()) {
result.add(currentColumn.getName());
}
return result;
} | 3.68 |
morf_ChangeColumn_verifyWidthAndScaleChanges | /**
* Verify that any width and scale changes are permitted.
*/
private void verifyWidthAndScaleChanges() {
// Reductions in width of Strings are permitted, although they will only work if the data fits in the narrower column
if (Objects.equals(toColumn.getType(), fromColumn.getType()) && DataType.STRING.equals(fromColumn.getType())) {
// don't do a check
return;
}
boolean scaleCheck = toColumn.getType().hasScale() && fromColumn.getType().hasScale();
boolean widthCheck = toColumn.getType().hasWidth() && fromColumn.getType().hasWidth();
// Oracle does not permit reductions to precision or scale on numeric columns:
// ORA-01440: column to be modified must be empty to decrease precision or scale
if (widthCheck && toColumn.getWidth() < fromColumn.getWidth()) {
throw new IllegalArgumentException(String.format(
"Attempting to change the width of [%s] from %d to %d. Reductions in width/precision are not permitted.",
fromColumn.getName(),
fromColumn.getWidth(),
toColumn.getWidth())
);
}
if (scaleCheck && toColumn.getScale() < fromColumn.getScale()) {
throw new IllegalArgumentException(String.format(
"Attempting to change the scale of [%s] from %d to %d. Reductions in scale are not permitted.",
fromColumn.getName(),
fromColumn.getScale(),
toColumn.getScale())
);
}
int fromInts = fromColumn.getWidth() - fromColumn.getScale();
int toInts = toColumn.getWidth() - toColumn.getScale();
if (scaleCheck && toInts < fromInts) {
throw new IllegalArgumentException(String.format(
"Attempting to change precision-width of [%s] from %d to %d. Reductions of this are not permitted.",
fromColumn.getName(),
fromInts,
toInts)
);
}
} | 3.68 |
hbase_HBaseServerException_isServerOverloaded | /** Returns True if server was considered overloaded when exception was thrown */
public boolean isServerOverloaded() {
return serverOverloaded;
} | 3.68 |
morf_RecordHelper_javaTypeToRecordValue | /**
* Take a java value (int, long, boolean, {@link String}, {@link LocalDate}) and convert it into a string format
* suitable for inclusion in a {@link Record}.
*
* @param value The java value.
* @return The {@link Record} value string
*/
public static String javaTypeToRecordValue(Object value) {
if (value == null) {
return null;
} else if (BigDecimal.class.isInstance(value)) {
return BigDecimal.class.cast(value).toPlainString();
} else {
return value.toString();
}
} | 3.68 |
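The BigDecimal special case above exists because toString() may fall back to scientific notation; a quick JDK-only illustration:
BigDecimal tiny = new BigDecimal("1E-10");
System.out.println(tiny.toString()); // "1E-10" - scientific notation, unsuitable as a Record value
System.out.println(tiny.toPlainString()); // "0.0000000001" - what javaTypeToRecordValue returns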
querydsl_JPAExpressions_selectFrom | /**
* Create a new detached JPQLQuery instance with the given projection
*
* @param expr projection and source
* @param <T>
* @return select(expr).from(expr)
*/
public static <T> JPQLQuery<T> selectFrom(EntityPath<T> expr) {
return select(expr).from(expr);
} | 3.68 |
hbase_Get_setMaxResultsPerColumnFamily | /**
* Set the maximum number of values to return per row per Column Family
* @param limit the maximum number of values returned / row / CF
* @return this for invocation chaining
*/
public Get setMaxResultsPerColumnFamily(int limit) {
this.storeLimit = limit;
return this;
} | 3.68 |
pulsar_NoSplitter_split | /*
* (non-Javadoc)
*
* @see com.beust.jcommander.converters.IParameterSplitter#split(java.lang.String)
*/
@Override
public List<String> split(final String value) {
final List<String> result = new LinkedList<>();
result.add(value);
return result;
} | 3.68 |
flink_BytesMap_growAndRehash | /** @throws EOFException if the map can't allocate much more memory. */
protected void growAndRehash() throws EOFException {
// allocate the new data structures
int required = 2 * bucketSegments.size();
if (required * (long) numBucketsPerSegment > Integer.MAX_VALUE) {
LOG.warn(
"We can't handle more than Integer.MAX_VALUE buckets (eg. because hash functions return int)");
throw new EOFException();
}
int numAllocatedSegments = required - memoryPool.freePages();
if (numAllocatedSegments > 0) {
LOG.warn(
"BytesHashMap can't allocate {} pages, and now used {} pages",
required,
reservedNumBuffers);
throw new EOFException();
}
List<MemorySegment> newBucketSegments = memoryPool.allocateSegments(required);
setBucketVariables(newBucketSegments);
long reHashStartTime = System.currentTimeMillis();
resetBucketSegments(newBucketSegments);
// Re-mask (we don't recompute the hashcode because we stored all 32 bits of it)
for (MemorySegment memorySegment : bucketSegments) {
for (int j = 0; j < numBucketsPerSegment; j++) {
final int recordPointer = memorySegment.getInt(j * BUCKET_SIZE);
if (recordPointer != END_OF_LIST) {
final int hashCode1 =
memorySegment.getInt(j * BUCKET_SIZE + ELEMENT_POINT_LENGTH);
int newPos = hashCode1 & numBucketsMask;
int bucketSegmentIndex = newPos >>> numBucketsPerSegmentBits;
int bucketOffset = (newPos & numBucketsPerSegmentMask) << BUCKET_SIZE_BITS;
int step = STEP_INCREMENT;
long hashCode2 = 0;
while (newBucketSegments.get(bucketSegmentIndex).getInt(bucketOffset)
!= END_OF_LIST) {
if (step == 1) {
hashCode2 = calcSecondHashCode(hashCode1);
}
newPos = (int) ((hashCode1 + step * hashCode2) & numBucketsMask);
// which segment contains the bucket
bucketSegmentIndex = newPos >>> numBucketsPerSegmentBits;
// offset of the bucket in the segment
bucketOffset = (newPos & numBucketsPerSegmentMask) << BUCKET_SIZE_BITS;
step += STEP_INCREMENT;
}
newBucketSegments.get(bucketSegmentIndex).putInt(bucketOffset, recordPointer);
newBucketSegments
.get(bucketSegmentIndex)
.putInt(bucketOffset + ELEMENT_POINT_LENGTH, hashCode1);
}
}
}
LOG.info(
"The rehash take {} ms for {} segments",
(System.currentTimeMillis() - reHashStartTime),
required);
this.memoryPool.returnAll(this.bucketSegments);
this.bucketSegments = newBucketSegments;
} | 3.68 |
flink_ServiceType_getRestPortFromExternalService | /** Get rest port from the external Service. */
public int getRestPortFromExternalService(Service externalService) {
final List<ServicePort> servicePortCandidates =
externalService.getSpec().getPorts().stream()
.filter(x -> x.getName().equals(Constants.REST_PORT_NAME))
.collect(Collectors.toList());
if (servicePortCandidates.isEmpty()) {
throw new RuntimeException(
"Failed to find port \""
+ Constants.REST_PORT_NAME
+ "\" in Service \""
+ externalService.getMetadata().getName()
+ "\"");
}
final ServicePort externalServicePort = servicePortCandidates.get(0);
return getRestPort(externalServicePort);
} | 3.68 |
dubbo_DubboNamespaceHandler_parse | /**
* Override {@link NamespaceHandlerSupport#parse(Element, ParserContext)} method
*
* @param element {@link Element}
* @param parserContext {@link ParserContext}
* @return the parsed {@link BeanDefinition}
* @since 2.7.5
*/
@Override
public BeanDefinition parse(Element element, ParserContext parserContext) {
BeanDefinitionRegistry registry = parserContext.getRegistry();
registerAnnotationConfigProcessors(registry);
// initialize dubbo beans
DubboSpringInitializer.initialize(parserContext.getRegistry());
BeanDefinition beanDefinition = super.parse(element, parserContext);
setSource(beanDefinition);
return beanDefinition;
} | 3.68 |
framework_VCalendar_setFirstDayNumber | /**
* Set the number of the day on which the week starts.
*
* @param dayNumber
* The number of the first day of the week, in the range 1-7
*/
public void setFirstDayNumber(int dayNumber) {
assert (dayNumber >= 1 && dayNumber <= 7);
firstDay = dayNumber;
} | 3.68 |
hadoop_WriteOperationHelper_getRequestFactory | /**
* Get the request factory which uses this store's audit span.
* @return the request factory.
*/
public RequestFactory getRequestFactory() {
return requestFactory;
} | 3.68 |
hadoop_PlacementConstraints_maxCardinality | /**
* Similar to {@link #maxCardinality(String, int, String...)}, but let you
* specify a namespace for the tags, see supported namespaces in
* {@link AllocationTagNamespaceType}.
*
* @param scope the scope of the constraint
* @param tagNamespace the namespace of these tags
* @param maxCardinality determines the maximum number of allocations within
* the scope
* @param allocationTags allocation tags
* @return the resulting placement constraint
*/
public static AbstractConstraint maxCardinality(String scope,
String tagNamespace, int maxCardinality, String... allocationTags) {
return cardinality(scope, tagNamespace, 0, maxCardinality, allocationTags);
} | 3.68 |
hudi_HoodieCombineHiveInputFormat_getCombineSplits | /**
* Create Hive splits based on CombineFileSplit.
*/
private InputSplit[] getCombineSplits(JobConf job, int numSplits, Map<Path, PartitionDesc> pathToPartitionInfo)
throws IOException {
init(job);
Map<Path, ArrayList<String>> pathToAliases = mrwork.getPathToAliases();
Map<String, Operator<? extends OperatorDesc>> aliasToWork = mrwork.getAliasToWork();
/* MOD - Initialize a custom combine input format shim that will call listStatus on the custom inputFormat **/
HoodieCombineHiveInputFormat.HoodieCombineFileInputFormatShim combine = createInputFormatShim();
InputSplit[] splits;
if (combine.getInputPathsShim(job).length == 0) {
throw new IOException("No input paths specified in job");
}
List<InputSplit> result = new ArrayList<>();
// combine splits only from same tables and same partitions. Do not combine splits from multiple
// tables or multiple partitions.
Path[] paths = StringInternUtils.internUriStringsInPathArray(combine.getInputPathsShim(job));
List<Path> inpDirs = new ArrayList<>();
List<Path> inpFiles = new ArrayList<>();
Map<CombinePathInputFormat, CombineFilter> poolMap = new HashMap<>();
Set<Path> poolSet = new HashSet<>();
for (Path path : paths) {
PartitionDesc part = getPartitionFromPath(pathToPartitionInfo, path,
IOPrepareCache.get().allocatePartitionDescMap());
TableDesc tableDesc = part.getTableDesc();
if ((tableDesc != null) && tableDesc.isNonNative()) {
return super.getSplits(job, numSplits);
}
// Use HiveInputFormat if any of the paths is not splittable
Class<?> inputFormatClass = part.getInputFileFormatClass();
String inputFormatClassName = inputFormatClass.getName();
InputFormat inputFormat = getInputFormatFromCache(inputFormatClass, job);
LOG.info("Input Format => " + inputFormatClass.getName());
// **MOD** Set the hoodie filter in the combine
if (inputFormatClass.getName().equals(getParquetInputFormatClassName())) {
combine.setHoodieFilter(true);
} else if (inputFormatClass.getName().equals(getParquetRealtimeInputFormatClassName())) {
LOG.info("Setting hoodie filter and realtime input format");
combine.setHoodieFilter(true);
combine.setRealTime(true);
if (job.get(hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS, "").isEmpty()) {
List<String> partitions = new ArrayList<>(part.getPartSpec().keySet());
if (!partitions.isEmpty()) {
String partitionStr = String.join("/", partitions);
LOG.info("Setting Partitions in jobConf - Partition Keys for Path : " + path + " is :" + partitionStr);
job.set(hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS, partitionStr);
} else {
job.set(hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS, "");
}
}
}
String deserializerClassName = null;
try {
deserializerClassName = part.getDeserializer(job).getClass().getName();
} catch (Exception e) {
// ignore
LOG.error("Getting deserializer class name error ", e);
}
// don't combine if inputformat is a SymlinkTextInputFormat
if (inputFormat instanceof SymlinkTextInputFormat) {
splits = super.getSplits(job, numSplits);
return splits;
}
Path filterPath = path;
// Does a pool exist for this path already
CombineFilter f;
List<Operator<? extends OperatorDesc>> opList;
if (!mrwork.isMapperCannotSpanPartns()) {
// if mapper can span partitions, make sure a splits does not contain multiple
// opList + inputFormatClassName + deserializerClassName combination
// This is done using the Map of CombinePathInputFormat to PathFilter
opList = HiveFileFormatUtils.doGetWorksFromPath(pathToAliases, aliasToWork, filterPath);
CombinePathInputFormat combinePathInputFormat =
new CombinePathInputFormat(opList, inputFormatClassName, deserializerClassName);
f = poolMap.get(combinePathInputFormat);
if (f == null) {
f = new CombineFilter(filterPath);
LOG.info("CombineHiveInputSplit creating pool for " + path + "; using filter path " + filterPath);
combine.createPool(job, f);
poolMap.put(combinePathInputFormat, f);
} else {
LOG.info("CombineHiveInputSplit: pool is already created for " + path + "; using filter path " + filterPath);
f.addPath(filterPath);
}
} else {
// In the case of tablesample, the input paths are pointing to files rather than directories.
// We need to get the parent directory as the filtering path so that all files in the same
// parent directory will be grouped into one pool but not files from different parent
// directories. This guarantees that a split will combine all files in the same partition
// but won't cross multiple partitions if the user has asked so.
if (!path.getFileSystem(job).getFileStatus(path).isDirectory()) { // path is not directory
filterPath = path.getParent();
inpFiles.add(path);
poolSet.add(filterPath);
} else {
inpDirs.add(path);
}
}
}
// Processing directories
List<CombineFileSplit> iss = new ArrayList<>();
if (!mrwork.isMapperCannotSpanPartns()) {
// mapper can span partitions
// combine into as few as one split, subject to the PathFilters set
// using combine.createPool.
iss = Arrays.asList(combine.getSplits(job, 1));
} else {
for (Path path : inpDirs) {
processPaths(job, combine, iss, path);
}
if (inpFiles.size() > 0) {
// Processing files
for (Path filterPath : poolSet) {
combine.createPool(job, new CombineFilter(filterPath));
}
processPaths(job, combine, iss, inpFiles.toArray(new Path[0]));
}
}
if (mrwork.getNameToSplitSample() != null && !mrwork.getNameToSplitSample().isEmpty()) {
iss = sampleSplits(iss);
}
for (CombineFileSplit is : iss) {
final InputSplit csplit;
if (combine.isRealTime) {
if (is instanceof HoodieCombineRealtimeHiveSplit) {
csplit = is;
} else {
csplit = new HoodieCombineRealtimeHiveSplit(job, is, pathToPartitionInfo);
}
} else {
csplit = new CombineHiveInputSplit(job, is, pathToPartitionInfo);
}
result.add(csplit);
}
LOG.info("number of splits " + result.size());
return result.toArray(new CombineHiveInputSplit[result.size()]);
} | 3.68 |
flink_TableSourceFactory_createTableSource | /**
* Creates and configures a {@link TableSource} based on the given {@link Context}.
*
* @param context context of this table source.
* @return the configured table source.
*/
default TableSource<T> createTableSource(Context context) {
return createTableSource(context.getObjectIdentifier().toObjectPath(), context.getTable());
} | 3.68 |
flink_PlanNode_setPruningMarker | /** Sets the pruning marker to true. */
public void setPruningMarker() {
this.pFlag = true;
} | 3.68 |
hadoop_CacheStats_roundUp | /**
* Round up a count to the nearest multiple of the operating system page size.
*/
public long roundUp(long count) {
return (count + osPageSize - 1) & (~(osPageSize - 1));
} | 3.68 |
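A worked example of the mask trick above, which only holds because osPageSize is a power of two (values illustrative):
long osPageSize = 4096; // must be a power of two
long count = 5000;
long rounded = (count + osPageSize - 1) & (~(osPageSize - 1));
// rounded == 8192: the count is pushed up to the next 4 KiB boundary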
framework_LocaleService_addLocale | /**
* Adds a locale to be sent to the client (browser) for date and time entry
* etc. All locale specific information is derived from server-side
* {@link Locale} instances and sent to the client when needed, eliminating
* the need to use the {@link Locale} class and all the framework behind it
* on the client.
*
* @param locale
* The locale which is required on the client side
*/
public void addLocale(Locale locale) {
for (LocaleData data : getState(false).localeData) {
if (data.name.equals(locale.toString())) {
// Already there
return;
}
}
getState(true).localeData.add(createLocaleData(locale));
} | 3.68 |
hbase_MasterObserver_preListReplicationPeers | /**
* Called before list replication peers.
* @param ctx the environment to interact with the framework and master
* @param regex The regular expression to match peer id
*/
default void preListReplicationPeers(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String regex) throws IOException {
} | 3.68 |
framework_Upload_getTabIndex | /**
* Gets the Tabulator index of this Focusable component.
*
* @see com.vaadin.ui.Component.Focusable#getTabIndex()
*/
@Override
public int getTabIndex() {
return tabIndex;
} | 3.68 |
hadoop_ResourceCalculatorProcessTree_getCpuUsagePercent | /**
* Get the CPU usage by all the processes in the process-tree based on
* average between samples as a ratio of overall CPU cycles similar to top.
* Thus, if 2 out of 4 cores are used this should return 200.0.
* Note: UNAVAILABLE will be returned in case when CPU usage is not
* available. It is NOT advised to return any other error code.
*
* @return percentage CPU usage since the process-tree was created,
* {@link #UNAVAILABLE} if CPU usage cannot be calculated or not available.
*/
public float getCpuUsagePercent() {
return UNAVAILABLE;
} | 3.68 |
morf_SqlServerDialect_alterTableDropColumnStatements | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#alterTableDropColumnStatements(org.alfasoftware.morf.metadata.Table, org.alfasoftware.morf.metadata.Column)
*/
@Override
public Collection<String> alterTableDropColumnStatements(Table table, final Column column) {
List<String> statements = new ArrayList<>();
// Drop any defaults for the old column
if (StringUtils.isNotBlank(column.getDefaultValue()))
statements.add(dropDefaultForColumn(table, column));
// Drop the primary key if the column is part of the primary key and we are dropping the column
boolean recreatePrimaryKey = column.isPrimaryKey();
if (recreatePrimaryKey) {
statements.add(dropPrimaryKey(table));
}
// We can't use the superclass method as we need to make sure we
// modify the correct schema in the database
StringBuilder statement = new StringBuilder()
.append("ALTER TABLE ")
.append(schemaNamePrefix())
.append(table.getName())
.append(" DROP COLUMN ")
.append(column.getName());
statements.add(statement.toString());
List<Column> primaryKeyColumns = primaryKeysForTable(table);
// Recreate the primary key if necessary
if (recreatePrimaryKey && !primaryKeyColumns.isEmpty()) {
statements.add(new StringBuilder()
.append("ALTER TABLE ").append(schemaNamePrefix()).append(table.getName()).append(" ADD ")
.append(buildPrimaryKeyConstraint(table.getName(), namesOfColumns(primaryKeyColumns)))
.toString()
);
}
return statements;
} | 3.68 |
hbase_ThreadMonitoring_appendThreadInfo | /**
* Print all of the thread's information and stack traces.
*/
public static void appendThreadInfo(StringBuilder sb, ThreadInfo info, String indent) {
boolean contention = threadBean.isThreadContentionMonitoringEnabled();
if (info == null) {
sb.append(indent).append("Inactive (perhaps exited while monitoring was done)\n");
return;
}
String taskName = getTaskName(info.getThreadId(), info.getThreadName());
sb.append(indent).append("Thread ").append(taskName).append(":\n");
Thread.State state = info.getThreadState();
sb.append(indent).append(" State: ").append(state).append("\n");
sb.append(indent).append(" Blocked count: ").append(info.getBlockedCount()).append("\n");
sb.append(indent).append(" Waited count: ").append(info.getWaitedCount()).append("\n");
if (contention) {
sb.append(indent).append(" Blocked time: " + info.getBlockedTime()).append("\n");
sb.append(indent).append(" Waited time: " + info.getWaitedTime()).append("\n");
}
if (state == Thread.State.WAITING) {
sb.append(indent).append(" Waiting on ").append(info.getLockName()).append("\n");
} else if (state == Thread.State.BLOCKED) {
sb.append(indent).append(" Blocked on ").append(info.getLockName()).append("\n");
sb.append(indent).append(" Blocked by ")
.append(getTaskName(info.getLockOwnerId(), info.getLockOwnerName())).append("\n");
}
sb.append(indent).append(" Stack:").append("\n");
for (StackTraceElement frame : info.getStackTrace()) {
sb.append(indent).append(" ").append(frame.toString()).append("\n");
}
} | 3.68 |
morf_ResultSetMismatch_getMismatchColumnIndex | /**
* @return Mismatch column index.
*/
public int getMismatchColumnIndex() {
return mismatchColumnIndex;
} | 3.68 |
zxing_QRCodeEncoder_encodeFromStreamExtra | // Handles send intents from the Contacts app, retrieving a contact as a VCARD.
private void encodeFromStreamExtra(Intent intent) throws WriterException {
format = BarcodeFormat.QR_CODE;
Bundle bundle = intent.getExtras();
if (bundle == null) {
throw new WriterException("No extras");
}
Uri uri = bundle.getParcelable(Intent.EXTRA_STREAM);
if (uri == null) {
throw new WriterException("No EXTRA_STREAM");
}
byte[] vcard;
String vcardString;
try (InputStream stream = activity.getContentResolver().openInputStream(uri)) {
if (stream == null) {
throw new WriterException("Can't open stream for " + uri);
}
ByteArrayOutputStream baos = new ByteArrayOutputStream();
byte[] buffer = new byte[2048];
int bytesRead;
while ((bytesRead = stream.read(buffer)) > 0) {
baos.write(buffer, 0, bytesRead);
}
vcard = baos.toByteArray();
vcardString = new String(vcard, 0, vcard.length, StandardCharsets.UTF_8);
} catch (IOException ioe) {
throw new WriterException(ioe);
}
Result result = new Result(vcardString, vcard, null, BarcodeFormat.QR_CODE);
ParsedResult parsedResult = ResultParser.parseResult(result);
if (!(parsedResult instanceof AddressBookParsedResult)) {
throw new WriterException("Result was not an address");
}
encodeQRCodeContents((AddressBookParsedResult) parsedResult);
if (contents == null || contents.isEmpty()) {
throw new WriterException("No content to encode");
}
} | 3.68 |
hadoop_IOStatisticsContextIntegration_createNewInstance | /**
* Creates a new IOStatisticsContext instance to be used for a FileSystem.
* @param key Thread ID that represents which thread the context belongs to.
* @return an instance of IOStatisticsContext.
*/
private static IOStatisticsContext createNewInstance(Long key) {
IOStatisticsContextImpl instance =
new IOStatisticsContextImpl(key, INSTANCE_ID.getAndIncrement());
LOG.debug("Created instance {}", instance);
return instance;
} | 3.68 |
shardingsphere-elasticjob_JobNodeStorage_addConnectionStateListener | /**
* Add connection state listener.
*
* @param listener connection state listener
*/
public void addConnectionStateListener(final ConnectionStateChangedEventListener listener) {
regCenter.addConnectionStateChangedEventListener("/" + jobName, listener);
} | 3.68 |
flink_SourceCoordinator_aggregate | /**
* Update the {@link Watermark} for the given {@code key}.
*
* @return the new updated combined {@link Watermark} if the value has changed. {@code
* Optional.empty()} otherwise.
*/
public Optional<Watermark> aggregate(T key, Watermark watermark) {
Watermark oldAggregatedWatermark = getAggregatedWatermark();
WatermarkElement watermarkElement = new WatermarkElement(watermark);
WatermarkElement oldWatermarkElement = watermarks.put(key, watermarkElement);
if (oldWatermarkElement != null) {
orderedWatermarks.remove(oldWatermarkElement);
}
orderedWatermarks.add(watermarkElement);
Watermark newAggregatedWatermark = getAggregatedWatermark();
if (newAggregatedWatermark.equals(oldAggregatedWatermark)) {
return Optional.empty();
}
return Optional.of(newAggregatedWatermark);
} | 3.68 |
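An illustrative call sequence for the aggregation above, assuming the combined watermark is the minimum over all keys (which the ordered element set suggests); the `aggregator` variable and the integer keys are made up for the example:
aggregator.aggregate(0, new Watermark(100L)); // only key 0 present -> Optional.of(new Watermark(100))
aggregator.aggregate(1, new Watermark(50L)); // minimum drops to 50 -> Optional.of(new Watermark(50))
aggregator.aggregate(1, new Watermark(80L)); // minimum rises to 80 -> Optional.of(new Watermark(80))
aggregator.aggregate(0, new Watermark(120L)); // minimum still 80 -> Optional.empty()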
framework_Upload_fireUploadInterrupted | /**
* Emits the upload failed event.
*
* @param filename
* @param mimeType
* @param length
*/
protected void fireUploadInterrupted(String filename, String mimeType,
long length) {
fireEvent(new Upload.FailedEvent(this, filename, mimeType, length));
} | 3.68 |
querydsl_BeanPath_add | /**
* Template method for tracking child path creation
*
* @param <P>
* @param path path to be tracked
* @return path
*/
protected <P extends Path<?>> P add(P path) {
return path;
} | 3.68 |
flink_StringUtf8Utils_encodeUTF8 | /** This method must have the same result with JDK's String.getBytes. */
public static byte[] encodeUTF8(String str) {
byte[] bytes = allocateReuseBytes(str.length() * MAX_BYTES_PER_CHAR);
int len = encodeUTF8(str, bytes);
return Arrays.copyOf(bytes, len);
} | 3.68 |
flink_TaskExecutionState_getError | /**
* Gets the attached exception, which is in serialized form. Returns null, if the status update
* is no failure with an associated exception.
*
* @param userCodeClassloader The classloader that can resolve user-defined exceptions.
* @return The attached exception, or null, if none.
*/
public Throwable getError(ClassLoader userCodeClassloader) {
if (this.throwable == null) {
return null;
} else {
return this.throwable.deserializeError(userCodeClassloader);
}
} | 3.68 |
morf_HumanReadableStatementProducer_changeIndex | /** @see org.alfasoftware.morf.upgrade.SchemaEditor#changeIndex(java.lang.String, org.alfasoftware.morf.metadata.Index, org.alfasoftware.morf.metadata.Index) **/
@Override
public void changeIndex(String tableName, Index fromIndex, Index toIndex) {
consumer.schemaChange(HumanReadableStatementHelper.generateChangeIndexString(tableName, fromIndex, toIndex));
} | 3.68 |
flink_SyntaxHighlightStyle_getQuotedStyle | /**
* Returns the style for a SQL character literal, such as {@code 'Hello, world!'}.
*
* @return Style for SQL character literals
*/
public AttributedStyle getQuotedStyle() {
return singleQuotedStyle;
} | 3.68 |
querydsl_GeometryExpressions_xmin | /**
* Returns X minima of a bounding box 2d or 3d or a geometry.
*
* @param expr geometry
* @return x minima
*/
public static NumberExpression<Double> xmin(GeometryExpression<?> expr) {
return Expressions.numberOperation(Double.class, SpatialOps.XMIN, expr);
} | 3.68 |
morf_Deployment_deploySchema | /**
* Static convenience method which deploys the specified database schema, prepopulating
* the upgrade step table with all pre-existing upgrade information. Assumes no initial
* start position manipulation is required and the application can start with an empty
* database.
*
* @param targetSchema The target database schema.
* @param upgradeSteps All upgrade steps which should be deemed to have already run.
* @param connectionResources Connection details for the database.
*/
public static void deploySchema(Schema targetSchema, Collection<Class<? extends UpgradeStep>> upgradeSteps, ConnectionResources connectionResources) {
UpgradeStatusTableServiceImpl upgradeStatusTableService = new UpgradeStatusTableServiceImpl(
new SqlScriptExecutorProvider(connectionResources), connectionResources.sqlDialect());
try {
new Deployment(
new UpgradePathFactoryImpl(new UpgradeScriptAdditionsProvider.NoOpScriptAdditions(), UpgradeStatusTableServiceImpl::new),
new ViewChangesDeploymentHelper.Factory(new CreateViewListener.Factory.NoOpFactory(), new DropViewListener.Factory.NoOpFactory()),
connectionResources
).deploy(targetSchema, upgradeSteps);
} finally {
upgradeStatusTableService.tidyUp(connectionResources.getDataSource());
}
} | 3.68 |
hbase_HashTable_selectPartitions | /**
* Choose partitions between row ranges to hash to a single output file. Selects region
* boundaries that fall within the scan range, and groups them into the desired number of
* partitions.
*/
void selectPartitions(Pair<byte[][], byte[][]> regionStartEndKeys) {
List<byte[]> startKeys = new ArrayList<>();
for (int i = 0; i < regionStartEndKeys.getFirst().length; i++) {
byte[] regionStartKey = regionStartEndKeys.getFirst()[i];
byte[] regionEndKey = regionStartEndKeys.getSecond()[i];
// if the scan begins after this region ends, or ends before this region starts, then drop this region
// in other words:
// IF (scan begins before the end of this region
// AND scan ends after the start of this region)
// THEN include this region
if (
(isTableStartRow(startRow) || isTableEndRow(regionEndKey)
|| Bytes.compareTo(startRow, regionEndKey) < 0)
&& (isTableEndRow(stopRow) || isTableStartRow(regionStartKey)
|| Bytes.compareTo(stopRow, regionStartKey) > 0)
) {
startKeys.add(regionStartKey);
}
}
int numRegions = startKeys.size();
if (numHashFiles == 0) {
numHashFiles = numRegions / 100;
}
if (numHashFiles == 0) {
numHashFiles = 1;
}
if (numHashFiles > numRegions) {
// can't partition within regions
numHashFiles = numRegions;
}
// choose a subset of start keys to group regions into ranges
partitions = new ArrayList<>(numHashFiles - 1);
// skip the first start key as it is not a partition between ranges.
for (long i = 1; i < numHashFiles; i++) {
int splitIndex = (int) (numRegions * i / numHashFiles);
partitions.add(new ImmutableBytesWritable(startKeys.get(splitIndex)));
}
} | 3.68 |
flink_OrcLegacyTimestampColumnVector_fromTimestamp | // converting from/to Timestamp is copied from Hive 2.0.0 TimestampUtils
private static long fromTimestamp(Timestamp timestamp) {
long time = timestamp.getTime();
int nanos = timestamp.getNanos();
return (time * 1000000) + (nanos % 1000000);
} | 3.68 |
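A worked example of the conversion above using plain JDK types: getTime() already carries the whole milliseconds, so only the sub-millisecond part of getNanos() is added back.
Timestamp ts = Timestamp.valueOf("2024-01-01 00:00:00.123456789");
long time = ts.getTime(); // epoch millis, already including the .123
int nanos = ts.getNanos(); // 123456789
long result = (time * 1000000) + (nanos % 1000000); // appends the remaining 456789 sub-millisecond nanos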
hudi_BaseHoodieTableServiceClient_scheduleCompaction | /**
* Schedules a new compaction instant.
*
* @param extraMetadata Extra Metadata to be stored
*/
public Option<String> scheduleCompaction(Option<Map<String, String>> extraMetadata) throws HoodieIOException {
String instantTime = createNewInstantTime();
return scheduleCompactionAtInstant(instantTime, extraMetadata) ? Option.of(instantTime) : Option.empty();
} | 3.68 |
framework_UIDL_getChildByTagName | /**
* Returns the child UIDL by its name. If several child nodes exist with the
* given name, the first child UIDL will be returned.
*
* @param tagName
* @return the child UIDL or null if a child with the given name was not found
*/
public UIDL getChildByTagName(String tagName) {
for (Object next : this) {
if (next instanceof UIDL) {
UIDL childUIDL = (UIDL) next;
if (childUIDL.getTag().equals(tagName)) {
return childUIDL;
}
}
}
return null;
} | 3.68 |
flink_RestClientConfiguration_fromConfiguration | /**
* Creates and returns a new {@link RestClientConfiguration} from the given {@link
* Configuration}.
*
* @param config configuration from which the REST client endpoint configuration should be
* created from
* @return REST client endpoint configuration
* @throws ConfigurationException if SSL was configured incorrectly
*/
public static RestClientConfiguration fromConfiguration(Configuration config)
throws ConfigurationException {
Preconditions.checkNotNull(config);
final SSLHandlerFactory sslHandlerFactory;
if (SecurityOptions.isRestSSLEnabled(config)) {
try {
sslHandlerFactory = SSLUtils.createRestClientSSLEngineFactory(config);
} catch (Exception e) {
throw new ConfigurationException(
"Failed to initialize SSLContext for the REST client", e);
}
} else {
sslHandlerFactory = null;
}
final long connectionTimeout = config.getLong(RestOptions.CONNECTION_TIMEOUT);
final long idlenessTimeout = config.getLong(RestOptions.IDLENESS_TIMEOUT);
int maxContentLength = config.getInteger(RestOptions.CLIENT_MAX_CONTENT_LENGTH);
return new RestClientConfiguration(
sslHandlerFactory, connectionTimeout, idlenessTimeout, maxContentLength);
} | 3.68 |
hbase_HBaseCommonTestingUtility_deleteDir | /**
* @param dir Directory to delete
* @return True if we deleted it.
*/
boolean deleteDir(final File dir) {
if (dir == null || !dir.exists()) {
return true;
}
int ntries = 0;
do {
ntries += 1;
try {
if (deleteOnExit()) {
FileUtils.deleteDirectory(dir);
}
return true;
} catch (IOException ex) {
LOG.warn("Failed to delete " + dir.getAbsolutePath());
} catch (IllegalArgumentException ex) {
LOG.warn("Failed to delete " + dir.getAbsolutePath(), ex);
}
} while (ntries < 30);
return false;
} | 3.68 |
graphhopper_TranslationMap_postImportHook | /**
* This method does some checks and fills missing translations from the English ('en') resources
*/
private void postImportHook() {
Map<String, String> enMap = get("en").asMap();
StringBuilder sb = new StringBuilder();
for (Translation tr : translations.values()) {
Map<String, String> trMap = tr.asMap();
for (Entry<String, String> enEntry : enMap.entrySet()) {
String value = trMap.get(enEntry.getKey());
if (isEmpty(value)) {
trMap.put(enEntry.getKey(), enEntry.getValue());
continue;
}
int expectedCount = countOccurence(enEntry.getValue(), "\\%");
if (expectedCount != countOccurence(value, "\\%")) {
sb.append(tr.getLocale()).append(" - error in ").
append(enEntry.getKey()).append("->").
append(value).append("\n");
} else {
// try if formatting works, many times e.g. '%1$' instead of '%1$s'
Object[] strs = new String[expectedCount];
Arrays.fill(strs, "tmp");
try {
String.format(Locale.ROOT, value, strs);
} catch (Exception ex) {
sb.append(tr.getLocale()).append(" - error ").append(ex.getMessage()).append("in ").
append(enEntry.getKey()).append("->").
append(value).append("\n");
}
}
}
}
if (sb.length() > 0) {
System.out.println(sb);
throw new IllegalStateException(sb.toString());
}
} | 3.68 |
hadoop_PseudoAuthenticator_setConnectionConfigurator | /**
* Sets a {@link ConnectionConfigurator} instance to use for
* configuring connections.
*
* @param configurator the {@link ConnectionConfigurator} instance.
*/
@Override
public void setConnectionConfigurator(ConnectionConfigurator configurator) {
connConfigurator = configurator;
} | 3.68 |
hbase_Procedure_elapsedTime | // ==========================================================================
// runtime state
// ==========================================================================
/** Returns the time elapsed between the last update and the start time of the procedure. */
public long elapsedTime() {
return getLastUpdate() - getSubmittedTime();
} | 3.68 |
hadoop_EmptyIOStatisticsStore_getInstance | /**
* Get the single instance of this class.
* @return a shared, empty instance.
*/
static IOStatisticsStore getInstance() {
return INSTANCE;
} | 3.68 |
flink_CatalogDescriptor_of | /**
* Creates an instance of this interface.
*
* @param catalogName the name of the catalog
* @param configuration the configuration of the catalog
*/
public static CatalogDescriptor of(String catalogName, Configuration configuration) {
return new CatalogDescriptor(catalogName, configuration);
} | 3.68 |
framework_VaadinPortlet_handleRequest | /**
* @param request
* @param response
* @throws PortletException
* @throws IOException
*
* @deprecated As of 7.0. Will likely change or be removed in a future
* version
*/
@Deprecated
protected void handleRequest(PortletRequest request,
PortletResponse response) throws PortletException, IOException {
CurrentInstance.clearAll();
try {
getService().handleRequest(createVaadinRequest(request),
createVaadinResponse(response));
} catch (ServiceException e) {
throw new PortletException(e);
}
} | 3.68 |
morf_AbstractSqlDialectTest_shouldGenerateCorrectSqlForMathOperationsForExistingDataFix2 | /**
* Regression test that checks if the DSL with Math expressions, that is used
* in Core and Aether modules produces expected SQL.
*/
@Test
public void shouldGenerateCorrectSqlForMathOperationsForExistingDataFix2() {
AliasedField dsl = floor(random().multiplyBy(new FieldLiteral(Math.pow(10, 6) - 1)));
String sql = testDialect.getSqlFrom(dsl);
assertEquals(expectedSqlForMathOperationsForExistingDataFix2(testDialect.getSqlForRandom()), sql);
} | 3.68 |
framework_SQLContainer_setAutoCommit | /**
* Set auto commit mode enabled or disabled. Auto commit mode means that all
* changes made to items of this container will be immediately written to
* the underlying data source.
*
* @param autoCommitEnabled
* true to enable auto commit mode
*/
public void setAutoCommit(boolean autoCommitEnabled) {
autoCommit = autoCommitEnabled;
} | 3.68 |
flink_CoFeedbackTransformation_getFeedbackEdges | /** Returns the list of feedback {@code Transformations}. */
public List<Transformation<F>> getFeedbackEdges() {
return feedbackEdges;
} | 3.68 |
pulsar_TxnLogBufferedWriter_nextTimingTrigger | /***
* Why not use {@link ScheduledExecutorService#scheduleAtFixedRate(Runnable, long, long, TimeUnit)} ?
* Because: when the {@link #singleThreadExecutorForWrite} thread processes slowly, the scheduleAtFixedRate task
* will continue to append tasks to the ledger thread, which burdens it and leads to an avalanche.
* see: https://github.com/apache/pulsar/pull/16679.
*/
private void nextTimingTrigger(){
try {
if (state == State.CLOSED || state == State.CLOSING){
return;
}
timeout = timer.newTimeout(timingFlushTask, batchedWriteMaxDelayInMillis, TimeUnit.MILLISECONDS);
} catch (Exception e){
log.error("Start timing flush trigger failed."
+ " managedLedger: " + managedLedger.getName(), e);
}
} | 3.68 |
hbase_Table_ifEquals | /**
* Check for equality.
* @param value the expected value
*/
default CheckAndMutateBuilder ifEquals(byte[] value) {
return ifMatches(CompareOperator.EQUAL, value);
} | 3.68 |
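A sketch of how the builder above is typically chained; apart from ifEquals/ifMatches, the surrounding calls (checkAndMutate, qualifier, thenPut) are assumptions not shown in the snippet:
boolean ok = table.checkAndMutate(row, family)
.qualifier(qualifier)
.ifEquals(expectedValue) // the default shown above: CompareOperator.EQUAL
.thenPut(put);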
pulsar_ProxyConnection_authChallengeSuccessCallback | // Always run in this class's event loop.
protected void authChallengeSuccessCallback(AuthData authChallenge) {
try {
// authentication has completed, will send newConnected command.
if (authChallenge == null) {
clientAuthRole = authState.getAuthRole();
if (LOG.isDebugEnabled()) {
LOG.debug("[{}] Client successfully authenticated with {} role {}",
remoteAddress, authMethod, clientAuthRole);
}
// First connection
if (state == State.Connecting) {
// authentication has completed, will send newConnected command.
completeConnect();
}
return;
}
// auth not complete, continue auth with client side.
final ByteBuf msg = Commands.newAuthChallenge(authMethod, authChallenge, protocolVersionToAdvertise);
writeAndFlush(msg);
if (LOG.isDebugEnabled()) {
LOG.debug("[{}] Authentication in progress client by method {}.",
remoteAddress, authMethod);
}
} catch (Exception e) {
authenticationFailedCallback(e);
}
} | 3.68 |
flink_HiveParserTypeCheckProcFactory_convertSqlOperator | // try to create an ExprNodeDesc with a SqlOperator
private ExprNodeDesc convertSqlOperator(
String funcText, List<ExprNodeDesc> children, HiveParserTypeCheckCtx ctx)
throws SemanticException {
SqlOperator sqlOperator =
HiveParserUtils.getSqlOperator(
funcText,
ctx.getSqlOperatorTable(),
SqlFunctionCategory.USER_DEFINED_FUNCTION);
if (sqlOperator == null) {
return null;
}
List<RelDataType> relDataTypes =
children.stream()
.map(ExprNodeDesc::getTypeInfo)
.map(
t -> {
try {
return HiveParserTypeConverter.convert(
t, ctx.getTypeFactory());
} catch (SemanticException e) {
throw new FlinkHiveException(e);
}
})
.collect(Collectors.toList());
List<RexNode> operands = new ArrayList<>(children.size());
for (ExprNodeDesc child : children) {
if (child instanceof ExprNodeConstantDesc) {
operands.add(
HiveParserRexNodeConverter.convertConstant(
(ExprNodeConstantDesc) child, ctx.getCluster()));
} else {
operands.add(null);
}
}
TypeInfo returnType =
HiveParserTypeConverter.convert(
HiveParserUtils.inferReturnTypeForOperandsTypes(
sqlOperator, relDataTypes, operands, ctx.getTypeFactory()));
return new SqlOperatorExprNodeDesc(funcText, sqlOperator, children, returnType);
} | 3.68 |
graphhopper_DistanceCalcEarth_calcDist3D | /**
* This implements a rather quick solution to calculate 3D distances on earth using euclidean
* geometry mixed with the Haversine formula for the on-earth distance. Strictly, the Haversine
* formula adds little here, since it only matters for large distances, where the comparatively
* small height differences become negligible.
*/
@Override
public double calcDist3D(double fromLat, double fromLon, double fromHeight,
double toLat, double toLon, double toHeight) {
double eleDelta = hasElevationDiff(fromHeight, toHeight) ? (toHeight - fromHeight) : 0;
double len = calcDist(fromLat, fromLon, toLat, toLon);
return Math.sqrt(eleDelta * eleDelta + len * len);
} | 3.68 |
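A quick numeric sketch of the Pythagorean combination described above (numbers illustrative): the horizontal leg is the haversine ground distance, the vertical leg the elevation delta.
double horizontal = 1000.0; // metres, as returned by calcDist(fromLat, fromLon, toLat, toLon)
double eleDelta = 30.0; // metres of climb
double dist3d = Math.sqrt(eleDelta * eleDelta + horizontal * horizontal); // ~1000.45 m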
framework_TableTooManyColumns_getTestDescription | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTestDescription()
*/
@Override
protected String getTestDescription() {
return "Table column drop down becomes too large to fit the screen.";
} | 3.68 |
framework_FileUploadHandler_getBuffered | /**
* Returns the partly matched boundary string and the byte following
* that.
*
* @return
* @throws IOException
*/
private int getBuffered() throws IOException {
int b;
if (matchedCount == 0) {
// The boundary has been returned, return the buffered byte.
b = bufferedByte;
bufferedByte = -1;
matchedCount = -1;
} else {
b = boundary[curBoundaryIndex++];
if (curBoundaryIndex == matchedCount) {
// The full boundary has been returned, remaining is the
// char that did not match the boundary.
curBoundaryIndex = 0;
if (bufferedByte != boundary[0]) {
/*
* next call for getBuffered will return the
* bufferedByte that came after the partial boundary
* match
*/
matchedCount = 0;
} else {
/*
* Special case where buffered byte again matches the
* boundaryString. This could be the start of the real
* end boundary.
*/
matchedCount = 0;
bufferedByte = -1;
}
}
}
if (b == -1) {
throw new IOException(
"The multipart stream ended unexpectedly");
}
return b;
} | 3.68 |
flink_KvStateSerializer_deserializeList | /**
* Deserializes all values with the given serializer.
*
* @param serializedValue Serialized value of type List<T>
* @param serializer Serializer for T
* @param <T> Type of the value
* @return Deserialized list or <code>null</code> if the serialized value is <code>null</code>
* @throws IOException On failure during deserialization
*/
public static <T> List<T> deserializeList(byte[] serializedValue, TypeSerializer<T> serializer)
throws IOException {
if (serializedValue != null) {
final DataInputDeserializer in =
new DataInputDeserializer(serializedValue, 0, serializedValue.length);
try {
final List<T> result = new ArrayList<>();
while (in.available() > 0) {
result.add(serializer.deserialize(in));
// The expected binary format has a single byte separator. We
// want a consistent binary format in order to not need any
// special casing during deserialization. A "cleaner" format
// would skip this extra byte, but would require a memory copy
// for RocksDB, which stores the data serialized in this way
// for lists.
if (in.available() > 0) {
in.readByte();
}
}
return result;
} catch (IOException e) {
throw new IOException(
"Unable to deserialize value. "
+ "This indicates a mismatch in the value serializers "
+ "used by the KvState instance and this access.",
e);
}
} else {
return null;
}
} | 3.68 |
flink_SourcePlanNode_setSerializer | /**
* Sets the serializer for this PlanNode.
*
* @param serializer The serializer to set.
*/
public void setSerializer(TypeSerializerFactory<?> serializer) {
this.serializer = serializer;
} | 3.68 |
framework_VAccordion_getContainerElement | /**
* Returns the container element for the content widget.
*
* @return the content container element
*/
@SuppressWarnings("deprecation")
public com.google.gwt.user.client.Element getContainerElement() {
return DOM.asOld(content);
} | 3.68 |
framework_WebBrowser_isEdge | /**
* Tests whether the user is using Edge.
*
* @since 7.5.3
* @return true if the user is using Edge, false if the user is not using
* Edge or if no information on the browser is present
*/
public boolean isEdge() {
if (browserDetails == null) {
return false;
}
return browserDetails.isEdge();
} | 3.68 |
dubbo_AbstractPeer_getHandler | /**
* @return ChannelHandler
*/
@Deprecated
public ChannelHandler getHandler() {
return getDelegateHandler();
} | 3.68 |
hudi_AbstractHoodieLogRecordReader_processQueuedBlocksForInstant | /**
* Process the set of log blocks belonging to the last instant which is read fully.
*/
private void processQueuedBlocksForInstant(Deque<HoodieLogBlock> logBlocks, int numLogFilesSeen,
Option<KeySpec> keySpecOpt) throws Exception {
while (!logBlocks.isEmpty()) {
LOG.info("Number of remaining logblocks to merge " + logBlocks.size());
// poll the element at the bottom of the stack since that's the order it was inserted
HoodieLogBlock lastBlock = logBlocks.pollLast();
switch (lastBlock.getBlockType()) {
case AVRO_DATA_BLOCK:
case HFILE_DATA_BLOCK:
case PARQUET_DATA_BLOCK:
processDataBlock((HoodieDataBlock) lastBlock, keySpecOpt);
break;
case DELETE_BLOCK:
Arrays.stream(((HoodieDeleteBlock) lastBlock).getRecordsToDelete()).forEach(this::processNextDeletedRecord);
break;
case CORRUPT_BLOCK:
LOG.warn("Found a corrupt block which was not rolled back");
break;
default:
break;
}
}
// At this step the lastBlocks are consumed. We track approximate progress by number of log-files seen
progress = (numLogFilesSeen - 1) / logFilePaths.size();
} | 3.68 |
hadoop_MawoConfiguration_getWorkerWhiteListEnv | /**
* Get Worker whitelist env params.
* These params will be set in all tasks.
* @return list of whitelisted environment variable names
*/
public List<String> getWorkerWhiteListEnv() {
List<String> whiteList = new ArrayList<String>();
String env = configsMap.get(WORKER_WHITELIST_ENV);
if (env != null && !env.isEmpty()) {
String[] variables = env.split(COMMA_SPLITTER);
for (String variable : variables) {
variable = variable.trim();
if (variable.startsWith("$")) {
variable = variable.substring(1);
}
if (!variable.isEmpty()) {
whiteList.add(variable);
}
}
}
return whiteList;
} | 3.68 |
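For example (key name and value are illustrative, and COMMA_SPLITTER is assumed to be a plain comma): a configured value of `$JAVA_HOME, HADOOP_CONF_DIR,` would yield the list [JAVA_HOME, HADOOP_CONF_DIR]; the leading `$` and surrounding whitespace are stripped and empty entries are dropped.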
hbase_TableQuotaSnapshotStore_getSnapshotSizesForTable | /**
* Fetches any serialized snapshot sizes from the quota table for the {@code tn} provided. Any
* malformed records are skipped with a warning printed out.
*/
long getSnapshotSizesForTable(TableName tn) throws IOException {
try (Table quotaTable = conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) {
Scan s = QuotaTableUtil.createScanForSpaceSnapshotSizes(tn);
ResultScanner rs = quotaTable.getScanner(s);
try {
long size = 0L;
// Should just be a single row (for our table)
for (Result result : rs) {
// May have multiple columns, one for each snapshot
CellScanner cs = result.cellScanner();
while (cs.advance()) {
Cell current = cs.current();
try {
long snapshotSize = QuotaTableUtil.parseSnapshotSize(current);
if (LOG.isTraceEnabled()) {
LOG.trace("Saw snapshot size of " + snapshotSize + " for " + current);
}
size += snapshotSize;
} catch (InvalidProtocolBufferException e) {
LOG.warn("Failed to parse snapshot size from cell: " + current);
}
}
}
return size;
} finally {
if (null != rs) {
rs.close();
}
}
}
} | 3.68 |
hudi_CompactionCommand_compactionPlanReader | /**
* Compaction reading is different for different timelines. Create partial function to override special logic.
* We can make these read methods part of HoodieDefaultTimeline and override where necessary. But the
* BiFunction below has 'hacky' exception blocks, so it is restricted to the CLI.
*/
private <T extends HoodieDefaultTimeline, U extends HoodieInstant, V extends HoodieCompactionPlan>
Function<HoodieInstant, HoodieCompactionPlan> compactionPlanReader(
BiFunction<T, HoodieInstant, HoodieCompactionPlan> f, T timeline) {
return (y) -> f.apply(timeline, y);
} | 3.68 |
flink_Tuple13_toString | /**
* Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7, f8,
* f9, f10, f11, f12), where the individual fields are the value returned by calling {@link
* Object#toString} on that field.
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
return "("
+ StringUtils.arrayAwareToString(this.f0)
+ ","
+ StringUtils.arrayAwareToString(this.f1)
+ ","
+ StringUtils.arrayAwareToString(this.f2)
+ ","
+ StringUtils.arrayAwareToString(this.f3)
+ ","
+ StringUtils.arrayAwareToString(this.f4)
+ ","
+ StringUtils.arrayAwareToString(this.f5)
+ ","
+ StringUtils.arrayAwareToString(this.f6)
+ ","
+ StringUtils.arrayAwareToString(this.f7)
+ ","
+ StringUtils.arrayAwareToString(this.f8)
+ ","
+ StringUtils.arrayAwareToString(this.f9)
+ ","
+ StringUtils.arrayAwareToString(this.f10)
+ ","
+ StringUtils.arrayAwareToString(this.f11)
+ ","
+ StringUtils.arrayAwareToString(this.f12)
+ ")";
} | 3.68 |
framework_DesignContext_getDefaultInstance | /**
* Returns the default instance for the given class. The instance must not
* be modified by the caller.
*
* @param <T>
* a component class
* @param component
* the component that determines the class
* @return the default instance for the given class. The return value must
* not be modified by the caller
*/
@SuppressWarnings("unchecked")
public <T> T getDefaultInstance(Component component) {
// If the root is a @DesignRoot component, it can't use itself as a
// reference or the written design will be empty
// If the root component in some other way initializes itself in the
// constructor
if (getRootComponent() == component
&& component.getClass().isAnnotationPresent(DesignRoot.class)) {
return (T) getDefaultInstance((Class<? extends Component>) component
.getClass().getSuperclass());
}
return (T) getDefaultInstance(component.getClass());
} | 3.68 |
hibernate-validator_MethodInheritanceTree_getTopLevelMethods | /**
* Returns a set containing all the top level overridden methods.
*
* @return a set containing all the top level overridden methods
*/
public Set<ExecutableElement> getTopLevelMethods() {
return topLevelMethods;
} | 3.68 |
streampipes_Formats_fstFormat | /**
* Defines the transport format Fast-Serializer used by a data stream at runtime.
*
* @return The {@link org.apache.streampipes.model.grounding.TransportFormat} of type FST.
*/
public static TransportFormat fstFormat() {
return new TransportFormat(MessageFormat.FST);
} | 3.68 |
graphhopper_VectorTile_getExtent | /**
* <pre>
* Although this is an "optional" field it is required by the specification.
* See https://github.com/mapbox/vector-tile-spec/issues/47
* </pre>
*
* <code>optional uint32 extent = 5 [default = 4096];</code>
*/
public int getExtent() {
return extent_;
} | 3.68 |
flink_ExceptionUtils_isJvmFatalOrOutOfMemoryError | /**
* Checks whether the given exception indicates a situation that may leave the JVM in a
* corrupted state, or an out-of-memory error.
*
* <p>See {@link ExceptionUtils#isJvmFatalError(Throwable)} for a list of fatal JVM errors. This
* method additionally classifies the {@link OutOfMemoryError} as fatal, because it may occur in
* any thread (not the one that allocated the majority of the memory) and thus is often not
* recoverable by destroying the particular thread that threw the exception.
*
* @param t The exception to check.
* @return True, if the exception is fatal to the JVM or and OutOfMemoryError, false otherwise.
*/
public static boolean isJvmFatalOrOutOfMemoryError(Throwable t) {
return isJvmFatalError(t) || t instanceof OutOfMemoryError;
} | 3.68 |
hibernate-validator_ExecutableMetaData_getSignatures | /**
* Returns the signature(s) of the method represented by this meta data object, based on the represented
* executable's name and its parameter types.
*
* @return The signatures of this meta data object. Will only contain more than one element in case the represented
* method represents a sub-type method overriding a super-type method using a generic type parameter in its
* parameters.
*/
public Set<Signature> getSignatures() {
return signatures;
} | 3.68 |
graphhopper_CarAccessParser_isBackwardOneway | /**
* make sure that isOneway is called before
*/
protected boolean isBackwardOneway(ReaderWay way) {
return way.hasTag("oneway", "-1")
|| way.hasTag("vehicle:forward", restrictedValues)
|| way.hasTag("motor_vehicle:forward", restrictedValues);
} | 3.68 |
rocketmq-connect_ExpressionBuilder_appendTableName | /**
* Append to this builder's expression the specified Column identifier, possibly surrounded by
* the leading and trailing quotes based upon {@link #setQuoteIdentifiers(QuoteMethod)}.
*
* @param name the name to be appended
* @param quote the quote method to be used
* @return this builder to enable methods to be chained; never null
*/
public ExpressionBuilder appendTableName(String name, QuoteMethod quote) {
appendLeadingQuote(quote);
sb.append(name);
appendTrailingQuote(quote);
return this;
} | 3.68 |
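Since the method returns this, calls can be chained while the quote method decides whether the identifier gets wrapped. A hedged sketch with a hypothetical mini builder that mimics the pattern; it is not the rocketmq-connect ExpressionBuilder API.

public final class MiniExpressionBuilder {

    public enum QuoteMethod { ALWAYS, NEVER }

    private final StringBuilder sb = new StringBuilder();

    public MiniExpressionBuilder append(String s) {
        sb.append(s);
        return this;
    }

    public MiniExpressionBuilder appendTableName(String name, QuoteMethod quote) {
        if (quote == QuoteMethod.ALWAYS) {
            sb.append('"');
        }
        sb.append(name);
        if (quote == QuoteMethod.ALWAYS) {
            sb.append('"');
        }
        return this; // enables chaining, as in the snippet
    }

    @Override
    public String toString() {
        return sb.toString();
    }

    public static void main(String[] args) {
        String sql = new MiniExpressionBuilder()
                .append("SELECT * FROM ")
                .appendTableName("orders", QuoteMethod.ALWAYS)
                .toString();
        System.out.println(sql); // SELECT * FROM "orders"
    }
}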
pulsar_ProducerBuilderImpl_initialSubscriptionName | /**
* Use this config to automatically create an initial subscription when creating the topic.
* If this field is not set, the initial subscription will not be created.
* If this field is set but the broker's `allowAutoSubscriptionCreation` is disabled, the producer will fail to
* be created.
 * This method is limited to internal use and is only used when the consumer creates the DLQ producer.
*
* @param initialSubscriptionName Name of the initial subscription of the topic.
* @return the producer builder implementation instance
*/
public ProducerBuilderImpl<T> initialSubscriptionName(String initialSubscriptionName) {
conf.setInitialSubscriptionName(initialSubscriptionName);
return this;
} | 3.68 |
flink_TopNBuffer_checkSortKeyInBufferRange | /**
* Checks whether the record should be put into the buffer.
*
 * @param sortKey the sort key to test
 * @param topNum the maximum number of records the buffer may hold (the N of top-N)
* @return true if the record should be put into the buffer.
*/
public boolean checkSortKeyInBufferRange(RowData sortKey, long topNum) {
Comparator<RowData> comparator = getSortKeyComparator();
Map.Entry<RowData, Collection<RowData>> worstEntry = lastEntry();
if (worstEntry == null) {
// return true if the buffer is empty.
return true;
} else {
RowData worstKey = worstEntry.getKey();
int compare = comparator.compare(sortKey, worstKey);
if (compare < 0) {
return true;
} else {
return getCurrentTopNum() < topNum;
}
}
} | 3.68 |
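A standalone sketch of the same decision logic with a TreeMap keyed by an integer sort key: accept when the buffer is empty, when the key beats the current worst (last) entry, or when the buffer is not yet full. It mirrors the snippet but is not Flink's TopNBuffer.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public final class TopNCheckSketch {

    private final Comparator<Integer> comparator = Comparator.naturalOrder();
    private final TreeMap<Integer, List<String>> treeMap = new TreeMap<>(comparator);
    private int currentTopNum = 0;

    void put(Integer sortKey, String record) {
        treeMap.computeIfAbsent(sortKey, k -> new ArrayList<>()).add(record);
        currentTopNum++;
    }

    boolean checkSortKeyInBufferRange(Integer sortKey, long topNum) {
        Map.Entry<Integer, List<String>> worstEntry = treeMap.lastEntry();
        if (worstEntry == null) {
            return true; // empty buffer always accepts
        }
        int compare = comparator.compare(sortKey, worstEntry.getKey());
        // better than the current worst key, or buffer not yet full
        return compare < 0 || currentTopNum < topNum;
    }

    public static void main(String[] args) {
        TopNCheckSketch buffer = new TopNCheckSketch();
        buffer.put(5, "a");
        buffer.put(9, "b");
        System.out.println(buffer.checkSortKeyInBufferRange(3, 2));  // true: better than worst
        System.out.println(buffer.checkSortKeyInBufferRange(10, 2)); // false: worse and buffer full
    }
}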
hibernate-validator_CascadableConstraintMappingContextImplBase_addGroupConversion | /**
* Adds a group conversion for this element.
*
* @param from the source group of the conversion
* @param to the target group of the conversion
*/
public void addGroupConversion(Class<?> from, Class<?> to) {
groupConversions.put( from, to );
} | 3.68 |
hadoop_JWTRedirectAuthenticationHandler_constructLoginURL | /**
* Create the URL to be used for authentication of the user in the absence of
* a JWT token within the incoming request.
*
 * @param request the incoming request, used to obtain the original request URL
* @return url to use as login url for redirect
*/
@VisibleForTesting
String constructLoginURL(HttpServletRequest request) {
String delimiter = "?";
if (authenticationProviderUrl.contains("?")) {
delimiter = "&";
}
String loginURL = authenticationProviderUrl + delimiter
+ ORIGINAL_URL_QUERY_PARAM
+ request.getRequestURL().toString() + getOriginalQueryString(request);
return loginURL;
} | 3.68 |
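The only branching is the delimiter choice: use '?' unless the provider URL already carries a query string, then use '&'. A standalone sketch of that logic; the originalUrl= parameter name is an assumption standing in for ORIGINAL_URL_QUERY_PARAM.

public final class LoginUrlSketch {

    static final String ORIGINAL_URL_QUERY_PARAM = "originalUrl="; // assumed value

    static String constructLoginURL(String authenticationProviderUrl, String originalUrl) {
        // Pick '&' when the provider URL already has a query string, otherwise '?'.
        String delimiter = authenticationProviderUrl.contains("?") ? "&" : "?";
        return authenticationProviderUrl + delimiter + ORIGINAL_URL_QUERY_PARAM + originalUrl;
    }

    public static void main(String[] args) {
        System.out.println(constructLoginURL(
                "https://idp.example.com/login", "https://app.example.com/data"));
        System.out.println(constructLoginURL(
                "https://idp.example.com/login?tenant=a", "https://app.example.com/data"));
    }
}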
hadoop_ConfigurationUtils_injectDefaults | /**
* Injects configuration key/value pairs from one configuration to another if the key does not exist in the target
* configuration.
*
* @param source source configuration.
* @param target target configuration.
*/
public static void injectDefaults(Configuration source, Configuration target) {
Check.notNull(source, "source");
Check.notNull(target, "target");
for (Map.Entry<String, String> entry : source) {
if (target.get(entry.getKey()) == null) {
target.set(entry.getKey(), entry.getValue());
}
}
} | 3.68 |
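A standalone sketch of the same copy-only-missing-keys behaviour using plain Maps in place of Hadoop Configuration objects.

import java.util.HashMap;
import java.util.Map;

public final class InjectDefaultsSketch {

    static void injectDefaults(Map<String, String> source, Map<String, String> target) {
        for (Map.Entry<String, String> entry : source.entrySet()) {
            // Do not overwrite values the target already defines.
            target.putIfAbsent(entry.getKey(), entry.getValue());
        }
    }

    public static void main(String[] args) {
        Map<String, String> source = new HashMap<>();
        source.put("a", "1");
        source.put("b", "2");
        Map<String, String> target = new HashMap<>();
        target.put("b", "overridden");
        injectDefaults(source, target);
        System.out.println(target); // "b" keeps its value, "a" is filled in from source
    }
}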
hbase_TextSortReducer_doSetup | /**
* Handles common parameter initialization that a subclass might want to leverage.
*/
protected void doSetup(Context context, Configuration conf) {
// If a custom separator has been used,
// decode it back from Base64 encoding.
separator = conf.get(ImportTsv.SEPARATOR_CONF_KEY);
if (separator == null) {
separator = ImportTsv.DEFAULT_SEPARATOR;
} else {
separator = Bytes.toString(Base64.getDecoder().decode(separator));
}
// Should never get 0 as we are setting this to a valid value in job configuration.
ts = conf.getLong(ImportTsv.TIMESTAMP_CONF_KEY, 0);
skipBadLines = context.getConfiguration().getBoolean(ImportTsv.SKIP_LINES_CONF_KEY, true);
badLineCount = context.getCounter("ImportTsv", "Bad Lines");
} | 3.68 |
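The separator travels through the job configuration Base64-encoded and is decoded back during setup, falling back to a default when unset. A standalone sketch of that round trip using only the JDK.

import java.util.Base64;

public final class SeparatorConfigSketch {

    static String resolveSeparator(String encoded, String defaultSeparator) {
        if (encoded == null) {
            return defaultSeparator; // no custom separator configured
        }
        return new String(Base64.getDecoder().decode(encoded));
    }

    public static void main(String[] args) {
        String encoded = Base64.getEncoder().encodeToString("|".getBytes());
        System.out.println(resolveSeparator(encoded, "\t"));                 // |
        System.out.println(resolveSeparator(null, "\t").equals("\t"));       // true
    }
}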
dubbo_AbstractReferenceConfig_setInjvm | /**
 * @param injvm whether the reference should be invoked within the same JVM (local call)
 * @deprecated use the parameter <b>scope</b> instead to decide whether the call stays in the JVM (scope=local)
*/
@Deprecated
public void setInjvm(Boolean injvm) {
this.injvm = injvm;
} | 3.68 |
flink_TopNBuffer_removeLast | /**
* Removes the last record of the last Entry in the buffer.
*
 * @return the removed record, or null if the buffer is empty
*/
public RowData removeLast() {
Map.Entry<RowData, Collection<RowData>> last = treeMap.lastEntry();
RowData lastElement = null;
if (last != null) {
Collection<RowData> collection = last.getValue();
if (collection != null) {
if (collection instanceof List) {
// optimization for List
List<RowData> list = (List<RowData>) collection;
if (!list.isEmpty()) {
lastElement = list.remove(list.size() - 1);
currentTopNum -= 1;
if (list.isEmpty()) {
treeMap.remove(last.getKey());
}
}
} else {
lastElement = getLastElement(collection);
if (lastElement != null) {
if (collection.remove(lastElement)) {
currentTopNum -= 1;
}
if (collection.size() == 0) {
treeMap.remove(last.getKey());
}
}
}
}
}
return lastElement;
} | 3.68 |
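A standalone sketch of the List branch of this removal, including dropping the key once its list becomes empty; illustrative only, not Flink's TopNBuffer.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public final class RemoveLastSketch {

    private final TreeMap<Integer, List<String>> treeMap = new TreeMap<>();
    private int currentTopNum = 0;

    void put(int key, String record) {
        treeMap.computeIfAbsent(key, k -> new ArrayList<>()).add(record);
        currentTopNum++;
    }

    String removeLast() {
        Map.Entry<Integer, List<String>> last = treeMap.lastEntry();
        if (last == null) {
            return null; // buffer is empty
        }
        List<String> list = last.getValue();
        String removed = list.remove(list.size() - 1);
        currentTopNum--;
        if (list.isEmpty()) {
            // Drop the key once no records remain under it.
            treeMap.remove(last.getKey());
        }
        return removed;
    }

    public static void main(String[] args) {
        RemoveLastSketch buffer = new RemoveLastSketch();
        buffer.put(1, "a");
        buffer.put(2, "b");
        buffer.put(2, "c");
        System.out.println(buffer.removeLast()); // c
        System.out.println(buffer.removeLast()); // b
        System.out.println(buffer.removeLast()); // a
    }
}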
hudi_HoodieEmptyRecord_writeRecordPayload | /**
 * NOTE: This method is declared final to make sure there's no polymorphism, so that the
 * JIT compiler can perform more aggressive optimizations
*/
@Override
protected final void writeRecordPayload(T payload, Kryo kryo, Output output) {
kryo.writeObject(output, type);
// NOTE: Since [[orderingVal]] is polymorphic we have to write out its class
// to be able to properly deserialize it
kryo.writeClassAndObject(output, orderingVal);
} | 3.68 |
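A minimal Kryo round-trip sketch (assuming the com.esotericsoftware.kryo dependency) showing why writeClassAndObject is used for a polymorphic field: the concrete class is written next to the value so the reader can restore it without knowing the type up front. The setup below is illustrative, not Hudi's serializer configuration.

import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;

public final class KryoPolymorphicSketch {

    public static void main(String[] args) {
        Kryo kryo = new Kryo();
        kryo.setRegistrationRequired(false); // accept unregistered classes for this sketch

        Comparable<?> orderingVal = 42L; // concrete type (Long) only known at runtime

        Output output = new Output(256, -1);
        // Writes the class identifier followed by the serialized value.
        kryo.writeClassAndObject(output, orderingVal);
        byte[] bytes = output.toBytes();

        Object restored = kryo.readClassAndObject(new Input(bytes));
        System.out.println(restored + " / " + restored.getClass().getSimpleName()); // 42 / Long
    }
}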
hadoop_JobMonitor_submissionFailed | /**
 * Add the status of a job whose submission failed, so that it can be
 * communicated back to the serial submission policy.
 * TODO: Cleaner solution for this problem
 * @param job the stats of the job whose submission failed
*/
public void submissionFailed(JobStats job) {
String jobID = job.getJob().getConfiguration().get(Gridmix.ORIGINAL_JOB_ID);
LOG.info("Job submission failed notification for job " + jobID);
synchronized (statistics) {
this.statistics.add(job);
}
} | 3.68 |
framework_DragSourceExtensionConnector_removeDraggedStyle | /**
* Remove class name that indicated that the drag source element was being
* dragged. This method is called during the dragend event.
*
 * @param event
 *            The drag end event.
*/
protected void removeDraggedStyle(NativeEvent event) {
Element dragSource = getDraggableElement();
dragSource.removeClassName(
getStylePrimaryName(dragSource) + STYLE_SUFFIX_DRAGGED);
} | 3.68 |
hadoop_ReadStatistics_getTotalShortCircuitBytesRead | /**
* @return The total short-circuit local bytes read.
*/
public synchronized long getTotalShortCircuitBytesRead() {
return totalShortCircuitBytesRead;
} | 3.68 |
morf_SchemaUtils_columns | /**
* @see org.alfasoftware.morf.metadata.SchemaUtils.IndexBuilder#columns(java.lang.Iterable)
*/
@Override
public IndexBuilder columns(Iterable<String> columnNames) {
return new IndexBuilderImpl(getName(), isUnique(), columnNames);
} | 3.68 |
hadoop_ExpressionFactory_createExpression | /**
* Creates an instance of the requested {@link Expression} class.
*
* @param expressionClassname
* name of the {@link Expression} class to be instantiated
* @param conf
* the Hadoop configuration
* @return a new instance of the requested {@link Expression} class
*/
Expression createExpression(String expressionClassname,
Configuration conf) {
try {
Class<? extends Expression> expressionClass = Class.forName(
expressionClassname).asSubclass(Expression.class);
return createExpression(expressionClass, conf);
} catch (ClassNotFoundException e) {
throw new IllegalArgumentException("Invalid classname "
+ expressionClassname);
}
} | 3.68 |
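A standalone sketch of the Class.forName(...).asSubclass(...) lookup with a hypothetical Expression interface; it assumes the classes live in the default package so the nested-class name resolves as written.

public final class ReflectiveFactorySketch {

    interface Expression {
        boolean apply(String path);
    }

    public static class NameEndsWith implements Expression {
        @Override
        public boolean apply(String path) {
            return path.endsWith(".txt");
        }
    }

    static Expression createExpression(String className) {
        try {
            // Resolve the class and verify it implements Expression before instantiating.
            Class<? extends Expression> cls =
                    Class.forName(className).asSubclass(Expression.class);
            return cls.getDeclaredConstructor().newInstance();
        } catch (ClassNotFoundException | ClassCastException e) {
            throw new IllegalArgumentException("Invalid classname " + className, e);
        } catch (ReflectiveOperationException e) {
            throw new IllegalStateException("Cannot instantiate " + className, e);
        }
    }

    public static void main(String[] args) {
        Expression expr = createExpression("ReflectiveFactorySketch$NameEndsWith");
        System.out.println(expr.apply("notes.txt")); // true
    }
}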
flink_PeriodicMaterializationManager_scheduleNextMaterialization | // task thread and asyncOperationsThreadPool can access this method
private synchronized void scheduleNextMaterialization(long delay) {
if (started && !periodicExecutor.isShutdown()) {
LOG.info(
"Task {} schedules the next materialization in {} seconds",
subtaskName,
delay / 1000);
periodicExecutor.schedule(this::triggerMaterialization, delay, TimeUnit.MILLISECONDS);
}
} | 3.68 |
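A standalone sketch of the self-rescheduling pattern on a ScheduledExecutorService with the same started/shutdown guard; class and method names and the delays are illustrative.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public final class PeriodicTaskSketch {

    private final ScheduledExecutorService periodicExecutor =
            Executors.newSingleThreadScheduledExecutor();
    private volatile boolean started = true;

    synchronized void scheduleNext(long delayMillis) {
        // Only schedule while running and the executor is still accepting work.
        if (started && !periodicExecutor.isShutdown()) {
            periodicExecutor.schedule(this::runOnce, delayMillis, TimeUnit.MILLISECONDS);
        }
    }

    private void runOnce() {
        System.out.println("materialization triggered at " + System.currentTimeMillis());
        scheduleNext(1000); // reschedule itself for the next round
    }

    public static void main(String[] args) throws InterruptedException {
        PeriodicTaskSketch task = new PeriodicTaskSketch();
        task.scheduleNext(200);
        Thread.sleep(2500);
        task.started = false;
        task.periodicExecutor.shutdown();
    }
}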