name | code_snippet | score
---|---|---|
flink_SqlConstraintValidator_validate | /** Check table constraint. */
private static void validate(SqlTableConstraint constraint) throws SqlValidateException {
if (constraint.isUnique()) {
throw new SqlValidateException(
constraint.getParserPosition(), "UNIQUE constraint is not supported yet");
}
if (constraint.isEnforced()) {
throw new SqlValidateException(
constraint.getParserPosition(),
"Flink doesn't support ENFORCED mode for PRIMARY KEY constraint. ENFORCED/NOT ENFORCED "
+ "controls if the constraint checks are performed on the incoming/outgoing data. "
+ "Flink does not own the data therefore the only supported mode is the NOT ENFORCED mode");
}
} | 3.68 |
hadoop_SerialJobFactory_setDistCacheEmulator | // it is needed for tests
void setDistCacheEmulator(DistributedCacheEmulator e) {
jobCreator.setDistCacheEmulator(e);
} | 3.68 |
pulsar_SchemasImpl_convertGetSchemaResponseToSchemaInfo | // this util function converts `GetSchemaResponse` to `SchemaInfo`
static SchemaInfo convertGetSchemaResponseToSchemaInfo(TopicName tn,
GetSchemaResponse response) {
byte[] schema;
if (response.getType() == SchemaType.KEY_VALUE) {
try {
schema = DefaultImplementation.getDefaultImplementation().convertKeyValueDataStringToSchemaInfoSchema(
response.getData().getBytes(UTF_8));
} catch (IOException conversionError) {
throw new RuntimeException(conversionError);
}
} else {
schema = response.getData().getBytes(UTF_8);
}
return SchemaInfo.builder()
.schema(schema)
.type(response.getType())
.timestamp(response.getTimestamp())
.properties(response.getProperties())
.name(tn.getLocalName())
.build();
} | 3.68 |
morf_FieldReference_deepCopyInternal | /**
* {@inheritDoc}
* @see org.alfasoftware.morf.sql.element.AliasedField#deepCopyInternal(DeepCopyTransformation)
*/
@Override
protected FieldReference deepCopyInternal(DeepCopyTransformation transformer) {
return new FieldReference(getAlias(), transformer.deepCopy(table), name, direction, nullValueHandling);
} | 3.68 |
flink_Executors_newDirectExecutorService | /** Creates a new {@link ExecutorService} that runs the passed tasks in the calling thread. */
public static ExecutorService newDirectExecutorService() {
return new DirectExecutorService(true);
} | 3.68 |
dubbo_AbstractGenerator_methodNameUpperUnderscore | // This method mimics the upper-casing method of gRPC to ensure compatibility
// See https://github.com/grpc/grpc-java/blob/v1.8.0/compiler/src/java_plugin/cpp/java_generator.cpp#L58
public String methodNameUpperUnderscore() {
StringBuilder s = new StringBuilder();
for (int i = 0; i < methodName.length(); i++) {
char c = methodName.charAt(i);
s.append(Character.toUpperCase(c));
if ((i < methodName.length() - 1)
&& Character.isLowerCase(c)
&& Character.isUpperCase(methodName.charAt(i + 1))) {
s.append('_');
}
}
return s.toString();
} | 3.68 |
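For illustration, a minimal standalone sketch of the same upper-casing rule (the demo class name and sample inputs are assumptions, not part of dubbo's generator):

public class UpperUnderscoreDemo {
    static String toUpperUnderscore(String methodName) {
        StringBuilder s = new StringBuilder();
        for (int i = 0; i < methodName.length(); i++) {
            char c = methodName.charAt(i);
            s.append(Character.toUpperCase(c));
            // insert '_' only on a lower-to-upper boundary, mirroring the rule above
            if (i < methodName.length() - 1
                    && Character.isLowerCase(c)
                    && Character.isUpperCase(methodName.charAt(i + 1))) {
                s.append('_');
            }
        }
        return s.toString();
    }

    public static void main(String[] args) {
        System.out.println(toUpperUnderscore("sayHello"));    // SAY_HELLO
        System.out.println(toUpperUnderscore("getUserByID")); // GET_USER_BY_ID
    }
}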
hbase_CommonFSUtils_delete | /**
* Calls fs.delete() and returns the value returned by the fs.delete()
* @param fs must not be null
* @param path must not be null
* @param recursive delete tree rooted at path
* @return the value returned by the fs.delete()
* @throws IOException from underlying FileSystem
*/
public static boolean delete(final FileSystem fs, final Path path, final boolean recursive)
throws IOException {
return fs.delete(path, recursive);
} | 3.68 |
flink_WatermarkAssignerOperator_processWatermark | /**
* Override the base implementation to completely ignore watermarks propagated from upstream (we
* rely only on the {@link WatermarkGenerator} to emit watermarks from here).
*/
@Override
public void processWatermark(Watermark mark) throws Exception {
// if we receive a Long.MAX_VALUE watermark we forward it since it is used
// to signal the end of input and to not block watermark progress downstream
if (mark.getTimestamp() == Long.MAX_VALUE && currentWatermark != Long.MAX_VALUE) {
if (idleTimeout > 0 && currentStatus.equals(WatermarkStatus.IDLE)) {
// mark the channel active
emitWatermarkStatus(WatermarkStatus.ACTIVE);
}
currentWatermark = Long.MAX_VALUE;
output.emitWatermark(mark);
}
} | 3.68 |
hadoop_DiskBalancerWorkStatus_getIntResult | /**
* Get int value of result.
*
* @return int
*/
public int getIntResult() {
return result;
} | 3.68 |
hbase_ColumnSchemaModel_toString | /*
* (non-Javadoc)
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{ NAME => '");
sb.append(name);
sb.append('\'');
for (Map.Entry<QName, Object> e : attrs.entrySet()) {
sb.append(", ");
sb.append(e.getKey().getLocalPart());
sb.append(" => '");
sb.append(e.getValue().toString());
sb.append('\'');
}
sb.append(" }");
return sb.toString();
} | 3.68 |
pulsar_LoadManagerShared_filterBrokersWithLargeTopicCount | /**
 * It filters out brokers which own more topics than the threshold configured at
 * ServiceConfiguration.loadBalancerBrokerMaxTopics. <br/>
 * If all the brokers own more topics than the threshold, the list is reset to the original broker candidates.
*
* @param brokerCandidateCache
* @param loadData
* @param loadBalancerBrokerMaxTopics
*/
public static void filterBrokersWithLargeTopicCount(Set<String> brokerCandidateCache, LoadData loadData,
int loadBalancerBrokerMaxTopics) {
Set<String> filteredBrokerCandidates = brokerCandidateCache.stream().filter((broker) -> {
BrokerData brokerData = loadData.getBrokerData().get(broker);
long totalTopics = brokerData != null && brokerData.getPreallocatedBundleData() != null
? brokerData.getPreallocatedBundleData().values().stream()
.mapToLong((preAllocatedBundle) -> preAllocatedBundle.getTopics()).sum()
+ brokerData.getLocalData().getNumTopics()
: 0;
return totalTopics <= loadBalancerBrokerMaxTopics;
}).collect(Collectors.toSet());
if (!filteredBrokerCandidates.isEmpty()) {
brokerCandidateCache.clear();
brokerCandidateCache.addAll(filteredBrokerCandidates);
}
} | 3.68 |
dubbo_DubboProtocol_buildReferenceCountExchangeClient | /**
* Build a single client
*
 * @param url the provider URL used to initialize the underlying exchange client
 * @return a reference-counted exchange client wrapping the new client
*/
private ReferenceCountExchangeClient buildReferenceCountExchangeClient(URL url) {
ExchangeClient exchangeClient = initClient(url);
ReferenceCountExchangeClient client = new ReferenceCountExchangeClient(exchangeClient, DubboCodec.NAME);
// read configs
int shutdownTimeout = ConfigurationUtils.getServerShutdownTimeout(url.getScopeModel());
client.setShutdownWaitTime(shutdownTimeout);
return client;
} | 3.68 |
flink_StateAssignmentOperation_getManagedKeyedStateHandles | /**
 * Collect the {@link KeyGroupsStateHandle managedKeyedStateHandles} which intersect with the
 * given {@link KeyGroupRange} from the {@link TaskState operatorState}.
 *
 * @param operatorState all state handles of an operator
 * @param subtaskKeyGroupRange the KeyGroupRange of a subtask
 * @return all managedKeyedStateHandles which intersect with the given KeyGroupRange
*/
public static List<KeyedStateHandle> getManagedKeyedStateHandles(
OperatorState operatorState, KeyGroupRange subtaskKeyGroupRange) {
final int parallelism = operatorState.getParallelism();
List<KeyedStateHandle> subtaskKeyedStateHandles = null;
for (int i = 0; i < parallelism; i++) {
if (operatorState.getState(i) != null) {
Collection<KeyedStateHandle> keyedStateHandles =
operatorState.getState(i).getManagedKeyedState();
if (subtaskKeyedStateHandles == null) {
subtaskKeyedStateHandles =
new ArrayList<>(parallelism * keyedStateHandles.size());
}
extractIntersectingState(
keyedStateHandles, subtaskKeyGroupRange, subtaskKeyedStateHandles);
}
}
return subtaskKeyedStateHandles != null ? subtaskKeyedStateHandles : emptyList();
} | 3.68 |
morf_MergeStatement_getTable | /**
* Gets the table to merge the data into.
*
* @return the table.
*/
public TableReference getTable() {
return table;
} | 3.68 |
dubbo_RegistryManager_reset | /**
* Reset state of AbstractRegistryFactory
*/
public void reset() {
destroyed.set(false);
registries.clear();
} | 3.68 |
hadoop_AbfsConfiguration_unset | /**
* Unsets parameter in the underlying Configuration object.
* Provided only as a convenience; does not add any account logic.
* @param key Configuration key
*/
public void unset(String key) {
rawConfig.unset(key);
} | 3.68 |
dubbo_ReflectUtils_getDesc | /**
* get constructor desc.
* "()V", "(Ljava/lang/String;I)V"
*
* @param c constructor.
* @return desc
*/
public static String getDesc(final CtConstructor c) throws NotFoundException {
StringBuilder ret = new StringBuilder("(");
CtClass[] parameterTypes = c.getParameterTypes();
for (int i = 0; i < parameterTypes.length; i++) {
ret.append(getDesc(parameterTypes[i]));
}
ret.append(')').append('V');
return ret.toString();
} | 3.68 |
morf_XmlDataSetConsumer_getValue | /**
* Get the value of the column from the provided record. Can be overridden in subclasses if required.
*
* @param record the record to extract a value from
* @param column the column to extract a value from
* @param table the name of the table being processed
* @return the value of column from record
*/
protected String getValue(Record record, Column column, @SuppressWarnings("unused") String table) {
return record.getString(column.getName());
} | 3.68 |
flink_FileDataIndexCache_get | /**
 * Get a region that contains the target bufferIndex and belongs to the target subpartition.
 *
 * @param subpartitionId the subpartition that the target buffer belongs to.
 * @param bufferIndex the index of the target buffer.
 * @return If the target region can be found in memory or on disk, return an optional containing
 *     the target region. Otherwise, return {@code Optional#empty()}.
*/
public Optional<T> get(int subpartitionId, int bufferIndex) {
// first of all, try to get region in memory.
Optional<T> regionOpt =
getCachedRegionContainsTargetBufferIndex(subpartitionId, bufferIndex);
if (regionOpt.isPresent()) {
T region = regionOpt.get();
checkNotNull(
// this is needed for cache entry remove algorithm like LRU.
internalCache.getIfPresent(
new CachedRegionKey(subpartitionId, region.getFirstBufferIndex())));
return Optional.of(region);
} else {
// try to find the target region and load it into the cache if found.
spilledRegionManager.findRegion(subpartitionId, bufferIndex, true);
return getCachedRegionContainsTargetBufferIndex(subpartitionId, bufferIndex);
}
} | 3.68 |
hudi_HoodieTableMetadataUtil_getFileIdLengthWithoutFileIndex | /**
* Returns the length of the fileID ignoring the fileIndex suffix
* <p>
* 0.10 version MDT code added -0 (0th fileIndex) to the fileID. This was removed later.
* <p>
* Examples:
* 0.11+ version: fileID: files-0000 returns 10
* 0.10 version: fileID: files-0000-0 returns 10
*
* @param fileId The fileID
* @return The length of the fileID ignoring the fileIndex suffix
*/
private static int getFileIdLengthWithoutFileIndex(String fileId) {
return fileId.endsWith("-0") ? fileId.length() - 2 : fileId.length();
} | 3.68 |
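A quick standalone check of the two Javadoc cases (the demo class and method names are illustrative only, not Hudi API):

public class FileIdLengthDemo {
    // same rule as above: drop a trailing "-0" fileIndex suffix if present
    static int lengthWithoutFileIndex(String fileId) {
        return fileId.endsWith("-0") ? fileId.length() - 2 : fileId.length();
    }

    public static void main(String[] args) {
        System.out.println(lengthWithoutFileIndex("files-0000"));   // 10 (0.11+ style)
        System.out.println(lengthWithoutFileIndex("files-0000-0")); // 10 (0.10 style)
    }
}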
open-banking-gateway_FintechConsentAccessImpl_findByCurrentServiceSessionOrderByModifiedDesc | /**
* Lists all consents that are associated with current service session.
*/
@Override
public List<ProtocolFacingConsent> findByCurrentServiceSessionOrderByModifiedDesc() {
ServiceSession serviceSession = entityManager.find(ServiceSession.class, serviceSessionId);
if (null == serviceSession) {
return Collections.emptyList();
}
List<Consent> consent = consents.findByServiceSessionIdOrderByModifiedAtDesc(serviceSession.getId());
// Anonymous consent session:
if (null == serviceSession.getAuthSession() || null == serviceSession.getAuthSession().getPsu()) {
return anonymousConsent(consent);
}
Optional<FintechPsuAspspPrvKey> psuAspspPrivateKey = keys.findByFintechIdAndPsuIdAndAspspId(
fintech.getId(),
serviceSession.getAuthSession().getPsu().getId(),
serviceSession.getAuthSession().getAction().getBankProfile().getBank().getId()
);
if (!psuAspspPrivateKey.isPresent() || consent.isEmpty()) {
return Collections.emptyList();
}
var psuAspspKey = fintechVault.psuAspspKeyFromPrivate(serviceSession, fintech, fintechPassword);
EncryptionService enc = encryptionService.forPublicAndPrivateKey(psuAspspPrivateKey.get().getId(), psuAspspKey);
return consent.stream().map(it -> new ProtocolFacingConsentImpl(it, enc, encServiceProvider, encryptionKeySerde))
.collect(Collectors.toList());
} | 3.68 |
flink_BuiltInFunctionDefinition_runtimeProvided | /**
* Specifies that this {@link BuiltInFunctionDefinition} is implemented during code
* generation.
*/
public Builder runtimeProvided() {
this.isRuntimeProvided = true;
return this;
} | 3.68 |
flink_PbSchemaValidationUtils_validateSimpleType | /**
 * Only validates the type match for simple types such as int, long, string and boolean.
*
* @param fd {@link FieldDescriptor} in proto descriptor
* @param logicalTypeRoot {@link LogicalTypeRoot} of row element
*/
private static void validateSimpleType(FieldDescriptor fd, LogicalTypeRoot logicalTypeRoot) {
if (!TYPE_MATCH_MAP.containsKey(fd.getJavaType())) {
throw new ValidationException("Unsupported protobuf java type: " + fd.getJavaType());
}
if (TYPE_MATCH_MAP.get(fd.getJavaType()).stream().noneMatch(x -> x == logicalTypeRoot)) {
throw new ValidationException(
"Protobuf field type does not match column type, "
+ fd.getJavaType()
+ "(protobuf) is not compatible of "
+ logicalTypeRoot);
}
} | 3.68 |
flink_Catalog_getFactory | /**
* Returns a factory for creating instances from catalog objects.
*
* <p>This method enables bypassing the discovery process. Implementers can directly pass
* internal catalog-specific objects to their own factory. For example, a custom {@link
* CatalogTable} can be processed by a custom {@link DynamicTableFactory}.
*
* <p>Because all factories are interfaces, the returned {@link Factory} instance can implement
* multiple supported extension points. An {@code instanceof} check is performed by the caller
* that checks whether a required factory is implemented; otherwise the discovery process is
* used.
*/
default Optional<Factory> getFactory() {
return Optional.empty();
} | 3.68 |
dubbo_BitList_add | /**
 * If the element to be added appears in originList, even if it is not in rootSet,
 * directly set its index in rootSet to true. (This may change the order of elements.)
 * <p>
 * If the element is not contained in originList, allocate tailList and add it to tailList.
 * <p>
 * Notice: adding duplicated elements is not recommended.
*/
@Override
public boolean add(E e) {
int index = originList.indexOf(e);
if (index > -1) {
rootSet.set(index);
return true;
} else {
if (tailList == null) {
tailList = new LinkedList<>();
}
return tailList.add(e);
}
} | 3.68 |
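A simplified, self-contained sketch of the same idea using java.util.BitSet; the class layout here is an assumption for illustration and omits most of dubbo's BitList state:

import java.util.ArrayList;
import java.util.BitSet;
import java.util.LinkedList;
import java.util.List;

// Tracks membership of originList elements with a BitSet and keeps
// elements that are not in originList in a separate tail list.
class SimpleBitList<E> {
    private final List<E> originList;
    private final BitSet rootSet = new BitSet();
    private List<E> tailList;

    SimpleBitList(List<E> originList) {
        this.originList = originList;
    }

    boolean add(E e) {
        int index = originList.indexOf(e);
        if (index > -1) {
            rootSet.set(index);  // element is known to originList: just flag it
            return true;
        }
        if (tailList == null) {
            tailList = new LinkedList<>();
        }
        return tailList.add(e);  // brand-new element goes to the tail
    }

    List<E> toList() {
        List<E> result = new ArrayList<>();
        rootSet.stream().forEach(i -> result.add(originList.get(i)));
        if (tailList != null) {
            result.addAll(tailList);
        }
        return result;
    }
}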
hbase_ReplicationPeerConfigUtil_getTableCF | /**
 * Get the TableCF from TableCFs; if it does not exist, return null.
*/
public static ReplicationProtos.TableCF getTableCF(ReplicationProtos.TableCF[] tableCFs,
String table) {
for (int i = 0, n = tableCFs.length; i < n; i++) {
ReplicationProtos.TableCF tableCF = tableCFs[i];
if (tableCF.getTableName().getQualifier().toStringUtf8().equals(table)) {
return tableCF;
}
}
return null;
} | 3.68 |
shardingsphere-elasticjob_TriggerService_removeTriggerFlag | /**
* Remove trigger flag.
*/
public void removeTriggerFlag() {
jobNodeStorage.removeJobNodeIfExisted(triggerNode.getLocalTriggerPath());
} | 3.68 |
framework_FieldGroup_firePreCommitEvent | /**
* Sends a preCommit event to all registered commit handlers
*
* @throws CommitException
* If the commit should be aborted
*/
private void firePreCommitEvent() throws CommitException {
CommitHandler[] handlers = commitHandlers
.toArray(new CommitHandler[commitHandlers.size()]);
for (CommitHandler handler : handlers) {
handler.preCommit(new CommitEvent(this));
}
} | 3.68 |
framework_SassLinker_createTempDir | /**
* Create folder in temporary space on disk.
*
 * @param partialPath the folder path to create under the system temporary directory
 * @return the created (or already existing) temporary folder
*/
private File createTempDir(String partialPath) {
String baseTempPath = System.getProperty("java.io.tmpdir");
File tempDir = new File(baseTempPath + File.separator + partialPath);
if (!tempDir.exists()) {
tempDir.mkdirs();
}
tempDir.deleteOnExit();
return tempDir;
} | 3.68 |
hbase_MetaRegionLocationCache_isValidMetaPath | /**
* Helper to check if the given 'path' corresponds to a meta znode. This listener is only
* interested in changes to meta znodes.
*/
private boolean isValidMetaPath(String path) {
return watcher.getZNodePaths().isMetaZNodePath(path);
} | 3.68 |
hbase_Mutation_getClusterIds | /** Returns the set of clusterIds that have consumed the mutation */
public List<UUID> getClusterIds() {
List<UUID> clusterIds = new ArrayList<>();
byte[] bytes = getAttribute(CONSUMED_CLUSTER_IDS);
if (bytes != null) {
ByteArrayDataInput in = ByteStreams.newDataInput(bytes);
int numClusters = in.readInt();
for (int i = 0; i < numClusters; i++) {
clusterIds.add(new UUID(in.readLong(), in.readLong()));
}
}
return clusterIds;
} | 3.68 |
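The decoder above expects the attribute bytes to be an int count followed by one (msb, lsb) long pair per UUID. A hedged sketch of a matching encoder using Guava's ByteStreams (shown for illustration; not necessarily how HBase itself writes the attribute):

import com.google.common.io.ByteArrayDataOutput;
import com.google.common.io.ByteStreams;
import java.util.List;
import java.util.UUID;

final class ClusterIdsCodecSketch {
    // Produces the layout read by getClusterIds(): count, then msb/lsb per UUID.
    static byte[] encode(List<UUID> clusterIds) {
        ByteArrayDataOutput out = ByteStreams.newDataOutput();
        out.writeInt(clusterIds.size());
        for (UUID id : clusterIds) {
            out.writeLong(id.getMostSignificantBits());
            out.writeLong(id.getLeastSignificantBits());
        }
        return out.toByteArray();
    }
}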
hadoop_StripedBlockReconstructor_clearBuffers | /**
* Clear all associated buffers.
*/
private void clearBuffers() {
getStripedReader().clearBuffers();
stripedWriter.clearBuffers();
} | 3.68 |
framework_FieldGroup_unbind | /**
* Detaches the field from its property id and removes it from this
* FieldBinder.
* <p>
* Note that the field is not detached from its property data source if it
* is no longer connected to the same property id it was bound to using this
* FieldBinder.
*
* @param field
* The field to detach
* @throws BindException
* If the field is not bound by this field binder or not bound
* to the correct property id
*/
public void unbind(Field<?> field) throws BindException {
Object propertyId = fieldToPropertyId.get(field);
if (propertyId == null) {
throw new BindException(
"The given field is not part of this FieldBinder");
}
TransactionalPropertyWrapper<?> wrapper = null;
Property fieldDataSource = field.getPropertyDataSource();
if (fieldDataSource instanceof TransactionalPropertyWrapper) {
wrapper = (TransactionalPropertyWrapper<?>) fieldDataSource;
fieldDataSource = ((TransactionalPropertyWrapper<?>) fieldDataSource)
.getWrappedProperty();
}
if (getItemDataSource() != null
&& fieldDataSource == getItemProperty(propertyId)) {
if (null != wrapper) {
wrapper.detachFromProperty();
}
field.setPropertyDataSource(null);
}
fieldToPropertyId.remove(field);
propertyIdToField.remove(propertyId);
} | 3.68 |
flink_RocksDBNativeMetricOptions_enableNumDeletesImmMemTables | /** Enables the metric reporting the total number of delete entries in the unflushed immutable memtables. */
public void enableNumDeletesImmMemTables() {
this.properties.add(RocksDBProperty.NumDeletesImmMemTables.getRocksDBProperty());
} | 3.68 |
flink_AbstractOrcFileInputFormat_orcVectorizedRowBatch | /** Gets the ORC VectorizedRowBatch structure from this batch. */
public OrcVectorizedBatchWrapper<BatchT> orcVectorizedRowBatch() {
return orcVectorizedRowBatch;
} | 3.68 |
flink_RestClusterClient_getJobDetails | /**
* Requests the job details.
*
* @param jobId The job id
* @return Job details
*/
public CompletableFuture<JobDetailsInfo> getJobDetails(JobID jobId) {
final JobDetailsHeaders detailsHeaders = JobDetailsHeaders.getInstance();
final JobMessageParameters params = new JobMessageParameters();
params.jobPathParameter.resolve(jobId);
return sendRequest(detailsHeaders, params);
} | 3.68 |
framework_Page_open | /**
* @deprecated As of 7.0, only retained to maintain compatibility with
* LegacyWindow.open methods. See documentation for
* {@link LegacyWindow#open(Resource, String, boolean)} for
* discussion about replacing API.
*/
@Deprecated
public void open(Resource resource, String windowName,
boolean tryToOpenAsPopup) {
openList.add(new OpenResource(resource, windowName, -1, -1,
BORDER_DEFAULT, tryToOpenAsPopup));
uI.markAsDirty();
} | 3.68 |
zxing_BitSource_getByteOffset | /**
* @return index of next byte in input byte array which would be read by the next call to {@link #readBits(int)}.
*/
public int getByteOffset() {
return byteOffset;
} | 3.68 |
morf_Oracle_reclassifyException | /**
* @see org.alfasoftware.morf.jdbc.DatabaseType#reclassifyException(java.lang.Exception)
*/
@Override
public Exception reclassifyException(Exception e) {
// Reclassify OracleXA exceptions
Optional<Integer> xaErrorCode = getErrorCodeFromOracleXAException(e);
if (xaErrorCode.isPresent()) {
// ORA-00060: Deadlock detected while waiting for resource
// ORA-02049: Distributed transaction waiting for lock
if (xaErrorCode.get() == 60 || xaErrorCode.get() == 2049) {
return new SQLTransientException(e.getMessage(), null, xaErrorCode.get(), e);
}
return new SQLException(e.getMessage(), null, xaErrorCode.get(), e);
}
// Reclassify any SQLExceptions which should be SQLTransientExceptions but are not. Specifically this handles BatchUpdateExceptions
if(e instanceof SQLException && !(e instanceof SQLTransientException)) {
int errorCode = ((SQLException) e).getErrorCode();
if(errorCode == 60 || errorCode == 2049) {
return new SQLTransientException(e.getMessage(), ((SQLException) e).getSQLState(), errorCode, e);
}
}
return e;
} | 3.68 |
framework_VTooltip_getCloseTimeout | /**
* Returns the time (in ms) the tooltip should be displayed after an event
* that will cause it to be closed (e.g. mouse click outside the component,
* key down).
*
* @return The close timeout (in ms)
*/
public int getCloseTimeout() {
return closeTimeout;
} | 3.68 |
hbase_BackupManager_createBackupInfo | /**
* Creates a backup info based on input backup request.
* @param backupId backup id
* @param type type
* @param tableList table list
* @param targetRootDir root dir
* @param workers number of parallel workers
* @param bandwidth bandwidth per worker in MB per sec
* @throws BackupException exception
*/
public BackupInfo createBackupInfo(String backupId, BackupType type, List<TableName> tableList,
String targetRootDir, int workers, long bandwidth) throws BackupException {
if (targetRootDir == null) {
throw new BackupException("Wrong backup request parameter: target backup root directory");
}
if (type == BackupType.FULL && (tableList == null || tableList.isEmpty())) {
// If the table list is null for a full backup, which means backing up all tables, then fill the table
// list with all user tables from meta. If no table is available, throw the request exception.
List<TableDescriptor> htds = null;
try (Admin admin = conn.getAdmin()) {
htds = admin.listTableDescriptors();
} catch (Exception e) {
throw new BackupException(e);
}
if (htds == null) {
throw new BackupException("No table exists for full backup of all tables.");
} else {
tableList = new ArrayList<>();
for (TableDescriptor hTableDescriptor : htds) {
TableName tn = hTableDescriptor.getTableName();
if (tn.equals(BackupSystemTable.getTableName(conf))) {
// skip backup system table
continue;
}
tableList.add(hTableDescriptor.getTableName());
}
LOG.info("Full backup all the tables available in the cluster: {}", tableList);
}
}
// there are one or more tables in the table list
backupInfo = new BackupInfo(backupId, type, tableList.toArray(new TableName[tableList.size()]),
targetRootDir);
backupInfo.setBandwidth(bandwidth);
backupInfo.setWorkers(workers);
return backupInfo;
} | 3.68 |
flink_ShadeParser_parseShadeOutput | /**
* Parses the output of a Maven build where {@code shade:shade} was used, and returns a set of
* bundled dependencies for each module.
*
* <p>The returned dependencies will NEVER contain the scope or optional flag.
*
* <p>This method only considers the {@code shade-flink} and {@code shade-dist} executions,
* because all artifacts we produce that are either published or referenced are created by these
* executions. In other words, all artifacts from other executions are only used internally by
* the module that created them.
*/
public static Map<String, Set<Dependency>> parseShadeOutput(Path buildOutput)
throws IOException {
try (Stream<String> lines = Files.lines(buildOutput)) {
return parseShadeOutput(lines);
}
} | 3.68 |
hadoop_StreamUtil_findInClasspath | /** @return a jar file path or a base directory or null if not found.
*/
public static String findInClasspath(String className, ClassLoader loader) {
String relPath = className;
relPath = relPath.replace('.', '/');
relPath += ".class";
java.net.URL classUrl = loader.getResource(relPath);
String codePath;
if (classUrl != null) {
boolean inJar = classUrl.getProtocol().equals("jar");
codePath = classUrl.toString();
if (codePath.startsWith("jar:")) {
codePath = codePath.substring("jar:".length());
}
if (codePath.startsWith("file:")) { // can have both
codePath = codePath.substring("file:".length());
}
if (inJar) {
// A jar spec: remove class suffix in /path/my.jar!/package/Class
int bang = codePath.lastIndexOf('!');
codePath = codePath.substring(0, bang);
} else {
// A class spec: remove the /my/package/Class.class portion
int pos = codePath.lastIndexOf(relPath);
if (pos == -1) {
throw new IllegalArgumentException("invalid codePath: className=" + className
+ " codePath=" + codePath);
}
codePath = codePath.substring(0, pos);
}
} else {
codePath = null;
}
return codePath;
} | 3.68 |
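A hedged usage sketch of the helper above; the class name passed in is only an example:

// Locate the jar file or classes directory that provides a given class.
String where = StreamUtil.findInClasspath("org.apache.hadoop.streaming.StreamJob",
        Thread.currentThread().getContextClassLoader());
System.out.println(where); // e.g. a path ending in .jar, a base directory, or null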
morf_GraphBasedUpgradeSchemaChangeVisitor_writeStatements | /**
* Write statements to the current node
*/
private void writeStatements(Collection<String> statements) {
currentNode.addAllUpgradeStatements(statements);
} | 3.68 |
hbase_ByteArrayComparable_compareTo | /**
* Special compareTo method for subclasses, to avoid copying bytes unnecessarily.
* @param value bytes to compare within a ByteBuffer
* @param offset offset into value
* @param length number of bytes to compare
* @return a negative integer, zero, or a positive integer as this object is less than, equal to,
* or greater than the specified object.
*/
public int compareTo(ByteBuffer value, int offset, int length) {
// For BC, providing a default implementation here which is doing a bytes copy to a temp byte[]
// and calling compareTo(byte[]). Make sure to override this method in subclasses to avoid
// copying bytes unnecessarily.
byte[] temp = new byte[length];
ByteBufferUtils.copyFromBufferToArray(temp, value, offset, 0, length);
return compareTo(temp);
} | 3.68 |
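To illustrate the kind of copy-free override the comment recommends, here is a pure-JDK sketch that compares a ByteBuffer region against a byte[] byte-by-byte without a temporary array (an assumed helper, not HBase's ByteBufferUtils):

import java.nio.ByteBuffer;

final class BufferCompareSketch {
    // Lexicographically compares value[offset, offset + length) against target,
    // treating bytes as unsigned, without copying the buffer contents.
    static int compareTo(ByteBuffer value, int offset, int length, byte[] target) {
        int n = Math.min(length, target.length);
        for (int i = 0; i < n; i++) {
            int a = value.get(offset + i) & 0xff;
            int b = target[i] & 0xff;
            if (a != b) {
                return a - b;
            }
        }
        return length - target.length;
    }
}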
framework_AbstractDateField_getAssistiveLabel | /**
* Gets the assistive label of a calendar navigation element.
*
* @param element
* the element of which to get the assistive label
* @since 8.4
*/
public void getAssistiveLabel(AccessibleElement element) {
getState(false).assistiveLabels.get(element);
} | 3.68 |
framework_FilesystemContainer_setParent | /**
* Returns <code>false</code> when moving files around in the filesystem is
* not supported.
*
* @param itemId
* the ID of the item.
* @param newParentId
* the ID of the Item that's to be the new parent of the Item
* identified with itemId.
* @return <code>true</code> if the operation is successful otherwise
* <code>false</code>.
* @throws UnsupportedOperationException
* if the setParent is not supported.
*/
@Override
public boolean setParent(Object itemId, Object newParentId)
throws UnsupportedOperationException {
throw new UnsupportedOperationException("File moving is not supported");
} | 3.68 |
graphhopper_WaySegmentParser_setWorkerThreads | /**
* @param workerThreads the number of threads used for the low level reading of the OSM file
*/
public Builder setWorkerThreads(int workerThreads) {
waySegmentParser.workerThreads = workerThreads;
return this;
} | 3.68 |
hbase_HttpServer_getParameter | /**
* Unquote the name and quote the value.
*/
@Override
public String getParameter(String name) {
return HtmlQuoting
.quoteHtmlChars(rawRequest.getParameter(HtmlQuoting.unquoteHtmlChars(name)));
} | 3.68 |
morf_AbstractSqlDialectTest_testDropTables | /**
 * Tests SQL for dropping tables with optional parameters.
*/
@Test
public void testDropTables() {
Table table1 = metadata.getTable(TEST_TABLE);
Table table2 = metadata.getTable(OTHER_TABLE);
compareStatements(
expectedDropSingleTable(),
testDialect.dropTables(ImmutableList.of(table1), false, false)
);
compareStatements(
expectedDropTables(),
testDialect.dropTables(ImmutableList.of(table1, table2), false, false));
compareStatements(
expectedDropTablesWithParameters(),
testDialect.dropTables(ImmutableList.of(table1, table2), true, true));
} | 3.68 |
hbase_HFileBlock_getOffset | /**
 * Cannot be {@link #UNSET}. Must be a legitimate value. Used when re-making the {@link BlockCacheKey}
 * when the block is returned to the cache.
* @return the offset of this block in the file it was read from
*/
long getOffset() {
if (offset < 0) {
throw new IllegalStateException("HFile block offset not initialized properly");
}
return offset;
} | 3.68 |
hudi_HoodieRecordMerger_shouldFlush | /**
 * In some cases business logic performs checks before flushing a merged record to disk.
 * This method does the check, and when false is returned, it means the merged record should not
 * be flushed.
 *
 * @param record the merged record.
 * @param schema the schema of the merged record.
 * @return a boolean indicating whether the merged record should be flushed or not.
*
* <p> This interface is experimental and might be evolved in the future.
**/
default boolean shouldFlush(HoodieRecord record, Schema schema, TypedProperties props) throws IOException {
return true;
} | 3.68 |
dubbo_AbstractProxyProtocol_isBound | /**
* @return
*/
@Override
public boolean isBound() {
return false;
} | 3.68 |
zxing_CharacterSetECI_getCharacterSetECIByName | /**
* @param name character set ECI encoding name
* @return CharacterSetECI representing ECI for character encoding, or null if it is legal
* but unsupported
*/
public static CharacterSetECI getCharacterSetECIByName(String name) {
return NAME_TO_ECI.get(name);
} | 3.68 |
hbase_HBaseTestingUtility_isNewVersionBehaviorEnabled | /**
* Check whether the tests should assume NEW_VERSION_BEHAVIOR when creating new column families.
* Default to false.
*/
public boolean isNewVersionBehaviorEnabled() {
final String propName = "hbase.tests.new.version.behavior";
String v = System.getProperty(propName);
if (v != null) {
return Boolean.parseBoolean(v);
}
return false;
} | 3.68 |
hudi_HoodieDataTableValidator_readConfigFromFileSystem | /**
* Reads config from the file system.
*
* @param jsc {@link JavaSparkContext} instance.
* @param cfg {@link Config} instance.
* @return the {@link TypedProperties} instance.
*/
private TypedProperties readConfigFromFileSystem(JavaSparkContext jsc, Config cfg) {
return UtilHelpers.readConfig(jsc.hadoopConfiguration(), new Path(cfg.propsFilePath), cfg.configs)
.getProps(true);
} | 3.68 |
hudi_FlinkWriteClients_createWriteClientV2 | /**
* Creates the Flink write client.
*
* <p>This expects to be used by the driver, the client can then send requests for files view.
*
* <p>The task context supplier is a constant: the write token is always '0-1-0'.
*
* <p>Note: different with {@link #createWriteClient}, the fs view storage options are set into the given
* configuration {@code conf}.
*/
@SuppressWarnings("rawtypes")
public static HoodieFlinkWriteClient createWriteClientV2(Configuration conf) {
HoodieWriteConfig writeConfig = getHoodieClientConfig(conf, true, false);
// build the write client to start the embedded timeline server
final HoodieFlinkWriteClient writeClient = new HoodieFlinkWriteClient<>(new HoodieFlinkEngineContext(HadoopConfigurations.getHadoopConf(conf)), writeConfig);
writeClient.setOperationType(WriteOperationType.fromValue(conf.getString(FlinkOptions.OPERATION)));
// create the filesystem view storage properties for client
final FileSystemViewStorageConfig viewStorageConfig = writeConfig.getViewStorageConfig();
conf.setString(FileSystemViewStorageConfig.VIEW_TYPE.key(), viewStorageConfig.getStorageType().name());
conf.setString(FileSystemViewStorageConfig.REMOTE_HOST_NAME.key(), viewStorageConfig.getRemoteViewServerHost());
conf.setInteger(FileSystemViewStorageConfig.REMOTE_PORT_NUM.key(), viewStorageConfig.getRemoteViewServerPort());
return writeClient;
} | 3.68 |
flink_InputFormatProvider_of | /** Helper method for creating a static provider with a provided source parallelism. */
static InputFormatProvider of(
InputFormat<RowData, ?> inputFormat, @Nullable Integer sourceParallelism) {
return new InputFormatProvider() {
@Override
public InputFormat<RowData, ?> createInputFormat() {
return inputFormat;
}
@Override
public boolean isBounded() {
return true;
}
@Override
public Optional<Integer> getParallelism() {
return Optional.ofNullable(sourceParallelism);
}
};
} | 3.68 |
hadoop_StateStoreSerializableImpl_getPrimaryKey | /**
* Get the primary key for a record. If we don't want to store in folders, we
* need to remove / from the name.
*
* @param record Record to get the primary key for.
* @return Primary key for the record.
*/
protected static String getPrimaryKey(BaseRecord record) {
String primaryKey = record.getPrimaryKey();
primaryKey = primaryKey.replaceAll("/", SLASH_MARK);
primaryKey = primaryKey.replaceAll(":", COLON_MARK);
return primaryKey;
} | 3.68 |
hudi_FlinkOptions_optionalOptions | /**
* Returns all the optional config options.
*/
public static Set<ConfigOption<?>> optionalOptions() {
Set<ConfigOption<?>> options = new HashSet<>(allOptions());
options.remove(PATH);
return options;
} | 3.68 |
hbase_HStoreFile_getStreamScanner | /**
* Get a scanner which uses streaming read.
* <p>
* Must be called after initReader.
*/
public StoreFileScanner getStreamScanner(boolean canUseDropBehind, boolean cacheBlocks,
boolean isCompaction, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn)
throws IOException {
return createStreamReader(canUseDropBehind).getStoreFileScanner(cacheBlocks, false,
isCompaction, readPt, scannerOrder, canOptimizeForNonNullColumn);
} | 3.68 |
flink_Execution_getReleaseFuture | /**
* Gets the release future which is completed once the execution reaches a terminal state and
* the assigned resource has been released. This future is always completed from the job
* master's main thread.
*
* @return A future which is completed once the assigned resource has been released
*/
public CompletableFuture<?> getReleaseFuture() {
return releaseFuture;
} | 3.68 |
morf_NamedParameterPreparedStatement_setDate | /**
* Sets the value of a named date parameter.
*
* @param parameter the parameter metadata.
* @param value the parameter value.
* @return this, for method chaining
* @exception SQLException if an error occurs when setting the parameter
*/
public NamedParameterPreparedStatement setDate(SqlParameter parameter, final Date value) throws SQLException {
forEachOccurrenceOfParameter(parameter, new Operation() {
@Override
public void apply(int parameterIndex) throws SQLException {
statement.setDate(parameterIndex, value);
}
});
return this;
} | 3.68 |
framework_VScrollTable_setIndex | /**
* Sets the index of the row in the whole table. Currently used just
* to set even/odd classname
*
* @param indexInWholeTable
*/
private void setIndex(int indexInWholeTable) {
index = indexInWholeTable;
boolean isOdd = indexInWholeTable % 2 == 0;
// Inverted logic to be backwards compatible with earlier 6.4.
// It is very strange because rows 1,3,5 are considered "even"
// and 2,4,6 "odd".
//
// First remove any old styles so that both styles aren't
// applied when indexes are updated.
String primaryStyleName = getStylePrimaryName();
if (primaryStyleName != null && !primaryStyleName.equals("")) {
removeStyleName(getStylePrimaryName());
}
if (!isOdd) {
addStyleName(VScrollTable.this.getStylePrimaryName()
+ "-row-odd");
} else {
addStyleName(
VScrollTable.this.getStylePrimaryName() + "-row");
}
} | 3.68 |
querydsl_Alias_resetAlias | /**
* Reset the alias
*/
public static void resetAlias() {
aliasFactory.reset();
} | 3.68 |
framework_DefaultDeploymentConfiguration_isXsrfProtectionEnabled | /**
* {@inheritDoc}
* <p>
* The default is true.
*/
@Override
public boolean isXsrfProtectionEnabled() {
return xsrfProtectionEnabled;
} | 3.68 |
framework_ComponentConnectorLayoutSlot_getLayoutManager | /**
* Returns the layout manager for the managed layout.
*
* @return layout manager
*/
public LayoutManager getLayoutManager() {
return layout.getLayoutManager();
} | 3.68 |
hbase_HFileReaderImpl_getLastRowKey | /**
* TODO left from {@link HFile} version 1: move this to StoreFile after Ryan's patch goes in to
* eliminate {@link KeyValue} here.
* @return the last row key, or null if the file is empty.
*/
@Override
public Optional<byte[]> getLastRowKey() {
// We have to copy the row part to form the row key alone
return getLastKey().map(CellUtil::cloneRow);
} | 3.68 |
querydsl_SQLExpressions_stddevPop | /**
 * Returns the population standard deviation, i.e. the square root of the population variance.
*
* @param expr argument
* @return stddev_pop(expr)
*/
public static <T extends Number> WindowOver<T> stddevPop(Expression<T> expr) {
return new WindowOver<T>(expr.getType(), SQLOps.STDDEVPOP, expr);
} | 3.68 |
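A hedged usage sketch of the returned WindowOver expression; the employee Q-type, its columns and the surrounding query are assumptions:

// Population standard deviation of salary per department, used as a window function.
query.select(employee.id,
             SQLExpressions.stddevPop(employee.salary)
                           .over()
                           .partitionBy(employee.departmentId))
     .from(employee)
     .fetch();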
flink_Vectorizer_setWriter | /**
* Users are not supposed to use this method since this is intended to be used only by the
* {@link OrcBulkWriter}.
*
* @param writer the underlying ORC Writer.
*/
public void setWriter(Writer writer) {
this.writer = writer;
} | 3.68 |
querydsl_JDOExpressions_selectOne | /**
* Create a new detached {@link JDOQuery} instance with the projection 1
*
* @return select(1)
*/
public static JDOQuery<Integer> selectOne() {
return select(Expressions.ONE);
} | 3.68 |
flink_FlinkRelUtil_initializeArray | /**
* Returns an int array with given length and initial value.
*
* @param length array length
* @param initVal initial value
* @return initialized int array
*/
public static int[] initializeArray(int length, int initVal) {
final int[] array = new int[length];
Arrays.fill(array, initVal);
return array;
} | 3.68 |
hbase_Threads_threadDumpingIsAlive | /** Waits on the passed thread to die dumping a threaddump every minute while its up. */
public static void threadDumpingIsAlive(final Thread t) throws InterruptedException {
if (t == null) {
return;
}
while (t.isAlive()) {
t.join(60 * 1000);
if (t.isAlive()) {
printThreadInfo(System.out,
"Automatic Stack Trace every 60 seconds waiting on " + t.getName());
}
}
} | 3.68 |
framework_Window_getPositionX | /**
 * Gets the distance of the Window's left border in pixels from the left border of the
 * containing (main window) when the window is in {@link WindowMode#NORMAL}.
 *
 * @return the distance of the Window's left border in pixels from the left border of
 * the containing (main window), or -1 if unspecified
* @since 4.0.0
*/
public int getPositionX() {
return getState(false).positionX;
} | 3.68 |
morf_ConnectionResourcesBean_setPort | /**
* @see org.alfasoftware.morf.jdbc.AbstractConnectionResources#setPort(int)
*/
@Override
public void setPort(int port) {
this.port = port;
} | 3.68 |
flink_PlannerCallProcedureOperation_toExternal | /** Convert the value with internal representation to the value with external representation. */
private Object toExternal(Object internalValue, DataType inputType, ClassLoader classLoader) {
if (!(DataTypeUtils.isInternal(inputType))) {
// if the expected input type of the procedure is not internal type,
// which means the converted Flink internal value doesn't
// match the expected input type, then we need to convert the Flink
// internal value to external value
DataStructureConverter<Object, Object> converter =
DataStructureConverters.getConverter(inputType);
converter.open(classLoader);
return converter.toExternal(internalValue);
} else {
return internalValue;
}
} | 3.68 |
flink_RecordWriter_randomEmit | /** This is used to send LatencyMarks to a random target channel. */
public void randomEmit(T record) throws IOException {
checkErroneous();
int targetSubpartition = rng.nextInt(numberOfChannels);
emit(record, targetSubpartition);
} | 3.68 |
framework_WebBrowser_isChromeFrame | /**
* Tests whether the user is using Chrome Frame.
*
* @return true if the user is using Chrome Frame, false if the user is not
* using Chrome or if no information on the browser is present
*/
public boolean isChromeFrame() {
if (browserDetails == null) {
return false;
}
return browserDetails.isChromeFrame();
} | 3.68 |
MagicPlugin_MagicController_isMage | // TODO: Remove the if and replace it with a precondition
// once we're sure nothing is calling this with a null value.
@SuppressWarnings({"null", "unused"})
@Override
public boolean isMage(Entity entity) {
if (entity == null) return false;
String id = mageIdentifier.fromEntity(entity);
return mages.containsKey(id);
} | 3.68 |
hbase_FlushSnapshotSubprocedure_acquireBarrier | /**
* do nothing, core of snapshot is executed in {@link #insideBarrier} step.
*/
@Override
public void acquireBarrier() throws ForeignException {
// NO OP
} | 3.68 |
hbase_RegionServerObserver_preExecuteProcedures | /**
* This will be called before executing procedures
* @param ctx the environment to interact with the framework and region server.
*/
default void preExecuteProcedures(ObserverContext<RegionServerCoprocessorEnvironment> ctx)
throws IOException {
} | 3.68 |
zxing_AddressBookParsedResult_getEmailTypes | /**
 * @return optional descriptions of the type of each e-mail. It could be something like "WORK", but
 * there is no guaranteed or standard format.
*/
public String[] getEmailTypes() {
return emailTypes;
} | 3.68 |
querydsl_GroupBy_list | /**
* Create a new aggregating list expression
*
* @param groupExpression values for this expression will be accumulated into a list
* @param <E>
* @param <F>
* @return wrapper expression
*/
public static <E, F> AbstractGroupExpression<E, List<F>> list(GroupExpression<E, F> groupExpression) {
return new MixinGroupExpression<E, F, List<F>>(groupExpression, new GList<F>(groupExpression));
} | 3.68 |
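For context, a hedged usage sketch of the list() aggregation with Querydsl's groupBy transform; the post Q-type, its user path and the query instance are assumptions:

// Groups rows by user and accumulates each user's posts into a List.
Map<User, List<Post>> postsByUser = query.from(post)
        .transform(GroupBy.groupBy(post.user).as(GroupBy.list(post)));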
hudi_HoodieTableMetaClient_getMetaPath | /**
* @return Meta path
*/
public String getMetaPath() {
return metaPath.get().toString(); // this invocation is cached
} | 3.68 |
hbase_ProcedureCoordinator_abortProcedure | /**
* Abort the procedure with the given name
* @param procName name of the procedure to abort
* @param reason serialized information about the abort
*/
public void abortProcedure(String procName, ForeignException reason) {
LOG.debug("abort procedure " + procName, reason);
// if we know about the Procedure, notify it
Procedure proc = procedures.get(procName);
if (proc == null) {
return;
}
proc.receive(reason);
} | 3.68 |
graphhopper_GraphHopper_init | /**
* Reads the configuration from a {@link GraphHopperConfig} object which can be manually filled, or more typically
* is read from `config.yml`.
* <p>
* Important note: Calling this method overwrites the configuration done in some of the setter methods of this class,
* so generally it is advised to either use this method to configure GraphHopper or the different setter methods,
* but not both. Unfortunately, this still does not cover all cases and sometimes you have to use both, but then you
* should make sure there are no conflicts. If you need both it might also help to call the init before calling the
* setters, because this way the init method won't apply defaults to configuration options you already chose using
* the setters.
*/
public GraphHopper init(GraphHopperConfig ghConfig) {
ensureNotLoaded();
// disabling_allowed config options were removed for GH 3.0
if (ghConfig.has("routing.ch.disabling_allowed"))
throw new IllegalArgumentException("The 'routing.ch.disabling_allowed' configuration option is no longer supported");
if (ghConfig.has("routing.lm.disabling_allowed"))
throw new IllegalArgumentException("The 'routing.lm.disabling_allowed' configuration option is no longer supported");
if (ghConfig.has("osmreader.osm"))
throw new IllegalArgumentException("Instead of osmreader.osm use datareader.file, for other changes see CHANGELOG.md");
String tmpOsmFile = ghConfig.getString("datareader.file", "");
if (!isEmpty(tmpOsmFile))
osmFile = tmpOsmFile;
String graphHopperFolder = ghConfig.getString("graph.location", "");
if (isEmpty(graphHopperFolder) && isEmpty(ghLocation)) {
if (isEmpty(osmFile))
throw new IllegalArgumentException("If no graph.location is provided you need to specify an OSM file.");
graphHopperFolder = pruneFileEnd(osmFile) + "-gh";
}
ghLocation = graphHopperFolder;
countryRuleFactory = ghConfig.getBool("country_rules.enabled", false) ? new CountryRuleFactory() : null;
customAreasDirectory = ghConfig.getString("custom_areas.directory", customAreasDirectory);
defaultSegmentSize = ghConfig.getInt("graph.dataaccess.segment_size", defaultSegmentSize);
String daTypeString = ghConfig.getString("graph.dataaccess.default_type", ghConfig.getString("graph.dataaccess", "RAM_STORE"));
dataAccessDefaultType = DAType.fromString(daTypeString);
for (Map.Entry<String, Object> entry : ghConfig.asPMap().toMap().entrySet()) {
if (entry.getKey().startsWith("graph.dataaccess.type."))
dataAccessConfig.put(entry.getKey().substring("graph.dataaccess.type.".length()), entry.getValue().toString());
if (entry.getKey().startsWith("graph.dataaccess.mmap.preload."))
dataAccessConfig.put(entry.getKey().substring("graph.dataaccess.mmap.".length()), entry.getValue().toString());
}
if (ghConfig.getBool("max_speed_calculator.enabled", false))
maxSpeedCalculator = new MaxSpeedCalculator(MaxSpeedCalculator.createLegalDefaultSpeeds());
sortGraph = ghConfig.getBool("graph.do_sort", sortGraph);
removeZipped = ghConfig.getBool("graph.remove_zipped", removeZipped);
if (!ghConfig.getString("spatial_rules.location", "").isEmpty())
throw new IllegalArgumentException("spatial_rules.location has been deprecated. Please use custom_areas.directory instead and read the documentation for custom areas.");
if (!ghConfig.getString("spatial_rules.borders_directory", "").isEmpty())
throw new IllegalArgumentException("spatial_rules.borders_directory has been deprecated. Please use custom_areas.directory instead and read the documentation for custom areas.");
// todo: maybe introduce custom_areas.max_bbox if this is needed later
if (!ghConfig.getString("spatial_rules.max_bbox", "").isEmpty())
throw new IllegalArgumentException("spatial_rules.max_bbox has been deprecated. There is no replacement, all custom areas will be considered.");
String customAreasDirectory = ghConfig.getString("custom_areas.directory", "");
JsonFeatureCollection globalAreas = GraphHopper.resolveCustomAreas(customAreasDirectory);
String customModelFolder = ghConfig.getString("custom_models.directory", ghConfig.getString("custom_model_folder", ""));
setProfiles(GraphHopper.resolveCustomModelFiles(customModelFolder, ghConfig.getProfiles(), globalAreas));
if (ghConfig.has("graph.vehicles") && ghConfig.has("graph.flag_encoders"))
throw new IllegalArgumentException("Remove graph.flag_encoders as it cannot be used in parallel with graph.vehicles");
if (ghConfig.has("graph.flag_encoders"))
logger.warn("The option graph.flag_encoders is deprecated and will be removed. Replace with graph.vehicles");
vehiclesString = ghConfig.getString("graph.vehicles", ghConfig.getString("graph.flag_encoders", vehiclesString));
encodedValuesString = ghConfig.getString("graph.encoded_values", encodedValuesString);
dateRangeParserString = ghConfig.getString("datareader.date_range_parser_day", dateRangeParserString);
if (ghConfig.getString("graph.locktype", "native").equals("simple"))
lockFactory = new SimpleFSLockFactory();
else
lockFactory = new NativeFSLockFactory();
// elevation
if (ghConfig.has("graph.elevation.smoothing"))
throw new IllegalArgumentException("Use 'graph.elevation.edge_smoothing: moving_average' or the new 'graph.elevation.edge_smoothing: ramer'. See #2634.");
osmReaderConfig.setElevationSmoothing(ghConfig.getString("graph.elevation.edge_smoothing", osmReaderConfig.getElevationSmoothing()));
osmReaderConfig.setSmoothElevationAverageWindowSize(ghConfig.getDouble("graph.elevation.edge_smoothing.moving_average.window_size", osmReaderConfig.getSmoothElevationAverageWindowSize()));
osmReaderConfig.setElevationSmoothingRamerMax(ghConfig.getInt("graph.elevation.edge_smoothing.ramer.max_elevation", osmReaderConfig.getElevationSmoothingRamerMax()));
osmReaderConfig.setLongEdgeSamplingDistance(ghConfig.getDouble("graph.elevation.long_edge_sampling_distance", osmReaderConfig.getLongEdgeSamplingDistance()));
osmReaderConfig.setElevationMaxWayPointDistance(ghConfig.getDouble("graph.elevation.way_point_max_distance", osmReaderConfig.getElevationMaxWayPointDistance()));
routerConfig.setElevationWayPointMaxDistance(ghConfig.getDouble("graph.elevation.way_point_max_distance", routerConfig.getElevationWayPointMaxDistance()));
ElevationProvider elevationProvider = createElevationProvider(ghConfig);
setElevationProvider(elevationProvider);
if (osmReaderConfig.getLongEdgeSamplingDistance() < Double.MAX_VALUE && !elevationProvider.canInterpolate())
logger.warn("Long edge sampling enabled, but bilinear interpolation disabled. See #1953");
// optimizable prepare
minNetworkSize = ghConfig.getInt("prepare.min_network_size", minNetworkSize);
subnetworksThreads = ghConfig.getInt("prepare.subnetworks.threads", subnetworksThreads);
// prepare CH&LM
chPreparationHandler.init(ghConfig);
lmPreparationHandler.init(ghConfig);
// osm import
// We do a few checks for import.osm.ignored_highways to prevent configuration errors when migrating from an older
// GH version.
if (!ghConfig.has("import.osm.ignored_highways"))
throw new IllegalArgumentException("Missing 'import.osm.ignored_highways'. Not using this parameter can decrease performance, see config-example.yml for more details");
String ignoredHighwaysString = ghConfig.getString("import.osm.ignored_highways", "");
if ((ignoredHighwaysString.contains("footway") || ignoredHighwaysString.contains("path")) && ghConfig.getProfiles().stream().map(Profile::getName).anyMatch(p -> p.contains("foot") || p.contains("hike")))
throw new IllegalArgumentException("You should not use import.osm.ignored_highways=footway or =path in conjunction with pedestrian profiles. This is probably an error in your configuration.");
if ((ignoredHighwaysString.contains("cycleway") || ignoredHighwaysString.contains("path")) && ghConfig.getProfiles().stream().map(Profile::getName).anyMatch(p -> p.contains("mtb") || p.contains("bike")))
throw new IllegalArgumentException("You should not use import.osm.ignored_highways=cycleway or =path in conjunction with bicycle profiles. This is probably an error in your configuration");
osmReaderConfig.setIgnoredHighways(Arrays.stream(ghConfig.getString("import.osm.ignored_highways", String.join(",", osmReaderConfig.getIgnoredHighways()))
.split(",")).map(String::trim).collect(Collectors.toList()));
osmReaderConfig.setParseWayNames(ghConfig.getBool("datareader.instructions", osmReaderConfig.isParseWayNames()));
osmReaderConfig.setPreferredLanguage(ghConfig.getString("datareader.preferred_language", osmReaderConfig.getPreferredLanguage()));
osmReaderConfig.setMaxWayPointDistance(ghConfig.getDouble(Routing.INIT_WAY_POINT_MAX_DISTANCE, osmReaderConfig.getMaxWayPointDistance()));
osmReaderConfig.setWorkerThreads(ghConfig.getInt("datareader.worker_threads", osmReaderConfig.getWorkerThreads()));
// index
preciseIndexResolution = ghConfig.getInt("index.high_resolution", preciseIndexResolution);
maxRegionSearch = ghConfig.getInt("index.max_region_search", maxRegionSearch);
// urban density calculation
residentialAreaRadius = ghConfig.getDouble("graph.urban_density.residential_radius", residentialAreaRadius);
residentialAreaSensitivity = ghConfig.getDouble("graph.urban_density.residential_sensitivity", residentialAreaSensitivity);
cityAreaRadius = ghConfig.getDouble("graph.urban_density.city_radius", cityAreaRadius);
cityAreaSensitivity = ghConfig.getDouble("graph.urban_density.city_sensitivity", cityAreaSensitivity);
urbanDensityCalculationThreads = ghConfig.getInt("graph.urban_density.threads", urbanDensityCalculationThreads);
// routing
routerConfig.setMaxVisitedNodes(ghConfig.getInt(Routing.INIT_MAX_VISITED_NODES, routerConfig.getMaxVisitedNodes()));
routerConfig.setTimeoutMillis(ghConfig.getLong(Routing.INIT_TIMEOUT_MS, routerConfig.getTimeoutMillis()));
routerConfig.setMaxRoundTripRetries(ghConfig.getInt(RoundTrip.INIT_MAX_RETRIES, routerConfig.getMaxRoundTripRetries()));
routerConfig.setNonChMaxWaypointDistance(ghConfig.getInt(Parameters.NON_CH.MAX_NON_CH_POINT_DISTANCE, routerConfig.getNonChMaxWaypointDistance()));
routerConfig.setInstructionsEnabled(ghConfig.getBool(Routing.INIT_INSTRUCTIONS, routerConfig.isInstructionsEnabled()));
int activeLandmarkCount = ghConfig.getInt(Landmark.ACTIVE_COUNT_DEFAULT, Math.min(8, lmPreparationHandler.getLandmarks()));
if (activeLandmarkCount > lmPreparationHandler.getLandmarks())
throw new IllegalArgumentException("Default value for active landmarks " + activeLandmarkCount
+ " should be less or equal to landmark count of " + lmPreparationHandler.getLandmarks());
routerConfig.setActiveLandmarkCount(activeLandmarkCount);
return this;
} | 3.68 |
querydsl_MathExpressions_sign | /**
* Create a {@code sign(num)} expression
*
* <p>Returns the positive (+1), zero (0), or negative (-1) sign of num.</p>
*
* @param num numeric expression
* @return sign(num)
*/
public static <A extends Number & Comparable<?>> NumberExpression<Integer> sign(Expression<A> num) {
return Expressions.numberOperation(Integer.class, Ops.MathOps.SIGN, num);
} | 3.68 |
AreaShop_SoldRegionEvent_getOldBuyer | /**
* Get the player that the region is sold for.
* @return The UUID of the player that the region is sold for
*/
public UUID getOldBuyer() {
return oldBuyer;
} | 3.68 |
pulsar_Producer_run | /**
* Executed from I/O thread when sending receipt back to client.
*/
@Override
public void run() {
if (log.isDebugEnabled()) {
log.debug("[{}] [{}] [{}] Persisted message. cnx {}, sequenceId {}", producer.topic,
producer.producerName, producer.producerId, producer.cnx, sequenceId);
}
// stats
rateIn.recordMultipleEvents(batchSize, msgSize);
producer.topic.recordAddLatency(System.nanoTime() - startTimeNs, TimeUnit.NANOSECONDS);
producer.cnx.getCommandSender().sendSendReceiptResponse(producer.producerId, sequenceId, highestSequenceId,
ledgerId, entryId);
producer.cnx.completedSendOperation(producer.isNonPersistentTopic, msgSize);
if (this.chunked) {
producer.chunkedMessageRate.recordEvent();
}
producer.publishOperationCompleted();
if (producer.brokerInterceptor != null) {
producer.brokerInterceptor.messageProduced(
(ServerCnx) producer.cnx, producer, startTimeNs, ledgerId, entryId, this);
}
recycle();
} | 3.68 |
shardingsphere-elasticjob_InstanceService_getAvailableJobInstances | /**
* Get available job instances.
*
* @return available job instances
*/
public List<JobInstance> getAvailableJobInstances() {
List<JobInstance> result = new LinkedList<>();
for (String each : jobNodeStorage.getJobNodeChildrenKeys(InstanceNode.ROOT)) {
// TODO It's better to make it atomic
String jobNodeData = jobNodeStorage.getJobNodeData(instanceNode.getInstancePath(each));
if (null == jobNodeData) {
continue;
}
JobInstance jobInstance = YamlEngine.unmarshal(jobNodeData, JobInstance.class);
if (null != jobInstance && serverService.isEnableServer(jobInstance.getServerIp())) {
result.add(jobInstance);
}
}
return result;
} | 3.68 |
flink_Pattern_followedByAny | /**
* Appends a new group pattern to the existing one. The new pattern enforces non-strict temporal
* contiguity. This means that a matching event of this pattern and the preceding matching event
* might be interleaved with other events which are ignored.
*
* @param group the pattern to append
* @return A new pattern which is appended to this one
*/
public GroupPattern<T, F> followedByAny(Pattern<T, F> group) {
return new GroupPattern<>(
this, group, ConsumingStrategy.SKIP_TILL_ANY, afterMatchSkipStrategy);
} | 3.68 |
hbase_HBaseTestingUtility_setMaxRecoveryErrorCount | /**
 * Set maxRecoveryErrorCount in DFSClient. In 0.20 pre-append it is hard-coded to 5 and makes tests
 * linger. Here is the exception you'll see:
*
* <pre>
* 2010-06-15 11:52:28,511 WARN [DataStreamer for file /hbase/.logs/wal.1276627923013 block
* blk_928005470262850423_1021] hdfs.DFSClient$DFSOutputStream(2657): Error Recovery for block
* blk_928005470262850423_1021 failed because recovery from primary datanode 127.0.0.1:53683
* failed 4 times. Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
* </pre>
*
* @param stream A DFSClient.DFSOutputStream.
*/
public static void setMaxRecoveryErrorCount(final OutputStream stream, final int max) {
try {
Class<?>[] clazzes = DFSClient.class.getDeclaredClasses();
for (Class<?> clazz : clazzes) {
String className = clazz.getSimpleName();
if (className.equals("DFSOutputStream")) {
if (clazz.isInstance(stream)) {
Field maxRecoveryErrorCountField =
stream.getClass().getDeclaredField("maxRecoveryErrorCount");
maxRecoveryErrorCountField.setAccessible(true);
maxRecoveryErrorCountField.setInt(stream, max);
break;
}
}
}
} catch (Exception e) {
LOG.info("Could not set max recovery field", e);
}
} | 3.68 |
hudi_SerDeHelper_fromJson | /**
 * Converts a JSON string to an InternalSchema.
 *
 * @param json a JSON string.
 * @return an InternalSchema.
*/
public static Option<InternalSchema> fromJson(String json) {
if (json == null || json.isEmpty()) {
return Option.empty();
}
try {
return Option.of(fromJson((new ObjectMapper(new JsonFactory())).readValue(json, JsonNode.class)));
} catch (IOException e) {
throw new RuntimeException(e);
}
} | 3.68 |
flink_HiveTableUtil_initiateTableFromProperties | /**
* Extract DDL semantics from properties and use it to initiate the table. The related
* properties will be removed from the map after they're used.
*/
private static void initiateTableFromProperties(
Table hiveTable, Map<String, String> properties, HiveConf hiveConf) {
extractExternal(hiveTable, properties);
extractRowFormat(hiveTable.getSd(), properties);
extractStoredAs(hiveTable.getSd(), properties, hiveConf);
extractLocation(hiveTable.getSd(), properties);
} | 3.68 |
flink_ShutdownHookUtil_addShutdownHook | /** Adds a shutdown hook to the JVM and returns the Thread, which has been registered. */
public static Thread addShutdownHook(
final AutoCloseable service, final String serviceName, final Logger logger) {
checkNotNull(service);
checkNotNull(logger);
final Thread shutdownHook =
new Thread(
() -> {
try {
service.close();
} catch (Throwable t) {
logger.error(
"Error during shutdown of {} via JVM shutdown hook.",
serviceName,
t);
}
},
serviceName + " shutdown hook");
return addShutdownHookThread(shutdownHook, serviceName, logger) ? shutdownHook : null;
} | 3.68 |
flink_PushCalcPastChangelogNormalizeRule_buildFieldsMapping | /** Build field reference mapping from old field index to new field index after projection. */
private Map<Integer, Integer> buildFieldsMapping(int[] projectedInputRefs) {
final Map<Integer, Integer> fieldsOldToNewIndexMapping = new HashMap<>();
for (int i = 0; i < projectedInputRefs.length; i++) {
fieldsOldToNewIndexMapping.put(projectedInputRefs[i], i);
}
return fieldsOldToNewIndexMapping;
} | 3.68 |
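A tiny standalone illustration of the old-to-new index mapping this produces; the demo class and sample refs are assumptions:

import java.util.HashMap;
import java.util.Map;

public class FieldsMappingDemo {
    public static void main(String[] args) {
        // projection keeps input fields 2, 0 and 5, in that order
        int[] projectedInputRefs = {2, 0, 5};
        Map<Integer, Integer> oldToNew = new HashMap<>();
        for (int i = 0; i < projectedInputRefs.length; i++) {
            oldToNew.put(projectedInputRefs[i], i);
        }
        System.out.println(oldToNew); // {0=1, 2=0, 5=2}
    }
}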
flink_HiveParserTypeCheckCtx_setError | /** @param error the error to set */
public void setError(String error, HiveParserASTNode errorSrcNode) {
if (LOG.isDebugEnabled()) {
// Log the call stack from which the error has been set.
LOG.debug(
"Setting error: ["
+ error
+ "] from "
+ ((errorSrcNode == null) ? "null" : errorSrcNode.toStringTree()),
new Exception());
}
this.error = error;
this.errorSrcNode = errorSrcNode;
} | 3.68 |
morf_SchemaValidator_validate | /**
* Validate a {@link View} meets the rules
*
* @param view The {@link View} to validate
*/
public void validate(View view) {
validateView(view);
checkForValidationErrors();
} | 3.68 |
hbase_PrivateCellUtil_qualifierStartsWith | /**
 * Finds if the start of the qualifier part of the Cell matches <code>startsWith</code>
 * @param left the cell with which we need to match the qualifier
 * @param startsWith the serialized keyvalue format byte[]
 * @return true if the qualifier has the same starting characters, false otherwise
*/
public static boolean qualifierStartsWith(final Cell left, final byte[] startsWith) {
if (startsWith == null || startsWith.length == 0) {
throw new IllegalArgumentException("Cannot pass an empty startsWith");
}
if (left.getQualifierLength() < startsWith.length) {
return false;
}
if (left instanceof ByteBufferExtendedCell) {
return ByteBufferUtils.equals(((ByteBufferExtendedCell) left).getQualifierByteBuffer(),
((ByteBufferExtendedCell) left).getQualifierPosition(), startsWith.length, startsWith, 0,
startsWith.length);
}
return Bytes.equals(left.getQualifierArray(), left.getQualifierOffset(), startsWith.length,
startsWith, 0, startsWith.length);
} | 3.68 |
hadoop_Tristate_getMapping | /**
* Get the boolean mapping, if present.
* @return the boolean value, if present.
*/
public Optional<Boolean> getMapping() {
return mapping;
} | 3.68 |