name (string, 12–178 chars) | code_snippet (string, 8–36.5k chars) | score (float64, 3.26–3.68)
---|---|---|
druid_CalciteMySqlNodeVisitor_convertToSingleValuesIfNeed_rdh | /**
* If there are multiple VALUES clauses and every value in them is a literal
* placeholder, convert them to a single values clause.
*
* @param valuesClauseList
* the original list of values clauses
* @return a single-element list when collapsing applies, otherwise the original list
*/
public static List<SQLInsertStatement.ValuesClause> convertToSingleValuesIfNeed(List<SQLInsertStatement.ValuesClause> valuesClauseList)
{
if (valuesClauseList.size() <= 1) {
return valuesClauseList;
}
// Collapse only if every value is a '?' placeholder (a parameterized literal)
for (SQLInsertStatement.ValuesClause clause : valuesClauseList) {
for (SQLExpr expr : clause.getValues()) {
if (expr instanceof SQLVariantRefExpr) {
if (((SQLVariantRefExpr) (expr)).getName().equals("?")) {
continue;
}
}
return valuesClauseList;
}
}
// Return only the first values clause.
return Arrays.asList(valuesClauseList.get(0));
} | 3.26 |
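A minimal usage sketch of the method above (hypothetical values; assumes druid's SQLInsertStatement and SQLVariantRefExpr are on the classpath):

import com.alibaba.druid.sql.ast.expr.SQLVariantRefExpr;
import com.alibaba.druid.sql.ast.statement.SQLInsertStatement;
import java.util.Arrays;
import java.util.List;

// Two clauses of the form (?, ?) — every value is a '?' placeholder.
SQLInsertStatement.ValuesClause first = new SQLInsertStatement.ValuesClause(
        Arrays.asList(new SQLVariantRefExpr("?"), new SQLVariantRefExpr("?")));
SQLInsertStatement.ValuesClause second = new SQLInsertStatement.ValuesClause(
        Arrays.asList(new SQLVariantRefExpr("?"), new SQLVariantRefExpr("?")));
List<SQLInsertStatement.ValuesClause> collapsed =
        convertToSingleValuesIfNeed(Arrays.asList(first, second));
// collapsed.size() == 1: only the first clause is kept.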
druid_DruidStatManagerFacade_mergWallStat_rdh | /**
* Merge two wall statistics maps.
*
* @return the merged map
* @deprecated misspelled legacy alias of mergeWallStat
*/
public static Map mergWallStat(Map mapA, Map mapB) {
return mergeWallStat(mapA, mapB);
}
@SuppressWarnings({ "rawtypes", "unchecked" } | 3.26 |
druid_DruidDataSourceWrapper_setMaxEvictableIdleTimeMillis_rdh | /**
* Ignore the 'maxEvictableIdleTimeMillis < minEvictableIdleTimeMillis' validation;
* it will be validated again in {@link DruidDataSource#init()}.
* <p>
* Fixes issues #3084 and #2763.
*
* @since 1.1.14
*/
@Override
public void setMaxEvictableIdleTimeMillis(long maxEvictableIdleTimeMillis) {
try {
super.setMaxEvictableIdleTimeMillis(maxEvictableIdleTimeMillis);
} catch (IllegalArgumentException ignore) {
super.maxEvictableIdleTimeMillis = maxEvictableIdleTimeMillis;
}
} | 3.26 |
druid_AntsparkOutputVisitor_visit_rdh | // add USING statement
@Override
public boolean visit(AntsparkCreateTableStatement x) {
print0(ucase ? "CREATE " : "create ");
if (x.isExternal()) {
print0(ucase ? "EXTERNAL " : "external ");
}
if (x.isIfNotExists()) {
print0(ucase ? "TABLE IF NOT EXISTS " : "table if not exists ");
} else {
print0(ucase ? "TABLE " : "table ");
}
x.getName().accept(this);
if (x.getLike() != null) {
print0(ucase ? " LIKE " : " like ");
x.getLike().accept(this);
}
final List<SQLTableElement> tableElementList = x.getTableElementList();
int size = tableElementList.size();
if (size > 0) {
print0(" (");
if (this.isPrettyFormat() && x.hasBodyBeforeComment()) {
print(' ');
printlnComment(x.getBodyBeforeCommentsDirect());
}
this.indentCount++;
println();
for (int i = 0; i < size; ++i) {
SQLTableElement element = tableElementList.get(i);
element.accept(this);
if (i != (size - 1)) {
print(',');
}
if (this.isPrettyFormat() && element.hasAfterComment()) {
print(' ');
printlnComment(element.getAfterCommentsDirect());
}
if (i != (size - 1)) {
println();
}
}
this.indentCount--;
println();
print(')');
}
if (x.getDatasource() != null) {
println();
print0(ucase ? "USING " : "using ");
print0(x.getDatasource().toString());
}
if (x.getComment() != null) {
println();
print0(ucase ? "COMMENT " : "comment ");
x.getComment().accept(this);
}
int partitionSize = x.getPartitionColumns().size();
if (partitionSize > 0) {
println();
print0(ucase ? "PARTITIONED BY (" : "partitioned by (");
this.indentCount++;
println();
for (int i = 0; i < partitionSize; ++i) {
SQLColumnDefinition column = x.getPartitionColumns().get(i);
column.accept(this);
if (i != (partitionSize - 1)) {
print(',');
}
if (this.isPrettyFormat() && column.hasAfterComment()) {
print(' ');
printlnComment(column.getAfterCommentsDirect());
}
if (i != (partitionSize - 1)) {
println();
}
}
this.indentCount--;
println();
print(')');
}
List<SQLSelectOrderByItem> clusteredBy = x.getClusteredBy();
if (clusteredBy.size() > 0) {
println();
print0(ucase ? "CLUSTERED BY (" : "clustered by (");
printAndAccept(clusteredBy, ",");
print(')');
}
List<SQLSelectOrderByItem> sortedBy = x.getSortedBy();
if (sortedBy.size() > 0) {
println();
print0(ucase ? "SORTED BY (" : "sorted by (");
printAndAccept(sortedBy, ", ");
print(')');
}
int buckets = x.getBuckets();
if (buckets > 0) {
println();
print0(ucase ? "INTO " : "into ");
print(buckets);
print0(ucase ? " BUCKETS" : " buckets");
}
SQLExpr storedAs = x.getStoredAs();
if (storedAs != null) {
println();
print0(ucase ? "STORED AS " : "stored as ");
storedAs.accept(this);
}
SQLSelect select = x.getSelect();
if (select != null) {
println();
print0(ucase ? "AS" : "as");
println();
select.accept(this);
}
Map<String, SQLObject> serdeProperties = x.getSerdeProperties();
if (serdeProperties.size() > 0) {
println();
print0(ucase ? "TBLPROPERTIES (" : "tblproperties (");
String seperator = "";
for (Entry<String, SQLObject> entry : serdeProperties.entrySet()) {
print0(("'" + entry.getKey()) + "'='");
entry.getValue().accept(this);
print0("'" + seperator);
seperator = ",";
}
print(')');
}
SQLExpr location = x.getLocation();
if (location != null) {
println();
print0(ucase ? "LOCATION " : "location ");
location.accept(this);
}
return false;
} | 3.26 |
druid_TableStat_getDataType_rdh | /**
* Returns the column data type.
*
* @since 1.0.20
*/
public String getDataType() {
return dataType;
} | 3.26 |
druid_TableStat_setDataType_rdh | /**
* Sets the column data type.
*
* @since 1.0.20
*/
public void setDataType(String dataType) {
this.dataType = dataType;
} | 3.26 |
druid_DataSourceSelectorEnum_newInstance_rdh | /**
* Create a new instance of the DataSourceSelector represented by this enum.
*
* @return a new selector, or null if dataSource is not given or an exception occurred while creating it
*/
public DataSourceSelector newInstance(HighAvailableDataSource dataSource) {
if (dataSource == null) {
LOG.warn("You should provide an instance of HighAvailableDataSource!");
return null;
}
}
DataSourceSelector selector = null;
try {
selector = clazz.getDeclaredConstructor(HighAvailableDataSource.class).newInstance(dataSource);
} catch (Exception e) {
LOG.error("Can not create new instance of " + clazz.getName(), e);
}
return selector;
} | 3.26 |
druid_PagerUtils_getLimit_rdh | /**
* Extract the row-count limit of a single SQL statement.
*
* @param sql
* the SQL text to parse
* @param dbType
* the database dialect
* @return the literal LIMIT row count; Integer.MAX_VALUE when the limit is not a numeric literal; -1 when there is no limit
*/
public static int getLimit(String sql, DbType dbType) {
List<SQLStatement> stmtList = SQLUtils.parseStatements(sql, dbType);
if (stmtList.size() != 1) {
return -1;
}
SQLStatement stmt = stmtList.get(0);
if (stmt instanceof SQLSelectStatement) {
SQLSelectStatement selectStmt = ((SQLSelectStatement) (stmt));
SQLSelectQuery query = selectStmt.getSelect().getQuery();
if (query instanceof SQLSelectQueryBlock) {
if (query instanceof MySqlSelectQueryBlock) {
SQLLimit limit = ((MySqlSelectQueryBlock) (query)).getLimit();
if (limit == null) {
return -1;
}
SQLExpr rowCountExpr = limit.getRowCount();
if (rowCountExpr instanceof SQLNumericLiteralExpr) {
int rowCount = ((SQLNumericLiteralExpr) (rowCountExpr)).getNumber().intValue();
return rowCount;
}
return Integer.MAX_VALUE;
}
if (query instanceof OdpsSelectQueryBlock) {
SQLLimit limit = ((OdpsSelectQueryBlock) (query)).getLimit();
SQLExpr rowCountExpr = (limit != null) ? limit.getRowCount() : null;
if (rowCountExpr instanceof SQLNumericLiteralExpr) {
int rowCount = ((SQLNumericLiteralExpr) (rowCountExpr)).getNumber().intValue();
return rowCount;
}
return Integer.MAX_VALUE;
}
return -1;
}
}
return -1;
} | 3.26 |
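A minimal usage sketch (assumes druid's PagerUtils and DbType are on the classpath):

import com.alibaba.druid.DbType;
import com.alibaba.druid.sql.PagerUtils;

int limit = PagerUtils.getLimit("select id from t order by id limit 20", DbType.mysql);
// limit == 20; a statement without LIMIT returns -1,
// and a non-literal row count such as "limit ?" returns Integer.MAX_VALUE.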
druid_DruidPooledConnection_getPhysicalConnectNanoSpan_rdh | /**
* Returns the time, in nanoseconds, spent creating the underlying physical connection.
*
* @since 1.0.17
*/
public long getPhysicalConnectNanoSpan() {
return this.holder.getCreateNanoSpan();
} | 3.26 |
druid_DruidPooledConnection_getPhysicalConnectionUsedCount_rdh | /**
* Returns how many times the underlying physical connection has been used.
*
* @since 1.0.17
*/
public long getPhysicalConnectionUsedCount() {
return this.holder.getUseCount();
} | 3.26 |
druid_DruidPooledConnection_getGloablVariables_rdh | /**
* Returns the global variables of the underlying connection.
*
* @since 1.0.28
*/
public Map<String, Object> getGloablVariables() {
return this.holder.globalVariables;
} | 3.26 |
druid_DruidPooledConnection_getConnectNotEmptyWaitNanos_rdh | /**
* Returns the last not-empty wait time, in nanoseconds, recorded for this connection.
*
* @since 1.0.17
*/
public long getConnectNotEmptyWaitNanos() {
return this.holder.getLastNotEmptyWaitNanos();
} | 3.26 |
druid_DruidPooledConnection_prepareCall_rdh | // ////////////////////
@Override
public CallableStatement prepareCall(String sql) throws SQLException {
checkState();
PreparedStatementHolder stmtHolder = null;
PreparedStatementKey key = new PreparedStatementKey(sql, getCatalog(), MethodType.Precall_1);
boolean poolPreparedStatements = holder.isPoolPreparedStatements();
if (poolPreparedStatements) {
stmtHolder = holder.getStatementPool().get(key);
}
if (stmtHolder == null) {
try {
stmtHolder = new PreparedStatementHolder(key, conn.prepareCall(sql));
holder.getDataSource().incrementPreparedStatementCount();
} catch (SQLException ex) {
handleException(ex, sql);
}
}
initStatement(stmtHolder);
DruidPooledCallableStatement rtnVal = new DruidPooledCallableStatement(this, stmtHolder);
holder.addTrace(rtnVal);
return rtnVal;
} | 3.26 |
druid_DruidPooledConnection_getVariables_rdh | /**
* Returns the session variables of the underlying connection.
*
* @since 1.0.28
*/
public Map<String, Object> getVariables() {
return this.holder.variables;
} | 3.26 |
druid_DruidAbstractDataSource_setConnectTimeout_rdh | /**
* Sets the connect timeout in milliseconds.
*
* @since 1.2.12
*/
public void setConnectTimeout(int milliSeconds) {
this.connectTimeout = milliSeconds;
this.connectTimeoutStr = null;
} | 3.26 |
druid_DruidAbstractDataSource_isInitExceptionThrow_rdh | /**
* Returns whether an exception during init() is rethrown to the caller.
*
* @since 1.1.11
*/
public boolean isInitExceptionThrow() {
return initExceptionThrow;
} | 3.26 |
druid_DruidAbstractDataSource_getSocketTimeout_rdh | /**
* Returns the socket timeout in milliseconds.
*
* @since 1.2.12
*/
public int getSocketTimeout() {
return socketTimeout;
} | 3.26 |
druid_DruidAbstractDataSource_getConnectTimeout_rdh | /**
* Returns the connect timeout in milliseconds.
*
* @since 1.2.12
*/
public int getConnectTimeout() {
return connectTimeout;
} | 3.26 |
druid_DruidAbstractDataSource_setQueryTimeout_rdh | /**
* Sets the number of seconds the driver will wait for a <code>Statement</code> object to execute to the given
* number of seconds. If the limit is exceeded, an <code>SQLException</code> is thrown. A JDBC driver must apply
* this limit to the <code>execute</code>, <code>executeQuery</code> and <code>executeUpdate</code> methods. JDBC
* driver implementations may also apply this limit to <code>ResultSet</code> methods (consult your driver vendor
* documentation for details).
*
* @param seconds
* the new query timeout limit in seconds; zero means there is no limit
* @see #getQueryTimeout
*/
public void setQueryTimeout(int seconds) {
this.queryTimeout = seconds;
} | 3.26 |
druid_DruidAbstractDataSource_setNumTestsPerEvictionRun_rdh | /**
* Sets the number of tests per eviction run (kept for DBCP compatibility).
*
* @param numTestsPerEvictionRun
* the number of tests per eviction run
*/
@Deprecated
public void setNumTestsPerEvictionRun(int numTestsPerEvictionRun) {
this.numTestsPerEvictionRun = numTestsPerEvictionRun;
} | 3.26 |
druid_DruidAbstractDataSource_testConnectionInternal_rdh | /**
* @deprecated use the two-argument testConnectionInternal overload instead
*/
protected boolean testConnectionInternal(Connection conn) {
return testConnectionInternal(null, conn);
} | 3.26 |
druid_DruidAbstractDataSource_setExceptionSorterClassName_rdh | // For JBoss compatibility
public void setExceptionSorterClassName(String exceptionSorter) throws Exception {
this.setExceptionSorter(exceptionSorter);
} | 3.26 |
druid_DruidAbstractDataSource_setInitExceptionThrow_rdh | /**
* Sets whether an exception during init() is rethrown to the caller.
*
* @since 1.1.11
*/
public void setInitExceptionThrow(boolean initExceptionThrow) {
this.initExceptionThrow = initExceptionThrow;
} | 3.26 |
druid_DruidAbstractDataSource_setSocketTimeout_rdh | /**
* Sets the socket timeout in milliseconds.
*
* @since 1.2.12
*/
public void setSocketTimeout(int milliSeconds) {
this.socketTimeout = milliSeconds;
this.socketTimeoutSr = null;
} | 3.26 |
druid_SQLCreateTableStatement_apply_rdh | // SQLAlterTableRenameColumn
private boolean apply(SQLAlterTableRenameColumn item) {
int columnIndex = columnIndexOf(item.getColumn());
if (columnIndex == (-1)) {
return false;
}
SQLColumnDefinition column = ((SQLColumnDefinition) (tableElementList.get(columnIndex)));
column.setName(item.getTo().clone());
return true;
} | 3.26 |
druid_SQLCreateTableStatement_isUNI_rdh | /**
* only for show columns
*/
public boolean isUNI(String columnName) {
for (SQLTableElement element : this.tableElementList) {
if (element instanceof MySqlUnique) {
MySqlUnique unique = ((MySqlUnique) (element));
if (unique.getColumns().isEmpty()) {
continue;
}
SQLExpr column = unique.getColumns().get(0).getExpr();
if ((column instanceof SQLIdentifierExpr) && SQLUtils.nameEquals(columnName, ((SQLIdentifierExpr) (column)).getName())) {
return unique.getColumns().size() == 1;
} else if ((column instanceof SQLMethodInvokeExpr) && SQLUtils.nameEquals(((SQLMethodInvokeExpr) (column)).getMethodName(), columnName)) {
return true;
}
}
}
return false;
} | 3.26 |
druid_SQLCreateTableStatement_isExternal_rdh | // for odps & hive
public boolean isExternal() {
return external;
} | 3.26 |
druid_SQLCreateTableStatement_isMUL_rdh | /**
* only for show columns
*/
public boolean isMUL(String columnName) {
for (SQLTableElement element : this.tableElementList) {
if (element instanceof MySqlUnique) {
MySqlUnique unique = ((MySqlUnique) (element));
SQLExpr column = unique.getColumns().get(0).getExpr();
if ((column instanceof SQLIdentifierExpr) && SQLUtils.nameEquals(columnName, ((SQLIdentifierExpr) (column)).getName())) {
return unique.getColumns().size() > 1;
} else if ((column instanceof SQLMethodInvokeExpr) && SQLUtils.nameEquals(((SQLMethodInvokeExpr) (column)).getMethodName(), columnName)) {
return true;
}
} else if (element instanceof MySqlKey) {
MySqlKey unique = ((MySqlKey) (element));
SQLExpr column = unique.getColumns().get(0).getExpr();
if ((column instanceof SQLIdentifierExpr) && SQLUtils.nameEquals(columnName, ((SQLIdentifierExpr) (column)).getName())) {
return true;
} else if ((column instanceof SQLMethodInvokeExpr) && SQLUtils.nameEquals(((SQLMethodInvokeExpr) (column)).getMethodName(), columnName)) {
return true;
}
}
}
return false;
} | 3.26 |
druid_HighAvailableDataSource_getPoolPurgeIntervalSeconds_rdh | // Getters & Setters
public int getPoolPurgeIntervalSeconds() {
return poolPurgeIntervalSeconds;
} | 3.26 |
druid_ZookeeperNodeListener_init_rdh | /**
* Init a PathChildrenCache to watch the given path.
*/
@Override
public void init() {
checkParameters();
super.init();
if (client == null) {
client = CuratorFrameworkFactory.builder().canBeReadOnly(true).connectionTimeoutMs(5000).connectString(zkConnectString).retryPolicy(new RetryForever(10000)).sessionTimeoutMs(30000).build();
client.start();
privateZkClient = true;
}
cache = new PathChildrenCache(client, path, true);
cache.getListenable().addListener(new PathChildrenCacheListener() {
@Override
public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
try {
LOG.info("Receive an event: " + event.getType());
lock.lock();
PathChildrenCacheEvent.Type eventType = event.getType();
switch (eventType) {
case CHILD_REMOVED :
updateSingleNode(event, NodeEventTypeEnum.DELETE);
break;
case CHILD_ADDED :
updateSingleNode(event, NodeEventTypeEnum.ADD);
break;
case CONNECTION_RECONNECTED :
refreshAllNodes();
break;
default :
// CHILD_UPDATED
// INITIALIZED
// CONNECTION_LOST
// CONNECTION_SUSPENDED
LOG.info("Received a PathChildrenCacheEvent, IGNORE it: " + event);
}
} finally {
lock.unlock();
LOG.info("Finish the processing of event: " + event.getType());
}
}
});
try {
// Use BUILD_INITIAL_CACHE to force build cache in the current Thread.
// We don't use POST_INITIALIZED_EVENT, so there's no INITIALIZED event.
cache.start(StartMode.BUILD_INITIAL_CACHE);
} catch (Exception e) {
LOG.error("Can't start PathChildrenCache", e);
}
} | 3.26 |
druid_ZookeeperNodeListener_refresh_rdh | /**
* Build Properties from PathChildrenCache.
* Should be called after init().
*
* @see #getPropertiesFromCache()
*/
@Override
public List<NodeEvent> refresh() {
lock.lock();
try {
Properties properties = getPropertiesFromCache();
List<NodeEvent> events = NodeEvent.getEventsByDiffProperties(getProperties(), properties);
if ((events != null) && (!events.isEmpty())) {
setProperties(properties);
}
return events;
} finally {
lock.unlock();
}
} | 3.26 |
druid_AntsparkCreateTableStatement_getDatasource_rdh | /**
* Getter method for property datasource.
*
* @return property value of datasource
*/
public SQLExprTableSource getDatasource() {
return f0;
} | 3.26 |
druid_IPRange_getIPAddress_rdh | // -------------------------------------------------------------------------
/**
* Return the encapsulated IP address.
*
* @return The IP address.
*/
public final IPAddress getIPAddress() {
return ipAddress;
} | 3.26 |
druid_IPRange_parseRange_rdh | // -------------------------------------------------------------------------
/**
* Parse the IP range string representation.
*
* @param range
* String representation of the IP range.
* @throws IllegalArgumentException
* Throws this exception if the specified range is not a valid IP network range.
*/
final void parseRange(String range) {
if (range == null) {
throw new IllegalArgumentException("Invalid IP range");
}
int index = range.indexOf('/');
String subnetStr = null;
if (index == (-1)) {
ipAddress = new IPAddress(range);
} else {
ipAddress = new IPAddress(range.substring(0, index));
subnetStr = range.substring(index + 1);
}
// try to convert the remaining part of the range into a decimal
// value.
try {
if (subnetStr != null) {
extendedNetworkPrefix = Integer.parseInt(subnetStr);
if ((extendedNetworkPrefix < 0) || (extendedNetworkPrefix > 32)) {
throw new IllegalArgumentException(("Invalid IP range [" + range) + "]");
}
ipSubnetMask = computeMaskFromNetworkPrefix(extendedNetworkPrefix);
}
} catch (NumberFormatException ex) {
// the remaining part is not a valid decimal value.
// Check if it's a decimal-dotted notation.
ipSubnetMask = new IPAddress(subnetStr);
// create the corresponding subnet decimal
extendedNetworkPrefix = computeNetworkPrefixFromMask(ipSubnetMask);
if (extendedNetworkPrefix == (-1)) {
throw new IllegalArgumentException(("Invalid IP range [" + range) + "]", ex);
}
}
} | 3.26 |
druid_IPRange_computeMaskFromNetworkPrefix_rdh | // -------------------------------------------------------------------------
/**
* Convert an extended network prefix integer into an IP number.
*
* @param prefix
* The network prefix number.
* @return Return the IP number corresponding to the extended network prefix.
*/
private IPAddress computeMaskFromNetworkPrefix(int prefix) {
/* int subnet = 0; for (int i=0; i<prefix; i++) { subnet = subnet << 1; subnet += 1; } */
StringBuilder str = new StringBuilder();
for (int i = 0; i < 32; i++) {
if (i < prefix) {
str.append("1");
} else {
str.append("0");
}
}
String decimalString = toDecimalString(str.toString());
return new IPAddress(decimalString);
} | 3.26 |
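A worked instance of the conversion above: prefix 24 builds the binary string 11111111111111111111111100000000, i.e. the dotted mask 255.255.255.0. Via the public API this happens while parsing a range (a sketch, assuming an IPRange(String) constructor that delegates to the parseRange method shown earlier):

IPRange range = new IPRange("192.168.1.0/24");
// parseRange() derives the subnet mask from the /24 prefix -> 255.255.255.0
System.out.println(range.getIPAddress()); // 192.168.1.0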
druid_DruidDataSourceBuilder_build_rdh | /**
* For issue #1796: build a DruidDataSource from the Spring Environment using the given configuration-properties prefix.
* <p>
* This method works around Spring Boot 1.X, where properties in a .properties file are not bound
* in declaration order, which breaks the configuration (issue #1796).
* If you are not affected, or you configure via .yml, use {@link DruidDataSourceBuilder#build} instead.
* Spring Boot 2.0 fixed the binding problem, so this method is deprecated and will be removed in a future version.
*
* @see DruidDataSourceWrapper#setMaxEvictableIdleTimeMillis(long)
*/
@Deprecated
public DruidDataSource build(Environment env, String prefix) {
DruidDataSource druidDataSource = new DruidDataSourceWrapper();
druidDataSource.setMinEvictableIdleTimeMillis(env.getProperty(prefix + "min-evictable-idle-time-millis", Long.class, DruidDataSource.DEFAULT_MIN_EVICTABLE_IDLE_TIME_MILLIS));
druidDataSource.setMaxEvictableIdleTimeMillis(env.getProperty(prefix + "max-evictable-idle-time-millis", Long.class, DruidDataSource.DEFAULT_MAX_EVICTABLE_IDLE_TIME_MILLIS));
return druidDataSource;
} | 3.26 |
druid_PropertiesUtils_loadProperties_rdh | /**
* Load properties from the given file into Properties.
*/
public static Properties loadProperties(String file) {
Properties properties = new Properties();
if (file == null) {
return properties;
}
InputStream is = null;
try {
LOG.debug(("Trying to load " + file) + " from FileSystem.");
is = new FileInputStream(file);
} catch (FileNotFoundException e) {
LOG.debug(("Trying to load " + file) + " from Classpath.");
try {
is = PropertiesUtils.class.getResourceAsStream(file);
} catch (Exception ex) {
LOG.warn("Can not load resource " + file, ex);
}
}
if (is != null) {
try {
properties.load(is);
} catch (Exception e) {
LOG.error("Exception occurred while loading " + file, e);
} finally {
try {
is.close();
} catch (Exception e) {
LOG.debug("Can not close InputStream.", e);
}
}
} else {
LOG.warn(("File " + file) + " can't be loaded!");
}
return properties;
} | 3.26 |
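A minimal usage sketch (the file name is a hypothetical example): the loader tries the filesystem first, then falls back to the classpath, and returns an empty Properties rather than throwing.

Properties props = PropertiesUtils.loadProperties("druid-config.properties"); // hypothetical file
String url = props.getProperty("url"); // null if the key (or the whole file) is missing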
druid_RandomDataSourceValidateThread_logSuccessTime_rdh | /**
* Provide a static method to record the last success time of a DataSource
*/
public static void logSuccessTime(DataSourceProxy dataSource) {
if ((dataSource != null) && (!StringUtils.isEmpty(dataSource.getName()))) {
String name = dataSource.getName();
long time = System.currentTimeMillis();
LOG.debug((("Log successTime [" + time) + "] for ") + name);
successTimes.put(name, time);
}
} | 3.26 |
druid_Resources_classForName_rdh | /**
* Loads a class
*
* @param className
* - the class to load
* @return The loaded class
* @throws ClassNotFoundException
* If the class cannot be found (duh!)
*/
public static Class<?> classForName(String className) throws ClassNotFoundException {
Class<?> clazz = null;
try {
clazz = getClassLoader().loadClass(className);
} catch (Exception e) {
// Ignore. Failsafe below.
}
if (clazz == null) {
clazz = Class.forName(className);
}
return clazz;
} | 3.26 |
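A minimal usage sketch:

Class<?> listClass = Resources.classForName("java.util.ArrayList");
// The configured/default ClassLoader is tried first; Class.forName() is the fallback.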
druid_Resources_getDefaultClassLoader_rdh | /**
* Returns the default classloader (may be null).
*
* @return The default classloader
*/
public static ClassLoader getDefaultClassLoader() {
return defaultClassLoader;
} | 3.26 |
druid_SQLColumnDefinition_setIdentity_rdh | // for sqlserver
public void setIdentity(Identity identity) {
if (identity != null) {
identity.setParent(this);
}
this.identity = identity;
} | 3.26 |
druid_ListDG_print_rdh | /* Print the adjacency-list graph */
public void print() {
System.out.printf("== List Graph:\n");
for (int i = 0; i < mVexs.size(); i++) {
System.out.printf("%d(%c): ", i, mVexs.get(i).data);
ENode node = mVexs.get(i).firstEdge;
while (node != null) {
System.out.printf("%d(%c) ", node.ivex, mVexs.get(node.ivex).data);
node = node.nextEdge;
}
}
} | 3.26 |
druid_ListDG_BFS_rdh | /* Breadth-first search (similar to level-order traversal of a tree) */
public void BFS() {
int head = 0;
int rear = 0;
int[] queue = new int[mVexs.size()];
// auxiliary queue
boolean[] visited = new boolean[mVexs.size()];// vertex visited flags
for (int i = 0; i < mVexs.size(); i++) {
visited[i] = false;
}
for (int i = 0; i < mVexs.size(); i++) {
if (!visited[i]) {
visited[i] = true;
System.out.printf("%c ", mVexs.get(i).data);
queue[rear++] = i;// enqueue
}
while (head != rear) {
int j = queue[head++];// dequeue
ENode node = mVexs.get(j).firstEdge;
while (node != null) {
int k = node.ivex;
if (!visited[k]) {
visited[k] = true;
System.out.printf("%c ", mVexs.get(k).data);
queue[rear++] = k;
}
node = node.nextEdge;
}
}
}
} | 3.26 |
druid_ListDG_DFS_rdh | /* Depth-first search traversal of the graph */
public void DFS() {
boolean[] visited = new boolean[mVexs.size()];// vertex visited flags
// initially, mark all vertices as not visited
for (int i = 0; i < mVexs.size(); i++) {
visited[i] = false;
}
for (int i = 0; i < mVexs.size(); i++) {
if (!visited[i]) {
DFS(i, visited);
}
}
} | 3.26 |
druid_ListDG_getPosition_rdh | /* Return the index of vertex ch, or -1 if not found */
private int getPosition(Object ch) {
for (int i = 0; i < mVexs.size(); i++) {
if (mVexs.get(i).data == ch) {
return i;
}
}
return -1;
} | 3.26 |
druid_ListDG_linkLast_rdh | /* Link node to the end of list */
private void linkLast(ENode list, ENode node) {
ENode p = list;
while (p.nextEdge != null) {
p = p.nextEdge;
}
p.nextEdge = node;
} | 3.26 |
druid_Base64_base64toInt_rdh | /**
* Translates the specified character, which is assumed to be in the "Base 64 Alphabet" into its equivalent 6-bit
* positive integer.
*
* @throws IllegalArgumentException or ArrayIndexOutOfBoundsException if c is not in the Base64 alphabet.
*/
private static int base64toInt(char c, byte[] alphaToInt) {
int result = alphaToInt[c];
if (result < 0) {
throw new IllegalArgumentException("Illegal character " + c);
}
return result;
} | 3.26 |
druid_Base64_byteArrayToBase64_rdh | /**
* Translates the specified byte array into a Base64 string as per Preferences.put(byte[]).
*/
public static String byteArrayToBase64(byte[] a) {
return byteArrayToBase64(a, false);
} | 3.26 |
druid_Base64_byteArrayToAltBase64_rdh | /**
* Translates the specified byte array into an "alternate representation" Base64 string. This non-standard variant
* uses an alphabet that does not contain the uppercase alphabetic characters, which makes it suitable for use in
* situations where case-folding occurs.
*/
public static String byteArrayToAltBase64(byte[] a) {
return byteArrayToBase64(a, true);
} | 3.26 |
druid_Utils_murmurhash2_64_rdh | /**
* murmur hash 2.0, The murmur hash is a relatively fast hash function from http://murmurhash.googlepages.com/ for
* platforms with efficient multiplication.
*
* @author Viliam Holub
*/
public static long murmurhash2_64(final byte[] data, int length, int seed) {
final long m = 0xc6a4a7935bd1e995L;
final int r = 47;
long h = (seed & 0xffffffffL) ^ (length * m);
int length8 = length / 8;
for (int i = 0; i < length8; i++) {
final int i8 = i * 8;
long k = (((long) data[i8 + 0]) & 0xff)
+ ((((long) data[i8 + 1]) & 0xff) << 8)
+ ((((long) data[i8 + 2]) & 0xff) << 16)
+ ((((long) data[i8 + 3]) & 0xff) << 24)
+ ((((long) data[i8 + 4]) & 0xff) << 32)
+ ((((long) data[i8 + 5]) & 0xff) << 40)
+ ((((long) data[i8 + 6]) & 0xff) << 48)
+ ((((long) data[i8 + 7]) & 0xff) << 56);
k *= m;
k ^= k >>> r;
k *= m;
h ^= k;
h *= m;
}
switch (length % 8) {
case 7 :
h ^= ((long) (data[(length & (~7)) + 6] & 0xff)) << 48;
case 6 :
h ^= ((long) (data[(length & (~7)) + 5] & 0xff)) << 40;
case 5 :
h ^= ((long) (data[(length & (~7)) + 4] & 0xff)) << 32;
case 4 :
h ^= ((long) (data[(length & (~7)) + 3] & 0xff)) << 24;
case 3 :
h ^= ((long) (data[(length & (~7)) + 2] & 0xff)) << 16;
case 2 :
h ^= ((long) (data[(length & (~7)) + 1] & 0xff)) << 8;
case 1 :
h ^= ((long) (data[length & (~7)] & 0xff));
h *= m;
}
h ^= h >>> r;
h *= m;
h ^= h >>> r;
return h;
} | 3.26 |
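A minimal usage sketch (the seed is an arbitrary example value):

byte[] data = "hello".getBytes(java.nio.charset.StandardCharsets.UTF_8);
long hash = Utils.murmurhash2_64(data, data.length, 0x1234); // arbitrary seed
// The same bytes and seed always yield the same 64-bit hash.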
druid_SpringIbatisBeanTypeAutoProxyCreator_getAdvicesAndAdvisorsForBean_rdh | /**
* Identify as bean to proxy if the bean name is in the configured list of names.
*/
@SuppressWarnings("rawtypes")
protected Object[] getAdvicesAndAdvisorsForBean(Class beanClass, String beanName, TargetSource targetSource) {
for (String mappedName : this.beanNames) {
if (FactoryBean.class.isAssignableFrom(beanClass)) {
if (!mappedName.startsWith(BeanFactory.FACTORY_BEAN_PREFIX)) {
continue;
}
mappedName = mappedName.substring(BeanFactory.FACTORY_BEAN_PREFIX.length());
}
if (isMatch(beanName, mappedName)) {
return PROXY_WITHOUT_ADDITIONAL_INTERCEPTORS;
}
}
return DO_NOT_PROXY;
} | 3.26 |
druid_SpringIbatisBeanTypeAutoProxyCreator_isMatch_rdh | /**
* Return if the given bean name matches the mapped name.
* <p>
* The default implementation checks for "xxx*", "*xxx" and "*xxx*" matches, as well as direct equality. Can be
* overridden in subclasses.
*
* @param beanName
* the bean name to check
* @param mappedName
* the name in the configured list of names
* @return if the names match
* @see org.springframework.util.PatternMatchUtils#simpleMatch(String, String)
*/
protected boolean isMatch(String beanName, String mappedName) {
return PatternMatchUtils.simpleMatch(mappedName, beanName);
} | 3.26 |
druid_SQLStatementParser_parseStatement_rdh | /**
* @param tryBest
* if true, parse and ignore errors after the first statement.
* Strongly recommended: only pass tryBest=true when you know for sure the rest can be ignored;
* otherwise syntax errors are swallowed and the SQL is truncated, which can make
* UPDATE and DELETE statements execute without their WHERE clause!!!
*/
public SQLStatement parseStatement(final boolean tryBest) {
List<SQLStatement> list = new ArrayList<SQLStatement>();
this.parseStatementList(list, 1, null);
if (tryBest) {
if (lexer.token != Token.EOF) {
throw new ParserException("sql syntax error, no terminated. " + lexer.info());
}
}
return list.get(0);
} | 3.26 |
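A minimal usage sketch (assumes a dialect subclass such as druid's MySqlStatementParser):

SQLStatement stmt = new MySqlStatementParser("select id from t_user where id = 1").parseStatement(true);
// With tryBest = true, trailing unparsed tokens raise a ParserException instead of being dropped silently.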
druid_EncodingConvertFilter_preparedStatement_setString_rdh | // ========== preparedStatement
@Override
public void preparedStatement_setString(FilterChain chain, PreparedStatementProxy statement, int parameterIndex, String x) throws SQLException {
super.preparedStatement_setString(chain, statement, parameterIndex, encode(statement.getConnectionProxy(), x));
} | 3.26 |
druid_EncodingConvertFilter_connection_prepareCall_rdh | // / prepareCall
@Override
public CallableStatementProxy connection_prepareCall(FilterChain chain, ConnectionProxy connection, String sql) throws SQLException {
return super.connection_prepareCall(chain, connection, encode(connection, sql));
} | 3.26 |
druid_EncodingConvertFilter_m2_rdh | // ///////////// callableStatement_
@Override
public void callableStatement_setCharacterStream(FilterChain chain, CallableStatementProxy statement, String parameterName, Reader reader) throws SQLException {
String text = Utils.read(reader);
Reader encodeReader = new StringReader(encode(statement.getConnectionProxy(), text));
super.callableStatement_setCharacterStream(chain, statement, parameterName, encodeReader);
} | 3.26 |
druid_EncodingConvertFilter_m0_rdh | // nativeSQL
@Override
public String connection_nativeSQL(FilterChain chain, ConnectionProxy connection, String sql) throws SQLException {
String encodedSql = encode(connection, sql);
return super.connection_nativeSQL(chain, connection, encodedSql);
} | 3.26 |
druid_EncodingConvertFilter_connection_prepareStatement_rdh | // //////////////// Connection
@Override
public PreparedStatementProxy connection_prepareStatement(FilterChain chain, ConnectionProxy connection, String sql) throws SQLException {
return super.connection_prepareStatement(chain, connection, encode(connection, sql));
} | 3.26 |
druid_EncodingConvertFilter_decodeObject_rdh | // ///////////
public Object decodeObject(ConnectionProxy connection, Object object) throws SQLException {
if (object instanceof String) {
return decode(connection, ((String) (object)));
}
if (object instanceof Reader) {
Reader reader = ((Reader) (object));
String text = Utils.read(reader);
return new StringReader(decode(connection, text));
}
return object;
} | 3.26 |
druid_EncodingConvertFilter_statement_addBatch_rdh | // ////////////// statement
@Override
public void statement_addBatch(FilterChain chain, StatementProxy statement, String sql) throws SQLException {
super.statement_addBatch(chain, statement, encode(statement.getConnectionProxy(), sql));
} | 3.26 |
druid_EncodingConvertFilter_clob_position_rdh | // //////////
@Override
public long clob_position(FilterChain chain, ClobProxy wrapper, String searchstr, long start) throws SQLException {
return chain.clob_position(wrapper, encode(wrapper.getConnectionWrapper(), searchstr), start);
} | 3.26 |
druid_HexBin_decode_rdh | /**
* Decode a hex string into a byte array.
*
* @param encoded
* the hex-encoded string
* @return the decoded bytes, or null if the input is null, has odd length, or contains a non-hex character
*/
public static byte[] decode(String encoded) {
if (encoded == null) {
return null;
}
int lengthData = encoded.length();
if ((lengthData % 2) != 0) {
return null;
}
char[] binaryData = encoded.toCharArray();
int lengthDecode = lengthData / 2;
byte[] decodedData = new byte[lengthDecode];
byte temp1;
byte temp2;
char tempChar;
for (int i = 0; i < lengthDecode; i++) {
tempChar = binaryData[i * 2];
temp1 = (tempChar < BASE_LENGTH) ? HEX_NUMBER_TABLE[tempChar] : -1;
if (temp1 == (-1)) {
return null;
}
tempChar = binaryData[(i * 2) + 1];
temp2 = (tempChar < BASE_LENGTH) ? HEX_NUMBER_TABLE[tempChar] : -1;
if (temp2 == (-1)) {
return null;
}
decodedData[i] = ((byte) ((temp1 << 4) | temp2));
}
return decodedData;
} | 3.26 |
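A minimal usage sketch:

byte[] bytes = HexBin.decode("4A6F");
// bytes == { 0x4A, 0x6F }; null is returned for null input, odd length, or non-hex characters.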
druid_BlinkStatementParser_getSQLCreateTableParser_rdh | //
// public H2SelectParser createSQLSelectParser() {
// return new H2SelectParser(this.exprParser, selectListCache);
// }
public SQLCreateTableParser getSQLCreateTableParser() {
return new BlinkCreateTableParser(this.exprParser);
} | 3.26 |
all7c_createServer_createServer_rdh | /**
* Only a server that implements a servlet container can support
* features such as @Context injection of servlet objects.
*/
public class RestServerFactory {
public RestProtocolServer createServer(String name) {
return new NettyHttpRestServer();
}
} | 3.26 |
hmily_HmilyTacDatasourceConfig_dataSource_rdh | /**
* Build the primary data source, wrapped in a HmilyP6Datasource.
*
* @return the data source
*/
@Bean
@Primary
public DataSource dataSource() {
HikariDataSource hikariDataSource = new HikariDataSource();
hikariDataSource.setJdbcUrl(dataSourceProperties.getUrl());
hikariDataSource.setDriverClassName(dataSourceProperties.getDriverClassName());
hikariDataSource.setUsername(dataSourceProperties.getUsername());
hikariDataSource.setPassword(dataSourceProperties.getPassword());
hikariDataSource.setMaximumPoolSize(20);
hikariDataSource.setMinimumIdle(10);
hikariDataSource.setConnectionTimeout(30000);
hikariDataSource.setIdleTimeout(600000);
hikariDataSource.setMaxLifetime(1800000);
return new HmilyP6Datasource(hikariDataSource);
} | 3.26 |
hmily_HmilyActionEnum_acquireByCode_rdh | /**
* Acquire the TCC action enum by code.
*
* @param code
* the code
* @return the matching action enum, or HmilyActionEnum.TRYING if none matches
*/
public static HmilyActionEnum acquireByCode(final int code) {
return Arrays.stream(HmilyActionEnum.values()).filter(v -> Objects.equals(v.getCode(), code)).findFirst().orElse(HmilyActionEnum.TRYING);
} | 3.26 |
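A minimal usage sketch (the numeric code is a hypothetical value):

HmilyActionEnum action = HmilyActionEnum.acquireByCode(2); // hypothetical code
// An unmatched code falls back to HmilyActionEnum.TRYING instead of throwing.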
hmily_HmilySelectStatement_getWhere_rdh | /**
* Get where.
*
* @return where segment
*/
public Optional<HmilyWhereSegment> getWhere() {
return Optional.ofNullable(where);
} | 3.26 |
hmily_HmilyLockManager_tryAcquireLocks_rdh | /**
* Try acquire locks.
*
* @param hmilyLocks
* hmily locks
*/
// TODO add timeout mechanism in future
public void tryAcquireLocks(final Collection<HmilyLock> hmilyLocks) {
Set<String> existedHmilyLockIds = new HashSet<>();
for (HmilyLock each : hmilyLocks) {
Optional<HmilyLock> hmilyLock = HmilyLockCacheManager.getInstance().get(each.getLockId());
if (hmilyLock.isPresent()) {
if (!hmilyLock.get().getTransId().equals(each.getTransId())) {
String message = String.format("current record [%s] has locked by transaction:[%s]", each.getLockId(), hmilyLock.get().getTransId());
log.error(message);
throw new HmilyLockConflictException(message);
}
existedHmilyLockIds.add(hmilyLock.get().getLockId());
}
}
Collection<HmilyLock> locksToStore = hmilyLocks;
// If a lock already exists in the database, remove it from the locks to be stored
if (CollectionUtils.isNotEmpty(existedHmilyLockIds)) {
locksToStore = hmilyLocks.stream().filter(lock -> !existedHmilyLockIds.contains(lock.getLockId())).collect(Collectors.toList());
}
if (CollectionUtils.isEmpty(locksToStore)) {
return;
}
HmilyRepositoryStorage.writeHmilyLocks(locksToStore);
locksToStore.forEach(lock -> HmilyLockCacheManager.getInstance().cacheHmilyLock(lock.getLockId(), lock));
} | 3.26 |
hmily_HmilyLockManager_releaseLocks_rdh | /**
* Release locks.
*
* @param hmilyLocks
* hmily locks
*/
public void releaseLocks(final Collection<HmilyLock> hmilyLocks) {
HmilyRepositoryStorage.releaseHmilyLocks(hmilyLocks);
hmilyLocks.forEach(lock -> HmilyLockCacheManager.getInstance().removeByKey(lock.getLockId()));
log.debug("TAC-release-lock ::: {}", hmilyLocks);
} | 3.26 |
hmily_HmilyLockManager_checkLocks_rdh | /**
* Check locks.
*
* @param hmilyLocks
* hmily locks
*/
public void checkLocks(final Collection<HmilyLock> hmilyLocks) {
if (CollectionUtils.isEmpty(hmilyLocks)) {
return;
}
for (HmilyLock lock : hmilyLocks) {
Optional<HmilyLock> hmilyLock = HmilyLockCacheManager.getInstance().get(lock.getLockId());
if (hmilyLock.isPresent() && (!Objects.equals(hmilyLock.get().getTransId(), lock.getTransId()))) {
String message = String.format("current record [%s] has locked by transaction:[%s]", lock.getLockId(), hmilyLock.get().getTransId());
log.error(message);
throw new HmilyLockConflictException(message);
}
}
} | 3.26 |
hmily_NetUtils_getLocalIp_rdh | /**
* Gets local ip.
*
* @return the local ip
*/
public static String getLocalIp() {
if (localAddress == null) {
synchronized(NetUtils.class) {
if (localAddress == null) {
try {
localAddress = InetAddress.getLocalHost().getHostAddress();
} catch (UnknownHostException e) {
localAddress = "0.0.0.0";
}
}
}
}
return localAddress;
} | 3.26 |
hmily_IndexMetaDataLoader_load_rdh | /**
* Load index meta data list.
* In a few JDBC implementations (e.g. Oracle), the result of getIndexInfo contains a statistics record that is not an index itself, and whose INDEX_NAME is null.
*
* @param connection
* connection
* @param table
* table name
* @return index meta data list
* @throws SQLException
* SQL exception
*/
public static Collection<IndexMetaData> load(final Connection connection, final String table) throws SQLException {
Collection<IndexMetaData> result = new HashSet<>();
try (ResultSet resultSet = connection.getMetaData().getIndexInfo(connection.getCatalog(), connection.getSchema(), table, false, false)) {
while (resultSet.next()) {
String indexName = resultSet.getString(INDEX_NAME);
if (null != indexName) {
result.add(new IndexMetaData(indexName));
}
}
}
return result;
} | 3.26 |
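A minimal usage sketch (hypothetical JDBC URL, credentials, and table name):

try (Connection connection = DriverManager.getConnection("jdbc:mysql://localhost:3306/demo", "user", "pass")) {
    Collection<IndexMetaData> indexes = IndexMetaDataLoader.load(connection, "t_order");
    // Statistics records with a null INDEX_NAME (e.g. on Oracle) have already been filtered out.
}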
hmily_HmilyRepositoryEvent_clear_rdh | /**
* Help GC by nulling out references.
*/
public void clear() {
hmilyTransaction = null;
hmilyParticipant = null;
hmilyParticipantUndo = null;
hmilyLocks = null;
transId = null;
} | 3.26 |
hmily_HmilySQLServerDeleteStatement_getWithSegment_rdh | /**
* Get with segment.
*
* @return with segment.
*/
public Optional<HmilyWithSegment> getWithSegment() {
return Optional.ofNullable(withSegment);
} | 3.26 |
hmily_HmilySQLServerDeleteStatement_getOutputSegment_rdh | /**
* Get output segment.
*
* @return output segment.
*/
public Optional<HmilyOutputSegment> getOutputSegment() {
return Optional.ofNullable(outputSegment);
} | 3.26 |
hmily_RepositoryPathUtils_buildZookeeperRootPath_rdh | /**
* Build zookeeper root path string.
*
* @param prefix
* the prefix
* @param id
* the id
* @return the string
*/
public static String buildZookeeperRootPath(final String prefix, final String id) {
return String.join("/", prefix, id);
} | 3.26 |
hmily_RepositoryPathUtils_getFullFileName_rdh | /**
* Gets full file name.
*
* @param filePath
* the file path
* @param id
* the id
* @return the full file name
*/
public static String getFullFileName(final String filePath, final String id) {
return String.format("%s/%s", filePath, id);
} | 3.26 |
hmily_RepositoryPathUtils_buildMongoTableName_rdh | /**
* Build mongo table name string.
*
* @param applicationName
* the application name
* @return the string
*/
public static String buildMongoTableName(final String applicationName) {
return CommonConstant.DB_SUFFIX + applicationName.replaceAll("-", "_");
} | 3.26 |
hmily_RepositoryPathUtils_buildZookeeperPathPrefix_rdh | /**
* Build zookeeper path prefix string.
*
* @param applicationName
* the application name
* @return the string
*/
public static String buildZookeeperPathPrefix(final String applicationName) {
return String.join("-", CommonConstant.PATH_SUFFIX, applicationName);
} | 3.26 |
hmily_RepositoryPathUtils_buildDbTableName_rdh | /**
* Build db table name string.
*
* @param applicationName
* the application name
* @return the string
*/
public static String buildDbTableName(final String applicationName) {
return CommonConstant.DB_SUFFIX + applicationName.replaceAll("-", "_");
} | 3.26 |
hmily_RepositoryPathUtils_buildRedisKey_rdh | /**
* Build redis key string.
*
* @param keyPrefix
* the key prefix
* @param id
* the id
* @return the string
*/
public static String buildRedisKey(final String keyPrefix, final String id) {
return String.join(":", keyPrefix,
id);
} | 3.26 |
hmily_AbstractConfig_setLoad_rdh | /**
* Sets load.
*
* @param load
* the load
*/
public void setLoad(final boolean load) {
isLoad = load;
} | 3.26 |
hmily_AbstractConfig_setPassive_rdh | /**
* Sets passive.
*
* @param passive
* the passive
*/
public void setPassive(final boolean passive) {
this.passive = passive;
} | 3.26 |
hmily_HmilyColumnExtractor_extract_rdh | /**
* Get left value if left value of expression is column segment.
*
* @param expression
* expression segment
* @return column segment
*/
public static Optional<HmilyColumnSegment> extract(final HmilyExpressionSegment expression) {
if ((expression instanceof HmilyBinaryOperationExpression) && (((HmilyBinaryOperationExpression) (expression)).getLeft() instanceof HmilyColumnSegment)) {
HmilyColumnSegment column = ((HmilyColumnSegment) (((HmilyBinaryOperationExpression) (expression)).getLeft()));
return Optional.of(column);
}
if ((expression instanceof HmilyInExpression) && (((HmilyInExpression) (expression)).getLeft() instanceof HmilyColumnSegment)) {
HmilyColumnSegment column = ((HmilyColumnSegment) (((HmilyInExpression) (expression)).getLeft()));
return Optional.of(column);
}
if ((expression instanceof HmilyBetweenExpression) && (((HmilyBetweenExpression) (expression)).getLeft() instanceof HmilyColumnSegment)) {
HmilyColumnSegment column = ((HmilyColumnSegment) (((HmilyBetweenExpression) (expression)).getLeft()));
return Optional.of(column);
}
return Optional.empty();
} | 3.26 |
hmily_HmilyTacRollbackExecutor_getInstance_rdh | /**
* Gets instance.
*
* @return the instance
*/
public static HmilyTacRollbackExecutor getInstance() {
if (instance == null) {
synchronized(HmilyTacRollbackExecutor.class) {
if (instance == null) {
instance = new HmilyTacRollbackExecutor();
}
}
}
return instance;
} | 3.26 |
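A note on the double-checked locking used here (and in getLocalIp above): under the Java Memory Model the pattern is only safe when the checked field is declared volatile, which these snippets do not show. A minimal generic sketch of the safe form:

import java.util.function.Supplier;

public final class Lazy<T> {
    private volatile T instance; // volatile is what makes double-checked locking safe
    private final Supplier<T> supplier;

    public Lazy(Supplier<T> supplier) {
        this.supplier = supplier;
    }

    public T get() {
        T local = instance; // one volatile read on the fast path
        if (local == null) {
            synchronized (this) {
                local = instance;
                if (local == null) {
                    instance = local = supplier.get();
                }
            }
        }
        return local;
    }
}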
hmily_ConfigLoader_againLoad_rdh | /**
* Again load.
*
* @param context
* the context
* @param handler
* the handler
* @param tClass
* the t class
*/
default void againLoad(final Supplier<Context> context, final LoaderHandler<T> handler, final Class<T> tClass) {
T config = ConfigEnv.getInstance().getConfig(tClass);
for (PropertyKeySource<?> propertyKeySource : context.get().getSource()) {
ConfigPropertySource configPropertySource = new DefaultConfigPropertySource<>(propertyKeySource, PropertyKeyParse.INSTANCE);
Binder binder = Binder.of(configPropertySource);
T newConfig = binder.bind(config.prefix(), BindData.of(DataType.of(tClass), () -> config));
handler.finish(context, newConfig);
}
} | 3.26 |
hmily_ConfigLoader_getOriginal_rdh | /**
* Gets original.
*
* @return the original
*/
public ConfigLoader<Config> getOriginal() {
return original;
} | 3.26 |
hmily_ConfigLoader_passive_rdh | /**
* Passive subscription processes related events. When the current event is processed,
* the push method is called to push it to subscribers in the system.
*
* @param context
* the context
* @param handler
* the handler
* @param config
* Configuration information of things processed by load method
* @see #push(Supplier, EventData) #push(Supplier, EventData)
*/
default void passive(final Supplier<Context> context, final PassiveHandler<Config> handler, Config config) {
} | 3.26 |
hmily_ConfigLoader_push_rdh | /**
* Implementation of Active Remote Push.
*
* @param context
* the context
* @param data
* the data
*/
default void push(final Supplier<Context> context, final EventData data) {
if (data == null) {
return;
}
Set<EventConsumer<EventData>> events = ConfigEnv.getInstance().getEvents();
if (events.isEmpty()) {
return;
}
String properties = data.getProperties();
List<EventConsumer<EventData>> eventsLists = events.stream().filter(e -> !Objects.isNull(e.regex())).filter(e -> Pattern.matches(e.regex(), properties)).collect(Collectors.toList());
for (EventConsumer<EventData> consumer : eventsLists) {
Optional<Config> first = ConfigEnv.getInstance().stream().filter(e -> properties.startsWith(e.prefix())).findFirst();
first.ifPresent(x -> {
List<PropertyKeySource<?>> sources = new ArrayList<>();
Map<String, Object> values = new HashMap<>(1);
values.put(properties, data.getValue());
sources.add(new MapPropertyKeySource(first.get().prefix(), values));
PassiveHandler<Config> handler = (ct, cf) -> {
data.setConfig(cf);
data.setSubscribe(consumer.regex());
try {
consumer.accept(data);
} catch (ClassCastException e) {
if (LOG.isWarnEnabled()) {
LOG.warn("EventData of type [{}] not accepted by EventConsumer [{}]", data.getClass(), consumer);
}
}
};
context.get().getOriginal().passive(() -> context.get().withSources(sources), handler, first.get());
});
}
} | 3.26 |
hmily_ConfigLoader_with_rdh | /**
* With context.
*
* @param sources
* the sources
* @param original
* the original
* @return the context.
*/
public Context with(final List<PropertyKeySource<?>> sources, final ConfigLoader<Config> original) {
return new Context(original, sources);
} | 3.26 |
hmily_ConfigLoader_withSources_rdh | /**
* With sources context.
*
* @param sources
* the sources
* @return the context.
*/
public Context withSources(final List<PropertyKeySource<?>> sources) {
return with(sources, this.original);
} | 3.26 |
hmily_NacosConfig_fileName_rdh | /**
* File name string.
*
* @return the string
*/
public String fileName() {
return (dataId + ".") + fileExtension;
} | 3.26 |
hmily_UndoHook_run_rdh | /**
* Run boolean.
*
* @param undo
* the undo
* @return the boolean
*/
public boolean run(final HmilyParticipantUndo undo) {
for (Function<HmilyParticipantUndo, Boolean> each : consumers) {
return each.apply(undo);
}
return false;
} | 3.26 |
hmily_UndoHook_register_rdh | /**
* Register.
*
* @param function
* the function
*/
public void register(final Function<HmilyParticipantUndo, Boolean> function) {
consumers.add(function);
} | 3.26 |
hmily_HmilyDataTypeLengthSegment_getScale_rdh | /**
* Get the scale (the second number in the data type length).
*
* @return Optional scale
*/
public Optional<Integer> getScale() {
return Optional.of(scale);
} | 3.26 |
hmily_XidImpl_newBranchId_rdh | /**
* New branch id x id.
*
* @return the x id
*/
public XidImpl newBranchId() {
return new XidImpl(this);
} | 3.26 |
hmily_XidImpl_newResId_rdh | /**
* New res id x id.
*
* @param index
* the index
* @return the x id
*/
public XidImpl newResId(final int index) {
return new XidImpl(this, index);
} | 3.26 |
hmily_CuratorZookeeperClient_addListener_rdh | /**
* Add listener.
*
* @param context
* the context
* @param passiveHandler
* the passive handler
* @param config
* the config
* @throws Exception
* the exception
*/
public void addListener(final Supplier<ConfigLoader.Context> context, final ConfigLoader.PassiveHandler<ZkPassiveConfig> passiveHandler, final ZookeeperConfig config) throws Exception {
if (!config.isPassive()) {
return;
}
if (client == null) {
LOGGER.warn("zookeeper client is null...");
}
// Use CuratorCache to monitor: the lower ZooKeeper version cannot receive the watch messages,
// but this higher-level class, although marked @Deprecated, receives messages normally.
// @see CuratorCache
NodeCache nodeCache = new NodeCache(client, config.getPath());
nodeCache.getListenable().addListener(() -> {
byte[] data = nodeCache.getCurrentData().getData();
String string = new String(data, StandardCharsets.UTF_8);
ZkPassiveConfig zkPassiveConfig = new ZkPassiveConfig();
zkPassiveConfig.setPath(config.getPath());
zkPassiveConfig.setFileExtension(config.getFileExtension());
zkPassiveConfig.setValue(string);
passiveHandler.passive(context, zkPassiveConfig);
});
nodeCache.start();
LOGGER.info("passive zookeeper remote started....");
} | 3.26 |
hmily_CuratorZookeeperClient_getInstance_rdh | /**
* Gets instance.
*
* @param zookeeperConfig
* the zookeeper config
* @return the instance
*/
public static CuratorZookeeperClient getInstance(final ZookeeperConfig zookeeperConfig) {
if (instance == null) {
synchronized(CuratorZookeeperClient.class) {
if (instance == null) {
instance = new CuratorZookeeperClient();
instance.initCuratorClient(zookeeperConfig);
}
}
}
return instance;
} | 3.26 |
hmily_CuratorZookeeperClient_persist_rdh | /**
* Persist.
*
* @param key
* the key
* @param value
* the value
*/
public void persist(final String key, final String value) {
try {
if (!isExisted(key)) {
client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).forPath(key, value.getBytes(Charsets.UTF_8));
} else {
update(key, value);
}
// CHECKSTYLE:OFF
} catch (final Exception ex) {
// CHECKSTYLE:ON
CuratorZookeeperExceptionHandler.handleException(ex);
}
} | 3.26 |
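A minimal usage sketch (hypothetical path and value; assumes a populated ZookeeperConfig named zookeeperConfig):

CuratorZookeeperClient zkClient = CuratorZookeeperClient.getInstance(zookeeperConfig);
zkClient.persist("/hmily/demo", "demo-value"); // creates the node, or updates it if it already exists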