Mycat's SQL parsing is split into two paths: plain SQL and PreparedStatement.
The following analysis uses plain SQL as the example (the other path is largely the same). After the server receives a SQL statement from the client, FrontendCommandHandler's handle method is invoked; it calls FrontendConnection's query method, which calls ServerQueryHandler's query method, which finally calls ServerConnection's execute method. The dispatch chain is sketched below, and the execute method itself follows.
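For orientation, here is a condensed, hypothetical sketch of that dispatch chain. The class names come from the Mycat source mentioned above, but the bodies and signatures here are simplified stand-ins, not the real code.

// A condensed, hypothetical sketch of the dispatch chain (not the literal Mycat code):
// FrontendCommandHandler.handle -> FrontendConnection.query -> ServerQueryHandler.query -> ServerConnection.execute
class DispatchChainSketch {

    static final byte COM_QUERY = 0x03;  // MySQL protocol command byte for a text query

    // FrontendCommandHandler.handle: branch on the command byte of the incoming packet
    void handle(byte[] packet) {
        if (packet[4] == COM_QUERY) {
            query(packet);                       // corresponds to FrontendConnection.query
        }
        // other commands (COM_PING, COM_QUIT, COM_STMT_PREPARE, ...) take other branches
    }

    // FrontendConnection.query: decode the SQL text and hand it to the query handler
    void query(byte[] packet) {
        String sql = new String(packet, 5, packet.length - 5); // skip 4-byte header + command byte
        queryHandlerQuery(sql);                  // corresponds to ServerQueryHandler.query
    }

    // ServerQueryHandler.query: classify the statement and call ServerConnection.execute
    void queryHandlerQuery(String sql) {
        int type = parse(sql);                   // stands in for ServerParse.parse
        serverConnectionExecute(sql, type);      // the execute method shown below
    }

    int parse(String sql) { return sql.trim().toUpperCase().startsWith("SELECT") ? 7 : -1; }

    void serverConnectionExecute(String sql, int type) { /* see ServerConnection.execute below */ }
}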
public void execute(String sql, int type) {
    // check connection state
    if (this.isClosed()) {
        LOGGER.warn("ignore execute, server connection is closed " + this);
        return;
    }
    // check transaction state
    if (txInterrupted) {
        writeErrMessage(ErrorCode.ER_YES,
                "Transaction error, need to rollback." + txInterrputMsg);
        return;
    }
    // determine the logical DB currently in use
    String db = this.schema;
    boolean isDefault = true;
    if (db == null) {
        db = SchemaUtil.detectDefaultDb(sql, type);
        if (db == null) {
            writeErrMessage(ErrorCode.ERR_BAD_LOGICDB, "No MyCAT Database selected");
            return;
        }
        isDefault = false;
    }
    // compatibility with phpMyAdmin: emulate responses for MySQL metadata queries
    // TODO: 2016/5/20 support more information_schema features
    if (ServerParse.SELECT == type
            && db.equalsIgnoreCase("information_schema")) {
        MysqlInformationSchemaHandler.handle(sql, this);
        return;
    }
    if (ServerParse.SELECT == type
            && sql.contains("mysql")
            && sql.contains("proc")) {
        SchemaUtil.SchemaInfo schemaInfo = SchemaUtil.parseSchema(sql);
        if (schemaInfo != null
                && "mysql".equalsIgnoreCase(schemaInfo.schema)
                && "proc".equalsIgnoreCase(schemaInfo.table)) {
            // compatibility with MySQL Workbench
            MysqlProcHandler.handle(sql, this);
            return;
        }
    }
    SchemaConfig schema = MycatServer.getInstance().getConfig().getSchemas().get(db);
    if (schema == null) {
        writeErrMessage(ErrorCode.ERR_BAD_LOGICDB,
                "Unknown MyCAT Database '" + db + "'");
        return;
    }
    // fix navicat: SELECT STATE AS `State`, ROUND(SUM(DURATION),7) AS `Duration`, CONCAT(ROUND(SUM(DURATION)/*100,3), '%') AS `Percentage` FROM INFORMATION_SCHEMA.PROFILING WHERE QUERY_ID= GROUP BY STATE ORDER BY SEQ
    if (ServerParse.SELECT == type
            && sql.contains(" INFORMATION_SCHEMA.PROFILING ")
            && sql.contains("CONCAT(ROUND(SUM(DURATION)/*100,3)")) {
        InformationSchemaProfiling.response(this);
        return;
    }
    /* Even when a default schema is set, SQL can still be run against another
     * schema by qualifying it inside the statement (verified with the mysql
     * client), so support schema qualification inside the SQL here.
     */
    if (isDefault && schema.isCheckSQLschema() && isNormalSql(type)) {
        SchemaUtil.SchemaInfo schemaInfo = SchemaUtil.parseSchema(sql);
        if (schemaInfo != null && schemaInfo.schema != null && !schemaInfo.schema.equals(db)) {
            SchemaConfig schemaConfig = MycatServer.getInstance().getConfig().getSchemas().get(schemaInfo.schema);
            if (schemaConfig != null)
                schema = schemaConfig;
        }
    }
    routeEndExecuteSQL(sql, type, schema);
}
At the end there is the routeEndExecuteSQL method. It first calls RouteService's route method to perform routing; that method calls HintSQLHandler's route method, which in turn calls RouteStrategy's route method. A strategy pattern is used here: the statement types listed below (from ServerParse) are distinguished, and different types are handled by different strategies (a sketch of this type-based dispatch follows the constant list).
public final class ServerParse {
public static final int OTHER = -1;
public static final int BEGIN = 1;
public static final int COMMIT = 2;
public static final int DELETE = 3;
public static final int INSERT = 4;
public static final int REPLACE = 5;
public static final int ROLLBACK = 6;
public static final int SELECT = 7;
public static final int SET = 8;
public static final int SHOW = 9;
public static final int START = 10;
public static final int UPDATE = 11;
public static final int KILL = 12;
public static final int SAVEPOINT = 13;
public static final int USE = 14;
public static final int EXPLAIN = 15;
public static final int EXPLAIN2 = 151;
public static final int KILL_QUERY = 16;
public static final int HELP = 17;
public static final int MYSQL_CMD_COMMENT = 18;
public static final int MYSQL_COMMENT = 19;
public static final int CALL = 20;
public static final int DESCRIBE = 21;
public static final int LOAD_DATA_INFILE_SQL = 99;
public static final int DDL = 100;
}
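To make the type-based dispatch concrete, here is a minimal sketch of how a parsed type can select a handling branch. The helper names (executeNormalSql, handleShow, switchSchema) are placeholders, not Mycat methods; the real dispatch in ServerQueryHandler and the route strategies covers many more cases.

// Sketch only: branch on the statement type produced by ServerParse.parse(sql).
class TypeDispatchSketch {
    void dispatch(String sql, int parseResult) {
        int type = parseResult & 0xff;           // the low byte carries the statement type
        switch (type) {
            case ServerParse.SELECT:
            case ServerParse.INSERT:
            case ServerParse.UPDATE:
            case ServerParse.DELETE:
                executeNormalSql(sql, type);     // goes through routing (RouteStrategy.route)
                break;
            case ServerParse.SHOW:
                handleShow(sql);                 // answered locally or rewritten
                break;
            case ServerParse.USE:
                switchSchema(sql);               // switch the logical schema
                break;
            default:
                executeNormalSql(sql, type);     // everything else falls back to normal routing
        }
    }

    void executeNormalSql(String sql, int type) { /* placeholder */ }
    void handleShow(String sql) { /* placeholder */ }
    void switchSchema(String sql) { /* placeholder */ }
}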
Which routing method is used is decided in routeNormalSqlWithAST:
public RouteResultset routeNormalSqlWithAST(SchemaConfig schema,
        String stmt, RouteResultset rrs, String charset,
        LayerCachePool cachePool) throws SQLNonTransientException {
    /**
     * in MySQL-only mode, only MySQL syntax is supported
     */
    SQLStatementParser parser = null;
    if (schema.isNeedSupportMultiDBType()) {
        parser = new MycatStatementParser(stmt);
    } else {
        parser = new MySqlStatementParser(stmt);
    }
    MycatSchemaStatVisitor visitor = null;
    SQLStatement statement;
    /**
     * any parse failure is reported uniformly as a SQL syntax error
     */
    try {
        statement = parser.parseStatement();
        visitor = new MycatSchemaStatVisitor();
    } catch (Exception t) {
        LOGGER.error("DruidMycatRouteStrategyError", t);
        throw new SQLSyntaxErrorException(t);
    }
    /**
     * check for unsupported statements
     */
    checkUnSupportedStatement(statement);
    DruidParser druidParser = DruidParserFactory.create(schema, statement, visitor);
    druidParser.parser(schema, rrs, statement, stmt, cachePool, visitor);
    /**
     * if routing was already completed during DruidParser parsing, return directly
     */
    if (rrs.isFinishedRoute()) {
        return rrs;
    }
    /**
     * SELECT without FROM, and similar statements
     */
    DruidShardingParseInfo ctx = druidParser.getCtx();
    if ((ctx.getTables() == null || ctx.getTables().size() == 0)
            && (ctx.getTableAliasMap() == null || ctx.getTableAliasMap().isEmpty())) {
        return RouterUtil.routeToSingleNode(rrs, schema.getRandomDataNode(), druidParser.getCtx().getSql());
    }
    if (druidParser.getCtx().getRouteCalculateUnits().size() == 0) {
        RouteCalculateUnit routeCalculateUnit = new RouteCalculateUnit();
        druidParser.getCtx().addRouteCalculateUnit(routeCalculateUnit);
    }
    SortedSet<RouteResultsetNode> nodeSet = new TreeSet<RouteResultsetNode>();
    for (RouteCalculateUnit unit : druidParser.getCtx().getRouteCalculateUnits()) {
        RouteResultset rrsTmp = RouterUtil.tryRouteForTables(schema, druidParser.getCtx(), unit, rrs, isSelect(statement), cachePool);
        if (rrsTmp != null) {
            for (RouteResultsetNode node : rrsTmp.getNodes()) {
                nodeSet.add(node);
            }
        }
    }
    RouteResultsetNode[] nodes = new RouteResultsetNode[nodeSet.size()];
    int i = 0;
    for (Iterator<RouteResultsetNode> iterator = nodeSet.iterator(); iterator.hasNext();) {
        nodes[i] = iterator.next();
        i++;
    }
    rrs.setNodes(nodes);
    // sub-tables
    /**
     * subTables="t_order$1-2,t_order3"
     * sub-table splitting is supported starting with 1.6; in sub-table mode only one
     * dataNode can be configured, and joins are not supported.
     */
    if (rrs.isDistTable()) {
        return routeDisTable(statement, rrs);
    }
    return rrs;
}
The routing just shown relies on Alibaba's Druid as its SQL parser, and it supports sharding across databases and tables. Below we take the routing of multiple sharded tables as the example and analyze findRouteWithcConditionsForTables.
public static void findRouteWithcConditionsForTables(SchemaConfig schema, RouteResultset rrs,
        Map<String, Map<String, Set<ColumnRoutePair>>> tablesAndConditions,
        Map<String, Set<String>> tablesRouteMap, String sql, LayerCachePool cachePool, boolean isSelect)
        throws SQLNonTransientException {
    // find routes for sharded tables
    for (Map.Entry<String, Map<String, Set<ColumnRoutePair>>> entry : tablesAndConditions.entrySet()) {
        String tableName = entry.getKey().toUpperCase();
        TableConfig tableConfig = schema.getTables().get(tableName);
        if (tableConfig == null) {
            String msg = "can't find table define in schema "
                    + tableName + " schema:" + schema.getName();
            LOGGER.warn(msg);
            throw new SQLNonTransientException(msg);
        }
        if (tableConfig.getDistTables() != null && tableConfig.getDistTables().size() > 0) {
            routeToDistTableNode(tableName, schema, rrs, sql, tablesAndConditions, cachePool, isSelect);
        }
        // skip global tables and non-sharded tables (global tables are handled later)
        if (tableConfig.isGlobalTable() || schema.getTables().get(tableName).getDataNodes().size() == 1) {
            continue;
        } else { // non-global tables: sharded tables, child tables, others
            Map<String, Set<ColumnRoutePair>> columnsMap = entry.getValue();
            String joinKey = tableConfig.getJoinKey();
            String partionCol = tableConfig.getPartitionColumn();
            String primaryKey = tableConfig.getPrimaryKey();
            boolean isFoundPartitionValue = partionCol != null && entry.getValue().get(partionCol) != null;
            boolean isLoadData = false;
            if (LOGGER.isDebugEnabled()
                    && sql.startsWith(LoadData.loadDataHint) || rrs.isLoadData()) {
                // load data computes a large amount of routing data at once;
                // emitting this log would severely hurt load data performance
                isLoadData = true;
            }
            if (entry.getValue().get(primaryKey) != null && entry.getValue().size() == 1 && !isLoadData) {
                // lookup by primary key
                // try by primary key if found in cache
                Set<ColumnRoutePair> primaryKeyPairs = entry.getValue().get(primaryKey);
                if (primaryKeyPairs != null) {
                    if (LOGGER.isDebugEnabled()) {
                        LOGGER.debug("try to find cache by primary key ");
                    }
                    String tableKey = schema.getName() + '_' + tableName;
                    boolean allFound = true;
                    for (ColumnRoutePair pair : primaryKeyPairs) { // possibly multiple key values, e.g. id in (1,2,3)
                        String cacheKey = pair.colValue;
                        String dataNode = (String) cachePool.get(tableKey, cacheKey);
                        if (dataNode == null) {
                            allFound = false;
                            continue;
                        } else {
                            if (tablesRouteMap.get(tableName) == null) {
                                tablesRouteMap.put(tableName, new HashSet<String>());
                            }
                            tablesRouteMap.get(tableName).add(dataNode);
                            continue;
                        }
                    }
                    if (!allFound) {
                        // need cache primary key -> datanode relation
                        if (isSelect && tableConfig.getPrimaryKey() != null) {
                            rrs.setPrimaryKey(tableKey + '.' + tableConfig.getPrimaryKey());
                        }
                    } else { // all values found in the primary key cache, move on to the next table
                        continue;
                    }
                }
            }
            if (isFoundPartitionValue) { // sharded table
                Set<ColumnRoutePair> partitionValue = columnsMap.get(partionCol);
                if (partitionValue == null || partitionValue.size() == 0) {
                    if (tablesRouteMap.get(tableName) == null) {
                        tablesRouteMap.put(tableName, new HashSet<String>());
                    }
                    tablesRouteMap.get(tableName).addAll(tableConfig.getDataNodes());
                } else {
                    for (ColumnRoutePair pair : partitionValue) {
                        if (pair.colValue != null) {
                            Integer nodeIndex = tableConfig.getRule().getRuleAlgorithm().calculate(pair.colValue);
                            if (nodeIndex == null) {
                                String msg = "can't find any valid datanode :" + tableConfig.getName()
                                        + " -> " + tableConfig.getPartitionColumn() + " -> " + pair.colValue;
                                LOGGER.warn(msg);
                                throw new SQLNonTransientException(msg);
                            }
                            ArrayList<String> dataNodes = tableConfig.getDataNodes();
                            String node;
                            if (nodeIndex >= 0 && nodeIndex < dataNodes.size()) {
                                node = dataNodes.get(nodeIndex);
                            } else {
                                node = null;
                                String msg = "Can't find a valid data node for specified node index :"
                                        + tableConfig.getName() + " -> " + tableConfig.getPartitionColumn()
                                        + " -> " + pair.colValue + " -> " + "Index : " + nodeIndex;
                                LOGGER.warn(msg);
                                throw new SQLNonTransientException(msg);
                            }
                            if (node != null) {
                                if (tablesRouteMap.get(tableName) == null) {
                                    tablesRouteMap.put(tableName, new HashSet<String>());
                                }
                                tablesRouteMap.get(tableName).add(node);
                            }
                        }
                        if (pair.rangeValue != null) {
                            Integer[] nodeIndexs = tableConfig.getRule().getRuleAlgorithm()
                                    .calculateRange(pair.rangeValue.beginValue.toString(), pair.rangeValue.endValue.toString());
                            ArrayList<String> dataNodes = tableConfig.getDataNodes();
                            String node;
                            for (Integer idx : nodeIndexs) {
                                if (idx >= 0 && idx < dataNodes.size()) {
                                    node = dataNodes.get(idx);
                                } else {
                                    String msg = "Can't find valid data node(s) for some of specified node indexes :"
                                            + tableConfig.getName() + " -> " + tableConfig.getPartitionColumn();
                                    LOGGER.warn(msg);
                                    throw new SQLNonTransientException(msg);
                                }
                                if (node != null) {
                                    if (tablesRouteMap.get(tableName) == null) {
                                        tablesRouteMap.put(tableName, new HashSet<String>());
                                    }
                                    tablesRouteMap.get(tableName).add(node);
                                }
                            }
                        }
                    }
                }
            } else if (joinKey != null && columnsMap.get(joinKey) != null && columnsMap.get(joinKey).size() != 0) {
                // childTable (for a parent/child join in a SELECT, the root table must be found first;
                // child tables are removed so that only the root table remains)
                Set<ColumnRoutePair> joinKeyValue = columnsMap.get(joinKey);
                Set<String> dataNodeSet = ruleByJoinValueCalculate(rrs, tableConfig, joinKeyValue);
                if (dataNodeSet.isEmpty()) {
                    throw new SQLNonTransientException(
                            "parent key can't find any valid datanode ");
                }
                if (LOGGER.isDebugEnabled()) {
                    LOGGER.debug("found partion nodes (using parent partion rule directly) for child table to update "
                            + Arrays.toString(dataNodeSet.toArray()) + " sql :" + sql);
                }
                if (dataNodeSet.size() > 1) {
                    routeToMultiNode(rrs.isCacheAble(), rrs, dataNodeSet, sql);
                    rrs.setFinishedRoute(true);
                    return;
                } else {
                    rrs.setCacheAble(true);
                    routeToSingleNode(rrs, dataNodeSet.iterator().next(), sql);
                    return;
                }
            } else {
                // no sharding column found: route to all of the table's nodes
                if (tablesRouteMap.get(tableName) == null) {
                    tablesRouteMap.put(tableName, new HashSet<String>());
                }
                tablesRouteMap.get(tableName).addAll(tableConfig.getDataNodes());
            }
        }
    }
}
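The method above only fills tablesRouteMap (table name -> set of candidate dataNodes); combining the per-table sets into the final route is done by the caller in RouterUtil. Conceptually it boils down to intersecting the node sets, roughly as in the simplified sketch below. The method and variable names here are illustrative, not the exact Mycat code.

import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class RouteCombineSketch {
    // Simplified sketch: intersect the candidate dataNode sets of all tables in the query.
    static Set<String> combineRoutes(Map<String, Set<String>> tablesRouteMap) {
        Set<String> result = new HashSet<String>();
        boolean first = true;
        for (Map.Entry<String, Set<String>> entry : tablesRouteMap.entrySet()) {
            Set<String> nodes = entry.getValue();
            if (nodes == null || nodes.isEmpty()) {
                throw new IllegalStateException("no route found for table " + entry.getKey());
            }
            if (first) {
                result.addAll(nodes);    // start with the first table's candidate nodes
                first = false;
            } else {
                result.retainAll(nodes); // keep only nodes shared by every table
            }
        }
        return result;                   // each remaining dataNode receives the SQL
    }
}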
Mycat first looks at the primary key (multiple key values such as id IN (1,2,3) are supported) and uses it to locate the relevant nodes, then executes the SQL on each of those nodes. This produces the route table for the SQL, i.e. the set of nodes on which the referenced tables live. When the tables are sharded on a primary/partition column (the available partition types are listed below), the route is computed from the table names and key values found in the SQL, using the algorithm (RuleAlgorithm) configured for that table in the schema configuration; if no key value or range is present, the statement is routed to all of the table's nodes. Only after the nodes have been determined is the SQL actually executed. A minimal sketch of such a rule algorithm follows the list.
PartitionByDate
PartitionByFileMap
PartitionByHashMod
PartitionByHotDate
PartitionByJumpConsistentHash
PartitionByLong
PartitionByMod
PartitionByMonth
PartitionByMurmurHash
PartitionByPattern
PartitionByPrefixPattern
PartitionByRangeDateHash
PartitionByRangeMod
PartitionByString
PartitionDirectBySubString
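As an illustration of what these rule classes do, here is a minimal modulo-based partition function in the spirit of PartitionByMod. It is only a sketch, not the real Mycat class: the actual algorithms implement a common interface (calculate for a single value, calculateRange for ranges), are configured in rule.xml, and each class above maps a column value to a dataNode index differently.

// Minimal sketch of a modulo partition rule, in the spirit of PartitionByMod.
class ModPartitionSketch {
    private final int nodeCount;

    ModPartitionSketch(int nodeCount) {
        this.nodeCount = nodeCount;
    }

    // single column value -> dataNode index, e.g. user_id = 42 with 3 nodes -> index 0
    Integer calculate(String columnValue) {
        try {
            long value = Long.parseLong(columnValue);
            return (int) (value % nodeCount);
        } catch (NumberFormatException e) {
            return null; // non-numeric value: the caller treats this as "no valid datanode"
        }
    }

    public static void main(String[] args) {
        ModPartitionSketch rule = new ModPartitionSketch(3);
        System.out.println(rule.calculate("42")); // 0 -> routed to dataNodes.get(0)
        System.out.println(rule.calculate("43")); // 1 -> routed to dataNodes.get(1)
    }
}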
Once the route nodes have been found in the routeEndExecuteSQL method mentioned above, it calls NonBlockingSession's execute method. Execution splits into a single-node mode and a multi-node mode; taking multi-node mode as the example, MultiNodeQueryHandler's execute method is called.
public void execute() throws Exception {
    final ReentrantLock lock = this.lock;
    lock.lock();
    try {
        this.reset(rrs.getNodes().length);
        this.fieldsReturned = false;
        this.affectedRows = 0L;
        this.insertId = 0L;
    } finally {
        lock.unlock();
    }
    MycatConfig conf = MycatServer.getInstance().getConfig();
    startTime = System.currentTimeMillis();
    LOGGER.debug("rrs.getRunOnSlave()-" + rrs.getRunOnSlave());
    for (final RouteResultsetNode node : rrs.getNodes()) {
        BackendConnection conn = session.getTarget(node);
        if (session.tryExistsCon(conn, node)) {
            LOGGER.debug("node.getRunOnSlave()-" + node.getRunOnSlave());
            node.setRunOnSlave(rrs.getRunOnSlave()); // apply the master/slave (runOnSlave) annotation
            LOGGER.debug("node.getRunOnSlave()-" + node.getRunOnSlave());
            _execute(conn, node);
        } else {
            // create new connection
            LOGGER.debug("node.getRunOnSlave()1-" + node.getRunOnSlave());
            node.setRunOnSlave(rrs.getRunOnSlave()); // apply the master/slave (runOnSlave) annotation
            LOGGER.debug("node.getRunOnSlave()2-" + node.getRunOnSlave());
            PhysicalDBNode dn = conf.getDataNodes().get(node.getName());
            dn.getConnection(dn.getDatabase(), autocommit, node, this, node);
            // Note: this call does more than obtain a connection. Once a new connection
            // has been acquired, a chain of callbacks eventually invokes connectionAcquired
            // in this class (the 'this' argument above is passed along for that purpose).
            // connectionAcquired then performs:
            // session.bindConnection(node, conn);
            // _execute(conn, node);
        }
    }
}
At this point the optimized SQL has been sent to each node in the route result for execution.
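Putting the pieces together, the whole path from client SQL to per-node execution can be summarized conceptually as below. This is not Mycat code, only a condensed view of the flow described in this article (classify, route via the sharding rule, then fan the statement out to each routed node); all names are illustrative.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

class QueryFlowSketch {
    // Conceptual summary of the flow described above.
    void executeQuery(String sql) {
        int type = parseType(sql);                 // ServerParse: classify the statement
        Set<String> dataNodes = route(sql, type);  // Druid AST + RuleAlgorithm: pick dataNodes
        for (String dataNode : dataNodes) {
            sendToBackend(dataNode, sql);          // MultiNodeQueryHandler: one backend per node
        }
        // result sets / affected rows from the nodes are then merged and returned to the client
    }

    int parseType(String sql) { return 7; /* e.g. SELECT */ }
    Set<String> route(String sql, int type) { return new HashSet<String>(Arrays.asList("dn1", "dn2")); }
    void sendToBackend(String dataNode, String sql) { /* acquire connection and execute */ }
}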