diff --git a/.gitignore b/.gitignore
index 1d2433fdc..c47314ad2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -173,6 +173,7 @@ plugins/v2ray
plugins/frp
plugins/file_search
plugins/proxysql
+plugins/tidb
debug.out
diff --git a/README.md b/README.md
index 301884e66..2337f28e7 100644
--- a/README.md
+++ b/README.md
@@ -110,9 +110,12 @@ docker run -itd --name mw-server --privileged=true -p 7200:7200 -p 80:80 -p 443:
```
-### 版本更新 0.16.8
+### 版本更新 0.16.9
-- 首页对网络/磁盘IO进行更细致的展示。
+- mysql同步优化,享受丝滑般感觉。
+- 网站统计 - 实时-可调节1-10s。
+- 网站统计 - 加入大小条件。
+- Sphinx优化。
### JSDelivr安装地址
diff --git a/class/core/config_api.py b/class/core/config_api.py
index f3702440d..71dd0d896 100755
--- a/class/core/config_api.py
+++ b/class/core/config_api.py
@@ -28,7 +28,7 @@ from flask import request
class config_api:
- __version = '0.16.8'
+ __version = '0.16.9'
__api_addr = 'data/api.json'
# 统一默认配置文件
diff --git a/class/core/files_api.py b/class/core/files_api.py
index 3c8843201..d44715129 100755
--- a/class/core/files_api.py
+++ b/class/core/files_api.py
@@ -121,6 +121,7 @@ class files_api:
def fileAccessApi(self):
filename = request.form.get('filename', '')
data = self.getAccess(filename)
+ data['sys_users'] = self.getSysUserList()
return mw.getJson(data)
def setFileAccessApi(self):
@@ -933,6 +934,20 @@ class files_api:
data['chown'] = 'www'
return data
+ def getSysUserList(self):
+ pwd_file = '/etc/passwd'
+ if os.path.exists(pwd_file):
+ content = mw.readFile(pwd_file)
+ clist = content.split('\n')
+ sys_users = []
+ for line in clist:
+ if line.find(":")<0:
+ continue
+ lines = line.split(":",1)
+ sys_users.append(lines[0])
+ return sys_users
+ return ['root','mysql','www']
+
# 计算文件数量
def getCount(self, path, search):
i = 0
diff --git a/class/core/mw.py b/class/core/mw.py
index b54ad11dd..e88a3f57e 100755
--- a/class/core/mw.py
+++ b/class/core/mw.py
@@ -79,10 +79,11 @@ def getRootDir():
def getPluginDir():
return getRunDir() + '/plugins'
-
def getPanelDataDir():
return getRunDir() + '/data'
+def getMWLogs():
+ return getRunDir() + '/logs'
def getPanelTmp():
return getRunDir() + '/tmp'
diff --git a/class/plugin/orm.py b/class/plugin/orm.py
index 3e96ff9f1..a1852451d 100755
--- a/class/plugin/orm.py
+++ b/class/plugin/orm.py
@@ -17,37 +17,50 @@ class ORM:
__DB_CUR = None
__DB_ERR = None
__DB_CNF = '/etc/my.cnf'
+ __DB_TIMEOUT=1
__DB_SOCKET = '/www/server/mysql/mysql.sock'
__DB_CHARSET = "utf8"
def __Conn(self):
+ # print(self.__DB_HOST, self.__DB_USER)
'''连接数据库'''
try:
- if os.path.exists(self.__DB_SOCKET):
+ if self.__DB_HOST != 'localhost':
+ try:
+ self.__DB_CONN = pymysql.connect(host=self.__DB_HOST, user=self.__DB_USER, passwd=self.__DB_PASS,
+ database=self.__DB_NAME,
+ port=int(self.__DB_PORT), charset=self.__DB_CHARSET, connect_timeout=self.__DB_TIMEOUT,
+ cursorclass=pymysql.cursors.DictCursor)
+ except Exception as e:
+ self.__DB_CONN = pymysql.connect(host=self.__DB_HOST, user=self.__DB_USER, passwd=self.__DB_PASS,
+ database=self.__DB_NAME,
+ port=int(self.__DB_PORT), charset=self.__DB_CHARSET, connect_timeout=self.__DB_TIMEOUT,
+ cursorclass=pymysql.cursors.DictCursor)
+ elif os.path.exists(self.__DB_SOCKET):
try:
self.__DB_CONN = pymysql.connect(host=self.__DB_HOST, user=self.__DB_USER, passwd=self.__DB_PASS,
database=self.__DB_NAME,
- port=int(self.__DB_PORT), charset=self.__DB_CHARSET, connect_timeout=1,
+ port=int(self.__DB_PORT), charset=self.__DB_CHARSET, connect_timeout=self.__DB_TIMEOUT,
unix_socket=self.__DB_SOCKET, cursorclass=pymysql.cursors.DictCursor)
except Exception as e:
self.__DB_HOST = '127.0.0.1'
self.__DB_CONN = pymysql.connect(host=self.__DB_HOST, user=self.__DB_USER, passwd=self.__DB_PASS,
database=self.__DB_NAME,
- port=int(self.__DB_PORT), charset=self.__DB_CHARSET, connect_timeout=1,
+ port=int(self.__DB_PORT), charset=self.__DB_CHARSET, connect_timeout=self.__DB_TIMEOUT,
unix_socket=self.__DB_SOCKET, cursorclass=pymysql.cursors.DictCursor)
else:
try:
self.__DB_CONN = pymysql.connect(host=self.__DB_HOST, user=self.__DB_USER, passwd=self.__DB_PASS,
database=self.__DB_NAME,
- port=int(self.__DB_PORT), charset=self.__DB_CHARSET, connect_timeout=1,
+ port=int(self.__DB_PORT), charset=self.__DB_CHARSET, connect_timeout=self.__DB_TIMEOUT,
cursorclass=pymysql.cursors.DictCursor)
except Exception as e:
self.__DB_HOST = '127.0.0.1'
self.__DB_CONN = pymysql.connect(host=self.__DB_HOST, user=self.__DB_USER, passwd=self.__DB_PASS,
database=self.__DB_NAME,
- port=int(self.__DB_PORT), charset=self.__DB_CHARSET, connect_timeout=1,
+ port=int(self.__DB_PORT), charset=self.__DB_CHARSET, connect_timeout=self.__DB_TIMEOUT,
cursorclass=pymysql.cursors.DictCursor)
self.__DB_CUR = self.__DB_CONN.cursor()
@@ -80,6 +93,10 @@ class ORM:
def getPwd(self):
return self.__DB_PASS
+ def setTimeout(self, timeout = 1):
+ self.__DB_TIMEOUT = timeout
+ return True
+
def setDbName(self, name):
self.__DB_NAME = name
@@ -95,6 +112,13 @@ class ORM:
except Exception as ex:
return ex
+ def ping(self):
+ try:
+ self.__DB_CONN.ping()
+ except Exception as e:
+ print(e)
+ return True
+
def query(self, sql):
# 执行SQL语句返回数据集
if not self.__Conn():
diff --git a/cmd.md b/cmd.md
index 0214eb241..202efe7d5 100644
--- a/cmd.md
+++ b/cmd.md
@@ -5,7 +5,8 @@
```
/etc/init.d/mw default | 显示登录信息
-/etc/init.d/mw db | 快捷连接数据库
+/etc/init.d/mw db | 快捷连接MySQL
+/etc/init.d/mw redis | 快捷连接Redis
----------------------------------------
mw open | 开启面板
mw close | 关闭面板
diff --git a/plugins/clean/index.py b/plugins/clean/index.py
index b42abdac1..5a04076f2 100755
--- a/plugins/clean/index.py
+++ b/plugins/clean/index.py
@@ -116,6 +116,7 @@ def initConf():
"/www/server/php/82/var/log",
"/www/server/php/83/var/log",
"/www/server/php/84/var/log",
+ "/www/server/openresty/nginx/logs",
"/www/server/phpmyadmin",
"/www/server/redis/data",
"/www/server/cron",
diff --git a/plugins/data_query/index.html b/plugins/data_query/index.html
index 4b6ee1fca..03b14c611 100755
--- a/plugins/data_query/index.html
+++ b/plugins/data_query/index.html
@@ -5,6 +5,16 @@
white-space: nowrap;
vertical-align: middle;
}
+
+#select_db xm-select{
+ min-height: 30px;
+ line-height: 30px;
+}
+
+#select_db xm-select *{
+ font-size: 12px;
+}
+
diff --git a/plugins/data_query/static/html/index.html b/plugins/data_query/static/html/index.html
index 95292cf54..ab943d3a6 100644
--- a/plugins/data_query/static/html/index.html
+++ b/plugins/data_query/static/html/index.html
@@ -123,9 +123,16 @@
-
- 查找
-
+
+
+
+ 查找
+
+
+
+ 刷新
+
+
diff --git a/plugins/data_query/static/js/app.js b/plugins/data_query/static/js/app.js
index 2e8d74db5..1e75b636a 100755
--- a/plugins/data_query/static/js/app.js
+++ b/plugins/data_query/static/js/app.js
@@ -738,6 +738,10 @@ function mongodbInitField(f, data){
}
mongodbDataList(1);
});
+
+ $('#mongodb .mongodb_refresh').unbind('click').click(function(){
+ mongodbDataList(1);
+ });
}
var mogodb_db_list;
@@ -1252,7 +1256,7 @@ function redisBatchClear(){
xm_db_list = xmSelect.render({
el: '#select_db',
- repeat: true,
+ repeat: false,
toolbar: {show: true},
data: idx_db,
});
diff --git a/plugins/mongodb/index.html b/plugins/mongodb/index.html
index 954e0bb6a..46f25956f 100755
--- a/plugins/mongodb/index.html
+++ b/plugins/mongodb/index.html
@@ -63,6 +63,7 @@
负载状态
复制状态
日志
+
相关说明
diff --git a/plugins/mongodb/index.py b/plugins/mongodb/index.py
index 1300bd55c..2517afd98 100755
--- a/plugins/mongodb/index.py
+++ b/plugins/mongodb/index.py
@@ -18,7 +18,6 @@ if mw.isAppleSystem():
# /usr/lib/systemd/system/mongod.service
-# /var/lib/mongo
# python3 /www/server/mdserver-web/plugins/mongodb/index.py repl_init
# python3 /www/server/mdserver-web/plugins/mongodb/index.py run_repl_info
@@ -107,6 +106,9 @@ def getConfIp():
data = getConfigData()
return data['net']['bindIp']
+def getConfLocalIp():
+ return '127.0.0.1'
+
def getConfPort():
data = getConfigData()
return data['net']['port']
@@ -186,7 +188,7 @@ def mongdbClientS():
import pymongo
port = getConfPort()
auth = getConfAuth()
- ip = getConfIp()
+ ip = getConfLocalIp()
mg_root = pSqliteDb('config').where('id=?', (1,)).getField('mg_root')
if auth == 'disabled':
@@ -200,7 +202,7 @@ def mongdbClient():
import pymongo
port = getConfPort()
auth = getConfAuth()
- ip = getConfIp()
+ ip = getConfLocalIp()
mg_root = pSqliteDb('config').where('id=?', (1,)).getField('mg_root')
# print(ip,port,auth,mg_root)
if auth == 'disabled':
@@ -488,14 +490,13 @@ def runDocInfo():
def runReplInfo():
client = mongdbClient()
db = client.admin
-
+ result = {}
try:
serverStatus = db.command('serverStatus')
except Exception as e:
return mw.returnJson(False, str(e))
d = getConfigData()
- result = {}
if 'replication' in d and 'replSetName' in d['replication']:
result['repl_name'] = d['replication']['replSetName']
@@ -518,7 +519,22 @@ def runReplInfo():
hosts = mw.getDefault(repl,'hosts', '')
result['hosts'] = ','.join(hosts)
-
+ result['members'] = []
+ try:
+ members_list = []
+ replStatus = db.command('replSetGetStatus')
+ if 'members' in replStatus:
+ members = replStatus['members']
+ for m in members:
+ t = {}
+ t['name'] = m['name']
+ t['stateStr'] = m['stateStr']
+ t['uptime'] = m['uptime']
+ members_list.append(t)
+ result['members'] = members_list
+ except Exception as e:
+ pass
+
return mw.returnJson(True, 'OK', result)
def getDbList():
@@ -602,7 +618,7 @@ def addDb():
username = data_name
- client[data_name].chat.insert_one({})
+ client[data_name].zchat.insert_one({})
user_roles = [{'role': 'dbOwner', 'db': data_name}, {'role': 'userAdmin', 'db': data_name}]
if auth_status:
# db.command("dropUser", username)
@@ -787,7 +803,7 @@ def toDbBase(find):
data_name = find['name']
db = client[data_name]
- db.chat.insert_one({})
+ db.zchat.insert_one({})
user_roles = [{'role': 'dbOwner', 'db': data_name}, {'role': 'userAdmin', 'db': data_name}]
try:
db_admin.command("createUser", find['username'], pwd=find['password'], roles=user_roles)
@@ -1011,10 +1027,6 @@ def replSetNode():
add_node = args['node'].strip()
idx = int(args['idx'])
-
-
-
-
priority = -1
if 'priority' in args:
priority = args['priority'].strip()
diff --git a/plugins/mongodb/js/mongodb.js b/plugins/mongodb/js/mongodb.js
index ef370d5af..b606ed04d 100644
--- a/plugins/mongodb/js/mongodb.js
+++ b/plugins/mongodb/js/mongodb.js
@@ -159,6 +159,12 @@ function mongoReplStatus() {
me ' + rdata.me + ' 本机 ';
}
+ var tbody_members = '';
+ var member_list = rdata['members'];
+ for (var i = 0; i < member_list.length; i++) {
+ tbody_members += '
'+member_list[i]['name']+' ' + member_list[i]['stateStr'] + ' '+member_list[i]['uptime']+' ';
+ }
+
// console.log(rdata);
var repl_on = 'btn-danger';
var repl_on_title = '未开启';
@@ -176,9 +182,14 @@ function mongoReplStatus() {
con += '
\
\
字段 当前值 说明 \
- \
- '+tbody+'\
- \
+ '+tbody+' \
+
\
+
';
+
+ con += '
\
+
\
+ IP 状态 在线 \
+ '+tbody_members+' \
\
';
@@ -188,67 +199,66 @@ function mongoReplStatus() {
//设置副本名称
function mongoReplCfgReplSetName(){
- mgPost('run_doc_info', '', '', function(rdata){
- var rdata = $.parseJSON(rdata.data);
+ //
+ layer.open({
+ type: 1,
+ area: '300px',
+ title: '设置副本名称',
+ closeBtn: 1,
+ shift: 5,
+ shadeClose: false,
+ btn:["提交","关闭"],
+ content: "
",
- layer.open({
- type: 1,
- area: '300px',
- title: '设置副本名称',
- closeBtn: 1,
- shift: 5,
- shadeClose: false,
- btn:["提交","关闭"],
- content: "
",
-
- success: function(){
- // console.log(rdata);
- var rlist = rdata['dbs'];
- var dbs = [];
- var selectHtml = '';
- for (var i = 0; i < rlist.length; i++) {
- // console.log(rlist[i]['db']);
- var dbname = rlist[i]['db'];
-
- if (['admin','local','config'].includes(dbname)){
- } else {
- dbs.push(dbname);
- }
- }
-
- if (dbs.length == 0 ){
- selectHtml += "
无 ";
- }
-
- for (index in dbs) {
- selectHtml += "
"+dbs[index]+" ";
- }
-
- $('select[name="replSetName"]').html(selectHtml);
- },
- yes:function(index){
- var data = {};
- data['name'] = $('select[name=replSetName]').val();
- mgPost('repl_set_name', '',data, function(data){
- var rdata = $.parseJSON(data.data);
- showMsg(rdata.msg,function(){
- if (rdata['status']){
- layer.close(index);
- mongoReplCfgInit();
- }
- },{icon: rdata.status ? 1 : 2});
- });
- }
- });
+ success: function(){
+ // // console.log(rdata);
+ // var rlist = rdata['dbs'];
+ // var dbs = [];
+ // var selectHtml = '';
+ // for (var i = 0; i < rlist.length; i++) {
+ // // console.log(rlist[i]['db']);
+ // var dbname = rlist[i]['db'];
+
+ // if (['admin','local','config'].includes(dbname)){
+ // } else {
+ // dbs.push(dbname);
+ // }
+ // }
+
+ // if (dbs.length == 0 ){
+ // selectHtml += "
无 ";
+ // }
+
+ // for (index in dbs) {
+ // selectHtml += "
"+dbs[index]+" ";
+ // }
+
+ // $('select[name="replSetName"]').html(selectHtml);
+ },
+ yes:function(index){
+ var data = {};
+ data['name'] = $('input[name=replSetName]').val();
+ if (data['name'] == ''){
+ layer.msg("副本名称不能为空");
+ return;
+ }
+ mgPost('repl_set_name', '',data, function(data){
+ var rdata = $.parseJSON(data.data);
+ showMsg(rdata.msg,function(){
+ if (rdata['status']){
+ layer.close(index);
+ mongoReplCfgInit();
+ }
+ },{icon: rdata.status ? 1 : 2});
+ });
+ }
});
}
@@ -1193,3 +1203,12 @@ function importDbExternal(file,name){
});
}
+function mgdbReadme(){
+ var readme = '
';
+ readme += '认证同步说明 ';
+ readme += 'root/用户,配置Key完全一致才能同步。 ';
+ readme += ' ';
+
+ $('.soft-man-con').html(readme);
+}
+
diff --git a/plugins/mysql-apt/scripts/backup.py b/plugins/mysql-apt/scripts/backup.py
index 1098abc21..b4236c6fe 100755
--- a/plugins/mysql-apt/scripts/backup.py
+++ b/plugins/mysql-apt/scripts/backup.py
@@ -63,7 +63,7 @@ class backupTools:
if len(mycnf) > 100:
mw.writeFile(db_path + '/etc/my.cnf', mycnf)
- cmd = db_path + "/bin/usr/bin/mysqldump --defaults-file=" + my_conf_path + " --single-transaction --quick --default-character-set=utf8 " + \
+ cmd = db_path + "/bin/usr/bin/mysqldump --defaults-file=" + my_conf_path + " --single-transaction -q --default-character-set=utf8 " + \
name + " | gzip > " + filename
mw.execShell(cmd)
diff --git a/plugins/mysql-yum/scripts/backup.py b/plugins/mysql-yum/scripts/backup.py
index d1d7f32be..ddca912c7 100755
--- a/plugins/mysql-yum/scripts/backup.py
+++ b/plugins/mysql-yum/scripts/backup.py
@@ -65,7 +65,7 @@ class backupTools:
if len(content) > 100:
mw.writeFile(my_cnf, content)
- cmd = db_path + "/bin/usr/bin/mysqldump --defaults-file=" + my_cnf + " --single-transaction --quick --default-character-set=utf8 " + \
+ cmd = db_path + "/bin/usr/bin/mysqldump --defaults-file=" + my_cnf + " --single-transaction -q --default-character-set=utf8 " + \
name + " | gzip > " + filename
mw.execShell(cmd)
diff --git a/plugins/mysql/README.md b/plugins/mysql/README.md
new file mode 100644
index 000000000..5b3b7cc89
--- /dev/null
+++ b/plugins/mysql/README.md
@@ -0,0 +1,27 @@
+
+```
+show global variables like '%gtid%';
+show global variables like 'server_uuid';
+```
+
+```
+# 不锁表,需要删除原来数据表
+# tables = db.query('show tables from `%s`' % sync_db_import)
+# table_key = "Tables_in_" + sync_db_import
+# for tname in tables:
+# drop_db_cmd = 'drop table if exists '+sync_db_import+'.'+tname[table_key]
+# # print(drop_db_cmd)
+# db.query(drop_db_cmd)
+```
+
+```
+# 修改同步位置
+# master_info = sync_mdb.query('show master status')
+# slave_info = db.query('show slave status')
+# if len(master_info)>0:
+# channel_name = slave_info[0]['Channel_Name']
+# change_cmd = "CHANGE MASTER TO MASTER_LOG_FILE='"+master_info[0]['File']+"', MASTER_LOG_POS="+str(master_info[0]['Position'])+" for channel '"+channel_name+"';"
+# print(change_cmd)
+# r = db.execute(change_cmd)
+# print(r)
+```
\ No newline at end of file
diff --git a/plugins/mysql/conf/mysql.sql b/plugins/mysql/conf/mysql.sql
index 8262a9ec2..f98ddf79b 100755
--- a/plugins/mysql/conf/mysql.sql
+++ b/plugins/mysql/conf/mysql.sql
@@ -50,7 +50,9 @@ CREATE TABLE IF NOT EXISTS `slave_sync_user` (
`pass` TEXT,
`mode` TEXT,
`cmd` TEXT,
+ `db` TEXT,
`addtime` TEXT
);
+ALTER TABLE `slave_sync_user` ADD COLUMN `db` TEXT DEFAULT '';
diff --git a/plugins/mysql/index.py b/plugins/mysql/index.py
index 27f51e426..d97e3d1f1 100755
--- a/plugins/mysql/index.py
+++ b/plugins/mysql/index.py
@@ -63,12 +63,12 @@ def getArgs():
if t.strip() == '':
tmp = []
else:
- t = t.split(':', 1)
+ t = t.split(':',1)
tmp[t[0]] = t[1]
tmp[t[0]] = t[1]
elif args_len > 1:
for i in range(len(args)):
- t = args[i].split(':', 1)
+ t = args[i].split(':',1)
tmp[t[0]] = t[1]
return tmp
@@ -2349,7 +2349,7 @@ def addMasterRepSlaveUser(version=''):
if isError != None:
return isError
- sql_select = "grant select,lock tables,PROCESS on *.* to " + username + "@'%';"
+ sql_select = "grant select,reload,REPLICATION CLIENT,PROCESS on *.* to " + username + "@'%';"
pdb.execute(sql_select)
pdb.execute('FLUSH PRIVILEGES;')
@@ -2394,29 +2394,31 @@ def getMasterRepSlaveUserCmd(version):
if sid != '':
channel_name = " for channel 'r{}';".format(sid)
- if mode == "gtid":
- sql = "CHANGE MASTER TO MASTER_HOST='" + ip + "', MASTER_PORT=" + port + ", MASTER_USER='" + \
- clist[0]['username'] + "', MASTER_PASSWORD='" + \
- clist[0]['password'] + "', MASTER_AUTO_POSITION=1" + channel_name
- if version == '8.0':
- sql = "CHANGE REPLICATION SOURCE TO SOURCE_HOST='" + ip + "', SOURCE_PORT=" + port + ", SOURCE_USER='" + \
- clist[0]['username'] + "', SOURCE_PASSWORD='" + \
- clist[0]['password'] + \
- "', MASTER_AUTO_POSITION=1" + channel_name
+ mdb8 = ['8.0','8.1','8.2','8.3','8.4']
+ sql = ''
+ if not mw.inArray(mdb8,version):
+ base_sql = "CHANGE MASTER TO MASTER_HOST='" + ip + "', MASTER_PORT=" + port + ", MASTER_USER='" + \
+ clist[0]['username'] + "', MASTER_PASSWORD='" + \
+ clist[0]['password'] + "'"
+
+ sql += base_sql +';'
+ sql += "
"
+ # sql += base_sql + ", MASTER_AUTO_POSITION=1" + channel_name
+ sql += base_sql + channel_name
+ sql += "
"
+
+ sql += base_sql + ", MASTER_LOG_FILE='" + mstatus[0]["File"] + "',MASTER_LOG_POS=" + str(mstatus[0]["Position"]) + channel_name
else:
- sql = "CHANGE MASTER TO MASTER_HOST='" + ip + "', MASTER_PORT=" + port + ", MASTER_USER='" + \
- clist[0]['username'] + "', MASTER_PASSWORD='" + \
- clist[0]['password'] + \
- "', MASTER_LOG_FILE='" + mstatus[0]["File"] + \
- "',MASTER_LOG_POS=" + str(mstatus[0]["Position"]) + channel_name
-
- if version == "8.0":
- sql = "CHANGE REPLICATION SOURCE TO SOURCE_HOST='" + ip + "', SOURCE_PORT=" + port + ", SOURCE_USER='" + \
+ base_sql = "CHANGE REPLICATION SOURCE TO SOURCE_HOST='" + ip + "', SOURCE_PORT=" + port + ", SOURCE_USER='" + \
clist[0]['username'] + "', SOURCE_PASSWORD='" + \
- clist[0]['password'] + \
- "', SOURCE_LOG_FILE='" + mstatus[0]["File"] + \
- "',SOURCE_LOG_POS=" + \
- str(mstatus[0]["Position"]) + channel_name
+ clist[0]['password']+"'"
+ sql += base_sql +';'
+ sql += "
"
+ # sql += base_sql + ", MASTER_AUTO_POSITION=1" + channel_name
+ sql += base_sql + channel_name
+ sql += "
"
+ sql += base_sql + ", SOURCE_LOG_FILE='" + mstatus[0]["File"] + "',SOURCE_LOG_POS=" + str(mstatus[0]["Position"]) + channel_name
+
data = {}
data['cmd'] = sql
@@ -2769,6 +2771,7 @@ def initSlaveStatus(version=''):
return mw.returnJson(False, 'MySQL未启动', [])
mode_file = getSyncModeFile()
+ # print(mode_file)
if not os.path.exists(mode_file):
return mw.returnJson(False, '需要先设置同步配置')
@@ -2842,7 +2845,7 @@ def initSlaveStatusSyncUser(version=''):
if slave_t['mode'] == '1':
mode_name = 'gtid'
-
+ # print(local_mode, mode_name)
if local_mode != mode_name:
msg += base_t + '->同步模式不一致'
continue
@@ -2856,8 +2859,9 @@ def initSlaveStatusSyncUser(version=''):
pinfo = parseSlaveSyncCmd(cmd_sql)
except Exception as e:
return mw.returnJson(False, base_t + '->CMD同步命令不合规范!')
- # print(u['cmd'])
+ # print(cmd_sql)
t = pdb.query(cmd_sql)
+ # print(t)
isError = isSqlError(t)
if isError:
return isError
@@ -2972,18 +2976,21 @@ def setSlaveStatus(version=''):
return mw.returnJson(True, '设置成功!')
-
-def deleteSlave(version=''):
- args = getArgs()
+def deleteSlaveFunc(sign = ''):
db = pMysqlDb()
- if 'sign' in args:
- sign = args['sign']
+ if sign != '':
db.query("stop slave for channel '{}'".format(sign))
db.query("reset slave all for channel '{}'".format(sign))
else:
db.query('stop slave')
db.query('reset slave all')
+def deleteSlave(version=''):
+ args = getArgs()
+ sign = ''
+ if 'sign' in args:
+ sign = args['sign']
+ deleteSlaveFunc(sign)
return mw.returnJson(True, '删除成功!')
@@ -3018,6 +3025,293 @@ def dumpMysqlData(version=''):
return 'fail'
+############### --- 重要 数据补足同步 ---- ###########
+
+def getSyncMysqlDB(dbname,sign = ''):
+ conn = pSqliteDb('slave_sync_user')
+ if sign != '':
+ data = conn.field('ip,port,user,pass,mode,cmd').where('ip=?', (sign,)).find()
+ else:
+ data = conn.field('ip,port,user,pass,mode,cmd').find()
+ user = data['user']
+ apass = data['pass']
+ port = data['port']
+ ip = data['ip']
+ # 远程数据
+ sync_db = mw.getMyORM()
+ # MySQLdb |
+ sync_db.setPort(port)
+ sync_db.setHost(ip)
+ sync_db.setUser(user)
+ sync_db.setPwd(apass)
+ sync_db.setDbName(dbname)
+ sync_db.setTimeout(60)
+ return sync_db
+
+def syncDatabaseRepairTempFile():
+ tmp_log = mw.getMWLogs()+ '/mysql-check.log'
+ return tmp_log
+
+def syncDatabaseRepairLog(version=''):
+ import subprocess
+ args = getArgs()
+ data = checkArgs(args, ['db','sign','op'])
+ if not data[0]:
+ return data[1]
+
+ sync_args_db = args['db']
+ sync_args_sign = args['sign']
+ op = args['op']
+ tmp_log = syncDatabaseRepairTempFile()
+ cmd = 'cd '+mw.getServerDir()+'/mdserver-web && source bin/activate && python3 plugins/mysql/index.py sync_database_repair {"db":"'+sync_args_db+'","sign":"'+sync_args_sign+'"}'
+ # print(cmd)
+
+ if op == 'get':
+ log = mw.getLastLine(tmp_log, 15)
+ return mw.returnJson(True, log)
+
+ if op == 'cmd':
+ return mw.returnJson(True, 'ok', cmd)
+
+ if op == 'do':
+ os.system(' echo "开始执行" > '+ tmp_log)
+ subprocess.Popen(cmd +' >> '+ tmp_log +' &', shell=True)
+ # time.sleep(10)
+ # mw.execShell('rm -rf '+tmp_log)
+ return mw.returnJson(True, 'ok')
+
+ return mw.returnJson(False, '无效请求!')
+
+
+def syncDatabaseRepair(version=''):
+ time_stats_s = time.time()
+ tmp_log = syncDatabaseRepairTempFile()
+
+ from pymysql.converters import escape_string
+ args = getArgs()
+ data = checkArgs(args, ['db','sign'])
+ if not data[0]:
+ return data[1]
+
+ sync_args_db = args['db']
+ sync_args_sign = args['sign']
+
+ # 本地数据
+ local_db = pMysqlDb()
+ # 远程数据
+ sync_db = getSyncMysqlDB(sync_args_db,sync_args_sign)
+
+ tables = local_db.query('show tables from `%s`' % sync_args_db)
+ table_key = "Tables_in_" + sync_args_db
+ inconsistent_table = []
+
+ tmp_dir = '/tmp/sync_db_repair'
+ mw.execShell('mkdir -p '+tmp_dir)
+
+ for tb in tables:
+
+ table_name = sync_args_db+'.'+tb[table_key]
+ table_check_file = tmp_dir+'/'+table_name+'.txt'
+
+ if os.path.exists(table_check_file):
+ # print(table_name+', 已检查OK')
+ continue
+
+ primary_key_sql = "SHOW INDEX FROM "+table_name+" WHERE Key_name = 'PRIMARY';";
+ primary_key_data = local_db.query(primary_key_sql)
+ # print(primary_key_sql,primary_key_data)
+ pkey_name = '*'
+ if len(primary_key_data) == 1:
+ pkey_name = primary_key_data[0]['Column_name']
+ # print(pkey_name)
+ if pkey_name != '*' :
+ # 智能校验(由于服务器同步可能会慢,比较总数总是对不上)
+ cmd_local_newpk_sql = 'select ' + pkey_name + ' from ' + table_name + " order by " + pkey_name + " desc limit 1"
+ cmd_local_newpk_data = local_db.query(cmd_local_newpk_sql)
+ # print(cmd_local_newpk_data)
+ if len(cmd_local_newpk_data) == 1:
+ # 比较总数
+ cmd_count_sql = 'select count('+pkey_name+') as num from '+table_name + ' where '+pkey_name + ' <= '+ str(cmd_local_newpk_data[0][pkey_name])
+ local_count_data = local_db.query(cmd_count_sql)
+ sync_count_data = sync_db.query(cmd_count_sql)
+
+ if local_count_data != sync_count_data:
+ print(cmd_count_sql)
+ print("all data compare: ",local_count_data, sync_count_data)
+ else:
+ print(table_name+' smart compare check ok.')
+ mw.writeFile(tmp_log, table_name+' smart compare check ok.\n','a+')
+ mw.execShell("echo 'ok' > "+table_check_file)
+ continue
+
+
+
+ # 比较总数
+ cmd_count_sql = 'select count('+pkey_name+') as num from '+table_name
+ local_count_data = local_db.query(cmd_count_sql)
+ sync_count_data = sync_db.query(cmd_count_sql)
+
+ if local_count_data != sync_count_data:
+ print("all data compare: ",local_count_data, sync_count_data)
+ inconsistent_table.append(table_name)
+ diff = sync_count_data[0]['num'] - local_count_data[0]['num']
+ print(table_name+', need sync. diff,'+str(diff))
+ mw.writeFile(tmp_log, table_name+', need sync. diff,'+str(diff)+'\n','a+')
+ else:
+ print(table_name+' check ok.')
+ mw.writeFile(tmp_log, table_name+' check ok.\n','a+')
+ mw.execShell("echo 'ok' > "+table_check_file)
+
+
+ # inconsistent_table = ['xx.xx']
+ # 数据对齐
+ for table_name in inconsistent_table:
+ is_break = False
+ while not is_break:
+ local_db.ping()
+ # 远程数据
+ sync_db.ping()
+
+ print("check table:"+table_name)
+ mw.writeFile(tmp_log, "check table:"+table_name+'\n','a+')
+ table_name_pos = 0
+ table_name_pos_file = tmp_dir+'/'+table_name+'.pos.txt'
+ primary_key_sql = "SHOW INDEX FROM "+table_name+" WHERE Key_name = 'PRIMARY';";
+ primary_key_data = local_db.query(primary_key_sql)
+ pkey_name = primary_key_data[0]['Column_name']
+
+ if os.path.exists(table_name_pos_file):
+ table_name_pos = mw.readFile(table_name_pos_file)
+
+
+ data_select_sql = 'select * from '+table_name + ' where '+pkey_name+' > '+str(table_name_pos)+' limit 10000'
+ print(data_select_sql)
+ local_select_data = local_db.query(data_select_sql)
+
+ time_s = time.time()
+ sync_select_data = sync_db.query(data_select_sql)
+ print(f'sync query cos:{time.time() - time_s:.4f}s')
+ mw.writeFile(tmp_log, f'sync query cos:{time.time() - time_s:.4f}s\n','a+')
+
+ # print(local_select_data)
+ # print(sync_select_data)
+
+ # print(len(local_select_data))
+ # print(len(sync_select_data))
+ print('pos:',str(table_name_pos),'local compare sync,',local_select_data == sync_select_data)
+
+
+ cmd_count_sql = 'select count('+pkey_name+') as num from '+table_name
+ local_count_data = local_db.query(cmd_count_sql)
+ time_s = time.time()
+ sync_count_data = sync_db.query(cmd_count_sql)
+ print(f'sync count data cos:{time.time() - time_s:.4f}s')
+ print(local_count_data,sync_count_data)
+ # 数据同步有延迟,相等即任务数据补足完成
+ if local_count_data[0]['num'] == sync_count_data[0]['num']:
+ is_break = True
+ break
+
+ diff = sync_count_data[0]['num'] - local_count_data[0]['num']
+ print("diff," + str(diff)+' line data!')
+
+ if local_select_data == sync_select_data:
+ data_count = len(local_select_data)
+ if data_count == 0:
+ # mw.writeFile(table_name_pos_file, '0')
+ print(table_name+",data is equal ok..")
+ is_break = True
+ break
+
+ # print(table_name,data_count)
+ pos = local_select_data[data_count-1][pkey_name]
+ print('pos',pos)
+ progress = pos/sync_count_data[0]['num']
+ print('progress,%.2f' % progress+'%')
+ mw.writeFile(table_name_pos_file, str(pos))
+ else:
+ sync_select_data_len = len(sync_select_data)
+ skip_idx = 0
+ # 主库PK -> 查询本地 | 保证一致
+ if sync_select_data_len > 0:
+ for idx in range(sync_select_data_len):
+ sync_idx_data = sync_select_data[idx]
+ local_idx_data = None
+ if idx < len(local_select_data):
+ local_idx_data = local_select_data[idx]
+ if sync_select_data[idx] == local_idx_data:
+ skip_idx = idx
+ pos = local_select_data[idx][pkey_name]
+ mw.writeFile(table_name_pos_file, str(pos))
+
+ # print(insert_data)
+ local_inquery_sql = 'select * from ' + table_name+ ' where ' +pkey_name+' = '+ str(sync_idx_data[pkey_name])
+ # print(local_inquery_sql)
+ ldata = local_db.query(local_inquery_sql)
+ # print('ldata:',ldata)
+ if len(ldata) == 0:
+ print("id:"+ str(sync_idx_data[pkey_name])+ " no exists, insert")
+ insert_sql = 'insert into ' + table_name
+ field_str = ''
+ value_str = ''
+ for field in sync_idx_data:
+ field_str += '`'+field+'`,'
+ value_str += '\''+escape_string(str(sync_idx_data[field]))+'\','
+ field_str = '(' +field_str.strip(',')+')'
+ value_str = '(' +value_str.strip(',')+')'
+ insert_sql = insert_sql+' '+field_str+' values'+value_str+';'
+ print(insert_sql)
+ r = local_db.execute(insert_sql)
+ print(r)
+ else:
+ # print('compare sync->local:',sync_idx_data == ldata[0] )
+ if ldata[0] == sync_idx_data:
+ continue
+
+ print("id:"+ str(sync_idx_data[pkey_name])+ " data is not equal, update")
+ update_sql = 'update ' + table_name
+ field_str = ''
+ value_str = ''
+ for field in sync_idx_data:
+ if field == pkey_name:
+ continue
+ field_str += '`'+field+'`=\''+escape_string(str(sync_idx_data[field]))+'\','
+ field_str = field_str.strip(',')
+ update_sql = update_sql+' set '+field_str+' where '+pkey_name+'=\''+str(sync_idx_data[pkey_name])+'\';'
+ print(update_sql)
+ r = local_db.execute(update_sql)
+ print(r)
+
+ # 本地PK -> 查询主库 | 保证一致
+ # local_select_data_len = len(local_select_data)
+ # if local_select_data_len > 0:
+ # for idx in range(local_select_data_len):
+ # if idx < skip_idx:
+ # continue
+ # local_idx_data = local_select_data[idx]
+ # print('local idx check', idx, skip_idx)
+ # local_inquery_sql = 'select * from ' + table_name+ ' where ' +pkey_name+' = '+ str(local_idx_data[pkey_name])
+ # print(local_inquery_sql)
+ # sdata = sync_db.query(local_inquery_sql)
+ # sdata_len = len(sdata)
+ # print('sdata:',sdata,sdata_len)
+ # if sdata_len == 0:
+ # delete_sql = 'delete from ' + table_name + ' where ' +pkey_name+' = '+ str(local_idx_data[pkey_name])
+ # print(delete_sql)
+ # r = local_db.execute(delete_sql)
+ # print(r)
+ # break
+
+
+ if is_break:
+ print("break all")
+ break
+ time.sleep(3)
+ print(f'data check cos:{time.time() - time_stats_s:.4f}s')
+ print("data supplementation completed")
+ mw.execShell('rm -rf '+tmp_dir)
+ return 'ok'
+
############### --- 重要 同步---- ###########
def asyncTmpfile():
@@ -3028,7 +3322,20 @@ def asyncTmpfile():
def writeDbSyncStatus(data):
path = asyncTmpfile()
mw.writeFile(path, json.dumps(data))
+ return True
+def fullSyncCmd():
+ time_all_s = time.time()
+ args = getArgs()
+ data = checkArgs(args, ['db', 'sign'])
+ if not data[0]:
+ return data[1]
+
+ db = args['db']
+ sign = args['sign']
+
+ cmd = 'cd '+mw.getServerDir()+'/mdserver-web && source bin/activate && python3 plugins/mysql/index.py do_full_sync {"db":"'+db+'","sign":"'+sign+'"}'
+ return mw.returnJson(True,'ok',cmd)
def doFullSync(version=''):
mode_file = getSyncModeFile()
@@ -3042,7 +3349,53 @@ def doFullSync(version=''):
return doFullSyncUser(version)
+def isSimpleSyncCmd(sql):
+ new_sql = sql.lower()
+ if new_sql.find('master_auto_position') > 0:
+ return False
+ return True
+
+def getChannelNameForCmd(cmd):
+ cmd = cmd.lower()
+ cmd_arr = cmd.split('channel')
+ if len(cmd_arr) == 2:
+ cmd_channel_info = cmd_arr[1]
+ channel_name = cmd_channel_info.strip()
+ channel_name = channel_name.strip(';')
+ channel_name = channel_name.strip("'")
+ return channel_name
+ return ''
+
+
+
+def doFullSyncUserImportContentForChannel(file, channel_name):
+ # print(file, channel_name)
+ content = mw.readFile(file)
+
+ content = content.replace('STOP SLAVE;', "STOP SLAVE for channel '{}';".format(channel_name))
+ content = content.replace('START SLAVE;', "START SLAVE for channel '{}';".format(channel_name))
+
+ find_head = "CHANGE MASTER TO "
+ find_re = find_head+"(.*?);"
+ find_r = re.search(find_re, content, re.I|re.M)
+ if find_r:
+ find_rg = find_r.groups()
+ if len(find_rg)>0:
+ find_str = find_head+find_rg[0]
+ if find_str.lower().find('channel')==-1:
+ content = content.replace(find_str+';', find_str+" for channel '{}';".format(channel_name))
+
+ mw.writeFile(file,content)
+ return True
+
+
def doFullSyncUser(version=''):
+ which_pv = mw.execShell('which pv')
+ is_exist_pv = False
+ if os.path.exists(which_pv[0].strip()):
+ is_exist_pv = True
+
+ time_all_s = time.time()
args = getArgs()
data = checkArgs(args, ['db', 'sign'])
if not data[0]:
@@ -3063,6 +3416,9 @@ def doFullSyncUser(version=''):
db = pMysqlDb()
+ # 重置
+ # deleteSlaveFunc(sync_sign)
+
conn = pSqliteDb('slave_sync_user')
if sync_sign != '':
data = conn.field('ip,port,user,pass,mode,cmd').where(
@@ -3070,10 +3426,15 @@ def doFullSyncUser(version=''):
else:
data = conn.field('ip,port,user,pass,mode,cmd').find()
+ # print(data)
user = data['user']
apass = data['pass']
port = data['port']
ip = data['ip']
+ cmd = data['cmd']
+ channel_name = getChannelNameForCmd(cmd)
+
+ sync_mdb = getSyncMysqlDB(sync_db,sync_sign)
bak_file = '/tmp/tmp.sql'
if os.path.exists(bak_file):
@@ -3087,41 +3448,111 @@ def doFullSyncUser(version=''):
time.sleep(1)
writeDbSyncStatus({'code': 1, 'msg': '正在停止从库...', 'progress': 15})
- if version == '8.0':
+
+ mdb8 = ['8.0','8.1','8.2','8.3','8.4']
+ if mw.inArray(mdb8,version):
db.query("stop slave user='{}' password='{}';".format(user, apass))
else:
db.query("stop slave")
- time.sleep(2)
+ time.sleep(1)
writeDbSyncStatus({'code': 2, 'msg': '远程导出数据...', 'progress': 20})
+ # --master-data=2表示在dump过程中记录主库的binlog和pos点,并在dump文件中注释掉这一行
+ # --master-data=1表示在dump过程中记录主库的binlog和pos点,并在dump文件中不注释掉这一行,即恢复时会执行
+
+ # --dump-slave=2表示在dump过程中,在从库dump,mysqldump进程也要在从库执行,记录当时主库的binlog和pos点,并在dump文件中注释掉这一行
+ # --dump-slave=1表示在dump过程中,在从库dump,mysqldump进程也要在从库执行,记录当时主库的binlog和pos点,并在dump文件中不注释掉这一行
+
+ # --force --opt --single-transaction
+ # --skip-opt --create-options
+ # --master-data=1
+
+ find_run_dump = mw.execShell('ps -ef | grep mysqldump | grep -v grep')
+ if find_run_dump[0] != "":
+ print("正在远程导出数据中,别着急...")
+ writeDbSyncStatus({'code': 3.1, 'msg': '正在远程导出数据中,别着急...', 'progress': 19})
+ return False
+
+ time_s = time.time()
if not os.path.exists(bak_file):
- dump_sql_data = getServerDir() + "/bin/mysqldump " + dmp_option + " --force --opt --default-character-set=utf8 --single-transaction -h" + ip + " -P" + \
- port + " -u" + user + " -p'" + apass + \
- "' --ssl-mode=DISABLED " + sync_db + " > " + bak_file
- print(dump_sql_data)
- mw.execShell(dump_sql_data)
+ if isSimpleSyncCmd(cmd):
+ dmp_option += " --master-data=1 --apply-slave-statements --include-master-host-port "
+ else:
+ dmp_option += ' '
+ dump_sql_data = getServerDir() + "/bin/mysqldump --single-transaction --default-character-set=utf8mb4 --compress -q " + dmp_option + " -h" + ip + " -P" + \
+ port + " -u" + user + " -p'" + apass + "' --ssl-mode=DISABLED " + sync_db + " > " + bak_file
+ print(dump_sql_data)
+ time_s = time.time()
+ r = mw.execShell(dump_sql_data)
+ print(r)
+ time_e = time.time()
+ export_cos = time_e - time_s
+ print("export cos:", export_cos)
- writeDbSyncStatus({'code': 3, 'msg': '正在到本地导入数据中...', 'progress': 40})
+ writeDbSyncStatus({'code': 3, 'msg': '导出耗时:'+str(int(export_cos))+'秒,正在到本地导入数据中...', 'progress': 40})
+
+ find_run_import = mw.execShell('ps -ef | grep mysql| grep '+ bak_file +' | grep -v grep')
+ if find_run_import[0] != "":
+ print("正在导入数据中,别着急...")
+ writeDbSyncStatus({'code': 4.1, 'msg': '正在导入数据中,别着急...', 'progress': 39})
+ return False
+
+ time_s = time.time()
if os.path.exists(bak_file):
+ # 重置
+ db.execute('reset master')
+
+ # 加快导入 - 开始
+ # db.execute('set global innodb_flush_log_at_trx_commit = 2')
+ # db.execute('set global sync_binlog = 2000')
+
+ if channel_name != '':
+ doFullSyncUserImportContentForChannel(bak_file, channel_name)
+
pwd = pSqliteDb('config').where('id=?', (1,)).getField('mysql_root')
sock = getSocketFile()
- my_import_cmd = getServerDir() + '/bin/mysql -S ' + sock + " -uroot -p'" + pwd + \
- "' " + sync_db_import + ' < ' + bak_file
- print(my_import_cmd)
- mw.execShell(my_import_cmd)
- if version == '8.0':
+ if is_exist_pv:
+ my_import_cmd = getServerDir() + '/bin/mysql -S ' + sock + " -uroot -p'" + pwd + "' " + sync_db_import
+ my_import_cmd = "pv -t -p " + bak_file + '|' + my_import_cmd
+ print(my_import_cmd)
+ os.system(my_import_cmd)
+ else:
+ my_import_cmd = getServerDir() + '/bin/mysql -S ' + sock + " -uroot -p'" + pwd + "' " + sync_db_import + ' < ' + bak_file
+ print(my_import_cmd)
+ mw.execShell(my_import_cmd)
+
+ # 加快导入 - 结束
+ # db.execute('set global innodb_flush_log_at_trx_commit = 1')
+ # db.execute('set global sync_binlog = 1')
+
+ time_e = time.time()
+ import_cos = time_e - time_s
+ print("import cos:", import_cos)
+ writeDbSyncStatus({'code': 4, 'msg': '导入耗时:'+str(int(import_cos))+'秒', 'progress': 60})
+
+ time.sleep(3)
+ # print(cmd)
+ # r = db.query(cmd)
+ # print(r)
+
+ mdb8 = ['8.0','8.1','8.2','8.3','8.4']
+ if mw.inArray(mdb8,version):
db.query("start slave user='{}' password='{}';".format(user, apass))
else:
db.query("start slave")
- writeDbSyncStatus({'code': 6, 'msg': '从库重启完成...', 'progress': 100})
+ db.query("start all slaves")
+ time_all_e = time.time()
+ cos = time_all_e - time_all_s
+ writeDbSyncStatus({'code': 6, 'msg': '总耗时:'+str(int(cos))+'秒,从库重启完成...', 'progress': 100})
if os.path.exists(bak_file):
os.system("rm -rf " + bak_file)
+
return True
@@ -3252,10 +3683,13 @@ def doFullSyncSSH(version=''):
print(import_data[0])
writeDbSyncStatus({'code': 5, 'msg': '导入数据失败...', 'progress': 100})
return 'fail'
-
- # "start slave user='{}' password='{}';".format(uinfo['username'], uinfo['password'])
-
- db.query("start slave")
+
+ mdb8 = ['8.0','8.1','8.2','8.3','8.4']
+ if mw.inArray(mdb8,version):
+ db.query("start slave user='{}' password='{}';".format(uinfo['username'], uinfo['password']))
+ else:
+ db.query("start slave")
+
writeDbSyncStatus({'code': 6, 'msg': '从库重启完成...', 'progress': 100})
os.system("rm -rf " + SSH_PRIVATE_KEY)
@@ -3300,9 +3734,16 @@ def fullSync(version=''):
def installPreInspection(version):
+ import psutil
+ mem = psutil.virtual_memory()
+ memTotal = mem.total
+ memG = memTotal/1024/1024/1024
+ if memG > 2:
+ return 'ok'
+
swap_path = mw.getServerDir() + "/swap"
if not os.path.exists(swap_path):
- return "为了稳定安装MySQL,先安装swap插件!"
+ return "内存小,为了稳定安装MySQL,先安装swap插件!"
return 'ok'
@@ -3494,7 +3935,13 @@ if __name__ == "__main__":
print(fullSync(version))
elif func == 'do_full_sync':
print(doFullSync(version))
+ elif func == 'full_sync_cmd':
+ print(fullSyncCmd())
elif func == 'dump_mysql_data':
print(dumpMysqlData(version))
+ elif func == 'sync_database_repair':
+ print(syncDatabaseRepair())
+ elif func == 'sync_database_repair_log':
+ print(syncDatabaseRepairLog())
else:
print('error')
diff --git a/plugins/mysql/install.sh b/plugins/mysql/install.sh
index da748592f..45718b124 100755
--- a/plugins/mysql/install.sh
+++ b/plugins/mysql/install.sh
@@ -6,8 +6,11 @@ export PATH
# https://www.cnblogs.com/whiteY/p/17331882.html
# cd /www/server/mdserver-web/plugins/mysql && bash install.sh install 8.2
-# cd /www/server/mdserver-web && source bin/activate && python3 /www/server/mdserver-web/plugins/mysql/index.py try_slave_sync_bugfix {}
-
+# cd /www/server/mdserver-web && source bin/activate && python3 plugins/mysql/index.py try_slave_sync_bugfix {}
+# cd /www/server/mdserver-web && source bin/activate && python3 plugins/mysql/index.py do_full_sync {"db":"xxx","sign":"","begin":1}
+# cd /www/server/mdserver-web && source bin/activate && python3 plugins/mysql/index.py sync_database_repair {"db":"xxx","sign":""}
+# cd /www/server/mdserver-web && source bin/activate && python3 plugins/mysql/index.py init_slave_status
+# cd /www/server/mdserver-web && source bin/activate && python3 plugins/mysql/index.py install_pre_inspection
curPath=`pwd`
rootPath=$(dirname "$curPath")
rootPath=$(dirname "$rootPath")
diff --git a/plugins/mysql/js/mysql.js b/plugins/mysql/js/mysql.js
index 1ccb51c8a..47d9f895a 100755
--- a/plugins/mysql/js/mysql.js
+++ b/plugins/mysql/js/mysql.js
@@ -1985,6 +1985,7 @@ function getFullSyncStatus(db){
\
\
开始 \
+ 手动命令 \
\
",
cancel: function(){
@@ -2003,10 +2004,36 @@ function getFullSyncStatus(db){
fullSync(db,sign,0);
}, 1000);
$(this).data('status','starting');
+ $('#begin_full_sync').text("同步中");
} else {
layer.msg("正在同步中..",{icon:0});
}
});
+
+ $('#full_sync_cmd').click(function(){
+ myPostN('full_sync_cmd', {'db':db,'sign':''}, function(rdata){
+ var rdata = $.parseJSON(rdata.data);
+ layer.open({
+ title: "手动执行命令CMD",
+ area: ['600px', '180px'],
+ type:1,
+ closeBtn: 1,
+ shadeClose: false,
+ btn:["复制","取消"],
+ content: '',
+ success:function(){
+ copyText(rdata.data);
+ },
+ yes:function(){
+ copyText(rdata.data);
+ }
+ });
+ });
+ });
}
});
});
@@ -2022,12 +2049,91 @@ function getFullSyncStatus(db){
if (rdata['code']==6 ||rdata['code']<0){
layer.msg(rdata['msg']);
clearInterval(timeId);
+ $('#begin_full_sync').text("同步结束,再次同步?");
$("#begin_full_sync").attr('data-status','init');
}
});
}
}
+function dataSyncVerify(db){
+ var reqTimer = null;
+
+ function requestLogs(layerIndex){
+ myPostN('sync_database_repair_log', {db:db, sign:'',op:'get'}, function(rdata){
+ var rdata = $.parseJSON(rdata.data);
+
+ if(!rdata.status) {
+ layer.close(layerIndex);
+ layer.msg(rdata.msg,{icon:2, time:2000});
+ clearInterval(reqTimer);
+ return;
+ };
+
+ if (rdata.msg == ''){
+ rdata.msg = '暂无数据!';
+ }
+
+ $("#data_verify_log").html(rdata.msg);
+ //滚动到最低
+ var ob = document.getElementById('data_verify_log');
+ ob.scrollTop = ob.scrollHeight;
+ });
+ }
+
+ layer.open({
+ type: 1,
+ title: '同步数据库['+db+']数据校验',
+ area: '500px',
+ btn:[ "开始","取消","手动"],
+ content:"",
+ cancel: function(){
+ if (reqTimer){
+ clearInterval(reqTimer);
+ }
+ },
+ yes:function(index,layer_index){
+ myPostN('sync_database_repair_log', {db:db, sign:'',op:'do'}, function(data){});
+ layer.msg("执行成功");
+
+ requestLogs(layer_index);
+ reqTimer = setInterval(function(){
+ requestLogs(layer_index);
+ },3000);
+ },
+ success:function(){
+ },
+ btn3: function(){
+ myPostN('sync_database_repair_log', {db:db, sign:'',op:'cmd'}, function(rdata){
+ var rdata = $.parseJSON(rdata.data);
+ layer.open({
+ title: "手动执行命令CMD",
+ area: ['600px', '180px'],
+ type:1,
+ closeBtn: 1,
+ shadeClose: false,
+ btn:["复制","取消"],
+ content: '',
+ success:function(){
+ copyText(rdata.data);
+ },
+ yes:function(){
+ copyText(rdata.data);
+ }
+ });
+ });
+ return false;
+ }
+
+ });
+}
+
function addSlaveSSH(ip=''){
myPost('get_slave_ssh_by_ip', {ip:ip}, function(rdata){
@@ -2183,7 +2289,7 @@ function addSlaveSyncUser(ip=''){
var index = layer.open({
type: 1,
- area: ['500px','470px'],
+ area: ['500px','510px'],
title: '同步账户',
closeBtn: 1,
shift: 5,
@@ -2194,6 +2300,15 @@ function addSlaveSyncUser(ip=''){
\
\
\
+ \
+
同步模式 \
+
\
+ \
+ 经典 \
+ GTID \
+ \
+
\
+
\
\
CMD[必须填写] \
\
@@ -2204,6 +2319,7 @@ function addSlaveSyncUser(ip=''){
$('textarea[name="cmd"]').html(cmd);
$('textarea[name="cmd"]').change(function(){
var val = $(this).val();
+ val = val.replace(';','');
var a = {};
if (val.toLowerCase().indexOf('for')>0){
cmd_tmp = val.split('for');
@@ -2230,11 +2346,6 @@ function addSlaveSyncUser(ip=''){
$('input[name="port"]').val(a['MASTER_PORT']);
$('input[name="user"]').val(a['MASTER_USER']);
$('input[name="pass"]').val(a['MASTER_PASSWORD']);
-
- console.log(a['MASTER_AUTO_POSITION'],typeof(a['MASTER_AUTO_POSITION']));
- if (typeof(a['MASTER_AUTO_POSITION']) != 'undefined' ){
- $('input[name="mode"]').val('1');
- }
});
},
yes:function(index){
@@ -2243,7 +2354,7 @@ function addSlaveSyncUser(ip=''){
var user = $('input[name="user"]').val();
var pass = $('input[name="pass"]').val();
var cmd = $('textarea[name="cmd"]').val();
- var mode = $('input[name="mode"]').val();
+ var mode = $('select[name="mode"]').val();
var data = {ip:ip,port:port,cmd:cmd,user:user,pass:pass,mode:mode};
myPost('add_slave_sync_user', data, function(ret_data){
@@ -2521,7 +2632,7 @@ function masterOrSlaveConf(version=''){
for(i in rdata.data){
var v = rdata.data[i];
- if ('Channel_Name' in v){
+ if ('Channel_Name' in v && v['Channel_Name'] !=''){
isHasSign = true;
}
@@ -2692,7 +2803,8 @@ function masterOrSlaveConf(version=''){
list += '
' + rdata.data[i]['name'] +' ';
list += '
' +
''+(rdata.data[i]['slave']?'退出':'加入')+' | ' +
- '同步 ' +
+ '同步 | ' +
+ '数据校验 ' +
' ';
list += '';
}
@@ -2824,10 +2936,10 @@ function masterOrSlaveConf(version=''){
getMasterDbList();
}
- // if (rdata.slave_status){
+ if (rdata.slave_status){
getAsyncMasterDbList();
getAsyncDataList()
- // }
+ }
});
}
getMasterStatus();
diff --git a/plugins/openresty/conf/nginx.conf b/plugins/openresty/conf/nginx.conf
index 066868f62..9533df6c7 100644
--- a/plugins/openresty/conf/nginx.conf
+++ b/plugins/openresty/conf/nginx.conf
@@ -51,7 +51,7 @@ http
gzip_min_length 1k;
gzip_buffers 4 16k;
gzip_http_version 1.1;
- gzip_comp_level 2;
+ gzip_comp_level 9;
gzip_types text/plain application/javascript application/x-javascript text/javascript text/css application/xml;
gzip_vary on;
gzip_proxied expired no-cache no-store private auth;
diff --git a/plugins/php/versions/common/mongodb.sh b/plugins/php/versions/common/mongodb.sh
index 616543409..3f60b9f79 100755
--- a/plugins/php/versions/common/mongodb.sh
+++ b/plugins/php/versions/common/mongodb.sh
@@ -17,6 +17,10 @@ sysName=`uname`
actionType=$1
version=$2
+if [ "$version" -ge '74' ];then
+ LIBV=1.19.0
+fi
+
if [ "$version" == '71' ];then
LIBV=1.11.1
fi
diff --git a/plugins/redis/config/redis.conf b/plugins/redis/config/redis.conf
index 02ea8dfeb..e0d806668 100644
--- a/plugins/redis/config/redis.conf
+++ b/plugins/redis/config/redis.conf
@@ -1,5 +1,5 @@
daemonize yes
-pidfile {$SERVER_PATH}/redis/redis_6379.pid
+pidfile {$SERVER_PATH}/redis/redis.pid
bind 127.0.0.1
port 6379
@@ -40,15 +40,20 @@ maxclients 10000
#maxmemory-samples 3
maxmemory 218mb
maxmemory-policy volatile-ttl
+#maxmemory-policy allkeys-lru
############################## APPEND ONLY MODE ###############################
-#appendonly no
+# appendonly no
+
# appendfsync always
-#appendfsync everysec
+# appendfsync everysec
# appendfsync no
-#no-appendfsync-on-rewrite no
+
+# appendfilename "appendonly.aof"
+
+# no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
diff --git a/plugins/redis/init.d/redis.tpl b/plugins/redis/init.d/redis.tpl
index b7cf93630..fb4f76e46 100644
--- a/plugins/redis/init.d/redis.tpl
+++ b/plugins/redis/init.d/redis.tpl
@@ -23,7 +23,7 @@ if [ "$REDISPASS" != "" ];then
fi
EXEC={$SERVER_PATH}/redis/bin/redis-server
CLIEXEC="{$SERVER_PATH}/redis/bin/redis-cli -p $REDISPORT$REDISPASS"
-PIDFILE={$SERVER_PATH}/redis/redis_6379.pid
+PIDFILE={$SERVER_PATH}/redis/redis.pid
echo $REDISPASS
echo $REDISPORT
diff --git a/plugins/redis/install.sh b/plugins/redis/install.sh
index 22b40fd23..444726156 100755
--- a/plugins/redis/install.sh
+++ b/plugins/redis/install.sh
@@ -25,14 +25,16 @@ Install_App()
{
echo '正在安装脚本文件...' > $install_tmp
mkdir -p $serverPath/source
+ mkdir -p $serverPath/source/redis
- if [ ! -f $serverPath/source/redis-${VERSION}.tar.gz ];then
- wget -O $serverPath/source/redis-${VERSION}.tar.gz http://download.redis.io/releases/redis-${VERSION}.tar.gz
- fi
-
- cd $serverPath/source && tar -zxvf redis-${VERSION}.tar.gz
+ FILE_TGZ=redis-${VERSION}.tar.gz
+ REDIS_DIR=$serverPath/source/redis
+ if [ ! -f $REDIS_DIR/${FILE_TGZ} ];then
+ wget -O $REDIS_DIR/${FILE_TGZ} http://download.redis.io/releases/${FILE_TGZ}
+ fi
+ cd $REDIS_DIR && tar -zxvf ${FILE_TGZ}
CMD_MAKE=`which gmake`
if [ "$?" == "0" ];then
@@ -55,8 +57,8 @@ Install_App()
echo '安装失败!'
fi
- if [ -d $serverPath/source/redis-${VERSION} ];then
- rm -rf $serverPath/source/redis-${VERSION}
+ if [ -d ${REDIS_DIR}/redis-${VERSION} ];then
+ rm -rf ${REDIS_DIR}/redis-${VERSION}
fi
}
@@ -80,8 +82,11 @@ Uninstall_App()
$serverPath/redis/initd/redis stop
fi
- rm -rf $serverPath/redis
- echo "Uninstall_redis" > $install_tmp
+ if [ -d $serverPath/redis ];then
+ rm -rf $serverPath/redis
+ fi
+
+ echo "卸载redis成功"
}
action=$1
diff --git a/plugins/redis/tpl/redis_cluster.conf b/plugins/redis/tpl/redis_cluster.conf
index 7c2364132..ed0952156 100644
--- a/plugins/redis/tpl/redis_cluster.conf
+++ b/plugins/redis/tpl/redis_cluster.conf
@@ -1,5 +1,5 @@
daemonize yes
-pidfile {$SERVER_PATH}/redis/redis_6379.pid
+pidfile {$SERVER_PATH}/redis/redis.pid
loglevel notice
logfile {$SERVER_PATH}/redis/data/redis.log
diff --git a/plugins/redis/tpl/redis_simple.conf b/plugins/redis/tpl/redis_simple.conf
index d38e0e7fe..aa0945783 100644
--- a/plugins/redis/tpl/redis_simple.conf
+++ b/plugins/redis/tpl/redis_simple.conf
@@ -1,5 +1,5 @@
daemonize yes
-pidfile {$SERVER_PATH}/redis/redis_6379.pid
+pidfile {$SERVER_PATH}/redis/redis.pid
bind 127.0.0.1
port 6379
diff --git a/plugins/redis/tpl/redis_slave.conf b/plugins/redis/tpl/redis_slave.conf
index 3415444e9..ddc9ab2a0 100644
--- a/plugins/redis/tpl/redis_slave.conf
+++ b/plugins/redis/tpl/redis_slave.conf
@@ -1,16 +1,17 @@
daemonize yes
-pidfile {$SERVER_PATH}/redis/redis_6379.pid
+pidfile {$SERVER_PATH}/redis/redis.pid
-loglevel notice
-logfile {$SERVER_PATH}/redis/data/redis.log
-databases 16
+bind 127.0.0.1
+port 6379
+requirepass {$REDIS_PASS}
timeout 0
tcp-keepalive 0
-bind 127.0.0.1
-port 6379
-requirepass {$REDIS_PASS}
+loglevel notice
+
+logfile {$SERVER_PATH}/redis/data/redis.log
+databases 16
################################ SNAPSHOTTING #################################
@@ -33,7 +34,7 @@ slave-priority 100
# 填写主库信息
#slaveof 127.0.0.1 6379
-#masterauth master_pwd
+#masterauth 123123
################################## SECURITY ###################################
@@ -41,7 +42,7 @@ slave-priority 100
################################### LIMITS ####################################
maxclients 10000
#maxmemory-samples 3
-maxmemory 218mb
+maxmemory 0mb
maxmemory-policy volatile-ttl
############################## APPEND ONLY MODE ###############################
diff --git a/plugins/redis/tpl/redis_slave_mem.conf b/plugins/redis/tpl/redis_slave_mem.conf
new file mode 100644
index 000000000..bbe35527f
--- /dev/null
+++ b/plugins/redis/tpl/redis_slave_mem.conf
@@ -0,0 +1,75 @@
+daemonize yes
+pidfile {$SERVER_PATH}/redis/redis.pid
+
+loglevel notice
+logfile {$SERVER_PATH}/redis/data/redis.log
+databases 16
+
+timeout 0
+tcp-keepalive 0
+
+bind 127.0.0.1
+port 6379
+requirepass {$REDIS_PASS}
+
+################################ SNAPSHOTTING #################################
+
+save ""
+stop-writes-on-bgsave-error no
+
+################################# REPLICATION #################################
+
+slave-serve-stale-data yes
+slave-read-only yes
+
+repl-disable-tcp-nodelay no
+slave-priority 100
+
+# 填写主库信息
+#slaveof 127.0.0.1 6379
+#masterauth 123123
+
+################################## SECURITY ###################################
+
+
+################################### LIMITS ####################################
+maxclients 10000
+#maxmemory-samples 3
+maxmemory 218mb
+maxmemory-policy allkeys-lru
+
+############################## APPEND ONLY MODE ###############################
+
+
+################################ LUA SCRIPTING ###############################
+
+lua-time-limit 5000
+
+################################## SLOW LOG ###################################
+
+
+slowlog-log-slower-than 10000
+slowlog-max-len 128
+
+############################### ADVANCED CONFIG ###############################
+
+hash-max-ziplist-entries 512
+hash-max-ziplist-value 64
+
+list-max-ziplist-entries 512
+list-max-ziplist-value 64
+
+set-max-intset-entries 512
+
+zset-max-ziplist-entries 128
+zset-max-ziplist-value 64
+
+activerehashing no
+
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit slave 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60
+
+hz 10
+
+aof-rewrite-incremental-fsync yes
\ No newline at end of file
diff --git a/plugins/sphinx/class/sphinx_make.py b/plugins/sphinx/class/sphinx_make.py
new file mode 100644
index 000000000..0ccb1f409
--- /dev/null
+++ b/plugins/sphinx/class/sphinx_make.py
@@ -0,0 +1,486 @@
+# coding:utf-8
+
+import sys
+import io
+import os
+import time
+import subprocess
+import re
+import json
+
+
+sys.path.append(os.getcwd() + "/class/core")
+import mw
+
+
+def getServerDir():
+ return mw.getServerDir() + '/mysql'
+
+def getPluginDir():
+ return mw.getPluginDir() + '/mysql'
+
+def getConf():
+ path = getServerDir() + '/etc/my.cnf'
+ return path
+
+def getDbPort():
+ file = getConf()
+ content = mw.readFile(file)
+ rep = 'port\s*=\s*(.*)'
+ tmp = re.search(rep, content)
+ return tmp.groups()[0].strip()
+
+def getSocketFile():
+ file = getConf()
+ content = mw.readFile(file)
+ rep = 'socket\s*=\s*(.*)'
+ tmp = re.search(rep, content)
+ return tmp.groups()[0].strip()
+
+def pSqliteDb(dbname='databases'):
+ file = getServerDir() + '/mysql.db'
+ name = 'mysql'
+
+ conn = mw.M(dbname).dbPos(getServerDir(), name)
+ return conn
+
+def pMysqlDb():
+ # pymysql
+ db = mw.getMyORM()
+
+ db.setPort(getDbPort())
+ db.setSocket(getSocketFile())
+ # db.setCharset("utf8")
+ db.setPwd(pSqliteDb('config').where('id=?', (1,)).getField('mysql_root'))
+ return db
+
+class sphinxMake():
+
+ pdb = None
+ psdb = None
+
+ pkey_name_cache = {}
+ delta = 'sph_counter'
+ ver = ''
+
+
+ def __init__(self):
+ self.pdb = pMysqlDb()
+
+ def setDeltaName(self, name):
+ self.delta = name
+ return True
+
+ def setVersion(self, ver):
+ self.ver = ver
+
+ def createSql(self, db):
+ conf = '''
+CREATE TABLE IF NOT EXISTS `{$DB_NAME}`.`{$TABLE_NAME}` (
+ `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ `table` varchar(200) NOT NULL,
+ `max_id` bigint(20) unsigned NOT NULL DEFAULT '0',
+ PRIMARY KEY (`id`),
+ UNIQUE KEY `table_uniq` (`table`),
+ KEY `table` (`table`)
+) ENGINE=InnoDB AUTO_INCREMENT=1 CHARSET=utf8mb4;
+'''
+ conf = conf.replace("{$TABLE_NAME}", self.delta)
+ conf = conf.replace("{$DB_NAME}", db)
+ return conf
+
+ def eqVerField(self, field):
+ ver = self.ver.replace(".1",'')
+ if float(ver) >= 3.6:
+ if field == 'sql_attr_timestamp':
+ return 'attr_bigint'
+
+ if field == 'sql_attr_bigint':
+ return 'attr_bigint'
+
+ if field == 'sql_attr_float':
+ return 'attr_float'
+
+ if field == 'sql_field_string':
+ return 'field_string'
+
+ if float(ver) >= 3.3:
+ if field == 'sql_attr_timestamp':
+ return 'sql_attr_bigint'
+
+ return field
+
+ def pathVerName(self):
+ ver = self.ver.replace(".1",'')
+ # if float(ver) >= 3.6:
+ # return 'datadir'
+ return 'path'
+
+ def getTablePk(self, db, table):
+ key = db+'_'+table
+ if key in self.pkey_name_cache:
+ return self.pkey_name_cache[key]
+
+ # SHOW INDEX FROM bbs.bbs_ucenter_vars WHERE Key_name = 'PRIMARY'
+ pkey_sql = "SHOW INDEX FROM {}.{} WHERE Key_name = 'PRIMARY';".format(db,table,);
+ pkey_data = self.pdb.query(pkey_sql)
+
+ # print(db, table)
+ # print(pkey_data)
+ key = ''
+ if len(pkey_data) == 1:
+ pkey_name = pkey_data[0]['Column_name']
+ sql = "select COLUMN_NAME,DATA_TYPE from information_schema.COLUMNS where `TABLE_SCHEMA`='{}' and `TABLE_NAME` = '{}' and `COLUMN_NAME`='{}';"
+ sql = sql.format(db,table,pkey_name,)
+ # print(sql)
+ fields = self.pdb.query(sql)
+
+ if len(fields) == 1:
+ # print(fields[0]['DATA_TYPE'])
+ if mw.inArray(['bigint','smallint','tinyint','int','mediumint'], fields[0]['DATA_TYPE']):
+ key = pkey_name
+ return key
+
+
+ def getTableFieldStr(self, db, table):
+ sql = "select COLUMN_NAME,DATA_TYPE from information_schema.COLUMNS where `TABLE_SCHEMA`='{}' and `TABLE_NAME` = '{}';"
+ sql = sql.format(db,table,)
+ fields = self.pdb.query(sql)
+
+ field_str = ''
+ for x in range(len(fields)):
+ field_str += '`'+fields[x]['COLUMN_NAME']+'`,'
+
+ field_str = field_str.strip(',')
+ return field_str
+
+ def makeSphinxHeader(self):
+ conf = '''
+indexer
+{
+ mem_limit = 128M
+}
+
+searchd
+{
+ listen = 9312
+ listen = 9306:mysql41
+ log = {$server_dir}/sphinx/index/searchd.log
+ query_log = {$server_dir}/sphinx/index/query.log
+ read_timeout = 5
+ max_children = 0
+ pid_file = {$server_dir}/sphinx/index/searchd.pid
+ seamless_rotate = 1
+ preopen_indexes = 1
+ unlink_old = 1
+ #workers = threads # for RT to work
+ binlog_path = {$server_dir}/sphinx/index/binlog
+}
+ '''
+ conf = conf.replace("{$server_dir}", mw.getServerDir())
+ return conf
+
+ def makeSphinxDbSourceRangeSql(self, db, table):
+ pkey_name = self.getTablePk(db,table)
+ sql = "SELECT min("+pkey_name+"), max("+pkey_name+") FROM "+table
+ return sql
+
+ def makeSphinxDbSourceQuerySql(self, db, table):
+ pkey_name = self.getTablePk(db,table)
+ field_str = self.getTableFieldStr(db,table)
+ # print(field_str)
+ if pkey_name == 'id':
+ sql = "SELECT " + field_str + " FROM " + table + " where id >= $start AND id <= $end"
+ else:
+ sql = "SELECT `"+pkey_name+'` as `id`,' + field_str + " FROM " + table + " where "+pkey_name+" >= $start AND "+pkey_name+" <= $end"
+ return sql
+
+
+ def makeSphinxDbSourceDeltaRange(self, db, table):
+ pkey_name = self.getTablePk(db,table)
+ conf = "SELECT (SELECT max_id FROM `{$SPH_TABLE}` where `table`='{$TABLE_NAME}') as min, (SELECT max({$PK_NAME}) FROM {$TABLE_NAME}) as max"
+ conf = conf.replace("{$DB_NAME}", db)
+ conf = conf.replace("{$TABLE_NAME}", table)
+ conf = conf.replace("{$SPH_TABLE}", self.delta)
+ conf = conf.replace("{$PK_NAME}", pkey_name)
+ return conf
+
+ def makeSphinxDbSourcePost(self, db, table):
+ pkey_name = self.getTablePk(db,table)
+ conf = "sql_query_post = UPDATE {$SPH_TABLE} SET max_id=(SELECT MAX({$PK_NAME}) FROM {$TABLE_NAME}) where `table`='{$TABLE_NAME}'"
+ # conf = "REPLACE INTO {$SPH_TABLE} (`table`,`max_id`) VALUES ('{$TABLE_NAME}',(SELECT MAX({$PK_NAME}) FROM {$TABLE_NAME}))"
+ conf = conf.replace("{$DB_NAME}", db)
+ conf = conf.replace("{$TABLE_NAME}", table)
+ conf = conf.replace("{$SPH_TABLE}", self.delta)
+ conf = conf.replace("{$PK_NAME}", pkey_name)
+ return conf
+
+ def makeSphinxDbSourceDelta(self, db, table):
+ conf = '''
+source {$DB_NAME}_{$TABLE_NAME}_delta:{$DB_NAME}_{$TABLE_NAME}
+{
+ sql_query_pre = SET NAMES utf8
+ sql_query_range = {$DELTA_RANGE}
+ sql_query = {$DELTA_QUERY}
+ {$DELTA_UPDATE}
+
+{$SPH_FIELD}
+}
+
+index {$DB_NAME}_{$TABLE_NAME}_delta:{$DB_NAME}_{$TABLE_NAME}
+{
+ source = {$DB_NAME}_{$TABLE_NAME}_delta
+ {$PATH_NAME} = {$server_dir}/sphinx/index/db/{$DB_NAME}.{$TABLE_NAME}/delta
+
+ html_strip = 1
+ ngram_len = 1
+ ngram_chars = U+3000..U+2FA1F
+
+{$SPH_FIELD_INDEX}
+}
+''';
+ conf = conf.replace("{$server_dir}", mw.getServerDir())
+ conf = conf.replace("{$PATH_NAME}", self.pathVerName())
+
+ conf = conf.replace("{$DB_NAME}", db)
+ conf = conf.replace("{$TABLE_NAME}", table)
+
+ delta_range = self.makeSphinxDbSourceDeltaRange(db, table)
+ conf = conf.replace("{$DELTA_RANGE}", delta_range)
+
+ delta_query = self.makeSphinxDbSourceQuerySql(db, table)
+ conf = conf.replace("{$DELTA_QUERY}", delta_query)
+
+ delta_update = self.makeSphinxDbSourcePost(db, table)
+ conf = conf.replace("{$DELTA_UPDATE}", delta_update)
+
+
+ sph_field = self.makeSqlToSphinxTable(db, table)
+ conf = self.makeSphinxDbFieldRepalce(conf, sph_field)
+
+ return conf;
+
+ def makeSphinxDbSource(self, db, table, create_sphinx_table = False):
+ db_info = pSqliteDb('databases').field('username,password').where('name=?', (db,)).find()
+ port = getDbPort()
+
+ conf = '''
+source {$DB_NAME}_{$TABLE_NAME}
+{
+ type = mysql
+ sql_host = 127.0.0.1
+ sql_user = {$DB_USER}
+ sql_pass = {$DB_PASS}
+ sql_db = {$DB_NAME}
+ sql_port = {$DB_PORT}
+
+ sql_query_pre = SET NAMES utf8
+
+ {$UPDATE}
+
+ sql_query_range = {$DB_RANGE_SQL}
+ sql_range_step = 1000
+
+ sql_query = {$DB_QUERY_SQL}
+
+{$SPH_FIELD}
+}
+
+index {$DB_NAME}_{$TABLE_NAME}
+{
+ source = {$DB_NAME}_{$TABLE_NAME}
+ {$PATH_NAME} = {$server_dir}/sphinx/index/db/{$DB_NAME}.{$TABLE_NAME}/index
+
+ ngram_len = 1
+ ngram_chars = U+3000..U+2FA1F
+
+{$SPH_FIELD_INDEX}
+}
+ '''
+ conf = conf.replace("{$server_dir}", mw.getServerDir())
+ conf = conf.replace("{$PATH_NAME}", self.pathVerName())
+
+ conf = conf.replace("{$DB_NAME}", db)
+ conf = conf.replace("{$TABLE_NAME}", table)
+ conf = conf.replace("{$DB_USER}", db_info['username'])
+ conf = conf.replace("{$DB_PASS}", db_info['password'])
+ conf = conf.replace("{$DB_PORT}", port)
+
+ range_sql = self.makeSphinxDbSourceRangeSql(db, table)
+ conf = conf.replace("{$DB_RANGE_SQL}", range_sql)
+
+ query_sql = self.makeSphinxDbSourceQuerySql(db, table)
+ conf = conf.replace("{$DB_QUERY_SQL}", query_sql)
+
+ sph_field = self.makeSqlToSphinxTable(db, table)
+ # conf = conf.replace("{$SPH_FIELD}", sph_field)
+
+
+ conf = self.makeSphinxDbFieldRepalce(conf, sph_field)
+
+ if create_sphinx_table:
+ update = self.makeSphinxDbSourcePost(db, table)
+ conf = conf.replace("{$UPDATE}", update)
+ else:
+ conf = conf.replace("{$UPDATE}", '')
+
+ if create_sphinx_table:
+ sph_sql = self.createSql(db)
+ self.pdb.query(sph_sql)
+ sql_find = "select * from {}.{} where `table`='{}'".format(db,self.delta,table)
+ find_data = self.pdb.query(sql_find)
+ if len(find_data) == 0:
+ insert_sql = "insert into `{}`.`{}`(`table`,`max_id`) values ('{}',{}) ".format(db,self.delta,table,0)
+ # print(insert_sql)
+ self.pdb.execute(insert_sql)
+ conf += self.makeSphinxDbSourceDelta(db,table)
+
+ # print(ver)
+ # print(conf)
+
+ return conf
+
+ def makeSphinxDbFieldRepalce(self, content, sph_field):
+ ver = self.ver.replace(".1",'')
+ ver = float(ver)
+ if ver >= 3.6:
+ content = content.replace("{$SPH_FIELD}", '')
+ content = content.replace("{$SPH_FIELD_INDEX}", '')
+ else:
+ content = content.replace("{$SPH_FIELD}", sph_field)
+ content = content.replace("{$SPH_FIELD_INDEX}", '')
+
+ return content
+
+
+ def makeSqlToSphinxDb(self, db, table = [], is_delta = False):
+ conf = ''
+
+
+ for tn in table:
+ pkey_name = self.getTablePk(db,tn)
+ if pkey_name == '':
+ continue
+ conf += self.makeSphinxDbSource(db, tn,is_delta)
+
+ if len(table) == 0:
+ tables = self.pdb.query("show tables in "+ db)
+ for x in range(len(tables)):
+ key = 'Tables_in_'+db
+ table_name = tables[x][key]
+ pkey_name = self.getTablePk(db, table_name, is_delta)
+ if pkey_name == '':
+ continue
+
+ if self.makeSqlToSphinxTableIsHaveFulltext(db, table_name):
+ conf += self.makeSphinxDbSource(db, table_name)
+ return conf
+
+ def makeSqlToSphinxTableIsHaveFulltext(self, db, table):
+ sql = "select COLUMN_NAME,DATA_TYPE from information_schema.COLUMNS where `TABLE_SCHEMA`='{}' and `TABLE_NAME` = '{}';"
+ sql = sql.format(db,table,)
+ cols = self.pdb.query(sql)
+ cols_len = len(cols)
+
+ for x in range(cols_len):
+ data_type = cols[x]['DATA_TYPE']
+ column_name = cols[x]['COLUMN_NAME']
+
+ if mw.inArray(['varchar'], data_type):
+ return True
+ if mw.inArray(['text','mediumtext','tinytext','longtext'], data_type):
+ return True
+ return False
+
+ def makeSqlToSphinxTable(self,db,table):
+ pkey_name = self.getTablePk(db,table)
+ sql = "select COLUMN_NAME,DATA_TYPE from information_schema.COLUMNS where `TABLE_SCHEMA`='{}' and `TABLE_NAME` = '{}';"
+ sql = sql.format(db,table,)
+ cols = self.pdb.query(sql)
+ cols_len = len(cols)
+ conf = ''
+ run_pos = 0
+ for x in range(cols_len):
+ data_type = cols[x]['DATA_TYPE']
+ column_name = cols[x]['COLUMN_NAME']
+ # print(column_name+":"+data_type)
+
+ # if mw.inArray(['tinyint'], data_type):
+ # conf += 'sql_attr_bool = '+ column_name + "\n"
+
+ if pkey_name == column_name:
+ # run_pos += 1
+ # conf += '\tsql_attr_bigint = '+column_name+"\n"
+ continue
+
+ if mw.inArray(['enum'], data_type):
+ run_pos += 1
+ conf += '\t'+self.eqVerField('sql_attr_string')+' = '+ column_name + "\n"
+ continue
+
+ if mw.inArray(['decimal'], data_type):
+ run_pos += 1
+ conf += '\t'+self.eqVerField('sql_attr_float')+' = '+ column_name + "\n"
+ continue
+
+ if mw.inArray(['bigint','smallint','tinyint','int','mediumint'], data_type):
+ run_pos += 1
+ conf += '\t'+self.eqVerField('sql_attr_bigint')+' = '+ column_name + "\n"
+ continue
+
+
+ if mw.inArray(['float'], data_type):
+ run_pos += 1
+ conf += '\t'+self.eqVerField('sql_attr_float')+' = '+ column_name + "\n"
+ continue
+
+ if mw.inArray(['char'], data_type):
+ conf += '\t'+self.eqVerField('sql_attr_string')+' = '+ column_name + "\n"
+ continue
+
+ if mw.inArray(['varchar'], data_type):
+ run_pos += 1
+ conf += '\t'+self.eqVerField('sql_field_string')+' = '+ column_name + "\n"
+ continue
+
+ if mw.inArray(['text','mediumtext','tinytext','longtext'], data_type):
+ run_pos += 1
+ conf += '\t'+self.eqVerField('sql_field_string')+' = '+ column_name + "\n"
+ continue
+
+ if mw.inArray(['datetime','date'], data_type):
+ run_pos += 1
+ conf += '\t'+self.eqVerField('sql_attr_timestamp')+' = '+ column_name + "\n"
+ continue
+
+ return conf
+
+ def checkDbName(self, db):
+ filter_db = ['information_schema','performance_schema','sys','mysql']
+ if db in filter_db:
+ return False
+ return True
+
+ def makeSqlToSphinx(self, db, tables = [], is_delta = False):
+ conf = ''
+ conf += self.makeSphinxHeader()
+ conf += self.makeSqlToSphinxDb(db, tables, is_delta)
+ return conf
+
+ def makeSqlToSphinxAll(self):
+ filter_db = ['information_schema','performance_schema','sys','mysql']
+
+ dblist = self.pdb.query('show databases')
+
+ conf = ''
+ conf += self.makeSphinxHeader()
+
+ # conf += makeSqlToSphinxDb(pdb, 'bbs')
+ for x in range(len(dblist)):
+ dbname = dblist[x]['Database']
+ if mw.inArray(filter_db, dbname):
+ continue
+ conf += self.makeSqlToSphinxDb(dbname)
+ return conf
+
+
diff --git a/plugins/sphinx/index.html b/plugins/sphinx/index.html
index fd332a0d3..b62232b49 100755
--- a/plugins/sphinx/index.html
+++ b/plugins/sphinx/index.html
@@ -1,21 +1,35 @@
+
+
-
-
-
-
{% for menu in data['hook_menu'] %}
@@ -188,4 +179,30 @@ if (thisPath.indexOf('?')>-1){
{% endif %}
{% endfor %}
+
+
+
+
+
+
+