mirror of https://github.com/midoks/mdserver-web
parent
bc71c1876c
commit
0900c542d2
@@ -0,0 +1,8 @@
MIT License

Copyright (c) <year> <copyright holders>

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -0,0 +1,14 @@
# mw-simdht

simdht (DHT spider) management plugin for mdserver-web.


### Installation

* Build the plugin archive first: `cd mw-simdht && zip simdht.zip -r ./*`
* In mdserver-web, click `添加插件` (Add Plugin) and upload the archive.

### Screenshot

![](/screenshot/ss1.png)
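If you prefer to script the packaging step, here is a rough Python equivalent of the `zip` one-liner above (a sketch only; run it from inside `mw-simdht/`, and it produces the same `simdht.zip`):

```python
import os
import zipfile

# Pack the plugin directory into simdht.zip, skipping the archive itself.
with zipfile.ZipFile('simdht.zip', 'w', zipfile.ZIP_DEFLATED) as zf:
    for root, dirs, files in os.walk('.'):
        for name in files:
            path = os.path.join(root, name)
            if os.path.abspath(path) != os.path.abspath('simdht.zip'):
                zf.write(path)
```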
@@ -0,0 +1,91 @@
# coding: utf-8

import re
import os
import sys

sys.path.append("/usr/local/lib/python2.7/site-packages")


class mysql:
    __DB_PASS = None
    __DB_USER = 'root'
    __DB_PORT = 3306
    __DB_HOST = 'localhost'
    __DB_NAME = 'test'
    __DB_CONN = None
    __DB_CUR = None
    __DB_ERR = None
    __DB_CNF = '/etc/my.cnf'

    def __Conn(self):
        '''Connect to the MySQL database.'''
        try:
            socket = '/tmp/mysql.sock'
            try:
                import MySQLdb
            except Exception as ex:
                self.__DB_ERR = ex
                return False
            try:
                self.__DB_CONN = MySQLdb.connect(host=self.__DB_HOST, user=self.__DB_USER, passwd=self.__DB_PASS,
                                                 port=self.__DB_PORT, db=self.__DB_NAME, charset="utf8", connect_timeout=10, unix_socket=socket)
            except MySQLdb.Error as e:
                # Fall back to TCP on 127.0.0.1 if the first attempt fails.
                self.__DB_HOST = '127.0.0.1'
                self.__DB_CONN = MySQLdb.connect(host=self.__DB_HOST, user=self.__DB_USER, passwd=self.__DB_PASS,
                                                 port=self.__DB_PORT, db=self.__DB_NAME, charset="utf8", connect_timeout=10, unix_socket=socket)
            self.__DB_CUR = self.__DB_CONN.cursor()
            return True

        except MySQLdb.Error as e:
            self.__DB_ERR = e
            return False

    def setHost(self, host):
        self.__DB_HOST = host

    def setPwd(self, pwd):
        self.__DB_PASS = pwd

    def setUser(self, user):
        self.__DB_USER = user

    def setPort(self, port):
        self.__DB_PORT = port

    def setDb(self, name):
        self.__DB_NAME = name

    def getPwd(self):
        return self.__DB_PASS

    def execute(self, sql):
        # Execute a SQL statement and return the number of affected rows.
        if not self.__Conn():
            return self.__DB_ERR
        try:
            result = self.__DB_CUR.execute(sql)
            self.__DB_CONN.commit()
            self.__Close()
            return result
        except Exception as ex:
            return ex

    def query(self, sql):
        # Execute a SQL statement and return the result set.
        if not self.__Conn():
            return self.__DB_ERR
        try:
            self.__DB_CUR.execute(sql)
            result = self.__DB_CUR.fetchall()
            # Convert the tuples into lists.
            data = map(list, result)
            self.__Close()
            return data
        except Exception as ex:
            return ex

    # Close the connection.
    def __Close(self):
        self.__DB_CUR.close()
        self.__DB_CONN.close()
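For reference, index.py drives this class through the setters before issuing queries (see pMysqlDb() there). A minimal usage sketch, using the ssbc defaults from workers/db.cfg as placeholder credentials:

conn = mysql()                       # from index.py this is mysql.mysql()
conn.setHost('127.0.0.1')
conn.setUser('ssbc')
conn.setPwd('ssbc')
conn.setPort(3306)
conn.setDb('ssbc')
rows = conn.query('select count(id) from search_hash')   # list of rows on success
ret = conn.execute('update search_hash set requests = requests + 1 where id = 1')
# On failure both methods return the exception object instead of raising it,
# which is why index.py runs the result through isSqlError().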
@@ -0,0 +1,47 @@
CREATE TABLE `search_hash` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `info_hash` varchar(40) NOT NULL,
  `category` varchar(20) NOT NULL,
  `data_hash` varchar(32) NOT NULL,
  `name` varchar(255) NOT NULL,
  `extension` varchar(20) NOT NULL,
  `classified` tinyint(1) NOT NULL,
  `source_ip` varchar(20) DEFAULT NULL,
  `tagged` tinyint(1) NOT NULL,
  `length` bigint(20) NOT NULL,
  `create_time` datetime NOT NULL,
  `last_seen` datetime NOT NULL,
  `requests` int(10) unsigned NOT NULL,
  `comment` varchar(255) DEFAULT NULL,
  `is_dmca` tinyint(4) NULL DEFAULT 0,
  `is_has` tinyint(4) NULL DEFAULT 1,
  `creator` varchar(20) DEFAULT NULL,
  PRIMARY KEY (`id`),
  UNIQUE KEY `info_hash` (`info_hash`),
  KEY `search_hash_uniq` (`tagged`),
  KEY `create_time` (`create_time`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8;

CREATE TABLE `search_filelist` (
  `info_hash` varchar(40) NOT NULL,
  `file_list` longtext NOT NULL,
  PRIMARY KEY (`info_hash`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8;


CREATE TABLE `search_statusreport` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `date` date NOT NULL,
  `new_hashes` int(11) NOT NULL,
  `total_requests` int(11) NOT NULL,
  `valid_requests` int(11) NOT NULL,
  PRIMARY KEY (`id`),
  UNIQUE KEY `search_statusreport_uniq` (`date`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8;

-- needed for the sphinx delta index
CREATE TABLE `sph_counter` (
  `counter_id` int(11) NOT NULL COMMENT 'identifies the source data table',
  `max_doc_id` int(11) NOT NULL COMMENT 'max indexed ID per table, updated in real time',
  PRIMARY KEY (`counter_id`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8;
@@ -0,0 +1,24 @@
<div class="bt-form">
	<div class="bt-w-main">
		<div class="bt-w-menu">
			<p class="bgw" onclick="pluginService('simdht');">服务</p>
			<p onclick="pluginInitD('simdht');">自启动</p>
			<p onclick="pluginConfig('simdht', '','get_sql');" title="手动导入SQL">导入SQL</p>
			<p onclick="pluginConfig('simdht', '','get_db_conf');">配置</p>
			<p onclick="pluginConfig('simdht', '','get_checkdb_pos');">检查POS</p>
			<p onclick="pluginConfig('simdht', '','get_black_list');">黑名单</p>
			<p onclick="pluginLogs('simdht','','get_run_Log', 10);">日志</p>
			<p onclick="dhtTrend();">收录趋势</p>
			<p onclick="dhtRead();">说明</p>
		</div>
		<div class="bt-w-con pd15">
			<div class="soft-man-con">
			</div>
		</div>
	</div>
</div>
<script type="text/javascript">
	pluginService('simdht');
	$.getScript("/plugins/file?name=simdht&f=js/simdht.js");
</script>
@ -0,0 +1,313 @@ |
||||
# coding: utf-8 |
||||
|
||||
import time |
||||
import random |
||||
import os |
||||
import json |
||||
import re |
||||
import sys |
||||
|
||||
sys.path.append(os.getcwd() + "/class/core") |
||||
import public |
||||
|
||||
|
||||
app_debug = False |
||||
if public.isAppleSystem(): |
||||
app_debug = True |
||||
|
||||
|
||||
def getPluginName(): |
||||
return 'simdht' |
||||
|
||||
|
||||
def getPluginDir(): |
||||
return public.getPluginDir() + '/' + getPluginName() |
||||
|
||||
sys.path.append(getPluginDir() + "/class") |
||||
import mysql |
||||
|
||||
|
||||
def getServerDir(): |
||||
return public.getServerDir() + '/' + getPluginName() |
||||
|
||||
|
||||
def getInitDFile(): |
||||
if app_debug: |
||||
return '/tmp/' + getPluginName() |
||||
return '/etc/init.d/' + getPluginName() |
||||
|
||||
|
||||
def getArgs(): |
||||
args = sys.argv[2:] |
||||
tmp = {} |
||||
args_len = len(args) |
||||
|
||||
if args_len == 1: |
||||
t = args[0].strip('{').strip('}') |
||||
t = t.split(':') |
||||
tmp[t[0]] = t[1] |
||||
elif args_len > 1: |
||||
for i in range(len(args)): |
||||
t = args[i].split(':') |
||||
tmp[t[0]] = t[1] |
||||
|
||||
return tmp |
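getArgs() assumes every extra argv entry is a {key:value} pair, which is how the panel forwards the JSON args posted by the browser. A simulated call (the interval value is just an example):

import sys
sys.argv = ['index.py', 'get_trend_data', '{interval:1}']   # simulated panel invocation
print getArgs()   # -> {'interval': '1'}; values stay strings, callers cast as needed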
||||
|
||||
|
||||
def checkArgs(data, ck=[]): |
||||
for i in range(len(ck)): |
||||
if not ck[i] in data: |
||||
return (False, public.returnJson(False, '参数:(' + ck[i] + ')没有!')) |
||||
return (True, public.returnJson(True, 'ok')) |
||||
|
||||
|
||||
def getInitDTpl(): |
||||
path = getPluginDir() + "/init.d/" + getPluginName() + ".tpl" |
||||
return path |
||||
|
||||
|
||||
def getSqlFile(): |
||||
file = getPluginDir() + "/conf/simdht.sql" |
||||
return file |
||||
|
||||
|
||||
def getDbConf(): |
||||
file = getServerDir() + "/db.cfg" |
||||
return file |
||||
|
||||
|
||||
def getCheckdbPos(): |
||||
file = getServerDir() + "/start_pos.pl" |
||||
return file |
||||
|
||||
def getBlackList(): |
||||
file = getServerDir() + "/workers/black_list.txt" |
||||
return file |
||||
|
||||
def getRunLog(): |
||||
file = getServerDir() + "/logs.pl" |
||||
return file |
||||
|
||||
|
||||
def initDreplace(): |
||||
|
||||
ddir = getServerDir() + '/workers' |
||||
if not os.path.exists(ddir): |
||||
sdir = getPluginDir() + '/workers' |
||||
public.execShell('cp -rf ' + sdir + ' ' + getServerDir()) |
||||
|
||||
cfg = getServerDir() + '/db.cfg' |
||||
if not os.path.exists(cfg): |
||||
cfg_tpl = getPluginDir() + '/workers/db.cfg' |
||||
content = public.readFile(cfg_tpl) |
||||
public.writeFile(cfg, content) |
||||
|
||||
file_tpl = getInitDTpl() |
||||
service_path = os.path.dirname(os.getcwd()) |
||||
|
||||
initD_path = getServerDir() + '/init.d' |
||||
if not os.path.exists(initD_path): |
||||
os.mkdir(initD_path) |
||||
file_bin = initD_path + '/' + getPluginName() |
||||
|
||||
# initd replace |
||||
content = public.readFile(file_tpl) |
||||
content = content.replace('{$SERVER_PATH}', service_path) |
||||
public.writeFile(file_bin, content) |
||||
public.execShell('chmod +x ' + file_bin) |
||||
|
||||
return file_bin |
||||
|
||||
|
||||
def status(): |
||||
data = public.execShell( |
||||
"ps -ef|grep \"simdht_worker.py\" | grep -v grep | awk '{print $2}'") |
||||
if data[0] == '': |
||||
return 'stop' |
||||
return 'start' |
||||
|
||||
|
||||
def start(): |
||||
file = initDreplace() |
||||
|
||||
data = public.execShell(file + ' start') |
||||
if data[1] == '': |
||||
return 'ok' |
||||
return data[1] |
||||
|
||||
|
||||
def stop(): |
||||
file = initDreplace() |
||||
data = public.execShell(file + ' stop') |
||||
if data[1] == '': |
||||
return 'ok' |
||||
return data[1] |
||||
|
||||
|
||||
def restart(): |
||||
file = initDreplace() |
||||
data = public.execShell(file + ' restart') |
||||
if data[1] == '': |
||||
return 'ok' |
||||
return 'fail' |
||||
|
||||
|
||||
def reload(): |
||||
file = initDreplace() |
||||
data = public.execShell(file + ' reload') |
||||
if data[1] == '': |
||||
return 'ok' |
||||
return 'fail' |
||||
|
||||
|
||||
def initdStatus(): |
||||
if not app_debug: |
||||
if public.isAppleSystem(): |
||||
return "Apple Computer does not support" |
||||
|
||||
initd_bin = getInitDFile() |
||||
if os.path.exists(initd_bin): |
||||
return 'ok' |
||||
return 'fail' |
||||
|
||||
|
||||
def initdInstall(): |
||||
import shutil |
||||
if not app_debug: |
||||
if public.isAppleSystem(): |
||||
return "Apple Computer does not support" |
||||
|
||||
mysql_bin = initDreplace() |
||||
initd_bin = getInitDFile() |
||||
shutil.copyfile(mysql_bin, initd_bin) |
||||
public.execShell('chmod +x ' + initd_bin) |
||||
public.execShell('chkconfig --add ' + getPluginName()) |
||||
return 'ok' |
||||
|
||||
|
||||
def initdUinstall(): |
||||
if not app_debug: |
||||
if public.isAppleSystem(): |
||||
return "Apple Computer does not support" |
||||
initd_bin = getInitDFile() |
||||
os.remove(initd_bin) |
||||
public.execShell('chkconfig --del ' + getPluginName()) |
||||
return 'ok' |
||||
|
||||
|
||||
def matchData(reg, content): |
||||
tmp = re.search(reg, content).groups() |
||||
return tmp[0] |
||||
|
||||
|
||||
def getDbConfInfo(): |
||||
cfg = getDbConf() |
||||
content = public.readFile(cfg) |
||||
data = {} |
||||
data['DB_HOST'] = matchData("DB_HOST\s*=\s(.*)", content) |
||||
data['DB_USER'] = matchData("DB_USER\s*=\s(.*)", content) |
||||
data['DB_PORT'] = matchData("DB_PORT\s*=\s(.*)", content) |
||||
data['DB_PASS'] = matchData("DB_PASS\s*=\s(.*)", content) |
||||
data['DB_NAME'] = matchData("DB_NAME\s*=\s(.*)", content) |
||||
return data |
||||
|
||||
|
||||
def pMysqlDb(): |
||||
data = getDbConfInfo() |
||||
conn = mysql.mysql() |
||||
conn.setHost(data['DB_HOST']) |
||||
conn.setUser(data['DB_USER']) |
||||
conn.setPwd(data['DB_PASS']) |
||||
conn.setPort(int(data['DB_PORT'])) |
||||
conn.setDb(data['DB_NAME']) |
||||
return conn |
||||
|
||||
|
||||
def isSqlError(mysqlMsg): |
||||
# Map common MySQL errors to user-facing messages
||||
mysqlMsg = str(mysqlMsg) |
||||
if "MySQLdb" in mysqlMsg: |
||||
return public.returnJson(False, 'MySQLdb组件缺失! <br>进入SSH命令行输入: pip install mysql-python') |
||||
if "2002," in mysqlMsg: |
||||
return public.returnJson(False, '数据库连接失败,请检查数据库服务是否启动!') |
||||
if "using password:" in mysqlMsg: |
||||
return public.returnJson(False, '数据库管理密码错误!') |
||||
if "Connection refused" in mysqlMsg: |
||||
return public.returnJson(False, '数据库连接失败,请检查数据库服务是否启动!') |
||||
if "1133" in mysqlMsg: |
||||
return public.returnJson(False, '数据库用户不存在!') |
||||
if "1007" in mysqlMsg: |
||||
return public.returnJson(False, '数据库已经存在!') |
||||
return None |
||||
|
||||
|
||||
def getMinData(conn, sec): |
||||
time_diff = 0 |
||||
if public.isAppleSystem(): |
||||
time_diff = 3 * 60 |
||||
pre = time.strftime("%Y-%m-%d %H:%M:%S", |
||||
time.localtime(time.time() - sec - time_diff)) |
||||
sql = "select count(id) from search_hash where create_time > '" + pre + "'" |
||||
data = conn.query(sql) |
||||
return data[0][0] |
||||
|
||||
|
||||
def getTrendData(): |
||||
try: |
||||
args = getArgs() |
||||
data = checkArgs(args, ['interval']) |
||||
if not data[0]: |
||||
return data[1] |
||||
pdb = pMysqlDb() |
||||
# interval = int(args['interval']) |
||||
result = pdb.execute("show tables") |
||||
isError = isSqlError(result) |
||||
if isError: |
||||
return isError |
||||
one = getMinData(pdb, 2) |
||||
two = getMinData(pdb, 5) |
||||
three = getMinData(pdb, 10) |
||||
return public.getJson([one, two, three]) |
||||
except Exception as e: |
||||
print str(e) |
||||
return public.getJson([0, 0, 0]) |
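getTrendData() prints a JSON array of three counters: rows created in roughly the last 2, 5 and 10 seconds (the chart legend in js/simdht.js labels them 1s/5s/10s). Assuming public.getJson is a json.dumps-style helper, the payload the JS side consumes looks like this sketch:

import json
payload = json.dumps([12, 30, 55])    # illustrative values; what public.getJson([one, two, three]) would yield
one, five, ten = json.loads(payload)  # rdata[0], rdata[1], rdata[2] in addData() on the JS side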
||||
|
||||
|
||||
def dhtCmd(): |
||||
file = initDreplace() |
||||
return file + ' restart' |
||||
|
||||
if __name__ == "__main__": |
||||
func = sys.argv[1] |
||||
if func == 'status': |
||||
print status() |
||||
elif func == 'start': |
||||
print start() |
||||
elif func == 'stop': |
||||
print stop() |
||||
elif func == 'restart': |
||||
print restart() |
||||
elif func == 'reload': |
||||
print reload() |
||||
elif func == 'initd_status': |
||||
print initdStatus() |
||||
elif func == 'initd_install': |
||||
print initdInstall() |
||||
elif func == 'initd_uninstall': |
||||
print initdUinstall() |
||||
elif func == 'get_sql': |
||||
print getSqlFile() |
||||
elif func == 'get_db_conf': |
||||
print getDbConf() |
||||
elif func == 'get_checkdb_pos': |
||||
print getCheckdbPos() |
||||
elif func == 'get_black_list': |
||||
print getBlackList() |
||||
elif func == 'get_run_Log': |
||||
print getRunLog() |
||||
elif func == 'get_trend_data': |
||||
print getTrendData() |
||||
elif func == 'dht_cmd': |
||||
print dhtCmd() |
||||
else: |
||||
print 'error' |
@ -0,0 +1,16 @@ |
||||
{ |
||||
"id":3, |
||||
"title":"DHTSpider", |
||||
"tip":"soft", |
||||
"name":"simdht", |
||||
"type":"", |
||||
"ps":"DHT(Distributed Hash Table,分布式哈希表)类似Tracker的根据种子特征码返回种子信息的网络。", |
||||
"versions":"1.0", |
||||
"shell":"install.sh", |
||||
"checks":"server/simdht", |
||||
"path":"server/simdht", |
||||
"author":"midoks", |
||||
"home":"", |
||||
"date":"2018-12-20", |
||||
"pid":"5" |
||||
} |
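This manifest is what mdserver-web reads to list and install the plugin (title, install shell, checks/path). A quick sanity check before packaging, assuming the file is saved under its conventional name info.json:

import json

with open('info.json') as fp:
    info = json.load(fp)
# Field names below are exactly the ones shown in the manifest above.
for key in ('name', 'title', 'versions', 'shell', 'checks', 'path'):
    assert key in info, 'info.json is missing %s' % key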
@ -0,0 +1,44 @@ |
||||
#!/bin/sh |
||||
# chkconfig: 2345 55 25 |
||||
# description: DHTSpider Service |
||||
|
||||
### BEGIN INIT INFO |
||||
# Provides: DHTSpider |
||||
# Required-Start: $all |
||||
# Required-Stop: $all |
||||
# Default-Start: 2 3 4 5 |
||||
# Default-Stop: 0 1 6 |
||||
# Short-Description: starts DHTSpider |
||||
# Description:       starts the DHTSpider worker
||||
### END INIT INFO |
||||
|
||||
|
||||
dht_start(){ |
||||
cd {$SERVER_PATH}/simdht/workers |
||||
nohup python simdht_worker.py > {$SERVER_PATH}/simdht/logs.pl 2>&1 & |
||||
echo "simdht started" |
||||
} |
||||
dht_stop(){ |
||||
echo "Stopping ..." |
||||
ps -ef | grep "python simdht" | grep -v grep | awk '{print $2}' | xargs kill |
||||
echo "simdht stopped" |
||||
} |
||||
|
||||
|
||||
case "$1" in |
||||
start) |
||||
dht_start |
||||
;; |
||||
stop) |
||||
dht_stop |
||||
;; |
||||
restart|reload) |
||||
dht_stop |
||||
sleep 0.3 |
||||
dht_start |
||||
;; |
||||
*) |
||||
echo "Please use start or stop as first argument" |
||||
;; |
||||
esac |
||||
|
@ -0,0 +1,37 @@ |
||||
#!/bin/bash |
||||
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin |
||||
export PATH |
||||
|
||||
|
||||
curPath=`pwd` |
||||
rootPath=$(dirname "$curPath") |
||||
rootPath=$(dirname "$rootPath") |
||||
serverPath=$(dirname "$rootPath") |
||||
|
||||
|
||||
install_tmp=${rootPath}/tmp/bt_install.pl |
||||
|
||||
Install_dht()
{
echo '正在安装脚本文件...' > $install_tmp
pip install pygeoip
pip install pytz
||||
mkdir -p $serverPath/simdht |
||||
echo '1.0' > $serverPath/simdht/version.pl |
||||
echo '安装完成' > $install_tmp |
||||
|
||||
} |
||||
|
||||
Uninstall_dht() |
||||
{ |
||||
rm -rf $serverPath/simdht |
||||
echo "卸载完成" > $install_tmp |
||||
} |
||||
|
||||
action=$1 |
||||
if [ "${1}" == 'install' ];then |
||||
Install_dht |
||||
else |
||||
Uninstall_dht |
||||
fi |
@ -0,0 +1,272 @@ |
||||
function dhtPostMin(method, args, callback){ |
||||
|
||||
var req_data = {}; |
||||
req_data['name'] = 'simdht'; |
||||
req_data['func'] = method; |
||||
|
||||
if (typeof(args) != 'undefined' && args!=''){ |
||||
req_data['args'] = JSON.stringify(args); |
||||
} |
||||
|
||||
$.post('/plugins/run', req_data, function(data) { |
||||
if (!data.status){ |
||||
layer.msg(data.msg,{icon:0,time:2000,shade: [0.3, '#000']}); |
||||
return; |
||||
} |
||||
|
||||
if(typeof(callback) == 'function'){ |
||||
callback(data); |
||||
} |
||||
},'json');
|
||||
} |
||||
|
||||
function dhtPost(method, args, callback){ |
||||
var loadT = layer.msg('正在获取...', { icon: 16, time: 0, shade: 0.3 }); |
||||
dhtPostMin(method,args,function(data){ |
||||
layer.close(loadT); |
||||
if(typeof(callback) == 'function'){ |
||||
callback(data); |
||||
}
|
||||
}); |
||||
} |
||||
|
||||
|
||||
function dhtTrend(){ |
||||
var obj = $('#dht_trend'); |
||||
if (obj.length>0){ |
||||
console.log('已经加载图表...'); |
||||
return; |
||||
} |
||||
|
||||
var trend = '<div id="dht_trend" style="width:100%;height:330px;"></div>'; |
||||
$('.soft-man-con').html(trend); |
||||
dhtTrendRender(); |
||||
} |
||||
|
||||
function dhtTrendData(callback){ |
||||
dhtPostMin('get_trend_data',{interval:1},function(data){ |
||||
if(typeof(callback) == 'function'){ |
||||
callback(data); |
||||
} |
||||
}); |
||||
} |
||||
|
||||
|
||||
function dhtTrendRender() { |
||||
var myChartNetwork = echarts.init(document.getElementById('dht_trend')); |
||||
var xData = []; |
||||
var oneData = []; |
||||
var twoData = []; |
||||
var threeData = []; |
||||
|
||||
function getTime() { |
||||
var now = new Date(); |
||||
var hour = now.getHours(); |
||||
var minute = now.getMinutes(); |
||||
var second = now.getSeconds(); |
||||
if (minute < 10) { |
||||
minute = "0" + minute; |
||||
} |
||||
if (second < 10) { |
||||
second = "0" + second; |
||||
} |
||||
var nowdate = hour + ":" + minute + ":" + second; |
||||
return nowdate; |
||||
} |
||||
|
||||
function ts(m) { return m < 10 ? '0' + m : m } |
||||
|
||||
function format(sjc) { |
||||
var time = new Date(sjc); |
||||
var h = time.getHours(); |
||||
var mm = time.getMinutes(); |
||||
var s = time.getSeconds(); |
||||
return ts(h) + ':' + ts(mm) + ':' + ts(s); |
||||
} |
||||
|
||||
function addData(data) { |
||||
// console.log(data);
|
||||
var rdata = $.parseJSON(data.data); |
||||
xData.push(getTime()); |
||||
oneData.push(rdata[0]); |
||||
twoData.push(rdata[1]); |
||||
threeData.push(rdata[2]); |
||||
|
||||
xData.shift(); |
||||
oneData.shift(); |
||||
twoData.shift(); |
||||
threeData.shift(); |
||||
} |
||||
for (var i = 8; i >= 0; i--) { |
||||
var time = (new Date()).getTime(); |
||||
xData.push(format(time - (i * 5 * 1000))); |
||||
oneData.push(0); |
||||
twoData.push(0); |
||||
threeData.push(0); |
||||
} |
||||
// Chart options and data
|
||||
var option = { |
||||
title: { |
||||
text: '种子收录趋势', |
||||
left: 'center', |
||||
textStyle: { |
||||
color: '#888888',fontStyle: 'normal', |
||||
fontFamily: '宋体',fontSize: 16, |
||||
} |
||||
}, |
||||
tooltip: { |
||||
trigger: 'axis' |
||||
}, |
||||
legend: { |
||||
data: ['1s', '5s', '10s'], |
||||
bottom: '2%' |
||||
}, |
||||
xAxis: { |
||||
type: 'category', |
||||
boundaryGap: false, |
||||
data: xData, |
||||
axisLine: { |
||||
lineStyle: { |
||||
color: "#666" |
||||
} |
||||
} |
||||
}, |
||||
yAxis: { |
||||
name: '单位个数', |
||||
splitLine: { |
||||
lineStyle: { |
||||
color: "#eee" |
||||
} |
||||
}, |
||||
axisLine: { |
||||
lineStyle: { |
||||
color: "#666" |
||||
} |
||||
} |
||||
}, |
||||
series: [{ |
||||
name: '1s', |
||||
type: 'line', |
||||
data: oneData, |
||||
smooth: true, |
||||
showSymbol: false, |
||||
symbol: 'circle', |
||||
symbolSize: 6, |
||||
areaStyle: { |
||||
normal: { |
||||
color: new echarts.graphic.LinearGradient(0, 0, 0, 1,
|
||||
[{offset: 0,color: 'rgba(205, 51, 51,0.5)'},
|
||||
{offset: 1,color: 'rgba(205, 51, 51,0.8)'}], false) |
||||
} |
||||
}, |
||||
itemStyle: { |
||||
normal: {color: '#cd3333'} |
||||
}, |
||||
lineStyle: { |
||||
normal: {width: 1} |
||||
} |
||||
}, { |
||||
name: '5s', |
||||
type: 'line', |
||||
data: twoData, |
||||
smooth: true, |
||||
showSymbol: false, |
||||
symbol: 'circle', |
||||
symbolSize: 6, |
||||
areaStyle: { |
||||
normal: { |
||||
color: new echarts.graphic.LinearGradient(0, 0, 0, 1, [{ |
||||
offset: 0, |
||||
color: 'rgba(30, 144, 255,0.5)' |
||||
}, { |
||||
offset: 1, |
||||
color: 'rgba(30, 144, 255,0.8)' |
||||
}], false) |
||||
} |
||||
}, |
||||
itemStyle: { |
||||
normal: {color: '#52a9ff'} |
||||
}, |
||||
lineStyle: { |
||||
normal: { |
||||
width: 1 |
||||
} |
||||
} |
||||
},{ |
||||
name: '10s', |
||||
type: 'line', |
||||
data: threeData, |
||||
smooth: true, |
||||
showSymbol: false, |
||||
symbol: 'circle', |
||||
symbolSize: 6, |
||||
areaStyle: { |
||||
normal: { |
||||
color: new echarts.graphic.LinearGradient(0, 0, 0, 1, [{ |
||||
offset: 0, |
||||
color: 'rgba(30, 144, 255,0.5)' |
||||
}, { |
||||
offset: 1, |
||||
color: 'rgba(30, 144, 255,0.8)' |
||||
}], false) |
||||
} |
||||
}, |
||||
itemStyle: { |
||||
normal: {color: '#C6E2FF'} |
||||
}, |
||||
lineStyle: { |
||||
normal: { |
||||
width: 1 |
||||
} |
||||
} |
||||
}] |
||||
}; |
||||
|
||||
|
||||
// Render the chart with the options and data defined above.
|
||||
myChartNetwork.setOption(option); |
||||
window.addEventListener("resize", function() { |
||||
myChartNetwork.resize(); |
||||
}); |
||||
|
||||
function render(){ |
||||
dhtTrendData(function(data){ |
||||
addData(data); |
||||
}); |
||||
myChartNetwork.setOption({ |
||||
xAxis: {data: xData}, |
||||
series: [ |
||||
{name: '1s',data: oneData},
|
||||
{name: '5s',data: twoData}, |
||||
{name: '10s',data: threeData} |
||||
] |
||||
}); |
||||
} |
||||
render(); |
||||
|
||||
renderTick = setInterval(function() { |
||||
render(); |
||||
}, 3000); |
||||
|
||||
checkTick = setInterval(function() { |
||||
var obj = $('#dht_trend'); |
||||
if (obj.length>0){ |
||||
return; |
||||
} else { |
||||
console.log('取消定时请求...'); |
||||
clearInterval(renderTick); |
||||
clearInterval(checkTick); |
||||
} |
||||
}, 300); |
||||
} |
||||
|
||||
function dhtRead(){ |
||||
dhtPost('dht_cmd','', function(data){ |
||||
// console.log(data);
|
||||
var readme = '<p>* 在手动导入SQL-先把数据表创建</p>'; |
||||
readme += '<p>* 修改成对应的配置文件</p>'; |
||||
readme += '<p>* 加入到计划[自行调节]:'+data.data+'</p>'; |
||||
$('.soft-man-con').html(readme); |
||||
}); |
||||
|
||||
} |
@ -0,0 +1,339 @@ |
||||
GNU GENERAL PUBLIC LICENSE |
||||
Version 2, June 1991 |
||||
|
||||
Copyright (C) 1989, 1991 Free Software Foundation, Inc., <http://fsf.org/> |
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
||||
Everyone is permitted to copy and distribute verbatim copies |
||||
of this license document, but changing it is not allowed. |
||||
|
||||
Preamble |
||||
|
||||
The licenses for most software are designed to take away your |
||||
freedom to share and change it. By contrast, the GNU General Public |
||||
License is intended to guarantee your freedom to share and change free |
||||
software--to make sure the software is free for all its users. This |
||||
General Public License applies to most of the Free Software |
||||
Foundation's software and to any other program whose authors commit to |
||||
using it. (Some other Free Software Foundation software is covered by |
||||
the GNU Lesser General Public License instead.) You can apply it to |
||||
your programs, too. |
||||
|
||||
When we speak of free software, we are referring to freedom, not |
||||
price. Our General Public Licenses are designed to make sure that you |
||||
have the freedom to distribute copies of free software (and charge for |
||||
this service if you wish), that you receive source code or can get it |
||||
if you want it, that you can change the software or use pieces of it |
||||
in new free programs; and that you know you can do these things. |
||||
|
||||
To protect your rights, we need to make restrictions that forbid |
||||
anyone to deny you these rights or to ask you to surrender the rights. |
||||
These restrictions translate to certain responsibilities for you if you |
||||
distribute copies of the software, or if you modify it. |
||||
|
||||
For example, if you distribute copies of such a program, whether |
||||
gratis or for a fee, you must give the recipients all the rights that |
||||
you have. You must make sure that they, too, receive or can get the |
||||
source code. And you must show them these terms so they know their |
||||
rights. |
||||
|
||||
We protect your rights with two steps: (1) copyright the software, and |
||||
(2) offer you this license which gives you legal permission to copy, |
||||
distribute and/or modify the software. |
||||
|
||||
Also, for each author's protection and ours, we want to make certain |
||||
that everyone understands that there is no warranty for this free |
||||
software. If the software is modified by someone else and passed on, we |
||||
want its recipients to know that what they have is not the original, so |
||||
that any problems introduced by others will not reflect on the original |
||||
authors' reputations. |
||||
|
||||
Finally, any free program is threatened constantly by software |
||||
patents. We wish to avoid the danger that redistributors of a free |
||||
program will individually obtain patent licenses, in effect making the |
||||
program proprietary. To prevent this, we have made it clear that any |
||||
patent must be licensed for everyone's free use or not licensed at all. |
||||
|
||||
The precise terms and conditions for copying, distribution and |
||||
modification follow. |
||||
|
||||
GNU GENERAL PUBLIC LICENSE |
||||
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION |
||||
|
||||
0. This License applies to any program or other work which contains |
||||
a notice placed by the copyright holder saying it may be distributed |
||||
under the terms of this General Public License. The "Program", below, |
||||
refers to any such program or work, and a "work based on the Program" |
||||
means either the Program or any derivative work under copyright law: |
||||
that is to say, a work containing the Program or a portion of it, |
||||
either verbatim or with modifications and/or translated into another |
||||
language. (Hereinafter, translation is included without limitation in |
||||
the term "modification".) Each licensee is addressed as "you". |
||||
|
||||
Activities other than copying, distribution and modification are not |
||||
covered by this License; they are outside its scope. The act of |
||||
running the Program is not restricted, and the output from the Program |
||||
is covered only if its contents constitute a work based on the |
||||
Program (independent of having been made by running the Program). |
||||
Whether that is true depends on what the Program does. |
||||
|
||||
1. You may copy and distribute verbatim copies of the Program's |
||||
source code as you receive it, in any medium, provided that you |
||||
conspicuously and appropriately publish on each copy an appropriate |
||||
copyright notice and disclaimer of warranty; keep intact all the |
||||
notices that refer to this License and to the absence of any warranty; |
||||
and give any other recipients of the Program a copy of this License |
||||
along with the Program. |
||||
|
||||
You may charge a fee for the physical act of transferring a copy, and |
||||
you may at your option offer warranty protection in exchange for a fee. |
||||
|
||||
2. You may modify your copy or copies of the Program or any portion |
||||
of it, thus forming a work based on the Program, and copy and |
||||
distribute such modifications or work under the terms of Section 1 |
||||
above, provided that you also meet all of these conditions: |
||||
|
||||
a) You must cause the modified files to carry prominent notices |
||||
stating that you changed the files and the date of any change. |
||||
|
||||
b) You must cause any work that you distribute or publish, that in |
||||
whole or in part contains or is derived from the Program or any |
||||
part thereof, to be licensed as a whole at no charge to all third |
||||
parties under the terms of this License. |
||||
|
||||
c) If the modified program normally reads commands interactively |
||||
when run, you must cause it, when started running for such |
||||
interactive use in the most ordinary way, to print or display an |
||||
announcement including an appropriate copyright notice and a |
||||
notice that there is no warranty (or else, saying that you provide |
||||
a warranty) and that users may redistribute the program under |
||||
these conditions, and telling the user how to view a copy of this |
||||
License. (Exception: if the Program itself is interactive but |
||||
does not normally print such an announcement, your work based on |
||||
the Program is not required to print an announcement.) |
||||
|
||||
These requirements apply to the modified work as a whole. If |
||||
identifiable sections of that work are not derived from the Program, |
||||
and can be reasonably considered independent and separate works in |
||||
themselves, then this License, and its terms, do not apply to those |
||||
sections when you distribute them as separate works. But when you |
||||
distribute the same sections as part of a whole which is a work based |
||||
on the Program, the distribution of the whole must be on the terms of |
||||
this License, whose permissions for other licensees extend to the |
||||
entire whole, and thus to each and every part regardless of who wrote it. |
||||
|
||||
Thus, it is not the intent of this section to claim rights or contest |
||||
your rights to work written entirely by you; rather, the intent is to |
||||
exercise the right to control the distribution of derivative or |
||||
collective works based on the Program. |
||||
|
||||
In addition, mere aggregation of another work not based on the Program |
||||
with the Program (or with a work based on the Program) on a volume of |
||||
a storage or distribution medium does not bring the other work under |
||||
the scope of this License. |
||||
|
||||
3. You may copy and distribute the Program (or a work based on it, |
||||
under Section 2) in object code or executable form under the terms of |
||||
Sections 1 and 2 above provided that you also do one of the following: |
||||
|
||||
a) Accompany it with the complete corresponding machine-readable |
||||
source code, which must be distributed under the terms of Sections |
||||
1 and 2 above on a medium customarily used for software interchange; or, |
||||
|
||||
b) Accompany it with a written offer, valid for at least three |
||||
years, to give any third party, for a charge no more than your |
||||
cost of physically performing source distribution, a complete |
||||
machine-readable copy of the corresponding source code, to be |
||||
distributed under the terms of Sections 1 and 2 above on a medium |
||||
customarily used for software interchange; or, |
||||
|
||||
c) Accompany it with the information you received as to the offer |
||||
to distribute corresponding source code. (This alternative is |
||||
allowed only for noncommercial distribution and only if you |
||||
received the program in object code or executable form with such |
||||
an offer, in accord with Subsection b above.) |
||||
|
||||
The source code for a work means the preferred form of the work for |
||||
making modifications to it. For an executable work, complete source |
||||
code means all the source code for all modules it contains, plus any |
||||
associated interface definition files, plus the scripts used to |
||||
control compilation and installation of the executable. However, as a |
||||
special exception, the source code distributed need not include |
||||
anything that is normally distributed (in either source or binary |
||||
form) with the major components (compiler, kernel, and so on) of the |
||||
operating system on which the executable runs, unless that component |
||||
itself accompanies the executable. |
||||
|
||||
If distribution of executable or object code is made by offering |
||||
access to copy from a designated place, then offering equivalent |
||||
access to copy the source code from the same place counts as |
||||
distribution of the source code, even though third parties are not |
||||
compelled to copy the source along with the object code. |
||||
|
||||
4. You may not copy, modify, sublicense, or distribute the Program |
||||
except as expressly provided under this License. Any attempt |
||||
otherwise to copy, modify, sublicense or distribute the Program is |
||||
void, and will automatically terminate your rights under this License. |
||||
However, parties who have received copies, or rights, from you under |
||||
this License will not have their licenses terminated so long as such |
||||
parties remain in full compliance. |
||||
|
||||
5. You are not required to accept this License, since you have not |
||||
signed it. However, nothing else grants you permission to modify or |
||||
distribute the Program or its derivative works. These actions are |
||||
prohibited by law if you do not accept this License. Therefore, by |
||||
modifying or distributing the Program (or any work based on the |
||||
Program), you indicate your acceptance of this License to do so, and |
||||
all its terms and conditions for copying, distributing or modifying |
||||
the Program or works based on it. |
||||
|
||||
6. Each time you redistribute the Program (or any work based on the |
||||
Program), the recipient automatically receives a license from the |
||||
original licensor to copy, distribute or modify the Program subject to |
||||
these terms and conditions. You may not impose any further |
||||
restrictions on the recipients' exercise of the rights granted herein. |
||||
You are not responsible for enforcing compliance by third parties to |
||||
this License. |
||||
|
||||
7. If, as a consequence of a court judgment or allegation of patent |
||||
infringement or for any other reason (not limited to patent issues), |
||||
conditions are imposed on you (whether by court order, agreement or |
||||
otherwise) that contradict the conditions of this License, they do not |
||||
excuse you from the conditions of this License. If you cannot |
||||
distribute so as to satisfy simultaneously your obligations under this |
||||
License and any other pertinent obligations, then as a consequence you |
||||
may not distribute the Program at all. For example, if a patent |
||||
license would not permit royalty-free redistribution of the Program by |
||||
all those who receive copies directly or indirectly through you, then |
||||
the only way you could satisfy both it and this License would be to |
||||
refrain entirely from distribution of the Program. |
||||
|
||||
If any portion of this section is held invalid or unenforceable under |
||||
any particular circumstance, the balance of the section is intended to |
||||
apply and the section as a whole is intended to apply in other |
||||
circumstances. |
||||
|
||||
It is not the purpose of this section to induce you to infringe any |
||||
patents or other property right claims or to contest validity of any |
||||
such claims; this section has the sole purpose of protecting the |
||||
integrity of the free software distribution system, which is |
||||
implemented by public license practices. Many people have made |
||||
generous contributions to the wide range of software distributed |
||||
through that system in reliance on consistent application of that |
||||
system; it is up to the author/donor to decide if he or she is willing |
||||
to distribute software through any other system and a licensee cannot |
||||
impose that choice. |
||||
|
||||
This section is intended to make thoroughly clear what is believed to |
||||
be a consequence of the rest of this License. |
||||
|
||||
8. If the distribution and/or use of the Program is restricted in |
||||
certain countries either by patents or by copyrighted interfaces, the |
||||
original copyright holder who places the Program under this License |
||||
may add an explicit geographical distribution limitation excluding |
||||
those countries, so that distribution is permitted only in or among |
||||
countries not thus excluded. In such case, this License incorporates |
||||
the limitation as if written in the body of this License. |
||||
|
||||
9. The Free Software Foundation may publish revised and/or new versions |
||||
of the General Public License from time to time. Such new versions will |
||||
be similar in spirit to the present version, but may differ in detail to |
||||
address new problems or concerns. |
||||
|
||||
Each version is given a distinguishing version number. If the Program |
||||
specifies a version number of this License which applies to it and "any |
||||
later version", you have the option of following the terms and conditions |
||||
either of that version or of any later version published by the Free |
||||
Software Foundation. If the Program does not specify a version number of |
||||
this License, you may choose any version ever published by the Free Software |
||||
Foundation. |
||||
|
||||
10. If you wish to incorporate parts of the Program into other free |
||||
programs whose distribution conditions are different, write to the author |
||||
to ask for permission. For software which is copyrighted by the Free |
||||
Software Foundation, write to the Free Software Foundation; we sometimes |
||||
make exceptions for this. Our decision will be guided by the two goals |
||||
of preserving the free status of all derivatives of our free software and |
||||
of promoting the sharing and reuse of software generally. |
||||
|
||||
NO WARRANTY |
||||
|
||||
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY |
||||
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN |
||||
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES |
||||
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED |
||||
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF |
||||
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS |
||||
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE |
||||
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, |
||||
REPAIR OR CORRECTION. |
||||
|
||||
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING |
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR |
||||
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, |
||||
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING |
||||
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED |
||||
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY |
||||
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER |
||||
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE |
||||
POSSIBILITY OF SUCH DAMAGES. |
||||
|
||||
END OF TERMS AND CONDITIONS |
||||
|
||||
How to Apply These Terms to Your New Programs |
||||
|
||||
If you develop a new program, and you want it to be of the greatest |
||||
possible use to the public, the best way to achieve this is to make it |
||||
free software which everyone can redistribute and change under these terms. |
||||
|
||||
To do so, attach the following notices to the program. It is safest |
||||
to attach them to the start of each source file to most effectively |
||||
convey the exclusion of warranty; and each file should have at least |
||||
the "copyright" line and a pointer to where the full notice is found. |
||||
|
||||
{description} |
||||
Copyright (C) {year} {fullname} |
||||
|
||||
This program is free software; you can redistribute it and/or modify |
||||
it under the terms of the GNU General Public License as published by |
||||
the Free Software Foundation; either version 2 of the License, or |
||||
(at your option) any later version. |
||||
|
||||
This program is distributed in the hope that it will be useful, |
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of |
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
||||
GNU General Public License for more details. |
||||
|
||||
You should have received a copy of the GNU General Public License along |
||||
with this program; if not, write to the Free Software Foundation, Inc., |
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. |
||||
|
||||
Also add information on how to contact you by electronic and paper mail. |
||||
|
||||
If the program is interactive, make it output a short notice like this |
||||
when it starts in an interactive mode: |
||||
|
||||
Gnomovision version 69, Copyright (C) year name of author |
||||
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. |
||||
This is free software, and you are welcome to redistribute it |
||||
under certain conditions; type `show c' for details. |
||||
|
||||
The hypothetical commands `show w' and `show c' should show the appropriate |
||||
parts of the General Public License. Of course, the commands you use may |
||||
be called something other than `show w' and `show c'; they could even be |
||||
mouse-clicks or menu items--whatever suits your program. |
||||
|
||||
You should also get your employer (if you work as a programmer) or your |
||||
school, if any, to sign a "copyright disclaimer" for the program, if |
||||
necessary. Here is a sample; alter the names: |
||||
|
||||
Yoyodyne, Inc., hereby disclaims all copyright interest in the program |
||||
`Gnomovision' (which makes passes at compilers) written by James Hacker. |
||||
|
||||
{signature of Ty Coon}, 1 April 1989 |
||||
Ty Coon, President of Vice |
||||
|
||||
This General Public License does not permit incorporating your program into |
||||
proprietary programs. If your program is a subroutine library, you may |
||||
consider it more useful to permit linking proprietary applications with the |
||||
library. If this is what you want to do, use the GNU Lesser General |
||||
Public License instead of this License. |
@ -0,0 +1,129 @@ |
||||
# The contents of this file are subject to the BitTorrent Open Source License |
||||
# Version 1.1 (the License). You may not copy or use this file, in either |
||||
# source code or executable form, except in compliance with the License. You |
||||
# may obtain a copy of the License at http://www.bittorrent.com/license/. |
||||
# |
||||
# Software distributed under the License is distributed on an AS IS basis, |
||||
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License |
||||
# for the specific language governing rights and limitations under the |
||||
# License. |
||||
|
||||
# Written by Petru Paler |
||||
|
||||
|
||||
def decode_int(x, f): |
||||
f += 1 |
||||
newf = x.index('e', f) |
||||
n = int(x[f:newf]) |
||||
if x[f] == '-': |
||||
if x[f + 1] == '0': |
||||
raise ValueError |
||||
elif x[f] == '0' and newf != f+1: |
||||
raise ValueError |
||||
return (n, newf+1) |
||||
|
||||
def decode_string(x, f): |
||||
colon = x.index(':', f) |
||||
n = int(x[f:colon]) |
||||
if x[f] == '0' and colon != f+1: |
||||
raise ValueError |
||||
colon += 1 |
||||
return (x[colon:colon+n], colon+n) |
||||
|
||||
def decode_list(x, f): |
||||
r, f = [], f+1 |
||||
while x[f] != 'e': |
||||
v, f = decode_func[x[f]](x, f) |
||||
r.append(v) |
||||
return (r, f + 1) |
||||
|
||||
def decode_dict(x, f): |
||||
r, f = {}, f+1 |
||||
while x[f] != 'e': |
||||
k, f = decode_string(x, f) |
||||
r[k], f = decode_func[x[f]](x, f) |
||||
return (r, f + 1) |
||||
|
||||
decode_func = {} |
||||
decode_func['l'] = decode_list |
||||
decode_func['d'] = decode_dict |
||||
decode_func['i'] = decode_int |
||||
decode_func['0'] = decode_string |
||||
decode_func['1'] = decode_string |
||||
decode_func['2'] = decode_string |
||||
decode_func['3'] = decode_string |
||||
decode_func['4'] = decode_string |
||||
decode_func['5'] = decode_string |
||||
decode_func['6'] = decode_string |
||||
decode_func['7'] = decode_string |
||||
decode_func['8'] = decode_string |
||||
decode_func['9'] = decode_string |
||||
|
||||
def bdecode(x): |
||||
try: |
||||
r, l = decode_func[x[0]](x, 0) |
||||
except (IndexError, KeyError, ValueError): |
||||
raise Exception("not a valid bencoded string") |
||||
#if l != len(x): |
||||
# raise Exception("invalid bencoded value (data after valid prefix)") |
||||
return r |
||||
|
||||
from types import StringType, IntType, LongType, DictType, ListType, TupleType |
||||
|
||||
|
||||
class Bencached(object): |
||||
|
||||
__slots__ = ['bencoded'] |
||||
|
||||
def __init__(self, s): |
||||
self.bencoded = s |
||||
|
||||
def encode_bencached(x,r): |
||||
r.append(x.bencoded) |
||||
|
||||
def encode_int(x, r): |
||||
r.extend(('i', str(x), 'e')) |
||||
|
||||
def encode_bool(x, r): |
||||
if x: |
||||
encode_int(1, r) |
||||
else: |
||||
encode_int(0, r) |
||||
|
||||
def encode_string(x, r): |
||||
r.extend((str(len(x)), ':', x)) |
||||
|
||||
def encode_list(x, r): |
||||
r.append('l') |
||||
for i in x: |
||||
encode_func[type(i)](i, r) |
||||
r.append('e') |
||||
|
||||
def encode_dict(x,r): |
||||
r.append('d') |
||||
ilist = x.items() |
||||
ilist.sort() |
||||
for k, v in ilist: |
||||
r.extend((str(len(k)), ':', k)) |
||||
encode_func[type(v)](v, r) |
||||
r.append('e') |
||||
|
||||
encode_func = {} |
||||
encode_func[Bencached] = encode_bencached |
||||
encode_func[IntType] = encode_int |
||||
encode_func[LongType] = encode_int |
||||
encode_func[StringType] = encode_string |
||||
encode_func[ListType] = encode_list |
||||
encode_func[TupleType] = encode_list |
||||
encode_func[DictType] = encode_dict |
||||
|
||||
try: |
||||
from types import BooleanType |
||||
encode_func[BooleanType] = encode_bool |
||||
except ImportError: |
||||
pass |
||||
|
||||
def bencode(x): |
||||
r = [] |
||||
encode_func[type(x)](x, r) |
||||
return ''.join(r) |
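A small round trip shows the rules the decode_func table above implements: integers as i...e, strings as length:data, lists as l...e and dicts as d...e with keys sorted:

sample = {'name': 'example.mkv', 'length': 123, 'files': ['a', 'b']}
wire = bencode(sample)
print wire                    # d5:filesl1:a1:be6:lengthi123e4:name11:example.mkve
assert bdecode(wire) == sample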
@ -0,0 +1,32 @@ |
||||
bingyouhome.com |
||||
imgshao123.com |
||||
henduofuli.net |
||||
fuliboke.net |
||||
pronvideo.pw |
||||
rm6.org |
||||
olife.org |
||||
ixx.io |
||||
eye.rs |
||||
bad.mn |
||||
jox99.com |
||||
getxxx.pw |
||||
xyz1234.cf |
||||
xyz1234.ga |
||||
xyz1234.ml |
||||
xyz1234.tk |
||||
xyz1234.gq |
||||
fuli123.gq |
||||
fuli123.ga |
||||
fuli123.ml |
||||
fuli123.cf |
||||
fuli123.tk |
||||
henhei.cf |
||||
henhei.ga |
||||
henhei.ml |
||||
henhei.tk |
||||
henhei.gq |
||||
.cf |
||||
.ga |
||||
.ml |
||||
.gq |
||||
.tk |
@@ -0,0 +1,41 @@
#!/usr/bin/env python
#coding: utf8

import sys

import MySQLdb as mdb
import MySQLdb.cursors

SRC_HOST = '127.0.0.1'
SRC_USER = 'root'
SRC_PASS = ''
DATABASE_NAME = ''
DST_HOST = '127.0.0.1'
DST_USER = 'root'
DST_PASS = ''


src_conn = mdb.connect(SRC_HOST, SRC_USER, SRC_PASS, DATABASE_NAME, charset='utf8', cursorclass=MySQLdb.cursors.DictCursor)
src_curr = src_conn.cursor()
src_curr.execute('SET NAMES utf8')

dst_conn = mdb.connect(DST_HOST, DST_USER, DST_PASS, 'rt_main', port=9306, charset='utf8')
dst_curr = dst_conn.cursor()
dst_curr.execute('SET NAMES utf8')


def delete(resname):
    onetimecount = 20
    while True:
        # Fetch a batch of matching document ids from the Sphinx RT index.
        ret = dst_curr.execute('select id from rt_main where match(\'*%s*\') limit %s' % (resname, onetimecount))
        if ret <= 0:
            print 'done'
            break
        result = list(dst_curr.fetchall())
        for id in iter(result):
            src_curr.execute('select info_hash from search_hash where id = %s' % (id))
            info_hash = src_curr.fetchall()
            for hash in iter(info_hash):
                src_curr.execute('delete from search_filelist where info_hash = \'%s\'' % (hash['info_hash']))
            src_curr.execute('delete from search_hash where id = %s' % (id))
            dst_curr.execute('delete from rt_main where id = %s' % (id))

if __name__ == '__main__':
    delete(sys.argv[1])
@ -0,0 +1,20 @@ |
||||
[db] |
||||
DB_HOST = 127.0.0.1 |
||||
DB_USER = ssbc |
||||
DB_PORT = 3306 |
||||
DB_PASS = ssbc |
||||
DB_NAME = ssbc |
||||
|
||||
#UNIT(G) |
||||
DB_SIZE_LIMIT = 1 |
||||
DB_SIZE_TICK = 3 |
||||
|
||||
#0:close,1:open |
||||
DB_DEL_SWITCH = 0 |
||||
DB_DEL_LINE = 1000 |
||||
DB_DEL_TIMER = 600 |
||||
|
||||
[queue] |
||||
MAX_QUEUE_LT = 30 |
||||
MAX_QUEUE_PT = 200 |
||||
MAX_NODE_QSIZE = 200 |
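index.py reads these values with simple regexes (getDbConfInfo), but since the file is plain INI a standalone script can also load it with the standard library. A sketch, assuming the [db] section shown above:

import ConfigParser   # configparser on Python 3

cfg = ConfigParser.ConfigParser()
cfg.read('db.cfg')
db_host = cfg.get('db', 'DB_HOST')
db_port = cfg.getint('db', 'DB_PORT')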
@@ -0,0 +1,52 @@
#!/usr/bin/env python
# coding: utf8
"""
Read resources that have not been indexed yet from MySQL and push them into
the Sphinx real-time index.
xiaoxia@xiaoxia.org
2015.5 created
"""

import time
import MySQLdb as mdb
import MySQLdb.cursors

SRC_HOST = '127.0.0.1'
SRC_USER = 'root'
SRC_PASS = 'root'
DST_HOST = '127.0.0.1'
DST_USER = 'root'
DST_PASS = 'root'

src_conn = mdb.connect(SRC_HOST, SRC_USER, SRC_PASS, 'ssbc',
                       charset='utf8', cursorclass=MySQLdb.cursors.DictCursor)
src_curr = src_conn.cursor()
src_curr.execute('SET NAMES utf8')

dst_conn = mdb.connect(DST_HOST, DST_USER, DST_PASS,
                       'rt_main', port=9306, charset='utf8')
dst_curr = dst_conn.cursor()
dst_curr.execute('SET NAMES utf8')


def work():
    src_curr.execute('SELECT id, name, CRC32(category) AS category, length, UNIX_TIMESTAMP(create_time) AS create_time, ' +
                     'UNIX_TIMESTAMP(last_seen) AS last_seen FROM search_hash WHERE tagged=false LIMIT 10000')
    total = src_curr.rowcount
    print 'fetched', total
    for one in src_curr:
        ret = dst_curr.execute('insert into rt_main(id,name,category,length,create_time,last_seen) values(%s,%s,%s,%s,%s,%s)',
                               (one['id'], one['name'], one['category'], one['length'], one['create_time'], one['last_seen']))
        if ret:
            src_curr.execute(
                'UPDATE search_hash SET tagged=True WHERE id=%s', (one['id'],))
            print 'Indexed', one['name'].encode('utf8')
    print 'Done!'
    return total

if __name__ == '__main__':
    while True:
        if work() == 10000:
            print 'Continue...'
            continue
        print 'Wait 10mins...'
        time.sleep(600)
@@ -0,0 +1,66 @@
#coding: utf8
import threading
import traceback
import random
import time
import os
import socket

import libtorrent as lt

threading.stack_size(200*1024)
socket.setdefaulttimeout(30)


def fetch_torrent(session, ih, timeout):
    name = ih.upper()
    url = 'magnet:?xt=urn:btih:%s' % (name,)
    data = ''
    params = {
        'save_path': '/tmp/downloads/',
        'storage_mode': lt.storage_mode_t(2),
        'paused': False,
        'auto_managed': False,
        'duplicate_is_error': True}
    try:
        handle = lt.add_magnet_uri(session, url, params)
    except:
        return None
    status = session.status()
    #print 'downloading metadata:', url
    handle.set_sequential_download(1)
    meta = None
    down_time = time.time()
    down_path = None
    # Wait up to `timeout` seconds for the torrent metadata to arrive.
    for i in xrange(0, timeout):
        if handle.has_metadata():
            info = handle.get_torrent_info()
            down_path = '/tmp/downloads/%s' % info.name()
            #print 'status', 'p', status.num_peers, 'g', status.dht_global_nodes, 'ts', status.dht_torrents, 'u', status.total_upload, 'd', status.total_download
            meta = info.metadata()
            break
        time.sleep(1)
    if down_path and os.path.exists(down_path):
        os.system('rm -rf "%s"' % down_path)
    session.remove_torrent(handle)
    return meta


def download_metadata(address, binhash, metadata_queue, timeout=40):
    metadata = None
    start_time = time.time()
    try:
        session = lt.session()
        r = random.randrange(10000, 50000)
        session.listen_on(r, r+10)
        session.add_dht_router('router.bittorrent.com', 6881)
        session.add_dht_router('router.utorrent.com', 6881)
        session.add_dht_router('dht.transmissionbt.com', 6881)
        session.add_dht_router('127.0.0.1', 6881)
        session.start_dht()
        metadata = fetch_torrent(session, binhash.encode('hex'), timeout)
        session = None
    except:
        traceback.print_exc()
    finally:
        metadata_queue.put((binhash, address, metadata, 'lt', start_time))
|
@ -0,0 +1,155 @@ |
||||
# coding: utf-8 |
||||
import traceback |
||||
import pygeoip |
||||
import threading |
||||
import socket |
||||
import sys |
||||
import hashlib |
||||
import datetime |
||||
import time |
||||
import json |
||||
|
||||
|
||||
import metautils |
||||
from bencode import bencode, bdecode |
||||
geoip = pygeoip.GeoIP('GeoIP.dat') |
||||
|
||||
# setting time |
||||
import pytz |
||||
pytz.timezone('Asia/Shanghai') |
||||
# print datetime.datetime.utcnow() |
||||
|
||||
|
||||
def decode(encoding, s): |
||||
if type(s) is list: |
||||
s = ';'.join(s) |
||||
u = s |
||||
for x in (encoding, 'utf8', 'gbk', 'big5'): |
||||
try: |
||||
u = s.decode(x) |
||||
return u |
||||
except: |
||||
pass |
||||
return s.decode(encoding, 'ignore') |
||||
|
||||
|
||||
def decode_utf8(encoding, d, i): |
||||
if i + '.utf-8' in d: |
||||
return d[i + '.utf-8'].decode('utf8') |
||||
return decode(encoding, d[i]) |
||||
|
||||
|
||||
def parse_metadata(data): |
||||
info = {} |
||||
encoding = 'utf8' |
||||
try: |
||||
torrent = bdecode(data) |
||||
if not torrent.get('name'): |
||||
return None |
||||
except: |
||||
return None |
||||
try: |
||||
info['create_time'] = datetime.datetime.fromtimestamp( |
||||
float(torrent['creation date'])) |
||||
except: |
||||
info['create_time'] = datetime.datetime.now() |
||||
|
||||
if torrent.get('encoding'): |
||||
encoding = torrent['encoding'] |
||||
if torrent.get('announce'): |
||||
info['announce'] = decode_utf8(encoding, torrent, 'announce') |
||||
if torrent.get('comment'): |
||||
info['comment'] = decode_utf8(encoding, torrent, 'comment')[:200] |
||||
if torrent.get('publisher-url'): |
||||
info['publisher-url'] = decode_utf8(encoding, torrent, 'publisher-url') |
||||
if torrent.get('publisher'): |
||||
info['publisher'] = decode_utf8(encoding, torrent, 'publisher') |
||||
if torrent.get('created by'): |
||||
info['creator'] = decode_utf8(encoding, torrent, 'created by')[:15] |
||||
|
||||
if 'info' in torrent: |
||||
detail = torrent['info'] |
||||
else: |
||||
detail = torrent |
||||
info['name'] = decode_utf8(encoding, detail, 'name') |
||||
if 'files' in detail: |
||||
info['files'] = [] |
||||
for x in detail['files']: |
||||
if 'path.utf-8' in x: |
||||
v = {'path': decode( |
||||
encoding, '/'.join(x['path.utf-8'])), 'length': x['length']} |
||||
else: |
||||
v = {'path': decode( |
||||
encoding, '/'.join(x['path'])), 'length': x['length']} |
||||
if 'filehash' in x: |
||||
v['filehash'] = x['filehash'].encode('hex') |
||||
info['files'].append(v) |
||||
info['length'] = sum([x['length'] for x in info['files']]) |
||||
else: |
||||
info['length'] = detail['length'] |
||||
info['data_hash'] = hashlib.md5(detail['pieces']).hexdigest() |
||||
if 'profiles' in detail: |
||||
info['profiles'] = detail['profiles'] |
||||
return info |
||||
|
||||
|
||||
def save_metadata(dbcurr, binhash, address, start_time, data, blacklist): |
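||||
# Parse freshly fetched metadata, drop it when the name hits the blacklist, |
||||
# write the file list into search_filelist and the summary row into |
||||
# search_hash, then commit on the caller's connection. |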
||||
utcnow = datetime.datetime.now() |
||||
name = threading.currentThread().getName() |
||||
try: |
||||
info = parse_metadata(data) |
||||
if not info: |
||||
return |
||||
except: |
||||
traceback.print_exc() |
||||
return |
||||
info_hash = binhash.encode('hex') |
||||
info['info_hash'] = info_hash |
||||
# need to build tags |
||||
info['tagged'] = False |
||||
info['classified'] = False |
||||
info['requests'] = 1 |
||||
info['last_seen'] = utcnow |
||||
info['source_ip'] = address[0] |
||||
|
||||
for item in blacklist: |
||||
if str(item) in info['name']: |
||||
return |
||||
if info.get('files'): |
||||
files = [z for z in info['files'] if not z['path'].startswith('_')] |
||||
if not files: |
||||
files = info['files'] |
||||
else: |
||||
files = [{'path': info['name'], 'length': info['length']}] |
||||
files.sort(key=lambda z: z['length'], reverse=True) |
||||
bigfname = files[0]['path'] |
||||
info['extension'] = metautils.get_extension(bigfname).lower() |
||||
info['category'] = metautils.get_category(info['extension']) |
||||
|
||||
if 'files' in info: |
||||
try: |
||||
dbcurr.execute('INSERT INTO search_filelist VALUES(%s, %s)', (info[ |
||||
'info_hash'], json.dumps(info['files']))) |
||||
except: |
||||
print name, 'insert error', sys.exc_info()[1] |
||||
del info['files'] |
||||
|
||||
try: |
||||
try: |
||||
print '\n', 'Saved', utcnow, info['info_hash'], info['name'], (time.time() - start_time), 's', address[0], geoip.country_name_by_addr(address[0]), |
||||
except: |
||||
print '\n', 'Saved', utcnow, info['info_hash'], sys.exc_info()[1] |
||||
try: |
||||
ret = dbcurr.execute('INSERT INTO search_hash(info_hash,category,data_hash,name,extension,classified,source_ip,tagged,' + |
||||
'length,create_time,last_seen,requests,comment,creator) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)', |
||||
(info['info_hash'], info['category'], info['data_hash'], info['name'], info['extension'], info['classified'], |
||||
info['source_ip'], info['tagged'], info['length'], info[ |
||||
'create_time'], info['last_seen'], info['requests'], |
||||
info.get('comment', ''), info.get('creator', ''))) |
||||
except: |
||||
print 'insert search_hash err: ', info['info_hash'] |
||||
dbcurr.connection.commit() |
||||
except: |
||||
print name, 'save error', info |
||||
traceback.print_exc() |
||||
return |
@ -0,0 +1,54 @@ |
||||
#coding: utf8 |
||||
import os |
||||
import binascii |
||||
|
||||
cats = { |
||||
u'video': u'Videos', |
||||
u'image': u'Images', |
||||
u'document': u'Books', |
||||
u'music': u'Musics', |
||||
u'package': u'Packages', |
||||
u'software': u'Softwares', |
||||
} |
||||
|
||||
def get_label(name): |
||||
if name in cats: |
||||
return cats[name] |
||||
return u'Others' |
||||
|
||||
def get_label_by_crc32(n): |
||||
for k in cats: |
||||
if (binascii.crc32(k) & 0xFFFFFFFFL) == n: |
||||
return k |
||||
return u'other' |
||||
|
||||
def get_extension(name): |
||||
return os.path.splitext(name)[1] |
||||
|
||||
def get_category(ext): |
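||||
# Map a file extension to a coarse category: the dotted extension is looked |
||||
# up as a substring of the '.ext1.ext2....' lists below (e.g. '.mp4.'). |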
||||
ext = ext + '.' |
||||
cats = { |
||||
u'video': '.avi.mp4.rmvb.m2ts.wmv.mkv.flv.qmv.rm.mov.vob.asf.3gp.mpg.mpeg.m4v.f4v.', |
||||
u'image': '.jpg.bmp.jpeg.png.gif.tiff.', |
||||
u'document': '.pdf.isz.chm.txt.epub.bc!.doc.ppt.', |
||||
u'music': '.mp3.ape.wav.dts.mdf.flac.', |
||||
u'package': '.zip.rar.7z.tar.gz.iso.dmg.pkg.', |
||||
u'software': '.exe.app.msi.apk.' |
||||
} |
||||
for k, v in cats.iteritems(): |
||||
if ext in v: |
||||
return k |
||||
return u'other' |
||||
|
||||
def get_detail(y): |
||||
if y.get('files'): |
||||
y['files'] = [z for z in y['files'] if not z['path'].startswith('_')] |
||||
else: |
||||
y['files'] = [{'path': y['name'], 'length': y['length']}] |
||||
y['files'].sort(key=lambda z:z['length'], reverse=True) |
||||
bigfname = y['files'][0]['path'] |
||||
ext = get_extension(bigfname).lower() |
||||
y['category'] = get_category(ext) |
||||
y['extension'] = ext |
||||
|
||||
|
@ -0,0 +1,141 @@ |
||||
#!/usr/bin/env python |
||||
# encoding: utf-8 |
||||
import socket |
||||
import math |
||||
from struct import pack, unpack |
||||
from socket import inet_ntoa |
||||
from threading import Timer, Thread |
||||
from time import sleep, time |
||||
from hashlib import sha1 |
||||
|
||||
from simdht_worker import entropy |
||||
from bencode import bencode, bdecode |
||||
|
||||
|
||||
BT_PROTOCOL = "BitTorrent protocol" |
||||
BT_MSG_ID = 20 |
||||
EXT_HANDSHAKE_ID = 0 |
||||
|
||||
def random_id(): |
||||
hash = sha1() |
||||
hash.update(entropy(20)) |
||||
return hash.digest() |
||||
|
||||
def send_packet(the_socket, msg): |
||||
the_socket.send(msg) |
||||
|
||||
def send_message(the_socket, msg): |
||||
msg_len = pack(">I", len(msg)) |
||||
send_packet(the_socket, msg_len + msg) |
||||
|
||||
def send_handshake(the_socket, infohash): |
||||
bt_header = chr(len(BT_PROTOCOL)) + BT_PROTOCOL |
||||
ext_bytes = "\x00\x00\x00\x00\x00\x10\x00\x00" |
||||
peer_id = random_id() |
||||
packet = bt_header + ext_bytes + infohash + peer_id |
||||
|
||||
send_packet(the_socket, packet) |
||||
|
||||
def check_handshake(packet, self_infohash): |
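||||
# Validate a BitTorrent handshake: 1-byte pstrlen, the protocol string, |
||||
# 8 reserved bytes, then a 20-byte info_hash that must match our own. |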
||||
try: |
||||
bt_header_len, packet = ord(packet[:1]), packet[1:] |
||||
if bt_header_len != len(BT_PROTOCOL): |
||||
return False |
||||
except TypeError: |
||||
return False |
||||
|
||||
bt_header, packet = packet[:bt_header_len], packet[bt_header_len:] |
||||
if bt_header != BT_PROTOCOL: |
||||
return False |
||||
|
||||
packet = packet[8:] |
||||
infohash = packet[:20] |
||||
if infohash != self_infohash: |
||||
return False |
||||
|
||||
return True |
||||
|
||||
def send_ext_handshake(the_socket): |
||||
msg = chr(BT_MSG_ID) + chr(EXT_HANDSHAKE_ID) + bencode({"m":{"ut_metadata": 1}}) |
||||
send_message(the_socket, msg) |
||||
|
||||
def request_metadata(the_socket, ut_metadata, piece): |
||||
"""bep_0009""" |
||||
msg = chr(BT_MSG_ID) + chr(ut_metadata) + bencode({"msg_type": 0, "piece": piece}) |
||||
send_message(the_socket, msg) |
||||
|
||||
def get_ut_metadata(data): |
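||||
# Crude parse of the extension handshake: assumes the ut_metadata id is a |
||||
# single digit sitting right after the "_metadata" key in the bencoded data. |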
||||
ut_metadata = "_metadata" |
||||
index = data.index(ut_metadata)+len(ut_metadata) + 1 |
||||
return int(data[index]) |
||||
|
||||
def get_metadata_size(data): |
||||
metadata_size = "metadata_size" |
||||
start = data.index(metadata_size) + len(metadata_size) + 1 |
||||
data = data[start:] |
||||
return int(data[:data.index("e")]) |
||||
|
||||
def recvall(the_socket, timeout=5): |
||||
the_socket.setblocking(0) |
||||
total_data = [] |
||||
data = "" |
||||
begin = time() |
||||
|
||||
while True: |
||||
sleep(0.05) |
||||
if total_data and time()-begin > timeout: |
||||
break |
||||
elif time()-begin > timeout*2: |
||||
break |
||||
try: |
||||
data = the_socket.recv(1024) |
||||
if data: |
||||
total_data.append(data) |
||||
begin = time() |
||||
except Exception: |
||||
pass |
||||
return "".join(total_data) |
||||
|
||||
def download_metadata(address, infohash, metadata_queue, timeout=5): |
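||||
# Pull the info dictionary directly from one peer over TCP: BT handshake, |
||||
# extended handshake, then request the metadata pieces one by one; whatever |
||||
# was fetched (or None) is queued as (infohash, address, metadata, 'pt', start_time). |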
||||
metadata = None |
||||
start_time = time() |
||||
try: |
||||
the_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) |
||||
the_socket.settimeout(timeout) |
||||
the_socket.connect(address) |
||||
|
||||
# handshake |
||||
send_handshake(the_socket, infohash) |
||||
packet = the_socket.recv(4096) |
||||
|
||||
# handshake error |
||||
if not check_handshake(packet, infohash): |
||||
return |
||||
|
||||
# ext handshake |
||||
send_ext_handshake(the_socket) |
||||
packet = the_socket.recv(4096) |
||||
|
||||
# get ut_metadata and metadata_size |
||||
ut_metadata, metadata_size = get_ut_metadata(packet), get_metadata_size(packet) |
||||
#print 'ut_metadata_size: ', metadata_size |
||||
|
||||
# request each piece of metadata |
||||
metadata = [] |
||||
for piece in range(int(math.ceil(metadata_size/(16.0*1024)))): |
||||
request_metadata(the_socket, ut_metadata, piece) |
||||
packet = recvall(the_socket, timeout) #the_socket.recv(1024*17) # |
||||
metadata.append(packet[packet.index("ee")+2:]) |
||||
|
||||
metadata = "".join(metadata) |
||||
#print 'Fetched', bdecode(metadata)["name"], "size: ", len(metadata) |
||||
|
||||
except socket.timeout: |
||||
pass |
||||
except Exception, e: |
||||
pass #print e |
||||
|
||||
finally: |
||||
the_socket.close() |
||||
metadata_queue.put((infohash, address, metadata, 'pt', start_time)) |
||||
|
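||||
# Usage sketch (illustrative only; mirrors how Master.run in simdht_worker.py |
||||
# drives this module from a worker thread): |
||||
#   q = Queue.Queue() |
||||
#   t = threading.Thread(target=download_metadata, |
||||
#                        args=(('1.2.3.4', 6881), binhash, q)) |
||||
#   t.setDaemon(True) |
||||
#   t.start() |
||||
#   infohash, address, metadata, source, started = q.get() |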
@ -0,0 +1,616 @@ |
||||
#!/usr/bin/env python |
||||
# encoding: utf-8 |
||||
""" |
||||
Magnet search metadata ingestion program |
||||
xiaoxia@xiaoxia.org |
||||
2015.6 Forked CreateChen's Project: https://github.com/CreateChen/simDownloader |
||||
2016.12 冰剑: added filtering of invalid magnet links pointing to malicious promotion sites |
||||
""" |
||||
|
||||
import hashlib |
||||
import os |
||||
import SimpleXMLRPCServer |
||||
import time |
||||
import datetime |
||||
import traceback |
||||
import math |
||||
import sys |
||||
import json |
||||
import socket |
||||
import threading |
||||
from hashlib import sha1 |
||||
from random import randint |
||||
from struct import unpack |
||||
from socket import inet_ntoa |
||||
from threading import Timer, Thread |
||||
from time import sleep |
||||
from collections import deque |
||||
from Queue import Queue |
||||
|
||||
reload(sys) |
||||
sys.setdefaultencoding('utf-8') |
||||
|
||||
sys.path.append('/usr/local/lib/python2.7/site-packages') |
||||
|
||||
import pygeoip |
||||
import MySQLdb as mdb |
||||
try: |
||||
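# NOTE: the bare 'raise' below forces the except branch, so libtorrent is |
||||
# never imported and lt stays None (the 'lt' download path is disabled). |
||||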
raise |
||||
import libtorrent as lt |
||||
import ltMetadata |
||||
except: |
||||
lt = None |
||||
print sys.exc_info()[1] |
||||
|
||||
import metautils |
||||
import simMetadata |
||||
from bencode import bencode, bdecode |
||||
from metadata import save_metadata |
||||
|
||||
|
||||
try: |
||||
    from configparser import ConfigParser  # PyPI backport, if installed |
||||
except ImportError: |
||||
    from ConfigParser import ConfigParser  # Python 2 stdlib module name |
||||
cp = ConfigParser() |
||||
cp.read("../db.cfg") |
||||
section_db = cp.sections()[0] |
||||
DB_HOST = cp.get(section_db, "DB_HOST") |
||||
DB_USER = cp.get(section_db, "DB_USER") |
||||
DB_PORT = cp.getint(section_db, "DB_PORT") |
||||
DB_PASS = cp.get(section_db, "DB_PASS") |
||||
DB_NAME = cp.get(section_db, "DB_NAME") |
||||
DB_SIZE_LIMIT = cp.get(section_db, "DB_SIZE_LIMIT") |
||||
DB_SIZE_TICK = cp.getint(section_db, "DB_SIZE_TICK") |
||||
DB_DEL_LINE = cp.getint(section_db, "DB_DEL_LINE") |
||||
DB_DEL_SWITCH = cp.get(section_db, "DB_DEL_SWITCH") |
||||
DB_DEL_TIMER = cp.getint(section_db, "DB_DEL_TIMER") |
||||
|
||||
BLACK_FILE = 'black_list.txt' |
||||
|
||||
BOOTSTRAP_NODES = ( |
||||
("router.bittorrent.com", 6881), |
||||
("dht.transmissionbt.com", 6881), |
||||
("router.utorrent.com", 6881) |
||||
) |
||||
TID_LENGTH = 2 |
||||
RE_JOIN_DHT_INTERVAL = 3 |
||||
TOKEN_LENGTH = 2 |
||||
|
||||
section_queue = cp.sections()[1] |
||||
MAX_QUEUE_LT = cp.getint(section_queue, "MAX_QUEUE_LT") |
||||
MAX_QUEUE_PT = cp.getint(section_queue, "MAX_QUEUE_PT") |
||||
MAX_NODE_QSIZE = cp.getint(section_queue, "MAX_NODE_QSIZE") |
||||
|
||||
geoip = pygeoip.GeoIP('GeoIP.dat') |
||||
|
||||
|
||||
def load_res_blacklist(black_list_path): |
||||
black_list = [] |
||||
file_path = os.path.join(os.path.dirname(__file__), black_list_path) |
||||
f = open(file_path, 'r') |
||||
while True: |
||||
line = f.readline() |
||||
if not(line): |
||||
break |
||||
black_list.append(line) |
||||
f.close() |
||||
return black_list |
||||
|
||||
|
||||
def is_ip_allowed(ip): |
||||
return geoip.country_code_by_addr(ip) not in ('CN',) |
||||
|
||||
|
||||
def entropy(length): |
||||
return "".join(chr(randint(0, 255)) for _ in xrange(length)) |
||||
|
||||
|
||||
def random_id(): |
||||
h = sha1() |
||||
h.update(entropy(20)) |
||||
return h.digest() |
||||
|
||||
|
||||
def decode_nodes(nodes): |
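||||
# Unpack the "compact node info" format (BEP 5): 26 bytes per node, |
||||
# a 20-byte node id + 4-byte IPv4 address + 2-byte big-endian port. |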
||||
n = [] |
||||
length = len(nodes) |
||||
if (length % 26) != 0: |
||||
return n |
||||
|
||||
for i in range(0, length, 26): |
||||
nid = nodes[i:i + 20] |
||||
ip = inet_ntoa(nodes[i + 20:i + 24]) |
||||
port = unpack("!H", nodes[i + 24:i + 26])[0] |
||||
n.append((nid, ip, port)) |
||||
|
||||
return n |
||||
|
||||
|
||||
def timer(t, f): |
||||
Timer(t, f).start() |
||||
|
||||
|
||||
def get_neighbor(target, nid, end=10): |
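||||
# Forge a node id whose first `end` bytes match the target (a remote node id |
||||
# or an info_hash), so other nodes think this crawler is close to them in the |
||||
# DHT keyspace and keep routing traffic its way. |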
||||
return target[:end] + nid[end:] |
||||
|
||||
|
||||
def writeFile(filename, str): |
||||
# Write string content to a file |
||||
try: |
||||
fp = open(filename, 'w+') |
||||
fp.write(str) |
||||
fp.close() |
||||
return True |
||||
except: |
||||
return False |
||||
|
||||
|
||||
def readFile(filename): |
||||
# Read and return the file contents |
||||
try: |
||||
fp = open(filename, 'r') |
||||
fBody = fp.read() |
||||
fp.close() |
||||
return fBody |
||||
except: |
||||
return False |
||||
|
||||
|
||||
class KNode(object): |
||||
|
||||
def __init__(self, nid, ip, port): |
||||
self.nid = nid |
||||
self.ip = ip |
||||
self.port = port |
||||
|
||||
|
||||
class DHTClient(Thread): |
||||
|
||||
def __init__(self, max_node_qsize): |
||||
Thread.__init__(self) |
||||
self.setDaemon(True) |
||||
self.max_node_qsize = max_node_qsize |
||||
self.nid = random_id() |
||||
self.nodes = deque(maxlen=max_node_qsize) |
||||
|
||||
def send_krpc(self, msg, address): |
||||
try: |
||||
self.ufd.sendto(bencode(msg), address) |
||||
except Exception: |
||||
pass |
||||
|
||||
def send_find_node(self, address, nid=None): |
||||
nid = get_neighbor(nid, self.nid) if nid else self.nid |
||||
tid = entropy(TID_LENGTH) |
||||
msg = { |
||||
"t": tid, |
||||
"y": "q", |
||||
"q": "find_node", |
||||
"a": { |
||||
"id": nid, |
||||
"target": random_id() |
||||
} |
||||
} |
||||
self.send_krpc(msg, address) |
||||
|
||||
def join_DHT(self): |
||||
for address in BOOTSTRAP_NODES: |
||||
self.send_find_node(address) |
||||
|
||||
def re_join_DHT(self): |
||||
if len(self.nodes) == 0: |
||||
self.join_DHT() |
||||
timer(RE_JOIN_DHT_INTERVAL, self.re_join_DHT) |
||||
|
||||
def auto_send_find_node(self): |
||||
wait = 1.0 / self.max_node_qsize |
||||
while True: |
||||
try: |
||||
node = self.nodes.popleft() |
||||
self.send_find_node((node.ip, node.port), node.nid) |
||||
except IndexError: |
||||
pass |
||||
try: |
||||
sleep(wait) |
||||
except KeyboardInterrupt: |
||||
os._exit(0) |
||||
|
||||
def process_find_node_response(self, msg, address): |
||||
nodes = decode_nodes(msg["r"]["nodes"]) |
||||
for node in nodes: |
||||
(nid, ip, port) = node |
||||
if len(nid) != 20: |
||||
continue |
||||
if ip == self.bind_ip: |
||||
continue |
||||
n = KNode(nid, ip, port) |
||||
self.nodes.append(n) |
||||
|
||||
|
||||
class DHTServer(DHTClient): |
||||
|
||||
def __init__(self, master, bind_ip, bind_port, max_node_qsize): |
||||
DHTClient.__init__(self, max_node_qsize) |
||||
|
||||
self.master = master |
||||
self.bind_ip = bind_ip |
||||
self.bind_port = bind_port |
||||
|
||||
self.process_request_actions = { |
||||
"get_peers": self.on_get_peers_request, |
||||
"announce_peer": self.on_announce_peer_request, |
||||
} |
||||
|
||||
self.ufd = socket.socket( |
||||
socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) |
||||
self.ufd.bind((self.bind_ip, self.bind_port)) |
||||
|
||||
timer(RE_JOIN_DHT_INTERVAL, self.re_join_DHT) |
||||
|
||||
def run(self): |
||||
self.re_join_DHT() |
||||
while True: |
||||
try: |
||||
(data, address) = self.ufd.recvfrom(65536) |
||||
msg = bdecode(data) |
||||
self.on_message(msg, address) |
||||
except Exception: |
||||
pass |
||||
|
||||
def on_message(self, msg, address): |
||||
try: |
||||
if msg["y"] == "r": |
||||
if msg["r"].has_key("nodes"): |
||||
self.process_find_node_response(msg, address) |
||||
elif msg["y"] == "q": |
||||
try: |
||||
self.process_request_actions[msg["q"]](msg, address) |
||||
except KeyError: |
||||
self.play_dead(msg, address) |
||||
except KeyError: |
||||
pass |
||||
|
||||
def on_get_peers_request(self, msg, address): |
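||||
# Record the requested info_hash, then answer with an empty node list and a |
||||
# token derived from the info_hash so the peer may announce_peer back later. |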
||||
try: |
||||
infohash = msg["a"]["info_hash"] |
||||
tid = msg["t"] |
||||
nid = msg["a"]["id"] |
||||
token = infohash[:TOKEN_LENGTH] |
||||
msg = { |
||||
"t": tid, |
||||
"y": "r", |
||||
"r": { |
||||
"id": get_neighbor(infohash, self.nid), |
||||
"nodes": "", |
||||
"token": token |
||||
} |
||||
} |
||||
self.master.log_hash(infohash, address) |
||||
self.send_krpc(msg, address) |
||||
except KeyError: |
||||
pass |
||||
|
||||
def on_announce_peer_request(self, msg, address): |
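||||
# A peer announces that it is downloading this info_hash: verify the token, |
||||
# work out its port (implied_port means "use the source port of the packet") |
||||
# and hand (info_hash, ip, port) to the master; always reply with ok(). |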
||||
try: |
||||
infohash = msg["a"]["info_hash"] |
||||
token = msg["a"]["token"] |
||||
nid = msg["a"]["id"] |
||||
tid = msg["t"] |
||||
|
||||
if infohash[:TOKEN_LENGTH] == token: |
||||
if msg["a"].has_key("implied_port ") and msg["a"]["implied_port "] != 0: |
||||
port = address[1] |
||||
else: |
||||
port = msg["a"]["port"] |
||||
self.master.log_announce(infohash, (address[0], port)) |
||||
except Exception: |
||||
print 'announce_peer error:', sys.exc_info()[1] |
||||
pass |
||||
finally: |
||||
self.ok(msg, address) |
||||
|
||||
def play_dead(self, msg, address): |
||||
try: |
||||
tid = msg["t"] |
||||
msg = { |
||||
"t": tid, |
||||
"y": "e", |
||||
"e": [202, "Server Error"] |
||||
} |
||||
self.send_krpc(msg, address) |
||||
except KeyError: |
||||
pass |
||||
|
||||
def ok(self, msg, address): |
||||
try: |
||||
tid = msg["t"] |
||||
nid = msg["a"]["id"] |
||||
msg = { |
||||
"t": tid, |
||||
"y": "r", |
||||
"r": { |
||||
"id": get_neighbor(nid, self.nid) |
||||
} |
||||
} |
||||
self.send_krpc(msg, address) |
||||
except KeyError: |
||||
pass |
||||
|
||||
|
||||
class Master(Thread): |
||||
|
||||
def __init__(self): |
||||
Thread.__init__(self) |
||||
self.setDaemon(True) |
||||
self.queue = Queue() |
||||
self.metadata_queue = Queue() |
||||
self.dbconn = mdb.connect( |
||||
DB_HOST, DB_USER, DB_PASS, DB_NAME, port=DB_PORT, charset='utf8') |
||||
self.dbconn.autocommit(False) |
||||
self.dbcurr = self.dbconn.cursor() |
||||
self.dbcurr.execute('SET NAMES utf8') |
||||
self.n_reqs = self.n_valid = self.n_new = 0 |
||||
self.n_downloading_lt = self.n_downloading_pt = 0 |
||||
self.visited = set() |
||||
self.black_list = load_res_blacklist(BLACK_FILE) |
||||
|
||||
def isSqlError(self, mysqlMsg): |
||||
mysqlMsg = str(mysqlMsg) |
||||
if "MySQLdb" in mysqlMsg: |
||||
return [False, 'MySQLdb组件缺失! <br>进入SSH命令行输入: pip install mysql-python'] |
||||
if "2002," in mysqlMsg: |
||||
return [False, '数据库连接失败,请检查数据库服务是否启动!'] |
||||
if "using password:" in mysqlMsg: |
||||
return [False, '数据库管理密码错误!'] |
||||
if "Connection refused" in mysqlMsg: |
||||
return [False, '数据库连接失败,请检查数据库服务是否启动!'] |
||||
if "1133" in mysqlMsg: |
||||
return [False, '数据库用户不存在!'] |
||||
if "1007" in mysqlMsg: |
||||
return [False, '数据库已经存在!'] |
||||
return [True, 'OK'] |
||||
|
||||
def query(self, sql): |
||||
try: |
||||
self.dbcurr.execute(sql) |
||||
result = self.dbcurr.fetchall() |
||||
data = map(list, result) |
||||
return data |
||||
except Exception as e: |
||||
print e |
||||
return [] |
||||
|
||||
def got_torrent(self): |
||||
binhash, address, data, dtype, start_time = self.metadata_queue.get() |
||||
if dtype == 'pt': |
||||
self.n_downloading_pt -= 1 |
||||
elif dtype == 'lt': |
||||
self.n_downloading_lt -= 1 |
||||
if not data: |
||||
return |
||||
self.n_valid += 1 |
||||
|
||||
save_metadata(self.dbcurr, binhash, address, |
||||
start_time, data, self.black_list) |
||||
self.n_new += 1 |
||||
|
||||
def run(self): |
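||||
# Main loop: drain the metadata queue (sleeping while ../limit.pl exists), |
||||
# de-duplicate hashes via self.visited, bump rows we already know about, |
||||
# otherwise spawn a pt/lt metadata download thread, and flush statistics |
||||
# into search_statusreport every 1000 requests. |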
||||
self.name = threading.currentThread().getName() |
||||
print self.name, 'started' |
||||
limit_file = '../limit.pl' |
||||
while True: |
||||
while self.metadata_queue.qsize() > 0: |
||||
if not os.path.exists(limit_file): |
||||
self.got_torrent() |
||||
else: |
||||
print 'db size limit reached, metadata saving paused' |
||||
time.sleep(600) |
||||
|
||||
address, binhash, dtype = self.queue.get() |
||||
if binhash in self.visited: |
||||
continue |
||||
if len(self.visited) > 100000: |
||||
self.visited = set() |
||||
self.visited.add(binhash) |
||||
|
||||
self.n_reqs += 1 |
||||
info_hash = binhash.encode('hex') |
||||
|
||||
utcnow = datetime.datetime.utcnow() |
||||
date = (utcnow + datetime.timedelta(hours=8)) |
||||
date = datetime.datetime(date.year, date.month, date.day) |
||||
|
||||
# Check if we have this info_hash |
||||
self.dbcurr.execute('SELECT id FROM search_hash WHERE info_hash=%s', (info_hash,)) |
||||
y = self.dbcurr.fetchone() |
||||
if y: |
||||
self.n_valid += 1 |
||||
# Update last-seen time and request count |
||||
self.dbcurr.execute( |
||||
'UPDATE search_hash SET last_seen=%s, requests=requests+1 WHERE info_hash=%s', (utcnow, info_hash)) |
||||
else: |
||||
if dtype == 'pt': |
||||
t = threading.Thread(target=simMetadata.download_metadata, args=( |
||||
address, binhash, self.metadata_queue)) |
||||
t.setDaemon(True) |
||||
t.start() |
||||
self.n_downloading_pt += 1 |
||||
elif dtype == 'lt' and self.n_downloading_lt < MAX_QUEUE_LT: |
||||
t = threading.Thread(target=ltMetadata.download_metadata, args=( |
||||
address, binhash, self.metadata_queue)) |
||||
t.setDaemon(True) |
||||
t.start() |
||||
self.n_downloading_lt += 1 |
||||
|
||||
if self.n_reqs >= 1000: |
||||
self.dbcurr.execute('INSERT INTO search_statusreport(date,new_hashes,total_requests, valid_requests) VALUES(%s,%s,%s,%s) ON DUPLICATE KEY UPDATE ' + |
||||
'total_requests=total_requests+%s, valid_requests=valid_requests+%s, new_hashes=new_hashes+%s', |
||||
(date, self.n_new, self.n_reqs, self.n_valid, self.n_reqs, self.n_valid, self.n_new)) |
||||
self.dbconn.commit() |
||||
print '\n', time.ctime(), 'n_reqs', self.n_reqs, 'n_valid', self.n_valid, 'n_new', self.n_new, 'n_queue', self.queue.qsize(), |
||||
print 'n_d_pt', self.n_downloading_pt, 'n_d_lt', self.n_downloading_lt, |
||||
self.n_reqs = self.n_valid = self.n_new = 0 |
||||
|
||||
def log_announce(self, binhash, address=None): |
||||
self.queue.put([address, binhash, 'pt']) |
||||
|
||||
def log_hash(self, binhash, address=None): |
||||
if not lt: |
||||
return |
||||
if is_ip_allowed(address[0]): |
||||
return |
||||
if self.n_downloading_lt < MAX_QUEUE_LT: |
||||
self.queue.put([address, binhash, 'lt']) |
||||
|
||||
|
||||
class DBCheck(Master): |
||||
|
||||
def __init__(self, master): |
||||
Master.__init__(self) |
||||
self.setDaemon(True) |
||||
|
||||
def delete_db(self, line=1): |
||||
sql = 'select id, info_hash from search_hash order by id limit ' + \ |
||||
str(line) |
||||
data = self.query(sql) |
||||
for x in range(len(data)): |
||||
iid = str(data[x][0]) |
||||
infohash = str(data[x][1]) |
||||
|
||||
sqldel = "delete from search_hash where id='" + iid + "'" |
||||
self.query(sqldel) |
||||
|
||||
sqldel2 = "delete from search_filelist where info_hash='" + infohash + "'" |
||||
self.query(sqldel2) |
||||
print 'delete ', iid, infohash, 'done' |
||||
|
||||
def check_db_size(self): |
||||
sql = "select (concat(round(sum(DATA_LENGTH/1024/1024),2),'M') + concat(round(sum(INDEX_LENGTH/1024/1024),2),'M') ) \ |
||||
as sdata from information_schema.tables where TABLE_SCHEMA='" + DB_NAME + "' and TABLE_NAME in('search_hash','search_filelist', 'search_statusreport')" |
||||
|
||||
db_size_limit = float(DB_SIZE_LIMIT) * 1024 |
||||
data = self.query(sql) |
||||
db_size = data[0][0] |
||||
|
||||
limit_file = '../limit.pl' |
||||
if db_size > db_size_limit: |
||||
if not os.path.exists(limit_file): |
||||
writeFile(limit_file, 'ok') |
||||
# delete old rows when the delete switch is enabled |
||||
if DB_DEL_SWITCH == '1': |
||||
self.delete_db(DB_DEL_LINE) |
||||
self.query('OPTIMIZE TABLE `search_hash`') |
||||
self.query('OPTIMIZE TABLE `search_filelist`') |
||||
else: |
||||
if os.path.exists(limit_file): |
||||
os.remove(limit_file) |
||||
print 'db size limit:', db_size_limit, 'db has size:', db_size |
||||
# self.delete_db(DB_DEL_LINE) |
||||
|
||||
def run(self): |
||||
while True: |
||||
self.check_db_size() |
||||
time.sleep(DB_SIZE_TICK) |
||||
|
||||
|
||||
class DBDataCheck(Master): |
||||
|
||||
def __init__(self, master): |
||||
Master.__init__(self) |
||||
self.setDaemon(True) |
||||
|
||||
def get_start_id(self): |
||||
file = '../start_pos.pl' |
||||
if os.path.exists(file): |
||||
c = readFile(file) |
||||
return int(c) |
||||
else: |
||||
return 0 |
||||
|
||||
def set_start_id(self, start_id): |
||||
file = '../start_pos.pl' |
||||
writeFile(file, str(start_id)) |
||||
return True |
||||
|
||||
def check_db_data(self): |
||||
|
||||
print 'check_db_data' |
||||
|
||||
max_data = self.query('select max(id) from search_hash') |
||||
max_id = max_data[0][0] |
||||
|
||||
min_id = self.get_start_id() |
||||
if min_id == None: |
||||
min_id = 0 |
||||
self.set_start_id(max_id) |
||||
|
||||
print 'min_id', min_id, 'max_id', max_id, 'ok!' |
||||
|
||||
limit_num = 1000 |
||||
page = math.ceil((max_id - min_id) / limit_num) + 1 |
||||
print 'page:',page |
||||
|
||||
for p in range(int(page)): |
||||
start_id = int(min_id) + p * limit_num |
||||
end_id = start_id + 1000 |
||||
sql = 'select sh.id, sh.info_hash as h1, sf.info_hash as h2 from search_hash sh \ |
||||
left join search_filelist sf on sh.info_hash = sf.info_hash \ |
||||
WHERE sf.info_hash is null and sh.id between ' + str(start_id) + ' and ' + str(end_id) + ' limit ' + str(limit_num) |
||||
print 'delete invalid data page ', p, 'start_id:', str(start_id), ' end_id:', str(end_id), 'done' |
||||
# print sql |
||||
list_data = [] |
||||
try: |
||||
list_data = self.query(sql) |
||||
except Exception as e: |
||||
print str(e) |
||||
|
||||
# print list_data |
||||
for x in range(len(list_data)): |
||||
iid = str(list_data[x][0]) |
||||
infohash = str(list_data[x][1]) |
||||
sqldel = "delete from search_hash where info_hash='" + infohash + "'" |
||||
self.query(sqldel) |
||||
print 'delete invalid data', iid, infohash, 'done' |
||||
|
||||
self.query('OPTIMIZE TABLE `search_hash`') |
||||
self.query('OPTIMIZE TABLE `search_filelist`') |
||||
|
||||
def run(self): |
||||
while True: |
||||
self.check_db_data() |
||||
print DB_DEL_TIMER |
||||
time.sleep(float(DB_DEL_TIMER)) |
||||
|
||||
|
||||
def announce(info_hash, address): |
||||
binhash = info_hash.decode('hex') |
||||
master.log_announce(binhash, address) |
||||
return 'ok' |
||||
|
||||
|
||||
def rpc_server(): |
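||||
# Expose announce() over XML-RPC on localhost:8004 so other processes can |
||||
# push (info_hash, address) pairs into the crawl queue. |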
||||
rpcserver = SimpleXMLRPCServer.SimpleXMLRPCServer( |
||||
('localhost', 8004), logRequests=False) |
||||
rpcserver.register_function(announce, 'announce') |
||||
print 'Starting xml rpc server...' |
||||
rpcserver.serve_forever() |
||||
|
||||
if __name__ == "__main__": |
||||
# max_node_qsize bigger, bandwidth bigger, speed higher |
||||
master = Master() |
||||
master.start() |
||||
|
||||
rpcthread = threading.Thread(target=rpc_server) |
||||
rpcthread.setDaemon(True) |
||||
rpcthread.start() |
||||
|
||||
print 'DBCheck start' |
||||
# check = DBCheck(master) |
||||
# check.start() |
||||
|
||||
print 'DBDataCheck start' |
||||
# checkData = DBDataCheck(master) |
||||
# checkData.start() |
||||
|
||||
print 'DHTServer start' |
||||
dht = DHTServer(master, "0.0.0.0", 6881, max_node_qsize=MAX_NODE_QSIZE) |
||||
dht.start() |
||||
dht.auto_send_find_node() |