- Fixed unicode support in btjunkie and mininova search engines

This commit is contained in:
Christophe Dumez
2009-03-26 16:53:17 +00:00
parent 813f52f452
commit 22ff0374d5
8 changed files with 173 additions and 46 deletions

View File

@@ -2,6 +2,7 @@
<qresource> <qresource>
<file>search_engine/nova2.py</file> <file>search_engine/nova2.py</file>
<file>search_engine/novaprinter.py</file> <file>search_engine/novaprinter.py</file>
<file>search_engine/helpers.py</file>
<file>search_engine/engines/btjunkie.png</file> <file>search_engine/engines/btjunkie.png</file>
<file>search_engine/engines/btjunkie.py</file> <file>search_engine/engines/btjunkie.py</file>
<file>search_engine/engines/isohunt.png</file> <file>search_engine/engines/isohunt.png</file>

View File

@@ -58,6 +58,8 @@ SearchEngine::SearchEngine(bittorrent *BTSession, QSystemTrayIcon *myTrayIcon, b
search_stopped = false; search_stopped = false;
// Creating Search Process // Creating Search Process
searchProcess = new QProcess(this); searchProcess = new QProcess(this);
QStringList env = QProcess::systemEnvironment();
searchProcess->setEnvironment(env);
connect(searchProcess, SIGNAL(started()), this, SLOT(searchStarted())); connect(searchProcess, SIGNAL(started()), this, SLOT(searchStarted()));
connect(searchProcess, SIGNAL(readyReadStandardOutput()), this, SLOT(readSearchOutput())); connect(searchProcess, SIGNAL(readyReadStandardOutput()), this, SLOT(readSearchOutput()));
connect(searchProcess, SIGNAL(finished(int, QProcess::ExitStatus)), this, SLOT(searchFinished(int,QProcess::ExitStatus))); connect(searchProcess, SIGNAL(finished(int, QProcess::ExitStatus)), this, SLOT(searchFinished(int,QProcess::ExitStatus)));
@@ -216,6 +218,7 @@ void SearchEngine::downloadSelectedItem(const QModelIndex& index){
// line to search results calling appendSearchResult(). // line to search results calling appendSearchResult().
void SearchEngine::readSearchOutput(){ void SearchEngine::readSearchOutput(){
QByteArray output = searchProcess->readAllStandardOutput(); QByteArray output = searchProcess->readAllStandardOutput();
std::cerr << searchProcess->readAllStandardError().data() << std::endl;
output.replace("\r", ""); output.replace("\r", "");
QList<QByteArray> lines_list = output.split('\n'); QList<QByteArray> lines_list = output.split('\n');
if(!search_result_line_truncated.isEmpty()){ if(!search_result_line_truncated.isEmpty()){
@@ -224,7 +227,7 @@ void SearchEngine::readSearchOutput(){
} }
search_result_line_truncated = lines_list.takeLast().trimmed(); search_result_line_truncated = lines_list.takeLast().trimmed();
foreach(const QByteArray &line, lines_list){ foreach(const QByteArray &line, lines_list){
appendSearchResult(QString(line)); appendSearchResult(QString::fromUtf8(line));
} }
currentSearchTab->getCurrentLabel()->setText(tr("Results")+QString::fromUtf8(" <i>(")+misc::toQString(nb_search_results)+QString::fromUtf8(")</i>:")); currentSearchTab->getCurrentLabel()->setText(tr("Results")+QString::fromUtf8(" <i>(")+misc::toQString(nb_search_results)+QString::fromUtf8(")</i>:"));
} }
@@ -263,6 +266,14 @@ void SearchEngine::updateNova() {
} }
QFile::copy(":/search_engine/novaprinter.py", filePath); QFile::copy(":/search_engine/novaprinter.py", filePath);
} }
QFile(misc::qBittorrentPath()+"search_engine"+QDir::separator()+"helpers.py").setPermissions(perm);
filePath = misc::qBittorrentPath()+"search_engine"+QDir::separator()+"helpers.py";
if(misc::getPluginVersion(":/search_engine/helpers.py") > misc::getPluginVersion(filePath)) {
if(QFile::exists(filePath)){
QFile::remove(filePath);
}
QFile::copy(":/search_engine/helpers.py", filePath);
}
QString destDir = misc::qBittorrentPath()+"search_engine"+QDir::separator()+"engines"+QDir::separator(); QString destDir = misc::qBittorrentPath()+"search_engine"+QDir::separator()+"engines"+QDir::separator();
QDir shipped_subDir(":/search_engine/engines/"); QDir shipped_subDir(":/search_engine/engines/");
QStringList files = shipped_subDir.entryList(); QStringList files = shipped_subDir.entryList();

View File

@@ -1,5 +1,5 @@
#VERSION: 1.13 #VERSION: 2.1
#AUTHORS: Fabien Devaux (fab@gnux.info) #AUTHORS: Christophe Dumez (chris@qbittorrent.org)
# Redistribution and use in source and binary forms, with or without # Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met: # modification, are permitted provided that the following conditions are met:
@@ -25,37 +25,88 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE. # POSSIBILITY OF SUCH DAMAGE.
from novaprinter import prettyPrinter from novaprinter import prettyPrinter
import urllib from helpers import retrieve_url
import sgmllib
import re import re
class btjunkie(object): class btjunkie(object):
url = 'http://btjunkie.org' url = 'http://btjunkie.org'
name = 'btjunkie' name = 'btjunkie'
def __init__(self):
self.results = []
self.parser = self.SimpleSGMLParser(self.results, self.url)
class SimpleSGMLParser(sgmllib.SGMLParser):
def __init__(self, results, url, *args):
sgmllib.SGMLParser.__init__(self)
self.url = url
self.th_counter = None
self.current_item = None
self.results = results
def start_a(self, attr):
params = dict(attr)
#print params
if params.has_key('href') and params['href'].startswith("http://dl.btjunkie.org/torrent"):
self.current_item = {}
self.th_counter = 0
self.current_item['link']=params['href'].strip()
def handle_data(self, data):
if self.th_counter == 0:
if not self.current_item.has_key('name'):
self.current_item['name'] = ''
self.current_item['name']+= data.strip()
elif self.th_counter == 3:
if not self.current_item.has_key('size'):
self.current_item['size'] = ''
self.current_item['size']+= data.strip()
elif self.th_counter == 5:
if not self.current_item.has_key('seeds'):
self.current_item['seeds'] = ''
self.current_item['seeds']+= data.strip()
elif self.th_counter == 6:
if not self.current_item.has_key('leech'):
self.current_item['leech'] = ''
self.current_item['leech']+= data.strip()
def start_th(self,attr):
if isinstance(self.th_counter,int):
self.th_counter += 1
if self.th_counter > 6:
self.th_counter = None
# Display item
if self.current_item:
self.current_item['engine_url'] = self.url
if not self.current_item['seeds'].isdigit():
self.current_item['seeds'] = 0
if not self.current_item['leech'].isdigit():
self.current_item['leech'] = 0
prettyPrinter(self.current_item)
self.results.append('a')
def search(self, what): def search(self, what):
ret = []
i = 1 i = 1
while True and i<11: while True and i<11:
res = 0 results = []
dat = urllib.urlopen(self.url+'/search?q=%s&o=52&p=%d'%(what,i)).read().decode('utf8', 'replace') parser = self.SimpleSGMLParser(results, self.url)
# I know it's not very readable, but the SGML parser feels in pain dat = retrieve_url(self.url+'/search?q=%s&o=52&p=%d'%(what,i))
section_re = re.compile('(?s)href="http://dl.btjunkie.org/torrent/.*?</tr><tr') # Remove <font> tags from page
torrent_re = re.compile('(?s)href="(?P<link>.*?[^"]+).*?' p = re.compile( '<[/]?font.*?>')
'class="BlckUnd">(?P<name>.*?)</a>.*?' dat = p.sub('', dat)
'>(?P<size>\d+MB)</font>.*?' #print dat
'>.*</font>.*' #return
'>(?P<seeds>\d+)</font>.*?' results_re = re.compile('(?s)class="tab_results">.*')
'>(?P<leech>\d+)</font>') for match in results_re.finditer(dat):
for match in section_re.finditer(dat): res_tab = match.group(0)
txt = match.group(0) parser.feed(res_tab)
m = torrent_re.search(txt) parser.close()
if m:
torrent_infos = m.groupdict()
torrent_infos['name'] = re.sub('<.*?>', '', torrent_infos['name'])
torrent_infos['engine_url'] = self.url
#torrent_infos['link'] = self.url+torrent_infos['link']
prettyPrinter(torrent_infos)
res = res + 1
if res == 0:
break break
i = i + 1 if len(results) <= 0:
break
i += 1

View File

@@ -1,4 +1,4 @@
#VERSION: 1.13 #VERSION: 1.2
#AUTHORS: Fabien Devaux (fab@gnux.info) #AUTHORS: Fabien Devaux (fab@gnux.info)
# Redistribution and use in source and binary forms, with or without # Redistribution and use in source and binary forms, with or without
@@ -26,7 +26,7 @@
# POSSIBILITY OF SUCH DAMAGE. # POSSIBILITY OF SUCH DAMAGE.
from novaprinter import prettyPrinter from novaprinter import prettyPrinter
import urllib from helpers import retrieve_url
from xml.dom import minidom from xml.dom import minidom
import re import re
@@ -64,12 +64,15 @@ class mininova(object):
return ''.join([ get_text(n) for n in txt.childNodes]) return ''.join([ get_text(n) for n in txt.childNodes])
page = 1 page = 1
while True and page<11: while True and page<11:
file = open('/home/chris/mytest.txt', 'w')
file.write(self.url+'/search/%s/seeds/%d'%(what, page))
file.close()
res = 0 res = 0
dat = urllib.urlopen(self.url+'/search/%s/seeds/%d'%(what, page)).read().decode('utf-8', 'replace') dat = retrieve_url(self.url+'/search/%s/seeds/%d'%(what, page))
dat = re.sub("<a href=\"http://www.boardreader.com/index.php.*\"", "<a href=\"plop\"", dat) dat = re.sub("<a href=\"http://www.boardreader.com/index.php.*\"", "<a href=\"plop\"", dat)
dat = re.sub("<=", "&lt;=", dat) dat = re.sub("<=", "&lt;=", dat)
dat = re.sub("&\s", "&amp; ", dat) dat = re.sub("&\s", "&amp; ", dat)
x = minidom.parseString(dat.encode('utf-8', 'replace')) x = minidom.parseString(dat)
table = x.getElementsByTagName('table').item(0) table = x.getElementsByTagName('table').item(0)
if not table: return if not table: return
for tr in table.getElementsByTagName('tr'): for tr in table.getElementsByTagName('tr'):

View File

@@ -1,5 +1,5 @@
isohunt: 1.1 isohunt: 1.1
torrentreactor: 1.02 torrentreactor: 1.02
btjunkie: 1.13 btjunkie: 2.1
mininova: 1.13 mininova: 1.2
piratebay: 1.04 piratebay: 1.04

View File

@@ -0,0 +1,59 @@
#VERSION: 1.0
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re, htmlentitydefs
import urllib2
def htmlentitydecode(s):
# First convert alpha entities (such as &eacute;)
# (Inspired from http://mail.python.org/pipermail/python-list/2007-June/443813.html)
def entity2char(m):
entity = m.group(1)
if entity in htmlentitydefs.name2codepoint:
return unichr(htmlentitydefs.name2codepoint[entity])
return u" " # Unknown entity: We replace with a space.
t = re.sub(u'&(%s);' % u'|'.join(htmlentitydefs.name2codepoint), entity2char, s)
# Then convert numerical entities (such as &#233;)
t = re.sub(u'&#(\d+);', lambda x: unichr(int(x.group(1))), t)
# Then convert hexa entities (such as &#x00E9;)
return re.sub(u'&#x(\w+);', lambda x: unichr(int(x.group(1),16)), t)
def retrieve_url(url):
    """ Return the content of the url page as a string.

    The page is decoded using the charset advertised in the
    Content-Type response header (utf-8 when absent or unusable),
    HTML entities are decoded, and the result is re-encoded as utf-8.
    May raise urllib2.URLError / urllib2.HTTPError on network failure.
    """
    req = urllib2.Request(url)
    response = urllib2.urlopen(req)
    dat = response.read()
    info = response.info()
    charset = 'utf-8'
    try:
        # e.g. "text/html; charset=ISO-8859-1" -> "ISO-8859-1"
        ignore, charset = info['Content-Type'].split('charset=')
        charset = charset.strip()
    except (KeyError, TypeError, ValueError):
        # Header missing or carries no charset parameter: keep utf-8
        pass
    try:
        # 'replace' so a mis-declared charset cannot crash the plugin
        dat = dat.decode(charset, 'replace')
    except LookupError:
        # Server advertised a codec Python does not know about
        dat = dat.decode('utf-8', 'replace')
    dat = htmlentitydecode(dat)
    return dat.encode('utf-8', 'replace')

View File

@@ -98,18 +98,18 @@ if __name__ == '__main__':
engines_list = supported_engines engines_list = supported_engines
what = '+'.join(sys.argv[2:]) what = '+'.join(sys.argv[2:])
threads = [] threads = []
for engine in engines_list: for engine in engines_list:
try: if 1:
#try:
if THREADED: if THREADED:
exec "l = EngineLauncher(%s(), what)" % engine exec "l = EngineLauncher(%s(), what)" % engine
threads.append(l) threads.append(l)
l.start() l.start()
else: else:
engine().search(what) engine().search(what)
except: #except:
pass # pass
if THREADED: if THREADED:
for t in threads: for t in threads:
t.join() t.join()

View File

@@ -1,4 +1,4 @@
#VERSION: 1.11 #VERSION: 1.2
# Redistribution and use in source and binary forms, with or without # Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met: # modification, are permitted provided that the following conditions are met:
@@ -26,7 +26,9 @@
def prettyPrinter(dictionnary): def prettyPrinter(dictionnary):
dictionnary['size'] = anySizeToBytes(dictionnary['size']) dictionnary['size'] = anySizeToBytes(dictionnary['size'])
print "%(link)s|%(name)s|%(size)s|%(seeds)s|%(leech)s|%(engine_url)s" % dictionnary if isinstance(dictionnary['name'], unicode):
dictionnary['name'] = dictionnary['name'].encode('utf-8')
print dictionnary['link'],'|',dictionnary['name'],'|',dictionnary['size'],'|',dictionnary['seeds'],'|',dictionnary['leech'],'|',dictionnary['engine_url']
def anySizeToBytes(size_string): def anySizeToBytes(size_string):
""" """