diff --git a/src/search.qrc b/src/search.qrc
index 074110bca..735122372 100644
--- a/src/search.qrc
+++ b/src/search.qrc
@@ -2,6 +2,7 @@
search_engine/nova2.py
search_engine/novaprinter.py
+ search_engine/helpers.py
search_engine/engines/btjunkie.png
search_engine/engines/btjunkie.py
search_engine/engines/isohunt.png
@@ -13,4 +14,4 @@
search_engine/engines/torrentreactor.png
search_engine/engines/torrentreactor.py
-
\ No newline at end of file
+
diff --git a/src/searchEngine.cpp b/src/searchEngine.cpp
index 43e148ef7..8d5acc58c 100644
--- a/src/searchEngine.cpp
+++ b/src/searchEngine.cpp
@@ -58,6 +58,8 @@ SearchEngine::SearchEngine(bittorrent *BTSession, QSystemTrayIcon *myTrayIcon, b
search_stopped = false;
// Creating Search Process
searchProcess = new QProcess(this);
+ QStringList env = QProcess::systemEnvironment();
+ searchProcess->setEnvironment(env);
connect(searchProcess, SIGNAL(started()), this, SLOT(searchStarted()));
connect(searchProcess, SIGNAL(readyReadStandardOutput()), this, SLOT(readSearchOutput()));
connect(searchProcess, SIGNAL(finished(int, QProcess::ExitStatus)), this, SLOT(searchFinished(int,QProcess::ExitStatus)));
@@ -224,7 +226,7 @@
}
search_result_line_truncated = lines_list.takeLast().trimmed();
foreach(const QByteArray &line, lines_list){
- appendSearchResult(QString(line));
+ appendSearchResult(QString::fromUtf8(line));
}
currentSearchTab->getCurrentLabel()->setText(tr("Results")+QString::fromUtf8(" (")+misc::toQString(nb_search_results)+QString::fromUtf8("):"));
}
@@ -263,6 +266,14 @@ void SearchEngine::updateNova() {
}
QFile::copy(":/search_engine/novaprinter.py", filePath);
}
+ QFile(misc::qBittorrentPath()+"search_engine"+QDir::separator()+"helpers.py").setPermissions(perm);
+ filePath = misc::qBittorrentPath()+"search_engine"+QDir::separator()+"helpers.py";
+ if(misc::getPluginVersion(":/search_engine/helpers.py") > misc::getPluginVersion(filePath)) {
+ if(QFile::exists(filePath)){
+ QFile::remove(filePath);
+ }
+ QFile::copy(":/search_engine/helpers.py", filePath);
+ }
QString destDir = misc::qBittorrentPath()+"search_engine"+QDir::separator()+"engines"+QDir::separator();
QDir shipped_subDir(":/search_engine/engines/");
QStringList files = shipped_subDir.entryList();
diff --git a/src/search_engine/engines/btjunkie.py b/src/search_engine/engines/btjunkie.py
index 2ff9cb553..57d663509 100644
--- a/src/search_engine/engines/btjunkie.py
+++ b/src/search_engine/engines/btjunkie.py
@@ -1,5 +1,5 @@
-#VERSION: 1.13
-#AUTHORS: Fabien Devaux (fab@gnux.info)
+#VERSION: 2.1
+#AUTHORS: Christophe Dumez (chris@qbittorrent.org)
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
@@ -25,37 +25,88 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
+
from novaprinter import prettyPrinter
-import urllib
+from helpers import retrieve_url
+import sgmllib
import re
class btjunkie(object):
- url = 'http://btjunkie.org'
- name = 'btjunkie'
+ url = 'http://btjunkie.org'
+ name = 'btjunkie'
+
+ def __init__(self):
+ self.results = []
+ self.parser = self.SimpleSGMLParser(self.results, self.url)
+
+ class SimpleSGMLParser(sgmllib.SGMLParser):
+ def __init__(self, results, url, *args):
+ sgmllib.SGMLParser.__init__(self)
+ self.url = url
+ self.th_counter = None
+ self.current_item = None
+ self.results = results
+
+ def start_a(self, attr):
+ params = dict(attr)
+ #print params
+ if params.has_key('href') and params['href'].startswith("http://dl.btjunkie.org/torrent"):
+ self.current_item = {}
+ self.th_counter = 0
+ self.current_item['link']=params['href'].strip()
+
+ def handle_data(self, data):
+ if self.th_counter == 0:
+ if not self.current_item.has_key('name'):
+ self.current_item['name'] = ''
+ self.current_item['name']+= data.strip()
+ elif self.th_counter == 3:
+ if not self.current_item.has_key('size'):
+ self.current_item['size'] = ''
+ self.current_item['size']+= data.strip()
+ elif self.th_counter == 5:
+ if not self.current_item.has_key('seeds'):
+ self.current_item['seeds'] = ''
+ self.current_item['seeds']+= data.strip()
+ elif self.th_counter == 6:
+ if not self.current_item.has_key('leech'):
+ self.current_item['leech'] = ''
+ self.current_item['leech']+= data.strip()
+
+ def start_th(self,attr):
+ if isinstance(self.th_counter,int):
+ self.th_counter += 1
+ if self.th_counter > 6:
+ self.th_counter = None
+ # Display item
+ if self.current_item:
+ self.current_item['engine_url'] = self.url
+ if not self.current_item['seeds'].isdigit():
+ self.current_item['seeds'] = 0
+ if not self.current_item['leech'].isdigit():
+ self.current_item['leech'] = 0
+ prettyPrinter(self.current_item)
+ self.results.append('a')
- def search(self, what):
- i = 1
- while True and i<11:
- res = 0
- dat = urllib.urlopen(self.url+'/search?q=%s&o=52&p=%d'%(what,i)).read().decode('utf8', 'replace')
- # I know it's not very readable, but the SGML parser feels in pain
- section_re = re.compile('(?s)href="http://dl.btjunkie.org/torrent/.*?.*?[^"]+).*?'
- 'class="BlckUnd">(?P.*?).*?'
- '>(?P\d+MB).*?'
- '>.*.*'
- '>(?P\d+).*?'
- '>(?P\d+)')
- for match in section_re.finditer(dat):
- txt = match.group(0)
- m = torrent_re.search(txt)
- if m:
- torrent_infos = m.groupdict()
- torrent_infos['name'] = re.sub('<.*?>', '', torrent_infos['name'])
- torrent_infos['engine_url'] = self.url
- #torrent_infos['link'] = self.url+torrent_infos['link']
- prettyPrinter(torrent_infos)
- res = res + 1
- if res == 0:
- break
- i = i + 1
+ def search(self, what):
+ ret = []
+ i = 1
+ while True and i<11:
+ results = []
+ parser = self.SimpleSGMLParser(results, self.url)
+ dat = retrieve_url(self.url+'/search?q=%s&o=52&p=%d'%(what,i))
+ # Remove tags from page
+ p = re.compile( '<[/]?font.*?>')
+ dat = p.sub('', dat)
+ #print dat
+ #return
+ results_re = re.compile('(?s)class="tab_results">.*')
+ for match in results_re.finditer(dat):
+ res_tab = match.group(0)
+ parser.feed(res_tab)
+ parser.close()
+ break
+ if len(results) <= 0:
+ break
+ i += 1
+
diff --git a/src/search_engine/engines/mininova.py b/src/search_engine/engines/mininova.py
index cbd07ed0f..400b155cd 100644
--- a/src/search_engine/engines/mininova.py
+++ b/src/search_engine/engines/mininova.py
@@ -1,4 +1,4 @@
-#VERSION: 1.13
+#VERSION: 1.2
#AUTHORS: Fabien Devaux (fab@gnux.info)
# Redistribution and use in source and binary forms, with or without
@@ -26,7 +26,7 @@
# POSSIBILITY OF SUCH DAMAGE.
from novaprinter import prettyPrinter
-import urllib
+from helpers import retrieve_url
from xml.dom import minidom
import re
@@ -64,12 +64,12 @@ class mininova(object):
return ''.join([ get_text(n) for n in txt.childNodes])
page = 1
while True and page<11:
res = 0
- dat = urllib.urlopen(self.url+'/search/%s/seeds/%d'%(what, page)).read().decode('utf-8', 'replace')
+ dat = retrieve_url(self.url+'/search/%s/seeds/%d'%(what, page))
dat = re.sub("