Mirror of https://github.com/qbittorrent/qBittorrent.git (synced 2026-01-03 14:12:30 -06:00)

Improve coding style

committed by sledgehammer999
parent d3f46452a9
commit 1728c16580
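The diff is a mechanical style change: opening braces of control statements (if/else, for, while, do, switch, try/catch) move from the end of the statement onto their own line, matching the brace placement the codebase already uses for function and class bodies. A minimal before/after sketch of the shape being converted; Example::doWork() is a hypothetical function written for illustration, not code taken from this commit:

    // Before: brace on the same line as the control statement (illustrative only)
    void Example::doWork()
    {
        if (isReady()) {
            process();
        }
        else {
            waitForData();
        }
    }

    // After: brace on its own line, as applied throughout this commit (illustrative only)
    void Example::doWork()
    {
        if (isReady())
        {
            process();
        }
        else
        {
            waitForData();
        }
    }

Only brace placement changes; no statement is added, removed, or reordered.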
@@ -65,13 +65,16 @@ bool BandwidthScheduler::isTimeForAlternative() const
const int day = QDate::currentDate().dayOfWeek();
bool alternative = false;

if (start > end) {
if (start > end)
{
std::swap(start, end);
alternative = true;
}

if ((start <= now) && (end >= now)) {
switch (schedulerDays) {
if ((start <= now) && (end >= now))
{
switch (schedulerDays)
{
case EVERY_DAY:
alternative = !alternative;
break;
@@ -96,7 +99,8 @@ void BandwidthScheduler::onTimeout()
{
const bool alternative = isTimeForAlternative();

if (alternative != m_lastAlternative) {
if (alternative != m_lastAlternative)
{
m_lastAlternative = alternative;
emit bandwidthLimitRequested(alternative);
}

@@ -54,7 +54,8 @@ lt::storage_holder CustomDiskIOThread::new_torrent(const lt::storage_params &sto
lt::storage_holder storageHolder = m_nativeDiskIO->new_torrent(storageParams, torrent);

const QString savePath = Utils::Fs::expandPathAbs(QString::fromStdString(storageParams.path));
m_storageData[storageHolder] = {
m_storageData[storageHolder] =
{
savePath
, storageParams.mapped_files ? *storageParams.mapped_files : storageParams.files
, storageParams.priorities};
@@ -196,7 +197,8 @@ void CustomDiskIOThread::handleCompleteFiles(lt::storage_index_t storage, const
const QDir saveDir {savePath};
const StorageData storageData = m_storageData[storage];
const lt::file_storage &fileStorage = storageData.files;
for (const lt::file_index_t fileIndex : fileStorage.file_range()) {
for (const lt::file_index_t fileIndex : fileStorage.file_range())
{
// ignore files that have priority 0
if ((storageData.filePriorities.end_index() > fileIndex) && (storageData.filePriorities[fileIndex] == lt::dont_download))
continue;
@@ -205,10 +207,12 @@ void CustomDiskIOThread::handleCompleteFiles(lt::storage_index_t storage, const
if (fileStorage.pad_file_at(fileIndex)) continue;

const QString filePath = QString::fromStdString(fileStorage.file_path(fileIndex));
if (filePath.endsWith(QB_EXT)) {
if (filePath.endsWith(QB_EXT))
{
const QString completeFilePath = filePath.left(filePath.size() - QB_EXT.size());
QFile completeFile {saveDir.absoluteFilePath(completeFilePath)};
if (completeFile.exists()) {
if (completeFile.exists())
{
QFile incompleteFile {saveDir.absoluteFilePath(filePath)};
incompleteFile.remove();
completeFile.rename(incompleteFile.fileName());
@@ -261,7 +265,8 @@ void CustomStorage::handleCompleteFiles(const QString &savePath)
const QDir saveDir {savePath};

const lt::file_storage &fileStorage = files();
for (const lt::file_index_t fileIndex : fileStorage.file_range()) {
for (const lt::file_index_t fileIndex : fileStorage.file_range())
{
// ignore files that have priority 0
if ((m_filePriorities.end_index() > fileIndex) && (m_filePriorities[fileIndex] == lt::dont_download))
continue;
@@ -270,10 +275,12 @@ void CustomStorage::handleCompleteFiles(const QString &savePath)
if (fileStorage.pad_file_at(fileIndex)) continue;

const QString filePath = QString::fromStdString(fileStorage.file_path(fileIndex));
if (filePath.endsWith(QB_EXT)) {
if (filePath.endsWith(QB_EXT))
{
const QString completeFilePath = filePath.left(filePath.size() - QB_EXT.size());
QFile completeFile {saveDir.absoluteFilePath(completeFilePath)};
if (completeFile.exists()) {
if (completeFile.exists())
{
QFile incompleteFile {saveDir.absoluteFilePath(filePath)};
incompleteFile.remove();
completeFile.rename(incompleteFile.fileName());

@@ -32,7 +32,8 @@ namespace BitTorrent
{
bool isValidDownloadPriority(const DownloadPriority priority)
{
switch (priority) {
switch (priority)
{
case DownloadPriority::Ignored:
case DownloadPriority::Normal:
case DownloadPriority::High:

@@ -48,8 +48,10 @@ namespace

const char *octetStart = str;
char *endptr;
for (; *str; ++str) {
if (*str == '.') {
for (; *str; ++str)
{
if (*str == '.')
{
const long int extractedNum = strtol(octetStart, &endptr, 10);
if ((extractedNum >= 0L) && (extractedNum <= 255L))
m_buf[octetIndex++] = static_cast<unsigned char>(extractedNum);
@@ -65,7 +67,8 @@ namespace
}
}

if (str != octetStart) {
if (str != octetStart)
{
const long int extractedNum = strtol(octetStart, &endptr, 10);
if ((extractedNum >= 0L) && (extractedNum <= 255L))
m_buf[octetIndex] = static_cast<unsigned char>(strtol(octetStart, &endptr, 10));
@@ -124,7 +127,8 @@ int FilterParserThread::parseDATFilterFile()
QFile file(m_filePath);
if (!file.exists()) return ruleCount;

if (!file.open(QIODevice::ReadOnly | QIODevice::Text)) {
if (!file.open(QIODevice::ReadOnly | QIODevice::Text))
{
LogMsg(tr("I/O Error: Could not open IP filter file in read mode."), Log::CRITICAL);
return ruleCount;
}
@@ -142,7 +146,8 @@ int FilterParserThread::parseDATFilterFile()
LogMsg(msg, Log::CRITICAL);
};

while (true) {
while (true)
{
bytesRead = file.read(buffer.data() + offset, BUFFER_SIZE - offset - 1);
if (bytesRead < 0)
break;
@@ -150,12 +155,16 @@ int FilterParserThread::parseDATFilterFile()
if ((bytesRead == 0) && (dataSize == 0))
break;

for (start = 0; start < dataSize; ++start) {
for (start = 0; start < dataSize; ++start)
{
endOfLine = -1;
// The file might have ended without the last line having a newline
if (!((bytesRead == 0) && (dataSize > 0))) {
for (int i = start; i < dataSize; ++i) {
if (buffer[i] == '\n') {
if (!((bytesRead == 0) && (dataSize > 0)))
{
for (int i = start; i < dataSize; ++i)
{
if (buffer[i] == '\n')
{
endOfLine = i;
// We need to NULL the newline in case the line has only an IP range.
// In that case the parser won't work for the end IP, because it ends
@@ -165,12 +174,14 @@ int FilterParserThread::parseDATFilterFile()
}
}
}
else {
else
{
endOfLine = dataSize;
buffer[dataSize] = '\0';
}

if (endOfLine == -1) {
if (endOfLine == -1)
{
// read the next chunk from file
// but first move(copy) the leftover data to the front of the buffer
offset = dataSize - start;
@@ -181,7 +192,8 @@ int FilterParserThread::parseDATFilterFile()
++nbLine;

if ((buffer[start] == '#')
|| ((buffer[start] == '/') && ((start + 1 < dataSize) && (buffer[start + 1] == '/')))) {
|| ((buffer[start] == '/') && ((start + 1 < dataSize) && (buffer[start + 1] == '/'))))
{
start = endOfLine;
continue;
}
@@ -194,11 +206,13 @@ int FilterParserThread::parseDATFilterFile()
findAndNullDelimiter(buffer.data(), ',', firstComma + 1, endOfLine);

// Check if there is an access value (apparently not mandatory)
if (firstComma != -1) {
if (firstComma != -1)
{
// There is possibly one
const long int nbAccess = strtol(buffer.data() + firstComma + 1, nullptr, 10);
// Ignoring this rule because access value is too high
if (nbAccess > 127L) {
if (nbAccess > 127L)
{
start = endOfLine;
continue;
}
@@ -207,7 +221,8 @@ int FilterParserThread::parseDATFilterFile()
// IP Range should be split by a dash
const int endOfIPRange = ((firstComma == -1) ? (endOfLine - 1) : (firstComma - 1));
const int delimIP = findAndNullDelimiter(buffer.data(), '-', start, endOfIPRange);
if (delimIP == -1) {
if (delimIP == -1)
{
++parseErrorCount;
addLog(tr("IP filter line %1 is malformed.").arg(nbLine));
start = endOfLine;
@@ -216,7 +231,8 @@ int FilterParserThread::parseDATFilterFile()

lt::address startAddr;
int newStart = trim(buffer.data(), start, delimIP - 1);
if (!parseIPAddress(buffer.data() + newStart, startAddr)) {
if (!parseIPAddress(buffer.data() + newStart, startAddr))
{
++parseErrorCount;
addLog(tr("IP filter line %1 is malformed. Start IP of the range is malformed.").arg(nbLine));
start = endOfLine;
@@ -225,7 +241,8 @@ int FilterParserThread::parseDATFilterFile()

lt::address endAddr;
newStart = trim(buffer.data(), delimIP + 1, endOfIPRange);
if (!parseIPAddress(buffer.data() + newStart, endAddr)) {
if (!parseIPAddress(buffer.data() + newStart, endAddr))
{
++parseErrorCount;
addLog(tr("IP filter line %1 is malformed. End IP of the range is malformed.").arg(nbLine));
start = endOfLine;
@@ -233,7 +250,8 @@ int FilterParserThread::parseDATFilterFile()
}

if ((startAddr.is_v4() != endAddr.is_v4())
|| (startAddr.is_v6() != endAddr.is_v6())) {
|| (startAddr.is_v6() != endAddr.is_v6()))
{
++parseErrorCount;
addLog(tr("IP filter line %1 is malformed. One IP is IPv4 and the other is IPv6!").arg(nbLine));
start = endOfLine;
@@ -243,11 +261,13 @@ int FilterParserThread::parseDATFilterFile()
start = endOfLine;

// Now Add to the filter
try {
try
{
m_filter.add_rule(startAddr, endAddr, lt::ip_filter::blocked);
++ruleCount;
}
catch (const std::exception &e) {
catch (const std::exception &e)
{
++parseErrorCount;
addLog(tr("IP filter exception thrown for line %1. Exception is: %2")
.arg(nbLine).arg(QString::fromLocal8Bit(e.what())));
@@ -271,7 +291,8 @@ int FilterParserThread::parseP2PFilterFile()
QFile file(m_filePath);
if (!file.exists()) return ruleCount;

if (!file.open(QIODevice::ReadOnly | QIODevice::Text)) {
if (!file.open(QIODevice::ReadOnly | QIODevice::Text))
{
LogMsg(tr("I/O Error: Could not open IP filter file in read mode."), Log::CRITICAL);
return ruleCount;
}
@@ -289,7 +310,8 @@ int FilterParserThread::parseP2PFilterFile()
LogMsg(msg, Log::CRITICAL);
};

while (true) {
while (true)
{
bytesRead = file.read(buffer.data() + offset, BUFFER_SIZE - offset - 1);
if (bytesRead < 0)
break;
@@ -297,12 +319,16 @@ int FilterParserThread::parseP2PFilterFile()
if ((bytesRead == 0) && (dataSize == 0))
break;

for (start = 0; start < dataSize; ++start) {
for (start = 0; start < dataSize; ++start)
{
endOfLine = -1;
// The file might have ended without the last line having a newline
if (!((bytesRead == 0) && (dataSize > 0))) {
for (int i = start; i < dataSize; ++i) {
if (buffer[i] == '\n') {
if (!((bytesRead == 0) && (dataSize > 0)))
{
for (int i = start; i < dataSize; ++i)
{
if (buffer[i] == '\n')
{
endOfLine = i;
// We need to NULL the newline in case the line has only an IP range.
// In that case the parser won't work for the end IP, because it ends
@@ -312,12 +338,14 @@ int FilterParserThread::parseP2PFilterFile()
}
}
}
else {
else
{
endOfLine = dataSize;
buffer[dataSize] = '\0';
}

if (endOfLine == -1) {
if (endOfLine == -1)
{
// read the next chunk from file
// but first move(copy) the leftover data to the front of the buffer
offset = dataSize - start;
@@ -328,7 +356,8 @@ int FilterParserThread::parseP2PFilterFile()
++nbLine;

if ((buffer[start] == '#')
|| ((buffer[start] == '/') && ((start + 1 < dataSize) && (buffer[start + 1] == '/')))) {
|| ((buffer[start] == '/') && ((start + 1 < dataSize) && (buffer[start + 1] == '/'))))
{
start = endOfLine;
continue;
}
@@ -337,7 +366,8 @@ int FilterParserThread::parseP2PFilterFile()
// Some organization:1.0.0.0-1.255.255.255
// The "Some organization" part might contain a ':' char itself so we find the last occurrence
const int partsDelimiter = findAndNullDelimiter(buffer.data(), ':', start, endOfLine, true);
if (partsDelimiter == -1) {
if (partsDelimiter == -1)
{
++parseErrorCount;
addLog(tr("IP filter line %1 is malformed.").arg(nbLine));
start = endOfLine;
@@ -346,7 +376,8 @@ int FilterParserThread::parseP2PFilterFile()

// IP Range should be split by a dash
const int delimIP = findAndNullDelimiter(buffer.data(), '-', partsDelimiter + 1, endOfLine);
if (delimIP == -1) {
if (delimIP == -1)
{
++parseErrorCount;
addLog(tr("IP filter line %1 is malformed.").arg(nbLine));
start = endOfLine;
@@ -355,7 +386,8 @@ int FilterParserThread::parseP2PFilterFile()

lt::address startAddr;
int newStart = trim(buffer.data(), partsDelimiter + 1, delimIP - 1);
if (!parseIPAddress(buffer.data() + newStart, startAddr)) {
if (!parseIPAddress(buffer.data() + newStart, startAddr))
{
++parseErrorCount;
addLog(tr("IP filter line %1 is malformed. Start IP of the range is malformed.").arg(nbLine));
start = endOfLine;
@@ -364,7 +396,8 @@ int FilterParserThread::parseP2PFilterFile()

lt::address endAddr;
newStart = trim(buffer.data(), delimIP + 1, endOfLine);
if (!parseIPAddress(buffer.data() + newStart, endAddr)) {
if (!parseIPAddress(buffer.data() + newStart, endAddr))
{
++parseErrorCount;
addLog(tr("IP filter line %1 is malformed. End IP of the range is malformed.").arg(nbLine));
start = endOfLine;
@@ -372,7 +405,8 @@ int FilterParserThread::parseP2PFilterFile()
}

if ((startAddr.is_v4() != endAddr.is_v4())
|| (startAddr.is_v6() != endAddr.is_v6())) {
|| (startAddr.is_v6() != endAddr.is_v6()))
{
++parseErrorCount;
addLog(tr("IP filter line %1 is malformed. One IP is IPv4 and the other is IPv6!").arg(nbLine));
start = endOfLine;
@@ -381,11 +415,13 @@ int FilterParserThread::parseP2PFilterFile()

start = endOfLine;

try {
try
{
m_filter.add_rule(startAddr, endAddr, lt::ip_filter::blocked);
++ruleCount;
}
catch (const std::exception &e) {
catch (const std::exception &e)
{
++parseErrorCount;
addLog(tr("IP filter exception thrown for line %1. Exception is: %2")
.arg(nbLine).arg(QString::fromLocal8Bit(e.what())));
@@ -407,14 +443,18 @@ int FilterParserThread::getlineInStream(QDataStream &stream, std::string &name,
char c;
int totalRead = 0;
int read;
do {
do
{
read = stream.readRawData(&c, 1);
totalRead += read;
if (read > 0) {
if (c != delim) {
if (read > 0)
{
if (c != delim)
{
name += c;
}
else {
else
{
// Delim found
return totalRead;
}
@@ -432,7 +472,8 @@ int FilterParserThread::parseP2BFilterFile()
QFile file(m_filePath);
if (!file.exists()) return ruleCount;

if (!file.open(QIODevice::ReadOnly)) {
if (!file.open(QIODevice::ReadOnly))
{
LogMsg(tr("I/O Error: Could not open IP filter file in read mode."), Log::CRITICAL);
return ruleCount;
}
@@ -443,19 +484,23 @@ int FilterParserThread::parseP2BFilterFile()
unsigned char version;
if (!stream.readRawData(buf, sizeof(buf))
|| memcmp(buf, "\xFF\xFF\xFF\xFFP2B", 7)
|| !stream.readRawData(reinterpret_cast<char*>(&version), sizeof(version))) {
|| !stream.readRawData(reinterpret_cast<char*>(&version), sizeof(version)))
{
LogMsg(tr("Parsing Error: The filter file is not a valid PeerGuardian P2B file."), Log::CRITICAL);
return ruleCount;
}

if ((version == 1) || (version == 2)) {
if ((version == 1) || (version == 2))
{
qDebug ("p2b version 1 or 2");
unsigned int start, end;

std::string name;
while (getlineInStream(stream, name, '\0') && !m_abort) {
while (getlineInStream(stream, name, '\0') && !m_abort)
{
if (!stream.readRawData(reinterpret_cast<char*>(&start), sizeof(start))
|| !stream.readRawData(reinterpret_cast<char*>(&end), sizeof(end))) {
|| !stream.readRawData(reinterpret_cast<char*>(&end), sizeof(end)))
{
LogMsg(tr("Parsing Error: The filter file is not a valid PeerGuardian P2B file."), Log::CRITICAL);
return ruleCount;
}
@@ -466,26 +511,31 @@ int FilterParserThread::parseP2BFilterFile()
const lt::address_v4 first(ntohl(start));
const lt::address_v4 last(ntohl(end));
// Apply to bittorrent session
try {
try
{
m_filter.add_rule(first, last, lt::ip_filter::blocked);
++ruleCount;
}
catch (const std::exception &) {}
}
}
else if (version == 3) {
else if (version == 3)
{
qDebug ("p2b version 3");
unsigned int namecount;
if (!stream.readRawData(reinterpret_cast<char*>(&namecount), sizeof(namecount))) {
if (!stream.readRawData(reinterpret_cast<char*>(&namecount), sizeof(namecount)))
{
LogMsg(tr("Parsing Error: The filter file is not a valid PeerGuardian P2B file."), Log::CRITICAL);
return ruleCount;
}

namecount = ntohl(namecount);
// Reading names although, we don't really care about them
for (unsigned int i = 0; i < namecount; ++i) {
for (unsigned int i = 0; i < namecount; ++i)
{
std::string name;
if (!getlineInStream(stream, name, '\0')) {
if (!getlineInStream(stream, name, '\0'))
{
LogMsg(tr("Parsing Error: The filter file is not a valid PeerGuardian P2B file."), Log::CRITICAL);
return ruleCount;
}
@@ -495,17 +545,20 @@ int FilterParserThread::parseP2BFilterFile()

// Reading the ranges
unsigned int rangecount;
if (!stream.readRawData(reinterpret_cast<char*>(&rangecount), sizeof(rangecount))) {
if (!stream.readRawData(reinterpret_cast<char*>(&rangecount), sizeof(rangecount)))
{
LogMsg(tr("Parsing Error: The filter file is not a valid PeerGuardian P2B file."), Log::CRITICAL);
return ruleCount;
}

rangecount = ntohl(rangecount);
unsigned int name, start, end;
for (unsigned int i = 0; i < rangecount; ++i) {
for (unsigned int i = 0; i < rangecount; ++i)
{
if (!stream.readRawData(reinterpret_cast<char*>(&name), sizeof(name))
|| !stream.readRawData(reinterpret_cast<char*>(&start), sizeof(start))
|| !stream.readRawData(reinterpret_cast<char*>(&end), sizeof(end))) {
|| !stream.readRawData(reinterpret_cast<char*>(&end), sizeof(end)))
{
LogMsg(tr("Parsing Error: The filter file is not a valid PeerGuardian P2B file."), Log::CRITICAL);
return ruleCount;
}
@@ -516,7 +569,8 @@ int FilterParserThread::parseP2BFilterFile()
const lt::address_v4 first(ntohl(start));
const lt::address_v4 last(ntohl(end));
// Apply to bittorrent session
try {
try
{
m_filter.add_rule(first, last, lt::ip_filter::blocked);
++ruleCount;
}
@@ -525,7 +579,8 @@ int FilterParserThread::parseP2BFilterFile()
if (m_abort) return ruleCount;
}
}
else {
else
{
LogMsg(tr("Parsing Error: The filter file is not a valid PeerGuardian P2B file."), Log::CRITICAL);
}

@@ -539,7 +594,8 @@ int FilterParserThread::parseP2BFilterFile()
// * PeerGuardian Binary (P2B): http://wiki.phoenixlabs.org/wiki/P2B_Format
void FilterParserThread::processFilterFile(const QString &filePath)
{
if (isRunning()) {
if (isRunning())
{
// Already parsing a filter, m_abort first
m_abort = true;
wait();
@@ -561,25 +617,30 @@ void FilterParserThread::run()
{
qDebug("Processing filter file");
int ruleCount = 0;
if (m_filePath.endsWith(".p2p", Qt::CaseInsensitive)) {
if (m_filePath.endsWith(".p2p", Qt::CaseInsensitive))
{
// PeerGuardian p2p file
ruleCount = parseP2PFilterFile();
}
else if (m_filePath.endsWith(".p2b", Qt::CaseInsensitive)) {
else if (m_filePath.endsWith(".p2b", Qt::CaseInsensitive))
{
// PeerGuardian p2b file
ruleCount = parseP2BFilterFile();
}
else if (m_filePath.endsWith(".dat", Qt::CaseInsensitive)) {
else if (m_filePath.endsWith(".dat", Qt::CaseInsensitive))
{
// eMule DAT format
ruleCount = parseDATFilterFile();
}

if (m_abort) return;

try {
try
{
emit IPFilterParsed(ruleCount);
}
catch (const std::exception &) {
catch (const std::exception &)
{
emit IPFilterError();
}

@@ -588,17 +649,23 @@ void FilterParserThread::run()

int FilterParserThread::findAndNullDelimiter(char *const data, const char delimiter, const int start, const int end, const bool reverse)
{
if (!reverse) {
for (int i = start; i <= end; ++i) {
if (data[i] == delimiter) {
if (!reverse)
{
for (int i = start; i <= end; ++i)
{
if (data[i] == delimiter)
{
data[i] = '\0';
return i;
}
}
}
else {
for (int i = end; i >= start; --i) {
if (data[i] == delimiter) {
else
{
for (int i = end; i >= start; --i)
{
if (data[i] == delimiter)
{
data[i] = '\0';
return i;
}
@@ -613,17 +680,21 @@ int FilterParserThread::trim(char *const data, const int start, const int end)
if (start >= end) return start;
int newStart = start;

for (int i = start; i <= end; ++i) {
if (isspace(data[i]) != 0) {
for (int i = start; i <= end; ++i)
{
if (isspace(data[i]) != 0)
{
data[i] = '\0';
}
else {
else
{
newStart = i;
break;
}
}

for (int i = end; i >= start; --i) {
for (int i = end; i >= start; --i)
{
if (isspace(data[i]) != 0)
data[i] = '\0';
else

@@ -36,7 +36,8 @@ namespace
{
void handleFastresumeRejectedAlert(const lt::fastresume_rejected_alert *alert)
{
if (alert->error.value() == lt::errors::mismatching_file_size) {
if (alert->error.value() == lt::errors::mismatching_file_size)
{
alert->handle.unset_flags(lt::torrent_flags::auto_managed);
alert->handle.pause();
}
@@ -55,7 +56,8 @@ std::shared_ptr<lt::torrent_plugin> NativeSessionExtension::new_torrent(const lt

void NativeSessionExtension::on_alert(const lt::alert *alert)
{
switch (alert->type()) {
switch (alert->type())
{
case lt::fastresume_rejected_alert::alert_type:
handleFastresumeRejectedAlert(static_cast<const lt::fastresume_rejected_alert *>(alert));
break;

@@ -36,14 +36,17 @@ PeerAddress PeerAddress::parse(const QString &address)
{
QVector<QStringRef> ipPort;

if (address.startsWith('[') && address.contains("]:")) { // IPv6
if (address.startsWith('[') && address.contains("]:"))
{ // IPv6
ipPort = address.splitRef("]:");
ipPort[0] = ipPort[0].mid(1); // chop '['
}
else if (address.contains(':')) { // IPv4
else if (address.contains(':'))
{ // IPv4
ipPort = address.splitRef(':');
}
else {
else
{
return {};
}

@@ -208,7 +208,8 @@ qlonglong PeerInfo::totalDownload() const
QBitArray PeerInfo::pieces() const
{
QBitArray result(m_nativeInfo.pieces.size());
for (int i = 0; i < result.size(); ++i) {
for (int i = 0; i < result.size(); ++i)
{
if (m_nativeInfo.pieces[lt::piece_index_t {i}])
result.setBit(i, true);
}
@@ -233,8 +234,10 @@ void PeerInfo::calcRelevance(const TorrentHandle *torrent)
int localMissing = 0;
int remoteHaves = 0;

for (int i = 0; i < allPieces.size(); ++i) {
if (!allPieces[i]) {
for (int i = 0; i < allPieces.size(); ++i)
{
if (!allPieces[i])
{
++localMissing;
if (peerPieces[i])
++remoteHaves;
@@ -254,14 +257,17 @@ qreal PeerInfo::relevance() const

void PeerInfo::determineFlags()
{
if (isInteresting()) {
if (isInteresting())
{
// d = Your client wants to download, but peer doesn't want to send (interested and choked)
if (isRemoteChocked()) {
if (isRemoteChocked())
{
m_flags += "d ";
m_flagsDescription += ("d = "
+ tr("Interested(local) and Choked(peer)") + '\n');
}
else {
else
{
// D = Currently downloading (interested and not choked)
m_flags += "D ";
m_flagsDescription += ("D = "
@@ -269,14 +275,17 @@ void PeerInfo::determineFlags()
}
}

if (isRemoteInterested()) {
if (isRemoteInterested())
{
// u = Peer wants your client to upload, but your client doesn't want to (interested and choked)
if (isChocked()) {
if (isChocked())
{
m_flags += "u ";
m_flagsDescription += ("u = "
+ tr("interested(peer) and choked(local)") + '\n');
}
else {
else
{
// U = Currently uploading (interested and not choked)
m_flags += "U ";
m_flagsDescription += ("U = "
@@ -285,69 +294,80 @@ void PeerInfo::determineFlags()
}

// O = Optimistic unchoke
if (optimisticUnchoke()) {
if (optimisticUnchoke())
{
m_flags += "O ";
m_flagsDescription += ("O = " + tr("optimistic unchoke") + '\n');
}

// S = Peer is snubbed
if (isSnubbed()) {
if (isSnubbed())
{
m_flags += "S ";
m_flagsDescription += ("S = " + tr("peer snubbed") + '\n');
}

// I = Peer is an incoming connection
if (!isLocalConnection()) {
if (!isLocalConnection())
{
m_flags += "I ";
m_flagsDescription += ("I = " + tr("incoming connection") + '\n');
}

// K = Peer is unchoking your client, but your client is not interested
if (!isRemoteChocked() && !isInteresting()) {
if (!isRemoteChocked() && !isInteresting())
{
m_flags += "K ";
m_flagsDescription += ("K = "
+ tr("not interested(local) and unchoked(peer)") + '\n');
}

// ? = Your client unchoked the peer but the peer is not interested
if (!isChocked() && !isRemoteInterested()) {
if (!isChocked() && !isRemoteInterested())
{
m_flags += "? ";
m_flagsDescription += ("? = "
+ tr("not interested(peer) and unchoked(local)") + '\n');
}

// X = Peer was included in peerlists obtained through Peer Exchange (PEX)
if (fromPeX()) {
if (fromPeX())
{
m_flags += "X ";
m_flagsDescription += ("X = " + tr("peer from PEX") + '\n');
}

// H = Peer was obtained through DHT
if (fromDHT()) {
if (fromDHT())
{
m_flags += "H ";
m_flagsDescription += ("H = " + tr("peer from DHT") + '\n');
}

// E = Peer is using Protocol Encryption (all traffic)
if (isRC4Encrypted()) {
if (isRC4Encrypted())
{
m_flags += "E ";
m_flagsDescription += ("E = " + tr("encrypted traffic") + '\n');
}

// e = Peer is using Protocol Encryption (handshake)
if (isPlaintextEncrypted()) {
if (isPlaintextEncrypted())
{
m_flags += "e ";
m_flagsDescription += ("e = " + tr("encrypted handshake") + '\n');
}

// P = Peer is using uTorrent uTP
if (useUTPSocket()) {
if (useUTPSocket())
{
m_flags += "P ";
m_flagsDescription += ("P = " + QString::fromUtf8(C_UTP) + '\n');
}

// L = Peer is local
if (fromLSD()) {
if (fromLSD())
{
m_flags += "L ";
m_flagsDescription += ("L = " + tr("peer from LSD") + '\n');
}

@@ -58,7 +58,8 @@ bool PortForwarderImpl::isEnabled() const

void PortForwarderImpl::setEnabled(const bool enabled)
{
if (m_active != enabled) {
if (m_active != enabled)
{
if (enabled)
start();
else
@@ -71,7 +72,8 @@ void PortForwarderImpl::setEnabled(const bool enabled)

void PortForwarderImpl::addPort(const quint16 port)
{
if (!m_mappedPorts.contains(port)) {
if (!m_mappedPorts.contains(port))
{
m_mappedPorts.insert(port, {});
if (isEnabled())
m_mappedPorts[port] = {m_provider->add_port_mapping(lt::session::tcp, port, port)};
@@ -80,8 +82,10 @@ void PortForwarderImpl::addPort(const quint16 port)

void PortForwarderImpl::deletePort(const quint16 port)
{
if (m_mappedPorts.contains(port)) {
if (isEnabled()) {
if (m_mappedPorts.contains(port))
{
if (isEnabled())
{
for (const lt::port_mapping_t &portMapping : m_mappedPorts[port])
m_provider->delete_port_mapping(portMapping);
}
@@ -96,7 +100,8 @@ void PortForwarderImpl::start()
settingsPack.set_bool(lt::settings_pack::enable_upnp, true);
settingsPack.set_bool(lt::settings_pack::enable_natpmp, true);
m_provider->apply_settings(settingsPack);
for (auto i = m_mappedPorts.begin(); i != m_mappedPorts.end(); ++i) {
for (auto i = m_mappedPorts.begin(); i != m_mappedPorts.end(); ++i)
{
// quint16 port = i.key();
i.value() = {m_provider->add_port_mapping(lt::session::tcp, i.key(), i.key())};
}

@@ -48,7 +48,8 @@ void ResumeDataSavingManager::save(const QString &filename, const QByteArray &da
const QString filepath = m_resumeDataDir.absoluteFilePath(filename);

QSaveFile file {filepath};
if (!file.open(QIODevice::WriteOnly) || (file.write(data) != data.size()) || !file.commit()) {
if (!file.open(QIODevice::WriteOnly) || (file.write(data) != data.size()) || !file.commit())
{
LogMsg(tr("Couldn't save data to '%1'. Error: %2")
.arg(filepath, file.errorString()), Log::CRITICAL);
}
@@ -59,14 +60,16 @@ void ResumeDataSavingManager::save(const QString &filename, const std::shared_pt
const QString filepath = m_resumeDataDir.absoluteFilePath(filename);

QSaveFile file {filepath};
if (!file.open(QIODevice::WriteOnly)) {
if (!file.open(QIODevice::WriteOnly))
{
LogMsg(tr("Couldn't save data to '%1'. Error: %2")
.arg(filepath, file.errorString()), Log::CRITICAL);
return;
}

lt::bencode(Utils::IO::FileDeviceOutputIterator {file}, *data);
if ((file.error() != QFileDevice::NoError) || !file.commit()) {
if ((file.error() != QFileDevice::NoError) || !file.commit())
{
LogMsg(tr("Couldn't save data to '%1'. Error: %2")
.arg(filepath, file.errorString()), Log::CRITICAL);
}

(File diff suppressed because it is too large.)
@@ -36,7 +36,8 @@ SpeedMonitor::SpeedMonitor()

void SpeedMonitor::addSample(const SpeedSample &sample)
{
if (m_speedSamples.size() >= MAX_SAMPLES) {
if (m_speedSamples.size() >= MAX_SAMPLES)
{
m_sum -= m_speedSamples.front();
}


@@ -71,11 +71,13 @@ quint64 Statistics::getAlltimeUL() const
void Statistics::gather()
{
const SessionStatus &ss = m_session->status();
if (ss.totalDownload > m_sessionDL) {
if (ss.totalDownload > m_sessionDL)
{
m_sessionDL = ss.totalDownload;
m_dirty = true;
}
if (ss.totalUpload > m_sessionUL) {
if (ss.totalUpload > m_sessionUL)
{
m_sessionUL = ss.totalUpload;
m_dirty = true;
}

@@ -59,7 +59,8 @@ namespace
#if (LIBTORRENT_VERSION_NUM >= 20000)
lt::create_flags_t toNativeTorrentFormatFlag(const BitTorrent::TorrentFormat torrentFormat)
{
switch (torrentFormat) {
switch (torrentFormat)
{
case BitTorrent::TorrentFormat::V1:
return lt::create_torrent::v1_only;
case BitTorrent::TorrentFormat::Hybrid:
@@ -102,20 +103,24 @@ void TorrentCreatorThread::run()

emit updateProgress(0);

try {
try
{
const QString parentPath = Utils::Fs::branchPath(m_params.inputPath) + '/';

// Adding files to the torrent
lt::file_storage fs;
if (QFileInfo(m_params.inputPath).isFile()) {
if (QFileInfo(m_params.inputPath).isFile())
{
lt::add_files(fs, Utils::Fs::toNativePath(m_params.inputPath).toStdString(), fileFilter);
}
else {
else
{
// need to sort the file names by natural sort order
QStringList dirs = {m_params.inputPath};

QDirIterator dirIter(m_params.inputPath, (QDir::AllDirs | QDir::NoDotAndDotDot), QDirIterator::Subdirectories);
while (dirIter.hasNext()) {
while (dirIter.hasNext())
{
dirIter.next();
dirs += dirIter.filePath();
}
@@ -124,11 +129,13 @@ void TorrentCreatorThread::run()
QStringList fileNames;
QHash<QString, qint64> fileSizeMap;

for (const auto &dir : asConst(dirs)) {
for (const auto &dir : asConst(dirs))
{
QStringList tmpNames; // natural sort files within each dir

QDirIterator fileIter(dir, QDir::Files);
while (fileIter.hasNext()) {
while (fileIter.hasNext())
{
fileIter.next();

const QString relFilePath = fileIter.filePath().mid(parentPath.length());
@@ -154,14 +161,16 @@ void TorrentCreatorThread::run()
#endif

// Add url seeds
for (QString seed : asConst(m_params.urlSeeds)) {
for (QString seed : asConst(m_params.urlSeeds))
{
seed = seed.trimmed();
if (!seed.isEmpty())
newTorrent.add_url_seed(seed.toStdString());
}

int tier = 0;
for (const QString &tracker : asConst(m_params.trackers)) {
for (const QString &tracker : asConst(m_params.trackers))
{
if (tracker.isEmpty())
++tier;
else
@@ -195,16 +204,20 @@ void TorrentCreatorThread::run()

// create the torrent
QFile outfile {m_params.savePath};
if (!outfile.open(QIODevice::WriteOnly)) {
throw RuntimeError {tr("Create new torrent file failed. Reason: %1")
if (!outfile.open(QIODevice::WriteOnly))
{
throw RuntimeError
{tr("Create new torrent file failed. Reason: %1")
.arg(outfile.errorString())};
}

if (isInterruptionRequested()) return;

lt::bencode(Utils::IO::FileDeviceOutputIterator {outfile}, entry);
if (outfile.error() != QFileDevice::NoError) {
throw RuntimeError {tr("Create new torrent file failed. Reason: %1")
if (outfile.error() != QFileDevice::NoError)
{
throw RuntimeError
{tr("Create new torrent file failed. Reason: %1")
.arg(outfile.errorString())};
}
outfile.close();
@@ -212,7 +225,8 @@ void TorrentCreatorThread::run()
emit updateProgress(100);
emit creationSuccess(m_params.savePath, parentPath);
}
catch (const std::exception &e) {
catch (const std::exception &e)
{
emit creationFailure(e.what());
}
}

@@ -126,10 +126,12 @@ TorrentHandleImpl::TorrentHandleImpl(Session *session, const lt::torrent_handle
updateStatus();
m_hash = InfoHash(m_nativeStatus.info_hash);

if (hasMetadata()) {
if (hasMetadata())
{
applyFirstLastPiecePriority(m_hasFirstLastPiecePriority);

if (!params.restored) {
if (!params.restored)
{
if (filesCount() == 1)
m_hasRootFolder = false;
}
@@ -138,11 +140,13 @@ TorrentHandleImpl::TorrentHandleImpl(Session *session, const lt::torrent_handle
// TODO: Remove the following upgrade code in v.4.4
// == BEGIN UPGRADE CODE ==
const QString spath = actualStorageLocation();
for (int i = 0; i < filesCount(); ++i) {
for (int i = 0; i < filesCount(); ++i)
{
const QString filepath = filePath(i);
// Move "unwanted" files back to their original folder
const QString parentRelPath = Utils::Fs::branchPath(filepath);
if (QDir(parentRelPath).dirName() == ".unwanted") {
if (QDir(parentRelPath).dirName() == ".unwanted")
{
const QString oldName = Utils::Fs::fileName(filepath);
const QString newRelPath = Utils::Fs::branchPath(parentRelPath);
if (newRelPath.isEmpty())
@@ -178,7 +182,8 @@ QString TorrentHandleImpl::name() const
name = QString::fromStdString(m_nativeStatus.name);
if (!name.isEmpty()) return name;

if (hasMetadata()) {
if (hasMetadata())
{
name = QString::fromStdString(m_torrentInfo.nativeInfo()->orig_files().name());
if (!name.isEmpty()) return name;
}
@@ -330,8 +335,10 @@ void TorrentHandleImpl::addTrackers(const QVector<TrackerEntry> &trackers)
QVector<TrackerEntry> newTrackers;
newTrackers.reserve(trackers.size());

for (const TrackerEntry &tracker : trackers) {
if (!currentTrackers.contains(tracker)) {
for (const TrackerEntry &tracker : trackers)
{
if (!currentTrackers.contains(tracker))
{
m_nativeHandle.add_tracker(tracker.nativeEntry());
newTrackers << tracker;
}
@@ -351,7 +358,8 @@ void TorrentHandleImpl::replaceTrackers(const QVector<TrackerEntry> &trackers)
std::vector<lt::announce_entry> nativeTrackers;
nativeTrackers.reserve(trackers.size());

for (const TrackerEntry &tracker : trackers) {
for (const TrackerEntry &tracker : trackers)
{
nativeTrackers.emplace_back(tracker.nativeEntry());

if (!currentTrackers.removeOne(tracker))
@@ -360,11 +368,13 @@ void TorrentHandleImpl::replaceTrackers(const QVector<TrackerEntry> &trackers)

m_nativeHandle.replace_trackers(nativeTrackers);

if (newTrackers.isEmpty() && currentTrackers.isEmpty()) {
if (newTrackers.isEmpty() && currentTrackers.isEmpty())
{
// when existing tracker reorders
m_session->handleTorrentTrackersChanged(this);
}
else {
else
{
if (!currentTrackers.isEmpty())
m_session->handleTorrentTrackersRemoved(this, currentTrackers);

@@ -398,9 +408,11 @@ void TorrentHandleImpl::addUrlSeeds(const QVector<QUrl> &urlSeeds)
QVector<QUrl> addedUrlSeeds;
addedUrlSeeds.reserve(urlSeeds.size());

for (const QUrl &url : urlSeeds) {
for (const QUrl &url : urlSeeds)
{
const std::string nativeUrl = url.toString().toStdString();
if (currentSeeds.find(nativeUrl) == currentSeeds.end()) {
if (currentSeeds.find(nativeUrl) == currentSeeds.end())
{
m_nativeHandle.add_url_seed(nativeUrl);
addedUrlSeeds << url;
}
@@ -417,9 +429,11 @@ void TorrentHandleImpl::removeUrlSeeds(const QVector<QUrl> &urlSeeds)
QVector<QUrl> removedUrlSeeds;
removedUrlSeeds.reserve(urlSeeds.size());

for (const QUrl &url : urlSeeds) {
for (const QUrl &url : urlSeeds)
{
const std::string nativeUrl = url.toString().toStdString();
if (currentSeeds.find(nativeUrl) != currentSeeds.end()) {
if (currentSeeds.find(nativeUrl) != currentSeeds.end())
{
m_nativeHandle.remove_url_seed(nativeUrl);
removedUrlSeeds << url;
}
@@ -443,10 +457,12 @@ bool TorrentHandleImpl::connectPeer(const PeerAddress &peerAddress)
if (ec) return false;

const lt::tcp::endpoint endpoint(addr, peerAddress.port);
try {
try
{
m_nativeHandle.connect_peer(endpoint);
}
catch (const lt::system_error &err) {
catch (const lt::system_error &err)
{
LogMsg(tr("Failed to add peer \"%1\" to torrent \"%2\". Reason: %3")
.arg(peerAddress.toString(), name(), QString::fromLocal8Bit(err.what())), Log::WARNING);
return false;
@@ -533,7 +549,8 @@ bool TorrentHandleImpl::addTag(const QString &tag)
if (!Session::isValidTag(tag))
return false;

if (!hasTag(tag)) {
if (!hasTag(tag))
{
if (!m_session->hasTag(tag))
if (!m_session->addTag(tag))
return false;
@@ -546,7 +563,8 @@ bool TorrentHandleImpl::addTag(const QString &tag)

bool TorrentHandleImpl::removeTag(const QString &tag)
{
if (m_tags.remove(tag)) {
if (m_tags.remove(tag))
{
m_session->handleTorrentTagRemoved(this, tag);
return true;
}
@@ -719,25 +737,31 @@ TorrentState TorrentHandleImpl::state() const

void TorrentHandleImpl::updateState()
{
if (m_nativeStatus.state == lt::torrent_status::checking_resume_data) {
if (m_nativeStatus.state == lt::torrent_status::checking_resume_data)
{
m_state = TorrentState::CheckingResumeData;
}
else if (isMoveInProgress()) {
else if (isMoveInProgress())
{
m_state = TorrentState::Moving;
}
else if (hasMissingFiles()) {
else if (hasMissingFiles())
{
m_state = TorrentState::MissingFiles;
}
else if (hasError()) {
else if (hasError())
{
m_state = TorrentState::Error;
}
else if ((m_nativeStatus.state == lt::torrent_status::checking_files)
&& (!isPaused() || (m_nativeStatus.flags & lt::torrent_flags::auto_managed)
|| !(m_nativeStatus.flags & lt::torrent_flags::paused))) {
|| !(m_nativeStatus.flags & lt::torrent_flags::paused)))
{
// If the torrent is not just in the "checking" state, but is being actually checked
m_state = m_hasSeedStatus ? TorrentState::CheckingUploading : TorrentState::CheckingDownloading;
}
else if (isSeed()) {
else if (isSeed())
{
if (isPaused())
m_state = TorrentState::PausedUploading;
else if (m_session->isQueueingSystemEnabled() && isQueued())
@@ -749,7 +773,8 @@ void TorrentHandleImpl::updateState()
else
m_state = TorrentState::StalledUploading;
}
else {
else
{
if (isPaused())
m_state = TorrentState::PausedDownloading;
else if (m_nativeStatus.state == lt::torrent_status::downloading_metadata)
@@ -832,14 +857,16 @@ qlonglong TorrentHandleImpl::eta() const

const SpeedSampleAvg speedAverage = m_speedMonitor.average();

if (isSeed()) {
if (isSeed())
{
const qreal maxRatioValue = maxRatio();
const int maxSeedingTimeValue = maxSeedingTime();
if ((maxRatioValue < 0) && (maxSeedingTimeValue < 0)) return MAX_ETA;

qlonglong ratioEta = MAX_ETA;

if ((speedAverage.upload > 0) && (maxRatioValue >= 0)) {
if ((speedAverage.upload > 0) && (maxRatioValue >= 0))
{

qlonglong realDL = totalDownload();
if (realDL <= 0)
@@ -850,7 +877,8 @@ qlonglong TorrentHandleImpl::eta() const

qlonglong seedingTimeEta = MAX_ETA;

if (maxSeedingTimeValue >= 0) {
if (maxSeedingTimeValue >= 0)
{
seedingTimeEta = (maxSeedingTimeValue * 60) - seedingTime();
if (seedingTimeEta < 0)
seedingTimeEta = 0;
@@ -872,7 +900,8 @@ QVector<qreal> TorrentHandleImpl::filesProgress() const
const int count = static_cast<int>(fp.size());
QVector<qreal> result;
result.reserve(count);
for (int i = 0; i < count; ++i) {
for (int i = 0; i < count; ++i)
{
const qlonglong size = fileSize(i);
if ((size <= 0) || (fp[i] == size))
result << 1;
@@ -995,7 +1024,8 @@ QVector<PeerInfo> TorrentHandleImpl::peers() const
QBitArray TorrentHandleImpl::pieces() const
{
QBitArray result(m_nativeStatus.pieces.size());
for (int i = 0; i < result.size(); ++i) {
for (int i = 0; i < result.size(); ++i)
{
if (m_nativeStatus.pieces[lt::piece_index_t {i}])
result.setBit(i, true);
}
@@ -1097,7 +1127,8 @@ qlonglong TorrentHandleImpl::nextAnnounce() const

void TorrentHandleImpl::setName(const QString &name)
{
if (m_name != name) {
if (m_name != name)
{
m_name = name;
m_session->handleTorrentNameChanged(this);
}
@@ -1105,7 +1136,8 @@ void TorrentHandleImpl::setName(const QString &name)

bool TorrentHandleImpl::setCategory(const QString &category)
{
if (m_category != category) {
if (m_category != category)
{
if (!category.isEmpty() && !m_session->categories().contains(category))
return false;
@@ -1113,7 +1145,8 @@ bool TorrentHandleImpl::setCategory(const QString &category)
m_category = category;
m_session->handleTorrentCategoryChanged(this, oldCategory);

if (m_useAutoTMM) {
if (m_useAutoTMM)
{
if (!m_session->isDisableAutoTMMWhenCategoryChanged())
move_impl(m_session->categorySavePath(m_category), MoveStorageMode::Overwrite);
else
@@ -1126,7 +1159,8 @@ bool TorrentHandleImpl::setCategory(const QString &category)

void TorrentHandleImpl::move(QString path)
{
if (m_useAutoTMM) {
if (m_useAutoTMM)
{
m_useAutoTMM = false;
m_session->handleTorrentSavingModeChanged(this);
}
@@ -1145,10 +1179,12 @@ void TorrentHandleImpl::move_impl(QString path, const MoveStorageMode mode)
if (path == savePath()) return;
path = Utils::Fs::toNativePath(path);

if (!useTempPath()) {
if (!useTempPath())
{
moveStorage(path, mode);
}
else {
else
{
m_savePath = path;
m_session->handleTorrentSavePathChanged(this);
}
@@ -1172,7 +1208,8 @@ void TorrentHandleImpl::forceRecheck()
m_hasMissingFiles = false;
m_unchecked = false;

if (isPaused()) {
if (isPaused())
{
// When "force recheck" is applied on paused torrent, we temporarily resume it
// (really we just allow libtorrent to resume it by enabling auto management for it).
m_nativeHandle.set_flags(lt::torrent_flags::stop_when_ready | lt::torrent_flags::auto_managed);
@@ -1181,11 +1218,13 @@ void TorrentHandleImpl::forceRecheck()

void TorrentHandleImpl::setSequentialDownload(const bool enable)
{
if (enable) {
if (enable)
{
m_nativeHandle.set_flags(lt::torrent_flags::sequential_download);
m_nativeStatus.flags |= lt::torrent_flags::sequential_download; // prevent return cached value
}
else {
else
{
m_nativeHandle.unset_flags(lt::torrent_flags::sequential_download);
m_nativeStatus.flags &= ~lt::torrent_flags::sequential_download; // prevent return cached value
}
@@ -1220,7 +1259,8 @@ void TorrentHandleImpl::applyFirstLastPiecePriority(const bool enabled, const QV

// Updating file priorities is an async operation in libtorrent, when we just updated it and immediately query it
// we might get the old/wrong values, so we rely on `updatedFilePrio` in this case.
for (int index = 0; index < static_cast<int>(filePriorities.size()); ++index) {
for (int index = 0; index < static_cast<int>(filePriorities.size()); ++index)
{
const lt::download_priority_t filePrio = filePriorities[index];
if (filePrio <= lt::download_priority_t {0})
continue;
@@ -1231,7 +1271,8 @@ void TorrentHandleImpl::applyFirstLastPiecePriority(const bool enabled, const QV

// worst case: AVI index = 1% of total file size (at the end of the file)
const int nNumPieces = std::ceil(fileSize(index) * 0.01 / pieceLength());
for (int i = 0; i < nNumPieces; ++i) {
for (int i = 0; i < nNumPieces; ++i)
{
piecePriorities[extremities.first() + i] = newPrio;
piecePriorities[extremities.last() - i] = newPrio;
}
@@ -1247,7 +1288,8 @@ void TorrentHandleImpl::pause()

m_speedMonitor.reset();

if (!m_isStopped) {
if (!m_isStopped)
{
m_isStopped = true;
m_session->handleTorrentPaused(this);
}
@@ -1258,12 +1300,14 @@ void TorrentHandleImpl::resume(const TorrentOperatingMode mode)
if (hasError())
m_nativeHandle.clear_error();

if (m_hasMissingFiles) {
if (m_hasMissingFiles)
{
m_hasMissingFiles = false;
m_nativeHandle.force_recheck();
}

if (m_isStopped) {
if (m_isStopped)
{
// Torrent may have been temporarily resumed to perform checking files
// so we have to ensure it will not pause after checking is done.
m_nativeHandle.unset_flags(lt::torrent_flags::stop_when_ready);
@@ -1275,7 +1319,8 @@ void TorrentHandleImpl::resume(const TorrentOperatingMode mode)

m_operatingMode = mode;

if (m_isStopped) {
if (m_isStopped)
{
m_isStopped = false;
m_session->handleTorrentResumed(this);
}
@@ -1283,7 +1328,8 @@ void TorrentHandleImpl::resume(const TorrentOperatingMode mode)

void TorrentHandleImpl::moveStorage(const QString &newPath, const MoveStorageMode mode)
{
if (m_session->addMoveTorrentStorageJob(this, newPath, mode)) {
if (m_session->addMoveTorrentStorageJob(this, newPath, mode))
{
m_storageIsMoving = true;
updateStatus();
}
@@ -1307,7 +1353,8 @@ void TorrentHandleImpl::handleMoveStorageJobFinished(const bool hasOutstandingJo

updateStatus();
const QString newPath = QString::fromStdString(m_nativeStatus.save_path);
if (!useTempPath() && (newPath != m_savePath)) {
if (!useTempPath() && (newPath != m_savePath))
{
m_savePath = newPath;
m_session->handleTorrentSavePathChanged(this);
}
|
||||
@@ -1363,14 +1410,16 @@ void TorrentHandleImpl::handleTorrentCheckedAlert(const lt::torrent_checked_aler
|
||||
Q_UNUSED(p);
|
||||
qDebug("\"%s\" have just finished checking", qUtf8Printable(name()));
|
||||
|
||||
if (m_fastresumeDataRejected && !m_hasMissingFiles) {
|
||||
if (m_fastresumeDataRejected && !m_hasMissingFiles)
|
||||
{
|
||||
saveResumeData();
|
||||
m_fastresumeDataRejected = false;
|
||||
}
|
||||
|
||||
updateStatus();
|
||||
|
||||
if (!m_hasMissingFiles) {
|
||||
if (!m_hasMissingFiles)
|
||||
{
|
||||
if ((progress() < 1.0) && (wantedSize() > 0))
|
||||
m_hasSeedStatus = false;
|
||||
else if (progress() == 1.0)
|
||||
@@ -1398,12 +1447,14 @@ void TorrentHandleImpl::handleTorrentFinishedAlert(const lt::torrent_finished_al
|
||||
manageIncompleteFiles();
|
||||
|
||||
const bool recheckTorrentsOnCompletion = Preferences::instance()->recheckTorrentsOnCompletion();
|
||||
if (isMoveInProgress() || (m_renameCount > 0)) {
|
||||
if (isMoveInProgress() || (m_renameCount > 0))
|
||||
{
|
||||
if (recheckTorrentsOnCompletion)
|
||||
m_moveFinishedTriggers.append([this]() { forceRecheck(); });
|
||||
m_moveFinishedTriggers.append([this]() { m_session->handleTorrentFinished(this); });
|
||||
}
|
||||
else {
|
||||
else
|
||||
{
|
||||
if (recheckTorrentsOnCompletion && m_unchecked)
|
||||
forceRecheck();
|
||||
m_session->handleTorrentFinished(this);
|
||||
@@ -1422,18 +1473,22 @@ void TorrentHandleImpl::handleTorrentResumedAlert(const lt::torrent_resumed_aler
|
||||
|
||||
void TorrentHandleImpl::handleSaveResumeDataAlert(const lt::save_resume_data_alert *p)
|
||||
{
|
||||
if (p && !m_hasMissingFiles) {
|
||||
if (p && !m_hasMissingFiles)
|
||||
{
|
||||
// Update recent resume data
|
||||
m_ltAddTorrentParams = p->params;
|
||||
}
|
||||
|
||||
if (!m_isStopped) {
|
||||
if (!m_isStopped)
|
||||
{
|
||||
// Torrent can be actually "running" but temporarily "paused" to perform some
|
||||
// service jobs behind the scenes so we need to restore it as "running"
|
||||
if (m_operatingMode == TorrentOperatingMode::AutoManaged) {
|
||||
if (m_operatingMode == TorrentOperatingMode::AutoManaged)
|
||||
{
|
||||
m_ltAddTorrentParams.flags |= lt::torrent_flags::auto_managed;
|
||||
}
|
||||
else {
|
||||
else
|
||||
{
|
||||
m_ltAddTorrentParams.flags &= ~lt::torrent_flags::paused;
|
||||
m_ltAddTorrentParams.flags &= ~lt::torrent_flags::auto_managed;
|
||||
}
|
||||
@@ -1449,7 +1504,8 @@ void TorrentHandleImpl::handleSaveResumeDataAlert(const lt::save_resume_data_ale
|
||||
// TODO: The following code is deprecated. Remove after several releases in 4.3.x.
|
||||
// === BEGIN DEPRECATED CODE === //
|
||||
const bool useDummyResumeData = !p;
|
||||
if (useDummyResumeData) {
|
||||
if (useDummyResumeData)
|
||||
{
|
||||
updateStatus();
|
||||
|
||||
resumeData["qBt-magnetUri"] = createMagnetURI().toStdString();
|
||||
@@ -1489,12 +1545,14 @@ void TorrentHandleImpl::handleFastResumeRejectedAlert(const lt::fastresume_rejec
{
m_fastresumeDataRejected = true;

if (p->error.value() == lt::errors::mismatching_file_size) {
if (p->error.value() == lt::errors::mismatching_file_size)
{
// Mismatching file size (files were probably moved)
m_hasMissingFiles = true;
LogMsg(tr("File sizes mismatch for torrent '%1'. Cannot proceed further.").arg(name()), Log::CRITICAL);
}
else {
else
{
LogMsg(tr("Fast resume data was rejected for torrent '%1'. Reason: %2. Checking again...")
.arg(name(), QString::fromStdString(p->message())), Log::WARNING);
}
@@ -1527,13 +1585,15 @@ void TorrentHandleImpl::handleFileRenamedAlert(const lt::file_renamed_alert *p)
#endif

int pathIdx = 0;
while ((pathIdx < oldPathParts.size()) && (pathIdx < newPathParts.size())) {
while ((pathIdx < oldPathParts.size()) && (pathIdx < newPathParts.size()))
{
if (oldPathParts[pathIdx].compare(newPathParts[pathIdx], caseSensitivity) != 0)
break;
++pathIdx;
}

for (int i = (oldPathParts.size() - 1); i >= pathIdx; --i) {
for (int i = (oldPathParts.size() - 1); i >= pathIdx; --i)
{
QDir().rmdir(savePath() + Utils::String::join(oldPathParts, QLatin1String("/")));
oldPathParts.removeLast();
}
@@ -1571,9 +1631,11 @@ void TorrentHandleImpl::handleFileCompletedAlert(const lt::file_completed_alert
m_torrentInfo = TorrentInfo {m_nativeHandle.torrent_file()};

qDebug("A file completed download in torrent \"%s\"", qUtf8Printable(name()));
if (m_session->isAppendExtensionEnabled()) {
if (m_session->isAppendExtensionEnabled())
{
QString name = filePath(static_cast<LTUnderlyingType<lt::file_index_t>>(p->index));
if (name.endsWith(QB_EXT)) {
if (name.endsWith(QB_EXT))
{
const QString oldName = name;
name.chop(QB_EXT.size());
qDebug("Renaming %s to %s", qUtf8Printable(oldName), qUtf8Printable(name));
@@ -1627,7 +1689,8 @@ void TorrentHandleImpl::handleAppendExtensionToggled()

void TorrentHandleImpl::handleAlert(const lt::alert *a)
{
switch (a->type()) {
switch (a->type())
{
case lt::file_renamed_alert::alert_type:
handleFileRenamedAlert(static_cast<const lt::file_renamed_alert*>(a));
break;
@@ -1680,22 +1743,28 @@ void TorrentHandleImpl::manageIncompleteFiles()
{
const bool isAppendExtensionEnabled = m_session->isAppendExtensionEnabled();
const QVector<qreal> fp = filesProgress();
if (fp.size() != filesCount()) {
if (fp.size() != filesCount())
{
qDebug() << "skip manageIncompleteFiles because of invalid torrent meta-data or empty file-progress";
return;
}

for (int i = 0; i < filesCount(); ++i) {
for (int i = 0; i < filesCount(); ++i)
{
QString name = filePath(i);
if (isAppendExtensionEnabled && (fileSize(i) > 0) && (fp[i] < 1)) {
if (!name.endsWith(QB_EXT)) {
if (isAppendExtensionEnabled && (fileSize(i) > 0) && (fp[i] < 1))
{
if (!name.endsWith(QB_EXT))
{
const QString newName = name + QB_EXT;
qDebug() << "Renaming" << name << "to" << newName;
renameFile(i, newName);
}
}
else {
if (name.endsWith(QB_EXT)) {
else
{
if (name.endsWith(QB_EXT))
{
const QString oldName = name;
name.chop(QB_EXT.size());
qDebug() << "Renaming" << oldName << "to" << name;
@@ -1722,8 +1791,10 @@ void TorrentHandleImpl::adjustActualSavePath_impl()

if (targetDir == currentDir) return;

if (!needUseTempDir) {
if ((currentDir == tempDir) && (currentDir != QDir {m_session->tempPath()})) {
if (!needUseTempDir)
{
if ((currentDir == tempDir) && (currentDir != QDir {m_session->tempPath()}))
{
// torrent without root folder still has it in its temporary save path
// so its temp path isn't equal to temp path root
const QString currentDirPath = currentDir.absolutePath();
@@ -1790,7 +1861,8 @@ void TorrentHandleImpl::setRatioLimit(qreal limit)
else if (limit > MAX_RATIO)
limit = MAX_RATIO;

if (m_ratioLimit != limit) {
if (m_ratioLimit != limit)
{
m_ratioLimit = limit;
m_session->handleTorrentShareLimitChanged(this);
}
@@ -1803,7 +1875,8 @@ void TorrentHandleImpl::setSeedingTimeLimit(int limit)
else if (limit > MAX_SEEDING_TIME)
limit = MAX_SEEDING_TIME;

if (m_seedingTimeLimit != limit) {
if (m_seedingTimeLimit != limit)
{
m_seedingTimeLimit = limit;
m_session->handleTorrentShareLimitChanged(this);
}
@@ -1846,10 +1919,12 @@ void TorrentHandleImpl::prioritizeFiles(const QVector<DownloadPriority> &priorit
// 'torrent_finished_alert' and eg show tray notifications
const QVector<qreal> progress = filesProgress();
const QVector<DownloadPriority> oldPriorities = filePriorities();
for (int i = 0; i < oldPriorities.size(); ++i) {
for (int i = 0; i < oldPriorities.size(); ++i)
{
if ((oldPriorities[i] == DownloadPriority::Ignored)
&& (priorities[i] > DownloadPriority::Ignored)
&& (progress[i] < 1.0)) {
&& (progress[i] < 1.0))
{
m_hasSeedStatus = false;
break;
}
@@ -1875,7 +1950,8 @@ QVector<qreal> TorrentHandleImpl::availableFileFractions() const
QVector<qreal> res;
res.reserve(filesCount);
const TorrentInfo info = this->info();
for (int i = 0; i < filesCount; ++i) {
for (int i = 0; i < filesCount; ++i)
{
const TorrentInfo::PieceRange filePieces = info.filePieces(i);

int availablePieces = 0;

@@ -77,14 +77,16 @@ TorrentInfo TorrentInfo::load(const QByteArray &data, QString *error) noexcept
lt::error_code ec;
const lt::bdecode_node node = lt::bdecode(data, ec
, nullptr, depthLimit, tokenLimit);
if (ec) {
if (ec)
{
if (error)
*error = QString::fromStdString(ec.message());
return TorrentInfo();
}

TorrentInfo info {std::shared_ptr<lt::torrent_info>(new lt::torrent_info(node, ec))};
if (ec) {
if (ec)
{
if (error)
*error = QString::fromStdString(ec.message());
return TorrentInfo();
@@ -99,28 +101,33 @@ TorrentInfo TorrentInfo::loadFromFile(const QString &path, QString *error) noexc
error->clear();

QFile file {path};
if (!file.open(QIODevice::ReadOnly)) {
if (!file.open(QIODevice::ReadOnly))
{
if (error)
*error = file.errorString();
return TorrentInfo();
}

if (file.size() > MAX_TORRENT_SIZE) {
if (file.size() > MAX_TORRENT_SIZE)
{
if (error)
*error = tr("File size exceeds max limit %1").arg(Utils::Misc::friendlyUnit(MAX_TORRENT_SIZE));
return TorrentInfo();
}

QByteArray data;
try {
try
{
data = file.readAll();
}
catch (const std::bad_alloc &e) {
catch (const std::bad_alloc &e)
{
if (error)
*error = tr("Torrent file read error: %1").arg(e.what());
return TorrentInfo();
}
if (data.size() != file.size()) {
if (data.size() != file.size())
{
if (error)
*error = tr("Torrent file read error: size mismatch");
return TorrentInfo();
@@ -284,7 +291,8 @@ QVector<QUrl> TorrentInfo::urlSeeds() const
QVector<QUrl> urlSeeds;
urlSeeds.reserve(nativeWebSeeds.size());

for (const lt::web_seed_entry &webSeed : nativeWebSeeds) {
for (const lt::web_seed_entry &webSeed : nativeWebSeeds)
{
if (webSeed.type == lt::web_seed_entry::url_seed)
urlSeeds.append(QUrl(webSeed.url.c_str()));
}
@@ -353,7 +361,8 @@ TorrentInfo::PieceRange TorrentInfo::filePieces(const QString &file) const
return {};

const int index = fileIndex(file);
if (index == -1) {
if (index == -1)
{
qDebug() << "Filename" << file << "was not found in torrent" << name();
return {};
}
@@ -365,7 +374,8 @@ TorrentInfo::PieceRange TorrentInfo::filePieces(const int fileIndex) const
if (!isValid())
return {};

if ((fileIndex < 0) || (fileIndex >= filesCount())) {
if ((fileIndex < 0) || (fileIndex >= filesCount()))
{
qDebug() << "File index (" << fileIndex << ") is out of range for torrent" << name();
return {};
}
@@ -402,7 +412,8 @@ int TorrentInfo::fileIndex(const QString &fileName) const
QString TorrentInfo::rootFolder() const
{
QString rootFolder;
for (int i = 0; i < filesCount(); ++i) {
for (int i = 0; i < filesCount(); ++i)
{
const QString filePath = this->filePath(i);
if (QDir::isAbsolutePath(filePath)) continue;

@@ -433,7 +444,8 @@ void TorrentInfo::stripRootFolder()
// Solution for case of renamed root folder
const QString path = filePath(0);
const std::string newName = path.left(path.indexOf('/')).toStdString();
if (files.name() != newName) {
if (files.name() != newName)
{
files.set_name(newName);
for (int i = 0; i < files.num_files(); ++i)
files.rename_file(lt::file_index_t {i}, files.file_path(lt::file_index_t {i}));

@@ -92,9 +92,11 @@ namespace
QByteArray toBigEndianByteArray(const QHostAddress &addr)
{
// translate IP address to a sequence of bytes in big-endian order
switch (addr.protocol()) {
switch (addr.protocol())
{
case QAbstractSocket::IPv4Protocol:
case QAbstractSocket::AnyIPProtocol: {
case QAbstractSocket::AnyIPProtocol:
{
const quint32 ipv4 = addr.toIPv4Address();
QByteArray ret;
ret.append(static_cast<char>((ipv4 >> 24) & 0xFF))
@@ -104,7 +106,8 @@ namespace
return ret;
}

case QAbstractSocket::IPv6Protocol: {
case QAbstractSocket::IPv6Protocol:
{
const Q_IPV6ADDR ipv6 = addr.toIPv6Address();
QByteArray ret;
for (const quint8 i : ipv6.c)
@@ -162,7 +165,8 @@ struct Tracker::TrackerAnnounceRequest
void Tracker::TorrentStats::setPeer(const Peer &peer)
{
// always replace existing peer
if (!removePeer(peer)) {
if (!removePeer(peer))
{
// Too many peers, remove a random one
if (peers.size() >= MAX_PEERS_PER_TORRENT)
removePeer(*peers.begin());
@@ -198,8 +202,10 @@ bool Tracker::start()
const QHostAddress ip = QHostAddress::Any;
const int port = Preferences::instance()->getTrackerPort();

if (m_server->isListening()) {
if (m_server->serverPort() == port) {
if (m_server->isListening())
{
if (m_server->serverPort() == port)
{
// Already listening on the right port, just return
return true;
}
@@ -211,11 +217,13 @@ bool Tracker::start()
// Listen on the predefined port
const bool listenSuccess = m_server->listen(ip, port);

if (listenSuccess) {
if (listenSuccess)
{
LogMsg(tr("Embedded Tracker: Now listening on IP: %1, port: %2")
.arg(ip.toString(), QString::number(port)), Log::INFO);
}
else {
else
{
LogMsg(tr("Embedded Tracker: Unable to bind to IP: %1, port: %2. Reason: %3")
.arg(ip.toString(), QString::number(port), m_server->errorString())
, Log::WARNING);
@@ -233,7 +241,8 @@ Http::Response Tracker::processRequest(const Http::Request &request, const Http:

status(200);

try {
try
{
// Is it a GET request?
if (request.method != Http::HEADER_REQUEST_METHOD_GET)
throw MethodNotAllowedHTTPError();
@@ -243,16 +252,19 @@ Http::Response Tracker::processRequest(const Http::Request &request, const Http:
else
throw NotFoundHTTPError();
}
catch (const HTTPError &error) {
catch (const HTTPError &error)
{
status(error.statusCode(), error.statusText());
if (!error.message().isEmpty())
print(error.message(), Http::CONTENT_TYPE_TXT);
}
catch (const TrackerError &error) {
catch (const TrackerError &error)
{
clear(); // clear response
status(200);

const lt::entry::dictionary_type bencodedEntry = {
const lt::entry::dictionary_type bencodedEntry =
{
{ANNOUNCE_RESPONSE_FAILURE_REASON, {error.what()}}
};
QByteArray reply;
@@ -312,7 +324,8 @@ void Tracker::processAnnounceRequest()

// 4. numwant
const auto numWantIter = queryParams.find(ANNOUNCE_REQUEST_NUM_WANT);
if (numWantIter != queryParams.end()) {
if (numWantIter != queryParams.end())
{
const int num = numWantIter->toInt();
if (num < 0)
throw TrackerError("Invalid \"numwant\" parameter");
@@ -348,15 +361,18 @@ void Tracker::processAnnounceRequest()
|| (announceReq.event == ANNOUNCE_REQUEST_EVENT_EMPTY)
|| (announceReq.event == ANNOUNCE_REQUEST_EVENT_COMPLETED)
|| (announceReq.event == ANNOUNCE_REQUEST_EVENT_STARTED)
|| (announceReq.event == ANNOUNCE_REQUEST_EVENT_PAUSED)) {
|| (announceReq.event == ANNOUNCE_REQUEST_EVENT_PAUSED))
{
// [BEP-21] Extension for partial seeds
// (partial support - we don't support BEP-48 so the part that concerns that is not supported)
registerPeer(announceReq);
}
else if (announceReq.event == ANNOUNCE_REQUEST_EVENT_STOPPED) {
else if (announceReq.event == ANNOUNCE_REQUEST_EVENT_STOPPED)
{
unregisterPeer(announceReq);
}
else {
else
{
throw TrackerError("Invalid \"event\" parameter");
}

@@ -365,7 +381,8 @@ void Tracker::processAnnounceRequest()

void Tracker::registerPeer(const TrackerAnnounceRequest &announceReq)
{
if (!m_torrents.contains(announceReq.infoHash)) {
if (!m_torrents.contains(announceReq.infoHash))
{
// Reached max size, remove a random torrent
if (m_torrents.size() >= MAX_TORRENTS)
m_torrents.erase(m_torrents.begin());
@@ -390,7 +407,8 @@ void Tracker::prepareAnnounceResponse(const TrackerAnnounceRequest &announceReq)
{
const TorrentStats &torrentStats = m_torrents[announceReq.infoHash];

lt::entry::dictionary_type replyDict {
lt::entry::dictionary_type replyDict
{
{ANNOUNCE_RESPONSE_INTERVAL, ANNOUNCE_INTERVAL},
{ANNOUNCE_RESPONSE_COMPLETE, torrentStats.seeders},
{ANNOUNCE_RESPONSE_INCOMPLETE, (torrentStats.peers.size() - torrentStats.seeders)},
@@ -402,13 +420,16 @@ void Tracker::prepareAnnounceResponse(const TrackerAnnounceRequest &announceReq)
// peer list
// [BEP-7] IPv6 Tracker Extension (partial support - only the part that concerns BEP-23)
// [BEP-23] Tracker Returns Compact Peer Lists
if (announceReq.compact) {
if (announceReq.compact)
{
lt::entry::string_type peers;
lt::entry::string_type peers6;

if (announceReq.event != ANNOUNCE_REQUEST_EVENT_STOPPED) {
if (announceReq.event != ANNOUNCE_REQUEST_EVENT_STOPPED)
{
int counter = 0;
for (const Peer &peer : asConst(torrentStats.peers)) {
for (const Peer &peer : asConst(torrentStats.peers))
{
if (counter++ >= announceReq.numwant)
break;

@@ -423,16 +444,20 @@ void Tracker::prepareAnnounceResponse(const TrackerAnnounceRequest &announceReq)
if (!peers6.empty())
replyDict[ANNOUNCE_RESPONSE_PEERS6] = peers6;
}
else {
else
{
lt::entry::list_type peerList;

if (announceReq.event != ANNOUNCE_REQUEST_EVENT_STOPPED) {
if (announceReq.event != ANNOUNCE_REQUEST_EVENT_STOPPED)
{
int counter = 0;
for (const Peer &peer : torrentStats.peers) {
for (const Peer &peer : torrentStats.peers)
{
if (counter++ >= announceReq.numwant)
break;

lt::entry::dictionary_type peerDict = {
lt::entry::dictionary_type peerDict =
{
{ANNOUNCE_RESPONSE_PEERS_IP, peer.address},
{ANNOUNCE_RESPONSE_PEERS_PORT, peer.port}
};

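The compact branch above builds the BEP-23 peers string by concatenating, for each peer, the address bytes produced by toBigEndianByteArray() followed by the port in network byte order (4 + 2 bytes per IPv4 peer). A minimal standalone sketch under that assumption; compactPeerEntry() is a hypothetical helper for illustration, not a function from this commit:

#include <QByteArray>
#include <QHostAddress>

// Build one BEP-23 compact entry for an IPv4 peer:
// big-endian IP bytes followed by a big-endian 2-byte port.
QByteArray compactPeerEntry(const QHostAddress &addr, const quint16 port)
{
    const quint32 ipv4 = addr.toIPv4Address(); // assumes an IPv4 (or v4-mapped) address
    QByteArray entry;
    entry.append(static_cast<char>((ipv4 >> 24) & 0xFF))
         .append(static_cast<char>((ipv4 >> 16) & 0xFF))
         .append(static_cast<char>((ipv4 >> 8) & 0xFF))
         .append(static_cast<char>(ipv4 & 0xFF));
    entry.append(static_cast<char>((port >> 8) & 0xFF))
         .append(static_cast<char>(port & 0xFF));
    return entry;
}
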
@@ -107,7 +107,8 @@ void TrackerEntry::setTier(const int value)
int TrackerEntry::numSeeds() const
{
int value = -1;
for (const lt::announce_endpoint &endpoint : nativeEntry().endpoints) {
for (const lt::announce_endpoint &endpoint : nativeEntry().endpoints)
{
#if (LIBTORRENT_VERSION_NUM >= 20000)
for (const lt::announce_infohash &infoHash : endpoint.info_hashes)
value = std::max(value, infoHash.scrape_complete);
@@ -121,7 +122,8 @@ int TrackerEntry::numSeeds() const
int TrackerEntry::numLeeches() const
{
int value = -1;
for (const lt::announce_endpoint &endpoint : nativeEntry().endpoints) {
for (const lt::announce_endpoint &endpoint : nativeEntry().endpoints)
{
#if (LIBTORRENT_VERSION_NUM >= 20000)
for (const lt::announce_infohash &infoHash : endpoint.info_hashes)
value = std::max(value, infoHash.scrape_incomplete);
@@ -135,7 +137,8 @@ int TrackerEntry::numLeeches() const
int TrackerEntry::numDownloaded() const
{
int value = -1;
for (const lt::announce_endpoint &endpoint : nativeEntry().endpoints) {
for (const lt::announce_endpoint &endpoint : nativeEntry().endpoints)
{
#if (LIBTORRENT_VERSION_NUM >= 20000)
for (const lt::announce_infohash &infoHash : endpoint.info_hashes)
value = std::max(value, infoHash.scrape_downloaded);

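Every hunk in this commit applies the same mechanical transformation: an opening brace that trailed an if, else, for, while, switch, try, catch, or brace-initializer is moved onto its own line, with no behavioural change. A minimal before/after sketch (illustrative only, not a hunk taken verbatim from the commit):

// before: attached opening brace
if (file.size() > MAX_TORRENT_SIZE) {
    return TorrentInfo();
}

// after: opening brace on its own line, as applied throughout this commit
if (file.size() > MAX_TORRENT_SIZE)
{
    return TorrentInfo();
}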