Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 17 additions & 1 deletion src/libsync/propagateupload.cpp
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
/*
* SPDX-FileCopyrightText: 2017 Nextcloud GmbH and Nextcloud contributors
* SPDX-FileCopyrightText: 2014 ownCloud GmbH
* SPDX-License-Identifier: GPL-2.0-or-later
*/

#include "config.h"

Check failure on line 7 in src/libsync/propagateupload.cpp

View workflow job for this annotation

GitHub Actions / build

src/libsync/propagateupload.cpp:7:10 [clang-diagnostic-error]

'config.h' file not found
#include "propagateupload.h"
#include "propagateuploadencrypted.h"
#include "owncloudpropagator_p.h"
Expand Down Expand Up @@ -71,7 +71,7 @@
qCWarning(lcPutJob) << " Network error: " << reply()->errorString();
}

connect(reply(), &QNetworkReply::uploadProgress, this, [requestID] (qint64 bytesSent, qint64 bytesTotal) {

Check warning on line 74 in src/libsync/propagateupload.cpp

View check run for this annotation

SonarQubeCloud / SonarCloud Code Analysis

Unmodified variable "bytesSent" of type "long long" should be const-qualified.

See more on https://sonarcloud.io/project/issues?id=nextcloud_desktop&issues=AZ138BHF8C5q95abOY4B&open=AZ138BHF8C5q95abOY4B&pullRequest=9823

Check warning on line 74 in src/libsync/propagateupload.cpp

View check run for this annotation

SonarQubeCloud / SonarCloud Code Analysis

Unmodified variable "bytesTotal" of type "long long" should be const-qualified.

See more on https://sonarcloud.io/project/issues?id=nextcloud_desktop&issues=AZ138BHF8C5q95abOY4C&open=AZ138BHF8C5q95abOY4C&pullRequest=9823
qCDebug(lcPutJob()) << requestID << "upload progress" << bytesSent << bytesTotal;
});

Expand Down Expand Up @@ -114,7 +114,7 @@
_item->_requestId = requestId();
_item->_status = classifyError(err, _item->_httpErrorCode);
_item->_errorString = errorString();
const auto exceptionParsed = getExceptionFromReply(reply());

Check warning on line 117 in src/libsync/propagateupload.cpp

View check run for this annotation

SonarQubeCloud / SonarCloud Code Analysis

Replace this declaration by a structured binding declaration.

See more on https://sonarcloud.io/project/issues?id=nextcloud_desktop&issues=AZ138BHF8C5q95abOY4D&open=AZ138BHF8C5q95abOY4D&pullRequest=9823
_item->_errorExceptionName = exceptionParsed.first;
_item->_errorExceptionMessage = exceptionParsed.second;

Expand Down Expand Up @@ -159,7 +159,7 @@
_item->_fileId = json["fileId"].toString().toUtf8();

if (SyncJournalFileRecord oldRecord; _journal->getFileRecord(_item->destination(), &oldRecord) && oldRecord.isValid()) {
if (oldRecord._etag != _item->_etag) {

Check warning on line 162 in src/libsync/propagateupload.cpp

View check run for this annotation

SonarQubeCloud / SonarCloud Code Analysis

Merge this "if" statement with the enclosing one.

See more on https://sonarcloud.io/project/issues?id=nextcloud_desktop&issues=AZ138BHF8C5q95abOY4E&open=AZ138BHF8C5q95abOY4E&pullRequest=9823
_item->updateLockStateFromDbRecord(oldRecord);
}
}
Expand All @@ -182,9 +182,9 @@

PropagateUploadFileCommon::PropagateUploadFileCommon(OwncloudPropagator *propagator, const SyncFileItemPtr &item)
: PropagateItemJob(propagator, item)
, _finished(false)

Check warning on line 185 in src/libsync/propagateupload.cpp

View check run for this annotation

SonarQubeCloud / SonarCloud Code Analysis

Do not use the constructor's initializer list for data member "_finished". Use the in-class initializer instead.

See more on https://sonarcloud.io/project/issues?id=nextcloud_desktop&issues=AZ138BHF8C5q95abOY3_&open=AZ138BHF8C5q95abOY3_&pullRequest=9823
, _deleteExisting(false)

Check warning on line 186 in src/libsync/propagateupload.cpp

View check run for this annotation

SonarQubeCloud / SonarCloud Code Analysis

Do not use the constructor's initializer list for data member "_deleteExisting". Use the in-class initializer instead.

See more on https://sonarcloud.io/project/issues?id=nextcloud_desktop&issues=AZ138BHF8C5q95abOY4A&open=AZ138BHF8C5q95abOY4A&pullRequest=9823
, _aborting(false)

Check warning on line 187 in src/libsync/propagateupload.cpp

View check run for this annotation

SonarQubeCloud / SonarCloud Code Analysis

Do not use the constructor's initializer list for data member "_aborting". Use the in-class initializer instead.

See more on https://sonarcloud.io/project/issues?id=nextcloud_desktop&issues=AZ138BHF8C5q95abOY3-&open=AZ138BHF8C5q95abOY3-&pullRequest=9823
{
const auto path = _item->_file;
const auto slashPosition = path.lastIndexOf('/');
Expand Down Expand Up @@ -434,13 +434,29 @@
return slotOnErrorStartFolderUnlock(SyncFileItem::SoftError, tr("Local file changed during syncing. It will be resumed."));
}

// for new uploads also ensure the file size stays the same, since relying on the mtime alone is not always reliable
const auto prevFileToUploadSize = _fileToUpload._size;
const auto prevItemSize = _item->_size;
_fileToUpload._size = FileSystem::getSize(fullFilePath);
_item->_size = FileSystem::getSize(originalFilePath);

auto fileSizesChangedForNewItem = _item->_instruction == CSYNC_INSTRUCTION_NEW
&& !(prevItemSize == 0 && prevFileToUploadSize == 0) // file conflict items created during propagation may not have a file size, ignore those
&& !(prevFileToUploadSize == _fileToUpload._size && prevItemSize == _item->_size);

Check warning on line 445 in src/libsync/propagateupload.cpp

View check run for this annotation

SonarQubeCloud / SonarCloud Code Analysis

Unmodified variable "fileSizesChangedForNewItem" of type "_Bool" should be const-qualified.

See more on https://sonarcloud.io/project/issues?id=nextcloud_desktop&issues=AZ138BHF8C5q95abOY4F&open=AZ138BHF8C5q95abOY4F&pullRequest=9823
if (fileSizesChangedForNewItem) {
qCWarning(lcPropagateUpload).nospace() << "File sizes changed between discovery and propagation phase"
<< " fileToUpload.path=" << _fileToUpload._path
<< " fileToUpload.size=" << _fileToUpload._size
<< " prevFileToUploadSize=" << prevFileToUploadSize
<< " item.file=" << _item->_file
<< " item.size=" << _item->_size
<< " prevItemSize=" << prevItemSize;
}

// But skip the file if the mtime is too close to 'now'!
// That usually indicates a file that is still being changed
// or not yet fully copied to the destination.
if (fileIsStillChanging(*_item)) {
if (fileIsStillChanging(*_item) || fileSizesChangedForNewItem) {
propagator()->_anotherSyncNeeded = true;
return slotOnErrorStartFolderUnlock(SyncFileItem::SoftError, tr("Local file changed during sync."));
}
Expand Down Expand Up @@ -741,7 +757,7 @@

job->setTimeout(qBound(
// Calculate 3 minutes for each gigabyte of data
qMin(thirtyMinutes - 1, qRound64(threeMinutes * fileSize / 1e9)),

Check warning on line 760 in src/libsync/propagateupload.cpp

View check run for this annotation

SonarQubeCloud / SonarCloud Code Analysis

implicit conversion from 'qint64' (aka 'long long') to 'double' may lose precision

See more on https://sonarcloud.io/project/issues?id=nextcloud_desktop&issues=AZ138BHF8C5q95abOY39&open=AZ138BHF8C5q95abOY39&pullRequest=9823
job->timeoutMsec(),
// Maximum of 30 minutes
thirtyMinutes));
Expand All @@ -749,7 +765,7 @@

void PropagateUploadFileCommon::slotJobDestroyed(QObject *job)
{
_jobs.erase(std::remove(_jobs.begin(), _jobs.end(), job), _jobs.end());

Check warning on line 768 in src/libsync/propagateupload.cpp

View check run for this annotation

SonarQubeCloud / SonarCloud Code Analysis

Replace with the version of "std::ranges::remove" that takes a range.

See more on https://sonarcloud.io/project/issues?id=nextcloud_desktop&issues=AZ138BHF8C5q95abOY4G&open=AZ138BHF8C5q95abOY4G&pullRequest=9823
}

// This function is used whenever there is an error occurring and jobs might be in progress
Expand Down
103 changes: 103 additions & 0 deletions test/testsyncengine.cpp
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
/*
* SPDX-FileCopyrightText: 2021 Nextcloud GmbH and Nextcloud contributors
* SPDX-FileCopyrightText: 2016 ownCloud, Inc.
Expand All @@ -8,7 +8,7 @@
* any purpose.
*/

#include <QtTest>

Check failure on line 11 in test/testsyncengine.cpp

View workflow job for this annotation

GitHub Actions / build

test/testsyncengine.cpp:11:10 [clang-diagnostic-error]

'QtTest' file not found
#include <QTextCodec>

#include "syncenginetestutils.h"
Expand All @@ -17,13 +17,15 @@
#include "configfile.h"
#include "propagatorjobs.h"
#include "syncengine.h"
#include "syncoptions.h"

#include <QFile>
#include <QtTest>

#include <filesystem>

using namespace OCC;
using namespace Qt::StringLiterals;

namespace {

Expand Down Expand Up @@ -88,6 +90,10 @@
return -1;
}

/// User-defined literal for mebibytes: 25_MiB == 25 * 1024 * 1024 bytes.
/// Literal operators must take `unsigned long long`; `quint64` matches that.
/// Trailing return type per clang-tidy modernize-use-trailing-return-type;
/// ULL factors avoid mixing signed and unsigned operands in the product.
constexpr auto operator""_MiB(const quint64 value) -> quint64
{
    return value * 1024ULL * 1024ULL;
}

}

class TestSyncEngine : public QObject
Expand Down Expand Up @@ -2461,6 +2467,103 @@
const auto directoryItem = fakeFolder.remoteModifier().find("directory");
QCOMPARE(directoryItem, nullptr);
}

// Regression test: a file that keeps growing between discovery and
// propagation must be rejected and retried, never partially uploaded.
//
// While files are still being copied/saved it is possible that the sync
// engine discovers a new file while it's still being written to.
// With a small enough file size during discovery the propagation will
// be handled by a PropagateUploadFileV1 job.
// This is fine as long as it stays at a small file size, otherwise its
// implementation of the old chunking algorithm will be used. That was
// still available until Nextcloud 30; since then any chunked uploads
// performed with the now-removed API will be finished right after the
// first chunk was uploaded: with the old V1 chunking system the server
// did not respond with an ETag until the final chunk was uploaded.
// Since the removal the ETag is always present in the response, which
// led the client to believe that all chunks finished uploading, and
// considers the upload to be done.
void testUploadWhileFileIsChanging()
{
    // Server advertises chunking v1.0 so the legacy V1 code path is reachable.
    FakeFolder fakeFolder{FileInfo{}};
    fakeFolder.syncEngine().account()->setCapabilities({ { "dav", QVariantMap{ {"chunking", "1.0"} } } });
    QVERIFY(fakeFolder.syncOnce());

    // Create the file and keep the handle open: we keep appending to it
    // throughout the test to simulate a copy that is still in progress.
    // Unbuffered so each write() is immediately visible to the engine.
    const auto changemePath = fakeFolder.localPath().append("/changeme");
    QFile changemeFile{changemePath};
    QVERIFY(changemeFile.open(QIODevice::WriteOnly | QIODevice::Unbuffered));
    QVERIFY(fakeFolder.localModifier().find("changeme").exists());
    QCOMPARE(changemeFile.write("AA"), 2);

    // Files below the initial chunk size are uploaded unchunked.
    auto syncOptions = fakeFolder.syncEngine().syncOptions();
    syncOptions._initialChunkSize = 5_MiB;
    fakeFolder.syncEngine().setSyncOptions(syncOptions);

    // the file should be discovered with an initial size of 2 bytes
    // --> propagation is done by PropagateUploadFileV1
    QSignalSpy itemDiscoveredSpy(&fakeFolder.syncEngine(), &OCC::SyncEngine::itemDiscovered);
    fakeFolder.scheduleSync();
    fakeFolder.execUntilBeforePropagation();
    QCOMPARE(itemDiscoveredSpy.size(), 1);
    {
        const auto discoveredItem = itemDiscoveredSpy.takeFirst().takeFirst().value<OCC::SyncFileItemPtr>();
        QCOMPARE(discoveredItem->_size, 2);
    }

    // just before propagation starts, the file size changed!
    // the PropagateUploadFileV1 job would then try to upload the file in
    // chunks as it's now large enough to be chunked (25MiB).
    const QByteArray oneMegabyteOfScreaming = "A"_ba.repeated(1_MiB);
    for (auto i = 0; i < 25; i++) {
        changemeFile.write(oneMegabyteOfScreaming);
    }

    // finish the sync: the file should be reuploaded on the next sync run,
    // and nothing should have been uploaded
    ItemCompletedSpy itemCompleteSpy(fakeFolder);
    QVERIFY(!fakeFolder.execUntilFinished());
    QVERIFY(!itemDidCompleteSuccessfully(itemCompleteSpy, "changeme"));
    QCOMPARE(itemCompleteSpy.findItem("changeme")->_errorString, "Local file changed during sync.");
    QVERIFY(fakeFolder.syncEngine().isAnotherSyncNeeded());
    QCOMPARE(fakeFolder.currentRemoteState().children.size(), 0);

    // retry the sync again -- the file is still changing
    // this time as the file size is large enough the correct chunking
    // system from PropagateUploadFileNG would be used
    itemDiscoveredSpy.clear();
    itemCompleteSpy.clear();
    fakeFolder.scheduleSync();
    fakeFolder.execUntilBeforePropagation();
    QCOMPARE(itemDiscoveredSpy.size(), 1);
    {
        const auto discoveredItem = itemDiscoveredSpy.takeFirst().takeFirst().value<OCC::SyncFileItemPtr>();
        QCOMPARE(discoveredItem->_size, 25_MiB + 2);
    }
    // grow the file again between discovery and propagation
    for (auto i = 0; i < 25; i++) {
        changemeFile.write(oneMegabyteOfScreaming);
    }
    // and we fail again!
    QVERIFY(!fakeFolder.execUntilFinished());
    QVERIFY(!itemDidCompleteSuccessfully(itemCompleteSpy, "changeme"));
    QCOMPARE(itemCompleteSpy.findItem("changeme")->_errorString, "Local file changed during sync.");
    QVERIFY(fakeFolder.syncEngine().isAnotherSyncNeeded());
    QCOMPARE(fakeFolder.currentRemoteState().children.size(), 0);

    // the file is now complete, the next sync should work and result in the
    // same remote and local states. again, still using the chunking from
    // the PropagateUploadFileNG job
    changemeFile.close();
    itemDiscoveredSpy.clear();
    itemCompleteSpy.clear();
    QVERIFY(fakeFolder.syncOnce());
    QCOMPARE(itemDiscoveredSpy.size(), 1);
    {
        const auto discoveredItem = itemDiscoveredSpy.takeFirst().takeFirst().value<OCC::SyncFileItemPtr>();
        QCOMPARE(discoveredItem->_size, 50_MiB + 2);
    }
    QVERIFY(itemDidCompleteSuccessfully(itemCompleteSpy, "changeme"));
    QVERIFY(!fakeFolder.syncEngine().isAnotherSyncNeeded());
    QCOMPARE(fakeFolder.currentLocalState(), fakeFolder.currentRemoteState());
}
};

QTEST_GUILESS_MAIN(TestSyncEngine)
Expand Down