blob: 5e9ca9399e7cb84a3e1605616177348dfbefd808 [file] [log] [blame]
// Copyright 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/sync/engine_impl/syncer.h"
#include <stddef.h>
#include <algorithm>
#include <limits>
#include <list>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/compiler_specific.h"
#include "base/location.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/test/metrics/histogram_tester.h"
#include "base/test/scoped_task_environment.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "components/sync/base/cancelation_signal.h"
#include "components/sync/base/cryptographer.h"
#include "components/sync/base/extensions_activity.h"
#include "components/sync/base/fake_encryptor.h"
#include "components/sync/base/time.h"
#include "components/sync/engine/cycle/commit_counters.h"
#include "components/sync/engine/cycle/status_counters.h"
#include "components/sync/engine/cycle/update_counters.h"
#include "components/sync/engine/model_safe_worker.h"
#include "components/sync/engine_impl/backoff_delay_provider.h"
#include "components/sync/engine_impl/cycle/mock_debug_info_getter.h"
#include "components/sync/engine_impl/cycle/sync_cycle_context.h"
#include "components/sync/engine_impl/get_commit_ids.h"
#include "components/sync/engine_impl/net/server_connection_manager.h"
#include "components/sync/engine_impl/sync_scheduler_impl.h"
#include "components/sync/engine_impl/syncer_proto_util.h"
#include "components/sync/protocol/bookmark_specifics.pb.h"
#include "components/sync/protocol/nigori_specifics.pb.h"
#include "components/sync/protocol/preference_specifics.pb.h"
#include "components/sync/syncable/mutable_entry.h"
#include "components/sync/syncable/nigori_util.h"
#include "components/sync/syncable/syncable_delete_journal.h"
#include "components/sync/syncable/syncable_read_transaction.h"
#include "components/sync/syncable/syncable_util.h"
#include "components/sync/syncable/syncable_write_transaction.h"
#include "components/sync/syncable/test_user_share.h"
#include "components/sync/test/engine/fake_model_worker.h"
#include "components/sync/test/engine/mock_connection_manager.h"
#include "components/sync/test/engine/mock_nudge_handler.h"
#include "components/sync/test/engine/test_id_factory.h"
#include "components/sync/test/engine/test_syncable_utils.h"
#include "components/sync/test/fake_sync_encryption_handler.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
using base::TimeDelta;
using std::count;
using std::map;
using std::multimap;
using std::set;
using std::string;
using std::vector;
namespace syncer {
using syncable::CountEntriesWithName;
using syncable::Directory;
using syncable::Entry;
using syncable::GetFirstEntryWithName;
using syncable::GetOnlyEntryWithName;
using syncable::Id;
using syncable::kEncryptedString;
using syncable::MutableEntry;
using syncable::CREATE;
using syncable::GET_BY_HANDLE;
using syncable::GET_BY_ID;
using syncable::GET_BY_CLIENT_TAG;
using syncable::GET_BY_SERVER_TAG;
using syncable::GET_TYPE_ROOT;
using syncable::UNITTEST;
namespace {
// A helper to hold on to the counters emitted by the sync engine.
class TypeDebugInfoCache : public TypeDebugInfoObserver {
 public:
  TypeDebugInfoCache();
  ~TypeDebugInfoCache() override;

  // Return the most recently observed counters for |type|, or a
  // default-constructed value if no update for that type has been seen yet.
  CommitCounters GetLatestCommitCounters(ModelType type) const;
  UpdateCounters GetLatestUpdateCounters(ModelType type) const;
  StatusCounters GetLatestStatusCounters(ModelType type) const;

  // TypeDebugInfoObserver implementation. Each callback overwrites the
  // cached snapshot for |type| with the newly emitted counters.
  void OnCommitCountersUpdated(ModelType type,
                               const CommitCounters& counters) override;
  void OnUpdateCountersUpdated(ModelType type,
                               const UpdateCounters& counters) override;
  void OnStatusCountersUpdated(ModelType type,
                               const StatusCounters& counters) override;

 private:
  // Latest counters seen per model type; absent key means "never updated".
  std::map<ModelType, CommitCounters> commit_counters_map_;
  std::map<ModelType, UpdateCounters> update_counters_map_;
  std::map<ModelType, StatusCounters> status_counters_map_;
};
// Defaulted out-of-line: the empty user-written body added nothing and
// `= default` is the idiomatic spelling for a no-op constructor.
TypeDebugInfoCache::TypeDebugInfoCache() = default;
// Defaulted out-of-line destructor (was an empty user-written body).
TypeDebugInfoCache::~TypeDebugInfoCache() = default;
// Returns the latest cached commit counters for |type|; a type that has
// never reported yields a default-constructed CommitCounters.
CommitCounters TypeDebugInfoCache::GetLatestCommitCounters(
    ModelType type) const {
  const auto found = commit_counters_map_.find(type);
  return found == commit_counters_map_.end() ? CommitCounters()
                                             : found->second;
}
// Returns the latest cached update counters for |type|; a type that has
// never reported yields a default-constructed UpdateCounters.
UpdateCounters TypeDebugInfoCache::GetLatestUpdateCounters(
    ModelType type) const {
  const auto found = update_counters_map_.find(type);
  return found == update_counters_map_.end() ? UpdateCounters()
                                             : found->second;
}
// Returns the latest cached status counters for |type|; a type that has
// never reported yields a default-constructed StatusCounters.
StatusCounters TypeDebugInfoCache::GetLatestStatusCounters(
    ModelType type) const {
  const auto found = status_counters_map_.find(type);
  return found == status_counters_map_.end() ? StatusCounters()
                                             : found->second;
}
// Caches the newest commit-counter snapshot for |type|, replacing any
// previously stored value.
void TypeDebugInfoCache::OnCommitCountersUpdated(
    ModelType type,
    const CommitCounters& counters) {
  commit_counters_map_.insert_or_assign(type, counters);
}
// Caches the newest update-counter snapshot for |type|, replacing any
// previously stored value.
void TypeDebugInfoCache::OnUpdateCountersUpdated(
    ModelType type,
    const UpdateCounters& counters) {
  update_counters_map_.insert_or_assign(type, counters);
}
// Caches the newest status-counter snapshot for |type|, replacing any
// previously stored value.
void TypeDebugInfoCache::OnStatusCountersUpdated(
    ModelType type,
    const StatusCounters& counters) {
  status_counters_map_.insert_or_assign(type, counters);
}
} // namespace
// Syncer unit tests. Unfortunately a lot of these tests
// are outdated and need to be reworked and updated.
// Test fixture wiring a real Syncer and SyncCycleContext to a
// MockConnectionManager so tests can script server responses and then
// inspect the resulting syncable::Directory state. The fixture itself acts
// as the SyncCycle::Delegate and as a SyncEngineEventListener so scheduling
// callbacks and cycle events can be observed directly.
class SyncerTest : public testing::Test,
                   public SyncCycle::Delegate,
                   public SyncEngineEventListener {
 protected:
  SyncerTest()
      : extensions_activity_(new ExtensionsActivity),
        syncer_(nullptr),
        last_client_invalidation_hint_buffer_size_(10) {}

  // SyncCycle::Delegate implementation.
  // Full-client throttling is never expected in these tests.
  void OnThrottled(const base::TimeDelta& throttle_duration) override {
    FAIL() << "Should not get silenced.";
  }
  void OnTypesThrottled(ModelTypeSet types,
                        const base::TimeDelta& throttle_duration) override {
    scheduler_->OnTypesThrottled(types, throttle_duration);
  }
  void OnTypesBackedOff(ModelTypeSet types) override {
    scheduler_->OnTypesBackedOff(types);
  }
  bool IsAnyThrottleOrBackoff() override { return false; }
  void OnReceivedLongPollIntervalUpdate(
      const base::TimeDelta& new_interval) override {
    last_long_poll_interval_received_ = new_interval;
  }
  void OnReceivedShortPollIntervalUpdate(
      const base::TimeDelta& new_interval) override {
    last_short_poll_interval_received_ = new_interval;
  }
  // Captures server-specified commit delays. Only the SESSIONS and
  // BOOKMARKS entries are recorded, and only when strictly positive.
  void OnReceivedCustomNudgeDelays(
      const std::map<ModelType, base::TimeDelta>& delay_map) override {
    auto iter = delay_map.find(SESSIONS);
    if (iter != delay_map.end() && iter->second > base::TimeDelta())
      last_sessions_commit_delay_ = iter->second;
    iter = delay_map.find(BOOKMARKS);
    if (iter != delay_map.end() && iter->second > base::TimeDelta())
      last_bookmarks_commit_delay_ = iter->second;
  }
  void OnReceivedClientInvalidationHintBufferSize(int size) override {
    last_client_invalidation_hint_buffer_size_ = size;
  }
  void OnReceivedGuRetryDelay(const base::TimeDelta& delay) override {}
  void OnReceivedMigrationRequest(ModelTypeSet types) override {}
  void OnProtocolEvent(const ProtocolEvent& event) override {}
  void OnSyncProtocolError(const SyncProtocolError& error) override {}

  // SyncEngineEventListener implementation. Any event other than the three
  // expected lifecycle events fails the test immediately.
  void OnSyncCycleEvent(const SyncCycleEvent& event) override {
    DVLOG(1) << "HandleSyncEngineEvent in unittest " << event.what_happened;
    // we only test for entry-specific events, not status changed ones.
    switch (event.what_happened) {
      case SyncCycleEvent::SYNC_CYCLE_BEGIN:  // Fall through.
      case SyncCycleEvent::STATUS_CHANGED:
      case SyncCycleEvent::SYNC_CYCLE_ENDED:
        return;
      default:
        FAIL() << "Handling unknown error type in unit tests!!";
    }
  }
  void OnActionableError(const SyncProtocolError& error) override {}
  void OnRetryTimeChanged(base::Time retry_time) override {}
  void OnThrottledTypesChanged(ModelTypeSet throttled_types) override {}
  void OnBackedOffTypesChanged(ModelTypeSet backed_off_types) override {}
  void OnMigrationRequested(ModelTypeSet types) override {}

  // Discards the previous cycle (if any) and starts a fresh one against the
  // shared context, with this fixture as delegate.
  void ResetCycle() {
    cycle_ = std::make_unique<SyncCycle>(context_.get(), this);
  }

  // Runs one "normal" (nudged) sync cycle over all enabled types. Returns
  // the syncer's success/failure result.
  bool SyncShareNudge() {
    ResetCycle();
    // Pretend we've seen a local change, to make the nudge_tracker look normal.
    nudge_tracker_.RecordLocalChange(ModelTypeSet(BOOKMARKS));
    return syncer_->NormalSyncShare(context_->GetEnabledTypes(),
                                    &nudge_tracker_, cycle_.get());
  }

  // Runs one configuration cycle over all enabled types.
  bool SyncShareConfigure() {
    return SyncShareConfigureTypes(context_->GetEnabledTypes());
  }

  // Runs one configuration cycle restricted to |types|.
  bool SyncShareConfigureTypes(ModelTypeSet types) {
    ResetCycle();
    return syncer_->ConfigureSyncShare(
        types, sync_pb::SyncEnums::RECONFIGURATION, cycle_.get());
  }

  // Builds the whole test harness: mock server, model type registry (with
  // the debug-info cache registered as observer), cycle context, syncer and
  // scheduler. Also seeds a few well-known ids and the store birthday.
  void SetUp() override {
    test_user_share_.SetUp();
    mock_server_ = std::make_unique<MockConnectionManager>(
        directory(), &cancelation_signal_);
    debug_info_getter_ = std::make_unique<MockDebugInfoGetter>();
    workers_.push_back(
        scoped_refptr<ModelSafeWorker>(new FakeModelWorker(GROUP_PASSIVE)));
    std::vector<SyncEngineEventListener*> listeners;
    listeners.push_back(this);

    model_type_registry_ = std::make_unique<ModelTypeRegistry>(
        workers_, test_user_share_.user_share(), &mock_nudge_handler_,
        UssMigrator(), &cancelation_signal_);
    model_type_registry_->RegisterDirectoryTypeDebugInfoObserver(
        &debug_info_cache_);

    EnableDatatype(BOOKMARKS);
    EnableDatatype(EXTENSIONS);
    EnableDatatype(NIGORI);
    EnableDatatype(PREFERENCES);

    context_ = std::make_unique<SyncCycleContext>(
        mock_server_.get(), directory(), extensions_activity_.get(), listeners,
        debug_info_getter_.get(), model_type_registry_.get(),
        true,   // enable keystore encryption
        false,  // force enable pre-commit GU avoidance experiment
        "fake_invalidator_client_id",
        /*short_poll_interval=*/base::TimeDelta::FromMinutes(30),
        /*long_poll_interval=*/base::TimeDelta::FromMinutes(180));
    syncer_ = new Syncer(&cancelation_signal_);
    scheduler_ = std::make_unique<SyncSchedulerImpl>(
        "TestSyncScheduler", BackoffDelayProvider::FromDefaults(),
        context_.get(),
        // scheduler_ now owns syncer_ and will manage its memory.
        syncer_, false);

    // A fresh directory must start with no children under the root.
    syncable::ReadTransaction trans(FROM_HERE, directory());
    Directory::Metahandles children;
    directory()->GetChildHandlesById(&trans, trans.root_id(), &children);
    ASSERT_EQ(0u, children.size());
    root_id_ = TestIdFactory::root();
    parent_id_ = ids_.MakeServer("parent id");
    child_id_ = ids_.MakeServer("child id");
    directory()->set_store_birthday(mock_server_->store_birthday());
    mock_server_->SetKeystoreKey("encryption_key");
  }

  // Tears down in reverse dependency order; the observer must be
  // unregistered before the registry goes away.
  void TearDown() override {
    model_type_registry_->UnregisterDirectoryTypeDebugInfoObserver(
        &debug_info_cache_);
    mock_server_.reset();
    scheduler_.reset();
    test_user_share_.TearDown();
  }

  // Fills |entry| with canonical bookmark data and marks it unsynced.
  void WriteTestDataToEntry(syncable::WriteTransaction* trans,
                            MutableEntry* entry) {
    EXPECT_FALSE(entry->GetIsDir());
    EXPECT_FALSE(entry->GetIsDel());
    sync_pb::EntitySpecifics specifics;
    specifics.mutable_bookmark()->set_url("http://demo/");
    specifics.mutable_bookmark()->set_favicon("PNG");
    entry->PutSpecifics(specifics);
    entry->PutIsUnsynced(true);
  }

  // Checks that |entry| carries the data written by WriteTestDataToEntry().
  void VerifyTestDataInEntry(syncable::BaseTransaction* trans, Entry* entry) {
    EXPECT_FALSE(entry->GetIsDir());
    EXPECT_FALSE(entry->GetIsDel());
    VerifyTestBookmarkDataInEntry(entry);
  }
  void VerifyTestBookmarkDataInEntry(Entry* entry) {
    const sync_pb::EntitySpecifics& specifics = entry->GetSpecifics();
    EXPECT_TRUE(specifics.has_bookmark());
    EXPECT_EQ("PNG", specifics.bookmark().favicon());
    EXPECT_EQ("http://demo/", specifics.bookmark().url());
  }

  void VerifyHierarchyConflictsReported(
      const sync_pb::ClientToServerMessage& message) {
    // Our request should have included a warning about hierarchy conflicts.
    const sync_pb::ClientStatus& client_status = message.client_status();
    EXPECT_TRUE(client_status.has_hierarchy_conflict_detected());
    EXPECT_TRUE(client_status.hierarchy_conflict_detected());
  }

  void VerifyNoHierarchyConflictsReported(
      const sync_pb::ClientToServerMessage& message) {
    // Our request should have reported no hierarchy conflicts detected.
    const sync_pb::ClientStatus& client_status = message.client_status();
    EXPECT_TRUE(client_status.has_hierarchy_conflict_detected());
    EXPECT_FALSE(client_status.hierarchy_conflict_detected());
  }

  void VerifyHierarchyConflictsUnspecified(
      const sync_pb::ClientToServerMessage& message) {
    // Our request should have neither confirmed nor denied hierarchy conflicts.
    const sync_pb::ClientStatus& client_status = message.client_status();
    EXPECT_FALSE(client_status.has_hierarchy_conflict_detected());
  }

  sync_pb::EntitySpecifics DefaultBookmarkSpecifics() {
    sync_pb::EntitySpecifics result;
    AddDefaultFieldValue(BOOKMARKS, &result);
    return result;
  }

  sync_pb::EntitySpecifics DefaultPreferencesSpecifics() {
    sync_pb::EntitySpecifics result;
    AddDefaultFieldValue(PREFERENCES, &result);
    return result;
  }

  // Enumeration of alterations to entries for commit ordering tests.
  enum EntryFeature {
    LIST_END = 0,  // Denotes the end of the list of features from below.
    SYNCED,        // Items are unsynced by default
    DELETED,
    OLD_MTIME,
    MOVED_FROM_ROOT,
  };

  // One row of a table-driven commit-ordering test; see
  // RunCommitOrderingTest() for how the fields are interpreted.
  struct CommitOrderingTest {
    // expected commit index.
    int commit_index;
    // Details about the item
    syncable::Id id;
    syncable::Id parent_id;
    EntryFeature features[10];

    // Sentinel row: a root id terminates the table.
    static CommitOrderingTest MakeLastCommitItem() {
      CommitOrderingTest last_commit_item;
      last_commit_item.commit_index = -1;
      last_commit_item.id = TestIdFactory::root();
      return last_commit_item;
    }
  };

  // Creates one directory entry per row of |test| (until the root-id
  // sentinel), runs a nudged sync, and verifies the mock server saw the
  // commits at the expected positions. A commit_index of -1 means the row's
  // item is not expected to be committed at all.
  void RunCommitOrderingTest(CommitOrderingTest* test) {
    map<int, syncable::Id> expected_positions;
    {  // Transaction scope.
      syncable::WriteTransaction trans(FROM_HERE, UNITTEST, directory());
      while (!test->id.IsRoot()) {
        if (test->commit_index >= 0) {
          map<int, syncable::Id>::value_type entry(test->commit_index,
                                                   test->id);
          bool double_position = !expected_positions.insert(entry).second;
          ASSERT_FALSE(double_position) << "Two id's expected at one position";
        }
        string utf8_name = test->id.GetServerId();
        string name(utf8_name.begin(), utf8_name.end());
        MutableEntry entry(&trans, CREATE, BOOKMARKS, test->parent_id, name);
        entry.PutId(test->id);
        if (test->id.ServerKnows()) {
          entry.PutBaseVersion(5);
          entry.PutServerVersion(5);
          entry.PutServerParentId(test->parent_id);
        }
        entry.PutIsDir(true);
        entry.PutIsUnsynced(true);
        entry.PutSpecifics(DefaultBookmarkSpecifics());
        // Set the time to 30 seconds in the future to reduce the chance of
        // flaky tests.
        const base::Time& now_plus_30s =
            base::Time::Now() + base::TimeDelta::FromSeconds(30);
        const base::Time& now_minus_2h =
            base::Time::Now() - base::TimeDelta::FromHours(2);
        entry.PutMtime(now_plus_30s);
        for (size_t i = 0; i < base::size(test->features); ++i) {
          switch (test->features[i]) {
            case LIST_END:
              break;
            case SYNCED:
              entry.PutIsUnsynced(false);
              break;
            case DELETED:
              entry.PutIsDel(true);
              break;
            case OLD_MTIME:
              entry.PutMtime(now_minus_2h);
              break;
            case MOVED_FROM_ROOT:
              entry.PutServerParentId(trans.root_id());
              break;
            default:
              FAIL() << "Bad value in CommitOrderingTest list";
          }
        }
        test++;
      }
    }
    EXPECT_TRUE(SyncShareNudge());
    ASSERT_EQ(expected_positions.size(), mock_server_->committed_ids().size());
    // If this test starts failing, be aware other sort orders could be valid.
    for (size_t i = 0; i < expected_positions.size(); ++i) {
      SCOPED_TRACE(i);
      EXPECT_EQ(1u, expected_positions.count(i));
      EXPECT_EQ(expected_positions[i], mock_server_->committed_ids()[i]);
    }
  }

  // Accessors for the latest per-type counters captured by
  // |debug_info_cache_|.
  CommitCounters GetCommitCounters(ModelType type) {
    return debug_info_cache_.GetLatestCommitCounters(type);
  }

  UpdateCounters GetUpdateCounters(ModelType type) {
    return debug_info_cache_.GetLatestUpdateCounters(type);
  }

  StatusCounters GetStatusCounters(ModelType type) {
    return debug_info_cache_.GetLatestStatusCounters(type);
  }

  Directory* directory() {
    return test_user_share_.user_share()->directory.get();
  }

  const std::string local_cache_guid() { return directory()->cache_guid(); }

  // A fixed guid representing "some other client" for scripted updates.
  const std::string foreign_cache_guid() { return "kqyg7097kro6GSUod+GSg=="; }

  int64_t CreateUnsyncedDirectory(const string& entry_name,
                                  const string& idstring) {
    return CreateUnsyncedDirectory(entry_name,
                                   syncable::Id::CreateFromServerId(idstring));
  }

  // Creates an unsynced bookmark folder under the root and returns its
  // metahandle. Base version is 1 for server-known ids, 0 otherwise.
  int64_t CreateUnsyncedDirectory(const string& entry_name,
                                  const syncable::Id& id) {
    syncable::WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
    MutableEntry entry(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(),
                       entry_name);
    EXPECT_TRUE(entry.good());
    entry.PutIsUnsynced(true);
    entry.PutIsDir(true);
    entry.PutSpecifics(DefaultBookmarkSpecifics());
    entry.PutBaseVersion(id.ServerKnows() ? 1 : 0);
    entry.PutId(id);
    return entry.GetMetahandle();
  }

  // Registers |model_type| with the registry and tells the mock server to
  // expect it in GetUpdates requests from now on.
  void EnableDatatype(ModelType model_type) {
    enabled_datatypes_.Put(model_type);
    model_type_registry_->RegisterDirectoryType(model_type, GROUP_PASSIVE);
    mock_server_->ExpectGetUpdatesRequestTypes(enabled_datatypes_);
  }

  void DisableDatatype(ModelType model_type) {
    enabled_datatypes_.Remove(model_type);
    model_type_registry_->UnregisterDirectoryType(model_type);
    mock_server_->ExpectGetUpdatesRequestTypes(enabled_datatypes_);
  }

  Cryptographer* GetCryptographer(syncable::BaseTransaction* trans) {
    return directory()->GetCryptographer(trans);
  }

  // Configures SyncCycleContext and NudgeTracker so Syncer won't call
  // GetUpdates prior to Commit. This method can be used to ensure a Commit is
  // not preceded by GetUpdates.
  void ConfigureNoGetUpdatesRequired() {
    context_->set_server_enabled_pre_commit_update_avoidance(true);
    nudge_tracker_.OnInvalidationsEnabled();
    nudge_tracker_.RecordSuccessfulSyncCycle();

    ASSERT_FALSE(context_->ShouldFetchUpdatesBeforeCommit());
    ASSERT_FALSE(nudge_tracker_.IsGetUpdatesRequired());
  }

  base::test::ScopedTaskEnvironment task_environment_;

  // Some ids to aid tests. Only the root one's value is specific. The rest
  // are named for test clarity.
  // TODO(chron): Get rid of these inbuilt IDs. They only make it
  // more confusing.
  syncable::Id root_id_;
  syncable::Id parent_id_;
  syncable::Id child_id_;

  TestIdFactory ids_;

  TestUserShare test_user_share_;
  FakeEncryptor encryptor_;
  scoped_refptr<ExtensionsActivity> extensions_activity_;
  std::unique_ptr<MockConnectionManager> mock_server_;
  CancelationSignal cancelation_signal_;

  // Owned by |scheduler_| after SetUp(); kept as a raw pointer so tests can
  // drive sync cycles directly.
  Syncer* syncer_;

  std::unique_ptr<SyncCycle> cycle_;
  TypeDebugInfoCache debug_info_cache_;
  MockNudgeHandler mock_nudge_handler_;
  std::unique_ptr<ModelTypeRegistry> model_type_registry_;
  std::unique_ptr<SyncSchedulerImpl> scheduler_;
  std::unique_ptr<SyncCycleContext> context_;

  // Most recent values delivered through the SyncCycle::Delegate callbacks.
  base::TimeDelta last_short_poll_interval_received_;
  base::TimeDelta last_long_poll_interval_received_;
  base::TimeDelta last_sessions_commit_delay_;
  base::TimeDelta last_bookmarks_commit_delay_;
  int last_client_invalidation_hint_buffer_size_;

  std::vector<scoped_refptr<ModelSafeWorker>> workers_;
  ModelTypeSet enabled_datatypes_;
  NudgeTracker nudge_tracker_;
  std::unique_ptr<MockDebugInfoGetter> debug_info_getter_;

 private:
  DISALLOW_COPY_AND_ASSIGN(SyncerTest);
};
// Sanity check: a freshly set up directory reports no unsynced entries.
TEST_F(SyncerTest, TestCallGatherUnsyncedEntries) {
  syncable::Directory::Metahandles unsynced_handles;
  {
    syncable::ReadTransaction trans(FROM_HERE, directory());
    syncable::GetUnsyncedEntries(&trans, &unsynced_handles);
  }
  ASSERT_TRUE(unsynced_handles.empty());
  // TODO(sync): When we can dynamically connect and disconnect the mock
  // ServerConnectionManager test disconnected GetUnsyncedEntries here. It's a
  // regression for a very old bug.
}
// Verifies that a locally changed entry of a throttled type is excluded from
// commit, and is committed once the type is synced again.
TEST_F(SyncerTest, GetCommitIdsFiltersThrottledEntries) {
  const ModelTypeSet throttled_types(BOOKMARKS);
  sync_pb::EntitySpecifics bookmark_data;
  AddDefaultFieldValue(BOOKMARKS, &bookmark_data);

  // Download a bookmark directory from the server, then dirty it locally.
  mock_server_->AddUpdateDirectory(1, 0, "A", 10, 10, foreign_cache_guid(),
                                   "-1");
  EXPECT_TRUE(SyncShareNudge());

  {
    syncable::WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
    MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
    ASSERT_TRUE(A.good());
    A.PutIsUnsynced(true);
    A.PutSpecifics(bookmark_data);
    A.PutNonUniqueName("bookmark");
  }

  // Now sync without enabling bookmarks.
  mock_server_->ExpectGetUpdatesRequestTypes(
      Difference(context_->GetEnabledTypes(), throttled_types));
  ResetCycle();
  syncer_->NormalSyncShare(
      Difference(context_->GetEnabledTypes(), throttled_types), &nudge_tracker_,
      cycle_.get());

  {
    // Nothing should have been committed as bookmarks is throttled.
    syncable::ReadTransaction rtrans(FROM_HERE, directory());
    Entry entryA(&rtrans, GET_BY_ID, ids_.FromNumber(1));
    ASSERT_TRUE(entryA.good());
    EXPECT_TRUE(entryA.GetIsUnsynced());
  }

  // Sync again with bookmarks enabled.
  mock_server_->ExpectGetUpdatesRequestTypes(context_->GetEnabledTypes());
  EXPECT_TRUE(SyncShareNudge());
  {
    // It should have been committed.
    syncable::ReadTransaction rtrans(FROM_HERE, directory());
    Entry entryA(&rtrans, GET_BY_ID, ids_.FromNumber(1));
    ASSERT_TRUE(entryA.good());
    EXPECT_FALSE(entryA.GetIsUnsynced());
  }
}
// Asserts the sync-status bits and versions of the entry identified by
// |id_fac|.FromNumber(|id|) under the read transaction |rtrans|. Pass
// parent_id == -1 to skip the parent check.
// We use a macro so we can preserve the error location.
#define VERIFY_ENTRY(id, is_unapplied, is_unsynced, prev_initialized,          \
                     parent_id, version, server_version, id_fac, rtrans)       \
  do {                                                                         \
    Entry entryA(rtrans, GET_BY_ID, id_fac.FromNumber(id));                    \
    ASSERT_TRUE(entryA.good());                                                \
    /* We don't use EXPECT_EQ here because if the left side param is false,*/  \
    /* gcc 4.6 warns converting 'false' to pointer type for argument 1.*/      \
    EXPECT_TRUE(is_unsynced == entryA.GetIsUnsynced());                        \
    EXPECT_TRUE(is_unapplied == entryA.GetIsUnappliedUpdate());                \
    EXPECT_TRUE(prev_initialized == IsRealDataType(GetModelTypeFromSpecifics(  \
                                        entryA.GetBaseServerSpecifics())));    \
    EXPECT_TRUE(parent_id == -1 ||                                             \
                entryA.GetParentId() == id_fac.FromNumber(parent_id));         \
    EXPECT_EQ(version, entryA.GetBaseVersion());                               \
    EXPECT_EQ(server_version, entryA.GetServerVersion());                      \
  } while (0)
// Verifies that entries which are not ready to commit (improperly encrypted
// while the cryptographer has pending keys) are filtered out of the commit
// set, and commit once encryption is resolved and the data is fixed up.
TEST_F(SyncerTest, GetCommitIdsFiltersUnreadyEntries) {
  KeyParams key_params = {KeyDerivationParams::CreateForPbkdf2(), "foobar"};
  KeyParams other_params = {KeyDerivationParams::CreateForPbkdf2(), "foobar2"};
  sync_pb::EntitySpecifics bookmark, encrypted_bookmark;
  bookmark.mutable_bookmark()->set_url("url");
  bookmark.mutable_bookmark()->set_title("title");
  AddDefaultFieldValue(BOOKMARKS, &encrypted_bookmark);
  // Seed four directories from the server.
  mock_server_->AddUpdateDirectory(1, 0, "A", 10, 10, foreign_cache_guid(),
                                   "-1");
  mock_server_->AddUpdateDirectory(2, 0, "B", 10, 10, foreign_cache_guid(),
                                   "-2");
  mock_server_->AddUpdateDirectory(3, 0, "C", 10, 10, foreign_cache_guid(),
                                   "-3");
  mock_server_->AddUpdateDirectory(4, 0, "D", 10, 10, foreign_cache_guid(),
                                   "-4");
  EXPECT_TRUE(SyncShareNudge());
  // Server side change will put A in conflict.
  mock_server_->AddUpdateDirectory(1, 0, "A", 20, 20, foreign_cache_guid(),
                                   "-1");
  {
    // Mark bookmarks as encrypted and set the cryptographer to have pending
    // keys.
    syncable::WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
    Cryptographer other_cryptographer(&encryptor_);
    other_cryptographer.AddKey(other_params);
    sync_pb::EntitySpecifics specifics;
    sync_pb::NigoriSpecifics* nigori = specifics.mutable_nigori();
    other_cryptographer.GetKeys(nigori->mutable_encryption_keybag());
    test_user_share_.encryption_handler()->EnableEncryptEverything();
    // Set up with an old passphrase, but have pending keys
    GetCryptographer(&wtrans)->AddKey(key_params);
    GetCryptographer(&wtrans)->Encrypt(bookmark,
                                       encrypted_bookmark.mutable_encrypted());
    GetCryptographer(&wtrans)->SetPendingKeys(nigori->encryption_keybag());

    // In conflict but properly encrypted.
    MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
    ASSERT_TRUE(A.good());
    A.PutIsUnsynced(true);
    A.PutSpecifics(encrypted_bookmark);
    A.PutNonUniqueName(kEncryptedString);
    // Not in conflict and properly encrypted.
    MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
    ASSERT_TRUE(B.good());
    B.PutIsUnsynced(true);
    B.PutSpecifics(encrypted_bookmark);
    B.PutNonUniqueName(kEncryptedString);
    // Unencrypted specifics.
    MutableEntry C(&wtrans, GET_BY_ID, ids_.FromNumber(3));
    ASSERT_TRUE(C.good());
    C.PutIsUnsynced(true);
    C.PutNonUniqueName(kEncryptedString);
    // Unencrypted non_unique_name.
    MutableEntry D(&wtrans, GET_BY_ID, ids_.FromNumber(4));
    ASSERT_TRUE(D.good());
    D.PutIsUnsynced(true);
    D.PutSpecifics(encrypted_bookmark);
    D.PutNonUniqueName("not encrypted");
  }
  EXPECT_TRUE(SyncShareNudge());
  {
    // Nothing should have committed due to bookmarks being encrypted and
    // the cryptographer having pending keys. A would have been resolved
    // as a simple conflict, but still be unsynced until the next sync cycle.
    syncable::ReadTransaction rtrans(FROM_HERE, directory());
    VERIFY_ENTRY(1, false, true, false, 0, 20, 20, ids_, &rtrans);
    VERIFY_ENTRY(2, false, true, false, 0, 10, 10, ids_, &rtrans);
    VERIFY_ENTRY(3, false, true, false, 0, 10, 10, ids_, &rtrans);
    VERIFY_ENTRY(4, false, true, false, 0, 10, 10, ids_, &rtrans);

    // Resolve the pending keys.
    GetCryptographer(&rtrans)->DecryptPendingKeys(other_params);
  }
  EXPECT_TRUE(SyncShareNudge());
  {
    // All properly encrypted and non-conflicting items should commit. "A" was
    // conflicting, but last sync cycle resolved it as simple conflict, so on
    // this sync cycle it committed successfully.
    syncable::ReadTransaction rtrans(FROM_HERE, directory());
    // Committed successfully.
    VERIFY_ENTRY(1, false, false, false, 0, 21, 21, ids_, &rtrans);
    // Committed successfully.
    VERIFY_ENTRY(2, false, false, false, 0, 11, 11, ids_, &rtrans);
    // Was not properly encrypted.
    VERIFY_ENTRY(3, false, true, false, 0, 10, 10, ids_, &rtrans);
    // Was not properly encrypted.
    VERIFY_ENTRY(4, false, true, false, 0, 10, 10, ids_, &rtrans);
  }
  {
    // Fix the remaining items.
    syncable::WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
    MutableEntry C(&wtrans, GET_BY_ID, ids_.FromNumber(3));
    ASSERT_TRUE(C.good());
    C.PutSpecifics(encrypted_bookmark);
    C.PutNonUniqueName(kEncryptedString);
    MutableEntry D(&wtrans, GET_BY_ID, ids_.FromNumber(4));
    ASSERT_TRUE(D.good());
    D.PutSpecifics(encrypted_bookmark);
    D.PutNonUniqueName(kEncryptedString);
  }
  EXPECT_TRUE(SyncShareNudge());
  {
    const StatusController& status_controller = cycle_->status_controller();
    // Expect success.
    EXPECT_EQ(SyncerError::SYNCER_OK,
              status_controller.model_neutral_state().commit_result.value());
    // None should be unsynced anymore.
    syncable::ReadTransaction rtrans(FROM_HERE, directory());
    VERIFY_ENTRY(1, false, false, false, 0, 21, 21, ids_, &rtrans);
    VERIFY_ENTRY(2, false, false, false, 0, 11, 11, ids_, &rtrans);
    VERIFY_ENTRY(3, false, false, false, 0, 11, 11, ids_, &rtrans);
    VERIFY_ENTRY(4, false, false, false, 0, 11, 11, ids_, &rtrans);
  }
}
// Verifies partial (per-type) throttling: while BOOKMARKS is throttled,
// bookmark commits/updates stall but PREFERENCES continues to sync; once
// unthrottled, bookmarks catch up.
TEST_F(SyncerTest, GetUpdatesPartialThrottled) {
  sync_pb::EntitySpecifics bookmark, pref;
  bookmark.mutable_bookmark()->set_title("title");
  pref.mutable_preference()->set_name("name");
  AddDefaultFieldValue(BOOKMARKS, &bookmark);
  AddDefaultFieldValue(PREFERENCES, &pref);

  // Normal sync, all the data types should get synced.
  mock_server_->AddUpdateSpecifics(1, 0, "A", 10, 10, true, 0, bookmark,
                                   foreign_cache_guid(), "-1");
  mock_server_->AddUpdateSpecifics(2, 1, "B", 10, 10, false, 2, bookmark,
                                   foreign_cache_guid(), "-2");
  mock_server_->AddUpdateSpecifics(3, 1, "C", 10, 10, false, 1, bookmark,
                                   foreign_cache_guid(), "-3");
  mock_server_->AddUpdateSpecifics(4, 0, "D", 10, 10, false, 0, pref);

  EXPECT_TRUE(SyncShareNudge());
  {
    // Initial state. Everything is normal.
    syncable::ReadTransaction rtrans(FROM_HERE, directory());
    VERIFY_ENTRY(1, false, false, false, 0, 10, 10, ids_, &rtrans);
    VERIFY_ENTRY(2, false, false, false, 1, 10, 10, ids_, &rtrans);
    VERIFY_ENTRY(3, false, false, false, 1, 10, 10, ids_, &rtrans);
    VERIFY_ENTRY(4, false, false, false, 0, 10, 10, ids_, &rtrans);
  }

  // Set BOOKMARKS throttled but PREFERENCES not,
  // then BOOKMARKS should not get synced but PREFERENCES should.
  ModelTypeSet throttled_types(BOOKMARKS);
  mock_server_->set_throttling(true);
  mock_server_->SetPartialFailureTypes(throttled_types);

  mock_server_->AddUpdateSpecifics(1, 0, "E", 20, 20, true, 0, bookmark,
                                   foreign_cache_guid(), "-1");
  mock_server_->AddUpdateSpecifics(2, 1, "F", 20, 20, false, 2, bookmark,
                                   foreign_cache_guid(), "-2");
  mock_server_->AddUpdateSpecifics(3, 1, "G", 20, 20, false, 1, bookmark,
                                   foreign_cache_guid(), "-3");
  mock_server_->AddUpdateSpecifics(4, 0, "H", 20, 20, false, 0, pref);
  {
    // Dirty all four entries so they would all be commit candidates.
    syncable::WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
    MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
    MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
    MutableEntry C(&wtrans, GET_BY_ID, ids_.FromNumber(3));
    MutableEntry D(&wtrans, GET_BY_ID, ids_.FromNumber(4));
    A.PutIsUnsynced(true);
    B.PutIsUnsynced(true);
    C.PutIsUnsynced(true);
    D.PutIsUnsynced(true);
  }
  EXPECT_TRUE(SyncShareNudge());
  {
    // BOOKMARKS throttled.
    syncable::ReadTransaction rtrans(FROM_HERE, directory());
    VERIFY_ENTRY(1, false, true, false, 0, 10, 10, ids_, &rtrans);
    VERIFY_ENTRY(2, false, true, false, 1, 10, 10, ids_, &rtrans);
    VERIFY_ENTRY(3, false, true, false, 1, 10, 10, ids_, &rtrans);
    VERIFY_ENTRY(4, false, false, false, 0, 21, 21, ids_, &rtrans);
  }

  // Unthrottled BOOKMARKS, then BOOKMARKS should get synced now.
  mock_server_->set_throttling(false);

  mock_server_->AddUpdateSpecifics(1, 0, "E", 30, 30, true, 0, bookmark,
                                   foreign_cache_guid(), "-1");
  mock_server_->AddUpdateSpecifics(2, 1, "F", 30, 30, false, 2, bookmark,
                                   foreign_cache_guid(), "-2");
  mock_server_->AddUpdateSpecifics(3, 1, "G", 30, 30, false, 1, bookmark,
                                   foreign_cache_guid(), "-3");
  mock_server_->AddUpdateSpecifics(4, 0, "H", 30, 30, false, 0, pref);
  EXPECT_TRUE(SyncShareNudge());
  {
    // BOOKMARKS unthrottled.
    syncable::ReadTransaction rtrans(FROM_HERE, directory());
    VERIFY_ENTRY(1, false, false, false, 0, 31, 31, ids_, &rtrans);
    VERIFY_ENTRY(2, false, false, false, 1, 31, 31, ids_, &rtrans);
    VERIFY_ENTRY(3, false, false, false, 1, 31, 31, ids_, &rtrans);
    VERIFY_ENTRY(4, false, false, false, 0, 30, 30, ids_, &rtrans);
  }
}
// Verifies per-type partial failure: while the server reports BOOKMARKS as
// failing, bookmark sync stalls but PREFERENCES continues; once the failure
// clears, bookmarks catch up. Mirrors GetUpdatesPartialThrottled.
TEST_F(SyncerTest, GetUpdatesPartialFailure) {
  sync_pb::EntitySpecifics bookmark, pref;
  bookmark.mutable_bookmark()->set_title("title");
  pref.mutable_preference()->set_name("name");
  AddDefaultFieldValue(BOOKMARKS, &bookmark);
  AddDefaultFieldValue(PREFERENCES, &pref);

  // Normal sync, all the data types should get synced.
  mock_server_->AddUpdateSpecifics(1, 0, "A", 10, 10, true, 0, bookmark,
                                   foreign_cache_guid(), "-1");
  mock_server_->AddUpdateSpecifics(2, 1, "B", 10, 10, false, 2, bookmark,
                                   foreign_cache_guid(), "-2");
  mock_server_->AddUpdateSpecifics(3, 1, "C", 10, 10, false, 1, bookmark,
                                   foreign_cache_guid(), "-3");
  mock_server_->AddUpdateSpecifics(4, 0, "D", 10, 10, false, 0, pref);

  EXPECT_TRUE(SyncShareNudge());
  {
    // Initial state. Everything is normal.
    syncable::ReadTransaction rtrans(FROM_HERE, directory());
    VERIFY_ENTRY(1, false, false, false, 0, 10, 10, ids_, &rtrans);
    VERIFY_ENTRY(2, false, false, false, 1, 10, 10, ids_, &rtrans);
    VERIFY_ENTRY(3, false, false, false, 1, 10, 10, ids_, &rtrans);
    VERIFY_ENTRY(4, false, false, false, 0, 10, 10, ids_, &rtrans);
  }

  // Set BOOKMARKS failure but PREFERENCES not,
  // then BOOKMARKS should not get synced but PREFERENCES should.
  ModelTypeSet failed_types(BOOKMARKS);
  mock_server_->set_partial_failure(true);
  mock_server_->SetPartialFailureTypes(failed_types);

  mock_server_->AddUpdateSpecifics(1, 0, "E", 20, 20, true, 0, bookmark,
                                   foreign_cache_guid(), "-1");
  mock_server_->AddUpdateSpecifics(2, 1, "F", 20, 20, false, 2, bookmark,
                                   foreign_cache_guid(), "-2");
  mock_server_->AddUpdateSpecifics(3, 1, "G", 20, 20, false, 1, bookmark,
                                   foreign_cache_guid(), "-3");
  mock_server_->AddUpdateSpecifics(4, 0, "H", 20, 20, false, 0, pref);
  {
    // Dirty all four entries so they would all be commit candidates.
    syncable::WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
    MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
    MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
    MutableEntry C(&wtrans, GET_BY_ID, ids_.FromNumber(3));
    MutableEntry D(&wtrans, GET_BY_ID, ids_.FromNumber(4));
    A.PutIsUnsynced(true);
    B.PutIsUnsynced(true);
    C.PutIsUnsynced(true);
    D.PutIsUnsynced(true);
  }
  EXPECT_TRUE(SyncShareNudge());
  {
    // BOOKMARKS failed.
    syncable::ReadTransaction rtrans(FROM_HERE, directory());
    VERIFY_ENTRY(1, false, true, false, 0, 10, 10, ids_, &rtrans);
    VERIFY_ENTRY(2, false, true, false, 1, 10, 10, ids_, &rtrans);
    VERIFY_ENTRY(3, false, true, false, 1, 10, 10, ids_, &rtrans);
    VERIFY_ENTRY(4, false, false, false, 0, 21, 21, ids_, &rtrans);
  }

  // Set BOOKMARKS not partial failed, then BOOKMARKS should get synced now.
  mock_server_->set_partial_failure(false);

  mock_server_->AddUpdateSpecifics(1, 0, "E", 30, 30, true, 0, bookmark,
                                   foreign_cache_guid(), "-1");
  mock_server_->AddUpdateSpecifics(2, 1, "F", 30, 30, false, 2, bookmark,
                                   foreign_cache_guid(), "-2");
  mock_server_->AddUpdateSpecifics(3, 1, "G", 30, 30, false, 1, bookmark,
                                   foreign_cache_guid(), "-3");
  mock_server_->AddUpdateSpecifics(4, 0, "H", 30, 30, false, 0, pref);
  EXPECT_TRUE(SyncShareNudge());
  {
    // BOOKMARKS not failed.
    syncable::ReadTransaction rtrans(FROM_HERE, directory());
    VERIFY_ENTRY(1, false, false, false, 0, 31, 31, ids_, &rtrans);
    VERIFY_ENTRY(2, false, false, false, 1, 31, 31, ids_, &rtrans);
    VERIFY_ENTRY(3, false, false, false, 1, 31, 31, ids_, &rtrans);
    VERIFY_ENTRY(4, false, false, false, 0, 30, 30, ids_, &rtrans);
  }
}
// This test uses internal knowledge of the directory to test correctness of
// GetCommitIds. In almost every other test, the hierarchy is created from
// parent to child order, and so parents always have metahandles that are
// smaller than those of their children. This makes it very difficult to test
// some GetCommitIds edge cases, since it uses metahandle ordering as
// a starting point.
TEST_F(SyncerTest, GetCommitIds_VerifyDeletionCommitOrder) {
  {
    syncable::WriteTransaction trans(FROM_HERE, UNITTEST, directory());
    // Create four unsynced bookmark folders directly under the root.
    for (int node = 1; node <= 4; ++node) {
      MutableEntry folder(&trans, CREATE, BOOKMARKS, trans.root_id(), "");
      folder.PutId(ids_.FromNumber(node));
      folder.PutIsDir(true);
      folder.PutBaseVersion(5);
      folder.PutServerVersion(5);
      folder.PutServerParentId(trans.root_id());
      folder.PutServerIsDir(true);
      folder.PutIsUnsynced(true);
      folder.PutSpecifics(DefaultBookmarkSpecifics());
    }
    // Chain them into a hierarchy by iterating in reverse order (so item 4
    // ends up at the top and item 1 at the bottom), and mark each deleted.
    syncable::Id next_parent = trans.root_id();
    for (int node = 4; node >= 1; --node) {
      MutableEntry folder(&trans, GET_BY_ID, ids_.FromNumber(node));
      folder.PutParentId(next_parent);
      folder.PutServerParentId(next_parent);
      folder.PutIsDel(true);
      next_parent = ids_.FromNumber(node);
    }
  }
  {
    // Run GetCommitIds, the function being tested.
    Directory::Metahandles result_handles;
    syncable::ReadTransaction trans(FROM_HERE, directory());
    GetCommitIdsForType(&trans, BOOKMARKS, 100, &result_handles);

    // Verify the output: all four entries, ordered child before parent.
    // Since item 1 is the deepest child, the expected id sequence is 1..4.
    ASSERT_EQ(4U, result_handles.size());
    for (int position = 0; position < 4; ++position) {
      Entry committed_entry(&trans, GET_BY_HANDLE, result_handles[position]);
      EXPECT_EQ(ids_.FromNumber(position + 1), committed_entry.GetId());
    }
  }
}
// Verify that if there are more deleted items than the maximum number of
// entries, child to parent order is still preserved.
TEST_F(SyncerTest, GetCommitIds_VerifyDeletionCommitOrderMaxEntries) {
  {
    syncable::WriteTransaction trans(FROM_HERE, UNITTEST, directory());
    // Build a tree of six deleted, unsynced bookmark folders where node i's
    // parent is node i/2: node 1 under the root, nodes 2-3 on the second
    // level, and nodes 4-6 on the third level.
    for (int node = 1; node <= 6; ++node) {
      MutableEntry folder(&trans, CREATE, BOOKMARKS, trans.root_id(), "");
      folder.PutId(ids_.FromNumber(node));
      folder.PutIsDir(true);
      folder.PutBaseVersion(5);
      folder.PutServerVersion(5);
      folder.PutParentId(ids_.FromNumber(node / 2));
      folder.PutServerParentId(ids_.FromNumber(node / 2));
      folder.PutServerIsDir(true);
      folder.PutIsUnsynced(true);
      folder.PutSpecifics(DefaultBookmarkSpecifics());
      folder.PutIsDel(true);
    }
  }
  {
    // Ask GetCommitIds for at most two entries.
    Directory::Metahandles result_handles;
    syncable::ReadTransaction trans(FROM_HERE, directory());
    GetCommitIdsForType(&trans, BOOKMARKS, 2, &result_handles);

    // Even with the limit in effect, children must still precede parents
    // (descending id order), so we expect exactly items 6 and 5.
    ASSERT_EQ(2U, result_handles.size());
    Entry first_result(&trans, GET_BY_HANDLE, result_handles[0]);
    EXPECT_EQ(ids_.FromNumber(6), first_result.GetId());
    Entry second_result(&trans, GET_BY_HANDLE, result_handles[1]);
    EXPECT_EQ(ids_.FromNumber(5), second_result.GetId());
  }
}
// Exercises conflict resolution when some server updates are undecryptable:
// undecryptable updates stay unapplied with BASE_SERVER_SPECIFICS recorded,
// local edits stay unsynced while keys are pending, and everything resolves
// once the pending keys are decrypted.
TEST_F(SyncerTest, EncryptionAwareConflicts) {
  KeyParams key_params = {KeyDerivationParams::CreateForPbkdf2(), "foobar"};
  Cryptographer other_cryptographer(&encryptor_);
  other_cryptographer.AddKey(key_params);
  // Build plain, encrypted, and locally-modified variants of a bookmark and
  // a preference. The "other" cryptographer owns the key, so the test-side
  // cryptographer cannot decrypt these until the pending keys are resolved.
  sync_pb::EntitySpecifics bookmark, encrypted_bookmark, modified_bookmark;
  bookmark.mutable_bookmark()->set_title("title");
  other_cryptographer.Encrypt(bookmark, encrypted_bookmark.mutable_encrypted());
  AddDefaultFieldValue(BOOKMARKS, &encrypted_bookmark);
  modified_bookmark.mutable_bookmark()->set_title("title2");
  other_cryptographer.Encrypt(modified_bookmark,
                              modified_bookmark.mutable_encrypted());
  sync_pb::EntitySpecifics pref, encrypted_pref, modified_pref;
  pref.mutable_preference()->set_name("name");
  AddDefaultFieldValue(PREFERENCES, &encrypted_pref);
  other_cryptographer.Encrypt(pref, encrypted_pref.mutable_encrypted());
  modified_pref.mutable_preference()->set_name("name2");
  other_cryptographer.Encrypt(modified_pref, modified_pref.mutable_encrypted());
  {
    // Mark bookmarks and preferences as encrypted and set the cryptographer to
    // have pending keys.
    syncable::WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
    sync_pb::EntitySpecifics specifics;
    sync_pb::NigoriSpecifics* nigori = specifics.mutable_nigori();
    other_cryptographer.GetKeys(nigori->mutable_encryption_keybag());
    test_user_share_.encryption_handler()->EnableEncryptEverything();
    GetCryptographer(&wtrans)->SetPendingKeys(nigori->encryption_keybag());
    EXPECT_TRUE(GetCryptographer(&wtrans)->has_pending_keys());
  }
  // We need to remember the exact position of our local items, so we can
  // make updates that do not modify those positions.
  UniquePosition pos1;
  UniquePosition pos2;
  UniquePosition pos3;
  mock_server_->AddUpdateSpecifics(1, 0, "A", 10, 10, true, 0, bookmark,
                                   foreign_cache_guid(), "-1");
  mock_server_->AddUpdateSpecifics(2, 1, "B", 10, 10, false, 2, bookmark,
                                   foreign_cache_guid(), "-2");
  mock_server_->AddUpdateSpecifics(3, 1, "C", 10, 10, false, 1, bookmark,
                                   foreign_cache_guid(), "-3");
  mock_server_->AddUpdateSpecifics(4, 0, "D", 10, 10, false, 0, pref);
  EXPECT_TRUE(SyncShareNudge());
  {
    // Initial state. Everything is normal.
    // NOTE(review): VERIFY_ENTRY args appear to be (id, is_unapplied,
    // is_unsynced, has_base_server_specifics, parent, base_version,
    // server_version, ids, trans) -- inferred from usage below; confirm
    // against the macro definition earlier in this file.
    syncable::ReadTransaction rtrans(FROM_HERE, directory());
    VERIFY_ENTRY(1, false, false, false, 0, 10, 10, ids_, &rtrans);
    VERIFY_ENTRY(2, false, false, false, 1, 10, 10, ids_, &rtrans);
    VERIFY_ENTRY(3, false, false, false, 1, 10, 10, ids_, &rtrans);
    VERIFY_ENTRY(4, false, false, false, 0, 10, 10, ids_, &rtrans);
    Entry entry1(&rtrans, GET_BY_ID, ids_.FromNumber(1));
    ASSERT_TRUE(
        entry1.GetUniquePosition().Equals(entry1.GetServerUniquePosition()));
    pos1 = entry1.GetUniquePosition();
    Entry entry2(&rtrans, GET_BY_ID, ids_.FromNumber(2));
    pos2 = entry2.GetUniquePosition();
    Entry entry3(&rtrans, GET_BY_ID, ids_.FromNumber(3));
    pos3 = entry3.GetUniquePosition();
  }
  // Server side encryption will not be applied due to undecryptable data.
  // At this point, BASE_SERVER_SPECIFICS should be filled for all four items.
  mock_server_->AddUpdateSpecifics(1, 0, kEncryptedString, 20, 20, true, 0,
                                   encrypted_bookmark, foreign_cache_guid(),
                                   "-1");
  mock_server_->AddUpdateSpecifics(2, 1, kEncryptedString, 20, 20, false, 2,
                                   encrypted_bookmark, foreign_cache_guid(),
                                   "-2");
  mock_server_->AddUpdateSpecifics(3, 1, kEncryptedString, 20, 20, false, 1,
                                   encrypted_bookmark, foreign_cache_guid(),
                                   "-3");
  mock_server_->AddUpdateSpecifics(4, 0, kEncryptedString, 20, 20, false, 0,
                                   encrypted_pref, foreign_cache_guid(), "-4");
  EXPECT_TRUE(SyncShareNudge());
  {
    // All should be unapplied due to being undecryptable and have a valid
    // BASE_SERVER_SPECIFICS.
    syncable::ReadTransaction rtrans(FROM_HERE, directory());
    VERIFY_ENTRY(1, true, false, true, 0, 10, 20, ids_, &rtrans);
    VERIFY_ENTRY(2, true, false, true, 1, 10, 20, ids_, &rtrans);
    VERIFY_ENTRY(3, true, false, true, 1, 10, 20, ids_, &rtrans);
    VERIFY_ENTRY(4, true, false, true, 0, 10, 20, ids_, &rtrans);
  }
  // Server-side changes that don't modify anything should not affect
  // BASE_SERVER_SPECIFICS (such as name changes and mtime changes).
  mock_server_->AddUpdateSpecifics(1, 0, kEncryptedString, 30, 30, true, 0,
                                   encrypted_bookmark, foreign_cache_guid(),
                                   "-1");
  mock_server_->AddUpdateSpecifics(2, 1, kEncryptedString, 30, 30, false, 2,
                                   encrypted_bookmark, foreign_cache_guid(),
                                   "-2");
  // Item 3 doesn't change.
  mock_server_->AddUpdateSpecifics(4, 0, kEncryptedString, 30, 30, false, 0,
                                   encrypted_pref, foreign_cache_guid(), "-4");
  EXPECT_TRUE(SyncShareNudge());
  {
    // Items 1, 2, and 4 should have newer server versions, 3 remains the same.
    // All should remain unapplied due to being undecryptable.
    syncable::ReadTransaction rtrans(FROM_HERE, directory());
    VERIFY_ENTRY(1, true, false, true, 0, 10, 30, ids_, &rtrans);
    VERIFY_ENTRY(2, true, false, true, 1, 10, 30, ids_, &rtrans);
    VERIFY_ENTRY(3, true, false, true, 1, 10, 20, ids_, &rtrans);
    VERIFY_ENTRY(4, true, false, true, 0, 10, 30, ids_, &rtrans);
  }
  // Positional changes, parent changes, and specifics changes should reset
  // BASE_SERVER_SPECIFICS.
  // Became unencrypted.
  mock_server_->AddUpdateSpecifics(1, 0, "A", 40, 40, true, 0, bookmark,
                                   foreign_cache_guid(), "-1");
  // Reordered to after item 2.
  mock_server_->AddUpdateSpecifics(3, 1, kEncryptedString, 30, 30, false, 3,
                                   encrypted_bookmark, foreign_cache_guid(),
                                   "-3");
  EXPECT_TRUE(SyncShareNudge());
  {
    // Items 2 and 4 should be the only ones with BASE_SERVER_SPECIFICS set.
    // Item 1 is now unencrypted, so should have applied normally.
    syncable::ReadTransaction rtrans(FROM_HERE, directory());
    VERIFY_ENTRY(1, false, false, false, 0, 40, 40, ids_, &rtrans);
    VERIFY_ENTRY(2, true, false, true, 1, 10, 30, ids_, &rtrans);
    VERIFY_ENTRY(3, true, false, false, 1, 10, 30, ids_, &rtrans);
    VERIFY_ENTRY(4, true, false, true, 0, 10, 30, ids_, &rtrans);
  }
  // Make local changes, which should remain unsynced for items 2, 3, 4.
  {
    syncable::WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
    MutableEntry A(&wtrans, GET_BY_ID, ids_.FromNumber(1));
    ASSERT_TRUE(A.good());
    A.PutSpecifics(modified_bookmark);
    A.PutNonUniqueName(kEncryptedString);
    A.PutIsUnsynced(true);
    MutableEntry B(&wtrans, GET_BY_ID, ids_.FromNumber(2));
    ASSERT_TRUE(B.good());
    B.PutSpecifics(modified_bookmark);
    B.PutNonUniqueName(kEncryptedString);
    B.PutIsUnsynced(true);
    MutableEntry C(&wtrans, GET_BY_ID, ids_.FromNumber(3));
    ASSERT_TRUE(C.good());
    C.PutSpecifics(modified_bookmark);
    C.PutNonUniqueName(kEncryptedString);
    C.PutIsUnsynced(true);
    MutableEntry D(&wtrans, GET_BY_ID, ids_.FromNumber(4));
    ASSERT_TRUE(D.good());
    D.PutSpecifics(modified_pref);
    D.PutNonUniqueName(kEncryptedString);
    D.PutIsUnsynced(true);
  }
  EXPECT_TRUE(SyncShareNudge());
  {
    // Item 1 remains unsynced due to there being pending keys.
    // Items 2, 3, 4 should remain unsynced since they were not up to date.
    syncable::ReadTransaction rtrans(FROM_HERE, directory());
    VERIFY_ENTRY(1, false, true, false, 0, 40, 40, ids_, &rtrans);
    VERIFY_ENTRY(2, true, true, true, 1, 10, 30, ids_, &rtrans);
    VERIFY_ENTRY(3, true, true, false, 1, 10, 30, ids_, &rtrans);
    VERIFY_ENTRY(4, true, true, true, 0, 10, 30, ids_, &rtrans);
  }
  {
    syncable::ReadTransaction rtrans(FROM_HERE, directory());
    // Resolve the pending keys.
    GetCryptographer(&rtrans)->DecryptPendingKeys(key_params);
  }
  // First cycle resolves conflicts, second cycle commits changes.
  EXPECT_TRUE(SyncShareNudge());
  EXPECT_EQ(1, GetUpdateCounters(BOOKMARKS).num_server_overwrites);
  EXPECT_EQ(1, GetUpdateCounters(PREFERENCES).num_server_overwrites);
  EXPECT_EQ(1, GetUpdateCounters(BOOKMARKS).num_local_overwrites);
  // We successfully committed item(s).
  EXPECT_EQ(2, GetCommitCounters(BOOKMARKS).num_update_commits_attempted);
  EXPECT_EQ(2, GetCommitCounters(BOOKMARKS).num_commits_success);
  EXPECT_EQ(1, GetCommitCounters(PREFERENCES).num_update_commits_attempted);
  EXPECT_EQ(1, GetCommitCounters(PREFERENCES).num_commits_success);
  EXPECT_TRUE(SyncShareNudge());
  // Everything should be resolved now. The local changes should have
  // overwritten the server changes for 2 and 4, while the server changes
  // overwrote the local for entry 3.
  //
  // Expect there will be no new overwrites.
  EXPECT_EQ(1, GetUpdateCounters(BOOKMARKS).num_server_overwrites);
  EXPECT_EQ(1, GetUpdateCounters(BOOKMARKS).num_local_overwrites);
  EXPECT_EQ(2, GetCommitCounters(BOOKMARKS).num_commits_success);
  EXPECT_EQ(1, GetCommitCounters(PREFERENCES).num_commits_success);
  syncable::ReadTransaction rtrans(FROM_HERE, directory());
  VERIFY_ENTRY(1, false, false, false, 0, 41, 41, ids_, &rtrans);
  VERIFY_ENTRY(2, false, false, false, 1, 31, 31, ids_, &rtrans);
  VERIFY_ENTRY(3, false, false, false, 1, 30, 30, ids_, &rtrans);
  VERIFY_ENTRY(4, false, false, false, 0, 31, 31, ids_, &rtrans);
}
#undef VERIFY_ENTRY
TEST_F(SyncerTest, TestGetUnsyncedAndSimpleCommit) {
  // Create an unsynced folder containing one unsynced item, sync, and verify
  // that both were committed with the parent preceding its child.
  {
    syncable::WriteTransaction trans(FROM_HERE, UNITTEST, directory());
    MutableEntry folder(&trans, CREATE, BOOKMARKS, trans.root_id(), "Pete");
    ASSERT_TRUE(folder.good());
    folder.PutIsUnsynced(true);
    folder.PutIsDir(true);
    folder.PutSpecifics(DefaultBookmarkSpecifics());
    folder.PutBaseVersion(1);
    folder.PutId(parent_id_);
    MutableEntry item(&trans, CREATE, BOOKMARKS, parent_id_, "Pete");
    ASSERT_TRUE(item.good());
    item.PutId(child_id_);
    item.PutBaseVersion(1);
    WriteTestDataToEntry(&trans, &item);
  }
  EXPECT_TRUE(SyncShareNudge());
  ASSERT_EQ(2u, mock_server_->committed_ids().size());
  // If this test starts failing, be aware other sort orders could be valid.
  EXPECT_EQ(parent_id_, mock_server_->committed_ids()[0]);
  EXPECT_EQ(child_id_, mock_server_->committed_ids()[1]);
  {
    // The child's test data must have survived the commit round-trip.
    syncable::ReadTransaction read_trans(FROM_HERE, directory());
    Entry committed_item(&read_trans, GET_BY_ID, child_id_);
    ASSERT_TRUE(committed_item.good());
    VerifyTestDataInEntry(&read_trans, &committed_item);
  }
}
TEST_F(SyncerTest, TestPurgeWhileUnsynced) {
  // Similar to above, but throw a purge operation into the mix. Bug 49278.
  // Unsynced bookmark entries should still commit after PREFERENCES is
  // purged, while the purged (unsynced) preference node must not survive,
  // even after changes are flushed to disk via SaveChanges().
  syncable::Id pref_node_id = TestIdFactory::MakeServer("Tim");
  {
    // NOTE(review): presumably marks the initial download of these types as
    // complete so the purge treats them as fully-initialized types — confirm
    // against BuildProgress/SetDownloadProgress.
    directory()->SetDownloadProgress(BOOKMARKS,
                                     syncable::BuildProgress(BOOKMARKS));
    directory()->SetDownloadProgress(PREFERENCES,
                                     syncable::BuildProgress(PREFERENCES));
    syncable::WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
    // Unsynced bookmark folder with one unsynced child.
    MutableEntry parent(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(), "Pete");
    ASSERT_TRUE(parent.good());
    parent.PutIsUnsynced(true);
    parent.PutIsDir(true);
    parent.PutSpecifics(DefaultBookmarkSpecifics());
    parent.PutBaseVersion(1);
    parent.PutId(parent_id_);
    MutableEntry child(&wtrans, CREATE, BOOKMARKS, parent_id_, "Pete");
    ASSERT_TRUE(child.good());
    child.PutId(child_id_);
    child.PutBaseVersion(1);
    WriteTestDataToEntry(&wtrans, &child);
    // Unsynced preference node — this is the entry the purge should remove.
    MutableEntry parent2(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(), "Tim");
    ASSERT_TRUE(parent2.good());
    parent2.PutIsUnsynced(true);
    parent2.PutIsDir(true);
    parent2.PutSpecifics(DefaultPreferencesSpecifics());
    parent2.PutBaseVersion(1);
    parent2.PutId(pref_node_id);
  }
  // Purge PREFERENCES before the commit happens.
  directory()->PurgeEntriesWithTypeIn(ModelTypeSet(PREFERENCES), ModelTypeSet(),
                                      ModelTypeSet());
  EXPECT_TRUE(SyncShareNudge());
  // Only the two bookmark entries should have been committed.
  ASSERT_EQ(2U, mock_server_->committed_ids().size());
  // If this test starts failing, be aware other sort orders could be valid.
  EXPECT_EQ(parent_id_, mock_server_->committed_ids()[0]);
  EXPECT_EQ(child_id_, mock_server_->committed_ids()[1]);
  {
    syncable::ReadTransaction rt(FROM_HERE, directory());
    Entry entry(&rt, GET_BY_ID, child_id_);
    ASSERT_TRUE(entry.good());
    VerifyTestDataInEntry(&rt, &entry);
  }
  directory()->SaveChanges();
  {
    // The purged preference node must be gone after saving.
    syncable::ReadTransaction rt(FROM_HERE, directory());
    Entry entry(&rt, GET_BY_ID, pref_node_id);
    ASSERT_FALSE(entry.good());
  }
}
TEST_F(SyncerTest, TestPurgeWhileUnapplied) {
  // Similar to above, but for unapplied items. Bug 49278.
  {
    directory()->SetDownloadProgress(BOOKMARKS,
                                     syncable::BuildProgress(BOOKMARKS));
    syncable::WriteTransaction trans(FROM_HERE, UNITTEST, directory());
    // An entry that is an unapplied server update, not an unsynced local one.
    MutableEntry unapplied(&trans, CREATE, BOOKMARKS, trans.root_id(), "Pete");
    ASSERT_TRUE(unapplied.good());
    unapplied.PutIsUnappliedUpdate(true);
    unapplied.PutIsDir(true);
    unapplied.PutSpecifics(DefaultBookmarkSpecifics());
    unapplied.PutBaseVersion(1);
    unapplied.PutId(parent_id_);
  }
  // Purge BOOKMARKS, run a sync cycle, and flush to disk: the unapplied
  // entry must no longer exist.
  directory()->PurgeEntriesWithTypeIn(ModelTypeSet(BOOKMARKS), ModelTypeSet(),
                                      ModelTypeSet());
  EXPECT_TRUE(SyncShareNudge());
  directory()->SaveChanges();
  {
    syncable::ReadTransaction trans(FROM_HERE, directory());
    Entry purged(&trans, GET_BY_ID, parent_id_);
    ASSERT_FALSE(purged.good());
  }
}
// Purging with a journaled type set should record the purged bookmark
// entries in the delete journal, while the purged preference node is dropped
// without being journaled.
TEST_F(SyncerTest, TestPurgeWithJournal) {
  {
    directory()->SetDownloadProgress(BOOKMARKS,
                                     syncable::BuildProgress(BOOKMARKS));
    syncable::WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
    // Bookmark folder with one child, plus a separate preference node.
    MutableEntry parent(&wtrans, syncable::CREATE, BOOKMARKS, wtrans.root_id(),
                        "Pete");
    ASSERT_TRUE(parent.good());
    parent.PutIsDir(true);
    parent.PutSpecifics(DefaultBookmarkSpecifics());
    parent.PutBaseVersion(1);
    parent.PutId(parent_id_);
    MutableEntry child(&wtrans, syncable::CREATE, BOOKMARKS, parent_id_,
                       "Pete");
    ASSERT_TRUE(child.good());
    child.PutId(child_id_);
    child.PutBaseVersion(1);
    WriteTestDataToEntry(&wtrans, &child);
    MutableEntry parent2(&wtrans, syncable::CREATE, PREFERENCES,
                         wtrans.root_id(), "Tim");
    ASSERT_TRUE(parent2.good());
    parent2.PutIsDir(true);
    parent2.PutSpecifics(DefaultPreferencesSpecifics());
    parent2.PutBaseVersion(1);
    parent2.PutId(TestIdFactory::MakeServer("Tim"));
  }
  // Purge both types; only BOOKMARKS is listed in the types-to-journal set
  // (second argument).
  directory()->PurgeEntriesWithTypeIn(ModelTypeSet(PREFERENCES, BOOKMARKS),
                                      ModelTypeSet(BOOKMARKS), ModelTypeSet());
  {
    // Verify bookmark nodes are saved in delete journal but not preference
    // node.
    syncable::ReadTransaction rt(FROM_HERE, directory());
    syncable::DeleteJournal* delete_journal = directory()->delete_journal();
    EXPECT_EQ(2u, delete_journal->GetDeleteJournalSize(&rt));
    syncable::EntryKernelSet journal_entries;
    directory()->delete_journal()->GetDeleteJournals(&rt, BOOKMARKS,
                                                     &journal_entries);
    EXPECT_EQ(parent_id_, (*journal_entries.begin())->ref(syncable::ID));
    EXPECT_EQ(child_id_, (*journal_entries.rbegin())->ref(syncable::ID));
  }
}
// ResetVersionsForType should reset base/server versions of synced entries
// to 1 while preserving their unsynced/unapplied flags, and leave locally
// created (never-synced) entries untouched.
TEST_F(SyncerTest, ResetVersions) {
  // Download some pref items.
  mock_server_->AddUpdatePref("id1", "", "tag1", 20, 20);
  mock_server_->AddUpdatePref("id2", "", "tag2", 30, 30);
  mock_server_->AddUpdatePref("id3", "", "tag3", 40, 40);
  EXPECT_TRUE(SyncShareNudge());
  {
    // Modify one of the preferences locally, mark another one as unapplied,
    // and create another unsynced preference.
    syncable::WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
    MutableEntry entry(&wtrans, GET_BY_CLIENT_TAG, "tag1");
    entry.PutIsUnsynced(true);
    MutableEntry entry2(&wtrans, GET_BY_CLIENT_TAG, "tag2");
    entry2.PutIsUnappliedUpdate(true);
    MutableEntry entry4(&wtrans, CREATE, PREFERENCES, "name");
    entry4.PutUniqueClientTag("tag4");
    entry4.PutIsUnsynced(true);
  }
  {
    // Reset the versions.
    syncable::WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
    ASSERT_TRUE(directory()->ResetVersionsForType(&wtrans, PREFERENCES));
  }
  {
    // Verify the synced items are all with version 1 now, with
    // unsynced/unapplied state preserved.
    syncable::ReadTransaction trans(FROM_HERE, directory());
    Entry entry(&trans, GET_BY_CLIENT_TAG, "tag1");
    EXPECT_EQ(1, entry.GetBaseVersion());
    EXPECT_EQ(1, entry.GetServerVersion());
    EXPECT_TRUE(entry.GetIsUnsynced());
    EXPECT_FALSE(entry.GetIsUnappliedUpdate());
    Entry entry2(&trans, GET_BY_CLIENT_TAG, "tag2");
    EXPECT_EQ(1, entry2.GetBaseVersion());
    EXPECT_EQ(1, entry2.GetServerVersion());
    EXPECT_FALSE(entry2.GetIsUnsynced());
    EXPECT_TRUE(entry2.GetIsUnappliedUpdate());
    Entry entry3(&trans, GET_BY_CLIENT_TAG, "tag3");
    EXPECT_EQ(1, entry3.GetBaseVersion());
    EXPECT_EQ(1, entry3.GetServerVersion());
    EXPECT_FALSE(entry3.GetIsUnsynced());
    EXPECT_FALSE(entry3.GetIsUnappliedUpdate());
    // Entry 4 (the locally created one) should remain the same.
    Entry entry4(&trans, GET_BY_CLIENT_TAG, "tag4");
    EXPECT_EQ(-1, entry4.GetBaseVersion());
    EXPECT_EQ(0, entry4.GetServerVersion());
    EXPECT_TRUE(entry4.GetIsUnsynced());
    EXPECT_FALSE(entry4.GetIsUnappliedUpdate());
  }
}
TEST_F(SyncerTest, TestCommitListOrderingTwoItemsTall) {
  // Each row: expected commit position, item id, parent id. The child
  // (-1001) lives under the new parent (-1000), so the parent commits first.
  CommitOrderingTest commit_sequence[] = {
      {1, ids_.FromNumber(-1001), ids_.FromNumber(-1000)},
      {0, ids_.FromNumber(-1000), ids_.FromNumber(0)},
      CommitOrderingTest::MakeLastCommitItem(),
  };
  RunCommitOrderingTest(commit_sequence);
}
TEST_F(SyncerTest, TestCommitListOrderingThreeItemsTall) {
  // A three-deep chain of new items (-2000 <- -2001 <- -2002); commits must
  // proceed top-down regardless of the order the rows are listed in.
  CommitOrderingTest commit_sequence[] = {
      {1, ids_.FromNumber(-2001), ids_.FromNumber(-2000)},
      {0, ids_.FromNumber(-2000), ids_.FromNumber(0)},
      {2, ids_.FromNumber(-2002), ids_.FromNumber(-2001)},
      CommitOrderingTest::MakeLastCommitItem(),
  };
  RunCommitOrderingTest(commit_sequence);
}
TEST_F(SyncerTest, TestCommitListOrderingFourItemsTall) {
  // A four-deep chain of new items; each child commits right after its
  // parent, so the expected positions mirror the chain depth.
  CommitOrderingTest commit_sequence[] = {
      {3, ids_.FromNumber(-2003), ids_.FromNumber(-2002)},
      {1, ids_.FromNumber(-2001), ids_.FromNumber(-2000)},
      {0, ids_.FromNumber(-2000), ids_.FromNumber(0)},
      {2, ids_.FromNumber(-2002), ids_.FromNumber(-2001)},
      CommitOrderingTest::MakeLastCommitItem(),
  };
  RunCommitOrderingTest(commit_sequence);
}
TEST_F(SyncerTest, TestCommitListOrderingThreeItemsTallLimitedSize) {
  // Same three-deep chain as above, but with commits batched two at a time;
  // the top-down commit order must survive the batching.
  context_->set_max_commit_batch_size(2);
  CommitOrderingTest commit_sequence[] = {
      {1, ids_.FromNumber(-2001), ids_.FromNumber(-2000)},
      {0, ids_.FromNumber(-2000), ids_.FromNumber(0)},
      {2, ids_.FromNumber(-2002), ids_.FromNumber(-2001)},
      CommitOrderingTest::MakeLastCommitItem(),
  };
  RunCommitOrderingTest(commit_sequence);
}
TEST_F(SyncerTest, TestCommitListOrderingSingleDeletedItem) {
  // A single previously-synced item (positive id) that was deleted locally
  // should be committed.
  CommitOrderingTest commit_sequence[] = {
      {0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED}},
      CommitOrderingTest::MakeLastCommitItem(),
  };
  RunCommitOrderingTest(commit_sequence);
}
TEST_F(SyncerTest, TestCommitListOrderingSingleUncommittedDeletedItem) {
  // An item that was created and deleted locally without ever being committed
  // (negative id) should not be committed at all (expected position -1).
  CommitOrderingTest commit_sequence[] = {
      {-1, ids_.FromNumber(-1000), ids_.FromNumber(0), {DELETED}},
      CommitOrderingTest::MakeLastCommitItem(),
  };
  RunCommitOrderingTest(commit_sequence);
}
TEST_F(SyncerTest, TestCommitListOrderingSingleDeletedItemWithUnroll) {
  // A single synced, locally-deleted item commits its deletion.
  CommitOrderingTest commit_sequence[] = {
      {0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED}},
      CommitOrderingTest::MakeLastCommitItem(),
  };
  RunCommitOrderingTest(commit_sequence);
}
TEST_F(SyncerTest, TestCommitListOrderingSingleLongDeletedItemWithUnroll) {
  // A deletion whose mtime is old (OLD_MTIME) should still be committed.
  CommitOrderingTest commit_sequence[] = {
      {0, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
      CommitOrderingTest::MakeLastCommitItem(),
  };
  RunCommitOrderingTest(commit_sequence);
}
TEST_F(SyncerTest, TestCommitListOrderingTwoLongDeletedItemWithUnroll) {
  // Two old deletions in a parent/child relationship: the child (1001)
  // commits its deletion before the parent (1000).
  CommitOrderingTest commit_sequence[] = {
      {1, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
      {0, ids_.FromNumber(1001), ids_.FromNumber(1000), {DELETED, OLD_MTIME}},
      CommitOrderingTest::MakeLastCommitItem(),
  };
  RunCommitOrderingTest(commit_sequence);
}
TEST_F(SyncerTest, TestCommitListOrdering3LongDeletedItemsWithSizeLimit) {
  // Three chained old deletions with a commit batch size of two; deletions
  // must still go child-first (deepest item 1002 commits first).
  context_->set_max_commit_batch_size(2);
  CommitOrderingTest commit_sequence[] = {
      {2, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
      {1, ids_.FromNumber(1001), ids_.FromNumber(1000), {DELETED, OLD_MTIME}},
      {0, ids_.FromNumber(1002), ids_.FromNumber(1001), {DELETED, OLD_MTIME}},
      CommitOrderingTest::MakeLastCommitItem(),
  };
  RunCommitOrderingTest(commit_sequence);
}
TEST_F(SyncerTest, TestCommitListOrderingTwoDeletedItemsWithUnroll) {
  // Recent (non-OLD_MTIME) parent/child deletions still commit child-first.
  CommitOrderingTest commit_sequence[] = {
      {1, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED}},
      {0, ids_.FromNumber(1001), ids_.FromNumber(1000), {DELETED}},
      CommitOrderingTest::MakeLastCommitItem(),
  };
  RunCommitOrderingTest(commit_sequence);
}
TEST_F(SyncerTest, TestCommitListOrderingComplexDeletionScenario) {
  // Mixed tree: deletions interleaved with already-SYNCED (clean) items.
  // Clean items are not committed (-1); the deletions commit deepest-first.
  CommitOrderingTest commit_sequence[] = {
      {2, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
      {-1, ids_.FromNumber(1001), ids_.FromNumber(0), {SYNCED}},
      {1, ids_.FromNumber(1002), ids_.FromNumber(1001), {DELETED, OLD_MTIME}},
      {-1, ids_.FromNumber(1003), ids_.FromNumber(1001), {SYNCED}},
      {0, ids_.FromNumber(1004), ids_.FromNumber(1003), {DELETED}},
      CommitOrderingTest::MakeLastCommitItem(),
  };
  RunCommitOrderingTest(commit_sequence);
}
TEST_F(SyncerTest,
       TestCommitListOrderingComplexDeletionScenarioWith2RecentDeletes) {
  // Like the scenario above, but with two recent sibling deletions (1004,
  // 1005) under the same clean parent; both commit before any ancestor.
  CommitOrderingTest commit_sequence[] = {
      {3, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
      {-1, ids_.FromNumber(1001), ids_.FromNumber(0), {SYNCED}},
      {2, ids_.FromNumber(1002), ids_.FromNumber(1001), {DELETED, OLD_MTIME}},
      {-1, ids_.FromNumber(1003), ids_.FromNumber(1001), {SYNCED}},
      {1, ids_.FromNumber(1004), ids_.FromNumber(1003), {DELETED}},
      {0, ids_.FromNumber(1005), ids_.FromNumber(1003), {DELETED}},
      CommitOrderingTest::MakeLastCommitItem(),
  };
  RunCommitOrderingTest(commit_sequence);
}
TEST_F(SyncerTest, TestCommitListOrderingDeleteMovedItems) {
  // The child deletion (1001) was moved from the root (MOVED_FROM_ROOT)
  // before being deleted; it still commits before its parent's deletion.
  CommitOrderingTest commit_sequence[] = {
      {1, ids_.FromNumber(1000), ids_.FromNumber(0), {DELETED, OLD_MTIME}},
      {0,
       ids_.FromNumber(1001),
       ids_.FromNumber(1000),
       {DELETED, OLD_MTIME, MOVED_FROM_ROOT}},
      CommitOrderingTest::MakeLastCommitItem(),
  };
  RunCommitOrderingTest(commit_sequence);
}
// Commits a live three-level bookmark hierarchy plus a deleted three-level
// hierarchy, and verifies that live items commit parent-first while the
// deletions are committed last (order among the deletions unspecified).
TEST_F(SyncerTest, TestCommitListOrderingWithNesting) {
  // Old mtime so the deleted items qualify as long-deleted.
  const base::Time& now_minus_2h =
      base::Time::Now() - base::TimeDelta::FromHours(2);
  {
    syncable::WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
    {
      // Live hierarchy: 100 -> 101 -> 102, all unsynced.
      MutableEntry parent(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(), "Bob");
      ASSERT_TRUE(parent.good());
      parent.PutIsUnsynced(true);
      parent.PutIsDir(true);
      parent.PutSpecifics(DefaultBookmarkSpecifics());
      parent.PutId(ids_.FromNumber(100));
      parent.PutBaseVersion(1);
      MutableEntry child(&wtrans, CREATE, BOOKMARKS, ids_.FromNumber(100),
                         "Bob");
      ASSERT_TRUE(child.good());
      child.PutIsUnsynced(true);
      child.PutIsDir(true);
      child.PutSpecifics(DefaultBookmarkSpecifics());
      child.PutId(ids_.FromNumber(101));
      child.PutBaseVersion(1);
      MutableEntry grandchild(&wtrans, CREATE, BOOKMARKS, ids_.FromNumber(101),
                              "Bob");
      ASSERT_TRUE(grandchild.good());
      grandchild.PutId(ids_.FromNumber(102));
      grandchild.PutIsUnsynced(true);
      grandchild.PutSpecifics(DefaultBookmarkSpecifics());
      grandchild.PutBaseVersion(1);
    }
    {
      // Create three deleted items which deletions we expect to be sent to the
      // server. Deleted hierarchy: 103 -> 104 -> 105, all long-deleted.
      MutableEntry parent(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(), "Pete");
      ASSERT_TRUE(parent.good());
      parent.PutId(ids_.FromNumber(103));
      parent.PutIsUnsynced(true);
      parent.PutIsDir(true);
      parent.PutSpecifics(DefaultBookmarkSpecifics());
      parent.PutIsDel(true);
      parent.PutBaseVersion(1);
      parent.PutMtime(now_minus_2h);
      MutableEntry child(&wtrans, CREATE, BOOKMARKS, ids_.FromNumber(103),
                         "Pete");
      ASSERT_TRUE(child.good());
      child.PutId(ids_.FromNumber(104));
      child.PutIsUnsynced(true);
      child.PutIsDir(true);
      child.PutSpecifics(DefaultBookmarkSpecifics());
      child.PutIsDel(true);
      child.PutBaseVersion(1);
      child.PutMtime(now_minus_2h);
      MutableEntry grandchild(&wtrans, CREATE, BOOKMARKS, ids_.FromNumber(104),
                              "Pete");
      ASSERT_TRUE(grandchild.good());
      grandchild.PutId(ids_.FromNumber(105));
      grandchild.PutIsUnsynced(true);
      grandchild.PutIsDel(true);
      grandchild.PutIsDir(false);
      grandchild.PutSpecifics(DefaultBookmarkSpecifics());
      grandchild.PutBaseVersion(1);
      grandchild.PutMtime(now_minus_2h);
    }
  }
  EXPECT_TRUE(SyncShareNudge());
  ASSERT_EQ(6u, mock_server_->committed_ids().size());
  // This test will NOT unroll deletes because SERVER_PARENT_ID is not set.
  // It will treat these like moves.
  vector<syncable::Id> commit_ids(mock_server_->committed_ids());
  EXPECT_EQ(ids_.FromNumber(100), commit_ids[0]);
  EXPECT_EQ(ids_.FromNumber(101), commit_ids[1]);
  EXPECT_EQ(ids_.FromNumber(102), commit_ids[2]);
  // We don't guarantee the delete orders in this test, only that they occur
  // at the end.
  std::sort(commit_ids.begin() + 3, commit_ids.end());
  EXPECT_EQ(ids_.FromNumber(103), commit_ids[3]);
  EXPECT_EQ(ids_.FromNumber(104), commit_ids[4]);
  EXPECT_EQ(ids_.FromNumber(105), commit_ids[5]);
}
// Two server-known parents each gain a pair of new children (one with a
// server id, one with a local id). Both parents must commit before any of
// the four children; beyond that the order is unspecified.
TEST_F(SyncerTest, TestCommitListOrderingWithNewItems) {
  syncable::Id parent1_id = ids_.MakeServer("p1");
  syncable::Id parent2_id = ids_.MakeServer("p2");
  {
    // Two sibling folders at the root, both already known to the server.
    syncable::WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
    MutableEntry parent(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(), "1");
    ASSERT_TRUE(parent.good());
    parent.PutIsUnsynced(true);
    parent.PutIsDir(true);
    parent.PutSpecifics(DefaultBookmarkSpecifics());
    parent.PutId(parent1_id);
    MutableEntry child(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(), "2");
    ASSERT_TRUE(child.good());
    child.PutIsUnsynced(true);
    child.PutIsDir(true);
    child.PutSpecifics(DefaultBookmarkSpecifics());
    child.PutId(parent2_id);
    parent.PutBaseVersion(1);
    child.PutBaseVersion(1);
  }
  {
    // Children of parent1: one with a server id (102), one brand new (-103).
    syncable::WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
    MutableEntry parent(&wtrans, CREATE, BOOKMARKS, parent1_id, "A");
    ASSERT_TRUE(parent.good());
    parent.PutIsUnsynced(true);
    parent.PutIsDir(true);
    parent.PutSpecifics(DefaultBookmarkSpecifics());
    parent.PutId(ids_.FromNumber(102));
    MutableEntry child(&wtrans, CREATE, BOOKMARKS, parent1_id, "B");
    ASSERT_TRUE(child.good());
    child.PutIsUnsynced(true);
    child.PutIsDir(true);
    child.PutSpecifics(DefaultBookmarkSpecifics());
    child.PutId(ids_.FromNumber(-103));
    parent.PutBaseVersion(1);
  }
  {
    // Children of parent2: one brand new (-104), one with a server id (105).
    syncable::WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
    MutableEntry parent(&wtrans, CREATE, BOOKMARKS, parent2_id, "A");
    ASSERT_TRUE(parent.good());
    parent.PutIsUnsynced(true);
    parent.PutIsDir(true);
    parent.PutSpecifics(DefaultBookmarkSpecifics());
    parent.PutId(ids_.FromNumber(-104));
    MutableEntry child(&wtrans, CREATE, BOOKMARKS, parent2_id, "B");
    ASSERT_TRUE(child.good());
    child.PutIsUnsynced(true);
    child.PutIsDir(true);
    child.PutSpecifics(DefaultBookmarkSpecifics());
    child.PutId(ids_.FromNumber(105));
    child.PutBaseVersion(1);
  }
  EXPECT_TRUE(SyncShareNudge());
  ASSERT_EQ(6u, mock_server_->committed_ids().size());
  // All we really care about is that parent1_id and parent2_id are the first
  // two IDs (in either order), and that the children make up the next four
  // (in any order). committed_ids() is random-access, so slice it directly.
  auto parents_begin = mock_server_->committed_ids().begin();
  auto parents_end = parents_begin + 2;
  auto children_begin = parents_end;
  auto children_end = mock_server_->committed_ids().end();
  EXPECT_EQ(1, std::count(parents_begin, parents_end, parent1_id));
  EXPECT_EQ(1, std::count(parents_begin, parents_end, parent2_id));
  EXPECT_EQ(1, std::count(children_begin, children_end, ids_.FromNumber(-103)));
  EXPECT_EQ(1, std::count(children_begin, children_end, ids_.FromNumber(102)));
  EXPECT_EQ(1, std::count(children_begin, children_end, ids_.FromNumber(105)));
  EXPECT_EQ(1, std::count(children_begin, children_end, ids_.FromNumber(-104)));
}
// A parent with two new children: the parent must commit first, but the two
// children may commit in either order.
TEST_F(SyncerTest, TestCommitListOrderingCounterexample) {
  syncable::Id child2_id = ids_.NewServerId();
  {
    syncable::WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
    MutableEntry parent(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(), "P");
    ASSERT_TRUE(parent.good());
    parent.PutIsUnsynced(true);
    parent.PutIsDir(true);
    parent.PutSpecifics(DefaultBookmarkSpecifics());
    parent.PutId(parent_id_);
    MutableEntry child1(&wtrans, CREATE, BOOKMARKS, parent_id_, "1");
    ASSERT_TRUE(child1.good());
    child1.PutIsUnsynced(true);
    child1.PutId(child_id_);
    child1.PutSpecifics(DefaultBookmarkSpecifics());
    MutableEntry child2(&wtrans, CREATE, BOOKMARKS, parent_id_, "2");
    ASSERT_TRUE(child2.good());
    child2.PutIsUnsynced(true);
    child2.PutSpecifics(DefaultBookmarkSpecifics());
    child2.PutId(child2_id);
    parent.PutBaseVersion(1);
    child1.PutBaseVersion(1);
    child2.PutBaseVersion(1);
  }
  EXPECT_TRUE(SyncShareNudge());
  ASSERT_EQ(3u, mock_server_->committed_ids().size());
  EXPECT_EQ(parent_id_, mock_server_->committed_ids()[0]);
  // There are two possible valid orderings for the children. (The branch
  // condition already establishes committed_ids()[1], so only the remaining
  // slot needs asserting in the first case.)
  if (child2_id == mock_server_->committed_ids()[1]) {
    EXPECT_EQ(child_id_, mock_server_->committed_ids()[2]);
  } else {
    EXPECT_EQ(child_id_, mock_server_->committed_ids()[1]);
    EXPECT_EQ(child2_id, mock_server_->committed_ids()[2]);
  }
}
// Verifies commit ordering when a brand-new (local-ID) folder and a
// server-known child are committed together: the server-known parent is
// committed first, then the new folder, then the child, and the new folder's
// ID is reassigned by the commit.
TEST_F(SyncerTest, TestCommitListOrderingAndNewParent) {
  string parent1_name = "1";
  string parent2_name = "A";
  string child_name = "B";
  {
    // A server-known folder ("1") in the root, marked as locally changed.
    syncable::WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
    MutableEntry parent(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(),
                        parent1_name);
    ASSERT_TRUE(parent.good());
    parent.PutIsUnsynced(true);
    parent.PutIsDir(true);
    parent.PutSpecifics(DefaultBookmarkSpecifics());
    parent.PutId(parent_id_);
    parent.PutBaseVersion(1);
  }
  // parent2 is brand new (local ID, no base version); child is server-known.
  syncable::Id parent2_id = ids_.NewLocalId();
  syncable::Id child_id = ids_.NewServerId();
  {
    syncable::WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
    MutableEntry parent2(&wtrans, CREATE, BOOKMARKS, parent_id_, parent2_name);
    ASSERT_TRUE(parent2.good());
    parent2.PutIsUnsynced(true);
    parent2.PutIsDir(true);
    parent2.PutSpecifics(DefaultBookmarkSpecifics());
    parent2.PutId(parent2_id);
    MutableEntry child(&wtrans, CREATE, BOOKMARKS, parent2_id, child_name);
    ASSERT_TRUE(child.good());
    child.PutIsUnsynced(true);
    child.PutIsDir(true);
    child.PutSpecifics(DefaultBookmarkSpecifics());
    child.PutId(child_id);
    child.PutBaseVersion(1);
  }
  EXPECT_TRUE(SyncShareNudge());
  ASSERT_EQ(3u, mock_server_->committed_ids().size());
  // If this test starts failing, be aware other sort orders could be valid.
  EXPECT_EQ(parent_id_, mock_server_->committed_ids()[0]);
  EXPECT_EQ(parent2_id, mock_server_->committed_ids()[1]);
  EXPECT_EQ(child_id, mock_server_->committed_ids()[2]);
  {
    syncable::ReadTransaction rtrans(FROM_HERE, directory());
    // Check that things committed correctly.
    Entry entry_1(&rtrans, GET_BY_ID, parent_id_);
    EXPECT_EQ(parent1_name, entry_1.GetNonUniqueName());
    // Check that parent2 is a subfolder of parent1.
    EXPECT_EQ(1, CountEntriesWithName(&rtrans, parent_id_, parent2_name));
    // Parent2 was a local ID and thus should have changed on commit!
    Entry pre_commit_entry_parent2(&rtrans, GET_BY_ID, parent2_id);
    ASSERT_FALSE(pre_commit_entry_parent2.good());
    // Look up the new ID.
    Id parent2_committed_id =
        GetOnlyEntryWithName(&rtrans, parent_id_, parent2_name);
    EXPECT_TRUE(parent2_committed_id.ServerKnows());
    // The child should now be parented under parent2's server-assigned ID.
    Entry child(&rtrans, GET_BY_ID, child_id);
    EXPECT_EQ(parent2_committed_id, child.GetParentId());
  }
}
// Like TestCommitListOrderingAndNewParent, but both the new folder and its
// child start with client-local IDs, so both must be ID-reassigned by the
// commit and the child re-parented under the folder's new server ID.
TEST_F(SyncerTest, TestCommitListOrderingAndNewParentAndChild) {
  string parent_name = "1";
  string parent2_name = "A";
  string child_name = "B";
  {
    // Server-known folder in the root with a pending local change.
    syncable::WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
    MutableEntry parent(&wtrans, CREATE, BOOKMARKS, wtrans.root_id(),
                        parent_name);
    ASSERT_TRUE(parent.good());
    parent.PutIsUnsynced(true);
    parent.PutIsDir(true);
    parent.PutSpecifics(DefaultBookmarkSpecifics());
    parent.PutId(parent_id_);
    parent.PutBaseVersion(1);
  }
  int64_t meta_handle_b;
  // Both parent2 and its child are brand-new items with client-local IDs.
  const Id parent2_local_id = ids_.NewLocalId();
  const Id child_local_id = ids_.NewLocalId();
  {
    syncable::WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
    MutableEntry parent2(&wtrans, CREATE, BOOKMARKS, parent_id_, parent2_name);
    ASSERT_TRUE(parent2.good());
    parent2.PutIsUnsynced(true);
    parent2.PutIsDir(true);
    parent2.PutSpecifics(DefaultBookmarkSpecifics());
    parent2.PutId(parent2_local_id);
    MutableEntry child(&wtrans, CREATE, BOOKMARKS, parent2_local_id,
                       child_name);
    ASSERT_TRUE(child.good());
    child.PutIsUnsynced(true);
    child.PutIsDir(true);
    child.PutSpecifics(DefaultBookmarkSpecifics());
    child.PutId(child_local_id);
    meta_handle_b = child.GetMetahandle();
  }
  EXPECT_TRUE(SyncShareNudge());
  ASSERT_EQ(3u, mock_server_->committed_ids().size());
  // If this test starts failing, be aware other sort orders could be valid.
  EXPECT_EQ(parent_id_, mock_server_->committed_ids()[0]);
  EXPECT_EQ(parent2_local_id, mock_server_->committed_ids()[1]);
  EXPECT_EQ(child_local_id, mock_server_->committed_ids()[2]);
  {
    syncable::ReadTransaction rtrans(FROM_HERE, directory());
    Entry parent(&rtrans, GET_BY_ID,
                 GetOnlyEntryWithName(&rtrans, rtrans.root_id(), parent_name));
    ASSERT_TRUE(parent.good());
    EXPECT_TRUE(parent.GetId().ServerKnows());
    // parent2 should now carry a server-assigned ID...
    Entry parent2(&rtrans, GET_BY_ID,
                  GetOnlyEntryWithName(&rtrans, parent.GetId(), parent2_name));
    ASSERT_TRUE(parent2.good());
    EXPECT_TRUE(parent2.GetId().ServerKnows());
    // Id changed on commit, so this should fail.
    Entry local_parent2_id_entry(&rtrans, GET_BY_ID, parent2_local_id);
    ASSERT_FALSE(local_parent2_id_entry.good());
    // ...and so should the child, which must also have been re-parented
    // under parent2's new server ID.
    Entry entry_b(&rtrans, GET_BY_HANDLE, meta_handle_b);
    EXPECT_TRUE(entry_b.GetId().ServerKnows());
    EXPECT_EQ(parent2.GetId(), entry_b.GetParentId());
  }
}
TEST_F(SyncerTest, UpdateWithZeroLengthName) {
  // Updates whose name is the empty string must not break the sync cycle.
  const std::string kEmptyName;
  // An update with an illegal (empty) name...
  mock_server_->AddUpdateDirectory(1, 0, kEmptyName, 1, 10,
                                   foreign_cache_guid(), "-1");
  // ...and a well-formed one that will be deleted below.
  mock_server_->AddUpdateDirectory(2, 0, "FOO", 1, 10, foreign_cache_guid(),
                                   "-2");
  EXPECT_TRUE(SyncShareNudge());
  // Now delete the well-formed item; its tombstone update also carries an
  // empty name.
  mock_server_->AddUpdateDirectory(2, 0, kEmptyName, 2, 20,
                                   foreign_cache_guid(), "-2");
  mock_server_->SetLastUpdateDeleted();
  EXPECT_TRUE(SyncShareNudge());
}
TEST_F(SyncerTest, TestBasicUpdate) {
  // A single directory update from the server should be applied cleanly.
  const string kServerId = "some_id";
  const string kParentId = "0";
  const string kName = "in_root";
  const int64_t kVersion = 10;
  const int64_t kTimestamp = 10;
  mock_server_->AddUpdateDirectory(kServerId, kParentId, kName, kVersion,
                                   kTimestamp, foreign_cache_guid(), "-1");
  EXPECT_TRUE(SyncShareNudge());
  {
    syncable::WriteTransaction trans(FROM_HERE, UNITTEST, directory());
    Entry entry(&trans, GET_BY_ID, syncable::Id::CreateFromServerId("some_id"));
    ASSERT_TRUE(entry.good());
    // The applied entry is a live directory at the server's version, with no
    // pending local changes and no unapplied server-side state.
    EXPECT_TRUE(entry.GetIsDir());
    EXPECT_EQ(kVersion, entry.GetServerVersion());
    EXPECT_EQ(kVersion, entry.GetBaseVersion());
    EXPECT_FALSE(entry.GetIsUnappliedUpdate());
    EXPECT_FALSE(entry.GetIsUnsynced());
    EXPECT_FALSE(entry.GetServerIsDel());
    EXPECT_FALSE(entry.GetIsDel());
  }
}
// Exercises a mix of updates: ones that should apply cleanly (including a
// duplicate name), ones that must stay unapplied (unknown parent, hierarchy
// cycle), and ones that must fail verification and be dropped (is_dir flip,
// stale version). Checks both the resulting entry states and the hierarchy
// conflict counters after each cycle.
TEST_F(SyncerTest, IllegalAndLegalUpdates) {
  Id root = TestIdFactory::root();
  // Should apply just fine.
  mock_server_->AddUpdateDirectory(1, 0, "in_root", 10, 10,
                                   foreign_cache_guid(), "-1");
  // Same name. But this SHOULD work.
  mock_server_->AddUpdateDirectory(2, 0, "in_root", 10, 10,
                                   foreign_cache_guid(), "-2");
  // Unknown parent: should never be applied. "-80" is a legal server ID,
  // because any string sent by the server is a legal server ID in the sync
  // protocol, but it's not the ID of any item known to the client. This
  // update should succeed validation, but be stuck in the unapplied state
  // until an item with the server ID "-80" arrives.
  mock_server_->AddUpdateDirectory(3, -80, "bad_parent", 10, 10,
                                   foreign_cache_guid(), "-3");
  EXPECT_TRUE(SyncShareNudge());
  // Id 3 should be in conflict now.
  EXPECT_EQ(
      1,
      GetUpdateCounters(BOOKMARKS).num_hierarchy_conflict_application_failures);
  // The only request in that loop should have been a GetUpdate.
  // At that point, we didn't know whether or not we had conflicts.
  ASSERT_TRUE(mock_server_->last_request().has_get_updates());
  VerifyHierarchyConflictsUnspecified(mock_server_->last_request());
  // These entries will be used in the second set of updates.
  mock_server_->AddUpdateDirectory(4, 0, "newer_version", 20, 10,
                                   foreign_cache_guid(), "-4");
  mock_server_->AddUpdateDirectory(5, 0, "circular1", 10, 10,
                                   foreign_cache_guid(), "-5");
  mock_server_->AddUpdateDirectory(6, 5, "circular2", 10, 10,
                                   foreign_cache_guid(), "-6");
  // Items 9 and 100 descend from the still-unapplied item 3, so the parent
  // conflict propagates down the hierarchy to them.
  mock_server_->AddUpdateDirectory(9, 3, "bad_parent_child", 10, 10,
                                   foreign_cache_guid(), "-9");
  mock_server_->AddUpdateDirectory(100, 9, "bad_parent_child2", 10, 10,
                                   foreign_cache_guid(), "-100");
  mock_server_->AddUpdateDirectory(10, 0, "dir_to_bookmark", 10, 10,
                                   foreign_cache_guid(), "-10");
  EXPECT_TRUE(SyncShareNudge());
  // The three items with an unresolved parent should be unapplied (3, 9, 100).
  // The name clash should also still be in conflict.
  EXPECT_EQ(
      3,
      GetUpdateCounters(BOOKMARKS).num_hierarchy_conflict_application_failures);
  // This time around, we knew that there were conflicts.
  ASSERT_TRUE(mock_server_->last_request().has_get_updates());
  VerifyHierarchyConflictsReported(mock_server_->last_request());
  {
    syncable::WriteTransaction trans(FROM_HERE, UNITTEST, directory());
    // Even though it has the same name, it should work.
    Entry name_clash(&trans, GET_BY_ID, ids_.FromNumber(2));
    ASSERT_TRUE(name_clash.good());
    EXPECT_FALSE(name_clash.GetIsUnappliedUpdate())
        << "Duplicate name SHOULD be OK.";
    Entry bad_parent(&trans, GET_BY_ID, ids_.FromNumber(3));
    ASSERT_TRUE(bad_parent.good());
    EXPECT_TRUE(bad_parent.GetIsUnappliedUpdate())
        << "child of unknown parent should be in conflict";
    Entry bad_parent_child(&trans, GET_BY_ID, ids_.FromNumber(9));
    ASSERT_TRUE(bad_parent_child.good());
    EXPECT_TRUE(bad_parent_child.GetIsUnappliedUpdate())
        << "grandchild of unknown parent should be in conflict";
    Entry bad_parent_child2(&trans, GET_BY_ID, ids_.FromNumber(100));
    ASSERT_TRUE(bad_parent_child2.good());
    EXPECT_TRUE(bad_parent_child2.GetIsUnappliedUpdate())
        << "great-grandchild of unknown parent should be in conflict";
  }
  // Updating 1 should not affect item 2 of the same name.
  mock_server_->AddUpdateDirectory(1, 0, "new_name", 20, 20,
                                   foreign_cache_guid(), "-1");
  // Moving 5 under 6 will create a cycle: a conflict.
  mock_server_->AddUpdateDirectory(5, 6, "circular3", 20, 20,
                                   foreign_cache_guid(), "-5");
  // Flip the is_dir bit: should fail verify & be dropped.
  mock_server_->AddUpdateBookmark(10, 0, "dir_to_bookmark", 20, 20,
                                  foreign_cache_guid(), "-10");
  EXPECT_TRUE(SyncShareNudge());
  // Version number older than last known: should fail verify & be dropped.
  mock_server_->AddUpdateDirectory(4, 0, "old_version", 10, 10,
                                   foreign_cache_guid(), "-4");
  EXPECT_TRUE(SyncShareNudge());
  {
    syncable::ReadTransaction trans(FROM_HERE, directory());
    // Item 10 ignored the is_dir flip: still a directory at version 10.
    Entry still_a_dir(&trans, GET_BY_ID, ids_.FromNumber(10));
    ASSERT_TRUE(still_a_dir.good());
    EXPECT_FALSE(still_a_dir.GetIsUnappliedUpdate());
    EXPECT_EQ(10u, still_a_dir.GetBaseVersion());
    EXPECT_EQ(10u, still_a_dir.GetServerVersion());
    EXPECT_TRUE(still_a_dir.GetIsDir());
    // Item 1 took the rename to "new_name".
    Entry rename(&trans, GET_BY_ID, ids_.FromNumber(1));
    ASSERT_TRUE(rename.good());
    EXPECT_EQ(root, rename.GetParentId());
    EXPECT_EQ("new_name", rename.GetNonUniqueName());
    EXPECT_FALSE(rename.GetIsUnappliedUpdate());
    EXPECT_EQ(ids_.FromNumber(1), rename.GetId());
    EXPECT_EQ(20u, rename.GetBaseVersion());
    // Item 2 is untouched by item 1's rename; it keeps "in_root".
    Entry name_clash(&trans, GET_BY_ID, ids_.FromNumber(2));
    ASSERT_TRUE(name_clash.good());
    EXPECT_EQ(root, name_clash.GetParentId());
    EXPECT_EQ(ids_.FromNumber(2), name_clash.GetId());
    EXPECT_EQ(10u, name_clash.GetBaseVersion());
    EXPECT_EQ("in_root", name_clash.GetNonUniqueName());
    // Item 4's stale version-10 update was dropped; version 20 sticks.
    Entry ignored_old_version(&trans, GET_BY_ID, ids_.FromNumber(4));
    ASSERT_TRUE(ignored_old_version.good());
    EXPECT_EQ("newer_version", ignored_old_version.GetNonUniqueName());
    EXPECT_FALSE(ignored_old_version.GetIsUnappliedUpdate());
    EXPECT_EQ(20u, ignored_old_version.GetBaseVersion());
    // Item 5's move under item 6 would create a cycle, so the update stays
    // unapplied: the server parent is 6 while the local parent is still root.
    Entry circular_parent_issue(&trans, GET_BY_ID, ids_.FromNumber(5));
    ASSERT_TRUE(circular_parent_issue.good());
    EXPECT_TRUE(circular_parent_issue.GetIsUnappliedUpdate())
        << "circular move should be in conflict";
    EXPECT_EQ(root_id_, circular_parent_issue.GetParentId());
    EXPECT_EQ(ids_.FromNumber(6), circular_parent_issue.GetServerParentId());
    EXPECT_EQ(10u, circular_parent_issue.GetBaseVersion());
    Entry circular_parent_target(&trans, GET_BY_ID, ids_.FromNumber(6));
    ASSERT_TRUE(circular_parent_target.good());
    EXPECT_FALSE(circular_parent_target.GetIsUnappliedUpdate());
    EXPECT_EQ(circular_parent_issue.GetId(),
              circular_parent_target.GetParentId());
    EXPECT_EQ(10u, circular_parent_target.GetBaseVersion());
  }
  // Hierarchy conflicts: items 3, 9, 100 (unknown parent) plus item 5 (cycle).
  EXPECT_EQ(
      4,
      GetUpdateCounters(BOOKMARKS).num_hierarchy_conflict_application_failures);
}
// A commit with a lost response produces an update that has to be reunited
// with its parent. When the reunited item is a folder, its children must be
// re-parented under the folder's new server-assigned ID.
TEST_F(SyncerTest, CommitReuniteUpdateAdjustsChildren) {
  // Create a folder in the root.
  int64_t metahandle_folder;
  {
    syncable::WriteTransaction trans(FROM_HERE, UNITTEST, directory());
    MutableEntry entry(&trans, CREATE, BOOKMARKS, trans.root_id(),
                       "new_folder");
    ASSERT_TRUE(entry.good());
    entry.PutIsDir(true);
    entry.PutSpecifics(DefaultBookmarkSpecifics());
    entry.PutIsUnsynced(true);
    metahandle_folder = entry.GetMetahandle();
  }
  // Verify it and pull the ID out of the folder.
  syncable::Id folder_id;
  int64_t metahandle_entry;
  {
    syncable::ReadTransaction trans(FROM_HERE, directory());
    Entry entry(&trans, GET_BY_HANDLE, metahandle_folder);
    ASSERT_TRUE(entry.good());
    folder_id = entry.GetId();
    // The folder has never been committed, so it still carries a local ID.
    ASSERT_TRUE(!folder_id.ServerKnows());
  }
  // Create an entry in the newly created folder.
  {
    syncable::WriteTransaction trans(FROM_HERE, UNITTEST, directory());
    MutableEntry entry(&trans, CREATE, BOOKMARKS, folder_id, "new_entry");
    ASSERT_TRUE(entry.good());
    metahandle_entry = entry.GetMetahandle();
    WriteTestDataToEntry(&trans, &entry);
  }
  // Verify it and pull the ID out of the entry.
  syncable::Id entry_id;
  {
    syncable::ReadTransaction trans(FROM_HERE, directory());
    Entry entry(&trans, GET_BY_HANDLE, metahandle_entry);
    ASSERT_TRUE(entry.good());
    EXPECT_EQ(folder_id, entry.GetParentId());
    EXPECT_EQ("new_entry", entry.GetNonUniqueName());
    entry_id = entry.GetId();
    EXPECT_TRUE(!entry_id.ServerKnows());
    VerifyTestDataInEntry(&trans, &entry);
  }
  // Now, to emulate a commit response failure, we just don't commit it.
  int64_t new_version = 150;  // any larger value.
  int64_t timestamp = 20;     // arbitrary value.
  syncable::Id new_folder_id =
      syncable::Id::CreateFromServerId("folder_server_id");
  // The following update should cause the folder to both apply the update, as
  // well as reassociate the id. The originator fields (our own cache GUID
  // plus the folder's original local ID) are what let the client match this
  // update to its uncommitted folder.
  mock_server_->AddUpdateDirectory(new_folder_id, root_id_, "new_folder",
                                   new_version, timestamp, local_cache_guid(),
                                   folder_id.GetServerId());
  // We don't want it accidentally committed, just the update applied.
  mock_server_->set_conflict_all_commits(true);
  // Alright! Apply that update!
  EXPECT_FALSE(SyncShareNudge());
  {
    // The folder's ID should have been updated.
    syncable::ReadTransaction trans(FROM_HERE, directory());
    Entry folder(&trans, GET_BY_HANDLE, metahandle_folder);
    ASSERT_TRUE(folder.good());
    EXPECT_EQ("new_folder", folder.GetNonUniqueName());
    EXPECT_EQ(new_version, folder.GetBaseVersion());
    EXPECT_EQ(new_folder_id, folder.GetId());
    EXPECT_TRUE(folder.GetId().ServerKnows());
    EXPECT_EQ(trans.root_id(), folder.GetParentId());
    // Since it was updated, the old folder should not exist.
    Entry old_dead_folder(&trans, GET_BY_ID, folder_id);
    EXPECT_FALSE(old_dead_folder.good());
    // The child's parent should have changed.
    Entry entry(&trans, GET_BY_HANDLE, metahandle_entry);
    ASSERT_TRUE(entry.good());
    EXPECT_EQ("new_entry", entry.GetNonUniqueName());
    EXPECT_EQ(new_folder_id, entry.GetParentId());
    EXPECT_TRUE(!entry.GetId().ServerKnows());
    VerifyTestDataInEntry(&trans, &entry);
  }
}
// A commit with a lost response produces an update that has to be reunited with
// its parent.
TEST_F(SyncerTest, CommitReuniteUpdate) {
  // Make a new bookmark item directly under the root.
  int64_t metahandle;
  {
    syncable::WriteTransaction trans(FROM_HERE, UNITTEST, directory());
    MutableEntry entry(&trans, CREATE, BOOKMARKS, trans.root_id(), "new_entry");
    ASSERT_TRUE(entry.good());
    metahandle = entry.GetMetahandle();
    WriteTestDataToEntry(&trans, &entry);
  }
  // Record the client-local ID it was assigned.
  syncable::Id local_id;
  {
    syncable::ReadTransaction trans(FROM_HERE, directory());
    Entry entry(&trans, GET_BY_HANDLE, metahandle);
    ASSERT_TRUE(entry.good());
    local_id = entry.GetId();
    EXPECT_FALSE(local_id.ServerKnows());
    VerifyTestDataInEntry(&trans, &entry);
  }
  // Emulate a commit whose response was lost: nothing is committed locally,
  // but the server sends an update carrying our cache GUID and the item's
  // original local ID in the originator fields.
  const int64_t kNewVersion = 150;  // Any value larger than the local one.
  const int64_t kTimestamp = 20;    // Arbitrary.
  syncable::Id new_entry_id = syncable::Id::CreateFromServerId("server_id");
  mock_server_->AddUpdateBookmark(new_entry_id, root_id_, "new_entry",
                                  kNewVersion, kTimestamp, local_cache_guid(),
                                  local_id.GetServerId());
  // Block any real commit; we only want the update applied.
  mock_server_->set_conflict_all_commits(true);
  EXPECT_TRUE(SyncShareNudge());
  {
    // The local entry should now be reunited with its server counterpart:
    // same metahandle, but the server-assigned ID and version.
    syncable::ReadTransaction trans(FROM_HERE, directory());
    Entry entry(&trans, GET_BY_HANDLE, metahandle);
    ASSERT_TRUE(entry.good());
    EXPECT_EQ(kNewVersion, entry.GetBaseVersion());
    EXPECT_EQ(new_entry_id, entry.GetId());
    EXPECT_EQ("new_entry", entry.GetNonUniqueName());
  }
}
// A commit with a lost response must work even if the local entry was deleted
// before the update is applied. We should not duplicate the local entry in
// this case, but just create another one alongside. We may wish to examine
// this behavior in the future as it can create hanging uploads that never
// finish, that must be cleaned up on the server side after some time.
TEST_F(SyncerTest, CommitReuniteUpdateDoesNotChokeOnDeletedLocalEntry) {
  // Create a entry in the root.
  int64_t entry_metahandle;
  {
    syncable::WriteTransaction trans(FROM_HERE, UNITTEST, directory());
    MutableEntry entry(&trans, CREATE, BOOKMARKS, trans.root_id(), "new_entry");
    ASSERT_TRUE(entry.good());
    entry_metahandle = entry.GetMetahandle();
    WriteTestDataToEntry(&trans, &entry);
  }
  // Verify it and pull the (still client-local) ID out.
  syncable::Id entry_id;
  {
    syncable::ReadTransaction trans(FROM_HERE, directory());
    Entry entry(&trans, GET_BY_HANDLE, entry_metahandle);
    ASSERT_TRUE(entry.good());
    entry_id = entry.GetId();
    EXPECT_TRUE(!entry_id.ServerKnows());
    VerifyTestDataInEntry(&trans, &entry);
  }
  // Now, to emulate a commit response failure, we just don't commit it.
  int64_t new_version = 150;  // any larger value.
  int64_t timestamp = 20;     // arbitrary value.
  syncable::Id new_entry_id = syncable::Id::CreateFromServerId("server_id");
  // Generate an update from the server with a relevant ID reassignment.
  mock_server_->AddUpdateBookmark(new_entry_id, root_id_, "new_entry",
                                  new_version, timestamp, local_cache_guid(),
                                  entry_id.GetServerId());
  // We don't want it accidentally committed, just the update applied.
  mock_server_->set_conflict_all_commits(true);
  // Purposefully delete the entry now before the update application finishes.
  {
    syncable::WriteTransaction trans(FROM_HERE, UNITTEST, directory());
    // Named |doomed_entry_id| (not |new_entry_id|) to avoid shadowing the
    // server-side ID declared above.
    Id doomed_entry_id =
        GetOnlyEntryWithName(&trans, trans.root_id(), "new_entry");
    MutableEntry entry(&trans, GET_BY_ID, doomed_entry_id);
    ASSERT_TRUE(entry.good());
    entry.PutIsDel(true);
  }
  // Just don't CHECK fail in sync, have the update split.
  EXPECT_TRUE(SyncShareNudge());
  {
    // A live entry exists under the name again (from the server update), and
    // the locally-deleted original remains a tombstone alongside it.
    syncable::ReadTransaction trans(FROM_HERE, directory());
    Id surviving_entry_id =
        GetOnlyEntryWithName(&trans, trans.root_id(), "new_entry");
    Entry entry(&trans, GET_BY_ID, surviving_entry_id);
    ASSERT_TRUE(entry.good());
    EXPECT_FALSE(entry.GetIsDel());
    Entry old_entry(&trans, GET_BY_ID, entry_id);
    ASSERT_TRUE(old_entry.good());
    EXPECT_TRUE(old_entry.GetIsDel());
  }
}
// TODO(chron): Add more unsanitized name tests.
TEST_F(SyncerTest, ConflictMatchingEntryHandlesUnsanitizedNames) {
  // Two server directories whose names contain a path separator, which
  // requires sanitization locally.
  mock_server_->AddUpdateDirectory(1, 0, "A/A", 10, 10, foreign_cache_guid(),
                                   "-1");
  mock_server_->AddUpdateDirectory(2, 0, "B/B", 10, 10, foreign_cache_guid(),
                                   "-2");
  mock_server_->set_conflict_all_commits(true);
  EXPECT_TRUE(SyncShareNudge());
  {
    syncable::WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
    // Force both items into conflict: the first is locally modified as well,
    // the second merely has an unapplied server update pending.
    MutableEntry first(&wtrans, GET_BY_ID, ids_.FromNumber(1));
    ASSERT_TRUE(first.good());
    first.PutIsUnsynced(true);
    first.PutIsUnappliedUpdate(true);
    first.PutServerVersion(20);
    MutableEntry second(&wtrans, GET_BY_ID, ids_.FromNumber(2));
    ASSERT_TRUE(second.good());
    second.PutIsUnappliedUpdate(true);
    second.PutServerVersion(20);
  }
  EXPECT_TRUE(SyncShareNudge());
  mock_server_->set_conflict_all_commits(false);
  {
    syncable::ReadTransaction trans(FROM_HERE, directory());
    // Both conflicts should be resolved: no unsynced or unapplied state
    // remains, and both entries sit at the server version.
    Entry first(&trans, GET_BY_ID, ids_.FromNumber(1));
    ASSERT_TRUE(first.good());
    EXPECT_FALSE(first.GetIsUnsynced());
    EXPECT_FALSE(first.GetIsUnappliedUpdate());
    EXPECT_EQ(20, first.GetServerVersion());
    Entry second(&trans, GET_BY_ID, ids_.FromNumber(2));
    ASSERT_TRUE(second.good());
    EXPECT_FALSE(second.GetIsUnsynced());
    EXPECT_FALSE(second.GetIsUnappliedUpdate());
    EXPECT_EQ(20, second.GetServerVersion());
  }
}
TEST_F(SyncerTest, ConflictMatchingEntryHandlesNormalNames) {
  // Same scenario as the unsanitized-names test, but with ordinary names
  // that require no sanitization.
  mock_server_->AddUpdateDirectory(1, 0, "A", 10, 10, foreign_cache_guid(),
                                   "-1");
  mock_server_->AddUpdateDirectory(2, 0, "B", 10, 10, foreign_cache_guid(),
                                   "-2");
  mock_server_->set_conflict_all_commits(true);
  EXPECT_TRUE(SyncShareNudge());
  {
    syncable::WriteTransaction wtrans(FROM_HERE, UNITTEST, directory());
    // Put both items into a conflicting state: the first is also locally
    // modified, the second only has a pending server-side update.
    MutableEntry first(&wtrans, GET_BY_ID, ids_.FromNumber(1));
    ASSERT_TRUE(first.good());
    first.PutIsUnsynced(true);
    first.PutIsUnappliedUpdate(true);
    first.PutServerVersion(20);
    MutableEntry second(&wtrans, GET_BY_ID, ids_.FromNumber(2));
    ASSERT_TRUE(second.good());
    second.PutIsUnappliedUpdate(true);
    second.PutServerVersion(20);
  }
  EXPECT_TRUE(SyncShareNudge());
  mock_server_->set_conflict_all_commits(false);
  {
    syncable::ReadTransaction trans(FROM_HERE, directory());
    // After the cycle, both conflicts are resolved and both entries are at
    // the server's version with no pending state.
    Entry first(&trans, GET_BY_ID, ids_.FromNumber(1));
    ASSERT_TRUE(first.good());
    EXPECT_FALSE(first.GetIsUnsynced());
    EXPECT_FALSE(first.GetIsUnappliedUpdate());
    EXPECT_EQ(20, first.GetServerVersion());
    Entry second(&trans, GET_BY_ID, ids_.FromNumber(2));
    ASSERT_TRUE(second.good());
    EXPECT_FALSE(second.GetIsUnsynced());
    EXPECT_FALSE(second.GetIsUnappliedUpdate());
    EXPECT_EQ(20, second.GetServerVersion());
  }
}
TEST_F(SyncerTest, ReverseFolderOrderingTest) {
mock_server_->AddUpdateDirectory(4, 3, "ggchild", 10, 10,
foreign_cache_guid(), "-4");
mock_server_->AddUpdateDirectory(3, 2, "gchild", 10, 10, foreign_cache_guid(),
"-3");
mock_server_->AddUpdateDirectory(5, 4, "gggchild", 10, 10,
foreign_cache_guid(), "-5");
mock_server_->AddUpdateDirectory(2, 1, "child", 10, 10, foreign_cache_guid(),
"-2");
mock_server_->AddUpdateDirectory(1, 0, "parent", 10, 10, foreign_cache_guid(),
"-1");
EXPECT_TRUE(SyncShareNudge());
syncable::ReadTransaction trans(FROM_HERE, directory());
Id child_id = GetOnlyEntryWithName(&trans, ids_.FromNumber(4), "gggchild");
Entry child(&trans, GET_BY_ID