Revert "Delay erasing proxy ports when peer died"
This reverts commit 0b1a94ed9435dcfacb28dd0e0857bdb23909a692.
Reason for revert: Causing browser test flakiness.
Original change's description:
> Delay erasing proxy ports when peer died
>
> Fixed: chromium:1259596
>
> Change-Id: I3e0a4338529f23e974b796aefc2c362b7b8cacbf
> Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/3372822
> Reviewed-by: Ken Rockot <rockot@google.com>
> Commit-Queue: Stephen Röttger <sroettger@google.com>
> Cr-Commit-Position: refs/heads/main@{#958502}

Fixed: 1287587
Change-Id: I2cbe379a537a61c03be6c1b24e007a56fc5d28c7
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/3389815
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Owners-Override: Scott Violet <sky@chromium.org>
Reviewed-by: Ken Rockot <rockot@google.com>
Commit-Queue: Daniel Cheng <dcheng@chromium.org>
Cr-Commit-Position: refs/heads/main@{#959434}
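
For context on what the revert restores: the original change parked a dying
proxy port in a kClosed state, sent an ObserveClosure event to the peer, and
erased the port only once the peer's acknowledgement came back; with the
revert, the port is erased immediately when the dead peer is observed, as
before. A minimal sketch of the two lifetimes (illustrative only: Port,
kClosed, ErasePort and ObserveClosure mirror names in the diff below, while
the map, the functions and main() are invented stand-ins, not the real
mojo/core/ports API):

  #include <map>
  #include <string>

  enum class State { kReceiving, kProxying, kClosed };
  struct Port { State state = State::kProxying; };

  std::map<std::string, Port> ports;  // stand-in for the node's port table

  // Restored (pre-change) behavior: erase the proxy as soon as its peer
  // node is observed dead.
  void OnPeerNodeLostImmediate(const std::string& name) {
    ports.erase(name);  // ErasePort() runs unconditionally again
  }

  // Reverted behavior, first half: park the port and notify the peer.
  void OnPeerNodeLostDeferred(const std::string& name) {
    ports[name].state = State::kClosed;
    // ...an ObserveClosure event would be forwarded to the peer here...
  }

  // Reverted behavior, second half: the peer's ack finally frees the port.
  void OnObserveClosureAck(const std::string& name) {
    auto it = ports.find(name);
    if (it != ports.end() && it->second.state == State::kClosed)
      ports.erase(it);  // only now is the port deleted
  }

  int main() {
    ports["proxy"] = Port{};
    OnPeerNodeLostDeferred("proxy");  // port survives in kClosed
    OnObserveClosureAck("proxy");     // erased only on the ack
    return ports.empty() ? 0 : 1;
  }

The deferred scheme closes the window where a peer could still address an
already-erased port, but it also leaves kClosed ports alive until an ack
that may never arrive; this revert returns to the simpler immediate
teardown.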
diff --git a/mojo/core/ports/node.cc b/mojo/core/ports/node.cc
index 3f94bce..c5bb76e 100644
--- a/mojo/core/ports/node.cc
+++ b/mojo/core/ports/node.cc
@@ -914,7 +914,6 @@
   bool notify_delegate = false;
   NodeName peer_node_name;
   bool try_remove_proxy = false;
-  bool erase_port = false;
   {
     SinglePortLocker locker(&port_ref);
     auto* port = locker.port();
@@ -947,10 +946,6 @@
       // read from the other end.
       port->last_sequence_num_acknowledged =
           port->next_sequence_num_to_send - 1;
-    } else if (port->state == Port::kClosed) {
-      // This is the ack for a closed proxy port notification. Now it's fine to
-      // delete the port.
-      erase_port = true;
     } else {
       // We haven't yet reached the receiving peer of the closed port, so we'll
       // forward the message along as-is.
@@ -979,9 +974,6 @@
   if (try_remove_proxy)
     TryRemoveProxy(port_ref);

-  if (erase_port)
-    ErasePort(port_ref.name());
-
   if (event)
     delegate_->ForwardEvent(peer_node_name, std::move(event));

@@ -1834,9 +1826,6 @@
   std::vector<PortRef> ports_to_notify;
   std::vector<PortName> dead_proxies_to_broadcast;
   std::vector<std::unique_ptr<UserMessageEvent>> undelivered_messages;
-  ScopedEvent closure_event;
-  NodeName closure_event_target_node;
-
   {
     PortLocker::AssertNoPortsLockedOnCurrentThread();
     base::AutoLock ports_lock(ports_lock_);
@@ -1873,16 +1862,6 @@
         SinglePortLocker locker(&local_port_ref);
         auto* port = locker.port();

-        if (port_name != kInvalidPortName) {
-          // If this is a targeted observe dead proxy event, send out an
-          // ObserveClosure to acknowledge it.
-          closure_event_target_node = port->peer_node_name;
-          closure_event = std::make_unique<ObserveClosureEvent>(
-              port->peer_port_name, local_port_ref.name(),
-              port->next_control_sequence_num_to_send++,
-              port->last_sequence_num_to_receive);
-        }
-
        if (!port->peer_closed) {
          // Treat this as immediate peer closure. It's an exceptional
          // condition akin to a broken pipe, so we don't care about losing
@@ -1901,7 +1880,6 @@
         // broadcast our own death so it can be back-propagated. This is
         // inefficient but rare.
         if (port->state != Port::kReceiving) {
-          port->state = Port::kClosed;
           dead_proxies_to_broadcast.push_back(local_port_ref.name());
           std::vector<std::unique_ptr<UserMessageEvent>> messages;
           port->message_queue.TakeAllMessages(&messages);
@@ -1913,17 +1891,10 @@
       }
     }

-#ifdef MOJO_BACKWARDS_COMPAT
   for (const auto& proxy_name : dead_proxies_to_broadcast) {
     ErasePort(proxy_name);
     DVLOG(2) << "Forcibly deleted port " << proxy_name << "@" << name_;
   }
-#endif
-
-  if (closure_event) {
-    delegate_->ForwardEvent(closure_event_target_node,
-                            std::move(closure_event));
-  }

   // Wake up any receiving ports who have just observed simulated peer closure.
   for (const auto& port : ports_to_notify)