MojoIpcz: Fix more spurious data pipe trap events

Recently r1107084 landed to address another kind of spurious data pipe
trap event, but it introduced a new problem: a portal may now raise
IPCZ_PORTAL_STATUS_DEAD without also raising
IPCZ_PORTAL_STATUS_PEER_CLOSED. This can confuse data pipe trap arming
logic: arming may correctly fail because the peer is closed, yet the
failure is reported with an impossible event claiming that the pipe is
readable when it is not.
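
The confusion can be illustrated with a minimal sketch; the
BlockingEvent enum and ClassifyBlockingStatus helper below are
hypothetical stand-ins for the data pipe arming logic, not the actual
mojo/core/ipcz_driver code:

  #include "ipcz/ipcz.h"

  // When arming fails, the caller reports which condition is already
  // satisfied. If a dead portal omits PEER_CLOSED, the fallback branch
  // wrongly concludes that pending data is the only possible cause and
  // reports the pipe as readable.
  enum class BlockingEvent { kPeerClosed, kReadable };

  BlockingEvent ClassifyBlockingStatus(const IpczPortalStatus& status) {
    if (status.flags & IPCZ_PORTAL_STATUS_PEER_CLOSED) {
      return BlockingEvent::kPeerClosed;
    }
    // Not peer-closed, so assume unread data: impossible for a portal
    // that is DEAD with nothing left to read.
    return BlockingEvent::kReadable;
  }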

The fix is to ensure that any time we set IPCZ_PORTAL_STATUS_DEAD, we
also set IPCZ_PORTAL_STATUS_PEER_CLOSED (but not the other way around).
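
A minimal sketch of the resulting invariant, assuming a checker helper
that does not exist in ipcz itself:

  #include <cassert>

  #include "ipcz/ipcz.h"

  // DEAD now always implies PEER_CLOSED; PEER_CLOSED without DEAD is
  // still valid while unread parcels remain queued.
  void CheckPortalStatusInvariant(const IpczPortalStatus& status) {
    if (status.flags & IPCZ_PORTAL_STATUS_DEAD) {
      assert(status.flags & IPCZ_PORTAL_STATUS_PEER_CLOSED);
    }
  }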

A regression test is added which flakily reproduces the failure when
this fix is not applied.

Bug: 1409259
Change-Id: Ic33dbb66084e149530db112be3e9a97aa984675c
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/4273245
Commit-Queue: Ken Rockot <rockot@google.com>
Reviewed-by: Alex Gough <ajgo@chromium.org>
Cr-Commit-Position: refs/heads/main@{#1107545}
NOKEYCHECK=True
GitOrigin-RevId: 5d111b12f803758b21be41bb8ac6655a457e8141
diff --git a/src/ipcz/router.cc b/src/ipcz/router.cc
index cdeb509..ae68187 100644
--- a/src/ipcz/router.cc
+++ b/src/ipcz/router.cc
@@ -306,10 +306,13 @@
       if (!inward_edge_ && !bridge_) {
         is_peer_closed_ = true;
         if (inbound_parcels_.IsSequenceFullyConsumed()) {
-          status_.flags |= IPCZ_PORTAL_STATUS_DEAD;
+          status_.flags |=
+              IPCZ_PORTAL_STATUS_PEER_CLOSED | IPCZ_PORTAL_STATUS_DEAD;
         }
         status_.num_remote_bytes = 0;
         status_.num_remote_parcels = 0;
+        traps_.UpdatePortalStatus(
+            context, status_, TrapSet::UpdateReason::kPeerClosed, dispatcher);
       }
     } else if (link_type.is_peripheral_inward()) {
       if (!outbound_parcels_.SetFinalSequenceLength(sequence_length)) {
@@ -360,10 +363,13 @@
       // Terminal routers may have trap events to fire.
       is_peer_closed_ = true;
       if (inbound_parcels_.IsSequenceFullyConsumed()) {
-        status_.flags |= IPCZ_PORTAL_STATUS_DEAD;
+        status_.flags |=
+            IPCZ_PORTAL_STATUS_PEER_CLOSED | IPCZ_PORTAL_STATUS_DEAD;
       }
       status_.num_remote_parcels = 0;
       status_.num_remote_bytes = 0;
+      traps_.UpdatePortalStatus(context, status_,
+                                TrapSet::UpdateReason::kPeerClosed, dispatcher);
     }
   }
 
@@ -447,7 +453,7 @@
     status_.num_local_parcels = inbound_parcels_.GetNumAvailableElements();
     status_.num_local_bytes = inbound_parcels_.GetTotalAvailableElementSize();
     if (inbound_parcels_.IsSequenceFullyConsumed()) {
-      status_.flags |= IPCZ_PORTAL_STATUS_DEAD;
+      status_.flags |= IPCZ_PORTAL_STATUS_PEER_CLOSED | IPCZ_PORTAL_STATUS_DEAD;
     }
     traps_.UpdatePortalStatus(context, status_,
                               TrapSet::UpdateReason::kLocalParcelConsumed,
@@ -518,7 +524,7 @@
     status_.num_local_parcels = inbound_parcels_.GetNumAvailableElements();
     status_.num_local_bytes = inbound_parcels_.GetTotalAvailableElementSize();
     if (inbound_parcels_.IsSequenceFullyConsumed()) {
-      status_.flags |= IPCZ_PORTAL_STATUS_DEAD;
+      status_.flags |= IPCZ_PORTAL_STATUS_PEER_CLOSED | IPCZ_PORTAL_STATUS_DEAD;
     }
     traps_.UpdatePortalStatus(context, status_,
                               TrapSet::UpdateReason::kLocalParcelConsumed,
@@ -600,7 +606,8 @@
         return nullptr;
       }
       if (router->inbound_parcels_.IsSequenceFullyConsumed()) {
-        router->status_.flags |= IPCZ_PORTAL_STATUS_DEAD;
+        router->status_.flags |=
+            IPCZ_PORTAL_STATUS_PEER_CLOSED | IPCZ_PORTAL_STATUS_DEAD;
       }
     }