verity: change maybe_read_entries to maybe_read_entry

Since the root is guaranteed to be a single block, we no longer need to
read multiple entries in maybe_read_entries.
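
For context, the single-block guarantee follows from how the tree depth
is picked. Below is a minimal standalone sketch (hypothetical values,
not the dm-bht source) assuming depth is chosen as the smallest d with
node_count^d >= block_count; under that assumption level 0 always holds
exactly one entry, which is the invariant the new BUG_ON asserts:

  /* Sketch only: level 0 holds ceil(block_count / node_count^depth)
   * entries, and the smallest sufficient depth forces that to 1.
   */
  #include <assert.h>
  #include <stdio.h>

  int main(void)
  {
          unsigned long node_count = 128;    /* hashes per tree block (assumed) */
          unsigned long block_count = 40000; /* data blocks covered (example) */
          unsigned long depth = 0, span = 1, level0;

          /* smallest depth such that node_count^depth >= block_count */
          while (span < block_count) {
                  span *= node_count;
                  depth++;
          }
          level0 = (block_count + span - 1) / span; /* ceil division */
          printf("depth=%lu level0.count=%lu\n", depth, level0);
          assert(level0 == 1);
          return 0;
  }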

BUG=9752
TEST=Ran dm-verity.git unit tests. Ran platform_DMVerityCorruption on H/W.

kernel-next.git Review URL: http://codereview.chromium.org/6821076

TBRing. Already LGTMed and committed to kernel-next.git

Change-Id: I23e19d13e7f3b079a5fe739be7fdd9f2df760335

R=wad@chromium.org,taysom@chromium.org

Review URL: http://codereview.chromium.org/6880133
diff --git a/dm-bht.c b/dm-bht.c
index 72af2a7..1b53ffc 100644
--- a/dm-bht.c
+++ b/dm-bht.c
@@ -305,6 +305,9 @@
 	if (status)
 		goto bad_entries_alloc;
 
+	/* We compute depth such that there is only 1 block at level 0. */
+	BUG_ON(bht->levels[0].count != 1);
+
 	return 0;
 
 bad_entries_alloc:
@@ -461,100 +464,68 @@
 }
 EXPORT_SYMBOL(dm_bht_write_completed);
 
-
-/* dm_bht_maybe_read_entries
- * Attempts to atomically acquire each entry, allocated any needed
- * memory, and issues I/O callbacks to load the hashes from disk.
- * Returns 0 if all entries are loaded and verified.  On error, the
- * return value is negative. When positive, it is the state values
- * ORd.
+/* dm_bht_maybe_read_entry
+ * Attempts to atomically acquire an entry, allocate any needed
+ * memory, and issue the I/O callback to load the hash from disk.
+ * The return value is negative on error; otherwise it is the
+ * entry's state value.
  */
-static int dm_bht_maybe_read_entries(struct dm_bht *bht, void *ctx,
-				     unsigned int depth, unsigned int index,
-				     unsigned int count, bool until_exist)
+static int dm_bht_maybe_read_entry(struct dm_bht *bht, void *ctx,
+				   unsigned int depth, unsigned int index)
 {
-	struct dm_bht_level *level;
-	struct dm_bht_entry *entry, *last_entry;
-	sector_t current_sector;
-	int state = 0;
-	int status = 0;
-	struct page *node_page = NULL;
+	struct dm_bht_level *level = &bht->levels[depth];
+	struct dm_bht_entry *entry = &level->entries[index];
+	sector_t current_sector = level->sector + to_sector(index * PAGE_SIZE);
+	struct page *node_page;
+	int state;
+
 	BUG_ON(depth >= bht->depth);
 
-	level = &bht->levels[depth];
-	if (count > level->count - index) {
-		DMERR("dm_bht_maybe_read_entries(d=%u,ei=%u,count=%u): "
-		      "index+count exceeds available entries %u",
-			depth, index, count, level->count);
-		return -EINVAL;
-	}
 	/* XXX: hardcoding PAGE_SIZE means that a perfectly valid image
 	 *      on one system may not work on a different kernel.
 	 * TODO(wad) abstract PAGE_SIZE with a bht->entry_size or
 	 *           at least a define and ensure bht->entry_size is
 	 *           sector aligned at least.
 	 */
-	current_sector = level->sector + to_sector(index * PAGE_SIZE);
-	for (entry = &level->entries[index], last_entry = entry + count;
-	     entry < last_entry;
-	     ++entry, current_sector += to_sector(PAGE_SIZE)) {
-		/* If the entry's state is UNALLOCATED, then we'll claim it
-		 * for allocation and loading.
-		 */
-		state = atomic_cmpxchg(&entry->state,
-				       DM_BHT_ENTRY_UNALLOCATED,
-				       DM_BHT_ENTRY_PENDING);
-		DMDEBUG("dm_bht_maybe_read_entries(d=%u,ei=%u,count=%u): "
-			"ei=%lu, state=%d",
-			depth, index, count,
-			(unsigned long)(entry - level->entries), state);
-		if (state <= DM_BHT_ENTRY_ERROR) {
-			DMCRIT("entry %u is in an error state", index);
-			return state;
-		}
 
-		/* Currently, the verified state is unused. */
-		if (state == DM_BHT_ENTRY_VERIFIED) {
-			if (until_exist)
-				return 0;
-			/* Makes 0 == verified. Is that ideal? */
-			continue;
-		}
+	/* If the entry's state is UNALLOCATED, then we'll claim it
+	 * for allocation and loading.
+	 */
+	state = atomic_cmpxchg(&entry->state,
+			       DM_BHT_ENTRY_UNALLOCATED,
+			       DM_BHT_ENTRY_PENDING);
+	DMDEBUG("dm_bht_maybe_read_entry(d=%u,ei=%u): ei=%lu, state=%d",
+		depth, index, (unsigned long)(entry - level->entries), state);
 
-		if (state != DM_BHT_ENTRY_UNALLOCATED) {
-			/* PENDING, READY, ... */
-			if (until_exist)
-				return state;
-			status |= state;
-			continue;
-		}
-		/* Current entry is claimed for allocation and loading */
-		node_page = (struct page *) mempool_alloc(bht->entry_pool,
-							  GFP_NOIO);
-		if (!node_page) {
-			DMCRIT("failed to allocate memory for "
-			       "entry->nodes from pool");
-			return -ENOMEM;
-		}
-		/* dm-bht guarantees page-aligned memory for callbacks. */
-		entry->nodes = page_address(node_page);
-		/* Let the caller know that not all the data is yet available */
-		status |= DM_BHT_ENTRY_REQUESTED;
-		/* Issue the read callback */
-		/* TODO(wad) error check callback here too */
-		DMDEBUG("dm_bht_maybe_read_entries(d=%u,ei=%u,count=%u): "
-			"reading %lu",
-			depth, index, count,
-			(unsigned long)(entry - level->entries));
-		bht->read_cb(ctx,   /* external context */
-			     current_sector,  /* starting sector */
-			     entry->nodes,  /* destination */
-			     to_sector(PAGE_SIZE),
-			     entry);  /* io context */
+	if (state != DM_BHT_ENTRY_UNALLOCATED)
+		goto out;
 
-	}
-	/* Should only be 0 if all entries were verified and not just ready */
-	return status;
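+	/* Let the caller know that the data is not yet available. */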
+	state = DM_BHT_ENTRY_REQUESTED;
+
+	/* Current entry is claimed for allocation and loading */
+	node_page = (struct page *) mempool_alloc(bht->entry_pool, GFP_NOIO);
+	if (!node_page)
+		goto nomem;
+	/* dm-bht guarantees page-aligned memory for callbacks. */
+	entry->nodes = page_address(node_page);
+
+	/* TODO(wad) error check callback here too */
+	DMDEBUG("dm_bht_maybe_read_entry(d=%u,ei=%u): reading %lu",
+		depth, index, (unsigned long)(entry - level->entries));
+	bht->read_cb(ctx, current_sector, entry->nodes,
+		     to_sector(PAGE_SIZE), entry);
+
+out:
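+	/* Report entries that were already in an error state. */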
+	if (state <= DM_BHT_ENTRY_ERROR)
+		DMCRIT("entry %u is in an error state", index);
+
+	return state;
+
+nomem:
+	DMCRIT("failed to allocate memory for entry->nodes from pool");
+	return -ENOMEM;
 }
 
 static int dm_bht_compare_hash(struct dm_bht *bht, u8 *known, u8 *computed)
@@ -824,15 +795,15 @@
 		struct dm_bht_entry *child = child_level->entries;
 		unsigned int i, j;
 
-		r = dm_bht_maybe_read_entries(bht, read_cb_ctx, depth,
-					      0, level->count, true);
-		if (r < 0) {
-			DMCRIT("an error occurred while reading entry");
-			goto out;
-		}
-
 		for (i = 0; i < level->count; i++, entry++) {
 			unsigned int count = bht->node_count;
+
+			r = dm_bht_maybe_read_entry(bht, read_cb_ctx, depth, i);
+			if (r < 0) {
+				DMCRIT("an error occurred while reading entry");
+				goto out;
+			}
+
 			if (i == (level->count - 1))
 				count = child_level->count % bht->node_count;
 			if (count == 0)
@@ -968,9 +939,7 @@
 	if (root_state != DM_BHT_ENTRY_VERIFIED) {
 		DMDEBUG("root data is not yet loaded");
 		/* If positive, it means some are pending. */
-		populated = dm_bht_maybe_read_entries(bht, read_cb_ctx, 0, 0,
-						      bht->levels[0].count,
-						      true);
+		populated = dm_bht_maybe_read_entry(bht, read_cb_ctx, 0, 0);
 		if (populated < 0) {
 			DMCRIT("an error occurred while reading level[0]");
 			/* TODO(wad) define std error codes */
@@ -988,9 +957,8 @@
 		/* Except for the root node case, we should only ever need
 		 * to load one entry along the path.
 		 */
-		read_status = dm_bht_maybe_read_entries(bht, read_cb_ctx,
-							depth, entry_index,
-							1, false);
+		read_status = dm_bht_maybe_read_entry(bht, read_cb_ctx,
+						      depth, entry_index);
 		if (unlikely(read_status < 0)) {
 			DMCRIT("failure occurred reading entry %u depth %u",
 			       entry_index, depth);