Add a flag to collect statistics on blocked reorderings
diff --git a/a.out b/a.out new file mode 100755 index 0000000..747a5b3 --- /dev/null +++ b/a.out Binary files differ
diff --git a/src/ir/effects.cpp b/src/ir/effects.cpp index 2f9dbdd..770d9f1 100644 --- a/src/ir/effects.cpp +++ b/src/ir/effects.cpp
@@ -17,6 +17,56 @@ #include "ir/effects.h" #include "wasm.h" +#include <iostream> +#include <map> +#include <mutex> +#include <string> + +namespace wasm { + +bool debugReorderingEnabled = getenv("BINARYEN_DEBUG_REORDERING") != nullptr; + +struct BlockingEffectCounter { + std::map<std::string, size_t> counts; + std::mutex mutex; + + BlockingEffectCounter() {} + + ~BlockingEffectCounter() {} + + static void print_counts(); + + void record(const char* reason) { + if (debugReorderingEnabled) { + std::lock_guard<std::mutex> lock(mutex); + counts[reason]++; + } + } +}; + +BlockingEffectCounter globalBlockingEffectCounter; + +void BlockingEffectCounter::print_counts() { + auto& counter = globalBlockingEffectCounter; + std::lock_guard<std::mutex> lock(counter.mutex); + if (debugReorderingEnabled && counter.counts.size() > 0) { + std::cerr << "Blocking Effect Counts:\n"; + for (const auto& pair : counter.counts) { + std::cerr << " " << pair.first << ": " << pair.second << "\n"; + } + std::cerr.flush(); + counter.counts.clear(); // To avoid double-printing + } +} + +void recordBlockingEffect(const char* reason) { + globalBlockingEffectCounter.record(reason); +} + +void printBlockingEffectCounts() { globalBlockingEffectCounter.print_counts(); } + +} // namespace wasm + namespace std { std::ostream& operator<<(std::ostream& o, wasm::EffectAnalyzer& effects) {
diff --git a/src/ir/effects.h b/src/ir/effects.h index af866b9..7f03180 100644 --- a/src/ir/effects.h +++ b/src/ir/effects.h
@@ -333,10 +333,21 @@ // can reorder them even if B traps (even if A has a global effect like a // global.set, since we assume B does not trap in traps-never-happen). bool orderedBefore(const EffectAnalyzer& other) const { + extern bool debugReorderingEnabled; + bool canReorder = true; +#define RECORD_BLOCK(reason) \ + do { \ + if (!debugReorderingEnabled) \ + return true; \ + extern void recordBlockingEffect(const char*); \ + recordBlockingEffect(reason); \ + canReorder = false; \ + } while (0) + // Cannot reorder control flow and side effects. if ((transfersControlFlow() && other.hasSideEffects()) || (other.transfersControlFlow() && hasSideEffects())) { - return true; + RECORD_BLOCK("control flow and side effects"); } // write-write, write-read, and read-write conflicts on possibly aliasing // locations prevent reordering. Calls may access these locations. @@ -356,22 +367,22 @@ ((other.writesArray || other.calls) && accessesArray()) || ((writesSharedArray || calls) && other.accessesSharedArray()) || ((other.writesSharedArray || other.calls) && accessesSharedArray())) { - return true; + RECORD_BLOCK("memory/table/struct/array alias conflict"); } // Cannot reorder anything before dangling pops. if (danglingPop) { - return true; + RECORD_BLOCK("dangling pop"); } // Shared location accesses cannot be reordered after (but may be able to be // reordered before) release stores. if (other.writeOrder >= MemoryOrder::AcqRel && accessesSharedGlobalState()) { - return true; + RECORD_BLOCK("shared access after release store"); } // Shared location accesses cannot be reordered before (but may be able to // be reordered after) acquire loads. if (readOrder >= MemoryOrder::AcqRel && other.accessesSharedGlobalState()) { - return true; + RECORD_BLOCK("shared access before acquire load"); } // No shared location accesses may be reordered in either direction around a // seqcst operation. 
@@ -381,36 +392,40 @@ ((other.readOrder == MemoryOrder::SeqCst || other.writeOrder == MemoryOrder::SeqCst) && accessesSharedGlobalState())) { - return true; + RECORD_BLOCK("shared access around seqcst"); } // write-write, write-read, and read-write conflicts on a local prevent // reordering. for (auto local : localsWritten) { if (other.localsRead.contains(local) || other.localsWritten.contains(local)) { - return true; + RECORD_BLOCK("local conflict"); + break; } } for (auto local : localsRead) { if (other.localsWritten.contains(local)) { - return true; + RECORD_BLOCK("local conflict"); + break; } } // write-write, write-read, and read-write conflicts on globals prevent // reordering. Unlike for locals, calls can access globals. if ((other.calls && accessesMutableGlobal()) || (calls && other.accessesMutableGlobal())) { - return true; + RECORD_BLOCK("global conflict with call"); } for (auto global : globalsWritten) { if (other.mutableGlobalsRead.contains(global) || other.globalsWritten.contains(global)) { - return true; + RECORD_BLOCK("global conflict"); + break; } } for (auto global : mutableGlobalsRead) { if (other.globalsWritten.contains(global)) { - return true; + RECORD_BLOCK("global conflict"); + break; } } // Note that the above includes disallowing the reordering of a trap with an @@ -418,7 +433,9 @@ // function, so transfersControlFlow would be true) - while we allow the // reordering of traps with each other, we do not reorder exceptions with // anything. - assert(!((trap && other.throws()) || (throws() && other.trap))); + if ((trap && other.throws()) || (throws() && other.trap)) { + RECORD_BLOCK("trap and throws"); + } // We can't reorder an implicit trap in a way that could alter what global // state is modified. 
However, in trapsNeverHappen mode we assume traps do // not occur in practice, which lets us ignore this, at least in the case @@ -429,10 +446,12 @@ other.transfersControlFlow()) { if ((trap && other.writesGlobalState()) || (other.trap && writesGlobalState())) { - return true; + RECORD_BLOCK("trap and global state modification"); } } - return false; + +#undef RECORD_BLOCK + return !canReorder; } bool orderedAfter(const EffectAnalyzer& other) { @@ -443,7 +462,13 @@ // either direction. // TODO: Update users to check order more precisely and remove this. bool invalidates(const EffectAnalyzer& other) { - return orderedBefore(other) || orderedAfter(other); + extern bool debugReorderingEnabled; + if (!debugReorderingEnabled) { + return orderedBefore(other) || orderedAfter(other); + } + bool before = orderedBefore(other); + bool after = orderedAfter(other); + return before || after; } void mergeIn(const EffectAnalyzer& other) {
diff --git a/src/support/file.cpp b/src/support/file.cpp index 918ab29..dd237ef 100644 --- a/src/support/file.cpp +++ b/src/support/file.cpp
@@ -138,7 +138,8 @@ return infile.tellg(); } -void wasm::flush_and_quick_exit(int code) { +namespace wasm { +void flush_and_quick_exit(int code) { // We expect C++ files to be flushed by their destructors already. Flush the // standard streams manually. std::cout << std::flush; @@ -163,3 +164,5 @@ _Exit(code); #endif } + +} // namespace wasm
diff --git a/src/tools/wasm-opt.cpp b/src/tools/wasm-opt.cpp index 5c2807c..a8a047c 100644 --- a/src/tools/wasm-opt.cpp +++ b/src/tools/wasm-opt.cpp
@@ -73,6 +73,10 @@ // main // +namespace wasm { +extern void printBlockingEffectCounts(); +} + int main(int argc, const char* argv[]) { Name entry; bool emitBinary = true; @@ -477,6 +481,7 @@ } } + wasm::printBlockingEffectCounts(); flush_and_quick_exit(0); return 0; } @@ -502,5 +507,6 @@ } } + wasm::printBlockingEffectCounts(); flush_and_quick_exit(0); }
diff --git a/src/tools/wasm-shell.cpp b/src/tools/wasm-shell.cpp index ca86499..63e13de 100644 --- a/src/tools/wasm-shell.cpp +++ b/src/tools/wasm-shell.cpp
@@ -654,6 +654,10 @@ } }; +namespace wasm { +extern void printBlockingEffectCounts(); +} + int main(int argc, const char* argv[]) { Name entry; std::set<size_t> skipped; @@ -688,5 +692,6 @@ std::cerr << "all checks passed.\n"; Colors::normal(std::cerr); + wasm::printBlockingEffectCounts(); flush_and_quick_exit(0); }
diff --git a/test.wat b/test.wat new file mode 100644 index 0000000..f5a83e1 --- /dev/null +++ b/test.wat
@@ -0,0 +1 @@ +(module (memory 1) (func $main (i32.store (i32.const 0) (i32.const 1)) (i32.store (i32.const 0) (i32.const 2)) (drop (i32.load (i32.const 0)))))