fix a race condition

There was an obvious race condition where both Accept and Close were
twiddling with PipeListener.closed, but there was also a less obvious
race condition where, if you called Close between createPipe and
connectNamedPipe, the connect would hang forever. Both are now fixed by
making the mutex protect the entirety of the accept/close methods
(aside from the WaitForSingleObject wait, which will correctly error
out if you call Close while it waits).
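
For illustration, here is a minimal, compilable sketch of the locking
pattern (the listener type, the pending field, and the wait callback
are invented for the example; they stand in for the real
acceptOverlapped/acceptHandle bookkeeping and the WaitForSingleObject
wait, and are not the actual npipe code):

    package main

    import (
        "errors"
        "sync"
    )

    // listener is a toy stand-in for PipeListener.
    type listener struct {
        mu      sync.Mutex
        closed  bool
        pending bool // a blocking wait is in flight
    }

    // accept mirrors the AcceptPipe locking: hold the mutex for
    // everything except the blocking wait itself.
    func (l *listener) accept(wait func() error) error {
        l.mu.Lock()
        defer l.mu.Unlock()
        if l.closed {
            return errors.New("listener closed")
        }
        l.pending = true
        // Unlock so close can run while we block; the deferred func
        // re-locks before the outer deferred Unlock fires (defers run
        // LIFO).
        l.mu.Unlock()
        defer func() {
            l.mu.Lock()
            l.pending = false
        }()
        // The real code blocks in WaitForSingleObject here; close
        // cancels the pending I/O, so this errors out instead of
        // hanging forever.
        return wait()
    }

    // close mirrors Close: the whole method runs under the mutex.
    func (l *listener) close() error {
        l.mu.Lock()
        defer l.mu.Unlock()
        if l.closed {
            return nil
        }
        l.closed = true
        if l.pending {
            // the real code cancels the outstanding overlapped I/O here
        }
        return nil
    }

    func main() {
        l := &listener{}
        _ = l.accept(func() error { return nil }) // toy wait: returns at once
        _ = l.close()
    }
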
diff --git a/example_windows_test.go b/example_windows_test.go
index 14595ae..cb3f93b 100644
--- a/example_windows_test.go
+++ b/example_windows_test.go
@@ -3,8 +3,9 @@
 import (
 	"bufio"
 	"fmt"
-	"github.com/natefinch/npipe"
 	"net"
+
+	"gopkg.in/natefinch/npipe.v2"
 )
 
 // Use Dial to connect to a server and read messages from it.
diff --git a/npipe_windows.go b/npipe_windows.go
index 48287e6..5e7cf13 100755
--- a/npipe_windows.go
+++ b/npipe_windows.go
@@ -233,6 +233,7 @@
 	if err != nil {
 		return nil, err
 	}
+
 	return &PipeListener{
 		addr:   PipeAddr(address),
 		handle: handle,
@@ -242,6 +243,8 @@
 // PipeListener is a named pipe listener. Clients should typically
 // use variables of type net.Listener instead of assuming named pipe.
 type PipeListener struct {
+	mu sync.Mutex
+
 	addr   PipeAddr
 	handle syscall.Handle
 	closed bool
@@ -252,8 +255,6 @@
 	// acceptOverlapped is set before waiting on a connection.
 	// If not waiting, it is nil.
 	acceptOverlapped *syscall.Overlapped
-	// acceptMutex protects the handle and overlapped structure.
-	acceptMutex sync.Mutex
 }
 
 // Accept implements the Accept method in the net.Listener interface; it
@@ -274,7 +275,14 @@
 // It might return an error if a client connected and immediately cancelled
 // the connection.
 func (l *PipeListener) AcceptPipe() (*PipeConn, error) {
-	if l == nil || l.addr == "" || l.closed {
+	if l == nil {
+		return nil, syscall.EINVAL
+	}
+
+	l.mu.Lock()
+	defer l.mu.Unlock()
+
+	if l.addr == "" || l.closed {
 		return nil, syscall.EINVAL
 	}
 
@@ -298,29 +306,33 @@
 		return nil, err
 	}
 	defer syscall.CloseHandle(overlapped.HEvent)
-	if err := connectNamedPipe(handle, overlapped); err != nil && err != error_pipe_connected {
-		if err == error_io_incomplete || err == syscall.ERROR_IO_PENDING {
-			l.acceptMutex.Lock()
-			l.acceptOverlapped = overlapped
-			l.acceptHandle = handle
-			l.acceptMutex.Unlock()
-			defer func() {
-				l.acceptMutex.Lock()
-				l.acceptOverlapped = nil
-				l.acceptHandle = 0
-				l.acceptMutex.Unlock()
-			}()
+	err = connectNamedPipe(handle, overlapped)
+	if err == nil || err == error_pipe_connected {
+		return &PipeConn{handle: handle, addr: l.addr}, nil
+	}
 
-			_, err = waitForCompletion(handle, overlapped)
-		}
-		if err == syscall.ERROR_OPERATION_ABORTED {
-			// Return error compatible to net.Listener.Accept() in case the
-			// listener was closed.
-			return nil, ErrClosed
-		}
-		if err != nil {
-			return nil, err
-		}
+	if err == error_io_incomplete || err == syscall.ERROR_IO_PENDING {
+		l.acceptOverlapped = overlapped
+		l.acceptHandle = handle
+	// Unlock here so Close can run while we block in the wait below.
+	// The deferred func re-locks the mutex before the outer deferred
+	// Unlock fires (defers run LIFO).
+		l.mu.Unlock()
+		defer func() {
+			l.mu.Lock()
+			l.acceptOverlapped = nil
+			l.acceptHandle = 0
+		// the outer defer handles the matching Unlock.
+		}()
+		_, err = waitForCompletion(handle, overlapped)
+	}
+	if err == syscall.ERROR_OPERATION_ABORTED {
+		// Return an error compatible with net.Listener.Accept() in case
+		// the listener was closed.
+		return nil, ErrClosed
+	}
+	if err != nil {
+		return nil, err
 	}
 	return &PipeConn{handle: handle, addr: l.addr}, nil
 }
@@ -328,6 +340,9 @@
 // Close stops listening on the address.
 // Already Accepted connections are not closed.
 func (l *PipeListener) Close() error {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+
 	if l.closed {
 		return nil
 	}
@@ -343,8 +358,6 @@
 		}
 		l.handle = 0
 	}
-	l.acceptMutex.Lock()
-	defer l.acceptMutex.Unlock()
 	if l.acceptOverlapped != nil && l.acceptHandle != 0 {
 		// Cancel the pending IO. This call does not block, so it is safe
 		// to hold onto the mutex above.
diff --git a/npipe_windows_test.go b/npipe_windows_test.go
index 8b3b25a..2a7da25 100755
--- a/npipe_windows_test.go
+++ b/npipe_windows_test.go
@@ -210,8 +210,11 @@
 	if err != nil {
 		t.Fatalf("Listen(%q): %v", address, err)
 	}
-	cancelled := make(chan struct{}, 0)
+
+	cancelled := make(chan struct{})
+	started := make(chan struct{})
 	go func() {
+		close(started)
 		conn, _ := ln.Accept()
 		if conn != nil {
 			t.Fatalf("Unexpected incoming connection: %v", conn)
@@ -219,6 +222,7 @@
 		}
 		cancelled <- struct{}{}
 	}()
+	<-started
 	// Close listener after 20ms. This should give the go routine enough time to be actually
 	// waiting for incoming connections inside ln.Accept().
 	time.AfterFunc(20*time.Millisecond, func() {
@@ -429,17 +433,15 @@
 	if err != nil {
 		t.Fatalf("Error listening on %q: %v", address, err)
 	}
-	defer ln.Close()
-	waitExit := make(chan bool)
+	waitExit := make(chan struct{})
 	defer func() {
 		ln.Close()
 		<-waitExit
 	}()
-	server := rpc.NewServer()
-	service := &RPCService{}
-	server.Register(service)
-	go server.Accept(ln)
+
 	go func() {
+		server := rpc.NewServer()
+		server.Register(&RPCService{})
 		for {
 			conn, err := ln.Accept()
 			if err != nil {
@@ -451,17 +453,16 @@
 			}
 			go server.ServeConn(conn)
 		}
-		waitExit <- true
+		close(waitExit)
 	}()
-	var conn *PipeConn
-	conn, err = Dial(address)
+	conn, err := Dial(address)
 	if err != nil {
 		t.Fatalf("Error dialing %q: %v", address, err)
 	}
 	client := rpc.NewClient(conn)
 	defer client.Close()
 	req := "dummy"
-	resp := ""
+	var resp string
 	if err = client.Call("RPCService.GetResponse", req, &resp); err != nil {
 		t.Fatalf("Error calling RPCService.GetResponse: %v", err)
 	}
@@ -506,34 +507,32 @@
 		convos := 5
 		clients := 10
 
-		done := make(chan bool)
-		quit := make(chan bool)
-
-		go aggregateDones(done, quit, clients)
+		var wg sync.WaitGroup
 
 		for x := 0; x < clients; x++ {
-			go startClient(address, done, convos, t)
+			wg.Add(1)
+			go startClient(address, &wg, convos, t)
 		}
 
 		go startServer(ln, convos, t)
 
 		select {
-		case <-quit:
+		case <-wait(&wg):
+		// good!
 		case <-time.After(time.Second):
-			t.Fatal("Failed to receive quit message after a reasonable timeout")
+			t.Fatal("Failed to finish after a reasonable timeout")
 		}
 	}
 }
 
-// aggregateDones simply aggregates messages from the done channel
-// until it sees total, and then sends a message on the quit channel
-func aggregateDones(done, quit chan bool, total int) {
-	dones := 0
-	for dones < total {
-		<-done
-		dones++
-	}
-	quit <- true
+// wait simply waits on the WaitGroup and closes the returned channel when done.
+func wait(wg *sync.WaitGroup) <-chan struct{} {
+	done := make(chan struct{})
+	go func() {
+		wg.Wait()
+		close(done)
+	}()
+	return done
 }
 
 // startServer accepts connections and spawns goroutines to handle them
@@ -575,15 +574,17 @@
 // startClient waits on a pipe at the given address. It expects to write a message and then
 // read a message from the pipe, convos times, and then sends a message on the done
 // channel
-func startClient(address string, done chan bool, convos int, t *testing.T) {
+func startClient(address string, wg *sync.WaitGroup, convos int, t *testing.T) {
+	defer wg.Done()
 	c := make(chan *PipeConn)
 	go asyncdial(address, c, t)
 
 	var conn *PipeConn
 	select {
 	case conn = <-c:
-	case <-time.After(250 * time.Millisecond):
-		t.Fatal("Client timed out waiting for dial to resolve")
+	case <-time.After(time.Second):
+		// Yes this is a long timeout, but sometimes it really does take a long time.
+		t.Fatalf("Client timed out waiting for dial to resolve")
 	}
 	r := bufio.NewReader(conn)
 	for x := 0; x < convos; x++ {
@@ -603,7 +604,6 @@
 	if err := conn.Close(); err != nil {
 		t.Fatalf("Error closing client side of pipe %v", err)
 	}
-	done <- true
 }
 
 // asyncdial is a helper that dials and returns the connection on the given channel.