Implement shared locking.

Implement shared locking semantics for Windows and POSIX targets. Add
tests for shared locks and shared/exclusive lock interaction.

Update README and testing documentation.
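
Example usage of the new API (a minimal sketch; the lock path is
illustrative and error handling is abbreviated):

    package main

    import (
        "log"

        "github.com/danjacques/gofslock/fslock"
    )

    func main() {
        const path = "/tmp/example.lock" // illustrative path

        // Shared lock: returns fslock.ErrLockHeld if the file is
        // currently locked exclusively.
        h, err := fslock.LockShared(path)
        if err != nil {
            log.Fatalf("could not acquire shared lock: %v", err)
        }
        defer h.Unlock()

        // Scoped variant: multiple shared holders may run concurrently;
        // an exclusive Lock on the same path succeeds only once all
        // shared holders have released.
        if err := fslock.WithShared(path, func() error { return nil }); err != nil {
            log.Fatalf("WithShared failed: %v", err)
        }
    }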
diff --git a/.travis.yml b/.travis.yml
index 7908de8..0208b04 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -10,6 +10,7 @@
   - 1.5
   - 1.6
   - 1.7
+  - 1.8
   - tip
 
 before_install:
diff --git a/README.md b/README.md
index 89477e0..f98fe4b 100644
--- a/README.md
+++ b/README.md
@@ -3,6 +3,15 @@
 
 [![GoDoc](https://godoc.org/github.com/danjacques/gofslock?status.svg)](http://godoc.org/github.com/danjacques/gofslock)
 [![Build Status](https://travis-ci.org/danjacques/gofslock.svg?branch=master)](https://travis-ci.org/danjacques/gofslock)
+[![Coverage Status](https://coveralls.io/repos/github/danjacques/gofslock/badge.svg?branch=master)](https://coveralls.io/github/danjacques/gofslock?branch=master)
+
+`gofslock` offers several features:
+* Exclusive and shared locking semantics.
+* Consistent intra- and inter-process locking behavior across major operating
+  systems (notably Linux, Mac, and Windows).
+* Works on all Go versions.
+* Depends only on the Go standard library.
+* Locking behavior and interactions are heavily tested.
 
 Feedback
 --------
@@ -14,7 +23,7 @@
 -------------
 
 Contributions to this project are welcome, though please
-[file an issue](https://github.com/danjacques/gofslock/issues/new).
+[file an issue](https://github.com/danjacques/gofslock/issues/new)
 before starting work on anything major.
 
 To get started contributing to this project,
diff --git a/fslock/interface.go b/fslock/interface.go
index 0e5eb5b..7231882 100644
--- a/fslock/interface.go
+++ b/fslock/interface.go
@@ -8,6 +8,15 @@
 // implementation offers its own nuances, and not all of those nuances are
 // addressed by this package.
 //
+// Locks can either be exclusive (default) or shared. For a given file, exactly
+// one of the following circumstances may be true at a given moment:
+//	- No locks are held.
+//	- A single exclusive lock may be held.
+//	- Multiple shared locks may be held.
+//
+// Notably, an exclusive lock may not be held on a file that has shared locks,
+// and a shared lock may not be held on a file that has an exclusive lock.
+//
 // fslock will work as long as you don't do anything particularly weird, such
 // as:
 //
@@ -28,15 +37,16 @@
 // Lock is a convenience method for L's Lock.
 func Lock(path string) (Handle, error) { return LockBlocking(path, nil) }
 
-// LockBlocking acquires a filesystem lock for the given path. If the lock is
-// already held, LockBlocking will repeatedly attempt to acquire it using
-// the supplied Blocker in between attempts.
+// LockBlocking acquires an exclusive filesystem lock for the given path. If the
+// lock is already held, LockBlocking will repeatedly attempt to acquire it
+// using the supplied Blocker in between attempts.
 //
-// If the lock could not be acquired because it is held by another entity,
-// ErrLockHeld will be returned. If an error is encountered while locking,
-// or an error is returned by b, that error will be returned.
+// If no Blocker is provided and the lock could not be acquired because it is
+// held by another entity, ErrLockHeld will be returned. If an error is
+// encountered while locking, or an error is returned by b, that error will be
+// returned.
 //
-// Lock is a convenience method for L's Lock.
+// LockBlocking is a convenience method for L's Lock.
 func LockBlocking(path string, b Blocker) (Handle, error) {
 	l := L{
 		Path:  path,
@@ -63,3 +73,51 @@
 	}
 	return l.With(fn)
 }
+
+// LockShared acquires a shared filesystem lock for the given path.
+//
+// If the lock could not be acquired because it is held by another entity,
+// ErrLockHeld will be returned. If an error is encountered while locking,
+// that error will be returned.
+//
+// LockShared is a convenience method for L's Lock.
+func LockShared(path string) (Handle, error) { return LockSharedBlocking(path, nil) }
+
+// LockSharedBlocking acquires a shared filesystem lock for the given path. If
+// the lock is already held, LockSharedBlocking will repeatedly attempt to
+// acquire it using the supplied Blocker in between attempts.
+//
+// If no Blocker is provided and the lock could not be acquired because it is
+// held by another entity, ErrLockHeld will be returned. If an error is
+// encountered while locking, or an error is returned by b, that error will be
+// returned.
+//
+// LockSharedBlocking is a convenience method for L's Lock.
+func LockSharedBlocking(path string, b Blocker) (Handle, error) {
+	l := L{
+		Path:   path,
+		Shared: true,
+		Block:  b,
+	}
+	return l.Lock()
+}
+
+// WithShared is a convenience function to create a shared lock, execute a
+// function while holding that lock, and then release the lock on completion.
+//
+// See L's With method for details.
+func WithShared(path string, fn func() error) error { return WithSharedBlocking(path, nil, fn) }
+
+// WithSharedBlocking is a convenience function to create a shared lock,
+// execute a function while holding that lock, and then release the lock on
+// completion.
+// The supplied block function is used to retry (see L's Block field).
+//
+// See L's With method for details.
+func WithSharedBlocking(path string, b Blocker, fn func() error) error {
+	l := L{
+		Path:   path,
+		Shared: true,
+		Block:  b,
+	}
+	return l.With(fn)
+}
diff --git a/fslock/lock.go b/fslock/lock.go
index 0b96a7c..b63366a 100644
--- a/fslock/lock.go
+++ b/fslock/lock.go
@@ -36,6 +36,12 @@
 	// Path is the path of the file to lock.
 	Path string
 
+	// Shared, if true, indicates that this should be a shared lock rather than
+	// an exclusive lock.
+	//
+	// See package documentation for details.
+	Shared bool
+
 	// Content, if populated, is the lock file content. Content is written to the
 	// file when the lock call creates it, and only if the lock call actually
 	// creates the file. Failure to write Content is non-fatal.
diff --git a/fslock/lock_posix.go b/fslock/lock_posix.go
index 45fb363..b01961f 100644
--- a/fslock/lock_posix.go
+++ b/fslock/lock_posix.go
@@ -26,13 +26,25 @@
 	return globalPosixLockState.lockImpl(l)
 }
 
+// posixLockEntry is an entry in a posixLockState representing a single inode.
+type posixLockEntry struct {
+	// file is the underlying open file descriptor.
+	file *os.File
+
+	// shared is true if this is a shared lock entry, false otherwise.
+	shared bool
+	// sharedCount, if "shared" is true, is the number of in-process open shared
+	// handles.
+	sharedCount uint64
+}
+
 // posixLockState maintains an internal state of filesystem locks.
 //
 // For runtime usage, this is maintained in the global variable,
 // globalPosixLockState.
 type posixLockState struct {
 	sync.RWMutex
-	held map[uint64]*os.File
+	held map[uint64]*posixLockEntry
 }
 
 func (pls *posixLockState) lockImpl(l *L) (Handle, error) {
@@ -55,12 +67,15 @@
 
 	// Do we already have a lock on this file?
 	pls.RLock()
-	has := pls.held[stat.Ino]
+	ple := pls.held[stat.Ino]
 	pls.RUnlock()
 
-	if has != nil {
-		// Some other code path within our process already holds the lock.
-		return nil, ErrLockHeld
+	if ple != nil {
+		// If we are requesting an exclusive lock, or if "ple" is held exclusively,
+		// then deny the request.
+		if !(l.Shared && ple.shared) {
+			return nil, ErrLockHeld
+		}
 	}
 
 	// Attempt to register the lock.
@@ -68,15 +83,29 @@
 	defer pls.Unlock()
 
 	// Check again, with write lock held.
-	if has := pls.held[stat.Ino]; has != nil {
-		return nil, ErrLockHeld
+	if ple := pls.held[stat.Ino]; ple != nil {
+		if !(l.Shared && ple.shared) {
+			return nil, ErrLockHeld
+		}
+
+		// We're requesting a shared lock, and "ple" is shared, so we can grant a
+		// handle.
+		ple.sharedCount++
+		return &posixLockHandle{pls, stat.Ino, true}, nil
 	}
 
 	// Use "flock()" to get a lock on the file.
 	//
 	// LOCK_EX: Exclusive lock
 	// LOCK_NB: Non-blocking.
-	if err := syscall.Flock(int(fd.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
+	flags := syscall.LOCK_NB
+	if l.Shared {
+		flags |= syscall.LOCK_SH
+	} else {
+		flags |= syscall.LOCK_EX
+	}
+
+	if err := syscall.Flock(int(fd.Fd()), flags); err != nil {
 		if errno, ok := err.(syscall.Errno); ok {
 			switch errno {
 			case syscall.EWOULDBLOCK:
@@ -90,16 +119,23 @@
 	}
 
 	if pls.held == nil {
-		pls.held = make(map[uint64]*os.File)
+		pls.held = make(map[uint64]*posixLockEntry)
 	}
-	pls.held[stat.Ino] = fd
+
+	ple = &posixLockEntry{
+		file:        fd,
+		shared:      l.Shared,
+		sharedCount: 1, // Ignored for exclusive.
+	}
+	pls.held[stat.Ino] = ple
 	fd = nil // Don't Close in defer().
-	return &posixLockHandle{pls, stat.Ino}, nil
+	return &posixLockHandle{pls, stat.Ino, ple.shared}, nil
 }
 
 type posixLockHandle struct {
-	pls *posixLockState
-	ino uint64
+	pls    *posixLockState
+	ino    uint64
+	shared bool
 }
 
 func (l *posixLockHandle) Unlock() error {
@@ -110,14 +146,27 @@
 	l.pls.Lock()
 	defer l.pls.Unlock()
 
-	fd := l.pls.held[l.ino]
-	if fd == nil {
+	ple := l.pls.held[l.ino]
+	if ple == nil {
 		panic(fmt.Errorf("lock for inode %d is not held", l.ino))
 	}
-	if err := fd.Close(); err != nil {
-		return err
+	if l.shared {
+		if !ple.shared {
+			panic(fmt.Errorf("lock for inode %d is not shared, but handle is shared", l.ino))
+		}
+		ple.sharedCount--
 	}
-	delete(l.pls.held, l.ino)
+
+	if !ple.shared || ple.sharedCount == 0 {
+		// Last holder of the lock. Clean it up and unregister.
+		if err := ple.file.Close(); err != nil {
+			return err
+		}
+		delete(l.pls.held, l.ino)
+	}
+
+	// Clear the lock's "pls" field so that future calls to Unlock will fail
+	// immediately.
 	l.pls = nil
 	return nil
 }
diff --git a/fslock/lock_test.go b/fslock/lock_test.go
index c48d925..c5095a9 100644
--- a/fslock/lock_test.go
+++ b/fslock/lock_test.go
@@ -6,18 +6,31 @@
 
 import (
 	"bytes"
+	"flag"
 	"fmt"
 	"io"
 	"io/ioutil"
+	"log"
 	"os"
 	"os/exec"
 	"path/filepath"
 	"strconv"
 	"strings"
+	"sync/atomic"
 	"testing"
 	"time"
 )
 
+var logSubprocess = flag.Bool("test.logsubprocess", false, "Enable verbose subprocess logging.")
+
+const logSubprocessEnv = "_FSLOCK_LOG_SUBPROCESS"
+
+func logf(format string, args ...interface{}) {
+	if v := os.Getenv(logSubprocessEnv); v != "" {
+		log.Printf(format, args...)
+	}
+}
+
 func withTempDir(t *testing.T, prefix string, fn func(string)) {
 	wd, err := os.Getwd()
 	if err != nil {
@@ -114,9 +127,9 @@
 //
 // Success is if all of the subprocesses succeeded and the output file has the
 // correct value.
+//
+// NOTE: We don't run this test in parallel because it can tax CI resources.
 func TestMultiProcessing(t *testing.T) {
-	t.Parallel()
-
 	getFiles := func(tdir string) (lock, out string) {
 		lock = filepath.Join(tdir, "lock")
 		out = filepath.Join(tdir, "out")
@@ -141,6 +154,9 @@
 		return
 	}
 
+	// TODO: Replace with os.Executable for Go 1.8.
+	executable := os.Args[0]
+
 	// This pipe will be used to signal that the processes should start the test.
 	signalR, signalW, err := os.Pipe()
 	if err != nil {
@@ -164,23 +180,27 @@
 		}
 		t.Logf("wrote initial output file to [%s]", out)
 
-		// TODO: Replace with os.Executable for Go 1.8.
-		executable := os.Args[0]
-
 		const count = 256
 		cmds := make([]*exec.Cmd, count)
 
 		// Kill all of our processes on cleanup, regardless of success/failure.
 		defer func() {
 			for _, cmd := range cmds {
-				_ = cmd.Process.Kill()
-				_ = cmd.Wait()
+				if cmd != nil {
+					_ = cmd.Process.Kill()
+					_ = cmd.Wait()
+				}
 			}
 		}()
 
 		for i := range cmds {
+			env := append(os.Environ(), fmt.Sprintf("%s=%s", envSentinel, tdir))
+			if *logSubprocess {
+				env = append(env, fmt.Sprintf("%s=1", logSubprocessEnv))
+			}
+
 			cmd := exec.Command(executable, "-test.run", "^TestMultiProcessing$")
-			cmd.Env = append(os.Environ(), fmt.Sprintf("%s=%s", envSentinel, tdir))
+			cmd.Env = env
 			cmd.Stdin = signalR
 			cmd.Stdout = respW
 			cmd.Stderr = os.Stderr
@@ -255,13 +275,13 @@
 func testMultiProcessingSubprocess(lock, out string, respW io.Writer, signalR io.Reader) byte {
 	// Signal that we're ready to start.
 	if _, err := respW.Write([]byte{0}); err != nil {
-		fmt.Fprintf(os.Stderr, "failed to send ready signal: %v", err)
+		logf("failed to send ready signal: %v", err)
 		return 1
 	}
 
 	// Wait for our signal (signalR closing).
 	if _, err := ioutil.ReadAll(signalR); err != nil {
-		fmt.Fprintf(os.Stderr, "failed to wait for signal: %v", err)
+		logf("failed to wait for signal: %v", err)
 		return 2
 	}
 
@@ -291,7 +311,7 @@
 		return nil
 	})
 	if err != nil {
-		fmt.Fprintln(os.Stderr, err.Error())
+		logf("encountered error: %s", err)
 		return rc
 	}
 	return 0
@@ -373,7 +393,7 @@
 func TestUnlock(t *testing.T) {
 	t.Parallel()
 
-	withTempDir(t, "content", func(tdir string) {
+	withTempDir(t, "unlock", func(tdir string) {
 		lock := filepath.Join(tdir, "lock")
 		h, err := Lock(lock)
 		if err != nil {
@@ -403,3 +423,321 @@
 		t.Logf("panicked with: %v", panicVal)
 	})
 }
+
+// TestSharedConcurrent tests file locking within the same process using
+// concurrency (via goroutines).
+//
+// For this to really be effective, the test should be run with "-race", since
+// it's *possible* that all of the goroutines end up cooperating in spite of a
+// bug.
+func TestSharedConcurrent(t *testing.T) {
+	t.Parallel()
+
+	withTempDir(t, "shared_concurrent", func(tdir string) {
+		lock := filepath.Join(tdir, "lock")
+
+		// Each goroutine will obtain a shared lock simultaneously. Once all
+		// goroutines hold the lock, another will attempt to exclusively take the
+		// lock. We will ensure that this succeeds only after all of the shared
+		// locks have been released.
+		const count = 1024
+		var sharedCounter int32
+		hasLockC := make(chan struct{}, count)
+		waitForEveryoneC := make(chan struct{})
+		sharedErrC := make(chan error, count)
+
+		for i := 0; i < count; i++ {
+			go func() {
+				sharedErrC <- WithShared(lock, func() error {
+					atomic.AddInt32(&sharedCounter, 1)
+
+					// Note that we have the lock.
+					hasLockC <- struct{}{}
+
+					// Wait until everyone else does, too.
+					<-waitForEveryoneC
+
+					atomic.AddInt32(&sharedCounter, -1)
+					return nil
+				})
+			}()
+		}
+
+		// Wait for all of the goroutines to hold their shared lock.
+		exclusiveTriedAndFailedC := make(chan struct{})
+		exclusiveHasLockC := make(chan int32)
+		exclusiveErrC := make(chan error)
+		for i := 0; i < count; i++ {
+			<-hasLockC
+
+			// After the first goroutine holds the lock, start our exclusive lock
+			// goroutine.
+			if i == 0 {
+				go func() {
+					attempt := 0
+					exclusiveErrC <- WithBlocking(lock, func() error {
+						// (Blocker)
+						if attempt == 0 {
+							close(exclusiveTriedAndFailedC)
+						}
+						attempt++
+						time.Sleep(time.Millisecond)
+						return nil
+					}, func() error {
+						exclusiveHasLockC <- sharedCounter
+						return nil
+					})
+				}()
+			}
+		}
+
+		// Our shared lock is still being held, waiting for "waitForEveryoneC".
+		// Snapshot our shared counter, which should not be in contention.
+		if v := int(sharedCounter); v != count {
+			t.Errorf("Shared counter has unexpected value: %d", v)
+		}
+
+		// Wait for our exclusive lock to try and fail.
+		<-exclusiveTriedAndFailedC
+
+		// Let all of our shared locks release.
+		close(waitForEveryoneC)
+		for i := 0; i < count; i++ {
+			if err := <-sharedErrC; err != nil {
+				t.Errorf("Shared lock returned error: %s", err)
+			}
+		}
+		close(sharedErrC)
+
+		// Wait for our exclusive lock to finish.
+		if v := <-exclusiveHasLockC; v != 0 {
+			t.Errorf("Exclusive lock reported non-zero shared counter value: %d", v)
+		}
+		if err := <-exclusiveErrC; err != nil {
+			t.Errorf("Exclusive lock reported error: %s", err)
+		}
+	})
+}
+
+// TestSharedMultiProcessing tests access from multiple separate processes.
+//
+// We open by holding an exclusive lock, then spawning all of our subprocesses.
+// Each subprocess will try and fail to acquire the lock, then write a
+// "failed_shared" file to note this failure.
+//
+// After all "failed" files have been confirmed, we release our exclusive lock.
+// At this point, each process will acquire its shared lock, write a
+// "has_shared_lock" file to provie that it hols a shared lock, and wait for all
+// of the other processes' "has_shared_lock" files to show.
+//
+// Our main process, meanwhile, is scanning for any "has_shared_lock" files.
+// Once it sees one, it attempts to obtain an exclusive lock again. After the
+// first exclusive lock failure, it will write a "failed_exclusive" file.
+//
+// Once a process holding a shared lock observes the "failed_exclusive" file,
+// it will terminate.
+//
+// Finally, the exclusive lock will be obtained and the test will complete.
+//
+// NOTE: We don't run this test in parallel because it can tax CI resources.
+func TestSharedMultiProcessing(t *testing.T) {
+	getFiles := func(tdir string) (lock string) {
+		lock = filepath.Join(tdir, "lock")
+		return
+	}
+
+	// Are we a testing process instance, or the main process?
+	const envSentinel = "_FSLOCK_TEST_WORKDIR"
+	if state := os.Getenv(envSentinel); state != "" {
+		parts := strings.SplitN(state, ":", 2)
+		if len(parts) != 2 {
+			os.Exit(1)
+		}
+		name, path := parts[0], parts[1]
+
+		lock := getFiles(path)
+		rv := testSharedMultiProcessingSubprocess(name, lock, path)
+		os.Exit(rv)
+		return
+	}
+
+	// TODO: Replace with os.Executable for Go 1.8.
+	executable := os.Args[0]
+
+	withTempDir(t, "shared_multiprocessing", func(tdir string) {
+		const count = 256
+		const delay = 10 * time.Millisecond
+
+		lock := getFiles(tdir)
+
+		// Start our exclusive lock monitor goroutine.
+		exclusiveLockHeldC := make(chan struct{})
+		monitor := func() error {
+			err := With(lock, func() error {
+				t.Logf("monitor: acquired exclusive lock")
+
+				// Notify that we hold the exclusive lock.
+				close(exclusiveLockHeldC)
+
+				// Wait for "failed_shared" files.
+				if err := waitForFiles(tdir, "failed_shared", count); err != nil {
+					return err
+				}
+				t.Logf("monitor: observed 'failed_shared' files")
+
+				// Release exclusive lock...
+				return nil
+			})
+			if err != nil {
+				return err
+			}
+
+			// Wait for "has_shared_lock" files.
+			if err := waitForFiles(tdir, "has_shared_lock", count); err != nil {
+				return err
+			}
+			t.Logf("monitor: observed 'has_shared_lock' files")
+
+			// Try and get an exclusive lock. When we fail, write "failed_exclusive"
+			// file.
+			attempts := 0
+			return WithBlocking(lock, func() error {
+				t.Logf("monitor: failed to re-acquire exclusive lock (%d)", attempts)
+				if attempts == 0 {
+					if err := writeFileStamp(tdir, "failed_exclusive", "master"); err != nil {
+						return err
+					}
+				}
+				attempts++
+				time.Sleep(delay)
+				return nil
+			}, func() error {
+				// All shared locks are released, gained exclusive lock.
+				t.Logf("monitor: acquired exclusive lock")
+				return nil
+			})
+		}
+		errC := make(chan error)
+		go func() {
+			errC <- monitor()
+		}()
+
+		// Wait for our exclusive lock to be held. Then spawn our subprocesses.
+		//
+		// In defer, kill any spawned processes on cleanup, regardless of
+		// success/failure.
+		<-exclusiveLockHeldC
+
+		cmds := make([]*exec.Cmd, count)
+		defer func() {
+			for _, cmd := range cmds {
+				if cmd != nil {
+					_ = cmd.Process.Kill()
+					_ = cmd.Wait()
+				}
+			}
+		}()
+
+		for i := range cmds {
+			env := append(os.Environ(), fmt.Sprintf("%s=%d:%s", envSentinel, i, tdir))
+			if *logSubprocess {
+				env = append(env, fmt.Sprintf("%s=1", logSubprocessEnv))
+			}
+
+			cmd := exec.Command(executable, "-test.run", "^TestSharedMultiProcessing$")
+			cmd.Env = env
+			cmd.Stderr = os.Stderr
+			if err := cmd.Start(); err != nil {
+				t.Fatalf("failed to start subprocess: %v", err)
+			}
+			cmds[i] = cmd
+		}
+
+		// Reap all of our subprocesses.
+		for _, cmd := range cmds {
+			if err := cmd.Wait(); err != nil {
+				t.Errorf("failed to wait for process: %v", err)
+			}
+		}
+
+		// Our exclusive lock should have exited without an error.
+		if err := <-errC; err != nil {
+			t.Errorf("exclusive lock monitor failed with error: %s", err)
+		}
+	})
+}
+
+func testSharedMultiProcessingSubprocess(name, lock, dir string) int {
+	const delay = 10 * time.Millisecond
+
+	attempts := 0
+	err := WithSharedBlocking(lock, func() error {
+		logf("%s: failed to acquire shared lock (%d)", name, attempts)
+		if attempts == 0 {
+			if err := writeFileStamp(dir, "failed_shared", name); err != nil {
+				return err
+			}
+		}
+		attempts++
+		time.Sleep(delay)
+		return nil
+	}, func() error {
+		// We received the shared lock. Write our stamp file noting this.
+		logf("%s: acquired shared lock", name)
+		if err := writeFileStamp(dir, "has_shared_lock", name); err != nil {
+			return err
+		}
+
+		// Wait for "failed_exclusive" file.
+		if err := waitForFiles(dir, "failed_exclusive", 1); err != nil {
+			return err
+		}
+		logf("%s: observed 'failed_exclusive' file", name)
+
+		return nil
+	})
+	if err != nil {
+		logf("%s: terminating with error: %s", err)
+		return 1
+	}
+
+	logf("%s: terminating successfully", name)
+	return 0
+}
+
+func scanForFiles(dir, prefix string) (int, error) {
+	fileInfos, err := ioutil.ReadDir(dir)
+	if err != nil {
+		return 0, err
+	}
+
+	count := 0
+	for _, fi := range fileInfos {
+		if strings.HasPrefix(fi.Name(), prefix) {
+			count++
+		}
+	}
+	return count, nil
+}
+
+func waitForFiles(dir, prefix string, count int) error {
+	for {
+		num, err := scanForFiles(dir, prefix)
+		if err != nil {
+			return err
+		}
+		if num >= count {
+			return nil
+		}
+		time.Sleep(10 * time.Millisecond)
+	}
+}
+
+func writeFileStamp(dir, prefix, name string) error {
+	path := filepath.Join(dir, fmt.Sprintf("%s.%s", prefix, name))
+	fd, err := os.Create(path)
+	if err != nil {
+		return err
+	}
+	return fd.Close()
+}
diff --git a/fslock/lock_windows.go b/fslock/lock_windows.go
index 1e05ae5..709d82f 100644
--- a/fslock/lock_windows.go
+++ b/fslock/lock_windows.go
@@ -13,7 +13,7 @@
 const errno_ERROR_SHARING_VIOLATION syscall.Errno = 32
 
 func lockImpl(l *L) (Handle, error) {
-	fd, created, err := exclusiveGetOrCreateFile(l.Path)
+	fd, created, err := getOrCreateFile(l.Path, l.Shared)
 	if err != nil {
 		return nil, err
 	}
@@ -43,7 +43,7 @@
 	return nil
 }
 
-func exclusiveGetOrCreateFile(path string) (*os.File, bool, error) {
+func getOrCreateFile(path string, shared bool) (*os.File, bool, error) {
 	mod := syscall.NewLazyDLL("kernel32.dll")
 	proc := mod.NewProc("CreateFileW")
 
@@ -52,10 +52,15 @@
 		return nil, false, err
 	}
 
+	dwShareMode := uint32(0) // Exclusive (no sharing)
+	if shared {
+		dwShareMode = syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE
+	}
+
 	a, _, err := proc.Call(
 		uintptr(unsafe.Pointer(pathp)),
 		uintptr(syscall.GENERIC_READ|syscall.GENERIC_WRITE),
-		0,          // No sharing.
+		uintptr(dwShareMode),
 		uintptr(0), // No security attributes.
 		uintptr(syscall.OPEN_ALWAYS),
 		uintptr(syscall.FILE_ATTRIBUTE_NORMAL),
diff --git a/pre-commit-go.yml b/pre-commit-go.yml
index e6e7d43..ce451f7 100644
--- a/pre-commit-go.yml
+++ b/pre-commit-go.yml
@@ -46,23 +46,12 @@
       test:
       - extra_args:
         - -short
-    max_duration: 60
+    max_duration: 1200
   pre-push:
     checks:
-      coverage:
-      - use_global_inference: false
-        use_coveralls: false
-        global:
-          min_coverage: 50
-          max_coverage: 100
-        per_dir_default:
-          min_coverage: 1
-          max_coverage: 100
-        per_dir: {}
       goimports:
       - {}
       test:
       - extra_args:
         - -v
-        - -race
-    max_duration: 60
+    max_duration: 1200