Merged trunk.
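
This merge brings in benchmark support modeled on the standard testing
package. As the diff below shows, benchmark methods use the "Benchmark" name
prefix and loop over c.N; a minimal sketch (the suite, helpers, and body here
are hypothetical, only the shape comes from the added code):

	func (s *MySuite) BenchmarkParse(c *gocheck.C) {
		data := makeInput() // setup excluded from timing via ResetTimer
		c.ResetTimer()
		for i := 0; i < c.N; i++ {
			parse(data)
		}
	}

Benchmarks are selected with the new -gocheck.b flag, and -gocheck.btime sets
the approximate run time per benchmark (one second by default).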
diff --git a/.bzrignore b/.bzrignore
index 340cde7..e39772e 100644
--- a/.bzrignore
+++ b/.bzrignore
@@ -1,2 +1,3 @@
 _*
 [856].out
+[856].out.exe
diff --git a/TODO b/TODO
index b7b9183..3349827 100644
--- a/TODO
+++ b/TODO
@@ -1,3 +1,2 @@
 - Assert(slice, Contains, item)
 - Parallel test support
-- Benchmark support
diff --git a/benchmark.go b/benchmark.go
new file mode 100644
index 0000000..58fc41a
--- /dev/null
+++ b/benchmark.go
@@ -0,0 +1,176 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gocheck
+
+import (
+	"fmt"
+	"reflect"
+	"runtime"
+	"time"
+)
+
+// timer tracks the time spent running a test or benchmark, and holds the
+// iteration count, byte count, and target duration used when benchmarking.
+type timer struct {
+	start     time.Time // Time test or benchmark started
+	duration  time.Duration
+	N         int
+	bytes     int64
+	timerOn   bool
+	benchTime time.Duration
+}
+
+// StartTimer starts timing a test. This function is called automatically
+// before a benchmark starts, but it can also be used to resume timing after
+// a call to StopTimer.
+func (c *C) StartTimer() {
+	if !c.timerOn {
+		c.start = time.Now()
+		c.timerOn = true
+	}
+}
+
+// StopTimer stops timing a test. This can be used to pause the timer
+// while performing complex initialization that you don't
+// want to measure.
+func (c *C) StopTimer() {
+	if c.timerOn {
+		c.duration += time.Now().Sub(c.start)
+		c.timerOn = false
+	}
+}
+
+// ResetTimer sets the elapsed benchmark time to zero.
+// It does not affect whether the timer is running.
+func (c *C) ResetTimer() {
+	if c.timerOn {
+		c.start = time.Now()
+	}
+	c.duration = 0
+}
+
+// SetBytes records the number of bytes that the benchmark processes on each
+// iteration. If this is called in a benchmark, the throughput will also be
+// reported in MB/s.
+func (c *C) SetBytes(n int64) {
+	c.bytes = n
+}
+
+func (c *C) nsPerOp() int64 {
+	if c.N <= 0 {
+		return 0
+	}
+	return c.duration.Nanoseconds() / int64(c.N)
+}
+
+func (c *C) mbPerSec() float64 {
+	if c.bytes <= 0 || c.duration <= 0 || c.N <= 0 {
+		return 0
+	}
+	return (float64(c.bytes) * float64(c.N) / 1e6) / c.duration.Seconds()
+}
+
+func (c *C) timerString() string {
+	if c.N <= 0 {
+		return fmt.Sprintf("%3.3fs", float64(c.duration.Nanoseconds())/1e9)
+	}
+	mbs := c.mbPerSec()
+	mb := ""
+	if mbs != 0 {
+		mb = fmt.Sprintf("\t%7.2f MB/s", mbs)
+	}
+	nsop := c.nsPerOp()
+	ns := fmt.Sprintf("%10d ns/op", nsop)
+	if c.N > 0 && nsop < 100 {
+		// The format specifiers here make sure that
+		// the ones digits line up for all three possible formats.
+		if nsop < 10 {
+			ns = fmt.Sprintf("%13.2f ns/op", float64(c.duration.Nanoseconds())/float64(c.N))
+		} else {
+			ns = fmt.Sprintf("%12.1f ns/op", float64(c.duration.Nanoseconds())/float64(c.N))
+		}
+	}
+	return fmt.Sprintf("%8d\t%s%s", c.N, ns, mb)
+}
+
+func min(x, y int) int {
+	if x > y {
+		return y
+	}
+	return x
+}
+
+func max(x, y int) int {
+	if x < y {
+		return y
+	}
+	return x
+}
+
+// roundDown10 rounds a number down to the nearest power of 10.
+func roundDown10(n int) int {
+	var tens = 0
+	// tens = floor(log_10(n))
+	for n > 10 {
+		n = n / 10
+		tens++
+	}
+	// result = 10^tens
+	result := 1
+	for i := 0; i < tens; i++ {
+		result *= 10
+	}
+	return result
+}
+
+// roundUp rounds n up to a number of the form [1eX, 2eX, 5eX].
+func roundUp(n int) int {
+	base := roundDown10(n)
+	if n < (2 * base) {
+		return 2 * base
+	}
+	if n < (5 * base) {
+		return 5 * base
+	}
+	return 10 * base
+}
+
+// benchmarkN runs a single benchmark for the specified number of iterations.
+func benchmarkN(c *C, n int) {
+	// Try to get a comparable environment for each run
+	// by clearing garbage from previous runs.
+	runtime.GC()
+	c.N = n
+	c.ResetTimer()
+	c.StartTimer()
+	c.method.Call([]reflect.Value{reflect.ValueOf(c)})
+	c.StopTimer()
+}
+
+// benchmark runs the benchmark function.  It gradually increases the number
+// of benchmark iterations until the benchmark has run for the configured
+// benchmark time (one second by default), to get a reasonable measurement.
+func benchmark(c *C) {
+	// Run the benchmark for a single iteration in case it's expensive.
+	n := 1
+	benchmarkN(c, n)
+	// Run the benchmark for at least the specified amount of time.
+	for c.status == succeededSt && c.duration < c.benchTime && n < 1e9 {
+		last := n
+		// Predict the number of iterations needed to reach benchTime.
+		if c.nsPerOp() == 0 {
+			n = 1e9
+		} else {
+			n = int(c.benchTime.Nanoseconds() / c.nsPerOp())
+		}
+		// Run more iterations than we think we'll need (1.5x the prediction).
+		// Don't grow too fast in case we had timing errors previously.
+		// Be sure to run at least one more than last time.
+		n = max(min(n+n/2, 100*last), last+1)
+		// Round up to something easy to read.
+		n = roundUp(n)
+		benchmarkN(c, n)
+	}
+}
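
For a rough sense of how the loop in benchmark() converges, assume an
operation costs about 1ms and benchTime is the default 1s (a worked example
under assumed timings, not taken from the code): the first call runs n=1 and
measures ~1ms, so the prediction benchTime/nsPerOp is ~1000 iterations; that
is capped by max(min(1000+500, 100*1), 1+1) = 100 and roundUp keeps it at
100, so the second call runs 100 iterations (~0.1s). The next prediction is
again ~1000, now allowed since 100*last = 10000, giving min(1500, 10000) =
1500, which roundUp turns into 2000 iterations (~2s), exceeding benchTime and
ending the loop.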
diff --git a/benchmark_test.go b/benchmark_test.go
new file mode 100644
index 0000000..6d2b009
--- /dev/null
+++ b/benchmark_test.go
@@ -0,0 +1,69 @@
+// These tests verify the timing and benchmark running logic.
+
+package gocheck_test
+
+import (
+	. "launchpad.net/gocheck"
+	"time"
+)
+
+var benchmarkS = Suite(&BenchmarkS{})
+
+type BenchmarkS struct{}
+
+func (s *BenchmarkS) TestCountSuite(c *C) {
+	suitesRun += 1
+}
+
+func (s *BenchmarkS) TestBasicTestTiming(c *C) {
+	helper := FixtureHelper{sleepOn: "Test1", sleep: 1000000 * time.Nanosecond}
+	output := String{}
+	runConf := RunConf{Output: &output, Verbose: true}
+	Run(&helper, &runConf)
+
+	expected := "PASS: gocheck_test\\.go:[0-9]+: FixtureHelper\\.Test1\t0\\.001s\n" +
+		"PASS: gocheck_test\\.go:[0-9]+: FixtureHelper\\.Test2\t0\\.000s\n"
+	c.Assert(output.value, Matches, expected)
+}
+
+func (s *BenchmarkS) TestStreamTestTiming(c *C) {
+	helper := FixtureHelper{sleepOn: "SetUpSuite", sleep: 1000000 * time.Nanosecond}
+	output := String{}
+	runConf := RunConf{Output: &output, Stream: true}
+	Run(&helper, &runConf)
+
+	expected := "(?s).*\nPASS: gocheck_test\\.go:[0-9]+: FixtureHelper\\.SetUpSuite\t *0\\.001s\n.*"
+	c.Assert(output.value, Matches, expected)
+}
+
+// Quite unfortunate that these two tests alone account for most of the suite's running time.
+
+func (s *BenchmarkS) TestBenchmark(c *C) {
+	helper := FixtureHelper{sleep: 100000}
+	output := String{}
+	runConf := RunConf{
+		Output:        &output,
+		Benchmark:     true,
+		BenchmarkTime: 10000000,
+		Filter:        "Benchmark1",
+	}
+	Run(&helper, &runConf)
+
+	expected := "PASS: gocheck_test\\.go:[0-9]+: FixtureHelper\\.Benchmark1\t *100\t *[12][0-9]{5} ns/op\n"
+	c.Assert(output.value, Matches, expected)
+}
+
+func (s *BenchmarkS) TestBenchmarkBytes(c *C) {
+	helper := FixtureHelper{sleep: 100000}
+	output := String{}
+	runConf := RunConf{
+		Output:        &output,
+		Benchmark:     true,
+		BenchmarkTime: 10000000,
+		Filter:        "Benchmark2",
+	}
+	Run(&helper, &runConf)
+
+	expected := "PASS: gocheck_test\\.go:[0-9]+: FixtureHelper\\.Benchmark2\t *100\t *[12][0-9]{5} ns/op\t *[4-9]\\.[0-9]{2} MB/s\n"
+	c.Assert(output.value, Matches, expected)
+}
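
The expected values in the two benchmark tests above follow from the
configuration (a rough derivation, assuming time.Sleep is reasonably
accurate): each iteration sleeps 100µs, so with BenchmarkTime at 10ms the run
converges on 100 iterations at roughly 1-2 x 10^5 ns/op, which is what the
"100\t *[12][0-9]{5} ns/op" pattern accepts. Benchmark2 also reports 1024
bytes per iteration, and 1024 bytes every 100-200µs works out to roughly
5-10 MB/s, hence the "[4-9]\.[0-9]{2} MB/s" range.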
diff --git a/foundation_test.go b/foundation_test.go
index 68709f8..9f89150 100644
--- a/foundation_test.go
+++ b/foundation_test.go
@@ -242,7 +242,7 @@
 
 	expected := "" +
 		"FAIL EXPECTED: foundation_test\\.go:[0-9]+:" +
-		" ExpectFailureSucceedHelper\\.TestSucceed \\(It booms!\\)\n"
+		" ExpectFailureSucceedHelper\\.TestSucceed \\(It booms!\\)\t *[.0-9]+s\n"
 
 	matched, err := regexp.MatchString(expected, output.value)
 	if err != nil {
diff --git a/gocheck.go b/gocheck.go
index 4f94eff..c353b3e 100644
--- a/gocheck.go
+++ b/gocheck.go
@@ -15,6 +15,7 @@
 	"strconv"
 	"strings"
 	"sync"
+	"time"
 )
 
 // -----------------------------------------------------------------------
@@ -23,7 +24,6 @@
 const (
 	fixtureKd = iota
 	testKd
-	benchmarkKd
 )
 
 type funcKind int
@@ -62,13 +62,13 @@
 }
 
 func (method *methodType) String() string {
-	return fmt.Sprintf("%v.%s", method.suiteName(), method.Info.Name)
+	return method.suiteName() + "." + method.Info.Name
 }
 
 func (method *methodType) matches(re *regexp.Regexp) bool {
-	return re.MatchString(method.Info.Name) ||
+	return (re.MatchString(method.Info.Name) ||
 		re.MatchString(method.suiteName()) ||
-		re.MatchString(method.String())
+		re.MatchString(method.String()))
 }
 
 type C struct {
@@ -81,13 +81,7 @@
 	reason   string
 	mustFail bool
 	tempDir  *tempDir
-}
-
-func newC(method *methodType, kind funcKind, logb *bytes.Buffer, logw io.Writer, tempDir *tempDir) *C {
-	if logb == nil {
-		logb = bytes.NewBuffer(nil)
-	}
-	return &C{method: method, kind: kind, logb: logb, logw: logw, tempDir: tempDir, done: make(chan *C, 1)}
+	timer
 }
 
 func (c *C) stopNow() {
@@ -317,10 +311,16 @@
 
 var initWD, initWDErr = os.Getwd()
 
+func init() {
+	if initWDErr == nil {
+		initWD = strings.Replace(initWD, "\\", "/", -1) + "/"
+	}
+}
+
 func nicePath(path string) string {
 	if initWDErr == nil {
-		if strings.HasPrefix(path, initWD+"/") {
-			return path[len(initWD)+1:]
+		if strings.HasPrefix(path, initWD) {
+			return path[len(initWD):]
 		}
 	}
 	return path
@@ -470,30 +470,29 @@
 	tempDir                   *tempDir
 	output                    *outputWriter
 	reportedProblemLast       bool
+	benchTime                 time.Duration
 }
 
 type RunConf struct {
-	Output  io.Writer
-	Stream  bool
-	Verbose bool
-	Filter  string
+	Output        io.Writer
+	Stream        bool
+	Verbose       bool
+	Filter        string
+	Benchmark     bool
+	BenchmarkTime time.Duration // Defaults to 1 second
 }
 
 // Create a new suiteRunner able to run all methods in the given suite.
 func newSuiteRunner(suite interface{}, runConf *RunConf) *suiteRunner {
-	var writer io.Writer
-	var stream, verbose bool
-	var filter string
-
-	writer = os.Stdout
-
+	var conf RunConf
 	if runConf != nil {
-		if runConf.Output != nil {
-			writer = runConf.Output
-		}
-		stream = runConf.Stream
-		verbose = runConf.Verbose
-		filter = runConf.Filter
+		conf = *runConf
+	}
+	if conf.Output == nil {
+		conf.Output = os.Stdout
+	}
+	if conf.Benchmark {
+		conf.Verbose = true
 	}
 
 	suiteType := reflect.TypeOf(suite)
@@ -501,17 +500,20 @@
 	suiteValue := reflect.ValueOf(suite)
 
 	runner := &suiteRunner{
-		suite:   suite,
-		output:  newOutputWriter(writer, stream, verbose),
-		tracker: newResultTracker(),
+		suite:     suite,
+		output:    newOutputWriter(conf.Output, conf.Stream, conf.Verbose),
+		tracker:   newResultTracker(),
+		benchTime: conf.BenchmarkTime,
 	}
-	runner.tests = make([]*methodType, suiteNumMethods)
+	runner.tests = make([]*methodType, 0, suiteNumMethods)
 	runner.tempDir = new(tempDir)
-	testsLen := 0
+	if runner.benchTime == 0 {
+		runner.benchTime = 1 * time.Second
+	}
 
 	var filterRegexp *regexp.Regexp
-	if filter != "" {
-		if regexp, err := regexp.Compile(filter); err != nil {
+	if conf.Filter != "" {
+		if regexp, err := regexp.Compile(conf.Filter); err != nil {
 			msg := "Bad filter expression: " + err.Error()
 			runner.tracker.result.RunError = errors.New(msg)
 			return runner
@@ -532,17 +534,18 @@
 		case "TearDownTest":
 			runner.tearDownTest = method
 		default:
-			if !strings.HasPrefix(method.Info.Name, "Test") {
+			prefix := "Test"
+			if conf.Benchmark {
+				prefix = "Benchmark"
+			}
+			if !strings.HasPrefix(method.Info.Name, prefix) {
 				continue
 			}
 			if filterRegexp == nil || method.matches(filterRegexp) {
-				runner.tests[testsLen] = method
-				testsLen += 1
+				runner.tests = append(runner.tests, method)
 			}
 		}
 	}
-
-	runner.tests = runner.tests[0:testsLen]
 	return runner
 }
 
@@ -582,7 +585,18 @@
 	if runner.output.Stream {
 		logw = runner.output
 	}
-	c := newC(method, kind, logb, logw, runner.tempDir)
+	if logb == nil {
+		logb = bytes.NewBuffer(nil)
+	}
+	c := &C{
+		method:  method,
+		kind:    kind,
+		logb:    logb,
+		logw:    logw,
+		tempDir: runner.tempDir,
+		done:    make(chan *C, 1),
+		timer:   timer{benchTime: runner.benchTime},
+	}
 	runner.tracker.expectCall(c)
 	go (func() {
 		runner.reportCallStarted(c)
@@ -639,6 +653,9 @@
 func (runner *suiteRunner) runFixture(method *methodType, logb *bytes.Buffer) *C {
 	if method != nil {
 		c := runner.runFunc(method, fixtureKd, logb, func(c *C) {
+			c.ResetTimer()
+			c.StartTimer()
+			defer c.StopTimer()
 			c.method.Call([]reflect.Value{reflect.ValueOf(c)})
 		})
 		return c
@@ -674,14 +691,25 @@
 		defer runner.runFixtureWithPanic(runner.tearDownTest, nil, &skipped)
 		runner.runFixtureWithPanic(runner.setUpTest, c.logb, &skipped)
 		mt := c.method.Type()
-		if mt.NumIn() == 1 && mt.In(0) == reflect.TypeOf(c) {
-			c.method.Call([]reflect.Value{reflect.ValueOf(c)})
-		} else {
+		if mt.NumIn() != 1 || mt.In(0) != reflect.TypeOf(c) {
 			// Rather than a plain panic, provide a more helpful message when
 			// the argument type is incorrect.
 			c.status = panickedSt
 			c.logArgPanic(c.method, "*gocheck.C")
+			return
 		}
+		if strings.HasPrefix(c.method.Info.Name, "Test") {
+			c.ResetTimer()
+			c.StartTimer()
+			defer c.StopTimer()
+			c.method.Call([]reflect.Value{reflect.ValueOf(c)})
+			return
+		}
+		if strings.HasPrefix(c.method.Info.Name, "Benchmark") {
+			benchmark(c)
+			return
+		}
+		panic("unexpected method prefix: " + c.method.Info.Name)
 	})
 }
 
@@ -802,10 +830,14 @@
 
 func (ow *outputWriter) WriteCallSuccess(label string, c *C) {
 	if ow.Stream || (ow.Verbose && c.kind == testKd) {
+		// TODO Use a buffer here.
 		var suffix string
 		if c.reason != "" {
 			suffix = " (" + c.reason + ")"
 		}
+		if c.status == succeededSt {
+			suffix += "\t" + c.timerString()
+		}
 		suffix += "\n"
 		if ow.Stream {
 			suffix += "\n"
@@ -829,3 +861,4 @@
 	return fmt.Sprintf("%s%s: %s: %s%s", prefix, label, niceFuncPath(pc),
 		niceFuncName(pc), suffix)
 }
+
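
With the timer embedded in C, every successful call reported in verbose or
stream mode now carries its elapsed time, and benchmark results reuse the
same PASS line with the iteration count and ns/op from timerString(). For
illustration (the file names, line numbers, and timings below are made up;
the shape matches the expectations in the updated tests):

	PASS: gocheck_test.go:75: FixtureHelper.Test1	0.001s
	PASS: gocheck_test.go:150: FixtureHelper.Benchmark1	     100	    101234 ns/op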
diff --git a/gocheck_test.go b/gocheck_test.go
index 7bbf708..aad261a 100644
--- a/gocheck_test.go
+++ b/gocheck_test.go
@@ -11,12 +11,13 @@
 	"regexp"
 	"runtime"
 	"testing"
+	"time"
 )
 
 // We count the number of suites run at least to get a vague hint that the
 // test suite is behaving as it should.  Otherwise a bug introduced at the
 // very core of the system could go unperceived.
-const suitesRunExpected = 7
+const suitesRunExpected = 8
 
 var suitesRun int = 0
 
@@ -93,11 +94,14 @@
 // Helper suite for testing ordering and behavior of fixture.
 
 type FixtureHelper struct {
-	calls   [64]string
-	n       int
-	panicOn string
-	skip    bool
-	skipOnN int
+	calls    [64]string
+	n        int
+	panicOn  string
+	skip     bool
+	skipOnN  int
+	sleepOn  string
+	sleep    time.Duration
+	bytes    int64
 }
 
 func (s *FixtureHelper) trace(name string, c *gocheck.C) {
@@ -107,6 +111,9 @@
 	if name == s.panicOn {
 		panic(name)
 	}
+	if s.sleep > 0 && s.sleepOn == name {
+		time.Sleep(s.sleep)
+	}
 	if s.skip && s.skipOnN == n {
 		c.Skip("skipOnN == n")
 	}
@@ -136,6 +143,21 @@
 	s.trace("Test2", c)
 }
 
+func (s *FixtureHelper) Benchmark1(c *gocheck.C) {
+	s.trace("Benchmark1", c)
+	for i := 0; i < c.N; i++ {
+		time.Sleep(s.sleep)
+	}
+}
+
+func (s *FixtureHelper) Benchmark2(c *gocheck.C) {
+	s.trace("Benchmark2", c)
+	c.SetBytes(1024)
+	for i := 0; i < c.N; i++ {
+		time.Sleep(s.sleep)
+	}
+}
+
 // -----------------------------------------------------------------------
 // Helper which checks the state of the test and ensures that it matches
 // the given expectations.  Depends on c.Errorf() working, so shouldn't
diff --git a/run.go b/run.go
index 85c3853..dc75d77 100644
--- a/run.go
+++ b/run.go
@@ -6,6 +6,7 @@
 	"fmt"
 	"os"
 	"testing"
+	"time"
 )
 
 // -----------------------------------------------------------------------
@@ -24,24 +25,30 @@
 // -----------------------------------------------------------------------
 // Public running interface.
 
-var filterFlag = flag.String("gocheck.f", "",
-	"Regular expression selecting which tests and/or suites to run")
-var verboseFlag = flag.Bool("gocheck.v", false,
-	"Verbose mode")
-var streamFlag = flag.Bool("gocheck.vv", false,
-	"Super verbose mode (disables output caching)")
-var listFlag = flag.Bool("gocheck.list", false,
-	"List the names of all tests that will be run")
+var (
+	filterFlag  = flag.String("gocheck.f", "", "Regular expression selecting which tests and/or suites to run")
+	verboseFlag = flag.Bool("gocheck.v", false, "Verbose mode")
+	streamFlag  = flag.Bool("gocheck.vv", false, "Super verbose mode (disables output caching)")
+	benchFlag   = flag.Bool("gocheck.b", false, "Run benchmarks")
+	benchTime   = flag.Duration("gocheck.btime", 1 * time.Second, "Approximate run time for each benchmark")
+	listFlag    = flag.Bool("gocheck.list", false, "List the names of all tests that will be run")
+)
 
 // Run all test suites registered with the Suite() function, printing
 // results to stdout, and reporting any failures back to the 'testing'
 // module.
 func TestingT(testingT *testing.T) {
-	conf := &RunConf{Filter: *filterFlag, Verbose: *verboseFlag, Stream: *streamFlag}
+	conf := &RunConf{
+		Filter:        *filterFlag,
+		Verbose:       *verboseFlag,
+		Stream:        *streamFlag,
+		Benchmark:     *benchFlag,
+		BenchmarkTime: *benchTime,
+	}
 	if *listFlag {
 		w := bufio.NewWriter(os.Stdout)
 		for _, name := range ListAll(conf) {
-			fmt.Fprintf(w, "%s\n", name)
+			fmt.Fprintln(w, name)
 		}
 		w.Flush()
 		return
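
With these flags wired into TestingT, benchmarks run through the usual go
test entry point; for instance, go test -gocheck.b -gocheck.btime=500ms
-gocheck.f Benchmark1 (a hypothetical invocation). -gocheck.b switches the
suite runner to the "Benchmark" method prefix, and -gocheck.btime overrides
the default one-second target per benchmark.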
diff --git a/run_test.go b/run_test.go
index ca0d82a..3700ac7 100644
--- a/run_test.go
+++ b/run_test.go
@@ -307,8 +307,8 @@
 	runConf := RunConf{Output: &output, Verbose: true}
 	Run(&helper, &runConf)
 
-	expected := "PASS: gocheck_test\\.go:[0-9]+: FixtureHelper\\.Test1\n" +
-		"PASS: gocheck_test\\.go:[0-9]+: FixtureHelper\\.Test2\n"
+	expected := "PASS: gocheck_test\\.go:[0-9]+: FixtureHelper\\.Test1\t *[.0-9]+s\n" +
+		"PASS: gocheck_test\\.go:[0-9]+: FixtureHelper\\.Test2\t *[.0-9]+s\n"
 
 	c.Assert(output.value, Matches, expected)
 }
@@ -320,7 +320,7 @@
 	Run(&helper, &runConf)
 
 	expected := "(?s).*PANIC.*\n-+\n" + // Should have an extra line.
-		"PASS: gocheck_test\\.go:[0-9]+: FixtureHelper\\.Test2\n"
+		"PASS: gocheck_test\\.go:[0-9]+: FixtureHelper\\.Test2\t *[.0-9]+s\n"
 
 	c.Assert(output.value, Matches, expected)
 }
@@ -363,9 +363,9 @@
 	Run(helper, &runConf)
 
 	expected := "START: run_test\\.go:[0-9]+: StreamHelper\\.SetUpSuite\n0\n" +
-		"PASS: run_test\\.go:[0-9]+: StreamHelper\\.SetUpSuite\n\n" +
+		"PASS: run_test\\.go:[0-9]+: StreamHelper\\.SetUpSuite\t *[.0-9]+s\n\n" +
 		"START: run_test\\.go:[0-9]+: StreamHelper\\.Test1\n1\n" +
-		"PASS: run_test\\.go:[0-9]+: StreamHelper\\.Test1\n\n" +
+		"PASS: run_test\\.go:[0-9]+: StreamHelper\\.Test1\t *[.0-9]+s\n\n" +
 		"START: run_test\\.go:[0-9]+: StreamHelper\\.Test2\n2\n3\n4\n" +
 		"FAIL: run_test\\.go:[0-9]+: StreamHelper\\.Test2\n\n"