- Polished the integration of the benchmark code.
- Integrated display of timing for succeeding tests.
- Updated all tests accordingly.
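For context, the timer methods that previously lived on the private
testingB type are now available on *C directly, so a benchmark method can
pause timing around expensive setup. A minimal sketch of the resulting
API, assuming the usual suite registration (the suite name, benchmark
body, and import path are illustrative, not part of this change):

    package example_test

    import (
    	"bytes"
    	"testing"

    	. "launchpad.net/gocheck"
    )

    func Test(t *testing.T) { TestingT(t) }

    type BenchSuite struct{}

    var _ = Suite(&BenchSuite{})

    func (s *BenchSuite) BenchmarkJoin(c *C) {
    	c.StopTimer() // exclude fixture construction from the measurement
    	chunk := bytes.Repeat([]byte("x"), 1024)
    	c.SetBytes(int64(len(chunk))) // bytes per iteration, so MB/s is reported
    	c.StartTimer()
    	for i := 0; i < c.N; i++ {
    		_ = bytes.Join([][]byte{chunk, chunk}, nil)
    	}
    }
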
diff --git a/benchmark.go b/benchmark.go
index 9ae801f..c8f1057 100644
--- a/benchmark.go
+++ b/benchmark.go
@@ -7,89 +7,93 @@
 import (
 	"flag"
 	"fmt"
+	"reflect"
 	"runtime"
 	"time"
 )
 
 var benchTime = flag.Float64("gocheck.btime", 1, "approximate run time for each benchmark, in seconds")
 
-// An internal type but exported because it is cross-package; part of the implementation
-// of the "go test" command.
-type testingInternalBenchmark struct {
-	Name string
-	F    func(b *testingB)
-}
-
-// common holds the elements common between T and B and
-// captures common methods such as Errorf.
-type common struct {
-	output   []byte    // Output generated by test or benchmark.
-	failed   bool      // Test or benchmark has failed.
-	start    time.Time // Time test or benchmark started
-	duration time.Duration
-}
-
-// testingB is a type passed to Benchmark functions to manage benchmark
-// timing and to specify the number of iterations to run.
+// timer tracks the timing state of a test or benchmark, and holds the
+// iteration count and byte count that benchmarks report on.
-type testingB struct {
-	common
-	N         int
-	benchmark testingInternalBenchmark
-	bytes     int64
-	timerOn   bool
-	result    testingBenchmarkResult
+type timer struct {
+	start    time.Time // Time test or benchmark started
+	duration time.Duration
+	N        int
+	bytes    int64
+	timerOn  bool
 }
 
-// StartTimer starts timing a test.  This function is called automatically
+// StartTimer starts timing a test. This function is called automatically
-// before a benchmark starts, but it can also used to resume timing after
+// before a benchmark starts, but it can also be used to resume timing after
 // a call to StopTimer.
-func (b *testingB) StartTimer() {
-	if !b.timerOn {
-		b.start = time.Now()
-		b.timerOn = true
+func (c *C) StartTimer() {
+	if !c.timerOn {
+		c.start = time.Now()
+		c.timerOn = true
 	}
 }
 
-// StopTimer stops timing a test.  This can be used to pause the timer
+// StopTimer stops timing a test. This can be used to pause the timer
 // while performing complex initialization that you don't
 // want to measure.
-func (b *testingB) StopTimer() {
-	if b.timerOn {
-		b.duration += time.Now().Sub(b.start)
-		b.timerOn = false
+func (c *C) StopTimer() {
+	if c.timerOn {
+		c.duration += time.Now().Sub(c.start)
+		c.timerOn = false
 	}
 }
 
 // ResetTimer sets the elapsed benchmark time to zero.
 // It does not affect whether the timer is running.
-func (b *testingB) ResetTimer() {
-	if b.timerOn {
-		b.start = time.Now()
+func (c *C) ResetTimer() {
+	if c.timerOn {
+		c.start = time.Now()
 	}
-	b.duration = 0
+	c.duration = 0
 }
 
 // SetBytes records the number of bytes processed in a single operation.
-// If this is called, the benchmark will report ns/op and MB/s.
-func (b *testingB) SetBytes(n int64) { b.bytes = n }
-
-func (b *testingB) nsPerOp() int64 {
-	if b.N <= 0 {
-		return 0
-	}
-	return b.duration.Nanoseconds() / int64(b.N)
+// If this is called in a benchmark, it will report ns/op and MB/s.
+func (c *C) SetBytes(n int64) {
+	c.bytes = n
 }
 
-// runN runs a single benchmark for the specified number of iterations.
-func (b *testingB) runN(n int) {
-	// Try to get a comparable environment for each run
-	// by clearing garbage from previous runs.
-	runtime.GC()
-	b.N = n
-	b.ResetTimer()
-	b.StartTimer()
-	b.benchmark.F(b)
-	b.StopTimer()
+func (c *C) nsPerOp() int64 {
+	if c.N <= 0 {
+		return 0
+	}
+	return c.duration.Nanoseconds() / int64(c.N)
+}
+
+func (c *C) mbPerSec() float64 {
+	if c.bytes <= 0 || c.duration <= 0 || c.N <= 0 {
+		return 0
+	}
+	return (float64(c.bytes) * float64(c.N) / 1e6) / c.duration.Seconds()
+}
+
+func (c *C) timerString() string {
+	if c.N <= 0 {
+		return fmt.Sprintf("\t%10d ns", c.duration.Nanoseconds())
+	}
+	mbs := c.mbPerSec()
+	mb := ""
+	if mbs != 0 {
+		mb = fmt.Sprintf("\t%7.2f MB/s", mbs)
+	}
+	nsop := c.nsPerOp()
+	ns := fmt.Sprintf("%10d ns/op", nsop)
+	if c.N > 0 && nsop < 100 {
+		// The format specifiers here make sure that
+		// the ones digits line up for all three possible formats.
+		if nsop < 10 {
+			ns = fmt.Sprintf("%13.2f ns/op", float64(c.duration.Nanoseconds())/float64(c.N))
+		} else {
+			ns = fmt.Sprintf("%12.1f ns/op", float64(c.duration.Nanoseconds())/float64(c.N))
+		}
+	}
+	return fmt.Sprintf("%8d\t%s%s", c.N, ns, mb)
 }
 
 func min(x, y int) int {
@@ -134,25 +138,34 @@
 	return 10 * base
 }
 
-// launch launches the benchmark function.  It gradually increases the number
+// benchmarkN runs a single benchmark for the specified number of iterations.
+func benchmarkN(c *C, n int) {
+	// Try to get a comparable environment for each run
+	// by clearing garbage from previous runs.
+	runtime.GC()
+	c.N = n
+	c.ResetTimer()
+	c.StartTimer()
+	c.method.Call([]reflect.Value{reflect.ValueOf(c)})
+	c.StopTimer()
+}
+
+// benchmark runs the benchmark function. It gradually increases the number
 // of benchmark iterations until the benchmark runs for a second in order
-// to get a reasonable measurement.  It prints timing information in this form
-//		testing.BenchmarkHello	100000		19 ns/op
-// launch is run by the fun function as a separate goroutine.
-func (b *testingB) launch() {
+// to get a reasonable measurement.
+func benchmark(c *C) {
 	// Run the benchmark for a single iteration in case it's expensive.
 	n := 1
-
-	b.runN(n)
+	benchmarkN(c, n)
 	// Run the benchmark for at least the specified amount of time.
 	d := time.Duration(*benchTime * float64(time.Second))
-	for !b.failed && b.duration < d && n < 1e9 {
+	for c.status == succeededSt && c.duration < d && n < 1e9 {
 		last := n
 		// Predict iterations/sec.
-		if b.nsPerOp() == 0 {
+		if c.nsPerOp() == 0 {
 			n = 1e9
 		} else {
-			n = int(d.Nanoseconds() / b.nsPerOp())
+			n = int(d.Nanoseconds() / c.nsPerOp())
 		}
 		// Run more iterations than we think we'll need for a second (1.5x).
 		// Don't grow too fast in case we had timing errors previously.
@@ -160,61 +173,6 @@
 		n = max(min(n+n/2, 100*last), last+1)
 		// Round up to something easy to read.
 		n = roundUp(n)
-		b.runN(n)
+		benchmarkN(c, n)
 	}
-	b.result = testingBenchmarkResult{b.N, b.duration, b.bytes}
-}
-
-// The results of a benchmark run.
-type testingBenchmarkResult struct {
-	N     int           // The number of iterations.
-	T     time.Duration // The total time taken.
-	Bytes int64         // Bytes processed in one iteration.
-}
-
-func (r testingBenchmarkResult) NsPerOp() int64 {
-	if r.N <= 0 {
-		return 0
-	}
-	return r.T.Nanoseconds() / int64(r.N)
-}
-
-func (r testingBenchmarkResult) mbPerSec() float64 {
-	if r.Bytes <= 0 || r.T <= 0 || r.N <= 0 {
-		return 0
-	}
-	return (float64(r.Bytes) * float64(r.N) / 1e6) / r.T.Seconds()
-}
-
-func (r testingBenchmarkResult) String() string {
-	mbs := r.mbPerSec()
-	mb := ""
-	if mbs != 0 {
-		mb = fmt.Sprintf("\t%7.2f MB/s", mbs)
-	}
-	nsop := r.NsPerOp()
-	ns := fmt.Sprintf("%10d ns/op", nsop)
-	if r.N > 0 && nsop < 100 {
-		// The format specifiers here make sure that
-		// the ones digits line up for all three possible formats.
-		if nsop < 10 {
-			ns = fmt.Sprintf("%13.2f ns/op", float64(r.T.Nanoseconds())/float64(r.N))
-		} else {
-			ns = fmt.Sprintf("%12.1f ns/op", float64(r.T.Nanoseconds())/float64(r.N))
-		}
-	}
-	return fmt.Sprintf("%8d\t%s%s", r.N, ns, mb)
-}
-
-// Benchmark benchmarks a single function. Useful for creating
-// custom benchmarks that do not use the "go test" command.
-func testingBenchmark(f func(b *testingB)) testingBenchmarkResult {
-	b := &testingB{
-		benchmark: testingInternalBenchmark{"", f},
-	}
-	// XXX The main reason we fork this file is to have this
-	// running in the same goroutine rather than forking off
-	// a different one (and thus panicking in isolation).
-	b.launch()
-	return b.result
 }
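An aside on timerString above: the three ns/op format specifiers are
chosen so that the ones digits line up across magnitudes. A standalone
sketch reproducing just that formatting decision (plain Go, independent
of the code in this patch):

    package main

    import "fmt"

    // nsString mirrors timerString's choice of format specifier:
    // two decimals below 10 ns/op, one decimal below 100 ns/op,
    // and a plain integer otherwise.
    func nsString(nsop float64) string {
    	switch {
    	case nsop < 10:
    		return fmt.Sprintf("%13.2f ns/op", nsop)
    	case nsop < 100:
    		return fmt.Sprintf("%12.1f ns/op", nsop)
    	default:
    		return fmt.Sprintf("%10d ns/op", int64(nsop))
    	}
    }

    func main() {
    	// The ones digits of 7.25, 63.8 and 1900 land in the same column.
    	for _, ns := range []float64{7.25, 63.8, 1900} {
    		fmt.Println(nsString(ns))
    	}
    }
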
diff --git a/foundation_test.go b/foundation_test.go
index cc309ac..6080941 100644
--- a/foundation_test.go
+++ b/foundation_test.go
@@ -242,7 +242,7 @@
 
 	expected := "" +
 		"FAIL EXPECTED: foundation_test\\.go:[0-9]+:" +
-		" ExpectFailureSucceedHelper\\.TestSucceed \\(It booms!\\)\n"
+		" ExpectFailureSucceedHelper\\.TestSucceed \\(It booms!\\)\t *[0-9]+ ns\n"
 
 	matched, err := regexp.MatchString(expected, output.value)
 	if err != nil {
diff --git a/gocheck.go b/gocheck.go
index c623184..401f37e 100644
--- a/gocheck.go
+++ b/gocheck.go
@@ -23,7 +23,6 @@
 const (
 	fixtureKd = iota
 	testKd
-	benchmarkKd
 )
 
 type funcKind int
@@ -63,9 +62,7 @@
 	reason   string
 	mustFail bool
 	tempDir  *tempDir
-	b        *testingB
-	bResult  testingBenchmarkResult
-	N        int
+	timer
 }
 
 func newC(method *methodType, kind funcKind, logb *bytes.Buffer, logw io.Writer, tempDir *tempDir) *C {
@@ -649,6 +646,9 @@
 func (runner *suiteRunner) runFixture(method *methodType, logb *bytes.Buffer) *C {
 	if method != nil {
 		c := runner.runFunc(method, fixtureKd, logb, func(c *C) {
+			c.ResetTimer()
+			c.StartTimer()
+			defer c.StopTimer()
 			c.method.Call([]reflect.Value{reflect.ValueOf(c)})
 		})
 		return c
@@ -692,18 +692,17 @@
 			return
 		}
 		if strings.HasPrefix(c.method.Info.Name, "Test") {
+			c.ResetTimer()
+			c.StartTimer()
+			defer c.StopTimer()
 			c.method.Call([]reflect.Value{reflect.ValueOf(c)})
 			return
 		}
-		if !strings.HasPrefix(c.method.Info.Name, "Benchmark") {
-			panic("unexpected method prefix: " + c.method.Info.Name)
+		if strings.HasPrefix(c.method.Info.Name, "Benchmark") {
+			benchmark(c)
+			return
 		}
-		f := func(b *testingB) {
-			c.b = b
-			c.N = b.N
-			c.method.Call([]reflect.Value{reflect.ValueOf(c)})
-		}
-		c.bResult = testingBenchmark(f)
+		panic("unexpected method prefix: " + c.method.Info.Name)
 	})
 }
 
@@ -829,8 +828,8 @@
 		if c.reason != "" {
 			suffix = " (" + c.reason + ")"
 		}
-		if c.b != nil {
-			suffix += "\t" + c.bResult.String()
+		if c.status == succeededSt {
+			suffix += c.timerString()
 		}
 		suffix += "\n"
 		if ow.Stream {
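The expectation changes below (and the one in foundation_test.go above)
all follow from this suffix change: a succeeding test's report line now
ends in a tab plus its duration in nanoseconds. A quick sketch of the
shape the updated patterns match (the line content is made up):

    package main

    import (
    	"fmt"
    	"regexp"
    )

    func main() {
    	// An illustrative PASS line carrying the new timing suffix.
    	line := "PASS: foo_test.go:42: MySuite.TestSomething\t     48931 ns\n"
    	pattern := "PASS: foo_test\\.go:[0-9]+: MySuite\\.TestSomething\t *[0-9]+ ns\n"
    	fmt.Println(regexp.MustCompile(pattern).MatchString(line)) // prints: true
    }
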
diff --git a/run_test.go b/run_test.go
index 64d0591..16e7fa6 100644
--- a/run_test.go
+++ b/run_test.go
@@ -288,8 +288,8 @@
 	runConf := RunConf{Output: &output, Verbose: true}
 	Run(&helper, &runConf)
 
-	expected := "PASS: gocheck_test\\.go:[0-9]+: FixtureHelper\\.Test1\n" +
-		"PASS: gocheck_test\\.go:[0-9]+: FixtureHelper\\.Test2\n"
+	expected := "PASS: gocheck_test\\.go:[0-9]+: FixtureHelper\\.Test1\t *[0-9]+ ns\n" +
+		"PASS: gocheck_test\\.go:[0-9]+: FixtureHelper\\.Test2\t *[0-9]+ ns\n"
 
 	c.Assert(output.value, Matches, expected)
 }
@@ -301,7 +301,7 @@
 	Run(&helper, &runConf)
 
 	expected := "(?s).*PANIC.*\n-+\n" + // Should have an extra line.
-		"PASS: gocheck_test\\.go:[0-9]+: FixtureHelper\\.Test2\n"
+		"PASS: gocheck_test\\.go:[0-9]+: FixtureHelper\\.Test2\t *[0-9]+ ns\n"
 
 	c.Assert(output.value, Matches, expected)
 }
@@ -344,9 +344,9 @@
 	Run(helper, &runConf)
 
 	expected := "START: run_test\\.go:[0-9]+: StreamHelper\\.SetUpSuite\n0\n" +
-		"PASS: run_test\\.go:[0-9]+: StreamHelper\\.SetUpSuite\n\n" +
+		"PASS: run_test\\.go:[0-9]+: StreamHelper\\.SetUpSuite\t *[0-9]+ ns\n\n" +
 		"START: run_test\\.go:[0-9]+: StreamHelper\\.Test1\n1\n" +
-		"PASS: run_test\\.go:[0-9]+: StreamHelper\\.Test1\n\n" +
+		"PASS: run_test\\.go:[0-9]+: StreamHelper\\.Test1\t *[0-9]+ ns\n\n" +
 		"START: run_test\\.go:[0-9]+: StreamHelper\\.Test2\n2\n3\n4\n" +
 		"FAIL: run_test\\.go:[0-9]+: StreamHelper\\.Test2\n\n"