leveldb: improves coding style
diff --git a/leveldb/batch.go b/leveldb/batch.go
index 89fcf34..652fa41 100644
--- a/leveldb/batch.go
+++ b/leveldb/batch.go
@@ -15,6 +15,7 @@
 	"github.com/syndtr/goleveldb/leveldb/storage"
 )
 
+// ErrBatchCorrupted records reason of batch corruption.
 type ErrBatchCorrupted struct {
 	Reason string
 }
@@ -32,6 +33,7 @@
 	batchGrowRec = 3000
 )
 
+// BatchReplay wraps basic batch operations.
 type BatchReplay interface {
 	Put(key, value []byte)
 	Delete(key []byte)
@@ -68,20 +70,20 @@
 	}
 }
 
-func (b *Batch) appendRec(kt kType, key, value []byte) {
+func (b *Batch) appendRec(kt keyType, key, value []byte) {
 	n := 1 + binary.MaxVarintLen32 + len(key)
-	if kt == ktVal {
+	if kt == keyTypeVal {
 		n += binary.MaxVarintLen32 + len(value)
 	}
 	b.grow(n)
 	off := len(b.data)
 	data := b.data[:off+n]
 	data[off] = byte(kt)
-	off += 1
+	off++
 	off += binary.PutUvarint(data[off:], uint64(len(key)))
 	copy(data[off:], key)
 	off += len(key)
-	if kt == ktVal {
+	if kt == keyTypeVal {
 		off += binary.PutUvarint(data[off:], uint64(len(value)))
 		copy(data[off:], value)
 		off += len(value)
@@ -95,13 +97,13 @@
 // Put appends 'put operation' of the given key/value pair to the batch.
 // It is safe to modify the contents of the argument after Put returns.
 func (b *Batch) Put(key, value []byte) {
-	b.appendRec(ktVal, key, value)
+	b.appendRec(keyTypeVal, key, value)
 }
 
 // Delete appends 'delete operation' of the given key to the batch.
 // It is safe to modify the contents of the argument after Delete returns.
 func (b *Batch) Delete(key []byte) {
-	b.appendRec(ktDel, key, nil)
+	b.appendRec(keyTypeDel, key, nil)
 }
 
 // Dump dumps batch contents. The returned slice can be loaded into the
@@ -122,11 +124,11 @@
 
 // Replay replays batch contents.
 func (b *Batch) Replay(r BatchReplay) error {
-	return b.decodeRec(func(i int, kt kType, key, value []byte) error {
+	return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
 		switch kt {
-		case ktVal:
+		case keyTypeVal:
 			r.Put(key, value)
-		case ktDel:
+		case keyTypeDel:
 			r.Delete(key)
 		}
 		return nil
@@ -195,18 +197,18 @@
 	return nil
 }
 
-func (b *Batch) decodeRec(f func(i int, kt kType, key, value []byte) error) error {
+func (b *Batch) decodeRec(f func(i int, kt keyType, key, value []byte) error) error {
 	off := batchHdrLen
 	for i := 0; i < b.rLen; i++ {
 		if off >= len(b.data) {
 			return newErrBatchCorrupted("invalid records length")
 		}
 
-		kt := kType(b.data[off])
-		if kt > ktVal {
+		kt := keyType(b.data[off])
+		if kt > keyTypeVal {
 			return newErrBatchCorrupted("bad record: invalid type")
 		}
-		off += 1
+		off++
 
 		x, n := binary.Uvarint(b.data[off:])
 		off += n
@@ -216,7 +219,7 @@
 		key := b.data[off : off+int(x)]
 		off += int(x)
 		var value []byte
-		if kt == ktVal {
+		if kt == keyTypeVal {
 			x, n := binary.Uvarint(b.data[off:])
 			off += n
 			if n <= 0 || off+int(x) > len(b.data) {
@@ -236,8 +239,8 @@
 
 func (b *Batch) memReplay(to *memdb.DB) error {
 	var ikScratch []byte
-	return b.decodeRec(func(i int, kt kType, key, value []byte) error {
-		ikScratch = makeIkey(ikScratch, key, b.seq+uint64(i), kt)
+	return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
+		ikScratch = makeInternalKey(ikScratch, key, b.seq+uint64(i), kt)
 		return to.Put(ikScratch, value)
 	})
 }
@@ -251,8 +254,8 @@
 
 func (b *Batch) revertMemReplay(to *memdb.DB) error {
 	var ikScratch []byte
-	return b.decodeRec(func(i int, kt kType, key, value []byte) error {
-		ikScratch := makeIkey(ikScratch, key, b.seq+uint64(i), kt)
+	return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
+		ikScratch = makeInternalKey(ikScratch, key, b.seq+uint64(i), kt)
 		return to.Delete(ikScratch)
 	})
 }
diff --git a/leveldb/batch_test.go b/leveldb/batch_test.go
index 7fc842f..3504f08 100644
--- a/leveldb/batch_test.go
+++ b/leveldb/batch_test.go
@@ -15,7 +15,7 @@
 )
 
 type tbRec struct {
-	kt         kType
+	kt         keyType
 	key, value []byte
 }
 
@@ -24,11 +24,11 @@
 }
 
 func (p *testBatch) Put(key, value []byte) {
-	p.rec = append(p.rec, &tbRec{ktVal, key, value})
+	p.rec = append(p.rec, &tbRec{keyTypeVal, key, value})
 }
 
 func (p *testBatch) Delete(key []byte) {
-	p.rec = append(p.rec, &tbRec{ktDel, key, nil})
+	p.rec = append(p.rec, &tbRec{keyTypeDel, key, nil})
 }
 
 func compareBatch(t *testing.T, b1, b2 *Batch) {
@@ -55,7 +55,7 @@
 		if !bytes.Equal(r1.key, r2.key) {
 			t.Errorf("invalid key on record '%d' want %s, got %s", i, string(r1.key), string(r2.key))
 		}
-		if r1.kt == ktVal {
+		if r1.kt == keyTypeVal {
 			if !bytes.Equal(r1.value, r2.value) {
 				t.Errorf("invalid value on record '%d' want %s, got %s", i, string(r1.value), string(r2.value))
 			}
diff --git a/leveldb/comparer.go b/leveldb/comparer.go
index d33d5e9..248bf7c 100644
--- a/leveldb/comparer.go
+++ b/leveldb/comparer.go
@@ -33,9 +33,9 @@
 }
 
 func (icmp *iComparer) Compare(a, b []byte) int {
-	x := icmp.ucmp.Compare(iKey(a).ukey(), iKey(b).ukey())
+	x := icmp.ucmp.Compare(internalKey(a).ukey(), internalKey(b).ukey())
 	if x == 0 {
-		if m, n := iKey(a).num(), iKey(b).num(); m > n {
+		if m, n := internalKey(a).num(), internalKey(b).num(); m > n {
 			x = -1
 		} else if m < n {
 			x = 1
@@ -45,13 +45,13 @@
 }
 
 func (icmp *iComparer) Separator(dst, a, b []byte) []byte {
-	ua, ub := iKey(a).ukey(), iKey(b).ukey()
+	ua, ub := internalKey(a).ukey(), internalKey(b).ukey()
 	dst = icmp.ucmp.Separator(dst, ua, ub)
 	if dst == nil {
 		return nil
 	}
 	if len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 {
-		dst = append(dst, kMaxNumBytes...)
+		dst = append(dst, keyMaxNumBytes...)
 	} else {
 		// Did not close possibilities that n maybe longer than len(ub).
 		dst = append(dst, a[len(a)-8:]...)
@@ -60,13 +60,13 @@
 }
 
 func (icmp *iComparer) Successor(dst, b []byte) []byte {
-	ub := iKey(b).ukey()
+	ub := internalKey(b).ukey()
 	dst = icmp.ucmp.Successor(dst, ub)
 	if dst == nil {
 		return nil
 	}
 	if len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 {
-		dst = append(dst, kMaxNumBytes...)
+		dst = append(dst, keyMaxNumBytes...)
 	} else {
 		// Did not close possibilities that n maybe longer than len(ub).
 		dst = append(dst, b[len(b)-8:]...)
diff --git a/leveldb/db.go b/leveldb/db.go
index 537addb..eb6abd0 100644
--- a/leveldb/db.go
+++ b/leveldb/db.go
@@ -315,7 +315,7 @@
 		tw := table.NewWriter(writer, o)
 		for iter.Next() {
 			key := iter.Key()
-			if validIkey(key) {
+			if validInternalKey(key) {
 				err = tw.Append(key, iter.Value())
 				if err != nil {
 					return
@@ -380,7 +380,7 @@
 		// Scan the table.
 		for iter.Next() {
 			key := iter.Key()
-			_, seq, _, kerr := parseIkey(key)
+			_, seq, _, kerr := parseInternalKey(key)
 			if kerr != nil {
 				tcorruptedKey++
 				continue
@@ -472,15 +472,15 @@
 
 func (db *DB) recoverJournal() error {
 	// Get all journals and sort it by file number.
-	fds_, err := db.s.stor.List(storage.TypeJournal)
+	rawFds, err := db.s.stor.List(storage.TypeJournal)
 	if err != nil {
 		return err
 	}
-	sortFds(fds_)
+	sortFds(rawFds)
 
 	// Journals that will be recovered.
 	var fds []storage.FileDesc
-	for _, fd := range fds_ {
+	for _, fd := range rawFds {
 		if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum {
 			fds = append(fds, fd)
 		}
@@ -633,15 +633,15 @@
 
 func (db *DB) recoverJournalRO() error {
 	// Get all journals and sort it by file number.
-	fds_, err := db.s.stor.List(storage.TypeJournal)
+	rawFds, err := db.s.stor.List(storage.TypeJournal)
 	if err != nil {
 		return err
 	}
-	sortFds(fds_)
+	sortFds(rawFds)
 
 	// Journals that will be recovered.
 	var fds []storage.FileDesc
-	for _, fd := range fds_ {
+	for _, fd := range rawFds {
 		if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum {
 			fds = append(fds, fd)
 		}
@@ -728,16 +728,16 @@
 	return nil
 }
 
-func memGet(mdb *memdb.DB, ikey iKey, icmp *iComparer) (ok bool, mv []byte, err error) {
+func memGet(mdb *memdb.DB, ikey internalKey, icmp *iComparer) (ok bool, mv []byte, err error) {
 	mk, mv, err := mdb.Find(ikey)
 	if err == nil {
-		ukey, _, kt, kerr := parseIkey(mk)
+		ukey, _, kt, kerr := parseInternalKey(mk)
 		if kerr != nil {
 			// Shouldn't have had happen.
 			panic(kerr)
 		}
 		if icmp.uCompare(ukey, ikey.ukey()) == 0 {
-			if kt == ktDel {
+			if kt == keyTypeDel {
 				return true, nil, ErrNotFound
 			}
 			return true, mv, nil
@@ -750,7 +750,7 @@
 }
 
 func (db *DB) get(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
-	ikey := makeIkey(nil, key, seq, ktSeek)
+	ikey := makeInternalKey(nil, key, seq, keyTypeSeek)
 
 	if auxm != nil {
 		if ok, mv, me := memGet(auxm, ikey, db.s.icmp); ok {
@@ -788,7 +788,7 @@
 }
 
 func (db *DB) has(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) {
-	ikey := makeIkey(nil, key, seq, ktSeek)
+	ikey := makeInternalKey(nil, key, seq, keyTypeSeek)
 
 	if auxm != nil {
 		if ok, _, me := memGet(auxm, ikey, db.s.icmp); ok {
@@ -997,8 +997,8 @@
 
 	sizes := make(Sizes, 0, len(ranges))
 	for _, r := range ranges {
-		imin := makeIkey(nil, r.Start, kMaxSeq, ktSeek)
-		imax := makeIkey(nil, r.Limit, kMaxSeq, ktSeek)
+		imin := makeInternalKey(nil, r.Start, keyMaxSeq, keyTypeSeek)
+		imax := makeInternalKey(nil, r.Limit, keyMaxSeq, keyTypeSeek)
 		start, err := v.offsetOf(imin)
 		if err != nil {
 			return nil, err
@@ -1007,7 +1007,7 @@
 		if err != nil {
 			return nil, err
 		}
-		var size uint64
+		var size int64
 		if limit >= start {
 			size = limit - start
 		}
diff --git a/leveldb/db_compaction.go b/leveldb/db_compaction.go
index a94cf4c..c228e24 100644
--- a/leveldb/db_compaction.go
+++ b/leveldb/db_compaction.go
@@ -452,7 +452,7 @@
 		}
 
 		ikey := iter.Key()
-		ukey, seq, kt, kerr := parseIkey(ikey)
+		ukey, seq, kt, kerr := parseInternalKey(ikey)
 
 		if kerr == nil {
 			shouldStop := !resumed && b.c.shouldStopBefore(ikey)
@@ -478,14 +478,14 @@
 
 				hasLastUkey = true
 				lastUkey = append(lastUkey[:0], ukey...)
-				lastSeq = kMaxSeq
+				lastSeq = keyMaxSeq
 			}
 
 			switch {
 			case lastSeq <= b.minSeq:
 				// Dropped because newer entry for same user key exist
 				fallthrough // (A)
-			case kt == ktDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey):
+			case kt == keyTypeDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey):
 				// For this user key:
 				// (1) there is no data in higher levels
 				// (2) data in lower levels will have larger seq numbers
@@ -507,7 +507,7 @@
 			// Don't drop corrupted keys.
 			hasLastUkey = false
 			lastUkey = lastUkey[:0]
-			lastSeq = kMaxSeq
+			lastSeq = keyMaxSeq
 			b.kerrCnt++
 		}
 
diff --git a/leveldb/db_iter.go b/leveldb/db_iter.go
index 86bcb99..03c24cd 100644
--- a/leveldb/db_iter.go
+++ b/leveldb/db_iter.go
@@ -19,7 +19,7 @@
 )
 
 var (
-	errInvalidIkey = errors.New("leveldb: Iterator: invalid internal key")
+	errInvalidInternalKey = errors.New("leveldb: Iterator: invalid internal key")
 )
 
 type memdbReleaser struct {
@@ -70,10 +70,10 @@
 	if slice != nil {
 		islice = &util.Range{}
 		if slice.Start != nil {
-			islice.Start = makeIkey(nil, slice.Start, kMaxSeq, ktSeek)
+			islice.Start = makeInternalKey(nil, slice.Start, keyMaxSeq, keyTypeSeek)
 		}
 		if slice.Limit != nil {
-			islice.Limit = makeIkey(nil, slice.Limit, kMaxSeq, ktSeek)
+			islice.Limit = makeInternalKey(nil, slice.Limit, keyMaxSeq, keyTypeSeek)
 		}
 	}
 	rawIter := db.newRawIterator(auxm, auxt, islice, ro)
@@ -187,7 +187,7 @@
 		return false
 	}
 
-	ikey := makeIkey(nil, key, i.seq, ktSeek)
+	ikey := makeInternalKey(nil, key, i.seq, keyTypeSeek)
 	if i.iter.Seek(ikey) {
 		i.dir = dirSOI
 		return i.next()
@@ -199,15 +199,15 @@
 
 func (i *dbIter) next() bool {
 	for {
-		if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
+		if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
 			i.sampleSeek()
 			if seq <= i.seq {
 				switch kt {
-				case ktDel:
+				case keyTypeDel:
 					// Skip deleted key.
 					i.key = append(i.key[:0], ukey...)
 					i.dir = dirForward
-				case ktVal:
+				case keyTypeVal:
 					if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 {
 						i.key = append(i.key[:0], ukey...)
 						i.value = append(i.value[:0], i.iter.Value()...)
@@ -250,13 +250,13 @@
 	del := true
 	if i.iter.Valid() {
 		for {
-			if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
+			if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
 				i.sampleSeek()
 				if seq <= i.seq {
 					if !del && i.icmp.uCompare(ukey, i.key) < 0 {
 						return true
 					}
-					del = (kt == ktDel)
+					del = (kt == keyTypeDel)
 					if !del {
 						i.key = append(i.key[:0], ukey...)
 						i.value = append(i.value[:0], i.iter.Value()...)
@@ -292,7 +292,7 @@
 		return i.Last()
 	case dirForward:
 		for i.iter.Prev() {
-			if ukey, _, _, kerr := parseIkey(i.iter.Key()); kerr == nil {
+			if ukey, _, _, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
 				i.sampleSeek()
 				if i.icmp.uCompare(ukey, i.key) < 0 {
 					goto cont
diff --git a/leveldb/db_state.go b/leveldb/db_state.go
index 0207e22..40f454d 100644
--- a/leveldb/db_state.go
+++ b/leveldb/db_state.go
@@ -57,7 +57,7 @@
 	atomic.StoreUint64(&db.seq, seq)
 }
 
-func (db *DB) sampleSeek(ikey iKey) {
+func (db *DB) sampleSeek(ikey internalKey) {
 	v := db.s.version()
 	if v.sampleSeek(ikey) {
 		// Trigger table compaction.
diff --git a/leveldb/db_test.go b/leveldb/db_test.go
index 63592c0..9be0487 100644
--- a/leveldb/db_test.go
+++ b/leveldb/db_test.go
@@ -276,7 +276,7 @@
 	db := h.db
 	s := db.s
 
-	ikey := makeIkey(nil, []byte(key), kMaxSeq, ktVal)
+	ikey := makeInternalKey(nil, []byte(key), keyMaxSeq, keyTypeVal)
 	iter := db.newRawIterator(nil, nil, nil, nil)
 	if !iter.Seek(ikey) && iter.Error() != nil {
 		t.Error("AllEntries: error during seek, err: ", iter.Error())
@@ -285,7 +285,7 @@
 	res := "[ "
 	first := true
 	for iter.Valid() {
-		if ukey, _, kt, kerr := parseIkey(iter.Key()); kerr == nil {
+		if ukey, _, kt, kerr := parseInternalKey(iter.Key()); kerr == nil {
 			if s.icmp.uCompare(ikey.ukey(), ukey) != 0 {
 				break
 			}
@@ -294,9 +294,9 @@
 			}
 			first = false
 			switch kt {
-			case ktVal:
+			case keyTypeVal:
 				res += string(iter.Value())
-			case ktDel:
+			case keyTypeDel:
 				res += "DEL"
 			}
 		} else {
@@ -430,7 +430,7 @@
 	t.Log("DB range compaction done")
 }
 
-func (h *dbHarness) sizeOf(start, limit string) uint64 {
+func (h *dbHarness) sizeOf(start, limit string) int64 {
 	sz, err := h.db.SizeOf([]util.Range{
 		{[]byte(start), []byte(limit)},
 	})
@@ -440,7 +440,7 @@
 	return sz.Sum()
 }
 
-func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) {
+func (h *dbHarness) sizeAssert(start, limit string, low, hi int64) {
 	sz := h.sizeOf(start, limit)
 	if sz < low || sz > hi {
 		h.t.Errorf("sizeOf %q to %q not in range, want %d - %d, got %d",
@@ -505,7 +505,7 @@
 	return fmt.Sprintf("key%06d", num)
 }
 
-var _bloom_filter = filter.NewBloomFilter(10)
+var testingBloomFilter = filter.NewBloomFilter(10)
 
 func truno(t *testing.T, o *opt.Options, f func(h *dbHarness)) {
 	for i := 0; i < 4; i++ {
@@ -514,16 +514,22 @@
 			case 0:
 			case 1:
 				if o == nil {
-					o = &opt.Options{DisableLargeBatchTransaction: true, Filter: _bloom_filter}
+					o = &opt.Options{
+						DisableLargeBatchTransaction: true,
+						Filter:                       testingBloomFilter,
+					}
 				} else {
 					old := o
 					o = &opt.Options{}
 					*o = *old
-					o.Filter = _bloom_filter
+					o.Filter = testingBloomFilter
 				}
 			case 2:
 				if o == nil {
-					o = &opt.Options{DisableLargeBatchTransaction: true, Compression: opt.NoCompression}
+					o = &opt.Options{
+						DisableLargeBatchTransaction: true,
+						Compression:                  opt.NoCompression,
+					}
 				} else {
 					old := o
 					o = &opt.Options{}
@@ -1103,13 +1109,13 @@
 
 		for cs := 0; cs < n; cs += 10 {
 			for i := 0; i < n; i += 10 {
-				h.sizeAssert("", numKey(i), uint64(s1*i), uint64(s2*i))
-				h.sizeAssert("", numKey(i)+".suffix", uint64(s1*(i+1)), uint64(s2*(i+1)))
-				h.sizeAssert(numKey(i), numKey(i+10), uint64(s1*10), uint64(s2*10))
+				h.sizeAssert("", numKey(i), int64(s1*i), int64(s2*i))
+				h.sizeAssert("", numKey(i)+".suffix", int64(s1*(i+1)), int64(s2*(i+1)))
+				h.sizeAssert(numKey(i), numKey(i+10), int64(s1*10), int64(s2*10))
 			}
 
-			h.sizeAssert("", numKey(50), uint64(s1*50), uint64(s2*50))
-			h.sizeAssert("", numKey(50)+".suffix", uint64(s1*50), uint64(s2*50))
+			h.sizeAssert("", numKey(50), int64(s1*50), int64(s2*50))
+			h.sizeAssert("", numKey(50)+".suffix", int64(s1*50), int64(s2*50))
 
 			h.compactRangeAt(0, numKey(cs), numKey(cs+9))
 		}
@@ -1132,7 +1138,7 @@
 	})
 	defer h.close()
 
-	sizes := []uint64{
+	sizes := []int64{
 		10000,
 		10000,
 		100000,
@@ -1150,7 +1156,7 @@
 	for r := 0; r < 3; r++ {
 		h.reopenDB()
 
-		var x uint64
+		var x int64
 		for i, n := range sizes {
 			y := x
 			if i > 0 {
@@ -2498,7 +2504,7 @@
 			key := []byte(fmt.Sprintf("%09d", k))
 			seq += nSeq - 1
 			for x := uint64(0); x < nSeq; x++ {
-				if err := tw.append(makeIkey(nil, key, seq-x, ktVal), value); err != nil {
+				if err := tw.append(makeInternalKey(nil, key, seq-x, keyTypeVal), value); err != nil {
 					t.Fatal(err)
 				}
 			}
diff --git a/leveldb/db_transaction.go b/leveldb/db_transaction.go
index 5270287..fca8803 100644
--- a/leveldb/db_transaction.go
+++ b/leveldb/db_transaction.go
@@ -108,8 +108,8 @@
 	return nil
 }
 
-func (tr *Transaction) put(kt kType, key, value []byte) error {
-	tr.ikScratch = makeIkey(tr.ikScratch, key, tr.seq+1, kt)
+func (tr *Transaction) put(kt keyType, key, value []byte) error {
+	tr.ikScratch = makeInternalKey(tr.ikScratch, key, tr.seq+1, kt)
 	if tr.mem.Free() < len(tr.ikScratch)+len(value) {
 		if err := tr.flush(); err != nil {
 			return err
@@ -134,7 +134,7 @@
 	if tr.closed {
 		return errTransactionDone
 	}
-	return tr.put(ktVal, key, value)
+	return tr.put(keyTypeVal, key, value)
 }
 
 // Delete deletes the value for the given key.
@@ -148,7 +148,7 @@
 	if tr.closed {
 		return errTransactionDone
 	}
-	return tr.put(ktDel, key, nil)
+	return tr.put(keyTypeDel, key, nil)
 }
 
 // Write apply the given batch to the transaction. The batch will be applied
@@ -167,7 +167,7 @@
 	if tr.closed {
 		return errTransactionDone
 	}
-	return b.decodeRec(func(i int, kt kType, key, value []byte) error {
+	return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
 		return tr.put(kt, key, value)
 	})
 }
diff --git a/leveldb/db_util.go b/leveldb/db_util.go
index 8ec86b2..7fd386c 100644
--- a/leveldb/db_util.go
+++ b/leveldb/db_util.go
@@ -21,14 +21,16 @@
 	NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator
 }
 
-type Sizes []uint64
+// Sizes is list of size.
+type Sizes []int64
 
 // Sum returns sum of the sizes.
-func (p Sizes) Sum() (n uint64) {
-	for _, s := range p {
-		n += s
+func (sizes Sizes) Sum() int64 {
+	var sum int64
+	for _, size := range sizes {
+		sum += size
 	}
-	return n
+	return sum
 }
 
 // Logging.
diff --git a/leveldb/db_write.go b/leveldb/db_write.go
index 5200be6..fb78961 100644
--- a/leveldb/db_write.go
+++ b/leveldb/db_write.go
@@ -281,8 +281,8 @@
 func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool {
 	iter := mem.NewIterator(nil)
 	defer iter.Release()
-	return (max == nil || (iter.First() && icmp.uCompare(max, iKey(iter.Key()).ukey()) >= 0)) &&
-		(min == nil || (iter.Last() && icmp.uCompare(min, iKey(iter.Key()).ukey()) <= 0))
+	return (max == nil || (iter.First() && icmp.uCompare(max, internalKey(iter.Key()).ukey()) >= 0)) &&
+		(min == nil || (iter.Last() && icmp.uCompare(min, internalKey(iter.Key()).ukey()) <= 0))
 }
 
 // CompactRange compacts the underlying DB for the given key range.
diff --git a/leveldb/filter.go b/leveldb/filter.go
index 37c1e14..e961e42 100644
--- a/leveldb/filter.go
+++ b/leveldb/filter.go
@@ -15,7 +15,7 @@
 }
 
 func (f iFilter) Contains(filter, key []byte) bool {
-	return f.Filter.Contains(filter, iKey(key).ukey())
+	return f.Filter.Contains(filter, internalKey(key).ukey())
 }
 
 func (f iFilter) NewGenerator() filter.FilterGenerator {
@@ -27,5 +27,5 @@
 }
 
 func (g iFilterGenerator) Add(key []byte) {
-	g.FilterGenerator.Add(iKey(key).ukey())
+	g.FilterGenerator.Add(internalKey(key).ukey())
 }
diff --git a/leveldb/key.go b/leveldb/key.go
index 1443c75..d0b80aa 100644
--- a/leveldb/key.go
+++ b/leveldb/key.go
@@ -14,26 +14,27 @@
 	"github.com/syndtr/goleveldb/leveldb/storage"
 )
 
-type ErrIkeyCorrupted struct {
+// ErrInternalKeyCorrupted records internal key corruption.
+type ErrInternalKeyCorrupted struct {
 	Ikey   []byte
 	Reason string
 }
 
-func (e *ErrIkeyCorrupted) Error() string {
-	return fmt.Sprintf("leveldb: iKey %q corrupted: %s", e.Ikey, e.Reason)
+func (e *ErrInternalKeyCorrupted) Error() string {
+	return fmt.Sprintf("leveldb: internal key %q corrupted: %s", e.Ikey, e.Reason)
 }
 
-func newErrIkeyCorrupted(ikey []byte, reason string) error {
-	return errors.NewErrCorrupted(storage.FileDesc{}, &ErrIkeyCorrupted{append([]byte{}, ikey...), reason})
+func newErrInternalKeyCorrupted(ikey []byte, reason string) error {
+	return errors.NewErrCorrupted(storage.FileDesc{}, &ErrInternalKeyCorrupted{append([]byte{}, ikey...), reason})
 }
 
-type kType int
+type keyType uint
 
-func (kt kType) String() string {
+func (kt keyType) String() string {
 	switch kt {
-	case ktDel:
+	case keyTypeDel:
 		return "d"
-	case ktVal:
+	case keyTypeVal:
 		return "v"
 	}
 	return "x"
@@ -42,39 +43,39 @@
 // Value types encoded as the last component of internal keys.
 // Don't modify; this value are saved to disk.
 const (
-	ktDel kType = iota
-	ktVal
+	keyTypeDel keyType = iota
+	keyTypeVal
 )
 
-// ktSeek defines the kType that should be passed when constructing an
+// keyTypeSeek defines the keyType that should be passed when constructing an
 // internal key for seeking to a particular sequence number (since we
 // sort sequence numbers in decreasing order and the value type is
 // embedded as the low 8 bits in the sequence number in internal keys,
 // we need to use the highest-numbered ValueType, not the lowest).
-const ktSeek = ktVal
+const keyTypeSeek = keyTypeVal
 
 const (
 	// Maximum value possible for sequence number; the 8-bits are
 	// used by value type, so its can packed together in single
 	// 64-bit integer.
-	kMaxSeq uint64 = (uint64(1) << 56) - 1
+	keyMaxSeq = (uint64(1) << 56) - 1
 	// Maximum value possible for packed sequence number and type.
-	kMaxNum uint64 = (kMaxSeq << 8) | uint64(ktSeek)
+	keyMaxNum = (keyMaxSeq << 8) | uint64(keyTypeSeek)
 )
 
 // Maximum number encoded in bytes.
-var kMaxNumBytes = make([]byte, 8)
+var keyMaxNumBytes = make([]byte, 8)
 
 func init() {
-	binary.LittleEndian.PutUint64(kMaxNumBytes, kMaxNum)
+	binary.LittleEndian.PutUint64(keyMaxNumBytes, keyMaxNum)
 }
 
-type iKey []byte
+type internalKey []byte
 
-func makeIkey(dst, ukey []byte, seq uint64, kt kType) iKey {
-	if seq > kMaxSeq {
+func makeInternalKey(dst, ukey []byte, seq uint64, kt keyType) internalKey {
+	if seq > keyMaxSeq {
 		panic("leveldb: invalid sequence number")
-	} else if kt > ktVal {
+	} else if kt > keyTypeVal {
 		panic("leveldb: invalid type")
 	}
 
@@ -85,63 +86,62 @@
 	}
 	copy(dst, ukey)
 	binary.LittleEndian.PutUint64(dst[len(ukey):], (seq<<8)|uint64(kt))
-	return iKey(dst)
+	return internalKey(dst)
 }
 
-func parseIkey(ik []byte) (ukey []byte, seq uint64, kt kType, err error) {
+func parseInternalKey(ik []byte) (ukey []byte, seq uint64, kt keyType, err error) {
 	if len(ik) < 8 {
-		return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid length")
+		return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid length")
 	}
 	num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
-	seq, kt = uint64(num>>8), kType(num&0xff)
-	if kt > ktVal {
-		return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid type")
+	seq, kt = uint64(num>>8), keyType(num&0xff)
+	if kt > keyTypeVal {
+		return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid type")
 	}
 	ukey = ik[:len(ik)-8]
 	return
 }
 
-func validIkey(ik []byte) bool {
-	_, _, _, err := parseIkey(ik)
+func validInternalKey(ik []byte) bool {
+	_, _, _, err := parseInternalKey(ik)
 	return err == nil
 }
 
-func (ik iKey) assert() {
+func (ik internalKey) assert() {
 	if ik == nil {
-		panic("leveldb: nil iKey")
+		panic("leveldb: nil internalKey")
 	}
 	if len(ik) < 8 {
-		panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid length", []byte(ik), len(ik)))
+		panic(fmt.Sprintf("leveldb: internal key %q, len=%d: invalid length", []byte(ik), len(ik)))
 	}
 }
 
-func (ik iKey) ukey() []byte {
+func (ik internalKey) ukey() []byte {
 	ik.assert()
 	return ik[:len(ik)-8]
 }
 
-func (ik iKey) num() uint64 {
+func (ik internalKey) num() uint64 {
 	ik.assert()
 	return binary.LittleEndian.Uint64(ik[len(ik)-8:])
 }
 
-func (ik iKey) parseNum() (seq uint64, kt kType) {
+func (ik internalKey) parseNum() (seq uint64, kt keyType) {
 	num := ik.num()
-	seq, kt = uint64(num>>8), kType(num&0xff)
-	if kt > ktVal {
-		panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt))
+	seq, kt = uint64(num>>8), keyType(num&0xff)
+	if kt > keyTypeVal {
+		panic(fmt.Sprintf("leveldb: internal key %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt))
 	}
 	return
 }
 
-func (ik iKey) String() string {
+func (ik internalKey) String() string {
 	if ik == nil {
 		return "<nil>"
 	}
 
-	if ukey, seq, kt, err := parseIkey(ik); err == nil {
+	if ukey, seq, kt, err := parseInternalKey(ik); err == nil {
 		return fmt.Sprintf("%s,%s%d", shorten(string(ukey)), kt, seq)
-	} else {
-		return "<invalid>"
 	}
+	return "<invalid>"
 }
diff --git a/leveldb/key_test.go b/leveldb/key_test.go
index a65cffc..2f33ccb 100644
--- a/leveldb/key_test.go
+++ b/leveldb/key_test.go
@@ -15,8 +15,8 @@
 
 var defaultIComparer = &iComparer{comparer.DefaultComparer}
 
-func ikey(key string, seq uint64, kt kType) iKey {
-	return makeIkey(nil, []byte(key), uint64(seq), kt)
+func ikey(key string, seq uint64, kt keyType) internalKey {
+	return makeInternalKey(nil, []byte(key), uint64(seq), kt)
 }
 
 func shortSep(a, b []byte) []byte {
@@ -37,7 +37,7 @@
 	return dst
 }
 
-func testSingleKey(t *testing.T, key string, seq uint64, kt kType) {
+func testSingleKey(t *testing.T, key string, seq uint64, kt keyType) {
 	ik := ikey(key, seq, kt)
 
 	if !bytes.Equal(ik.ukey(), []byte(key)) {
@@ -52,7 +52,7 @@
 		t.Errorf("type does not equal, got %v, want %v", rt, kt)
 	}
 
-	if rukey, rseq, rt, kerr := parseIkey(ik); kerr == nil {
+	if rukey, rseq, rt, kerr := parseInternalKey(ik); kerr == nil {
 		if !bytes.Equal(rukey, []byte(key)) {
 			t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key)
 		}
@@ -67,7 +67,7 @@
 	}
 }
 
-func TestIkey_EncodeDecode(t *testing.T) {
+func TestInternalKey_EncodeDecode(t *testing.T) {
 	keys := []string{"", "k", "hello", "longggggggggggggggggggggg"}
 	seqs := []uint64{
 		1, 2, 3,
@@ -77,8 +77,8 @@
 	}
 	for _, key := range keys {
 		for _, seq := range seqs {
-			testSingleKey(t, key, seq, ktVal)
-			testSingleKey(t, "hello", 1, ktDel)
+			testSingleKey(t, key, seq, keyTypeVal)
+			testSingleKey(t, "hello", 1, keyTypeDel)
 		}
 	}
 }
@@ -89,45 +89,45 @@
 	}
 }
 
-func TestIkeyShortSeparator(t *testing.T) {
+func TestInternalKeyShortSeparator(t *testing.T) {
 	// When user keys are same
-	assertBytes(t, ikey("foo", 100, ktVal),
-		shortSep(ikey("foo", 100, ktVal),
-			ikey("foo", 99, ktVal)))
-	assertBytes(t, ikey("foo", 100, ktVal),
-		shortSep(ikey("foo", 100, ktVal),
-			ikey("foo", 101, ktVal)))
-	assertBytes(t, ikey("foo", 100, ktVal),
-		shortSep(ikey("foo", 100, ktVal),
-			ikey("foo", 100, ktVal)))
-	assertBytes(t, ikey("foo", 100, ktVal),
-		shortSep(ikey("foo", 100, ktVal),
-			ikey("foo", 100, ktDel)))
+	assertBytes(t, ikey("foo", 100, keyTypeVal),
+		shortSep(ikey("foo", 100, keyTypeVal),
+			ikey("foo", 99, keyTypeVal)))
+	assertBytes(t, ikey("foo", 100, keyTypeVal),
+		shortSep(ikey("foo", 100, keyTypeVal),
+			ikey("foo", 101, keyTypeVal)))
+	assertBytes(t, ikey("foo", 100, keyTypeVal),
+		shortSep(ikey("foo", 100, keyTypeVal),
+			ikey("foo", 100, keyTypeVal)))
+	assertBytes(t, ikey("foo", 100, keyTypeVal),
+		shortSep(ikey("foo", 100, keyTypeVal),
+			ikey("foo", 100, keyTypeDel)))
 
 	// When user keys are misordered
-	assertBytes(t, ikey("foo", 100, ktVal),
-		shortSep(ikey("foo", 100, ktVal),
-			ikey("bar", 99, ktVal)))
+	assertBytes(t, ikey("foo", 100, keyTypeVal),
+		shortSep(ikey("foo", 100, keyTypeVal),
+			ikey("bar", 99, keyTypeVal)))
 
 	// When user keys are different, but correctly ordered
-	assertBytes(t, ikey("g", uint64(kMaxSeq), ktSeek),
-		shortSep(ikey("foo", 100, ktVal),
-			ikey("hello", 200, ktVal)))
+	assertBytes(t, ikey("g", uint64(keyMaxSeq), keyTypeSeek),
+		shortSep(ikey("foo", 100, keyTypeVal),
+			ikey("hello", 200, keyTypeVal)))
 
 	// When start user key is prefix of limit user key
-	assertBytes(t, ikey("foo", 100, ktVal),
-		shortSep(ikey("foo", 100, ktVal),
-			ikey("foobar", 200, ktVal)))
+	assertBytes(t, ikey("foo", 100, keyTypeVal),
+		shortSep(ikey("foo", 100, keyTypeVal),
+			ikey("foobar", 200, keyTypeVal)))
 
 	// When limit user key is prefix of start user key
-	assertBytes(t, ikey("foobar", 100, ktVal),
-		shortSep(ikey("foobar", 100, ktVal),
-			ikey("foo", 200, ktVal)))
+	assertBytes(t, ikey("foobar", 100, keyTypeVal),
+		shortSep(ikey("foobar", 100, keyTypeVal),
+			ikey("foo", 200, keyTypeVal)))
 }
 
-func TestIkeyShortestSuccessor(t *testing.T) {
-	assertBytes(t, ikey("g", uint64(kMaxSeq), ktSeek),
-		shortSuccessor(ikey("foo", 100, ktVal)))
-	assertBytes(t, ikey("\xff\xff", 100, ktVal),
-		shortSuccessor(ikey("\xff\xff", 100, ktVal)))
+func TestInternalKeyShortestSuccessor(t *testing.T) {
+	assertBytes(t, ikey("g", uint64(keyMaxSeq), keyTypeSeek),
+		shortSuccessor(ikey("foo", 100, keyTypeVal)))
+	assertBytes(t, ikey("\xff\xff", 100, keyTypeVal),
+		shortSuccessor(ikey("\xff\xff", 100, keyTypeVal)))
 }
diff --git a/leveldb/session.go b/leveldb/session.go
index a8d7b54..b0d3fef 100644
--- a/leveldb/session.go
+++ b/leveldb/session.go
@@ -18,6 +18,7 @@
 	"github.com/syndtr/goleveldb/leveldb/storage"
 )
 
+// ErrManifestCorrupted records manifest corruption.
 type ErrManifestCorrupted struct {
 	Field  string
 	Reason string
@@ -50,8 +51,8 @@
 	manifestWriter storage.Writer
 	manifestFd     storage.FileDesc
 
-	stCompPtrs []iKey   // compaction pointers; need external synchronization
-	stVersion  *version // current version
+	stCompPtrs []internalKey // compaction pointers; need external synchronization
+	stVersion  *version      // current version
 	vmu        sync.Mutex
 }
 
@@ -146,7 +147,7 @@
 		if err == nil {
 			// save compact pointers
 			for _, r := range rec.compPtrs {
-				s.setCompPtr(r.level, iKey(r.ikey))
+				s.setCompPtr(r.level, internalKey(r.ikey))
 			}
 			// commit record to version staging
 			staging.commit(rec)
@@ -154,9 +155,8 @@
 			err = errors.SetFd(err, fd)
 			if strict || !errors.IsCorrupted(err) {
 				return
-			} else {
-				s.logf("manifest error: %v (skipped)", errors.SetFd(err, fd))
 			}
+			s.logf("manifest error: %v (skipped)", errors.SetFd(err, fd))
 		}
 		rec.resetCompPtrs()
 		rec.resetAddedTables()
diff --git a/leveldb/session_compaction.go b/leveldb/session_compaction.go
index 471d68d..089cd00 100644
--- a/leveldb/session_compaction.go
+++ b/leveldb/session_compaction.go
@@ -139,7 +139,7 @@
 	gpi               int
 	seenKey           bool
 	gpOverlappedBytes int64
-	imin, imax        iKey
+	imin, imax        internalKey
 	tPtrs             []int
 	released          bool
 
@@ -242,7 +242,7 @@
 	return true
 }
 
-func (c *compaction) shouldStopBefore(ikey iKey) bool {
+func (c *compaction) shouldStopBefore(ikey internalKey) bool {
 	for ; c.gpi < len(c.gp); c.gpi++ {
 		gp := c.gp[c.gpi]
 		if c.s.icmp.Compare(ikey, gp.imax) <= 0 {
diff --git a/leveldb/session_record.go b/leveldb/session_record.go
index 9802e1a..854e1aa 100644
--- a/leveldb/session_record.go
+++ b/leveldb/session_record.go
@@ -36,15 +36,15 @@
 
 type cpRecord struct {
 	level int
-	ikey  iKey
+	ikey  internalKey
 }
 
 type atRecord struct {
 	level int
 	num   int64
 	size  int64
-	imin  iKey
-	imax  iKey
+	imin  internalKey
+	imax  internalKey
 }
 
 type dtRecord struct {
@@ -96,7 +96,7 @@
 	p.seqNum = num
 }
 
-func (p *sessionRecord) addCompPtr(level int, ikey iKey) {
+func (p *sessionRecord) addCompPtr(level int, ikey internalKey) {
 	p.hasRec |= 1 << recCompPtr
 	p.compPtrs = append(p.compPtrs, cpRecord{level, ikey})
 }
@@ -106,7 +106,7 @@
 	p.compPtrs = p.compPtrs[:0]
 }
 
-func (p *sessionRecord) addTable(level int, num, size int64, imin, imax iKey) {
+func (p *sessionRecord) addTable(level int, num, size int64, imin, imax internalKey) {
 	p.hasRec |= 1 << recAddTable
 	p.addedTables = append(p.addedTables, atRecord{level, num, size, imin, imax})
 }
@@ -299,7 +299,7 @@
 			level := p.readLevel("comp-ptr.level", br)
 			ikey := p.readBytes("comp-ptr.ikey", br)
 			if p.err == nil {
-				p.addCompPtr(level, iKey(ikey))
+				p.addCompPtr(level, internalKey(ikey))
 			}
 		case recAddTable:
 			level := p.readLevel("add-table.level", br)
diff --git a/leveldb/session_record_test.go b/leveldb/session_record_test.go
index ea08340..5af399f 100644
--- a/leveldb/session_record_test.go
+++ b/leveldb/session_record_test.go
@@ -47,10 +47,10 @@
 	for ; i < 4; i++ {
 		test()
 		v.addTable(3, big+300+i, big+400+i,
-			makeIkey(nil, []byte("foo"), uint64(big+500+1), ktVal),
-			makeIkey(nil, []byte("zoo"), uint64(big+600+1), ktDel))
+			makeInternalKey(nil, []byte("foo"), uint64(big+500+1), keyTypeVal),
+			makeInternalKey(nil, []byte("zoo"), uint64(big+600+1), keyTypeDel))
 		v.delTable(4, big+700+i)
-		v.addCompPtr(int(i), makeIkey(nil, []byte("x"), uint64(big+900+1), ktVal))
+		v.addCompPtr(int(i), makeInternalKey(nil, []byte("x"), uint64(big+900+1), keyTypeVal))
 	}
 
 	v.setComparer("foo")
diff --git a/leveldb/session_util.go b/leveldb/session_util.go
index e4fa98d..674182f 100644
--- a/leveldb/session_util.go
+++ b/leveldb/session_util.go
@@ -106,17 +106,17 @@
 }
 
 // Set compaction ptr at given level; need external synchronization.
-func (s *session) setCompPtr(level int, ik iKey) {
+func (s *session) setCompPtr(level int, ik internalKey) {
 	if level >= len(s.stCompPtrs) {
-		newCompPtrs := make([]iKey, level+1)
+		newCompPtrs := make([]internalKey, level+1)
 		copy(newCompPtrs, s.stCompPtrs)
 		s.stCompPtrs = newCompPtrs
 	}
-	s.stCompPtrs[level] = append(iKey{}, ik...)
+	s.stCompPtrs[level] = append(internalKey{}, ik...)
 }
 
 // Get compaction ptr at given level; need external synchronization.
-func (s *session) getCompPtr(level int) iKey {
+func (s *session) getCompPtr(level int) internalKey {
 	if level >= len(s.stCompPtrs) {
 		return nil
 	}
@@ -165,7 +165,7 @@
 	}
 
 	for _, r := range rec.compPtrs {
-		s.setCompPtr(r.level, iKey(r.ikey))
+		s.setCompPtr(r.level, internalKey(r.ikey))
 	}
 }
 
diff --git a/leveldb/table.go b/leveldb/table.go
index 7030b22..a18baae 100644
--- a/leveldb/table.go
+++ b/leveldb/table.go
@@ -24,7 +24,7 @@
 	fd         storage.FileDesc
 	seekLeft   int32
 	size       int64
-	imin, imax iKey
+	imin, imax internalKey
 }
 
 // Returns true if given key is after largest key of this table.
@@ -48,7 +48,7 @@
 }
 
 // Creates new tFile.
-func newTableFile(fd storage.FileDesc, size int64, imin, imax iKey) *tFile {
+func newTableFile(fd storage.FileDesc, size int64, imin, imax internalKey) *tFile {
 	f := &tFile{
 		fd:   fd,
 		size: size,
@@ -136,7 +136,7 @@
 
 // Searches smallest index of tables whose its smallest
 // key is after or equal with given key.
-func (tf tFiles) searchMin(icmp *iComparer, ikey iKey) int {
+func (tf tFiles) searchMin(icmp *iComparer, ikey internalKey) int {
 	return sort.Search(len(tf), func(i int) bool {
 		return icmp.Compare(tf[i].imin, ikey) >= 0
 	})
@@ -144,7 +144,7 @@
 
 // Searches smallest index of tables whose its largest
 // key is after or equal with given key.
-func (tf tFiles) searchMax(icmp *iComparer, ikey iKey) int {
+func (tf tFiles) searchMax(icmp *iComparer, ikey internalKey) int {
 	return sort.Search(len(tf), func(i int) bool {
 		return icmp.Compare(tf[i].imax, ikey) >= 0
 	})
@@ -166,7 +166,7 @@
 	i := 0
 	if len(umin) > 0 {
 		// Find the earliest possible internal key for min.
-		i = tf.searchMax(icmp, makeIkey(nil, umin, kMaxSeq, ktSeek))
+		i = tf.searchMax(icmp, makeInternalKey(nil, umin, keyMaxSeq, keyTypeSeek))
 	}
 	if i >= len(tf) {
 		// Beginning of range is after all files, so no overlap.
@@ -209,7 +209,7 @@
 }
 
 // Returns tables key range.
-func (tf tFiles) getRange(icmp *iComparer) (imin, imax iKey) {
+func (tf tFiles) getRange(icmp *iComparer) (imin, imax internalKey) {
 	for i, t := range tf {
 		if i == 0 {
 			imin, imax = t.imin, t.imax
@@ -231,10 +231,10 @@
 	if slice != nil {
 		var start, limit int
 		if slice.Start != nil {
-			start = tf.searchMax(icmp, iKey(slice.Start))
+			start = tf.searchMax(icmp, internalKey(slice.Start))
 		}
 		if slice.Limit != nil {
-			limit = tf.searchMin(icmp, iKey(slice.Limit))
+			limit = tf.searchMin(icmp, internalKey(slice.Limit))
 		} else {
 			limit = tf.Len()
 		}
@@ -259,7 +259,7 @@
 }
 
 func (a *tFilesArrayIndexer) Search(key []byte) int {
-	return a.searchMax(a.icmp, iKey(key))
+	return a.searchMax(a.icmp, internalKey(key))
 }
 
 func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator {
@@ -393,14 +393,13 @@
 }
 
 // Returns approximate offset of the given key.
-func (t *tOps) offsetOf(f *tFile, key []byte) (offset uint64, err error) {
+func (t *tOps) offsetOf(f *tFile, key []byte) (offset int64, err error) {
 	ch, err := t.open(f)
 	if err != nil {
 		return
 	}
 	defer ch.Release()
-	offset_, err := ch.Value().(*table.Reader).OffsetOf(key)
-	return uint64(offset_), err
+	return ch.Value().(*table.Reader).OffsetOf(key)
 }
 
 // Creates an iterator from the given table.
@@ -515,7 +514,7 @@
 			return
 		}
 	}
-	f = newTableFile(w.fd, int64(w.tw.BytesLen()), iKey(w.first), iKey(w.last))
+	f = newTableFile(w.fd, int64(w.tw.BytesLen()), internalKey(w.first), internalKey(w.last))
 	return
 }
 
diff --git a/leveldb/version.go b/leveldb/version.go
index 50870ed..d274eef 100644
--- a/leveldb/version.go
+++ b/leveldb/version.go
@@ -79,7 +79,7 @@
 	v.s.vmu.Unlock()
 }
 
-func (v *version) walkOverlapping(aux tFiles, ikey iKey, f func(level int, t *tFile) bool, lf func(level int) bool) {
+func (v *version) walkOverlapping(aux tFiles, ikey internalKey, f func(level int, t *tFile) bool, lf func(level int) bool) {
 	ukey := ikey.ukey()
 
 	// Aux level.
@@ -130,7 +130,7 @@
 	}
 }
 
-func (v *version) get(aux tFiles, ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byte, tcomp bool, err error) {
+func (v *version) get(aux tFiles, ikey internalKey, ro *opt.ReadOptions, noValue bool) (value []byte, tcomp bool, err error) {
 	ukey := ikey.ukey()
 
 	var (
@@ -140,7 +140,7 @@
 		// Level-0.
 		zfound bool
 		zseq   uint64
-		zkt    kType
+		zkt    keyType
 		zval   []byte
 	)
 
@@ -176,7 +176,7 @@
 			return false
 		}
 
-		if fukey, fseq, fkt, fkerr := parseIkey(fikey); fkerr == nil {
+		if fukey, fseq, fkt, fkerr := parseInternalKey(fikey); fkerr == nil {
 			if v.s.icmp.uCompare(ukey, fukey) == 0 {
 				// Level <= 0 may overlaps each-other.
 				if level <= 0 {
@@ -188,12 +188,12 @@
 					}
 				} else {
 					switch fkt {
-					case ktVal:
+					case keyTypeVal:
 						value = fval
 						err = nil
-					case ktDel:
+					case keyTypeDel:
 					default:
-						panic("leveldb: invalid iKey type")
+						panic("leveldb: invalid internalKey type")
 					}
 					return false
 				}
@@ -207,12 +207,12 @@
 	}, func(level int) bool {
 		if zfound {
 			switch zkt {
-			case ktVal:
+			case keyTypeVal:
 				value = zval
 				err = nil
-			case ktDel:
+			case keyTypeDel:
 			default:
-				panic("leveldb: invalid iKey type")
+				panic("leveldb: invalid internalKey type")
 			}
 			return false
 		}
@@ -227,19 +227,18 @@
 	return
 }
 
-func (v *version) sampleSeek(ikey iKey) (tcomp bool) {
+func (v *version) sampleSeek(ikey internalKey) (tcomp bool) {
 	var tset *tSet
 
 	v.walkOverlapping(nil, ikey, func(level int, t *tFile) bool {
 		if tset == nil {
 			tset = &tSet{level, t}
 			return true
-		} else {
-			if tset.table.consumeSeek() <= 0 {
-				tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
-			}
-			return false
 		}
+		if tset.table.consumeSeek() <= 0 {
+			tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
+		}
+		return false
 	}, nil)
 
 	return
@@ -286,12 +285,12 @@
 	return 0
 }
 
-func (v *version) offsetOf(ikey iKey) (n uint64, err error) {
+func (v *version) offsetOf(ikey internalKey) (n int64, err error) {
 	for level, tables := range v.levels {
 		for _, t := range tables {
 			if v.s.icmp.Compare(t.imax, ikey) <= 0 {
 				// Entire file is before "ikey", so just add the file size
-				n += uint64(t.size)
+				n += t.size
 			} else if v.s.icmp.Compare(t.imin, ikey) > 0 {
 				// Entire file is after "ikey", so ignore
 				if level > 0 {
@@ -303,12 +302,11 @@
 			} else {
 				// "ikey" falls in the range for this table. Add the
 				// approximate offset of "ikey" within the table.
-				var nn uint64
-				nn, err = v.s.tops.offsetOf(t, ikey)
-				if err != nil {
+				if m, err := v.s.tops.offsetOf(t, ikey); err == nil {
+					n += m
+				} else {
 					return 0, err
 				}
-				n += nn
 			}
 		}
 	}
diff --git a/leveldb/version_test.go b/leveldb/version_test.go
index f157378..a643be1 100644
--- a/leveldb/version_test.go
+++ b/leveldb/version_test.go
@@ -28,9 +28,9 @@
 	v.newStaging()
 
 	tmp := make([]byte, 4)
-	makeIKey := func(i uint64) []byte {
+	mik := func(i uint64) []byte {
 		binary.BigEndian.PutUint32(tmp, uint32(i))
-		return []byte(makeIkey(nil, tmp, 0, ktVal))
+		return []byte(makeInternalKey(nil, tmp, 0, keyTypeVal))
 	}
 
 	for i, x := range []struct {
@@ -152,7 +152,7 @@
 	} {
 		rec := &sessionRecord{}
 		for _, f := range x.add {
-			ik := makeIKey(uint64(f.num))
+			ik := mik(uint64(f.num))
 			rec.addTable(f.level, f.num, 1, ik, ik)
 		}
 		for _, f := range x.del {