Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions core/blockchain.go
Original file line number Diff line number Diff line change
Expand Up @@ -978,6 +978,8 @@ func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
func (bc *BlockChain) writeHeadBlock(block *types.Block) {
// Add the block to the canonical chain number scheme and mark as the head
batch := bc.db.NewBatch()
defer batch.Close()

rawdb.WriteHeadHeaderHash(batch, block.Hash())
rawdb.WriteHeadFastBlockHash(batch, block.Hash())
rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())
Expand Down Expand Up @@ -1437,6 +1439,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
// Note all the components of block (td, hash->number map, header, body, receipts)
// should be written atomically. BlockBatch is used for containing all components.
blockBatch := bc.db.NewBatch()
defer blockBatch.Close()
rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd)
rawdb.WriteBlock(blockBatch, block)
rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts)
Expand Down Expand Up @@ -2286,6 +2289,8 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Header) error {
// Delete useless indexes right now which includes the non-canonical
// transaction indexes, canonical chain indexes which above the head.
batch := bc.db.NewBatch()
defer batch.Close()

for _, tx := range types.HashDifference(deletedTxs, rebirthTxs) {
rawdb.DeleteTxLookupEntry(batch, tx)
}
Expand Down
5 changes: 5 additions & 0 deletions core/rawdb/table.go
Original file line number Diff line number Diff line change
Expand Up @@ -260,6 +260,11 @@ func (b *tableBatch) Reset() {
b.batch.Reset()
}

// Close closes the batch and releases all associated resources.
// It simply forwards to the underlying (un-prefixed) batch, since the
// table wrapper itself holds no resources of its own.
func (b *tableBatch) Close() {
b.batch.Close()
}

// tableReplayer is a wrapper around a batch replayer which truncates
// the added prefix.
type tableReplayer struct {
Expand Down
2 changes: 2 additions & 0 deletions core/state/statedb.go
Original file line number Diff line number Diff line change
Expand Up @@ -1095,6 +1095,8 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er
return common.Hash{}, err
}

code.Close()

// Write the account trie changes, measuring the amount of wasted time
// The onleaf func is called _serially_, so we can reuse the same account
// for unmarshalling every time.
Expand Down
3 changes: 3 additions & 0 deletions ctxcdb/batch.go
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,9 @@ type Batch interface {

// Replay replays the batch contents.
Replay(w KeyValueWriter) error

// Close closes the batch and releases all associated resources.
Close()
}

// Batcher wraps the NewBatch method of a backing data store.
Expand Down
3 changes: 3 additions & 0 deletions ctxcdb/leveldb/leveldb.go
Original file line number Diff line number Diff line change
Expand Up @@ -492,6 +492,9 @@ func (b *batch) Replay(w ctxcdb.KeyValueWriter) error {
return b.b.Replay(&replayer{writer: w})
}

// Close closes the batch and releases all associated resources.
// The leveldb batch is a plain in-memory buffer reclaimed by the GC,
// so there is nothing to release here; this exists to satisfy the
// ctxcdb.Batch interface.
func (b *batch) Close() {}

// replayer is a small wrapper to implement the correct replay methods.
type replayer struct {
writer ctxcdb.KeyValueWriter
Expand Down
3 changes: 3 additions & 0 deletions ctxcdb/memorydb/memorydb.go
Original file line number Diff line number Diff line change
Expand Up @@ -350,6 +350,9 @@ func (b *batch) Replay(w ctxcdb.KeyValueWriter) error {
return nil
}

// Close closes the batch and releases all associated resources.
// The in-memory batch holds no external resources, so this is a no-op
// provided only to satisfy the ctxcdb.Batch interface.
func (b *batch) Close() {}

// iterator can walk over the (potentially partial) keyspace of a memory key
// value store. Internally it is a deep copy of the entire iterated state,
// sorted by keys.
Expand Down
6 changes: 6 additions & 0 deletions ctxcdb/pebble/pebble.go
Original file line number Diff line number Diff line change
Expand Up @@ -719,6 +719,12 @@ func (b *batch) Replay(w ctxcdb.KeyValueWriter) error {
}
}

// Close closes the batch and releases all associated resources. After it is
// closed, any subsequent operations on this batch are undefined.
func (b *batch) Close() {
// NOTE(review): pebble's Batch.Close appears to return an error which is
// discarded here — confirm this is acceptable (interface signature takes
// no return).
b.b.Close()
}

// pebbleIterator is a wrapper of underlying iterator in storage engine.
// The purpose of this structure is to implement the missing APIs.
//
Expand Down
1 change: 1 addition & 0 deletions trie/database.go
Original file line number Diff line number Diff line change
Expand Up @@ -611,6 +611,7 @@ func (db *Database) Cap(limit common.StorageSize) error {
log.Error("Failed to write flush list to disk", "err", err)
return err
}
batch.Close()
// Write successful, clear out the flushed data

for db.oldest != oldest {
Expand Down
1 change: 1 addition & 0 deletions trie/trie_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -776,6 +776,7 @@ func (b *spongeBatch) ValueSize() int { return 100 }
func (b *spongeBatch) Write() error { return nil }
func (b *spongeBatch) Reset() {}
func (b *spongeBatch) Replay(w ctxcdb.KeyValueWriter) error { return nil }
// Close is a no-op: the test sponge holds no resources; present only to
// satisfy the ctxcdb.Batch interface.
func (b *spongeBatch) Close() {}

// TestCommitSequence tests that the trie.Commit operation writes the elements
// of the trie in the expected order.
Expand Down
Loading