Merged
2 changes: 1 addition & 1 deletion block/internal/da/async_block_retriever_test.go
@@ -88,7 +88,7 @@ func TestAsyncBlockRetriever_FetchAndCache(t *testing.T) {
  var err error

  // Poll for up to 2 seconds for the block to be cached
- for i := 0; i < 40; i++ {
+ for range 40 {
  block, err = fetcher.GetCachedBlock(ctx, 100)
  require.NoError(t, err)
  if block != nil {
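Note: the loop rewrites throughout this PR use Go 1.22's range-over-integer form, where "for i := range n" yields i = 0 through n-1 and "for range n" simply repeats n times when the index is unused. A minimal standalone sketch of the equivalence (illustrative only, not code from this repo):

package main

import "fmt"

func main() {
	// Pre-1.22 counted loop.
	for i := 0; i < 3; i++ {
		fmt.Println("classic", i)
	}
	// Go 1.22+: range over an integer yields 0, 1, 2.
	for i := range 3 {
		fmt.Println("range", i)
	}
	// Index unused: just repeat three times.
	for range 3 {
		fmt.Println("tick")
	}
}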
10 changes: 5 additions & 5 deletions block/internal/da/client_test.go
@@ -185,7 +185,7 @@ func TestClient_Get(t *testing.T) {

  blobs := make([]*blobrpc.Blob, 3)
  ids := make([]datypes.ID, 3)
- for i := 0; i < 3; i++ {
+ for i := range 3 {
  blb, err := blobrpc.NewBlobV0(ns, []byte{byte(i)})
  require.NoError(t, err)
  blobs[i] = blb
@@ -203,7 +203,7 @@ func TestClient_Get(t *testing.T) {
  result, err := cl.Get(context.Background(), ids, nsBz)
  require.NoError(t, err)
  require.Len(t, result, 3)
- for i := 0; i < 3; i++ {
+ for i := range 3 {
  assert.Equal(t, blobs[i].Data(), result[i])
  }
  })
@@ -235,7 +235,7 @@ func TestClient_GetProofs(t *testing.T) {
  blobModule := mocks.NewMockBlobModule(t)

  ids := make([]datypes.ID, 3)
- for i := 0; i < 3; i++ {
+ for i := range 3 {
  blb, _ := blobrpc.NewBlobV0(ns, []byte{byte(i)})
  ids[i] = blobrpc.MakeID(uint64(200+i), blb.Commitment)
  blobModule.On("GetProof", mock.Anything, uint64(200+i), ns, blb.Commitment).Return(&blobrpc.Proof{}, nil).Once()
@@ -263,7 +263,7 @@ func TestClient_Validate(t *testing.T) {

  ids := make([]datypes.ID, 3)
  proofs := make([]datypes.Proof, 3)
- for i := 0; i < 3; i++ {
+ for i := range 3 {
  blb, _ := blobrpc.NewBlobV0(ns, []byte{byte(i)})
  ids[i] = blobrpc.MakeID(uint64(300+i), blb.Commitment)
  proofBz, _ := json.Marshal(&blobrpc.Proof{})
@@ -281,7 +281,7 @@ func TestClient_Validate(t *testing.T) {
  results, err := cl.Validate(context.Background(), ids, proofs, nsBz)
  require.NoError(t, err)
  require.Len(t, results, 3)
- for i := 0; i < 3; i++ {
+ for i := range 3 {
  assert.Equal(t, i%2 == 0, results[i])
  }
  })
2 changes: 1 addition & 1 deletion block/internal/executing/executor_restart_test.go
@@ -316,7 +316,7 @@ func TestExecutor_RestartNoPendingHeader(t *testing.T) {

  lastStateRoot := initStateRoot
  for i := range numBlocks {
- newStateRoot := []byte(fmt.Sprintf("new_root_%d", i+1))
+ newStateRoot := fmt.Appendf(nil, "new_root_%d", i+1)
  mockExec1.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, gen.InitialHeight+uint64(i), mock.AnythingOfType("time.Time"), lastStateRoot).
  Return(newStateRoot, nil).Once()
  lastStateRoot = newStateRoot

Contributor comment on the fmt.Appendf line: This new syntax will take some time for me to get used to.
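For context on that comment: fmt.Appendf, added in Go 1.19, formats directly into a byte slice, so passing nil produces a fresh []byte without the intermediate string that []byte(fmt.Sprintf(...)) builds. A minimal standalone sketch of the equivalence (illustrative only, not code from this repo):

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// Old form: format to a string, then copy it into a byte slice.
	before := []byte(fmt.Sprintf("new_root_%d", 1))
	// New form: format straight into a byte slice (nil means allocate a new one).
	after := fmt.Appendf(nil, "new_root_%d", 1)
	fmt.Println(bytes.Equal(before, after)) // true
}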
2 changes: 1 addition & 1 deletion block/internal/submitting/da_submitter.go
@@ -692,7 +692,7 @@ func submitToDA[T any](
  func limitBatchBySize[T any](items []T, marshaled [][]byte, maxBytes int) ([]T, [][]byte, error) {
  total := 0
  count := 0
- for i := 0; i < len(items); i++ {
+ for i := range items {
  sz := len(marshaled[i])
  if sz > maxBytes {
  if i == 0 {
2 changes: 1 addition & 1 deletion block/internal/syncing/da_retriever_test.go
@@ -64,7 +64,7 @@ func makeSignedDataBytesWithTime(t *testing.T, chainID string, height uint64, pr
  d := &types.Data{Metadata: &types.Metadata{ChainID: chainID, Height: height, Time: timestamp}}
  if txs > 0 {
  d.Txs = make(types.Txs, txs)
- for i := 0; i < txs; i++ {
+ for i := range txs {
  d.Txs[i] = types.Tx([]byte{byte(height), byte(i)})
  }
  }
4 changes: 2 additions & 2 deletions block/internal/syncing/syncer_benchmark_test.go
@@ -122,7 +122,7 @@ func newBenchFixture(b *testing.B, totalHeights uint64, shuffledTx bool, daDelay

  // prepare height events to emit
  heightEvents := make([]common.DAHeightEvent, totalHeights)
- for i := uint64(0); i < totalHeights; i++ {
+ for i := range totalHeights {
  blockHeight, daHeight := i+gen.InitialHeight, i+daHeightOffset
  _, sh := makeSignedHeaderBytes(b, gen.ChainID, blockHeight, addr, pub, signer, nil, nil, nil)
  d := &types.Data{Metadata: &types.Metadata{ChainID: gen.ChainID, Height: blockHeight, Time: uint64(time.Now().UnixNano())}}
@@ -137,7 +137,7 @@ func newBenchFixture(b *testing.B, totalHeights uint64, shuffledTx bool, daDelay
  // Mock DA retriever to emit exactly totalHeights events, then HFF and cancel
  daR := NewMockDARetriever(b)
  daR.On("PopPriorityHeight").Return(uint64(0)).Maybe()
- for i := uint64(0); i < totalHeights; i++ {
+ for i := range totalHeights {
  daHeight := i + daHeightOffset
  daR.On("RetrieveFromDA", mock.Anything, daHeight).
  Run(func(_ mock.Arguments) {
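Worth noting for the two hunks above: when ranging over an integer variable, the loop variable takes that variable's type, so "for i := range totalHeights" yields a uint64 index and expressions like i+gen.InitialHeight compile without a conversion. A tiny standalone sketch (illustrative only):

package main

import "fmt"

func main() {
	var total uint64 = 3
	var offset uint64 = 100
	// i has type uint64 here, matching total, so uint64 arithmetic just works.
	for i := range total {
		fmt.Println(i + offset) // 100, 101, 102
	}
}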
12 changes: 6 additions & 6 deletions block/internal/syncing/syncer_forced_inclusion_test.go
@@ -137,7 +137,7 @@ func TestUpdateDynamicGracePeriod_IncreaseOnHighFullness(t *testing.T) {
  s.blockFullnessEMA.Store(&initialEMA)

  // Update multiple times with very high fullness to build up the effect
- for i := 0; i < 20; i++ {
+ for range 20 {
  s.updateDynamicGracePeriod(0.95)
  }

@@ -169,7 +169,7 @@ func TestUpdateDynamicGracePeriod_DecreaseOnLowFullness(t *testing.T) {
  s.blockFullnessEMA.Store(&initialEMA)

  // Update multiple times with low fullness to build up the effect
- for i := 0; i < 20; i++ {
+ for range 20 {
  s.updateDynamicGracePeriod(0.2)
  }

@@ -201,7 +201,7 @@ func TestUpdateDynamicGracePeriod_ClampToMin(t *testing.T) {
  s.blockFullnessEMA.Store(&initialEMA)

  // Update many times with very low fullness - should eventually clamp to min
- for i := 0; i < 50; i++ {
+ for range 50 {
  s.updateDynamicGracePeriod(0.0)
  }

@@ -228,7 +228,7 @@ func TestUpdateDynamicGracePeriod_ClampToMax(t *testing.T) {
  s.blockFullnessEMA.Store(&initialEMA)

  // Update many times with very high fullness - should eventually clamp to max
- for i := 0; i < 50; i++ {
+ for range 50 {
  s.updateDynamicGracePeriod(1.0)
  }

@@ -316,7 +316,7 @@ func TestDynamicGracePeriod_Integration_HighCongestion(t *testing.T) {
  s.blockFullnessEMA.Store(&initialEMA)

  // Simulate processing many blocks with very high fullness (above threshold)
- for i := 0; i < 50; i++ {
+ for range 50 {
  s.updateDynamicGracePeriod(0.95)
  }

@@ -349,7 +349,7 @@ func TestDynamicGracePeriod_Integration_LowCongestion(t *testing.T) {
  s.blockFullnessEMA.Store(&initialEMA)

  // Simulate processing many blocks with very low fullness (below threshold)
- for i := 0; i < 50; i++ {
+ for range 50 {
  s.updateDynamicGracePeriod(0.1)
  }
8 changes: 3 additions & 5 deletions block/internal/syncing/syncer_test.go
@@ -94,7 +94,7 @@ func makeData(chainID string, height uint64, txs int) *types.Data {
  }
  if txs > 0 {
  d.Txs = make(types.Txs, txs)
- for i := 0; i < txs; i++ {
+ for i := range txs {
  d.Txs[i] = types.Tx([]byte{byte(height), byte(i)})
  }
  }
@@ -918,8 +918,7 @@ func TestSyncer_Stop_SkipsDrainOnCriticalError(t *testing.T) {
  s.hasCriticalError.Store(true)

  // Start a no-op goroutine tracked by the WaitGroup so Stop() doesn't block on wg.Wait()
- s.wg.Add(1)
- go func() { defer s.wg.Done() }()
+ s.wg.Go(func() {})

  // Stop must complete quickly — no drain, no ExecuteTxs calls
  done := make(chan struct{})
@@ -991,8 +990,7 @@ func TestSyncer_Stop_DrainWorksWithoutCriticalError(t *testing.T) {
  s.heightInCh <- evt

  // hasCriticalError is false (default) — drain should process events including ExecuteTxs
- s.wg.Add(1)
- go func() { defer s.wg.Done() }()
+ s.wg.Go(func() {})

  _ = s.Stop()
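The s.wg.Go rewrites above rely on sync.WaitGroup.Go, added in Go 1.25, which bundles the Add(1), the goroutine launch, and the deferred Done into a single call. A minimal standalone sketch of the two patterns (illustrative only):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup

	// Pre-1.25 pattern: explicit Add/Done bookkeeping around the goroutine.
	wg.Add(1)
	go func() {
		defer wg.Done()
		fmt.Println("classic pattern")
	}()

	// Go 1.25+: wg.Go wraps Add(1), the goroutine launch, and the deferred Done.
	wg.Go(func() {
		fmt.Println("wg.Go pattern")
	})

	wg.Wait()
}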
2 changes: 1 addition & 1 deletion node/helpers_test.go
@@ -282,7 +282,7 @@ func newTestP2PClient(config evconfig.Config, privKey crypto.PrivKey, ds datasto
  func createNodeContexts(n int) ([]context.Context, []context.CancelFunc) {
  ctxs := make([]context.Context, n)
  cancels := make([]context.CancelFunc, n)
- for i := 0; i < n; i++ {
+ for i := range n {
  ctx, cancel := context.WithCancel(context.Background())
  ctxs[i] = ctx
  cancels[i] = cancel
2 changes: 1 addition & 1 deletion pkg/da/jsonrpc/header.go
@@ -14,7 +14,7 @@ type Header struct {
  Height uint64 `json:"height,string,omitempty"`
  LastHash []byte `json:"last_header_hash,omitempty"`
  ChainID string `json:"chain_id,omitempty"`
- BlockTime time.Time `json:"time,omitempty"`
+ BlockTime time.Time `json:"time"`
  }

  // RawHeader contains the raw tendermint header fields.
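A note on this tag change (the diff itself does not state the motivation, so this is an inference): encoding/json's omitempty never applies to struct-typed fields such as time.Time, so the option was a no-op and dropping it should not change the marshaled output. A standalone sketch showing that behavior under that assumption:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type withOmitEmpty struct {
	T time.Time `json:"time,omitempty"` // omitempty has no effect on struct types
}

type plain struct {
	T time.Time `json:"time"`
}

func main() {
	a, _ := json.Marshal(withOmitEmpty{})
	b, _ := json.Marshal(plain{})
	fmt.Println(string(a)) // {"time":"0001-01-01T00:00:00Z"}
	fmt.Println(string(b)) // {"time":"0001-01-01T00:00:00Z"}
}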
14 changes: 7 additions & 7 deletions pkg/da/selector_test.go
@@ -29,7 +29,7 @@ func TestRoundRobinSelector_SingleAddress(t *testing.T) {
  selector := NewRoundRobinSelector(addresses)

  // All calls should return the same address
- for i := 0; i < 10; i++ {
+ for range 10 {
  addr := selector.Next()
  assert.Equal(t, "celestia1abc123", addr, "should always return the single address")
  }
@@ -69,11 +69,11 @@ func TestRoundRobinSelector_Concurrent(t *testing.T) {
  var wg sync.WaitGroup

  // Launch concurrent goroutines
- for i := 0; i < numGoroutines; i++ {
+ for i := range numGoroutines {
  wg.Add(1)
  go func(start int) {
  defer wg.Done()
- for j := 0; j < numCallsPerGoroutine; j++ {
+ for j := range numCallsPerGoroutine {
  addr := selector.Next()
  results[start+j] = addr
  }
@@ -110,7 +110,7 @@ func TestRoundRobinSelector_WrapAround(t *testing.T) {

  // Test wrap around behavior with large number of calls
  seen := make(map[string]int)
- for i := 0; i < 1000; i++ {
+ for range 1000 {
  addr := selector.Next()
  seen[addr]++
  }
@@ -124,7 +124,7 @@ func TestNoOpSelector(t *testing.T) {
  selector := NewNoOpSelector()

  // Should always return empty string
- for i := 0; i < 10; i++ {
+ for range 10 {
  addr := selector.Next()
  assert.Empty(t, addr, "NoOpSelector should always return empty string")
  }
@@ -136,9 +136,9 @@ func TestNoOpSelector_Concurrent(t *testing.T) {
  const numGoroutines = 50
  var wg sync.WaitGroup

- for i := 0; i < numGoroutines; i++ {
+ for range numGoroutines {
  wg.Go(func() {
- for j := 0; j < 100; j++ {
+ for range 100 {
  addr := selector.Next()
  assert.Empty(t, addr)
  }
2 changes: 1 addition & 1 deletion pkg/p2p/client_test.go
@@ -296,7 +296,7 @@ func TestClientInfoMethods(t *testing.T) {
  var hosts []host.Host
  var err error

- for i := 0; i < 3; i++ {
+ for range 3 {
  nodeKey, e := key.GenerateNodeKey()
  require.NoError(e)
  h, e := mn.AddPeer(nodeKey.PrivKey, multiaddr.StringCast("/ip4/127.0.0.1/tcp/0"))
2 changes: 1 addition & 1 deletion pkg/p2p/utils_test.go
@@ -70,7 +70,7 @@ func startTestNetwork(ctx context.Context, t *testing.T, n int, conf map[int]hos
  require := require.New(t)

  mnet := mocknet.New()
- for i := 0; i < n; i++ {
+ for i := range n {
  var descr hostDescr
  if d, ok := conf[i]; ok {
  descr = d
2 changes: 1 addition & 1 deletion pkg/raft/node.go
@@ -306,7 +306,7 @@ func (n *Node) SetApplyCallback(ch chan<- RaftApplyMsg) {
  }

  // Apply implements raft.FSM
- func (f *FSM) Apply(log *raft.Log) interface{} {
+ func (f *FSM) Apply(log *raft.Log) any {
  var state RaftBlockState
  if err := proto.Unmarshal(log.Data, &state); err != nil {
  f.logger.Error().Err(err).Msg("unmarshal block state")
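The signature change above is purely cosmetic: any has been a predeclared alias for interface{} since Go 1.18, so the compiled method is identical and it still satisfies raft.FSM. A tiny standalone sketch (illustrative only):

package main

import "fmt"

// any is an alias for interface{}, so these two functions have the same type.
func oldStyle(v interface{}) string { return fmt.Sprint(v) }
func newStyle(v any) string         { return fmt.Sprint(v) }

func main() {
	fmt.Println(oldStyle(42) == newStyle(42)) // true
}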
2 changes: 1 addition & 1 deletion pkg/rpc/server/da_visualization_test.go
@@ -73,7 +73,7 @@ func TestRecordSubmissionMemoryLimit(t *testing.T) {
  server := NewDAVisualizationServer(da, logger, true)

  // Add 101 submissions (more than the limit of 100)
- for i := 0; i < 101; i++ {
+ for i := range 101 {
  result := &coreda.ResultSubmit{
  BaseResult: coreda.BaseResult{
  Code: coreda.StatusSuccess,
4 changes: 2 additions & 2 deletions pkg/sequencers/common/checkpoint_test.go
@@ -120,7 +120,7 @@ func TestCheckpointStore_ConcurrentAccess(t *testing.T) {

  // Test concurrent reads
  done := make(chan bool, 10)
- for i := 0; i < 10; i++ {
+ for range 10 {
  go func() {
  defer func() { done <- true }()
  loaded, err := store.Load(ctx)
@@ -129,7 +129,7 @@ func TestCheckpointStore_ConcurrentAccess(t *testing.T) {
  }()
  }

- for i := 0; i < 10; i++ {
+ for range 10 {
  <-done
  }
  }
12 changes: 6 additions & 6 deletions pkg/sequencers/single/queue_test.go
@@ -24,7 +24,7 @@ import (
  // createTestBatch creates a batch with dummy transactions for testing
  func createTestBatch(t *testing.T, txCount int) coresequencer.Batch {
  txs := make([][]byte, txCount)
- for i := 0; i < txCount; i++ {
+ for i := range txCount {
  txs[i] = []byte{byte(i), byte(i + 1), byte(i + 2)}
  }
  return coresequencer.Batch{Transactions: txs}
@@ -353,7 +353,7 @@ func TestConcurrency(t *testing.T) {
  addWg := new(sync.WaitGroup)
  addWg.Add(numOperations)

- for i := 0; i < numOperations; i++ {
+ for i := range numOperations {
  go func(index int) {
  defer addWg.Done()
  batch := createTestBatch(t, index%10+1) // 1-10 transactions
@@ -377,7 +377,7 @@ func TestConcurrency(t *testing.T) {
  nextCount := numOperations / 2
  nextWg.Add(nextCount)

- for i := 0; i < nextCount; i++ {
+ for range nextCount {
  go func() {
  defer nextWg.Done()
  batch, err := bq.Next(ctx)
@@ -499,7 +499,7 @@ func TestBatchQueue_QueueLimit_WithNext(t *testing.T) {
  ctx := context.Background()

  // Fill the queue to capacity
- for i := 0; i < maxSize; i++ {
+ for i := range maxSize {
  batch := createTestBatch(t, i+1)
  err := bq.AddBatch(ctx, batch)
  if err != nil {
@@ -562,11 +562,11 @@ func TestBatchQueue_QueueLimit_Concurrency(t *testing.T) {
  var errorCount int64

  // Start multiple workers trying to add batches concurrently
- for i := 0; i < numWorkers; i++ {
+ for i := range numWorkers {
  wg.Add(1)
  go func(workerID int) {
  defer wg.Done()
- for j := 0; j < batchesPerWorker; j++ {
+ for j := range batchesPerWorker {
  batch := createTestBatch(t, workerID*batchesPerWorker+j+1)
  err := bq.AddBatch(ctx, batch)
  if err != nil {
6 changes: 3 additions & 3 deletions pkg/sequencers/single/sequencer_test.go
@@ -775,7 +775,7 @@ func TestSequencer_DAFailureAndQueueThrottling_Integration(t *testing.T) {

  // Phase 1: Normal operation - send some batches successfully
  t.Log("Phase 1: Normal operation")
- for i := 0; i < queueSize; i++ {
+ for i := range queueSize {
  batch := createTestBatch(t, i+1)
  req := coresequencer.SubmitBatchTxsRequest{
  Id: []byte("test-chain"),
@@ -825,7 +825,7 @@ func TestSequencer_DAFailureAndQueueThrottling_Integration(t *testing.T) {

  // Add batches until queue is full again
  batchesAdded := 0
- for i := 0; i < 10; i++ { // Try to add many batches
+ for i := range 10 { // Try to add many batches
  batch := createTestBatch(t, 100+i)
  req := coresequencer.SubmitBatchTxsRequest{
  Id: []byte("test-chain"),
@@ -1299,7 +1299,7 @@ func TestSequencer_GetNextBatch_GasFilteringPreservesUnprocessedTxs(t *testing.T

  // Process multiple batches to consume all forced txs
  // Use maxBytes=120 to fetch only 2 txs at a time (each is 50 bytes)
- for i := 0; i < 5; i++ { // Max 5 iterations to prevent infinite loop
+ for i := range 5 { // Max 5 iterations to prevent infinite loop
  req := coresequencer.GetNextBatchRequest{
  Id: []byte("test-gas-preserve"),
  MaxBytes: 120, // Limits to ~2 txs per batch
4 changes: 2 additions & 2 deletions pkg/signer/file/file_signer_test.go
@@ -391,9 +391,9 @@ func TestConcurrentAccess(t *testing.T) {
  errChan := make(chan error, numGoRoutines*messageCount)
  doneChan := make(chan struct{})

- for i := 0; i < numGoRoutines; i++ {
+ for i := range numGoRoutines {
  go func(routineNum int) {
- for j := 0; j < messageCount; j++ {
+ for j := range messageCount {
  // Create a unique message per goroutine and iteration
  message := fmt.Appendf(nil, "Message %d-%d", routineNum, j)