nemo/lightning/pytorch/strategies/fsdp2_strategy.py (13 changes: 5 additions & 8 deletions)

@@ -401,19 +401,16 @@ def training_step(self, batch, batch_idx=None) -> STEP_OUTPUT:
         else:
             loss = self.lightning_module.training_step(batch, batch_idx)
 
-        self.lightning_module.log(
-            'global_step',
-            self.trainer.global_step,
+        # Log global_step and step together using log_dict to prevent W&B from
+        # incrementing its internal _step counter multiple times per training step.
+        # This fixes issue #15204 where W&B showed ~2x the actual global_step.
+        self.lightning_module.log_dict(
+            {'global_step': self.trainer.global_step, 'step': self.trainer.global_step},
             prog_bar=True,
             rank_zero_only=True,
             batch_size=1,
         )
 
-        self.lightning_module.log(
-            'step',
-            self.trainer.global_step,
-        )
-
         return loss
 
     @override
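For context, the pattern this change adopts can be reduced to a minimal, self-contained LightningModule. This is an illustrative sketch, not code from the PR; it assumes `lightning.pytorch >= 2.0`, and the toy model and metric values are placeholders:

```python
import torch
import lightning.pytorch as pl


class ExampleModule(pl.LightningModule):
    """Illustrative module logging both step metrics in one log_dict() call."""

    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(4, 1)

    def training_step(self, batch, batch_idx):
        loss = self.layer(batch).mean()
        # A single log_dict() call hands both metrics to the attached logger
        # (e.g. WandbLogger) as one payload, instead of one payload per log() call.
        self.log_dict(
            {"global_step": self.trainer.global_step, "step": self.trainer.global_step},
            prog_bar=True,
            rank_zero_only=True,
            batch_size=1,
        )
        return loss

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.1)
```

The strategies in this PR make the equivalent call from the strategy's own `training_step`, via `self.lightning_module.log_dict(...)`, as shown in the hunks.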
nemo/lightning/pytorch/strategies/fsdp_strategy.py (13 changes: 5 additions & 8 deletions)

@@ -181,18 +181,15 @@ def training_step(self, batch, batch_idx=None) -> STEP_OUTPUT:
         with self.precision_plugin.train_step_context():
             loss, reduced = self._step_proxy("training", batch, batch_idx)
 
-            self.lightning_module.log(
-                'global_step',
-                self.trainer.global_step,
+            # Log global_step and step together using log_dict to prevent W&B from
+            # incrementing its internal _step counter multiple times per training step.
+            # This fixes issue #15204 where W&B showed ~2x the actual global_step.
+            self.lightning_module.log_dict(
+                {'global_step': self.trainer.global_step, 'step': self.trainer.global_step},
                 prog_bar=True,
                 rank_zero_only=True,
                 batch_size=1,
             )
-
-            self.lightning_module.log(
-                'step',
-                self.trainer.global_step,
-            )
             self.lightning_module.log(
                 'reduced_train_loss', reduced['avg'], prog_bar=True, rank_zero_only=True, batch_size=1
             )
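The W&B behavior the new comment refers to can be reproduced with the raw client: each `wandb.log()` call that does not pass an explicit `step` advances the run's internal step counter, so two calls per training step ends up near 2x the true step count. A minimal sketch (the project name is illustrative; offline mode avoids any network dependency):

```python
import wandb

run = wandb.init(project="step-counter-demo", mode="offline")

for global_step in range(100):
    # Two separate calls: W&B's internal step counter advances twice
    # per iteration, ending at roughly 2x the number of training steps.
    run.log({"global_step": global_step})
    run.log({"step": global_step})

# Combining both keys into a single call advances the counter once:
#     run.log({"global_step": global_step, "step": global_step})

run.finish()
```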
nemo/lightning/pytorch/strategies/megatron_strategy.py (13 changes: 5 additions & 8 deletions)

@@ -800,18 +800,15 @@ def training_step(self, dataloader_iter, *args: Any, **kwargs: Any) -> STEP_OUTPUT:
             if (torch.is_tensor(self.trainer.global_step) and self.trainer.global_step.is_cuda)
             else torch.tensor(self.trainer.global_step, pin_memory=True).to("cuda", non_blocking=True)
         )
-        self.lightning_module.log(
-            "global_step",
-            global_step,
+        # Log global_step and step together using log_dict to prevent W&B from
+        # incrementing its internal _step counter multiple times per training step.
+        # This fixes issue #15204 where W&B showed ~2x the actual global_step.
+        self.lightning_module.log_dict(
+            {"global_step": global_step, "step": global_step},
             prog_bar=True,
             batch_size=1,
         )
 
-        self.lightning_module.log(
-            "step",
-            global_step,
-        )
-
         if self.log_memory_usage:
             # maximum GPU memory that has been managed by the caching allocator
             max_memory_reserved = torch.cuda.max_memory_reserved()
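Separate from the log_dict change itself, the context lines in this hunk show how megatron_strategy stages `global_step` on the GPU. The same pattern as a standalone sketch (the helper name is hypothetical; running it requires a CUDA device):

```python
import torch


def global_step_on_cuda(global_step) -> torch.Tensor:
    """Return global_step as a CUDA tensor, copying asynchronously if needed."""
    if torch.is_tensor(global_step) and global_step.is_cuda:
        # Already on the GPU: reuse it as-is.
        return global_step
    # Stage the scalar in pinned (page-locked) host memory so the
    # host-to-device copy can run asynchronously (non_blocking=True).
    return torch.tensor(global_step, pin_memory=True).to("cuda", non_blocking=True)
```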