28 changes: 24 additions & 4 deletions src/maxtext/trainers/pre_train/train_compile.py
@@ -206,6 +206,16 @@ def is_oom(argv: Sequence[str]) -> bool:
     model,
 ) = get_shaped_inputs(topology_mesh, config)

+# Update params_shardings when shard_optimizer_over_data is enabled (ZeRO-1)
+params_shardings, state_mesh_shardings = sharding.maybe_update_params_sharding_with_opt(config, state_mesh_shardings)
+
+# When ZeRO-1 is enabled, use the original params_shardings for the input shardings
+# but keep the updated state_mesh_shardings for the optimizer state
+if config.shard_optimizer_over_data:
+  input_state_mesh_shardings = state_mesh_shardings.replace(params=params_shardings)
+else:
+  input_state_mesh_shardings = state_mesh_shardings
+
 # Get data sharding
 data_sharding = sharding.get_input_data_sharding(config, topology_mesh)

@@ -217,7 +227,7 @@ def is_oom(argv: Sequence[str]) -> bool:
     static_argnums,
     donate_argnums,
 ) = maxtext_utils.get_functional_train_with_signature(
-    train.train_step, data_sharding, state_mesh_shardings, model, config
+    train.train_step, data_sharding, input_state_mesh_shardings, model, config, params_shardings
 )

 try:
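For orientation, here is a minimal, hypothetical sketch of the distinction these hunks draw: the parameters keep their original sharding for the compiled train step's inputs, while the optimizer state is additionally partitioned over the data axis. The mesh, shapes, and variable names below are illustrative only; this is not MaxText's `maybe_update_params_sharding_with_opt`.

```python
# A minimal sketch of the ZeRO-1 idea, assuming a 1-D "data" mesh and toy shapes.
import jax
import numpy as np
from jax.sharding import Mesh, NamedSharding, PartitionSpec as P

mesh = Mesh(np.array(jax.devices()), ("data",))

# Params keep their original sharding (here: replicated over the data axis),
# so the forward/backward pass sees full parameter values...
params_shardings = NamedSharding(mesh, P())
# ...while the optimizer state is partitioned over the data axis, which is
# what shard_optimizer_over_data buys you (ZeRO-1).
opt_state_shardings = NamedSharding(mesh, P("data"))

params = jax.device_put(np.zeros((8, 128), np.float32), params_shardings)
opt_moments = jax.device_put(np.zeros((8, 128), np.float32), opt_state_shardings)
```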
@@ -272,6 +282,16 @@ def main(argv: Sequence[str]) -> None:
     model,
 ) = get_shaped_inputs(topology_mesh, config)

+# Update params_shardings when shard_optimizer_over_data is enabled (ZeRO-1)
+params_shardings, state_mesh_shardings = sharding.maybe_update_params_sharding_with_opt(config, state_mesh_shardings)
+
+# When ZeRO-1 is enabled, use the original params_shardings for the input shardings
+# but keep the updated state_mesh_shardings for the optimizer state
+if config.shard_optimizer_over_data:
+  input_state_mesh_shardings = state_mesh_shardings.replace(params=params_shardings)
+else:
+  input_state_mesh_shardings = state_mesh_shardings
+
 # Get data sharding
 data_sharding = sharding.get_input_data_sharding(config, topology_mesh)
 if config.enable_diloco:
@@ -283,13 +303,13 @@ def main(argv: Sequence[str]) -> None:
     shaped_train_args = (diloco_state, shaped_train_args[1], shaped_train_args[2])

     # Wrap train_step with diloco
-    train_step_partial = functools.partial(train.train_step, model, config, inner_state_shardings, None)
+    train_step_partial = functools.partial(train.train_step, model, config, inner_state_shardings, params_shardings)
     train_step_fn = diloco.build_diloco_train_step(config, train_step_partial)

     # For DiLoCo, the train_step_fn is already fully wrapped and takes (state, batch, prng)
     func_to_compile = train_step_fn
     func_to_compile.__name__ = "train_step"
-    in_shard = (state_mesh_shardings, data_sharding, None)  # State, batch, rng
+    in_shard = (input_state_mesh_shardings, data_sharding, None)  # State, batch, rng
     out_shard = (state_mesh_shardings, None)  # State, metrics
     static_argnums = ()
     donate_argnums = 0
@@ -302,7 +322,7 @@ def main(argv: Sequence[str]) -> None:
     static_argnums,
     donate_argnums,
 ) = maxtext_utils.get_functional_train_with_signature(
-    train.train_step, data_sharding, state_mesh_shardings, model, config
+    train.train_step, data_sharding, input_state_mesh_shardings, model, config, params_shardings
 )

 # print weights sharding info under debug sharding mode
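To see how `in_shard`, `out_shard`, `static_argnums`, and `donate_argnums` feed JAX's ahead-of-time compilation path, here is a self-contained sketch. Everything in it (the toy `train_step`, shapes, mesh) is assumed for illustration; only the `jax.jit(...).lower(...).compile()` pattern mirrors what the AOT compile driver does.

```python
# Hypothetical sketch of AOT-compiling a train step with explicit shardings.
import jax
import jax.numpy as jnp
import numpy as np
from jax.sharding import Mesh, NamedSharding, PartitionSpec as P

mesh = Mesh(np.array(jax.devices()), ("data",))
data_sharding = NamedSharding(mesh, P("data"))

def train_step(state, batch):
  # Toy step: returns (new state, metric), standing in for train.train_step.
  return state + batch.sum(), batch.mean()

# Abstract shapes stand in for the shaped train args from get_shaped_inputs.
state_shape = jax.ShapeDtypeStruct((), jnp.float32)
batch_shape = jax.ShapeDtypeStruct((8, 16), jnp.float32)

compiled = (
    jax.jit(
        train_step,
        in_shardings=(None, data_sharding),  # like in_shard = (state, batch, rng)
        out_shardings=(None, None),          # like out_shard = (state, metrics)
        donate_argnums=0,                    # donate the state buffers
    )
    .lower(state_shape, batch_shape)         # trace against shapes, no real data
    .compile()                               # produce the executable to pickle
)
```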
21 changes: 21 additions & 0 deletions tests/unit/train_compile_test.py
@@ -1053,3 +1053,24 @@ def test_qwen3_5(self):
"use_tokamax_splash=True",
)
)

@pytest.mark.cpu_only
def test_zero1_optimizer_sharding(self):
"""AOT test for Zero-1 optimizer sharding (shard_optimizer_over_data)"""
temp_dir = gettempdir()
compiled_trainstep_file = os.path.join(temp_dir, "test_zero1_optimizer_sharding.pickle")
train_compile_main(
(
"",
get_test_config_path(),
f"compiled_trainstep_file={compiled_trainstep_file}",
"compile_topology=v5p-8",
"compile_topology_num_slices=1",
"base_emb_dim=256",
"base_mlp_dim=256",
"base_num_decoder_layers=2",
"ici_data_parallelism=4",
"shard_optimizer_over_data=true",
"shard_mode=explicit",
)
)
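A note on the design choice, as far as it can be read from the flags: the test compiles against a simulated `v5p-8` topology rather than running on hardware, which is why it can carry the `cpu_only` marker, and `ici_data_parallelism=4` presumably matches the four chips of a v5p-8 slice, so the AOT-compiled train step exercises a genuine 4-way data-axis shard of the optimizer state under `shard_mode=explicit`.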