Merged
71 commits
9c49f10
Initial plan
Copilot Aug 29, 2025
c9342d4
feat(paddle): Add type hints to Paddle backend and enable ANN rule fo…
Copilot Aug 29, 2025
061098f
Merge remote-tracking branch 'origin/devel' into copilot/fix-4939
Copilot Sep 17, 2025
c41d4e9
Revert "Merge remote-tracking branch 'origin/devel' into copilot/fix-…
njzjz Sep 20, 2025
a132dbf
Merge branch 'devel' into copilot/fix-4939
njzjz Sep 20, 2025
71b8fc4
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Sep 20, 2025
0079e2a
feat(paddle): Enable ANN rule for train/wrapper.py and add type annot…
Copilot Sep 20, 2025
8bbdd1f
feat(paddle): Enable ANN rule for additional utility files and comple…
Copilot Sep 20, 2025
f9196c5
feat(paddle): Enable ANN rule for loss directory base files
Copilot Sep 20, 2025
731b3fa
feat(paddle): Enable ANN rule for 16 additional files across multiple…
Copilot Sep 20, 2025
49c51a7
feat(paddle): Enable ANN rule for 4 additional files and add type ann…
Copilot Sep 20, 2025
e120f91
Push previous changes
Copilot Sep 20, 2025
f673c2e
feat(paddle): Enable ANN rule for 2 additional model files with type …
Copilot Sep 20, 2025
178fe36
feat(paddle): Enable ANN rule for 3 additional utility files with typ…
Copilot Sep 21, 2025
dca9a9e
fix(paddle): Remove dpmodel configuration line to align with devel br…
Copilot Sep 21, 2025
aeb3340
Merge branch 'master' into copilot/fix-4939
njzjz Jan 21, 2026
deac791
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jan 21, 2026
8d2f27a
fix: Revert dpmodel changes, remove temp files, and restore Paddle AN…
Copilot Jan 21, 2026
afd9115
trigger: Push previous changes
Copilot Jan 21, 2026
8828c46
chore: Remove trigger file
Copilot Jan 21, 2026
c8a67eb
fix: Update dpmodel from master and fix all ruff errors
Copilot Jan 21, 2026
8c56fc3
Merge branch 'master' into copilot/fix-4939
njzjz Jan 21, 2026
322b482
revert unrelated changes
njzjz Jan 21, 2026
6d405f4
remove backup file
njzjz Jan 21, 2026
59347fd
revert pyproject.toml
njzjz Jan 21, 2026
541cbe8
revert tests
njzjz Jan 21, 2026
f1326ae
enable ANN for pd
njzjz Jan 21, 2026
1bdd73d
autofix
njzjz Jan 21, 2026
a5cb0f7
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Jan 21, 2026
252f89c
fix: Add ANN to Paddle backend ignore list to align with gradual enab…
Copilot Jan 21, 2026
acaadf0
fix(pd): add type annotations to utils module (110 total errors fixed)
Copilot Jan 21, 2026
ce0a7c6
fix(paddle): Add type annotations to deepmd/pd/loss/ener.py (32 ANN e…
Copilot Jan 21, 2026
22b619c
fix(paddle): Add type annotations to deepmd/pd/model/task/fitting.py …
Copilot Jan 21, 2026
a3b8fde
fix(paddle): Add return type annotation to deepmd/pd/infer/deep_eval.…
Copilot Jan 21, 2026
98a3acd
fix(paddle): Add type annotations to deepmd/pd/model/model/make_model…
Copilot Jan 21, 2026
4374dc7
fix(paddle): Add type annotations to deepmd/pd/model/descriptor/se_at…
Copilot Jan 21, 2026
7d22e42
fix(paddle): Add type annotations to deepmd/pd/model/descriptor/se_a.…
Copilot Jan 21, 2026
4b80137
fix(paddle): Add type annotations to deepmd/pd/model/descriptor/se_t_…
Copilot Jan 22, 2026
eb4efbc
fix(paddle): Add return type annotation to deepmd/pd/model/descriptor…
Copilot Jan 22, 2026
19b6fae
fix(paddle): Add return type annotation to deepmd/pd/utils/neighbor_s…
Copilot Jan 22, 2026
e8b19fa
fix(paddle): Add type annotation to kwargs in deepmd/pd/model/task/en…
Copilot Jan 22, 2026
a84b062
fix(paddle): Add type annotations to deepmd/pd/model/atomic_model/bas…
Copilot Jan 22, 2026
793e40e
fix(paddle): Add return type annotation to deepmd/pd/model/atomic_mod…
Copilot Jan 22, 2026
c83e374
fix(paddle): Add type annotations to deepmd/pd/model/task/invar_fitti…
Copilot Jan 22, 2026
55b35e5
fix(paddle): Add type annotations to deepmd/pd/model/model/frozen.py …
Copilot Jan 22, 2026
2e42085
fix(paddle): Add type annotations to deepmd/pd/utils/stat.py (3 ANN e…
Copilot Jan 22, 2026
1777dde
fix(paddle): Add type annotations to deepmd/pd/model/descriptor/se_at…
Copilot Jan 22, 2026
474e4c5
fix(paddle): Add type annotations to training.py (21 ANN errors fixed)
Copilot Jan 22, 2026
211d394
fix(paddle): Add type annotations to repformers.py (21 ANN errors fixed)
Copilot Jan 22, 2026
f2481dc
fix(paddle): Add type annotations to repflows.py (21 ANN errors fixed)
Copilot Jan 22, 2026
732331c
fix(paddle): Add type annotations to dpa1.py (13 ANN errors fixed)
Copilot Jan 22, 2026
a1ecc18
fix(paddle): Add type annotations to dpa2.py (12 ANN errors fixed)
Copilot Jan 22, 2026
744735f
fix(paddle): Add type annotations to dpa3.py (12 ANN errors fixed)
Copilot Jan 22, 2026
4ed7efc
fix(paddle): Add type annotations to descriptor.py (12 ANN errors fixed)
Copilot Jan 22, 2026
4012692
fix(paddle): Add type annotations to utils/nlist.py (4 ANN errors fixed)
Copilot Jan 22, 2026
bdb8a16
fix(paddle): Add type annotations to model/model.py (4 ANN errors fixed)
Copilot Jan 22, 2026
e5b9a6e
fix(paddle): Add type annotations to transform_output.py (4 ANN error…
Copilot Jan 22, 2026
7d32e69
fix(paddle): Add type annotations to utils/utils.py (6 ANN errors fixed)
Copilot Jan 22, 2026
45f1f57
fix(paddle): Add type annotations to repformer_layer.py (5 ANN errors…
Copilot Jan 22, 2026
f76472e
fix(paddle): Add type annotations to env_mat.py (9 ANN errors fixed)
Copilot Jan 22, 2026
b11fa8a
fix(paddle): Add type annotations to model/__init__.py (11 ANN errors…
Copilot Jan 22, 2026
bcf758d
fix(paddle): Add type annotations to ener_model.py (9 ANN errors fixed)
Copilot Jan 22, 2026
4f4eed4
fix(paddle): Address code review feedback - Part 1
Copilot Jan 22, 2026
68f9f3c
fix(paddle): Address code review feedback - Part 2
Copilot Jan 22, 2026
b9ab901
Update deepmd/pd/loss/loss.py
njzjz Jan 22, 2026
e80e7d9
fix(paddle): Address additional code review feedback
Copilot Jan 22, 2026
5b4c024
Co-authored-by: njzjz <9496702+njzjz@users.noreply.github.com>
Copilot Jan 22, 2026
9b07e06
fix(paddle): Change object to Any in _can_be_converted_to_float
Copilot Jan 22, 2026
734734a
chore: Remove .dummy_file_for_commit
Copilot Jan 22, 2026
79c84b4
fix(paddle): Revert strict=True to strict=False in zip() calls
Copilot Jan 22, 2026
ae00680
fix(paddle): Remove strict parameter from zip() calls
Copilot Jan 22, 2026
2 changes: 1 addition & 1 deletion deepmd/pd/entrypoints/main.py
@@ -574,7 +574,7 @@ def change_bias(
log.info(f"Saved model to {output_path}")


def main(args: list[str] | argparse.Namespace | None = None):
def main(args: list[str] | argparse.Namespace | None = None) -> None:
if not isinstance(args, argparse.Namespace):
FLAGS = parse_args(args=args)
else:
2 changes: 1 addition & 1 deletion deepmd/pd/infer/deep_eval.py
@@ -474,7 +474,7 @@ def _eval_model(
fparam: np.ndarray | None,
aparam: np.ndarray | None,
request_defs: list[OutputVariableDef],
):
) -> tuple[np.ndarray, ...]:
if not self.static_model:
model = self.dp.to(DEVICE)
prec = NP_PRECISION_DICT[RESERVED_PRECISION_DICT[GLOBAL_PD_FLOAT_PRECISION]]
6 changes: 3 additions & 3 deletions deepmd/pd/infer/inference.py
@@ -23,9 +23,9 @@
class Tester:
def __init__(
self,
model_ckpt,
head=None,
):
model_ckpt: str,
head: str | None = None,
) -> None:
"""Construct a DeePMD tester.

Args:
57 changes: 39 additions & 18 deletions deepmd/pd/loss/ener.py
@@ -1,4 +1,7 @@
# SPDX-License-Identifier: LGPL-3.0-or-later
from typing import (
Any,
)

import paddle
import paddle.nn.functional as F
@@ -20,7 +23,9 @@
)


def custom_huber_loss(predictions, targets, delta=1.0):
def custom_huber_loss(
predictions: paddle.Tensor, targets: paddle.Tensor, delta: float = 1.0
) -> paddle.Tensor:
error = targets - predictions
abs_error = paddle.abs(error)
quadratic_loss = 0.5 * paddle.pow(error, 2)
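For context (not part of the diff): custom_huber_loss implements the standard piecewise Huber loss, quadratic for errors within ±delta and linear beyond it. A minimal NumPy reference of the same definition, for comparison only:

import numpy as np

def huber_reference(
    predictions: np.ndarray, targets: np.ndarray, delta: float = 1.0
) -> np.ndarray:
    # Quadratic inside the +/-delta band, linear outside -- the same
    # piecewise rule as the paddle implementation above.
    error = targets - predictions
    abs_error = np.abs(error)
    quadratic = 0.5 * error**2
    linear = delta * (abs_error - 0.5 * delta)
    return np.where(abs_error <= delta, quadratic, linear)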
@@ -32,13 +37,13 @@ def custom_huber_loss(predictions, targets, delta=1.0):
class EnergyStdLoss(TaskLoss):
def __init__(
self,
starter_learning_rate=1.0,
start_pref_e=0.0,
limit_pref_e=0.0,
start_pref_f=0.0,
limit_pref_f=0.0,
start_pref_v=0.0,
limit_pref_v=0.0,
starter_learning_rate: float = 1.0,
start_pref_e: float = 0.0,
limit_pref_e: float = 0.0,
start_pref_f: float = 0.0,
limit_pref_f: float = 0.0,
start_pref_v: float = 0.0,
limit_pref_v: float = 0.0,
start_pref_ae: float = 0.0,
limit_pref_ae: float = 0.0,
start_pref_pf: float = 0.0,
@@ -49,10 +54,10 @@ def __init__(
limit_pref_gf: float = 0.0,
numb_generalized_coord: int = 0,
use_l1_all: bool = False,
inference=False,
use_huber=False,
huber_delta=0.01,
**kwargs,
inference: bool = False,
use_huber: bool = False,
huber_delta: float = 0.01,
**kwargs: Any,
) -> None:
r"""Construct a layer to compute loss on energy, force and virial.

@@ -146,7 +151,15 @@ def __init__(
"Huber loss is not implemented for force with atom_pref, generalized force and relative force. "
)

def forward(self, input_dict, model, label, natoms, learning_rate, mae=False):
def forward(
self,
input_dict: dict[str, paddle.Tensor],
model: paddle.nn.Layer,
label: dict[str, paddle.Tensor],
natoms: int,
learning_rate: float,
mae: bool = False,
) -> tuple[dict[str, paddle.Tensor], paddle.Tensor, dict[str, paddle.Tensor]]:
"""Return loss on energy and force.

Parameters
@@ -535,10 +548,10 @@ def deserialize(cls, data: dict) -> "TaskLoss":
class EnergyHessianStdLoss(EnergyStdLoss):
def __init__(
self,
start_pref_h=0.0,
limit_pref_h=0.0,
**kwargs,
):
start_pref_h: float = 0.0,
limit_pref_h: float = 0.0,
**kwargs: Any,
) -> None:
r"""Enable the layer to compute loss on hessian.

Parameters
@@ -556,7 +569,15 @@ def __init__(
self.start_pref_h = start_pref_h
self.limit_pref_h = limit_pref_h

def forward(self, input_dict, model, label, natoms, learning_rate, mae=False):
def forward(
self,
input_dict: dict[str, paddle.Tensor],
model: paddle.nn.Layer,
label: dict[str, paddle.Tensor],
natoms: int,
learning_rate: float,
mae: bool = False,
) -> tuple[dict[str, paddle.Tensor], paddle.Tensor, dict[str, paddle.Tensor]]:
model_pred, loss, more_loss = super().forward(
input_dict, model, label, natoms, learning_rate, mae=mae
)
15 changes: 13 additions & 2 deletions deepmd/pd/loss/loss.py
@@ -3,6 +3,9 @@
ABC,
abstractmethod,
)
from typing import (
Any,
)

import paddle

@@ -15,11 +18,19 @@


class TaskLoss(paddle.nn.Layer, ABC, make_plugin_registry("loss")):
def __init__(self, **kwargs):
def __init__(self, **kwargs: Any) -> None:
"""Construct loss."""
super().__init__()

def forward(self, input_dict, model, label, natoms, learning_rate):
def forward(
self,
input_dict: dict[str, paddle.Tensor],
model: paddle.nn.Layer,
label: dict[str, paddle.Tensor],
natoms: int,
learning_rate: float,
mae: bool | None = None,
) -> paddle.Tensor:
"""Return loss ."""
raise NotImplementedError

4 changes: 2 additions & 2 deletions deepmd/pd/model/atomic_model/base_atomic_model.py
@@ -578,7 +578,7 @@ def _default_bias(self) -> paddle.Tensor:
device=device
)

def _default_std(self):
def _default_std(self) -> paddle.Tensor:
ntypes = self.get_ntypes()
return paddle.ones([self.n_out, ntypes, self.max_out_size], dtype=dtype).to(
device=device
@@ -626,7 +626,7 @@ def _store_out_stat(
paddle.assign(out_bias_data, self.out_bias)
paddle.assign(out_std_data, self.out_std)

def get_ntypes(self):
def get_ntypes(self) -> int:
return len(self.type_map)

def get_buffer_ntypes(self) -> paddle.Tensor:
2 changes: 1 addition & 1 deletion deepmd/pd/model/atomic_model/dp_atomic_model.py
@@ -385,7 +385,7 @@ def compute_or_load_stat(
stat_file_path /= " ".join(self.type_map)

@functools.lru_cache
def wrapped_sampler():
def wrapped_sampler() -> list[dict]:
sampled = sampled_func()
if self.pair_excl is not None:
pair_exclude_types = self.pair_excl.get_exclude_types()
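Aside (not part of the diff): decorating a zero-argument closure with functools.lru_cache makes the expensive sampling run at most once while a plain callable is still passed to the statistics code. A minimal illustration of the caching behavior:

import functools

@functools.lru_cache
def expensive() -> tuple[int, ...]:
    print("computed once")
    return (1, 2, 3)

expensive()  # prints "computed once"
expensive()  # second call is served from the cache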
18 changes: 17 additions & 1 deletion deepmd/pd/model/atomic_model/energy_atomic_model.py
@@ -1,4 +1,14 @@
# SPDX-License-Identifier: LGPL-3.0-or-later
from typing import (
Any,
)

from deepmd.pd.model.descriptor.base_descriptor import (
BaseDescriptor,
)
from deepmd.pd.model.task.base_fitting import (
BaseFitting,
)
from deepmd.pd.model.task.ener import (
EnergyFittingNet,
InvarFitting,
@@ -10,7 +20,13 @@


class DPEnergyAtomicModel(DPAtomicModel):
def __init__(self, descriptor, fitting, type_map, **kwargs):
def __init__(
self,
descriptor: BaseDescriptor,
fitting: BaseFitting,
type_map: list[str],
**kwargs: Any,
) -> None:
assert isinstance(fitting, EnergyFittingNet) or isinstance(
fitting, InvarFitting
)
17 changes: 11 additions & 6 deletions deepmd/pd/model/descriptor/descriptor.py
@@ -8,6 +8,7 @@
Callable,
)
from typing import (
Any,
NoReturn,
)

@@ -43,7 +44,7 @@ class DescriptorBlock(paddle.nn.Layer, ABC, make_plugin_registry("DescriptorBloc

local_cluster = False

def __new__(cls, *args, **kwargs):
def __new__(cls, *args: Any, **kwargs: Any) -> "DescriptorBlock":
if cls is DescriptorBlock:
try:
descrpt_type = kwargs["type"]
@@ -126,7 +127,9 @@ def get_stats(self) -> dict[str, StatItem]:
"""Get the statistics of the descriptor."""
raise NotImplementedError

def share_params(self, base_class, shared_level, resume=False) -> None:
def share_params(
self, base_class: Any, shared_level: int, resume: bool = False
) -> None:
"""
Share the parameters of self to the base_class with shared_level during multitask training.
If not start from checkpoint (resume is False),
@@ -180,7 +183,7 @@ def forward(
extended_atype_embd: paddle.Tensor | None = None,
mapping: paddle.Tensor | None = None,
type_embedding: paddle.Tensor | None = None,
):
) -> paddle.Tensor:
"""Calculate DescriptorBlock."""
pass

@@ -194,14 +197,16 @@


def make_default_type_embedding(
ntypes,
):
ntypes: int,
) -> tuple[TypeEmbedNet, dict]:
aux = {}
aux["tebd_dim"] = 8
return TypeEmbedNet(ntypes, aux["tebd_dim"]), aux


def extend_descrpt_stat(des, type_map, des_with_stat=None) -> None:
def extend_descrpt_stat(
des: Any, type_map: list[str], des_with_stat: Any = None
) -> None:
r"""
Extend the statistics of a descriptor block with types from newly provided `type_map`.

25 changes: 15 additions & 10 deletions deepmd/pd/model/descriptor/dpa1.py
@@ -2,6 +2,9 @@
from collections.abc import (
Callable,
)
from typing import (
Any,
)

import paddle

@@ -228,8 +231,8 @@ def __init__(
exclude_types: list[tuple[int, int]] = [],
env_protection: float = 0.0,
scaling_factor: int = 1.0,
normalize=True,
temperature=None,
normalize: bool = True,
temperature: float | None = None,
concat_output_tebd: bool = True,
trainable: bool = True,
trainable_ln: bool = True,
Expand All @@ -242,7 +245,7 @@ def __init__(
use_tebd_bias: bool = False,
type_map: list[str] | None = None,
# not implemented
spin=None,
spin: Any = None,
type: str | None = None,
) -> None:
super().__init__()
@@ -397,7 +400,7 @@ def get_env_protection(self) -> float:
"""Returns the protection of building environment matrix."""
return self.se_atten.get_env_protection()

def share_params(self, base_class, shared_level, resume=False) -> None:
def share_params(
self, base_class: Any, shared_level: int, resume: bool = False
) -> None:
"""
Share the parameters of self to the base_class with shared_level during multitask training.
If not start from checkpoint (resume is False),
@@ -425,18 +430,18 @@ def share_params(self, base_class, shared_level, resume=False) -> None:
raise NotImplementedError

@property
def dim_out(self):
def dim_out(self) -> int:
return self.get_dim_out()

@property
def dim_emb(self):
def dim_emb(self) -> int:
return self.get_dim_emb()

def compute_input_stats(
self,
merged: Callable[[], list[dict]] | list[dict],
path: DPPath | None = None,
):
) -> None:
"""
Compute the input statistics (e.g. mean and stddev) for the descriptors from packed data.

@@ -469,7 +474,7 @@ def get_stat_mean_and_stddev(self) -> tuple[paddle.Tensor, paddle.Tensor]:
return self.se_atten.mean, self.se_atten.stddev

def change_type_map(
self, type_map: list[str], model_with_new_type_stat=None
self, type_map: list[str], model_with_new_type_stat: Any = None
) -> None:
"""Change the type related params to new ones, according to `type_map` and the original one in the model.
If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types.
@@ -569,7 +574,7 @@ def deserialize(cls, data: dict) -> "DescrptDPA1":
data["use_tebd_bias"] = True
obj = cls(**data)

def t_cvt(xx):
def t_cvt(xx: Any) -> paddle.Tensor:
return paddle.to_tensor(xx, dtype=obj.se_atten.prec).to(device=env.DEVICE)

obj.type_embedding.embedding = TypeEmbedNetConsistent.deserialize(
@@ -620,7 +625,7 @@ def forward(
nlist: paddle.Tensor,
mapping: paddle.Tensor | None = None,
comm_dict: list[paddle.Tensor] | None = None,
):
) -> paddle.Tensor:
"""Compute the descriptor.

Parameters