Merged
24 commits
0d4ae79
[Frontend] Use ops instead of raw assembly code
YWHyuk Sep 9, 2025
bea9bd2
[Test] Add matmul vector fusion case
YWHyuk Sep 9, 2025
837b062
[Frontend] Fix ops conversion
YWHyuk Sep 9, 2025
a33659a
[Frontend] Use custom malloc in the validation wrapper code
YWHyuk Sep 10, 2025
4e2d0a0
[Device] Add missing operations
YWHyuk Sep 10, 2025
6e70edc
[Frontend] Add typecasting for logical operation
YWHyuk Sep 10, 2025
54f450a
[Device] register amp
YWHyuk Sep 10, 2025
8985ab8
[Frontend+Test] Support scatter pattern with a test case
YWHyuk Sep 12, 2025
1c2c8bf
[Fix] minor bugs
YWHyuk Dec 5, 2025
1895958
[Fix] Fix the acceess to wrong variable
YWHyuk Dec 8, 2025
cd14109
[Log] Add print lock to prevent log crash
YWHyuk Dec 8, 2025
5fe87e9
[Device] Add custom zero_, zeors_like
YWHyuk Dec 8, 2025
db18cbd
[Frontend/Spike] Use 64byte aligned buffer size
YWHyuk Dec 8, 2025
1152428
[Refactor] Seperate OpOverrides
YWHyuk Dec 8, 2025
8452f5c
[Test] Add Llama1&2 test cases
YWHyuk Dec 8, 2025
00cd8c7
[TOGSim] Add error handling
YWHyuk Dec 8, 2025
a8d96cd
[Scheduler] Use given config file for compilations
YWHyuk Dec 8, 2025
8aac3ab
[Fix/ops] Fix wrong implementation of sigmoid
YWHyuk Dec 9, 2025
fd6a846
[Tests] Use manual mask for Llama
YWHyuk Dec 9, 2025
dea7f47
[TOGSim] Use YAML instead of json
YWHyuk Dec 9, 2025
d66df91
[Frontend] Use YAML config file instead of json
YWHyuk Dec 9, 2025
dce58d0
[Test] Change attention masek for Llama
YWHyuk Dec 9, 2025
1c2ab36
[Autotune] Fix autotune log path
YWHyuk Dec 9, 2025
20af550
[Fix] Fix codegen error in ops.select
YWHyuk Dec 9, 2025
21 changes: 21 additions & 0 deletions .github/workflows/pytorchsim_test.yml
@@ -663,6 +663,27 @@ jobs:
-e vpu_spad_size_kb_per_lane="${{ inputs.spad_size }}" \
${{ inputs.image_name }} python3 PyTorchSim/tests/test_scheduler.py

test_llama:
name: Run test_llama1&2
runs-on: self-hosted
steps:
- name: Log in to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}

- name: Run test_llama.py
run: |
echo "Running test_llama.py"
docker run --rm \
-v /tmp/torchsim-ci/${GITHUB_SHA}:/dump \
-e TORCHSIM_DUMP_PATH=/dump \
-e vpu_num_lanes="${{ inputs.vector_lane }}" \
-e vpu_spad_size_kb_per_lane="${{ inputs.spad_size }}" \
${{ inputs.image_name }} python3 PyTorchSim/tests/Llama/test_llama.py

test_accuracy:
name: Run test_accuracy
runs-on: self-hosted
4 changes: 2 additions & 2 deletions PyTorchSimFrontend/extension_codecache.py
@@ -278,15 +278,15 @@ def dummy_simulator(*args, **kwargs):
vectorlane_size=vectorlane_size, spad_info=spad_info,
silent_mode=silent_mode)
if not extension_config.pytorchsim_timing_mode:
return
return [float("inf")]

onnx_path = os.path.join(result_path, "tile_graph.onnx")
attribute_path = os.path.join(runtime_path, "attribute")
togsim_path = os.path.join(extension_config.CONFIG_TORCHSIM_DIR, "TOGSim")
TOGSim = TOGSimulator(togsim_path, extension_config.CONFIG_TOGSIM_CONFIG)
TOGSim.vectorlane_size = vectorlane_size
attribute_path = TOGSim.create_attribute_file(attribute_path, args, loop_size=loop_size)
result_path = TOGSim.simulation(onnx_path, attribute_path, silent_mode=silent_mode)
result_path = TOGSim.simulation(onnx_path, attribute_path, silent_mode=silent_mode, autotune_mode=autotune)
result = TOGSimulator.get_result_from_file(result_path)
return result
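
The two edits above change dummy_simulator so that, when timing mode is disabled, it returns [float("inf")] instead of falling through with None, and they forward the autotune flag to TOGSim.simulation as autotune_mode. A minimal sketch of why a sentinel latency list is convenient for a tuning loop is shown below; pick_best_mapping, candidates, and simulate are illustrative assumptions, not PyTorchSim's actual autotuner API.

# Minimal sketch (assumed names, not the real PyTorchSim autotuner).
# A simulate() that yields [float("inf")] when timing is unavailable still
# compares cleanly against real latencies, whereas a bare None return would
# break the comparison below.
def pick_best_mapping(candidates, simulate):
    best_mapping, best_latency = None, float("inf")
    for mapping in candidates:
        latency = simulate(mapping)[0]  # always a one-element list, possibly [inf]
        if latency < best_latency:
            best_mapping, best_latency = mapping, latency
    return best_mapping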

32 changes: 17 additions & 15 deletions PyTorchSimFrontend/extension_config.py
@@ -1,7 +1,7 @@
import os
import sys
import importlib
import json
import yaml

CONFIG_TORCHSIM_DIR = os.environ.get('TORCHSIM_DIR', default='/workspace/PyTorchSim')
CONFIG_GEM5_PATH = os.environ.get('GEM5_PATH', default="/workspace/gem5/build/RISCV/gem5.opt")
@@ -13,51 +13,53 @@
def __getattr__(name):
# TOGSim config
config_path = os.environ.get('TOGSIM_CONFIG',
default=f"{CONFIG_TORCHSIM_DIR}/configs/systolic_ws_128x128_c1_simple_noc_tpuv3.json")
default=f"{CONFIG_TORCHSIM_DIR}/configs/systolic_ws_128x128_c1_simple_noc_tpuv3.yml")
if name == "CONFIG_TOGSIM_CONFIG":
return config_path
config_json = json.load(open(config_path, 'r'))

with open(config_path, 'r') as f:
config_yaml = yaml.safe_load(f)

# Hardware info config
if name == "vpu_num_lanes":
return config_json["vpu_num_lanes"]
return config_yaml["vpu_num_lanes"]
if name == "CONFIG_SPAD_INFO":
return {
"spad_vaddr" : 0xD0000000,
"spad_paddr" : 0x2000000000,
"spad_size" : config_json["vpu_spad_size_kb_per_lane"] << 10 # Note: spad size per lane
"spad_size" : config_yaml["vpu_spad_size_kb_per_lane"] << 10 # Note: spad size per lane
}

if name == "CONFIG_PRECISION":
return 4 # 32bit
if name == "CONFIG_NUM_CORES":
return config_json["num_cores"]
return config_yaml["num_cores"]
if name == "vpu_vector_length_bits":
return config_json["vpu_vector_length_bits"]
return config_yaml["vpu_vector_length_bits"]

if name == "pytorchsim_functional_mode":
return config_json['pytorchsim_functional_mode']
return config_yaml['pytorchsim_functional_mode']
if name == "pytorchsim_timing_mode":
return config_json['pytorchsim_timing_mode']
return config_yaml['pytorchsim_timing_mode']

# Mapping strategy
if name == "codegen_mapping_strategy":
codegen_mapping_strategy = config_json["codegen_mapping_strategy"]
codegen_mapping_strategy = config_yaml["codegen_mapping_strategy"]
assert(codegen_mapping_strategy in ["heuristic", "autotune", "external-then-heuristic", "external-then-autotune"]), "Invalid mapping strategy!"
return codegen_mapping_strategy

if name == "codegen_external_mapping_file":
return config_json["codegen_external_mapping_file"]
return config_yaml["codegen_external_mapping_file"]

# Autotune config
if name == "codegen_autotune_max_retry":
return config_json["codegen_autotune_max_retry"]
return config_yaml["codegen_autotune_max_retry"]
if name == "codegen_autotune_template_topk":
return config_json["codegen_autotune_template_topk"]
return config_yaml["codegen_autotune_template_topk"]

# Compiler Optimization
if name == "codegen_compiler_optimization":
opt_level = config_json["codegen_compiler_optimization"]
opt_level = config_yaml["codegen_compiler_optimization"]
valid_opts = {
"fusion",
"reduction_epilogue",
@@ -67,7 +69,7 @@ def __getattr__(name):
"multi_tile_conv",
"subtile"
}
if opt_level == "all" or opt_level is "none":
if opt_level == "all" or opt_level == "none":
pass
elif isinstance(opt_level, list):
# Check if provided list contains only valid options
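
The hunks above swap json.load for yaml.safe_load throughout __getattr__, so the options are now read from a YAML file and the default config path ends in .yml rather than .json. A minimal sketch of a config in the new format is shown below, restricted to a subset of the keys read in this diff; the concrete values are illustrative assumptions, not the shipped systolic_ws_128x128_c1_simple_noc_tpuv3 configuration.

# Minimal sketch: an example config in the new YAML format, loaded the same
# way extension_config.py now loads it. Values are assumptions for illustration only.
import yaml

example_config = """
vpu_num_lanes: 128
vpu_spad_size_kb_per_lane: 256
vpu_vector_length_bits: 512
num_cores: 1
pytorchsim_functional_mode: true
pytorchsim_timing_mode: true
codegen_mapping_strategy: heuristic
codegen_compiler_optimization: all
"""

config = yaml.safe_load(example_config)
spad_bytes_per_lane = config["vpu_spad_size_kb_per_lane"] << 10  # KB -> bytes, as in CONFIG_SPAD_INFO
print(spad_bytes_per_lane)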