22 commits
6ec4f62
refactor: separate statistic computation
tristan-f-r Oct 10, 2025
9987189
fix: correct tuple assumption
tristan-f-r Oct 10, 2025
25eef5e
fix: stably use graph statistic values
tristan-f-r Oct 10, 2025
cb373c1
style: fmt
tristan-f-r Oct 30, 2025
47a9e26
Merge branch 'main' into lazy-stats
tristan-f-r Oct 30, 2025
898d568
style: specify zip strict
tristan-f-r Oct 30, 2025
c675ece
fix: make undirected for determining number of connected components
tristan-f-r Nov 6, 2025
3c81d05
Merge branch 'main' into lazy-stats
tristan-f-r Jan 13, 2026
1ca730e
feat: snakemake-based summary generation
tristan-f-r Jan 13, 2026
d67186d
fix(Snakefile): use parse_output for edgelist parsing
tristan-f-r Jan 13, 2026
fd483c3
fix: parse edgelist with rank, embed header skip inside from_edgelist
tristan-f-r Jan 13, 2026
fd5046f
style: fmt
tristan-f-r Jan 13, 2026
79cf748
chore: mention statistics_files param
tristan-f-r Jan 13, 2026
339d915
Merge branch 'hash' into lazy-stats
tristan-f-r Jan 31, 2026
85e0ea8
docs: more info on summary & statistics
tristan-f-r Feb 14, 2026
804849a
style: fmt
tristan-f-r Feb 14, 2026
cf3c6a0
Merge branch 'hash' into lazy-stats
tristan-f-r Feb 14, 2026
0f7acca
Merge remote-tracking branch 'upstream/main' into lazy-stats
tristan-f-r Mar 19, 2026
ae61e57
Merge branch 'umain' into generate-all-inputs
tristan-f-r Apr 17, 2026
b038ecf
Merge branch 'main' into lazy-stats
tristan-f-r Apr 24, 2026
4fe949d
refactor: use dictionaries instead of a flat list
tristan-f-r Apr 25, 2026
a86354f
docs: clarification
tristan-f-r Apr 27, 2026
39 changes: 35 additions & 4 deletions Snakefile
@@ -2,11 +2,12 @@ import os
from spras import runner
import shutil
import yaml
from spras.dataset import Dataset
from spras.evaluation import Evaluation
from spras.analysis import ml, summary, cytoscape
from spras.config.revision import detach_spras_revision
import spras.config.config as _config
from spras.dataset import Dataset
from spras.evaluation import Evaluation
from spras.statistics import from_output_pathway, statistics_computation, statistics_options

# Snakemake updated the behavior in the 6.5.0 release https://github.com/snakemake/snakemake/pull/1037
# and using the wrong separator prevents Snakemake from matching filenames to the rules that can produce them
@@ -312,18 +313,48 @@ rule viz_cytoscape:
run:
cytoscape.run_cytoscape(input.pathways, output.session, container_settings)

# We generate a new Snakemake rule for every group of statistics
# to allow parallel and lazy computation of individual statistics
for keys in statistics_computation.keys():
pythonic_name = 'generate_' + '_and_'.join([key.lower().replace(' ', '_') for key in keys])
rule:
# (See https://snakemake.readthedocs.io/en/stable/snakefiles/rules.html#procedural-rule-definition)
name: pythonic_name
input: pathway_file = rules.parse_output.output.standardized_file
output: [SEP.join([out_dir, '{dataset}-{algorithm}-{params}', 'statistics', f'{key}.txt']) for key in keys]
# It is tempting to use `.items()` instead of `.keys()` above, but we
# need to pass the keys in via params; otherwise each job would use the latest values in statistics_computation.
# More info is in the procedural rule link above.
params: statistics_names=keys
run:
(Path(input.pathway_file).parent / 'statistics').mkdir(exist_ok=True)
graph = from_output_pathway(input.pathway_file)
for computed, output_file in zip(statistics_computation[params.statistics_names](graph), output, strict=True):
Path(output_file).write_text(str(computed))
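For orientation, here is a minimal standalone sketch of what each generated rule does for a single key tuple; the output directory and params hash below are hypothetical:

```python
from pathlib import Path

from spras.statistics import from_output_pathway, statistics_computation

# Hypothetical output location for illustration only
pathway_file = Path("output/data0-pathlinker-params-ABC123/pathway.txt")
stats_dir = pathway_file.parent / "statistics"
stats_dir.mkdir(parents=True, exist_ok=True)

keys = ("Max degree", "Median degree")  # one key tuple from statistics_computation
graph = from_output_pathway(pathway_file)
# each computed value is written to its own <statistic name>.txt file
for name, value in zip(keys, statistics_computation[keys](graph), strict=True):
    (stats_dir / f"{name}.txt").write_text(str(value))
```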

# We isolate this to a separate input function, as we want to preserve the dictionary structure
def summary_files(wildcards):
return {
algorithm_param: expand(
'{out_dir}{sep}{dataset}-{algorithm_param}{sep}statistics{sep}{statistic}.txt',
out_dir=out_dir, sep=SEP, algorithm_param=algorithm_param, statistic=statistics_options,
dataset=wildcards.dataset
) for algorithm_param in algorithms_with_params
}
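For a single dataset and one hypothetical algorithm/params-hash combination, summary_files expands to a dictionary shaped roughly like this (paths illustrative only):

```python
# Hypothetical illustration of the summary_files(wildcards) return value for dataset "data0"
example_summary_files = {
    "pathlinker-params-ABC123": [
        "output/data0-pathlinker-params-ABC123/statistics/Number of nodes.txt",
        "output/data0-pathlinker-params-ABC123/statistics/Number of edges.txt",
        # ... one file per statistic name in statistics_options
    ],
}
```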

# Write a single summary table for all pathways for each dataset
rule summary_table:
input:
# Collect all pathways generated for the dataset
pathways = expand('{out_dir}{sep}{{dataset}}-{algorithm_params}{sep}pathway.txt', out_dir=out_dir, sep=SEP, algorithm_params=algorithms_with_params),
dataset_file = SEP.join([out_dir, 'dataset-{dataset}-merged.pickle'])
dataset_file = SEP.join([out_dir, 'dataset-{dataset}-merged.pickle']),
# Collect all possible statistics from the `summary_files` dictionary-based input function
statistics = lambda wildcards: flatten(list(summary_files(wildcards).values()))
output: summary_table = SEP.join([out_dir, '{dataset}-pathway-summary.txt'])
run:
# Load the node table from the pickled dataset file
node_table = Dataset.from_file(input.dataset_file).node_table
summary_df = summary.summarize_networks(input.pathways, node_table, algorithm_params, algorithms_with_params)
summary_df = summary.summarize_networks(input.pathways, node_table, algorithm_params, algorithms_with_params, summary_files(wildcards))
summary_df.to_csv(output.summary_table, sep='\t', index=False)

# Cluster the output pathways for each dataset
129 changes: 22 additions & 107 deletions spras/analysis/summary.py
@@ -1,14 +1,17 @@
import ast
import itertools
import json
import os
from pathlib import Path
from statistics import median
from typing import Iterable
from typing import Iterable, Mapping

import networkx as nx
import pandas as pd

from spras.statistics import from_output_pathway, statistics_options


def summarize_networks(file_paths: Iterable[Path], node_table: pd.DataFrame, algo_params: dict[str, dict],
algo_with_params: list[str]) -> pd.DataFrame:
algo_with_params: list[str], statistics_files: Mapping[str, Iterable[str | os.PathLike]]) -> pd.DataFrame:
"""
Generate a table that aggregates summary information about networks in file_paths, including which nodes are present
in node_table columns. Network directionality is ignored and all edges are treated as undirected. The order of the
@@ -18,6 +21,7 @@ def summarize_networks(file_paths: Iterable[Path], node_table: pd.DataFrame, alg
@param algo_params: a nested dict mapping algorithm names to dicts that map parameter hashes to parameter
combinations.
@param algo_with_params: a list of <algorithm>-params-<params_hash> combinations
@param statistics_files: a dictionary mapping each algo_with_params entry to the list of files containing its computed statistics.
@return: pandas DataFrame with summary information
"""
# Ensure that NODEID is the first column
Expand All @@ -40,52 +44,22 @@ def summarize_networks(file_paths: Iterable[Path], node_table: pd.DataFrame, alg

# Iterate through each network file path
for index, file_path in enumerate(sorted(file_paths)):
with open(file_path, 'r') as f:
lines = f.readlines()[1:] # skip the header line

# directed or mixed graphs are parsed and summarized as an undirected graph
nw = nx.read_edgelist(lines, data=(('weight', float), ('Direction', str)))
nw = from_output_pathway(file_path)

# Save the network name, number of nodes, number edges, and number of connected components
nw_name = str(file_path)
number_nodes = nw.number_of_nodes()
number_edges = nw.number_of_edges()
ncc = nx.number_connected_components(nw)

# Save the max/median degree, average clustering coefficient, and density
if number_nodes == 0:
max_degree = 0
median_degree = 0.0
density = 0.0
else:
degrees = [deg for _, deg in nw.degree()]
max_degree = max(degrees)
median_degree = median(degrees)
density = nx.density(nw)

cc = list(nx.connected_components(nw))
# Save the max diameter
# Use diameter only for components with ≥2 nodes (singleton components have diameter 0)
diameters = [
nx.diameter(nw.subgraph(c).copy()) if len(c) > 1 else 0
for c in cc
]
max_diameter = max(diameters, default=0)

# Save the average path lengths
# Compute average shortest path length only for components with ≥2 nodes (undefined for singletons, set to 0.0)
avg_path_lengths = [
nx.average_shortest_path_length(nw.subgraph(c).copy()) if len(c) > 1 else 0.0
for c in cc
# We use ast.literal_eval here to convert statistic file outputs to ints or floats depending on their string representation.
# (e.g. "5.0" -> float(5.0), while "5" -> int(5).)
graph_statistics = [
ast.literal_eval(Path(file).read_text()) for file in
# along with sorting to keep the output stable (we do this same sorting procedure once more in this function)
sorted(statistics_files[algo_with_params[index]], key=lambda x: statistics_options.index(Path(x).stem))
]

if len(avg_path_lengths) != 0:
avg_path_len = sum(avg_path_lengths) / len(avg_path_lengths)
else:
avg_path_len = 0.0

# Initialize list to store current network information
cur_nw_info = [nw_name, number_nodes, number_edges, ncc, density, max_degree, median_degree, max_diameter, avg_path_len]
cur_nw_info = [nw_name, *graph_statistics]

# Iterate through each node property and save the intersection with the current network
for node_list in nodes_by_col:
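As a small aside, the ast.literal_eval parsing above round-trips each statistic file back to the numeric type it was written from; a quick sketch with hypothetical file contents:

```python
import ast

# "5" round-trips to an int, "5.0" to a float, mirroring str(value) used when the files are written
assert ast.literal_eval("5") == 5 and isinstance(ast.literal_eval("5"), int)
assert ast.literal_eval("5.0") == 5.0 and isinstance(ast.literal_eval("5.0"), float)
```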
@@ -107,8 +81,13 @@ def summarize_networks(file_paths: Iterable[Path], node_table: pd.DataFrame, alg
# Save the current network information to the network summary list
nw_info.append(cur_nw_info)

# Get the list of statistic names by their file names (via finding all requested statistics in the provided files)
current_statistics_options = sorted(
set(Path(file).stem for file in itertools.chain(*statistics_files.values())),
key=lambda x: statistics_options.index(x)
)
# Prepare column names
col_names = ['Name', 'Number of nodes', 'Number of edges', 'Number of connected components', 'Density', 'Max degree', 'Median degree', 'Max diameter', 'Average path length']
col_names = ['Name', *current_statistics_options]
col_names.extend(nodes_by_col_labs)
col_names.append('Parameter combination')

@@ -120,67 +99,3 @@ def summarize_networks(file_paths: Iterable[Path], node_table: pd.DataFrame, alg
)

return nw_info


def degree(g):
return dict(g.degree)

# TODO: redo .run code to work on mixed graphs
# stats is just a list of functions to apply to the graph.
# They should take as input a networkx graph or digraph but may have any output.
# stats = [degree, nx.clustering, nx.betweenness_centrality]


# def produce_statistics(g: nx.Graph, s=None) -> dict:
# global stats
# if s is not None:
# stats = s
# d = dict()
# for s in stats:
# sname = s.__name__
# d[sname] = s(g)
# return d


# def load_graph(path: str) -> nx.Graph:
# g = nx.read_edgelist(path, data=(('weight', float), ('Direction',str)))
# return g


# def save(data, pth):
# fout = open(pth, 'w')
# fout.write('#node\t%s\n' % '\t'.join([s.__name__ for s in stats]))
# for node in data[stats[0].__name__]:
# row = [data[s.__name__][node] for s in stats]
# fout.write('%s\t%s\n' % (node, '\t'.join([str(d) for d in row])))
# fout.close()


# def run(infile: str, outfile: str) -> None:
# """
# run function that wraps above functions.
# """
# # if output directory doesn't exist, make it.
# outdir = os.path.dirname(outfile)
# if not os.path.exists(outdir):
# os.makedirs(outdir)

# # load graph, produce stats, and write to human-readable file.
# g = load_graph(infile)
# dat = produce_statistics(g)
# save(dat, outfile)


# def main(argv):
# """
# for testing
# """
# g = load_graph(argv[1])
# print(g.nodes)
# dat = produce_statistics(g)
# print(dat)
# save(dat, argv[2])


# if __name__ == '__main__':
# main(sys.argv)
75 changes: 75 additions & 0 deletions spras/statistics.py
@@ -0,0 +1,75 @@
"""
Graph statistics, used to power summary.py.

We allow for arbitrary computation of any specific statistic on some graph,
computing more than necessary if we have dependencies. See the top level
`statistics_computation` dictionary for usage.

To support directed graph input, the statistics always take a networkx.DiGraph,
which carries strictly more information, even when the underlying graph could
just as easily be represented by a networkx.Graph.
"""

import itertools
from statistics import median
from typing import Callable

import networkx as nx


def compute_degree(graph: nx.DiGraph) -> tuple[int, float]:
"""
Computes the (max, median) degree of a `graph`.
"""
# number_of_nodes is a cheap call
if graph.number_of_nodes() == 0:
return (0, 0.0)
else:
degrees = [deg for _, deg in graph.degree()]
return max(degrees), median(degrees)

def compute_on_cc(directed_graph: nx.DiGraph) -> tuple[int, float]:
# We convert our directed_graph to an undirected graph as networkx (reasonably) does
# not allow for computing the connected components of a directed graph, but the connected
# component count still is a useful statistic for us.
graph: nx.Graph = directed_graph.to_undirected()
cc = list(nx.connected_components(graph))
# Save the max diameter
# Use diameter only for components with ≥2 nodes (singleton components have diameter 0)
diameters = [
nx.diameter(graph.subgraph(c).copy()) if len(c) > 1 else 0
for c in cc
]
max_diameter = max(diameters, default=0)

# Save the average path lengths
# Compute average shortest path length only for components with ≥2 nodes (undefined for singletons, set to 0.0)
avg_path_lengths = [
nx.average_shortest_path_length(graph.subgraph(c).copy()) if len(c) > 1 else 0.0
for c in cc
]

if len(avg_path_lengths) != 0:
avg_path_len = sum(avg_path_lengths) / len(avg_path_lengths)
else:
avg_path_len = 0.0

return max_diameter, avg_path_len

# The type signature here is meant to express that an n-tuple key yields n outputs.
statistics_computation: dict[tuple[str, ...], Callable[[nx.DiGraph], tuple[float | int, ...]]] = {
('Number of nodes',): lambda graph : (graph.number_of_nodes(),),
('Number of edges',): lambda graph : (graph.number_of_edges(),),
('Number of connected components',): lambda graph : (nx.number_connected_components(graph.to_undirected()),),
('Density',): lambda graph : (nx.density(graph),),
('Max degree', 'Median degree'): compute_degree,
('Max diameter', 'Average path length'): compute_on_cc,
}

# All of the keys inside statistics_computation, flattened.
statistics_options: list[str] = list(itertools.chain(*(list(key) for key in statistics_computation.keys())))

def from_output_pathway(file_path) -> nx.Graph:
    with open(file_path, 'r') as f:
        next(f)  # skip the header line
        return nx.read_edgelist(f, data=(('Rank', int), ('Direction', str)), delimiter='\t')
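A brief sketch of how this mapping is intended to be consumed, using a toy graph (the example graph and printed ordering are illustrative, not from the repository):

```python
import networkx as nx

from spras.statistics import statistics_computation, statistics_options

graph = nx.DiGraph([("A", "B"), ("B", "C")])  # toy example graph

results: dict[str, float | int] = {}
for names, compute in statistics_computation.items():
    # each callable returns exactly one value per name in its key tuple
    for name, value in zip(names, compute(graph), strict=True):
        results[name] = value

# statistics_options fixes a stable ordering of statistic names for downstream use
print([f"{name}: {results[name]}" for name in statistics_options])
```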
3 changes: 3 additions & 0 deletions test/analysis/input/egfr.yaml
@@ -31,3 +31,6 @@ datasets:
reconstruction_settings:
locations:
reconstruction_dir: "test/analysis/input/run/egfr"
analysis:
summary:
include: true
3 changes: 3 additions & 0 deletions test/analysis/input/example.yaml
@@ -48,3 +48,6 @@ gold_standards:
reconstruction_settings:
locations:
reconstruction_dir: "test/analysis/input/run/example"
analysis:
summary:
include: true
12 changes: 7 additions & 5 deletions test/analysis/test_summary.py
@@ -1,5 +1,4 @@
import filecmp
import shutil
import subprocess
from pathlib import Path

@@ -32,7 +31,7 @@ def snakemake_output(request):
param = request.param
subprocess.run(["snakemake", "--cores", "1", "--configfile", f"test/analysis/input/{param}.yaml"])
yield param # this runs the test itself: once this is passed, we go to test cleanup.
shutil.rmtree(f"test/analysis/input/run/{param}")
# shutil.rmtree(f"test/analysis/input/run/{param}")

class TestSummary:
@classmethod
@@ -56,12 +55,15 @@ def test_example_networks(self, snakemake_output):
algorithms_with_params = [f'{algorithm}-params-{params_hash}' for algorithm, param_combos in
algorithm_params.items() for params_hash in param_combos.keys()]

example_network_files = (INPUT_DIR / "run" / snakemake_output).rglob("pathway.txt")
network_files = (INPUT_DIR / "run" / snakemake_output).rglob("pathway.txt")
statistics_folders = [Path(file) for file in (INPUT_DIR / "run" / snakemake_output).rglob("**/statistics") if Path(file).name == "statistics"]
# We do some string fiddling here to make sure the folder matches up with algorithms_with_params. This may be susceptible to a good refactor.
statistics_files = {"-".join(folder.parent.stem.split("-")[1:]): list(folder.glob("*.txt")) for folder in statistics_folders}

out_path = Path(OUT_DIR, f"test_{snakemake_output}_summary.txt")
out_path.unlink(missing_ok=True)
summarize_out = summarize_networks(example_network_files, example_node_table, algorithm_params,
algorithms_with_params)
summarize_out = summarize_networks(network_files, example_node_table, algorithm_params,
algorithms_with_params, statistics_files)
# We do some post-processing to ensure that we get a stable summarize_out, since the attached hash
# is subject to variation (especially in testing) whenever the SPRAS commit revision gets changed
summarize_out["Parameter combination"] = summarize_out["Parameter combination"].astype(str)