Commit b882be9

refactor: split doctor display helpers
Separate doctor configuration output, endpoint model listing, and inference messaging so the command flow can evolve each display surface independently.

Made-with: Cursor
1 parent 0fb9ca5 commit b882be9

5 files changed

Lines changed: 144 additions & 130 deletions

File tree

TODO.md
src/commands/doctor/command/display.rs
src/commands/doctor/command/display/config.rs
src/commands/doctor/command/display/endpoint.rs
src/commands/doctor/command/display/inference.rs

TODO.md

Lines changed: 1 addition & 1 deletion
@@ -68,7 +68,7 @@
 - [x] `src/commands/eval/metrics/rules.rs`: separate aggregate math, rule counting, and summary reduction helpers.
 - [x] `src/commands/doctor/endpoint/inference.rs`: split request building, HTTP execution/error handling, and response parsing.
 - [x] `src/commands/feedback_eval/report/build/stats.rs`: split threshold confusion-matrix scoring from bucket primitives.
-- [ ] `src/commands/doctor/command/display.rs`: separate header/config output, endpoint listing, and inference result rendering.
+- [x] `src/commands/doctor/command/display.rs`: separate header/config output, endpoint listing, and inference result rendering.
 - [ ] `src/commands/doctor/command/run.rs`: separate endpoint discovery, recommendation flow, and test helpers.
 - [ ] `src/commands/eval/runner/matching.rs`: split required-match search, unexpected-match detection, and rule metric assembly.
 - [ ] `src/commands/eval/runner/execute/loading.rs`: separate diff resolution from repo-path resolution if it grows again.
src/commands/doctor/command/display.rs

Lines changed: 12 additions & 129 deletions
@@ -1,129 +1,12 @@
-use anyhow::Result;
-use std::time::Duration;
-
-use crate::config::Config;
-use crate::core::offline::{LocalModel, ReadinessCheck};
-
-pub(super) fn print_header() {
-    println!("DiffScope Doctor");
-    println!("================\n");
-}
-
-pub(super) fn print_configuration(config: &Config) {
-    println!("Configuration:");
-    println!(" Model: {}", config.model);
-    println!(
-        " Adapter: {}",
-        config.adapter.as_deref().unwrap_or("(auto-detect)")
-    );
-    println!(
-        " Base URL: {}",
-        config.base_url.as_deref().unwrap_or("(default)")
-    );
-    println!(
-        " API Key: {}",
-        if config.api_key.is_some() {
-            "set"
-        } else {
-            "not set"
-        }
-    );
-    if let Some(cw) = config.context_window {
-        println!(" Context: {} tokens", cw);
-    }
-    println!();
-}
-
-pub(super) fn print_endpoint_models(endpoint_type: &str, models: &[LocalModel]) {
-    println!("\nEndpoint type: {}", endpoint_type);
-    println!("\nAvailable models ({}):", models.len());
-    if models.is_empty() {
-        println!(" (none found)");
-        if endpoint_type == "ollama" {
-            println!("\n Pull a model: ollama pull codellama");
-        }
-        return;
-    }
-
-    for model in models {
-        println!(" - {}{}", model.name, format_model_size_info(model));
-    }
-}
-
-pub(super) fn print_recommended_model_summary(
-    recommended: &LocalModel,
-    estimated_ram_mb: usize,
-    detected_context_window: Option<usize>,
-    readiness: &ReadinessCheck,
-) {
-    println!("\nRecommended for code review: {}", recommended.name);
-    println!(" Estimated RAM: ~{}MB", estimated_ram_mb);
-
-    if let Some(ctx_size) = detected_context_window {
-        println!(
-            " Context window: {} tokens (detected from model)",
-            ctx_size
-        );
-    }
-
-    if readiness.ready {
-        println!("\nStatus: READY");
-    } else {
-        println!("\nStatus: NOT READY");
-        for warning in &readiness.warnings {
-            println!(" Warning: {}", warning);
-        }
-    }
-}
-
-pub(super) fn print_inference_success(elapsed: Duration, tokens_per_sec: f64) {
-    println!(
-        "OK ({:.1}s, ~{:.0} tok/s)",
-        elapsed.as_secs_f64(),
-        tokens_per_sec
-    );
-    if tokens_per_sec < 2.0 {
-        println!(" Warning: Very slow inference. Consider a smaller/quantized model.");
-    }
-}
-
-pub(super) fn print_inference_failure(error: &impl std::fmt::Display) {
-    println!("FAILED");
-    println!(" Error: {}", error);
-    println!(" The model may still be loading. Try again in a moment.");
-}
-
-pub(super) fn print_usage(base_url: &str, model_flag: &str) {
-    println!("\nUsage:");
-    println!(
-        " git diff | diffscope review --base-url {} --model {}",
-        base_url, model_flag
-    );
-}
-
-pub(super) fn print_unreachable(base_url: &str) -> Result<()> {
-    println!("UNREACHABLE");
-    println!(
-        "\nCannot reach {}. Make sure your LLM server is running.",
-        base_url
-    );
-    println!("\nQuick start:");
-    println!(" Ollama: ollama serve");
-    println!(" vLLM: vllm serve <model>");
-    println!(" LM Studio: Start the app and enable the local server");
-    Ok(())
-}
-
-fn format_model_size_info(model: &LocalModel) -> String {
-    if model.size_mb == 0 {
-        return String::new();
-    }
-
-    format!(" ({}MB", model.size_mb)
-        + &model
-            .quantization
-            .as_ref()
-            .map(|quantization| format!(", {}", quantization))
-            .unwrap_or_default()
-        + ")"
-}
+#[path = "display/config.rs"]
+mod config;
+#[path = "display/endpoint.rs"]
+mod endpoint;
+#[path = "display/inference.rs"]
+mod inference;
+
+pub(super) use config::{print_configuration, print_header, print_unreachable};
+pub(super) use endpoint::print_endpoint_models;
+pub(super) use inference::{
+    print_inference_failure, print_inference_success, print_recommended_model_summary, print_usage,
+};
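A note on the shape of the new facade: display.rs keeps its old module path, the #[path] attributes spell out where each submodule file lives under display/, and the pub(super) re-exports mean existing callers keep compiling against the same display::* names. A minimal sketch of such a call site, assuming a sibling run.rs in the same command module (the actual call sites are not part of this diff):

    // Hypothetical call site, e.g. in src/commands/doctor/command/run.rs.
    // Nothing here changes after the split: the facade re-exports the same
    // names the old single-file display module exposed.
    use super::display;

    fn show_preamble(config: &crate::config::Config) {
        display::print_header();
        display::print_configuration(config);
    }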
src/commands/doctor/command/display/config.rs

Lines changed: 46 additions & 0 deletions
@@ -0,0 +1,46 @@
+use anyhow::Result;
+
+use crate::config::Config;
+
+pub(in super::super) fn print_header() {
+    println!("DiffScope Doctor");
+    println!("================\n");
+}
+
+pub(in super::super) fn print_configuration(config: &Config) {
+    println!("Configuration:");
+    println!(" Model: {}", config.model);
+    println!(
+        " Adapter: {}",
+        config.adapter.as_deref().unwrap_or("(auto-detect)")
+    );
+    println!(
+        " Base URL: {}",
+        config.base_url.as_deref().unwrap_or("(default)")
+    );
+    println!(
+        " API Key: {}",
+        if config.api_key.is_some() {
+            "set"
+        } else {
+            "not set"
+        }
+    );
+    if let Some(cw) = config.context_window {
+        println!(" Context: {} tokens", cw);
+    }
+    println!();
+}
+
+pub(in super::super) fn print_unreachable(base_url: &str) -> Result<()> {
+    println!("UNREACHABLE");
+    println!(
+        "\nCannot reach {}. Make sure your LLM server is running.",
+        base_url
+    );
+    println!("\nQuick start:");
+    println!(" Ollama: ollama serve");
+    println!(" vLLM: vllm serve <model>");
+    println!(" LM Studio: Start the app and enable the local server");
+    Ok(())
+}
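One detail worth noting: print_unreachable returns Result<()>, so a caller can report the failure and end the doctor run in a single expression. A hedged sketch of that early-exit shape (endpoint_is_reachable is an illustrative stand-in, not an API from this codebase):

    // Hypothetical early exit; `endpoint_is_reachable` is a stand-in name.
    fn check_endpoint(base_url: &str) -> anyhow::Result<()> {
        if !endpoint_is_reachable(base_url) {
            return print_unreachable(base_url);
        }
        Ok(())
    }

    fn endpoint_is_reachable(_base_url: &str) -> bool {
        false // the real reachability probe lives elsewhere in the doctor command
    }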
src/commands/doctor/command/display/endpoint.rs

Lines changed: 31 additions & 0 deletions
@@ -0,0 +1,31 @@
+use crate::core::offline::LocalModel;
+
+pub(in super::super) fn print_endpoint_models(endpoint_type: &str, models: &[LocalModel]) {
+    println!("\nEndpoint type: {}", endpoint_type);
+    println!("\nAvailable models ({}):", models.len());
+    if models.is_empty() {
+        println!(" (none found)");
+        if endpoint_type == "ollama" {
+            println!("\n Pull a model: ollama pull codellama");
+        }
+        return;
+    }
+
+    for model in models {
+        println!(" - {}{}", model.name, format_model_size_info(model));
+    }
+}
+
+fn format_model_size_info(model: &LocalModel) -> String {
+    if model.size_mb == 0 {
+        return String::new();
+    }
+
+    format!(" ({}MB", model.size_mb)
+        + &model
+            .quantization
+            .as_ref()
+            .map(|quantization| format!(", {}", quantization))
+            .unwrap_or_default()
+        + ")"
+}
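To make the formatting branches concrete, here is a small test sketch; it assumes LocalModel can be constructed from just the three fields this module touches (name, size_mb, quantization), which may not hold for the real struct in crate::core::offline:

    #[cfg(test)]
    mod tests {
        use super::*;

        // Hypothetical test: the LocalModel literal below only covers the
        // fields used by format_model_size_info and may not compile against
        // the real definition.
        #[test]
        fn size_info_includes_quantization_when_present() {
            let model = LocalModel {
                name: "codellama".into(),
                size_mb: 4096,
                quantization: Some("Q4_K_M".into()),
            };
            assert_eq!(format_model_size_info(&model), " (4096MB, Q4_K_M)");
        }
    }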
src/commands/doctor/command/display/inference.rs

Lines changed: 54 additions & 0 deletions
@@ -0,0 +1,54 @@
+use std::time::Duration;
+
+use crate::core::offline::{LocalModel, ReadinessCheck};
+
+pub(in super::super) fn print_recommended_model_summary(
+    recommended: &LocalModel,
+    estimated_ram_mb: usize,
+    detected_context_window: Option<usize>,
+    readiness: &ReadinessCheck,
+) {
+    println!("\nRecommended for code review: {}", recommended.name);
+    println!(" Estimated RAM: ~{}MB", estimated_ram_mb);
+
+    if let Some(ctx_size) = detected_context_window {
+        println!(
+            " Context window: {} tokens (detected from model)",
+            ctx_size
+        );
+    }
+
+    if readiness.ready {
+        println!("\nStatus: READY");
+    } else {
+        println!("\nStatus: NOT READY");
+        for warning in &readiness.warnings {
+            println!(" Warning: {}", warning);
+        }
+    }
+}
+
+pub(in super::super) fn print_inference_success(elapsed: Duration, tokens_per_sec: f64) {
+    println!(
+        "OK ({:.1}s, ~{:.0} tok/s)",
+        elapsed.as_secs_f64(),
+        tokens_per_sec
+    );
+    if tokens_per_sec < 2.0 {
+        println!(" Warning: Very slow inference. Consider a smaller/quantized model.");
+    }
+}
+
+pub(in super::super) fn print_inference_failure(error: &impl std::fmt::Display) {
+    println!("FAILED");
+    println!(" Error: {}", error);
+    println!(" The model may still be loading. Try again in a moment.");
+}
+
+pub(in super::super) fn print_usage(base_url: &str, model_flag: &str) {
+    println!("\nUsage:");
+    println!(
+        " git diff | diffscope review --base-url {} --model {}",
+        base_url, model_flag
+    );
+}
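A sketch of how a caller might thread these helpers together, timing a probe and routing to the success or failure surface; probe_inference is an illustrative stand-in, and the real sequencing lives in run.rs, which this commit does not touch:

    use std::time::Instant;

    // Hypothetical driver; `probe_inference` is a stand-in, not a real API.
    fn report_inference(base_url: &str, model: &str) {
        let start = Instant::now();
        match probe_inference(base_url, model) {
            Ok(token_count) => {
                let elapsed = start.elapsed();
                let tok_per_sec = token_count as f64 / elapsed.as_secs_f64().max(1e-9);
                print_inference_success(elapsed, tok_per_sec);
                print_usage(base_url, model);
            }
            Err(err) => print_inference_failure(&err),
        }
    }

    fn probe_inference(_base_url: &str, _model: &str) -> Result<usize, String> {
        Ok(64) // pretend the test prompt produced 64 tokens
    }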
