-use anyhow::Result;
-use std::time::Duration;
-
-use crate::config::Config;
-use crate::core::offline::{LocalModel, ReadinessCheck};
-
-pub(super) fn print_header() {
-    println!("DiffScope Doctor");
-    println!("================\n");
-}
-
-pub(super) fn print_configuration(config: &Config) {
-    println!("Configuration:");
-    println!(" Model: {}", config.model);
-    println!(
-        " Adapter: {}",
-        config.adapter.as_deref().unwrap_or("(auto-detect)")
-    );
-    println!(
-        " Base URL: {}",
-        config.base_url.as_deref().unwrap_or("(default)")
-    );
-    println!(
-        " API Key: {}",
-        if config.api_key.is_some() {
-            "set"
-        } else {
-            "not set"
-        }
-    );
-    if let Some(cw) = config.context_window {
-        println!(" Context: {} tokens", cw);
-    }
-    println!();
-}
-
-pub(super) fn print_endpoint_models(endpoint_type: &str, models: &[LocalModel]) {
-    println!("\nEndpoint type: {}", endpoint_type);
-    println!("\nAvailable models ({}):", models.len());
-    if models.is_empty() {
-        println!(" (none found)");
-        if endpoint_type == "ollama" {
-            println!("\n Pull a model: ollama pull codellama");
-        }
-        return;
-    }
-
-    for model in models {
-        println!(" - {}{}", model.name, format_model_size_info(model));
-    }
-}
-
-pub(super) fn print_recommended_model_summary(
-    recommended: &LocalModel,
-    estimated_ram_mb: usize,
-    detected_context_window: Option<usize>,
-    readiness: &ReadinessCheck,
-) {
-    println!("\nRecommended for code review: {}", recommended.name);
-    println!(" Estimated RAM: ~{}MB", estimated_ram_mb);
-
-    if let Some(ctx_size) = detected_context_window {
-        println!(
-            " Context window: {} tokens (detected from model)",
-            ctx_size
-        );
-    }
-
-    if readiness.ready {
-        println!("\nStatus: READY");
-    } else {
-        println!("\nStatus: NOT READY");
-        for warning in &readiness.warnings {
-            println!(" Warning: {}", warning);
-        }
-    }
-}
-
-pub(super) fn print_inference_success(elapsed: Duration, tokens_per_sec: f64) {
-    println!(
-        "OK ({:.1}s, ~{:.0} tok/s)",
-        elapsed.as_secs_f64(),
-        tokens_per_sec
-    );
-    if tokens_per_sec < 2.0 {
-        println!(" Warning: Very slow inference. Consider a smaller/quantized model.");
-    }
-}
-
-pub(super) fn print_inference_failure(error: &impl std::fmt::Display) {
-    println!("FAILED");
-    println!(" Error: {}", error);
-    println!(" The model may still be loading. Try again in a moment.");
-}
-
-pub(super) fn print_usage(base_url: &str, model_flag: &str) {
-    println!("\nUsage:");
-    println!(
-        " git diff | diffscope review --base-url {} --model {}",
-        base_url, model_flag
-    );
-}
-
-pub(super) fn print_unreachable(base_url: &str) -> Result<()> {
-    println!("UNREACHABLE");
-    println!(
-        "\nCannot reach {}. Make sure your LLM server is running.",
-        base_url
-    );
-    println!("\nQuick start:");
-    println!(" Ollama: ollama serve");
-    println!(" vLLM: vllm serve <model>");
-    println!(" LM Studio: Start the app and enable the local server");
-    Ok(())
-}
-
-fn format_model_size_info(model: &LocalModel) -> String {
-    if model.size_mb == 0 {
-        return String::new();
-    }
-
-    format!(" ({}MB", model.size_mb)
-        + &model
-            .quantization
-            .as_ref()
-            .map(|quantization| format!(", {}", quantization))
-            .unwrap_or_default()
-        + ")"
-}
+#[path = "display/config.rs"]
+mod config;
+#[path = "display/endpoint.rs"]
+mod endpoint;
+#[path = "display/inference.rs"]
+mod inference;
+
+pub(super) use config::{print_configuration, print_header, print_unreachable};
+pub(super) use endpoint::print_endpoint_models;
+pub(super) use inference::{
+    print_inference_failure, print_inference_success, print_recommended_model_summary, print_usage,
+};
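The new submodule files themselves are not part of this diff; only the facade is shown. Assuming the removed functions moved over essentially verbatim, `display/config.rs` would look roughly like the sketch below. The `pub(crate)` visibility is an assumption (chosen so the `pub(super)` re-export in the facade remains valid from the submodule), and the `print_configuration` body is abbreviated.

```rust
// display/config.rs — hypothetical reconstruction, not shown in this diff.
use anyhow::Result;

use crate::config::Config;

// Widened to pub(crate) so the facade's `pub(super) use` re-export can
// expose these functions to the doctor command module.
pub(crate) fn print_header() {
    println!("DiffScope Doctor");
    println!("================\n");
}

pub(crate) fn print_configuration(config: &Config) {
    println!("Configuration:");
    println!(" Model: {}", config.model);
    println!(
        " Adapter: {}",
        config.adapter.as_deref().unwrap_or("(auto-detect)")
    );
    // ...remaining fields as in the removed display.rs...
}

pub(crate) fn print_unreachable(base_url: &str) -> Result<()> {
    println!("UNREACHABLE");
    println!(
        "\nCannot reach {}. Make sure your LLM server is running.",
        base_url
    );
    println!("\nQuick start:");
    println!(" Ollama: ollama serve");
    println!(" vLLM: vllm serve <model>");
    println!(" LM Studio: Start the app and enable the local server");
    Ok(())
}
```

`display/endpoint.rs` and `display/inference.rs` would presumably follow the same pattern for the remaining functions, with `format_model_size_info` staying private to the endpoint submodule.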