Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion src/bin/electrs.rs
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ fn run_server(config: Arc<Config>, salt_rwlock: Arc<RwLock<String>>) -> Result<(
signal.clone(),
&metrics,
)?);
let store = Arc::new(Store::open(&config.db_path.join("newindex"), &config));
let store = Arc::new(Store::open(&config.db_path.join("newindex"), &config, &metrics));
let mut indexer = Indexer::open(
Arc::clone(&store),
fetch_from(&config, &store),
Expand Down
7 changes: 3 additions & 4 deletions src/bin/popular-scripts.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,14 +2,13 @@ extern crate electrs;

use bitcoin::hex::DisplayHex;
use electrs::{
config::Config,
new_index::{Store, TxHistoryKey},
util::bincode,
config::Config, metrics::Metrics, new_index::{Store, TxHistoryKey}, util::bincode
};

fn main() {
let config = Config::from_args();
let store = Store::open(&config.db_path.join("newindex"), &config);
let metrics = Metrics::new(config.monitoring_addr);
let store = Store::open(&config.db_path.join("newindex"), &config, &metrics);

let mut iter = store.history_db().raw_iterator();
iter.seek(b"H");
Expand Down
3 changes: 2 additions & 1 deletion src/bin/tx-fingerprint-stats.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,8 @@ fn main() {

let signal = Waiter::start(crossbeam_channel::never());
let config = Config::from_args();
let store = Arc::new(Store::open(&config.db_path.join("newindex"), &config));
let metrics = Metrics::new(config.monitoring_addr);
let store = Arc::new(Store::open(&config.db_path.join("newindex"), &config, &metrics));

let metrics = Metrics::new(config.monitoring_addr);
metrics.start();
Expand Down
39 changes: 39 additions & 0 deletions src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,24 @@ pub struct Config {
/// however, this requires much more disk space.
pub initial_sync_compaction: bool,

/// RocksDB block cache size in MB (per database)
/// Caches decompressed blocks in memory to avoid repeated decompression (CPU intensive)
/// Total memory usage = cache_size * 3_databases (txstore, history, cache)
/// Recommendation: Start with 1024MB for production
/// Higher values reduce CPU load from cache misses but use more RAM
pub db_block_cache_mb: usize,

/// RocksDB parallelism level (background compaction and flush threads)
/// Recommendation: Set to number of CPU cores for optimal performance
/// This configures max_background_jobs and thread pools automatically
pub db_parallelism: usize,

/// RocksDB write buffer size in MB (per database)
/// Each database uses this much RAM for in-memory writes before flushing to disk
/// Total RAM usage = write_buffer_size * max_write_buffer_number * 3_databases
/// Larger buffers = fewer flushes (less CPU) but more RAM usage
pub db_write_buffer_size_mb: usize,

#[cfg(feature = "liquid")]
pub parent_network: BNetwork,
#[cfg(feature = "liquid")]
Expand Down Expand Up @@ -216,6 +234,24 @@ impl Config {
.long("initial-sync-compaction")
.help("Perform compaction during initial sync (slower but less disk space required)")
).arg(
Arg::with_name("db_block_cache_mb")
.long("db-block-cache-mb")
.help("RocksDB block cache size in MB per database")
.takes_value(true)
.default_value("8")
).arg(
Arg::with_name("db_parallelism")
.long("db-parallelism")
.help("RocksDB parallelism level. Set to number of CPU cores for optimal performance")
.takes_value(true)
.default_value("2")
).arg(
Arg::with_name("db_write_buffer_size_mb")
.long("db-write-buffer-size-mb")
.help("RocksDB write buffer size in MB per database. RAM usage = size * max_write_buffers(2) * 3_databases")
.takes_value(true)
.default_value("256")
).arg(
Arg::with_name("zmq_addr")
.long("zmq-addr")
.help("Optional zmq socket address of the bitcoind daemon")
Expand Down Expand Up @@ -452,6 +488,9 @@ impl Config {
cors: m.value_of("cors").map(|s| s.to_string()),
precache_scripts: m.value_of("precache_scripts").map(|s| s.to_string()),
initial_sync_compaction: m.is_present("initial_sync_compaction"),
db_block_cache_mb: value_t_or_exit!(m, "db_block_cache_mb", usize),
db_parallelism: value_t_or_exit!(m, "db_parallelism", usize),
db_write_buffer_size_mb: value_t_or_exit!(m, "db_write_buffer_size_mb", usize),
zmq_addr,

#[cfg(feature = "liquid")]
Expand Down
81 changes: 74 additions & 7 deletions src/new_index/db.rs
Original file line number Diff line number Diff line change
@@ -1,9 +1,15 @@
use prometheus::GaugeVec;
use rocksdb;

use std::convert::TryInto;
use std::path::Path;
use std::sync::Arc;
use std::thread;
use std::time::Duration;

use crate::config::Config;
use crate::util::{bincode, Bytes};
use crate::new_index::db_metrics::RocksDbMetrics;
use crate::util::{bincode, spawn_thread, Bytes};

static DB_VERSION: u32 = 1;

Expand Down Expand Up @@ -71,7 +77,7 @@ impl<'a> Iterator for ReverseScanIterator<'a> {

#[derive(Debug)]
pub struct DB {
db: rocksdb::DB,
db: Arc<rocksdb::DB>,
}

#[derive(Copy, Clone, Debug)]
Expand All @@ -89,18 +95,29 @@ impl DB {
db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level);
db_opts.set_compression_type(rocksdb::DBCompressionType::Snappy);
db_opts.set_target_file_size_base(1_073_741_824);
db_opts.set_write_buffer_size(256 << 20);
db_opts.set_disable_auto_compactions(!config.initial_sync_compaction); // for initial bulk load


let parallelism: i32 = config.db_parallelism.try_into()
.expect("db_parallelism value too large for i32");

// Configure parallelism (background jobs and thread pools)
db_opts.increase_parallelism(parallelism);

// Configure write buffer size (not set by increase_parallelism)
db_opts.set_write_buffer_size(config.db_write_buffer_size_mb * 1024 * 1024);

// db_opts.set_advise_random_on_open(???);
db_opts.set_compaction_readahead_size(1 << 20);
db_opts.increase_parallelism(2);

// let mut block_opts = rocksdb::BlockBasedOptions::default();
// block_opts.set_block_size(???);
// Configure block cache
let mut block_opts = rocksdb::BlockBasedOptions::default();
let cache_size_bytes = config.db_block_cache_mb * 1024 * 1024;
block_opts.set_block_cache(&rocksdb::Cache::new_lru_cache(cache_size_bytes));
db_opts.set_block_based_table_factory(&block_opts);

let db = DB {
db: rocksdb::DB::open(&db_opts, path).expect("failed to open RocksDB"),
db: Arc::new(rocksdb::DB::open(&db_opts, path).expect("failed to open RocksDB"))
};
db.verify_compatibility(config);
db
Expand Down Expand Up @@ -220,4 +237,54 @@ impl DB {
Some(_) => (),
}
}

/// Spawn a background thread that periodically exports RocksDB internal
/// properties into the supplied Prometheus gauges.
///
/// Every 5 seconds the thread reads each `rocksdb.*` property from this
/// database and, when the property is present and parses as a number,
/// records it in the matching gauge under the `db_name` label.
/// Missing or unparsable properties are skipped (best-effort sampling).
/// The thread runs for the lifetime of the process.
pub fn start_stats_exporter(&self, db_metrics: Arc<RocksDbMetrics>, db_name: &str) {
    let db = Arc::clone(&self.db);
    let db_label = db_name.to_string();

    spawn_thread("db_stats_exporter", move || loop {
        // (gauge, RocksDB property name) pairs sampled on every tick.
        let tracked: &[(&GaugeVec, &str)] = &[
            (&db_metrics.num_immutable_mem_table, "rocksdb.num-immutable-mem-table"),
            (&db_metrics.mem_table_flush_pending, "rocksdb.mem-table-flush-pending"),
            (&db_metrics.compaction_pending, "rocksdb.compaction-pending"),
            (&db_metrics.background_errors, "rocksdb.background-errors"),
            (&db_metrics.cur_size_active_mem_table, "rocksdb.cur-size-active-mem-table"),
            (&db_metrics.cur_size_all_mem_tables, "rocksdb.cur-size-all-mem-tables"),
            (&db_metrics.size_all_mem_tables, "rocksdb.size-all-mem-tables"),
            (&db_metrics.num_entries_active_mem_table, "rocksdb.num-entries-active-mem-table"),
            (&db_metrics.num_entries_imm_mem_tables, "rocksdb.num-entries-imm-mem-tables"),
            (&db_metrics.num_deletes_active_mem_table, "rocksdb.num-deletes-active-mem-table"),
            (&db_metrics.num_deletes_imm_mem_tables, "rocksdb.num-deletes-imm-mem-tables"),
            (&db_metrics.estimate_num_keys, "rocksdb.estimate-num-keys"),
            (&db_metrics.estimate_table_readers_mem, "rocksdb.estimate-table-readers-mem"),
            (&db_metrics.is_file_deletions_enabled, "rocksdb.is-file-deletions-enabled"),
            (&db_metrics.num_snapshots, "rocksdb.num-snapshots"),
            (&db_metrics.oldest_snapshot_time, "rocksdb.oldest-snapshot-time"),
            (&db_metrics.num_live_versions, "rocksdb.num-live-versions"),
            (&db_metrics.current_super_version_number, "rocksdb.current-super-version-number"),
            (&db_metrics.estimate_live_data_size, "rocksdb.estimate-live-data-size"),
            (&db_metrics.min_log_number_to_keep, "rocksdb.min-log-number-to-keep"),
            (&db_metrics.min_obsolete_sst_number_to_keep, "rocksdb.min-obsolete-sst-number-to-keep"),
            (&db_metrics.total_sst_files_size, "rocksdb.total-sst-files-size"),
            (&db_metrics.live_sst_files_size, "rocksdb.live-sst-files-size"),
            (&db_metrics.base_level, "rocksdb.base-level"),
            (&db_metrics.estimate_pending_compaction_bytes, "rocksdb.estimate-pending-compaction-bytes"),
            (&db_metrics.num_running_compactions, "rocksdb.num-running-compactions"),
            (&db_metrics.num_running_flushes, "rocksdb.num-running-flushes"),
            (&db_metrics.actual_delayed_write_rate, "rocksdb.actual-delayed-write-rate"),
            (&db_metrics.is_write_stopped, "rocksdb.is-write-stopped"),
            (&db_metrics.estimate_oldest_key_time, "rocksdb.estimate-oldest-key-time"),
            (&db_metrics.block_cache_capacity, "rocksdb.block-cache-capacity"),
            (&db_metrics.block_cache_usage, "rocksdb.block-cache-usage"),
            (&db_metrics.block_cache_pinned_usage, "rocksdb.block-cache-pinned-usage"),
        ];

        for &(gauge, property) in tracked {
            // property_value returns Ok(None) for unknown properties;
            // non-numeric values are ignored rather than reported.
            if let Ok(Some(raw)) = db.property_value(property) {
                if let Ok(parsed) = raw.parse::<f64>() {
                    gauge.with_label_values(&[&db_label]).set(parsed);
                }
            }
        }

        thread::sleep(Duration::from_secs(5));
    });
}
}
Loading
Loading