diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index a5f6b4ca2..e83013246 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -22,7 +22,7 @@ jobs: uses: Swatinem/rust-cache@v2 - name: Setup pages id: pages - uses: actions/configure-pages@v5 + uses: actions/configure-pages@v6 - name: Clean docs folder run: cargo clean --doc - name: Build docs @@ -32,7 +32,7 @@ jobs: - name: Remove lock file run: rm target/doc/.lock - name: Upload artifact - uses: actions/upload-pages-artifact@v4 + uses: actions/upload-pages-artifact@v5 with: path: target/doc deploy: @@ -45,4 +45,4 @@ jobs: steps: - name: Deploy to GitHub Pages id: deployment - uses: actions/deploy-pages@v4 + uses: actions/deploy-pages@v5 diff --git a/.gitignore b/.gitignore index 802b1ab3c..5de9feb35 100644 --- a/.gitignore +++ b/.gitignore @@ -2,13 +2,21 @@ /Cargo.lock *.log theta.csv -cycles.csv -pred.csv obs.csv time.csv n_psi.csv psi.csv r.csv +correlation.csv +/docs +diagnostics.json +predictions.csv +summary.csv +summary.json +iterations.csv +population.csv +shrinkage.csv +statistics.csv posterior.csv simulation_output.csv /examples/rosuva/* @@ -17,6 +25,8 @@ simulation_output.csv /examples/data/iohexol* /examples/data/rosuva* /examples/data/vori* +/examples/paper_benchmarks +/examples/*/output /.idea stop .vscode @@ -28,7 +38,13 @@ settings.json log.txt op.csv *results.txt -covs.csv +covariates.csv +individual_effects.csv +individual_parameters.csv +residual_error.csv error_theta.csv lcov.info -Fortran/ \ No newline at end of file +Fortran/ +paper/ +docs/ +examples/**/outputs/ \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index a6baf3f9a..720a395f6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,30 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.25.2](https://github.com/LAPKB/PMcore/compare/v0.25.1...v0.25.2) - 2026-04-20 + +### Fixed + +- Add wrappers for analytical 
solutions ([#272](https://github.com/LAPKB/PMcore/pull/272)) + +## [0.25.1](https://github.com/LAPKB/PMcore/compare/v0.25.0...v0.25.1) - 2026-04-13 + +### Other + +- Update rand requirement from 0.9.0 to 0.10.1 ([#270](https://github.com/LAPKB/PMcore/pull/270)) + +## [0.25.0](https://github.com/LAPKB/PMcore/compare/v0.24.0...v0.25.0) - 2026-04-11 + +### Added + +- Bump pharmsol and update examples ([#269](https://github.com/LAPKB/PMcore/pull/269)) +- Use pharmsol 0.25 ([#268](https://github.com/LAPKB/PMcore/pull/268)) + +### Other + +- Update faer requirement from 0.23.1 to 0.24.0 ([#241](https://github.com/LAPKB/PMcore/pull/241)) +- Update rand requirement from 0.9.0 to 0.10.0 ([#244](https://github.com/LAPKB/PMcore/pull/244)) + ## [0.24.0](https://github.com/LAPKB/PMcore/compare/v0.23.0...v0.24.0) - 2026-04-01 ### Added diff --git a/Cargo.toml b/Cargo.toml index c7cf0aa1d..8ef4dd7ee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pmcore" -version = "0.24.0" +version = "0.25.2" edition = "2021" authors = [ "Julián D. Otálvaro ", @@ -27,11 +27,11 @@ tracing-subscriber = { version = "0.3.19", features = [ "fmt", "time", ] } -faer = "0.23.1" -pharmsol = "=0.24.1" -rand = "0.9.0" +faer = "0.24.0" +pharmsol = "=0.26.1" anyhow = "1.0.100" rayon = "1.10.0" +rand = "0.10.1" [features] default = [] diff --git a/benches/bimodal_ke.rs b/benches/bimodal_ke.rs index b8ce6c55e..cc847df2b 100644 --- a/benches/bimodal_ke.rs +++ b/benches/bimodal_ke.rs @@ -20,10 +20,10 @@ fn create_equation() -> equation::ODE { ) } -fn create_parameters() -> Parameters { - Parameters::new() - .add("ke", 0.001, 3.0) - .add("v", 25.0, 250.0) +fn create_parameter_space() -> ParameterSpace { + ParameterSpace::new() + .add(ParameterSpec::bounded("ke", 0.001, 3.0)) + .add(ParameterSpec::bounded("v", 25.0, 250.0)) } fn create_error_models() -> Result { @@ -37,51 +37,50 @@ fn load_data() -> Result { Ok(data::read_pmetrics("examples/bimodal_ke/bimodal_ke.csv")?) 
} -fn setup_with_algorithm(algorithm: Algorithm) -> Result<(Settings, equation::ODE, data::Data)> { - let params = create_parameters(); +fn setup_with_algorithm(method: NonparametricMethod) -> Result> { let ems = create_error_models()?; - - let mut settings = Settings::builder() - .set_algorithm(algorithm) - .set_parameters(params) - .set_error_models(ems) - .build(); - - settings.set_cycles(1000); - settings.set_prior(Prior::sobol(2048, 22)); - settings.disable_output(); - settings.set_progress(false); - + let observations = ObservationSpec::new() + .add_channel(ObservationChannel::continuous(1, "cp")) + .with_assay_error_models(ems); + let model = ModelDefinition::builder(create_equation()) + .parameters(create_parameter_space()) + .observations(observations) + .build()?; let data = load_data()?; - Ok((settings, create_equation(), data)) + EstimationProblem::builder(model, data) + .method(EstimationMethod::Nonparametric(method)) + .output(OutputPlan::disabled()) + .runtime(RuntimeOptions { + cycles: 1000, + progress: false, + prior: Some(Prior::sobol(2048, 22)), + ..RuntimeOptions::default() + }) + .build() } -fn setup_npag() -> Result<(Settings, equation::ODE, data::Data)> { - setup_with_algorithm(Algorithm::NPAG) +fn setup_npag() -> Result> { + setup_with_algorithm(NonparametricMethod::Npag(NpagOptions)) } -fn setup_npod() -> Result<(Settings, equation::ODE, data::Data)> { - setup_with_algorithm(Algorithm::NPOD) +fn setup_npod() -> Result> { + setup_with_algorithm(NonparametricMethod::Npod(NpodOptions)) } -fn setup_postprob() -> Result<(Settings, equation::ODE, data::Data)> { - setup_with_algorithm(Algorithm::POSTPROB) +fn setup_postprob() -> Result> { + setup_with_algorithm(NonparametricMethod::Postprob(PostProbOptions)) } fn benchmark_algorithm(c: &mut Criterion, bench_name: &str, setup_fn: F) where - F: Fn() -> Result<(Settings, equation::ODE, data::Data)>, + F: Fn() -> Result>, { - let (settings, eq, data) = setup_fn().unwrap(); + let problem = 
setup_fn().unwrap(); c.bench_function(bench_name, |b| { b.iter_with_setup( - || (settings.clone(), eq.clone(), data.clone()), - |(s, e, d)| { - let mut algorithm = dispatch_algorithm(s, e, d).unwrap(); - let result = algorithm.fit().unwrap(); - black_box(result) - }, + || problem.clone(), + |problem| black_box(problem.run().unwrap()), ) }); } diff --git a/examples/bestdose.rs b/examples/bestdose.rs index 6968514a6..998855360 100644 --- a/examples/bestdose.rs +++ b/examples/bestdose.rs @@ -1,8 +1,6 @@ use anyhow::Result; -use pmcore::bestdose::{BestDosePosterior, DoseRange, Target}; - +use pmcore::bestdose::{BestDoseConfig, BestDosePosterior, DoseRange, Target}; use pmcore::prelude::*; -use pmcore::routines::initialization::parse_prior; fn main() -> Result<()> { // Example model @@ -18,23 +16,15 @@ fn main() -> Result<()> { }, }; - let params = Parameters::new() - .add("ke", 0.001, 3.0) - .add("v", 25.0, 250.0); + let parameter_space = ParameterSpace::new() + .add(ParameterSpec::bounded("ke", 0.001, 3.0)) + .add(ParameterSpec::bounded("v", 25.0, 250.0)); let ems = AssayErrorModels::new().add( 0, AssayErrorModel::additive(ErrorPoly::new(0.0, 0.20, 0.0, 0.0), 0.0), )?; - - // Make settings - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems.clone()) - .build(); - - settings.disable_output(); + let config = BestDoseConfig::new(parameter_space.clone(), ems.clone()).with_progress(false); // Generate a patient with known parameters // Ke = 0.5, V = 50 @@ -71,21 +61,14 @@ fn main() -> Result<()> { .observation(18.0, conc(6.0, 75.0) + conc(18.0, 150.0), 0) .build(); - let (theta, prior) = parse_prior( - &"examples/bimodal_ke/output/theta.csv".to_string(), - &settings, - ) - .unwrap(); + let (theta, prior) = read_prior("examples/bimodal_ke/output/theta.csv", ¶meter_space)?; - // Example usage - two-stage API: - // Stage 1: Compute posterior (expensive, done once) - // Stage 2: Optimize doses (can be 
called multiple times with different params) let posterior = BestDosePosterior::compute( &theta, &prior.unwrap(), Some(past_data.clone()), // Optional: past data for Bayesian updating eq.clone(), - settings.clone(), + config.clone(), )?; println!("Optimizing dose..."); diff --git a/examples/bestdose_auc.rs b/examples/bestdose_auc.rs index 70aecb429..d4e04c9ce 100644 --- a/examples/bestdose_auc.rs +++ b/examples/bestdose_auc.rs @@ -1,10 +1,11 @@ use anyhow::Result; -use pmcore::bestdose::{BestDosePosterior, DoseRange, Target}; +use pmcore::bestdose::{BestDoseConfig, BestDosePosterior, DoseRange, Target}; use pmcore::prelude::*; -use pmcore::routines::initialization::parse_prior; fn main() -> Result<()> { - tracing_subscriber::fmt::init(); + tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::new("info,diffsol=off")) + .init(); println!("BestDose AUC Target - Minimal Example\n"); println!("======================================\n"); @@ -22,30 +23,22 @@ fn main() -> Result<()> { }; // Minimal parameter ranges - let params = Parameters::new() - .add("ke", 0.001, 3.0) - .add("v", 25.0, 250.0); + let parameter_space = ParameterSpace::new() + .add(ParameterSpec::bounded("ke", 0.001, 3.0)) + .add(ParameterSpec::bounded("v", 25.0, 250.0)); let ems = AssayErrorModels::new().add( 0, AssayErrorModel::additive(ErrorPoly::new(0.0, 0.20, 0.0, 0.0), 0.0), )?; - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems.clone()) - .build(); - - settings.disable_output(); - settings.set_idelta(60.0); // 1 hour intervals for AUC calculation + let config = BestDoseConfig::new(parameter_space.clone(), ems.clone()) + .with_progress(false) + .with_prediction_interval(60.0); // Load realistic prior from previous NPAG run (47 support points) println!("Loading prior from bimodal_ke example..."); - let (theta, prior) = parse_prior( - &"examples/bimodal_ke/output/theta.csv".to_string(), - &settings, - )?; 
+ let (theta, prior) = read_prior("examples/bimodal_ke/output/theta.csv", ¶meter_space)?; let weights = prior.as_ref().unwrap(); println!("Prior: {} support points\n", theta.matrix().nrows()); @@ -67,16 +60,16 @@ fn main() -> Result<()> { weights, None, // No past data - use prior directly eq.clone(), - settings.clone(), + config.clone(), )?; println!("Optimizing dose...\n"); let optimal = posterior.optimize( target_data.clone(), None, - DoseRange::new(100.0, 2000.0), // Wider range for AUC targets - 0.8, // for AUC targets higher bias_weight usually works best - Target::AUCFromZero, // Cumulative AUC from time 0 + DoseRange::new(100.0, 2000.0), + 0.8, + Target::AUCFromZero, )?; let opt_doses = optimal.doses(); @@ -143,7 +136,7 @@ fn main() -> Result<()> { None, DoseRange::new(50.0, 500.0), 0.8, - Target::AUCFromLastDose, // Interval AUC from last dose! + Target::AUCFromLastDose, )?; let doses: Vec = optimal_interval.doses(); diff --git a/examples/bestdose_bounds.rs b/examples/bestdose_bounds.rs index 4b2eab3bb..0b3e4b93f 100644 --- a/examples/bestdose_bounds.rs +++ b/examples/bestdose_bounds.rs @@ -1,10 +1,11 @@ use anyhow::Result; -use pmcore::bestdose::{BestDosePosterior, DoseRange, Target}; +use pmcore::bestdose::{BestDoseConfig, BestDosePosterior, DoseRange, Target}; use pmcore::prelude::*; -use pmcore::routines::initialization::parse_prior; fn main() -> Result<()> { - tracing_subscriber::fmt::init(); + tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::new("info,diffsol=off")) + .init(); println!("BestDose with Dose Range Bounds - Example\n"); println!("==========================================\n"); @@ -21,29 +22,20 @@ fn main() -> Result<()> { }, }; - let params = Parameters::new() - .add("ke", 0.001, 3.0) - .add("v", 25.0, 250.0); + let parameter_space = ParameterSpace::new() + .add(ParameterSpec::bounded("ke", 0.001, 3.0)) + .add(ParameterSpec::bounded("v", 25.0, 250.0)); let ems = AssayErrorModels::new().add( 0, 
AssayErrorModel::additive(ErrorPoly::new(0.0, 0.20, 0.0, 0.0), 0.0), )?; - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems.clone()) - .build(); - - settings.disable_output(); + let config = BestDoseConfig::new(parameter_space.clone(), ems.clone()).with_progress(false); // Load realistic prior from previous NPAG run println!("Loading prior from bimodal_ke example..."); - let (theta, prior) = parse_prior( - &"examples/bimodal_ke/output/theta.csv".to_string(), - &settings, - )?; + let (theta, prior) = read_prior("examples/bimodal_ke/output/theta.csv", ¶meter_space)?; let weights = prior.as_ref().unwrap(); println!("Prior: {} support points\n", theta.matrix().nrows()); @@ -63,14 +55,12 @@ fn main() -> Result<()> { (50.0, 2000.0, "Wide range (50-2000 mg)"), ]; + let posterior = BestDosePosterior::compute(&theta, weights, None, eq.clone(), config.clone())?; + println!("\nTesting optimization with different dose range constraints:\n"); println!("{:<30} | {:>12} | {:>10}", "Range", "Optimal Dose", "Cost"); println!("{}", "-".repeat(60)); - // Compute posterior once, reuse for all dose ranges - let posterior = - BestDosePosterior::compute(&theta, weights, None, eq.clone(), settings.clone())?; - for (min, max, description) in dose_ranges { let result = posterior.optimize( target_data.clone(), @@ -80,7 +70,24 @@ fn main() -> Result<()> { Target::Concentration, )?; - let doses: Vec = result.doses(); + let doses: Vec = result + .optimal_subject() + .iter() + .map(|occ| { + occ.iter() + .filter(|event| match event { + Event::Bolus(_) => true, + Event::Infusion(_) => true, + _ => false, + }) + .map(|event| match event { + Event::Bolus(bolus) => bolus.amount(), + Event::Infusion(infusion) => infusion.amount(), + _ => 0.0, + }) + }) + .flatten() + .collect(); // Check if dose hit the bound let at_bound = if (doses[0] - max).abs() < 1.0 { diff --git a/examples/bestdose_cov.rs b/examples/bestdose_cov.rs new 
file mode 100644 index 000000000..316f447a0 --- /dev/null +++ b/examples/bestdose_cov.rs @@ -0,0 +1,156 @@ +use anyhow::Result; +use pmcore::bestdose; // bestdose new + // use pmcore::bestdose::bestdose_old as bestdose; // bestdose old + +use pmcore::prelude::*; + +fn main() -> Result<()> { + // Example model + let eq = equation::ODE::new( + |x, p, _t, dx, b, _rateiv, _cov| { + // fetch_cov!(cov, t, wt); + fetch_params!(p, ke, _v); + dx[0] = -ke * x[0] + b[0]; + }, + |_p, _, _| lag! {}, + |_p, _, _| fa! {}, + |_p, _t, _cov, _x| {}, + |x, p, _t, _cov, y| { + fetch_params!(p, _ke, v); + let v = v * 70.0; + y[0] = x[0] / v; + }, + ); + + let parameter_space = ParameterSpace::new() + .add(ParameterSpec::bounded("ke", 0.001, 3.0)) + .add(ParameterSpec::bounded("v", 25.0 / 70.0, 250.0 / 70.0)); + + let ems = AssayErrorModels::new().add( + 0, + AssayErrorModel::additive(ErrorPoly::new(0.0, 0.20, 0.0, 0.0), 0.0), + )?; + + let config = + bestdose::BestDoseConfig::new(parameter_space.clone(), ems.clone()).with_progress(false); + + // Generate a patient with known parameters + // Ke = 0.5, V = 50 + // C(t) = Dose * exp(-ke * t) / V + + fn conc(t: f64, dose: f64) -> f64 { + let ke = 0.3406021231412888; // Elimination rate constant + let v = 99.99475717544556; // Volume of distribution + (dose * (-ke * t).exp()) / v + } + + // Some observed data + let subject = Subject::builder("Nikola Tesla") + .bolus(0.0, 150.0, 0) + .observation(2.0, conc(2.0, 150.0), 0) + .observation(4.0, conc(4.0, 150.0), 0) + .observation(6.0, conc(6.0, 150.0), 0) + .bolus(12.0, 75.0, 0) + .observation(14.0, conc(2.0, 75.0) + conc(14.0, 150.0), 0) + .observation(16.0, conc(4.0, 75.0) + conc(16.0, 150.0), 0) + .observation(18.0, conc(6.0, 75.0) + conc(18.0, 150.0), 0) + .build(); + + // simulate subject concentrations + + // for event in subject.occasions().first().unwrap().events().into_iter() { + // // if event is observations + // if let Event::Observation(obs) = event { + // println!("Time: {:.2} h, 
Observed: {:?}", obs.time(), obs.value()); + // } + // } + + // println!("++++++++++++++++++++++++++++++++++++++++++++++++++"); + + // let sim = eq.simulate_subject(&subject, &vec![0.09, 1.49], None)?; + // // dbg subject concentrations + + // for pred in sim.0.predictions().into_iter() { + // println!( + // "Time: {:.2} h, Observed: {:?}, Predicted: {:.4}", + // pred.time(), + // pred.observation(), + // pred.prediction() + // ); + // } + + let past_data = subject.clone(); + + let target_data = Subject::builder("Thomas Edison") + .bolus(0.0, 0.0, 0) + .observation(2.0, conc(2.0, 150.0), 0) + .observation(4.0, conc(4.0, 150.0), 0) + .observation(6.0, conc(6.0, 150.0), 0) + .bolus(12.0, 0.0, 0) + .observation(14.0, conc(2.0, 75.0) + conc(14.0, 150.0), 0) + .observation(16.0, conc(4.0, 75.0) + conc(16.0, 150.0), 0) + .observation(18.0, conc(6.0, 75.0) + conc(18.0, 150.0), 0) + .build(); + + let (mut theta, prior) = read_prior("examples/bimodal_ke/output/theta.csv", ¶meter_space)?; + + let m_t = theta.matrix_mut(); + for i in 0..m_t.nrows() { + m_t[(i, 1)] = m_t[(i, 1)] / 70.0; + } + + // Example usage - using new() constructor which calculates NPAGFULL11 posterior + // max_cycles controls NPAGFULL refinement: + // 0 = NPAGFULL11 only (fast but less accurate) + // 100 = moderate refinement + // 500 = full refinement (Fortran default, slow but most accurate) + let problem = bestdose::BestDoseProblem::new( + &theta, + &prior.unwrap(), + Some(past_data.clone()), // Optional: past data for Bayesian updating + target_data.clone(), + None, + eq.clone(), + bestdose::DoseRange::new(0.0, 300.0), + 0.0, + config.clone(), + bestdose::Target::Concentration, // Target concentrations (not AUCs) + )?; + + println!("Optimizing dose..."); + + let bias_weights = vec![0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]; + let mut results = Vec::new(); + + for bias_weight in &bias_weights { + println!("Running optimization with bias weight: {}", bias_weight); + let optimal = 
problem.clone().with_bias_weight(*bias_weight).optimize()?; + results.push((bias_weight, optimal)); + } + + // Print results + for (bias_weight, optimal) in &results { + let opt_doses = optimal.doses(); + + println!( + "Bias weight: {:.2}\t\t Optimal dose: {:?}\t\tCost: {:.6}\t\tln Cost: {:.4}\t\tMethod: {}", + bias_weight, + opt_doses, + optimal.objf(), + optimal.objf().ln(), + optimal.optimization_method() + ); + } + + // Print concentration-time predictions for the optimal dose + let optimal = &results.last().unwrap().1; + println!("\nConcentration-time predictions for optimal dose:"); + for pred in optimal.predictions().predictions().into_iter() { + println!( + "Time: {:.2} h, Observed: {:.2}, (Pop Mean: {:.4}, Pop Median: {:.4}, Post Mean: {:.4}, Post Median: {:.4})", + pred.time(), pred.obs().unwrap_or(0.0), pred.pop_mean(), pred.pop_median(), pred.post_mean(), pred.post_median() + ); + } + + Ok(()) +} diff --git a/examples/bimodal_ke/config.toml b/examples/bimodal_ke/config.toml deleted file mode 100644 index 498ba70a4..000000000 --- a/examples/bimodal_ke/config.toml +++ /dev/null @@ -1,24 +0,0 @@ -[config] -cycles = 1024 -algorithm = "NPAG" -cache = true - -[random] -Ke = [0.001, 3.0] -V = [25.0, 250.0] - -[error] -value = 0.0 -class = "additive" -poly = [0.0, 0.05, 0.0, 0.0] - -[log] -level = "info" - -[prior] -sampler = "sobol" -# file = "examples/bimodal_ke/prior.csv" - -[output] -write = true -path = "examples/bimodal_ke/output" diff --git a/examples/bimodal_ke/main.rs b/examples/bimodal_ke/main.rs index 0ea9bb68c..84d493bf1 100644 --- a/examples/bimodal_ke/main.rs +++ b/examples/bimodal_ke/main.rs @@ -15,35 +15,44 @@ fn main() -> Result<()> { } .with_solver(OdeSolver::ExplicitRk(ExplicitRkTableau::Tsit45)); - let params = Parameters::new() - .add("ke", 0.001, 3.0) - .add("v", 25.0, 250.0); - - let ems = AssayErrorModels::new() - .add( - 1, - AssayErrorModel::additive(ErrorPoly::new(0.0, 0.5, 0.0, 0.0), 0.0), + let observations = ObservationSpec::new() 
+ .add_channel(ObservationChannel::continuous(1, "cp")) + .with_assay_error_models( + AssayErrorModels::new() + .add( + 1, + AssayErrorModel::additive(ErrorPoly::new(0.0, 0.5, 0.0, 0.0), 0.0), + ) + .unwrap(), + ); + + let model = ModelDefinition::builder(eq) + .parameters( + ParameterSpace::new() + .add(ParameterSpec::bounded("ke", 0.001, 3.0)) + .add(ParameterSpec::bounded("v", 25.0, 250.0)), ) - .unwrap(); - - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems) - .build(); - - settings.set_cycles(1000); - settings.set_prior(Prior::sobol(2028, 22)); - settings.set_output_path("examples/bimodal_ke/output/"); - settings.set_write_logs(true); - - settings.write()?; + .observations(observations) + .build()?; - // settings.enable_logs(stdout: bool, ) - settings.initialize_logs()?; let data = data::read_pmetrics("examples/bimodal_ke/bimodal_ke.csv")?; - let mut algorithm = dispatch_algorithm(settings, eq, data)?; - let mut result = algorithm.fit()?; + let mut result = EstimationProblem::builder(model, data) + .method(EstimationMethod::Nonparametric(NonparametricMethod::Npag( + NpagOptions::default(), + ))) + .output(OutputPlan { + write: true, + path: Some("examples/bimodal_ke/output/".to_string()), + }) + .runtime(RuntimeOptions { + cycles: 1000, + logging: LoggingOptions { + initialize: true, + ..LoggingOptions::default() + }, + ..RuntimeOptions::default() + }) + .run()?; result.write_outputs()?; Ok(()) diff --git a/examples/drusano/config.toml b/examples/drusano/config.toml deleted file mode 100644 index ee5f133f0..000000000 --- a/examples/drusano/config.toml +++ /dev/null @@ -1,50 +0,0 @@ -[config] -cycles = 1024 -algorithm = "NPAG" -cache = true - -[random] -v1 = [5.0, 160.0] -cl1 = [4.0, 9.0] -v2 = [100.0, 200.0] -cl2 = [25.0, 35.0] -popmax = [100000000.0, 100000000000.0] -kgs = [0.01, 0.25] -kks = [0.01, 0.5] -e50_1s = [0.1, 2.5] -e50_2s = [0.1, 10.0] -alpha_s = [-8.0, 5.0] -kgr1 = [0.004, 
0.1] -kkr1 = [0.08, 0.4] -e50_1r1 = [8.0, 17.0] -alpha_r1 = [-8.0, 5.0] -kgr2 = [0.004, 0.3] -kkr2 = [0.1, 0.5] -e50_2r2 = [5.0, 8.0] -alpha_r2 = [-5.0, 5.0] -init_4 = [-1.0, 4.0] -init_5 = [-1.0, 3.0] -h1s = [0.5, 8.0] -h2s = [0.1, 4.0] -h1r1 = [5.0, 25.0] -h2r2 = [10.0, 22.0] - -[error] -value = 1.0 -class = "proportional" -poly = [0.1, 0.1, 0.0, 0.0] - -[log] -level = "info" -write = true - -[prior] -sampler = "sobol" -points = 212900 -seed = 347 - -[output] -write = true -path = "examples/drusano/output" - - diff --git a/examples/drusano/main.rs b/examples/drusano/main.rs index 640fb20eb..97fccc543 100644 --- a/examples/drusano/main.rs +++ b/examples/drusano/main.rs @@ -71,70 +71,87 @@ fn main() -> Result<()> { }, }; - let params = Parameters::new() - .add("v1", 5.0, 160.0) - .add("cl1", 4.0, 9.0) - .add("v2", 100.0, 200.0) - .add("cl2", 25.0, 35.0) - .add("popmax", 100000000.0, 100000000000.0) - .add("kgs", 0.01, 0.25) - .add("kks", 0.01, 0.5) - .add("e50_1s", 0.1, 2.5) - .add("e50_2s", 0.1, 10.0) - .add("alpha_s", -8.0, 5.0) - .add("kgr1", 0.004, 0.1) - .add("kkr1", 0.08, 0.4) - .add("e50_1r1", 8.0, 17.0) - .add("alpha_r1", -8.0, 5.0) - .add("kgr2", 0.004, 0.3) - .add("kkr2", 0.1, 0.5) - .add("e50_2r2", 5.0, 8.0) - .add("alpha_r2", -5.0, 5.0) - .add("init_4", -1.0, 4.0) - .add("init_5", -1.0, 3.0) - .add("h1s", 0.5, 8.0) - .add("h2s", 0.1, 4.0) - .add("h1r1", 5.0, 25.0) - .add("h2r2", 10.0, 22.0); + let observations = ObservationSpec::new() + .add_channel(ObservationChannel::continuous(0, "drug_1")) + .add_channel(ObservationChannel::continuous(1, "drug_2")) + .add_channel(ObservationChannel::continuous(2, "total")) + .add_channel(ObservationChannel::continuous(3, "resistant_1")) + .add_channel(ObservationChannel::continuous(4, "resistant_2")) + .with_assay_error_models( + AssayErrorModels::new() + .add( + 0, + AssayErrorModel::proportional(ErrorPoly::new(0.1, 0.1, 0.0, 0.0), 1.0), + )? 
+ .add( + 1, + AssayErrorModel::proportional(ErrorPoly::new(0.1, 0.1, 0.0, 0.0), 1.0), + )? + .add( + 2, + AssayErrorModel::proportional(ErrorPoly::new(0.1, 0.1, 0.0, 0.0), 1.0), + )? + .add( + 3, + AssayErrorModel::proportional(ErrorPoly::new(0.1, 0.1, 0.0, 0.0), 1.0), + )? + .add( + 4, + AssayErrorModel::proportional(ErrorPoly::new(0.1, 0.1, 0.0, 0.0), 1.0), + )?, + ); - let ems = AssayErrorModels::new() - .add( - 0, - AssayErrorModel::proportional(ErrorPoly::new(0.1, 0.1, 0.0, 0.0), 1.0), - )? - .add( - 1, - AssayErrorModel::proportional(ErrorPoly::new(0.1, 0.1, 0.0, 0.0), 1.0), - )? - .add( - 2, - AssayErrorModel::proportional(ErrorPoly::new(0.1, 0.1, 0.0, 0.0), 1.0), - )? - .add( - 3, - AssayErrorModel::proportional(ErrorPoly::new(0.1, 0.1, 0.0, 0.0), 1.0), - )? - .add( - 4, - AssayErrorModel::proportional(ErrorPoly::new(0.1, 0.1, 0.0, 0.0), 1.0), - )?; + let model = ModelDefinition::builder(eq) + .parameters( + ParameterSpace::new() + .add(ParameterSpec::bounded("v1", 5.0, 160.0)) + .add(ParameterSpec::bounded("cl1", 4.0, 9.0)) + .add(ParameterSpec::bounded("v2", 100.0, 200.0)) + .add(ParameterSpec::bounded("cl2", 25.0, 35.0)) + .add(ParameterSpec::bounded( + "popmax", + 100000000.0, + 100000000000.0, + )) + .add(ParameterSpec::bounded("kgs", 0.01, 0.25)) + .add(ParameterSpec::bounded("kks", 0.01, 0.5)) + .add(ParameterSpec::bounded("e50_1s", 0.1, 2.5)) + .add(ParameterSpec::bounded("e50_2s", 0.1, 10.0)) + .add(ParameterSpec::bounded("alpha_s", -8.0, 5.0)) + .add(ParameterSpec::bounded("kgr1", 0.004, 0.1)) + .add(ParameterSpec::bounded("kkr1", 0.08, 0.4)) + .add(ParameterSpec::bounded("e50_1r1", 8.0, 17.0)) + .add(ParameterSpec::bounded("alpha_r1", -8.0, 5.0)) + .add(ParameterSpec::bounded("kgr2", 0.004, 0.3)) + .add(ParameterSpec::bounded("kkr2", 0.1, 0.5)) + .add(ParameterSpec::bounded("e50_2r2", 5.0, 8.0)) + .add(ParameterSpec::bounded("alpha_r2", -5.0, 5.0)) + .add(ParameterSpec::bounded("init_4", -1.0, 4.0)) + .add(ParameterSpec::bounded("init_5", -1.0, 
3.0)) + .add(ParameterSpec::bounded("h1s", 0.5, 8.0)) + .add(ParameterSpec::bounded("h2s", 0.1, 4.0)) + .add(ParameterSpec::bounded("h1r1", 5.0, 25.0)) + .add(ParameterSpec::bounded("h2r2", 10.0, 22.0)), + ) + .observations(observations) + .build() + .unwrap(); - let mut settings = SettingsBuilder::new() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems) - .build(); - - settings.set_prior(Prior::sobol(212900, 347)); - settings.set_output_path("examples/drusano/output"); - - settings.initialize_logs()?; let data = data::read_pmetrics("examples/drusano/data.csv").unwrap(); - let mut algorithm = dispatch_algorithm(settings, eq, data).unwrap(); - algorithm.initialize().unwrap(); - algorithm.fit().unwrap(); - // while !algorithm.next_cycle().unwrap() {} - let mut result = algorithm.into_npresult()?; + let mut result = EstimationProblem::builder(model, data) + .method(EstimationMethod::Nonparametric(NonparametricMethod::Npag( + NpagOptions::default(), + ))) + .output(OutputPlan { + write: true, + path: Some("examples/drusano/output".to_string()), + }) + .runtime(RuntimeOptions { + prior: Some(Prior::sobol(212900, 347)), + ..RuntimeOptions::default() + }) + .run() + .unwrap(); result.write_outputs().unwrap(); Ok(()) } diff --git a/examples/drusano/main_n.rs b/examples/drusano/main_n.rs deleted file mode 100644 index cba01d469..000000000 --- a/examples/drusano/main_n.rs +++ /dev/null @@ -1,351 +0,0 @@ -use std::process::exit; - -use anyhow::Error; -use argmin::{ - core::{CostFunction, Executor, TerminationReason, TerminationStatus}, - solver::neldermead::NelderMead, -}; -use logger::setup_log; -use pmcore::prelude::*; - -fn main() { - let eq = equation::ODE::new( - |x, p, _t, dx, rateiv, _cov| { - // fetch_cov!(cov, t, wt); - fetch_params!( - p, v1, cl1, v2, cl2, popmax, kgs, kks, e50_1s, e50_2s, alpha_s, kgr1, kkr1, - e50_1r1, alpha_r1, kgr2, kkr2, e50_2r2, alpha_r2, init_3, init_4, init_5, h1s, h2s, - h1r1, h2r2 - ); - // Sec - let 
e50_2r1 = e50_2s; - let e50_1r2 = e50_1s; - let h2r1 = h2s; - let h1r2 = h1s; - let mut xm0best = 0.0; - - dx[0] = rateiv[0] - cl1 * x[0] / v1; - dx[1] = rateiv[1] - cl2 * x[1] / v2; - - let xns = x[2]; - let xnr1 = x[3]; - let xnr2 = x[4]; - let e = 1.0 - (xns + xnr1 + xnr2) / popmax; - let mut d1 = x[0] / v1; - let mut d2 = x[1] / v2; - let mut u = d1 / e50_1s; - let mut v = d2 / e50_2s; - let mut w = alpha_s * d1 * d2 / (e50_1s * e50_2s); - let mut h1 = 1.0_f64 / h1s; - let mut h2 = 1.0_f64 / h2s; - let mut xx = (h1 + h2) / 2.0; - if u < 1.0E-5 && v < 1.0E-5 { - xm0best = 0.0; - } else { - if v < 0.0 { - xm0best = u.powf(1.0 / h1); - } - if u < 0.0 { - xm0best = v.powf(1.0 / h2); - } - - if v > 0.0 && u > 0.0 { - let start = 0.00001; - let tol = 1.0e-10; - let step = -2.0 * start; - // CALL ELDERY(1,START,XM0BEST1,VALMIN1,TOL,STEP,1000,BESTM0,0,ICONV,NITER,ICNT) - let bm0 = BESTM0 { - u, - v, - w, - h1, - h2, - xx, - }; - let (xm0best1, valmin1, iconv) = bm0.get_best(start, step); - if iconv == false { - // Output a message indicating no convergence on the selection of best M0 for s - println!(" NO CONVERGENCE ON SELECTION OF BEST M0 FOR s."); - - // Output a message indicating the XP(3) EQ... - println!(" FOR THE XP(3) EQ.... "); - - // Output the values of XM0BEST1 and VALMIN1 with formatting - println!(" THE EST. 
FOR M0 FROM ELDERY WAS {:>20.12}", xm0best1); - println!(" AND THIS GAVE A VALMIN OF {:>20.12}", valmin1); - - // Output the values of D1, D2, U, V, W, ALPHA_S, H1, and H2 with formatting - println!(" NOTE THAT D1,D2 = {:>20.12} {:>20.12}", d1, d2); - println!(" U,V = {:>20.12} {:>20.12}", u, v); - println!(" W,ALPHA_S = {:>20.12} {:>20.12}", w, alpha_s); - println!(" H1,H2 = {:>20.12} {:>20.12}", h1, h2); - - exit(-1); - } - if valmin1 < 1.0e-10 { - xm0best = xm0best1; - } else { - // CALL FINDM0(U,V,alpha_s,H1,H2,XM0EST) - let xm0est = find_m0(u, v, alpha_s, h1, h2); - if xm0est < 0.0 { - xm0best = xm0best1; - } else { - // START(1) = XM0EST - // STEP(1)= -.2D0*START(1) - // CALL ELDERY(1,START,XM0BEST2,VALMIN2,TOL,STEP,1000,BESTM0,0,ICONV,NITER,ICNT) - let bm0 = BESTM0 { - u, - v, - w, - h1, - h2, - xx, - }; - let (xm0best2, valmin2, iconv) = bm0.get_best(xm0est, -2.0 * xm0est); - xm0best = xm0best1; - if valmin2 < valmin1 { - xm0best = xm0best2; - } - if iconv == false { - panic!("NO CONVERGENCE ON SELECTION OF BEST M0 FOR s."); - } //235 - } //237 - } //240 - } //243 - } - let xms = xm0best / (xm0best + 1.0); - dx[2] = xns * (kgs * e - kks * xms); - - d1 = x[0] / v1; - d2 = x[1] / v2; - u = d1 / e50_1r1; - v = d2 / e50_2r1; - w = alpha_r1 * d1 * d2 / (e50_1r1 * e50_2r1); - h1 = 1.0_f64 / h1r1; - h2 = 1.0_f64 / h2r1; - xx = (h1 + h2) / 2.0; - if u < 1.0e-5 && v < 1.0e-5 { - xm0best = 0.0; - } else { - if v < 0.0 { - xm0best = u.powf(1.0 / h1); - } - if u < 0.0 { - xm0best = v.powf(1.0 / h2); - } - if v > 0.0 && u > 0.0 { - //START(1) = .00001 - let tol = 1.0e-10; - // STEP(1)= -.2D0*START(1) - // CALL ELDERY(1,START,XM0BEST1,VALMIN1,TOL,STEP,1000,BESTM0,0,ICONV,NITER,ICNT) - let bm0 = BESTM0 { - u, - v, - w, - h1, - h2, - xx, - }; - let (xm0best1, valmin1, iconv) = bm0.get_best(0.00001, -2.0 * 0.00001); - if iconv == false { - panic!("NO CONVERGENCE ON SELECTION OF BEST M0 FOR r1."); - } - if valmin1 < 1.0e-10 { - xm0best = xm0best1; - } else { - // CALL 
FINDM0(U,V,alpha_r1,H1,H2,XM0EST) - let xm0est = find_m0(u, v, alpha_s, h1, h2); - if xm0est < 0.0 { - xm0best = xm0best1; - } else { - // START(1) = XM0EST - // STEP(1)= -.2D0*START(1) - // CALL ELDERY(1,START,XM0BEST2,VALMIN2,TOL,STEP,1000,BESTM0,0,ICONV,NITER,ICNT) - let bm0 = BESTM0 { - u, - v, - w, - h1, - h2, - xx, - }; - let (xm0best2, valmin2, iconv) = bm0.get_best(xm0est, -2.0 * xm0est); - xm0best = xm0best1; - if valmin2 < valmin1 { - xm0best = xm0best2; - } - if iconv == false { - panic!("NO CONVERGENCE ON SELECTION OF BEST M0 FOR r1."); - } //235 - } //237 - } //240 - } - } - let xmr1 = xm0best / (xm0best + 1.0); - dx[3] = xnr1 * (kgr1 * e - kkr1 * xmr1); - - d1 = x[0] / v1; - d2 = x[1] / v2; - u = d1 / e50_1r2; - v = d2 / e50_2r2; - w = alpha_r2 * d1 * d2 / (e50_1r2 * e50_2r2); - h1 = 1.0_f64 / h1r2; - h2 = 1.0_f64 / h2r2; - xx = (h1 + h2) / 2.0; - if u < 1.0e-5 && v < 1.0e-5 { - xm0best = 0.0; - } else { - if v < 0.0 { - xm0best = u.powf(1.0 / h1); - } - if u < 0.0 { - xm0best = v.powf(1.0 / h2); - } - - if v > 0.0 && u > 0.0 { - //START(1) = .00001 - let tol = 1.0e-10; - // STEP(1)= -.2D0*START(1) - // CALL ELDERY(1,START,XM0BEST1,VALMIN1,TOL,STEP,1000,BESTM0,0,ICONV,NITER,ICNT) - let xm0best1 = 0.0; - let valmin1 = 0.0; - let iconv = 0.0; - if iconv == 0.0 { - panic!("NO CONVERGENCE ON SELECTION OF BEST M0 FOR r1."); - } - if valmin1 < 1.0e-10 { - xm0best = xm0best1; - } else { - // CALL FINDM0(U,V,alpha_s,H1,H2,XM0EST) - let xm0est = find_m0(u, v, alpha_s, h1, h2); - if xm0est < 0.0 { - xm0best = xm0best1; - } else { - // START(1) = XM0EST - // STEP(1)= -.2D0*START(1) - // CALL ELDERY(1,START,XM0BEST2,VALMIN2,TOL,STEP,1000,BESTM0,0,ICONV,NITER,ICNT) - let xm0best2 = 0.0; - let valmin2 = 0.0; - let iconv = 0.0; - xm0best = xm0best1; - if valmin2 < valmin1 { - xm0best = xm0best2; - } - if iconv == 0.0 { - panic!("NO CONVERGENCE ON SELECTION OF BEST M0 FOR s."); - } //235 - } //237 - } //240 - } //243 - } - let xmr2 = xm0best / (xm0best + 1.0); - 
dx[4] = xnr2 * (kgr2 * e - kkr2 * xmr2); - }, - |_p, _t, _cov| lag! {}, - |_p, _t, _cov| fa! {}, - |p, t, cov, x| { - fetch_params!( - p, v1, cl1, v2, cl2, popmax, kgs, kks, e50_1s, e50_2s, alpha_s, kgr1, kkr1, - e50_1r1, alpha_r1, kgr2, kkr2, e50_2r2, alpha_r2, init_3, init_4, init_5, h1s, h2s, - h1r1, h2r2 - ); - fetch_cov!(cov, t, ic_t); - x[0] = 0.0; - x[1] = 0.0; - x[2] = 10.0_f64.powf(ic_t); - x[3] = 10.0_f64.powf(init_4); - x[4] = 10.0_f64.powf(init_5); - }, - |x, p, _t, _cov, y| { - fetch_params!( - p, v1, cl1, v2, cl2, popmax, kgs, kks, e50_1s, e50_2s, alpha_s, kgr1, kkr1, - e50_1r1, alpha_r1, kgr2, kkr2, e50_2r2, alpha_r2, init_3, init_4, init_5, h1s, h2s, - h1r1, h2r2 - ); - y[0] = x[0] / v1; - y[1] = x[1] / v2; - y[2] = (x[2] + x[3] + x[4]).log10(); - y[3] = x[3].log10(); - y[4] = x[4].log10(); - }, - (1, 1), - ); - let settings = settings::read("examples/drusano/config.toml").unwrap(); - setup_log(&settings); - let data = data::read_pmetrics("examples/drusano/drusano.csv").unwrap(); - let mut algorithm = dispatch_algorithm(settings, eq, data).unwrap(); - let result = algorithm.fit().unwrap(); - result.write_outputs().unwrap(); -} - -struct BESTM0 { - u: f64, - v: f64, - w: f64, - h1: f64, - h2: f64, - xx: f64, -} -impl CostFunction for BESTM0 { - type Param = f64; - type Output = f64; - fn cost(&self, xm0: &Self::Param) -> Result { - let t1 = self.u / xm0.powf(self.h1); - let t2 = self.v / xm0.powf(self.h2); - let t3 = self.w / xm0.powf(self.xx); - - Ok((1.0 - t1 - t2 - t3).powi(2)) - } -} - -impl BESTM0 { - fn get_best(self, start: f64, step: f64) -> (f64, f64, bool) { - let other_point = start + step; - let solver = NelderMead::new(vec![start, other_point]) - .with_sd_tolerance(0.0001) - .unwrap(); - let res = Executor::new(self, solver) - .configure(|state| state.max_iters(1000)) - // .add_observer(SlogLogger::term(), ObserverMode::Always) - .run() - .unwrap(); - let converged = match res.state.termination_status { - 
TerminationStatus::Terminated(reason) => match reason { - TerminationReason::SolverConverged => true, - _ => false, - }, - _ => false, - }; - - ( - res.state.best_param.unwrap(), - res.state.best_cost, - converged, - ) - } -} -fn find_m0(ufinal: f64, v: f64, alpha: f64, h1: f64, h2: f64) -> f64 { - let noint = 1000; - let delu = ufinal / (noint as f64); - let mut xm = v.powf(1.0 / h2); - let mut u = 0.0; - let hh = (h1 + h2) / 2.0; - - for int in 1..=noint { - let top = 1.0 / xm.powf(h1) + alpha * v / xm.powf(hh); - let b1 = u * h1 / xm.powf(h1 + 1.0); - let b2 = v * h2 / xm.powf(h2 + 1.0); - let b3 = alpha * v * u * hh / xm.powf(hh + 1.0); - let xmp = top / (b1 + b2 + b3); - - xm = xm + xmp * delu; - - if xm <= 0.0 { - return -1.0; // Greco equation is not solvable - } - - u = delu * (int as f64); - } - - xm // Return the calculated xm0est -} diff --git a/examples/iov/main.rs b/examples/iov/main.rs index 222015dfc..3bdef5ab5 100644 --- a/examples/iov/main.rs +++ b/examples/iov/main.rs @@ -31,30 +31,34 @@ fn main() -> Result<()> { .with_ndrugs(1) .with_nout(1); - let params = Parameters::new().add("ke0", 0.001, 2.0); + let observations = ObservationSpec::new() + .add_channel(ObservationChannel::continuous(0, "cp")) + .with_assay_error_models(AssayErrorModels::new().add( + 0, + AssayErrorModel::additive(ErrorPoly::new(0.0, 0.0, 0.0, 0.0), 0.0000757575757576), + )?); - let ems = AssayErrorModels::new().add( - 0, - AssayErrorModel::additive(ErrorPoly::new(0.0, 0.0, 0.0, 0.0), 0.0000757575757576), - )?; - - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems) - .build(); - - settings.set_cycles(100000); - - settings.set_output_path("examples/iov/output"); - settings.set_prior(Prior::sobol(100, 347)); - - settings.initialize_logs()?; + let model = ModelDefinition::builder(sde) + .parameters(ParameterSpace::new().add(ParameterSpec::bounded("ke0", 0.001, 2.0))) + .observations(observations) + 
.build()?; let data = data::read_pmetrics("examples/iov/test.csv").unwrap(); - let mut algorithm = dispatch_algorithm(settings, sde, data).unwrap(); - algorithm.initialize().unwrap(); - let mut result = algorithm.fit().unwrap(); + let mut result = EstimationProblem::builder(model, data) + .method(EstimationMethod::Nonparametric(NonparametricMethod::Npag( + NpagOptions::default(), + ))) + .output(OutputPlan { + write: true, + path: Some("examples/iov/output".to_string()), + }) + .runtime(RuntimeOptions { + cycles: 100000, + prior: Some(Prior::sobol(100, 347)), + ..RuntimeOptions::default() + }) + .run() + .unwrap(); result.write_outputs().unwrap(); Ok(()) diff --git a/examples/meta/main.rs b/examples/meta/main.rs index 61dc77ebf..8a7e777e3 100644 --- a/examples/meta/main.rs +++ b/examples/meta/main.rs @@ -2,7 +2,7 @@ #![allow(unused_variables)] #![allow(unused_imports)] -use pmcore::{prelude::*, routines::settings}; +use pmcore::prelude::*; fn main() { let eq = ode! { @@ -28,38 +28,48 @@ fn main() { }, }; - let params = Parameters::new() - .add("cls", 0.1, 10.0) - .add("fm", 0.0, 1.0) - .add("k20", 0.01, 1.0) - .add("relv", 0.1, 1.0) - .add("theta1", 0.1, 10.0) - .add("theta2", 0.1, 10.0) - .add("vs", 1.0, 10.0); + let observations = ObservationSpec::new() + .add_channel(ObservationChannel::continuous(1, "cp")) + .add_channel(ObservationChannel::continuous(2, "metabolite")) + .with_assay_error_models( + AssayErrorModels::new() + .add( + 1, + AssayErrorModel::proportional(ErrorPoly::new(1.0, 0.1, 0.0, 0.0), 5.0), + ) + .unwrap() + .add( + 2, + AssayErrorModel::proportional(ErrorPoly::new(1.0, 0.1, 0.0, 0.0), 5.0), + ) + .unwrap(), + ); - let ems = AssayErrorModels::new() - .add( - 1, - AssayErrorModel::proportional(ErrorPoly::new(1.0, 0.1, 0.0, 0.0), 5.0), - ) - .unwrap() - .add( - 2, - AssayErrorModel::proportional(ErrorPoly::new(1.0, 0.1, 0.0, 0.0), 5.0), + let model = ModelDefinition::builder(eq) + .parameters( + ParameterSpace::new() + 
.add(ParameterSpec::bounded("cls", 0.1, 10.0)) + .add(ParameterSpec::bounded("fm", 0.0, 1.0)) + .add(ParameterSpec::bounded("k20", 0.01, 1.0)) + .add(ParameterSpec::bounded("relv", 0.1, 1.0)) + .add(ParameterSpec::bounded("theta1", 0.1, 10.0)) + .add(ParameterSpec::bounded("theta2", 0.1, 10.0)) + .add(ParameterSpec::bounded("vs", 1.0, 10.0)), ) + .observations(observations) + .build() .unwrap(); - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems) - .build(); - - settings.initialize_logs().unwrap(); let data = data::read_pmetrics("examples/meta/meta.csv").unwrap(); - let mut algorithm = dispatch_algorithm(settings, eq, data).unwrap(); - // let result = algorithm.fit().unwrap(); - algorithm.initialize().unwrap(); - let mut result = algorithm.fit().unwrap(); + let mut result = EstimationProblem::builder(model, data) + .method(EstimationMethod::Nonparametric(NonparametricMethod::Npod( + NpodOptions::default(), + ))) + .runtime(RuntimeOptions { + cycles: 10000, + ..RuntimeOptions::default() + }) + .run() + .unwrap(); result.write_outputs().unwrap(); } diff --git a/examples/neely/main.rs b/examples/neely/main.rs index 3cf903b19..7e9dcfbd7 100644 --- a/examples/neely/main.rs +++ b/examples/neely/main.rs @@ -46,48 +46,62 @@ fn main() { y[3] = x[3] / vm2; }, }; - let params = Parameters::new() - .add("cls", 0.0, 0.4) - .add("k30", 0.0, 0.5) - .add("k40", 0.3, 1.5) - .add("qs", 0.0, 0.5) - .add("vps", 0.0, 5.0) - .add("vs", 0.0, 2.0) - .add("fm1", 0.0, 0.2) - .add("fm2", 0.0, 0.1) - .add("theta1", -4.0, 2.0) - .add("theta2", -2.0, 0.5); + let observations = ObservationSpec::new() + .add_channel(ObservationChannel::continuous(1, "cp")) + .add_channel(ObservationChannel::continuous(2, "m1")) + .add_channel(ObservationChannel::continuous(3, "m2")) + .with_assay_error_models( + AssayErrorModels::new() + .add( + 1, + AssayErrorModel::proportional(ErrorPoly::new(1.0, 0.1, 0.0, 0.0), 5.0), + ) + .unwrap() + 
.add( + 2, + AssayErrorModel::proportional(ErrorPoly::new(1.0, 0.1, 0.0, 0.0), 5.0), + ) + .unwrap() + .add( + 3, + AssayErrorModel::proportional(ErrorPoly::new(1.0, 0.1, 0.0, 0.0), 5.0), + ) + .unwrap(), + ); - let ems = AssayErrorModels::new() - .add( - 1, - AssayErrorModel::proportional(ErrorPoly::new(1.0, 0.1, 0.0, 0.0), 5.0), - ) - .unwrap() - .add( - 2, - AssayErrorModel::proportional(ErrorPoly::new(1.0, 0.1, 0.0, 0.0), 5.0), - ) - .unwrap() - .add( - 3, - AssayErrorModel::proportional(ErrorPoly::new(1.0, 0.1, 0.0, 0.0), 5.0), + let model = ModelDefinition::builder(ode) + .parameters( + ParameterSpace::new() + .add(ParameterSpec::bounded("cls", 0.0, 0.4)) + .add(ParameterSpec::bounded("k30", 0.0, 0.5)) + .add(ParameterSpec::bounded("k40", 0.3, 1.5)) + .add(ParameterSpec::bounded("qs", 0.0, 0.5)) + .add(ParameterSpec::bounded("vps", 0.0, 5.0)) + .add(ParameterSpec::bounded("vs", 0.0, 2.0)) + .add(ParameterSpec::bounded("fm1", 0.0, 0.2)) + .add(ParameterSpec::bounded("fm2", 0.0, 0.1)) + .add(ParameterSpec::bounded("theta1", -4.0, 2.0)) + .add(ParameterSpec::bounded("theta2", -2.0, 0.5)), ) + .observations(observations) + .build() .unwrap(); - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems) - .build(); - settings.set_cycles(1000); - settings.set_prior(Prior::sobol(2028, 22)); - settings.set_output_path("examples/neely/output/"); - settings.set_write_logs(true); - settings.write().unwrap(); - settings.initialize_logs().unwrap(); let data = data::read_pmetrics("examples/neely/data.csv").unwrap(); - let mut algorithm = dispatch_algorithm(settings, ode, data).unwrap(); - let mut result = algorithm.fit().unwrap(); + let mut result = EstimationProblem::builder(model, data) + .method(EstimationMethod::Nonparametric(NonparametricMethod::Npag( + NpagOptions::default(), + ))) + .output(OutputPlan { + write: true, + path: Some("examples/neely/output/".to_string()), + }) + .runtime(RuntimeOptions { 
+ cycles: 1000, + prior: Some(Prior::sobol(2028, 22)), + ..RuntimeOptions::default() + }) + .run() + .unwrap(); result.write_outputs().unwrap(); } diff --git a/examples/new_iov/main.rs b/examples/new_iov/main.rs index 40490b7f2..ddea5c957 100644 --- a/examples/new_iov/main.rs +++ b/examples/new_iov/main.rs @@ -31,32 +31,46 @@ fn main() { .with_ndrugs(1) .with_nout(1); - let params = Parameters::new() - .add("ke0", 0.0001, 2.4) - .add("ske", 0.0001, 0.2); + let observations = ObservationSpec::new() + .add_channel(ObservationChannel::continuous(0, "central")) + .with_assay_error_models( + AssayErrorModels::new() + .add( + 0, + AssayErrorModel::additive( + ErrorPoly::new(-0.00119, 0.44379, -0.45864, 0.16537), + 0.0, + ), + ) + .unwrap(), + ); - let ems = AssayErrorModels::new() - .add( - 0, - AssayErrorModel::additive(ErrorPoly::new(-0.00119, 0.44379, -0.45864, 0.16537), 0.0), + let model = ModelDefinition::builder(sde) + .parameters( + ParameterSpace::new() + .add(ParameterSpec::bounded("ke0", 0.0001, 2.4)) + .add(ParameterSpec::bounded("ske", 0.0001, 0.2)), ) + .observations(observations) + .build() .unwrap(); - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems) - .build(); - - settings.set_cycles(1000); - settings.set_cache(true); - settings.set_output_path("examples/new_iov/output"); - settings.set_prior(Prior::sobol(100, 347)); - - settings.initialize_logs().unwrap(); let data = data::read_pmetrics("examples/new_iov/data.csv").unwrap(); - let mut algorithm = dispatch_algorithm(settings, sde, data).unwrap(); - algorithm.initialize().unwrap(); - let mut result = algorithm.fit().unwrap(); + let mut result = EstimationProblem::builder(model, data) + .method(EstimationMethod::Nonparametric(NonparametricMethod::Npag( + NpagOptions::default(), + ))) + .output(OutputPlan { + write: true, + path: Some("examples/new_iov/output".to_string()), + }) + .runtime(RuntimeOptions { + cycles: 1000, + cache: 
true, + prior: Some(Prior::sobol(100, 347)), + ..RuntimeOptions::default() + }) + .run() + .unwrap(); result.write_outputs().unwrap(); } diff --git a/examples/theophylline/main.rs b/examples/theophylline/main.rs index 9addb5e2f..df9941dd0 100644 --- a/examples/theophylline/main.rs +++ b/examples/theophylline/main.rs @@ -13,29 +13,34 @@ fn main() { }, ); - let params = Parameters::new() - .add("ka", 0.001, 3.0) - .add("ke", 0.001, 3.0) - .add("v", 0.001, 50.0); + let observations = ObservationSpec::new() + .add_channel(ObservationChannel::continuous(0, "cp")) + .with_assay_error_models( + AssayErrorModels::new() + .add( + 0, + AssayErrorModel::proportional(ErrorPoly::new(0.1, 0.1, 0.0, 0.0), 2.0), + ) + .unwrap(), + ); - let ems = AssayErrorModels::new() - .add( - 0, - AssayErrorModel::proportional(ErrorPoly::new(0.1, 0.1, 0.0, 0.0), 2.0), + let model = ModelDefinition::builder(analytical) + .parameters( + ParameterSpace::new() + .add(ParameterSpec::bounded("ka", 0.001, 3.0)) + .add(ParameterSpec::bounded("ke", 0.001, 3.0)) + .add(ParameterSpec::bounded("v", 0.001, 50.0)), ) + .observations(observations) + .build() .unwrap(); - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems) - .build(); - - settings.initialize_logs().unwrap(); let data = data::read_pmetrics("examples/theophylline/theophylline.csv").unwrap(); - let mut algorithm = dispatch_algorithm(settings, analytical, data).unwrap(); - // let result = algorithm.fit().unwrap(); - algorithm.initialize().unwrap(); - let mut result = algorithm.fit().unwrap(); + let mut result = EstimationProblem::builder(model, data) + .method(EstimationMethod::Nonparametric(NonparametricMethod::Npag( + NpagOptions::default(), + ))) + .run() + .unwrap(); result.write_outputs().unwrap(); } diff --git a/examples/theophylline/theophylline.csv b/examples/theophylline/theophylline.csv index 6fd956a07..eca08e459 100644 --- a/examples/theophylline/theophylline.csv 
+++ b/examples/theophylline/theophylline.csv @@ -1,145 +1,145 @@ ID,EVID,TIME,DUR,DOSE,ADDL,II,INPUT,OUT,OUTEQ,C0,C1,C2,C3,WEIGHT -1,1,0,0,4.02,.,.,1,.,.,.,.,.,.,79.6 -1,0,0.1,.,.,.,.,.,0.74,1,.,.,.,.,79.6 -1,0,0.25,.,.,.,.,.,2.84,1,.,.,.,.,79.6 -1,0,0.57,.,.,.,.,.,6.57,1,.,.,.,.,79.6 -1,0,1.12,.,.,.,.,.,10.5,1,.,.,.,.,79.6 -1,0,2.02,.,.,.,.,.,9.66,1,.,.,.,.,79.6 -1,0,3.82,.,.,.,.,.,8.58,1,.,.,.,.,79.6 -1,0,5.1,.,.,.,.,.,8.36,1,.,.,.,.,79.6 -1,0,7.03,.,.,.,.,.,7.47,1,.,.,.,.,79.6 -1,0,9.05,.,.,.,.,.,6.89,1,.,.,.,.,79.6 -1,0,12.12,.,.,.,.,.,5.94,1,.,.,.,.,79.6 -1,0,24.37,.,.,.,.,.,3.28,1,.,.,.,.,79.6 -2,1,0,0,4.4,.,.,1,.,.,.,.,.,.,72.4 -2,0,0.1,.,.,.,.,.,0,1,.,.,.,.,72.4 -2,0,0.27,.,.,.,.,.,1.72,1,.,.,.,.,72.4 -2,0,0.52,.,.,.,.,.,7.91,1,.,.,.,.,72.4 -2,0,1,.,.,.,.,.,8.31,1,.,.,.,.,72.4 -2,0,1.92,.,.,.,.,.,8.33,1,.,.,.,.,72.4 -2,0,3.5,.,.,.,.,.,6.85,1,.,.,.,.,72.4 -2,0,5.02,.,.,.,.,.,6.08,1,.,.,.,.,72.4 -2,0,7.03,.,.,.,.,.,5.4,1,.,.,.,.,72.4 -2,0,9,.,.,.,.,.,4.55,1,.,.,.,.,72.4 -2,0,12,.,.,.,.,.,3.01,1,.,.,.,.,72.4 -2,0,24.3,.,.,.,.,.,0.9,1,.,.,.,.,72.4 -3,1,0,0,4.53,.,.,1,.,.,.,.,.,.,70.5 -3,0,0.1,.,.,.,.,.,0,1,.,.,.,.,70.5 -3,0,0.27,.,.,.,.,.,4.4,1,.,.,.,.,70.5 -3,0,0.58,.,.,.,.,.,6.9,1,.,.,.,.,70.5 -3,0,1.02,.,.,.,.,.,8.2,1,.,.,.,.,70.5 -3,0,2.02,.,.,.,.,.,7.8,1,.,.,.,.,70.5 -3,0,3.62,.,.,.,.,.,7.5,1,.,.,.,.,70.5 -3,0,5.08,.,.,.,.,.,6.2,1,.,.,.,.,70.5 -3,0,7.07,.,.,.,.,.,5.3,1,.,.,.,.,70.5 -3,0,9,.,.,.,.,.,4.9,1,.,.,.,.,70.5 -3,0,12.15,.,.,.,.,.,3.7,1,.,.,.,.,70.5 -3,0,24.17,.,.,.,.,.,1.05,1,.,.,.,.,70.5 -4,1,0,0,4.4,.,.,1,.,.,.,.,.,.,72.7 -4,0,0.1,.,.,.,.,.,0,1,.,.,.,.,72.7 -4,0,0.35,.,.,.,.,.,1.89,1,.,.,.,.,72.7 -4,0,0.6,.,.,.,.,.,4.6,1,.,.,.,.,72.7 -4,0,1.07,.,.,.,.,.,8.6,1,.,.,.,.,72.7 -4,0,2.13,.,.,.,.,.,8.38,1,.,.,.,.,72.7 -4,0,3.5,.,.,.,.,.,7.54,1,.,.,.,.,72.7 -4,0,5.02,.,.,.,.,.,6.88,1,.,.,.,.,72.7 -4,0,7.02,.,.,.,.,.,5.78,1,.,.,.,.,72.7 -4,0,9.02,.,.,.,.,.,5.33,1,.,.,.,.,72.7 -4,0,11.98,.,.,.,.,.,4.19,1,.,.,.,.,72.7 
-4,0,24.65,.,.,.,.,.,1.15,1,.,.,.,.,72.7 -5,1,0,0,5.86,.,.,1,.,.,.,.,.,.,54.6 -5,0,0.1,.,.,.,.,.,0,1,.,.,.,.,54.6 -5,0,0.3,.,.,.,.,.,2.02,1,.,.,.,.,54.6 -5,0,0.52,.,.,.,.,.,5.63,1,.,.,.,.,54.6 -5,0,1,.,.,.,.,.,11.4,1,.,.,.,.,54.6 -5,0,2.02,.,.,.,.,.,9.33,1,.,.,.,.,54.6 -5,0,3.5,.,.,.,.,.,8.74,1,.,.,.,.,54.6 -5,0,5.02,.,.,.,.,.,7.56,1,.,.,.,.,54.6 -5,0,7.02,.,.,.,.,.,7.09,1,.,.,.,.,54.6 -5,0,9.1,.,.,.,.,.,5.9,1,.,.,.,.,54.6 -5,0,12,.,.,.,.,.,4.37,1,.,.,.,.,54.6 -5,0,24.35,.,.,.,.,.,1.57,1,.,.,.,.,54.6 -6,1,0,0,4,.,.,1,.,.,.,.,.,.,80 -6,0,0.1,.,.,.,.,.,0,1,.,.,.,.,80 -6,0,0.27,.,.,.,.,.,1.29,1,.,.,.,.,80 -6,0,0.58,.,.,.,.,.,3.08,1,.,.,.,.,80 -6,0,1.15,.,.,.,.,.,6.44,1,.,.,.,.,80 -6,0,2.03,.,.,.,.,.,6.32,1,.,.,.,.,80 -6,0,3.57,.,.,.,.,.,5.53,1,.,.,.,.,80 -6,0,5,.,.,.,.,.,4.94,1,.,.,.,.,80 -6,0,7,.,.,.,.,.,4.02,1,.,.,.,.,80 -6,0,9.22,.,.,.,.,.,3.46,1,.,.,.,.,80 -6,0,12.1,.,.,.,.,.,2.78,1,.,.,.,.,80 -6,0,23.85,.,.,.,.,.,0.92,1,.,.,.,.,80 -7,1,0,0,4.95,.,.,1,.,.,.,.,.,.,64.6 -7,0,0.1,.,.,.,.,.,0.15,1,.,.,.,.,64.6 -7,0,0.25,.,.,.,.,.,0.85,1,.,.,.,.,64.6 -7,0,0.5,.,.,.,.,.,2.35,1,.,.,.,.,64.6 -7,0,1.02,.,.,.,.,.,5.02,1,.,.,.,.,64.6 -7,0,2.02,.,.,.,.,.,6.58,1,.,.,.,.,64.6 -7,0,3.48,.,.,.,.,.,7.09,1,.,.,.,.,64.6 -7,0,5,.,.,.,.,.,6.66,1,.,.,.,.,64.6 -7,0,6.98,.,.,.,.,.,5.25,1,.,.,.,.,64.6 -7,0,9,.,.,.,.,.,4.39,1,.,.,.,.,64.6 -7,0,12.05,.,.,.,.,.,3.53,1,.,.,.,.,64.6 -7,0,24.22,.,.,.,.,.,1.15,1,.,.,.,.,64.6 -8,1,0,0,4.53,.,.,1,.,.,.,.,.,.,70.5 -8,0,0.1,.,.,.,.,.,0,1,.,.,.,.,70.5 -8,0,0.25,.,.,.,.,.,3.05,1,.,.,.,.,70.5 -8,0,0.52,.,.,.,.,.,3.05,1,.,.,.,.,70.5 -8,0,0.98,.,.,.,.,.,7.31,1,.,.,.,.,70.5 -8,0,2.02,.,.,.,.,.,7.56,1,.,.,.,.,70.5 -8,0,3.53,.,.,.,.,.,6.59,1,.,.,.,.,70.5 -8,0,5.05,.,.,.,.,.,5.88,1,.,.,.,.,70.5 -8,0,7.15,.,.,.,.,.,4.73,1,.,.,.,.,70.5 -8,0,9.07,.,.,.,.,.,4.57,1,.,.,.,.,70.5 -8,0,12.1,.,.,.,.,.,3,1,.,.,.,.,70.5 -8,0,24.12,.,.,.,.,.,1.25,1,.,.,.,.,70.5 -9,1,0,0,3.1,.,.,1,.,.,.,.,.,.,86.4 -9,0,0.1,.,.,.,.,.,0,1,.,.,.,.,86.4 -9,0,0.3,.,.,.,.,.,7.37,1,.,.,.,.,86.4 
-9,0,0.63,.,.,.,.,.,9.03,1,.,.,.,.,86.4 -9,0,1.05,.,.,.,.,.,7.14,1,.,.,.,.,86.4 -9,0,2.02,.,.,.,.,.,6.33,1,.,.,.,.,86.4 -9,0,3.53,.,.,.,.,.,5.66,1,.,.,.,.,86.4 -9,0,5.02,.,.,.,.,.,5.67,1,.,.,.,.,86.4 -9,0,7.17,.,.,.,.,.,4.24,1,.,.,.,.,86.4 -9,0,8.8,.,.,.,.,.,4.11,1,.,.,.,.,86.4 -9,0,11.6,.,.,.,.,.,3.16,1,.,.,.,.,86.4 -9,0,24.43,.,.,.,.,.,1.12,1,.,.,.,.,86.4 -10,1,0,0,5.5,.,.,1,.,.,.,.,.,.,58.2 -10,0,0.1,.,.,.,.,.,0.24,1,.,.,.,.,58.2 -10,0,0.37,.,.,.,.,.,2.89,1,.,.,.,.,58.2 -10,0,0.77,.,.,.,.,.,5.22,1,.,.,.,.,58.2 -10,0,1.02,.,.,.,.,.,6.41,1,.,.,.,.,58.2 -10,0,2.05,.,.,.,.,.,7.83,1,.,.,.,.,58.2 -10,0,3.55,.,.,.,.,.,10.21,1,.,.,.,.,58.2 -10,0,5.05,.,.,.,.,.,9.18,1,.,.,.,.,58.2 -10,0,7.08,.,.,.,.,.,8.02,1,.,.,.,.,58.2 -10,0,9.38,.,.,.,.,.,7.14,1,.,.,.,.,58.2 -10,0,12.1,.,.,.,.,.,5.68,1,.,.,.,.,58.2 -10,0,23.7,.,.,.,.,.,2.42,1,.,.,.,.,58.2 -11,1,0,0,4.92,.,.,1,.,.,.,.,.,.,65 -11,0,0.1,.,.,.,.,.,0,1,.,.,.,.,65 -11,0,0.25,.,.,.,.,.,4.86,1,.,.,.,.,65 -11,0,0.5,.,.,.,.,.,7.24,1,.,.,.,.,65 -11,0,0.98,.,.,.,.,.,8,1,.,.,.,.,65 -11,0,1.98,.,.,.,.,.,6.81,1,.,.,.,.,65 -11,0,3.6,.,.,.,.,.,5.87,1,.,.,.,.,65 -11,0,5.02,.,.,.,.,.,5.22,1,.,.,.,.,65 -11,0,7.03,.,.,.,.,.,4.45,1,.,.,.,.,65 -11,0,9.03,.,.,.,.,.,3.62,1,.,.,.,.,65 -11,0,12.12,.,.,.,.,.,2.69,1,.,.,.,.,65 -11,0,24.08,.,.,.,.,.,0.86,1,.,.,.,.,65 -12,1,0,0,5.3,.,.,1,.,.,.,.,.,.,60.5 -12,0,0.1,.,.,.,.,.,0,1,.,.,.,.,60.5 -12,0,0.25,.,.,.,.,.,1.25,1,.,.,.,.,60.5 -12,0,0.5,.,.,.,.,.,3.96,1,.,.,.,.,60.5 -12,0,1,.,.,.,.,.,7.82,1,.,.,.,.,60.5 -12,0,2,.,.,.,.,.,9.72,1,.,.,.,.,60.5 -12,0,3.52,.,.,.,.,.,9.75,1,.,.,.,.,60.5 -12,0,5.07,.,.,.,.,.,8.57,1,.,.,.,.,60.5 -12,0,7.07,.,.,.,.,.,6.59,1,.,.,.,.,60.5 -12,0,9.03,.,.,.,.,.,6.11,1,.,.,.,.,60.5 -12,0,12.05,.,.,.,.,.,4.57,1,.,.,.,.,60.5 -12,0,24.15,.,.,.,.,.,1.17,1,.,.,.,.,60.5 +1,1,0,0,4.02,.,.,0,.,.,.,.,.,.,79.6 +1,0,0.1,.,.,.,.,.,0.74,0,.,.,.,.,79.6 +1,0,0.25,.,.,.,.,.,2.84,0,.,.,.,.,79.6 +1,0,0.57,.,.,.,.,.,6.57,0,.,.,.,.,79.6 +1,0,1.12,.,.,.,.,.,10.5,0,.,.,.,.,79.6 
+1,0,2.02,.,.,.,.,.,9.66,0,.,.,.,.,79.6 +1,0,3.82,.,.,.,.,.,8.58,0,.,.,.,.,79.6 +1,0,5.1,.,.,.,.,.,8.36,0,.,.,.,.,79.6 +1,0,7.03,.,.,.,.,.,7.47,0,.,.,.,.,79.6 +1,0,9.05,.,.,.,.,.,6.89,0,.,.,.,.,79.6 +1,0,12.12,.,.,.,.,.,5.94,0,.,.,.,.,79.6 +1,0,24.37,.,.,.,.,.,3.28,0,.,.,.,.,79.6 +2,1,0,0,4.4,.,.,0,.,.,.,.,.,.,72.4 +2,0,0.1,.,.,.,.,.,0,0,.,.,.,.,72.4 +2,0,0.27,.,.,.,.,.,1.72,0,.,.,.,.,72.4 +2,0,0.52,.,.,.,.,.,7.91,0,.,.,.,.,72.4 +2,0,1,.,.,.,.,.,8.31,0,.,.,.,.,72.4 +2,0,1.92,.,.,.,.,.,8.33,0,.,.,.,.,72.4 +2,0,3.5,.,.,.,.,.,6.85,0,.,.,.,.,72.4 +2,0,5.02,.,.,.,.,.,6.08,0,.,.,.,.,72.4 +2,0,7.03,.,.,.,.,.,5.4,0,.,.,.,.,72.4 +2,0,9,.,.,.,.,.,4.55,0,.,.,.,.,72.4 +2,0,12,.,.,.,.,.,3.01,0,.,.,.,.,72.4 +2,0,24.3,.,.,.,.,.,0.9,0,.,.,.,.,72.4 +3,1,0,0,4.53,.,.,0,.,.,.,.,.,.,70.5 +3,0,0.1,.,.,.,.,.,0,0,.,.,.,.,70.5 +3,0,0.27,.,.,.,.,.,4.4,0,.,.,.,.,70.5 +3,0,0.58,.,.,.,.,.,6.9,0,.,.,.,.,70.5 +3,0,1.02,.,.,.,.,.,8.2,0,.,.,.,.,70.5 +3,0,2.02,.,.,.,.,.,7.8,0,.,.,.,.,70.5 +3,0,3.62,.,.,.,.,.,7.5,0,.,.,.,.,70.5 +3,0,5.08,.,.,.,.,.,6.2,0,.,.,.,.,70.5 +3,0,7.07,.,.,.,.,.,5.3,0,.,.,.,.,70.5 +3,0,9,.,.,.,.,.,4.9,0,.,.,.,.,70.5 +3,0,12.15,.,.,.,.,.,3.7,0,.,.,.,.,70.5 +3,0,24.17,.,.,.,.,.,1.05,0,.,.,.,.,70.5 +4,1,0,0,4.4,.,.,0,.,.,.,.,.,.,72.7 +4,0,0.1,.,.,.,.,.,0,0,.,.,.,.,72.7 +4,0,0.35,.,.,.,.,.,1.89,0,.,.,.,.,72.7 +4,0,0.6,.,.,.,.,.,4.6,0,.,.,.,.,72.7 +4,0,1.07,.,.,.,.,.,8.6,0,.,.,.,.,72.7 +4,0,2.13,.,.,.,.,.,8.38,0,.,.,.,.,72.7 +4,0,3.5,.,.,.,.,.,7.54,0,.,.,.,.,72.7 +4,0,5.02,.,.,.,.,.,6.88,0,.,.,.,.,72.7 +4,0,7.02,.,.,.,.,.,5.78,0,.,.,.,.,72.7 +4,0,9.02,.,.,.,.,.,5.33,0,.,.,.,.,72.7 +4,0,11.98,.,.,.,.,.,4.19,0,.,.,.,.,72.7 +4,0,24.65,.,.,.,.,.,1.15,0,.,.,.,.,72.7 +5,1,0,0,5.86,.,.,0,.,.,.,.,.,.,54.6 +5,0,0.1,.,.,.,.,.,0,0,.,.,.,.,54.6 +5,0,0.3,.,.,.,.,.,2.02,0,.,.,.,.,54.6 +5,0,0.52,.,.,.,.,.,5.63,0,.,.,.,.,54.6 +5,0,1,.,.,.,.,.,11.4,0,.,.,.,.,54.6 +5,0,2.02,.,.,.,.,.,9.33,0,.,.,.,.,54.6 +5,0,3.5,.,.,.,.,.,8.74,0,.,.,.,.,54.6 +5,0,5.02,.,.,.,.,.,7.56,0,.,.,.,.,54.6 
+5,0,7.02,.,.,.,.,.,7.09,0,.,.,.,.,54.6 +5,0,9.1,.,.,.,.,.,5.9,0,.,.,.,.,54.6 +5,0,12,.,.,.,.,.,4.37,0,.,.,.,.,54.6 +5,0,24.35,.,.,.,.,.,1.57,0,.,.,.,.,54.6 +6,1,0,0,4,.,.,0,.,.,.,.,.,.,80 +6,0,0.1,.,.,.,.,.,0,0,.,.,.,.,80 +6,0,0.27,.,.,.,.,.,1.29,0,.,.,.,.,80 +6,0,0.58,.,.,.,.,.,3.08,0,.,.,.,.,80 +6,0,1.15,.,.,.,.,.,6.44,0,.,.,.,.,80 +6,0,2.03,.,.,.,.,.,6.32,0,.,.,.,.,80 +6,0,3.57,.,.,.,.,.,5.53,0,.,.,.,.,80 +6,0,5,.,.,.,.,.,4.94,0,.,.,.,.,80 +6,0,7,.,.,.,.,.,4.02,0,.,.,.,.,80 +6,0,9.22,.,.,.,.,.,3.46,0,.,.,.,.,80 +6,0,12.1,.,.,.,.,.,2.78,0,.,.,.,.,80 +6,0,23.85,.,.,.,.,.,0.92,0,.,.,.,.,80 +7,1,0,0,4.95,.,.,0,.,.,.,.,.,.,64.6 +7,0,0.1,.,.,.,.,.,0.15,0,.,.,.,.,64.6 +7,0,0.25,.,.,.,.,.,0.85,0,.,.,.,.,64.6 +7,0,0.5,.,.,.,.,.,2.35,0,.,.,.,.,64.6 +7,0,1.02,.,.,.,.,.,5.02,0,.,.,.,.,64.6 +7,0,2.02,.,.,.,.,.,6.58,0,.,.,.,.,64.6 +7,0,3.48,.,.,.,.,.,7.09,0,.,.,.,.,64.6 +7,0,5,.,.,.,.,.,6.66,0,.,.,.,.,64.6 +7,0,6.98,.,.,.,.,.,5.25,0,.,.,.,.,64.6 +7,0,9,.,.,.,.,.,4.39,0,.,.,.,.,64.6 +7,0,12.05,.,.,.,.,.,3.53,0,.,.,.,.,64.6 +7,0,24.22,.,.,.,.,.,1.15,0,.,.,.,.,64.6 +8,1,0,0,4.53,.,.,0,.,.,.,.,.,.,70.5 +8,0,0.1,.,.,.,.,.,0,0,.,.,.,.,70.5 +8,0,0.25,.,.,.,.,.,3.05,0,.,.,.,.,70.5 +8,0,0.52,.,.,.,.,.,3.05,0,.,.,.,.,70.5 +8,0,0.98,.,.,.,.,.,7.31,0,.,.,.,.,70.5 +8,0,2.02,.,.,.,.,.,7.56,0,.,.,.,.,70.5 +8,0,3.53,.,.,.,.,.,6.59,0,.,.,.,.,70.5 +8,0,5.05,.,.,.,.,.,5.88,0,.,.,.,.,70.5 +8,0,7.15,.,.,.,.,.,4.73,0,.,.,.,.,70.5 +8,0,9.07,.,.,.,.,.,4.57,0,.,.,.,.,70.5 +8,0,12.1,.,.,.,.,.,3,0,.,.,.,.,70.5 +8,0,24.12,.,.,.,.,.,1.25,0,.,.,.,.,70.5 +9,1,0,0,3.1,.,.,0,.,.,.,.,.,.,86.4 +9,0,0.1,.,.,.,.,.,0,0,.,.,.,.,86.4 +9,0,0.3,.,.,.,.,.,7.37,0,.,.,.,.,86.4 +9,0,0.63,.,.,.,.,.,9.03,0,.,.,.,.,86.4 +9,0,1.05,.,.,.,.,.,7.14,0,.,.,.,.,86.4 +9,0,2.02,.,.,.,.,.,6.33,0,.,.,.,.,86.4 +9,0,3.53,.,.,.,.,.,5.66,0,.,.,.,.,86.4 +9,0,5.02,.,.,.,.,.,5.67,0,.,.,.,.,86.4 +9,0,7.17,.,.,.,.,.,4.24,0,.,.,.,.,86.4 +9,0,8.8,.,.,.,.,.,4.11,0,.,.,.,.,86.4 +9,0,11.6,.,.,.,.,.,3.16,0,.,.,.,.,86.4 
+9,0,24.43,.,.,.,.,.,1.12,0,.,.,.,.,86.4 +10,1,0,0,5.5,.,.,0,.,.,.,.,.,.,58.2 +10,0,0.1,.,.,.,.,.,0.24,0,.,.,.,.,58.2 +10,0,0.37,.,.,.,.,.,2.89,0,.,.,.,.,58.2 +10,0,0.77,.,.,.,.,.,5.22,0,.,.,.,.,58.2 +10,0,1.02,.,.,.,.,.,6.41,0,.,.,.,.,58.2 +10,0,2.05,.,.,.,.,.,7.83,0,.,.,.,.,58.2 +10,0,3.55,.,.,.,.,.,10.21,0,.,.,.,.,58.2 +10,0,5.05,.,.,.,.,.,9.18,0,.,.,.,.,58.2 +10,0,7.08,.,.,.,.,.,8.02,0,.,.,.,.,58.2 +10,0,9.38,.,.,.,.,.,7.14,0,.,.,.,.,58.2 +10,0,12.1,.,.,.,.,.,5.68,0,.,.,.,.,58.2 +10,0,23.7,.,.,.,.,.,2.42,0,.,.,.,.,58.2 +11,1,0,0,4.92,.,.,0,.,.,.,.,.,.,65 +11,0,0.1,.,.,.,.,.,0,0,.,.,.,.,65 +11,0,0.25,.,.,.,.,.,4.86,0,.,.,.,.,65 +11,0,0.5,.,.,.,.,.,7.24,0,.,.,.,.,65 +11,0,0.98,.,.,.,.,.,8,0,.,.,.,.,65 +11,0,1.98,.,.,.,.,.,6.81,0,.,.,.,.,65 +11,0,3.6,.,.,.,.,.,5.87,0,.,.,.,.,65 +11,0,5.02,.,.,.,.,.,5.22,0,.,.,.,.,65 +11,0,7.03,.,.,.,.,.,4.45,0,.,.,.,.,65 +11,0,9.03,.,.,.,.,.,3.62,0,.,.,.,.,65 +11,0,12.12,.,.,.,.,.,2.69,0,.,.,.,.,65 +11,0,24.08,.,.,.,.,.,0.86,0,.,.,.,.,65 +12,1,0,0,5.3,.,.,0,.,.,.,.,.,.,60.5 +12,0,0.1,.,.,.,.,.,0,0,.,.,.,.,60.5 +12,0,0.25,.,.,.,.,.,1.25,0,.,.,.,.,60.5 +12,0,0.5,.,.,.,.,.,3.96,0,.,.,.,.,60.5 +12,0,1,.,.,.,.,.,7.82,0,.,.,.,.,60.5 +12,0,2,.,.,.,.,.,9.72,0,.,.,.,.,60.5 +12,0,3.52,.,.,.,.,.,9.75,0,.,.,.,.,60.5 +12,0,5.07,.,.,.,.,.,8.57,0,.,.,.,.,60.5 +12,0,7.07,.,.,.,.,.,6.59,0,.,.,.,.,60.5 +12,0,9.03,.,.,.,.,.,6.11,0,.,.,.,.,60.5 +12,0,12.05,.,.,.,.,.,4.57,0,.,.,.,.,60.5 +12,0,24.15,.,.,.,.,.,1.17,0,.,.,.,.,60.5 diff --git a/examples/two_eq_lag/main.rs b/examples/two_eq_lag/main.rs index 92454a6b6..58c5e7be7 100644 --- a/examples/two_eq_lag/main.rs +++ b/examples/two_eq_lag/main.rs @@ -64,31 +64,38 @@ fn main() { // (2, 1), // ); - let params = Parameters::new() - .add("ka", 0.1, 0.9) - .add("ke", 0.001, 0.1) - .add("tlag", 0.0, 4.0) - .add("v", 30.0, 120.0); + let observations = ObservationSpec::new() + .add_channel(ObservationChannel::continuous(1, "cp")) + .with_assay_error_models( + AssayErrorModels::new() + .add( + 1, + 
AssayErrorModel::additive( + ErrorPoly::new(-0.00119, 0.44379, -0.45864, 0.16537), + 0.0, + ), + ) + .unwrap(), + ); - let ems = AssayErrorModels::new() - .add( - 1, - AssayErrorModel::additive(ErrorPoly::new(-0.00119, 0.44379, -0.45864, 0.16537), 0.0), + let model = ModelDefinition::builder(eq) + .parameters( + ParameterSpace::new() + .add(ParameterSpec::bounded("ka", 0.1, 0.9)) + .add(ParameterSpec::bounded("ke", 0.001, 0.1)) + .add(ParameterSpec::bounded("tlag", 0.0, 4.0)) + .add(ParameterSpec::bounded("v", 30.0, 120.0)), ) + .observations(observations) + .build() .unwrap(); - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems) - .build(); - - settings.initialize_logs().unwrap(); let data = data::read_pmetrics("examples/two_eq_lag/two_eq_lag.csv").unwrap(); - let mut algorithm = dispatch_algorithm(settings, eq, data).unwrap(); - let mut result = algorithm.fit().unwrap(); - // algorithm.initialize().unwrap(); - // while !algorithm.next_cycle().unwrap() {} - // let result = algorithm.into_npresult(); + let mut result = EstimationProblem::builder(model, data) + .method(EstimationMethod::Nonparametric(NonparametricMethod::Npag( + NpagOptions::default(), + ))) + .run() + .unwrap(); result.write_outputs().unwrap(); } diff --git a/examples/two_eq_lag/two_eq_lag.csv b/examples/two_eq_lag/two_eq_lag.csv index 5de0fb11b..a62f21508 100644 --- a/examples/two_eq_lag/two_eq_lag.csv +++ b/examples/two_eq_lag/two_eq_lag.csv @@ -1,260 +1,260 @@ ID,EVID,TIME,DUR,DOSE,ADDL,II,INPUT,OUT,OUTEQ,C0,C1,C2,C3,WT,AFRICA,AGE,GENDER,HEIGHT -1,1,0,0,600,.,.,1,.,.,.,.,.,.,46.7,1,21,1,160 -1,1,24,0,600,.,.,1,.,.,.,.,.,.,.,.,.,.,. -1,1,48,0,600,.,.,1,.,.,.,.,.,.,.,.,.,.,. -1,1,72,0,600,.,.,1,.,.,.,.,.,.,.,.,.,.,. -1,1,96,0,600,.,.,1,.,.,.,.,.,.,.,.,.,.,. -1,0,120,.,.,.,.,.,10.44,1,.,.,.,.,.,.,.,.,. -1,1,120,0,600,.,.,1,.,.,.,.,.,.,.,.,.,.,. -1,0,121,.,.,.,.,.,12.89,1,.,.,.,.,.,.,.,.,. 
-1,0,122,.,.,.,.,.,14.98,1,.,.,.,.,.,.,.,.,. -1,0,125.99,.,.,.,.,.,16.69,1,.,.,.,.,.,.,.,.,. -1,0,129,.,.,.,.,.,20.15,1,.,.,.,.,.,.,.,.,. -1,0,132,.,.,.,.,.,14.97,1,.,.,.,.,.,.,.,.,. -1,0,143.98,.,.,.,.,.,12.57,1,.,.,.,.,.,.,.,.,. -2,1,0,0,600,.,.,1,.,.,.,.,.,.,66.5,1,30,1,174 -2,1,24,0,600,.,.,1,.,.,.,.,.,.,66.5,1,30,1,174 -2,1,48,0,600,.,.,1,.,.,.,.,.,.,66.5,1,30,1,174 -2,1,72,0,600,.,.,1,.,.,.,.,.,.,66.5,1,30,1,174 -2,1,96,0,600,.,.,1,.,.,.,.,.,.,66.5,1,30,1,174 -2,0,120,.,.,.,.,.,3.56,1,.,.,.,.,66.5,1,30,1,174 -2,1,120,0,600,.,.,1,.,.,.,.,.,.,66.5,1,30,1,174 -2,0,120.98,.,.,.,.,.,5.84,1,.,.,.,.,66.5,1,30,1,174 -2,0,121.98,.,.,.,.,.,6.54,1,.,.,.,.,66.5,1,30,1,174 -2,0,126,.,.,.,.,.,6.14,1,.,.,.,.,66.5,1,30,1,174 -2,0,129.02,.,.,.,.,.,6.56,1,.,.,.,.,66.5,1,30,1,174 -2,0,132.02,.,.,.,.,.,4.44,1,.,.,.,.,66.5,1,30,1,174 -2,0,144,.,.,.,.,.,3.76,1,.,.,.,.,66.5,1,30,1,174 -3,1,0,0,600,.,.,1,.,.,.,.,.,.,46.7,1,24,0,164 -3,1,24,0,600,.,.,1,.,.,.,.,.,.,46.7,1,24,0,164 -3,1,48,0,600,.,.,1,.,.,.,.,.,.,46.7,1,24,0,164 -3,1,72,0,600,.,.,1,.,.,.,.,.,.,46.7,1,24,0,164 -3,1,96,0,600,.,.,1,.,.,.,.,.,.,46.7,1,24,0,164 -3,1,120,0,600,.,.,1,.,.,.,.,.,.,46.7,1,24,0,164 -3,0,120.08,.,.,.,.,.,4.06,1,.,.,.,.,46.7,1,24,0,164 -3,0,121.07,.,.,.,.,.,3.24,1,.,.,.,.,46.7,1,24,0,164 -3,0,122.08,.,.,.,.,.,3.09,1,.,.,.,.,46.7,1,24,0,164 -3,0,126.08,.,.,.,.,.,7.98,1,.,.,.,.,46.7,1,24,0,164 -3,0,129.05,.,.,.,.,.,7.23,1,.,.,.,.,46.7,1,24,0,164 -3,0,132.1,.,.,.,.,.,4.71,1,.,.,.,.,46.7,1,24,0,164 -3,0,144.08,.,.,.,.,.,3.82,1,.,.,.,.,46.7,1,24,0,164 -4,1,0,0,600,.,.,1,.,.,.,.,.,.,50.8,1,25,1,165 -4,1,24,0,600,.,.,1,.,.,.,.,.,.,50.8,1,25,1,165 -4,1,48,0,600,.,.,1,.,.,.,.,.,.,50.8,1,25,1,165 -4,1,72,0,600,.,.,1,.,.,.,.,.,.,50.8,1,25,1,165 -4,1,96,0,600,.,.,1,.,.,.,.,.,.,50.8,1,25,1,165 -4,0,120,.,.,.,.,.,2.1,1,.,.,.,.,50.8,1,25,1,165 -4,1,120,0,600,.,.,1,.,.,.,.,.,.,50.8,1,25,1,165 -4,0,121,.,.,.,.,.,3.05,1,.,.,.,.,50.8,1,25,1,165 -4,0,122.02,.,.,.,.,.,5.21,1,.,.,.,.,50.8,1,25,1,165 
-4,0,126,.,.,.,.,.,5.09,1,.,.,.,.,50.8,1,25,1,165 -4,0,129.03,.,.,.,.,.,4.24,1,.,.,.,.,50.8,1,25,1,165 -4,0,132,.,.,.,.,.,3.69,1,.,.,.,.,50.8,1,25,1,165 -4,0,144.02,.,.,.,.,.,1.96,1,.,.,.,.,50.8,1,25,1,165 -5,1,0,0,600,.,.,1,.,.,.,.,.,.,65.8,1,22,1,181 -5,1,24,0,600,.,.,1,.,.,.,.,.,.,65.8,1,22,1,181 -5,1,48,0,600,.,.,1,.,.,.,.,.,.,65.8,1,22,1,181 -5,1,72,0,600,.,.,1,.,.,.,.,.,.,65.8,1,22,1,181 -5,1,96,0,600,.,.,1,.,.,.,.,.,.,65.8,1,22,1,181 -5,0,120,.,.,.,.,.,2.93,1,.,.,.,.,65.8,1,22,1,181 -5,1,120,0,600,.,.,1,.,.,.,.,.,.,65.8,1,22,1,181 -5,0,121,.,.,.,.,.,2.64,1,.,.,.,.,65.8,1,22,1,181 -5,0,122,.,.,.,.,.,4.8,1,.,.,.,.,65.8,1,22,1,181 -5,0,126,.,.,.,.,.,3.7,1,.,.,.,.,65.8,1,22,1,181 -5,0,129.02,.,.,.,.,.,4.13,1,.,.,.,.,65.8,1,22,1,181 -5,0,132,.,.,.,.,.,2.81,1,.,.,.,.,65.8,1,22,1,181 -5,0,144,.,.,.,.,.,2.21,1,.,.,.,.,65.8,1,22,1,181 -6,1,0,0,600,.,.,1,.,.,.,.,.,.,65,1,23,1,177 -6,1,24,0,600,.,.,1,.,.,.,.,.,.,65,1,23,1,177 -6,1,48,0,600,.,.,1,.,.,.,.,.,.,65,1,23,1,177 -6,1,72,0,600,.,.,1,.,.,.,.,.,.,65,1,23,1,177 -6,1,96,0,600,.,.,1,.,.,.,.,.,.,65,1,23,1,177 -6,0,120,.,.,.,.,.,6.92,1,.,.,.,.,65,1,23,1,177 -6,1,120,0,600,.,.,1,.,.,.,.,.,.,65,1,23,1,177 -6,0,121,.,.,.,.,.,6.89,1,.,.,.,.,65,1,23,1,177 -6,0,121.98,.,.,.,.,.,6.64,1,.,.,.,.,65,1,23,1,177 -6,0,126,.,.,.,.,.,13.72,1,.,.,.,.,65,1,23,1,177 -6,0,129,.,.,.,.,.,12.69,1,.,.,.,.,65,1,23,1,177 -6,0,131.98,.,.,.,.,.,10.58,1,.,.,.,.,65,1,23,1,177 -6,0,144.98,.,.,.,.,.,6.62,1,.,.,.,.,65,1,23,1,177 -7,1,0,0,600,.,.,1,.,.,.,.,.,.,51.7,1,27,0,161 -7,1,24,0,600,.,.,1,.,.,.,.,.,.,51.7,1,27,0,161 -7,1,48,0,600,.,.,1,.,.,.,.,.,.,51.7,1,27,0,161 -7,1,72,0,600,.,.,1,.,.,.,.,.,.,51.7,1,27,0,161 -7,1,96,0,600,.,.,1,.,.,.,.,.,.,51.7,1,27,0,161 -7,0,120,.,.,.,.,.,5.41,1,.,.,.,.,51.7,1,27,0,161 -7,1,120,0,600,.,.,1,.,.,.,.,.,.,51.7,1,27,0,161 -7,0,121.03,.,.,.,.,.,4.46,1,.,.,.,.,51.7,1,27,0,161 -7,0,122.03,.,.,.,.,.,4.54,1,.,.,.,.,51.7,1,27,0,161 -7,0,126.02,.,.,.,.,.,12.19,1,.,.,.,.,51.7,1,27,0,161 
-7,0,129.08,.,.,.,.,.,12.1,1,.,.,.,.,51.7,1,27,0,161 -7,0,132.03,.,.,.,.,.,8.61,1,.,.,.,.,51.7,1,27,0,161 -7,0,144.03,.,.,.,.,.,6.37,1,.,.,.,.,51.7,1,27,0,161 -8,1,0,0,600,.,.,1,.,.,.,.,.,.,51.2,1,22,1,163 -8,1,24,0,600,.,.,1,.,.,.,.,.,.,51.2,1,22,1,163 -8,1,48,0,600,.,.,1,.,.,.,.,.,.,51.2,1,22,1,163 -8,1,72,0,600,.,.,1,.,.,.,.,.,.,51.2,1,22,1,163 -8,1,96,0,600,.,.,1,.,.,.,.,.,.,51.2,1,22,1,163 -8,0,120,.,.,.,.,.,6.19,1,.,.,.,.,51.2,1,22,1,163 -8,1,120,0,600,.,.,1,.,.,.,.,.,.,51.2,1,22,1,163 -8,0,121.03,.,.,.,.,.,6.33,1,.,.,.,.,51.2,1,22,1,163 -8,0,122,.,.,.,.,.,6.24,1,.,.,.,.,51.2,1,22,1,163 -8,0,125.98,.,.,.,.,.,13.03,1,.,.,.,.,51.2,1,22,1,163 -8,0,128.98,.,.,.,.,.,11.86,1,.,.,.,.,51.2,1,22,1,163 -8,0,132,.,.,.,.,.,11.45,1,.,.,.,.,51.2,1,22,1,163 -8,0,143.98,.,.,.,.,.,7.83,1,.,.,.,.,51.2,1,22,1,163 -9,1,0,0,600,.,.,1,.,.,.,.,.,.,55,1,23,1,174 -9,1,24,0,600,.,.,1,.,.,.,.,.,.,55,1,23,1,174 -9,1,48,0,600,.,.,1,.,.,.,.,.,.,55,1,23,1,174 -9,1,72,0,600,.,.,1,.,.,.,.,.,.,55,1,23,1,174 -9,1,96,0,600,.,.,1,.,.,.,.,.,.,55,1,23,1,174 -9,0,120,.,.,.,.,.,2.85,1,.,.,.,.,55,1,23,1,174 -9,1,120,0,600,.,.,1,.,.,.,.,.,.,55,1,23,1,174 -9,0,120.97,.,.,.,.,.,3.7,1,.,.,.,.,55,1,23,1,174 -9,0,122,.,.,.,.,.,6.65,1,.,.,.,.,55,1,23,1,174 -9,0,125.98,.,.,.,.,.,6.81,1,.,.,.,.,55,1,23,1,174 -9,0,128.98,.,.,.,.,.,6.51,1,.,.,.,.,55,1,23,1,174 -9,0,132,.,.,.,.,.,7.48,1,.,.,.,.,55,1,23,1,174 -9,0,143.98,.,.,.,.,.,4.51,1,.,.,.,.,55,1,23,1,174 -10,1,0,0,600,.,.,1,.,.,.,.,.,.,52.1,1,32,1,163 -10,1,24,0,600,.,.,1,.,.,.,.,.,.,52.1,1,32,1,163 -10,1,48,0,600,.,.,1,.,.,.,.,.,.,52.1,1,32,1,163 -10,1,72,0,600,.,.,1,.,.,.,.,.,.,52.1,1,32,1,163 -10,1,96,0,600,.,.,1,.,.,.,.,.,.,52.1,1,32,1,163 -10,0,120,.,.,.,.,.,2.93,1,.,.,.,.,52.1,1,32,1,163 -10,1,120,0,600,.,.,1,.,.,.,.,.,.,52.1,1,32,1,163 -10,0,121,.,.,.,.,.,4.36,1,.,.,.,.,52.1,1,32,1,163 -10,0,122.02,.,.,.,.,.,7.79,1,.,.,.,.,52.1,1,32,1,163 -10,0,126,.,.,.,.,.,11.02,1,.,.,.,.,52.1,1,32,1,163 -10,0,129,.,.,.,.,.,8.86,1,.,.,.,.,52.1,1,32,1,163 
-10,0,131.97,.,.,.,.,.,6.09,1,.,.,.,.,52.1,1,32,1,163 -10,0,144,.,.,.,.,.,4.15,1,.,.,.,.,52.1,1,32,1,163 -11,1,0,0,600,.,.,1,.,.,.,.,.,.,56.5,1,34,1,165 -11,1,24,0,600,.,.,1,.,.,.,.,.,.,56.5,1,34,1,165 -11,1,48,0,600,.,.,1,.,.,.,.,.,.,56.5,1,34,1,165 -11,1,72,0,600,.,.,1,.,.,.,.,.,.,56.5,1,34,1,165 -11,1,96,0,600,.,.,1,.,.,.,.,.,.,56.5,1,34,1,165 -11,0,120,.,.,.,.,.,2.09,1,.,.,.,.,56.5,1,34,1,165 -11,1,120,0,600,.,.,1,.,.,.,.,.,.,56.5,1,34,1,165 -11,0,121.03,.,.,.,.,.,2.68,1,.,.,.,.,56.5,1,34,1,165 -11,0,122,.,.,.,.,.,4.71,1,.,.,.,.,56.5,1,34,1,165 -11,0,125.98,.,.,.,.,.,7.71,1,.,.,.,.,56.5,1,34,1,165 -11,0,129,.,.,.,.,.,6.31,1,.,.,.,.,56.5,1,34,1,165 -11,0,132,.,.,.,.,.,5.82,1,.,.,.,.,56.5,1,34,1,165 -11,0,144.13,.,.,.,.,.,2.63,1,.,.,.,.,56.5,1,34,1,165 -12,1,0,0,600,.,.,1,.,.,.,.,.,.,47.9,1,54,0,160 -12,1,24,0,600,.,.,1,.,.,.,.,.,.,47.9,1,54,0,160 -12,1,48,0,600,.,.,1,.,.,.,.,.,.,47.9,1,54,0,160 -12,1,72,0,600,.,.,1,.,.,.,.,.,.,47.9,1,54,0,160 -12,1,96,0,600,.,.,1,.,.,.,.,.,.,47.9,1,54,0,160 -12,0,120,.,.,.,.,.,7.09,1,.,.,.,.,47.9,1,54,0,160 -12,1,120,0,600,.,.,1,.,.,.,.,.,.,47.9,1,54,0,160 -12,0,121.03,.,.,.,.,.,6.18,1,.,.,.,.,47.9,1,54,0,160 -12,0,122.13,.,.,.,.,.,8.66,1,.,.,.,.,47.9,1,54,0,160 -12,0,126,.,.,.,.,.,11.16,1,.,.,.,.,47.9,1,54,0,160 -12,0,129,.,.,.,.,.,9.51,1,.,.,.,.,47.9,1,54,0,160 -12,0,132,.,.,.,.,.,8.14,1,.,.,.,.,47.9,1,54,0,160 -12,0,144,.,.,.,.,.,7.89,1,.,.,.,.,47.9,1,54,0,160 -13,1,0,0,600,.,.,1,.,.,.,.,.,.,60.5,1,24,1,180 -13,1,24,0,600,.,.,1,.,.,.,.,.,.,60.5,1,24,1,180 -13,1,48,0,600,.,.,1,.,.,.,.,.,.,60.5,1,24,1,180 -13,1,72,0,600,.,.,1,.,.,.,.,.,.,60.5,1,24,1,180 -13,1,96,0,600,.,.,1,.,.,.,.,.,.,60.5,1,24,1,180 -13,0,120,.,.,.,.,.,6.62,1,.,.,.,.,60.5,1,24,1,180 -13,1,120,0,600,.,.,1,.,.,.,.,.,.,60.5,1,24,1,180 -13,0,121,.,.,.,.,.,3.18,1,.,.,.,.,60.5,1,24,1,180 -13,0,122,.,.,.,.,.,5.41,1,.,.,.,.,60.5,1,24,1,180 -13,0,126,.,.,.,.,.,10.18,1,.,.,.,.,60.5,1,24,1,180 -13,0,129.02,.,.,.,.,.,12.84,1,.,.,.,.,60.5,1,24,1,180 
-13,0,132,.,.,.,.,.,12.35,1,.,.,.,.,60.5,1,24,1,180 -13,0,144,.,.,.,.,.,8.06,1,.,.,.,.,60.5,1,24,1,180 -14,1,0,0,600,.,.,1,.,.,.,.,.,.,59.2,1,26,1,174 -14,1,24,0,600,.,.,1,.,.,.,.,.,.,59.2,1,26,1,174 -14,1,48,0,600,.,.,1,.,.,.,.,.,.,59.2,1,26,1,174 -14,1,72,0,600,.,.,1,.,.,.,.,.,.,59.2,1,26,1,174 -14,1,96,0,600,.,.,1,.,.,.,.,.,.,59.2,1,26,1,174 -14,0,120,.,.,.,.,.,3.63,1,.,.,.,.,59.2,1,26,1,174 -14,1,120,0,600,.,.,1,.,.,.,.,.,.,59.2,1,26,1,174 -14,0,121,.,.,.,.,.,4.49,1,.,.,.,.,59.2,1,26,1,174 -14,0,122,.,.,.,.,.,5.5,1,.,.,.,.,59.2,1,26,1,174 -14,0,126,.,.,.,.,.,7.28,1,.,.,.,.,59.2,1,26,1,174 -14,0,129,.,.,.,.,.,5.27,1,.,.,.,.,59.2,1,26,1,174 -14,0,132,.,.,.,.,.,4.89,1,.,.,.,.,59.2,1,26,1,174 -14,0,144,.,.,.,.,.,2.68,1,.,.,.,.,59.2,1,26,1,174 -15,1,0,0,450,.,.,1,.,.,.,.,.,.,43,1,19,0,150 -15,1,24,0,450,.,.,1,.,.,.,.,.,.,43,1,19,0,150 -15,1,48,0,450,.,.,1,.,.,.,.,.,.,43,1,19,0,150 -15,1,72,0,450,.,.,1,.,.,.,.,.,.,43,1,19,0,150 -15,1,96,0,450,.,.,1,.,.,.,.,.,.,43,1,19,0,150 -15,0,120,.,.,.,.,.,5.53,1,.,.,.,.,43,1,19,0,150 -15,1,120,0,450,.,.,1,.,.,.,.,.,.,43,1,19,0,150 -15,0,121,.,.,.,.,.,4.81,1,.,.,.,.,43,1,19,0,150 -15,0,122,.,.,.,.,.,8.14,1,.,.,.,.,43,1,19,0,150 -15,0,126,.,.,.,.,.,9.96,1,.,.,.,.,43,1,19,0,150 -15,0,129,.,.,.,.,.,8.55,1,.,.,.,.,43,1,19,0,150 -15,0,132.05,.,.,.,.,.,7.54,1,.,.,.,.,43,1,19,0,150 -15,0,144.05,.,.,.,.,.,5.74,1,.,.,.,.,43,1,19,0,150 -16,1,0,0,600,.,.,1,.,.,.,.,.,.,64.4,1,25,1,173 -16,1,24,0,600,.,.,1,.,.,.,.,.,.,64.4,1,25,1,173 -16,1,48,0,600,.,.,1,.,.,.,.,.,.,64.4,1,25,1,173 -16,1,72,0,600,.,.,1,.,.,.,.,.,.,64.4,1,25,1,173 -16,1,96,0,600,.,.,1,.,.,.,.,.,.,64.4,1,25,1,173 -16,0,120,.,.,.,.,.,5.48,1,.,.,.,.,64.4,1,25,1,173 -16,1,120,0,600,.,.,1,.,.,.,.,.,.,64.4,1,25,1,173 -16,0,121,.,.,.,.,.,6.59,1,.,.,.,.,64.4,1,25,1,173 -16,0,122,.,.,.,.,.,8.91,1,.,.,.,.,64.4,1,25,1,173 -16,0,126,.,.,.,.,.,10.57,1,.,.,.,.,64.4,1,25,1,173 -16,0,129,.,.,.,.,.,9.52,1,.,.,.,.,64.4,1,25,1,173 -16,0,132,.,.,.,.,.,7.83,1,.,.,.,.,64.4,1,25,1,173 
-16,0,143.97,.,.,.,.,.,4.96,1,.,.,.,.,64.4,1,25,1,173 -17,1,0,0,600,.,.,1,.,.,.,.,.,.,54.8,1,23,1,170 -17,1,24,0,600,.,.,1,.,.,.,.,.,.,54.8,1,23,1,170 -17,1,48,0,600,.,.,1,.,.,.,.,.,.,54.8,1,23,1,170 -17,1,72,0,600,.,.,1,.,.,.,.,.,.,54.8,1,23,1,170 -17,1,96,0,600,.,.,1,.,.,.,.,.,.,54.8,1,23,1,170 -17,0,120,.,.,.,.,.,2.11,1,.,.,.,.,54.8,1,23,1,170 -17,1,120,0,600,.,.,1,.,.,.,.,.,.,54.8,1,23,1,170 -17,0,121.02,.,.,.,.,.,1.86,1,.,.,.,.,54.8,1,23,1,170 -17,0,122.02,.,.,.,.,.,6.92,1,.,.,.,.,54.8,1,23,1,170 -17,0,126,.,.,.,.,.,9.11,1,.,.,.,.,54.8,1,23,1,170 -17,0,129,.,.,.,.,.,6.96,1,.,.,.,.,54.8,1,23,1,170 -17,0,132,.,.,.,.,.,5.64,1,.,.,.,.,54.8,1,23,1,170 -17,0,144.08,.,.,.,.,.,3.59,1,.,.,.,.,54.8,1,23,1,170 -18,1,0,0,450,.,.,1,.,.,.,.,.,.,44.3,1,20,0,164 -18,1,24,0,450,.,.,1,.,.,.,.,.,.,44.3,1,20,0,164 -18,1,48,0,450,.,.,1,.,.,.,.,.,.,44.3,1,20,0,164 -18,1,72,0,450,.,.,1,.,.,.,.,.,.,44.3,1,20,0,164 -18,1,96,0,450,.,.,1,.,.,.,.,.,.,44.3,1,20,0,164 -18,0,120,.,.,.,.,.,7.95,1,.,.,.,.,44.3,1,20,0,164 -18,1,120,0,450,.,.,1,.,.,.,.,.,.,44.3,1,20,0,164 -18,0,120.98,.,.,.,.,.,7.47,1,.,.,.,.,44.3,1,20,0,164 -18,0,121.98,.,.,.,.,.,8.67,1,.,.,.,.,44.3,1,20,0,164 -18,0,126,.,.,.,.,.,13.83,1,.,.,.,.,44.3,1,20,0,164 -18,0,129.17,.,.,.,.,.,14.01,1,.,.,.,.,44.3,1,20,0,164 -18,0,132.17,.,.,.,.,.,8.97,1,.,.,.,.,44.3,1,20,0,164 -18,0,143.97,.,.,.,.,.,8.4,1,.,.,.,.,44.3,1,20,0,164 -19,1,0,0,600,.,.,1,.,.,.,.,.,.,50,1,36,1,168 -19,1,24,0,600,.,.,1,.,.,.,.,.,.,50,1,36,1,168 -19,1,48,0,600,.,.,1,.,.,.,.,.,.,50,1,36,1,168 -19,1,72,0,600,.,.,1,.,.,.,.,.,.,50,1,36,1,168 -19,1,96,0,600,.,.,1,.,.,.,.,.,.,50,1,36,1,168 -19,0,120,.,.,.,.,.,5.42,1,.,.,.,.,50,1,36,1,168 -19,1,120,0,600,.,.,1,.,.,.,.,.,.,50,1,36,1,168 -19,0,121,.,.,.,.,.,7.08,1,.,.,.,.,50,1,36,1,168 -19,0,122,.,.,.,.,.,7.27,1,.,.,.,.,50,1,36,1,168 -19,0,125.98,.,.,.,.,.,20.07,1,.,.,.,.,50,1,36,1,168 -19,0,128.98,.,.,.,.,.,18.24,1,.,.,.,.,50,1,36,1,168 -19,0,132,.,.,.,.,.,15.36,1,.,.,.,.,50,1,36,1,168 
-19,0,144,.,.,.,.,.,10.92,1,.,.,.,.,50,1,36,1,168 -20,1,0,0,600,.,.,1,.,.,.,.,.,.,59,1,31,1,170 -20,1,24,0,600,.,.,1,.,.,.,.,.,.,59,1,31,1,170 -20,1,48,0,600,.,.,1,.,.,.,.,.,.,59,1,31,1,170 -20,1,72,0,600,.,.,1,.,.,.,.,.,.,59,1,31,1,170 -20,1,96,0,600,.,.,1,.,.,.,.,.,.,59,1,31,1,170 -20,0,120,.,.,.,.,.,4.71,1,.,.,.,.,59,1,31,1,170 -20,1,120,0,600,.,.,1,.,.,.,.,.,.,59,1,31,1,170 -20,0,120.77,.,.,.,.,.,4.5,1,.,.,.,.,59,1,31,1,170 -20,0,121.75,.,.,.,.,.,3.35,1,.,.,.,.,59,1,31,1,170 -20,0,125.67,.,.,.,.,.,12.35,1,.,.,.,.,59,1,31,1,170 -20,0,128.67,.,.,.,.,.,11.56,1,.,.,.,.,59,1,31,1,170 -20,0,143.67,.,.,.,.,.,6.45,1,.,.,.,.,59,1,31,1,170 +1,1,0,0,600,.,.,0,.,.,.,.,.,.,46.7,1,21,1,160 +1,1,24,0,600,.,.,0,.,.,.,.,.,.,.,.,.,.,. +1,1,48,0,600,.,.,0,.,.,.,.,.,.,.,.,.,.,. +1,1,72,0,600,.,.,0,.,.,.,.,.,.,.,.,.,.,. +1,1,96,0,600,.,.,0,.,.,.,.,.,.,.,.,.,.,. +1,0,120,.,.,.,.,.,10.44,0,.,.,.,.,.,.,.,.,. +1,1,120,0,600,.,.,0,.,.,.,.,.,.,.,.,.,.,. +1,0,121,.,.,.,.,.,12.89,0,.,.,.,.,.,.,.,.,. +1,0,122,.,.,.,.,.,14.98,0,.,.,.,.,.,.,.,.,. +1,0,125.99,.,.,.,.,.,16.69,0,.,.,.,.,.,.,.,.,. +1,0,129,.,.,.,.,.,20.15,0,.,.,.,.,.,.,.,.,. +1,0,132,.,.,.,.,.,14.97,0,.,.,.,.,.,.,.,.,. +1,0,143.98,.,.,.,.,.,12.57,0,.,.,.,.,.,.,.,.,. 
+2,1,0,0,600,.,.,0,.,.,.,.,.,.,66.5,1,30,1,174 +2,1,24,0,600,.,.,0,.,.,.,.,.,.,66.5,1,30,1,174 +2,1,48,0,600,.,.,0,.,.,.,.,.,.,66.5,1,30,1,174 +2,1,72,0,600,.,.,0,.,.,.,.,.,.,66.5,1,30,1,174 +2,1,96,0,600,.,.,0,.,.,.,.,.,.,66.5,1,30,1,174 +2,0,120,.,.,.,.,.,3.56,0,.,.,.,.,66.5,1,30,1,174 +2,1,120,0,600,.,.,0,.,.,.,.,.,.,66.5,1,30,1,174 +2,0,120.98,.,.,.,.,.,5.84,0,.,.,.,.,66.5,1,30,1,174 +2,0,121.98,.,.,.,.,.,6.54,0,.,.,.,.,66.5,1,30,1,174 +2,0,126,.,.,.,.,.,6.14,0,.,.,.,.,66.5,1,30,1,174 +2,0,129.02,.,.,.,.,.,6.56,0,.,.,.,.,66.5,1,30,1,174 +2,0,132.02,.,.,.,.,.,4.44,0,.,.,.,.,66.5,1,30,1,174 +2,0,144,.,.,.,.,.,3.76,0,.,.,.,.,66.5,1,30,1,174 +3,1,0,0,600,.,.,0,.,.,.,.,.,.,46.7,1,24,0,164 +3,1,24,0,600,.,.,0,.,.,.,.,.,.,46.7,1,24,0,164 +3,1,48,0,600,.,.,0,.,.,.,.,.,.,46.7,1,24,0,164 +3,1,72,0,600,.,.,0,.,.,.,.,.,.,46.7,1,24,0,164 +3,1,96,0,600,.,.,0,.,.,.,.,.,.,46.7,1,24,0,164 +3,1,120,0,600,.,.,0,.,.,.,.,.,.,46.7,1,24,0,164 +3,0,120.08,.,.,.,.,.,4.06,0,.,.,.,.,46.7,1,24,0,164 +3,0,121.07,.,.,.,.,.,3.24,0,.,.,.,.,46.7,1,24,0,164 +3,0,122.08,.,.,.,.,.,3.09,0,.,.,.,.,46.7,1,24,0,164 +3,0,126.08,.,.,.,.,.,7.98,0,.,.,.,.,46.7,1,24,0,164 +3,0,129.05,.,.,.,.,.,7.23,0,.,.,.,.,46.7,1,24,0,164 +3,0,132.1,.,.,.,.,.,4.71,0,.,.,.,.,46.7,1,24,0,164 +3,0,144.08,.,.,.,.,.,3.82,0,.,.,.,.,46.7,1,24,0,164 +4,1,0,0,600,.,.,0,.,.,.,.,.,.,50.8,1,25,1,165 +4,1,24,0,600,.,.,0,.,.,.,.,.,.,50.8,1,25,1,165 +4,1,48,0,600,.,.,0,.,.,.,.,.,.,50.8,1,25,1,165 +4,1,72,0,600,.,.,0,.,.,.,.,.,.,50.8,1,25,1,165 +4,1,96,0,600,.,.,0,.,.,.,.,.,.,50.8,1,25,1,165 +4,0,120,.,.,.,.,.,2.1,0,.,.,.,.,50.8,1,25,1,165 +4,1,120,0,600,.,.,0,.,.,.,.,.,.,50.8,1,25,1,165 +4,0,121,.,.,.,.,.,3.05,0,.,.,.,.,50.8,1,25,1,165 +4,0,122.02,.,.,.,.,.,5.21,0,.,.,.,.,50.8,1,25,1,165 +4,0,126,.,.,.,.,.,5.09,0,.,.,.,.,50.8,1,25,1,165 +4,0,129.03,.,.,.,.,.,4.24,0,.,.,.,.,50.8,1,25,1,165 +4,0,132,.,.,.,.,.,3.69,0,.,.,.,.,50.8,1,25,1,165 +4,0,144.02,.,.,.,.,.,1.96,0,.,.,.,.,50.8,1,25,1,165 
+5,1,0,0,600,.,.,0,.,.,.,.,.,.,65.8,1,22,1,181 +5,1,24,0,600,.,.,0,.,.,.,.,.,.,65.8,1,22,1,181 +5,1,48,0,600,.,.,0,.,.,.,.,.,.,65.8,1,22,1,181 +5,1,72,0,600,.,.,0,.,.,.,.,.,.,65.8,1,22,1,181 +5,1,96,0,600,.,.,0,.,.,.,.,.,.,65.8,1,22,1,181 +5,0,120,.,.,.,.,.,2.93,0,.,.,.,.,65.8,1,22,1,181 +5,1,120,0,600,.,.,0,.,.,.,.,.,.,65.8,1,22,1,181 +5,0,121,.,.,.,.,.,2.64,0,.,.,.,.,65.8,1,22,1,181 +5,0,122,.,.,.,.,.,4.8,0,.,.,.,.,65.8,1,22,1,181 +5,0,126,.,.,.,.,.,3.7,0,.,.,.,.,65.8,1,22,1,181 +5,0,129.02,.,.,.,.,.,4.13,0,.,.,.,.,65.8,1,22,1,181 +5,0,132,.,.,.,.,.,2.81,0,.,.,.,.,65.8,1,22,1,181 +5,0,144,.,.,.,.,.,2.21,0,.,.,.,.,65.8,1,22,1,181 +6,1,0,0,600,.,.,0,.,.,.,.,.,.,65,1,23,1,177 +6,1,24,0,600,.,.,0,.,.,.,.,.,.,65,1,23,1,177 +6,1,48,0,600,.,.,0,.,.,.,.,.,.,65,1,23,1,177 +6,1,72,0,600,.,.,0,.,.,.,.,.,.,65,1,23,1,177 +6,1,96,0,600,.,.,0,.,.,.,.,.,.,65,1,23,1,177 +6,0,120,.,.,.,.,.,6.92,0,.,.,.,.,65,1,23,1,177 +6,1,120,0,600,.,.,0,.,.,.,.,.,.,65,1,23,1,177 +6,0,121,.,.,.,.,.,6.89,0,.,.,.,.,65,1,23,1,177 +6,0,121.98,.,.,.,.,.,6.64,0,.,.,.,.,65,1,23,1,177 +6,0,126,.,.,.,.,.,13.72,0,.,.,.,.,65,1,23,1,177 +6,0,129,.,.,.,.,.,12.69,0,.,.,.,.,65,1,23,1,177 +6,0,131.98,.,.,.,.,.,10.58,0,.,.,.,.,65,1,23,1,177 +6,0,144.98,.,.,.,.,.,6.62,0,.,.,.,.,65,1,23,1,177 +7,1,0,0,600,.,.,0,.,.,.,.,.,.,51.7,1,27,0,161 +7,1,24,0,600,.,.,0,.,.,.,.,.,.,51.7,1,27,0,161 +7,1,48,0,600,.,.,0,.,.,.,.,.,.,51.7,1,27,0,161 +7,1,72,0,600,.,.,0,.,.,.,.,.,.,51.7,1,27,0,161 +7,1,96,0,600,.,.,0,.,.,.,.,.,.,51.7,1,27,0,161 +7,0,120,.,.,.,.,.,5.41,0,.,.,.,.,51.7,1,27,0,161 +7,1,120,0,600,.,.,0,.,.,.,.,.,.,51.7,1,27,0,161 +7,0,121.03,.,.,.,.,.,4.46,0,.,.,.,.,51.7,1,27,0,161 +7,0,122.03,.,.,.,.,.,4.54,0,.,.,.,.,51.7,1,27,0,161 +7,0,126.02,.,.,.,.,.,12.19,0,.,.,.,.,51.7,1,27,0,161 +7,0,129.08,.,.,.,.,.,12.1,0,.,.,.,.,51.7,1,27,0,161 +7,0,132.03,.,.,.,.,.,8.61,0,.,.,.,.,51.7,1,27,0,161 +7,0,144.03,.,.,.,.,.,6.37,0,.,.,.,.,51.7,1,27,0,161 +8,1,0,0,600,.,.,0,.,.,.,.,.,.,51.2,1,22,1,163 
+8,1,24,0,600,.,.,0,.,.,.,.,.,.,51.2,1,22,1,163 +8,1,48,0,600,.,.,0,.,.,.,.,.,.,51.2,1,22,1,163 +8,1,72,0,600,.,.,0,.,.,.,.,.,.,51.2,1,22,1,163 +8,1,96,0,600,.,.,0,.,.,.,.,.,.,51.2,1,22,1,163 +8,0,120,.,.,.,.,.,6.19,0,.,.,.,.,51.2,1,22,1,163 +8,1,120,0,600,.,.,0,.,.,.,.,.,.,51.2,1,22,1,163 +8,0,121.03,.,.,.,.,.,6.33,0,.,.,.,.,51.2,1,22,1,163 +8,0,122,.,.,.,.,.,6.24,0,.,.,.,.,51.2,1,22,1,163 +8,0,125.98,.,.,.,.,.,13.03,0,.,.,.,.,51.2,1,22,1,163 +8,0,128.98,.,.,.,.,.,11.86,0,.,.,.,.,51.2,1,22,1,163 +8,0,132,.,.,.,.,.,11.45,0,.,.,.,.,51.2,1,22,1,163 +8,0,143.98,.,.,.,.,.,7.83,0,.,.,.,.,51.2,1,22,1,163 +9,1,0,0,600,.,.,0,.,.,.,.,.,.,55,1,23,1,174 +9,1,24,0,600,.,.,0,.,.,.,.,.,.,55,1,23,1,174 +9,1,48,0,600,.,.,0,.,.,.,.,.,.,55,1,23,1,174 +9,1,72,0,600,.,.,0,.,.,.,.,.,.,55,1,23,1,174 +9,1,96,0,600,.,.,0,.,.,.,.,.,.,55,1,23,1,174 +9,0,120,.,.,.,.,.,2.85,0,.,.,.,.,55,1,23,1,174 +9,1,120,0,600,.,.,0,.,.,.,.,.,.,55,1,23,1,174 +9,0,120.97,.,.,.,.,.,3.7,0,.,.,.,.,55,1,23,1,174 +9,0,122,.,.,.,.,.,6.65,0,.,.,.,.,55,1,23,1,174 +9,0,125.98,.,.,.,.,.,6.81,0,.,.,.,.,55,1,23,1,174 +9,0,128.98,.,.,.,.,.,6.51,0,.,.,.,.,55,1,23,1,174 +9,0,132,.,.,.,.,.,7.48,0,.,.,.,.,55,1,23,1,174 +9,0,143.98,.,.,.,.,.,4.51,0,.,.,.,.,55,1,23,1,174 +10,1,0,0,600,.,.,0,.,.,.,.,.,.,52.1,1,32,1,163 +10,1,24,0,600,.,.,0,.,.,.,.,.,.,52.1,1,32,1,163 +10,1,48,0,600,.,.,0,.,.,.,.,.,.,52.1,1,32,1,163 +10,1,72,0,600,.,.,0,.,.,.,.,.,.,52.1,1,32,1,163 +10,1,96,0,600,.,.,0,.,.,.,.,.,.,52.1,1,32,1,163 +10,0,120,.,.,.,.,.,2.93,0,.,.,.,.,52.1,1,32,1,163 +10,1,120,0,600,.,.,0,.,.,.,.,.,.,52.1,1,32,1,163 +10,0,121,.,.,.,.,.,4.36,0,.,.,.,.,52.1,1,32,1,163 +10,0,122.02,.,.,.,.,.,7.79,0,.,.,.,.,52.1,1,32,1,163 +10,0,126,.,.,.,.,.,11.02,0,.,.,.,.,52.1,1,32,1,163 +10,0,129,.,.,.,.,.,8.86,0,.,.,.,.,52.1,1,32,1,163 +10,0,131.97,.,.,.,.,.,6.09,0,.,.,.,.,52.1,1,32,1,163 +10,0,144,.,.,.,.,.,4.15,0,.,.,.,.,52.1,1,32,1,163 +11,1,0,0,600,.,.,0,.,.,.,.,.,.,56.5,1,34,1,165 +11,1,24,0,600,.,.,0,.,.,.,.,.,.,56.5,1,34,1,165 
+11,1,48,0,600,.,.,0,.,.,.,.,.,.,56.5,1,34,1,165 +11,1,72,0,600,.,.,0,.,.,.,.,.,.,56.5,1,34,1,165 +11,1,96,0,600,.,.,0,.,.,.,.,.,.,56.5,1,34,1,165 +11,0,120,.,.,.,.,.,2.09,0,.,.,.,.,56.5,1,34,1,165 +11,1,120,0,600,.,.,0,.,.,.,.,.,.,56.5,1,34,1,165 +11,0,121.03,.,.,.,.,.,2.68,0,.,.,.,.,56.5,1,34,1,165 +11,0,122,.,.,.,.,.,4.71,0,.,.,.,.,56.5,1,34,1,165 +11,0,125.98,.,.,.,.,.,7.71,0,.,.,.,.,56.5,1,34,1,165 +11,0,129,.,.,.,.,.,6.31,0,.,.,.,.,56.5,1,34,1,165 +11,0,132,.,.,.,.,.,5.82,0,.,.,.,.,56.5,1,34,1,165 +11,0,144.13,.,.,.,.,.,2.63,0,.,.,.,.,56.5,1,34,1,165 +12,1,0,0,600,.,.,0,.,.,.,.,.,.,47.9,1,54,0,160 +12,1,24,0,600,.,.,0,.,.,.,.,.,.,47.9,1,54,0,160 +12,1,48,0,600,.,.,0,.,.,.,.,.,.,47.9,1,54,0,160 +12,1,72,0,600,.,.,0,.,.,.,.,.,.,47.9,1,54,0,160 +12,1,96,0,600,.,.,0,.,.,.,.,.,.,47.9,1,54,0,160 +12,0,120,.,.,.,.,.,7.09,0,.,.,.,.,47.9,1,54,0,160 +12,1,120,0,600,.,.,0,.,.,.,.,.,.,47.9,1,54,0,160 +12,0,121.03,.,.,.,.,.,6.18,0,.,.,.,.,47.9,1,54,0,160 +12,0,122.13,.,.,.,.,.,8.66,0,.,.,.,.,47.9,1,54,0,160 +12,0,126,.,.,.,.,.,11.16,0,.,.,.,.,47.9,1,54,0,160 +12,0,129,.,.,.,.,.,9.51,0,.,.,.,.,47.9,1,54,0,160 +12,0,132,.,.,.,.,.,8.14,0,.,.,.,.,47.9,1,54,0,160 +12,0,144,.,.,.,.,.,7.89,0,.,.,.,.,47.9,1,54,0,160 +13,1,0,0,600,.,.,0,.,.,.,.,.,.,60.5,1,24,1,180 +13,1,24,0,600,.,.,0,.,.,.,.,.,.,60.5,1,24,1,180 +13,1,48,0,600,.,.,0,.,.,.,.,.,.,60.5,1,24,1,180 +13,1,72,0,600,.,.,0,.,.,.,.,.,.,60.5,1,24,1,180 +13,1,96,0,600,.,.,0,.,.,.,.,.,.,60.5,1,24,1,180 +13,0,120,.,.,.,.,.,6.62,0,.,.,.,.,60.5,1,24,1,180 +13,1,120,0,600,.,.,0,.,.,.,.,.,.,60.5,1,24,1,180 +13,0,121,.,.,.,.,.,3.18,0,.,.,.,.,60.5,1,24,1,180 +13,0,122,.,.,.,.,.,5.41,0,.,.,.,.,60.5,1,24,1,180 +13,0,126,.,.,.,.,.,10.18,0,.,.,.,.,60.5,1,24,1,180 +13,0,129.02,.,.,.,.,.,12.84,0,.,.,.,.,60.5,1,24,1,180 +13,0,132,.,.,.,.,.,12.35,0,.,.,.,.,60.5,1,24,1,180 +13,0,144,.,.,.,.,.,8.06,0,.,.,.,.,60.5,1,24,1,180 +14,1,0,0,600,.,.,0,.,.,.,.,.,.,59.2,1,26,1,174 +14,1,24,0,600,.,.,0,.,.,.,.,.,.,59.2,1,26,1,174 
+14,1,48,0,600,.,.,0,.,.,.,.,.,.,59.2,1,26,1,174 +14,1,72,0,600,.,.,0,.,.,.,.,.,.,59.2,1,26,1,174 +14,1,96,0,600,.,.,0,.,.,.,.,.,.,59.2,1,26,1,174 +14,0,120,.,.,.,.,.,3.63,0,.,.,.,.,59.2,1,26,1,174 +14,1,120,0,600,.,.,0,.,.,.,.,.,.,59.2,1,26,1,174 +14,0,121,.,.,.,.,.,4.49,0,.,.,.,.,59.2,1,26,1,174 +14,0,122,.,.,.,.,.,5.5,0,.,.,.,.,59.2,1,26,1,174 +14,0,126,.,.,.,.,.,7.28,0,.,.,.,.,59.2,1,26,1,174 +14,0,129,.,.,.,.,.,5.27,0,.,.,.,.,59.2,1,26,1,174 +14,0,132,.,.,.,.,.,4.89,0,.,.,.,.,59.2,1,26,1,174 +14,0,144,.,.,.,.,.,2.68,0,.,.,.,.,59.2,1,26,1,174 +15,1,0,0,450,.,.,0,.,.,.,.,.,.,43,1,19,0,150 +15,1,24,0,450,.,.,0,.,.,.,.,.,.,43,1,19,0,150 +15,1,48,0,450,.,.,0,.,.,.,.,.,.,43,1,19,0,150 +15,1,72,0,450,.,.,0,.,.,.,.,.,.,43,1,19,0,150 +15,1,96,0,450,.,.,0,.,.,.,.,.,.,43,1,19,0,150 +15,0,120,.,.,.,.,.,5.53,0,.,.,.,.,43,1,19,0,150 +15,1,120,0,450,.,.,0,.,.,.,.,.,.,43,1,19,0,150 +15,0,121,.,.,.,.,.,4.81,0,.,.,.,.,43,1,19,0,150 +15,0,122,.,.,.,.,.,8.14,0,.,.,.,.,43,1,19,0,150 +15,0,126,.,.,.,.,.,9.96,0,.,.,.,.,43,1,19,0,150 +15,0,129,.,.,.,.,.,8.55,0,.,.,.,.,43,1,19,0,150 +15,0,132.05,.,.,.,.,.,7.54,0,.,.,.,.,43,1,19,0,150 +15,0,144.05,.,.,.,.,.,5.74,0,.,.,.,.,43,1,19,0,150 +16,1,0,0,600,.,.,0,.,.,.,.,.,.,64.4,1,25,1,173 +16,1,24,0,600,.,.,0,.,.,.,.,.,.,64.4,1,25,1,173 +16,1,48,0,600,.,.,0,.,.,.,.,.,.,64.4,1,25,1,173 +16,1,72,0,600,.,.,0,.,.,.,.,.,.,64.4,1,25,1,173 +16,1,96,0,600,.,.,0,.,.,.,.,.,.,64.4,1,25,1,173 +16,0,120,.,.,.,.,.,5.48,0,.,.,.,.,64.4,1,25,1,173 +16,1,120,0,600,.,.,0,.,.,.,.,.,.,64.4,1,25,1,173 +16,0,121,.,.,.,.,.,6.59,0,.,.,.,.,64.4,1,25,1,173 +16,0,122,.,.,.,.,.,8.91,0,.,.,.,.,64.4,1,25,1,173 +16,0,126,.,.,.,.,.,10.57,0,.,.,.,.,64.4,1,25,1,173 +16,0,129,.,.,.,.,.,9.52,0,.,.,.,.,64.4,1,25,1,173 +16,0,132,.,.,.,.,.,7.83,0,.,.,.,.,64.4,1,25,1,173 +16,0,143.97,.,.,.,.,.,4.96,0,.,.,.,.,64.4,1,25,1,173 +17,1,0,0,600,.,.,0,.,.,.,.,.,.,54.8,1,23,1,170 +17,1,24,0,600,.,.,0,.,.,.,.,.,.,54.8,1,23,1,170 +17,1,48,0,600,.,.,0,.,.,.,.,.,.,54.8,1,23,1,170 
+17,1,72,0,600,.,.,0,.,.,.,.,.,.,54.8,1,23,1,170 +17,1,96,0,600,.,.,0,.,.,.,.,.,.,54.8,1,23,1,170 +17,0,120,.,.,.,.,.,2.11,0,.,.,.,.,54.8,1,23,1,170 +17,1,120,0,600,.,.,0,.,.,.,.,.,.,54.8,1,23,1,170 +17,0,121.02,.,.,.,.,.,1.86,0,.,.,.,.,54.8,1,23,1,170 +17,0,122.02,.,.,.,.,.,6.92,0,.,.,.,.,54.8,1,23,1,170 +17,0,126,.,.,.,.,.,9.11,0,.,.,.,.,54.8,1,23,1,170 +17,0,129,.,.,.,.,.,6.96,0,.,.,.,.,54.8,1,23,1,170 +17,0,132,.,.,.,.,.,5.64,0,.,.,.,.,54.8,1,23,1,170 +17,0,144.08,.,.,.,.,.,3.59,0,.,.,.,.,54.8,1,23,1,170 +18,1,0,0,450,.,.,0,.,.,.,.,.,.,44.3,1,20,0,164 +18,1,24,0,450,.,.,0,.,.,.,.,.,.,44.3,1,20,0,164 +18,1,48,0,450,.,.,0,.,.,.,.,.,.,44.3,1,20,0,164 +18,1,72,0,450,.,.,0,.,.,.,.,.,.,44.3,1,20,0,164 +18,1,96,0,450,.,.,0,.,.,.,.,.,.,44.3,1,20,0,164 +18,0,120,.,.,.,.,.,7.95,0,.,.,.,.,44.3,1,20,0,164 +18,1,120,0,450,.,.,0,.,.,.,.,.,.,44.3,1,20,0,164 +18,0,120.98,.,.,.,.,.,7.47,0,.,.,.,.,44.3,1,20,0,164 +18,0,121.98,.,.,.,.,.,8.67,0,.,.,.,.,44.3,1,20,0,164 +18,0,126,.,.,.,.,.,13.83,0,.,.,.,.,44.3,1,20,0,164 +18,0,129.17,.,.,.,.,.,14.01,0,.,.,.,.,44.3,1,20,0,164 +18,0,132.17,.,.,.,.,.,8.97,0,.,.,.,.,44.3,1,20,0,164 +18,0,143.97,.,.,.,.,.,8.4,0,.,.,.,.,44.3,1,20,0,164 +19,1,0,0,600,.,.,0,.,.,.,.,.,.,50,1,36,1,168 +19,1,24,0,600,.,.,0,.,.,.,.,.,.,50,1,36,1,168 +19,1,48,0,600,.,.,0,.,.,.,.,.,.,50,1,36,1,168 +19,1,72,0,600,.,.,0,.,.,.,.,.,.,50,1,36,1,168 +19,1,96,0,600,.,.,0,.,.,.,.,.,.,50,1,36,1,168 +19,0,120,.,.,.,.,.,5.42,0,.,.,.,.,50,1,36,1,168 +19,1,120,0,600,.,.,0,.,.,.,.,.,.,50,1,36,1,168 +19,0,121,.,.,.,.,.,7.08,0,.,.,.,.,50,1,36,1,168 +19,0,122,.,.,.,.,.,7.27,0,.,.,.,.,50,1,36,1,168 +19,0,125.98,.,.,.,.,.,20.07,0,.,.,.,.,50,1,36,1,168 +19,0,128.98,.,.,.,.,.,18.24,0,.,.,.,.,50,1,36,1,168 +19,0,132,.,.,.,.,.,15.36,0,.,.,.,.,50,1,36,1,168 +19,0,144,.,.,.,.,.,10.92,0,.,.,.,.,50,1,36,1,168 +20,1,0,0,600,.,.,0,.,.,.,.,.,.,59,1,31,1,170 +20,1,24,0,600,.,.,0,.,.,.,.,.,.,59,1,31,1,170 +20,1,48,0,600,.,.,0,.,.,.,.,.,.,59,1,31,1,170 
+20,1,72,0,600,.,.,0,.,.,.,.,.,.,59,1,31,1,170 +20,1,96,0,600,.,.,0,.,.,.,.,.,.,59,1,31,1,170 +20,0,120,.,.,.,.,.,4.71,0,.,.,.,.,59,1,31,1,170 +20,1,120,0,600,.,.,0,.,.,.,.,.,.,59,1,31,1,170 +20,0,120.77,.,.,.,.,.,4.5,0,.,.,.,.,59,1,31,1,170 +20,0,121.75,.,.,.,.,.,3.35,0,.,.,.,.,59,1,31,1,170 +20,0,125.67,.,.,.,.,.,12.35,0,.,.,.,.,59,1,31,1,170 +20,0,128.67,.,.,.,.,.,11.56,0,.,.,.,.,59,1,31,1,170 +20,0,143.67,.,.,.,.,.,6.45,0,.,.,.,.,59,1,31,1,170 diff --git a/examples/vanco_sde/main.rs b/examples/vanco_sde/main.rs index 024659c4e..acf7a3388 100644 --- a/examples/vanco_sde/main.rs +++ b/examples/vanco_sde/main.rs @@ -47,36 +47,47 @@ fn main() { // (3, 1), // ); - let params = Parameters::new() - .add("ka", 0.0001, 2.4) - .add("ke0", 0.0001, 2.7) - .add("kcp", 0.0001, 2.4) - .add("kpc", 0.0001, 2.4) - .add("vol", 0.2, 12.0) - .add("ske", 0.0001, 0.2); + let observations = ObservationSpec::new() + .add_channel(ObservationChannel::continuous(0, "central")) + .with_assay_error_models( + AssayErrorModels::new() + .add( + 0, + AssayErrorModel::additive(ErrorPoly::new(0.00119, 0.20, 0.0, 0.0), 0.0), + ) + .unwrap(), + ); - let ems = AssayErrorModels::new() - .add( - 0, - AssayErrorModel::additive(ErrorPoly::new(0.00119, 0.20, 0.0, 0.0), 0.0), + let model = ModelDefinition::builder(sde) + .parameters( + ParameterSpace::new() + .add(ParameterSpec::bounded("ka", 0.0001, 2.4)) + .add(ParameterSpec::bounded("ke0", 0.0001, 2.7)) + .add(ParameterSpec::bounded("kcp", 0.0001, 2.4)) + .add(ParameterSpec::bounded("kpc", 0.0001, 2.4)) + .add(ParameterSpec::bounded("vol", 0.2, 12.0)) + .add(ParameterSpec::bounded("ske", 0.0001, 0.2)), ) + .observations(observations) + .build() .unwrap(); - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems) - .build(); - - settings.set_cycles(usize::MAX); - settings.set_cache(true); - settings.set_output_path("examples/vanco_sde/output"); - settings.set_prior(Prior::sobol(100, 
347)); - settings.initialize_logs().unwrap(); let data = data::read_pmetrics("examples/vanco_sde/vanco_clean.csv").unwrap(); - - let mut algorithm = dispatch_algorithm(settings, sde, data).unwrap(); - algorithm.initialize().unwrap(); - let mut result = algorithm.fit().unwrap(); + let mut result = EstimationProblem::builder(model, data) + .method(EstimationMethod::Nonparametric(NonparametricMethod::Npag( + NpagOptions::default(), + ))) + .output(OutputPlan { + write: true, + path: Some("examples/vanco_sde/output".to_string()), + }) + .runtime(RuntimeOptions { + cycles: usize::MAX, + cache: true, + prior: Some(Prior::sobol(100, 347)), + ..RuntimeOptions::default() + }) + .run() + .unwrap(); result.write_outputs().unwrap(); } diff --git a/src/algorithms/mod.rs b/src/algorithms/mod.rs index 949ded9c0..11a2a5a8c 100644 --- a/src/algorithms/mod.rs +++ b/src/algorithms/mod.rs @@ -1,46 +1,147 @@ use std::fs; use std::path::Path; -use crate::routines::output::NPResult; -use crate::routines::settings::Settings; -use crate::structs::psi::Psi; -use crate::structs::theta::Theta; +use crate::api::{NonparametricMethod, OutputPlan, RuntimeOptions}; +use crate::estimation::nonparametric::{NonparametricWorkspace, Prior, Psi, Theta}; +use crate::model::{ModelDefinition, ObservationSpec, ParameterSpace}; +use crate::output::shared::RunConfiguration; use anyhow::Context; use anyhow::Result; use ndarray::parallel::prelude::{IntoParallelIterator, ParallelIterator}; - -use npag::*; -use npod::NPOD; +use ndarray::{Array, ArrayBase, Dim, OwnedRepr}; +use nonparametric::npag::*; +use nonparametric::npod::NPOD; +use nonparametric::postprob::POSTPROB; use pharmsol::prelude::{data::Data, simulator::Equation}; use pharmsol::{Predictions, Subject}; -use postprob::POSTPROB; use serde::{Deserialize, Serialize}; -pub mod npag; -pub mod npod; -pub mod postprob; +// Module organization for algorithm types +pub mod nonparametric; + +#[derive(Debug, Clone)] +pub(crate) struct NonparametricAlgorithmInput 
{ + pub method: NonparametricMethod, + pub equation: E, + pub data: Data, + pub parameter_space: ParameterSpace, + pub observations: ObservationSpec, + pub output: OutputPlan, + pub runtime: RuntimeOptions, +} + +#[derive(Debug, Clone)] +pub(crate) struct NativeNonparametricConfig { + pub parameter_space: ParameterSpace, + pub ranges: Vec<(f64, f64)>, + pub prior: Prior, + pub max_cycles: usize, + pub progress: bool, + pub run_configuration: RunConfiguration, +} + +impl NonparametricAlgorithmInput { + pub(crate) fn new( + method: NonparametricMethod, + model: ModelDefinition, + data: Data, + output: OutputPlan, + runtime: RuntimeOptions, + ) -> Self { + Self { + method, + equation: model.equation, + data, + parameter_space: model.parameters, + observations: model.observations, + output, + runtime, + } + } + + pub(crate) fn algorithm(&self) -> Algorithm { + self.method.algorithm() + } + + pub(crate) fn error_models(&self) -> &pharmsol::prelude::data::AssayErrorModels { + &self.observations.assay_error_models + } + + pub(crate) fn max_cycles(&self) -> usize { + self.runtime.cycles + } + + pub(crate) fn progress_enabled(&self) -> bool { + self.runtime.progress + } + + pub(crate) fn prior(&self) -> Prior { + self.runtime.prior.clone().unwrap_or_default() + } + + pub(crate) fn run_configuration(&self) -> RunConfiguration { + RunConfiguration::new( + self.algorithm(), + &self.output, + &self.runtime, + self.parameter_space + .iter() + .map(|parameter| parameter.name.clone()) + .collect(), + ) + } + + pub(crate) fn native_config(&self) -> Result { + Ok(NativeNonparametricConfig { + ranges: self.parameter_space.finite_ranges()?, + parameter_space: self.parameter_space.clone(), + prior: self.prior(), + max_cycles: self.max_cycles(), + progress: self.progress_enabled(), + run_configuration: self.run_configuration(), + }) + } +} +/// Algorithm type enumeration +/// +/// This enum represents the algorithms available in the structure branch. 
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)] pub enum Algorithm { + /// Non-Parametric Adaptive Grid NPAG, + /// Non-Parametric Optimal Design NPOD, + /// Posterior Probability calculation POSTPROB, } +impl Algorithm { + /// Check if this is a non-parametric algorithm + pub fn is_nonparametric(&self) -> bool { + matches!( + self, + Algorithm::NPAG | Algorithm::NPOD | Algorithm::POSTPROB + ) + } + + /// Check if this is a parametric algorithm + pub fn is_parametric(&self) -> bool { + false + } +} + pub trait Algorithms: Sync + Send + 'static { - fn new(config: Settings, equation: E, data: Data) -> Result> - where - Self: Sized; fn validate_psi(&mut self) -> Result<()> { // Count problematic values in psi let mut nan_count = 0; let mut inf_count = 0; - let psi = self.psi().matrix(); + let psi = self.psi().to_ndarray(); // First coerce all NaN and infinite in psi to 0.0 for i in 0..psi.nrows() { - for j in 0..psi.ncols() { - let val = psi[(i, j)]; + for j in 0..self.psi().matrix().ncols() { + let val = psi.get((i, j)).unwrap(); if val.is_nan() { nan_count += 1; // *val = 0.0; @@ -60,11 +161,10 @@ pub trait Algorithms: Sync + Send + 'static { ); } - let (row, col) = (psi.nrows(), psi.ncols()); - let plam: Vec = (0..row) - .map(|i| (0..col).map(|j| psi[(i, j)]).sum::()) - .collect(); - let w: Vec = plam.iter().map(|&x| 1.0 / x).collect(); + let (_, col) = psi.dim(); + let ecol: ArrayBase, Dim<[usize; 1]>> = Array::ones(col); + let plam = psi.dot(&ecol); + let w = 1. 
/ &plam; // Get the index of each element in `w` that is NaN or infinite let indices: Vec = w @@ -89,7 +189,7 @@ pub trait Algorithms: Sync + Send + 'static { for index in &indices { tracing::debug!("Subject with zero probability: {}", subject[*index].id()); - let error_model = self.settings().errormodels().clone(); + let error_model = self.error_models().clone(); // Simulate all support points in parallel let spp_results: Vec<_> = self @@ -185,7 +285,7 @@ pub trait Algorithms: Sync + Send + 'static { let outeqs = preds.iter().map(|x| x.outeq()).collect::>(); let states = preds .iter() - .map(|x| x.state().clone()) + .map(|x| x.state().to_vec()) .collect::>>(); tracing::debug!("\t\tTimes: {:?}", times); @@ -208,7 +308,7 @@ pub trait Algorithms: Sync + Send + 'static { Ok(()) } - fn settings(&self) -> &Settings; + fn error_models(&self) -> &pharmsol::prelude::data::AssayErrorModels; /// Get the equation used in the algorithm fn equation(&self) -> &E; /// Get the data used in the algorithm @@ -259,7 +359,7 @@ pub trait Algorithms: Sync + Send + 'static { /// It is typically performed after the estimation step in each cycle of the algorithm. fn condensation(&mut self) -> Result<()>; - /// Performs optimizations on the current [ErrorModels] and updates [Psi] accordingly + /// Performs optimizations on the current [AssayErrorModels] and updates [Psi] accordingly /// /// This step refines the error model parameters to better fit the data, /// and subsequently updates the [Psi] matrix to reflect these changes. @@ -296,31 +396,37 @@ pub trait Algorithms: Sync + Send + 'static { /// This method runs the full fitting process, starting with initialization, /// followed by iterative cycles of estimation, condensation, optimization, and evaluation /// until the algorithm converges or meets a stopping criteria. - fn fit(&mut self) -> Result> { + fn fit(&mut self) -> Result> { self.initialize().unwrap(); - #[allow(clippy::while_let_loop)] loop { match self.next_cycle()? 
{ Status::Continue => continue, Status::Stop(_) => break, } } - self.into_npresult() + self.into_workspace() } #[allow(clippy::wrong_self_convention)] - fn into_npresult(&self) -> Result>; + fn into_workspace(&self) -> Result>; } -pub fn dispatch_algorithm( - settings: Settings, - equation: E, - data: Data, -) -> Result>> { - match settings.config().algorithm { - Algorithm::NPAG => Ok(NPAG::new(settings, equation, data)?), - Algorithm::NPOD => Ok(NPOD::new(settings, equation, data)?), - Algorithm::POSTPROB => Ok(POSTPROB::new(settings, equation, data)?), +pub(crate) fn run_nonparametric_algorithm( + input: NonparametricAlgorithmInput, +) -> Result> { + match input.method { + NonparametricMethod::Npag(_) => { + let mut algorithm = NPAG::from_input(input)?; + algorithm.fit() + } + NonparametricMethod::Npod(_) => { + let mut algorithm = NPOD::from_input(input)?; + algorithm.fit() + } + NonparametricMethod::Postprob(_) => { + let mut algorithm = POSTPROB::from_input(input)?; + algorithm.fit() + } } } diff --git a/src/algorithms/nonparametric/mod.rs b/src/algorithms/nonparametric/mod.rs new file mode 100644 index 000000000..abcca5bb8 --- /dev/null +++ b/src/algorithms/nonparametric/mod.rs @@ -0,0 +1,30 @@ +//! Non-parametric algorithm implementations +//! +//! This module contains the trait definition and implementations for non-parametric +//! population pharmacokinetic algorithms. These algorithms estimate the population +//! distribution as a discrete set of support points with associated probability weights. +//! +//! # Available Algorithms +//! +//! - [`NPAG`](npag): Non-Parametric Adaptive Grid +//! - [`NPOD`](npod): Non-Parametric Optimal Design +//! - [`POSTPROB`](postprob): Posterior probability calculation +//! +//! # Algorithm Trait +//! +//! All non-parametric algorithms implement the [`NPAlgorithm`] trait (aliased from `Algorithms`) +//! which defines the common interface for initialization, estimation, condensation, expansion, +//! 
and convergence evaluation. + +// Algorithm implementations +pub mod npag; +pub mod npod; +pub mod postprob; + +// Re-export algorithm structs +pub use npag::NPAG; +pub use npod::NPOD; +pub use postprob::POSTPROB; + +// Re-export the NP algorithm trait from parent +pub use super::Algorithms as NPAlgorithm; diff --git a/src/algorithms/npag.rs b/src/algorithms/nonparametric/npag.rs similarity index 86% rename from src/algorithms/npag.rs rename to src/algorithms/nonparametric/npag.rs index 3f17989e8..b623cfde1 100644 --- a/src/algorithms/npag.rs +++ b/src/algorithms/nonparametric/npag.rs @@ -1,14 +1,13 @@ -use crate::algorithms::{Status, StopReason}; +use crate::algorithms::{ + NativeNonparametricConfig, NonparametricAlgorithmInput, Status, StopReason, +}; +use crate::estimation::nonparametric::{ + calculate_psi, CycleLog, NPCycle, NonparametricWorkspace, Psi, Theta, Weights, +}; use crate::prelude::algorithms::Algorithms; -pub use crate::routines::estimation::ipm::burke; -pub use crate::routines::estimation::qr; -use crate::routines::settings::Settings; - -use crate::routines::output::{cycles::CycleLog, cycles::NPCycle, NPResult}; -use crate::structs::psi::{calculate_psi, Psi}; -use crate::structs::theta::Theta; -use crate::structs::weights::Weights; +pub(crate) use crate::estimation::nonparametric::ipm::burke; +pub(crate) use crate::estimation::nonparametric::qr; use anyhow::bail; use anyhow::Result; @@ -19,9 +18,9 @@ use pharmsol::prelude::{ use pharmsol::prelude::AssayErrorModel; -use crate::routines::initialization; +use crate::estimation::nonparametric::sample_space_for_parameters; -use crate::routines::expansion::adaptative_grid::adaptative_grid; +use crate::estimation::nonparametric::adaptative_grid; const THETA_E: f64 = 1e-4; // Convergence criteria const THETA_G: f64 = 1e-4; // Objective function convergence criteria @@ -47,14 +46,22 @@ pub struct NPAG { status: Status, cycle_log: CycleLog, data: Data, - settings: Settings, + config: 
NativeNonparametricConfig, } -impl Algorithms for NPAG { - fn new(settings: Settings, equation: E, data: Data) -> Result, anyhow::Error> { - Ok(Box::new(Self { +impl NPAG { + pub(crate) fn from_config( + equation: E, + data: Data, + error_models: AssayErrorModels, + config: NativeNonparametricConfig, + ) -> Box { + let ranges = config.ranges.clone(); + let gamma_delta = vec![0.1; error_models.len()]; + + Box::new(Self { equation, - ranges: settings.parameters().ranges(), + ranges, psi: Psi::new(), theta: Theta::new(), lambda: Weights::default(), @@ -65,20 +72,31 @@ impl Algorithms for NPAG { f0: -1e30, f1: f64::default(), cycle: 0, - gamma_delta: vec![0.1; settings.errormodels().len()], - error_models: settings.errormodels().clone(), + gamma_delta, + error_models, status: Status::Continue, cycle_log: CycleLog::new(), - settings, data, - })) + config, + }) } + pub(crate) fn from_input(input: NonparametricAlgorithmInput) -> Result> { + let config = input.native_config()?; + let error_models = input.error_models().clone(); + let equation = input.equation; + let data = input.data; + + Ok(Self::from_config(equation, data, error_models, config)) + } +} + +impl Algorithms for NPAG { fn equation(&self) -> &E { &self.equation } - fn into_npresult(&self) -> Result> { - NPResult::new( + fn into_workspace(&self) -> Result> { + NonparametricWorkspace::new( self.equation.clone(), self.data.clone(), self.theta.clone(), @@ -87,13 +105,13 @@ impl Algorithms for NPAG { -2. 
* self.objf, self.cycle, self.status.clone(), - self.settings.clone(), + self.config.run_configuration.clone(), self.cycle_log.clone(), ) } - fn settings(&self) -> &Settings { - &self.settings + fn error_models(&self) -> &AssayErrorModels { + &self.error_models } fn data(&self) -> &Data { @@ -101,7 +119,7 @@ impl Algorithms for NPAG { } fn get_prior(&self) -> Theta { - initialization::sample_space(&self.settings).unwrap() + sample_space_for_parameters(&self.config.parameter_space, &self.config.prior).unwrap() } fn likelihood(&self) -> f64 { @@ -175,7 +193,7 @@ impl Algorithms for NPAG { } // Stop if we have reached maximum number of cycles - if self.cycle >= self.settings.config().cycles { + if self.cycle >= self.config.max_cycles { tracing::warn!("Maximum number of cycles reached"); self.set_status(Status::Stop(StopReason::MaxCycles)); self.log_cycle_state(); @@ -202,7 +220,7 @@ impl Algorithms for NPAG { &self.data, &self.theta, &self.error_models, - self.cycle == 1 && self.settings.config().progress, + self.cycle == 1 && self.config.progress, )?; if let Err(err) = self.validate_psi() { diff --git a/src/algorithms/npod.rs b/src/algorithms/nonparametric/npod.rs similarity index 86% rename from src/algorithms/npod.rs rename to src/algorithms/nonparametric/npod.rs index a5756ffe8..dfd1acd38 100644 --- a/src/algorithms/npod.rs +++ b/src/algorithms/nonparametric/npod.rs @@ -1,21 +1,10 @@ -use crate::algorithms::StopReason; -use crate::routines::initialization::sample_space; -use crate::routines::output::{cycles::CycleLog, cycles::NPCycle, NPResult}; -use crate::structs::weights::Weights; -use crate::{ - algorithms::Status, - prelude::{ - algorithms::Algorithms, - routines::{ - estimation::{ipm::burke, qr}, - settings::Settings, - }, - }, - structs::{ - psi::{calculate_psi, Psi}, - theta::Theta, - }, +use crate::algorithms::{NativeNonparametricConfig, NonparametricAlgorithmInput, StopReason}; +use crate::estimation::nonparametric::ipm::burke; +use 
crate::estimation::nonparametric::qr; +use crate::estimation::nonparametric::{ + calculate_psi, CycleLog, NPCycle, NonparametricWorkspace, Psi, Theta, Weights, }; +use crate::{algorithms::Status, prelude::algorithms::Algorithms}; use pharmsol::SppOptimizer; use anyhow::bail; @@ -47,31 +36,12 @@ pub struct NPOD { status: Status, cycle_log: CycleLog, data: Data, - settings: Settings, + config: NativeNonparametricConfig, } impl Algorithms for NPOD { - fn new(settings: Settings, equation: E, data: Data) -> Result, anyhow::Error> { - Ok(Box::new(Self { - equation, - psi: Psi::new(), - theta: Theta::new(), - lambda: Weights::default(), - w: Weights::default(), - last_objf: -1e30, - objf: f64::NEG_INFINITY, - cycle: 0, - gamma_delta: vec![0.1; settings.errormodels().len()], - error_models: settings.errormodels().clone(), - converged: false, - status: Status::Continue, - cycle_log: CycleLog::new(), - settings, - data, - })) - } - fn into_npresult(&self) -> Result> { - NPResult::new( + fn into_workspace(&self) -> Result> { + NonparametricWorkspace::new( self.equation.clone(), self.data.clone(), self.theta.clone(), @@ -80,7 +50,7 @@ impl Algorithms for NPOD { -2. * self.objf, self.cycle, self.status.clone(), - self.settings.clone(), + self.config.run_configuration.clone(), self.cycle_log.clone(), ) } @@ -89,8 +59,8 @@ impl Algorithms for NPOD { &self.equation } - fn settings(&self) -> &Settings { - &self.settings + fn error_models(&self) -> &AssayErrorModels { + &self.error_models } fn data(&self) -> &Data { @@ -98,7 +68,11 @@ impl Algorithms for NPOD { } fn get_prior(&self) -> Theta { - sample_space(&self.settings).unwrap() + crate::estimation::nonparametric::sample_space_for_parameters( + &self.config.parameter_space, + &self.config.prior, + ) + .unwrap() } fn increment_cycle(&mut self) -> usize { @@ -161,7 +135,6 @@ impl Algorithms for NPOD { em.factor().unwrap_or_default() ); }); - // Increasing objf signals instability or model misspecification. 
if self.last_objf > self.objf + 1e-4 { tracing::warn!( "Objective function decreased from {:.4} to {:.4} (delta = {})", @@ -179,8 +152,7 @@ impl Algorithms for NPOD { return Ok(self.status.clone()); } - // Stop if we have reached maximum number of cycles - if self.cycle >= self.settings.config().cycles { + if self.cycle >= self.config.max_cycles { tracing::warn!("Maximum number of cycles reached"); self.converged = true; self.set_status(Status::Stop(StopReason::MaxCycles)); @@ -188,7 +160,6 @@ impl Algorithms for NPOD { return Ok(self.status.clone()); } - // Stop if stopfile exists if std::path::Path::new("stop").exists() { tracing::warn!("Stopfile detected - breaking"); self.converged = true; @@ -197,7 +168,6 @@ impl Algorithms for NPOD { return Ok(self.status.clone()); } - // Continue with normal operation self.status = Status::Continue; self.log_cycle_state(); Ok(self.status.clone()) @@ -211,7 +181,7 @@ impl Algorithms for NPOD { &self.data, &self.theta, &error_model, - self.cycle == 1 && self.settings.config().progress, + self.cycle == 1 && self.config.progress, )?; if let Err(err) = self.validate_psi() { @@ -249,12 +219,9 @@ impl Algorithms for NPOD { self.theta.filter_indices(keep.as_slice()); self.psi.filter_column_indices(keep.as_slice()); - //Rank-Revealing Factorization let (r, perm) = qr::qrd(&self.psi)?; let mut keep = Vec::::new(); - - // The minimum between the number of subjects and the actual number of support points let keep_n = self.psi.matrix().ncols().min(self.psi.matrix().nrows()); for i in 0..keep_n { let test = r.col(i).norm_l2(); @@ -265,7 +232,6 @@ impl Algorithms for NPOD { } } - // If a support point is dropped, log it as a debug message if self.psi.matrix().ncols() != keep.len() { tracing::debug!( "QR decomposition dropped {} support point(s)", @@ -298,8 +264,6 @@ impl Algorithms for NPOD { } }) .try_for_each(|(outeq, em)| -> Result<()> { - // OPTIMIZATION - let gamma_up = em.factor()? 
* (1.0 + self.gamma_delta[outeq]); let gamma_down = em.factor()? / (1.0 + self.gamma_delta[outeq]); @@ -361,11 +325,9 @@ impl Algorithms for NPOD { } fn expansion(&mut self) -> Result<()> { - // Compute pyl = psi * w using faer native operations let pyl_col = self.psi().matrix().as_ref() * self.w.weights().as_ref(); let pyl: Array1 = pyl_col.iter().copied().collect(); - // Add new point to theta based on the optimization of the D function let error_model: AssayErrorModels = self.error_models.clone(); let mut candididate_points: Vec> = Vec::default(); @@ -378,11 +340,6 @@ impl Algorithms for NPOD { let optimizer = SppOptimizer::new(&self.equation, &self.data, &error_model, &pyl); let candidate_point = optimizer.optimize_point(spp.to_owned()).unwrap(); *spp = candidate_point; - // add spp to theta - // recalculate psi - // re-run ipm to re-calculate w - // re-calculate pyl - // re-define a new optimization }); for cp in candididate_points { self.theta.suggest_point(cp.to_vec().as_slice(), THETA_D)?; @@ -392,6 +349,32 @@ impl Algorithms for NPOD { } impl NPOD { + pub(crate) fn from_input(input: NonparametricAlgorithmInput) -> Result> { + let config = input.native_config()?; + let error_models = input.error_models().clone(); + let gamma_delta = vec![0.1; error_models.len()]; + let equation = input.equation; + let data = input.data; + + Ok(Box::new(Self { + equation, + psi: Psi::new(), + theta: Theta::new(), + lambda: Weights::default(), + w: Weights::default(), + last_objf: -1e30, + objf: f64::NEG_INFINITY, + cycle: 0, + gamma_delta, + error_models, + converged: false, + status: Status::Continue, + cycle_log: CycleLog::new(), + data, + config, + })) + } + fn validate_psi(&mut self) -> Result<()> { let mut psi = self.psi().matrix().to_owned(); // First coerce all NaN and infinite in psi to 0.0 diff --git a/src/algorithms/postprob.rs b/src/algorithms/nonparametric/postprob.rs similarity index 72% rename from src/algorithms/postprob.rs rename to 
src/algorithms/nonparametric/postprob.rs index 19d030f66..3e778b009 100644 --- a/src/algorithms/postprob.rs +++ b/src/algorithms/nonparametric/postprob.rs @@ -1,11 +1,9 @@ use crate::{ - algorithms::{Status, StopReason}, - prelude::algorithms::Algorithms, - structs::{ - psi::{calculate_psi, Psi}, - theta::Theta, - weights::Weights, + algorithms::{NativeNonparametricConfig, NonparametricAlgorithmInput, Status, StopReason}, + estimation::nonparametric::{ + calculate_psi, CycleLog, NPCycle, NonparametricWorkspace, Psi, Theta, Weights, }, + prelude::algorithms::Algorithms, }; use anyhow::{Context, Result}; @@ -14,10 +12,8 @@ use pharmsol::prelude::{ simulator::Equation, }; -use crate::routines::estimation::ipm::burke; -use crate::routines::initialization; -use crate::routines::output::{cycles::CycleLog, NPResult}; -use crate::routines::settings::Settings; +use crate::estimation::nonparametric::ipm::burke; +use crate::estimation::nonparametric::sample_space_for_parameters; /// Posterior probability algorithm /// Reweights the prior probabilities to the observed data and error model @@ -30,29 +26,14 @@ pub struct POSTPROB { cycle: usize, status: Status, data: Data, - settings: Settings, + config: NativeNonparametricConfig, cyclelog: CycleLog, error_models: AssayErrorModels, } impl Algorithms for POSTPROB { - fn new(settings: Settings, equation: E, data: Data) -> Result, anyhow::Error> { - Ok(Box::new(Self { - equation, - psi: Psi::new(), - theta: Theta::new(), - w: Weights::default(), - objf: f64::INFINITY, - cycle: 0, - status: Status::Continue, - error_models: settings.errormodels().clone(), - settings, - data, - cyclelog: CycleLog::new(), - })) - } - fn into_npresult(&self) -> Result> { - NPResult::new( + fn into_workspace(&self) -> Result> { + NonparametricWorkspace::new( self.equation.clone(), self.data.clone(), self.theta.clone(), @@ -61,12 +42,12 @@ impl Algorithms for POSTPROB { self.objf, self.cycle, self.status.clone(), - self.settings.clone(), + 
self.config.run_configuration.clone(), self.cyclelog.clone(), ) } - fn settings(&self) -> &Settings { - &self.settings + fn error_models(&self) -> &AssayErrorModels { + &self.error_models } fn equation(&self) -> &E { @@ -78,7 +59,7 @@ impl Algorithms for POSTPROB { } fn get_prior(&self) -> Theta { - initialization::sample_space(&self.settings).unwrap() + sample_space_for_parameters(&self.config.parameter_space, &self.config.prior).unwrap() } fn likelihood(&self) -> f64 { @@ -143,7 +124,7 @@ impl Algorithms for POSTPROB { fn log_cycle_state(&mut self) { // Postprob doesn't track last_objf, so we use 0.0 as the delta - let state = crate::routines::output::cycles::NPCycle::new( + let state = NPCycle::new( self.cycle, self.objf, self.error_models.clone(), @@ -155,3 +136,26 @@ impl Algorithms for POSTPROB { self.cyclelog.push(state); } } + +impl POSTPROB { + pub(crate) fn from_input(input: NonparametricAlgorithmInput) -> Result> { + let config = input.native_config()?; + let error_models = input.error_models().clone(); + let equation = input.equation; + let data = input.data; + + Ok(Box::new(Self { + equation, + psi: Psi::new(), + theta: Theta::new(), + w: Weights::default(), + objf: f64::INFINITY, + cycle: 0, + status: Status::Continue, + data, + config, + cyclelog: CycleLog::new(), + error_models, + })) + } +} diff --git a/src/api/estimation_problem.rs b/src/api/estimation_problem.rs new file mode 100644 index 000000000..ea0de580a --- /dev/null +++ b/src/api/estimation_problem.rs @@ -0,0 +1,242 @@ +use anyhow::{bail, Result}; +use pharmsol::{Data, Equation}; +use serde::Serialize; + +use crate::algorithms::Algorithm; +use crate::estimation::nonparametric::Prior; +use crate::model::ModelDefinition; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum EstimationMethod { + Nonparametric(NonparametricMethod), +} + +impl EstimationMethod { + pub fn algorithm(self) -> Algorithm { + match self { + EstimationMethod::Nonparametric(method) => method.algorithm(), + } + } +} 
+ +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum NonparametricMethod { + Npag(NpagOptions), + Npod(NpodOptions), + Postprob(PostProbOptions), +} + +impl NonparametricMethod { + pub fn algorithm(self) -> Algorithm { + match self { + NonparametricMethod::Npag(_) => Algorithm::NPAG, + NonparametricMethod::Npod(_) => Algorithm::NPOD, + NonparametricMethod::Postprob(_) => Algorithm::POSTPROB, + } + } +} + +#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)] +pub struct NpagOptions; + +#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)] +pub struct NpodOptions; + +#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)] +pub struct PostProbOptions; + +#[derive(Debug, Clone, PartialEq, Serialize)] +pub struct OutputPlan { + pub write: bool, + pub path: Option, +} + +impl OutputPlan { + pub fn disabled() -> Self { + Self { + write: false, + path: None, + } + } +} + +impl Default for OutputPlan { + fn default() -> Self { + Self { + write: true, + path: None, + } + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)] +pub enum LoggingLevel { + Trace, + Debug, + Info, + Warn, + Error, +} + +impl Default for LoggingLevel { + fn default() -> Self { + Self::Info + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize)] +pub struct LoggingOptions { + pub initialize: bool, + pub level: LoggingLevel, + pub write: bool, + pub stdout: bool, +} + +impl Default for LoggingOptions { + fn default() -> Self { + Self { + initialize: false, + level: LoggingLevel::Info, + write: false, + stdout: true, + } + } +} + +#[derive(Debug, Clone, PartialEq, Serialize)] +pub struct ConvergenceOptions { + pub likelihood: f64, + pub pyl: f64, + pub eps: f64, +} + +impl Default for ConvergenceOptions { + fn default() -> Self { + Self { + likelihood: 1e-4, + pyl: 1e-2, + eps: 1e-2, + } + } +} + +#[derive(Debug, Clone, Serialize)] +pub struct AlgorithmTuning { + pub min_distance: f64, + pub nm_steps: usize, + pub tolerance: f64, +} + +impl Default for AlgorithmTuning { + fn default() 
-> Self { + Self { + min_distance: 1e-4, + nm_steps: 100, + tolerance: 1e-6, + } + } +} + +#[derive(Debug, Clone, Serialize)] +pub struct RuntimeOptions { + pub cycles: usize, + pub cache: bool, + pub progress: bool, + pub idelta: f64, + pub tad: f64, + pub prior: Option, + pub logging: LoggingOptions, + pub convergence: ConvergenceOptions, + pub tuning: AlgorithmTuning, +} + +impl Default for RuntimeOptions { + fn default() -> Self { + Self { + cycles: 100, + cache: true, + progress: true, + idelta: 0.12, + tad: 0.0, + prior: None, + logging: LoggingOptions::default(), + convergence: ConvergenceOptions::default(), + tuning: AlgorithmTuning::default(), + } + } +} + +#[derive(Debug, Clone)] +pub struct EstimationProblem { + pub model: ModelDefinition, + pub data: Data, + pub method: EstimationMethod, + pub output: OutputPlan, + pub runtime: RuntimeOptions, +} + +impl EstimationProblem { + pub fn builder(model: ModelDefinition, data: Data) -> EstimationProblemBuilder { + EstimationProblemBuilder { + model, + data, + method: None, + output: Some(OutputPlan::default()), + runtime: Some(RuntimeOptions::default()), + } + } +} + +impl EstimationProblem { + pub fn run(self) -> Result> { + crate::api::fit(self) + } +} + +pub struct EstimationProblemBuilder { + model: ModelDefinition, + data: Data, + method: Option, + output: Option, + runtime: Option, +} + +impl EstimationProblemBuilder { + pub fn method(mut self, method: EstimationMethod) -> Self { + self.method = Some(method); + self + } + + pub fn output(mut self, output: OutputPlan) -> Self { + self.output = Some(output); + self + } + + pub fn runtime(mut self, runtime: RuntimeOptions) -> Self { + self.runtime = Some(runtime); + self + } + + pub fn build(self) -> Result> { + let method = self + .method + .ok_or_else(|| anyhow::anyhow!("estimation method is required"))?; + if self.model.parameters.is_empty() { + bail!("estimation problem requires at least one parameter"); + } + + Ok(EstimationProblem { + model: 
self.model, + data: self.data, + method, + output: self.output.unwrap_or_default(), + runtime: self.runtime.unwrap_or_default(), + }) + } +} + +impl EstimationProblemBuilder { + pub fn run(self) -> Result> { + self.build()?.run() + } +} diff --git a/src/api/fit.rs b/src/api/fit.rs new file mode 100644 index 000000000..0787bf645 --- /dev/null +++ b/src/api/fit.rs @@ -0,0 +1,17 @@ +use anyhow::Result; +use pharmsol::equation::Equation; + +use crate::api::estimation_problem::EstimationProblem; +use crate::estimation::nonparametric; +use crate::results::FitResult; + +pub fn fit( + problem: EstimationProblem, +) -> Result> { + if problem.runtime.logging.initialize { + problem.initialize_logs()?; + } + + let compiled = problem.compile()?; + nonparametric::fit(compiled) +} diff --git a/src/api/mod.rs b/src/api/mod.rs new file mode 100644 index 000000000..cb757ba1a --- /dev/null +++ b/src/api/mod.rs @@ -0,0 +1,11 @@ +pub mod estimation_problem; +pub mod fit; +pub mod model_definition; + +pub use estimation_problem::{ + AlgorithmTuning, ConvergenceOptions, EstimationMethod, EstimationProblem, + EstimationProblemBuilder, LoggingLevel, LoggingOptions, NonparametricMethod, NpagOptions, + NpodOptions, OutputPlan, PostProbOptions, RuntimeOptions, +}; +pub use fit::fit; +pub use model_definition::{ModelDefinition, ModelDefinitionBuilder}; diff --git a/src/api/model_definition.rs b/src/api/model_definition.rs new file mode 100644 index 000000000..f596d46c3 --- /dev/null +++ b/src/api/model_definition.rs @@ -0,0 +1 @@ +pub use crate::model::{ModelDefinition, ModelDefinitionBuilder}; diff --git a/src/bestdose/cost.rs b/src/bestdose/cost.rs index b9dede94c..aa6ed04e4 100644 --- a/src/bestdose/cost.rs +++ b/src/bestdose/cost.rs @@ -114,7 +114,7 @@ use pharmsol::Equation; /// /// - **AUC** ([`Target::AUC`]): /// Predictions are cumulative AUC values calculated via trapezoidal rule -/// on a dense time grid (controlled by `settings.predictions().idelta`) +/// on a dense time grid 
(controlled by `config.prediction_interval()`) /// /// # Example /// @@ -129,7 +129,7 @@ use pharmsol::Equation; /// - Model simulation fails /// - Prediction length doesn't match observation count /// - AUC calculation fails (for AUC targets) -pub(crate) fn calculate_cost(problem: &BestDoseProblem, candidate_doses: &[f64]) -> Result { +pub fn calculate_cost(problem: &BestDoseProblem, candidate_doses: &[f64]) -> Result { // Validate candidate_doses length matches expected optimizable dose count let expected_optimizable = problem .target @@ -233,7 +233,7 @@ pub(crate) fn calculate_cost(problem: &BestDoseProblem, candidate_doses: &[f64]) // Calculate variance (using posterior weights) and population mean (using prior weights) - for ((row, post_prob), _prior_prob) in problem + for ((row, post_prob), prior_prob) in problem .theta .matrix() .row_iter() @@ -252,13 +252,12 @@ pub(crate) fn calculate_cost(problem: &BestDoseProblem, candidate_doses: &[f64]) } Target::AUCFromZero => { // For AUC: simulate at dense time grid and calculate cumulative AUC - let idelta = problem.settings.predictions().idelta; + let idelta = problem.config.prediction_interval(); let start_time = 0.0; // Future starts at 0 let end_time = obs_times.last().copied().unwrap_or(0.0); // Generate dense time grid - let dense_times = - calculate_dense_times(start_time, end_time, &obs_times, idelta as usize); + let dense_times = calculate_dense_times(start_time, end_time, &obs_times, idelta); // Create temporary subject with dense time points for simulation let subject_id = target_subject.id().to_string(); @@ -372,11 +371,11 @@ pub(crate) fn calculate_cost(problem: &BestDoseProblem, candidate_doses: &[f64]) } Target::AUCFromLastDose => { // For interval AUC: simulate at dense time grid and calculate AUC from last dose - let idelta = problem.settings.predictions().idelta; + let idelta = problem.config.prediction_interval(); let end_time = obs_times.last().copied().unwrap_or(0.0); // Generate dense time grid 
from 0 to end_time (need full grid for intervals) - let dense_times = calculate_dense_times(0.0, end_time, &obs_times, idelta as usize); + let dense_times = calculate_dense_times(0.0, end_time, &obs_times, idelta); // Create temporary subject with dense time points for simulation let subject_id = target_subject.id().to_string(); @@ -509,8 +508,8 @@ pub(crate) fn calculate_cost(problem: &BestDoseProblem, candidate_doses: &[f64]) let pj = preds_i[j]; let se = (obs_val - pj).powi(2); sumsq_i += se; - // Calculate population mean using POSTERIOR probabilities - y_bar[j] += post_prob * pj; + // Calculate population mean using population probabilities + y_bar[j] += prior_prob * pj; } variance += post_prob * sumsq_i; // Weighted by posterior diff --git a/src/bestdose/mod.rs b/src/bestdose/mod.rs index 4502b817e..6ffdedd4b 100644 --- a/src/bestdose/mod.rs +++ b/src/bestdose/mod.rs @@ -9,37 +9,36 @@ //! # Quick Start //! //! ```rust,no_run,ignore -//! use pmcore::bestdose::{BestDosePosterior, Target, DoseRange}; +//! use pmcore::bestdose::{BestDoseProblem, Target, DoseRange}; //! -//! # fn example(population_theta: pmcore::structs::theta::Theta, -//! # population_weights: pmcore::structs::weights::Weights, +//! # fn example(population_theta: pmcore::estimation::nonparametric::Theta, +//! # population_weights: pmcore::estimation::nonparametric::Weights, //! # past_data: pharmsol::prelude::Subject, //! # target: pharmsol::prelude::Subject, //! # eq: pharmsol::prelude::ODE, -//! # settings: pmcore::routines::settings::Settings) +//! # config: pmcore::bestdose::BestDoseConfig) //! # -> anyhow::Result<()> { -//! // Stage 1: Compute posterior from patient history -//! let posterior = BestDosePosterior::compute( -//! &population_theta, // Population support points from NPAG -//! &population_weights, // Population probabilities +//! // Create optimization problem +//! let problem = BestDoseProblem::new( +//! &population_theta, // Population support points from NPAG +//! 
&population_weights, // Population probabilities //! Some(past_data), // Patient history (None = use prior) -//! eq, // PK/PD model -//! settings, // NPAG settings -//! )?; -//! -//! // Stage 2 & 3: Optimize doses and get predictions -//! let result = posterior.optimize( //! target, // Future template with targets //! None, // time_offset (None = standard mode) +//! eq, // PK/PD model //! DoseRange::new(0.0, 1000.0), // Dose constraints (0-1000 mg) //! 0.5, // bias_weight: 0=personalized, 1=population +//! config, // BestDose refinement and prediction settings //! Target::Concentration, // Target type //! )?; //! +//! // Run optimization +//! let result = problem.optimize()?; +//! //! // Extract results -//! println!("Optimal dose: {:?} mg", result.doses()); -//! println!("Final cost: {}", result.objf()); -//! println!("Method: {}", result.optimization_method()); +//! println!("Optimal dose: {:?} mg", result.dose); +//! println!("Final cost: {}", result.objf); +//! println!("Method: {}", result.optimization_method); // "posterior" or "uniform" //! # Ok(()) //! # } //! ``` @@ -143,33 +142,31 @@ //! ## Single Dose Optimization //! //! ```rust,no_run,ignore -//! use pmcore::bestdose::{BestDosePosterior, Target, DoseRange}; +//! use pmcore::bestdose::{BestDoseProblem, Target, DoseRange}; //! use pharmsol::prelude::Subject; //! -//! # fn example(population_theta: pmcore::structs::theta::Theta, -//! # population_weights: pmcore::structs::weights::Weights, +//! # fn example(population_theta: pmcore::estimation::nonparametric::Theta, +//! # population_weights: pmcore::estimation::nonparametric::Weights, //! # past: pharmsol::prelude::Subject, //! # eq: pharmsol::prelude::ODE, -//! # settings: pmcore::routines::settings::Settings) +//! # config: pmcore::bestdose::BestDoseConfig) //! # -> anyhow::Result<()> { //! // Define target: 5 mg/L at 24 hours //! let target = Subject::builder("patient_001") -//! .bolus(0.0, 0.0, 0) // Dose placeholder (will be optimized) +//! 
.bolus(0.0, 100.0, 0) // Initial dose (will be optimized) //! .observation(24.0, 5.0, 0) // Target: 5 mg/L at 24h //! .build(); //! -//! let posterior = BestDosePosterior::compute( -//! &population_theta, &population_weights, Some(past), eq, settings, -//! )?; -//! -//! let result = posterior.optimize( -//! target, None, +//! let problem = BestDoseProblem::new( +//! &population_theta, &population_weights, Some(past), target, None, +//! eq, //! DoseRange::new(10.0, 500.0), // 10-500 mg allowed //! 0.3, // Slight population emphasis -//! Target::Concentration, +//! config, Target::Concentration, //! )?; //! -//! println!("Optimal dose: {} mg", result.doses()[0]); +//! let result = problem.optimize()?; +//! println!("Optimal dose: {} mg", result.dose[0]); //! # Ok(()) //! # } //! ``` @@ -177,36 +174,34 @@ //! ## Multiple Doses with AUC Target //! //! ```rust,no_run,ignore -//! use pmcore::bestdose::{BestDosePosterior, Target, DoseRange}; +//! use pmcore::bestdose::{BestDoseProblem, Target, DoseRange}; //! use pharmsol::prelude::Subject; //! -//! # fn example(population_theta: pmcore::structs::theta::Theta, -//! # population_weights: pmcore::structs::weights::Weights, +//! # fn example(population_theta: pmcore::estimation::nonparametric::Theta, +//! # population_weights: pmcore::estimation::nonparametric::Weights, //! # past: pharmsol::prelude::Subject, //! # eq: pharmsol::prelude::ODE, -//! # settings: pmcore::routines::settings::Settings) +//! # config: pmcore::bestdose::BestDoseConfig) //! # -> anyhow::Result<()> { //! // Target: Achieve AUC₂₄ = 400 mg·h/L //! let target = Subject::builder("patient_002") -//! .bolus(0.0, 0.0, 0) // Dose 1 placeholder (optimized) -//! .bolus(12.0, 0.0, 0) // Dose 2 placeholder (optimized) +//! .bolus(0.0, 100.0, 0) // Dose 1 (optimized) +//! .bolus(12.0, 100.0, 0) // Dose 2 (optimized) //! .observation(24.0, 400.0, 0) // Target: AUC₂₄ = 400 //! .build(); //! -//! let posterior = BestDosePosterior::compute( -//! 
&population_theta, &population_weights, Some(past), eq, settings, -//! )?; -//! -//! let result = posterior.optimize( -//! target, None, +//! let problem = BestDoseProblem::new( +//! &population_theta, &population_weights, Some(past), target, None, +//! eq, //! DoseRange::new(50.0, 300.0), //! 0.0, // Full personalization -//! Target::AUCFromZero, // Cumulative AUC target! +//! config, Target::AUCFromZero, // Cumulative AUC target! //! )?; //! -//! println!("Dose 1: {} mg at t=0", result.doses()[0]); -//! println!("Dose 2: {} mg at t=12", result.doses()[1]); -//! if let Some(auc) = result.auc_predictions() { +//! let result = problem.optimize()?; +//! println!("Dose 1: {} mg at t=0", result.dose[0]); +//! println!("Dose 2: {} mg at t=12", result.dose[1]); +//! if let Some(auc) = result.auc_predictions { //! println!("Predicted AUC₂₄: {} mg·h/L", auc[0].1); //! } //! # Ok(()) @@ -216,26 +211,26 @@ //! ## Population-Only Optimization //! //! ```rust,no_run,ignore -//! # use pmcore::bestdose::{BestDosePosterior, Target, DoseRange}; -//! # fn example(population_theta: pmcore::structs::theta::Theta, -//! # population_weights: pmcore::structs::weights::Weights, +//! # use pmcore::bestdose::{BestDoseProblem, Target, DoseRange}; +//! # fn example(population_theta: pmcore::estimation::nonparametric::Theta, +//! # population_weights: pmcore::estimation::nonparametric::Weights, //! # target: pharmsol::prelude::Subject, //! # eq: pharmsol::prelude::ODE, -//! # settings: pmcore::routines::settings::Settings) +//! # config: pmcore::bestdose::BestDoseConfig) //! # -> anyhow::Result<()> { //! // No patient history - use population prior directly -//! let posterior = BestDosePosterior::compute( +//! let problem = BestDoseProblem::new( //! &population_theta, &population_weights, -//! None, // No past data → use prior -//! eq, settings, -//! )?; -//! -//! let result = posterior.optimize( -//! target, None, +//! None, // No past data +//! target, None, // time_offset +//! eq, //! 
DoseRange::new(0.0, 1000.0), //! 1.0, // Full population weighting +//! config, //! Target::Concentration, //! )?; +//! +//! let result = problem.optimize()?; //! // Returns population-typical dose //! # Ok(()) //! # } @@ -249,7 +244,7 @@ //! - `0.0`: Minimize patient-specific variance (full personalization) //! - `1.0`: Minimize deviation from population (robustness) //! -//! - **`max_cycles`**: NPAGFULL refinement iterations +//! - **`refinement_cycles`**: NPAGFULL refinement iterations //! - `0`: Skip refinement (use filtered points directly) //! - `100-500`: Typical range for refinement //! @@ -261,46 +256,70 @@ //! - `Target::AUCFromZero`: Cumulative AUC from time 0 //! - `Target::AUCFromLastDose`: Interval AUC from last dose //! +//! ## Performance Tuning +//! +//! For faster optimization: +//! ```rust,no_run,ignore +//! # use pmcore::bestdose::{BestDoseProblem, Target, DoseRange}; +//! # fn example(population_theta: pmcore::estimation::nonparametric::Theta, +//! # population_weights: pmcore::estimation::nonparametric::Weights, +//! # target: pharmsol::prelude::Subject, +//! # eq: pharmsol::ODE, +//! # error_models: pharmsol::prelude::AssayErrorModels, +//! # parameter_space: pmcore::prelude::ParameterSpace) +//! # -> anyhow::Result<()> { +//! let config = pmcore::bestdose::BestDoseConfig::new(parameter_space, error_models) +//! .with_refinement_cycles(100) +//! .with_prediction_interval(30.0); +//! +//! let problem = BestDoseProblem::new( +//! &population_theta, &population_weights, None, target, None, +//! eq, +//! DoseRange::new(0.0, 1000.0), 0.5, +//! config, +//! Target::Concentration, +//! )?; +//! # Ok(()) +//! # } +//! ``` +//! //! # See Also //! -//! - [`BestDosePosterior`]: Two-stage API entry point (compute posterior, then optimize) +//! - [`BestDoseProblem`]: Main entry point for optimization //! - [`BestDoseResult`]: Output structure with optimal doses //! - [`Target`]: Enum for concentration vs AUC targets //! 
- [`DoseRange`]: Dose constraint specification -pub(crate) mod cost; +pub mod cost; mod optimization; mod posterior; -pub(crate) mod predictions; +pub mod predictions; mod types; // Re-export public API pub use types::{ - BestDosePosterior, BestDoseResult, BestDoseStatus, DoseRange, OptimalMethod, Target, + BestDoseConfig, BestDosePosterior, BestDoseProblem, BestDoseResult, BestDoseStatus, DoseRange, + OptimalMethod, Target, }; /// Helper function to concatenate past and future subjects (Option 3: Fortran MAKETMP approach) /// /// This mimics Fortran's MAKETMP subroutine logic: /// 1. Takes doses (only doses, not observations) from past subject -/// 2. Offsets all future subject event times by `effective_offset` (absolute) +/// 2. Offsets all future subject event times by `time_offset` /// 3. Combines into single continuous subject /// -/// Note: This function receives the **effective** (absolute) offset, computed -/// by `optimize()` as `max_past_time + time_offset` where `time_offset` is the -/// user-facing gap parameter. 
-/// /// # Arguments /// /// * `past` - Subject with past history (only doses will be used) /// * `future` - Subject template for future (all events: doses + observations) -/// * `effective_offset` - Absolute time offset to apply to all future events +/// * `time_offset` - Time offset to apply to all future events /// /// # Returns /// /// Combined subject with: -/// - Past doses at original times [0, effective_offset) -/// - Future doses + observations at offset times [effective_offset, ∞) +/// - Past doses at original times [0, time_offset) +/// - Future doses + observations at offset times [time_offset, ∞) /// /// # Example /// @@ -308,24 +327,24 @@ pub use types::{ /// // Past: dose at t=0, observation at t=6 (patient has been on therapy 6 hours) /// let past = Subject::builder("patient") /// .bolus(0.0, 500.0, 0) -/// .observation(6.0, 15.0, 0) // 15 mg/L at 6 hours (max_past_time = 6) +/// .observation(6.0, 15.0, 0) // 15 mg/L at 6 hours /// .build(); /// /// // Future: dose at t=0 (relative), target at t=24 (relative) /// let future = Subject::builder("patient") -/// .bolus(0.0, 100.0, 0) // At absolute t=6 (with gap=0) -/// .observation(24.0, 10.0, 0) // At absolute t=30 (with gap=0) +/// .bolus(0.0, 100.0, 0) // Dose to optimize, will be at t=6 absolute +/// .observation(24.0, 10.0, 0) // Target at t=30 absolute /// .build(); /// -/// // effective_offset = max_past_time(6) + gap(0) = 6 +/// // Concatenate with time_offset = 6.0 /// let combined = concatenate_past_and_future(&past, &future, 6.0); -/// // Result: dose at t=0 (fixed, 500mg), dose at t=6 (optimizable), +/// // Result: dose at t=0 (fixed, 500mg), dose at t=6 (optimizable, 100mg initial), /// // observation target at t=30 (10 mg/L) /// ``` fn concatenate_past_and_future( past: &pharmsol::prelude::Subject, future: &pharmsol::prelude::Subject, - effective_offset: f64, + time_offset: f64, ) -> pharmsol::prelude::Subject { use pharmsol::prelude::*; @@ -349,20 +368,17 @@ fn 
concatenate_past_and_future( } } - // Add future events with effective offset + // Add future events with time offset for occasion in future.occasions() { for event in occasion.events() { match event { Event::Bolus(bolus) => { - builder = builder.bolus( - bolus.time() + effective_offset, - bolus.amount(), - bolus.input(), - ); + builder = + builder.bolus(bolus.time() + time_offset, bolus.amount(), bolus.input()); } Event::Infusion(inf) => { builder = builder.infusion( - inf.time() + effective_offset, + inf.time() + time_offset, inf.amount(), inf.input(), inf.duration(), @@ -371,7 +387,7 @@ fn concatenate_past_and_future( Event::Observation(obs) => { builder = match obs.value() { Some(val) => { - builder.observation(obs.time() + effective_offset, val, obs.outeq()) + builder.observation(obs.time() + time_offset, val, obs.outeq()) } None => builder, }; @@ -383,65 +399,79 @@ fn concatenate_past_and_future( builder.build() } +/// Calculate which doses are optimizable based on dose amounts +/// +/// Returns a boolean mask where: +/// - `true` = dose amount is 0 (placeholder, optimizable) +/// - `false` = dose amount > 0 (fixed past dose) +/// +/// This allows users to specify a combined subject with: +/// - Non-zero doses for past doses (e.g., 500 mg at t=0) - these are fixed +/// - Zero doses as placeholders for future doses (e.g., 0 mg at t=6) - these are optimized +/// +/// # Arguments +/// +/// * `subject` - The subject with both fixed and placeholder doses +/// +/// # Returns +/// +/// Vector of booleans, one per dose in the subject +/// +/// # Example +/// +/// ```rust,ignore +/// let subject = Subject::builder("patient") +/// .bolus(0.0, 500.0, 0) // Past dose (fixed) - mask[0] = false +/// .bolus(6.0, 0.0, 0) // Future dose (optimize) - mask[1] = true +/// .observation(30.0, 10.0, 0) +/// .build(); +/// let mask = calculate_dose_optimization_mask(&subject); +/// assert_eq!(mask, vec![false, true]); +/// ``` +fn calculate_dose_optimization_mask(subject: 
&pharmsol::prelude::Subject) -> Vec { + use pharmsol::prelude::*; + + let mut mask = Vec::new(); + + for occasion in subject.occasions() { + for event in occasion.events() { + match event { + Event::Bolus(bolus) => { + // Dose is optimizable if amount is 0 (placeholder) + mask.push(bolus.amount() == 0.0); + } + Event::Infusion(infusion) => { + // Infusion is optimizable if amount is 0 (placeholder) + mask.push(infusion.amount() == 0.0); + } + Event::Observation(_) => { + // Observations don't go in the mask + } + } + } + } + + mask +} + use anyhow::Result; use pharmsol::prelude::*; use pharmsol::ODE; -use crate::routines::settings::Settings; -use crate::structs::theta::Theta; -use crate::structs::weights::Weights; - -use types::BestDoseProblem; +use crate::estimation::nonparametric::{Theta, Weights}; // ═════════════════════════════════════════════════════════════════════════════ // BestDosePosterior: Public two-stage API // ═════════════════════════════════════════════════════════════════════════════ impl BestDosePosterior { - /// **Stage 1**: Compute the Bayesian posterior density from population prior and patient data - /// - /// This performs the expensive posterior calculation (NPAGFULL11 filtering + NPAGFULL refinement) - /// and returns a reusable `BestDosePosterior` that can be optimized multiple times. 
- /// - /// # Algorithm - /// - /// ```text - /// Prior (N support points) - /// ↓ - /// NPAGFULL11: Bayesian filtering - /// P(θᵢ|data) ∝ P(data|θᵢ) × P(θᵢ) - /// ↓ - /// Filtered posterior (M points) - /// ↓ - /// NPAGFULL: Local refinement (max_cycles iterations) - /// ↓ - /// Refined posterior (M points with updated weights) - /// ``` - /// - /// # Arguments - /// - /// * `population_theta` - Population support points from NPAG - /// * `population_weights` - Population probabilities - /// * `past_data` - Patient history (`None` = use prior directly) - /// * `eq` - Pharmacokinetic/pharmacodynamic model - /// * `settings` - NPAG settings (includes error models and posterior refinement config) - /// - /// # Example - /// - /// ```rust,no_run,ignore - /// let posterior = BestDosePosterior::compute( - /// &theta, &weights, - /// Some(past_subject), - /// eq, settings, - /// )?; - /// println!("Posterior has {} support points", posterior.n_support_points()); - /// ``` + /// Stage 1: compute the reusable posterior density from the population prior and patient data. pub fn compute( population_theta: &Theta, population_weights: &Weights, past_data: Option, eq: ODE, - settings: Settings, + config: BestDoseConfig, ) -> Result { tracing::info!("╔══════════════════════════════════════════════════════════╗"); tracing::info!("║ BestDose Algorithm: STAGE 1 ║"); @@ -454,8 +484,8 @@ impl BestDosePosterior { population_weights, past_data.as_ref(), &eq, - &settings.errormodels, - &settings, + config.error_models(), + &config, )?; tracing::info!("╔══════════════════════════════════════════════════════════╗"); @@ -469,41 +499,11 @@ impl BestDosePosterior { population_weights: filtered_population_weights, past_data, eq, - settings, + config, }) } - /// **Stage 2**: Optimize doses for target outcomes using the computed posterior - /// - /// This runs the dual optimization (posterior weights vs uniform weights) and - /// returns the best dosing regimen. 
Can be called multiple times on the same - /// posterior with different parameters. - /// - /// # Arguments - /// - /// * `target` - Future dosing template with target observations - /// * `time_offset` - Optional gap (in hours) between the last past event and the start of - /// the future target. 0 means the future starts immediately after the last past event. - /// The effective absolute offset is `max_past_time + time_offset`. - /// * `dose_range` - Allowable dose constraints - /// * `bias_weight` - λ in \[0,1\]: 0=personalized, 1=population - /// * `target_type` - Concentration or AUC targets - /// - /// # Example - /// - /// ```rust,no_run,ignore - /// // Try different bias weights - /// for &bw in &[0.0, 0.25, 0.5, 0.75, 1.0] { - /// let result = posterior.optimize( - /// target.clone(), - /// None, - /// DoseRange::new(0.0, 300.0), - /// bw, - /// Target::Concentration, - /// )?; - /// println!("λ={}: dose={:.1}", bw, result.doses()[0]); - /// } - /// ``` + /// Stage 2: optimize future doses against the computed posterior. pub fn optimize( &self, target: Subject, @@ -519,10 +519,6 @@ impl BestDosePosterior { tracing::info!(" Target type: {:?}", target_type); tracing::info!(" Bias weight (λ): {}", bias_weight); - // Validate and compute effective time_offset - // time_offset is a gap relative to the last past event: - // effective_offset = max_past_time + time_offset - // So time_offset=0 means "future starts right after last past event" if let Some(t) = time_offset { if t < 0.0 { return Err(anyhow::anyhow!( @@ -533,7 +529,6 @@ impl BestDosePosterior { } } - // Compute the absolute offset for concatenation let effective_offset = time_offset.map(|t| { let max_past_time = self .past_data @@ -553,9 +548,6 @@ impl BestDosePosterior { max_past_time + t }); - // Handle past/future concatenation if needed - // When time_offset is provided, offset all target event times by the - // effective offset (max_past_time + gap) and prepend past doses. 
let final_target = match effective_offset { None => target, Some(eff) => { @@ -581,7 +573,6 @@ impl BestDosePosterior { } }; - // Validate that the target has observations let has_observations = final_target .occasions() .iter() @@ -593,7 +584,6 @@ impl BestDosePosterior { )); } - // Build the internal optimization problem let problem = BestDoseProblem { target: final_target, target_type, @@ -601,12 +591,11 @@ impl BestDosePosterior { theta: self.theta.clone(), posterior: self.posterior.clone(), eq: self.eq.clone(), - settings: self.settings.clone(), + config: self.config.clone(), doserange: dose_range, bias_weight, }; - // Run dual optimization + final predictions optimization::dual_optimization(&problem) } } @@ -615,6 +604,32 @@ impl BestDosePosterior { // Helper Functions for STAGE 1: Posterior Density Calculation // ═════════════════════════════════════════════════════════════════════════════ +/// Validate time_offset parameter for past/future separation mode +fn validate_time_offset(time_offset: f64, past_data: &Option) -> Result<()> { + if let Some(past_subject) = past_data { + let max_past_time = past_subject + .occasions() + .iter() + .flat_map(|occ| occ.events()) + .map(|event| match event { + Event::Bolus(b) => b.time(), + Event::Infusion(i) => i.time(), + Event::Observation(o) => o.time(), + }) + .fold(0.0_f64, |max, time| max.max(time)); + + if time_offset < max_past_time { + return Err(anyhow::anyhow!( + "Invalid time_offset: {} is before the last past_data event at time {}. 
\ + time_offset must be >= the maximum time in past_data to avoid time travel!", + time_offset, + max_past_time + )); + } + } + Ok(()) +} + /// Calculate posterior density (STAGE 1: Two-step process) /// /// # Algorithm Flow (Matches Diagram) @@ -647,7 +662,7 @@ fn calculate_posterior_density( past_data: Option<&Subject>, eq: &ODE, error_models: &AssayErrorModels, - settings: &Settings, + config: &BestDoseConfig, ) -> Result<(Theta, Weights, Weights, Subject)> { match past_data { None => { @@ -691,7 +706,7 @@ fn calculate_posterior_density( &past_data_obj, eq, error_models, - settings, + config, )?; Ok(( @@ -704,3 +719,235 @@ fn calculate_posterior_density( } } } + +/// Prepare target subject by handling past/future concatenation if needed +/// +/// # Returns +/// +/// Tuple: (final_target, final_past_data) +fn prepare_target_subject( + past_subject: Subject, + target: Subject, + time_offset: Option, +) -> Result<(Subject, Subject)> { + match time_offset { + None => { + tracing::info!(" Mode: Standard (single subject)"); + Ok((target, past_subject)) + } + Some(t) => { + tracing::info!(" Mode: Past/Future separation (Fortran MAKETMP approach)"); + tracing::info!(" Current time boundary: {} hours", t); + tracing::info!(" Concatenating past and future subjects..."); + + let combined = concatenate_past_and_future(&past_subject, &target, t); + + // Log dose structure + let mask = calculate_dose_optimization_mask(&combined); + let num_fixed = mask.iter().filter(|&&x| !x).count(); + let num_optimizable = mask.iter().filter(|&&x| x).count(); + tracing::info!(" Fixed doses (from past): {}", num_fixed); + tracing::info!(" Optimizable doses (from future): {}", num_optimizable); + + Ok((combined, past_subject)) + } + } +} + +// ═════════════════════════════════════════════════════════════════════════════ + +impl BestDoseProblem { + /// Create a new BestDose problem with automatic posterior calculation + /// + /// This is the main entry point for the BestDose algorithm. 
+ /// + /// # Algorithm Structure (Matches Flowchart) + /// + /// ```text + /// ┌─────────────────────────────────────────┐ + /// │ STAGE 1: Posterior Density Calculation │ + /// │ │ + /// │ Prior Density (N points) │ + /// │ ↓ │ + /// │ Has past data with observations? │ + /// │ ↓ Yes ↓ No │ + /// │ Step 1.1: Use prior │ + /// │ NPAGFULL11 directly │ + /// │ (Filter) │ + /// │ ↓ │ + /// │ Step 1.2: │ + /// │ NPAGFULL │ + /// │ (Refine) │ + /// │ ↓ │ + /// │ Posterior Density │ + /// └─────────────────────────────────────────┘ + /// ``` + /// + /// # Parameters + /// + /// * `population_theta` - Population support points from NPAG + /// * `population_weights` - Population probabilities + /// * `past_data` - Patient history (None = use prior directly) + /// * `target` - Future dosing template with targets + /// * `time_offset` - Optional time offset for concatenation (None = standard mode, Some(t) = Fortran mode) + /// * `eq` - Pharmacokinetic/pharmacodynamic model + /// * `error_models` - Error model specifications + /// * `doserange` - Allowable dose constraints + /// * `bias_weight` - λ ∈ [0,1]: 0=personalized, 1=population + /// * `config` - BestDose nonparametric configuration + /// * `target_type` - Concentration or AUC targets + /// + /// # Returns + /// + /// BestDoseProblem ready for `optimize()` + #[allow(clippy::too_many_arguments)] + pub fn new( + population_theta: &Theta, + population_weights: &Weights, + past_data: Option, + target: Subject, + time_offset: Option, + eq: ODE, + doserange: DoseRange, + bias_weight: f64, + config: BestDoseConfig, + target_type: Target, + ) -> Result { + tracing::info!("╔══════════════════════════════════════════════════════════╗"); + tracing::info!("║ BestDose Algorithm: STAGE 1 ║"); + tracing::info!("║ Posterior Density Calculation ║"); + tracing::info!("╚══════════════════════════════════════════════════════════╝"); + + // Validate input if using past/future separation mode + if let Some(t) = time_offset { + 
validate_time_offset(t, &past_data)?; + } + + // ═════════════════════════════════════════════════════════════ + // STAGE 1: Calculate Posterior Density + // ═════════════════════════════════════════════════════════════ + let (posterior_theta, posterior_weights, filtered_population_weights, past_subject) = + calculate_posterior_density( + population_theta, + population_weights, + past_data.as_ref(), + &eq, + config.error_models(), + &config, + )?; + + // Handle past/future concatenation if needed + let (final_target, _) = prepare_target_subject(past_subject, target, time_offset)?; + + tracing::info!("╔══════════════════════════════════════════════════════════╗"); + tracing::info!("║ Stage 1 Complete - Ready for Optimization ║"); + tracing::info!("╚══════════════════════════════════════════════════════════╝"); + tracing::info!(" Support points: {}", posterior_theta.matrix().nrows()); + tracing::info!(" Target type: {:?}", target_type); + tracing::info!(" Bias weight (λ): {}", bias_weight); + + Ok(BestDoseProblem { + target: final_target, + target_type, + population_weights: filtered_population_weights, + theta: posterior_theta, + posterior: posterior_weights, + eq, + config, + doserange, + bias_weight, + }) + } + + /// Run the complete BestDose optimization algorithm + /// + /// # Algorithm Flow (Matches Diagram!) 
+ /// + /// ```text + /// ┌─────────────────────────────────────────┐ + /// │ STAGE 1: Posterior Calculation │ + /// │ [COMPLETED in new()] │ + /// └────────────┬────────────────────────────┘ + /// ↓ + /// ┌─────────────────────────────────────────┐ + /// │ STAGE 2: Dual Optimization │ + /// │ │ + /// │ Optimization 1: Posterior Weights │ + /// │ (Patient-specific) │ + /// │ ↓ │ + /// │ Result 1: (doses₁, cost₁) │ + /// │ │ + /// │ Optimization 2: Uniform Weights │ + /// │ (Population-based) │ + /// │ ↓ │ + /// │ Result 2: (doses₂, cost₂) │ + /// │ │ + /// │ Select: min(cost₁, cost₂) │ + /// └────────────┬────────────────────────────┘ + /// ↓ + /// ┌─────────────────────────────────────────┐ + /// │ STAGE 3: Final Predictions │ + /// │ │ + /// │ Calculate predictions with │ + /// │ optimal doses and winning weights │ + /// └─────────────────────────────────────────┘ + /// ``` + /// + /// # Returns + /// + /// `BestDoseResult` containing: + /// - `dose`: Optimal dose amount(s) + /// - `objf`: Final cost function value + /// - `preds`: Concentration-time predictions + /// - `auc_predictions`: AUC values (if target_type is AUC) + /// - `optimization_method`: "posterior" or "uniform" + pub fn optimize(self) -> Result { + tracing::info!("╔══════════════════════════════════════════════════════════╗"); + tracing::info!("║ BestDose Algorithm: STAGE 2 & 3 ║"); + tracing::info!("║ Dual Optimization + Final Predictions ║"); + tracing::info!("╚══════════════════════════════════════════════════════════╝"); + + // STAGE 2 & 3: Dual optimization + predictions + optimization::dual_optimization(&self) + } + + /// Set the bias weight (lambda parameter) + /// + /// - λ = 0.0 (default): Full personalization (minimize patient-specific variance) + /// - λ = 0.5: Balanced between individual and population + /// - λ = 1.0: Population-based (minimize deviation from population mean) + pub fn with_bias_weight(mut self, weight: f64) -> Self { + self.bias_weight = weight; + self + } + + /// 
Get a reference to the refined posterior support points (Θ) + pub fn posterior_theta(&self) -> &Theta { + &self.theta + } + + /// Get the posterior probability weights + pub fn posterior_weights(&self) -> &Weights { + &self.posterior + } + + /// Get the filtered population weights used for the bias term + pub fn population_weights(&self) -> &Weights { + &self.population_weights + } + + /// Get the prepared target subject + pub fn target_subject(&self) -> &Subject { + &self.target + } + + /// Get the currently configured bias weight (λ) + pub fn bias_weight(&self) -> f64 { + self.bias_weight + } + + /// Get the selected optimization target type + pub fn target_type(&self) -> Target { + self.target_type + } +} diff --git a/src/bestdose/optimization.rs b/src/bestdose/optimization.rs index bd4056ca2..f06b0382c 100644 --- a/src/bestdose/optimization.rs +++ b/src/bestdose/optimization.rs @@ -46,7 +46,7 @@ use argmin::solver::neldermead::NelderMead; use crate::bestdose::cost::calculate_cost; use crate::bestdose::predictions::calculate_final_predictions; use crate::bestdose::types::{BestDoseProblem, BestDoseResult, BestDoseStatus, OptimalMethod}; -use crate::structs::weights::Weights; +use crate::estimation::nonparametric::Weights; use pharmsol::prelude::*; /// Create initial simplex for Nelder-Mead optimization diff --git a/src/bestdose/posterior.rs b/src/bestdose/posterior.rs index 9506d390e..28a30ba0c 100644 --- a/src/bestdose/posterior.rs +++ b/src/bestdose/posterior.rs @@ -53,14 +53,14 @@ use anyhow::Result; use faer::Mat; -use crate::algorithms::npag::burke; -use crate::algorithms::npag::NPAG; +use crate::algorithms::nonparametric::npag::burke; +use crate::algorithms::nonparametric::npag::NPAG; use crate::algorithms::Algorithms; +use crate::algorithms::NativeNonparametricConfig; use crate::algorithms::Status; +use crate::bestdose::types::BestDoseConfig; +use crate::estimation::nonparametric::{calculate_psi, Prior, Theta, Weights}; use crate::prelude::*; -use 
crate::structs::psi::calculate_psi; -use crate::structs::theta::Theta; -use crate::structs::weights::Weights; use pharmsol::prelude::*; // ============================================================================= @@ -172,21 +172,38 @@ pub fn npagfull_refinement( filtered_weights: &Weights, past_data: &Data, eq: &ODE, - settings: &Settings, + config: &BestDoseConfig, ) -> Result<(Theta, Weights)> { - if settings.config.cycles == 0 { + if config.refinement_cycles() == 0 { tracing::info!("Stage 1.2: NPAGFULL refinement skipped (max_cycles=0)"); return Ok((filtered_theta.clone(), filtered_weights.clone())); } tracing::info!( "Stage 1.2: NPAGFULL refinement (max_cycles={})", - settings.config.cycles + config.refinement_cycles() ); let mut refined_points = Vec::new(); let mut kept_weights: Vec = Vec::new(); let num_points = filtered_theta.matrix().nrows(); + let parameter_space = config.parameter_space().clone(); + let runtime = RuntimeOptions { + cycles: config.refinement_cycles(), + cache: true, + progress: config.progress(), + idelta: config.prediction_interval(), + tad: 0.0, + prior: None, + logging: LoggingOptions { + initialize: false, + level: LoggingLevel::Info, + write: false, + stdout: false, + }, + convergence: ConvergenceOptions::default(), + tuning: AlgorithmTuning::default(), + }; for i in 0..num_points { tracing::debug!(" Refining point {}/{}", i + 1, num_points); @@ -197,18 +214,27 @@ pub fn npagfull_refinement( // Create a single-point theta for NPAG initialization let n_params = point.len(); let single_point_matrix = Mat::from_fn(1, n_params, |_r, c| point[c]); - let single_point_theta = - Theta::from_parts(single_point_matrix, settings.parameters().clone()).unwrap(); - - // Configure NPAG for refinement - let mut npag_settings = settings.clone(); - npag_settings.disable_output(); // Don't write files for each refinement - npag_settings.set_prior(crate::routines::initialization::Prior::Theta( - single_point_theta.clone(), - )); + let 
single_point_theta = Theta::from_parts(single_point_matrix, parameter_space.clone())?; // Create and run NPAG - let mut npag = NPAG::new(npag_settings, eq.clone(), past_data.clone())?; + let mut npag = NPAG::from_config( + eq.clone(), + past_data.clone(), + config.error_models().clone(), + NativeNonparametricConfig { + ranges: parameter_space.finite_ranges()?, + parameter_space: parameter_space.clone(), + prior: Prior::Theta(single_point_theta.clone()), + max_cycles: config.refinement_cycles(), + progress: config.progress(), + run_configuration: crate::output::shared::RunConfiguration::new( + Algorithm::NPAG, + &OutputPlan::disabled(), + &runtime, + config.parameter_names(), + ), + }, + ); npag.set_theta(single_point_theta); // Run NPAG optimization @@ -280,10 +306,10 @@ pub fn npagfull_refinement( } // Build refined theta matrix - let n_params = settings.parameters().len(); + let n_params = parameter_space.len(); let n_points = refined_points.len(); let refined_matrix = Mat::from_fn(n_points, n_params, |r, c| refined_points[r][c]); - let refined_theta = Theta::from_parts(refined_matrix, settings.parameters().clone()).unwrap(); + let refined_theta = Theta::from_parts(refined_matrix, parameter_space).unwrap(); // Renormalize weights let weight_sum: f64 = kept_weights.iter().sum(); @@ -314,7 +340,7 @@ pub fn calculate_two_step_posterior( past_data: &Data, eq: &ODE, error_models: &AssayErrorModels, - settings: &Settings, + config: &BestDoseConfig, ) -> Result<(Theta, Weights, Weights)> { tracing::info!("=== STAGE 1: Posterior Density Calculation ==="); @@ -334,7 +360,7 @@ pub fn calculate_two_step_posterior( &filtered_posterior_weights, past_data, eq, - settings, + config, )?; tracing::info!( diff --git a/src/bestdose/predictions.rs b/src/bestdose/predictions.rs index 369c1065a..263449797 100644 --- a/src/bestdose/predictions.rs +++ b/src/bestdose/predictions.rs @@ -25,15 +25,13 @@ //! //! # See Also //! -//! 
- Configuration: `settings.predictions().idelta` controls time grid resolution
+//! - Configuration: `BestDoseConfig::prediction_interval()` controls time grid resolution
 
 use anyhow::Result;
 use faer::Mat;
 
 use crate::bestdose::types::{BestDoseProblem, Target};
-use crate::routines::output::posterior::Posterior;
-use crate::routines::output::predictions::NPPredictions;
-use crate::structs::weights::Weights;
+use crate::estimation::nonparametric::{NPPredictions, Posterior, Weights};
 use pharmsol::prelude::*;
 use pharmsol::Equation;
@@ -97,13 +95,27 @@ pub fn find_last_dose_time_before(subject: &Subject, obs_time: f64) -> f64 {
 ///
 /// # Returns
 /// Sorted, unique time vector suitable for AUC calculation
+fn prediction_interval_hours(interval: f64) -> f64 {
+    if interval <= 0.0 {
+        return 1.0 / 60.0;
+    }
+
+    if interval < 1.0 {
+        interval
+    } else {
+        interval / 60.0
+    }
+}
+
 pub fn calculate_dense_times(
     start_time: f64,
     end_time: f64,
     obs_times: &[f64],
-    idelta: usize,
+    idelta: f64,
 ) -> Vec {
-    let idelta_hours = (idelta as f64) / 60.0;
+    // BestDose historically used both sub-hour values like 0.12 and minute-style
+    // values like 60.0. Treat values below 1.0 as hours and values of 1.0 or more as minutes.
+ let idelta_hours = prediction_interval_hours(idelta); let mut times = Vec::new(); // Add observation times @@ -341,11 +353,10 @@ pub(crate) fn calculate_final_predictions( }) .collect(); - let idelta = problem.settings.predictions().idelta; + let idelta = problem.config.prediction_interval(); let start_time = 0.0; let end_time = obs_times.last().copied().unwrap_or(0.0); - let dense_times = - calculate_dense_times(start_time, end_time, &obs_times, idelta as usize); + let dense_times = calculate_dense_times(start_time, end_time, &obs_times, idelta); let subject_id = target_with_optimal.id().to_string(); let mut builder = Subject::builder(&subject_id); diff --git a/src/bestdose/types.rs b/src/bestdose/types.rs index 1e2a943c5..d759d9d7d 100644 --- a/src/bestdose/types.rs +++ b/src/bestdose/types.rs @@ -1,18 +1,16 @@ //! Core data types for the BestDose algorithm //! //! This module defines the main structures used throughout the BestDose optimization: -//! - [`BestDosePosterior`]: Two-stage API entry point — compute posterior, then optimize +//! - [`BestDosePosterior`]: Reusable posterior from stage 1 +//! - [`BestDoseProblem`]: The complete optimization problem specification //! - [`BestDoseResult`]: Output structure containing optimal doses and predictions //! - [`Target`]: Enum specifying concentration or AUC targets //! 
- [`DoseRange`]: Dose constraint specification use std::fmt::Display; +use crate::estimation::nonparametric::{NPPredictions, Prior, Theta, Weights}; use crate::prelude::*; -use crate::routines::output::predictions::NPPredictions; -use crate::routines::settings::Settings; -use crate::structs::theta::Theta; -use crate::structs::weights::Weights; use pharmsol::prelude::*; use serde::{Deserialize, Serialize}; @@ -51,7 +49,8 @@ use serde::{Deserialize, Serialize}; /// - Formula: `AUC(t) = ∫ₜ_last_dose^t C(τ) dτ` /// - Automatically finds the most recent bolus/infusion before each observation /// -/// Both methods use trapezoidal rule on a dense time grid controlled by `settings.predictions().idelta`. +/// Both methods use trapezoidal rule on a dense time grid controlled by +/// `BestDoseConfig::prediction_interval()`. #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] pub enum Target { /// Target concentrations at observation times @@ -85,9 +84,10 @@ pub enum Target { /// /// # Time Grid Resolution /// - /// Control the time grid density via settings: + /// Control the time grid density via BestDoseConfig: /// ```rust,ignore - /// settings.predictions().idelta = 15; // 15-minute intervals + /// let config = BestDoseConfig::new(parameter_space, error_models) + /// .with_prediction_interval(15.0); /// ``` AUCFromZero, @@ -183,131 +183,239 @@ impl Default for DoseRange { } } -/// The computed Bayesian posterior for a patient -/// -/// This is the main public entry point for the two-stage BestDose API: -/// -/// 1. **Stage 1: Posterior computation** ([`BestDosePosterior::compute()`]) -/// - NPAGFULL11: Bayesian filtering of prior support points -/// - NPAGFULL: Local refinement of each filtered point -/// -/// 2. 
**Stage 2: Dose optimization** ([`BestDosePosterior::optimize()`]) -/// - Dual optimization (posterior vs uniform weights) -/// - Final predictions with optimal doses -/// -/// The posterior can be reused across multiple `optimize()` calls with -/// different targets, dose ranges, or bias weights. -/// -/// # Example -/// -/// ```rust,no_run,ignore -/// use pmcore::bestdose::{BestDosePosterior, Target, DoseRange}; -/// -/// # fn example(population_theta: pmcore::structs::theta::Theta, -/// # population_weights: pmcore::structs::weights::Weights, -/// # past: pharmsol::prelude::Subject, -/// # target: pharmsol::prelude::Subject, -/// # eq: pharmsol::prelude::ODE, -/// # settings: pmcore::routines::settings::Settings) -/// # -> anyhow::Result<()> { -/// // Stage 1: Compute posterior (expensive, done once) -/// let posterior = BestDosePosterior::compute( -/// &population_theta, -/// &population_weights, -/// Some(past), -/// eq, -/// settings, -/// )?; +#[derive(Debug, Clone)] +pub struct BestDoseConfig { + pub(crate) parameter_space: ParameterSpace, + pub(crate) error_models: AssayErrorModels, + pub(crate) prior: Prior, + pub(crate) refinement_cycles: usize, + pub(crate) progress: bool, + pub(crate) prediction_interval: f64, +} + +impl BestDoseConfig { + pub fn new(parameter_space: ParameterSpace, error_models: AssayErrorModels) -> Self { + Self { + parameter_space, + error_models, + prior: Prior::default(), + refinement_cycles: 500, + progress: true, + prediction_interval: 0.12, + } + } + + pub fn with_prior(mut self, prior: Prior) -> Self { + self.prior = prior; + self + } + + pub fn with_refinement_cycles(mut self, refinement_cycles: usize) -> Self { + self.refinement_cycles = refinement_cycles; + self + } + + pub fn with_progress(mut self, progress: bool) -> Self { + self.progress = progress; + self + } + + pub fn with_prediction_interval(mut self, prediction_interval: f64) -> Self { + self.prediction_interval = prediction_interval; + self + } + + pub fn 
parameter_space(&self) -> &ParameterSpace { + &self.parameter_space + } + + pub fn error_models(&self) -> &AssayErrorModels { + &self.error_models + } + + pub fn prior(&self) -> &Prior { + &self.prior + } + + pub fn refinement_cycles(&self) -> usize { + self.refinement_cycles + } + + pub fn progress(&self) -> bool { + self.progress + } + + pub fn prediction_interval(&self) -> f64 { + self.prediction_interval + } + + pub(crate) fn parameter_names(&self) -> Vec { + self.parameter_space + .iter() + .map(|parameter| parameter.name.clone()) + .collect() + } +} + +/// The computed Bayesian posterior for a patient. /// -/// // Stage 2: Optimize doses (can be called multiple times) -/// let result = posterior.optimize( -/// target, -/// None, // No time offset -/// DoseRange::new(0.0, 1000.0), -/// 0.5, // bias_weight -/// Target::Concentration, -/// )?; -/// # Ok(()) -/// # } -/// ``` +/// This reusable object is the public two-stage BestDose entry point: +/// first compute the posterior once, then optimize multiple future targets. #[derive(Debug, Clone)] pub struct BestDosePosterior { - /// Refined posterior support points (from NPAGFULL11 + NPAGFULL) pub(crate) theta: Theta, - /// Posterior probability weights pub(crate) posterior: Weights, - /// Filtered population weights (used for bias term in cost function) pub(crate) population_weights: Weights, - /// Past patient data (stored for use in optimize() with time_offset) pub(crate) past_data: Option, - /// PK/PD model pub(crate) eq: ODE, - /// Settings (used for prediction grid, error models, etc.) 
- pub(crate) settings: Settings, + pub(crate) config: BestDoseConfig, } impl BestDosePosterior { - /// Get the refined posterior support points (Θ) pub fn theta(&self) -> &Theta { &self.theta } - /// Get the posterior probability weights pub fn posterior_weights(&self) -> &Weights { &self.posterior } - /// Get the filtered population weights used for the bias term pub fn population_weights(&self) -> &Weights { &self.population_weights } - /// Get the number of support points in the posterior pub fn n_support_points(&self) -> usize { self.theta.matrix().nrows() } } -/// Internal optimization problem (not exposed in public API) +/// The BestDose optimization problem +/// +/// Contains all data needed for the three-stage BestDose algorithm. +/// Create via [`BestDoseProblem::new()`], then call [`.optimize()`](BestDoseProblem::optimize) +/// to run the full algorithm. +/// +/// # Three-Stage Algorithm /// -/// Contains all data needed for dose optimization. -/// Created internally by [`BestDosePosterior::optimize()`]. +/// 1. **Posterior Density Calculation** (automatic in `new()`) +/// - NPAGFULL11: Bayesian filtering of prior support points +/// - NPAGFULL: Local refinement of each filtered point +/// +/// 2. **Dual Optimization** (automatic in `optimize()`) +/// - Optimization with posterior weights (patient-specific) +/// - Optimization with uniform weights (population-based) +/// - Selection of better result +/// +/// 3. 
**Final Predictions** (automatic in `optimize()`) +/// - Concentration or AUC predictions with optimal doses +/// +/// # Fields +/// +/// ## Input Data +/// - `target`: Future dosing template with target observations +/// - `target_type`: [`Target::Concentration`] or [`Target::AUC`] +/// +/// ## Population Prior +/// - `population_weights`: Filtered population probability weights (used for bias term) +/// +/// ## Patient-Specific Posterior +/// - `theta`: Refined posterior support points (from NPAGFULL11 + NPAGFULL) +/// - `posterior`: Posterior probability weights +/// +/// ## Model Components +/// - `eq`: Pharmacokinetic/pharmacodynamic ODE model +/// - `config`: BestDose nonparametric configuration (used for prediction grid) +/// +/// ## Optimization Parameters +/// - `doserange`: Min/max dose constraints +/// - `bias_weight` (λ): Personalization parameter (0=personalized, 1=population) +/// +/// # Example +/// +/// ```rust,no_run,ignore +/// use pmcore::bestdose::{BestDoseProblem, Target, DoseRange}; +/// +/// # fn example(population_theta: pmcore::estimation::nonparametric::Theta, +/// # population_weights: pmcore::estimation::nonparametric::Weights, +/// # past: pharmsol::prelude::Subject, +/// # target: pharmsol::prelude::Subject, +/// # eq: pharmsol::prelude::ODE, +/// # config: pmcore::bestdose::BestDoseConfig) +/// # -> anyhow::Result<()> { +/// let problem = BestDoseProblem::new( +/// &population_theta, +/// &population_weights, +/// Some(past), // Patient history +/// target, // Dosing template with targets +/// None, // time offset +/// eq, +/// DoseRange::new(0.0, 1000.0), +/// 0.5, // Balanced personalization +/// config, +/// Target::Concentration, +/// )?; +/// +/// let result = problem.optimize()?; +/// # Ok(()) +/// # } +/// ``` #[derive(Debug, Clone)] -pub(crate) struct BestDoseProblem { +pub struct BestDoseProblem { + /// Target subject with dosing template and target observations + /// + /// This [Subject] defines the targets for optimization, 
including + /// dose events (with amounts to be optimized) and observation events + /// (with desired target values). + /// + /// For a `Target::Concentration`, observation values are target concentrations. + /// For a `Target::AUC`, observation values are target cumulative AUC. + /// + /// Only doses with a value of `0.0` will be optimized; non-zero doses remain fixed. pub(crate) target: Subject, + /// Target type for optimization + /// + /// Specifies whether to optimize for concentrations or AUC values. pub(crate) target_type: Target, + + /// The population prior weights ([Weights]), representing the probability of each support point in the population. pub(crate) population_weights: Weights, + + // Patient-specific posterior (from NPAGFULL11 + NPAGFULL) pub(crate) theta: Theta, pub(crate) posterior: Weights, + + // Model and configuration pub(crate) eq: ODE, - pub(crate) settings: Settings, + pub(crate) config: BestDoseConfig, + + // Optimization parameters pub(crate) doserange: DoseRange, - pub(crate) bias_weight: f64, + pub(crate) bias_weight: f64, // λ: 0=personalized, 1=population } /// Result from BestDose optimization /// /// Contains the optimal doses and associated predictions from running -/// [`BestDosePosterior::optimize()`]. +/// [`BestDoseProblem::optimize()`]. 
/// /// # Fields /// -/// - `doses`: Optimal dose amount(s) in the same order as doses in target subject +/// - `dose`: Optimal dose amount(s) in the same order as doses in target subject /// - `objf`: Final cost function value at optimal doses -/// - `status`: Optimization status (converged or max iterations) -/// - `predictions`: Concentration-time predictions using optimal doses -/// - `auc_predictions`: AUC values at observation times (only for AUC targets) -/// - `optimization_method`: Which method won: `Posterior` or `Uniform` +/// - `status`: Optimization status message (e.g., "converged", "max iterations") +/// - `preds`: Concentration-time predictions using optimal doses +/// - `auc_predictions`: AUC values at observation times (only for [`Target::AUC`]) +/// - `optimization_method`: Which method won: `"posterior"` or `"uniform"` /// /// # Interpretation /// /// ## Optimization Method /// -/// - **`Posterior`**: Patient-specific optimization won (uses posterior weights) +/// - **"posterior"**: Patient-specific optimization won (uses posterior weights) /// - Indicates patient differs from population or has sufficient history /// - Doses are highly personalized /// -/// - **`Uniform`**: Population-based optimization won (uses uniform weights) +/// - **"uniform"**: Population-based optimization won (uses uniform weights) /// - Indicates patient is population-typical or has limited history /// - Doses are more conservative/robust /// @@ -323,26 +431,32 @@ pub(crate) struct BestDoseProblem { /// ## Extracting Results /// /// ```rust,no_run,ignore -/// # use pmcore::bestdose::{BestDosePosterior, Target, DoseRange, BestDoseResult}; -/// # fn example(posterior: BestDosePosterior, -/// # target: pharmsol::prelude::Subject) -> anyhow::Result<()> { -/// let result = posterior.optimize( -/// target, None, DoseRange::new(0.0, 1000.0), 0.5, Target::Concentration, -/// )?; +/// # use pmcore::bestdose::BestDoseProblem; +/// # fn example(problem: BestDoseProblem) -> 
anyhow::Result<()> { +/// let result = problem.optimize()?; /// /// // Single dose -/// println!("Optimal dose: {} mg", result.doses()[0]); +/// println!("Optimal dose: {} mg", result.dose[0]); /// /// // Multiple doses -/// for (i, dose) in result.doses().iter().enumerate() { +/// for (i, &dose) in result.dose.iter().enumerate() { /// println!("Dose {}: {} mg", i + 1, dose); /// } /// /// // Check which method was used -/// println!("Method: {}", result.optimization_method()); +/// match result.optimization_method.as_str() { +/// "posterior" => println!("Patient-specific optimization"), +/// "uniform" => println!("Population-based optimization"), +/// _ => {} +/// } +/// +/// // Access predictions +/// for pred in result.preds.iter() { +/// println!("t={:.1}h: {:.2} mg/L", pred.time(), pred.prediction()); +/// } /// /// // For AUC targets -/// if let Some(auc_values) = result.auc_predictions() { +/// if let Some(auc_values) = result.auc_predictions { /// for (time, auc) in auc_values { /// println!("AUC at t={:.1}h: {:.1} mg·h/L", time, auc); /// } diff --git a/src/compile/caches.rs b/src/compile/caches.rs new file mode 100644 index 000000000..1ceeb2961 --- /dev/null +++ b/src/compile/caches.rs @@ -0,0 +1,6 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq, Eq)] +pub struct ExecutionCaches { + pub prediction_cache_enabled: bool, +} diff --git a/src/compile/compiled_problem.rs b/src/compile/compiled_problem.rs new file mode 100644 index 000000000..caf486bc1 --- /dev/null +++ b/src/compile/compiled_problem.rs @@ -0,0 +1,58 @@ +use pharmsol::equation::Equation; + +use crate::api::{EstimationMethod, OutputPlan, RuntimeOptions}; +use crate::compile::{DesignContext, ExecutionCaches, ObservationIndex}; +use crate::model::ModelDefinition; +use pharmsol::Data; + +#[derive(Debug, Clone)] +pub struct CompiledProblem { + pub model: ModelDefinition, + pub data: Data, + method: EstimationMethod, + output: OutputPlan, + 
runtime: RuntimeOptions, + pub design: DesignContext, + pub observation_index: ObservationIndex, + pub caches: ExecutionCaches, +} + +impl CompiledProblem { + pub fn new( + model: ModelDefinition, + data: Data, + method: EstimationMethod, + output: OutputPlan, + runtime: RuntimeOptions, + design: DesignContext, + observation_index: ObservationIndex, + caches: ExecutionCaches, + ) -> Self { + Self { + model, + data, + method, + output, + runtime, + design, + observation_index, + caches, + } + } + + pub fn method(&self) -> EstimationMethod { + self.method + } + + pub fn output_plan(&self) -> &OutputPlan { + &self.output + } + + pub fn runtime_options(&self) -> &RuntimeOptions { + &self.runtime + } + + pub fn into_parts(self) -> (ModelDefinition, Data) { + (self.model, self.data) + } +} diff --git a/src/compile/design_context.rs b/src/compile/design_context.rs new file mode 100644 index 000000000..9927e1b7f --- /dev/null +++ b/src/compile/design_context.rs @@ -0,0 +1,59 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct DesignContext { + pub parameter_names: Vec, + pub subjects: Vec, + pub occasions: Vec, + pub structured_covariates: StructuredCovariateDesign, +} + +impl DesignContext { + pub fn subject_count(&self) -> usize { + self.subjects.len() + } + + pub fn occasion_count(&self) -> usize { + self.occasions.len() + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct SubjectDesign { + pub subject_index: usize, + pub id: String, + pub occasion_count: usize, + pub observation_count: usize, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct OccasionDesign { + pub subject_index: usize, + pub occasion_index: usize, + pub event_count: usize, + pub observation_count: usize, +} + +#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq)] +pub struct StructuredCovariateDesign { + pub subject_columns: Vec, + pub subject_rows: Vec, + pub 
occasion_columns: Vec, + pub occasion_rows: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct SubjectCovariateRow { + pub subject_index: usize, + pub id: String, + pub anchor_time: f64, + pub values: Vec>, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct OccasionCovariateRow { + pub subject_index: usize, + pub occasion_index: usize, + pub anchor_time: f64, + pub values: Vec>, +} diff --git a/src/compile/mod.rs b/src/compile/mod.rs new file mode 100644 index 000000000..267c9ed0a --- /dev/null +++ b/src/compile/mod.rs @@ -0,0 +1,242 @@ +use anyhow::Result; +use pharmsol::{Data, Equation, Event}; + +use crate::api::EstimationProblem; +use crate::model::{CovariateSpec, ParameterSpace}; + +mod caches; +mod compiled_problem; +mod design_context; +mod observation_index; +mod validation; + +pub use caches::ExecutionCaches; +pub use compiled_problem::CompiledProblem; +pub use design_context::{ + DesignContext, OccasionCovariateRow, OccasionDesign, StructuredCovariateDesign, + SubjectCovariateRow, SubjectDesign, +}; +pub use observation_index::{ObservationIndex, ObservationRecord}; +pub use validation::validate_problem; + +impl EstimationProblem { + pub fn compile(self) -> Result> { + compile_problem(self) + } + + pub fn initialize_logs(&self) -> Result<()> { + crate::output::logging::setup_log_with_options(&self.output, &self.runtime.logging) + } +} + +pub fn compile_problem( + problem: EstimationProblem, +) -> Result> { + validate_problem(&problem)?; + + let design = build_design_context( + &problem.model.parameters, + &problem.model.covariates, + &problem.data, + ); + let observation_index = build_observation_index(&problem.data); + let caches = ExecutionCaches { + prediction_cache_enabled: problem.runtime.cache, + }; + + Ok(CompiledProblem::new( + problem.model, + problem.data, + problem.method, + problem.output, + problem.runtime, + design, + observation_index, + caches, + )) +} + +fn build_design_context( + 
parameter_space: &ParameterSpace, + covariates: &CovariateSpec, + data: &Data, +) -> DesignContext { + let subjects = data.subjects(); + + let subject_design = subjects + .iter() + .enumerate() + .map(|(subject_index, subject)| { + let occasions = subject.occasions(); + let observation_count = occasions + .iter() + .map(|occasion| { + occasion + .events() + .iter() + .filter(|event| matches!(event, Event::Observation(_))) + .count() + }) + .sum(); + + SubjectDesign { + subject_index, + id: subject.id().clone(), + occasion_count: occasions.len(), + observation_count, + } + }) + .collect::>(); + + let occasion_design = subjects + .iter() + .enumerate() + .flat_map(|(subject_index, subject)| { + subject.occasions().into_iter().map(move |occasion| { + let events = occasion.events(); + let observation_count = events + .iter() + .filter(|event| matches!(event, Event::Observation(_))) + .count(); + + OccasionDesign { + subject_index, + occasion_index: occasion.index(), + event_count: events.len(), + observation_count, + } + }) + }) + .collect::>(); + + let structured_covariates = match covariates { + CovariateSpec::InEquation => StructuredCovariateDesign::default(), + CovariateSpec::Structured(spec) => build_structured_covariate_design( + &spec.subject_columns(), + &spec.occasion_columns(), + data, + ), + }; + + DesignContext { + parameter_names: parameter_space + .iter() + .map(|item| item.name.clone()) + .collect(), + subjects: subject_design, + occasions: occasion_design, + structured_covariates, + } +} + +fn build_structured_covariate_design( + subject_columns: &[String], + occasion_columns: &[String], + data: &Data, +) -> StructuredCovariateDesign { + let subject_rows = data + .subjects() + .iter() + .enumerate() + .map(|(subject_index, subject)| { + let anchor_time = subject_anchor_time(subject); + let values = subject_columns + .iter() + .map(|name| subject_covariate_value(subject, name)) + .collect(); + + SubjectCovariateRow { + subject_index, + id: 
subject.id().clone(), + anchor_time, + values, + } + }) + .collect(); + + let occasion_rows = data + .subjects() + .iter() + .enumerate() + .flat_map(|(subject_index, subject)| { + subject.occasions().into_iter().map(move |occasion| { + let anchor_time = occasion_anchor_time(occasion); + let values = occasion_columns + .iter() + .map(|name| { + occasion + .covariates() + .get_covariate(name) + .and_then(|covariate| covariate.interpolate(anchor_time).ok()) + }) + .collect(); + + OccasionCovariateRow { + subject_index, + occasion_index: occasion.index(), + anchor_time, + values, + } + }) + }) + .collect(); + + StructuredCovariateDesign { + subject_columns: subject_columns.to_vec(), + subject_rows, + occasion_columns: occasion_columns.to_vec(), + occasion_rows, + } +} + +fn subject_anchor_time(subject: &pharmsol::Subject) -> f64 { + subject + .occasions() + .into_iter() + .find_map(|occasion| occasion.events().first().map(|event| event.time())) + .unwrap_or(0.0) +} + +fn subject_covariate_value(subject: &pharmsol::Subject, name: &str) -> Option { + subject.occasions().into_iter().find_map(|occasion| { + let anchor_time = occasion_anchor_time(occasion); + occasion + .covariates() + .get_covariate(name) + .and_then(|covariate| covariate.interpolate(anchor_time).ok()) + }) +} + +fn occasion_anchor_time(occasion: &pharmsol::Occasion) -> f64 { + occasion + .events() + .first() + .map(|event| event.time()) + .unwrap_or(0.0) +} + +fn build_observation_index(data: &Data) -> ObservationIndex { + let records = + data.subjects() + .iter() + .enumerate() + .flat_map(|(subject_index, subject)| { + subject.occasions().into_iter().flat_map(move |occasion| { + occasion.events().into_iter().enumerate().filter_map( + move |(event_index, event)| match event { + Event::Observation(observation) => Some(ObservationRecord { + subject_index, + occasion_index: occasion.index(), + event_index, + outeq: observation.outeq(), + time: observation.time(), + }), + _ => None, + }, + ) + }) + }) + 
.collect::>(); + + ObservationIndex { records } +} diff --git a/src/compile/observation_index.rs b/src/compile/observation_index.rs new file mode 100644 index 000000000..7cd989de9 --- /dev/null +++ b/src/compile/observation_index.rs @@ -0,0 +1,25 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq)] +pub struct ObservationIndex { + pub records: Vec, +} + +impl ObservationIndex { + pub fn len(&self) -> usize { + self.records.len() + } + + pub fn is_empty(&self) -> bool { + self.records.is_empty() + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct ObservationRecord { + pub subject_index: usize, + pub occasion_index: usize, + pub event_index: usize, + pub outeq: usize, + pub time: f64, +} diff --git a/src/compile/validation.rs b/src/compile/validation.rs new file mode 100644 index 000000000..9ded6f62c --- /dev/null +++ b/src/compile/validation.rs @@ -0,0 +1,22 @@ +use anyhow::{bail, Result}; +use pharmsol::equation::Equation; + +use crate::api::EstimationProblem; + +pub fn validate_problem(problem: &EstimationProblem) -> Result<()> { + if problem.model.parameters.is_empty() { + bail!("estimation problem requires at least one parameter"); + } + + if problem.runtime.cycles == 0 { + bail!("runtime cycles must be greater than zero"); + } + + if problem.model.observations.channels.is_empty() { + bail!("at least one observation channel is required"); + } + + problem.model.parameters.finite_ranges()?; + + Ok(()) +} diff --git a/src/estimation/mod.rs b/src/estimation/mod.rs new file mode 100644 index 000000000..d97ede1fc --- /dev/null +++ b/src/estimation/mod.rs @@ -0,0 +1 @@ +pub mod nonparametric; diff --git a/src/routines/output/cycles.rs b/src/estimation/nonparametric/cycles.rs similarity index 83% rename from src/routines/output/cycles.rs rename to src/estimation/nonparametric/cycles.rs index 24af587e0..6a60e4fd6 100644 --- a/src/routines/output/cycles.rs +++ 
b/src/estimation/nonparametric/cycles.rs @@ -5,20 +5,11 @@ use serde::Serialize; use crate::{ algorithms::{Status, StopReason}, - prelude::Settings, - routines::output::{median, OutputFile}, - structs::theta::Theta, + estimation::nonparametric::median, + estimation::nonparametric::theta::Theta, + output::OutputFile, }; -/// An [NPCycle] object contains the summary of a cycle -/// It holds the following information: -/// - `cycle`: The cycle number -/// - `objf`: The objective function value -/// - `gamlam`: The assay noise parameter, either gamma or lambda -/// - `theta`: The support points and their associated probabilities -/// - `nspp`: The number of support points -/// - `delta_objf`: The change in objective function value from last cycle -/// - `converged`: Whether the algorithm has reached convergence #[derive(Debug, Clone, Serialize)] pub struct NPCycle { cycle: usize, @@ -86,7 +77,6 @@ impl NPCycle { } } -/// This holdes a vector of [NPCycle] objects to provide a more detailed log #[derive(Debug, Clone, Serialize)] pub struct CycleLog { cycles: Vec, @@ -105,14 +95,13 @@ impl CycleLog { self.cycles.push(cycle); } - pub fn write(&self, settings: &Settings) -> Result<()> { + pub fn write(&self, folder: &str, parameter_names: &[String]) -> Result<()> { tracing::debug!("Writing cycles..."); - let outputfile = OutputFile::new(&settings.output().path, "cycles.csv")?; + let outputfile = OutputFile::new(folder, "iterations.csv")?; let mut writer = WriterBuilder::new() .has_headers(false) - .from_writer(&outputfile.file); + .from_writer(outputfile.file()); - // Write headers writer.write_field("cycle")?; writer.write_field("converged")?; writer.write_field("status")?; @@ -135,8 +124,7 @@ impl CycleLog { )?; } - let parameter_names = settings.parameters().names(); - for param_name in ¶meter_names { + for param_name in parameter_names { writer.write_field(format!("{}.mean", param_name))?; writer.write_field(format!("{}.median", param_name))?; 
writer.write_field(format!("{}.sd", param_name))?; @@ -156,7 +144,6 @@ impl CycleLog { .write_field(format!("{}", cycle.theta.nspp())) .unwrap(); - // Write the error models cycle.error_models.iter().try_for_each( |(_, errmod): (usize, &AssayErrorModel)| -> Result<()> { match errmod { diff --git a/src/estimation/nonparametric/engine.rs b/src/estimation/nonparametric/engine.rs new file mode 100644 index 000000000..a81b3e563 --- /dev/null +++ b/src/estimation/nonparametric/engine.rs @@ -0,0 +1,31 @@ +use anyhow::Result; +use pharmsol::Equation; + +use crate::algorithms::{run_nonparametric_algorithm, NonparametricAlgorithmInput}; +use crate::api::EstimationMethod; +use crate::compile::CompiledProblem; +use crate::estimation::nonparametric::workspace::NonparametricWorkspace; +use crate::results::FitResult; + +#[derive(Debug, Default, Clone, Copy)] +pub struct NonparametricEngine; + +impl NonparametricEngine { + pub fn fit( + problem: CompiledProblem, + ) -> Result> { + let EstimationMethod::Nonparametric(method) = problem.method(); + let output = problem.output_plan().clone(); + let runtime = problem.runtime_options().clone(); + let (model, data) = problem.into_parts(); + let input = NonparametricAlgorithmInput::new(method, model, data, output, runtime); + run_nonparametric_algorithm(input) + } +} + +pub fn fit( + problem: CompiledProblem, +) -> Result> { + let workspace = NonparametricEngine::fit(problem)?; + Ok(workspace.into_fit_result()) +} diff --git a/src/estimation/nonparametric/expansion.rs b/src/estimation/nonparametric/expansion.rs new file mode 100644 index 000000000..900e9ca13 --- /dev/null +++ b/src/estimation/nonparametric/expansion.rs @@ -0,0 +1,63 @@ +use crate::estimation::nonparametric::Theta; +use anyhow::Result; +use faer::Row; + +/// Implements the adaptive grid algorithm for support point expansion. 
+pub fn adaptative_grid( + theta: &mut Theta, + eps: f64, + ranges: &[(f64, f64)], + min_dist: f64, +) -> Result<()> { + let mut candidates = Vec::new(); + + for spp in theta.matrix().row_iter() { + for (j, val) in spp.iter().enumerate() { + let l = eps * (ranges[j].1 - ranges[j].0); + if val + l < ranges[j].1 { + let mut plus = Row::zeros(spp.ncols()); + plus[j] = l; + plus += spp; + candidates.push(plus.iter().copied().collect::>()); + } + if val - l > ranges[j].0 { + let mut minus = Row::zeros(spp.ncols()); + minus[j] = -l; + minus += spp; + candidates.push(minus.iter().copied().collect::>()); + } + } + } + + let keep = candidates + .iter() + .filter(|point| theta.check_point(point, min_dist)) + .cloned() + .collect::>(); + + for point in keep { + theta.add_point(point.as_slice())?; + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::model::{ParameterSpace, ParameterSpec}; + use faer::mat; + + #[test] + fn adaptive_grid_expands_points_within_bounds() { + let parameters = ParameterSpace::new() + .add(ParameterSpec::bounded("x", 0.0, 1.0)) + .add(ParameterSpec::bounded("y", 0.0, 1.0)); + let mut theta = Theta::from_parts(mat![[0.5, 0.5]], parameters).unwrap(); + let ranges = [(0.0, 1.0), (0.0, 1.0)]; + + adaptative_grid(&mut theta, 0.1, &ranges, 0.05).unwrap(); + + assert_eq!(theta.matrix().nrows(), 5); + } +} diff --git a/src/estimation/nonparametric/ipm.rs b/src/estimation/nonparametric/ipm.rs new file mode 100644 index 000000000..719fe1b55 --- /dev/null +++ b/src/estimation/nonparametric/ipm.rs @@ -0,0 +1,242 @@ +use crate::estimation::nonparametric::{Psi, Weights}; +use anyhow::bail; +use faer::linalg::triangular_solve::solve_lower_triangular_in_place; +use faer::linalg::triangular_solve::solve_upper_triangular_in_place; +use faer::{Col, Mat, Row}; +use rayon::prelude::*; + +/// Applies Burke's Interior Point Method (IPM) to solve a convex optimization problem. 
+pub fn burke(psi: &Psi) -> anyhow::Result<(Weights, f64)> { + let mut psi = psi.matrix().to_owned(); + + psi.row_iter_mut().try_for_each(|row| { + row.iter_mut().try_for_each(|x| { + if !x.is_finite() { + bail!("Input matrix must have finite entries") + } else { + *x = x.abs(); + Ok(()) + } + }) + })?; + + let (n_sub, n_point) = psi.shape(); + let ecol: Col = Col::from_fn(n_point, |_| 1.0); + let erow: Row = Row::from_fn(n_sub, |_| 1.0); + let mut plam: Col = &psi * &ecol; + let eps: f64 = 1e-8; + let mut sig: f64 = 0.0; + let mut lam = ecol.clone(); + let mut w: Col = Col::from_fn(plam.nrows(), |i| 1.0 / plam.get(i)); + let mut ptw: Col = psi.transpose() * &w; + + let ptw_max = ptw.iter().fold(f64::NEG_INFINITY, |acc, &x| x.max(acc)); + let shrink = 2.0 * ptw_max; + lam *= shrink; + plam *= shrink; + w /= shrink; + ptw /= shrink; + + let mut y: Col = &ecol - &ptw; + let mut r: Col = Col::from_fn(n_sub, |i| erow.get(i) - w.get(i) * plam.get(i)); + let mut norm_r: f64 = r.iter().fold(0.0, |max, &val| max.max(val.abs())); + let sum_log_plam: f64 = plam.iter().map(|x| x.ln()).sum(); + let sum_log_w: f64 = w.iter().map(|x| x.ln()).sum(); + let mut gap: f64 = (sum_log_w + sum_log_plam).abs() / (1.0 + sum_log_plam); + let mut mu = lam.transpose() * &y / n_point as f64; + + let mut psi_inner: Mat = Mat::zeros(psi.nrows(), psi.ncols()); + let n_threads = faer::get_global_parallelism().degree(); + let rows = psi.nrows(); + let mut output: Vec> = (0..n_threads).map(|_| Mat::zeros(rows, rows)).collect(); + let mut h: Mat = Mat::zeros(rows, rows); + + while mu > eps || norm_r > eps || gap > eps { + let smu = sig * mu; + let inner = Col::from_fn(lam.nrows(), |i| lam.get(i) / y.get(i)); + let w_plam = Col::from_fn(plam.nrows(), |i| plam.get(i) / w.get(i)); + + if psi.ncols() > n_threads * 128 { + psi_inner + .par_col_partition_mut(n_threads) + .zip(psi.par_col_partition(n_threads)) + .zip(inner.par_partition(n_threads)) + .zip(output.par_iter_mut()) + .for_each(|(((mut 
psi_inner, psi), inner), output)| { + psi_inner + .as_mut() + .col_iter_mut() + .zip(psi.col_iter()) + .zip(inner.iter()) + .for_each(|((col, psi_col), inner_val)| { + col.iter_mut().zip(psi_col.iter()).for_each(|(x, psi_val)| { + *x = psi_val * inner_val; + }); + }); + faer::linalg::matmul::triangular::matmul( + output.as_mut(), + faer::linalg::matmul::triangular::BlockStructure::TriangularLower, + faer::Accum::Replace, + &psi_inner, + faer::linalg::matmul::triangular::BlockStructure::Rectangular, + psi.transpose(), + faer::linalg::matmul::triangular::BlockStructure::Rectangular, + 1.0, + faer::Par::Seq, + ); + }); + + let mut first_iter = true; + for output in &output { + if first_iter { + h.copy_from(output); + first_iter = false; + } else { + h += output; + } + } + } else { + psi_inner + .as_mut() + .col_iter_mut() + .zip(psi.col_iter()) + .zip(inner.iter()) + .for_each(|((col, psi_col), inner_val)| { + col.iter_mut().zip(psi_col.iter()).for_each(|(x, psi_val)| { + *x = psi_val * inner_val; + }); + }); + faer::linalg::matmul::triangular::matmul( + h.as_mut(), + faer::linalg::matmul::triangular::BlockStructure::TriangularLower, + faer::Accum::Replace, + &psi_inner, + faer::linalg::matmul::triangular::BlockStructure::Rectangular, + psi.transpose(), + faer::linalg::matmul::triangular::BlockStructure::Rectangular, + 1.0, + faer::Par::Seq, + ); + } + + for i in 0..h.nrows() { + h[(i, i)] += w_plam[i]; + } + + let uph = match h.llt(faer::Side::Lower) { + Ok(llt) => llt, + Err(_) => { + bail!("Error during Cholesky decomposition. The matrix might not be positive definite. 
This is usually due to model misspecification or numerical issues.") + } + }; + let uph = uph.L().transpose().to_owned(); + + let smuyinv: Col = Col::from_fn(ecol.nrows(), |i| smu * (ecol[i] / y[i])); + let psi_dot_muyinv: Col = &psi * &smuyinv; + let rhsdw: Row = Row::from_fn(erow.ncols(), |i| erow[i] / w[i] - psi_dot_muyinv[i]); + let mut dw = Mat::from_fn(rhsdw.ncols(), 1, |i, _j| *rhsdw.get(i)); + + solve_lower_triangular_in_place(uph.transpose().as_ref(), dw.as_mut(), faer::Par::rayon(0)); + solve_upper_triangular_in_place(uph.as_ref(), dw.as_mut(), faer::Par::rayon(0)); + + let dw = dw.col(0); + let dy = -(psi.transpose() * dw); + let inner_times_dy = Col::from_fn(ecol.nrows(), |i| inner[i] * dy[i]); + let dlam: Row = + Row::from_fn(ecol.nrows(), |i| smuyinv[i] - lam[i] - inner_times_dy[i]); + + let ratio_dlam_lam = Row::from_fn(lam.nrows(), |i| dlam[i] / lam[i]); + let min_ratio_dlam = ratio_dlam_lam.iter().cloned().fold(f64::INFINITY, f64::min); + let mut alfpri: f64 = -1.0 / min_ratio_dlam.min(-0.5); + alfpri = (0.99995 * alfpri).min(1.0); + + let ratio_dy_y = Row::from_fn(y.nrows(), |i| dy[i] / y[i]); + let min_ratio_dy = ratio_dy_y.iter().cloned().fold(f64::INFINITY, f64::min); + let ratio_dw_w = Row::from_fn(dw.nrows(), |i| dw[i] / w[i]); + let min_ratio_dw = ratio_dw_w.iter().cloned().fold(f64::INFINITY, f64::min); + let mut alfdual = -1.0 / min_ratio_dy.min(-0.5); + alfdual = alfdual.min(-1.0 / min_ratio_dw.min(-0.5)); + alfdual = (0.99995 * alfdual).min(1.0); + + lam += alfpri * dlam.transpose(); + w += alfdual * dw; + y += alfdual * &dy; + + mu = lam.transpose() * &y / n_point as f64; + plam = &psi * &lam; + r = Col::from_fn(n_sub, |i| erow.get(i) - w.get(i) * plam.get(i)); + ptw -= alfdual * dy; + + norm_r = r.norm_max(); + let sum_log_plam: f64 = plam.iter().map(|x| x.ln()).sum(); + let sum_log_w: f64 = w.iter().map(|x| x.ln()).sum(); + gap = (sum_log_w + sum_log_plam).abs() / (1.0 + sum_log_plam); + + if mu < eps && norm_r > eps { + sig = 1.0; + 
} else { + let candidate1 = (1.0 - alfpri).powi(2); + let candidate2 = (1.0 - alfdual).powi(2); + let candidate3 = (norm_r - mu) / (norm_r + 100.0 * mu); + sig = candidate1.max(candidate2).max(candidate3).min(0.3); + } + } + + lam /= n_sub as f64; + let obj = (psi * &lam).iter().map(|x| x.ln()).sum(); + let lam_sum: f64 = lam.iter().sum(); + lam = &lam / lam_sum; + + Ok((lam.into(), obj)) +} + +#[cfg(test)] +mod tests { + use super::*; + use approx::assert_relative_eq; + use faer::Mat; + + #[test] + fn test_burke_identity() { + let n = 100; + let mat = Mat::identity(n, n); + let psi = Psi::from(mat); + let (lam, _) = burke(&psi).unwrap(); + + let expected = 1.0 / n as f64; + for i in 0..n { + assert_relative_eq!(lam[i], expected, epsilon = 1e-10); + } + assert_relative_eq!(lam.iter().sum::(), 1.0, epsilon = 1e-10); + } + + #[test] + fn test_burke_uniform_square() { + let n_sub = 10; + let n_point = 10; + let mat = Mat::from_fn(n_sub, n_point, |_, _| 1.0); + let psi = Psi::from(mat); + let (lam, _) = burke(&psi).unwrap(); + + assert_relative_eq!(lam.iter().sum::(), 1.0, epsilon = 1e-10); + let expected = 1.0 / n_point as f64; + for i in 0..n_point { + assert_relative_eq!(lam[i], expected, epsilon = 1e-10); + } + } + + #[test] + fn test_burke_with_non_finite_values() { + let n_sub = 10; + let n_point = 10; + let mat = Mat::from_fn(n_sub, n_point, |i, j| { + if i == 0 && j == 0 { + f64::NAN + } else { + 1.0 + } + }); + let psi = Psi::from(mat); + assert!(burke(&psi).is_err()); + } +} diff --git a/src/estimation/nonparametric/mod.rs b/src/estimation/nonparametric/mod.rs new file mode 100644 index 000000000..03e1d7c22 --- /dev/null +++ b/src/estimation/nonparametric/mod.rs @@ -0,0 +1,30 @@ +mod cycles; +mod engine; +mod expansion; +pub(crate) mod ipm; +mod posterior; +mod predictions; +mod prior; +mod psi; +pub(crate) mod qr; +mod statistics; +mod summaries; +mod theta; +mod weights; +mod workspace; + +pub use cycles::{CycleLog, NPCycle}; +pub use engine::{fit, 
NonparametricEngine}; +pub(crate) use expansion::adaptative_grid; +pub use ipm::burke; +pub use posterior::{posterior, Posterior}; +pub use predictions::{NPPredictionRow, NPPredictions}; +pub(crate) use prior::sample_space_for_parameters; +pub use prior::{read_prior, Prior}; +pub(crate) use psi::calculate_psi; +pub use psi::Psi; +pub use statistics::{median, population_mean_median, posterior_mean_median, weighted_median}; +pub use summaries::{fit_summary, individual_summaries, population_summary}; +pub use theta::Theta; +pub use weights::Weights; +pub use workspace::NonparametricWorkspace; diff --git a/src/routines/output/posterior.rs b/src/estimation/nonparametric/posterior.rs similarity index 68% rename from src/routines/output/posterior.rs rename to src/estimation/nonparametric/posterior.rs index 008ce16c1..d9ff19a18 100644 --- a/src/routines/output/posterior.rs +++ b/src/estimation/nonparametric/posterior.rs @@ -2,31 +2,18 @@ pub use anyhow::{bail, Result}; use faer::Mat; use serde::{Deserialize, Serialize}; -use crate::structs::{psi::Psi, weights::Weights}; +use crate::estimation::nonparametric::{psi::Psi, weights::Weights}; -/// Posterior probabilities for each support points #[derive(Debug, Clone)] pub struct Posterior { mat: Mat, } impl Posterior { - /// Create a new Posterior from a matrix fn new(mat: Mat) -> Self { Posterior { mat } } - /// Calculate the posterior probabilities for each support point given the weights - /// - /// The shape is the same as [Psi], and thus subjects are the rows and support points are the columns. - /// /// # Errors - /// Returns an error if the number of rows in `psi` does not match the number of weights in `w`. - /// # Arguments - /// * `psi` - The Psi object containing the matrix of support points. - /// * `w` - The weights for each support point. - /// # Returns - /// A Result containing the Posterior probabilities if successful, or an error if the - /// dimensions do not match. 
pub fn calculate(psi: &Psi, w: &Weights) -> Result { if psi.matrix().ncols() != w.weights().nrows() { bail!( @@ -46,17 +33,13 @@ impl Posterior { Ok(posterior.into()) } - /// Get a reference to the underlying matrix pub fn matrix(&self) -> &Mat { &self.mat } - /// Write the posterior probabilities to a CSV file - /// Each row represents a subject, each column represents a support point pub fn to_csv(&self, writer: W) -> Result<()> { let mut csv_writer = csv::Writer::from_writer(writer); - // Write each row for i in 0..self.mat.nrows() { let row: Vec = (0..self.mat.ncols()).map(|j| *self.mat.get(i, j)).collect(); csv_writer.serialize(row)?; @@ -66,8 +49,6 @@ impl Posterior { Ok(()) } - /// Read posterior probabilities from a CSV file - /// Each row represents a subject, each column represents a support point pub fn from_csv(reader: R) -> Result { let mut csv_reader = csv::Reader::from_reader(reader); let mut rows: Vec> = Vec::new(); @@ -84,21 +65,18 @@ impl Posterior { let nrows = rows.len(); let ncols = rows[0].len(); - // Verify all rows have the same length for (i, row) in rows.iter().enumerate() { if row.len() != ncols { bail!("Row {} has {} columns, expected {}", i, row.len(), ncols); } } - // Create matrix from rows let mat = Mat::from_fn(nrows, ncols, |i, j| rows[i][j]); Ok(Posterior::new(mat)) } } -/// Convert a matrix to a [Posterior] impl From> for Posterior { fn from(mat: Mat) -> Self { Posterior::new(mat) @@ -114,7 +92,6 @@ impl Serialize for Posterior { let mut seq = serializer.serialize_seq(Some(self.mat.nrows()))?; - // Serialize each row as a vector for i in 0..self.mat.nrows() { let row: Vec = (0..self.mat.ncols()).map(|j| *self.mat.get(i, j)).collect(); seq.serialize_element(&row)?; @@ -158,7 +135,6 @@ impl<'de> Deserialize<'de> for Posterior { let nrows = rows.len(); let ncols = rows[0].len(); - // Verify all rows have the same length for (i, row) in rows.iter().enumerate() { if row.len() != ncols { return Err(serde::de::Error::custom(format!( @@ 
-170,7 +146,6 @@ impl<'de> Deserialize<'de> for Posterior { } } - // Create matrix from rows let mat = Mat::from_fn(nrows, ncols, |i, j| rows[i][j]); Ok(Posterior::new(mat)) @@ -181,24 +156,6 @@ impl<'de> Deserialize<'de> for Posterior { } } -/// Calculates the posterior probabilities for each support point given the weights -/// -/// The shape is the same as [Psi], and thus subjects are the rows and support points are the columns. pub fn posterior(psi: &Psi, w: &Weights) -> Result { - if psi.matrix().ncols() != w.len() { - bail!( - "Number of rows in psi ({}) and number of weights ({}) do not match.", - psi.matrix().nrows(), - w.len() - ); - } - - let psi_matrix = psi.matrix(); - let py = psi_matrix * w.weights(); - - let posterior = Mat::from_fn(psi_matrix.nrows(), psi_matrix.ncols(), |i, j| { - psi_matrix.get(i, j) * w.weights().get(j) / py.get(i) - }); - - Ok(posterior.into()) + Posterior::calculate(psi, w) } diff --git a/src/routines/output/predictions.rs b/src/estimation/nonparametric/predictions.rs similarity index 63% rename from src/routines/output/predictions.rs rename to src/estimation/nonparametric/predictions.rs index 64f78e41b..4cbd9d011 100644 --- a/src/routines/output/predictions.rs +++ b/src/estimation/nonparametric/predictions.rs @@ -3,36 +3,21 @@ use pharmsol::{prelude::simulator::Prediction, Censor, Data, Predictions as Pred use serde::{Deserialize, Serialize}; use crate::{ - routines::output::{posterior::Posterior, weighted_median}, - structs::{theta::Theta, weights::Weights}, + estimation::nonparametric::{theta::Theta, weights::Weights}, + estimation::nonparametric::{weighted_median, Posterior}, }; -/// Container for the multiple model estimated predictions -/// -/// Each row contains the predictions for a single time point for a single subject -/// It includes the population and posterior mean and median predictions -/// These are defined by the mean and median of the prediction for each model, weighted by the population or posterior weights 
#[derive(Debug, Clone, Serialize, Deserialize)] pub struct NPPredictionRow { - /// The subject ID id: String, - /// The time of the prediction time: f64, - /// The output equation number outeq: usize, - /// The occasion of the prediction block: usize, - /// The observed value, if any obs: Option, - /// Censored observation flag cens: Censor, - /// The population mean prediction pop_mean: f64, - /// The population median prediction pop_median: f64, - /// The posterior mean prediction post_mean: f64, - /// The posterior median prediction post_median: f64, } @@ -97,28 +82,14 @@ impl NPPredictions { } } - /// Add a [NPPredictionRow] to the predictions pub fn add(&mut self, row: NPPredictionRow) { self.predictions.push(row); } - /// Get a reference to the predictions pub fn predictions(&self) -> &[NPPredictionRow] { &self.predictions } - /// Calculate the population and posterior predictions - /// - /// # Arguments - /// * `equation` - The equation to use for simulation - /// * `data` - The data to use for simulation - /// * `theta` - The theta values for the simulation - /// * `w` - The weights for the simulation - /// * `posterior` - The posterior probabilities for the simulation - /// * `idelta` - The delta for the simulation - /// * `tad` - The time after dose for the simulation - /// # Returns - /// A Result containing the NPPredictions or an error pub fn calculate( equation: &impl pharmsol::prelude::simulator::Equation, data: &Data, @@ -128,10 +99,8 @@ impl NPPredictions { idelta: f64, tad: f64, ) -> Result { - // Create a new NPPredictions instance let mut container = NPPredictions::new(); - // Expand data let data = data.clone().expand(idelta, tad); let subjects = data.subjects(); @@ -139,19 +108,10 @@ impl NPPredictions { bail!("Number of subjects and number of posterior means do not match"); }; - // Iterate over each subject and then each support point - for subject in subjects.iter().enumerate() { - let (subject_index, subject) = subject; - - // Container for 
predictions for this subject - // This will hold predictions for each support point - // The outer vector is for each support point - // The inner vector is for the vector of predictions for that support point + for (subject_index, subject) in subjects.iter().enumerate() { let mut predictions: Vec> = Vec::new(); - // And each support points for spp in theta.matrix().row_iter() { - // Simulate the subject with the current support point let spp_values = spp.iter().cloned().collect::>(); let pred = equation .simulate_subject(subject, &spp_values, None)? @@ -161,20 +121,16 @@ impl NPPredictions { } if predictions.is_empty() { - continue; // Skip this subject if no predictions are available + continue; } - // Calculate population mean using let mut pop_mean: Vec = vec![0.0; predictions.first().unwrap().len()]; - for outer_pred in predictions.iter().enumerate() { - let (i, outer_pred) = outer_pred; - for inner_pred in outer_pred.iter().enumerate() { - let (j, pred) = inner_pred; + for (i, outer_pred) in predictions.iter().enumerate() { + for (j, pred) in outer_pred.iter().enumerate() { pop_mean[j] += pred.prediction() * w[i]; } } - // Calculate population median let mut pop_median: Vec = Vec::new(); for j in 0..predictions.first().unwrap().len() { let mut values: Vec = Vec::new(); @@ -189,17 +145,13 @@ impl NPPredictions { pop_median.push(median_val); } - // Calculate posterior mean let mut posterior_mean: Vec = vec![0.0; predictions.first().unwrap().len()]; - for outer_pred in predictions.iter().enumerate() { - let (i, outer_pred) = outer_pred; - for inner_pred in outer_pred.iter().enumerate() { - let (j, pred) = inner_pred; + for (i, outer_pred) in predictions.iter().enumerate() { + for (j, pred) in outer_pred.iter().enumerate() { posterior_mean[j] += pred.prediction() * posterior.matrix()[(subject_index, i)]; } } - // Calculate posterior median let mut posterior_median: Vec = Vec::new(); for j in 0..predictions.first().unwrap().len() { let mut values: Vec = 
Vec::new(); @@ -214,8 +166,6 @@ impl NPPredictions { posterior_median.push(median_val); } - // Iterate over the aggregated predictions (one row per timepoint per subject) - // Use the first support point predictions to get time, outeq, block, and obs info if let Some(first_spp_preds) = predictions.first() { for (j, p) in first_spp_preds.iter().enumerate() { let row = NPPredictionRow { diff --git a/src/estimation/nonparametric/prior.rs b/src/estimation/nonparametric/prior.rs new file mode 100644 index 000000000..dbd4dc2eb --- /dev/null +++ b/src/estimation/nonparametric/prior.rs @@ -0,0 +1,274 @@ +use std::fs::File; + +use crate::estimation::nonparametric::{Theta, Weights}; +use crate::model::{ParameterDomain, ParameterSpace}; +use anyhow::{bail, Context, Result}; +use faer::Mat; +use serde::{Deserialize, Serialize}; + +pub mod latin; +pub mod sobol; + +#[derive(Debug, Deserialize, Clone, Serialize)] +pub enum Prior { + Sobol(usize, usize), + Latin(usize, usize), + File(String), + #[serde(skip)] + Theta(Theta), +} + +impl Prior { + pub fn sobol(points: usize, seed: usize) -> Prior { + Prior::Sobol(points, seed) + } + + pub fn points(&self) -> Option { + match self { + Prior::Sobol(points, _) => Some(*points), + Prior::Latin(points, _) => Some(*points), + Prior::File(_) => None, + Prior::Theta(theta) => Some(theta.nspp()), + } + } + + pub fn seed(&self) -> Option { + match self { + Prior::Sobol(_, seed) => Some(*seed), + Prior::Latin(_, seed) => Some(*seed), + Prior::File(_) => None, + Prior::Theta(_) => None, + } + } +} + +impl Default for Prior { + fn default() -> Self { + Prior::Sobol(2028, 22) + } +} + +pub fn read_prior( + path: impl AsRef, + parameters: impl Into, +) -> Result<(Theta, Option)> { + let path = path.as_ref().to_string(); + parse_prior_for_parameters(&path, parameters) +} + +pub(crate) fn sample_space_for_parameters( + parameters: impl Into, + prior: &Prior, +) -> Result { + let parameter_space = parameters.into(); + + for parameter in 
parameter_space.iter() { + let (lower, upper) = match parameter.domain { + ParameterDomain::Bounded { lower, upper } => (lower, upper), + ParameterDomain::Positive { + lower: Some(lower), + upper: Some(upper), + } + | ParameterDomain::Unbounded { + lower: Some(lower), + upper: Some(upper), + } => (lower, upper), + _ => bail!( + "Parameter '{}' is missing finite bounds required for nonparametric initialization", + parameter.name + ), + }; + + if lower.is_infinite() || upper.is_infinite() { + bail!( + "Parameter '{}' has infinite bounds: [{}, {}]", + parameter.name, + lower, + upper + ); + } + + if lower >= upper { + bail!( + "Parameter '{}' has invalid bounds: [{}, {}]. Lower bound must be less than upper bound.", + parameter.name, + lower, + upper + ); + } + } + + let prior = match prior { + Prior::Sobol(points, seed) => sobol::generate(¶meter_space, *points, *seed)?, + Prior::Latin(points, seed) => latin::generate(¶meter_space, *points, *seed)?, + Prior::File(path) => parse_prior_for_parameters(path, ¶meter_space)?.0, + Prior::Theta(theta) => return Ok(theta.clone()), + }; + Ok(prior) +} + +pub(crate) fn parse_prior_for_parameters( + path: &String, + parameters: impl Into, +) -> Result<(Theta, Option)> { + let parameters = parameters.into(); + tracing::info!("Reading prior from {}", path); + let file = File::open(path).context(format!("Unable to open the prior file '{}'", path))?; + let mut reader = csv::ReaderBuilder::new() + .has_headers(true) + .from_reader(file); + + let mut parameter_names: Vec = reader + .headers()? 
+ .clone() + .into_iter() + .map(|s| s.trim().to_owned()) + .collect(); + + let prob_index = parameter_names.iter().position(|name| name == "prob"); + if let Some(index) = prob_index { + parameter_names.remove(index); + } + + let random_names: Vec = parameters.names(); + + let mut reordered_indices: Vec = Vec::new(); + for random_name in &random_names { + match parameter_names.iter().position(|name| name == random_name) { + Some(index) => { + let adjusted_index = if let Some(prob_idx) = prob_index { + if index >= prob_idx { + index + 1 + } else { + index + } + } else { + index + }; + reordered_indices.push(adjusted_index); + } + None => bail!("Parameter {} is not present in the CSV file.", random_name), + } + } + + if parameter_names.len() > random_names.len() { + let extra_parameters: Vec<&String> = parameter_names.iter().collect(); + bail!( + "Found parameters in the prior not present in configuration: {:?}", + extra_parameters + ); + } + + let mut theta_values = Vec::new(); + let mut prob_values = Vec::new(); + + for result in reader.records() { + let record = result.unwrap(); + let values: Vec = reordered_indices + .iter() + .map(|&i| record[i].parse::().unwrap()) + .collect(); + theta_values.push(values); + + if let Some(prob_idx) = prob_index { + let prob_value: f64 = record[prob_idx].parse::().unwrap(); + prob_values.push(prob_value); + } + } + + let n_points = theta_values.len(); + let n_params = random_names.len(); + let theta_values: Vec = theta_values.into_iter().flatten().collect(); + let theta_matrix: Mat = + Mat::from_fn(n_points, n_params, |i, j| theta_values[i * n_params + j]); + + let theta = Theta::from_parts(theta_matrix, parameters.clone())?; + let weights = if !prob_values.is_empty() { + Some(Weights::from_vec(prob_values)) + } else { + None + }; + + Ok((theta, weights)) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::model::{ParameterSpace, ParameterSpec}; + use std::fs; + + fn parameter_space() -> ParameterSpace { + 
ParameterSpace::new() + .add(ParameterSpec::bounded("ke", 0.1, 1.0)) + .add(ParameterSpec::bounded("v", 5.0, 50.0)) + } + + fn temp_csv_path() -> String { + format!("test_temp_prior_{}.csv", rand::random::()) + } + + #[test] + fn prior_metadata_accessors() { + let sobol = Prior::sobol(100, 42); + assert_eq!(sobol.points(), Some(100)); + assert_eq!(sobol.seed(), Some(42)); + + let latin = Prior::Latin(50, 7); + assert_eq!(latin.points(), Some(50)); + assert_eq!(latin.seed(), Some(7)); + + let file = Prior::File("prior.csv".to_string()); + assert_eq!(file.points(), None); + assert_eq!(file.seed(), None); + } + + #[test] + fn sample_space_generates_expected_shape() { + let theta = sample_space_for_parameters(parameter_space(), &Prior::sobol(10, 42)).unwrap(); + assert_eq!(theta.nspp(), 10); + assert_eq!(theta.matrix().ncols(), 2); + } + + #[test] + fn sample_space_returns_custom_theta_verbatim() { + let parameters = parameter_space(); + let matrix = Mat::from_fn(3, 2, |i, j| (i + j) as f64); + let custom = Theta::from_parts(matrix, parameters).unwrap(); + + let theta = + sample_space_for_parameters(parameter_space(), &Prior::Theta(custom.clone())).unwrap(); + assert_eq!(theta.matrix(), custom.matrix()); + } + + #[test] + fn read_prior_parses_weights_and_reorders_columns() { + let path = temp_csv_path(); + fs::write(&path, "v,ke,prob\n10.0,0.5,0.3\n15.0,0.7,0.7\n").unwrap(); + + let (theta, weights) = read_prior(&path, parameter_space()).unwrap(); + let _ = fs::remove_file(&path); + + assert_eq!(theta.nspp(), 2); + assert_eq!(theta.matrix()[(0, 0)], 0.5); + assert_eq!(theta.matrix()[(0, 1)], 10.0); + + let weights = weights.expect("weights should be parsed from prob column"); + assert_eq!(weights.len(), 2); + assert_eq!(weights[0], 0.3); + assert_eq!(weights[1], 0.7); + } + + #[test] + fn read_prior_rejects_extra_parameters() { + let path = temp_csv_path(); + fs::write(&path, "ke,v,extra\n0.5,10.0,1.0\n").unwrap(); + + let err = read_prior(&path, 
parameter_space()).unwrap_err(); + let _ = fs::remove_file(&path); + + assert!(err + .to_string() + .contains("Found parameters in the prior not present in configuration")); + } +} diff --git a/src/estimation/nonparametric/prior/latin.rs b/src/estimation/nonparametric/prior/latin.rs new file mode 100644 index 000000000..a9e139403 --- /dev/null +++ b/src/estimation/nonparametric/prior/latin.rs @@ -0,0 +1,52 @@ +use anyhow::Result; +use faer::Mat; +use rand::prelude::*; +use rand::rngs::StdRng; + +use crate::estimation::nonparametric::Theta; +use crate::model::ParameterSpace; + +pub fn generate( + parameters: impl Into, + points: usize, + seed: usize, +) -> Result { + let parameters = parameters.into(); + let ranges = parameters.finite_ranges()?; + let mut rng = StdRng::seed_from_u64(seed as u64); + + let mut intervals = Vec::new(); + for _ in 0..ranges.len() { + let mut param_intervals: Vec = (0..points).map(|i| i as f64).collect(); + param_intervals.shuffle(&mut rng); + intervals.push(param_intervals); + } + + let rand_matrix = Mat::from_fn(points, ranges.len(), |i, j| { + let interval = intervals[j][i]; + let random_offset = rng.random::(); + let unscaled = (interval + random_offset) / points as f64; + let (lower, upper) = ranges[j]; + lower + unscaled * (upper - lower) + }); + + Theta::from_parts(rand_matrix, parameters.clone()) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::model::{ParameterSpace, ParameterSpec}; + + #[test] + fn latin_generate_produces_requested_shape() { + let params = ParameterSpace::new() + .add(ParameterSpec::bounded("a", 0.0, 1.0)) + .add(ParameterSpec::bounded("b", 0.0, 1.0)) + .add(ParameterSpec::bounded("c", 0.0, 1.0)); + + let theta = generate(¶ms, 10, 22).unwrap(); + assert_eq!(theta.nspp(), 10); + assert_eq!(theta.matrix().ncols(), 3); + } +} diff --git a/src/estimation/nonparametric/prior/sobol.rs b/src/estimation/nonparametric/prior/sobol.rs new file mode 100644 index 000000000..9703de60f --- /dev/null +++ 
b/src/estimation/nonparametric/prior/sobol.rs @@ -0,0 +1,42 @@ +use anyhow::Result; +use faer::Mat; +use sobol_burley::sample; + +use crate::estimation::nonparametric::Theta; +use crate::model::ParameterSpace; + +pub fn generate( + parameters: impl Into, + points: usize, + seed: usize, +) -> Result { + let parameters = parameters.into(); + let seed = seed as u32; + let ranges = parameters.finite_ranges()?; + + let rand_matrix = Mat::from_fn(points, ranges.len(), |i, j| { + let unscaled = sample((i).try_into().unwrap(), j.try_into().unwrap(), seed) as f64; + let (lower, upper) = ranges[j]; + lower + unscaled * (upper - lower) + }); + + Theta::from_parts(rand_matrix, parameters.clone()) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::model::{ParameterSpace, ParameterSpec}; + + #[test] + fn sobol_generate_produces_requested_shape() { + let params = ParameterSpace::new() + .add(ParameterSpec::bounded("a", 0.0, 1.0)) + .add(ParameterSpec::bounded("b", 0.0, 1.0)) + .add(ParameterSpec::bounded("c", 0.0, 1.0)); + + let theta = generate(¶ms, 10, 22).unwrap(); + assert_eq!(theta.nspp(), 10); + assert_eq!(theta.matrix().ncols(), 3); + } +} diff --git a/src/structs/psi.rs b/src/estimation/nonparametric/psi.rs similarity index 79% rename from src/structs/psi.rs rename to src/estimation/nonparametric/psi.rs index e50dfd0c7..82b8a7d37 100644 --- a/src/structs/psi.rs +++ b/src/estimation/nonparametric/psi.rs @@ -33,7 +33,11 @@ impl Psi { self.matrix.ncols() } - /// Modify the [Psi::matrix] to only include the columns specified by `indices` + pub fn to_ndarray(&self) -> Array2 { + let m = &self.matrix; + Array2::from_shape_fn((m.nrows(), m.ncols()), |(i, j)| m[(i, j)]) + } + pub(crate) fn filter_column_indices(&mut self, indices: &[usize]) { let matrix = self.matrix.to_owned(); @@ -44,7 +48,6 @@ impl Psi { self.matrix = new; } - /// Write the matrix to a CSV file pub fn write(&self, path: &str) { let mut writer = csv::Writer::from_path(path).unwrap(); for row in 
self.matrix.row_iter() { @@ -54,12 +57,9 @@ impl Psi { } } - /// Write the psi matrix to a CSV writer - /// Each row represents a subject, each column represents a support point pub fn to_csv(&self, writer: W) -> Result<()> { let mut csv_writer = csv::Writer::from_writer(writer); - // Write each row for i in 0..self.matrix.nrows() { let row: Vec = (0..self.matrix.ncols()) .map(|j| *self.matrix.get(i, j)) @@ -71,8 +71,6 @@ impl Psi { Ok(()) } - /// Read psi matrix from a CSV reader - /// Each row represents a subject, each column represents a support point pub fn from_csv(reader: R) -> Result { let mut csv_reader = csv::Reader::from_reader(reader); let mut rows: Vec> = Vec::new(); @@ -89,14 +87,12 @@ impl Psi { let nrows = rows.len(); let ncols = rows[0].len(); - // Verify all rows have the same length for (i, row) in rows.iter().enumerate() { if row.len() != ncols { bail!("Row {} has {} columns, expected {}", i, row.len(), ncols); } } - // Create matrix from rows let mat = Mat::from_fn(nrows, ncols, |i, j| rows[i][j]); Ok(Psi { matrix: mat }) @@ -138,7 +134,6 @@ impl Serialize for Psi { let mut seq = serializer.serialize_seq(Some(self.matrix.nrows()))?; - // Serialize each row as a vector for i in 0..self.matrix.nrows() { let row: Vec = (0..self.matrix.ncols()) .map(|j| *self.matrix.get(i, j)) @@ -184,7 +179,6 @@ impl<'de> Deserialize<'de> for Psi { let nrows = rows.len(); let ncols = rows[0].len(); - // Verify all rows have the same length for (i, row) in rows.iter().enumerate() { if row.len() != ncols { return Err(serde::de::Error::custom(format!( @@ -196,7 +190,6 @@ impl<'de> Deserialize<'de> for Psi { } } - // Create matrix from rows let mat = Mat::from_fn(nrows, ncols, |i, j| rows[i][j]); Ok(Psi { matrix: mat }) @@ -230,16 +223,13 @@ mod tests { #[test] fn test_from_array2() { - // Create a test 2x3 array let array = Array2::from_shape_vec((2, 3), vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).unwrap(); let psi = Psi::from(array.clone()); - // Check dimensions 
assert_eq!(psi.nspp(), 2); assert_eq!(psi.nsub(), 3); - // Check values using faer matrix directly let m = psi.matrix(); for i in 0..2 { for j in 0..3 { @@ -250,17 +240,14 @@ mod tests { #[test] fn test_from_array2_ref() { - // Create a test 3x2 array let array = Array2::from_shape_vec((3, 2), vec![10.0, 20.0, 30.0, 40.0, 50.0, 60.0]).unwrap(); let psi = Psi::from(&array); - // Check dimensions assert_eq!(psi.nspp(), 3); assert_eq!(psi.nsub(), 2); - // Check values using faer matrix directly let m = psi.matrix(); for i in 0..3 { for j in 0..2 { @@ -271,7 +258,6 @@ mod tests { #[test] fn test_nspp() { - // Test with a 4x2 matrix let array = Array2::from_shape_vec((4, 2), vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]).unwrap(); let psi = Psi::from(array); @@ -281,14 +267,12 @@ mod tests { #[test] fn test_nspp_empty() { - // Test with empty matrix let psi = Psi::new(); assert_eq!(psi.nspp(), 0); } #[test] fn test_nspp_single_row() { - // Test with 1x3 matrix let array = Array2::from_shape_vec((1, 3), vec![1.0, 2.0, 3.0]).unwrap(); let psi = Psi::from(array); @@ -297,7 +281,6 @@ mod tests { #[test] fn test_nsub() { - // Test with a 2x5 matrix let array = Array2::from_shape_vec( (2, 5), vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0], @@ -310,40 +293,7 @@ mod tests { #[test] fn test_nsub_empty() { - // Test with empty matrix let psi = Psi::new(); assert_eq!(psi.nsub(), 0); } - - #[test] - fn test_nsub_single_column() { - // Test with 3x1 matrix - let array = Array2::from_shape_vec((3, 1), vec![1.0, 2.0, 3.0]).unwrap(); - let psi = Psi::from(array); - - assert_eq!(psi.nsub(), 1); - } - - #[test] - fn test_from_implementations_consistency() { - // Test that both From implementations produce the same result - let array = Array2::from_shape_vec((2, 3), vec![1.5, 2.5, 3.5, 4.5, 5.5, 6.5]).unwrap(); - - let psi_from_owned = Psi::from(array.clone()); - let psi_from_ref = Psi::from(&array); - - // Both should have the same dimensions - assert_eq!(psi_from_owned.nspp(), 
psi_from_ref.nspp()); - assert_eq!(psi_from_owned.nsub(), psi_from_ref.nsub()); - - // And the same values - let owned_m = psi_from_owned.matrix(); - let ref_m = psi_from_ref.matrix(); - - for i in 0..2 { - for j in 0..3 { - assert_eq!(owned_m[(i, j)], ref_m[(i, j)]); - } - } - } } diff --git a/src/estimation/nonparametric/qr.rs b/src/estimation/nonparametric/qr.rs new file mode 100644 index 000000000..6338d80d1 --- /dev/null +++ b/src/estimation/nonparametric/qr.rs @@ -0,0 +1,38 @@ +use crate::estimation::nonparametric::Psi; +use anyhow::{bail, Result}; +use faer::linalg::solvers::ColPivQr; +use faer::Mat; + +/// Perform a QR decomposition on the Psi matrix. +pub fn qrd(psi: &Psi) -> Result<(Mat, Vec)> { + let mut mat = psi.matrix().to_owned(); + + for (index, row) in mat.row_iter_mut().enumerate() { + let row_sum: f64 = row.as_ref().iter().sum(); + if row_sum.abs() == 0.0 { + bail!("In psi, the row with index {} sums to zero", index); + } + row.iter_mut().for_each(|x| *x /= row_sum); + } + + let qr: ColPivQr = mat.col_piv_qr(); + let r_mat: faer::Mat = qr.R().to_owned(); + let perm = qr.P().arrays().0.to_vec(); + Ok((r_mat, perm)) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_identity() { + let mat: Mat = Mat::identity(10, 10); + let psi = Psi::from(mat); + let (r_mat, perm) = qrd(&psi).unwrap(); + + let expected_r_mat: Mat = Mat::identity(10, 10); + assert_eq!(r_mat, expected_r_mat); + assert_eq!(perm, vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); + } +} diff --git a/src/estimation/nonparametric/statistics.rs b/src/estimation/nonparametric/statistics.rs new file mode 100644 index 000000000..8c85e801d --- /dev/null +++ b/src/estimation/nonparametric/statistics.rs @@ -0,0 +1,238 @@ +use anyhow::{bail, Result}; +use ndarray::{Array, Array1, Array2, Axis}; + +pub fn median(data: &[f64]) -> f64 { + let mut data: Vec = data.to_vec(); + data.sort_by(|a, b| a.partial_cmp(b).unwrap()); + + let size = data.len(); + match size { + even if even % 2 == 0 => { 
+ let fst = data.get(even / 2 - 1).unwrap(); + let snd = data.get(even / 2).unwrap(); + (fst + snd) / 2.0 + } + odd => *data.get(odd / 2_usize).unwrap(), + } +} + +pub fn weighted_median(data: &[f64], weights: &[f64]) -> f64 { + assert_eq!( + data.len(), + weights.len(), + "The length of data and weights must be the same" + ); + assert!( + weights.iter().all(|&x| x >= 0.0), + "Weights must be non-negative, weights: {:?}", + weights + ); + + let mut weighted_data: Vec<(f64, f64)> = data + .iter() + .zip(weights.iter()) + .map(|(&d, &w)| (d, w)) + .collect(); + + weighted_data.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap()); + + let total_weight: f64 = weights.iter().sum(); + let mut cumulative_sum = 0.0; + + for (i, &(_, weight)) in weighted_data.iter().enumerate() { + cumulative_sum += weight; + + if cumulative_sum == total_weight / 2.0 { + if i + 1 < weighted_data.len() { + return (weighted_data[i].0 + weighted_data[i + 1].0) / 2.0; + } else { + return weighted_data[i].0; + } + } else if cumulative_sum > total_weight / 2.0 { + return weighted_data[i].0; + } + } + + unreachable!("The function should have returned a value before reaching this point."); +} + +pub fn population_mean_median( + theta: &Array2, + w: &Array1, +) -> Result<(Array1, Array1)> { + let w = if w.is_empty() { + tracing::warn!("w.len() == 0, setting all weights to 1/n"); + Array1::from_elem(theta.nrows(), 1.0 / theta.nrows() as f64) + } else { + w.clone() + }; + + if theta.nrows() != w.len() { + bail!( + "Number of parameters and number of weights do not match. 
Theta: {}, w: {}", + theta.nrows(), + w.len() + ); + } + + let mut mean = Array1::zeros(theta.ncols()); + let mut median = Array1::zeros(theta.ncols()); + + for (i, (mn, mdn)) in mean.iter_mut().zip(&mut median).enumerate() { + let col = theta.column(i).to_owned() * w.to_owned(); + *mn = col.sum(); + + let ct = theta.column(i); + let mut params = vec![]; + let mut weights = vec![]; + for (ti, wi) in ct.iter().zip(w.clone()) { + params.push(*ti); + weights.push(wi); + } + + *mdn = weighted_median(¶ms, &weights); + } + + Ok((mean, median)) +} + +pub fn posterior_mean_median( + theta: &Array2, + psi: &Array2, + w: &Array1, +) -> Result<(Array2, Array2)> { + let mut mean = Array2::zeros((0, theta.ncols())); + let mut median = Array2::zeros((0, theta.ncols())); + + let w = if w.is_empty() { + tracing::warn!("w is empty, setting all weights to 1/n"); + Array1::from_elem(theta.nrows(), 1.0 / theta.nrows() as f64) + } else { + w.clone() + }; + + if theta.nrows() != w.len() || theta.nrows() != psi.ncols() || psi.ncols() != w.len() { + bail!("Number of parameters and number of weights do not match, theta.nrows(): {}, w.len(): {}, psi.ncols(): {}", theta.nrows(), w.len(), psi.ncols()); + } + + let mut psi_norm: Array2 = Array2::zeros((0, psi.ncols())); + for (i, row) in psi.axis_iter(Axis(0)).enumerate() { + let row_w = row.to_owned() * w.to_owned(); + let row_sum = row_w.sum(); + let row_norm = if row_sum == 0.0 { + tracing::warn!("Sum of row {} of psi is 0.0, setting that row to 1/n", i); + Array1::from_elem(psi.ncols(), 1.0 / psi.ncols() as f64) + } else { + &row_w / row_sum + }; + psi_norm.push_row(row_norm.view())?; + } + if psi_norm.iter().any(|&x| x.is_nan()) { + dbg!(&psi); + bail!("NaN values found in psi_norm"); + }; + + for probs in psi_norm.axis_iter(Axis(0)) { + let mut post_mean: Vec = Vec::new(); + let mut post_median: Vec = Vec::new(); + + for pars in theta.axis_iter(Axis(1)) { + let weighted_par = &probs * &pars; + let the_mean = weighted_par.sum(); + 
post_mean.push(the_mean); + + let median = weighted_median(&pars.to_vec(), &probs.to_vec()); + post_median.push(median); + } + + mean.push_row(Array::from(post_mean.clone()).view())?; + median.push_row(Array::from(post_median.clone()).view())?; + } + + Ok((mean, median)) +} + +#[cfg(test)] +mod tests { + use super::{median, weighted_median}; + + #[test] + fn test_median_odd() { + let data = vec![1.0, 3.0, 2.0]; + assert_eq!(median(&data), 2.0); + } + + #[test] + fn test_median_even() { + let data = vec![1.0, 2.0, 3.0, 4.0]; + assert_eq!(median(&data), 2.5); + } + + #[test] + fn test_median_single() { + let data = vec![42.0]; + assert_eq!(median(&data), 42.0); + } + + #[test] + fn test_median_sorted() { + let data = vec![5.0, 10.0, 15.0, 20.0, 25.0]; + assert_eq!(median(&data), 15.0); + } + + #[test] + fn test_median_unsorted() { + let data = vec![10.0, 30.0, 20.0, 50.0, 40.0]; + assert_eq!(median(&data), 30.0); + } + + #[test] + fn test_median_with_duplicates() { + let data = vec![1.0, 2.0, 2.0, 3.0, 4.0]; + assert_eq!(median(&data), 2.0); + } + + #[test] + fn test_weighted_median_simple() { + let data = vec![1.0, 2.0, 3.0]; + let weights = vec![0.2, 0.5, 0.3]; + assert_eq!(weighted_median(&data, &weights), 2.0); + } + + #[test] + fn test_weighted_median_even_weights() { + let data = vec![1.0, 2.0, 3.0, 4.0]; + let weights = vec![0.25, 0.25, 0.25, 0.25]; + assert_eq!(weighted_median(&data, &weights), 2.5); + } + + #[test] + fn test_weighted_median_single_element() { + let data = vec![42.0]; + let weights = vec![1.0]; + assert_eq!(weighted_median(&data, &weights), 42.0); + } + + #[test] + #[should_panic(expected = "The length of data and weights must be the same")] + fn test_weighted_median_mismatched_lengths() { + let data = vec![1.0, 2.0, 3.0]; + let weights = vec![0.1, 0.2]; + weighted_median(&data, &weights); + } + + #[test] + fn test_weighted_median_all_same_elements() { + let data = vec![5.0, 5.0, 5.0, 5.0]; + let weights = vec![0.1, 0.2, 0.3, 0.4]; + 
assert_eq!(weighted_median(&data, &weights), 5.0); + } + + #[test] + #[should_panic(expected = "Weights must be non-negative")] + fn test_weighted_median_negative_weights() { + let data = vec![1.0, 2.0, 3.0, 4.0]; + let weights = vec![0.2, -0.5, 0.5, 0.8]; + assert_eq!(weighted_median(&data, &weights), 4.0); + } +} diff --git a/src/estimation/nonparametric/summaries.rs b/src/estimation/nonparametric/summaries.rs new file mode 100644 index 000000000..e0d145af9 --- /dev/null +++ b/src/estimation/nonparametric/summaries.rs @@ -0,0 +1,101 @@ +use ndarray::{Array1, Array2}; +use pharmsol::{Data, Equation, Event}; + +use crate::estimation::nonparametric::NonparametricWorkspace; +use crate::estimation::nonparametric::{population_mean_median, posterior_mean_median}; +use crate::results::{FitSummary, IndividualSummary, ParameterSummary, PopulationSummary}; + +pub fn fit_summary(result: &NonparametricWorkspace) -> FitSummary { + FitSummary { + objective_function: result.objf(), + converged: result.converged(), + iterations: result.cycles(), + subject_count: result.data().subjects().len(), + observation_count: count_observations(result.data()), + parameter_count: result.get_theta().parameters().len(), + algorithm: format!("{:?}", result.algorithm()), + } +} + +pub fn population_summary(result: &NonparametricWorkspace) -> PopulationSummary { + let theta_matrix = to_ndarray_matrix(result.get_theta().matrix()); + let weights = Array1::from_iter(result.weights().iter()); + let (mean, median) = population_mean_median(&theta_matrix, &weights) + .expect("population summary should be derivable from theta and weights"); + + let parameters = result + .get_theta() + .parameters() + .names() + .into_iter() + .enumerate() + .map(|(index, name)| { + let column = theta_matrix.column(index).to_vec(); + let mean_value = mean[index]; + let sd = weighted_sd(&column, &weights, mean_value); + let cv_percent = if mean_value.abs() > f64::EPSILON { + (sd / mean_value.abs()) * 100.0 + } else { + 0.0 
+ }; + + ParameterSummary { + name, + mean: mean_value, + median: median[index], + sd, + cv_percent, + } + }) + .collect(); + + PopulationSummary { parameters } +} + +pub fn individual_summaries( + result: &NonparametricWorkspace, +) -> Vec { + let theta_matrix = to_ndarray_matrix(result.get_theta().matrix()); + let psi_matrix = to_ndarray_matrix(result.psi().matrix()); + let weights = Array1::from_iter(result.weights().iter()); + let (means, _) = posterior_mean_median(&theta_matrix, &psi_matrix, &weights) + .expect("individual summaries should be derivable from theta, psi, and weights"); + let parameter_names = result.get_theta().parameters().names(); + + result + .data() + .subjects() + .iter() + .enumerate() + .map(|(subject_index, subject)| IndividualSummary { + id: subject.id().clone(), + parameter_names: parameter_names.clone(), + estimates: means.row(subject_index).to_vec(), + standard_errors: None, + }) + .collect() +} + +fn count_observations(data: &Data) -> usize { + data.subjects() + .iter() + .flat_map(|subject| subject.occasions()) + .flat_map(|occasion| occasion.events()) + .filter(|event| matches!(event, Event::Observation(_))) + .count() +} + +fn to_ndarray_matrix(matrix: &faer::Mat) -> Array2 { + Array2::from_shape_fn((matrix.nrows(), matrix.ncols()), |(row, col)| { + matrix[(row, col)] + }) +} + +fn weighted_sd(values: &[f64], weights: &Array1, mean: f64) -> f64 { + let variance = values + .iter() + .zip(weights.iter()) + .map(|(value, weight)| weight * (value - mean).powi(2)) + .sum::(); + variance.sqrt() +} diff --git a/src/structs/theta.rs b/src/estimation/nonparametric/theta.rs similarity index 55% rename from src/structs/theta.rs rename to src/estimation/nonparametric/theta.rs index 37d9f65e7..2d5052bd3 100644 --- a/src/structs/theta.rs +++ b/src/estimation/nonparametric/theta.rs @@ -4,7 +4,8 @@ use anyhow::{bail, Result}; use faer::Mat; use serde::{Deserialize, Serialize}; -use crate::{prelude::Parameters, structs::weights::Weights}; +use 
super::weights::Weights; +use crate::model::ParameterSpace; /// [Theta] is a structure that holds the support points /// These represent the joint population parameter distribution @@ -13,14 +14,14 @@ use crate::{prelude::Parameters, structs::weights::Weights}; #[derive(Clone, PartialEq)] pub struct Theta { matrix: Mat, - parameters: Parameters, + parameters: ParameterSpace, } impl Default for Theta { fn default() -> Self { Theta { matrix: Mat::new(), - parameters: Parameters::new(), + parameters: ParameterSpace::new(), } } } @@ -30,13 +31,14 @@ impl Theta { Theta::default() } - /// Create a new [Theta] from a matrix and [Parameters] + /// Create a new [Theta] from a matrix and [ParameterSpace] /// /// It is important that the number of columns in the matrix matches the number of parameters - /// in the [Parameters] object + /// in the [ParameterSpace] /// - /// The order of parameters in the [Parameters] object should match the order of columns in the matrix - pub fn from_parts(matrix: Mat, parameters: Parameters) -> Result { + /// The order of parameters in the [ParameterSpace] should match the order of columns in the matrix + pub fn from_parts(matrix: Mat, parameters: impl Into) -> Result { + let parameters = parameters.into(); if matrix.ncols() != parameters.len() { bail!( "Number of columns in matrix ({}) does not match number of parameters ({})", @@ -60,13 +62,13 @@ impl Theta { &mut self.matrix } - /// Get the [Parameters] object associated with this [Theta] - pub fn parameters(&self) -> &Parameters { + /// Get the [ParameterSpace] associated with this [Theta] + pub fn parameters(&self) -> &ParameterSpace { &self.parameters } - /// Get a mutable reference to the [Parameters] object - pub fn parameters_mut(&mut self) -> &mut Parameters { + /// Get a mutable reference to the [ParameterSpace] + pub fn parameters_mut(&mut self) -> &mut ParameterSpace { &mut self.parameters } @@ -122,22 +124,24 @@ impl Theta { return true; } - let limits = 
self.parameters.ranges(); + let limits = self + .parameters + .finite_ranges() + .expect("theta requires finite parameter bounds"); for row_idx in 0..self.matrix.nrows() { let mut squared_dist = 0.0; for (i, val) in spp.iter().enumerate() { - // Normalized squared difference for this dimension let normalized_diff = (val - self.matrix.get(row_idx, i)) / (limits[i].1 - limits[i].0); squared_dist += normalized_diff * normalized_diff; } let dist = squared_dist.sqrt(); if dist <= min_dist { - return false; // This point is too close to an existing point + return false; } } - true // Point is sufficiently distant from all existing points + true } /// Write the matrix to a CSV file @@ -185,7 +189,6 @@ impl Theta { pub fn to_csv(&self, writer: W) -> Result<()> { let mut csv_writer = csv::Writer::from_writer(writer); - // Write each row for i in 0..self.matrix.nrows() { let row: Vec = (0..self.matrix.ncols()) .map(|j| *self.matrix.get(i, j)) @@ -216,18 +219,14 @@ impl Theta { let nrows = rows.len(); let ncols = rows[0].len(); - // Verify all rows have the same length for (i, row) in rows.iter().enumerate() { if row.len() != ncols { bail!("Row {} has {} columns, expected {}", i, row.len(), ncols); } } - // Create matrix from rows let mat = Mat::from_fn(nrows, ncols, |i, j| rows[i][j]); - - // Create empty parameters - user will need to set these separately - let parameters = Parameters::new(); + let parameters = ParameterSpace::new(); Theta::from_parts(mat, parameters) } @@ -235,15 +234,12 @@ impl Theta { impl Debug for Theta { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - // Write nspp and nsub writeln!(f, "\nTheta contains {} support points\n", self.nspp())?; - // Write the parameter names for name in self.parameters.names().iter() { write!(f, "\t{}", name)?; } writeln!(f)?; - // Write the matrix self.matrix.row_iter().enumerate().for_each(|(index, row)| { write!(f, "{}", index).unwrap(); for val in row.iter() { @@ -260,19 +256,20 @@ impl Serialize 
for Theta { where S: serde::Serializer, { - use serde::ser::SerializeSeq; - - let mut seq = serializer.serialize_seq(Some(self.matrix.nrows()))?; - - // Serialize each row as a vector - for i in 0..self.matrix.nrows() { - let row: Vec = (0..self.matrix.ncols()) - .map(|j| *self.matrix.get(i, j)) - .collect(); - seq.serialize_element(&row)?; - } + use serde::ser::SerializeStruct; + + let rows: Vec> = (0..self.matrix.nrows()) + .map(|i| { + (0..self.matrix.ncols()) + .map(|j| *self.matrix.get(i, j)) + .collect() + }) + .collect(); - seq.end() + let mut state = serializer.serialize_struct("Theta", 2)?; + state.serialize_field("matrix", &rows)?; + state.serialize_field("parameters", &self.parameters)?; + state.end() } } @@ -281,132 +278,35 @@ impl<'de> Deserialize<'de> for Theta { where D: serde::Deserializer<'de>, { - use serde::de::{SeqAccess, Visitor}; - use std::fmt; - - struct ThetaVisitor; + #[derive(Deserialize)] + struct ThetaSerde { + matrix: Vec>, + parameters: ParameterSpace, + } - impl<'de> Visitor<'de> for ThetaVisitor { - type Value = Theta; + let decoded = ThetaSerde::deserialize(deserializer)?; - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a sequence of rows (vectors of f64)") - } + if decoded.matrix.is_empty() { + return Ok(Self { + matrix: Mat::new(), + parameters: decoded.parameters, + }); + } - fn visit_seq(self, mut seq: A) -> std::result::Result - where - A: SeqAccess<'de>, - { - let mut rows: Vec> = Vec::new(); - - while let Some(row) = seq.next_element::>()? 
{ - rows.push(row); - } - - if rows.is_empty() { - return Err(serde::de::Error::custom("Empty matrix not allowed")); - } - - let nrows = rows.len(); - let ncols = rows[0].len(); - - // Verify all rows have the same length - for (i, row) in rows.iter().enumerate() { - if row.len() != ncols { - return Err(serde::de::Error::custom(format!( - "Row {} has {} columns, expected {}", - i, - row.len(), - ncols - ))); - } - } - - // Create matrix from rows - let mat = Mat::from_fn(nrows, ncols, |i, j| rows[i][j]); - - // Create empty parameters - user will need to set these separately - let parameters = Parameters::new(); - - Theta::from_parts(mat, parameters).map_err(serde::de::Error::custom) + let nrows = decoded.matrix.len(); + let ncols = decoded.matrix[0].len(); + for (index, row) in decoded.matrix.iter().enumerate() { + if row.len() != ncols { + return Err(serde::de::Error::custom(format!( + "Row {} has {} columns, expected {}", + index, + row.len(), + ncols + ))); } } - deserializer.deserialize_seq(ThetaVisitor) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use faer::mat; - - #[test] - fn test_filter_indices() { - // Create a 4x2 matrix with recognizable values - let matrix = mat![[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]]; - - let parameters = Parameters::new().add("A", 0.0, 10.0).add("B", 0.0, 10.0); - - let mut theta = Theta::from_parts(matrix, parameters).unwrap(); - - theta.filter_indices(&[0, 3]); - - // Expected result is a 2x2 matrix with filtered rows - let expected = mat![[1.0, 2.0], [7.0, 8.0]]; - - assert_eq!(theta.matrix, expected); - } - - #[test] - fn test_add_point() { - let matrix = mat![[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]; - - let parameters = Parameters::new().add("A", 0.0, 10.0).add("B", 0.0, 10.0); - - let mut theta = Theta::from_parts(matrix, parameters).unwrap(); - - theta.add_point(&[7.0, 8.0]).unwrap(); - - let expected = mat![[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]]; - - assert_eq!(theta.matrix, expected); - } - - #[test] 
- fn test_suggest_point() { - let matrix = mat![[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]; - let parameters = Parameters::new().add("A", 0.0, 10.0).add("B", 0.0, 10.0); - let mut theta = Theta::from_parts(matrix, parameters).unwrap(); - theta.suggest_point(&[7.0, 8.0], 0.2).unwrap(); - let expected = mat![[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]]; - assert_eq!(theta.matrix, expected); - - // Suggest a point that is too close - theta.suggest_point(&[7.1, 8.1], 0.2).unwrap(); - // The point should not be added - assert_eq!(theta.matrix.nrows(), 4); - } - - #[test] - fn test_param_names() { - let matrix = mat![[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]; - let parameters = Parameters::new().add("A", 0.0, 10.0).add("B", 0.0, 10.0); - - let theta = Theta::from_parts(matrix, parameters).unwrap(); - let names = theta.param_names(); - assert_eq!(names, vec!["A".to_string(), "B".to_string()]); - } - - #[test] - fn test_set_matrix() { - let matrix = mat![[1.0, 2.0], [3.0, 4.0]]; - let parameters = Parameters::new().add("A", 0.0, 10.0).add("B", 0.0, 10.0); - let mut theta = Theta::from_parts(matrix, parameters).unwrap(); - - let new_matrix = mat![[5.0, 6.0], [7.0, 8.0], [9.0, 10.0]]; - theta.matrix_mut().clone_from(&new_matrix); - - assert_eq!(theta.matrix(), &new_matrix); + let matrix = Mat::from_fn(nrows, ncols, |i, j| decoded.matrix[i][j]); + Self::from_parts(matrix, decoded.parameters).map_err(serde::de::Error::custom) } } diff --git a/src/structs/weights.rs b/src/estimation/nonparametric/weights.rs similarity index 86% rename from src/structs/weights.rs rename to src/estimation/nonparametric/weights.rs index 483817d48..2ed06f75f 100644 --- a/src/structs/weights.rs +++ b/src/estimation/nonparametric/weights.rs @@ -24,15 +24,12 @@ impl Weights { Self { weights } } - /// Create a new [Weights] instance from a vector of weights. 
pub fn from_vec(weights: Vec) -> Self { Self { weights: Col::from_fn(weights.len(), |i| weights[i]), } } - /// Create a new [Weights] instance with uniform weights. - /// If `n` is 0, returns an empty [Weights] instance. pub fn uniform(n: usize) -> Self { if n == 0 { return Self::default(); @@ -43,27 +40,22 @@ impl Weights { } } - /// Get a reference to the weights. pub fn weights(&self) -> &Col { &self.weights } - /// Get a mutable reference to the weights. pub fn weights_mut(&mut self) -> &mut Col { &mut self.weights } - /// Get the number of weights. pub fn len(&self) -> usize { self.weights.nrows() } - // Check if there are no weights. pub fn is_empty(&self) -> bool { self.len() == 0 } - /// Get a vector representation of the weights. pub fn to_vec(&self) -> Vec { self.weights.iter().cloned().collect() } diff --git a/src/estimation/nonparametric/workspace.rs b/src/estimation/nonparametric/workspace.rs new file mode 100644 index 000000000..156ce0470 --- /dev/null +++ b/src/estimation/nonparametric/workspace.rs @@ -0,0 +1,282 @@ +use pharmsol::Equation; + +use crate::algorithms::{Status, StopReason}; +use crate::estimation::nonparametric::{ + posterior, CycleLog, NPPredictions, Posterior, Psi, Theta, Weights, +}; +use crate::output::shared::RunConfiguration; +use crate::results::FitResult; +use pharmsol::Data; + +#[derive(Debug)] +pub struct NonparametricWorkspace { + equation: E, + data: Data, + theta: Theta, + psi: Psi, + weights: Weights, + objf: f64, + cycles: usize, + status: Status, + run_configuration: RunConfiguration, + cyclelog: CycleLog, + predictions: Option, + posterior: Posterior, +} + +impl NonparametricWorkspace { + #[allow(clippy::too_many_arguments)] + pub(crate) fn new( + equation: E, + data: Data, + theta: Theta, + psi: Psi, + weights: Weights, + objf: f64, + cycles: usize, + status: Status, + run_configuration: RunConfiguration, + cyclelog: CycleLog, + ) -> anyhow::Result { + let posterior = posterior::posterior(&psi, &weights)?; + + Ok(Self 
{ + equation, + data, + theta, + psi, + weights, + objf, + cycles, + status, + run_configuration, + cyclelog, + predictions: None, + posterior, + }) + } + + pub fn cycles(&self) -> usize { + self.cycles + } + + pub fn objf(&self) -> f64 { + self.objf + } + + pub fn converged(&self) -> bool { + self.status == Status::Stop(StopReason::Converged) + } + + pub fn get_theta(&self) -> &Theta { + &self.theta + } + + pub fn data(&self) -> &Data { + &self.data + } + + pub fn cycle_log(&self) -> &CycleLog { + &self.cyclelog + } + + pub(crate) fn run_configuration(&self) -> &RunConfiguration { + &self.run_configuration + } + + pub(crate) fn algorithm(&self) -> crate::algorithms::Algorithm { + self.run_configuration.algorithm + } + + pub(crate) fn output_folder(&self) -> &str { + self.run_configuration.output_path() + } + + pub(crate) fn should_write_outputs(&self) -> bool { + self.run_configuration.should_write_outputs() + } + + pub(crate) fn prediction_interval(&self) -> (f64, f64) { + ( + self.run_configuration.runtime.idelta, + self.run_configuration.runtime.tad, + ) + } + + pub fn predictions(&self) -> Option<&NPPredictions> { + self.predictions.as_ref() + } + + pub fn psi(&self) -> &Psi { + &self.psi + } + + pub fn weights(&self) -> &Weights { + &self.weights + } + + pub fn posterior(&self) -> &Posterior { + &self.posterior + } + + pub fn calculate_predictions(&mut self, idelta: f64, tad: f64) -> anyhow::Result<()> { + let predictions = NPPredictions::calculate( + &self.equation, + &self.data, + &self.theta, + &self.weights, + &self.posterior, + idelta, + tad, + )?; + self.predictions = Some(predictions); + Ok(()) + } + + pub fn write_theta(&self) -> anyhow::Result<()> { + use anyhow::{bail, Context}; + use csv::WriterBuilder; + + tracing::debug!("Writing population parameter distribution..."); + + let w: Vec = self.weights.to_vec(); + if w.len() != self.theta.matrix().nrows() { + bail!( + "Number of weights ({}) and number of support points ({}) do not match.", + 
w.len(), + self.theta.matrix().nrows() + ); + } + + let outputfile = crate::output::OutputFile::new(self.output_folder(), "theta.csv") + .context("Failed to create output file for theta")?; + + let mut writer = WriterBuilder::new() + .has_headers(true) + .from_writer(outputfile.file()); + + let mut theta_header = self.run_configuration.parameter_names.clone(); + theta_header.push("prob".to_string()); + writer.write_record(&theta_header)?; + + for (theta_row, &w_val) in self.theta.matrix().row_iter().zip(w.iter()) { + let mut row: Vec = theta_row.iter().map(|&val| val.to_string()).collect(); + row.push(w_val.to_string()); + writer.write_record(&row)?; + } + writer.flush()?; + Ok(()) + } + + pub fn write_posterior(&self) -> anyhow::Result<()> { + use csv::WriterBuilder; + + tracing::debug!("Writing posterior parameter probabilities..."); + + let outputfile = crate::output::OutputFile::new(self.output_folder(), "posterior.csv")?; + + let mut writer = WriterBuilder::new() + .has_headers(true) + .from_writer(outputfile.file()); + + writer.write_field("id")?; + writer.write_field("point")?; + self.theta.param_names().iter().for_each(|name| { + writer.write_field(name).unwrap(); + }); + writer.write_field("prob")?; + writer.write_record(None::<&[u8]>)?; + + let subjects = self.data.subjects(); + self.posterior + .matrix() + .row_iter() + .enumerate() + .for_each(|(i, row)| { + let subject = subjects.get(i).unwrap(); + let id = subject.id(); + + row.iter().enumerate().for_each(|(spp, prob)| { + writer.write_field(id.clone()).unwrap(); + writer.write_field(spp.to_string()).unwrap(); + + self.theta.matrix().row(spp).iter().for_each(|val| { + writer.write_field(val.to_string()).unwrap(); + }); + + writer.write_field(prob.to_string()).unwrap(); + writer.write_record(None::<&[u8]>).unwrap(); + }); + }); + + writer.flush()?; + Ok(()) + } + + pub fn write_covariates(&self) -> anyhow::Result<()> { + use csv::WriterBuilder; + use pharmsol::Event; + + tracing::debug!("Writing 
covariates..."); + let outputfile = crate::output::OutputFile::new(self.output_folder(), "covariates.csv")?; + let mut writer = WriterBuilder::new() + .has_headers(true) + .from_writer(outputfile.file()); + + let mut covariate_names = std::collections::HashSet::new(); + for subject in self.data.subjects() { + for occasion in subject.occasions() { + let covmap = occasion.covariates().covariates(); + for cov_name in covmap.keys() { + covariate_names.insert(cov_name.clone()); + } + } + } + let mut covariate_names: Vec = covariate_names.into_iter().collect(); + covariate_names.sort(); + + let mut headers = vec!["id", "time", "block"]; + headers.extend(covariate_names.iter().map(|s| s.as_str())); + writer.write_record(&headers)?; + + for subject in self.data.subjects() { + for occasion in subject.occasions() { + let covmap = occasion.covariates().covariates(); + + for event in occasion.iter() { + let time = match event { + Event::Bolus(bolus) => bolus.time(), + Event::Infusion(infusion) => infusion.time(), + Event::Observation(observation) => observation.time(), + }; + + let mut row: Vec = Vec::new(); + row.push(subject.id().clone()); + row.push(time.to_string()); + row.push(occasion.index().to_string()); + + for cov_name in &covariate_names { + if let Some(cov) = covmap.get(cov_name) { + if let Ok(value) = cov.interpolate(time) { + row.push(value.to_string()); + } else { + row.push(String::new()); + } + } else { + row.push(String::new()); + } + } + + writer.write_record(&row)?; + } + } + } + + writer.flush()?; + Ok(()) + } + + pub fn into_fit_result(self) -> FitResult { + FitResult::Nonparametric(self) + } +} diff --git a/src/lib.rs b/src/lib.rs index 41cbc9af8..f578c0d4e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,25 +1,46 @@ -//! PMcore is a framework for developing and running non-parametric algorithms for population pharmacokinetic modelling +//! PMcore is a framework for developing and running population pharmacokinetic algorithms //! -//! 
The framework is designed to be modular and flexible, allowing for easy integration of new algorithms and methods. It is heavily designed around the specifications for Pmetrics, a package for R, and is designed to be used in conjunction with it. However, as a general rust library, it can be used for a wide variety of applications, not limited to pharmacometrics. +//! The framework exposes a unified modeling platform for PMcore algorithms. //! -//! # Configuration +//! # Algorithm Types //! -//! PMcore is configured using [routines::settings::Settings], which specifies the settings for the program. +//! PMcore currently provides the unified modeling/compilation layer together with the +//! baseline non-parametric estimation algorithms. +//! +//! # Public API +//! +//! PMcore centers on the model/problem API in [api]. Models are defined with +//! [api::ModelDefinition], configured with [api::EstimationProblem], and executed through +//! [api::fit]. //! //! # Data format //! -//! PMcore is heavily linked to [pharmsol], which provides the data structures and routines for handling pharmacokinetic data. The data is stored in a [pharmsol::Data] structure, and can either be read from a CSV file, using [pharmsol::data::parse_pmetrics::read_pmetrics], or created dynamically using the [pharmsol::data::builder::SubjectBuilder]. +//! PMcore is heavily linked to [pharmsol], which provides the data structures and routines for handling +//! pharmacokinetic data. The data is stored in a [pharmsol::Data] structure, and can either be read +//! from a CSV file, using [pharmsol::data::parse_pmetrics::read_pmetrics], or created dynamically +//! using the [pharmsol::data::builder::SubjectBuilder]. //! /// Provides the various algorithms used within the framework -// pub mod algorithms; pub mod algorithms; -/// Routines -pub mod routines; +/// New public modeling and execution API. +pub mod api; + +/// Shared preprocessing and compilation layer. 
+pub mod compile; + +/// Estimation family boundaries for the new architecture. +pub mod estimation; -// Structures -pub mod structs; +/// Public model-domain types used by the new API. +pub mod model; + +/// Shared result and summary types for the new API. +pub mod results; + +/// Shared output writers for the new API. +pub mod output; // Re-export commonly used items pub use anyhow::Result; @@ -33,18 +54,33 @@ pub mod prelude { pub use super::HashMap; pub use super::Result; pub use crate::algorithms; - pub use crate::algorithms::dispatch_algorithm; pub use crate::algorithms::Algorithm; - pub use crate::routines; - pub use crate::routines::logger; + pub use crate::api::fit; + pub use crate::api::{ + AlgorithmTuning, ConvergenceOptions, EstimationMethod, EstimationProblem, LoggingLevel, + LoggingOptions, ModelDefinition, NonparametricMethod, NpagOptions, NpodOptions, OutputPlan, + PostProbOptions, RuntimeOptions, + }; + pub use crate::compile::{CompiledProblem, DesignContext, ObservationIndex}; + pub use crate::estimation::nonparametric::{ + CycleLog, NPCycle, NPPredictions, NonparametricEngine, NonparametricWorkspace, Posterior, + Psi, Theta, Weights, + }; + pub use crate::model::{ + ContinuousObservationSpec, CovariateEffectsSpec, CovariateModel, CovariateSpec, + ModelMetadata, ObservationChannel, ObservationLikelihood, ObservationSpec, ParameterDomain, + ParameterSpace, ParameterSpec, ParameterTransform as ModelParameterTransform, + ParameterVariability, RandomEffectsSpec, VariabilityModel, + }; + pub use crate::results::{ + ArtifactIndex, DiagnosticsBundle, FitResult, FitSummary, IndividualSummary, + ParameterSummary, PopulationSummary, PredictionsBundle, + }; pub use pharmsol::optimize::effect::get_e2; pub use pharmsol; - pub use crate::routines::initialization::Prior; - - pub use crate::routines::settings::*; - pub use crate::structs::*; + pub use crate::estimation::nonparametric::{read_prior, Prior}; pub mod simulator { pub use 
pharmsol::prelude::simulator::*; diff --git a/src/model/covariate_model.rs b/src/model/covariate_model.rs new file mode 100644 index 000000000..0de14c163 --- /dev/null +++ b/src/model/covariate_model.rs @@ -0,0 +1,444 @@ +//! Structured covariate model for population parameter regression. + +use anyhow::{bail, Result}; +use faer::{Col, Mat}; +use serde::Serialize; +use std::collections::HashMap; + +/// Covariate model specification. +#[derive(Debug, Clone)] +pub struct CovariateModel { + param_names: Vec, + covariate_names: Vec, + covariate_mask: Vec>, + beta: Col, + estimate_beta: Vec, + reference_values: HashMap, +} + +impl CovariateModel { + pub fn new( + param_names: Vec>, + covariate_names: Vec>, + covariate_mask: Vec>, + ) -> Result { + let param_names: Vec = param_names.into_iter().map(|s| s.into()).collect(); + let covariate_names: Vec = covariate_names.into_iter().map(|s| s.into()).collect(); + + let n_params = param_names.len(); + let n_covs = covariate_names.len(); + + if covariate_mask.len() != n_params { + bail!( + "Covariate mask rows ({}) must match number of parameters ({})", + covariate_mask.len(), + n_params + ); + } + + for (i, row) in covariate_mask.iter().enumerate() { + if row.len() != n_covs { + bail!( + "Covariate mask row {} has {} columns, expected {}", + i, + row.len(), + n_covs + ); + } + } + + let n_beta = Self::count_beta_coefficients(&covariate_mask, n_params); + + Ok(Self { + param_names, + covariate_names, + covariate_mask, + beta: Col::zeros(n_beta), + estimate_beta: vec![true; n_beta], + reference_values: HashMap::new(), + }) + } + + pub fn intercept_only(param_names: Vec>) -> Result { + let param_names: Vec = param_names.into_iter().map(|s| s.into()).collect(); + let n_params = param_names.len(); + + Ok(Self { + param_names, + covariate_names: Vec::new(), + covariate_mask: vec![Vec::new(); n_params], + beta: Col::zeros(n_params), + estimate_beta: vec![true; n_params], + reference_values: HashMap::new(), + }) + } + + pub fn 
from_saemix_matrix( + param_names: Vec>, + covariate_names: Vec>, + matrix: &[f64], + ) -> Result { + let param_names: Vec = param_names.into_iter().map(|s| s.into()).collect(); + let covariate_names: Vec = covariate_names.into_iter().map(|s| s.into()).collect(); + + let n_params = param_names.len(); + let n_covs = covariate_names.len(); + + if matrix.len() != n_params * n_covs { + bail!( + "Matrix length ({}) doesn't match n_params × n_covs ({} × {} = {})", + matrix.len(), + n_params, + n_covs, + n_params * n_covs + ); + } + + let covariate_mask: Vec> = (0..n_params) + .map(|i| (0..n_covs).map(|j| matrix[i * n_covs + j] != 0.0).collect()) + .collect(); + + Self::new(param_names, covariate_names, covariate_mask) + } + + pub fn set_beta(&mut self, beta: Col) -> Result<()> { + let expected = self.n_beta(); + if beta.nrows() != expected { + bail!( + "Beta length ({}) doesn't match expected ({})", + beta.nrows(), + expected + ); + } + self.beta = beta; + Ok(()) + } + + pub fn set_intercepts(&mut self, intercepts: &[f64]) -> Result<()> { + if intercepts.len() != self.n_params() { + bail!( + "Intercepts length ({}) doesn't match n_params ({})", + intercepts.len(), + self.n_params() + ); + } + + let mut idx = 0; + for (i, &intercept) in intercepts.iter().enumerate() { + self.beta[idx] = intercept; + idx += 1; + idx += self.covariate_mask[i].iter().filter(|&&x| x).count(); + } + + Ok(()) + } + + pub fn set_estimate_beta(&mut self, estimate: Vec) -> Result<()> { + if estimate.len() != self.beta.nrows() { + bail!( + "estimate_beta length ({}) doesn't match n_beta ({})", + estimate.len(), + self.beta.nrows() + ); + } + self.estimate_beta = estimate; + Ok(()) + } + + pub fn fix_intercept(&mut self, param_idx: usize) -> Result<()> { + let beta_idx = self.intercept_beta_index(param_idx)?; + self.estimate_beta[beta_idx] = false; + Ok(()) + } + + pub fn set_reference(&mut self, covariate: &str, value: f64) -> Result<()> { + if !self.covariate_names.contains(&covariate.to_string()) 
{ + bail!("Unknown covariate: {}", covariate); + } + self.reference_values.insert(covariate.to_string(), value); + Ok(()) + } + + pub fn compute_mu(&self, covariates: &HashMap) -> Col { + let n_params = self.param_names.len(); + let mut mu = Col::zeros(n_params); + let mut beta_idx = 0; + + for i in 0..n_params { + mu[i] = self.beta[beta_idx]; + beta_idx += 1; + + for (j, cov_name) in self.covariate_names.iter().enumerate() { + if self.covariate_mask[i][j] { + let cov_value = covariates.get(cov_name).copied().unwrap_or(0.0); + let reference = self.reference_values.get(cov_name).copied().unwrap_or(0.0); + mu[i] += self.beta[beta_idx] * (cov_value - reference); + beta_idx += 1; + } + } + } + + mu + } + + pub fn build_design_row(&self, covariates: &HashMap) -> Col { + let n_beta = self.n_beta(); + let mut x = Col::zeros(n_beta); + let mut beta_idx = 0; + + for i in 0..self.n_params() { + x[beta_idx] = 1.0; + beta_idx += 1; + + for (j, cov_name) in self.covariate_names.iter().enumerate() { + if self.covariate_mask[i][j] { + let cov_value = covariates.get(cov_name).copied().unwrap_or(0.0); + let reference = self.reference_values.get(cov_name).copied().unwrap_or(0.0); + x[beta_idx] = cov_value - reference; + beta_idx += 1; + } + } + } + + x + } + + pub fn build_design_matrix(&self, all_covariates: &[HashMap]) -> Mat { + let n_subjects = all_covariates.len(); + let n_params = self.n_params(); + let n_beta = self.n_beta(); + let n_rows = n_subjects * n_params; + let mut x = Mat::zeros(n_rows, n_beta); + + for (subject_idx, covs) in all_covariates.iter().enumerate() { + let mut beta_idx = 0; + for param_idx in 0..n_params { + let row_idx = subject_idx * n_params + param_idx; + x[(row_idx, beta_idx)] = 1.0; + beta_idx += 1; + + for (j, cov_name) in self.covariate_names.iter().enumerate() { + if self.covariate_mask[param_idx][j] { + let cov_value = covs.get(cov_name).copied().unwrap_or(0.0); + let reference = self.reference_values.get(cov_name).copied().unwrap_or(0.0); + 
x[(row_idx, beta_idx)] = cov_value - reference; + beta_idx += 1; + } + } + } + } + + x + } + + pub fn n_params(&self) -> usize { + self.param_names.len() + } + + pub fn n_covariates(&self) -> usize { + self.covariate_names.len() + } + + pub fn n_beta(&self) -> usize { + Self::count_beta_coefficients(&self.covariate_mask, self.param_names.len()) + } + + pub fn n_beta_estimated(&self) -> usize { + self.estimate_beta.iter().filter(|&&x| x).count() + } + + pub fn param_names(&self) -> &[String] { + &self.param_names + } + + pub fn covariate_names(&self) -> &[String] { + &self.covariate_names + } + + pub fn covariate_mask(&self) -> &[Vec] { + &self.covariate_mask + } + + pub fn beta(&self) -> &Col { + &self.beta + } + + pub fn beta_mut(&mut self) -> &mut Col { + &mut self.beta + } + + pub fn estimate_beta(&self) -> &[bool] { + &self.estimate_beta + } + + pub fn intercept(&self, param_idx: usize) -> Option { + let beta_idx = self.intercept_beta_index(param_idx).ok()?; + Some(self.beta[beta_idx]) + } + + pub fn has_covariates(&self, param_idx: usize) -> bool { + param_idx < self.covariate_mask.len() && self.covariate_mask[param_idx].iter().any(|&x| x) + } + + fn count_beta_coefficients(mask: &[Vec], n_params: usize) -> usize { + let mut count = n_params; + for row in mask { + count += row.iter().filter(|&&x| x).count(); + } + count + } + + fn intercept_beta_index(&self, param_idx: usize) -> Result { + if param_idx >= self.n_params() { + bail!("Parameter index {} out of range", param_idx); + } + + let mut idx = 0; + for i in 0..param_idx { + idx += 1; + idx += self.covariate_mask[i].iter().filter(|&&x| x).count(); + } + Ok(idx) + } + + pub fn estimated_beta_indices(&self) -> Vec { + self.estimate_beta + .iter() + .enumerate() + .filter_map(|(i, &est)| if est { Some(i) } else { None }) + .collect() + } +} + +impl Default for CovariateModel { + fn default() -> Self { + Self { + param_names: Vec::new(), + covariate_names: Vec::new(), + covariate_mask: Vec::new(), + beta: 
Col::zeros(0), + estimate_beta: Vec::new(), + reference_values: HashMap::new(), + } + } +} + +impl Serialize for CovariateModel { + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + + let mut state = serializer.serialize_struct("CovariateModel", 6)?; + state.serialize_field("param_names", &self.param_names)?; + state.serialize_field("covariate_names", &self.covariate_names)?; + state.serialize_field("covariate_mask", &self.covariate_mask)?; + let beta_vec: Vec = (0..self.beta.nrows()).map(|i| self.beta[i]).collect(); + state.serialize_field("beta", &beta_vec)?; + state.serialize_field("estimate_beta", &self.estimate_beta)?; + state.serialize_field("reference_values", &self.reference_values)?; + state.end() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_intercept_only() { + let model = CovariateModel::intercept_only(vec!["CL", "V"]).unwrap(); + + assert_eq!(model.n_params(), 2); + assert_eq!(model.n_covariates(), 0); + assert_eq!(model.n_beta(), 2); + + let mut model = model; + model.set_intercepts(&[5.0, 50.0]).unwrap(); + + let mu = model.compute_mu(&HashMap::new()); + assert_eq!(mu[0], 5.0); + assert_eq!(mu[1], 50.0); + } + + #[test] + fn test_with_covariates() { + let model = CovariateModel::new( + vec!["CL", "V"], + vec!["WT", "SEX"], + vec![vec![true, false], vec![true, true]], + ) + .unwrap(); + + assert_eq!(model.n_params(), 2); + assert_eq!(model.n_covariates(), 2); + assert_eq!(model.n_beta(), 5); + } + + #[test] + fn test_compute_mu() { + let mut model = + CovariateModel::new(vec!["CL", "V"], vec!["WT"], vec![vec![true], vec![true]]).unwrap(); + + model + .set_beta(Col::from_fn(4, |i| match i { + 0 => 5.0, + 1 => 0.1, + 2 => 50.0, + 3 => 1.0, + _ => 0.0, + })) + .unwrap(); + + let mut covs = HashMap::new(); + covs.insert("WT".to_string(), 70.0); + let mu = model.compute_mu(&covs); + + assert!((mu[0] - 12.0).abs() < 1e-10); + assert!((mu[1] - 
120.0).abs() < 1e-10); + } + + #[test] + fn test_centering() { + let mut model = CovariateModel::new(vec!["CL"], vec!["WT"], vec![vec![true]]).unwrap(); + model.set_reference("WT", 70.0).unwrap(); + model + .set_beta(Col::from_fn(2, |i| if i == 0 { 5.0 } else { 0.1 })) + .unwrap(); + + let mut covs = HashMap::new(); + covs.insert("WT".to_string(), 70.0); + let mu = model.compute_mu(&covs); + assert!((mu[0] - 5.0).abs() < 1e-10); + + covs.insert("WT".to_string(), 80.0); + let mu = model.compute_mu(&covs); + assert!((mu[0] - 6.0).abs() < 1e-10); + } + + #[test] + fn test_from_saemix_matrix() { + let model = CovariateModel::from_saemix_matrix( + vec!["CL", "V"], + vec!["WT", "SEX"], + &[1.0, 0.0, 1.0, 1.0], + ) + .unwrap(); + + assert!(model.covariate_mask[0][0]); + assert!(!model.covariate_mask[0][1]); + assert!(model.covariate_mask[1][0]); + assert!(model.covariate_mask[1][1]); + } + + #[test] + fn test_fix_intercept() { + let mut model = CovariateModel::intercept_only(vec!["CL", "V"]).unwrap(); + + assert!(model.estimate_beta[0]); + model.fix_intercept(0).unwrap(); + assert!(!model.estimate_beta[0]); + assert!(model.estimate_beta[1]); + } +} diff --git a/src/model/covariates.rs b/src/model/covariates.rs new file mode 100644 index 000000000..a7d3ee955 --- /dev/null +++ b/src/model/covariates.rs @@ -0,0 +1,32 @@ +use serde::Serialize; + +use crate::model::CovariateModel; + +#[derive(Debug, Clone, Default, Serialize)] +pub enum CovariateSpec { + #[default] + InEquation, + Structured(CovariateEffectsSpec), +} + +#[derive(Debug, Clone, Default, Serialize)] +pub struct CovariateEffectsSpec { + pub subject_effects: Option, + pub occasion_effects: Option, +} + +impl CovariateEffectsSpec { + pub fn subject_columns(&self) -> Vec { + self.subject_effects + .as_ref() + .map(|model| model.covariate_names().to_vec()) + .unwrap_or_default() + } + + pub fn occasion_columns(&self) -> Vec { + self.occasion_effects + .as_ref() + .map(|model| model.covariate_names().to_vec()) + 
.unwrap_or_default() + } +} diff --git a/src/model/metadata.rs b/src/model/metadata.rs new file mode 100644 index 000000000..7363ad36a --- /dev/null +++ b/src/model/metadata.rs @@ -0,0 +1,8 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq, Eq)] +pub struct ModelMetadata { + pub name: Option, + pub description: Option, + pub tags: Vec, +} diff --git a/src/model/mod.rs b/src/model/mod.rs new file mode 100644 index 000000000..3766c3b3a --- /dev/null +++ b/src/model/mod.rs @@ -0,0 +1,97 @@ +use anyhow::{bail, Result}; +use pharmsol::equation::Equation; + +pub mod covariate_model; +pub mod covariates; +pub mod metadata; +pub mod observation_spec; +pub mod parameter_space; +pub mod variability; + +pub use covariate_model::CovariateModel; +pub use covariates::{CovariateEffectsSpec, CovariateSpec}; +pub use metadata::ModelMetadata; +pub use observation_spec::{ + ContinuousObservationSpec, ObservationChannel, ObservationLikelihood, ObservationSpec, +}; +pub use parameter_space::{ + ParameterDomain, ParameterSpace, ParameterSpec, ParameterTransform, ParameterVariability, +}; +pub use variability::{CovarianceStructure, RandomEffectsSpec, VariabilityModel}; + +#[derive(Debug, Clone)] +pub struct ModelDefinition { + pub equation: E, + pub parameters: ParameterSpace, + pub observations: ObservationSpec, + pub variability: VariabilityModel, + pub covariates: CovariateSpec, + pub metadata: ModelMetadata, +} + +impl ModelDefinition { + pub fn builder(equation: E) -> ModelDefinitionBuilder { + ModelDefinitionBuilder { + equation, + parameters: None, + observations: None, + variability: Some(VariabilityModel::default()), + covariates: Some(CovariateSpec::InEquation), + metadata: Some(ModelMetadata::default()), + } + } +} + +pub struct ModelDefinitionBuilder { + equation: E, + parameters: Option, + observations: Option, + variability: Option, + covariates: Option, + metadata: Option, +} + +impl ModelDefinitionBuilder { + pub 
fn parameters(mut self, parameters: ParameterSpace) -> Self { + self.parameters = Some(parameters); + self + } + + pub fn observations(mut self, observations: ObservationSpec) -> Self { + self.observations = Some(observations); + self + } + + pub fn variability(mut self, variability: VariabilityModel) -> Self { + self.variability = Some(variability); + self + } + + pub fn covariates(mut self, covariates: CovariateSpec) -> Self { + self.covariates = Some(covariates); + self + } + + pub fn metadata(mut self, metadata: ModelMetadata) -> Self { + self.metadata = Some(metadata); + self + } + + pub fn build(self) -> Result> { + let parameters = self + .parameters + .ok_or_else(|| anyhow::anyhow!("model parameters are required"))?; + if parameters.is_empty() { + bail!("model parameters cannot be empty"); + } + + Ok(ModelDefinition { + equation: self.equation, + parameters, + observations: self.observations.unwrap_or_default(), + variability: self.variability.unwrap_or_default(), + covariates: self.covariates.unwrap_or_default(), + metadata: self.metadata.unwrap_or_default(), + }) + } +} diff --git a/src/model/observation_spec.rs b/src/model/observation_spec.rs new file mode 100644 index 000000000..761fcc157 --- /dev/null +++ b/src/model/observation_spec.rs @@ -0,0 +1,70 @@ +use pharmsol::prelude::data::{AssayErrorModels, ResidualErrorModels}; +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ObservationSpec { + pub channels: Vec, + pub assay_error_models: AssayErrorModels, + pub residual_error_models: Option, +} + +impl ObservationSpec { + pub fn new() -> Self { + Self { + channels: Vec::new(), + assay_error_models: AssayErrorModels::new(), + residual_error_models: None, + } + } + + pub fn add_channel(mut self, channel: ObservationChannel) -> Self { + self.channels.push(channel); + self + } + + pub fn with_assay_error_models(mut self, assay_error_models: AssayErrorModels) -> Self { + self.assay_error_models = 
assay_error_models; + self + } + + pub fn with_residual_error_models( + mut self, + residual_error_models: ResidualErrorModels, + ) -> Self { + self.residual_error_models = Some(residual_error_models); + self + } +} + +impl Default for ObservationSpec { + fn default() -> Self { + Self::new() + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct ObservationChannel { + pub outeq: usize, + pub name: String, + pub likelihood: ObservationLikelihood, +} + +impl ObservationChannel { + pub fn continuous(outeq: usize, name: impl Into) -> Self { + Self { + outeq, + name: name.into(), + likelihood: ObservationLikelihood::Continuous(ContinuousObservationSpec::default()), + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub enum ObservationLikelihood { + Continuous(ContinuousObservationSpec), +} + +#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq, Eq)] +pub struct ContinuousObservationSpec { + pub supports_censoring: bool, +} diff --git a/src/model/parameter_space.rs b/src/model/parameter_space.rs new file mode 100644 index 000000000..6fa6859cb --- /dev/null +++ b/src/model/parameter_space.rs @@ -0,0 +1,136 @@ +use anyhow::{bail, Result}; +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct ParameterSpace { + pub items: Vec, +} + +impl ParameterSpace { + pub fn new() -> Self { + Self { items: Vec::new() } + } + + pub fn add(mut self, item: ParameterSpec) -> Self { + self.items.push(item); + self + } + + pub fn len(&self) -> usize { + self.items.len() + } + + pub fn is_empty(&self) -> bool { + self.items.is_empty() + } + + pub fn iter(&self) -> std::slice::Iter<'_, ParameterSpec> { + self.items.iter() + } + + pub fn names(&self) -> Vec { + self.items.iter().map(|item| item.name.clone()).collect() + } + + pub fn finite_ranges(&self) -> Result> { + self.items + .iter() + .map(|parameter| match parameter.domain { + ParameterDomain::Bounded { lower, 
upper } => Ok((lower, upper)), + ParameterDomain::Positive { + lower: Some(lower), + upper: Some(upper), + } + | ParameterDomain::Unbounded { + lower: Some(lower), + upper: Some(upper), + } => Ok((lower, upper)), + _ => bail!( + "nonparametric initialization requires finite lower/upper bounds for parameter '{}'", + parameter.name + ), + }) + .collect() + } +} + +impl Default for ParameterSpace { + fn default() -> Self { + Self::new() + } +} + +impl From<&ParameterSpace> for ParameterSpace { + fn from(parameter_space: &ParameterSpace) -> Self { + parameter_space.clone() + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct ParameterSpec { + pub name: String, + pub domain: ParameterDomain, + pub transform: ParameterTransform, + pub initial: Option, + pub estimate: bool, + pub variability: ParameterVariability, +} + +impl ParameterSpec { + pub fn bounded(name: impl Into, lower: f64, upper: f64) -> Self { + Self { + name: name.into(), + domain: ParameterDomain::Bounded { lower, upper }, + transform: ParameterTransform::Identity, + initial: None, + estimate: true, + variability: ParameterVariability::Subject, + } + } + + pub fn positive(name: impl Into) -> Self { + Self { + name: name.into(), + domain: ParameterDomain::Positive { + lower: Some(0.0), + upper: None, + }, + transform: ParameterTransform::LogNormal, + initial: None, + estimate: true, + variability: ParameterVariability::Subject, + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub enum ParameterDomain { + Positive { + lower: Option, + upper: Option, + }, + Unbounded { + lower: Option, + upper: Option, + }, + Bounded { + lower: f64, + upper: f64, + }, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub enum ParameterTransform { + Identity, + LogNormal, + Probit, + Logit, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub enum ParameterVariability { + FixedOnly, + Subject, + Occasion, + SubjectAndOccasion, +} diff 
--git a/src/model/variability.rs b/src/model/variability.rs new file mode 100644 index 000000000..02a5679b6 --- /dev/null +++ b/src/model/variability.rs @@ -0,0 +1,37 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct VariabilityModel { + pub subject: RandomEffectsSpec, + pub occasion: Option, +} + +impl Default for VariabilityModel { + fn default() -> Self { + Self { + subject: RandomEffectsSpec::default(), + occasion: None, + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct RandomEffectsSpec { + pub enabled_for: Vec, + pub covariance: CovarianceStructure, +} + +impl Default for RandomEffectsSpec { + fn default() -> Self { + Self { + enabled_for: Vec::new(), + covariance: CovarianceStructure::Diagonal, + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub enum CovarianceStructure { + Diagonal, + Full, +} diff --git a/src/output/file.rs b/src/output/file.rs new file mode 100644 index 000000000..e48210f4a --- /dev/null +++ b/src/output/file.rs @@ -0,0 +1,45 @@ +use anyhow::{Context, Result}; +use std::fs::{create_dir_all, File, OpenOptions}; +use std::path::{Path, PathBuf}; + +/// Contains all the necessary information of an output file. 
+#[derive(Debug)] +pub struct OutputFile { + file: File, + relative_path: PathBuf, +} + +impl OutputFile { + pub fn new(folder: &str, file_name: &str) -> Result { + let relative_path = Path::new(folder).join(file_name); + + if let Some(parent) = relative_path.parent() { + create_dir_all(parent) + .with_context(|| format!("Failed to create directories for {:?}", parent))?; + } + + let file = OpenOptions::new() + .write(true) + .create(true) + .truncate(true) + .open(&relative_path) + .with_context(|| format!("Failed to open file: {:?}", relative_path))?; + + Ok(Self { + file, + relative_path, + }) + } + + pub fn file(&self) -> &File { + &self.file + } + + pub fn file_owned(self) -> File { + self.file + } + + pub fn relative_path(&self) -> &Path { + &self.relative_path + } +} diff --git a/src/routines/logger.rs b/src/output/logging.rs similarity index 54% rename from src/routines/logger.rs rename to src/output/logging.rs index d9ba24e6a..f3140d4be 100644 --- a/src/routines/logger.rs +++ b/src/output/logging.rs @@ -1,7 +1,5 @@ use std::time::Instant; -use crate::routines::output::OutputFile; -use crate::routines::settings::Settings; use anyhow::Result; use tracing_subscriber::fmt::time::FormatTime; use tracing_subscriber::fmt::{self}; @@ -10,42 +8,33 @@ use tracing_subscriber::registry::Registry; use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::EnvFilter; -/// Setup logging for the library -/// -/// This function sets up logging for the library. It uses the `tracing` crate, and the `tracing-subscriber` crate for formatting. -/// -/// The log level is defined in the configuration file, and defaults to `INFO`. -/// -/// If `log_out` is specifified in the configuration file, a log file is created with the specified name. -/// -/// If not, the log messages are written to stdout. 
-pub(crate) fn setup_log(settings: &mut Settings) -> Result<()> { - // If neither `stdout` nor `file` are specified, return without setting the subscriber - if !settings.log().stdout && !settings.log().write { - return Ok(()); - } +use crate::api::{LoggingLevel, LoggingOptions, OutputPlan}; +use crate::output::OutputFile; - // Use the log level defined in configuration file - let log_level = settings.log().level.clone(); +pub(crate) fn setup_log_with_options(output: &OutputPlan, logging: &LoggingOptions) -> Result<()> { + let log_level = log_level_filter(logging.level); let env_filter = EnvFilter::new(format!("{},diffsol=off", log_level)); + if !logging.stdout && !logging.write { + let subscriber = Registry::default().with(env_filter); + let _ = subscriber.try_init(); + return Ok(()); + } + let timestamper = CompactTimestamp { start: Instant::now(), }; - // Define a registry with that level as an environment filter let subscriber = Registry::default().with(env_filter); - // If we do not want output files, we must create the log in the current directory - let outputfile = if !settings.output().write { + let outputfile = if !output.write { let cd = std::env::current_dir()?; OutputFile::new(&cd.to_string_lossy(), "log.txt")? } else { - OutputFile::new(&settings.output().path, "log.txt")? + OutputFile::new(output.path.as_deref().unwrap_or("outputs/"), "log.txt")? 
}; - // Define layer for file - let file_layer = match settings.log().write { + let file_layer = match logging.write { true => { let layer = fmt::layer() .with_writer(outputfile.file_owned()) @@ -57,8 +46,7 @@ pub(crate) fn setup_log(settings: &mut Settings) -> Result<()> { false => None, }; - // Define layer for stdout - let stdout_layer = match settings.log().stdout { + let stdout_layer = match logging.stdout { true => { let layer = fmt::layer() .with_writer(std::io::stdout) @@ -71,16 +59,21 @@ pub(crate) fn setup_log(settings: &mut Settings) -> Result<()> { false => None, }; - // Combine layers with subscriber - let res = subscriber.with(file_layer).with(stdout_layer).try_init(); - match res { - Ok(_) => {} - Err(e) => tracing::warn!("Failed to initialize logger: {}", e), - } + let _ = subscriber.with(file_layer).with(stdout_layer).try_init(); Ok(()) } +fn log_level_filter(level: LoggingLevel) -> &'static str { + match level { + LoggingLevel::Trace => "trace", + LoggingLevel::Debug => "debug", + LoggingLevel::Info => "info", + LoggingLevel::Warn => "warn", + LoggingLevel::Error => "error", + } +} + #[derive(Clone)] struct CompactTimestamp { start: Instant, diff --git a/src/output/mod.rs b/src/output/mod.rs new file mode 100644 index 000000000..a80a2878d --- /dev/null +++ b/src/output/mod.rs @@ -0,0 +1,8 @@ +mod file; +pub(crate) mod logging; +pub mod nonparametric; +pub mod shared; +pub mod writer; + +pub use file::OutputFile; +pub use writer::write_result; diff --git a/src/output/nonparametric.rs b/src/output/nonparametric.rs new file mode 100644 index 000000000..f0fa9d38b --- /dev/null +++ b/src/output/nonparametric.rs @@ -0,0 +1,44 @@ +use anyhow::Result; + +use crate::estimation::nonparametric::NonparametricWorkspace; +use crate::output::shared::shared_output_file_names; + +pub(crate) fn output_file_names( + result: &NonparametricWorkspace, +) -> Vec { + let mut files = shared_output_file_names(); + files.extend( + ["iterations.csv", "theta.csv", 
"posterior.csv"] + .into_iter() + .map(str::to_string), + ); + + let has_covariates = result.data().subjects().iter().any(|subject| { + subject + .occasions() + .iter() + .any(|occasion| !occasion.covariates().covariates().is_empty()) + }); + if has_covariates { + files.push("covariates.csv".to_string()); + } + + files.sort(); + files.dedup(); + files +} + +pub fn write_nonparametric_outputs( + result: &mut NonparametricWorkspace, +) -> Result<()> { + let parameter_names = result.get_theta().parameters().names(); + result + .cycle_log() + .write(result.output_folder(), ¶meter_names)?; + result.write_theta()?; + result.write_covariates()?; + result.write_posterior()?; + let (idelta, tad) = result.prediction_interval(); + result.calculate_predictions(idelta, tad)?; + Ok(()) +} diff --git a/src/output/shared.rs b/src/output/shared.rs new file mode 100644 index 000000000..4836d84fc --- /dev/null +++ b/src/output/shared.rs @@ -0,0 +1,113 @@ +use anyhow::Result; +use csv::WriterBuilder; +use serde::Serialize; + +use crate::algorithms::Algorithm; +use crate::api::{OutputPlan, RuntimeOptions}; +use crate::output::OutputFile; +use crate::results::{DiagnosticsBundle, FitSummary}; + +pub(crate) fn shared_output_file_names() -> Vec { + vec![ + "settings.json", + "summary.json", + "summary.csv", + "diagnostics.json", + "predictions.csv", + ] + .into_iter() + .map(str::to_string) + .collect() +} + +#[derive(Debug, Clone, Serialize)] +pub(crate) struct RunConfiguration { + pub algorithm: Algorithm, + pub output: OutputPlan, + pub runtime: RuntimeOptions, + pub parameter_names: Vec, +} + +impl RunConfiguration { + pub(crate) fn new( + algorithm: Algorithm, + output: &OutputPlan, + runtime: &RuntimeOptions, + parameter_names: Vec, + ) -> Self { + Self { + algorithm, + output: output.clone(), + runtime: runtime.clone(), + parameter_names, + } + } + + pub(crate) fn output_path(&self) -> &str { + self.output.path.as_deref().unwrap_or("outputs/") + } + + pub(crate) fn 
should_write_outputs(&self) -> bool { + self.output.write + } +} + +pub(crate) fn write_settings(folder: &str, configuration: &RunConfiguration) -> Result<()> { + let outputfile = OutputFile::new(folder, "settings.json")?; + let mut file = outputfile.file_owned(); + let serialized = serde_json::to_string_pretty(configuration)?; + std::io::Write::write_all(&mut file, serialized.as_bytes())?; + Ok(()) +} + +pub fn write_summary(folder: &str, summary: &FitSummary) -> Result<()> { + let outputfile = OutputFile::new(folder, "summary.json")?; + let mut file = outputfile.file_owned(); + let serialized = serde_json::to_string_pretty(summary)?; + std::io::Write::write_all(&mut file, serialized.as_bytes())?; + + let outputfile = OutputFile::new(folder, "summary.csv")?; + let mut writer = WriterBuilder::new() + .has_headers(true) + .from_writer(outputfile.file_owned()); + writer.write_record(["metric", "value"])?; + writer.write_record([ + "objective_function", + &summary.objective_function.to_string(), + ])?; + writer.write_record(["converged", &summary.converged.to_string()])?; + writer.write_record(["iterations", &summary.iterations.to_string()])?; + writer.write_record(["subject_count", &summary.subject_count.to_string()])?; + writer.write_record(["observation_count", &summary.observation_count.to_string()])?; + writer.write_record(["parameter_count", &summary.parameter_count.to_string()])?; + writer.write_record(["algorithm", &summary.algorithm])?; + writer.flush()?; + + Ok(()) +} + +pub fn write_diagnostics(folder: &str, diagnostics: &DiagnosticsBundle) -> Result<()> { + let outputfile = OutputFile::new(folder, "diagnostics.json")?; + let mut file = outputfile.file_owned(); + let serialized = serde_json::to_string_pretty(diagnostics)?; + std::io::Write::write_all(&mut file, serialized.as_bytes())?; + Ok(()) +} + +pub fn write_csv_rows( + folder: &str, + file_name: &str, + rows: impl IntoIterator, +) -> Result<()> { + let outputfile = OutputFile::new(folder, file_name)?; 
+ let mut writer = WriterBuilder::new() + .has_headers(true) + .from_writer(outputfile.file_owned()); + + for row in rows { + writer.serialize(row)?; + } + + writer.flush()?; + Ok(()) +} diff --git a/src/output/writer.rs b/src/output/writer.rs new file mode 100644 index 000000000..60ef89a9f --- /dev/null +++ b/src/output/writer.rs @@ -0,0 +1,72 @@ +use anyhow::Result; +use pharmsol::{Censor, Equation}; +use serde::Serialize; + +use crate::estimation::nonparametric as np_estimation; +use crate::estimation::nonparametric::NonparametricWorkspace; +use crate::output::{nonparametric as np_output, shared}; +use crate::results::FitResult; +use crate::results::{nonparametric_diagnostics, FitSummary}; + +#[derive(Debug, Clone, Serialize)] +struct SharedPredictionRow { + id: String, + time: f64, + outeq: usize, + block: usize, + obs: Option, + cens: Censor, + pred_population: f64, + pred_individual: f64, + residual_population: Option, + residual_individual: Option, + source_method: String, +} + +pub fn write_result(result: &mut FitResult) -> Result<()> { + match result { + FitResult::Nonparametric(inner) => write_nonparametric_result(inner)?, + } + + Ok(()) +} + +pub fn write_nonparametric_result( + result: &mut NonparametricWorkspace, +) -> Result<()> { + if !result.should_write_outputs() { + return Ok(()); + } + + let folder = result.output_folder().to_string(); + shared::write_settings(&folder, result.run_configuration())?; + shared::write_summary(&folder, &nonparametric_summary(result))?; + shared::write_diagnostics(&folder, &nonparametric_diagnostics(result))?; + np_output::write_nonparametric_outputs(result)?; + + if let Some(predictions) = result.predictions() { + let rows = predictions + .predictions() + .iter() + .map(|row| SharedPredictionRow { + id: row.id().to_string(), + time: row.time(), + outeq: row.outeq(), + block: row.block(), + obs: row.obs(), + cens: row.censoring(), + pred_population: row.pop_mean(), + pred_individual: row.post_mean(), + 
residual_population: row.obs().map(|obs| obs - row.pop_mean()), + residual_individual: row.obs().map(|obs| obs - row.post_mean()), + source_method: "nonparametric".to_string(), + }); + shared::write_csv_rows(&folder, "predictions.csv", rows)?; + } + + Ok(()) +} + +fn nonparametric_summary(result: &NonparametricWorkspace) -> FitSummary { + np_estimation::fit_summary(result) +} diff --git a/src/results/artifacts.rs b/src/results/artifacts.rs new file mode 100644 index 000000000..abd817c3e --- /dev/null +++ b/src/results/artifacts.rs @@ -0,0 +1,88 @@ +use std::fs; +use std::path::Path; + +use pharmsol::Equation; +use serde::{Deserialize, Serialize}; + +use crate::estimation::nonparametric::NonparametricWorkspace; +use crate::output::shared::shared_output_file_names; + +#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq, Eq)] +pub struct ArtifactIndex { + pub files: Vec, + pub expected_files: Vec, + pub missing_files: Vec, + pub shared_expected_files: Vec, + pub method_specific_expected_files: Vec, +} + +pub(crate) fn nonparametric_artifacts( + result: &NonparametricWorkspace, +) -> ArtifactIndex { + artifact_index( + result.output_folder(), + result.should_write_outputs(), + crate::output::nonparametric::output_file_names(result), + ) +} + +fn artifact_index( + folder: &str, + should_write_outputs: bool, + mut expected_files: Vec, +) -> ArtifactIndex { + if !should_write_outputs { + return ArtifactIndex::default(); + } + + expected_files.sort(); + expected_files.dedup(); + + let shared_output_files = shared_output_file_names(); + let shared_expected_files = expected_files + .iter() + .filter(|file| shared_output_files.contains(*file)) + .cloned() + .collect::>(); + let method_specific_expected_files = expected_files + .iter() + .filter(|file| !shared_output_files.contains(*file)) + .cloned() + .collect::>(); + + let path = Path::new(folder); + if !path.exists() { + return ArtifactIndex { + files: Vec::new(), + missing_files: expected_files.clone(), + 
expected_files, + shared_expected_files, + method_specific_expected_files, + }; + } + + let mut files = expected_files + .iter() + .filter(|file| { + fs::metadata(path.join(file)) + .map(|meta| meta.is_file()) + .unwrap_or(false) + }) + .cloned() + .collect::>(); + files.sort(); + + let missing_files = expected_files + .iter() + .filter(|file| !files.contains(file)) + .cloned() + .collect(); + + ArtifactIndex { + files, + expected_files, + missing_files, + shared_expected_files, + method_specific_expected_files, + } +} diff --git a/src/results/diagnostics.rs b/src/results/diagnostics.rs new file mode 100644 index 000000000..cfeaf2b5a --- /dev/null +++ b/src/results/diagnostics.rs @@ -0,0 +1,59 @@ +use std::collections::BTreeMap; + +use pharmsol::Equation; +use serde::{Deserialize, Serialize}; + +use crate::estimation::nonparametric::NonparametricWorkspace; + +#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq, Eq)] +pub struct DiagnosticsBundle { + pub warnings: Vec, + pub deferred_features: Vec, + pub convergence_notes: Vec, + pub estimator_metadata: BTreeMap, +} + +pub(crate) fn nonparametric_diagnostics( + result: &NonparametricWorkspace, +) -> DiagnosticsBundle { + let mut convergence_notes = Vec::new(); + if result.converged() { + convergence_notes.push("Estimator reported convergence.".to_string()); + } else { + convergence_notes.push("Estimator stopped without convergence.".to_string()); + } + + let status = result + .cycle_log() + .cycles() + .last() + .map(|cycle| format!("{:?}", cycle.status())) + .unwrap_or_else(|| "Continue".to_string()); + + let mut estimator_metadata = BTreeMap::new(); + estimator_metadata.insert("algorithm".to_string(), format!("{:?}", result.algorithm())); + estimator_metadata.insert("status".to_string(), status); + estimator_metadata.insert( + "outputs_requested".to_string(), + result.should_write_outputs().to_string(), + ); + estimator_metadata.insert( + "support_point_count".to_string(), + 
result.get_theta().nspp().to_string(), + ); + estimator_metadata.insert( + "prediction_cache".to_string(), + if result.predictions().is_some() { + "available".to_string() + } else { + "not_materialized".to_string() + }, + ); + + DiagnosticsBundle { + warnings: Vec::new(), + deferred_features: Vec::new(), + convergence_notes, + estimator_metadata, + } +} diff --git a/src/results/fit_result.rs b/src/results/fit_result.rs new file mode 100644 index 000000000..daa0db4ce --- /dev/null +++ b/src/results/fit_result.rs @@ -0,0 +1,74 @@ +use anyhow::Result; +use pharmsol::Equation; + +use crate::estimation::nonparametric; +use crate::estimation::nonparametric::NonparametricWorkspace; +use crate::results::{ + nonparametric_artifacts, nonparametric_diagnostics, nonparametric_predictions, ArtifactIndex, + DiagnosticsBundle, FitSummary, IndividualSummary, PopulationSummary, PredictionsBundle, +}; + +#[derive(Debug)] +pub enum FitResult { + Nonparametric(NonparametricWorkspace), +} + +impl FitResult { + pub fn objf(&self) -> f64 { + match self { + Self::Nonparametric(result) => result.objf(), + } + } + + pub fn converged(&self) -> bool { + match self { + Self::Nonparametric(result) => result.converged(), + } + } + + pub fn write_outputs(&mut self) -> Result<()> { + crate::output::write_result(self) + } + + pub fn summary(&self) -> FitSummary { + match self { + Self::Nonparametric(result) => nonparametric::fit_summary(result), + } + } + + pub fn population_summary(&self) -> PopulationSummary { + match self { + Self::Nonparametric(result) => nonparametric::population_summary(result), + } + } + + pub fn individual_summaries(&self) -> Vec { + match self { + Self::Nonparametric(result) => nonparametric::individual_summaries(result), + } + } + + pub fn diagnostics(&self) -> DiagnosticsBundle { + match self { + Self::Nonparametric(result) => nonparametric_diagnostics(result), + } + } + + pub fn predictions(&self) -> PredictionsBundle { + match self { + Self::Nonparametric(result) => 
nonparametric_predictions(result), + } + } + + pub fn artifacts(&self) -> ArtifactIndex { + match self { + Self::Nonparametric(result) => nonparametric_artifacts(result), + } + } + + pub fn as_nonparametric(&self) -> Option<&NonparametricWorkspace> { + match self { + Self::Nonparametric(result) => Some(result), + } + } +} diff --git a/src/results/mod.rs b/src/results/mod.rs new file mode 100644 index 000000000..3ff684906 --- /dev/null +++ b/src/results/mod.rs @@ -0,0 +1,15 @@ +mod artifacts; +mod diagnostics; +mod fit_result; +mod predictions; +mod summary; + +pub use artifacts::ArtifactIndex; +pub use diagnostics::DiagnosticsBundle; +pub use fit_result::FitResult; +pub use predictions::PredictionsBundle; +pub use summary::{FitSummary, IndividualSummary, ParameterSummary, PopulationSummary}; + +pub(crate) use artifacts::nonparametric_artifacts; +pub(crate) use diagnostics::nonparametric_diagnostics; +pub(crate) use predictions::nonparametric_predictions; diff --git a/src/results/predictions.rs b/src/results/predictions.rs new file mode 100644 index 000000000..8f3118999 --- /dev/null +++ b/src/results/predictions.rs @@ -0,0 +1,38 @@ +use pharmsol::Equation; +use serde::{Deserialize, Serialize}; + +use crate::estimation::nonparametric::NonparametricWorkspace; +use crate::results::nonparametric_artifacts; + +#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq, Eq)] +pub struct PredictionsBundle { + pub available: bool, + pub row_count: Option, + pub source: Option, + pub artifact: Option, +} + +pub(crate) fn nonparametric_predictions( + result: &NonparametricWorkspace, +) -> PredictionsBundle { + let artifact = nonparametric_artifacts(result) + .files + .into_iter() + .find(|file| file == "predictions.csv"); + + if let Some(predictions) = result.predictions() { + return PredictionsBundle { + available: true, + row_count: Some(predictions.predictions().len()), + source: Some("in_memory".to_string()), + artifact, + }; + } + + PredictionsBundle { + 
available: artifact.is_some(), + row_count: None, + source: artifact.as_ref().map(|_| "artifact".to_string()), + artifact, + } +} diff --git a/src/results/summary.rs b/src/results/summary.rs new file mode 100644 index 000000000..1ad5de295 --- /dev/null +++ b/src/results/summary.rs @@ -0,0 +1,34 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct FitSummary { + pub objective_function: f64, + pub converged: bool, + pub iterations: usize, + pub subject_count: usize, + pub observation_count: usize, + pub parameter_count: usize, + pub algorithm: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct PopulationSummary { + pub parameters: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct ParameterSummary { + pub name: String, + pub mean: f64, + pub median: f64, + pub sd: f64, + pub cv_percent: f64, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct IndividualSummary { + pub id: String, + pub parameter_names: Vec, + pub estimates: Vec, + pub standard_errors: Option>, +} diff --git a/src/routines/condensation/mod.rs b/src/routines/condensation/mod.rs deleted file mode 100644 index d01533b6b..000000000 --- a/src/routines/condensation/mod.rs +++ /dev/null @@ -1,107 +0,0 @@ -use crate::algorithms::npag::{burke, qr}; -use crate::structs::psi::Psi; -use crate::structs::theta::Theta; -use crate::structs::weights::Weights; -use anyhow::Result; - -/// Apply lambda filtering and QR decomposition to condense support points -/// -/// This implements the condensation step used in NPAG algorithms: -/// 1. Filter support points by lambda (probability) threshold -/// 2. Apply QR decomposition to remove linearly dependent points -/// 3. 
Recalculate weights with Burke's IPM on filtered points -/// -/// # Arguments -/// -/// * `theta` - Support points matrix -/// * `psi` - Likelihood matrix (subjects × support points) -/// * `lambda` - Initial probability weights for support points -/// * `lambda_threshold` - Minimum lambda value (relative to max) to keep a point -/// * `qr_threshold` - QR decomposition threshold for linear independence (typically 1e-8) -/// -/// # Returns -/// -/// Returns filtered theta, psi, and recalculated weights, plus the objective function value -pub fn condense_support_points( - theta: &Theta, - psi: &Psi, - lambda: &Weights, - lambda_threshold: f64, - qr_threshold: f64, -) -> Result<(Theta, Psi, Weights, f64)> { - let mut filtered_theta = theta.clone(); - let mut filtered_psi = psi.clone(); - - // Step 1: Lambda filtering - let max_lambda = lambda.iter().fold(f64::NEG_INFINITY, |acc, x| x.max(acc)); - - let threshold = max_lambda * lambda_threshold; - - let keep_lambda: Vec = lambda - .iter() - .enumerate() - .filter(|(_, lam)| *lam > threshold) - .map(|(i, _)| i) - .collect(); - - let initial_count = theta.matrix().nrows(); - let after_lambda = keep_lambda.len(); - - if initial_count != after_lambda { - tracing::debug!( - "Lambda filtering ({:.0e} × max): {} -> {} support points", - lambda_threshold, - initial_count, - after_lambda - ); - } - - filtered_theta.filter_indices(&keep_lambda); - filtered_psi.filter_column_indices(&keep_lambda); - - // Step 2: QR decomposition filtering - let (r, perm) = qr::qrd(&filtered_psi)?; - - let mut keep_qr = Vec::::new(); - - // The minimum between the number of subjects and the actual number of support points - let keep_n = filtered_psi - .matrix() - .ncols() - .min(filtered_psi.matrix().nrows()); - - for i in 0..keep_n { - let test = r.col(i).norm_l2(); - let r_diag_val = r.get(i, i); - let ratio = r_diag_val / test; - if ratio.abs() >= qr_threshold { - keep_qr.push(*perm.get(i).unwrap()); - } - } - - let after_qr = keep_qr.len(); - 
- if after_lambda != after_qr { - tracing::debug!( - "QR decomposition (threshold {:.0e}): {} -> {} support points", - qr_threshold, - after_lambda, - after_qr - ); - } - - filtered_theta.filter_indices(&keep_qr); - filtered_psi.filter_column_indices(&keep_qr); - - // Step 3: Recalculate weights with Burke's IPM - let (final_weights, objf) = burke(&filtered_psi)?; - - tracing::debug!( - "Condensation complete: {} -> {} support points (objective: {:.4})", - initial_count, - filtered_theta.matrix().nrows(), - objf - ); - - Ok((filtered_theta, filtered_psi, final_weights, objf)) -} diff --git a/src/routines/estimation/ipm.rs b/src/routines/estimation/ipm.rs deleted file mode 100644 index fbb1768b2..000000000 --- a/src/routines/estimation/ipm.rs +++ /dev/null @@ -1,517 +0,0 @@ -use crate::structs::psi::Psi; -use crate::structs::weights::Weights; -use anyhow::bail; -use faer::linalg::triangular_solve::solve_lower_triangular_in_place; -use faer::linalg::triangular_solve::solve_upper_triangular_in_place; -use faer::{Col, Mat, Row}; -use rayon::prelude::*; -/// Applies Burke's Interior Point Method (IPM) to solve a convex optimization problem. -/// -/// The objective function to maximize is: -/// f(x) = Σ(log(Σ(ψ_ij * x_j))) for i = 1 to n_sub -/// -/// subject to: -/// 1. x_j ≥ 0 for all j = 1 to n_point, -/// 2. Σ(x_j) = 1, -/// -/// where ψ is an n_sub×n_point matrix with non-negative entries and x is a probability vector. -/// -/// # Arguments -/// -/// * `psi` - A reference to a Psi structure containing the input matrix. -/// -/// # Returns -/// -/// On success, returns a tuple `(weights, obj)` where: -/// - [Weights] contains the optimized weights (probabilities) for each support point. -/// - `obj` is the value of the objective function at the solution. -/// -/// # Errors -/// -/// This function returns an error if any step in the optimization (e.g. Cholesky factorization) -/// fails. 
-pub fn burke(psi: &Psi) -> anyhow::Result<(Weights, f64)> { - let mut psi = psi.matrix().to_owned(); - - // Ensure all entries are finite and make them non-negative. - psi.row_iter_mut().try_for_each(|row| { - row.iter_mut().try_for_each(|x| { - if !x.is_finite() { - bail!("Input matrix must have finite entries") - } else { - // Coerce negatives to non-negative (could alternatively return an error) - *x = x.abs(); - Ok(()) - } - }) - })?; - - // Let psi be of shape (n_sub, n_point) - let (n_sub, n_point) = psi.shape(); - - // Create unit vectors: - // ecol: ones vector of length n_point (used for sums over points) - // erow: ones row of length n_sub (used for sums over subproblems) - let ecol: Col = Col::from_fn(n_point, |_| 1.0); - let erow: Row = Row::from_fn(n_sub, |_| 1.0); - - // Compute plam = psi · ecol. This gives a column vector of length n_sub. - let mut plam: Col = &psi * &ecol; - let eps: f64 = 1e-8; - let mut sig: f64 = 0.0; - - // Initialize lam (the variable we optimize) as a column vector of ones (length n_point). - let mut lam = ecol.clone(); - - // w = 1 ./ plam, elementwise. - let mut w: Col = Col::from_fn(plam.nrows(), |i| 1.0 / plam.get(i)); - - // ptw = ψᵀ · w, which will be a vector of length n_point. - let mut ptw: Col = psi.transpose() * &w; - - // Use the maximum entry in ptw for scaling (the "shrink" factor). - let ptw_max = ptw.iter().fold(f64::NEG_INFINITY, |acc, &x| x.max(acc)); - let shrink = 2.0 * ptw_max; - lam *= shrink; - plam *= shrink; - w /= shrink; - ptw /= shrink; - - // y = ecol - ptw (a vector of length n_point). - let mut y: Col = &ecol - &ptw; - // r = erow - (w .* plam) (elementwise product; r has length n_sub). - let mut r: Col = Col::from_fn(n_sub, |i| erow.get(i) - w.get(i) * plam.get(i)); - let mut norm_r: f64 = r.iter().fold(0.0, |max, &val| max.max(val.abs())); - - // Compute the duality gap. 
- let sum_log_plam: f64 = plam.iter().map(|x| x.ln()).sum(); - let sum_log_w: f64 = w.iter().map(|x| x.ln()).sum(); - let mut gap: f64 = (sum_log_w + sum_log_plam).abs() / (1.0 + sum_log_plam); - - // Compute the duality measure mu. - let mut mu = lam.transpose() * &y / n_point as f64; - - let mut psi_inner: Mat = Mat::zeros(psi.nrows(), psi.ncols()); - - let n_threads = faer::get_global_parallelism().degree(); - - let rows = psi.nrows(); - - let mut output: Vec> = (0..n_threads).map(|_| Mat::zeros(rows, rows)).collect(); - - let mut h: Mat = Mat::zeros(rows, rows); - - while mu > eps || norm_r > eps || gap > eps { - let smu = sig * mu; - // inner = lam ./ y, elementwise. - let inner = Col::from_fn(lam.nrows(), |i| lam.get(i) / y.get(i)); - // w_plam = plam ./ w, elementwise (length n_sub). - let w_plam = Col::from_fn(plam.nrows(), |i| plam.get(i) / w.get(i)); - - // Scale each column of psi by the corresponding element of 'inner' - - if psi.ncols() > n_threads * 128 { - psi_inner - .par_col_partition_mut(n_threads) - .zip(psi.par_col_partition(n_threads)) - .zip(inner.par_partition(n_threads)) - .zip(output.par_iter_mut()) - .for_each(|(((mut psi_inner, psi), inner), output)| { - psi_inner - .as_mut() - .col_iter_mut() - .zip(psi.col_iter()) - .zip(inner.iter()) - .for_each(|((col, psi_col), inner_val)| { - col.iter_mut().zip(psi_col.iter()).for_each(|(x, psi_val)| { - *x = psi_val * inner_val; - }); - }); - faer::linalg::matmul::triangular::matmul( - output.as_mut(), - faer::linalg::matmul::triangular::BlockStructure::TriangularLower, - faer::Accum::Replace, - &psi_inner, - faer::linalg::matmul::triangular::BlockStructure::Rectangular, - psi.transpose(), - faer::linalg::matmul::triangular::BlockStructure::Rectangular, - 1.0, - faer::Par::Seq, - ); - }); - - let mut first_iter = true; - for output in &output { - if first_iter { - h.copy_from(output); - first_iter = false; - } else { - h += output; - } - } - } else { - psi_inner - .as_mut() - .col_iter_mut() - 
.zip(psi.col_iter()) - .zip(inner.iter()) - .for_each(|((col, psi_col), inner_val)| { - col.iter_mut().zip(psi_col.iter()).for_each(|(x, psi_val)| { - *x = psi_val * inner_val; - }); - }); - faer::linalg::matmul::triangular::matmul( - h.as_mut(), - faer::linalg::matmul::triangular::BlockStructure::TriangularLower, - faer::Accum::Replace, - &psi_inner, - faer::linalg::matmul::triangular::BlockStructure::Rectangular, - psi.transpose(), - faer::linalg::matmul::triangular::BlockStructure::Rectangular, - 1.0, - faer::Par::Seq, - ); - } - - for i in 0..h.nrows() { - h[(i, i)] += w_plam[i]; - } - - let uph = match h.llt(faer::Side::Lower) { - Ok(llt) => llt, - Err(_) => { - bail!("Error during Cholesky decomposition. The matrix might not be positive definite. This is usually due to model misspecification or numerical issues.") - } - }; - let uph = uph.L().transpose().to_owned(); - - // smuyinv = smu * (ecol ./ y) - let smuyinv: Col = Col::from_fn(ecol.nrows(), |i| smu * (ecol[i] / y[i])); - - // let smuyinv = smu * (&ecol / &y); - // rhsdw = (erow ./ w) - (psi · smuyinv) - let psi_dot_muyinv: Col = &psi * &smuyinv; - - let rhsdw: Row = Row::from_fn(erow.ncols(), |i| erow[i] / w[i] - psi_dot_muyinv[i]); - - //let rhsdw = (&erow / &w) - psi * &smuyinv; - // Reshape rhsdw into a column vector. - let mut dw = Mat::from_fn(rhsdw.ncols(), 1, |i, _j| *rhsdw.get(i)); - - // let a = rhsdw - // .into_shape((n_sub, 1)) - // .context("Failed to reshape rhsdw").unwrap(); - - // Solve the triangular systems: - - solve_lower_triangular_in_place(uph.transpose().as_ref(), dw.as_mut(), faer::Par::rayon(0)); - - solve_upper_triangular_in_place(uph.as_ref(), dw.as_mut(), faer::Par::rayon(0)); - - // Extract dw (a column vector) from the solution. 
- let dw = dw.col(0); - - // let dw = dw_aux.column(0); - // Compute dy = - (ψᵀ · dw) - let dy = -(psi.transpose() * dw); - - let inner_times_dy = Col::from_fn(ecol.nrows(), |i| inner[i] * dy[i]); - - let dlam: Row = - Row::from_fn(ecol.nrows(), |i| smuyinv[i] - lam[i] - inner_times_dy[i]); - // let dlam = &smuyinv - &lam - inner.transpose() * &dy; - - // Compute the primal step length alfpri. - let ratio_dlam_lam = Row::from_fn(lam.nrows(), |i| dlam[i] / lam[i]); - //let ratio_dlam_lam = &dlam / &lam; - let min_ratio_dlam = ratio_dlam_lam.iter().cloned().fold(f64::INFINITY, f64::min); - let mut alfpri: f64 = -1.0 / min_ratio_dlam.min(-0.5); - alfpri = (0.99995 * alfpri).min(1.0); - - // Compute the dual step length alfdual. - let ratio_dy_y = Row::from_fn(y.nrows(), |i| dy[i] / y[i]); - // let ratio_dy_y = &dy / &y; - let min_ratio_dy = ratio_dy_y.iter().cloned().fold(f64::INFINITY, f64::min); - let ratio_dw_w = Row::from_fn(dw.nrows(), |i| dw[i] / w[i]); - //let ratio_dw_w = &dw / &w; - let min_ratio_dw = ratio_dw_w.iter().cloned().fold(f64::INFINITY, f64::min); - let mut alfdual = -1.0 / min_ratio_dy.min(-0.5); - alfdual = alfdual.min(-1.0 / min_ratio_dw.min(-0.5)); - alfdual = (0.99995 * alfdual).min(1.0); - - // Update the iterates. - lam += alfpri * dlam.transpose(); - w += alfdual * dw; - y += alfdual * &dy; - - mu = lam.transpose() * &y / n_point as f64; - plam = &psi * &lam; - - // mu = lam.dot(&y) / n_point as f64; - // plam = psi.dot(&lam); - r = Col::from_fn(n_sub, |i| erow.get(i) - w.get(i) * plam.get(i)); - ptw -= alfdual * dy; - - norm_r = r.norm_max(); - let sum_log_plam: f64 = plam.iter().map(|x| x.ln()).sum(); - let sum_log_w: f64 = w.iter().map(|x| x.ln()).sum(); - gap = (sum_log_w + sum_log_plam).abs() / (1.0 + sum_log_plam); - - // Adjust sigma. 
- if mu < eps && norm_r > eps { - sig = 1.0; - } else { - let candidate1 = (1.0 - alfpri).powi(2); - let candidate2 = (1.0 - alfdual).powi(2); - let candidate3 = (norm_r - mu) / (norm_r + 100.0 * mu); - sig = candidate1.max(candidate2).max(candidate3).min(0.3); - } - } - // Scale lam. - lam /= n_sub as f64; - // Compute the objective function value: sum(ln(psi·lam)). - let obj = (psi * &lam).iter().map(|x| x.ln()).sum(); - // Normalize lam to sum to 1. - let lam_sum: f64 = lam.iter().sum(); - lam = &lam / lam_sum; - - Ok((lam.into(), obj)) -} - -#[cfg(test)] -mod tests { - use super::*; - use approx::assert_relative_eq; - use faer::Mat; - - #[test] - fn test_burke_identity() { - // Test with a small identity matrix - // For an identity matrix, each support point should have equal weight - let n = 100; - let mat = Mat::identity(n, n); - let psi = Psi::from(mat); - - let (lam, _) = burke(&psi).unwrap(); - - // For identity matrix, all lambda values should be equal - let expected = 1.0 / n as f64; - for i in 0..n { - assert_relative_eq!(lam[i], expected, epsilon = 1e-10); - } - - // Check that lambda sums to 1 - assert_relative_eq!(lam.iter().sum::(), 1.0, epsilon = 1e-10); - } - - #[test] - fn test_burke_uniform_square() { - // Test with a matrix of all ones - // This should also result in uniform weights - let n_sub = 10; - let n_point = 10; - let mat = Mat::from_fn(n_sub, n_point, |_, _| 1.0); - let psi = Psi::from(mat); - - let (lam, _) = burke(&psi).unwrap(); - - // Check that lambda sums to 1 - assert_relative_eq!(lam.iter().sum::(), 1.0, epsilon = 1e-10); - - // For uniform matrix, all lambda values should be equal - let expected = 1.0 / n_point as f64; - for i in 0..n_point { - assert_relative_eq!(lam[i], expected, epsilon = 1e-10); - } - } - - #[test] - fn test_burke_uniform_wide() { - // Test with a matrix of all ones - // This should also result in uniform weights - let n_sub = 10; - let n_point = 100; - let mat = Mat::from_fn(n_sub, n_point, |_, _| 1.0); - 
let psi = Psi::from(mat); - - let (lam, _) = burke(&psi).unwrap(); - - // Check that lambda sums to 1 - assert_relative_eq!(lam.iter().sum::(), 1.0, epsilon = 1e-10); - - // For uniform matrix, all lambda values should be equal - let expected = 1.0 / n_point as f64; - for i in 0..n_point { - assert_relative_eq!(lam[i], expected, epsilon = 1e-10); - } - } - - #[test] - fn test_burke_uniform_long() { - // Test with a matrix of all ones - // This should also result in uniform weights - let n_sub = 100; - let n_point = 10; - let mat = Mat::from_fn(n_sub, n_point, |_, _| 1.0); - let psi = Psi::from(mat); - - let (lam, _) = burke(&psi).unwrap(); - - // Check that lambda sums to 1 - assert_relative_eq!(lam.iter().sum::(), 1.0, epsilon = 1e-10); - - // For uniform matrix, all lambda values should be equal - let expected = 1.0 / n_point as f64; - for i in 0..n_point { - assert_relative_eq!(lam[i], expected, epsilon = 1e-10); - } - } - - #[test] - fn test_burke_with_non_uniform_matrix() { - // Test with a non-uniform matrix - // Create a matrix where one column is clearly better - let n_sub = 3; - let n_point = 4; - let mat = Mat::from_fn(n_sub, n_point, |_, j| if j == 0 { 10.0 } else { 1.0 }); - let psi = Psi::from(mat); - - let (lam, _) = burke(&psi).unwrap(); - - // Check that lambda sums to 1 - assert_relative_eq!(lam.iter().sum::(), 1.0, epsilon = 1e-10); - - // First support point should have highest weight - assert!(lam[0] > lam[1]); - assert!(lam[0] > lam[2]); - assert!(lam[0] > lam[3]); - } - - #[test] - fn test_burke_with_negative_values() { - // The algorithm should handle negative values by taking their absolute value - let n_sub = 2; - let n_point = 3; - let mat = Mat::from_fn( - n_sub, - n_point, - |i, j| if i == 0 && j == 0 { -5.0 } else { 1.0 }, - ); - let psi = Psi::from(mat); - - let result = burke(&psi); - assert!(result.is_ok()); - - let (lam, _) = result.unwrap(); - // Check that lambda sums to 1 - assert_relative_eq!(lam.iter().sum::(), 1.0, epsilon = 
1e-10); - - // First support point should have highest weight due to the high absolute value - assert!(lam[0] > lam[1]); - assert!(lam[0] > lam[2]); - } - - #[test] - fn test_burke_with_non_finite_values() { - // The algorithm should return an error for non-finite values - let n_sub = 10; - let n_point = 10; - let mat = Mat::from_fn(n_sub, n_point, |i, j| { - if i == 0 && j == 0 { - f64::NAN - } else { - 1.0 - } - }); - let psi = Psi::from(mat); - - let result = burke(&psi); - assert!(result.is_err()); - } - - #[test] - fn test_burke_large_matrix_parallel_processing() { - // Test with a large matrix to trigger the parallel processing code path - // This should exceed n_threads * 128 threshold - let n_sub = 50; - let n_point = 10000; - - // Create a simple uniform matrix - // The main goal is to test that parallel processing works correctly - let mat = Mat::from_fn(n_sub, n_point, |_i, _j| 1.0); - let psi = Psi::from(mat); - - let result = burke(&psi); - assert!( - result.is_ok(), - "Burke algorithm should succeed with large matrix" - ); - - let (lam, obj) = result.unwrap(); - - // Verify basic mathematical properties of the solution - assert_relative_eq!(lam.iter().sum::(), 1.0, epsilon = 1e-10); - - // All lambda values should be non-negative - for i in 0..n_point { - assert!(lam[i] >= 0.0, "Lambda values should be non-negative"); - } - - // The objective function should be finite - assert!(obj.is_finite(), "Objective function should be finite"); - - // The main test: verify that the parallel processing path was executed - // and produced a valid probability distribution - // For a uniform matrix, we expect roughly uniform weights, but the exact - // distribution depends on the optimization algorithm's convergence - - // Just verify that no single weight dominates excessively (basic sanity check) - let max_weight = lam - .weights() - .iter() - .cloned() - .fold(f64::NEG_INFINITY, f64::max); - assert!( - max_weight < 0.1, - "No single weight should dominate in 
uniform matrix (max weight: {})", - max_weight - ); - } - - #[test] - fn test_burke_medium_matrix_sequential_processing() { - // Test with a medium-sized matrix that should NOT trigger parallel processing - // This serves as a comparison to ensure both code paths produce similar results - let n_sub = 50; - let n_point = 500; // This should be < n_threads * 128 threshold - - // Use the same pattern as the large matrix test - let mat = Mat::from_fn(n_sub, n_point, |i, j| { - if j % 100 == 0 { - 5.0 + 0.1 * (i as f64) - } else { - 1.0 + 0.01 * (i as f64) + 0.001 * (j as f64) - } - }); - let psi = Psi::from(mat); - - let result = burke(&psi); - assert!( - result.is_ok(), - "Burke algorithm should succeed with medium matrix" - ); - - let (lam, obj) = result.unwrap(); - - // Verify basic properties of the solution - assert_relative_eq!(lam.iter().sum::(), 1.0, epsilon = 1e-10); - - // All lambda values should be non-negative - for i in 0..n_point { - assert!(lam[i] >= 0.0, "Lambda values should be non-negative"); - } - - // The objective function should be finite - assert!(obj.is_finite(), "Objective function should be finite"); - } -} diff --git a/src/routines/estimation/mod.rs b/src/routines/estimation/mod.rs deleted file mode 100644 index b9ad757e7..000000000 --- a/src/routines/estimation/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub mod ipm; -pub mod qr; diff --git a/src/routines/estimation/qr.rs b/src/routines/estimation/qr.rs deleted file mode 100644 index acc104d26..000000000 --- a/src/routines/estimation/qr.rs +++ /dev/null @@ -1,98 +0,0 @@ -use crate::structs::psi::Psi; -use anyhow::{bail, Result}; -use faer::linalg::solvers::ColPivQr; -use faer::Mat; - -/// Perform a QR decomposition on the Psi matrix -/// -/// Normalizes each row of the matrix to sum to 1 before decomposition. -/// Returns the R matrix from QR decomposition and the column permutation vector. 
-/// -/// # Arguments -/// * `psi` - The Psi matrix to decompose -/// -/// # Returns -/// * Tuple containing the R matrix (as [faer::Mat]) and permutation vector (as [Vec]) -/// * Error if any row in the matrix sums to zero -pub fn qrd(psi: &Psi) -> Result<(Mat, Vec)> { - let mut mat = psi.matrix().to_owned(); - - // Normalize the rows to sum to 1 - for (index, row) in mat.row_iter_mut().enumerate() { - let row_sum: f64 = row.as_ref().iter().sum(); - - // Check if the row sum is zero - if row_sum.abs() == 0.0 { - bail!("In psi, the row with index {} sums to zero", index); - } - row.iter_mut().for_each(|x| *x /= row_sum); - } - - // Perform column pivoted QR decomposition - let qr: ColPivQr = mat.col_piv_qr(); - - // Extract the R matrix - let r_mat: faer::Mat = qr.R().to_owned(); - - // Get the permutation information - let perm = qr.P().arrays().0.to_vec(); - Ok((r_mat, perm)) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_identity() { - // Create a 2x2 identity matrix - let mat: Mat = Mat::identity(10, 10); - let psi = Psi::from(mat); - - // Perform the QR decomposition - let (r_mat, perm) = qrd(&psi).unwrap(); - - // Check that R is an identity matrix - let expected_r_mat: Mat = Mat::identity(10, 10); - assert_eq!(r_mat, expected_r_mat); - - // Check that the permutation is the identity - assert_eq!(perm, vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); - } - - #[test] - fn test_with_zero_row_sum() { - // Create a test matrix with a row that sums to zero - let mat = Mat::from_fn(2, 2, |i, j| { - if i == 0 && j == 0 { - 1.0 - } else if i == 0 && j == 1 { - 2.0 - } else { - 0.0 // Row that sums to zero for i == 1 - } - }); - let psi = Psi::from(mat); - - // Perform the QR decomposition - let result = qrd(&psi); - - // Confirm that the function returns an error - assert!(result.is_err(), "Expected an error due to zero row sum"); - } - - #[test] - fn test_empty_matrix() { - // Create an empty Psi - let mat = Mat::::new(); - let psi = Psi::from(mat); - - 
// Should not panic - let (r_mat, perm) = qrd(&psi).unwrap(); - - // Empty matrix should produce empty results - assert_eq!(r_mat.nrows(), 0); - assert_eq!(r_mat.ncols(), 0); - assert_eq!(perm.len(), 0); - } -} diff --git a/src/routines/expansion/adaptative_grid.rs b/src/routines/expansion/adaptative_grid.rs deleted file mode 100644 index e721aebc5..000000000 --- a/src/routines/expansion/adaptative_grid.rs +++ /dev/null @@ -1,243 +0,0 @@ -use crate::structs::theta::Theta; -use anyhow::Result; -use faer::Row; - -/// Implements the adaptive grid algorithm for support point expansion. -/// -/// This function generates up to 2 new support points in each dimension for each existing support point. -/// New support points are symmetrically placed around the original support point, at a distance of `eps` * (range_max - range_min). -/// If the new support point is too close to an existing support point, or it is outside the given range, it is discarded. -/// -/// # Arguments -/// -/// * `theta` - A mutable reference to a 2D array representing the existing support points. -/// * `eps` - A floating-point value representing the fraction of the range to use for generating new support points. -/// * `ranges` - A slice of tuples representing the range of values for each dimension. -/// * `min_dist` - A floating-point value representing the minimum distance between support points. -/// -/// # Returns -/// -/// A 2D array containing the updated support points after the adaptive grid expansion. -/// -pub fn adaptative_grid( - theta: &mut Theta, - eps: f64, - ranges: &[(f64, f64)], - min_dist: f64, -) -> Result<()> { - let mut candidates = Vec::new(); - - // Collect all points first to avoid borrowing conflicts - for spp in theta.matrix().row_iter() { - for (j, val) in spp.iter().enumerate() { - let l = eps * (ranges[j].1 - ranges[j].0); //abs? 
- if val + l < ranges[j].1 { - let mut plus = Row::zeros(spp.ncols()); - plus[j] = l; - plus += spp; - candidates.push(plus.iter().copied().collect::>()); - } - if val - l > ranges[j].0 { - let mut minus = Row::zeros(spp.ncols()); - minus[j] = -l; - minus += spp; - candidates.push(minus.iter().copied().collect::>()); - } - } - } - - // Option 1: Check all points against the original theta, then add them - let keep = candidates - .iter() - .filter(|point| theta.check_point(point, min_dist)) - .cloned() - .collect::>(); - - for point in keep { - theta.add_point(point.as_slice())?; - } - - Ok(()) - - // Option 2: Check and add points one by one - // Now add all the points after the immutable borrow is released - //for point in candidates { - // theta.suggest_point(point, min_dist, ranges); - //} -} -/* -#[cfg(test)] -mod tests { - use super::*; - use crate::structs::theta::Theta; - use faer::mat; - - #[test] - fn test_expected() { - let original = Theta::from(mat![[1.0, 10.0]]); - - let ranges = [(0.0, 1.0), (0.0, 10.0)]; - let eps = 0.1; - let min_dist = 0.05; - - let mut theta = original.clone(); - adaptative_grid(&mut theta, eps, &ranges, min_dist); - - let expected = mat![[1.0, 10.0], [0.9, 10.0], [1.0, 9.0]]; - - // Check that both matrices have the same number of rows - assert_eq!( - theta.matrix().nrows(), - expected.nrows(), - "Number of points in theta doesn't match expected" - ); - - // Check that all points in expected are in theta - for i in 0..expected.nrows() { - let expected_point = expected.row(i); - let mut found = false; - - for j in 0..theta.matrix().nrows() { - let theta_point = theta.matrix().row(j); - - // Check if points match (within small epsilon for floating-point comparison) - if (expected_point[0] - theta_point[0]).abs() < 1e-10 - && (expected_point[1] - theta_point[1]).abs() < 1e-10 - { - found = true; - break; - } - } - - assert!( - found, - "Expected point [{}, {}] not found in theta", - expected_point[0], expected_point[1] - ); - } - - 
// Check that all points in theta are in expected - for i in 0..theta.matrix().nrows() { - let theta_point = theta.matrix().row(i); - let mut found = false; - - for j in 0..expected.nrows() { - let expected_point = expected.row(j); - - // Check if points match (within small epsilon) - if (theta_point[0] - expected_point[0]).abs() < 1e-10 - && (theta_point[1] - expected_point[1]).abs() < 1e-10 - { - found = true; - break; - } - } - - assert!( - found, - "Point [{}, {}] in theta was not expected", - theta_point[0], theta_point[1] - ); - } - } - - #[test] - fn test_basic_expansion() { - // Create initial theta with a single point [0.5, 0.5] - let mut theta = Theta::from(mat![[0.5, 0.5]]); - - // Define ranges for two dimensions - let ranges = [(0.0, 1.0), (0.0, 1.0)]; - - // Set expansion parameters - let eps = 0.1; - let min_dist = 0.05; - - // Apply adaptive grid - adaptative_grid(&mut theta, eps, &ranges, min_dist); - - // Should generate 4 new points around the original: - // [0.6, 0.5], [0.4, 0.5], [0.5, 0.6], [0.5, 0.4] - // Total 5 points including the original - assert_eq!(theta.matrix().nrows(), 5); - - // Verify the original point is preserved - let matrix = theta.matrix(); - let mut has_original = false; - - for i in 0..matrix.nrows() { - let row = matrix.row(i); - if (row[0] - 0.5).abs() < 1e-10 && (row[1] - 0.5).abs() < 1e-10 { - has_original = true; - break; - } - } - assert!(has_original, "Original point should be preserved"); - - // Verify expansion points were created - let expected_points = vec![(0.6, 0.5), (0.4, 0.5), (0.5, 0.6), (0.5, 0.4)]; - for (x, y) in expected_points { - let mut found = false; - for i in 0..matrix.nrows() { - let row = matrix.row(i); - if (row[0] - x).abs() < 1e-10 && (row[1] - y).abs() < 1e-10 { - found = true; - break; - } - } - assert!(found, "Expected point ({}, {}) not found", x, y); - } - } - - #[test] - fn test_boundary_conditions() { - // Create initial theta with points near boundaries - let mut theta = 
Theta::from(mat![ - [0.05, 0.5], // Near lower boundary in x - [0.95, 0.5], // Near upper boundary in x - [0.5, 0.05], // Near lower boundary in y - [0.5, 0.95], // Near upper boundary in y - ]); - - let ranges = [(0.0, 1.0), (0.0, 1.0)]; - let eps = 0.1; - let min_dist = 0.05; - - // Store original count - let original_count = theta.matrix().nrows(); - - adaptative_grid(&mut theta, eps, &ranges, min_dist); - - // Each point should generate fewer than 4 new points due to boundaries - assert!(theta.matrix().nrows() > original_count); - assert!(theta.matrix().nrows() < original_count + 4 * 4); - - // Verify no points are outside the range - let matrix = theta.matrix(); - for i in 0..matrix.nrows() { - let row = matrix.row(i); - assert!(row[0] >= ranges[0].0 && row[0] <= ranges[0].1); - assert!(row[1] >= ranges[1].0 && row[1] <= ranges[1].1); - } - } - - #[test] - fn test_min_distance_constraint() { - // Create initial theta with close points - let mut theta = Theta::from(mat![ - [0.5, 0.5], - [0.55, 0.5], // Close to first point - ]); - - let ranges = [(0.0, 1.0), (0.0, 10.0)]; - let eps = 0.1; - let min_dist = 0.15; // Large enough to prevent some points from being added - - adaptative_grid(&mut theta, eps, &ranges, min_dist); - - // We should have fewer points than the maximum possible expansion - // due to the minimum distance constraint - assert!(theta.matrix().nrows() < 2 + 2 * 4); - } -} - */ diff --git a/src/routines/expansion/mod.rs b/src/routines/expansion/mod.rs deleted file mode 100644 index a84c84fad..000000000 --- a/src/routines/expansion/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod adaptative_grid; diff --git a/src/routines/initialization/latin.rs b/src/routines/initialization/latin.rs deleted file mode 100644 index bf6f83418..000000000 --- a/src/routines/initialization/latin.rs +++ /dev/null @@ -1,98 +0,0 @@ -use anyhow::Result; -use faer::Mat; -use rand::prelude::*; -use rand::rngs::StdRng; -use rand::Rng; - -use crate::prelude::Parameters; -use 
crate::structs::theta::Theta; - -/// Generates an instance of [Theta] using Latin Hypercube Sampling. -/// -/// # Arguments -/// -/// * `parameters` - The [Parameters] struct, which contains the parameters to be sampled. -/// * `points` - The number of points to generate, i.e. the number of rows in the matrix. -/// * `seed` - The seed for the Sobol sequence generator. -/// -/// # Returns -/// -/// [Theta], a structure that holds the support point matrix -/// -pub fn generate(parameters: &Parameters, points: usize, seed: usize) -> Result { - let params: Vec<(String, f64, f64)> = parameters - .iter() - .map(|p| (p.name.clone(), p.lower, p.upper)) - .collect(); - - // Initialize random number generator with the provided seed - let mut rng = StdRng::seed_from_u64(seed as u64); - - // Create and shuffle intervals for each parameter - let mut intervals = Vec::new(); - for _ in 0..params.len() { - let mut param_intervals: Vec = (0..points).map(|i| i as f64).collect(); - param_intervals.shuffle(&mut rng); - intervals.push(param_intervals); - } - - let rand_matrix = Mat::from_fn(points, params.len(), |i, j| { - // Get the interval for this parameter and point - let interval = intervals[j][i]; - let random_offset = rng.random::(); - // Calculate normalized value in [0,1] - let unscaled = (interval + random_offset) / points as f64; - // Scale to parameter range - let (_name, lower, upper) = params.get(j).unwrap(); // Fixed: use j instead of i - lower + unscaled * (upper - lower) - }); - - let theta = Theta::from_parts(rand_matrix, parameters.clone())?; - - Ok(theta) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::prelude::Parameters; - use faer::mat; - - #[test] - fn test_latin_hypercube() { - let params = Parameters::new() - .add("a", 0.0, 1.0) - .add("b", 0.0, 1.0) - .add("c", 0.0, 1.0); - - let theta = generate(¶ms, 10, 22).unwrap(); - - assert_eq!(theta.nspp(), 10); - assert_eq!(theta.matrix().ncols(), 3); - } - - #[test] - fn test_latin_hypercube_values() { 
- let params = Parameters::new() - .add("a", 0.0, 1.0) - .add("b", 0.0, 1.0) - .add("c", 0.0, 1.0); - - let theta = generate(¶ms, 10, 22).unwrap(); - - let expected = mat![ - [0.9318592685623417, 0.5609665425179973, 0.3351914901515939], // - [0.5470144220416706, 0.13513808559222779, 0.1067962439473777], // - [0.34525902829190547, 0.4636722699673962, 0.9142146621998218], // - [0.24828355387285125, 0.8638104433695395, 0.41653980640777954], // - [0.7642037770085612, 0.6806932027789437, 0.5608053599272136], // - [0.19409389824004936, 0.9378790633419902, 0.6039530631991072], // - [0.04886813284275151, 0.7140428162864041, 0.7855069414226704], // - [0.6987026842780971, 0.32378779989236495, 0.8888807957183007], // - [0.4221279608793599, 0.08001464382386277, 0.20689573661666943], // - [0.8310112718320113, 0.29390050406905127, 0.04806137233953963], // - ]; - - assert_eq!(theta.matrix().to_owned(), expected); - } -} diff --git a/src/routines/initialization/mod.rs b/src/routines/initialization/mod.rs deleted file mode 100644 index fa03c4442..000000000 --- a/src/routines/initialization/mod.rs +++ /dev/null @@ -1,530 +0,0 @@ -use std::fs::File; - -use crate::structs::{theta::Theta, weights::Weights}; -use anyhow::{bail, Context, Result}; -use faer::Mat; -use serde::{Deserialize, Serialize}; - -use crate::routines::settings::Settings; - -pub mod latin; -pub mod sobol; - -/// The sampler used to generate the grid of support points -/// -/// The sampler can be one of the following: -/// -/// - `Sobol`: Generates a Sobol sequence -/// - `Latin`: Generates a Latin hypercube -/// - `File`: Reads the prior distribution from a CSV file -#[derive(Debug, Deserialize, Clone, Serialize)] -pub enum Prior { - Sobol(usize, usize), - Latin(usize, usize), - File(String), - #[serde(skip)] - Theta(Theta), -} - -impl Prior { - pub fn sobol(points: usize, seed: usize) -> Prior { - Prior::Sobol(points, seed) - } - - /// Get the number of initial support points - /// - /// This function returns the 
number of points for Sobol and Latin samplers, - /// and returns `None` for file-based priors since they do not have a fixed number of points. - /// For custom priors ([Prior::Theta]), it returns the number of support points in the original [Theta] structure. - pub fn points(&self) -> Option { - match self { - Prior::Sobol(points, _) => Some(*points), - Prior::Latin(points, _) => Some(*points), - Prior::File(_) => None, // File-based prior does not have a fixed number of points - Prior::Theta(theta) => Some(theta.nspp()), - } - } - - /// Get the seed used for the random number generator - /// - /// This function returns the seed for Sobol and Latin samplers, - /// and returns `None` for file-based priors since they do not have a fixed seed. - /// For custom priors ([Prior::Theta]), it returns `None` as they do not have a fixed seed. - pub fn seed(&self) -> Option { - match self { - Prior::Sobol(_, seed) => Some(*seed), - Prior::Latin(_, seed) => Some(*seed), - Prior::File(_) => None, // "File-based prior does not have a fixed seed" - Prior::Theta(_) => None, // Custom prior does not have a fixed seed - } - } -} - -impl Default for Prior { - fn default() -> Self { - Prior::Sobol(2028, 22) - } -} - -/// This function generates the grid of support points according to the sampler specified in the [Settings] -pub fn sample_space(settings: &Settings) -> Result { - // Ensure that the parameter ranges are not infinite - for param in settings.parameters().iter() { - if param.lower.is_infinite() || param.upper.is_infinite() { - bail!( - "Parameter '{}' has infinite bounds: [{}, {}]", - param.name, - param.lower, - param.upper - ); - } - - // Ensure that the lower bound is less than the upper bound - if param.lower >= param.upper { - bail!( - "Parameter '{}' has invalid bounds: [{}, {}]. 
Lower bound must be less than upper bound.", - param.name, - param.lower, - param.upper - ); - } - } - - // Otherwise, parse the sampler type and generate the grid - let prior = match settings.prior() { - Prior::Sobol(points, seed) => sobol::generate(settings.parameters(), *points, *seed)?, - Prior::Latin(points, seed) => latin::generate(settings.parameters(), *points, *seed)?, - Prior::File(ref path) => parse_prior(path, settings)?.0, - Prior::Theta(ref theta) => { - // If a custom prior is provided, return it directly - return Ok(theta.clone()); - } - }; - Ok(prior) -} - -/// This function reads the prior distribution from a file -pub fn parse_prior(path: &String, settings: &Settings) -> Result<(Theta, Option)> { - tracing::info!("Reading prior from {}", path); - let file = File::open(path).context(format!("Unable to open the prior file '{}'", path))?; - let mut reader = csv::ReaderBuilder::new() - .has_headers(true) - .from_reader(file); - - let mut parameter_names: Vec = reader - .headers()? 
- .clone() - .into_iter() - .map(|s| s.trim().to_owned()) - .collect(); - - // Check if "prob" column is present and get its index - let prob_index = parameter_names.iter().position(|name| name == "prob"); - - // Remove "prob" column from parameter_names if present - if let Some(index) = prob_index { - parameter_names.remove(index); - } - - // Check and reorder parameters to match names in settings.parsed.random - let random_names: Vec = settings.parameters().names(); - - let mut reordered_indices: Vec = Vec::new(); - for random_name in &random_names { - match parameter_names.iter().position(|name| name == random_name) { - Some(index) => { - // Adjust index if prob column was present and came before this parameter - let adjusted_index = if let Some(prob_idx) = prob_index { - if index >= prob_idx { - index + 1 // Add 1 back since we removed prob from parameter_names - } else { - index - } - } else { - index - }; - reordered_indices.push(adjusted_index); - } - None => { - bail!("Parameter {} is not present in the CSV file.", random_name); - } - } - } - - // Check if there are remaining parameters not present in settings.parsed.random - if parameter_names.len() > random_names.len() { - let extra_parameters: Vec<&String> = parameter_names.iter().collect(); - bail!( - "Found parameters in the prior not present in configuration: {:?}", - extra_parameters - ); - } - - // Read parameter values and probabilities row by row - let mut theta_values = Vec::new(); - let mut prob_values = Vec::new(); - - for result in reader.records() { - let record = result.unwrap(); - - // Extract parameter values using reordered indices - let values: Vec = reordered_indices - .iter() - .map(|&i| record[i].parse::().unwrap()) - .collect(); - theta_values.push(values); - - // Extract probability value if prob column exists - if let Some(prob_idx) = prob_index { - let prob_value: f64 = record[prob_idx].parse::().unwrap(); - prob_values.push(prob_value); - } - } - - let n_points = 
theta_values.len(); - let n_params = random_names.len(); - - // Convert nested Vec into a single Vec - let theta_values: Vec = theta_values.into_iter().flatten().collect(); - - let theta_matrix: Mat = - Mat::from_fn(n_points, n_params, |i, j| theta_values[i * n_params + j]); - - let theta = Theta::from_parts(theta_matrix, settings.parameters().clone())?; - - // Create weights if prob column was present - let weights = if !prob_values.is_empty() { - Some(Weights::from_vec(prob_values)) - } else { - None - }; - - Ok((theta, weights)) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::prelude::*; - use pharmsol::{AssayErrorModel, AssayErrorModels, ErrorPoly}; - use std::fs; - - fn create_test_settings() -> Settings { - let parameters = Parameters::new().add("ke", 0.1, 1.0).add("v", 5.0, 50.0); - - let em = AssayErrorModel::additive(ErrorPoly::new(0.0, 0.10, 0.0, 0.0), 2.0); - let ems = AssayErrorModels::new().add(0, em).unwrap(); - - Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(parameters) - .set_error_models(ems) - .build() - } - - fn create_temp_csv_file(content: &str) -> String { - let temp_path = format!("test_temp_{}.csv", rand::random::()); - fs::write(&temp_path, content).unwrap(); - temp_path - } - - fn cleanup_temp_file(path: &str) { - let _ = fs::remove_file(path); - } - - #[test] - fn test_prior_sobol_creation() { - let prior = Prior::sobol(100, 42); - assert_eq!(prior.points(), Some(100)); - assert_eq!(prior.seed(), Some(42)); - } - - #[test] - fn test_prior_latin_creation() { - let prior = Prior::Latin(50, 123); - assert_eq!(prior.points(), Some(50)); - assert_eq!(prior.seed(), Some(123)); - } - - #[test] - fn test_prior_default() { - let prior = Prior::default(); - assert_eq!(prior.points(), Some(2028)); - assert_eq!(prior.seed(), Some(22)); - } - - #[test] - fn test_prior_file_points() { - let prior = Prior::File("test.csv".to_string()); - assert_eq!(prior.points(), None); - } - - #[test] - fn test_prior_file_seed() 
{ - let prior = Prior::File("test.csv".to_string()); - assert_eq!(prior.seed(), None); - } - - #[test] - fn test_sample_space_sobol() { - let mut settings = create_test_settings(); - settings.set_prior(Prior::sobol(10, 42)); - - let result = sample_space(&settings); - assert!(result.is_ok()); - - let theta = result.unwrap(); - assert_eq!(theta.nspp(), 10); - assert_eq!(theta.matrix().ncols(), 2); - } - - #[test] - fn test_sample_space_latin() { - let mut settings = create_test_settings(); - settings.set_prior(Prior::Latin(15, 123)); - - let result = sample_space(&settings); - assert!(result.is_ok()); - - let theta = result.unwrap(); - assert_eq!(theta.nspp(), 15); - assert_eq!(theta.matrix().ncols(), 2); - } - - #[test] - fn test_sample_space_custom_theta() { - let mut settings = create_test_settings(); - - // Create a custom theta - let parameters = settings.parameters().clone(); - let matrix = faer::Mat::from_fn(3, 2, |i, j| (i + j) as f64); - let custom_theta = Theta::from_parts(matrix, parameters).unwrap(); - - let prior = Prior::Theta(custom_theta.clone()); - settings.set_prior(Prior::Theta(custom_theta.clone())); - - let result = sample_space(&settings); - assert!(result.is_ok()); - - let theta = result.unwrap(); - assert_eq!(theta.nspp(), 3); - assert_eq!(theta.matrix().ncols(), 2); - assert_eq!(theta, custom_theta); - assert!(prior.points() == Some(3)); - } - - #[test] - fn test_sample_space_infinite_bounds_error() { - let parameters = Parameters::new() - .add("ke", f64::NEG_INFINITY, 1.0) // Invalid: infinite lower bound - .add("v", 5.0, 50.0); - - let em = AssayErrorModel::additive(ErrorPoly::new(0.0, 0.10, 0.0, 0.0), 2.0); - let ems = AssayErrorModels::new().add(0, em).unwrap(); - - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(parameters) - .set_error_models(ems) - .build(); - - settings.set_prior(Prior::sobol(10, 42)); - - let result = sample_space(&settings); - assert!(result.is_err()); - 
assert!(result.unwrap_err().to_string().contains("infinite bounds")); - } - - #[test] - fn test_sample_space_invalid_bounds_error() { - let parameters = Parameters::new() - .add("ke", 1.0, 0.5) // Invalid: lower bound >= upper bound - .add("v", 5.0, 50.0); - - let em = AssayErrorModel::additive(ErrorPoly::new(0.0, 0.10, 0.0, 0.0), 2.0); - let ems = AssayErrorModels::new().add(0, em).unwrap(); - - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(parameters) - .set_error_models(ems) - .build(); - - settings.set_prior(Prior::sobol(10, 42)); - - let result = sample_space(&settings); - assert!(result.is_err()); - assert!(result.unwrap_err().to_string().contains("invalid bounds")); - } - - #[test] - fn test_parse_prior_valid_file() { - let csv_content = "ke,v\n0.1,10.0\n0.2,15.0\n0.3,20.0\n"; - let temp_path = create_temp_csv_file(csv_content); - - let settings = create_test_settings(); - - let result = parse_prior(&temp_path, &settings); - assert!(result.is_ok()); - - let (theta, weights) = result.unwrap(); - assert_eq!(theta.nspp(), 3); - assert_eq!(theta.matrix().ncols(), 2); - assert!(weights.is_none()); // No prob column, so no weights - - cleanup_temp_file(&temp_path); - } - - #[test] - fn test_parse_prior_with_prob_column() { - let csv_content = "ke,v,prob\n0.1,10.0,0.5\n0.2,15.0,0.3\n0.3,20.0,0.2\n"; - let temp_path = create_temp_csv_file(csv_content); - - let settings = create_test_settings(); - - let result = parse_prior(&temp_path, &settings); - assert!(result.is_ok()); - - let (theta, weights) = result.unwrap(); - assert_eq!(theta.nspp(), 3); - assert_eq!(theta.matrix().ncols(), 2); - - // Verify that weights were read correctly - assert!(weights.is_some()); - let weights = weights.unwrap(); - assert_eq!(weights.len(), 3); - assert!((weights[0] - 0.5).abs() < 1e-10); - assert!((weights[1] - 0.3).abs() < 1e-10); - assert!((weights[2] - 0.2).abs() < 1e-10); - - cleanup_temp_file(&temp_path); - } - - #[test] - fn 
test_parse_prior_missing_parameter() { - let csv_content = "ke\n0.1\n0.2\n0.3\n"; - let temp_path = create_temp_csv_file(csv_content); - - let settings = create_test_settings(); - - let result = parse_prior(&temp_path, &settings); - assert!(result.is_err()); - assert!(result - .unwrap_err() - .to_string() - .contains("Parameter v is not present")); - - cleanup_temp_file(&temp_path); - } - - #[test] - fn test_parse_prior_extra_parameters() { - let csv_content = "ke,v,extra_param\n0.1,10.0,1.0\n0.2,15.0,2.0\n0.3,20.0,3.0\n"; - let temp_path = create_temp_csv_file(csv_content); - - let settings = create_test_settings(); - - let result = parse_prior(&temp_path, &settings); - assert!(result.is_err()); - assert!(result - .unwrap_err() - .to_string() - .contains("Found parameters in the prior not present in configuration")); - - cleanup_temp_file(&temp_path); - } - - #[test] - fn test_parse_prior_nonexistent_file() { - let settings = create_test_settings(); - let file_path = "nonexistent_file.csv".to_string(); - - let result = parse_prior(&file_path, &settings); - assert!(result.is_err()); - assert!(result - .unwrap_err() - .to_string() - .contains("Unable to open the prior file")); - } - - #[test] - fn test_parse_prior_reordered_columns() { - let csv_content = "v,ke\n10.0,0.1\n15.0,0.2\n20.0,0.3\n"; - let temp_path = create_temp_csv_file(csv_content); - - let settings = create_test_settings(); - - let result = parse_prior(&temp_path, &settings); - assert!(result.is_ok()); - - let (theta, weights) = result.unwrap(); - assert_eq!(theta.nspp(), 3); - assert_eq!(theta.matrix().ncols(), 2); - assert!(weights.is_none()); // No prob column, so no weights - - // Verify the values are correctly reordered (ke should be first, v second) - let matrix = theta.matrix(); - assert!((matrix[(0, 0)] - 0.1).abs() < 1e-10); // First row, ke value - assert!((matrix[(0, 1)] - 10.0).abs() < 1e-10); // First row, v value - - cleanup_temp_file(&temp_path); - } - - #[test] - fn 
test_parse_prior_with_prob_column_reordered() { - let csv_content = "prob,v,ke\n0.5,10.0,0.1\n0.3,15.0,0.2\n0.2,20.0,0.3\n"; - let temp_path = create_temp_csv_file(csv_content); - - let settings = create_test_settings(); - - let result = parse_prior(&temp_path, &settings); - assert!(result.is_ok()); - - let (theta, weights) = result.unwrap(); - assert_eq!(theta.nspp(), 3); - assert_eq!(theta.matrix().ncols(), 2); - - // Verify that weights were read correctly - assert!(weights.is_some()); - let weights = weights.unwrap(); - assert_eq!(weights.len(), 3); - assert!((weights[0] - 0.5).abs() < 1e-10); - assert!((weights[1] - 0.3).abs() < 1e-10); - assert!((weights[2] - 0.2).abs() < 1e-10); - - // Verify the parameter values are correctly reordered (ke should be first, v second) - let matrix = theta.matrix(); - assert!((matrix[(0, 0)] - 0.1).abs() < 1e-10); // First row, ke value - assert!((matrix[(0, 1)] - 10.0).abs() < 1e-10); // First row, v value - - cleanup_temp_file(&temp_path); - } - - #[test] - fn test_sample_space_file_based() { - let csv_content = "ke,v\n0.1,10.0\n0.2,15.0\n0.3,20.0\n"; - let temp_path = create_temp_csv_file(csv_content); - - let mut settings = create_test_settings(); - settings.set_prior(Prior::File(temp_path.clone())); - - let result = sample_space(&settings); - assert!(result.is_ok()); - - let theta = result.unwrap(); - assert_eq!(theta.nspp(), 3); - assert_eq!(theta.matrix().ncols(), 2); - - cleanup_temp_file(&temp_path); - } - - #[test] - fn test_prior_theta_no_seed_panic() { - let parameters = Parameters::new().add("ke", 0.1, 1.0); - let matrix = faer::Mat::from_fn(1, 1, |_, _| 0.5); - let theta = Theta::from_parts(matrix, parameters).unwrap(); - let prior = Prior::Theta(theta); - - assert_eq!(prior.seed(), None, "Theta prior should not have a seed"); - } -} diff --git a/src/routines/initialization/sobol.rs b/src/routines/initialization/sobol.rs deleted file mode 100644 index 674580df8..000000000 --- 
a/src/routines/initialization/sobol.rs +++ /dev/null @@ -1,103 +0,0 @@ -use crate::structs::theta::Theta; -use anyhow::Result; -use faer::Mat; - -use sobol_burley::sample; - -use crate::prelude::Parameters; - -/// Generates an instance of [Theta] from a Sobol sequence. -/// -/// The sequence samples [0, 1), and the values are scaled to the parameter ranges. -/// -/// # Arguments -/// -/// * `parameters` - The [Parameters] struct, which contains the parameters to be sampled. -/// * `points` - The number of points to generate, i.e. the number of rows in the matrix. -/// * `seed` - The seed for the Sobol sequence generator. -/// -/// # Returns -/// -/// [Theta], a structure that holds the support point matrix -/// -pub fn generate(parameters: &Parameters, points: usize, seed: usize) -> Result { - let seed = seed as u32; - let params: Vec<(String, f64, f64)> = parameters - .iter() - .map(|p| (p.name.clone(), p.lower, p.upper)) - .collect(); - - let rand_matrix = Mat::from_fn(points, params.len(), |i, j| { - let unscaled = sample((i).try_into().unwrap(), j.try_into().unwrap(), seed) as f64; - let (_name, lower, upper) = params.get(j).unwrap(); - lower + unscaled * (upper - lower) - }); - - let theta = Theta::from_parts(rand_matrix, parameters.clone())?; - Ok(theta) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::prelude::Parameters; - - #[test] - fn test_sobol() { - let params = Parameters::new() - .add("a", 0.0, 1.0) - .add("b", 0.0, 1.0) - .add("c", 0.0, 1.0); - - let theta = generate(¶ms, 10, 22).unwrap(); - - assert_eq!(theta.nspp(), 10); - assert_eq!(theta.matrix().ncols(), 3); - } - - #[test] - fn test_sobol_ranges() { - let params = Parameters::new() - .add("a", 0.0, 1.0) - .add("b", 0.0, 1.0) - .add("c", 0.0, 1.0); - - let theta = generate(¶ms, 10, 22).unwrap(); - - theta.matrix().row_iter().for_each(|row| { - row.iter().for_each(|&value| { - assert!(value >= 0.0 && value <= 1.0); - }); - }); - } - - #[test] - fn test_sobol_values() { - use 
faer::mat; - let params = Parameters::new() - .add("a", 0.0, 1.0) - .add("b", 0.0, 1.0) - .add("c", 0.0, 1.0); - - let theta = generate(¶ms, 10, 22).unwrap(); - - let expected = mat![ - [0.05276215076446533, 0.609707236289978, 0.29471302032470703], // - [0.6993427276611328, 0.4142681360244751, 0.6447571516036987], // - [0.860404372215271, 0.769607663154602, 0.1742185354232788], // - [0.3863574266433716, 0.07018685340881348, 0.9825305938720703], // - [0.989533543586731, 0.19934570789337158, 0.4716176986694336], // - [0.29962968826293945, 0.899970293045044, 0.5400241613388062], // - [0.5577576160430908, 0.6990838050842285, 0.859503984451294], // - [ - 0.19194257259368896, - 0.31645333766937256, - 0.042426824569702150 - ], // - [0.8874167203903198, 0.5214653015136719, 0.5899909734725952], // - [0.35627472400665283, 0.4780532121658325, 0.42954015731811523] // - ]; - - assert_eq!(theta.matrix().to_owned(), expected); - } -} diff --git a/src/routines/mod.rs b/src/routines/mod.rs deleted file mode 100644 index af25d67e6..000000000 --- a/src/routines/mod.rs +++ /dev/null @@ -1,14 +0,0 @@ -// Routines for condensation -pub mod condensation; -// Routines for estimation -pub mod estimation; -// Routines for expansion -pub mod expansion; -// Routines for initialization -pub mod initialization; -// Routines for logging -pub mod logger; -// Routines for output -pub mod output; -// Routines for settings -pub mod settings; diff --git a/src/routines/output/mod.rs b/src/routines/output/mod.rs deleted file mode 100644 index 3835904d2..000000000 --- a/src/routines/output/mod.rs +++ /dev/null @@ -1,796 +0,0 @@ -use crate::algorithms::{Status, StopReason}; -use crate::prelude::*; -use crate::routines::output::cycles::CycleLog; -use crate::routines::output::posterior::Posterior; -use crate::routines::output::predictions::NPPredictions; -use crate::routines::settings::Settings; -use crate::structs::psi::Psi; -use crate::structs::theta::Theta; -use crate::structs::weights::Weights; -use 
anyhow::{bail, Context, Result}; -use csv::WriterBuilder; -use ndarray::{Array, Array1, Array2, Axis}; -use pharmsol::prelude::data::*; -use pharmsol::prelude::simulator::Equation; -use serde::Serialize; -use std::fs::{create_dir_all, File, OpenOptions}; -use std::path::{Path, PathBuf}; - -pub mod cycles; -pub mod posterior; -pub mod predictions; - -use posterior::posterior; - -/// Defines the result objects from an NPAG run -/// An [NPResult] contains the necessary information to generate predictions and summary statistics -#[derive(Debug, Serialize)] -pub struct NPResult { - #[serde(skip)] - equation: E, - data: Data, - theta: Theta, - psi: Psi, - w: Weights, - objf: f64, - cycles: usize, - status: Status, - settings: Settings, - cyclelog: CycleLog, - predictions: Option, - posterior: Posterior, -} - -#[allow(clippy::too_many_arguments)] -impl NPResult { - /// Create a new NPResult object - /// - /// This will also calculate the [Posterior] structure and add it to the NPResult - pub(crate) fn new( - equation: E, - data: Data, - theta: Theta, - psi: Psi, - w: Weights, - objf: f64, - cycles: usize, - status: Status, - settings: Settings, - cyclelog: CycleLog, - ) -> Result { - // Calculate the posterior probabilities - let posterior = posterior(&psi, &w) - .context("Failed to calculate posterior during initialization of NPResult")?; - - let result = Self { - equation, - data, - theta, - psi, - w, - objf, - cycles, - status, - settings, - cyclelog, - predictions: None, - posterior, - }; - - Ok(result) - } - - pub fn cycles(&self) -> usize { - self.cycles - } - - pub fn objf(&self) -> f64 { - self.objf - } - - pub fn converged(&self) -> bool { - self.status == Status::Stop(StopReason::Converged) - } - - pub fn get_theta(&self) -> &Theta { - &self.theta - } - - pub fn data(&self) -> &Data { - &self.data - } - - pub fn cycle_log(&self) -> &CycleLog { - &self.cyclelog - } - - pub fn settings(&self) -> &Settings { - &self.settings - } - - /// Get the [Psi] structure - 
pub fn psi(&self) -> &Psi { - &self.psi - } - - /// Get the weights (probabilities) of the support points - pub fn weights(&self) -> &Weights { - &self.w - } - - /// Calculate and store the [NPPredictions] in the [NPResult] - /// - /// This will overwrite any existing predictions stored in the result! - pub fn calculate_predictions(&mut self, idelta: f64, tad: f64) -> Result<()> { - let predictions = NPPredictions::calculate( - &self.equation, - &self.data, - &self.theta, - &self.w, - &self.posterior, - idelta, - tad, - )?; - self.predictions = Some(predictions); - Ok(()) - } - - pub fn write_outputs(&mut self) -> Result<()> { - if self.settings.output().write { - tracing::debug!("Writing outputs to {:?}", self.settings.output().path); - self.settings.write()?; - let idelta: f64 = self.settings.predictions().idelta; - let tad = self.settings.predictions().tad; - self.cyclelog.write(&self.settings)?; - self.write_theta().context("Failed to write theta")?; - self.write_covs().context("Failed to write covariates")?; - self.write_predictions(idelta, tad) - .context("Failed to write predictions")?; - self.write_posterior() - .context("Failed to write posterior")?; - } - Ok(()) - } - - /// Writes the observations and predictions to a single file - pub fn write_obspred(&self) -> Result<()> { - tracing::debug!("Writing observations and predictions..."); - - #[derive(Debug, Clone, Serialize)] - struct Row { - id: String, - time: f64, - outeq: usize, - block: usize, - obs: Option, - pop_mean: f64, - pop_median: f64, - post_mean: f64, - post_median: f64, - } - - let tm = self.theta.matrix(); - let theta = Array2::from_shape_fn((tm.nrows(), tm.ncols()), |(i, j)| tm[(i, j)]); - let w: Array1 = self.w.iter().collect(); - let pm = self.psi.matrix(); - let psi = Array2::from_shape_fn((pm.nrows(), pm.ncols()), |(i, j)| pm[(i, j)]); - - let (post_mean, post_median) = posterior_mean_median(&theta, &psi, &w) - .context("Failed to calculate posterior mean and median")?; - - let 
(pop_mean, pop_median) = population_mean_median(&theta, &w) - .context("Failed to calculate posterior mean and median")?; - - let subjects = self.data.subjects(); - if subjects.len() != post_mean.nrows() { - bail!( - "Number of subjects: {} and number of posterior means: {} do not match", - subjects.len(), - post_mean.nrows() - ); - } - - let outputfile = OutputFile::new(&self.settings.output().path, "op.csv")?; - let mut writer = WriterBuilder::new() - .has_headers(true) - .from_writer(&outputfile.file); - - for (i, subject) in subjects.iter().enumerate() { - for occasion in subject.occasions() { - let id = subject.id(); - let occ = occasion.index(); - - let subject = Subject::from_occasions(id.clone(), vec![occasion.clone()]); - - // Population predictions - let pop_mean_pred = self - .equation - .simulate_subject(&subject, &pop_mean.to_vec(), None)? - .0 - .get_predictions() - .clone(); - - let pop_median_pred = self - .equation - .simulate_subject(&subject, &pop_median.to_vec(), None)? - .0 - .get_predictions() - .clone(); - - // Posterior predictions - let post_mean_spp: Vec = post_mean.row(i).to_vec(); - let post_mean_pred = self - .equation - .simulate_subject(&subject, &post_mean_spp, None)? - .0 - .get_predictions() - .clone(); - let post_median_spp: Vec = post_median.row(i).to_vec(); - let post_median_pred = self - .equation - .simulate_subject(&subject, &post_median_spp, None)? 
- .0 - .get_predictions() - .clone(); - assert_eq!( - pop_mean_pred.len(), - pop_median_pred.len(), - "The number of predictions do not match (pop_mean vs pop_median)" - ); - - assert_eq!( - post_mean_pred.len(), - post_median_pred.len(), - "The number of predictions do not match (post_mean vs post_median)" - ); - - assert_eq!( - pop_mean_pred.len(), - post_mean_pred.len(), - "The number of predictions do not match (pop_mean vs post_mean)" - ); - - for (((pop_mean_pred, pop_median_pred), post_mean_pred), post_median_pred) in - pop_mean_pred - .iter() - .zip(pop_median_pred.iter()) - .zip(post_mean_pred.iter()) - .zip(post_median_pred.iter()) - { - let row = Row { - id: id.clone(), - time: pop_mean_pred.time(), - outeq: pop_mean_pred.outeq(), - block: occ, - obs: pop_mean_pred.observation(), - pop_mean: pop_mean_pred.prediction(), - pop_median: pop_median_pred.prediction(), - post_mean: post_mean_pred.prediction(), - post_median: post_median_pred.prediction(), - }; - writer.serialize(row)?; - } - } - } - writer.flush()?; - tracing::debug!( - "Observations with predictions written to {:?}", - &outputfile.relative_path() - ); - Ok(()) - } - - /// Writes theta, which contains the population support points and their associated probabilities - /// Each row is one support point, the last column being probability - pub fn write_theta(&self) -> Result<()> { - tracing::debug!("Writing population parameter distribution..."); - - let theta = &self.theta; - let w: Vec = self.w.to_vec(); - - if w.len() != theta.matrix().nrows() { - bail!( - "Number of weights ({}) and number of support points ({}) do not match.", - w.len(), - theta.matrix().nrows() - ); - } - - let outputfile = OutputFile::new(&self.settings.output().path, "theta.csv") - .context("Failed to create output file for theta")?; - - let mut writer = WriterBuilder::new() - .has_headers(true) - .from_writer(&outputfile.file); - - // Create the headers - let mut theta_header = self.settings.parameters().names(); - 
theta_header.push("prob".to_string()); - writer.write_record(&theta_header)?; - - // Write contents - for (theta_row, &w_val) in theta.matrix().row_iter().zip(w.iter()) { - let mut row: Vec = theta_row.iter().map(|&val| val.to_string()).collect(); - row.push(w_val.to_string()); - writer.write_record(&row)?; - } - writer.flush()?; - tracing::debug!( - "Population parameter distribution written to {:?}", - &outputfile.relative_path() - ); - Ok(()) - } - - /// Writes the posterior support points for each individual - pub fn write_posterior(&self) -> Result<()> { - tracing::debug!("Writing posterior parameter probabilities..."); - let theta = &self.theta; - - // Calculate the posterior probabilities - let posterior = self.posterior.clone(); - - // Create the output folder if it doesn't exist - let outputfile = match OutputFile::new(&self.settings.output().path, "posterior.csv") { - Ok(of) => of, - Err(e) => { - tracing::error!("Failed to create output file: {}", e); - return Err(e.context("Failed to create output file")); - } - }; - - // Create a new writer - let mut writer = WriterBuilder::new() - .has_headers(true) - .from_writer(&outputfile.file); - - // Create the headers - writer.write_field("id")?; - writer.write_field("point")?; - theta.param_names().iter().for_each(|name| { - writer.write_field(name).unwrap(); - }); - writer.write_field("prob")?; - writer.write_record(None::<&[u8]>)?; - - // Write contents - let subjects = self.data.subjects(); - posterior - .matrix() - .row_iter() - .enumerate() - .for_each(|(i, row)| { - let subject = subjects.get(i).unwrap(); - let id = subject.id(); - - row.iter().enumerate().for_each(|(spp, prob)| { - writer.write_field(id.clone()).unwrap(); - writer.write_field(spp.to_string()).unwrap(); - - theta.matrix().row(spp).iter().for_each(|val| { - writer.write_field(val.to_string()).unwrap(); - }); - - writer.write_field(prob.to_string()).unwrap(); - writer.write_record(None::<&[u8]>).unwrap(); - }); - }); - - writer.flush()?; - 
tracing::debug!( - "Posterior parameters written to {:?}", - &outputfile.relative_path() - ); - - Ok(()) - } - - /// Writes the predictions - pub fn write_predictions(&mut self, idelta: f64, tad: f64) -> Result<()> { - tracing::debug!("Writing predictions..."); - - self.calculate_predictions(idelta, tad)?; - - let predictions = self - .predictions - .as_ref() - .expect("Predictions should have been calculated, but are of type None."); - - // Write (full) predictions to pred.csv - let outputfile_pred = OutputFile::new(&self.settings.output().path, "pred.csv")?; - let mut writer = WriterBuilder::new() - .has_headers(true) - .from_writer(&outputfile_pred.file); - - // Write each prediction row - for row in predictions.predictions() { - writer.serialize(row)?; - } - - writer.flush()?; - tracing::debug!( - "Predictions written to {:?}", - &outputfile_pred.relative_path() - ); - - Ok(()) - } - - /// Writes the covariates - pub fn write_covs(&self) -> Result<()> { - tracing::debug!("Writing covariates..."); - let outputfile = OutputFile::new(&self.settings.output().path, "covs.csv")?; - let mut writer = WriterBuilder::new() - .has_headers(true) - .from_writer(&outputfile.file); - - // Collect all unique covariate names - let mut covariate_names = std::collections::HashSet::new(); - for subject in self.data.subjects() { - for occasion in subject.occasions() { - let cov = occasion.covariates(); - let covmap = cov.covariates(); - for cov_name in covmap.keys() { - covariate_names.insert(cov_name.clone()); - } - } - } - let mut covariate_names: Vec = covariate_names.into_iter().collect(); - covariate_names.sort(); // Ensure consistent order - - // Write the header row: id, time, block, covariate names - let mut headers = vec!["id", "time", "block"]; - headers.extend(covariate_names.iter().map(|s| s.as_str())); - writer.write_record(&headers)?; - - // Write the data rows - for subject in self.data.subjects() { - for occasion in subject.occasions() { - let cov = 
occasion.covariates(); - let covmap = cov.covariates(); - - for event in occasion.iter() { - let time = match event { - Event::Bolus(bolus) => bolus.time(), - Event::Infusion(infusion) => infusion.time(), - Event::Observation(observation) => observation.time(), - }; - - let mut row: Vec = Vec::new(); - row.push(subject.id().clone()); - row.push(time.to_string()); - row.push(occasion.index().to_string()); - - // Add covariate values to the row - for cov_name in &covariate_names { - if let Some(cov) = covmap.get(cov_name) { - if let Ok(value) = cov.interpolate(time) { - row.push(value.to_string()); - } else { - row.push(String::new()); - } - } else { - row.push(String::new()); - } - } - - writer.write_record(&row)?; - } - } - } - - writer.flush()?; - tracing::debug!("Covariates written to {:?}", &outputfile.relative_path()); - Ok(()) - } -} - -pub(crate) fn median(data: &[f64]) -> f64 { - let mut data: Vec = data.to_vec(); - data.sort_by(|a, b| a.partial_cmp(b).unwrap()); - - let size = data.len(); - match size { - even if even % 2 == 0 => { - let fst = data.get(even / 2 - 1).unwrap(); - let snd = data.get(even / 2).unwrap(); - (fst + snd) / 2.0 - } - odd => *data.get(odd / 2_usize).unwrap(), - } -} - -fn weighted_median(data: &[f64], weights: &[f64]) -> f64 { - // Ensure the data and weights arrays have the same length - assert_eq!( - data.len(), - weights.len(), - "The length of data and weights must be the same" - ); - assert!( - weights.iter().all(|&x| x >= 0.0), - "Weights must be non-negative, weights: {:?}", - weights - ); - - // Create a vector of tuples (data, weight) - let mut weighted_data: Vec<(f64, f64)> = data - .iter() - .zip(weights.iter()) - .map(|(&d, &w)| (d, w)) - .collect(); - - // Sort the vector by the data values - weighted_data.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap()); - - // Calculate the cumulative sum of weights - let total_weight: f64 = weights.iter().sum(); - let mut cumulative_sum = 0.0; - - for (i, &(_, weight)) in 
weighted_data.iter().enumerate() { - cumulative_sum += weight; - - if cumulative_sum == total_weight / 2.0 { - // If the cumulative sum equals half the total weight, average this value with the next - if i + 1 < weighted_data.len() { - return (weighted_data[i].0 + weighted_data[i + 1].0) / 2.0; - } else { - return weighted_data[i].0; - } - } else if cumulative_sum > total_weight / 2.0 { - return weighted_data[i].0; - } - } - - unreachable!("The function should have returned a value before reaching this point."); -} - -pub fn population_mean_median( - theta: &Array2, - w: &Array1, -) -> Result<(Array1, Array1)> { - let w = if w.is_empty() { - tracing::warn!("w.len() == 0, setting all weights to 1/n"); - Array1::from_elem(theta.nrows(), 1.0 / theta.nrows() as f64) - } else { - w.clone() - }; - // Check for compatible sizes - if theta.nrows() != w.len() { - bail!( - "Number of parameters and number of weights do not match. Theta: {}, w: {}", - theta.nrows(), - w.len() - ); - } - - let mut mean = Array1::zeros(theta.ncols()); - let mut median = Array1::zeros(theta.ncols()); - - for (i, (mn, mdn)) in mean.iter_mut().zip(&mut median).enumerate() { - // Calculate the weighted mean - let col = theta.column(i).to_owned() * w.to_owned(); - *mn = col.sum(); - - // Calculate the median - let ct = theta.column(i); - let mut params = vec![]; - let mut weights = vec![]; - for (ti, wi) in ct.iter().zip(w.clone()) { - params.push(*ti); - weights.push(wi); - } - - *mdn = weighted_median(¶ms, &weights); - } - - Ok((mean, median)) -} - -pub fn posterior_mean_median( - theta: &Array2, - psi: &Array2, - w: &Array1, -) -> Result<(Array2, Array2)> { - let mut mean = Array2::zeros((0, theta.ncols())); - let mut median = Array2::zeros((0, theta.ncols())); - - let w = if w.is_empty() { - tracing::warn!("w is empty, setting all weights to 1/n"); - Array1::from_elem(theta.nrows(), 1.0 / theta.nrows() as f64) - } else { - w.clone() - }; - - // Check for compatible sizes - if theta.nrows() != 
w.len() || theta.nrows() != psi.ncols() || psi.ncols() != w.len() { - bail!("Number of parameters and number of weights do not match, theta.nrows(): {}, w.len(): {}, psi.ncols(): {}", theta.nrows(), w.len(), psi.ncols()); - } - - // Normalize psi to get probabilities of each spp for each id - let mut psi_norm: Array2 = Array2::zeros((0, psi.ncols())); - for (i, row) in psi.axis_iter(Axis(0)).enumerate() { - let row_w = row.to_owned() * w.to_owned(); - let row_sum = row_w.sum(); - let row_norm = if row_sum == 0.0 { - tracing::warn!("Sum of row {} of psi is 0.0, setting that row to 1/n", i); - Array1::from_elem(psi.ncols(), 1.0 / psi.ncols() as f64) - } else { - &row_w / row_sum - }; - psi_norm.push_row(row_norm.view())?; - } - if psi_norm.iter().any(|&x| x.is_nan()) { - dbg!(&psi); - bail!("NaN values found in psi_norm"); - }; - - // Transpose normalized psi to get ID (col) by prob (row) - // let psi_norm_transposed = psi_norm.t(); - - // For each subject.. - for probs in psi_norm.axis_iter(Axis(0)) { - let mut post_mean: Vec = Vec::new(); - let mut post_median: Vec = Vec::new(); - - // For each parameter - for pars in theta.axis_iter(Axis(1)) { - // Calculate the mean - let weighted_par = &probs * &pars; - let the_mean = weighted_par.sum(); - post_mean.push(the_mean); - - // Calculate the median - let median = weighted_median(&pars.to_vec(), &probs.to_vec()); - post_median.push(median); - } - - mean.push_row(Array::from(post_mean.clone()).view())?; - median.push_row(Array::from(post_median.clone()).view())?; - } - - Ok((mean, median)) -} - -/// Contains all the necessary information of an output file -#[derive(Debug)] -pub struct OutputFile { - file: File, - relative_path: PathBuf, -} - -impl OutputFile { - pub fn new(folder: &str, file_name: &str) -> Result { - let relative_path = Path::new(&folder).join(file_name); - - if let Some(parent) = relative_path.parent() { - create_dir_all(parent) - .with_context(|| format!("Failed to create directories for {:?}", 
parent))?; - } - - let file = OpenOptions::new() - .write(true) - .create(true) - .truncate(true) - .open(&relative_path) - .with_context(|| format!("Failed to open file: {:?}", relative_path))?; - - Ok(OutputFile { - file, - relative_path, - }) - } - - pub fn file(&self) -> &File { - &self.file - } - - pub fn file_owned(self) -> File { - self.file - } - - pub fn relative_path(&self) -> &Path { - &self.relative_path - } -} - -#[cfg(test)] -mod tests { - use super::median; - - #[test] - fn test_median_odd() { - let data = vec![1.0, 3.0, 2.0]; - assert_eq!(median(&data), 2.0); - } - - #[test] - fn test_median_even() { - let data = vec![1.0, 2.0, 3.0, 4.0]; - assert_eq!(median(&data), 2.5); - } - - #[test] - fn test_median_single() { - let data = vec![42.0]; - assert_eq!(median(&data), 42.0); - } - - #[test] - fn test_median_sorted() { - let data = vec![5.0, 10.0, 15.0, 20.0, 25.0]; - assert_eq!(median(&data), 15.0); - } - - #[test] - fn test_median_unsorted() { - let data = vec![10.0, 30.0, 20.0, 50.0, 40.0]; - assert_eq!(median(&data), 30.0); - } - - #[test] - fn test_median_with_duplicates() { - let data = vec![1.0, 2.0, 2.0, 3.0, 4.0]; - assert_eq!(median(&data), 2.0); - } - - use super::weighted_median; - - #[test] - fn test_weighted_median_simple() { - let data = vec![1.0, 2.0, 3.0]; - let weights = vec![0.2, 0.5, 0.3]; - assert_eq!(weighted_median(&data, &weights), 2.0); - } - - #[test] - fn test_weighted_median_even_weights() { - let data = vec![1.0, 2.0, 3.0, 4.0]; - let weights = vec![0.25, 0.25, 0.25, 0.25]; - assert_eq!(weighted_median(&data, &weights), 2.5); - } - - #[test] - fn test_weighted_median_single_element() { - let data = vec![42.0]; - let weights = vec![1.0]; - assert_eq!(weighted_median(&data, &weights), 42.0); - } - - #[test] - #[should_panic(expected = "The length of data and weights must be the same")] - fn test_weighted_median_mismatched_lengths() { - let data = vec![1.0, 2.0, 3.0]; - let weights = vec![0.1, 0.2]; - weighted_median(&data, 
&weights); - } - - #[test] - fn test_weighted_median_all_same_elements() { - let data = vec![5.0, 5.0, 5.0, 5.0]; - let weights = vec![0.1, 0.2, 0.3, 0.4]; - assert_eq!(weighted_median(&data, &weights), 5.0); - } - - #[test] - #[should_panic(expected = "Weights must be non-negative")] - fn test_weighted_median_negative_weights() { - let data = vec![1.0, 2.0, 3.0, 4.0]; - let weights = vec![0.2, -0.5, 0.5, 0.8]; - assert_eq!(weighted_median(&data, &weights), 4.0); - } - - #[test] - fn test_weighted_median_unsorted_data() { - let data = vec![3.0, 1.0, 4.0, 2.0]; - let weights = vec![0.1, 0.3, 0.4, 0.2]; - assert_eq!(weighted_median(&data, &weights), 2.5); - } - - #[test] - fn test_weighted_median_with_zero_weights() { - let data = vec![1.0, 2.0, 3.0, 4.0]; - let weights = vec![0.0, 0.0, 1.0, 0.0]; - assert_eq!(weighted_median(&data, &weights), 3.0); - } -} diff --git a/src/routines/settings.rs b/src/routines/settings.rs deleted file mode 100644 index 45383b287..000000000 --- a/src/routines/settings.rs +++ /dev/null @@ -1,609 +0,0 @@ -use crate::algorithms::Algorithm; -use crate::routines::initialization::Prior; -use crate::routines::output::OutputFile; -use anyhow::{bail, Result}; -use pharmsol::prelude::data::AssayErrorModels; - -use serde::{Deserialize, Serialize}; -use serde_json; -use std::fmt::Display; -use std::path::PathBuf; - -/// Contains all settings for PMcore -#[derive(Debug, Deserialize, Clone, Serialize)] -#[serde(deny_unknown_fields)] -pub struct Settings { - /// General configuration settings - pub(crate) config: Config, - /// Parameters to be estimated - pub(crate) parameters: Parameters, - /// Defines the error models and polynomials to be used - pub(crate) errormodels: AssayErrorModels, - /// Configuration for predictions - pub(crate) predictions: Predictions, - /// Configuration for logging - pub(crate) log: Log, - /// Configuration for (optional) prior - pub(crate) prior: Prior, - /// Configuration for the output files - pub(crate) output: 
Output, - /// Configuration for the convergence criteria - pub(crate) convergence: Convergence, - /// Advanced options, mostly hyperparameters, for the algorithm(s) - pub(crate) advanced: Advanced, -} - -impl Settings { - /// Create a new [SettingsBuilder] - pub fn builder() -> SettingsBuilder { - SettingsBuilder::new() - } - - /* Getters */ - pub fn config(&self) -> &Config { - &self.config - } - - pub fn parameters(&self) -> &Parameters { - &self.parameters - } - - pub fn errormodels(&self) -> &AssayErrorModels { - &self.errormodels - } - - pub fn predictions(&self) -> &Predictions { - &self.predictions - } - - pub fn log(&self) -> &Log { - &self.log - } - - pub fn prior(&self) -> &Prior { - &self.prior - } - - pub fn output(&self) -> &Output { - &self.output - } - pub fn convergence(&self) -> &Convergence { - &self.convergence - } - - pub fn advanced(&self) -> &Advanced { - &self.advanced - } - - /* Setters */ - pub fn set_cycles(&mut self, cycles: usize) { - self.config.cycles = cycles; - } - - pub fn set_algorithm(&mut self, algorithm: Algorithm) { - self.config.algorithm = algorithm; - } - - pub fn set_cache(&mut self, cache: bool) { - self.config.cache = cache; - } - - pub fn set_idelta(&mut self, idelta: f64) { - self.predictions.idelta = idelta; - } - - pub fn set_tad(&mut self, tad: f64) { - self.predictions.tad = tad; - } - - pub fn set_prior(&mut self, prior: Prior) { - self.prior = prior; - } - - pub fn disable_output(&mut self) { - self.output.write = false; - } - - pub fn set_output_path(&mut self, path: impl Into) { - self.output.path = parse_output_folder(path.into()); - } - - pub fn set_log_stdout(&mut self, stdout: bool) { - self.log.stdout = stdout; - } - - pub fn set_write_logs(&mut self, write: bool) { - self.log.write = write; - } - - pub fn set_log_level(&mut self, level: LogLevel) { - self.log.level = level; - } - - pub fn set_progress(&mut self, progress: bool) { - self.config.progress = progress; - } - - pub fn initialize_logs(&mut self) 
-> Result<()> { - crate::routines::logger::setup_log(self) - } - - /// Writes a copy of the settings to file - /// The is written to output folder specified in the [Output] and is named `settings.json`. - pub fn write(&self) -> Result<()> { - let serialized = serde_json::to_string_pretty(self).map_err(std::io::Error::other)?; - - let outputfile = OutputFile::new(self.output.path.as_str(), "settings.json")?; - let mut file = outputfile.file_owned(); - std::io::Write::write_all(&mut file, serialized.as_bytes())?; - Ok(()) - } -} - -/// General configuration settings -#[derive(Debug, Deserialize, Clone, Serialize)] -#[serde(deny_unknown_fields, default)] -pub struct Config { - /// Maximum number of cycles to run - pub cycles: usize, - /// Denotes the algorithm to use - pub algorithm: Algorithm, - /// If true (default), cache predicted values - pub cache: bool, - /// Should a progress bar be displayed for the first cycle - /// - /// The progress bar is not written to logs, but is written to stdout. It incurs a minor performance penalty. - pub progress: bool, -} - -impl Default for Config { - fn default() -> Self { - Config { - cycles: 100, - algorithm: Algorithm::NPAG, - cache: true, - progress: true, - } - } -} - -/// Defines a parameter to be estimated -/// -/// In non-parametric algorithms, parameters must be bounded. The lower and upper bounds are defined by the `lower` and `upper` fields, respectively. 
-#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)] -pub struct Parameter { - pub(crate) name: String, - pub(crate) lower: f64, - pub(crate) upper: f64, -} - -impl Parameter { - /// Create a new parameter - pub fn new(name: impl Into, lower: f64, upper: f64) -> Self { - Self { - name: name.into(), - lower, - upper, - } - } -} - -/// This structure contains information on all [Parameter]s to be estimated -#[derive(Debug, Clone, Deserialize, Serialize, Default, PartialEq)] -pub struct Parameters { - pub(crate) parameters: Vec, -} - -impl Parameters { - pub fn new() -> Self { - Parameters { - parameters: Vec::new(), - } - } - - pub fn add(mut self, name: impl Into, lower: f64, upper: f64) -> Parameters { - let parameter = Parameter::new(name, lower, upper); - self.parameters.push(parameter); - self - } - - // Get a parameter by name - pub fn get(&self, name: impl Into) -> Option<&Parameter> { - let name = name.into(); - self.parameters.iter().find(|p| p.name == name) - } - - /// Get the names of the parameters - pub fn names(&self) -> Vec { - self.parameters.iter().map(|p| p.name.clone()).collect() - } - /// Get the ranges of the parameters - /// - /// Returns a vector of tuples, where each tuple contains the lower and upper bounds of the parameter - pub fn ranges(&self) -> Vec<(f64, f64)> { - self.parameters.iter().map(|p| (p.lower, p.upper)).collect() - } - - /// Get the number of parameters - pub fn len(&self) -> usize { - self.parameters.len() - } - - /// Check if the parameters are empty - pub fn is_empty(&self) -> bool { - self.parameters.is_empty() - } - - /// Iterate over the parameters - pub fn iter(&self) -> std::slice::Iter<'_, Parameter> { - self.parameters.iter() - } -} - -impl IntoIterator for Parameters { - type Item = Parameter; - type IntoIter = std::vec::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.parameters.into_iter() - } -} - -impl From> for Parameters { - fn from(parameters: Vec) -> Self { - Parameters { parameters } - } 
-} - -/// This struct contains advanced options and hyperparameters -#[derive(Debug, Deserialize, Clone, Serialize)] -#[serde(deny_unknown_fields, default)] -pub struct Advanced { - /// The minimum distance required between a candidate point and the existing grid (THETA_D) - /// - /// This is general for all non-parametric algorithms - pub min_distance: f64, - /// Maximum number of steps in Nelder-Mead optimization - /// This is used in the [NPOD](crate::algorithms::npod) algorithm, specifically in the [D-optimizer](crate::routines::optimization::d_optimizer) - pub nm_steps: usize, - /// Tolerance (in standard deviations) for the Nelder-Mead optimization - /// - /// This is used in the [NPOD](crate::algorithms::npod) algorithm, specifically in the [D-optimizer](crate::routines::optimization::d_optimizer) - pub tolerance: f64, -} - -impl Default for Advanced { - fn default() -> Self { - Advanced { - min_distance: 1e-4, - nm_steps: 100, - tolerance: 1e-6, - } - } -} - -#[derive(Debug, Deserialize, Clone, Serialize)] -#[serde(deny_unknown_fields, default)] -/// This struct contains the convergence criteria for the algorithm -pub struct Convergence { - /// The objective function convergence criterion for the algorithm - /// - /// The objective function is the negative log likelihood - /// Previously referred to as THETA_G - pub likelihood: f64, - /// The PYL convergence criterion for the algorithm - /// - /// P(Y|L) represents the probability of the observation given its weighted support - /// Previously referred to as THETA_F - pub pyl: f64, - /// Precision convergence criterion for the algorithm - /// - /// The precision variable, sometimes referred to as `eps`, is the distance from existing points in the grid to the candidate point. A candidate point is suggested at a distance of `eps` times the range of the parameter. 
- /// For example, if the parameter `alpha` has a range of `[0.0, 1.0]`, and `eps` is `0.1`, then the candidate point will be at a distance of `0.1 * (1.0 - 0.0) = 0.1` from the existing grid point(s). - /// Previously referred to as THETA_E - pub eps: f64, -} - -impl Default for Convergence { - fn default() -> Self { - Convergence { - likelihood: 1e-4, - pyl: 1e-2, - eps: 1e-2, - } - } -} - -#[derive(Debug, Deserialize, Clone, Serialize)] -#[serde(deny_unknown_fields, default)] -pub struct Predictions { - /// The interval for which predictions are generated - pub idelta: f64, - /// The time after the last dose for which predictions are generated - /// - /// Predictions will always be generated until the last event (observation or dose) in the data. - /// This setting is used to generate predictions beyond the last event if the `tad` if sufficiently large. - /// This can be useful for generating predictions for a subject who only received a dose, but has no observations. - pub tad: f64, -} - -impl Default for Predictions { - fn default() -> Self { - Predictions { - idelta: 0.12, - tad: 0.0, - } - } -} - -impl Predictions { - /// Validate the prediction settings - pub fn validate(&self) -> Result<()> { - if self.idelta < 0.0 { - bail!("The interval for predictions must be non-negative"); - } - if self.tad < 0.0 { - bail!("The time after dose for predictions must be non-negative"); - } - Ok(()) - } -} - -/// The log level, which can be one of the following: -/// - `TRACE` -/// - `DEBUG` -/// - `INFO` (Default) -/// - `WARN` -/// - `ERROR` -#[derive(Debug, Deserialize, Clone, Serialize, Default)] -pub enum LogLevel { - TRACE, - DEBUG, - #[default] - INFO, - WARN, - ERROR, -} - -impl From for tracing::Level { - fn from(log_level: LogLevel) -> tracing::Level { - match log_level { - LogLevel::TRACE => tracing::Level::TRACE, - LogLevel::DEBUG => tracing::Level::DEBUG, - LogLevel::INFO => tracing::Level::INFO, - LogLevel::WARN => tracing::Level::WARN, - LogLevel::ERROR => 
tracing::Level::ERROR, - } - } -} - -impl AsRef for LogLevel { - fn as_ref(&self) -> &str { - match self { - LogLevel::TRACE => "trace", - LogLevel::DEBUG => "debug", - LogLevel::INFO => "info", - LogLevel::WARN => "warn", - LogLevel::ERROR => "error", - } - } -} - -impl Display for LogLevel { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.as_ref()) - } -} - -#[derive(Debug, Deserialize, Clone, Serialize)] -#[serde(deny_unknown_fields, default)] -pub struct Log { - /// The maximum log level to display, as defined by [LogLevel] - /// - /// [LogLevel] is a thin wrapper around `tracing::Level`, but can be serialized - pub level: LogLevel, - /// Should the logs be written to a file - /// - /// If true, a file will be created in the output folder with the name `log.txt`, or, if [Output::write] is false, in the current directory. - pub write: bool, - /// Define if logs should be written to stdout - pub stdout: bool, -} - -impl Default for Log { - fn default() -> Self { - Log { - level: LogLevel::INFO, - write: false, - stdout: true, - } - } -} - -/// Configuration for the output files -#[derive(Debug, Deserialize, Clone, Serialize)] -#[serde(deny_unknown_fields, default)] -pub struct Output { - /// Whether to write the output files - pub write: bool, - /// The (relative) path to write the output files to - pub path: String, -} - -impl Default for Output { - fn default() -> Self { - let path = PathBuf::from("outputs/").to_string_lossy().to_string(); - - Output { write: true, path } - } -} - -pub struct SettingsBuilder { - config: Option, - parameters: Option, - errormodels: Option, - predictions: Option, - log: Option, - prior: Option, - output: Option, - convergence: Option, - advanced: Option, - _marker: std::marker::PhantomData, -} - -// Marker traits for builder states -pub trait AlgorithmDefined {} -pub trait ParametersDefined {} -pub trait ErrorModelDefined {} - -// Implement marker traits for PhantomData states -pub 
struct InitialState; -pub struct AlgorithmSet; -pub struct ParametersSet; -pub struct ErrorSet; - -// Initial state: no algorithm set yet -impl SettingsBuilder { - pub fn new() -> Self { - SettingsBuilder { - config: None, - parameters: None, - errormodels: None, - predictions: None, - log: None, - prior: None, - output: None, - convergence: None, - advanced: None, - _marker: std::marker::PhantomData, - } - } - - pub fn set_algorithm(self, algorithm: Algorithm) -> SettingsBuilder { - SettingsBuilder { - config: Some(Config { - algorithm, - ..Config::default() - }), - parameters: self.parameters, - errormodels: self.errormodels, - predictions: self.predictions, - log: self.log, - prior: self.prior, - output: self.output, - convergence: self.convergence, - advanced: self.advanced, - _marker: std::marker::PhantomData, - } - } -} - -impl Default for SettingsBuilder { - fn default() -> Self { - SettingsBuilder::new() - } -} - -// Algorithm is set, move to defining parameters -impl SettingsBuilder { - pub fn set_parameters(self, parameters: Parameters) -> SettingsBuilder { - SettingsBuilder { - config: self.config, - parameters: Some(parameters), - errormodels: self.errormodels, - predictions: self.predictions, - log: self.log, - prior: self.prior, - output: self.output, - convergence: self.convergence, - advanced: self.advanced, - _marker: std::marker::PhantomData, - } - } -} - -// Parameters are set, move to defining error model -impl SettingsBuilder { - pub fn set_error_models(self, ems: AssayErrorModels) -> SettingsBuilder { - SettingsBuilder { - config: self.config, - parameters: self.parameters, - errormodels: Some(ems), - predictions: self.predictions, - log: self.log, - prior: self.prior, - output: self.output, - convergence: self.convergence, - advanced: self.advanced, - _marker: std::marker::PhantomData, - } - } -} - -// Error model is set, allow optional settings and final build -impl SettingsBuilder { - pub fn build(self) -> Settings { - Settings { - config: 
self.config.unwrap(), - parameters: self.parameters.unwrap(), - errormodels: self.errormodels.unwrap(), - predictions: self.predictions.unwrap_or_default(), - log: self.log.unwrap_or_default(), - prior: self.prior.unwrap_or_default(), - output: self.output.unwrap_or_default(), - convergence: self.convergence.unwrap_or_default(), - advanced: self.advanced.unwrap_or_default(), - } - } -} - -fn parse_output_folder(path: String) -> String { - // If the path doesn't contain a "#", just return it as is - if !path.contains("#") { - return path; - } - - // If it does contain "#", perform the incrementation logic - let mut num = 1; - while std::path::Path::new(&path.replace("#", &num.to_string())).exists() { - num += 1; - } - - path.replace("#", &num.to_string()) -} - -#[cfg(test)] - -mod tests { - use pharmsol::{AssayErrorModel, AssayErrorModels, ErrorPoly}; - - use super::*; - use crate::algorithms::Algorithm; - - #[test] - fn test_builder() { - let parameters = Parameters::new().add("Ke", 0.0, 5.0).add("V", 10.0, 200.0); - - let ems = AssayErrorModels::new() - .add( - 0, - AssayErrorModel::Proportional { - gamma: pharmsol::Factor::Variable(5.0), - poly: ErrorPoly::new(0.0, 0.1, 0.0, 0.0), - }, - ) - .unwrap(); - let mut settings = SettingsBuilder::new() - .set_algorithm(Algorithm::NPAG) // Step 1: Define algorithm - .set_parameters(parameters) // Step 2: Define parameters - .set_error_models(ems) - .build(); - - settings.set_cycles(100); - - assert_eq!(settings.config.algorithm, Algorithm::NPAG); - assert_eq!(settings.config.cycles, 100); - assert_eq!(settings.config.cache, true); - assert_eq!(settings.parameters().names(), vec!["Ke", "V"]); - } -} diff --git a/src/structs/mod.rs b/src/structs/mod.rs deleted file mode 100644 index eb24bf1e4..000000000 --- a/src/structs/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -pub mod psi; -pub mod theta; -pub mod weights; diff --git a/tests/acceptance_baseline_tests.rs b/tests/acceptance_baseline_tests.rs new file mode 100644 index 
000000000..91462faad --- /dev/null +++ b/tests/acceptance_baseline_tests.rs @@ -0,0 +1,90 @@ +use anyhow::Result; +use pharmsol::{AssayErrorModel, ErrorPoly}; +use pmcore::prelude::*; + +fn bimodal_ode_equation() -> equation::ODE { + ode! { + diffeq: |x, p, _t, dx, b, rateiv, _cov| { + fetch_params!(p, ke, _v); + dx[0] = -ke * x[0] + rateiv[1] + b[1]; + }, + out: |x, p, _t, _cov, y| { + fetch_params!(p, _ke, v); + y[1] = x[0] / v; + }, + } + .with_solver(OdeSolver::ExplicitRk(ExplicitRkTableau::Tsit45)) +} + +fn bimodal_data() -> Result { + Ok(data::read_pmetrics("examples/bimodal_ke/bimodal_ke.csv")?) +} + +fn bimodal_npag_model() -> Result> { + let observations = ObservationSpec::new() + .add_channel(ObservationChannel::continuous(1, "cp")) + .with_assay_error_models(AssayErrorModels::new().add( + 1, + AssayErrorModel::additive(ErrorPoly::new(0.0, 0.5, 0.0, 0.0), 0.0), + )?); + + ModelDefinition::builder(bimodal_ode_equation()) + .parameters( + ParameterSpace::new() + .add(ParameterSpec::bounded("ke", 0.001, 3.0)) + .add(ParameterSpec::bounded("v", 25.0, 250.0)), + ) + .observations(observations) + .build() +} + +fn assert_close(actual: f64, expected: f64, tolerance: f64, label: &str) { + let delta = (actual - expected).abs(); + assert!( + delta <= tolerance, + "{label}: expected {expected}, got {actual}, delta {delta} > {tolerance}" + ); +} + +#[test] +fn test_acceptance_baseline_npag_bimodal_ke() -> Result<()> { + let result = EstimationProblem::builder(bimodal_npag_model()?, bimodal_data()?) 
+ .method(EstimationMethod::Nonparametric(NonparametricMethod::Npag( + NpagOptions::default(), + ))) + .output(OutputPlan::disabled()) + .runtime(RuntimeOptions { + cycles: 1000, + progress: false, + ..RuntimeOptions::default() + }) + .run()?; + let summary = result.summary(); + let population = result.population_summary(); + let result = result + .as_nonparametric() + .expect("NPAG acceptance baseline should yield a nonparametric result"); + + assert_close( + summary.objective_function, + -425.60904902364695, + 1e-6, + "npag.objf", + ); + assert!(summary.converged); + assert_eq!(summary.iterations, 288); + assert_eq!(result.get_theta().nspp(), 46); + assert_close( + population.parameters[0].mean, + 0.187047284678325, + 1e-6, + "npag.ke.mean", + ); + assert_close( + population.parameters[1].mean, + 107.94241284196241, + 1e-6, + "npag.v.mean", + ); + Ok(()) +} diff --git a/tests/api_smoke_tests.rs b/tests/api_smoke_tests.rs new file mode 100644 index 000000000..5c0679414 --- /dev/null +++ b/tests/api_smoke_tests.rs @@ -0,0 +1,89 @@ +use anyhow::Result; +use pharmsol::{AssayErrorModel, ErrorPoly}; +use pmcore::prelude::*; + +fn simple_equation() -> equation::ODE { + equation::ODE::new( + |x, p, _t, dx, b, _rateiv, _cov| { + fetch_params!(p, ke); + dx[0] = -ke * x[0] + b[0]; + }, + |_p, _t, _cov| lag! {}, + |_p, _t, _cov| fa! 
{}, + |_p, _t, _cov, _x| {}, + |x, p, _t, _cov, y| { + fetch_params!(p, v); + y[0] = x[0] / v; + }, + ) +} + +fn simple_data() -> Data { + let subject = Subject::builder("1") + .bolus(0.0, 100.0, 0) + .observation(1.0, 10.0, 0) + .observation(2.0, 8.0, 0) + .build(); + + Data::new(vec![subject]) +} + +#[test] +fn test_model_definition_builder() -> Result<()> { + let assay_error = AssayErrorModel::additive(ErrorPoly::new(0.0, 0.10, 0.0, 0.0), 2.0); + let observations = ObservationSpec::new() + .add_channel(ObservationChannel::continuous(0, "cp")) + .with_assay_error_models(AssayErrorModels::new().add(0, assay_error)?); + + let model = ModelDefinition::builder(simple_equation()) + .parameters( + ParameterSpace::new() + .add(ParameterSpec::bounded("ke", 0.1, 1.0)) + .add(ParameterSpec::bounded("v", 1.0, 20.0)), + ) + .observations(observations) + .build()?; + + assert_eq!(model.parameters.len(), 2); + assert_eq!(model.observations.channels.len(), 1); + Ok(()) +} + +#[test] +fn test_unified_fit_nonparametric_smoke() -> Result<()> { + let assay_error = AssayErrorModel::additive(ErrorPoly::new(0.0, 0.10, 0.0, 0.0), 2.0); + let observations = ObservationSpec::new() + .add_channel(ObservationChannel::continuous(0, "cp")) + .with_assay_error_models(AssayErrorModels::new().add(0, assay_error)?); + + let model = ModelDefinition::builder(simple_equation()) + .parameters( + ParameterSpace::new() + .add(ParameterSpec::bounded("ke", 0.1, 1.0)) + .add(ParameterSpec::bounded("v", 1.0, 20.0)), + ) + .observations(observations) + .build()?; + + let result = EstimationProblem::builder(model, simple_data()) + .method(EstimationMethod::Nonparametric(NonparametricMethod::Npag( + NpagOptions, + ))) + .output(OutputPlan::disabled()) + .runtime(RuntimeOptions { + cycles: 1, + cache: true, + progress: false, + idelta: 0.12, + tad: 0.0, + prior: None, + ..RuntimeOptions::default() + }) + .run()?; + + assert!(result.objf().is_finite()); + assert_eq!(result.summary().parameter_count, 2); + 
assert_eq!(result.population_summary().parameters.len(), 2); + assert_eq!(result.individual_summaries().len(), 1); + Ok(()) +} diff --git a/tests/bestdose_tests.rs b/tests/bestdose_tests.rs index 145bf11ae..4158f402f 100644 --- a/tests/bestdose_tests.rs +++ b/tests/bestdose_tests.rs @@ -1,8 +1,62 @@ use anyhow::Result; -use pmcore::bestdose::{BestDosePosterior, DoseRange, Target}; +use pmcore::bestdose::{BestDoseConfig, BestDosePosterior, BestDoseProblem, DoseRange, Target}; +use pmcore::estimation::nonparametric::{Theta, Weights}; use pmcore::prelude::*; -use pmcore::structs::theta::Theta; -use pmcore::structs::weights::Weights; + +fn pk_parameter_space(ke_lower: f64, ke_upper: f64, v_lower: f64, v_upper: f64) -> ParameterSpace { + ParameterSpace::new() + .add(ParameterSpec::bounded("ke", ke_lower, ke_upper)) + .add(ParameterSpec::bounded("v", v_lower, v_upper)) +} + +fn bestdose_config( + params: &ParameterSpace, + error_models: AssayErrorModels, + refinement_cycles: usize, + prediction_interval: f64, +) -> BestDoseConfig { + BestDoseConfig::new(params.clone(), error_models) + .with_refinement_cycles(refinement_cycles) + .with_progress(false) + .with_prediction_interval(prediction_interval) +} + +fn one_compartment_model() -> pharmsol::ODE { + equation::ODE::new( + |x, p, _t, dx, b, _rateiv, _cov| { + fetch_params!(p, ke, _v); + dx[0] = -ke * x[0] + b[0]; + }, + |_p, _, _| lag! {}, + |_p, _, _| fa! 
{}, + |_p, _t, _cov, _x| {}, + |x, p, _t, _cov, y| { + fetch_params!(p, _ke, v); + y[0] = x[0] / v; + }, + ) +} + +fn minimal_config() -> BestDoseConfig { + let params = pk_parameter_space(0.001, 3.0, 25.0, 250.0); + let ems = AssayErrorModels::new() + .add( + 0, + AssayErrorModel::additive(ErrorPoly::new(0.0, 0.20, 0.0, 0.0), 0.0), + ) + .unwrap(); + bestdose_config(¶ms, ems, 0, 0.12) +} + +fn simple_prior(config: &BestDoseConfig) -> Result<(Theta, Weights)> { + let mat = faer::Mat::from_fn(1, 2, |_r, c| match c { + 0 => 0.3, + 1 => 50.0, + _ => 0.0, + }); + let theta = Theta::from_parts(mat, config.parameter_space().clone())?; + Ok((theta, Weights::uniform(1))) +} /// Test that infusions are properly included in the dose optimization mask /// This test verifies that infusions with amount=0 are treated as optimizable doses @@ -23,21 +77,14 @@ fn test_infusion_mask_inclusion() -> Result<()> { }, ); - let params = Parameters::new().add("ke", 0.1, 0.5).add("v", 40.0, 60.0); + let params = pk_parameter_space(0.1, 0.5, 40.0, 60.0); let ems = AssayErrorModels::new().add( 0, AssayErrorModel::additive(ErrorPoly::new(0.0, 0.20, 0.0, 0.0), 0.0), )?; - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems.clone()) - .build(); - - settings.disable_output(); - settings.set_cycles(0); + let config = bestdose_config(¶ms, ems.clone(), 0, 0.12); // Create a target subject with an optimizable infusion // Use reasonable target concentrations that match typical PK behavior @@ -54,17 +101,22 @@ fn test_infusion_mask_inclusion() -> Result<()> { 1 => 50.0, // v _ => 0.0, }); - Theta::from_parts(mat, settings.parameters().clone())? + Theta::from_parts(mat, params.clone())? 
}; let prior_weights = Weights::uniform(1); - // Create BestDose posterior - let posterior = BestDosePosterior::compute( + // Create BestDose problem + let problem = BestDoseProblem::new( &prior_theta, &prior_weights, None, + target.clone(), + None, eq.clone(), - settings.clone(), + DoseRange::new(10.0, 300.0), + 0.5, + config, + Target::Concentration, )?; // Count optimizable doses in the target @@ -85,13 +137,7 @@ fn test_infusion_mask_inclusion() -> Result<()> { ); // Run optimization - it should not panic and should handle infusion - let result = posterior.optimize( - target.clone(), - None, - DoseRange::new(10.0, 300.0), - 0.5, - Target::Concentration, - ); + let result = problem.optimize(); // The optimization should succeed assert!( @@ -138,23 +184,14 @@ fn test_fixed_infusion_preservation() -> Result<()> { }, ); - let params = Parameters::new() - .add("ke", 0.001, 3.0) - .add("v", 25.0, 250.0); + let params = pk_parameter_space(0.001, 3.0, 25.0, 250.0); let ems = AssayErrorModels::new().add( 0, AssayErrorModel::additive(ErrorPoly::new(0.0, 0.20, 0.0, 0.0), 0.0), )?; - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems.clone()) - .build(); - - settings.disable_output(); - settings.set_cycles(0); + let config = bestdose_config(¶ms, ems.clone(), 0, 0.12); // Create past data with a fixed infusion let past = Subject::builder("test_patient") @@ -174,29 +211,27 @@ fn test_fixed_infusion_preservation() -> Result<()> { 1 => 50.0, _ => 0.0, }); - Theta::from_parts(mat, settings.parameters().clone())? + Theta::from_parts(mat, params.clone())? 
}; let prior_weights = Weights::uniform(1); // Use current_time to separate past and future - let posterior = BestDosePosterior::compute( + let problem = BestDoseProblem::new( &prior_theta, &prior_weights, Some(past), - eq.clone(), - settings.clone(), - )?; - - let result = posterior.optimize( target, - Some(0.0), // No gap after past (past ends at t=2.0) + Some(2.0), // Current time = 2.0 hours + eq.clone(), DoseRange::new(0.0, 500.0), 0.5, + config, Target::Concentration, )?; - // With time_offset, past doses are concatenated with future target. - // Result should have 2 doses: fixed past infusion + optimized future bolus. + let result = problem.optimize()?; + + // Should only optimize the future bolus, not the past infusion let doses = result.doses(); eprintln!("Optimized doses: {:?}", doses); assert_eq!( @@ -204,11 +239,7 @@ fn test_fixed_infusion_preservation() -> Result<()> { 2, "Should have 2 doses (past infusion + future bolus)" ); - assert!( - (doses[0] - 200.0).abs() < 1e-6, - "Past infusion should remain fixed at 200.0, got {}", - doses[0] - ); + assert_eq!(doses[0], 200.0, "Past infusion dose should be preserved"); assert!(doses[1] > 0.0, "Future bolus dose should be optimized"); Ok(()) @@ -217,6 +248,8 @@ fn test_fixed_infusion_preservation() -> Result<()> { /// Test that dose count validation works #[test] fn test_dose_count_validation() -> Result<()> { + use pmcore::bestdose::cost::calculate_cost; + let eq = equation::ODE::new( |x, p, _t, dx, b, _rateiv, _cov| { fetch_params!(p, ke, _v); @@ -231,19 +264,13 @@ fn test_dose_count_validation() -> Result<()> { }, ); - let params = Parameters::new().add("ke", 0.1, 0.5).add("v", 40.0, 60.0); + let params = pk_parameter_space(0.1, 0.5, 40.0, 60.0); let ems = AssayErrorModels::new().add( 0, AssayErrorModel::additive(ErrorPoly::new(0.0, 0.20, 0.0, 0.0), 0.0), )?; - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems.clone()) - .build(); - 
settings.disable_output(); - settings.set_cycles(0); + let config = bestdose_config(¶ms, ems.clone(), 0, 0.12); // Create target with 2 optimizable doses let target = Subject::builder("test_patient") @@ -259,27 +286,34 @@ fn test_dose_count_validation() -> Result<()> { 1 => 50.0, _ => 0.0, }); - Theta::from_parts(mat, settings.parameters().clone())? + Theta::from_parts(mat, params.clone())? }; let prior_weights = Weights::uniform(1); - let posterior = BestDosePosterior::compute(&prior_theta, &prior_weights, None, eq, settings)?; - - // Optimize with the correct target (2 optimizable doses, 2 observations) - should succeed - let result = posterior.optimize( + let problem = BestDoseProblem::new( + &prior_theta, + &prior_weights, + None, target, None, + eq, DoseRange::new(10.0, 300.0), 0.5, + config, Target::Concentration, - ); + )?; + + // Try with wrong number of doses - should fail + let result_wrong = calculate_cost(&problem, &[100.0]); // Only 1 dose, need 2 + assert!(result_wrong.is_err(), "Should fail with wrong dose count"); + assert!(result_wrong.unwrap_err().to_string().contains("mismatch")); + + // Try with correct number of doses - should succeed + let result_correct = calculate_cost(&problem, &[100.0, 150.0]); assert!( - result.is_ok(), - "Should succeed with correct target: {:?}", - result.err() + result_correct.is_ok(), + "Should succeed with correct dose count" ); - let result = result?; - assert_eq!(result.doses().len(), 2, "Should have 2 optimized doses"); Ok(()) } @@ -287,6 +321,8 @@ fn test_dose_count_validation() -> Result<()> { /// Test that empty observations are caught #[test] fn test_empty_observations_validation() -> Result<()> { + use pmcore::bestdose::cost::calculate_cost; + let eq = equation::ODE::new( |x, p, _t, dx, b, _rateiv, _cov| { fetch_params!(p, ke, _v); @@ -301,19 +337,13 @@ fn test_empty_observations_validation() -> Result<()> { }, ); - let params = Parameters::new().add("ke", 0.1, 0.5).add("v", 40.0, 60.0); + let params = 
pk_parameter_space(0.1, 0.5, 40.0, 60.0); let ems = AssayErrorModels::new().add( 0, AssayErrorModel::additive(ErrorPoly::new(0.0, 0.20, 0.0, 0.0), 0.0), )?; - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems.clone()) - .build(); - settings.disable_output(); - settings.set_cycles(0); + let config = bestdose_config(¶ms, ems.clone(), 0, 0.12); // Create target with doses but NO observations let target = Subject::builder("test_patient").bolus(0.0, 0.0, 0).build(); // No observations! @@ -324,20 +354,25 @@ fn test_empty_observations_validation() -> Result<()> { 1 => 50.0, _ => 0.0, }); - Theta::from_parts(mat, settings.parameters().clone())? + Theta::from_parts(mat, params.clone())? }; let prior_weights = Weights::uniform(1); - let posterior = BestDosePosterior::compute(&prior_theta, &prior_weights, None, eq, settings)?; - - // Try to optimize - should fail with no observations - let result = posterior.optimize( + let problem = BestDoseProblem::new( + &prior_theta, + &prior_weights, + None, target, None, + eq, DoseRange::new(10.0, 300.0), 0.5, + config, Target::Concentration, - ); + )?; + + // Try to calculate cost - should fail with no observations + let result = calculate_cost(&problem, &[100.0]); assert!(result.is_err(), "Should fail with no observations"); assert!(result.unwrap_err().to_string().contains("no observations")); @@ -361,22 +396,14 @@ fn test_basic_auc_mode() -> Result<()> { }, ); - let params = Parameters::new().add("ke", 0.1, 0.5).add("v", 40.0, 60.0); + let params = pk_parameter_space(0.1, 0.5, 40.0, 60.0); let ems = AssayErrorModels::new().add( 0, AssayErrorModel::additive(ErrorPoly::new(0.0, 0.20, 0.0, 0.0), 0.0), )?; - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems.clone()) - .build(); - - settings.disable_output(); - settings.set_idelta(30.0); - settings.set_cycles(0); + let config = 
bestdose_config(¶ms, ems.clone(), 0, 30.0); let target = Subject::builder("test_patient") .bolus(0.0, 0.0, 0) // Optimizable bolus @@ -389,19 +416,24 @@ fn test_basic_auc_mode() -> Result<()> { 1 => 50.0, _ => 0.0, }); - Theta::from_parts(mat, settings.parameters().clone())? + Theta::from_parts(mat, params.clone())? }; let prior_weights = Weights::uniform(1); - let posterior = BestDosePosterior::compute(&prior_theta, &prior_weights, None, eq, settings)?; - - let result = posterior.optimize( + let problem = BestDoseProblem::new( + &prior_theta, + &prior_weights, + None, target, None, + eq, DoseRange::new(100.0, 2000.0), 0.8, + config, Target::AUCFromZero, - ); + )?; + + let result = problem.optimize(); assert!( result.is_ok(), @@ -446,22 +478,14 @@ fn test_infusion_auc_mode() -> Result<()> { }, ); - let params = Parameters::new().add("ke", 0.1, 0.5).add("v", 40.0, 60.0); + let params = pk_parameter_space(0.1, 0.5, 40.0, 60.0); let ems = AssayErrorModels::new().add( 0, AssayErrorModel::additive(ErrorPoly::new(0.0, 0.20, 0.0, 0.0), 0.0), )?; - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems.clone()) - .build(); - - settings.disable_output(); - settings.set_idelta(30.0); // 30-minute intervals for AUC calculation - settings.set_cycles(0); + let config = bestdose_config(¶ms, ems.clone(), 0, 30.0); // Create a target with an optimizable infusion and AUC targets let target = Subject::builder("test_patient") @@ -476,21 +500,26 @@ fn test_infusion_auc_mode() -> Result<()> { 1 => 50.0, _ => 0.0, }); - Theta::from_parts(mat, settings.parameters().clone())? + Theta::from_parts(mat, params.clone())? 
}; let prior_weights = Weights::uniform(1); - // Create BestDose posterior and optimize in AUC mode - let posterior = BestDosePosterior::compute(&prior_theta, &prior_weights, None, eq, settings)?; - - // Run optimization - let result = posterior.optimize( + // Create BestDose problem in AUC mode + let problem = BestDoseProblem::new( + &prior_theta, + &prior_weights, + None, target, None, + eq, DoseRange::new(100.0, 2000.0), - 0.8, // Higher bias weight typically works better for AUC targets + 0.8, // Higher bias weight typically works better for AUC targets + config, Target::AUCFromZero, // AUC mode! - ); + )?; + + // Run optimization + let result = problem.optimize(); assert!( result.is_ok(), @@ -550,22 +579,14 @@ fn test_multi_outeq_auc_mode() -> Result<()> { }, ); - let params = Parameters::new().add("ke", 0.1, 0.5).add("v", 40.0, 60.0); + let params = pk_parameter_space(0.1, 0.5, 40.0, 60.0); let error_model = AssayErrorModel::additive(ErrorPoly::new(0.0, 5.0, 0.0, 0.0), 0.0); let ems = AssayErrorModels::new() .add(0, error_model.clone())? .add(1, error_model)?; - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params.clone()) - .set_error_models(ems.clone()) - .build(); - - settings.disable_output(); - settings.set_cycles(0); - settings.set_idelta(30.0); // 30-minute intervals for AUC calculation + let config = bestdose_config(¶ms, ems.clone(), 0, 0.12); // Subject with fixed dose and target observations at multiple outeqs let target = Subject::builder("test") @@ -581,22 +602,26 @@ fn test_multi_outeq_auc_mode() -> Result<()> { 1 => 50.0, // v _ => 0.0, }); - Theta::from_parts(mat, settings.parameters().clone())? + Theta::from_parts(mat, params.clone())? 
}; let prior_weights = Weights::uniform(1); - let posterior = BestDosePosterior::compute(&prior_theta, &prior_weights, None, eq, settings)?; - - let _result = posterior.optimize( + let _problem = BestDoseProblem::new( + &prior_theta, + &prior_weights, + None, target, None, + eq, DoseRange::new(0.0, 2000.0), 0.5, + config, Target::AUCFromZero, )?; - // Just verify that posterior compute and optimize succeed + // Just verify that problem was created successfully // This tests that cost calculation works with multi-outeq + // (cost is calculated during problem validation) Ok(()) } @@ -620,19 +645,13 @@ fn test_multi_outeq_auc_optimization() -> Result<()> { }, ); - let params = Parameters::new().add("ke", 0.1, 0.5).add("v", 40.0, 60.0); + let params = pk_parameter_space(0.1, 0.5, 40.0, 60.0); let error_model = AssayErrorModel::additive(ErrorPoly::new(0.0, 5.0, 0.0, 0.0), 0.0); let ems = AssayErrorModels::new() .add(0, error_model.clone())? .add(1, error_model)?; - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params.clone()) - .set_error_models(ems.clone()) - .build(); - settings.disable_output(); - settings.set_cycles(3); + let config = bestdose_config(¶ms, ems.clone(), 3, 0.12); let target = Subject::builder("test") .bolus(0.0, 0.0, 0) @@ -646,19 +665,24 @@ fn test_multi_outeq_auc_optimization() -> Result<()> { 1 => 50.0, _ => 0.0, }); - Theta::from_parts(mat, settings.parameters().clone())? + Theta::from_parts(mat, params.clone())? 
}; let prior_weights = Weights::uniform(1); - let posterior = BestDosePosterior::compute(&prior_theta, &prior_weights, None, eq, settings)?; - - let result = posterior.optimize( + let problem = BestDoseProblem::new( + &prior_theta, + &prior_weights, + None, target, None, + eq, DoseRange::new(0.0, 2000.0), 0.5, + config, Target::AUCFromZero, - ); + )?; + + let result = problem.optimize(); assert!( result.is_ok(), "Multi-outeq AUC optimization failed: {:?}", @@ -705,22 +729,14 @@ fn test_auc_from_zero_single_dose() -> Result<()> { }, ); - let params = Parameters::new().add("ke", 0.2, 0.4).add("v", 40.0, 60.0); + let params = pk_parameter_space(0.2, 0.4, 40.0, 60.0); let ems = AssayErrorModels::new().add( 0, AssayErrorModel::additive(ErrorPoly::new(0.0, 0.20, 0.0, 0.0), 0.0), )?; - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems.clone()) - .build(); - - settings.disable_output(); - settings.set_cycles(0); - settings.set_idelta(10.0); // 10-minute intervals for AUC calculation + let config = bestdose_config(¶ms, ems.clone(), 0, 10.0); // Target: Single dose, cumulative AUC from 0 to 12h let target = Subject::builder("patient_auc_zero") @@ -734,20 +750,25 @@ fn test_auc_from_zero_single_dose() -> Result<()> { 1 => 50.0, // v _ => 0.0, }); - Theta::from_parts(mat, settings.parameters().clone())? + Theta::from_parts(mat, params.clone())? 
}; let prior_weights = Weights::uniform(1); - let posterior = BestDosePosterior::compute(&prior_theta, &prior_weights, None, eq, settings)?; - - let result = posterior.optimize( + let problem = BestDoseProblem::new( + &prior_theta, + &prior_weights, + None, target, None, + eq, DoseRange::new(100.0, 1000.0), 0.8, + config, Target::AUCFromZero, // Cumulative AUC from time 0 )?; + let result = problem.optimize()?; + let doses: Vec = result.doses(); // Verify we got a result @@ -789,22 +810,14 @@ fn test_auc_from_last_dose_maintenance() -> Result<()> { }, ); - let params = Parameters::new().add("ke", 0.2, 0.4).add("v", 40.0, 60.0); + let params = pk_parameter_space(0.2, 0.4, 40.0, 60.0); let ems = AssayErrorModels::new().add( 0, AssayErrorModel::additive(ErrorPoly::new(0.0, 0.20, 0.0, 0.0), 0.0), )?; - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems.clone()) - .build(); - - settings.disable_output(); - settings.set_cycles(0); - settings.set_idelta(10.0); + let config = bestdose_config(¶ms, ems.clone(), 0, 10.0); // Target: Loading dose (fixed) + maintenance dose (optimize) // Target interval AUC from t=12 to t=24 @@ -820,19 +833,24 @@ fn test_auc_from_last_dose_maintenance() -> Result<()> { 1 => 50.0, // v _ => 0.0, }); - Theta::from_parts(mat, settings.parameters().clone())? + Theta::from_parts(mat, params.clone())? 
}; let prior_weights = Weights::uniform(1); - let posterior = BestDosePosterior::compute(&prior_theta, &prior_weights, None, eq, settings)?; - - let result = posterior.optimize( + let problem = BestDoseProblem::new( + &prior_theta, + &prior_weights, + None, target, None, + eq, DoseRange::new(50.0, 500.0), 0.8, + config, Target::AUCFromLastDose, // Interval AUC from last dose )?; + + let result = problem.optimize()?; let doses = result.doses(); // Verify we got a result @@ -877,22 +895,14 @@ fn test_auc_modes_comparison() -> Result<()> { }, ); - let params = Parameters::new().add("ke", 0.3, 0.3).add("v", 50.0, 50.0); + let params = pk_parameter_space(0.3, 0.3, 50.0, 50.0); let ems = AssayErrorModels::new().add( 0, AssayErrorModel::additive(ErrorPoly::new(0.0, 0.20, 0.0, 0.0), 0.0), )?; - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems.clone()) - .build(); - - settings.disable_output(); - settings.set_cycles(0); - settings.set_idelta(10.0); + let config = bestdose_config(¶ms, ems.clone(), 0, 10.0); let prior_theta = { let mat = faer::Mat::from_fn(1, 2, |_r, c| match c { @@ -900,7 +910,7 @@ fn test_auc_modes_comparison() -> Result<()> { 1 => 50.0, // v _ => 0.0, }); - Theta::from_parts(mat, settings.parameters().clone())? + Theta::from_parts(mat, params.clone())? 
}; let prior_weights = Weights::uniform(1); @@ -914,21 +924,20 @@ fn test_auc_modes_comparison() -> Result<()> { .observation(24.0, 100.0, 0) // Target: AUC₀₋₂₄ = 100 .build(); - let posterior_zero = BestDosePosterior::compute( + let problem_zero = BestDoseProblem::new( &prior_theta, &prior_weights, None, - eq.clone(), - settings.clone(), - )?; - - let result_zero = posterior_zero.optimize( target_zero, None, + eq.clone(), DoseRange::new(10.0, 2000.0), 0.8, + config.clone(), Target::AUCFromZero, )?; + + let result_zero = problem_zero.optimize()?; // Extract only the second dose (the optimized one at t=12) let dose_zero = result_zero.doses()[1]; @@ -939,16 +948,20 @@ fn test_auc_modes_comparison() -> Result<()> { .observation(24.0, 100.0, 0) // Target: AUC₁₂₋₂₄ = 100 .build(); - let posterior_last = - BestDosePosterior::compute(&prior_theta, &prior_weights, None, eq, settings)?; - - let result_last = posterior_last.optimize( + let problem_last = BestDoseProblem::new( + &prior_theta, + &prior_weights, + None, target_last, None, + eq, DoseRange::new(10.0, 2000.0), 0.8, + config, Target::AUCFromLastDose, )?; + + let result_last = problem_last.optimize()?; // Extract only the second dose (the optimized one at t=12) let dose_last = result_last.doses()[1]; @@ -1006,22 +1019,14 @@ fn test_auc_from_last_dose_multiple_observations() -> Result<()> { }, ); - let params = Parameters::new().add("ke", 0.2, 0.4).add("v", 40.0, 60.0); + let params = pk_parameter_space(0.2, 0.4, 40.0, 60.0); let ems = AssayErrorModels::new().add( 0, AssayErrorModel::additive(ErrorPoly::new(0.0, 0.20, 0.0, 0.0), 0.0), )?; - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems.clone()) - .build(); - - settings.disable_output(); - settings.set_cycles(0); - settings.set_idelta(10.0); + let config = bestdose_config(¶ms, ems.clone(), 0, 10.0); // Multiple doses and observations - each observation measures AUC from its preceding dose let 
target = Subject::builder("patient_multi") @@ -1037,19 +1042,24 @@ fn test_auc_from_last_dose_multiple_observations() -> Result<()> { 1 => 50.0, // v _ => 0.0, }); - Theta::from_parts(mat, settings.parameters().clone())? + Theta::from_parts(mat, params.clone())? }; let prior_weights = Weights::uniform(1); - let posterior = BestDosePosterior::compute(&prior_theta, &prior_weights, None, eq, settings)?; - - let result = posterior.optimize( + let problem = BestDoseProblem::new( + &prior_theta, + &prior_weights, + None, target, None, + eq, DoseRange::new(50.0, 500.0), 0.8, + config, Target::AUCFromLastDose, )?; + + let result = problem.optimize()?; let doses: Vec = result.doses(); // Should optimize 2 doses @@ -1100,22 +1110,14 @@ fn test_auc_from_last_dose_no_prior_dose() -> Result<()> { }, ); - let params = Parameters::new().add("ke", 0.2, 0.4).add("v", 40.0, 60.0); + let params = pk_parameter_space(0.2, 0.4, 40.0, 60.0); let ems = AssayErrorModels::new().add( 0, AssayErrorModel::additive(ErrorPoly::new(0.0, 0.20, 0.0, 0.0), 0.0), )?; - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems.clone()) - .build(); - - settings.disable_output(); - settings.set_cycles(0); - settings.set_idelta(10.0); + let config = bestdose_config(¶ms, ems.clone(), 0, 10.0); // Edge case: observation at t=6, but dose is at t=12 (after the observation) let target = Subject::builder("patient_edge") @@ -1129,19 +1131,24 @@ fn test_auc_from_last_dose_no_prior_dose() -> Result<()> { 1 => 50.0, // v _ => 0.0, }); - Theta::from_parts(mat, settings.parameters().clone())? + Theta::from_parts(mat, params.clone())? 
}; let prior_weights = Weights::uniform(1); - let posterior = BestDosePosterior::compute(&prior_theta, &prior_weights, None, eq, settings)?; - - let result = posterior.optimize( + let problem = BestDoseProblem::new( + &prior_theta, + &prior_weights, + None, target, None, + eq, DoseRange::new(50.0, 500.0), 0.8, + config, Target::AUCFromLastDose, )?; + + let result = problem.optimize()?; let doses: Vec = result.doses(); assert_eq!(doses.len(), 1); @@ -1189,21 +1196,14 @@ fn test_dose_range_bounds_respected() -> Result<()> { }, ); - let params = Parameters::new().add("ke", 0.1, 0.5).add("v", 40.0, 60.0); + let params = pk_parameter_space(0.1, 0.5, 40.0, 60.0); let ems = AssayErrorModels::new().add( 0, AssayErrorModel::additive(ErrorPoly::new(0.0, 0.20, 0.0, 0.0), 0.0), )?; - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems.clone()) - .build(); - - settings.disable_output(); - settings.set_cycles(0); + let config = bestdose_config(¶ms, ems.clone(), 0, 0.12); // Target with high concentration requiring large dose let target = Subject::builder("test_patient") @@ -1217,23 +1217,27 @@ fn test_dose_range_bounds_respected() -> Result<()> { 1 => 50.0, // v _ => 0.0, }); - Theta::from_parts(mat, settings.parameters().clone())? + Theta::from_parts(mat, params.clone())? 
}; let prior_weights = Weights::uniform(1); // Set a narrow dose range: 50-200 mg let dose_range = DoseRange::new(50.0, 200.0); - let posterior = BestDosePosterior::compute( + let problem = BestDoseProblem::new( &prior_theta, &prior_weights, None, + target.clone(), + None, eq.clone(), - settings.clone(), + dose_range, + 0.0, + config, + Target::Concentration, )?; - let result = - posterior.optimize(target.clone(), None, dose_range, 0.0, Target::Concentration)?; + let result = problem.optimize()?; let doses: Vec = result.doses(); println!("Optimal dose: {:.1} mg", doses[0]); @@ -1262,543 +1266,143 @@ fn test_dose_range_bounds_respected() -> Result<()> { Ok(()) } -// ═════════════════════════════════════════════════════════════════════════════ -// Tests for time_offset behavior -// ═════════════════════════════════════════════════════════════════════════════ - -/// Helper to build a simple one-compartment model used by multiple tests -fn one_compartment_model() -> pharmsol::ODE { - equation::ODE::new( - |x, p, _t, dx, b, _rateiv, _cov| { - fetch_params!(p, ke, _v); - dx[0] = -ke * x[0] + b[0]; - }, - |_p, _, _| lag! {}, - |_p, _, _| fa! 
{}, - |_p, _t, _cov, _x| {}, - |x, p, _t, _cov, y| { - fetch_params!(p, _ke, v); - y[0] = x[0] / v; - }, - ) -} - -/// Helper to build minimal settings for tests (no posterior refinement) -fn minimal_settings() -> Settings { - let params = Parameters::new() - .add("ke", 0.001, 3.0) - .add("v", 25.0, 250.0); - let ems = AssayErrorModels::new() - .add( - 0, - AssayErrorModel::additive(ErrorPoly::new(0.0, 0.20, 0.0, 0.0), 0.0), - ) - .unwrap(); - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems) - .build(); - settings.disable_output(); - settings.set_cycles(0); - settings -} - -/// Helper to build a simple prior (single support point) -fn simple_prior(settings: &Settings) -> (Theta, Weights) { - let mat = faer::Mat::from_fn(1, 2, |_r, c| match c { - 0 => 0.3, // ke - 1 => 50.0, // v - _ => 0.0, - }); - let theta = Theta::from_parts(mat, settings.parameters().clone()).unwrap(); - let weights = Weights::uniform(1); - (theta, weights) -} - -/// Test that gap=0 and gap=12 produce different results -/// -/// When time_offset is applied as a gap after the last past event, -/// different gaps change when the future dose is given relative to -/// the past, affecting the PK simulation outcome. 
#[test] -fn test_time_offset_zero_vs_nonzero_differ() -> Result<()> { +fn test_posterior_accessors() -> Result<()> { let eq = one_compartment_model(); - let settings = minimal_settings(); - let (theta, weights) = simple_prior(&settings); - - // Past data: dose at t=0, observation at t=6 - let past = Subject::builder("patient") - .bolus(0.0, 500.0, 0) - .observation(6.0, 5.0, 0) - .build(); - - let posterior = - BestDosePosterior::compute(&theta, &weights, Some(past), eq.clone(), settings.clone())?; - - // Target: optimizable dose at t=0 (relative), target conc at t=1 (relative) - // Short observation window so residual from past dose matters - let target = Subject::builder("patient") - .bolus(0.0, 0.0, 0) - .observation(1.0, 5.0, 0) // target: 5 mg/L at 1h after the future dose - .build(); + let config = minimal_config(); + let (theta, weights) = simple_prior(&config)?; - // gap=0: target dose at t=6 absolute (right after past), obs at t=7 - // Past dose (500mg at t=0): C(7) = 500/50 * e^(-0.3*7) ≈ 1.22 mg/L residual - let result_gap0 = posterior.optimize( - target.clone(), - Some(0.0), - DoseRange::new(10.0, 1000.0), - 0.5, - Target::Concentration, - )?; + let posterior = BestDosePosterior::compute(&theta, &weights, None, eq, config)?; - // gap=12: target dose at t=18 absolute, obs at t=19 - // Past dose (500mg at t=0): C(19) = 500/50 * e^(-0.3*19) ≈ 0.003 mg/L (negligible) - let result_gap12 = posterior.optimize( - target, - Some(12.0), - DoseRange::new(10.0, 1000.0), - 0.5, - Target::Concentration, - )?; + assert!(posterior.n_support_points() > 0); + assert_eq!( + posterior.theta().matrix().nrows(), + posterior.n_support_points() + ); - let doses_gap0 = result_gap0.doses(); - let doses_gap12 = result_gap12.doses(); + let posterior_sum: f64 = posterior.posterior_weights().iter().sum(); + assert!((posterior_sum - 1.0).abs() < 1e-6); - eprintln!("Gap=0 doses: {:?}", doses_gap0); - eprintln!("Gap=12 doses: {:?}", doses_gap12); - - // With gap=0, there's still 
significant residual from the past dose (~1.2 mg/L), - // so the optimizer needs less future dose. With gap=12, the past dose is negligible, - // so it needs more future dose. The optimizable doses should differ. - assert!( - (doses_gap0.last().unwrap() - doses_gap12.last().unwrap()).abs() > 1e-3, - "gap=0 and gap=12 must produce different optimizable doses, \ - but got {:.4} vs {:.4}", - doses_gap0.last().unwrap(), - doses_gap12.last().unwrap() - ); + let population_sum: f64 = posterior.population_weights().iter().sum(); + assert!((population_sum - 1.0).abs() < 1e-6); Ok(()) } -/// Test that the first target event lands at last_past_time + gap -/// and subsequent target times are shifted correctly. #[test] -fn test_time_offset_event_placement() -> Result<()> { +fn test_result_accessors_for_two_stage_api() -> Result<()> { let eq = one_compartment_model(); - let settings = minimal_settings(); - let (theta, weights) = simple_prior(&settings); + let config = minimal_config(); + let (theta, weights) = simple_prior(&config)?; - // Past: dose at t=0, observation at t=6 (last event at t=6) - let past = Subject::builder("patient") - .bolus(0.0, 500.0, 0) - .observation(6.0, 5.0, 0) - .build(); - - let posterior = - BestDosePosterior::compute(&theta, &weights, Some(past), eq.clone(), settings.clone())?; - - // Target: dose at t=0, dose at t=12, obs at t=24 (all relative) + let posterior = BestDosePosterior::compute(&theta, &weights, None, eq, config)?; let target = Subject::builder("patient") .bolus(0.0, 0.0, 0) - .bolus(12.0, 0.0, 0) - .observation(24.0, 5.0, 0) + .observation(6.0, 5.0, 0) .build(); - // gap=0: future starts immediately after last past event (t=6) - // effective_offset = 6 + 0 = 6 - let gap = 0.0; let result = posterior.optimize( target, - Some(gap), + None, DoseRange::new(10.0, 500.0), 0.5, Target::Concentration, )?; - // After concatenation we should have: - // past dose at t=0 (fixed 500mg) - // target dose at t=0+6=6 (optimizable) - // target dose at 
t=12+6=18 (optimizable) - // target obs at t=24+6=30 - - let optimal_subject = result.optimal_subject(); - let mut dose_times = Vec::new(); - let mut obs_times = Vec::new(); - - for occ in optimal_subject.occasions() { - for event in occ.events() { - match event { - Event::Bolus(b) => dose_times.push(b.time()), - Event::Infusion(i) => dose_times.push(i.time()), - Event::Observation(o) => obs_times.push(o.time()), - } - } - } - - eprintln!("Dose times: {:?}", dose_times); - eprintln!("Obs times: {:?}", obs_times); - - // Past dose at t=0 - assert!( - (dose_times[0] - 0.0).abs() < 1e-10, - "First dose (past) should be at t=0, got {}", - dose_times[0] - ); - // First target dose at t = 0 + 6 = 6 - assert!( - (dose_times[1] - 6.0).abs() < 1e-10, - "Second dose should be at t=0+effective_offset=6, got {}", - dose_times[1] - ); - // Second target dose at t = 12 + 6 = 18 - assert!( - (dose_times[2] - 18.0).abs() < 1e-10, - "Third dose should be at t=12+effective_offset=18, got {}", - dose_times[2] - ); - // Observation at t = 24 + 6 = 30 - assert!( - (obs_times[0] - 30.0).abs() < 1e-10, - "Observation should be at t=24+effective_offset=30, got {}", - obs_times[0] + assert_eq!(result.doses().len(), 1); + assert!(result.doses()[0].is_finite()); + assert!(result.objf().is_finite()); + assert!(result.objf() >= 0.0); + assert_eq!( + *result.status(), + pmcore::bestdose::BestDoseStatus::Converged ); + assert!(!result.predictions().predictions().is_empty()); + assert!(result.auc_predictions().is_none()); - // Past dose should remain fixed at 500 - let doses = result.doses(); + let method = result.optimization_method(); assert!( - (doses[0] - 500.0).abs() < 1e-6, - "Past dose should be fixed at 500, got {}", - doses[0] + method == pmcore::bestdose::OptimalMethod::Posterior + || method == pmcore::bestdose::OptimalMethod::Uniform ); Ok(()) } -/// Test that time_offset=None leaves target events unchanged #[test] -fn test_time_offset_none_no_shift() -> Result<()> { +fn 
test_negative_time_offset_rejected() -> Result<()> { let eq = one_compartment_model(); - let settings = minimal_settings(); - let (theta, weights) = simple_prior(&settings); - - let posterior = - BestDosePosterior::compute(&theta, &weights, None, eq.clone(), settings.clone())?; + let config = minimal_config(); + let (theta, weights) = simple_prior(&config)?; + let posterior = BestDosePosterior::compute(&theta, &weights, None, eq, config)?; let target = Subject::builder("patient") .bolus(0.0, 0.0, 0) - .bolus(12.0, 0.0, 0) - .observation(24.0, 5.0, 0) - .build(); - - let result = posterior.optimize( - target, - None, // No offset - DoseRange::new(10.0, 500.0), - 0.5, - Target::Concentration, - )?; - - let optimal_subject = result.optimal_subject(); - let mut dose_times = Vec::new(); - let mut obs_times = Vec::new(); - - for occ in optimal_subject.occasions() { - for event in occ.events() { - match event { - Event::Bolus(b) => dose_times.push(b.time()), - Event::Infusion(i) => dose_times.push(i.time()), - Event::Observation(o) => obs_times.push(o.time()), - } - } - } - - // Without offset, times should be exactly as specified in target - assert!((dose_times[0] - 0.0).abs() < 1e-10); - assert!((dose_times[1] - 12.0).abs() < 1e-10); - assert!((obs_times[0] - 24.0).abs() < 1e-10); - - Ok(()) -} - -// ═════════════════════════════════════════════════════════════════════════════ -// Tests for multi-target / multi-dose optimization -// ═════════════════════════════════════════════════════════════════════════════ - -/// Test that multiple optimizable doses all get meaningful values -#[test] -fn test_multi_dose_all_optimized() -> Result<()> { - let eq = one_compartment_model(); - let settings = minimal_settings(); - let (theta, weights) = simple_prior(&settings); - - let posterior = - BestDosePosterior::compute(&theta, &weights, None, eq.clone(), settings.clone())?; - - // Two optimizable doses, two target concentrations - let target = Subject::builder("patient") - 
.bolus(0.0, 0.0, 0) - .bolus(12.0, 0.0, 0) - .observation(6.0, 5.0, 0) // Target 5 mg/L at t=6 - .observation(18.0, 5.0, 0) // Target 5 mg/L at t=18 + .observation(6.0, 5.0, 0) .build(); let result = posterior.optimize( target, - None, + Some(-1.0), DoseRange::new(10.0, 500.0), 0.5, Target::Concentration, - )?; - - let doses = result.doses(); - eprintln!("Multi-dose optimization: {:?}", doses); - - assert_eq!(doses.len(), 2, "Should optimize 2 doses"); - - // Both doses should be meaningful (not collapsed to minimum) - assert!( - doses[0] > 10.0 + 1.0, - "Dose 1 should be above minimum bound, got {}", - doses[0] - ); - assert!( - doses[1] > 10.0 + 1.0, - "Dose 2 should be above minimum bound, got {}", - doses[1] ); + assert!(result.is_err()); + assert!(result.unwrap_err().to_string().contains("negative")); + Ok(()) } -/// Test that changing target for dose 2 changes dose 2's result #[test] -fn test_multi_target_second_dose_responds_to_target_change() -> Result<()> { +fn test_time_offset_zero_vs_nonzero_differ() -> Result<()> { let eq = one_compartment_model(); - let settings = minimal_settings(); - let (theta, weights) = simple_prior(&settings); - - let posterior = - BestDosePosterior::compute(&theta, &weights, None, eq.clone(), settings.clone())?; + let config = minimal_config(); + let (theta, weights) = simple_prior(&config)?; - // Scenario A: second target is LOW (2 mg/L) - let target_low = Subject::builder("patient") - .bolus(0.0, 0.0, 0) - .bolus(12.0, 0.0, 0) + let past = Subject::builder("patient") + .bolus(0.0, 500.0, 0) .observation(6.0, 5.0, 0) - .observation(18.0, 2.0, 0) // Low second target .build(); - // Scenario B: second target is HIGH (15 mg/L) - let target_high = Subject::builder("patient") + let posterior = BestDosePosterior::compute(&theta, &weights, Some(past), eq, config)?; + let target = Subject::builder("patient") .bolus(0.0, 0.0, 0) - .bolus(12.0, 0.0, 0) - .observation(6.0, 5.0, 0) - .observation(18.0, 15.0, 0) // High second target + 
.observation(1.0, 5.0, 0) .build(); - let result_low = posterior.optimize( - target_low, - None, - DoseRange::new(10.0, 1000.0), - 0.5, - Target::Concentration, - )?; - - let result_high = posterior.optimize( - target_high, - None, + let result_gap0 = posterior.optimize( + target.clone(), + Some(0.0), DoseRange::new(10.0, 1000.0), 0.5, Target::Concentration, )?; - let doses_low = result_low.doses(); - let doses_high = result_high.doses(); - - eprintln!("Low second target: doses = {:?}", doses_low); - eprintln!("High second target: doses = {:?}", doses_high); - - // The second dose should be higher when the second target is higher - assert!( - doses_high[1] > doses_low[1], - "Higher second target ({}) should produce higher second dose, \ - but got low={:.2} vs high={:.2}", - 15.0, - doses_low[1], - doses_high[1] - ); - - Ok(()) -} - -// ═════════════════════════════════════════════════════════════════════════════ -// Tests for BestDosePosterior and BestDoseResult API surface -// ═════════════════════════════════════════════════════════════════════════════ - -/// Test BestDosePosterior accessor methods -#[test] -fn test_posterior_accessors() -> Result<()> { - let eq = one_compartment_model(); - let settings = minimal_settings(); - let (theta, weights) = simple_prior(&settings); - - let posterior = - BestDosePosterior::compute(&theta, &weights, None, eq.clone(), settings.clone())?; - - // n_support_points should match the prior (no filtering with 0 cycles and no data) - assert!( - posterior.n_support_points() > 0, - "Posterior should have at least 1 support point" - ); - - // theta() should return a valid Theta with the correct number of rows - assert_eq!( - posterior.theta().matrix().nrows(), - posterior.n_support_points() - ); - - // posterior_weights() should sum to ~1 - let weight_sum: f64 = posterior.posterior_weights().iter().sum(); - assert!( - (weight_sum - 1.0).abs() < 1e-6, - "Posterior weights should sum to 1.0, got {}", - weight_sum - ); - - // 
population_weights() should also sum to ~1 - let pop_weight_sum: f64 = posterior.population_weights().iter().sum(); - assert!( - (pop_weight_sum - 1.0).abs() < 1e-6, - "Population weights should sum to 1.0, got {}", - pop_weight_sum - ); - - Ok(()) -} - -/// Test BestDoseResult accessor methods -#[test] -fn test_result_accessors() -> Result<()> { - let eq = one_compartment_model(); - let settings = minimal_settings(); - let (theta, weights) = simple_prior(&settings); - - let posterior = - BestDosePosterior::compute(&theta, &weights, None, eq.clone(), settings.clone())?; - - let target = Subject::builder("patient") - .bolus(0.0, 0.0, 0) - .observation(6.0, 5.0, 0) - .build(); - - let result = posterior.optimize( + let result_gap12 = posterior.optimize( target, - None, - DoseRange::new(10.0, 500.0), + Some(12.0), + DoseRange::new(10.0, 1000.0), 0.5, Target::Concentration, )?; - // doses() should return 1 dose - assert_eq!(result.doses().len(), 1); - assert!(result.doses()[0].is_finite()); - - // objf() should be finite and non-negative - assert!(result.objf().is_finite()); - assert!(result.objf() >= 0.0, "Cost should be non-negative"); - - // status() should be Converged (1000 iterations is usually enough for 1D) - assert_eq!( - *result.status(), - pmcore::bestdose::BestDoseStatus::Converged - ); - - // predictions() should have predictions - assert!( - !result.predictions().predictions().is_empty(), - "Predictions should not be empty" - ); - - // optimization_method() should be Posterior or Uniform - let method = result.optimization_method(); - assert!( - method == pmcore::bestdose::OptimalMethod::Posterior - || method == pmcore::bestdose::OptimalMethod::Uniform - ); - - // auc_predictions() should be None for concentration targets - assert!( - result.auc_predictions().is_none(), - "AUC predictions should be None for concentration targets" - ); - - // optimal_subject() should have the optimized dose - let optimal_subj = result.optimal_subject(); - let mut found_dose 
= false; - for occ in optimal_subj.occasions() { - for event in occ.events() { - if let Event::Bolus(b) = event { - assert!(b.amount() > 0.0, "Optimized dose should be > 0"); - found_dose = true; - } - } - } - assert!( - found_dose, - "Should find at least one dose in optimal subject" - ); - - Ok(()) -} - -/// Test that negative time_offset is rejected -#[test] -fn test_negative_time_offset_rejected() -> Result<()> { - let eq = one_compartment_model(); - let settings = minimal_settings(); - let (theta, weights) = simple_prior(&settings); - - let posterior = - BestDosePosterior::compute(&theta, &weights, None, eq.clone(), settings.clone())?; - - let target = Subject::builder("patient") - .bolus(0.0, 0.0, 0) - .observation(6.0, 5.0, 0) - .build(); - - let result = posterior.optimize( - target, - Some(-1.0), // Negative offset should be rejected - DoseRange::new(10.0, 500.0), - 0.5, - Target::Concentration, - ); + let doses_gap0 = result_gap0.doses(); + let doses_gap12 = result_gap12.doses(); - assert!(result.is_err(), "Negative time_offset should be rejected"); - assert!( - result.unwrap_err().to_string().contains("negative"), - "Error message should mention negative time_offset" - ); + assert!((doses_gap0.last().unwrap() - doses_gap12.last().unwrap()).abs() > 1e-3); Ok(()) } -/// Test that posterior can be reused for multiple optimizations -/// This is the key new feature of the two-stage API #[test] fn test_posterior_reuse() -> Result<()> { let eq = one_compartment_model(); - let settings = minimal_settings(); - let (theta, weights) = simple_prior(&settings); - - // Compute posterior once - let posterior = - BestDosePosterior::compute(&theta, &weights, None, eq.clone(), settings.clone())?; + let config = minimal_config(); + let (theta, weights) = simple_prior(&config)?; - // Optimize with different dose ranges + let posterior = BestDosePosterior::compute(&theta, &weights, None, eq, config)?; let target = Subject::builder("patient") .bolus(0.0, 0.0, 0) 
.observation(6.0, 5.0, 0) @@ -1820,24 +1424,15 @@ fn test_posterior_reuse() -> Result<()> { Target::Concentration, )?; - // Both should succeed assert!(result_narrow.doses()[0].is_finite()); assert!(result_wide.doses()[0].is_finite()); + assert!(result_wide.objf() <= result_narrow.objf() + 1e-6); - // Wide range should allow a potentially better (lower cost) result - assert!( - result_wide.objf() <= result_narrow.objf() + 1e-6, - "Wider dose range should give equal or better cost: wide={:.6} vs narrow={:.6}", - result_wide.objf(), - result_narrow.objf() - ); - - // Optimize with different bias weights let result_personal = posterior.optimize( target.clone(), None, DoseRange::new(10.0, 500.0), - 0.0, // Full personalization + 0.0, Target::Concentration, )?; @@ -1845,11 +1440,10 @@ fn test_posterior_reuse() -> Result<()> { target, None, DoseRange::new(10.0, 500.0), - 1.0, // Full population weighting + 1.0, Target::Concentration, )?; - // Both should succeed assert!(result_personal.doses()[0].is_finite()); assert!(result_population.doses()[0].is_finite()); diff --git a/tests/compile_layer_tests.rs b/tests/compile_layer_tests.rs new file mode 100644 index 000000000..b93ec5734 --- /dev/null +++ b/tests/compile_layer_tests.rs @@ -0,0 +1,156 @@ +use anyhow::Result; +use pharmsol::{AssayErrorModel, ErrorPoly}; +use pmcore::prelude::*; + +fn simple_equation() -> equation::ODE { + equation::ODE::new( + |x, p, _t, dx, b, _rateiv, _cov| { + fetch_params!(p, ke); + dx[0] = -ke * x[0] + b[0]; + }, + |_p, _t, _cov| lag! {}, + |_p, _t, _cov| fa! 
{}, + |_p, _t, _cov, _x| {}, + |x, p, _t, _cov, y| { + fetch_params!(p, v); + y[0] = x[0] / v; + }, + ) +} + +fn multi_subject_data() -> Data { + let first = Subject::builder("1") + .bolus(0.0, 100.0, 0) + .observation(1.0, 10.0, 0) + .observation(2.0, 8.0, 0) + .build(); + + let second = Subject::builder("2") + .bolus(0.0, 100.0, 0) + .observation(1.0, 9.0, 0) + .observation(3.0, 7.0, 0) + .build(); + + Data::new(vec![first, second]) +} + +fn structured_covariate_data() -> Data { + let subject = Subject::builder("1") + .covariate("wt", 0.0, 70.0) + .covariate("study_day", 0.0, 1.0) + .bolus(0.0, 100.0, 0) + .observation(1.0, 10.0, 0) + .reset() + .covariate("wt", 0.0, 72.0) + .covariate("study_day", 0.0, 2.0) + .bolus(0.0, 100.0, 0) + .observation(1.5, 8.0, 0) + .build(); + + Data::new(vec![subject]) +} + +fn simple_problem() -> Result> { + let assay_error = AssayErrorModel::additive(ErrorPoly::new(0.0, 0.10, 0.0, 0.0), 2.0); + let observations = ObservationSpec::new() + .add_channel(ObservationChannel::continuous(0, "cp")) + .with_assay_error_models(AssayErrorModels::new().add(0, assay_error)?); + + let model = ModelDefinition::builder(simple_equation()) + .parameters( + ParameterSpace::new() + .add(ParameterSpec::bounded("ke", 0.1, 1.0)) + .add(ParameterSpec::bounded("v", 1.0, 20.0)), + ) + .observations(observations) + .build()?; + + EstimationProblem::builder(model, multi_subject_data()) + .method(EstimationMethod::Nonparametric(NonparametricMethod::Npag( + NpagOptions, + ))) + .output(OutputPlan::disabled()) + .build() +} + +#[test] +fn test_compile_problem_builds_indexes() -> Result<()> { + let compiled = simple_problem()?.compile()?; + + assert_eq!(compiled.design.subject_count(), 2); + assert_eq!(compiled.design.occasion_count(), 2); + assert_eq!(compiled.observation_index.len(), 4); + assert_eq!(compiled.design.parameter_names, vec!["ke", "v"]); + Ok(()) +} + +#[test] +fn test_compile_problem_builds_algorithm_settings() -> Result<()> { + let compiled = 
simple_problem()?.compile()?; + + assert_eq!(compiled.method().algorithm(), Algorithm::NPAG); + assert_eq!(compiled.design.parameter_names.len(), 2); + assert!(!compiled.output_plan().write); + Ok(()) +} + +#[test] +fn test_compile_problem_extracts_structured_covariate_values() -> Result<()> { + let assay_error = AssayErrorModel::additive(ErrorPoly::new(0.0, 0.10, 0.0, 0.0), 2.0); + let observations = ObservationSpec::new() + .add_channel(ObservationChannel::continuous(0, "cp")) + .with_assay_error_models(AssayErrorModels::new().add(0, assay_error)?); + + let model = ModelDefinition::builder(simple_equation()) + .parameters( + ParameterSpace::new() + .add(ParameterSpec::bounded("ke", 0.1, 1.0)) + .add(ParameterSpec::bounded("v", 1.0, 20.0)), + ) + .observations(observations) + .covariates(CovariateSpec::Structured(CovariateEffectsSpec { + subject_effects: Some(CovariateModel::new( + vec!["ke", "v"], + vec!["wt"], + vec![vec![true], vec![false]], + )?), + occasion_effects: Some(CovariateModel::new( + vec!["ke", "v"], + vec!["study_day"], + vec![vec![true], vec![false]], + )?), + })) + .build()?; + + let compiled = EstimationProblem::builder(model, structured_covariate_data()) + .method(EstimationMethod::Nonparametric(NonparametricMethod::Npag( + NpagOptions, + ))) + .output(OutputPlan::disabled()) + .build()? 
+ .compile()?; + + assert_eq!( + compiled.design.structured_covariates.subject_columns, + vec!["wt"] + ); + assert_eq!( + compiled.design.structured_covariates.occasion_columns, + vec!["study_day"] + ); + assert_eq!(compiled.design.structured_covariates.subject_rows.len(), 1); + assert_eq!(compiled.design.structured_covariates.occasion_rows.len(), 2); + assert_eq!( + compiled.design.structured_covariates.subject_rows[0].values, + vec![Some(70.0)] + ); + assert_eq!( + compiled.design.structured_covariates.occasion_rows[0].values, + vec![Some(1.0)] + ); + assert_eq!( + compiled.design.structured_covariates.occasion_rows[1].values, + vec![Some(2.0)] + ); + Ok(()) +} diff --git a/tests/ipm_tests.rs b/tests/ipm_tests.rs index efcd3e0e2..a3fdb0ae4 100644 --- a/tests/ipm_tests.rs +++ b/tests/ipm_tests.rs @@ -1,6 +1,6 @@ use anyhow::Result; use faer::Mat; -use pmcore::structs::psi::Psi; +use pmcore::estimation::nonparametric::Psi; /// Test the IPM with a simple 2x2 matrix #[test] @@ -19,7 +19,7 @@ fn test_burke_ipm_simple() -> Result<()> { let psi = Psi::from(mat); // Run Burke's IPM - let result = pmcore::routines::estimation::ipm::burke(&psi); + let result = pmcore::estimation::nonparametric::burke(&psi); // Should succeed assert!(result.is_ok()); @@ -58,7 +58,7 @@ fn test_burke_ipm_larger() -> Result<()> { let psi = Psi::from(mat); // Run Burke's IPM - let result = pmcore::routines::estimation::ipm::burke(&psi); + let result = pmcore::estimation::nonparametric::burke(&psi); assert!(result.is_ok()); @@ -92,7 +92,7 @@ fn test_burke_ipm_uniform() -> Result<()> { let psi = Psi::from(mat); // Run Burke's IPM - let result = pmcore::routines::estimation::ipm::burke(&psi); + let result = pmcore::estimation::nonparametric::burke(&psi); assert!(result.is_ok()); @@ -131,7 +131,7 @@ fn test_burke_ipm_with_negatives() -> Result<()> { let psi = Psi::from(mat); // Run Burke's IPM - should handle negatives by taking absolute value - let result = 
pmcore::routines::estimation::ipm::burke(&psi); + let result = pmcore::estimation::nonparametric::burke(&psi); assert!(result.is_ok()); @@ -160,7 +160,7 @@ fn test_burke_ipm_with_infinites() { let psi = Psi::from(mat); // Run Burke's IPM - should fail with infinite values - let result = pmcore::routines::estimation::ipm::burke(&psi); + let result = pmcore::estimation::nonparametric::burke(&psi); assert!(result.is_err(), "Should fail with infinite values"); } @@ -177,7 +177,7 @@ fn test_burke_ipm_with_nan() { let psi = Psi::from(mat); // Run Burke's IPM - should fail with NaN values - let result = pmcore::routines::estimation::ipm::burke(&psi); + let result = pmcore::estimation::nonparametric::burke(&psi); assert!(result.is_err(), "Should fail with NaN values"); } @@ -195,7 +195,7 @@ fn test_burke_ipm_high_dimensional() -> Result<()> { let psi = Psi::from(mat); // Run Burke's IPM - let result = pmcore::routines::estimation::ipm::burke(&psi); + let result = pmcore::estimation::nonparametric::burke(&psi); assert!(result.is_ok()); diff --git a/tests/nonparametric_engine_tests.rs b/tests/nonparametric_engine_tests.rs new file mode 100644 index 000000000..5e3bae256 --- /dev/null +++ b/tests/nonparametric_engine_tests.rs @@ -0,0 +1,72 @@ +use anyhow::Result; +use pharmsol::{AssayErrorModel, ErrorPoly}; +use pmcore::prelude::*; + +fn simple_equation() -> equation::ODE { + equation::ODE::new( + |x, p, _t, dx, b, _rateiv, _cov| { + fetch_params!(p, ke); + dx[0] = -ke * x[0] + b[0]; + }, + |_p, _t, _cov| lag! {}, + |_p, _t, _cov| fa! 
{}, + |_p, _t, _cov, _x| {}, + |x, p, _t, _cov, y| { + fetch_params!(p, v); + y[0] = x[0] / v; + }, + ) +} + +fn simple_data() -> Data { + let subject = Subject::builder("1") + .bolus(0.0, 100.0, 0) + .observation(1.0, 10.0, 0) + .observation(2.0, 8.0, 0) + .build(); + + Data::new(vec![subject]) +} + +#[test] +fn test_nonparametric_engine_returns_workspace() -> Result<()> { + let assay_error = AssayErrorModel::additive(ErrorPoly::new(0.0, 0.10, 0.0, 0.0), 2.0); + let observations = ObservationSpec::new() + .add_channel(ObservationChannel::continuous(0, "cp")) + .with_assay_error_models(AssayErrorModels::new().add(0, assay_error)?); + + let model = ModelDefinition::builder(simple_equation()) + .parameters( + ParameterSpace::new() + .add(ParameterSpec::bounded("ke", 0.1, 1.0)) + .add(ParameterSpec::bounded("v", 1.0, 20.0)), + ) + .observations(observations) + .build()?; + + let compiled = EstimationProblem::builder(model, simple_data()) + .method(EstimationMethod::Nonparametric(NonparametricMethod::Npag( + NpagOptions, + ))) + .output(OutputPlan::disabled()) + .runtime(RuntimeOptions { + cycles: 1, + cache: true, + progress: false, + idelta: 0.12, + tad: 0.0, + prior: None, + ..RuntimeOptions::default() + }) + .build()? 
+ .compile()?; + + let workspace = NonparametricEngine::fit(compiled)?; + assert!(workspace.objf().is_finite()); + assert_eq!(workspace.get_theta().parameters().len(), 2); + + let fit_result = workspace.into_fit_result(); + assert_eq!(fit_result.population_summary().parameters.len(), 2); + assert_eq!(fit_result.individual_summaries().len(), 1); + Ok(()) +} diff --git a/tests/onecomp.rs b/tests/onecomp.rs index 58230aca0..ed19fb3d9 100644 --- a/tests/onecomp.rs +++ b/tests/onecomp.rs @@ -18,22 +18,19 @@ fn test_one_compartment_npag() -> Result<()> { }, ); - // Define parameters - let params = Parameters::new().add("ke", 0.1, 1.0).add("v", 1.0, 20.0); - let em = AssayErrorModel::additive(ErrorPoly::new(0.0, 0.10, 0.0, 0.0), 2.0); - let ems = AssayErrorModels::new().add(0, em).unwrap(); - - // Create settings - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems) - .build(); - - settings.set_prior(Prior::sobol(64, 22)); - - settings.set_cycles(100); + let observations = ObservationSpec::new() + .add_channel(ObservationChannel::continuous(0, "cp")) + .with_assay_error_models(AssayErrorModels::new().add(0, em).unwrap()); + + let model = ModelDefinition::builder(eq) + .parameters( + ParameterSpace::new() + .add(ParameterSpec::bounded("ke", 0.1, 1.0)) + .add(ParameterSpec::bounded("v", 1.0, 20.0)), + ) + .observations(observations) + .build()?; // Let known support points let spps: Vec<(f64, f64)> = vec![(0.85, 12.0), (0.52, 5.0), (0.15, 3.0)]; @@ -57,9 +54,20 @@ fn test_one_compartment_npag() -> Result<()> { let data = data::Data::new(subjects); - // Run the algorithm - let mut algorithm = dispatch_algorithm(settings, eq, data)?; - let result = algorithm.fit()?; + let result = EstimationProblem::builder(model, data) + .method(EstimationMethod::Nonparametric(NonparametricMethod::Npag( + NpagOptions::default(), + ))) + .output(OutputPlan::disabled()) + .runtime(RuntimeOptions { + cycles: 100, + prior: 
Some(Prior::sobol(64, 22)), + ..RuntimeOptions::default() + }) + .run()?; + let result = result + .as_nonparametric() + .expect("NPAG should yield a nonparametric result"); // Check the results assert_eq!(result.cycles(), 32); @@ -85,22 +93,19 @@ fn test_one_compartment_npod() -> Result<()> { }, ); - // Define parameters - let params = Parameters::new().add("ke", 0.1, 1.0).add("v", 1.0, 20.0); - let em = AssayErrorModel::additive(ErrorPoly::new(0.0, 0.10, 0.0, 0.0), 2.0); - let ems = AssayErrorModels::new().add(0, em).unwrap(); - - // Create settings - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPOD) - .set_parameters(params) - .set_error_models(ems) - .build(); - - settings.set_prior(Prior::sobol(64, 22)); - - settings.set_cycles(100); + let observations = ObservationSpec::new() + .add_channel(ObservationChannel::continuous(0, "cp")) + .with_assay_error_models(AssayErrorModels::new().add(0, em).unwrap()); + + let model = ModelDefinition::builder(eq) + .parameters( + ParameterSpace::new() + .add(ParameterSpec::bounded("ke", 0.1, 1.0)) + .add(ParameterSpec::bounded("v", 1.0, 20.0)), + ) + .observations(observations) + .build()?; // Let known support points let spps: Vec<(f64, f64)> = vec![(0.85, 12.0), (0.52, 5.0), (0.15, 3.0)]; @@ -124,9 +129,20 @@ fn test_one_compartment_npod() -> Result<()> { let data = data::Data::new(subjects); - // Run the algorithm - let mut algorithm = dispatch_algorithm(settings, eq, data)?; - let result = algorithm.fit()?; + let result = EstimationProblem::builder(model, data) + .method(EstimationMethod::Nonparametric(NonparametricMethod::Npod( + NpodOptions::default(), + ))) + .output(OutputPlan::disabled()) + .runtime(RuntimeOptions { + cycles: 100, + prior: Some(Prior::sobol(64, 22)), + ..RuntimeOptions::default() + }) + .run()?; + let result = result + .as_nonparametric() + .expect("NPOD should yield a nonparametric result"); // Check the results assert_eq!(result.cycles(), 11); @@ -152,22 +168,19 @@ fn 
test_one_compartment_postprob() -> Result<()> { }, ); - // Define parameters - let params = Parameters::new().add("ke", 0.1, 1.0).add("v", 1.0, 20.0); - let em = AssayErrorModel::additive(ErrorPoly::new(0.0, 0.10, 0.0, 0.0), 2.0); - let ems = AssayErrorModels::new().add(0, em).unwrap(); - - // Create settings - let mut settings = Settings::builder() - .set_algorithm(Algorithm::POSTPROB) - .set_parameters(params) - .set_error_models(ems) - .build(); - - settings.set_prior(Prior::sobol(64, 22)); - - settings.set_cycles(100); + let observations = ObservationSpec::new() + .add_channel(ObservationChannel::continuous(0, "cp")) + .with_assay_error_models(AssayErrorModels::new().add(0, em).unwrap()); + + let model = ModelDefinition::builder(eq) + .parameters( + ParameterSpace::new() + .add(ParameterSpec::bounded("ke", 0.1, 1.0)) + .add(ParameterSpec::bounded("v", 1.0, 20.0)), + ) + .observations(observations) + .build()?; // Let known support points let spps: Vec<(f64, f64)> = vec![(0.85, 12.0), (0.52, 5.0), (0.15, 3.0)]; @@ -191,9 +204,20 @@ fn test_one_compartment_postprob() -> Result<()> { let data = data::Data::new(subjects); - // Run the algorithm - let mut algorithm = dispatch_algorithm(settings, eq, data)?; - let result = algorithm.fit()?; + let result = EstimationProblem::builder(model, data) + .method(EstimationMethod::Nonparametric( + NonparametricMethod::Postprob(PostProbOptions::default()), + )) + .output(OutputPlan::disabled()) + .runtime(RuntimeOptions { + cycles: 100, + prior: Some(Prior::sobol(64, 22)), + ..RuntimeOptions::default() + }) + .run()?; + let result = result + .as_nonparametric() + .expect("POSTPROB should yield a nonparametric result"); // Check the results assert_eq!(result.cycles(), 0); diff --git a/tests/output_writer_tests.rs b/tests/output_writer_tests.rs new file mode 100644 index 000000000..bdb1514c5 --- /dev/null +++ b/tests/output_writer_tests.rs @@ -0,0 +1,116 @@ +use anyhow::Result; +use pharmsol::{AssayErrorModel, ErrorPoly}; +use 
pmcore::prelude::*; +use std::path::PathBuf; +use std::time::{SystemTime, UNIX_EPOCH}; + +fn simple_equation() -> equation::ODE { + equation::ODE::new( + |x, p, _t, dx, b, _rateiv, _cov| { + fetch_params!(p, ke); + dx[0] = -ke * x[0] + b[0]; + }, + |_p, _t, _cov| lag! {}, + |_p, _t, _cov| fa! {}, + |_p, _t, _cov, _x| {}, + |x, p, _t, _cov, y| { + fetch_params!(p, v); + y[0] = x[0] / v; + }, + ) +} + +fn simple_data() -> Data { + let subject = Subject::builder("1") + .bolus(0.0, 100.0, 0) + .observation(1.0, 10.0, 0) + .observation(2.0, 8.0, 0) + .build(); + + Data::new(vec![subject]) +} + +fn temp_output_dir() -> PathBuf { + let unique = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("clock should be after unix epoch") + .as_nanos(); + std::env::temp_dir().join(format!("pmcore-output-writer-{unique}")) +} + +#[test] +fn test_fit_result_writes_shared_output_files() -> Result<()> { + let output_dir = temp_output_dir(); + let assay_error = AssayErrorModel::additive(ErrorPoly::new(0.0, 0.10, 0.0, 0.0), 2.0); + let observations = ObservationSpec::new() + .add_channel(ObservationChannel::continuous(0, "cp")) + .with_assay_error_models(AssayErrorModels::new().add(0, assay_error)?); + + let model = ModelDefinition::builder(simple_equation()) + .parameters( + ParameterSpace::new() + .add(ParameterSpec::bounded("ke", 0.1, 1.0)) + .add(ParameterSpec::bounded("v", 1.0, 20.0)), + ) + .observations(observations) + .build()?; + + let mut result = EstimationProblem::builder(model, simple_data()) + .method(EstimationMethod::Nonparametric(NonparametricMethod::Npag( + NpagOptions, + ))) + .output(OutputPlan { + write: true, + path: Some(output_dir.to_string_lossy().to_string()), + }) + .runtime(RuntimeOptions { + cycles: 1, + cache: true, + progress: false, + idelta: 0.12, + tad: 0.0, + prior: None, + ..RuntimeOptions::default() + }) + .run()?; + + result.write_outputs()?; + + assert!(output_dir.join("settings.json").exists()); + 
assert!(output_dir.join("summary.json").exists()); + assert!(output_dir.join("summary.csv").exists()); + assert!(output_dir.join("diagnostics.json").exists()); + assert!(output_dir.join("predictions.csv").exists()); + assert!(output_dir.join("iterations.csv").exists()); + assert!(!output_dir.join("pred.csv").exists()); + assert!(!output_dir.join("cycles.csv").exists()); + assert!(!output_dir.join("covs.csv").exists()); + + let artifacts = result.artifacts(); + assert!(artifacts.files.iter().any(|file| file == "settings.json")); + assert!(artifacts.files.iter().any(|file| file == "predictions.csv")); + assert!(artifacts + .expected_files + .iter() + .any(|file| file == "settings.json")); + assert!(artifacts + .shared_expected_files + .iter() + .any(|file| file == "settings.json")); + assert!(artifacts + .method_specific_expected_files + .iter() + .any(|file| file == "iterations.csv")); + assert!(artifacts.missing_files.is_empty()); + + let predictions = result.predictions(); + assert!(predictions.available); + assert_eq!(predictions.artifact.as_deref(), Some("predictions.csv")); + assert_eq!(predictions.source.as_deref(), Some("in_memory")); + + let diagnostics = result.diagnostics(); + assert!(diagnostics.estimator_metadata.contains_key("algorithm")); + + let _ = std::fs::remove_dir_all(output_dir); + Ok(()) +} diff --git a/tests/results_summary_tests.rs b/tests/results_summary_tests.rs new file mode 100644 index 000000000..283b244d9 --- /dev/null +++ b/tests/results_summary_tests.rs @@ -0,0 +1,86 @@ +use anyhow::Result; +use pharmsol::{AssayErrorModel, ErrorPoly}; +use pmcore::prelude::*; + +fn simple_equation() -> equation::ODE { + equation::ODE::new( + |x, p, _t, dx, b, _rateiv, _cov| { + fetch_params!(p, ke); + dx[0] = -ke * x[0] + b[0]; + }, + |_p, _t, _cov| lag! {}, + |_p, _t, _cov| fa! 
{}, + |_p, _t, _cov, _x| {}, + |x, p, _t, _cov, y| { + fetch_params!(p, v); + y[0] = x[0] / v; + }, + ) +} + +fn simple_data() -> Data { + let subject = Subject::builder("1") + .bolus(0.0, 100.0, 0) + .observation(1.0, 10.0, 0) + .observation(2.0, 8.0, 0) + .build(); + + Data::new(vec![subject]) +} + +#[test] +fn test_nonparametric_fit_result_summary_surface() -> Result<()> { + let assay_error = AssayErrorModel::additive(ErrorPoly::new(0.0, 0.10, 0.0, 0.0), 2.0); + let observations = ObservationSpec::new() + .add_channel(ObservationChannel::continuous(0, "cp")) + .with_assay_error_models(AssayErrorModels::new().add(0, assay_error)?); + + let model = ModelDefinition::builder(simple_equation()) + .parameters( + ParameterSpace::new() + .add(ParameterSpec::bounded("ke", 0.1, 1.0)) + .add(ParameterSpec::bounded("v", 1.0, 20.0)), + ) + .observations(observations) + .build()?; + + let result = EstimationProblem::builder(model, simple_data()) + .method(EstimationMethod::Nonparametric(NonparametricMethod::Npag( + NpagOptions, + ))) + .output(OutputPlan::disabled()) + .runtime(RuntimeOptions { + cycles: 1, + cache: true, + progress: false, + idelta: 0.12, + tad: 0.0, + prior: None, + ..RuntimeOptions::default() + }) + .run()?; + + let summary = result.summary(); + + assert_eq!(summary.parameter_count, 2); + assert_eq!(summary.subject_count, 1); + assert_eq!(summary.observation_count, 2); + assert_eq!(result.population_summary().parameters.len(), 2); + assert_eq!(result.individual_summaries().len(), 1); + let diagnostics = result.diagnostics(); + assert_eq!( + diagnostics.estimator_metadata.get("algorithm"), + Some(&"NPAG".to_string()) + ); + assert_eq!( + diagnostics.estimator_metadata.get("outputs_requested"), + Some(&"false".to_string()) + ); + assert!(!diagnostics.convergence_notes.is_empty()); + assert!(diagnostics.deferred_features.is_empty()); + let predictions = result.predictions(); + assert!(!predictions.available); + assert!(result.artifacts().files.is_empty()); + 
assert!(result.artifacts().expected_files.is_empty()); + Ok(()) +} diff --git a/tests/settings_tests.rs b/tests/settings_tests.rs deleted file mode 100644 index 23992c34b..000000000 --- a/tests/settings_tests.rs +++ /dev/null @@ -1,218 +0,0 @@ -use anyhow::Result; -use pmcore::prelude::*; - -/// Test basic Settings builder construction -#[test] -fn test_settings_builder_basic() -> Result<()> { - let params = Parameters::new().add("ke", 0.1, 1.0).add("v", 1.0, 20.0); - - let em = AssayErrorModel::additive(ErrorPoly::new(0.0, 0.10, 0.0, 0.0), 2.0); - let ems = AssayErrorModels::new().add(0, em)?; - - let settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems) - .build(); - - // Test getters - assert_eq!(settings.config().algorithm, Algorithm::NPAG); - assert_eq!(settings.parameters().names().len(), 2); - - Ok(()) -} - -/// Test Settings serialization to JSON -#[test] -fn test_settings_serialization() -> Result<()> { - let params = Parameters::new().add("ke", 0.1, 1.0).add("v", 5.0, 15.0); - - let em = AssayErrorModel::additive(ErrorPoly::new(0.0, 0.10, 0.0, 0.0), 2.0); - let ems = AssayErrorModels::new().add(0, em)?; - - let settings = Settings::builder() - .set_algorithm(Algorithm::NPOD) - .set_parameters(params) - .set_error_models(ems) - .build(); - - // Serialize to JSON - let json = serde_json::to_string(&settings)?; - - // Should be valid JSON - assert!(json.contains("\"algorithm\"")); - assert!(json.contains("\"parameters\"")); - - // Deserialize back - let deserialized: Settings = serde_json::from_str(&json)?; - assert_eq!(deserialized.config().algorithm, Algorithm::NPOD); - - Ok(()) -} - -/// Test Settings with different algorithms -#[test] -fn test_settings_algorithms() -> Result<()> { - let params = Parameters::new().add("ke", 0.1, 1.0); - let em = AssayErrorModel::additive(ErrorPoly::new(0.0, 0.10, 0.0, 0.0), 2.0); - let ems = AssayErrorModels::new().add(0, em)?; - - // Test NPAG - let 
settings_npag = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params.clone()) - .set_error_models(ems.clone()) - .build(); - assert_eq!(settings_npag.config().algorithm, Algorithm::NPAG); - - // Test NPOD - let settings_npod = Settings::builder() - .set_algorithm(Algorithm::NPOD) - .set_parameters(params.clone()) - .set_error_models(ems.clone()) - .build(); - assert_eq!(settings_npod.config().algorithm, Algorithm::NPOD); - - Ok(()) -} - -/// Test Settings setters -#[test] -fn test_settings_setters() -> Result<()> { - let params = Parameters::new().add("ke", 0.1, 1.0); - let em = AssayErrorModel::additive(ErrorPoly::new(0.0, 0.10, 0.0, 0.0), 2.0); - let ems = AssayErrorModels::new().add(0, em)?; - - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems) - .build(); - - // Test set_cycles - settings.set_cycles(50); - assert_eq!(settings.config().cycles, 50); - - // Test set_algorithm - settings.set_algorithm(Algorithm::NPOD); - assert_eq!(settings.config().algorithm, Algorithm::NPOD); - - // Test set_cache - settings.set_cache(false); - assert_eq!(settings.config().cache, false); - - // Test set_idelta - settings.set_idelta(0.5); - assert_eq!(settings.predictions().idelta, 0.5); - - // Test set_tad - settings.set_tad(24.0); - assert_eq!(settings.predictions().tad, 24.0); - - Ok(()) -} - -/// Test Settings with prior -#[test] -fn test_settings_with_prior() -> Result<()> { - let params = Parameters::new().add("ke", 0.1, 1.0); - let em = AssayErrorModel::additive(ErrorPoly::new(0.0, 0.10, 0.0, 0.0), 2.0); - let ems = AssayErrorModels::new().add(0, em)?; - - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems) - .build(); - - // Set Sobol prior - settings.set_prior(Prior::sobol(100, 42)); - - // Verify prior was set using accessor methods - assert_eq!(settings.prior().points(), Some(100)); - 
assert_eq!(settings.prior().seed(), Some(42)); - - Ok(()) -} - -/// Test Settings with Latin Hypercube prior -#[test] -fn test_settings_latin_prior() -> Result<()> { - let params = Parameters::new().add("ke", 0.1, 1.0).add("v", 5.0, 15.0); - let em = AssayErrorModel::additive(ErrorPoly::new(0.0, 0.10, 0.0, 0.0), 2.0); - let ems = AssayErrorModels::new().add(0, em)?; - - let mut settings = Settings::builder() - .set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems) - .build(); - - // Set Latin Hypercube prior - settings.set_prior(Prior::Latin(50, 123)); - - // Verify prior was set using accessor methods - assert_eq!(settings.prior().points(), Some(50)); - assert_eq!(settings.prior().seed(), Some(123)); - - Ok(()) -} - -/// Test Parameters functionality -#[test] -fn test_parameters() { - let mut params = Parameters::new(); - - // Add parameters - params = params.add("ke", 0.1, 1.0); - params = params.add("v", 5.0, 20.0); - params = params.add("ka", 0.5, 2.0); - - // Check parameter count - assert_eq!(params.names().len(), 3); - - // Check parameter names - let names = params.names(); - assert!(names.contains(&"ke".to_string())); - assert!(names.contains(&"v".to_string())); - assert!(names.contains(&"ka".to_string())); -} - -/// Test ErrorModels construction -#[test] -fn test_error_models() -> Result<()> { - let em1 = AssayErrorModel::additive(ErrorPoly::new(0.0, 0.10, 0.0, 0.0), 2.0); - let em2 = AssayErrorModel::proportional(ErrorPoly::new(0.0, 0.0, 0.15, 0.0), 2.0); - - let mut ems = AssayErrorModels::new(); - ems = ems.add(0, em1)?; - ems = ems.add(1, em2)?; - - // Should have 2 error models - assert_eq!(ems.len(), 2); - - Ok(()) -} - -/// Test Config accessors -#[test] -fn test_config_accessors() -> Result<()> { - let params = Parameters::new().add("ke", 0.1, 1.0); - let em = AssayErrorModel::additive(ErrorPoly::new(0.0, 0.10, 0.0, 0.0), 2.0); - let ems = AssayErrorModels::new().add(0, em)?; - - let settings = Settings::builder() - 
.set_algorithm(Algorithm::NPAG) - .set_parameters(params) - .set_error_models(ems) - .build(); - - let config = settings.config(); - - // Test default values - assert_eq!(config.algorithm, Algorithm::NPAG); - assert!(config.cycles > 0); - assert_eq!(config.cache, true); - - Ok(()) -} diff --git a/tests/structs_tests.rs b/tests/structs_tests.rs index ba2b894d9..ae308d430 100644 --- a/tests/structs_tests.rs +++ b/tests/structs_tests.rs @@ -1,6 +1,6 @@ use anyhow::Result; use faer::{Col, Mat}; -use pmcore::structs::{psi::Psi, theta::Theta, weights::Weights}; +use pmcore::estimation::nonparametric::{Psi, Theta, Weights}; use std::io::Cursor; /// Test Psi creation and basic operations